diff --git "a/5466.jsonl" "b/5466.jsonl" new file mode 100644--- /dev/null +++ "b/5466.jsonl" @@ -0,0 +1,128 @@ +{"seq_id":"42008402376","text":"valor=int(input(\"enter a number:\\t\"))\nresiduo=0\ncontar=0\n#count the divisors of the entered value\nfor i in range(1, valor+1):\n residuo=valor%i\n if(residuo==0):\n contar+=1\n#a prime has exactly two divisors (1 and itself), which also classifies 1 correctly\nif(contar==2):\n print(valor, \" is prime\")\nelse:\n print(valor, \" is not prime\")\n\n\n","repo_name":"MynorEscobar/EjemplosPython","sub_path":"Python3 - Primo.py","file_name":"Python3 - Primo.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12577652923","text":"#!/usr/bin/env python3\n\n#bugs to be fixed\n# 1. cancel() : Get order id and parent id properly \n# 2. Check why kite call is not in while loop\n# 3. add code for this..trade_start flag will be set if the 1min,5min and 15min lists are full \n# 4. check if kite.TRANSACTION_TYPE_SELL format is required instead of just SELL\n# 5. Add order limit to datafile.txt\n# 6. make sure ltp is always between the first and second leg and not out of the range\n# 7. Init all values to zero in the beginning before while loop\n# 8. if countcmpt is zero , if timelimit is hit, do we need to cancel all pending orders ?\n# 9. add timeout for complete wait \n\n# Data structure design\n# Incore list structure\n# =====================\n# \n# for open contracts, one of the ltps will be zero and status will be pending\n# if all fields are filled ,strip the and ; add to 'scrip'.txt file, under \n# in cancel_or_exit() check if cmpt+pndg = 0 is required \n\n# lists\n# =====\n# all generic lists will have elements in the following order\n# \n\nimport socket\nimport logging\nimport time\nimport datetime\nimport os\nimport sys\nimport pdb\nimport operator\nimport csv\nimport threading\nimport xml.etree.ElementTree as xml\nimport requests\nimport signal\n\nfrom lxml import etree\nfrom datetime import datetime, timedelta\nfrom time import sleep\nfrom kiteconnect import KiteConnect\nfrom kiteconnect import KiteTicker\nfrom bs4 import BeautifulSoup\nfrom random import randint # added for randint() call\nfrom urllib.request import urlopen # added for internet connectivity check\nfrom os import path\n\n# Tcp variables\nconn = None\n\ntestmode = 1 # 0 is for real trading and 1 is for testing with ltp from the internet\ndebug_flag = 1\nlogger = None\ncommoditylist = [0]\ntickLength = 0\ntickFlag = 0\norder_id = 1\n\n# check and delete..these might be unnecessary\n#book keeping variables\noffline_orders = [0]\ncompleted_ordr = [[0]*10 for _ in range(5)]\nopen_ordr = [[0]*10 for _ in range(5)]\ncountpndg = [0] * 5 # +1 for buy and -1 for sell\ncountcmpt = [0] * 5 # +1 for buy and -1 for sell\norder_limit = [0] * 5\nconfirm_wait = [0] * 5 # Orders placed but still not updated in placed orders list\nconfirm_wait_t = [0] * 5 # Orders placed but still not updated in placed orders list\n#order_flag = 0 # +1 for open,-1 for cancel ,+100 for complete buy ,-100 for complete sell \ntrade_flag = 0\norderupdate = 0 # incremented by try_order() after every successful kite call\n#kite variables\napikey = None \nreqtoken = None\napisecret = None\naccesstoken = None\nclosetime = 0\n\n#variables for commodity fut\ninstoken = [0] * 5\nscrip = ['0'] * 5\ntradingsymbol = ['0'] * 5\nsymbol = ['0'] * 5\nexpiry = ['0'] * 5\nltpindex = ['0'] * 5\nexchange = \"kite.EXCHANGE_MCX\"\nquantity = 1\nscripfactor = [0.0] * 5\ndayloss = [0.0] * 5\ndayprofit = [0.0] * 5\nscripactive = ['1'] * 5 # 1 indicates scrip can be traded today, 0 indicates it is blocked 
\nmoneyfactor = [0.0] * 5 # Rupees per unit change\n\n#variable for intraday trade\ntxtype = [0] * 5\ntrigger = [0.0] * 5 \ntrend = [0] * 5 #-1 for downtrend and +1 for uptrend\nstoploss = [0.0] * 5\ncandleStart = [0.0] * 5 # starting candle\ncandleEnd = [0.0] * 5 # ending candle\nsmallCandle = [0] * 5\norder_type = \"kite.ORDER_TYPE_SLM\"\nvariety = ['VARIETY_NRML'] * 5\nproduct = \"MIS\"\nlatest_txnid = 0\nltp = [0.0]*5 \nhigh_ref = 0 \nlow_ref = 0\n\ntotalgains = [0.0] * 5\ncounternrml = [0] * 5\ncountermis = [0] * 5\n\ntree = None\nday = None\nnow = None\ntimecounter = 0\nltp_write = 0 # SYNC flag for find_ohlc() \nmincount = 0\n\nonemin = [[0.0]*12 for _ in range(5)]\nfivemin = [[0.0]*12 for _ in range(5)] \nfifteenmin = [[0.0]*12 for _ in range(5)] \nthirtymin = [[0.0]*12 for _ in range(5)] \nonehour = [[0.0]*12 for _ in range(5)] \nrefband = onemin\nrefinterval = 1\n\n# strategies\nfirstcandle = 1\nsixpm = 2\neightpm = 4\n\nbookedprofit = [0.0]*5\nlosscount = 0\nprofitcount = 0\nfifteenflag = 0\nxmlfile = 'accounts.xml'\n\n# Simulation variables \nltpsim = [100.0]*5 \ntestltp = [[0],[0],[0]]\nltprecall = 0\n\ndef printandlog(string):\n print(string)\n logging.info(string)\n\ndef check_internet():\n global testmode\n if(testmode ==0):\n site='https://kite.zerodha.com/'\n elif(testmode == 1):\n site='https://moneycontrol.com/commodity'\n try:\n response = urlopen(site, timeout=10)\n return(1)\n except:\n return(0)\n\ndef data_validate(dataval,datatype):\n global countermis\n global counternrml\n\n logging.info(\"validating data..\")\n if(datatype == 'index'):\n # valid scrip indices are 0..4; the original 'and' made this check unreachable\n if(dataval<0 or dataval > 4):\n return (0)\n elif(datatype == 'product'):\n if(dataval != 'PRODUCT_NRML' \n and dataval != 'PRODUCT_MIS' \n and dataval != 'PRODUCT_CO' \n and dataval != 'PRODUCT_BO' \n and dataval != 'PRODUCT_CNC'):\n return (0)\n elif(datatype == 'order'):\n if(counternrml[dataval] >=2 \n or countermis[dataval]>=2 \n or (counternrml[dataval]+\n countermis[dataval])>2):\n return(0)\n return (1) \n\ndef time_iterate (lis,interval):\n global mincount\n global ltp \n global tickLength\n global now \n global fifteenflag\n global refband\n global refinterval\n\n if((mincount%interval)==0):\n for i in range(0,tickLength):\n for j in range(0,4):\n lis[i].insert(0,ltp[i])\n lis[i].pop()\n if(interval == refinterval):\n #logging.info(\"Band array: {}\".format(refband[0]))\n #logging.info(\"Band array: {}\".format(refband[1]))\n #logging.info(\"Band array: {}\".format(refband[2]))\n fifteenflag = 1 \n\ndef marketdata():\n url = \"https://www.mcxindia.com/backpage.aspx/GetMarketWatch\"\n headers = { \n \"Host\": \"www.mcxindia.com\",\n \"Origin\": \"https://www.mcxindia.com\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36\",\n \"Content-Type\": \"application/json\",\n \"Referer\": \"https://www.mcxindia.com/market-data/market-watch\",\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\", \n }\n\n\n resp = requests.post(url, headers = headers)\n market_data = resp.json()['d']['Data'] \n return (market_data)\n\ndef init_ltp(marketval,index):\n global ltpindex\n global expiry\n global scrip\n global tickLength\n\n #print(ltpindex,\" \",len(marketval),\" \",tickLength,\" \",expiry,\" \",scrip)\n for i in range(0,len(marketval)):\n if(marketval[i]['Symbol'] == str(scrip[index])):\n if(marketval[i]['ExpiryDate'] in str(expiry[index])):\n ltpindex[index] = i\n break\n\ndef find_ohlc
():\n global ltp\n global onemin,fivemin,fifteenmin,thirtymin,onehour\n global mincount\n global timecounter\n global ltp_write \n\n threading.Timer(60,find_ohlc).start()\n if(mincount>15):\n read_flag = 1\n\n ltp_write = 1\n time_iterate(onehour,60)\n time_iterate(thirtymin,30)\n time_iterate(fifteenmin,15) \n time_iterate(fivemin,5) \n time_iterate(onemin,1)\n ltp_write = 0\n \n mincount = mincount +1\n timecounter = timecounter +1\n\ndef trend_valuate (oper,txn,index): \n global trend\n global txtype\n global trigger \n global stoploss\n global fivemin,onemin\n \n trend[index] = 1\n txtype[index] = txn\n trigger[index] = refband[index][-3]\n stoploss[index] = refband[index][-2]\n \n if(oper(fivemin[index][-1],fivemin[index][-4])):\n return 1\n elif(oper(onemin[index][-1], onemin[index][-4])):\n return 1\n return None\n\ndef get_trend(index):\n global ltp_write\n global refband\n\n while(ltp_write == 1):\n dummy = 0\n\n if (refband[index][-1] > refband[index][-4]):\n return(trend_valuate(operator.gt,'BUY',index))\n elif (refband[index][-1] < refband[index][-4]):\n return(trend_valuate(operator.lt,'SELL',index))\n\n#new\n# curl o/p Format: 55572743,217081,CRUDEOIL20APRFUT,\"CRUDEOIL\",0,2020-04-20,0,1,1,FUT,MCX-FUT,MCX\ndef scrip_init ():\n global tradingsymbol\n global exchange\n global symbol\n global scripfactor\n global tree\n global timecounter\n global tickLength\n global scrip\n global expiry\n global dayloss\n global dayprofit\n global order_limit\n \n datafile = datetime.now().strftime('datafile_%d%m%Y.txt')\n if (not path.exists(datafile)):\n logging.info (\"Data file does not exist. Creating a new file\")\n os.system(\"cp datafile.txt \"+datafile)\n os.system(\"chmod 777 \"+datafile)\n else:\n logging.info (\"datafile Exists\")\n\n #index = tickLength\n tree = xml.parse(datafile)\n #scrip[index] = scrip_name.upper()\n marketval = marketdata()\n #print(tickLength)\n for index in range(0,tickLength):\n # get scrip specific data\n instoken[index] = str(tree.find('scrip/'+scrip[index]+'/instoken').text)\n tradingsymbol[index]= str(tree.find('scrip/'+scrip[index]+'/tradingsymbol').text)\n symbol[index] = str(tree.find('scrip/'+scrip[index]+'/symbol').text)\n scripfactor[index] = float(tree.find('scrip/'+scrip[index]+'/scripfactor').text)\n exchange = str(tree.find('scrip/'+scrip[index]+'/exchange').text)\n expiry[index] = str(tree.find('scrip/'+scrip[index]+'/expiry').text)\n # cast to int so the order-limit arithmetic in on_order_update() works\n order_limit[index] = int(tree.find('scrip/'+scrip[index]+'/orderlimit').text)\n dayloss[index] = float(tree.find('scrip/'+scrip[index]+'/dayloss').text)\n dayprofit[index] = float(tree.find('scrip/'+scrip[index]+'/dayprofit').text)\n \n if(scripactive[index] != '0'):\n scripactive[index] = tree.find('scrip/'+scrip[index]+'/scripactive').text\n moneyfactor[index] = float(tree.find('scrip/'+scrip[index]+'/profitloss').text)\n \n \n command = \"curl \\\"https://api.kite.trade/instruments\\\" | grep \\\"%s2.*FUT\\\" | sort\" %scrip[index]\n \n result = None\n try:\n result = os.popen(command).read()\n except Exception as e:\n printandlog (\"Exception in reading trade instruments\")\n \n #time.sleep(5) \n #timecounter = 0\n #while(timecounter<5):\n #if(result):\n # break\n #time.sleep(1) \n #timecounter = 0\n if(result):\n line = result.split(\"\\n\")[0]\n out = line.split(\",\")\n out[5] = datetime.strptime(out[5],'%Y-%m-%d').strftime('%d%b%Y')\n out[5] = out[5].upper()\n out[3] = out[3][1:-1]\n if(out[3] == scrip[index]):\n instoken[index] = out[0]\n tradingsymbol[index]= out[2]\n #symbol[index] = 'MCX:' + 
tradingsymbol[index]\n symbol[index] = tradingsymbol[index]\n expiry[index] = str(out[5])\n #update the datasheet\n tree.find('scrip/'+scrip[index]+'/tradingsymbol').text = tradingsymbol[index]\n tree.find('scrip/'+scrip[index]+'/symbol').text = symbol[index]\n tree.find('scrip/'+scrip[index]+'/instoken').text = instoken[index]\n tree.find('scrip/'+scrip[index]+'/expiry').text = expiry[index]\n tree.find('scrip/'+scrip[index]+'/dayloss').text = str(dayloss[index])\n tree.find('scrip/'+scrip[index]+'/scripactive').text = scripactive[index]\n tree.write(datafile)\n \n init_ltp(marketval,index) \n #get the trend through first 15 minutes data\n val = get_trend(index)\n\n return val\n#new\ndef read_credentials ():\n global apikey\n global reqtoken\n global apisecret\n global accesstoken\n global closetime\n tree = xml.parse(\"datafile.txt\")\n\n # get credentials\n apikey = str(tree.find('credentials/apikey').text)\n reqtoken = str(tree.find('credentials/reqtoken').text)\n apisecret = str(tree.find('credentials/apisecret').text)\n accesstoken = str(tree.find('credentials/accesstoken').text)\n closetime = str(tree.find('credentials/closetime').text)\n\ndef socket_init ():\n global s\n host = '' # Symbolic name meaning all available interfaces\n port = 12345 # Arbitrary non-privileged port\n \n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((host, port))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\ndef kite_init ():\n global kite\n global logging \n global reqtoken\n global debug_flag\n\n data = None\n kite = KiteConnect(apikey)\n if(not kite):\n logging.info (\"Kite connect failed..Exiting\")\n exit()\n\n if(debug_flag):\n logging.info (\"Kite connect is successful\")\n\n url = kite.login_url()\n print (url)\n\n reqtoken= input(\"Enter Request Token:\")\n\n data = kite.generate_session(reqtoken, api_secret=apisecret)\n if(not data):\n logging.info (\"Session generate call failed..Exiting!\")\n exit()\n if(debug_flag):\n printandlog (\"session Generation is successful\")\n\n # set_access_token() returns None, so its return value cannot be used for error checks\n kite.set_access_token(data[\"access_token\"])\n if(debug_flag):\n logging.info (\"Access token is set\")\n \n tree = xml.parse(\"datafile.txt\")\n tree.find('credentials/reqtoken').text = reqtoken\n tree.find('credentials/accesstoken').text = data[\"access_token\"]\n tree.write(\"datafile.txt\")\n logging.info (\"reqtoken and accesstoken in the Datafile are updated\")\n return 1\n\n#new\ndef add_ele (rootele,newele):\n global xmlfile\n parser = etree.XMLParser(remove_blank_text=True)\n #tree = etree.parse('accounts.xml', parser)\n tree = etree.parse(xmlfile, parser)\n\n out = rootele.findall('./'+newele)\n if (not out):\n out = etree.SubElement(rootele,newele)\n else :\n out = out[0]\n\n tree.write(xmlfile, pretty_print=True)\n return out\n\ndef update_file (sym,ltp,txntypeval,prod):\n global xmlfile\n\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.parse(xmlfile, parser)\n day = datetime.today().strftime('%Y%m%d')\n\n root = tree.getroot()\n daytag = add_ele(root,'day'+day)\n prodtag = add_ele(daytag,prod)\n symboltag = add_ele(prodtag,sym)\n \n leng = len(symboltag.getchildren())\n \n txntypetag = add_ele(symboltag,str(txntypeval)+str(leng+1))\n\n if(tree.find('day'+day+'/'+prod+'/'+sym+'/'+str(txntypeval)+str(leng+1)).text == None):\n tree.find('day'+day+'/'+prod+'/'+sym+'/'+str(txntypeval)+str(leng+1)).text = str(ltp)\n tree.write(xmlfile, pretty_print=True)\n return 1\n else :\n 
tree.write(xmlfile, pretty_print=True)\n return 0\n\ndef append_to_list(i,lis,liscntr,last_odr,index):\n global testmode\n\n if (testmode == 0):\n t_txntype = 'txntype'\n\n elif (testmode == 1):\n t_txntype = 5\n\n if(i[t_txntype] == 'BUY') :\n last_odr.append(1)\n liscntr[index] = liscntr[index]+1\n else:\n last_odr.append(-1)\n liscntr[index] = liscntr[index] - 1\n \n if(lis != [] and completed_ordr != []):\n # don't add but remove if we have a pair for this order \n if(lis == completed_ordr):\n # match on the order entries' sign field (element 4), not the enumeration index\n j = [lst for lst in lis[index] if lst[4] == last_odr[4]*(-1)]\n if j:\n completed_ordr[index].remove(j[-1])\n return 1\n if (lis[index] != [0]):\n lis[index].append(last_odr)\n else:\n lis[index][0] = last_odr[:]\n #sort the list based on orderid.\n #This is not necessary now but doing it for future use\n lis[index] = sorted(lis[index], key=operator.itemgetter(0), reverse=True)\n return 0\n return 1\n\ndef frame_order(copyitem,index):\n global testmode\n global open_ordr\n global completed_ordr\n global countpndg\n global countcmpt\n\n ordr = []\n\n if (testmode == 0):\n t_order_id = 'order_id'\n t_parent_order_id = 'parent_order_id'\n t_status = 'status'\n t_txntype = 'txntype'\n t_average_price = 'average_price'\n t_product = 'product'\n\n elif (testmode == 1):\n t_order_id = 0\n t_parent_order_id = 1\n t_status = 2\n t_txntype = 5\n t_average_price = 6\n t_product = 7\n\n ordr.append(copyitem[t_order_id])\n ordr.append(copyitem[t_parent_order_id])\n ordr.append(copyitem[t_average_price])\n ordr.append(copyitem[t_txntype])\n\n if ((copyitem[t_status] == 'TRIGGER PENDING') or (copyitem[t_status] =='OPEN PENDING')):\n append_to_list(copyitem,open_ordr,countpndg,ordr,index)\n elif (copyitem[t_status] == 'COMPLETE'):\n append_to_list(copyitem,completed_ordr,countcmpt,ordr,index)\n ordr.clear()\n\ndef update_offline_orders():\n global offline_orders\n global instoken\n \n if (offline_orders[0] != 0):\n for i in range(len(offline_orders)):\n for j in range(len(instoken)):\n if (offline_orders[i][4] == instoken[j]):\n if(ltp[j] == offline_orders[i][6]):\n offline_orders[i][2] = 'COMPLETE'\n#new\ndef on_order_update (ws, data):\n global latest_txnid\n global countpndg\n global countcmpt\n global totalgains\n global counternrml\n global countermis\n global tree\n global testmode\n global scrip\n global offline_orders\n global xmlfile\n global tickLength\n\n for index in range(0,tickLength):\n counternrml[index] = 0\n countermis[index] = 0\n totalgains[index] = 0\n\n if (testmode == 0):\n t_order_id = 'order_id'\n t_parent_order_id = 'parent_order_id'\n t_status = 'status'\n t_instrument_token = 'instrument_token'\n t_txntype = 'txntype'\n t_average_price = 'average_price'\n t_product = 'product'\n\n if((not ws) and (not data)) :\n # resubscribe through the global ticker; ws is None on manual reconciliation calls\n kws.subscribe(instoken)\n kws.set_mode(kws.MODE_LTP, instoken)\n \n # Fetch all orders\n val = kite.orders()\n\n elif (testmode == 1):\n t_order_id = 0\n t_parent_order_id = 1\n t_status = 2\n t_instrument_token = 4\n t_txntype = 5\n t_average_price = 6\n t_product = 7\n\n # Fetch all orders\n val = offline_orders\n #print(val)\n \n\n # make sure orders are fetched properly\n if(val!=[0] and len(val) != 0):\n # Open order and completed order lists are filled in this function.\n # if the lists are not cleared here, we need to find duplicate entry,\n # sync with completed orders, kite.order() etc.so, clear here itself\n open_ordr.clear()\n completed_ordr.clear()\n \n for i in val:\n # Update only those that are new \n #print(i[t_order_id])\n #print(latest_txnid)\n #if (i[t_order_id] > 
int(latest_txnid)): # This is not required now\n if(True):\n latest_txnid = i[t_order_id]\n it = 0\n \n #get trading symbol and place data in the corresponding sublist\n index = [it for it, lst in enumerate(instoken) if str(i[t_instrument_token]) in str(lst)][0]\n it = 0\n \n #print(\"index :\",index)\n if(confirm_wait[index]!=0):\n # for all orders including cancelled orders ,\n # check for the order in confirm_wait and remove it\n subindex = [it for it, lst in enumerate(confirm_wait[index]) if str(i[t_order_id]) in str(lst)][0]\n if(subindex):\n confirm_wait[index].pop(subindex)\n confirm_wait_t[index] = 0\n else:\n confirm_wait_t[index] = 1\n \n frame_order(i,index)\n #print(\"status:\",i[t_status])\n if (str(i[t_status]) == 'COMPLETE'):\n #print(\"order complete\")\n #update counters and calculate total gains\n if(str(i[t_product]) == 'PRODUCT_NRML'):\n counternrml[index] = counternrml[index] +1\n elif(str(i[t_product]) == 'PRODUCT_MIS'):\n countermis[index] = countermis[index] +1\n else:\n string = \"Error in product value \"+ str(i[t_product])\n printandlog(string)\n \n if(str(i[t_txntype]) == 'BUY'):\n totalgains[index] = totalgains[index] - float(i[t_average_price])\n elif(str(i[t_txntype]) == 'SELL'):\n totalgains[index] = totalgains[index] + float(i[t_average_price])\n else:\n string = \"Error in Transaction type value \"+ str(i[t_txntype])\n printandlog(string)\n\n if(open_ordr !=[]):\n # now check for the order in pending list and remove it\n subindex = [it for it, lst in enumerate(open_ordr[index]) if str(i[t_order_id]) in str(lst)][0]\n if(subindex):\n open_ordr[index].pop(subindex)\n order_limit[index] = order_limit[index] -1\n tree.find('scrip/'+scrip[index]+'/orderlimit').text = str(order_limit[index])\n \n # Check if this entry is already present in the file.\n file_update = 1\n with open(xmlfile) as f:\n dat = f.read()\n #if(not dat):\n #print(\"No Data\")\n # if(i[t_order_id] in dat):\n # print(\"orderid present\")\n # else:\n # print(\"orderid not present\")\n\n if(str(i[t_order_id]) in dat):\n file_update = 0\n if(file_update):\n # entry not present. update accounts.xml file\n data = str(i[t_order_id]) +':'+ str(i[t_average_price])\n val = update_file(scrip[index],i[t_average_price],i[t_txntype],i[t_product])\n return 1\n else : \n # Error getting order list from kite. Return none without doing anything\n return None \n\ndef frame_offline_orders(index,tx,product,stat):\n global local_order_id \n global now\n global order_id\n global tradingsymbol\n global instoken\n global ltp\n\n orderlocal = [0]\n\n# 0. order_id\n orderlocal[0] = order_id\n order_id = order_id + 1\n\n# 1. parent_order_id\n orderlocal.append(order_id-1)\n\n# 2. status\n orderlocal.append(stat)\n\n# 3. tradingsymbol\n orderlocal.append(tradingsymbol[index])\n\n# 4. instrument_token\n orderlocal.append(instoken[index])\n\n# 5. transaction_type\n orderlocal.append(tx)\n\n# 6. average_price\n orderlocal.append(ltp[index])\n\n# 7. product\n orderlocal.append(product)\n \n# 8. 
order_timestamp\n orderlocal.append(now)\n #print(\"framed offline orders: \",orderlocal)\n return orderlocal\n\ndef band_update(band,index,val):\n if(band[index][0] == 0): # open\n band[index][0] = val\n if(band[index][1] < val): # high\n band[index][1] = val\n if(band[index][2] > val): # low\n band[index][2] = val\n if(band[index][3] != val): # close\n band[index][3] = val \n\ndef update_candle():\n global testmode \n global tickLength\n global tickFlag\n global ltp \n global onemin\n global fivemin\n global fifteenmin\n global thirtymin\n global onehour\n \n for index in range(0,tickLength):\n band_update(onemin,index,ltp[index]) \n band_update(fivemin,index,ltp[index]) \n band_update(fifteenmin,index,ltp[index]) \n band_update(thirtymin,index,ltp[index]) \n band_update(onehour,index,ltp[index]) \n\ndef ltp_simulate():\n global tickFlag\n global tickLength\n global ltp\n global testltp\n global ltprecall\n global ltpsim\n\n threading.Timer(10,ltp_simulate).start()\n\n for index in range(0,tickLength) :\n ltpsim[index] = ltpsim[index] + testltp[0][ltprecall]\n ltp[index] = ltpsim[index]\n if(ltp[index] != 0.0):\n tickFlag = 1\n ltprecall = ltprecall +1\n\ndef get_ltp_online():\n global tickFlag\n global tickLength\n global ltpindex\n global ltp\n\n threading.Timer(20,get_ltp_online).start()\n marketval = '0'\n marketval = marketdata()\n if(marketval == '0'):\n printandlog(\"Could not fetch market data online!\")\n return(0)\n\n for index in range(0,tickLength) :\n ltp[index] = (float)(marketval[ltpindex[index]]['LTP'])\n if(ltp[index] != 0.0):\n tickFlag = 1\n \ndef on_ticks (ws, ticks):\n global ltp\n global tickLength\n global tickFlag\n\n # Callback to receive ticks.\n logging.debug(\"Ticks: {}\".format(ticks))\n \n tickLength = len(ticks)\n for i in range(0,tickLength):\n ltp[i] = float(ticks[i]['last_price'])\n if(ltp[i] != 0.0):\n tickFlag = 1\n\ndef on_connect (ws, response):\n # Callback on successful connect.\n # Subscribe to a list of instrument_tokens (CRUDEOILAPR20FUT and SILVERMAPR20FUT here).\n global instoken\n\n print(instoken )\n print( 'subscriptionlist element is ', type(instoken) ) #to confirm subscriptionlist items are int\n ws.subscribe(instoken)\n\n # Set scrips to tick in LTP mode; instoken is already a list.\n ws.set_mode(ws.MODE_LTP, instoken)\n\ndef on_close (ws, code, reason):\n # On connection close stop the event loop.\n # Reconnection will not happen after executing `ws.stop()`\n ws.stop()\n\ndef ticker_init ():\n global kws\n # Initialise\n kws = KiteTicker(apikey, accesstoken, debug=True)\n\n # Assign the callbacks.\n kws.on_ticks = on_ticks\n kws.on_connect = on_connect\n kws.on_close = on_close\n kws.on_order_update = on_order_update\n\n response = kws.connect( threaded=True, disable_ssl_verification=True, proxy=None)\n logging.info('KWS.connect done')\n\n# remove the entry list from scrip sublist under pending list \ndef list_remove (listhead,data):\n index = [it for it, lst in enumerate(listhead) if data in lst]\n listhead.pop(index[0])\n\n# calltype 1 for new order, 2 for modify ,3 for exit and 4 for cancel\ndef try_order(calltype,content):\n global orderupdate\n\n ordered_id = None\n try:\n if(calltype == 1) :\n logging.info(\"Placing a new order...\")\n ordered_id = kite.place_order(content)\n elif(calltype == 2) :\n logging.info(\"Modifying an existing order...\")\n ordered_id = kite.modify_order(content)\n elif(calltype == 3) :\n logging.info(\"Exiting an existing order...\")\n ordered_id = kite.exit_order(content)\n elif(calltype == 4) :\n 
logging.info(\"Cancelling an existing order...\")\n ordered_id = kite.cancel_order(content)\n \n #time.sleep(1)\n orderupdate = orderupdate + 1\n logging.info(\"Order placed. ID is: {}\".format(ordered_id))\n except Exception as e:\n logging.info(\"Order placement failed: {}\".format(e.message))\n #time.sleep(1)\n \n return ordered_id\n\n\n# item from only pending list can be cancelled.\n# Delete from pending_list if 1 is returned\n# This is most likely an EoD closure.Ensure no TRIGGER PENDING order in kite.order()\n# argument : item from pending list\ndef cancel (odr):\n global testmode\n t_order_id = 0\n t_parent_order_id = 1\n t_status = 2\n t_txntype = 5\n t_average_price = 6\n\n if(testmode == 0):\n content = \"variety=%s, order_id=%s\" %(variety,odr[t_order_id])\n if(odr[1] != 0):\n content = content + \"parent_order_id=%s\" %odr[t_parent_order_id]\n return(try_order(4,content))\n elif (testmode == 1):\n odr[t_status] = 'CANCELLED'\n return(0)\n\n# item from only pending list can exit.\n# check if this triggers on_order_update, if so, no need to do anything\n# else delete from pending list \n# argument : item from pending list\ndef end_order (odr):\n global testmode\n t_order_id = 0\n t_status = 2\n t_instrument_token = 4\n t_average_price = 6\n\n if(testmode == 0):\n content = \"variety=%s, order_id=%s\" %(variety,odr[t_order_id])\n return(try_order(3,content))\n elif (testmode == 1):\n index = [it for it, lst in enumerate(instoken) if odr[t_instrument_token] in lst][0]\n odr[t_status] = 'COMPLETE'\n odr[t_average_price] = ltp[index]\n return(0)\n\n# item from only pending list can be modified.\n# Delete from pending_list if 1 is returned\n# call book keeping if 1 is returned \n# argument : item from pending list,price,trigger_price\ndef modify (odr,pr,tpr,sl):\n global testmode\n t_order_id = 0\n t_parent_order_id = 1\n t_average_price = 6\n\n if(testmode == 0):\n content = \"variety=%s, order_id=%s, parent_order_id=%s, quantity=None, price=%s, order_type=None, trigger_price=%s, validity=None, disclosed_quantity=None\" %(variety[index],odr[t_order_id],odr[t_parent_order_id],pr,tpr)\n return(try_order(2,content))\n elif (testmode == 1):\n odr[t_average_price] = pr\n return(0)\n\n# def : call_order\n# arguments : commodity index, txntype: BUY/SELL\n# Function : wrapper function to place a new order\ndef call_order (index,txntype,ordertype,product):\n global testmode\n global trigger\n global trend\n global ltp\n global variety \n global offline_orders\n\n func_name = sys._getframe().f_code.co_name\n #print(\"Executing the function \"+func_name)\n\n if (variety[index] == 'VARIETY_CO'):\n sign = 0\n \n if (txntype == 'BUY'):\n sign = 1\n elif (txntype == 'SELL'):\n sign = -1\n \n price = 1 # dummy value...needs to be corrected\n #price = ltp[index] + float(10*(sign * scripfactor[index]) )\n #trig = trigger[index]\n \n if (testmode == 0):\n if (variety[index] == 'VARIETY_CO'):\n content = \"variety=%s,exchange=%s,tradingsymbol=\\\"%s\\\",transaction_type=%s,order_type=ORDER_TYPE_LIMIT,price=%s,trigger_price=trig\" %(variety[index],exchange,tradingsymbol[index],txntype,price,trig)\n else:\n content = \"variety=VARIETY_REGULAR,exchange=%s,tradingsymbol=\\\"%s\\\",transaction_type=%s,order_type=%s,product=%s\" %(variety[index],exchange,tradingsymbol[index],txntype,ordertype,product)\n\n return(try_order(1,content))\n\n elif (testmode == 1):\n if (variety[index] == 'VARIETY_CO'):\n offline_orders.append(frame_offline_orders(index,txntype,product,'OPEN PENDING'))\n if (txntype == 'BUY'): # 
place second leg\n offline_orders.append(frame_offline_orders(index,'SELL',product,'TRIGGER PENDING'))\n elif (txntype == 'SELL'): # place second leg\n offline_orders.append(frame_offline_orders(index,'BUY',product,'TRIGGER PENDING'))\n else:\n if(offline_orders != [0]):\n offline_orders.append(frame_offline_orders(index,txntype,product,'COMPLETE'))\n else:\n offline_orders[0] = frame_offline_orders(index,txntype,product,'COMPLETE')[:]\n\n return (order_id)\n\ndef place_second_leg(index):\n global closetime\n # Two possibilities here \n # i . Time limit (or)\n # ii. second leg order is in pending list \n for i in open_ordr[index]:\n val = None\n if(now.strftime('%H:%M:%S') >= closetime):\n logging.info (\"Trading Time limit reached. Exit all pending orders!\")\n val = end_order(i)\n else: # Time is okay\n #i[2] is the price, i[3] is BUY/SELL and i[4] is the +/-1 sign \n delta = i[4]*(i[2] - ltp[index])\n # check for Rs.500/- difference\n if (delta >= scripfactor[index]):\n modifyprice = (ltp[index]+(i[4]*scripfactor[index]))\n val = modify(i,modifyprice,1.5*modifyprice,None)\n if(not val):\n return None\n elif(delta <0):\n # order gone below stop loss\n # this is less likely to happen\n val = end_order(i)\n if(not val):\n return (None)\n # call: record keeping here\n return(on_order_update(None,None))\n\n# The strategy is to set stoploss equivalent to Rs.500/-\n# and to trail by stoploss equivalent to Rs.500/- \ndef strategy5point (index):\n global countcmpt\n global countpndg\n global order_limit\n global closetime\n \n # countcmpt and countpndg can take 0,-1,+1.so,9 combinations here\n if(countcmpt[index] == 0):\n # Two possibilities here \n # i . Either this is the start of trade (or)\n # ii. All pending trades are closed \n if(now.strftime('%H:%M:%S') >= closetime):\n logging.info (\"Trading Time limit reached.Exit after all orders are closed!\")\n\n elif (order_limit[index] == 0) :\n print (\"Order limit reached for %s\" %symbol[index])\n exit()\n elif(not countpndg[index]):\n # This for start of the day or when no order is pending\n val = None\n val = get_trend(index)\n\n if(not val):\n print (\"Could not get trigger value for %s\" %symbol[index])\n return None\n\n if(trigger[index] != None):\n logging.info (\"start_trading: calling call_order!\") \n val = call_order(index,txtype[index],'ORDER_TYPE_LIMIT','PRODUCT_MIS')\n if(val != None):\n if(variety != \"VARIETY_CO\"):\n print (\"Limit order has been placed.Place second leg order!\")\n # place stoploss order\n # make sure ,first leg order is active\n # call: record keeping here\n return(on_order_update(None,None))\n else: # call_order failed \n print (\"new order placement failed for %s\" %symbol[index])\n return None\n # if pending orders are present ,two cases\n # if time is reached , cancel all pending orders\n # else adjust the trigger\n elif (countpndg[index]) :# pending list has members\n return(place_second_leg(index))\n elif (countcmpt[index] != 0):\n if((countpndg[index] == 0) or (countpndg[index]+countcmpt[index] !=0)):\n # place second leg here\n if(countcmpt[index] > 0):\n trend[index] = -1\n txtype[index] = 'SELL'\n elif(countcmpt[index] < 0):\n trend[index] = 1\n txtype[index] = 'BUY'\n\n trigger[index] = ltp[index] + (trend[index]*scripfactor[index])\n stoploss[index] = ltp[index] - (trend[index]*scripfactor[index])\n \n #place the second leg order\n val = call_order(index,txtype[index],'ORDER_TYPE_LIMIT','PRODUCT_MIS')\n if(val != None):\n if(variety != \"VARIETY_CO\"):\n print (\"Limit order has been placed.Place second leg 
order!\")\n return(on_order_update(None,None))\n else: # call_order failed \n print (\"new order placement failed for %s\" %symbol[index])\n return (None)\n \n elif(countpndg[index] != 0): \n return(place_second_leg(index))\n\n# Check if any order is not confirmed\ndef check_confirm_wait(index):\n global confirm_wait_t\n global confirm_wait\n\n # if order is placed but confirmation hasnt come,\n # check update order and return \n if(confirm_wait[index]):\n if(confirm_wait_t[index] >= 20):\n for i in confirm_wait[index]:\n confirm_wait[index].remove(0)\n confirm_wait_t[index] = 0\n\n confirm_wait_t[index] = confirm_wait_t[index] +1\n \n # set total gains to 0 as PL is calculated \n # afresh everytime on_order_update is called \n val = on_order_update(None,None)\n\n if(confirm_wait[index]):\n logging.info(\"Order is in waiting queue.cant place a new order\") \n return (None)\n\n return(1)\n\ndef cancel_or_exit(lis,index):\n global confirm_wait_t\n global confirm_wait\n global countcmpt\n\n for i in lis:\n val = None\n if(countcmpt[index] == 0) :\n val = cancel(i)\n elif(countcmpt[index] != 0) :\n val = end_order(i)\n confirm_wait[index].append(val)\n confirm_wait_t[index] = 1\n return(0)\n\n# Check if end time is reached.if so,signal to close the trade \ndef time_check (index):\n global confirm_wait_t\n global confirm_wait\n global countcmpt\n global countpndg\n global closetime\n\n if(not check_confirm_wait(index)):\n return (None)\n\n if((now.strftime('%H:%M:%S') >= closetime) or (order_limit[index] == 0)):\n if((countcmpt[index] == 0) and (countpndg[index] == 0)):\n logging.info (\"Trading Time limit reached.No new trade! Exiting\")\n return (None)\n elif(countpndg[index] != 0) :\n if(now.strftime('%H:%M:%S') >= closetime) :\n # if timelimit is hit ,cancel or exit\n cancel_or_exit(open_ordr,index)\n return (None)\n return (1)\n\n# modify the order,moving towards ltp\ndef adjust_open_order(index,high,low):\n global open_ordr\n\n end = [it for it, i in open_ordr[index] if ((i[2] - ltp[index])*i[4] <0)]\n cancel_or_exit(end,index)\n \n # add logic to adjust sl based on realtime data\n\n # adjust trigger to Rs.500 diff with ltp\n adjust = [it for it, i in open_ordr[index] if ((i[2] - ltp[index])*i[4] > scripfactor[index])]\n trigger = ltp[index]- i[4]* scripfactor[index]\n for lis in adjust:\n modify(lis,trigger+i[4]*0.2*scripfactor[index],trigger,None)\n \n return(0)\n\n\n# The strategy is to set stoploss equivalent to close price\n# of last five minute band and exit if target is hit\ndef strategy5minband (index,end):\n global countcmpt\n global countpndg\n global totalgains \n # if time limit is hit,Either cancel,exit order or do nothing\n if(not timecheck(index)):\n return None\n \n # 9 conditions here : countcmpt =[0,1,-1] and countpndg= [0,1,-1] \n #if((countpndg[index] == 0) or (abs(countcmpt[index]) - abs(countpndg[index]) != 0)) :\n # under this case , only new orders are placed.no other possibility\n # 1. get trend for 30 mins and one hour\n # 2. place order at prev 15min open and second leg at 15min close\n # 3. 
Update the order list\n #if(countcmpt[index] == 0) :\n # New First leg order has to be placed \n #elif (countcmpt[index] != 0) :\n # New second leg order has to be placed\n # elif is not used as pending orders went in above case(abs()) are to be processed \n if(countpndg[index] != 0) :\n adjust_open_order(index)\n \n return(on_order_update(None,None))\n\ndef endscripTrade(index):\n global counternrml\n global countermis\n global ltp\n global scrip\n\n if(counternrml[index]%2 ==1):\n logging.info(\"**********************************\")\n logging.info(\"ORDER: closing {} NRML SELL for {}\".format(scrip[index],ltp[index]))\n logging.info(\"**********************************\")\n return(call_order(index,'SELL','ORDER_TYPE_MARKET','PRODUCT_NRML'))\n if(countermis[index]%2 ==1):\n logging.info(\"**********************************\")\n logging.info(\"ORDER: closing {} MIS BUY for {}\".format(scrip[index],ltp[index]))\n logging.info(\"**********************************\")\n return(call_order(index,'BUY','ORDER_TYPE_MARKET','PRODUCT_MIS'))\n\ndef netpl_reached(index):\n global totalgains\n global counternrml\n global countermis\n global dayloss\n global moneyfactor\n global ltp\n global refband\n global bookedprofit\n\n netpl = 0.0\n \n #print(scrip[index],\"band start:\" ,refband[index][4],\" band end: \",refband[index][7])\n print(\"nrml count =\",counternrml[index],\"mis count = \",countermis[index])\n if((counternrml[index] + countermis[index])%2 == 0):\n print(\"even count\")\n netpl = totalgains[index]\n elif((counternrml[index])%2 != 0):\n print(\"nrml on\")\n print(totalgains[index])\n print(ltp[index])\n print(netpl)\n netpl = totalgains[index] + ltp[index]\n elif((countermis[index])%2 != 0):\n print(\"mis on\")\n netpl = totalgains[index] - ltp[index]\n\n netpl = float(\"{:.2f}\".format(netpl))\n pl = scrip[index]+\":P/L for the day(in Rs.):\"+str(netpl * moneyfactor[index])\n print(\"netpl:\",netpl)\n printandlog(pl)\n \n logging.info(\"gains: {} day loss {} dayprofit {} netpl {}\".format(totalgains[index],dayloss[index],dayprofit[index],netpl))\n printandlog(\"**********************************\")\n print(\"Booked Profit:\",bookedprofit[index])\n print(\"dayloss:\",dayloss[index]) \n if((netpl - bookedprofit[index]) > (-1*(dayloss[index]/2))):\n string = str(scrip[index])+\": Step target reached.booking the profit !!\"\n printandlog(string)\n cancel_or_exit(open_ordr,index)\n bookedprofit[index] = netpl\n on_order_update(None,None)\n orderdecision(index)\n return(0)\n\n if((netpl-bookedprofit[index]) < dayloss[index]):\n string = str(scrip[index])+\": Maximum Loss for the scrip has been reached.Exiting this scrip!!\"\n printandlog(string)\n scripactive[index] = '0'\n return(1)\n\n if(netpl > dayprofit[index]): \n string = str(scrip[index])+\": Maximum profit for the scrip has been reached.Exiting this scrip!!\"\n printandlog(string)\n scripactive[index] = '0'\n return(1)\n return(0)\n\ndef orderdecision(index):\n global refband\n global totalgains\n global counternrml\n global countermis\n global now \n global ltp\n global closetime\n global candleStart\n global candleEnd\n global smallCandle\n global scripfactor\n \n if(now.strftime('%H:%M:%S') >= closetime):\n logging.info (\"Trading Time limit reached.Exit after all orders are closed!\")\n return(endscripTrade(index))\n\n if(refband[index][6] != 0.0):\n logging.info(\"candleEnd {} index {} refband{} close :{}\".format(candleEnd[index],index,refband[index],refband[index][7]))\n candleEnd[index] = refband[index][7]\n 
if(smallCandle[index] != 1):\n candleStart[index] = refband[index][4]\n else:\n logging.info(\"{}: last candle not updated\".format(scrip[index]))\n return(0)\n \n string = \"Open price:\"+str(candleStart[index])+\" Close price:\"+str(candleEnd[index])\n printandlog(string)\n if(candleEnd[index]>candleStart[index]):\n if(candleEnd[index]-candleStart[index]< scripfactor[index]):\n smallCandle[index] = 1\n return(0)\n else:\n smallCandle[index] = 0\n\n if(counternrml[index]%2 ==0):\n # initial condition ,when no order is done\n string = scrip[index] + \"ORDER: NRML Buy.Trend Up!\"\n printandlog(string)\n return(call_order(index,'BUY','ORDER_TYPE_MARKET','PRODUCT_NRML'))\n elif(countermis[index] %2 ==1):\n # active sell order case\n string = scrip[index] + \"ORDER: MIS Buy to close Sell.Trend reversed Up!\"\n printandlog(string)\n return(call_order(index,'BUY','ORDER_TYPE_MARKET','PRODUCT_MIS'))\n return(0)\n elif(candleStart[index]>candleEnd[index]):\n if(candleStart[index]-candleEnd[index]< scripfactor[index]):\n smallCandle[index] = 1\n return(0)\n else:\n smallCandle[index] = 0\n\n if(countermis[index]%2 ==0):\n # initial condition ,when no order is done\n string = scrip[index] + \"ORDER: MIS Sell.Trend Down!\"\n printandlog(string)\n return(call_order(index,'SELL','ORDER_TYPE_MARKET','PRODUCT_MIS'))\n elif(counternrml[index] %2 ==1):\n # active buy order case\n string = scrip[index] + \"ORDER: NRML Sell to close Buy.Trend reversed Down!\"\n printandlog(string)\n return(call_order(index,'SELL','ORDER_TYPE_MARKET','PRODUCT_NRML'))\n return(0)\n\n# Nonpositional strategy\n#\n# Open: if the closing price of the last candle is greater than \n# the closing price of the previous green candle,go for a buy\n# else if the closing price of the last candle is lesser than the closing price \n# of the previous red candle ,go for sell\n# else if closing price of red candle is below the opening price of previous green\n# or vice versa, go for the respective position without closing the previous call\n#\n# After the above steps,if next candle closes in the same direction as previous ,\n# close the opposite side candle.else wait for the next candle\n# \n# Close : 1 unit increase \n#\n# Starting time : 3rd candle of the day\n# End time : till volatility increases , in the evening\n# \n# Margin Requirement : 1 NRML buy + 1 MIS Sell + losses + brokerage \n# Stop loss : 2 net losses per day\n# Stop Gain : 10 net gains per day\n# least Profit : Loss 18 days , Profit 4 days , Net Profit per month 4 Gains\n# Least Loss : Loss 19 days, Profit 3 days , Net loss per month 8 Losses\n# Maximum loss : Loss 22 days..44 Losses\n#\n# Call this function once every 15 minutes from main\ndef strategyNonPositional():\n global now\n global tickLength\n \n out = 0\n\n func_name = sys._getframe().f_code.co_name\n for index in range(0,tickLength):\n out = 1\n logging.info(\"***********************************\")\n if(scripactive[index] !='1'):\n string = scrip[index]+\" not active\"\n printandlog(string)\n continue\n \n if(not check_confirm_wait(index)):\n string = scrip[index]+\" Order Pending\"\n printandlog(string)\n continue\n \n if(refband[index][4] == 0.0 or refband[index][7] == 0.0):\n string = scrip[index]+\" LTP didn't initiate\"\n printandlog(string)\n continue\n\n if(refband[index][4] == refband[index][7]):\n string = scrip[index]+\" LTP didn't change\"\n printandlog(string)\n continue\n else:\n logging.info(\"{} open price:{} close price:{}\".format(scrip[index],refband[index][4],refband[index][7]))\n \n #if (netpl_reached(index)):\n # print(scrip[index],\" Maximum profit loss for the scrip has been reached.Exiting this scrip!!\")\n # logging.info(\"{} Maximum profit loss for the scrip has been reached.Exiting this scrip!!\".format(scrip[index]))\n # scripactive[index] = '0'\n # call_scrip()\n # if(endscripTrade(index)):\n # continue\n \n out = out + orderdecision(index)\n logging.info(\"***********************************\")\n\n return(out)\n\ndef parse (data):\n global txtype\n global trigger\n global index \n\n call_request = data.split(\"*\")\n if(call_request[0] == \"S\"):\n txtype = 
\"SELL\"\n elif(call_request[0] == \"B\"):\n txtype = \"BUY\"\n else:\n print (\"Error: Invalid Request..Neither BUY nor SELL\")\n return None\n \n trigger = str(call_request[1]) \n call_order(index,txtype,'ORDER_TYPE_MARKET','PRODUCT_MIS')\n conn.sendall(\"transaction_type=\"+txtype+\",trigger=\"+trigger)\n return 1\n\ndef call_scrip():\n global tickLength\n global scrip\n \n tickLength = 0\n logging.info(\"Init Script\")\n scrip[0] = 'NATURALGAS'\n tickLength = tickLength +1\n #scrip[1] = 'SILVERM'\n #tickLength = tickLength +1\n #scrip[2] = 'GOLDM'\n #tickLength = tickLength +1\n scrip_init() \n\ndef ctrlchandler(signum, frame):\n global tickLength\n global counternrml\n global countermis\n result = 1\n\n printandlog (\"Ctrl+c from user.Exit after all orders are closed!\")\n \n for index in range(0,tickLength):\n endscripTrade(index)\n sys.exit()\n\ndef main ():\n global conn\n global trade_flag\n global now\n global logger\n global debug_flag\n global mincount \n global tickLength\n global tickFlag\n global testmode\n global fifteenflag\n global closetime\n global xmlfile\n global refinterval\n global scrip\n global scripactive\n\n now = datetime.now()\n LOG_FILENAME = datetime.now().strftime('logs/logfile_%H%M%S_%d%m%Y.log')\n logger = logging.getLogger('my-logger')\n logger.propagate = True\n if(debug_flag):\n logging.basicConfig(filename=LOG_FILENAME,level=logging.INFO) \n \n signal.signal(signal.SIGINT, ctrlchandler)\n # wait till time hits multiples of 15 minutes\n printandlog(\"In main function\") \n call_scrip()\n printandlog(\"starting script run\")\n \n if(testmode == 0):\n xmlfile = 'accounts.xml'\n kite_init()\n ticker_init()\n \n # reconcilliation, incase of a restart.else, this will have no effect \n on_order_update(None,None)\n \n else : # test mode operation\n xmlfile = 'offlineaccounts.xml'\n printandlog(\"Getting LTP online\")\n get_ltp_online()\n #ltp_simulate()\n\n printandlog(\"waiting for tickFlag\")\n while(tickFlag != 1):\n time.sleep(1)\n \n\n printandlog(\"Reading credentials\")\n read_credentials()\n \n now = datetime.now()\n while (now.minute%refinterval):\n now = datetime.now()\n printandlog(\"Getting OHLC\")\n find_ohlc()\n minCount = 0\n \n index = 0\n logging.info(\"Getting on order flag\")\n while (1):\n if(tickFlag):\n update_candle()\n tickFlag = 0\n\n now = datetime.now()\n if(now.strftime('%H:%M:%S') >= closetime):\n end_trade = True\n logging.info (\"Trading Time limit reached.Exit after all orders are closed!\")\n \n if(fifteenflag and check_internet()):\n printandlog(\"***********************************\")\n print(\"Time: \",now.strftime('%H:%M:%S'))\n logging.info(\"Time: {}\".format(now.strftime('%H:%M:%S')))\n #if(strategyNonPositional()):\n # on_order_update(None,None)\n \n strategyNonPositional()\n for index in range(0,tickLength):\n if(netpl_reached(index)):\n call_scrip()\n if(endscripTrade(index)):\n continue\n on_order_update(None,None)\n\n printandlog(\"***********************************\")\n fifteenflag = 0\n\n #socket_init()\n \n #s.listen(1)\n #conn, addr = s.accept()\n\n #while True:\n # try:\n # data = conn.recv(1024)\n # if not data: \n # print (\"No Data.\")\n # #break\n # conn.close()\n # else:\n # print('Connected by', addr)\n # print (\"Requested: \"+data)\n # parse(data)\n #conn.sendall(\"Reply from Server: welcome!\")\n # except socket.error:\n # print (\"Socket Error Occured.\")\n # break\n \n #conn.close()\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"littleadam/optiontrading","sub_path":"commodity.py","file_name":"commodity.py","file_ext":"py","file_size_in_byte":53217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"43758645346","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport featuretools as ft\n\n\nfrom scipy import stats\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# from sklearn.linear_model import Ridge\n# from sklearn.model_selection import KFold, cross_val_score\n# from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n# from sklearn.model_selection import GridSearchCV\n# from sklearn.model_selection import RandomizedSearchCV\n\ntest = pd.read_csv(\"D:\\\\SalesPred\\\\Input\\\\test_data\\\\test_t02dQwI.csv\")\ntrain = pd.read_csv(\"D:\\\\SalesPred\\\\Input\\\\train_data\\\\train_kOBLwZA.csv\")\n\nprint(test.shape, train.shape)\n\ndef concat(X,Y):\n \"\"\"\n X: train dataset.\n Y: test dataset.\n return: concatenated dataframe.\n \"\"\"\n df=pd.concat([X,Y], ignore_index=True)\n return df\ndf = concat(train, test)\n# print(df.shape)\n\ndef find_uniques(df):\n return df.apply(lambda x: len(x.unique()))\nfind_uniques(df)\n# print(find_uniques(df))\n\n#df.isnull().sum()\n#print(df.isnull().sum())\n \n# find_uniques(df)\n# print(find_uniques(df))\n\ndef frequency_each_item(df,col):\n \"\"\"\n This function prints the unique values of the given columns.\n df: dataframe\n col: column names.\n return: None\n \"\"\"\n for i in col:\n print(\"Frequency of each category for: \", i)\n print(df[i].value_counts())\n print(\"-\"*100)\ncol_name = [\"Item_Fat_Content\", \"Item_Type\", \"Outlet_Size\", \"Outlet_Location_Type\", \"Outlet_Type\"]\nfrequency_each_item(df, col_name)\n# print(frequency_each_item)\n\n# function for replacing \"low fat\", \"LF\", \"reg\" with only 2 categories, \n# which will be \"Low Fat\" & \"Regular\".\n\nname_dict = {\"low fat\" : 'Low Fat',\n \"LF\" : \"Low Fat\", \n \"reg\" : \"Regular\"}\n\ndef combine_name(df, col, values):\n return df[col].replace(values, inplace= True)\n\ncombine_name(df, \"Item_Fat_Content\", name_dict)\n# print(df[\"Item_Fat_Content\"].value_counts())\n\n# name_dict = {\"reg\" : \"Regular\",\n# \"LF\" : \"Low Fat\",\n# \"low fat\" : \"Low Fat\"}\n# def merge_str(df, col, value):\n# return df[col].replace(value, inplace=True)\n\n# merge_str(df, \"Item_Fat_Content\", name_dict) \n# print(df[\"Item_Fat_Content\"].value_counts())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"AmiTamakuwala/BigMart_LinearReg_top","sub_path":"src/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15506772397","text":"from common import common\nimport logging\nimport argparse\nimport json\n\nclass sql_executor():\n \n def __init__(self,connection_type=None,config_section=None):\n \n #postgres configuration/initialization\n if connection_type == 'postgres':\n #trying import of modules\n try:\n import psycopg2 as connector\n except:\n logging.error('Failed to import psycopg2 - please check installed packages.')\n raise Exception('Failed to import psycopg2 - please check installed packages.')\n \n #trying fetching config and creating connection\n try:\n connection_info = common.fetch_config(section=config_section,format_type='postgres') \n self.connection = connector.connect(connection_info)\n self.cursor = 
self.connection.cursor()\n except Exception as ex:\n logging.error(f'{ex}')\n raise ex \n \n #mssql configuration/initialization\n elif connection_type == 'mssql':\n #trying import of modules\n try:\n import pyodbc as connector\n except:\n logging.error('Failed to import pyodbc - please check installed packages.')\n raise Exception('Failed to import pyodbc - please check installed packages.')\n \n #trying fetching config and creating connection\n try:\n connection_info = common.fetch_config(section=config_section,format_type='sql_server')\n self.connection = connector.connect(connection_info)\n self.cursor = self.connection.cursor()\n except Exception as ex:\n logging.error(f'{ex}')\n raise ex \n \n #mysql configuration/initialization\n elif connection_type == 'mysql':\n #trying import of modules\n try: \n import mysql.connector as connector\n except: \n logging.error('Failed to import mysql.connector - please check installed packages.')\n raise Exception('Failed to import mysql.connector - please check installed packages.') \n \n #trying fetching config and creating connection\n try:\n connection_info = common.fetch_config(section=config_section)\n self.connection = connector.connect(**connection_info)\n self.cursor = self.connection.cursor()\n except Exception as ex:\n logging.error(f'{ex}')\n raise ex\n \n #other variables\n self.param_identifier = ':' \n \n def setup_connection_and_cursor(self,connection):\n self.connection = connection\n self.cursor = connection.cursor()\n \n def execute_sql_statement(self,sql_statement,parameters=None):\n if parameters != None:\n sql_statement = common.param_query(sql_statement,parameters,param_identifier=self.param_identifier)\n try:\n logging.info(f'Executing sql statement: {sql_statement}')\n self.cursor.execute(sql_statement)\n try:\n rows_affected = self.cursor.rowcount\n except:\n rows_affected = None\n \n if sql_statement.strip().lower().startswith('insert into'):\n logging.info(f'Insert statement succeeded. Rows affected: {rows_affected}')\n \n if sql_statement.strip().lower().startswith('delete from'):\n logging.info(f'Delete statement succeeded. 
Rows affected: {rows_affected}')\n \n if sql_statement.strip().lower().startswith('create'):\n logging.info('Create statement succeeded.')\n \n if sql_statement.strip().lower().startswith('drop table'):\n logging.info('Drop table statement succeeded.')\n \n if sql_statement.strip().lower().startswith('truncate'):\n logging.info('Truncate statement succeeded.')\n \n if sql_statement.strip().lower().startswith('alter'):\n logging.info('Alter statement succeeded.')\n \n except Exception as ex:\n raise ex\n \n def execute_sql_file(self,file_name,parameters=None):\n queries = common.split_query(file_name)\n for query in queries:\n try:\n logging.info(f'SQL file: {file_name} executing statement: {query[0]} starting line: {query[2]}')\n self.execute_sql_statement(query[1],parameters=parameters)\n except Exception as ex:\n self.connection.rollback()\n logging.error(f'SQL file: {file_name} failed on line: {query[2]} with error: {str(ex)}')\n raise ex\n self.connection.commit()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-f','--file_name',help='String of location to file.',required=True)\n parser.add_argument('-c','--connection_type',help='Connection type.',required=True)\n parser.add_argument('-cf','--config_section',help='Config section.',required=True)\n parser.add_argument('-param','--parameters',help='SQL script parameters - will replace in sql script.',type=json.loads,required=False)\n \n parse_args, empty = parser.parse_known_args()\n args = vars(parse_args) \n \n file_name = args['file_name']\n connection_type = args['connection_type']\n config_section = args['config_section']\n parameters = args['parameters'] or None \n \n connections_supported = ('postgres','mssql','mysql') \n if connection_type not in connections_supported:\n raise Exception(f'Connection type not supported. Try again. Currently supported connection types: {\", \".join(connections_supported)}')\n \n executor = sql_executor(connection_type=connection_type,config_section=config_section)\n executor.execute_sql_file(file_name=file_name,parameters=parameters)\n \n ","repo_name":"oddkid80/jamesmford90","sub_path":"scripts/sql/execute_sql.py","file_name":"execute_sql.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17994341973","text":"#!/bin/python3\nfrom datetime import datetime as dt\nimport sys\n\ndef time_delta(t1, t2):\n fmt = '%a %d %b %Y %H:%M:%S %z'\n return int(abs((dt.strptime(t1, fmt)-dt.strptime(t2, fmt)).total_seconds()))\n \nif __name__ == \"__main__\":\n t = int(input().strip())\n for a0 in range(t):\n t1 = input().strip()\n t2 = input().strip()\n delta = time_delta(t1, t2)\n print(delta)\n","repo_name":"navjindervirdee/hackerrank","sub_path":"Python/Date and Time/Medium/Time Delta/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"43104897483","text":"import openai\nimport os\nimport sys\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nopenai.api_key = os.environ[\"api_keyopenai\"]\n\ndata = [\n {\"role\": \"user\", \"content\": \"{}\".format(sys.argv[1])}]\n\ndatat = \"\"\n\n\nres = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": 'If I need to search for information on the internet, I will type \"&\" followed by what needs to be searched, and then send it to the user. 
After receiving the answer, I will provide a response based on that information'},\n {\"role\": \"user\", \"content\": \"cde in abc\"},\n {\"role\": \"assistant\", \"content\": \"&cde in abc\"},\n {\"role\": \"user\", \"content\": \"cde1:10\"},\n {\"role\": \"assistant\", \"content\": \"cde1: 10\"},\n\n] + data\n)\n\n\nprint(res)\n#print(ress)\nprint(res.choices[0].message.content.encode('utf-8').decode('utf-8') + \"\\n\")\n#print(ress.choices[0].message.content.encode('utf-8').decode('utf-8'))\n\n#planned additions\n#autogpt integration\n#kakaotalk integration","repo_name":"sestsn2403/kaot-gpt","sub_path":"gpt.py","file_name":"gpt.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3193536876","text":"import cv2\r\nimport numpy as np\r\n\r\ndef nothing(x):\r\n pass\r\ndef createTrackbar():\r\n cv2.namedWindow(\"thresholding\")\r\n cv2.createTrackbar(\"Hue min\",\"thresholding\",0,179,nothing)\r\n cv2.createTrackbar(\"Hue max\",\"thresholding\",179,179,nothing)\r\n cv2.createTrackbar(\"sat min\",\"thresholding\",0,255,nothing)\r\n cv2.createTrackbar(\"sat max\",\"thresholding\",255,255,nothing)\r\n cv2.createTrackbar(\"val min\",\"thresholding\",0,255,nothing)\r\n cv2.createTrackbar(\"val max\",\"thresholding\",255,255,nothing)\r\n\r\n\r\nimg =cv2.imread('Images/hand.jpg')\r\ncv2.imshow(\"original image\",img)\r\ngray_scale =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n#cv2.imshow(\"gray scale image\",gray_scale)\r\n\r\ncreateTrackbar()\r\n\r\nwhile True:\r\n #T=cv2.getTrackbarPos(\"T\",\"thresholding\")\r\n #print(T)\r\n #img_hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n #lower=np.array([0,0,0])\r\n #upper=np.array([180,255,255])\r\n #thresh_img=cv2.inRange(img,lower,upper)\r\n\r\n #_,thresh_img=cv2.threshold(gray_scale,T,255,cv2.THRESH_BINARY)\r\n #cv2.imshow(\"thresh image\",thresh_img)\r\n img_hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n huemin=cv2.getTrackbarPos(\"Hue min\",\"thresholding\")\r\n huemax=cv2.getTrackbarPos(\"Hue max\",\"thresholding\")\r\n satmin=cv2.getTrackbarPos(\"sat min\",\"thresholding\")\r\n satmax=cv2.getTrackbarPos(\"sat max\",\"thresholding\")\r\n valmin=cv2.getTrackbarPos(\"val min\",\"thresholding\")\r\n valmax=cv2.getTrackbarPos(\"val max\",\"thresholding\")\r\n lower=np.array([huemin,satmin,valmin])\r\n upper=np.array([huemax,satmax,valmax])\r\n print(lower,upper)\r\n thresh_img=cv2.inRange(img_hsv,lower,upper)\r\n cv2.imshow(\"thresh image\",thresh_img)\r\n imgCopy=img.copy()\r\n contours,_=cv2.findContours(thresh_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\r\n cv2.drawContours(imgCopy,contours,-1,(0,0,255),2)\r\n\r\n cv2.imshow(\"original image\",imgCopy)\r\n\r\n key=cv2.waitKey(1)\r\n if(key==ord('q')):\r\n break\r\ncv2.destroyAllWindows()\r\ncv2.waitKey(0)","repo_name":"Sunny70333/my1stproject","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3937897612","text":"from os.path import join\nimport json\nfrom json import encoder\n\nfrom sklearn.metrics import fbeta_score\nimport numpy as np\nimport matplotlib as mpl\n# For headless environments\nmpl.use('Agg') # NOQA\nimport matplotlib.pyplot as plt\n\nfrom rastervision.common.settings import VALIDATION\nfrom rastervision.common.utils import plot_img_row, _makedirs\n\nfrom rastervision.tagging.data.planet_kaggle import TagStore\n\nVALIDATION_EVAL = 'validation_eval'\n\nencoder.FLOAT_REPR = 
lambda o: format(o, '.5f')\n\n\nclass Scores():\n \"\"\"A set of scores for the performance of a model on a dataset.\"\"\"\n def __init__(self, y_true, y_pred, dataset, active_tags):\n self.f2_samples = fbeta_score(\n y_true, y_pred, beta=2, average='samples')\n self.f2_labels = fbeta_score(y_true, y_pred, beta=2,\n average='macro')\n f2_subscores = fbeta_score(y_true, y_pred, beta=2, average=None)\n\n self.atmos_scores, self.common_scores, self.rare_scores = {}, {}, {}\n for tag_ind, tag in enumerate(active_tags):\n f2_subscore = f2_subscores[tag_ind]\n if tag in dataset.atmos_tags:\n self.atmos_scores[tag] = f2_subscore\n if tag in dataset.common_tags:\n self.common_scores[tag] = f2_subscore\n if tag in dataset.rare_tags:\n self.rare_scores[tag] = f2_subscore\n\n def to_json(self):\n return json.dumps(self.__dict__, sort_keys=True, indent=4)\n\n def save(self, path):\n scores_json = self.to_json()\n with open(path, 'w') as scores_file:\n scores_file.write(scores_json)\n\n\ndef plot_prediction(generator, all_x, y_true, y_pred,\n file_path):\n dataset = generator.dataset\n fig = plt.figure()\n\n nb_subplot_cols = 3 + len(generator.active_input_inds)\n grid_spec = mpl.gridspec.GridSpec(1, nb_subplot_cols)\n\n all_x = generator.calibrate_image(all_x)\n rgb_input_im = all_x[:, :, dataset.rgb_inds]\n imgs = [rgb_input_im]\n titles = ['RGB']\n\n if generator.dataset.nb_channels > 3:\n ir_im = all_x[:, :, dataset.ir_ind]\n imgs.append(ir_im)\n titles.append('IR')\n\n ndvi_im = all_x[:, :, dataset.ndvi_ind]\n imgs.append(ndvi_im)\n titles.append('NDVI')\n\n plot_img_row(fig, grid_spec, 0, imgs, titles)\n\n add_pred_tags, remove_pred_tags = \\\n generator.tag_store.get_tag_diff(y_true, y_pred)\n y_true_strs = sorted(generator.tag_store.binary_to_strs(y_true))\n\n y_true_strs = ', '.join(y_true_strs)\n add_pred_tags = ', '.join(add_pred_tags)\n remove_pred_tags = ', '.join(remove_pred_tags)\n tag_info = 'ground truth: {}\\nfalse +: {}\\nfalse -: {}'.format(\n y_true_strs, add_pred_tags, remove_pred_tags)\n fig.text(0.15, 0.35, tag_info, fontsize=5)\n\n plt.savefig(file_path, bbox_inches='tight', format='png', dpi=300)\n plt.close(fig)\n\n\ndef plot_predictions(run_path, options, generator):\n validation_pred_path = join(run_path, 'validation_preds.csv')\n\n validation_plot_path = join(run_path, 'validation_plots')\n _makedirs(validation_plot_path)\n\n validation_pred_tag_store = TagStore(\n tags_path=validation_pred_path, active_tags=options.active_tags)\n split_gen = generator.make_split_generator(\n VALIDATION, target_size=None,\n batch_size=options.batch_size, shuffle=False, augment_methods=None,\n normalize=True, only_xy=False)\n\n sample_count = 0\n plot_sample_count = 0\n y_trues = []\n y_preds = []\n for batch_ind, batch in enumerate(split_gen):\n for sample_ind in range(batch.x.shape[0]):\n file_ind = batch.file_inds[sample_ind]\n all_x = batch.all_x[sample_ind, :, :, :]\n\n y_true = generator.tag_store.get_tag_array([file_ind])\n y_trues.append(y_true)\n y_pred = validation_pred_tag_store.get_tag_array([file_ind])\n y_preds.append(y_pred)\n\n if (options.nb_eval_plot_samples is None or\n plot_sample_count < options.nb_eval_plot_samples):\n is_mistake = not np.array_equal(y_true, y_pred)\n if is_mistake:\n plot_sample_count += 1\n plot_path = join(\n validation_plot_path, '{}_debug.png'.format(file_ind))\n plot_prediction(\n generator, all_x, y_true[0, :], y_pred[0, :],\n plot_path)\n\n sample_count += 1\n\n if (options.nb_eval_samples is not None and\n sample_count >= 
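# The Scores class above wraps three fbeta_score calls; on a toy multilabel
# pair they look like this (values are illustrative only).
import numpy as np
from sklearn.metrics import fbeta_score

y_true = np.array([[1, 0, 1], [0, 1, 1]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])
print(fbeta_score(y_true, y_pred, beta=2, average='samples'))  # per-sample mean
print(fbeta_score(y_true, y_pred, beta=2, average='macro'))    # per-label mean
print(fbeta_score(y_true, y_pred, beta=2, average=None))       # one score per label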
options.nb_eval_samples):\n                    break\n\n        if (options.nb_eval_samples is not None and\n                sample_count >= options.nb_eval_samples):\n            break\n\n    y_true = np.concatenate(y_trues, axis=0)\n    y_pred = np.concatenate(y_preds, axis=0)\n    if options.nb_eval_samples is not None:\n        y_true = y_true[0:options.nb_eval_samples, :]\n        y_pred = y_pred[0:options.nb_eval_samples, :]\n\n    return y_true, y_pred\n\n\ndef validation_eval(run_path, options, generator):\n    y_true, y_pred = plot_predictions(run_path, options, generator)\n\n    scores = Scores(\n        y_true, y_pred, generator.dataset, generator.tag_store.active_tags)\n    scores_path = join(run_path, 'scores.json')\n    scores.save(scores_path)\n","repo_name":"yoninachmany/raster-vision-deepglobe-semseg","sub_path":"src/rastervision/tagging/tasks/validation_eval.py","file_name":"validation_eval.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"47"} +{"seq_id":"16820312993","text":"import tkinter as tk\nimport tkinter.ttk as ttk\n\n\nclass MultipleAutocompleteEntry(ttk.Entry):\n    \"\"\"\n    Subclass of :class:`ttk.Entry` that features autocompletion.\n\n    To enable autocompletion use :meth:`set_completion_list` to define\n    a list of possible strings to hit.\n    To cycle through hits use down and up arrow keys.\n    \"\"\"\n\n    def __init__(self, master=None, completevalues=None, **kwargs):\n        \"\"\"\n        Create an AutocompleteEntry.\n\n        :param master: master widget\n        :type master: widget\n        :param completevalues: autocompletion values\n        :type completevalues: list\n        :param kwargs: keyword arguments passed to the :class:`ttk.Entry` initializer\n        \"\"\"\n        ttk.Entry.__init__(self, master, **kwargs)\n        self._completion_list = completevalues\n        self.set_completion_list(completevalues)\n        self.position = 0\n\n    def set_completion_list(self, completion_list):\n        \"\"\"\n        Set a new auto completion list\n\n        :param completion_list: completion values\n        :type completion_list: list\n        \"\"\"\n        self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list\n        self.position = 0\n        self.bind('<KeyRelease>', self.handle_keyrelease)\n\n    def autocomplete(self, delta=0):\n        \"\"\"\n        Autocomplete the Entry.\n\n        :param delta: 0, 1 or -1: how to cycle through possible hits\n        :type delta: int\n        \"\"\"\n        self.position = len(self.get())\n        entry_value = self.get()\n        separator_index = [i for i in range(len(entry_value)) if entry_value.startswith(',', i)]\n        for element in self._completion_list:\n            if separator_index:\n                last_value = self.get()[separator_index[-1]+1:]\n                if str(element).lower().startswith(str(last_value.strip()).lower()):\n                    if last_value.startswith(' '):\n                        self.delete(separator_index[-1] + 1, tk.END)\n                        self.insert(separator_index[-1] + 1, ' ')\n                        self.insert(separator_index[-1] + 2, element)\n                        self.select_range(self.position, tk.END)\n                    else:\n                        self.delete(separator_index[-1] + 1, tk.END)\n                        self.insert(separator_index[-1] + 1, element)\n                        self.select_range(self.position, tk.END)\n            else:\n                if str(element).lower().startswith(str(self.get()).lower()):\n                    self.delete(0, tk.END)\n                    self.insert(0, element)\n                    self.select_range(self.position, tk.END)\n                break\n\n    def handle_keyrelease(self, event):\n        \"\"\"\n        Event handler for the keyrelease event on this widget.\n\n        :param event: Tkinter event\n        \"\"\"\n        if event.keysym == \"Return\":\n            self.handle_return(None)\n            return\n        if len(event.keysym) == 1 or len(event.keysym) == 2:\n            self.autocomplete()\n\n    def handle_return(self, event):\n        \"\"\"\n        Function to bind to the 
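# A standalone sketch of the comma-separated prefix matching the widget above
# performs: take the fragment after the last comma and keep every completion
# value that starts with it (names here are illustrative).
completion_list = sorted(['alpha', 'beta', 'beta2', 'gamma'], key=str.lower)

def matches(entry_value):
    # Mirror the widget: complete only the fragment after the last comma.
    last = entry_value.rsplit(',', 1)[-1].strip()
    return [c for c in completion_list if c.lower().startswith(last.lower())]

print(matches('alpha, be'))  # ['beta', 'beta2']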
Enter/Return key so if Enter is pressed the selection is cleared.\n\n :param event: Tkinter event\n \"\"\"\n self.icursor(tk.END)\n self.selection_clear()\n\n def config(self, **kwargs):\n \"\"\"Alias for configure\"\"\"\n self.configure(**kwargs)\n\n def configure(self, **kwargs):\n \"\"\"Configure widget specific keyword arguments in addition to :class:`ttk.Entry` keyword arguments.\"\"\"\n if \"completevalues\" in kwargs:\n self.set_completion_list(kwargs.pop(\"completevalues\"))\n return ttk.Entry.configure(self, **kwargs)\n\n def cget(self, key):\n \"\"\"Return value for widget specific keyword arguments\"\"\"\n if key == \"completevalues\":\n return self._completion_list\n return ttk.Entry.cget(self, key)\n\n def keys(self):\n \"\"\"Return a list of all resource names of this widget.\"\"\"\n keys = ttk.Entry.keys(self)\n keys.append(\"completevalues\")\n return keys\n\n def __setitem__(self, key, value):\n self.configure(**{key: value})\n\n def __getitem__(self, item):\n return self.cget(item)\n","repo_name":"alex-pnzv/contract-generator","sub_path":"Utills/multiple_autocomplete_entry.py","file_name":"multiple_autocomplete_entry.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72396660944","text":"import unittest\nfrom toyWaterPitcherProblem import Solver, read_file\n\nclass TestCalculator(unittest.TestCase):\n def test01(self):\n \n print(\"Test01\")\n capacities, target = read_file(\"project1/input.txt\")\n solver = Solver(capacities,target)\n result = solver.A_star()\n self.assertEqual(result,19)\n print('****************')\n \n \n\n def test02(self):\n print()\n print()\n print(\"Test02\")\n capacities, target = read_file(\"project1/input1.txt\")\n solver = Solver(capacities,target)\n result = solver.A_star()\n self.assertEqual(result,7)\n print('****************')\n \n\n def test03(self):\n print()\n print()\n print(\"Test03\")\n capacities, target = read_file(\"project1/input2.txt\")\n solver = Solver(capacities,target)\n result = solver.A_star()\n self.assertEqual(result,-1)\n print('****************')\n \n\n def test04(self):\n print()\n print()\n print(\"Test04\")\n capacities, target = read_file(\"project1/input3.txt\")\n solver = Solver(capacities,target)\n result = solver.A_star()\n self.assertEqual(result,-1)\n print('****************')\n \n\n def test05(self):\n print()\n print()\n print(\"Test05\")\n capacities, target = read_file(\"project1/input4.txt\")\n solver = Solver(capacities,target)\n result = solver.A_star()\n self.assertEqual(result,36)\n print('****************')\n \n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kizzy8848/GWU-Spring-2023-AI-Project","sub_path":"project1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"183832996","text":"# Game options(Shouldn't chnage)\nTITLE=\"PlatForm\"\nWIDTH = 640\nHEIGHT = 480\nFRAMES = 60\n\n\n# Physic options\nacceleration = 0.75\ndeceleration = -0.12 # This is a constant that slows the acceleration\n#(deceleration)\n\n\n\n\n# Color options(unchangeable)\nwhite = (255,255,255)\nblack = (0,0,0)\nred = (255,0,0)\ngreen = (0,155,0)\nblue = (0,0,255)\nskin = (237, 223, 95)\nbg = (3, 141, 
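# The five near-identical test methods above can collapse into one loop with
# subTest; the file names and expected costs are taken from the tests
# themselves, and the import assumes the same project-local module.
import unittest
from toyWaterPitcherProblem import Solver, read_file

class TestCalculatorCompact(unittest.TestCase):
    def test_all_inputs(self):
        cases = [('project1/input.txt', 19), ('project1/input1.txt', 7),
                 ('project1/input2.txt', -1), ('project1/input3.txt', -1),
                 ('project1/input4.txt', 36)]
        for path, expected in cases:
            with self.subTest(path=path):
                capacities, target = read_file(path)
                self.assertEqual(Solver(capacities, target).A_star(), expected)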
200)\n","repo_name":"gitchristiancode/Platformer","sub_path":"vars.py","file_name":"vars.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74801000143","text":"#!/usr/bin/python3\r\nimport socket\r\nimport sys\r\nimport re\r\n\r\nCRLF = '\\r\\n'\r\n\r\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient.connect(('localhost', 80))\r\n\r\ndata = \"login=bee&password=bug&security_level=0&form=submit\"\r\nlength = len(data)\r\n\r\nrequest = f\"POST /login.php HTTP/1.1{CRLF}Host: localhost{CRLF}Content-Type: application/x-www-form-urlencoded{CRLF}Content-Length: {length}{CRLF}{CRLF}{data}\"\r\n\r\nclient.send(request.encode())\r\n\r\nresponse = \"\"\r\nwhile True:\r\n data = client.recv(8192)\r\n if not data:\r\n break\r\n response += data.decode()\r\n\r\nclient.close()\r\n\r\ncookies = re.findall(r'Set-Cookie: (.*?);', response)\r\n\r\ni = 0\r\ncookie = \"\"\r\nfor c in cookies:\r\n if i != 0:\r\n cookie = c + \"; \" + cookie;\r\n i += 1\r\n\r\nprint(cookie)","repo_name":"hfw8271/exploiting_bwapp","sub_path":"bwappLogin.py","file_name":"bwappLogin.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18594502416","text":"import asyncio\nfrom sqlalchemy import Table, Column, Integer, String, ForeignKey\nfrom sqlalchemy import MetaData\n\nfrom aiomysql.sa import create_engine\n\n\nmetadata = MetaData()\n\nusers = Table('users', metadata,\n Column('id', Integer, primary_key=True),\n Column('name',String(20)),\n Column('age',Integer)\n )\n\naddresses = Table('addresses',metadata,\n Column('id', Integer, primary_key=True),\n Column('user_id', None , ForeignKey('users.id'),nullable=False),\n Column('email',String(50),nullable=False)\n )\nloop = asyncio.get_event_loop()\n\n@asyncio.coroutine\ndef go():\n engine = yield from create_engine(minsize=1,\n maxsize=30,\n user='rjain',\n db='learning',\n host='localhost',\n password='pass@123',\n loop=loop)\n\n with (yield from engine) as conn:\n trans = yield from conn.begin()\n try:\n yield from conn.execute(users.insert().values(name='abc',age=33))\n res = yield from conn.execute(users.select())\n\n except Exception:\n yield from trans.rollback()\n else:\n yield from trans.commit()\n\n if res:\n for row in res:\n print(row.id, row.name, row.age)\n print(engine.freesize)\n#asyncio.get_event_loop().run_until_complete(go())\nloop.run_until_complete(go())\n","repo_name":"rajgouravjain/python3-practice","sub_path":"learning/sqlalchemy_learning/mysql-2.py","file_name":"mysql-2.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74219276941","text":"import numpy as np\nimport time\nimport os.path\nimport scipy.misc\nimport tensorflow as tf\nfrom distutils.version import LooseVersion\nfrom glob import glob\nimport warnings \nimport matplotlib.pyplot as plt\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. 
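# The cookie loop in the bwapp script above appears to drop the first
# Set-Cookie match (the i == 0 iteration is skipped) and builds the header by
# hand; re.findall plus join gives the same shape in one step. The response
# text below is canned for illustration.
import re

response = ('HTTP/1.1 200 OK\r\n'
            'Set-Cookie: PHPSESSID=abc123; path=/\r\n'
            'Set-Cookie: security_level=0; path=/\r\n\r\n')
cookies = re.findall(r'Set-Cookie: (.*?);', response)
print('; '.join(cookies))  # PHPSESSID=abc123; security_level=0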
Please use a GPU to inference on your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\ndef gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape):\n \"\"\"\n Generate test output using the test images\n :param sess: TF session\n :param logits: TF Tensor for the logits\n :param keep_prob: TF Placeholder for the dropout keep probability\n :param image_pl: TF Placeholder for the image placeholder\n :param data_folder: Path to the folder that contains the image\n :param image_shape: Tuple - Shape of image\n :return: Output for for each test image\n \"\"\"\n image = scipy.misc.imresize(scipy.misc.imread(data_folder), image_shape)\n im_softmax = sess.run([tf.nn.softmax(logits)],{keep_prob: 1.0, image_pl: [image]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n street_im = np.array(street_im)\n\n return street_im\n\ndef run():\n num_classes = 2\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n model_dir = './trained_model/'\n output_dir = './runs'\n #tests.test_for_kitti_dataset(data_dir)\n\n with tf.Session() as sess:\n # Download pretrained vgg model\n # helper.maybe_download_pretrained_vgg(data_dir)\n\n data_folder = './sample/um_000000.png' \n\n\n saver = tf.train.import_meta_graph('./trained_model/Semantic_seg_trained.ckpt.meta')\n print(\"Graph imported...\")\n saver.restore(sess,tf.train.latest_checkpoint('./trained_model'))\n print(\"Model restored successfully!\")\n\n # extract tensors, including input tensor and hyperparameter tensor.\n graph = tf.get_default_graph()\n input_image = graph.get_tensor_by_name('image_input:0')\n keep_prob = graph.get_tensor_by_name('keep_prob:0')\n logits = graph.get_tensor_by_name('logits:0') # note that here we call the TENSOR as the result of operation, not the operation itself. 
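# The overlay step of gen_test_output above in miniature: threshold per-pixel
# road probabilities and build the translucent green RGBA mask with the same
# np.dot trick (probabilities here are made up).
import numpy as np

im_softmax = np.array([[0.9, 0.2], [0.7, 0.4]])
segmentation = (im_softmax > 0.5).reshape(2, 2, 1)
mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))
print(mask.shape)   # (2, 2, 4)
print(mask[0, 0])   # [  0 255   0 127] where the pixel passed the threshold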
Call it by operation_name::x.\n\n        print('Inferencing on image')\n        last_time = time.time()\n        image_output = gen_test_output(sess, logits, keep_prob, input_image, data_folder, image_shape)\n        print(\"Inference Time: {} seconds\".format(time.time() - last_time))\n        plt.imshow(image_output)\n        plt.show()\n\nif __name__ == '__main__':\n    run()","repo_name":"faizanalibugti/Semantic-Tensorflow","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4447695655","text":"def bin_search(lo, hi, count=1):\n    # Out of range or more than 7 honest answers means something went wrong.\n    if lo > hi or count > 7:\n        print(\"Either you guessed a number out of range or you had an incorrect entry.\")\n        return\n\n    mid = (lo + hi) // 2\n    print(\"Guess \" + str(count) + \" : The number you thought was \" + str(mid))\n\n    enter = int(input(\"Enter 1 if my guess was high, -1 if low, and 0 if correct: \"))\n    while enter != 1 and enter != -1 and enter != 0:\n        enter = int(input(\"Enter 1 if my guess was high, -1 if low, and 0 if correct: \"))\n\n    if enter == 0:\n        print(\"Thank you for playing the Guessing Game.\")\n    elif enter == 1:\n        bin_search(lo, mid - 1, count + 1)\n    else:\n        bin_search(mid + 1, hi, count + 1)\n\n\ndef main():\n    print(\"Think of a number between 1 and 100 inclusive.\")\n    print(\"And I will guess what it is in 7 tries or less.\")\n    ready = input(\"Are you ready? (y/n): \")\n    while ready != 'y' and ready != 'n':\n        ready = input(\"Are you ready? (y/n): \")\n    if ready == 'n':\n        print(\"Bye\")\n        return\n    bin_search(1, 100)\n\n\nmain()\n","repo_name":"mehranaman/Intro-to-computing-data-structures-and-algos-","sub_path":"Assignments/GuessingGamerec.py","file_name":"GuessingGamerec.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40498825407","text":"# 2022-04-17\n# \"This Is Coding Test\" (이코테) round 2, Greedy Q5: picking bowling balls\n\n\"\"\"\nI considered building a (weight, number) array and sorting it by weight,\nbut just counting how many balls exist per weight is enough, so I did that.\n\"\"\"\n\nn, m = map(int, input().split())\n\ndata = list(map(int, input().split()))\n\nballs = [0] * (m + 1)\n\nfor i in data:\n    balls[i] += 1\n\nanswer = 0\n\ntotal = sum(balls)\nfor b in balls:\n    if b == 0:\n        continue\n    answer += (b) * (total - b)\n    total -= b\n\nprint(answer)\n\n\"\"\"\nTC 1 -> 8\n5 3\n1 3 2 3 2\n\nTC 2 -> 25\n8 5\n1 5 4 3 2 4 5 2\n\"\"\"\n","repo_name":"cheonyeji/algorithm_study","sub_path":"이코테2회/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31490892343","text":"# wap a program to convert miles to kilometer\n\ndef miles_to_km():\n    miles=float(miles_input.get())\n    km=round(miles*1.609, 2)\n    km_result_label.config(text=f\"{km}\")\n\nfrom tkinter import *\nwindow = Tk()\nwindow.title(\"Miles to Kilometer Converter\")\nwindow.config(padx=20, pady=20)\n\nmiles_input=Entry(width=10)\nmiles_input.grid(row= 0,column= 1)\n# Entry.config(padx=5, pady=5)\n\n\nmiles_label=Label(text=\"Miles\")\nmiles_label.grid(row= 0,column= 2)\n\nis_equal_label=Label(text=\"is equal to\")\nis_equal_label.grid(row= 1,column= 0)\n\nkm_result_label=Label(text=\"0\")\nkm_result_label.grid(row= 
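# Why the guessing game above can promise 7 tries for 1..100: every honest
# answer halves the remaining range, so ceil(log2(101)) guesses suffice.
import math

print(math.ceil(math.log2(100 + 1)))  # 7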
1,column=1)\n\nkm_label=Label(text=\"Km\")\nkm_label.grid(row= 1,column= 2)\n\ncalc_button=Button(text=\"Calculate\", command=miles_to_km)\ncalc_button.grid(row= 2,column= 1)\n\n\nwindow.mainloop()","repo_name":"subsangam/100DaysOfCode-Python","sub_path":"Day28/7-mile_to_km.py","file_name":"7-mile_to_km.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74354340941","text":"# https://www.hackerrank.com/challenges/abbr/problem\n#\n# Abbreviation\n#\n# State Transition:\n#\n# f[i,j]: whether it is possible to make A[:i] into B[:j]\n#\n# f[i,j] = f[i-1,j-1] or f[i-1,j] if A[i-1].upper = B[j-1]\n# f[i-1,j] if A[i-1] is lowercase\n# True if j == 0 and i == 0\n# False if i == 0 and j > 0\n#\n\ndef abbrev(A: str, B: str):\n m, n = len(A), len(B)\n dp = [[False] * (n+1) for _ in range(m+1)]\n dp[0][0] = True\n for i in range(1,m+1):\n if A[i-1].islower():\n dp[i][0] = dp[i-1][0]\n for j in range(1,n+1):\n for i in range(1,m+1):\n if A[i-1].islower():\n dp[i][j] = dp[i][j] or dp[i-1][j]\n if A[i-1].upper() == B[j-1]:\n dp[i][j] = dp[i][j] or dp[i-1][j-1]\n return dp[m][n]\n\nif __name__ == \"__main__\":\n for _ in range(int(input().rstrip())):\n A = input().rstrip()\n B = input().rstrip()\n ans = abbrev(A, B)\n if ans:\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"davll/practical-algorithms","sub_path":"HackerRank/dp/abbreviation.py","file_name":"abbreviation.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70333893902","text":"import os\nimport numpy as np\nimport json\nimport sys\nfrom scipy import spatial\nfrom tqdm import tqdm\nimport torch\nimport torch.nn.functional as F\nfrom lib.eval_utils import classification_accuracy, calc_building_point_iou, calc_building_mesh_iou, calc_shape_iou, \\\n calc_part_iou, LABELS, read_obj, calculate_face_area, read_point_cloud_ply\nNUM_POINTS = int(1e5)\n\n\ndef transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index):\n \"\"\"\n Transfer point predictions to triangles and components through avg pooling\n :param vertices: N x 3, numpy.ndarray(float)\n :param faces: M x 3, numpy.ndarray(int)\n :param components: M x 1, numpy.ndarray(int)\n :param points: K x 3, numpy.ndarray(float)\n :param point_feat: K x 31, numpy.ndarray(float)\n :param point_face_index: K x 1, numpy.ndarray(int)\n :return:\n face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int)\n face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int)\n face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float)\n face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float)\n \"\"\"\n\n n_components = len(np.unique(components))\n face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))\n face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))\n comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1]))\n face_point_index = {}\n\n # Find faces that have no corresponding points\n sampled = set(point_face_index.flatten())\n unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample points\n\n # Compute center for unsampled faces\n v0 = vertices[faces[unsampled, 0]]\n v1 = vertices[faces[unsampled, 1]]\n v2 = vertices[faces[unsampled, 2]]\n face_centers = np.array([[v0[:, 0] + v1[:, 0] + v2[:, 0]],\n [v0[:, 1] + v1[:, 1] + v2[:, 1]],\n [v0[:, 2] + v1[:, 2] + v2[:, 2]]]) / 3.0\n 
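# Sanity check of the abbreviation DP above on HackerRank's sample pair:
# "daBcd" can become "ABC" (capitalize a and c, delete both d's) while
# "AbcDE" cannot become "AFDE". Assumes abbrev from the snippet above is in
# scope.
print(abbrev('daBcd', 'ABC'))   # True
print(abbrev('AbcDE', 'AFDE'))  # False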
face_centers = np.squeeze(face_centers).T\n\n ## Transfer point predictions to triangles\n # Find nearest point and assign its point feature to each unsampled face\n p_tree = spatial.cKDTree(points)\n _, k_nn_idx = p_tree.query(face_centers)\n for idx, face in enumerate(unsampled):\n face_feat_from_tr_avg_pool[face] = point_feat[k_nn_idx[idx]]\n face_point_index[face] = int(k_nn_idx[idx])\n\n # Use avg pooling for sampled faces\n for face in sampled:\n mask = np.squeeze(point_face_index == face)\n face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0)\n face_point_index[face] = mask.nonzero()[0].tolist()\n\n ## Transfer point predictions to components\n for comp_idx in range(comp_feat_avg_pool.shape[0]):\n face_idx = np.squeeze(components == comp_idx).nonzero()[0]\n point_idx = []\n for idx in face_idx:\n try:\n point_idx.extend(face_point_index[int(idx)])\n except:\n point_idx.append(face_point_index[int(idx)])\n comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0)\n face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx]\n\n face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:,\n np.newaxis] + 1 # we exclude undetermined (label 0) during training\n face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1\n\n return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\\n face_feat_from_comp_avg_pool, comp_feat_avg_pool\n\n\ndef get_split_models(split_fn):\n \"\"\"\n Read split.txt file and return model names\n :param split_fn:\n :return:\n models_fn: list(str)\n \"\"\"\n\n models_fn = []\n with open(split_fn, 'r') as fin:\n for line in fin:\n models_fn.append(line.strip())\n\n return models_fn\n\n\ndef get_point_cloud_data(model_name, pts_dir, pts_faceindex_dir, out_features, pts_label_dir=None):\n \"\"\"\n Get point cloud data needed for evaluation\n :param model_name: str\n :param pts_dir: str\n :param pts_faceindex_dir: str\n :param out_features: npz file\n :param pts_label_dir: str\n :return:\n points: N x 3, numpy.ndarray(float)\n point_gt_labels: N x 1, numpy.ndarray(int)\n point_pred_labels: N x 1, numpy.ndarray(int)\n point_pred_feat: N x 31, numpy.ndarray(float)\n point_face_index: N x 1, numpy.ndarray(int)\n \"\"\"\n\n # Get points\n vertex_properties = read_point_cloud_ply(os.path.join(pts_dir, model_name + \".ply\"))\n points = np.hstack((vertex_properties[\"x\"][\"data\"][:, np.newaxis], vertex_properties[\"y\"][\"data\"][:, np.newaxis],\n vertex_properties[\"z\"][\"data\"][:, np.newaxis])).astype(np.float32)\n assert points.shape[0] == NUM_POINTS\n\n point_gt_labels = None\n if pts_label_dir is not None:\n # Get ground truth labels\n with open(os.path.join(pts_label_dir, model_name + \"_label.json\"), 'r') as f_in:\n labels_json = json.load(f_in)\n point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis]\n assert point_gt_labels.shape[0] == NUM_POINTS\n\n # Get per point features\n point_feat = out_features[model_name + \".npy\"]\n assert point_feat.shape[0] == NUM_POINTS\n assert point_feat.shape[1] == (len(LABELS) - 1)\n\n # Calculate pred label\n point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during training\n\n # Get points face index\n with open(os.path.join(pts_faceindex_dir, model_name + \".txt\"), 'r') as f_in:\n point_face_index = f_in.readlines()\n point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, 
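# The nearest-point lookup above in isolation: query a cKDTree of sample
# points with an unsampled face center to borrow the closest point's feature
# (coordinates are illustrative).
import numpy as np
from scipy import spatial

points = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
face_centers = np.array([[0.9, 1.1, 1.0]])
p_tree = spatial.cKDTree(points)
dist, k_nn_idx = p_tree.query(face_centers)
print(k_nn_idx)  # [1] -> index of the nearest sample point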
np.newaxis]\n assert point_face_index.shape[0] == NUM_POINTS\n\n return points, point_gt_labels, point_pred_labels, point_feat, point_face_index\n\n\ndef get_mesh_data(model_name, obj_dir, face_labels_dir=None):\n \"\"\"\n Get mesh data needed for evaluation\n :param model_name: str\n :param obj_dir: str\n :param face_labels_dir: str\n :return:\n vertices: N x 3, numpy.ndarray(float)\n faces: M x 3, numpy.ndarray(int)\n face_labels: M x 1, numpy.ndarray(int)\n components: M x 1, numpy.ndarray(float)\n face_area: M x 1, numpy.ndarray(float)\n \"\"\"\n\n # Load obj\n vertices, faces, components = read_obj(obj_fn=os.path.join(obj_dir, model_name + \".obj\"))\n faces -= 1\n\n # Calculate face area\n face_area = calculate_face_area(vertices=vertices, faces=faces)\n assert (face_area.shape[0] == faces.shape[0])\n\n face_labels = None\n if face_labels_dir is not None:\n # Read face labels\n with open(os.path.join(face_labels_dir, model_name + \".json\"), 'r') as f_in:\n labels_json = json.load(f_in)\n face_labels = np.fromiter(labels_json.values(), int)\n face_labels = face_labels[:, np.newaxis]\n\n return vertices, faces, face_labels, components, face_area\n\n\ndef phases_evaluation(out_feat_fn):\n assert os.path.isfile(out_feat_fn)\n assert out_feat_fn.endswith(\".npz\")\n out_features = np.load(out_feat_fn)\n\n # BuildingNet directories\n BUILDINGNET_BASE_DIR = os.path.join(\"Dataset\", \"BuildingNet\")\n assert os.path.isdir(BUILDINGNET_BASE_DIR)\n BUILDINGNET_PTS_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"POINT_CLOUDS\")\n assert os.path.isdir(BUILDINGNET_PTS_DIR)\n BUILDINGNET_OBJ_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"OBJ_MODELS\")\n assert os.path.isdir(BUILDINGNET_OBJ_DIR)\n BUILDINGNET_PTS_LABELS_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"point_labels\")\n assert os.path.isdir(BUILDINGNET_PTS_LABELS_DIR)\n BUILDINGNET_PTS_FACEINDEX_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"point_faceindex\")\n assert os.path.isdir(BUILDINGNET_PTS_FACEINDEX_DIR)\n BUILDINGNET_FACE_LABELS_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"face_labels\")\n assert os.path.isdir(BUILDINGNET_FACE_LABELS_DIR)\n BUILDINGNET_VAL_SPLIT = os.path.join(BUILDINGNET_BASE_DIR, \"splits\", \"val_split.txt\")\n assert os.path.isfile(BUILDINGNET_VAL_SPLIT)\n\n # Get model names\n models_fn = get_split_models(split_fn=BUILDINGNET_VAL_SPLIT)\n\n point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp = {}, {}, {}\n point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp = {}, {}, {}\n\n print(\"Calculate evaluation metrics for point and mesh phase\")\n for model_fn in tqdm(models_fn):\n # Get point cloud data\n points, point_gt_labels, point_pred_labels, point_feat, point_face_index = \\\n get_point_cloud_data(model_fn, BUILDINGNET_PTS_DIR, BUILDINGNET_PTS_FACEINDEX_DIR, out_features,\n BUILDINGNET_PTS_LABELS_DIR)\n # Get mesh data\n vertices, faces, face_gt_labels, components, face_area = \\\n get_mesh_data(model_fn, BUILDINGNET_OBJ_DIR, BUILDINGNET_FACE_LABELS_DIR)\n # Infer face labels from point predictions\n face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, comp_feat = \\\n transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index)\n # Calculate point building iou\n point_buildings_iou[model_fn] = calc_building_point_iou(point_gt_labels, point_pred_labels, LABELS)\n # Calculate mesh building iou\n mesh_buildings_iou_from_tr[model_fn] = calc_building_mesh_iou(face_gt_labels, 
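# The label decoding used above: the per-point class scores exclude label 0
# ("undetermined"), so the argmax index is shifted up by one.
import numpy as np

point_feat = np.array([[0.1, 0.7, 0.2],
                       [0.6, 0.3, 0.1]])
point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1
print(point_pred_labels.ravel())  # [2 1]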
face_pred_labels_from_tr,\n face_area, LABELS)\n mesh_buildings_iou_from_comp[model_fn] = calc_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp,\n face_area, LABELS)\n # Calculate classification accuracy\n point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels)\n mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr,\n face_area)\n mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp,\n face_area)\n\n # Calculate avg point part and shape IOU\n point_shape_iou = calc_shape_iou(point_buildings_iou)\n point_part_iou = calc_part_iou(point_buildings_iou, LABELS)\n mesh_shape_iou_from_tr = calc_shape_iou(mesh_buildings_iou_from_tr)\n mesh_part_iou_from_tr = calc_part_iou(mesh_buildings_iou_from_tr, LABELS)\n mesh_shape_iou_from_comp = calc_shape_iou(mesh_buildings_iou_from_comp)\n mesh_part_iou_from_comp = calc_part_iou(mesh_buildings_iou_from_comp, LABELS)\n point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc))\n mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / \\\n float(len(mesh_buildings_acc_from_tr))\n mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / \\\n float(len(mesh_buildings_acc_from_comp))\n\n # Log results\n str_len = 5\n cls_str = \"Classes |\" + \"|\".join([f\" {name[:str_len].rjust(str_len)} \"\n for name in point_part_iou.keys() if name != \"all\"])\n div_str = \"-\" * len(cls_str)\n buf = \"BuildingNet-Points Phase:\\n\"\n buf += \"-------------------------\\n\"\n buf += f\" Part IoU={point_part_iou['all'] * 100:.1f} %\\n\"\n buf += f\"Shape IoU={point_shape_iou['all'] * 100:.1f} %\\n\"\n buf += f\"Class acc={point_acc * 100:.1f} %\\n\"\n buf += cls_str + \"\\n\"\n buf += div_str + \"\\n\"\n iou_str = \"IoU |\" + \"|\".join([f\" {iou * 100:05.2f} \"\n for name, iou in point_part_iou.items() if name != \"all\"])\n buf += iou_str + \"\\n\\n\"\n buf += \"BuildingNet-Mesh Phase (from triangles):\\n\"\n buf += \"----------------------------------------\\n\"\n buf += f\" Part IoU={mesh_part_iou_from_tr['all'] * 100:.1f} %\\n\"\n buf += f\"Shape IoU={mesh_shape_iou_from_tr['all'] * 100:.1f} %\\n\"\n buf += f\"Class acc={mesh_acc_from_tr * 100:.1f} %\\n\"\n buf += cls_str + \"\\n\"\n buf += div_str + \"\\n\"\n iou_str = \"IoU |\" + \"|\".join([f\" {iou * 100:05.2f} \"\n for name, iou in mesh_part_iou_from_tr.items() if name != \"all\"])\n buf += iou_str + \"\\n\\n\"\n buf += \"BuildingNet-Mesh Phase (from components):\\n\"\n buf += \"-----------------------------------------\\n\"\n buf += f\" Part IoU={mesh_part_iou_from_comp['all'] * 100:.1f} %\\n\"\n buf += f\"Shape IoU={mesh_shape_iou_from_comp['all'] * 100:.1f} %\\n\"\n buf += f\"Class acc={mesh_acc_from_comp * 100:.1f} %\\n\"\n buf += cls_str + \"\\n\"\n buf += div_str + \"\\n\"\n iou_str = \"IoU |\" + \"|\".join([f\" {iou * 100:05.2f} \"\n for name, iou in mesh_part_iou_from_comp.items() if name != \"all\"])\n buf += iou_str\n\n print(buf)\n log_dir = os.path.dirname(out_feat_fn)\n with open(os.path.join(log_dir, \"evaluation_results.txt\"), 'w') as f_out:\n f_out.write(buf)\n\n\ndef export_predictions(out_feat_fn, split):\n assert os.path.isfile(out_feat_fn)\n assert out_feat_fn.endswith(\".npz\")\n out_features = np.load(out_feat_fn)\n\n # BuildingNet directories\n BUILDINGNET_BASE_DIR = os.path.join(\"Dataset\", \"BuildingNet\")\n assert 
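# classification_accuracy comes from the project's lib.eval_utils; a plausible
# area-weighted reading of what the face_area argument does (an assumption for
# illustration, not the library's actual code):
import numpy as np

def weighted_accuracy(gt, pred, weights=None):
    correct = (gt == pred).astype(float).ravel()
    if weights is None:
        return correct.mean()
    w = np.asarray(weights, dtype=float).ravel()
    return float(np.sum(w * correct) / np.sum(w))

print(weighted_accuracy(np.array([1, 2, 2]), np.array([1, 2, 1]),
                        weights=[2.0, 1.0, 1.0]))  # 0.75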
os.path.isdir(BUILDINGNET_BASE_DIR)\n BUILDINGNET_PTS_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"POINT_CLOUDS\")\n assert os.path.isdir(BUILDINGNET_PTS_DIR)\n BUILDINGNET_OBJ_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"OBJ_MODELS\")\n assert os.path.isdir(BUILDINGNET_OBJ_DIR)\n BUILDINGNET_PTS_FACEINDEX_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"point_faceindex\")\n assert os.path.isdir(BUILDINGNET_PTS_FACEINDEX_DIR)\n BUILDINGNET_SPLIT = os.path.join(BUILDINGNET_BASE_DIR, \"splits\", f\"{split}_split.txt\")\n assert os.path.isfile(BUILDINGNET_SPLIT)\n\n # Get model names\n models_fn = get_split_models(split_fn=BUILDINGNET_SPLIT)\n\n print(f\"Export predictions for point and mesh phase for {split} split\")\n point_predicted_labels, face_predicted_labels_from_tr, face_predicted_labels_from_comp = {}, {}, {}\n for model_fn in tqdm(models_fn):\n # Get point cloud data\n points, _, point_pred_labels, point_feat, point_face_index = \\\n get_point_cloud_data(model_fn, BUILDINGNET_PTS_DIR, BUILDINGNET_PTS_FACEINDEX_DIR, out_features)\n # Get mesh data\n vertices, faces, _, components, face_area = get_mesh_data(model_fn, BUILDINGNET_OBJ_DIR)\n # Infer face labels from point predictions\n face_pred_labels_from_tr, face_pred_labels_from_comp, _, _, _ = \\\n transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index)\n model_name = os.path.basename(model_fn).split(\".\")[0]\n point_predicted_labels[f\"{model_name}\"] = point_pred_labels.astype(np.uint8)\n face_predicted_labels_from_tr[f\"{model_name}\"] = face_pred_labels_from_tr.astype(np.uint8)\n face_predicted_labels_from_comp[f\"{model_name}\"] = face_pred_labels_from_comp.astype(np.uint8)\n\n # Save predictions\n log_dir = os.path.dirname(out_feat_fn)\n model_name = os.path.basename(out_feat_fn).split(\"_\")[0]\n out_fn = os.path.join(log_dir, f\"{model_name}_{split}_pred_point_labels.npz\")\n np.savez(out_fn, **point_predicted_labels)\n out_fn = os.path.join(log_dir, f\"{model_name}_{split}_pred_face_labels_from_tr.npz\")\n np.savez(out_fn, **face_predicted_labels_from_tr)\n out_fn = os.path.join(log_dir, f\"{model_name}_{split}_pred_face_labels_from_comp.npz\")\n np.savez(out_fn, **face_predicted_labels_from_comp)\n\n\ndef export_features(out_feat_fn, split):\n assert os.path.isfile(out_feat_fn)\n assert out_feat_fn.endswith(\".npz\")\n out_features = np.load(out_feat_fn)\n\n # BuildingNet directories\n BUILDINGNET_BASE_DIR = os.path.join(\"Dataset\", \"BuildingNet\")\n assert os.path.isdir(BUILDINGNET_BASE_DIR)\n BUILDINGNET_PTS_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"POINT_CLOUDS\")\n assert os.path.isdir(BUILDINGNET_PTS_DIR)\n BUILDINGNET_OBJ_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"OBJ_MODELS\")\n assert os.path.isdir(BUILDINGNET_OBJ_DIR)\n BUILDINGNET_PTS_FACEINDEX_DIR = os.path.join(BUILDINGNET_BASE_DIR, \"point_faceindex\")\n assert os.path.isdir(BUILDINGNET_PTS_FACEINDEX_DIR)\n BUILDINGNET_SPLIT = os.path.join(BUILDINGNET_BASE_DIR, \"splits\", f\"{split}_split.txt\")\n assert os.path.isfile(BUILDINGNET_SPLIT)\n split_feat_dir = os.path.join(os.path.dirname(out_feat_fn), f\"{split}_comp_feat\")\n os.makedirs(split_feat_dir, exist_ok=True)\n\n # Get model names\n models_fn = get_split_models(split_fn=BUILDINGNET_SPLIT)\n\n print(f\"Export per-component features for {split} split\")\n\n for model_fn in tqdm(models_fn):\n # Get point cloud data\n points, _, _, point_feat, point_face_index = \\\n get_point_cloud_data(model_fn, BUILDINGNET_PTS_DIR, BUILDINGNET_PTS_FACEINDEX_DIR, out_features)\n # Get 
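# The export pattern above, round-tripped: np.savez(**dict) stores one array
# per model name, and np.load hands them back under the same keys.
import numpy as np

preds = {'building_a': np.array([[1], [3]], dtype=np.uint8)}
np.savez('pred_point_labels.npz', **preds)
loaded = np.load('pred_point_labels.npz')
print(loaded['building_a'].ravel())  # [1 3]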
mesh data\n vertices, faces, _, components, face_area = get_mesh_data(model_fn, BUILDINGNET_OBJ_DIR)\n # Get component features\n _, _, _, _, per_comp_feat = \\\n transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index)\n per_comp_feat = F.normalize(torch.from_numpy(per_comp_feat.astype(np.float32)), dim=0)\n # Save features\n model_name = os.path.basename(model_fn).split(\".\")[0]\n torch.save(per_comp_feat, os.path.join(split_feat_dir, f\"{model_name}.pth\"))\n\n\n","repo_name":"buildingnet/buildingnet_dataset","sub_path":"MinkowskiNet/lib/buildingnet_eval.py","file_name":"buildingnet_eval.py","file_ext":"py","file_size_in_byte":17863,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"47"} +{"seq_id":"19996551935","text":"from flask import Flask, jsonify\nfrom flask_talisman import Talisman\n\nfrom utils import metadata, token_count\n\napplication = Flask(__name__)\nTalisman(application)\n\n\n@application.route(\"/\")\ndef index():\n return \"Nothing here :)\"\n\n\n@application.route(\"/contract/\")\ndef contract():\n contract_metadata = metadata.contract_metadata()\n return jsonify(contract_metadata)\n\n\n@application.route(\"/token/\")\ndef token_data(token_id):\n if token_id > 0 and token_id <= 888 and token_id <= token_count.token_count():\n token_metadata = metadata.token_metadata(token_id)\n else:\n token_metadata = {\n \"msg\": (\n \"Item not minted yet. \"\n \"If you just minted this token, please allow up to 10 minutes.\"\n )\n }\n return jsonify(token_metadata)\n\n\nif __name__ == \"__main__\":\n application.run()\n","repo_name":"wgmi-rambo/nft-api-template","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35925236297","text":"# NESNE YONELIMLI PROGRAMLAMA\n\n# 1 - class\n\nclass test():\n print(\"bu bir sinif örnegi\")\n\n# --------------------------------------------------------\n\n# class özellikleri\n\n\nclass calisanlar():\n bolum = ''\n sql = \"evet\"\n deneyim = 0\n bildigiDiller = []\n\n\n# sınıfların özelliklerine erişmek\nprint(calisanlar.bolum)\nprint(calisanlar.sql)\n\n# sınıfların özelliklerini değiştirmek\ncalisanlar.sql = \"hayir\"\nprint(calisanlar.sql)\n\n# sınıf örneklendirmesi - o class'a ait nesneler oluşturma - class'ın özelliklerini taşır\nali = calisanlar()\nprint(ali.sql)\nprint(ali.deneyim)\n\n# bu şekilde değiştirme yaptığımızda classta tanımlanan değer de değişir\nali.bildigiDiller.append(\"python\")\nprint(ali.bildigiDiller)\n\nveli = calisanlar()\nprint(veli.sql)\n\n# classta tanımlanan değer değiştiği için \"python\" olarak çıktı verir\nprint(veli.bildigiDiller)\n\n# aşağıda bunun çözümünün nasıl yapıldığı var\n\n# --------------------------------------------------------\n\n\nclass calisanlar():\n def __init__(self): # BURASI ONEMLI\n self.bildigiDiller = []\n\n\nali = calisanlar()\nali.bildigiDiller.append(\"python\")\nprint(ali.bildigiDiller)\n\nveli = calisanlar()\nveli.bildigiDiller.append(\"java\")\nprint(veli.bildigiDiller)\n\nprint(calisanlar().bildigiDiller) # bunun boş dönmesinin sebebi classtaki değer direkt olarak değiştirmememiz\n\n# --------------------------------------------------------\n\n\n# üsttekinin devamı\n\nclass calisanlar():\n bildigiDiller = [\"R\", \"Python\"] # class içinde genel tanımlama yaptık\n bolum = \"\"\n sql = \"\"\n deneyimYili = 0\n\n def __init__(self): # JAVADAKI 
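# The per-component feature export above in isolation: L2-normalize along
# dim=0 (across components, per feature column) and save the tensor. Shapes
# are illustrative.
import numpy as np
import torch
import torch.nn.functional as F

per_comp_feat = np.random.rand(4, 31).astype(np.float32)
feat = F.normalize(torch.from_numpy(per_comp_feat), dim=0)
print(feat.norm(dim=0)[:3])        # each of the 31 columns now has unit norm
torch.save(feat, 'comp_feat.pth')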
CONSTRUCTOR MANTIGI\n self.bildigiDiller = []\n self.bolum = \"\"\n\n\nali = calisanlar()\nali.bildigiDiller.append(\"python\")\nali.bolum = \"end_muh\"\nprint(ali.bildigiDiller)\nprint(ali.bolum)\n\nveli = calisanlar()\nveli.deneyimYili = 2\nveli.bildigiDiller.append(\"java\")\nprint(veli.bildigiDiller)\nprint(veli.deneyimYili)\n\nprint(calisanlar.bildigiDiller)\n\n# --------------------------------------------------------\n\n\nclass veriBilimci():\n calisanlar = []\n\n def __init__(self): # her veri bilimcinin kendine ait değiştirilebilir özelliğinin olması için burası lazım\n self.bildigiDiller = []\n self.bolum = \"\"\n\n def dilEkle(self, eklenecekDil):\n self.bildigiDiller.append(eklenecekDil)\n\n\nali = veriBilimci()\nprint(ali.bildigiDiller)\nprint(ali.bolum)\n\nveli = veriBilimci()\nprint(veli.bildigiDiller)\nprint(veli.bolum)\n\nali.dilEkle(\"python\")\nprint(ali.bildigiDiller)\n\nveli.dilEkle(\"c++\")\nprint(veli.bildigiDiller)\n\n# --------------------------------------------------------\n\n# miras yapıları (inheritance)\n\n\nclass employees():\n def __init__(self):\n self.firstName = \"\"\n self.lastName = \"\"\n self.address = \"\"\n\n\nclass dataScience(employees): # parantez içinde referans olarak gönderince, employees() özellikleri miras alındı.\n def __init__(self):\n self.programming = \"\"\n\n\nclass marketing(employees):\n def __init__(self):\n self.storyTelling = \"\"\n\n\nemployee1 = dataScience()\nemployee1.firstName # employee() özellikleri miras alındığı için firstName kullanabildik.\n\nmar1 = marketing()\nmar1.lastName # employee() özellikleri miras alındığı için lastName kullanabildik.\n\n# --------------------------------------------------------\n\n\n# BASKA SEKILDE CLASS OLUSTURMA (MUHTEMELEN DAHA IYI)\n\nclass ogrenci(employees):\n def __init__(self, isim, no, tc):\n self.isim = isim\n self.no = no\n self.tc = tc\n\n\nali = ogrenci(\"ali\", \"11\", \"112233\")\nali.address # employee() özellikleri miras alındığı için lastName kullanabildik.\n\n# --------------------------------------------------------\n\n\n# # vektörel operasyonlar\n# # OOP\n\n# a = [1, 2, 3, 4]\n# b = [2, 3, 4, 5]\n\n# ab = []\n\n# for i in range(0, len(a)):\n# ab.append(a[i]*b[i])\n\n# print(ab)\n\n\n# # FP - functional programming\n\n# import numpy as np\n\n# a = np.array([1, 2, 3, 4])\n# b = np.array([2, 3, 4, 5])\n\n# print(a*b)\n\n# --------------------------------------------------------\n\n\n# map, filter, reduce fonksiyonları\n\n# her elemana 10 eklemek\nliste = [1, 2, 3, 4, 5]\n\nfor i in liste:\n print(i+10)\n\n# lambda fonksiyon örneği\n\nlist(map(lambda x: x+10, liste)) # map fonksiyonu = verilen dizinin içerisinde, verilen fonksiyonu çalıştırmaya yarar.\n\n# --------------\n# filter fonksiyonu\n# şartı sağlayan elemanları filtreler\n\nliste = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nlist(filter(lambda x: x % 2 == 0, liste))\nprint(list(filter(lambda x: x % 2 == 0, liste)))\n\n\n# --------------\n# reduce fonksiyonu\n# indirgeme işlemi yapar\n\nfrom functools import reduce\n\nliste = [1, 2, 3, 4]\n\nprint(reduce(lambda a, b: a + b, liste)) #bunu anlamadım","repo_name":"aykq/python","sub_path":"learningPython/OOP.py","file_name":"OOP.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"8267476680","text":"import time\n\ndef anagram_gen(text):\n\t\n\ttext = text.lower()\n\tword_dict = {}\n\trstart = time.time()\n\twith open(\"Tag9/deutsch2.txt\") as file:\n\t\tfor word in 
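# reduce, which the closing note in the tutorial above flags as unclear,
# folds the list pairwise from the left: ((1 + 2) + 3) + 4 -> 10, i.e. a
# running accumulation.
from functools import reduce

liste = [1, 2, 3, 4]
print(reduce(lambda a, b: a + b, liste))  # 10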
file.readlines():\n\t\t\tword = word.strip()\n\t\t\twlen = len(word)\n\t\t\tif not wlen in word_dict:\n\t\t\t\tword_dict[wlen] = list()\n\t\t\tword_dict[wlen].append(word.strip().lower())\n\trend = time.time()\n\trdelta = rend - rstart\n\tstart = time.time()\n\tanagrams = []\n\ttext_set = set(text)\n\ttext_counts = {c: text.count(c) for c in text_set}\n\tfor word in word_dict[len(text)]:\n\t\tdo_append = True\n\t\tfor c in text_set:\n\t\t\tif text_counts[c] != word.count(c):\n\t\t\t\tdo_append = False\n\t\t\t\tbreak\n\t\tif not do_append:\n\t\t\tcontinue\n\t\tif set(word) != text_set:\n\t\t\tcontinue\n\t\tanagrams.append(word)\n\tend = time.time()\n\tdelta = end - start\n\tprint(set(anagrams))\n\tprint(f\"Reading completed in only {rdelta} s\")\n\tprint(f\"Completed in only {delta} s\")\n\nanagram_gen(input(\"Text: \"))\n","repo_name":"WinPlay02/Python-Vorkurs","sub_path":"Tag9/a3opt.py","file_name":"a3opt.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4759590246","text":"\"\"\"\ntest outputs\n\"\"\"\n\nfrom nose.tools import ok_, eq_\nfrom simkit.core.outputs import Output\nfrom simkit.tests import PROJ_PATH\nimport os\n\n\ndef test_outputs_metaclass():\n    \"\"\"\n    Test Output Sources\n    \"\"\"\n\n    class OutputTest1(Output):\n        class Meta:\n            outputs_file = 'pvpower.json'\n            outputs_path = os.path.join(PROJ_PATH, 'outputs')\n\n    out_src_test1 = OutputTest1()\n    ok_(isinstance(out_src_test1, Output))\n    eq_(out_src_test1.param_file,\n        os.path.join(PROJ_PATH, 'outputs', 'pvpower.json'))\n\n    class OutputTest2(Output):\n        timestamps = {\"isconstant\": True, \"size\": 8761}\n        hourly_energy = {\n            \"isconstant\": True,\n            \"timeseries\": \"hourly_timeseries\", \"units\": \"Wh\",\n            \"size\": 8760\n        }\n        hourly_timeseries = {\"isconstant\": True, \"units\": \"Wh\", \"size\": 8760}\n        monthly_energy = {\"isconstant\": True, \"units\": \"Wh\", \"size\": 12}\n        annual_energy = {\"isconstant\": True, \"units\": \"Wh\"}\n\n    out_src_test2 = OutputTest2()\n    ok_(isinstance(out_src_test2, Output))\n    for k, v in out_src_test2.parameters.iteritems():\n        eq_(out_src_test1.parameters[k], v)\n","repo_name":"BreakingBytes/simkit","sub_path":"simkit/tests/test_outputs.py","file_name":"test_outputs.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"3044354792","text":"from ctypes import c_byte, c_char, c_ubyte, c_ushort, c_uint, Structure, Union\n\n\nSECTOR_SIZE = 4096\nNUM_SECTORS_PER_SLOT = 14\n\ndef GetSectionLength(section_number):\n    # https://bulbapedia.bulbagarden.net/wiki/Save_data_structure_(Generation_III)#Section_ID\n    \n    section_length = 3968 # 0xF80\n    \n    if section_number == 0:\n        section_length = 3884\n    elif section_number == 4:\n        section_length = 3848\n    elif section_number == 13:\n        section_length = 2000\n    elif section_number >= 0xFFFF or section_number < 0:\n        section_length = 0\n    \n    return section_length\n\nclass SaveChunk(Structure):\n    _fields_ = \\\n    (\n        ('data', c_ubyte * (SECTOR_SIZE - 128)),\n        ('_padding', c_ubyte * (128 - 12)),\n        ('ID', c_ushort),\n        ('checksum', c_ushort),\n        ('magic', c_uint),\n        ('save_index', c_uint),\n    )\n    \n    def GetLength(self):\n        return GetSectionLength(self.ID)\n    \n    def Validate(self):\n        return not self.GetError()\n    \n    def CalculateChecksum(self):\n        buflen = self.GetLength()\n        buf = self.data\n        \n        chk = 0\n        i = 0\n        while i < buflen:\n            val = buf[i + 0]\n            val |= buf[i + 1] << 8\n            
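# The per-character count comparison in anagram_gen above is Counter equality
# in disguise; collections.Counter does the same frequency check in one call.
from collections import Counter

print(Counter('listen') == Counter('silent'))  # True  -> anagrams
print(Counter('listen') == Counter('litany'))  # False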
val |= buf[i + 2] << 16\n val |= buf[i + 3] << 24\n chk += val\n i += 4\n \n chk = (chk + (chk >> 16)) & 0xFFFF\n \n return chk\n \n def GetError(self):\n if self.magic != 0x08012025:\n return 1\n \n chk = self.CalculateChecksum()\n \n if chk != self.checksum:\n return 2\n \n return 0\n \n def Regenerate(self):\n self.magic = 0x08012025\n self.checksum = self.CalculateChecksum()\n \n assert self.Validate()\n","repo_name":"SonoSooS/EmeACEv2","sub_path":"libpoke_types/savefile.py","file_name":"savefile.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18679950199","text":"import unittest\nfrom ed_utils.decorators import number\n\nfrom mountain import Mountain\nfrom mountain_organiser import MountainOrganiser\nfrom mountain_manager import *\n\nclass TestInfiniteHash(unittest.TestCase):\n\n @number(\"6.1\")\n def test_example(self):\n m1 = Mountain(\"m1\", 2, 2)\n m2 = Mountain(\"m2\", 2, 9)\n m3 = Mountain(\"m3\", 3, 6)\n m4 = Mountain(\"m4\", 3, 1)\n m5 = Mountain(\"m5\", 4, 6)\n m6 = Mountain(\"m6\", 7, 3)\n m7 = Mountain(\"m7\", 7, 7)\n m8 = Mountain(\"m8\", 7, 8)\n m9 = Mountain(\"m9\", 7, 6)\n m10 = Mountain(\"m10\", 8, 4)\n\n\n # m1 = Mountain(\"m1\" , 2 , 3)\n # l1 = Mountain(\"l1\" , 2 , 5)\n # l2 = Mountain(\"l2\" , 4 , 1)\n # c1 = Mountain(\"c1\" , 4 , 4)\n # t1 = Mountain(\"desting\" , 76 , 3)\n # t2 = Mountain(\"testing\" , 76 , 3)\n\n mo = MountainOrganiser()\n # mm = MountainManager()\n\n # mm.add_mountain(mountain = m1)\n # mm.add_mountain(mountain = l1)\n # mm.add_mountain(mountain = l2)\n # mm.add_mountain(mountain = c1)\n # mm.add_mountain(mountain = t1)\n\n # diff2 = mm.mountains_with_difficulty(2)\n # print(\"diff with 2 \" , diff2)\n\n # mo.add_mountains(diff2)\n # print(\"org with 2 \" , mo.sorted_mountain_list)\n\n # diff4 = mm.mountains_with_difficulty(4)\n # print(\"diff with 4 \" , diff4)\n\n # mo.add_mountains(diff4)\n # print(\"org with 2 and 4 \" , mo.sorted_mountain_list)\n\n # diff76 = mm.mountains_with_difficulty(76)\n # print(\"diff with 76 \" , diff76)\n\n # mo.add_mountains(diff76)\n # print(\"org with 2 4 and 76 \" , mo.sorted_mountain_list)\n\n # rank = mo.cur_position(t1)\n # del mo.sorted_mountain_list[rank]\n \n # mm.edit_mountain(t1 , t2)\n \n # diff76 = mm.mountains_with_difficulty(76)\n # print(\"diff with 76 \" , diff76)\n\n # mo.add_mountains(diff76)\n # print(\"org with 2 4 and 76 \" , mo.sorted_mountain_list)\n \n\n\n mo.add_mountains([m2, m1])\n\n # print(\"list is \" , mo.sorted_mountain_list)\n\n self.assertEqual([mo.cur_position(m) for m in [m1, m2]], [0, 1])\n mo.add_mountains([m4, m3])\n self.assertEqual([mo.cur_position(m) for m in [m1, m2, m3, m4]], [1, 3, 2, 0])\n mo.add_mountains([m9])\n self.assertEqual([mo.cur_position(m) for m in [m1, m2, m3, m4, m9]], [1, 4, 2, 0, 3])\n mo.add_mountains([m7, m5, m6, m8])\n self.assertEqual([mo.cur_position(m) for m in [m1, m2, m3, m4, m5, m6, m7, m8, m9]], [1, 8, 3, 0, 4, 2, 6, 7, 5])\n\n # print(\"list is \" , mo.sorted_mountain_list)\n \n self.assertRaises(KeyError, lambda: mo.cur_position(m10)) \n","repo_name":"atg08/Mountain-Climbing","sub_path":"tests/test_mountain_organiser.py","file_name":"test_mountain_organiser.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13081411908","text":"import time ,sys ,threading\n\n__all__ = 
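# The sector checksum above on a minimal 4-byte buffer: sum little-endian
# 32-bit words, then fold the high halfword into the low one.
import struct

buf = bytes([0x25, 0x20, 0x01, 0x08])  # one word: 0x08012025
chk = sum(struct.unpack('<I', buf[i:i + 4])[0] for i in range(0, len(buf), 4))
chk = (chk + (chk >> 16)) & 0xFFFF
print(hex(chk))                        # 0x2826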
['Color','Text','Square','Animation','ThreadAnimation']\n\nclass Color(str):\n colors = {\n \"BOLD\":\"\\033[1m\",\n \"UNDERLINE\": \"\\033[4m\",\n \"BLINK\": \"\\033[5m\",\n \"NORMAL\": \"\\033[0m\",\n\n \"WIHTEBG\": \"\\033[7m\",\n \"REDBG\": \"\\033[41m\",\n \"GREENBG\": \"\\033[42m\",\n \"YELLOWBG\": \"\\033[43m\",\n \"BLUEBG\": \"\\033[44m\",\n \"PINKBG\": \"\\033[45m\",\n \"CYANBG\": \"\\033[46m\",\n \"GRAYBG\": \"\\033[100m\",\n\n \"LREDBG\": \"\\033[101m\",\n \"LGREENBG\": \"\\033[102m\",\n \"LYELLOWBG\": \"\\033[103m\",\n \"LBLUEBG\": \"\\033[104m\",\n \"LPINKBG\": \"\\033[105m\",\n \"LCYANBG\": \"\\033[106m\",\n \"LGRAYBG\": \"\\033[47m\",\n\n \"WIHTE\": \"\\033[0;37m\",\n \"RED\": \"\\033[0;31m\",\n \"GREEN\": \"\\033[0;32m\",\n \"YELLOW\": \"\\033[0;33m\",\n \"BLUE\": \"\\033[0;34m\",\n \"PINK\": \"\\033[0;35m\",\n \"CYAN\": \"\\033[0;36m\",\n \"GRAY\": \"\\033[1;30m\",\n\n \"LWIHTE\": \"\\033[1;37m\",\n \"LRED\": \"\\033[1;31m\",\n \"LGREEN\": \"\\033[1;32m\",\n \"LYELLOW\": \"\\033[1;33m\",\n \"LBLUE\": \"\\033[1;34m\",\n \"LPINK\": \"\\033[1;35m\",\n \"LCYAN\": \"\\033[1;36m\",\n }\n def __dir__(self):\n return list(set(dir(__class__)+[attr for attr in self.colors.keys()]))\n\n def __getattr__(self, item):\n if item in self.colors:\n return self.__class__(self.colors[item])\n else: raise AttributeError(f\"type object '{__class__.__name__}' has no attribute '{item}'\")\n\n def __add__(self, other):\n return super().__add__(other)+self.NORMAL\n\n def reader(self,text:str) -> str:\n '''Read the Colors from string'''\n if type(text) != str:\n raise TypeError(f'reader function accept only string not ({type(text).__name__}: {text})')\n for name,color in self.colors.items():\n text = text.replace('[$'+name+']',color)\n text = text.replace('[$/]',self.colors['NORMAL'])\n return text\n\n def del_colors(self,text:str) -> str:\n '''Remove colors from string'''\n if type(text) != str:\n raise TypeError(f'del_colors function accept only string not ({type(text).__name__}: {text})')\n for name,color in self.colors.items():\n text = text.replace('[$'+name+']','')\n text = text.replace(color,'')\n return text\n\n def show_all_rgb_colors(self):\n '''Simple example to show all rgb colors'''\n temp = 0\n text = 'type : Foreground...\\n'\n for type in [38,48]:\n text += '\\n\\ntype : Background...\\n' if type==48 else ''\n for i in range(255):\n temp += 1\n _txt = '{}{:4d} \\033[0m'.format(f'\\033[{type};5;{i+1}m',i+1)\n if temp > 8:\n text += _txt+'\\n'\n temp = 0\n else:\n text += _txt\n temp += 1\n print (text)\n\n def rgb(self,rgb:int,type='FG') -> str:\n '''\n type :\n FG : Foreground\n BG : Background\n rgb : (int)\n '''\n if type not in ['FG','BG']:\n raise TypeError('please choose BG or FG')\n if rgb > 255 or rgb < 1:\n raise Exception(\"rgb max '255' \")\n return self.__class__(f'\\033[{38 if type==\"FG\" else 48};5;{rgb}m')\n\nclass Text:\n def get_size(self,text:str) -> dict:\n '''Get the text size [width,height]'''\n if type(text) != str:\n raise TypeError(f'get_size function accept only string not ({type(text).__name__}: {text})')\n text = Color().del_colors(text)\n width = sorted([len(i) for i in text.split('\\n')])[-1]\n height = len(text.split('\\n'))\n return {'width':width,'height':height}\n\n def del_padding(self,text:str) -> str:\n '''Delete text padding'''\n if type(text) != str:\n raise TypeError(f'del_padding function accept only string not ({type(text).__name__}: {text})')\n def delete_top(text:str) -> str:\n text = text.split('\\n')\n result = []\n top_space = True\n for 
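# The [$NAME] token scheme that Color.reader above expands, reduced to two
# substitutions on a plain dict (escape codes copied from the class).
colors = {'RED': '\033[0;31m', 'NORMAL': '\033[0m'}
text = '[$RED]error[$/]'
for name, color in colors.items():
    text = text.replace('[$' + name + ']', color)
text = text.replace('[$/]', colors['NORMAL'])
print(repr(text))  # '\x1b[0;31merror\x1b[0m'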
t in text:\n if (list(Color().del_colors(t)) == [] or list(set(list(Color().del_colors(t)))) == [' ']) and top_space:\n pass\n else:\n top_space = False\n result.append(t)\n return '\\n'.join(result)\n\n def delete_bottom(text:str) -> str:\n text = text.split('\\n')[::-1]\n result = []\n bottom_space = True\n for t in text:\n if (list(Color().del_colors(t)) == [] or list(set(list(Color().del_colors(t)))) == [' ']) and bottom_space:\n pass\n else:\n bottom_space = False\n result.append(t)\n return '\\n'.join(result[::-1])\n\n def delete_left(text:str) -> str:\n text = text.split('\\n')\n left_space = []\n temp = None\n for t in text:\n for space in list(t):\n if space == ' ': temp = 1 if temp == None else temp+1\n else: break\n if temp != None:left_space.append(temp)\n temp = 0\n try: return '\\n'.join([string[sorted(left_space)[0]:] for string in text])\n except IndexError: return '\\n'.join(text)\n return delete_left( delete_bottom( delete_top(text) ) )\n\n def pos(self,text:str,x=0,y=0) -> str:\n '''Change text postion'''\n if type(text) != str:\n raise TypeError(f'pos function accept only string not ({type(text).__name__}: {text})')\n text = text.split('\\n')\n style = '\\n'*y\n for i in text:\n style = style+(' '*x)+i+'\\n'\n return Color().reader(style[:-1])\n\n def CentreAlign(self,text:str) -> str:\n '''Put the Text in the Middle'''\n if type(text) != str:\n raise TypeError(f'CentreAlign function accept only string not ({type(text).__name__}: {text})')\n text_size = self.get_size(text)['width']\n result = []\n for t in text.split('\\n'):\n if len(Color().del_colors(t)) == text_size:\n result.append(t)\n else:\n result.append(\n self.pos(\n t,\n x=(text_size//2)-(len(Color().del_colors(t))//2)\n )\n )\n return Color().reader('\\n'.join(result))\n\n def CentreAlignPro(self, ListTexts: list) -> str:\n '''Put the big Texts in the Middle'''\n if type(ListTexts) != list:\n raise TypeError(f'CentreAlign function accept only list not ({type(ListTexts).__name__}: {ListTexts})')\n width = 0\n for text in ListTexts:\n text = Color().del_colors(text)\n temp_width = self.get_size(text)\n width = temp_width['width'] if width < temp_width['width'] else width\n\n result = []\n for t in ListTexts:\n if self.get_size(t)['width'] == width:\n result.append(t)\n else:\n result.append(\n self.pos(\n t,\n x=(width // 2) - (self.get_size(t)['width'] // 2)\n )\n )\n return Color().reader('\\n'.join(result))\n\n def Figlet(self,text:str,font='epic') -> str:\n try:\n import pyfiglet\n except ModuleNotFoundError or ImportError:\n sys.exit(ModuleNotFoundError('ModuleNotFoundError: No module named figlet\\n\\t\"pip3 install pyfiglet\"'))\n if font not in pyfiglet.FigletFont.getFonts():\n raise TypeError(f'Figlet not support this font! 
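# Text.get_size above measures the widest line and the line count; the same
# computation on a bare string:
text = 'ab\ncdef\ng'
width = max(len(line) for line in text.split('\n'))
height = len(text.split('\n'))
print({'width': width, 'height': height})  # {'width': 4, 'height': 3}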
({font})')\n FIG = pyfiglet.Figlet(font=font)\n output = FIG.renderText(text)\n output = self.del_padding(str(output))\n return output\n\n def FigletFonts(self) -> list:\n try:\n import pyfiglet\n except ModuleNotFoundError or ImportError:\n sys.exit(ModuleNotFoundError('ModuleNotFoundError: No module named figlet\\n\\t\"pip3 install pyfiglet\"'))\n return pyfiglet.FigletFont.getFonts()\n\n def mix(self,List:list,spacing=0) -> str:\n '''Mix texts together'''\n height ,output = [] ,''\n for text in List:height += [self.get_size(text)['height']]\n temp = [\n self.full(self.pos(t,x=spacing).split('\\n'))+\n ([' '*(self.get_size(t)['width']+spacing)]*\n (sorted(height)[-1]-sorted(height)[0]))\n for t in List\n ]\n for text in zip(*temp):\n output += ''.join(text)+'\\n'\n return Color().reader(self.del_padding(output))\n\n def full(self,text):\n tmp = []\n for i in text:\n tmp += [self.get_size(i)['width']]\n Len = sorted(tmp)[-1]\n tmp = []\n for i in text:\n tmp += [f'{i}{\" \" * (Len - self.get_size(i)[\"width\"])}']\n return tmp\n\n def equal(self, text):\n tmp = []\n for i in text:\n tmp += [self.get_size(i)['width']]\n Len = sorted(tmp)[-1]\n tmp = []\n for i in text:\n i = f\"{' ' * ((Len - self.get_size(i)['width']) // 2)}{i}\"\n tmp += [f'{i}{\" \" * (Len - self.get_size(i)[\"width\"])}']\n return tmp\n\n def arabic(self,text):\n try:\n import arabic_reshaper\n from bidi.algorithm import get_display\n except ModuleNotFoundError:\n raise ModuleNotFoundError('''To fix this error install this libs\\n\"pip install arabic_reshaper\"\\n\"pip install python-bidi\"''')\n\n reshaped_text = arabic_reshaper.reshape(text)\n return get_display(reshaped_text)\n\n def CInput(self,text,completer=lambda text,state:[],clear_history=True):\n def _completer(text, state):\n return [i for i in completer if i.startswith(text)][state]\n\n import readline\n if clear_history:\n readline.clear_history()\n if type(completer) == type(_completer):\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(completer)\n elif type(completer) == list:\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(_completer)\n else:\n raise TypeError('completer accept only [ function or list ]')\n return input(Color().reader(text))\n\nclass Square:\n square=['╔', '║', '╚', '═', '╝', '║', '╗', '═']\n spacing=0\n padding= [0, 0, 0, 0]\n color=''\n cols=0\n equal=True\n center=False\n def __init__(self):\n self.SETTINGS = {\n 'square':['╔', '║', '╚', '═', '╝', '║', '╗', '═'],\n 'spacing':0,\n 'padding':[0,0,0,0],\n 'color':'',\n 'cols':0,\n 'equal':True,\n 'center':False,\n }\n\n def __setattr__(self, key, value):\n super(Square, self).__setattr__(key,value)\n if key in self.SETTINGS:\n self.set_settings({key:value})\n\n def __dir__(self):\n return list(set(dir(__class__)+[attr for attr in self.SETTINGS.keys()]))\n\n def style(self,List:list) -> str:\n if type(List) != list:\n raise TypeError(f'style function accept only list not {List.__class__.__name__}')\n\n if self.SETTINGS['equal']:\n if self.SETTINGS['center']:\n List = Text().equal(List)\n else:\n List = Text().full(List)\n\n if self.SETTINGS['cols'] == 0:\n output = Text().mix([self.base(sq) for sq in List],spacing=self.SETTINGS['spacing'])\n\n else:\n output = ''\n cols = self.SETTINGS['cols']\n temp1 = 0\n temp2 = cols\n while True:\n try:\n output += Text().mix([self.base(sq) for sq in List[temp1:temp2]],spacing=self.SETTINGS['spacing'])+'\\n'\n temp1 = temp2\n temp2 += cols\n if len(List) <= temp1:\n output = output[:-1]\n break\n except 
IndexError:\n output = output[:-1]\n break\n return output\n\n def set_settings(self,settings:dict) -> dict:\n for key,item in settings.items():\n if key == 'square':\n if type(item) == list and len(item) == 8:\n self.SETTINGS[key] = item\n else: raise TypeError('square accept only list and len list should be 8')\n\n elif key == 'spacing':\n if type(item) == int:\n self.SETTINGS[key] = item\n else: raise TypeError('spacing accept only (int)')\n\n elif key == 'padding':\n if type(item) == list and len(item) == 4:\n self.SETTINGS[key] = item\n else: raise TypeError('padding accept only (list) and 4 items')\n\n elif key == 'color':\n if type(item) == str or type(item) == Color:\n if item.replace('[$','').replace(']','') in Color().colors.keys() or item in [_color[1] for _color in Color().colors.items()]:\n self.SETTINGS[key] = str(item)\n else: raise TypeError(f'color accept only {[\"[$\"+c+\"]\" for c in Color().colors.keys()]}')\n else: raise TypeError('color accept only (str)')\n\n elif key == 'cols':\n if type(item) == int:\n self.SETTINGS[key] = item\n else: raise TypeError('cols accept only (int)')\n\n elif key == 'equal':\n if type(item) == bool:\n self.SETTINGS[key] = item\n else: raise TypeError('equal accept only (bool)')\n\n elif key == 'center':\n if type(item) == bool:\n self.SETTINGS[key] = item\n else: raise TypeError('center accept only (bool)')\n\n else: raise TypeError(f\"'{key}' is not in settings, use only {[key for key in self.SETTINGS.keys()]}\")\n\n return self.SETTINGS\n\n def base(self,text):\n PADDING = self.SETTINGS['padding']\n\n '''set padding and text size'''\n text = Text().pos(text,x=PADDING[0])\n text = ('\\n'*PADDING[1]) + text + ('\\n'*PADDING[3])\n text_size = Text().get_size(text)\n text_size = {'width':text_size['width']+PADDING[2]}\n\n SQUARE = self.SETTINGS['square']\n COLOR = self.SETTINGS['color']\n\n CO = (COLOR if COLOR else '[$NORMAL]')\n output = CO+SQUARE[0]+CO+(SQUARE[7]*text_size['width'])+CO+SQUARE[6]+'[$NORMAL]' # .......... ╔═════╗\n for t in text.split('\\n'):\n t_size = Text().get_size(t)\n output += '\\n'+CO+(SQUARE[1]+'[$NORMAL]'+t) # ........................................... ║\n output += ' '*(text_size['width']-t_size['width'])+CO+SQUARE[5]+'[$NORMAL]' # ................. ║\n\n output += '\\n'+CO+SQUARE[2]+CO+(SQUARE[3]*text_size['width'])+CO+SQUARE[4]+'[$NORMAL]' # .... 
╚═════╝\n return Color().reader(output)\n\nclass AnimationTools:\n def __init__(self):\n self.load_anim = self.set_load_anim(['/', '-', '\\\\', '|'])\n self.text_anim = self.set_text_anim('Loading')\n\n def set_load_anim(self,List):\n while True:\n for i in List:\n yield i\n\n def set_text_anim(self,text):\n while True:\n for i in range(0,len(text)):\n t1 = text[:i]\n t2 = text[i].upper() if text[i] != text[i].upper() else text[i].lower()\n t3 = text[i+1:]\n yield t1+t2+t3\n\nAnimationTools = AnimationTools()\n\nclass Animation:\n def SlowText(self, text, timer=0.1):\n '''to write text by Index(System) slow motion'''\n for i in text:\n sys.stdout.write(i)\n sys.stdout.flush()\n time.sleep(timer)\n\n def SlowLine(self, text, timer=0.5):\n '''to write text by Line as slow motion'''\n for i in text.split('\\n'):\n print(i)\n time.sleep(timer)\n\n def Loading(self, text='Loading...'):\n if str(type(text)) == \"\":\n text = next(text)\n anim = next(AnimationTools.load_anim)\n return [text+anim]\n\n def Prograsse(self,prograsse=['│','█','▒','│'],text='Loading',width=24,min=1,max=10):\n text = Color().reader(str(text))\n prograsse = [Color().reader(str(i)) for i in prograsse]\n i = width*min//max\n return ['\\r'+text+prograsse[0]+(i*prograsse[1])+(prograsse[2]*(width-i))+prograsse[3]+' ']\n\nclass ThreadAnimation:\n def __init__(self,Animation=Animation().Loading,kwargs={},timer=.2):\n self.timer = timer\n self.Animation = Animation\n self.kwargs = kwargs\n\n def kill(self):\n self._kill = True\n self.THREAD_ANIM.join()\n\n def set_end(self,text):\n self.END = Color().reader(str(text))\n\n def set_kwargs(self,**kwargs):\n self.kwargs = kwargs\n\n def _anim(self):\n size = 0\n while True:\n _Animation = self.Animation(**self.kwargs)\n for text in _Animation:\n text = Color().reader(text+'[$NORMAL]')\n if self._kill:break\n sys.stdout.write('\\r'+text)\n size = len(Color().del_colors(text))\n time.sleep(self.timer)\n if self._kill:\n print('\\r' + (' ' *size),end='\\r')\n try:\n print('\\r' + self.END)\n except AttributeError:\n pass\n break\n\n def start_loop(self):\n self._kill = False\n self.THREAD_ANIM = threading.Thread(target=self._anim)\n self.THREAD_ANIM.daemon = True\n self.THREAD_ANIM.start()\n\n def __call__(self, func, *args, **kwargs):\n def wrapper(*args, **kwargs):\n self.start_loop()\n rv = func(self,*args, **kwargs)\n self._kill = True\n self.THREAD_ANIM.join()\n return rv\n return wrapper","repo_name":"MohamedAl-Kainai/N4Tools","sub_path":"N4Tools/Design.py","file_name":"Design.py","file_ext":"py","file_size_in_byte":18203,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"30182029801","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\n# from tempest.lib.services.network import base\nfrom itempest.services import base_neutron as base\n\n\n# refer to neutron CLI and {neutron-repo}/neutron/extensions/tag.py\nclass TagsClient(base.BaseNetworkClient):\n resource_base_path = '/{resource_type}/{resource_id}/tags'\n resource_object_path = '/{resource_type}/{resource_id}/tags/{tag}'\n\n def add_tag(self, **kwargs):\n \"\"\"Add tag is an update-operation, not create resource.\n \"\"\"\n # neutron tag-add --resource resource\n # --resource-type network --tag TAG\n uri = self.resource_object_path.format(\n **self._fix_args(**kwargs))\n # https://bugs.launchpad.net/neutron/+bug/1606659\n return self.update_resource(uri, None, 201)\n\n def remove_tag(self, **kwargs):\n # neutron tag-remove --resource resource\n # --resource-type network --tag TAG\n if 'all' in kwargs:\n return self.remove_all_tags(**kwargs)\n uri = self.resource_object_path.format(\n **self._fix_args(**kwargs))\n return self.delete_resource(uri)\n\n def remove_all_tags(self, **kwargs):\n # neutron tag-remove --resource resource\n # --resource-type network --all\n uri = self.resource_base_path.format(\n **self._fix_args(**kwargs))\n return self.delete_resource(uri)\n\n def replace_tag(self, **kwargs):\n # neutron tag-replace --resource resource\n # --resource-type network --tag TAG\n tag_list = kwargs.pop('tags', None)\n kwargs = self._fix_args(**kwargs)\n if 'tag' in kwargs:\n uri = self.resource_object_path.format(**kwargs)\n else:\n uri = self.resource_base_path.format(**kwargs)\n update_body = None if tag_list is None else {\"tags\": tag_list}\n return self.update_resource(uri, update_body)\n\n # RESOURCE can be a name.\n # To simplify the design we will only take resource-ID only\n def _fix_args(self, **kwargs):\n if 'resource' in kwargs and 'resource_id' not in kwargs:\n kwargs['resource_id'] = kwargs['resource']\n if 'resource_type' in kwargs:\n if kwargs['resource_type'][-1] != 's':\n kwargs['resource_type'] += \"s\"\n else:\n kwargs['resource_type'] = 'networks'\n return kwargs\n\n\ndef get_client(client_mgr,\n set_property=False, with_name=\"tags_client\"):\n \"\"\"create a tags client from manager or networks_client\n\n For itempest user:\n from itempest import load_our_solar_system as osn\n from vmware_nsx_tempest.services import tags_client\n client = tags_client.get_client(osn.adm.manager)\n For tempest user:\n client = tags_client.get_client(osn.adm)\n \"\"\"\n manager = getattr(client_mgr, 'manager', client_mgr)\n net_client = getattr(manager, 'networks_client')\n try:\n _params = manager.default_params_with_timeout_values.copy()\n except Exception:\n _params = {}\n client = TagsClient(net_client.auth_provider,\n net_client.service,\n net_client.region,\n net_client.endpoint_type,\n **_params)\n if set_property:\n setattr(manager, with_name, client)\n return client\n","repo_name":"gravity-tak/interactive-tempest","sub_path":"itempest/services/tags_client.py","file_name":"tags_client.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12859182941","text":"from __future__ import annotations\n\nfrom rka.components.cleanup import cleanup_manager\nfrom rka.components.concurrency.rkathread import RKAThread\nfrom rka.components.events.event_system import EventSystem\nfrom rka.components.impl.factories import HotkeyServiceFactory\nfrom 
rka.components.ui.hotkeys import IHotkeyFilter, IHotkeyService, HotkeyEventPumpType\nfrom rka.components.ui.overlay import Severity\nfrom rka.eq2.configs.shared.hosts import is_slave_window, is_master_window\nfrom rka.eq2.configs.shared.rka_constants import KEY_REPEAT\nfrom rka.eq2.master import IRuntime\nfrom rka.eq2.master.control import IHotkeySpec, logger\nfrom rka.eq2.master.control.hotkey_bus_events import HotkeyEvents\nfrom rka.eq2.master.ui import PermanentUIEvents\nfrom rka.eq2.master.ui.control_menu_ui import ControlMenuUIType\nfrom rka.eq2.master.ui.debug_helpers import print_mouse_info\n\n\nclass EmptyHotkeySpec(IHotkeySpec):\n def get_spec_count(self) -> int:\n return 1\n\n def register_keys(self, runtime: IRuntime, spec_id: int, keyfilter: IHotkeyFilter) -> str:\n return 'None'\n\n\nclass KeySpecManager:\n def __register_permanent_control_keys(self, keyfilter: IHotkeyFilter):\n # master control\n keyfilter.add_keys('consume alt control q', self.__server_cleanup_start)\n keyfilter.add_keys(['consume alt oem_4', 'consume alt f'], self.resume)\n keyfilter.add_keys('consume alt oem_6', lambda: self.pause(True))\n keyfilter.add_keys('consume alt oem_5', self.cycle_hotkeys)\n keyfilter.add_keys('escape', self.__runtime.control_menu.cancel_script_start)\n # panic stop\n keyfilter.add_keys(['consume control escape', 'consume alt escape', 'consume shift escape',\n 'consume control alt escape', 'consume control shift escape', 'consume alt shift escape',\n 'consume control alt shift escape'], lambda: self.__runtime.control_menu.stop_scripts())\n # menus\n keyfilter.add_keys('consume F11', lambda: self.__runtime.control_menu.select_menu(ControlMenuUIType.OVERLAY))\n keyfilter.add_keys('consume control F11', lambda: self.__runtime.control_menu.select_flag(ControlMenuUIType.OVERLAY))\n keyfilter.add_keys('consume F12', lambda: self.__runtime.control_menu.select_script(ControlMenuUIType.OVERLAY))\n keyfilter.add_keys('consume control F12', lambda: self.__runtime.control_menu.control_scripts(ControlMenuUIType.OVERLAY))\n # selecting character for script use\n keyfilter.add_keys(f'consume control oem_4', lambda: self.cycle_selection_id(-1))\n keyfilter.add_keys(f'consume control oem_6', lambda: self.cycle_selection_id(1))\n # utility\n keyfilter.add_keys('consume shift alt control k', self.__runtime.request_ctrl.request_toggle_keep_clicking)\n keyfilter.add_keys('consume shift alt control m', print_mouse_info)\n keyfilter.add_keys('consume shift alt control l', self.__runtime.zonemaps.save_location)\n\n def __register_running_control_keys(self, keyfilter: IHotkeyFilter):\n # automatic clicking\n keyfilter.add_keys('repeat 1', lambda: self.__runtime.automation.autocombat.sustain_clicking())\n # utility\n keyfilter.add_keys('control space', self.__runtime.overlay_controller.start_timer)\n # pausing\n keyfilter.add_keys(['r', 'oem_2', 'return'], lambda: self.pause(False))\n\n # noinspection PyMethodMayBeStatic\n def __register_programmable_event_hotkeys(self, keyfilter: IHotkeyFilter):\n bus = EventSystem.get_main_bus()\n keyfilter.add_keys('consume F1', lambda: bus.post(HotkeyEvents.FUNCTION_KEY(function_num=1)))\n keyfilter.add_keys('consume F2', lambda: bus.post(HotkeyEvents.FUNCTION_KEY(function_num=2)))\n keyfilter.add_keys('consume F3', lambda: bus.post(HotkeyEvents.FUNCTION_KEY(function_num=3)))\n keyfilter.add_keys('consume F4', lambda: bus.post(HotkeyEvents.FUNCTION_KEY(function_num=4)))\n keyfilter.add_keys('consume F5', lambda: bus.post(HotkeyEvents.FUNCTION_KEY(function_num=5)))\n 
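# F6 completes the set; each binding consumes the keystroke locally and republishes it as a FUNCTION_KEY event on the main bus\n 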
keyfilter.add_keys('consume F6', lambda: bus.post(HotkeyEvents.FUNCTION_KEY(function_num=6)))\n\n # noinspection PyMethodMayBeStatic\n def __register_blocked_keys_in_remote(self, keyfilter: IHotkeyFilter):\n keyfilter.add_keys('consume alt F4', lambda: True)\n\n # noinspection PyMethodMayBeStatic\n def __register_window_switching_keys_in_remote(self, keyfilter: IHotkeyFilter):\n local_player = self.__runtime.playerselectors.local_online()\n keyfilter.add_keys('consume alt tab', lambda: self.__runtime.master_bridge.send_switch_to_client_window(local_player.resolve_first_player()))\n\n @staticmethod\n def __new_nonslave_window_filter() -> IHotkeyFilter:\n new_filter = HotkeyServiceFactory.create_filter()\n new_filter.set_window_name_filter(lambda win_title: not win_title or not is_slave_window(win_title))\n new_filter.set_repetition_delay(KEY_REPEAT)\n return new_filter\n\n @staticmethod\n def __new_slave_window_filter() -> IHotkeyFilter:\n new_filter = HotkeyServiceFactory.create_filter()\n new_filter.set_window_name_filter(is_slave_window)\n new_filter.set_repetition_delay(KEY_REPEAT)\n return new_filter\n\n @staticmethod\n def __new_master_or_slave_window_filter() -> IHotkeyFilter:\n new_filter = HotkeyServiceFactory.create_filter()\n new_filter.set_description('Window switching hotkeys')\n new_filter.set_window_name_filter(lambda win_title: is_slave_window(win_title) or is_master_window(win_title))\n new_filter.set_repetition_delay(KEY_REPEAT)\n return new_filter\n\n def __new_running_filters(self) -> [IHotkeyFilter]:\n nonslave_filter = KeySpecManager.__new_nonslave_window_filter()\n nonslave_filter.set_description('Playing state hotkeys')\n self.__register_permanent_control_keys(nonslave_filter)\n self.__register_running_control_keys(nonslave_filter)\n gameonly_filter = KeySpecManager.__new_master_or_slave_window_filter()\n self.__register_programmable_event_hotkeys(gameonly_filter)\n slaveonly_filter = KeySpecManager.__new_slave_window_filter()\n self.__register_blocked_keys_in_remote(slaveonly_filter)\n self.__register_window_switching_keys_in_remote(slaveonly_filter)\n return [nonslave_filter, gameonly_filter, slaveonly_filter]\n\n def __new_paused_filters(self) -> [IHotkeyFilter]:\n nonslave_filter = KeySpecManager.__new_nonslave_window_filter()\n nonslave_filter.set_description('Paused state hotkeys')\n self.__register_permanent_control_keys(nonslave_filter)\n slaveonly_filter = KeySpecManager.__new_slave_window_filter()\n self.__register_blocked_keys_in_remote(slaveonly_filter)\n self.__register_window_switching_keys_in_remote(slaveonly_filter)\n return [nonslave_filter, slaveonly_filter]\n\n def __init__(self, runtime: IRuntime):\n self.__runtime = runtime\n self.__hotkey_spec: IHotkeySpec = EmptyHotkeySpec()\n self.__current_spec_id = 0\n self.__current_spec_name = ''\n self.hotkey_service = HotkeyServiceFactory.create_service(service_type=HotkeyEventPumpType.SERVICE_TYPE_CURRENT_THREAD_PUMP)\n # this key filter is added when game hotkeys are active\n self.running_keyfilters = self.__new_running_filters()\n # this key filter is always active\n self.paused_keyfilters = self.__new_paused_filters()\n\n def __server_cleanup(self):\n logger.info('cleanup start')\n self.__runtime.close()\n cleanup_manager.close_all()\n\n def __server_cleanup_start(self):\n close_thread = RKAThread('Cleanup thread', target=self.__server_cleanup)\n # remove from resource list to prevent cleanup manager from waiting for it\n close_thread.close_resource()\n close_thread.start()\n\n def 
__update_status_info(self):\n status_info = f'Hotkeys: {self.__current_spec_name} {\"PAUSED\" if self.__runtime.processor.is_paused() else \"ON\"}'\n self.__runtime.overlay.log_event(status_info, Severity.Critical, event_id=PermanentUIEvents.HOTKEYS.str())\n\n def pause(self, clear_processor: bool):\n logger.debug(f'pausing hotkeys {clear_processor}')\n self.hotkey_service.clear_filters()\n self.hotkey_service.add_filters(self.paused_keyfilters)\n if self.__runtime.processor.pause():\n if self.__runtime.combatstate.is_combat():\n self.__runtime.tts.say('keys off')\n elif clear_processor:\n self.__runtime.processor.clear_processor()\n self.__runtime.overlay.log_event('Processor pruned', Severity.Normal)\n self.__update_status_info()\n\n def resume(self):\n logger.debug(f'resuming hotkeys')\n self.hotkey_service.clear_filters()\n self.hotkey_service.add_filters(self.running_keyfilters)\n if self.__runtime.processor.resume():\n if self.__runtime.combatstate.is_combat():\n self.__runtime.tts.say('keys on')\n self.__update_status_info()\n\n def set_hotkey_spec(self, hotkey_spec: IHotkeySpec):\n logger.debug(f'setting hotkey spec {hotkey_spec}')\n self.__hotkey_spec = hotkey_spec\n self.__current_spec_id = None\n self.cycle_hotkeys(None, self.hotkey_service)\n\n def unset_hotkey_spec(self):\n self.set_hotkey_spec(EmptyHotkeySpec())\n\n def cycle_hotkeys(self, _, hotkey_service: IHotkeyService):\n # get next keyspec ID\n if self.__current_spec_id is None:\n self.__current_spec_id = 0\n else:\n self.__current_spec_id = (self.__current_spec_id + 1) % (self.__hotkey_spec.get_spec_count() + 1)\n # get keyspec name and keys\n self.running_keyfilters = self.__new_running_filters()\n if self.__current_spec_id < self.__hotkey_spec.get_spec_count():\n new_keyspec_filter = KeySpecManager.__new_nonslave_window_filter()\n self.__current_spec_name = self.__hotkey_spec.register_keys(self.__runtime, self.__current_spec_id, new_keyspec_filter)\n self.running_keyfilters.append(new_keyspec_filter)\n else:\n self.__current_spec_name = 'Control & scripts only'\n # store new hotkey filters\n logger.info(f'rotating hotkey specification to {self.__current_spec_name}')\n # apply new hotkey filters\n hotkey_service.clear_filters()\n if self.__runtime.processor.is_paused():\n hotkey_service.add_filters(self.paused_keyfilters)\n else:\n hotkey_service.add_filters(self.running_keyfilters)\n self.__update_status_info()\n\n def cycle_selection_id(self, increment: int):\n current_selection_id = self.__runtime.overlay.get_selection_id()\n max_selection_id = self.__runtime.overlay.get_max_selection_id()\n new_selection_id = (current_selection_id + increment) % max_selection_id\n self.__runtime.overlay.set_selection_id(new_selection_id)\n\n def run_blocking(self):\n self.hotkey_service.start(self.running_keyfilters)\n","repo_name":"npstash/public_rka","sub_path":"rka/eq2/master/control/keyspecmgr.py","file_name":"keyspecmgr.py","file_ext":"py","file_size_in_byte":11132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19355406653","text":"from ..types import *\nfrom ..models.bert import *\nfrom ..utils.training import Trainer\nfrom ..utils.loss import BCEWithLogitsIgnore, BCEWithLogitsLoss\nfrom ..preprocessing import read_labeled, extract_class_weights\n\nfrom torch import manual_seed, save, load\nfrom torch.optim import AdamW \n\nfrom warnings import filterwarnings\nimport sys \nimport os \n\nSAVE_PREFIX = 
'/data/s3913171/nlp4ifchallenge/checkpoints'\nmanual_seed(0)\nfilterwarnings('ignore')\n\n\ndef sprint(s: str):\n print(s)\n sys.stdout.flush()\n\n\ndef main(name: str,\n train_path: str,\n dev_path: str,\n test_path: str,\n device: str,\n batch_size: int,\n early_stopping: Maybe[int],\n num_epochs: int,\n save_path: str,\n print_log: bool,\n with_class_weights: bool,\n ignore_nan: bool):\n data_tag = train_path.split('data')[1].split('/')[1]\n save_path = '/'.join([save_path, '-'.join([name, data_tag])])\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n save_path = '/'.join([save_path, 'model.p'])\n\n model = make_model(name, ignore_nan).to(device)\n\n train_ds, dev_ds = read_labeled(train_path), read_labeled(dev_path)\n train_dl = DataLoader(model.tensorize_labeled(train_ds), batch_size=batch_size,\n collate_fn=lambda b: collate_tuples(b, model.tokenizer.pad_token_id, device), shuffle=True)\n dev_dl = DataLoader(model.tensorize_labeled(dev_ds), batch_size=batch_size,\n collate_fn=lambda b: collate_tuples(b, model.tokenizer.pad_token_id, device), shuffle=False)\n\n # if provided test path\n if test_path != '':\n test_ds = read_labeled(test_path)\n test_dl = DataLoader(model.tensorize_labeled(test_ds), batch_size=batch_size,\n collate_fn=lambda b: collate_tuples(b, model.tokenizer.pad_token_id, device), shuffle=False)\n\n class_weights = tensor(extract_class_weights(train_path), dtype=floatt, device=device)\n criterion = BCEWithLogitsLoss(pos_weight=class_weights) if with_class_weights else BCEWithLogitsIgnore(ignore_index=-1)\n optimizer = AdamW(model.parameters(), lr=3e-05, weight_decay=1e-02)\n\n trainer = Trainer(model, (train_dl, dev_dl), optimizer, criterion, target_metric='mean_f1', early_stopping=early_stopping, print_log=print_log)\n\n best = trainer.iterate(num_epochs, with_save=save_path, with_test=test_dl if test_path != '' else None)\n sprint(f'{name}: {best}')\n if test_path != '':\n sprint(f'\\nbest test -- {trainer.logs[\"test\"][-1]}')\n \n # load best saved model and re-save with faiths \n faiths = array([c['f1'] for c in best['column_wise']])\n save({'faiths': faiths, 'model_state_dict': load(save_path)}, save_path)\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', help='name of the BERT model to load', type=str)\n parser.add_argument('-tr', '--train_path', help='path to the training data tsv', type=str, default='./data/english/covid19_disinfo_binary_english_train.tsv')\n parser.add_argument('-dev', '--dev_path', help='path to the development data tsv', type=str, default='./data/english/covid19_disinfo_binary_english_dev_input.tsv')\n parser.add_argument('-tst', '--test_path', help='path to the testing data tsv', type=str, default='')\n parser.add_argument('-d', '--device', help='cpu or cuda', type=str, default='cuda')\n parser.add_argument('-bs', '--batch_size', help='batch size to use for training', type=int, default=16)\n parser.add_argument('-e', '--num_epochs', help='how many epochs of training', type=int, default=20)\n parser.add_argument('-early', '--early_stopping', help='early stopping patience (default no)', type=int, default=0)\n parser.add_argument('-s', '--save_path', help='where to save best model', type=str, default=SAVE_PREFIX)\n parser.add_argument('--print_log', action='store_true', help='print training logs', default=False)\n parser.add_argument('--with_class_weights', action='store_true', help='compute class weights for loss penalization', default=False)\n 
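# parsed into main(**kwargs) below; ignore_nan is forwarded to make_model()\n 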
parser.add_argument('--ignore_nan', action='store_true', help='set True to ignore (not penalize) nan labels', default=False)\n\n kwargs = vars(parser.parse_args())\n main(**kwargs)","repo_name":"gtziafas/nlp4ifchallenge","sub_path":"nlp4ifchallenge/scripts/train_bert.py","file_name":"train_bert.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"34820184296","text":"import numpy as np\nimport random\n\n#\n# a = np.array([[[1, 2], [3, 4]], [[1, 1], [1, 1]]])\n# b = np.array([[[5, 6], [7, 8]], [[1, 1], [1, 1]]])\n# c = np.concatenate([a, b], axis=1)\n#\n#\n# print(a)\n# print(b)\n# print(c)\n# print(c.shape)\n\n#\n# print(np.linspace(1, 70, 16, dtype=int))\n# index = np.linspace(1, 70, 16, dtype=int)\n# for i in range(1,71):\n# if i in index :\n# print(i)\n\n\n# print( (2 == 2))\n#\n# test = np.array([0,1,2,3,4,5,6,7,8,9,10,11], dtype=float)\n# test = (test/4).astype(int)\n#\n#\n# print(test)\n\n\n# view_list = []\n# for i in range(5):\n# view_list.extend(range(12))\n# print(view_list)\n#\n# factor = 2\n# for i in range(factor):\n# view_list.extend(view_list)\n# print(view_list)\n# import os\n# os.makedirs(\"./abc/111/222/333\")\n\n\n# view_list = []\n# for i in range(2):\n# view_list.extend([i] * 12)\n# print(view_list)\n#\n# factor = 2\n# for i in range(factor):\n# view_list.extend(view_list)\n# print(view_list)\n# print(len(view_list))\n\n\n# classes_to_generate = np.array([0, 1, 2, 3], dtype=int)\n# print(classes_to_generate)\n# classes_to_generate += 1\n# print(classes_to_generate % 4)\n\n\n\n\n\n\nnum_frames = 16\nchosen_frames = list(np.linspace(1, num_frames, 16, dtype=np.int))\nprint(chosen_frames)\n\n\ninterval = int((num_frames-16)/(15*2))\nprint(\"interval = \", interval)\nif (interval > 0):\n for i in range(16):\n if (i == 0 or i == 15): continue\n chosen_frames[i] += random.randint(-interval, interval)\n\n print(\"chosen_frames (update) = \", chosen_frames)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dungbachviet/ViMoCoGAN-version1","sub_path":"src/testFile.py","file_name":"testFile.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5482492926","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\n#########################################################################\n#Ibrahim's part\nclass Segment:\n\tdef __init__(self, process_name = \"NULL\", name = \"UNK\", size = 0):\n\t\tself.process_name = process_name\n\t\tself.name = name\n\t\tself.size = size\n\nclass free_hole:\n\tdef __init__(self, base = 0, size = 0 ):\n\t\tself.size = size\n\t\tself.base = base\n\ndef edit_fh(memo, fh):\n\ttemp = 0\n\tcount = 0\n\tflag = 0 \n\tfh.clear()\n\tfor i in range(len(memo)):\n\t\tif memo[i].process_name == \"free\":\n\t\t\tcount += 1\n\t\t\ttemp = i\n\t\t\tflag = 1\n\t\telif flag == 1:\n\t\t\tfh.append(free_hole(temp - count + 1, count))\n\t\t\tcount = 0\n\t\t\tflag = 0 \n\tif flag == 1:\n\t\tfh.append(free_hole(temp - count + 1, count))\n\ndef Allocate(s, algorithm, memo, fh):\n\ttemp = 0 \n\tflag = 0\n\tfor i in range(len(fh)):\n\t\tif fh[i].size >= s.size:\n\t\t\tif algorithm == \"FF\":\n\t\t\t\tfor j in range(s.size):\n\t\t\t\t\tmemo[j + fh[i].base] = Segment(s.process_name, s.name, size = 1)\n\t\t\t\tedit_fh(memo, fh)\n\t\t\t\treturn True\n\t\t\telif algorithm == \"BF\":\n\t\t\t\tif flag == 0:\n\t\t\t\t\ttemp = i\n\t\t\t\t\tflag = 
1\n\t\t\t\telif fh[i].size < fh[temp].size:\n\t\t\t\t\ttemp = i\n\tif flag == 0:\n\t\treturn False\n\telse:\n\t\tfor j in range(s.size):\n\t\t\tmemo[j + fh[temp].base] = Segment(s.process_name, s.name, size = 1)\n\t\tedit_fh(memo, fh)\n\t\treturn True\n\ndef hole(b, s, memo, fh):\n\tfor j in range(s):\n\t\tmemo[j + b] = Segment(\"free\", \"hole\", 1)\n\tedit_fh(memo, fh)\n\ndef deallocate(index, memo, fh):\n\tif memo[index].process_name == \"NULL\":\n\t\ti = index\n\t\twhile ((i < len(memo)) and (memo[i].process_name == \"NULL\")):\n\t\t\tmemo[i] = Segment(\"free\", \"hole\", 1)\n\t\t\ti += 1\n\t\ti = index - 1\n\t\twhile ((i >= 0) and (memo[i].process_name == \"NULL\")):\n\t\t\tmemo[i] = Segment(\"free\", \"hole\", 1)\n\t\t\ti -= 1\n\t\tedit_fh(memo, fh)\n\telse:\n\t\ttemp = memo[index].process_name\n\t\tfor i in range(len(memo)):\n\t\t\tif (memo[i].process_name == temp):\n\t\t\t\tmemo[i] = Segment(\"free\", \"hole\", 1)\n\t\tedit_fh(memo, fh)\n\ndef prints(k):\n\tfor i in range(len(k)):\n\t\tprint(k[i].process_name + \"\\t\" + k[i].name + \"\\n\")\n\ndef printh(k):\n\tfor i in range(len(k)):\n\t\tprint(\"base: \" + str(k[i].base) + \", \" + \"size: \" + str(k[i].size))\n\ndef organize_memory(memory):\n\ttemp_pn = memory[0].process_name\n\ttemp_n = memory[0].name\n\tseg_size = 1\n\tnew_memory = []\n\tfor i in range(1, len(memory)):\n\t\tif memory[i].process_name == temp_pn and memory[i].name == temp_n:\n\t\t\tseg_size += 1\n\t\telse:\n\t\t\tnew_memory.append(Segment(temp_pn, temp_n, seg_size))\n\t\t\ttemp_pn = memory[i].process_name\n\t\t\ttemp_n = memory[i].name\n\t\t\tseg_size = 1\n\tnew_memory.append(Segment(temp_pn, temp_n, seg_size))\n\treturn new_memory\n###########################################################################\ndef draw_memory(memory, canvas):\n\tcanvas.delete(ALL)\n\tglobal y_offset\n\tglobal x_offset\n\tglobal mag_factor\n\tcanvas.create_text(2, 0, text = \"Memory Layout\", font = \"Helvetica 22 bold\", anchor = \"nw\" )\n\tmemory_size = 0\n\tfor i in range (len(memory)):\n\t\tmemory_size += memory[i].size \n\ty_offset = 35\n\tx_offset = 50\n\tmag_factor = (canvas_height - y_offset)/memory_size\n\tacc_size = 0\n\tcanvas.create_text(25, (y_offset - 1), text = str(\"0\"), font = \"Times 11\")\n\tfor i in range(len(memory)):\n\t\tcanvas.create_text(25, (y_offset - 3 + mag_factor*(acc_size + memory[i].size)), text = str(acc_size + memory[i].size), font = \"Times 11\t\")\n\t\tif (memory[i].name != \"UNK\" and memory[i].name != \"hole\"):\n\t\t\tcanvas.create_rectangle(x_offset, (y_offset + mag_factor*acc_size), canvas_width, (y_offset + mag_factor*(acc_size + memory[i].size)))\n\t\t\tcanvas.create_text(0.5*(canvas_width + x_offset), (y_offset + mag_factor*(acc_size + 0.5*memory[i].size)), text = \"Process: \" + memory[i].process_name + \", Segment: \" + memory[i].name, font = \"Times 12 bold\")\n\t\t\t# canvas.create_text(0.5*canvas_width + x_offset, (y_offset + mag_factor*(acc_size + 0.5*memory[i].size) + 7), text = \"Segment: \" + memory[i].name, font = \"Times 12 bold\")\n\t\telif (memory[i].name == \"UNK\"):\n\t\t\tcanvas.create_rectangle(x_offset, (y_offset + mag_factor*acc_size), canvas_width, (y_offset + mag_factor*(acc_size + memory[i].size)), fill = \"red\")\n\t\t\tcanvas.create_line(x_offset, (y_offset + mag_factor*acc_size), canvas_width, (y_offset + mag_factor*(acc_size + memory[i].size)), fill = \"blue\")\n\t\t\tcanvas.create_line(canvas_width, (y_offset + mag_factor*acc_size), x_offset, (y_offset + mag_factor*(acc_size + memory[i].size)), fill = 
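# second diagonal of the cross marking never-allocated (UNK) space\n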
\"blue\")\n\t\telif (memory[i].name == \"hole\"):\n\t\t\tcanvas.create_rectangle(x_offset, (y_offset + mag_factor*acc_size), canvas_width, (y_offset + mag_factor*(acc_size + memory[i].size)), fill = \"green\")\n\t\tacc_size += memory[i].size\n\ndef draw(): \n\tmemory.clear()\n\tif mem_size.isdigit():\n\t\tfor i in range(int(mem_size)):\n\t\t\tmemory.append(Segment(size = 1))\n\t\tdraw_memory(organize_memory(memory), canvas)\n\telse:\n\t\tmessagebox.showerror(title = \"Error\", message = \"Memory Size MUST BE AN INTEGER!\\nابعدوا عننا بقى سودتوا عيشتنا \")\n\ndef create_hole():\n\tif (not mem_size.isdigit()):\n\t\tmessagebox.showerror(title = \"Error\", message = \"Enter Memory Size First!\")\n\telif hole_address.isdigit() and hole_size.isdigit():\n\t\tif (int(hole_address) + int(hole_size)) > int (mem_size):\n\t\t\tmessagebox.showerror(title = \"Error\", message = \"Out of Range!\\nاحنا هنهرج !\")\n\t\telse:\n\t\t\thole(int(hole_address), int(hole_size), memory, fh)\n\t\t\tdraw_memory(organize_memory(memory), canvas)\n\t\t\thole_address_entry.delete(0, END)\n\t\t\thole_size_entry.delete(0, END)\n\telse:\n\t\tmessagebox.showerror(title = \"Error\", message = \"Hole Starting Address and Hole Size Must Be Integers!\\nابعدوا عننا بقى سودتوا عيشتنا \")\n\ndef allocate_segment(algorithm):\n\tif (not mem_size.isdigit()):\n\t\tmessagebox.showerror(title = \"Error\", message = \"Enter Memory Size First!\")\n\telse:\n\t\tif (not segment_size.isdigit()):\n\t\t\tmessagebox.showerror(title = \"Error\", message = \"Segment Size Must Be an Integer!\\nهو حضرتك بتعمل ايه ؟\")\n\t\telse:\n\t\t\tdone = Allocate(Segment(process_text, segment_text, int(segment_size)), algorithm, memory, fh)\n\t\t\tif not done:\n\t\t\t\tmessagebox.showerror(\"Error\", \"Segment Can't Fit!\")\n\t\t\telse:\n\t\t\t\tdraw_memory(organize_memory(memory), canvas)\n\t\t\t\tsegment_name_entry.delete(0, END)\n\t\t\t\tsegment_size_entry.delete(0, END)\n\ndef deallocate_segment(event):\n\tif event.x > x_offset and event.y > y_offset:\n\t\ty_true = (event.y - y_offset)/mag_factor\n\t\tdeallocate(int(y_true), memory, fh)\n\t\tdraw_memory(organize_memory(memory), canvas)\n\t#print (\"x = \" + str(event.x) + \", y = \" + str(event.y))\n\ndef show_seg_table():\n\tdict_memory = organize_memory(memory)\n\tf = 0\n\tfor i in range (len(dict_memory)):\n\t\tif dict_memory[i].process_name == process_seg:\n\t\t\tf = 1\n\t\t\tbreak\n\tif f == 0:\n\t\tmessagebox.showerror(\"Error\", \"Process Doesn't Exist!\\nهو حضرتك بتعمل ايه ؟\")\n\telse:\n\t\tseg_window = Toplevel(root)\n\t\tLabel(seg_window, text = \"Segment Table for \" + process_seg, font = \"Times 20\").pack()\n\t\tseg_table = ttk.Treeview(seg_window)\n\t\tseg_table[\"columns\"] = (\"one\", \"two\")\n\t\tseg_table.column(\"#0\", width = 100, minwidth = 80, stretch = \"no\")\n\t\tseg_table.column(\"one\", width = 100, minwidth = 80, stretch = \"no\")\n\t\tseg_table.column(\"two\", width = 100, minwidth = 80, stretch = \"no\")\n\t\tseg_table.heading(\"#0\", text = \"Segment\", anchor = \"w\")\n\t\tseg_table.heading(\"one\", text = \"Starting Address\", anchor = \"w\")\n\t\tseg_table.heading(\"two\", text = \"Size\", anchor = \"w\")\n\t\tacc_size = 0\n\t\tfor i in range (len(dict_memory)):\n\t\t\tif dict_memory[i].process_name == process_seg:\n\t\t\t\tseg_table.insert(\"\", \"end\", text = dict_memory[i].name, values = (str(acc_size), str(dict_memory[i].size)))\n\t\t\tacc_size += dict_memory[i].size\n\t\tseg_table.pack()\n\ndef pretext(event, entry):\n\tglobal mem_size\n\tglobal 
hole_address\n\tglobal hole_size\n\tglobal process_text\n\tglobal segment_text\n\tglobal segment_size\n\tglobal process_seg\n\n\tif (event == '<FocusIn>'):\n\t\t# print(\"I've entered\")\n\t\tif entry.get() == \"Memory Size\" or entry.get() == \"Hole Starting Address\" or entry.get() == \"Hole Size\" or entry.get() == \"Process Name\" or entry.get() == \"Segment Size\" or entry.get() == \"Segment Name\" or entry.get() == \"Process to Get Table\":\n\t\t\tentry.delete(0, END)\n\telif (event == '<FocusOut>'):\n\t\t# print(\"I've left\")\n\t\tif entry is mem_size_entry:\n\t\t\tmem_size = entry.get()\n\t\telif entry is hole_address_entry:\n\t\t\thole_address = entry.get()\n\t\telif entry is hole_size_entry:\n\t\t\thole_size = entry.get()\n\t\telif entry is process_name_entry:\n\t\t\tprocess_text = entry.get()\n\t\telif entry is segment_name_entry:\n\t\t\tsegment_text = entry.get()\n\t\telif entry is segment_size_entry:\n\t\t\tsegment_size = entry.get()\n\t\telif entry is segment_table_entry:\n\t\t\tprocess_seg = entry.get()\n\n\t\tif entry.get() == \"\" and entry is mem_size_entry:\n\t\t\tentry.insert(0, \"Memory Size\")\n\t\telif entry.get() == \"\" and entry is hole_address_entry:\n\t\t\tentry.insert(0, \"Hole Starting Address\")\n\t\telif entry.get() == \"\" and entry is hole_size_entry:\n\t\t\tentry.insert(0, \"Hole Size\")\n\t\telif entry.get() == \"\" and entry is process_name_entry:\n\t\t\tentry.insert(0, \"Process Name\")\n\t\telif entry.get() == \"\" and entry is segment_name_entry:\n\t\t\tentry.insert(0, \"Segment Name\")\n\t\telif entry.get() == \"\" and entry is segment_size_entry:\n\t\t\tentry.insert(0, \"Segment Size\")\n\t\telif entry.get() == \"\" and entry is segment_table_entry:\n\t\t\tentry.insert(0, \"Process to Get Table\")\n\ndef create_label(text = \"\", y = 0, x = 0):\n\tlabel = Label(root, text = text, anchor = \"nw\")\n\tlabel.place(y = y, x = x)\n\treturn label\n\ndef create_entry(text = \"\", y = 0, x = 0):\n\tEnt = Entry(root)\n\tEnt.insert(0, text)\n\tEnt.bind(sequence = '<FocusIn>', func = lambda entry: pretext('<FocusIn>', Ent))\n\tEnt.bind(sequence = '<FocusOut>', func = lambda entry: pretext('<FocusOut>', Ent))\n\tEnt.place(y = y, x = x)\n\treturn Ent\n\ndef create_button(text = \"\", y = 0, x = 0, command = None):\n\tbtn = Button(root, text = text, command = command)\n\tbtn.place(y = y, x = x, width = 125)\n\treturn btn\n###########################################################################\nroot = Tk()\nroot.title(\"Memory Management Project\")\nroot.geometry(\"1005x705\")\n\nmemory = []\nfh = []\n\nmem_size = \"\"\nhole_address = \"\"\nhole_size = \"\"\nprocess_text = \"\"\nsegment_text = \"\"\nsegment_size = \"\"\nprocess_seg = \"\"\n\nradio_choice = StringVar()\nradio_choice.set(\"FF\")\n\ncanvas_width = 400\ncanvas_height = 700\ncanvas = Canvas(root, height = canvas_height, width = canvas_width)\n\n# clicking a drawn segment deallocates it (assumed left-click binding)\ncanvas.bind(sequence = \"<Button-1>\", func = deallocate_segment)\ncanvas.create_text(2, 0, text = \"Memory Layout\", font = \"Helvetica 22 bold\", anchor = \"nw\")\ncanvas.place(y = 0, x = 1000 - canvas_width)\n\nclose_btn = create_button(\"Close\", 120, 290, root.destroy)\n\nmem_size_label = create_label(\"Memory Size:\", 1, 0)\n\nmem_size_entry = create_entry(\"Memory Size\", 3, 125)\n\nmem_size_btn = create_button(\"Enter\", 0, 290, draw)\n\nhole_address_label = create_label(\"Hole Starting Address:\", 31, 0)\n\nhole_address_entry = create_entry(\"Hole Starting Address\", 33, 125)\n\nhole_size_label = create_label(\"Hole Size:\", 61, 0)\n\nhole_size_entry = create_entry(\"Hole Size\", 63, 125)\n\nhole_btn = 
create_button(\"Enter Hole\", 30, 290, create_hole)\n\nprocess_name_label = create_label(\"Process Name:\", 91, 0)\n\nprocess_name_entry = create_entry(\"Process Name\", 93, 125)\n\nprocess_btn = create_button(\"Enter Segment\", 87, 290, command = lambda: allocate_segment(radio_choice.get()))\n\nsegment_name_label = create_label(\"Segment Name:\", 121, 0)\n\nsegment_name_entry = create_entry(\"Segment Name\", 123, 125)\n\nsegment_size_label = create_label(\"Segment Size:\", 151, 0)\n\nsegment_size_entry = create_entry(\"Segment Size\", 153, 125)\n\nRadiobutton(root, text = \"First Fit\", variable = radio_choice, value = \"FF\").place(y = 183, x = 0)\nRadiobutton(root, text = \"Best Fit\", variable = radio_choice, value = \"BF\").place(y = 213, x = 0)\n\nsegment_table_label = create_label(\"Segment Table:\", 243, 0)\n\nsegment_table_entry = create_entry(\"Process to Get Table\", 245, 125)\n\nsegment_table_btn = create_button(\"Show Segment Table\", 239, 290, show_seg_table)\n\nroot.mainloop()","repo_name":"omar-ashinawy/MemoryMangementUnit","sub_path":"MMU.py","file_name":"MMU.py","file_ext":"py","file_size_in_byte":11926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16351819828","text":"from alipay import AliPay\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom meiduo_mall.utils.response_code import RETCODE\n\nfrom meiduo_mall.utils.views import Lr\nfrom orders.models import OrderInfo\nfrom django import http\nfrom .models import Payment\nimport os\n# lemwrg9682@sandbox.com 支付宝沙箱\n\n\nclass PaymentView(Lr):\n \"\"\"订单支付功能\"\"\"\n\n def get(self, request, order_id):\n # 查询要支付的订单\n\n user = request.user\n\n try:\n order = OrderInfo.objects.get(order_id=order_id, user=user, status=OrderInfo.ORDER_STATUS_ENUM[\"UNPAID\"])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden(\"无效订单\")\n\n # 创建支付宝支付对象\n alipay = AliPay(\n appid=settings.ALIPAY_APPID,\n app_notify_url=None, # 默认回调url\n app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), \"keys/app_private_key.pem\"),\n alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"keys/alipay_public_key.pem\"),\n sign_type=\"RSA2\",\n debug=settings.ALIPAY_DEBUG\n )\n\n # 生成支付宝登录连接\n order_string = alipay.api_alipay_trade_page_pay(\n out_trade_no=order_id,\n total_amount=str(order.total_amount),\n\n\n\n\n\n\n\n\n subject=\"怡红院%s\" % order_id,\n return_url=settings.ALIPAY_RETURN_URL\n )\n # 响应登录支付宝连接\n alipay_url = settings.ALIPAY_URL + \"?\" + order_string\n return http.JsonResponse({\"code\": RETCODE.OK, \"errmsg\": \"OK\", \"alipay_url\": alipay_url})\n\n\nclass PaymentStatusView(Lr):\n \"\"\"保存订单支付结果\"\"\"\n\n def get(self, request):\n # 获取前端传入的数据\n query_dict = request.GET\n data = query_dict.dict()\n # 获取并从请求参数中剔除signature\n signature = data.pop(\"sign\")\n\n # 创建支付宝连接对象\\\n alipay = AliPay(\n appid=settings.ALIPAY_APPID,\n app_notify_url=None,\n app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), \"keys/app_private_key.pem\"),\n alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"keys/alipay_public_key.pem\"),\n sign_type=\"RSA2\",\n debug=settings.ALIPAY_DEBUG,\n\n )\n # 校验这个重定向是否是alipay重定向过来的\n success = alipay.verify(data, signature)\n if success:\n # 读取order_id\n order_id = data.get(\"out_trade_no\")\n\n # 读取支付宝流水号\n trade_id = data.get(\"trade_no\")\n\n # 保存payment模型类数据\n Payment.objects.create(\n order_id=order_id,\n 
trade_id=trade_id\n )\n # 修改订单状态为待评价\n OrderInfo.objects.filter(order_id=order_id, status=OrderInfo.ORDER_STATUS_ENUM[\"UNPAID\"]).update(\n status=OrderInfo.ORDER_STATUS_ENUM[\"UNCOMMENT\"])\n\n # 响应trade_id\n context = {\n \"trade_id\": trade_id\n }\n return render(request, \"pay_success.html\", context)\n else:\n # 订单支付失败,重定向到我的订单\n return http.HttpResponseForbidden(\"非法请求\")\n","repo_name":"ZeroOneo/mdsc","sub_path":"meiduo_mall/meiduo_mall/apps/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9095555522","text":"from chess.piece import *\n\ncols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\nvalid_spaces = range(8)\n\ndef space_on_board(x, y):\n return x in valid_spaces and y in valid_spaces\n\ndef convert_from_chess_space(space):\n if len(space) != 2:\n raise ValueError(\"input string '{}' not chess space\".format(space))\n x = cols.index(space[0])\n y = int(space[1]) - 1\n if not space_on_board(x, y):\n raise ValueError(\"coords ({}, {}) not on board\".format(x, y))\n return (x, y)\n\ndef convert_to_chess_space(x, y):\n if not space_on_board(x, y):\n raise ValueError(\"coords ({}, {}) not on board\".format(x, y))\n return \"{}{}\".format(cols[x], y + 1)\n\ndef is_valid_piece_move(piece, move):\n if not isinstance(piece, Piece):\n raise ValueError(\"piece {} not a valid piece type\".format(piece))\n if piece.type == PieceType.PAWN:\n return is_valid_pawn_move(*move, color = piece.color)\n elif piece.type == PieceType.KNIGHT:\n return is_valid_knight_move(*move)\n elif piece.type == PieceType.BISHOP:\n return is_valid_bishop_move(*move)\n elif piece.type == PieceType.ROOK:\n return is_valid_rook_move(*move)\n elif piece.type == PieceType.QUEEN:\n return is_valid_queen_move(*move)\n elif piece.type == PieceType.KING:\n return is_valid_king_move(*move)\n\ndef valid_knight_moves(x, y):\n all_moves = {\n (x + 2, y + 1), (x + 2, y - 1),\n (x - 2, y + 1), (x - 2, y - 1),\n (x + 1, y + 2), (x - 1, y + 2),\n (x + 1, y - 2), (x - 1, y - 2)}\n\n valid_moves = set()\n for _x, _y in all_moves:\n if space_on_board(_x, _y):\n valid_moves.add((_x, _y))\n\n return valid_moves\n\ndef is_valid_knight_move(start_x, start_y, end_x, end_y):\n if not space_on_board(end_x, end_y):\n return False\n return (end_x, end_y) in valid_knight_moves(start_x, start_y)\n\ndef is_valid_rook_move(start_x, start_y, end_x, end_y):\n if not space_on_board(end_x, end_y):\n return False\n return (start_x != end_x) ^ (start_y != end_y)\n\ndef is_valid_bishop_move(start_x, start_y, end_x, end_y):\n if not space_on_board(end_x, end_y):\n return False\n dx = abs(start_x - end_x)\n dy = abs(start_y - end_y)\n return dx == dy and dx + dy != 0\n\ndef is_valid_queen_move(start_x, start_y, end_x, end_y):\n return (is_valid_rook_move(start_x, start_y, end_x, end_y)\n or is_valid_bishop_move(start_x, start_y, end_x, end_y))\n\ndef is_valid_king_move(start_x, start_y, end_x, end_y):\n if not space_on_board(end_x, end_y):\n return False\n dx = abs(start_x - end_x)\n dy = abs(start_y - end_y)\n return (dx <= 1 and dy <= 1 and dx + dy != 0) or ((start_y == 0 or\n start_y == 7) and start_x == 4 and dy == 0 and dx == 2)\n\ndef is_valid_pawn_move(start_x, start_y, end_x, end_y, color):\n if not space_on_board(end_x, end_y):\n return False\n dx = start_x - end_x\n dy = start_y - end_y\n\n if color == PieceColor.WHITE or color == True:\n return (dy == -1 and -1 <= dx <= 1) or (start_y <= 1 and dy == -2 and 
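# the two-square advance is only legal from the pawn's starting rank\n 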
dx == 0)\n else:\n return (dy == 1 and -1 <= dx <= 1) or (start_y >= 6 and dy == 2 and dx == 0)\n\n","repo_name":"Rycieos/python-chess","sub_path":"chess/moves.py","file_name":"moves.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} {"seq_id":"5615292766","text":"import os\n\nSECRET_KEY = \"secreto\"\n\nSQLALCHEMY_DATABASE_URI = \\\n \"{SGBD}://{usuario}:{senha}@{servidor}/{database}\".format(\n SGBD = 'mysql',\n usuario = 'root',\n senha = 'rafael123',\n servidor = 'localhost',\n database = 'jogoteca'\n )\n\nUPLOAD_PATH = os.path.dirname(os.path.abspath(__file__)) + \"\\\\uploads\"","repo_name":"Rafael-leonardo/flask_jogos","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} {"seq_id":"7515955237","text":"from math import sin, cos, asin, radians, sqrt\n\n# Implementing the haversine formula\ndef distance(point1, point2):\n\n lat1 = point1[0]\n lon1 = point1[1]\n \n lat2 = point2[0]\n lon2 = point2[1]\n\n lon1, lon2, lat1, lat2 = map(radians, [lon1, lon2, lat1, lat2])\n\n longitude_dist = lon2 - lon1\n latitude_dist = lat2 - lat1\n \n # Haversine formula: the first term uses the latitude difference\n a = sin(latitude_dist/2)**2 + cos(lat1) * cos(lat2) * sin(longitude_dist/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3956 # Earth's radius in miles\n return c * r\n\ndef detour(A,B,C,D):\n\n #Is ACDB shorter or is CABD?\n\n #ACDB:\n # A-->C + C-->D + D-->B\n ACDB = distance(A,C) + distance(C,D) + distance(D,B)\n\n #CABD\n # C-->A + A-->B + B-->D\n CABD = distance(C,A) + distance(A,B) + distance(B,D)\n\n if ACDB < CABD:\n shorter_dist = \"ACDB\"\n else:\n shorter_dist = \"CABD\"\n\n print(\"The total detour distance for ACDB detour is = \" + str(ACDB))\n print(\"The total detour distance for CABD detour is = \" + str(CABD))\n print(\"The shorter detour is the \" + shorter_dist + \" route.\")\n\ndef main():\n #[latitude, longitude]\n A = [32,-76]\n B = [33,-75]\n C = [34,-74]\n D = [35,-73]\n\n detour(A,B,C,D)\n\nmain()","repo_name":"opalkale/detour-distance","sub_path":"detour.py","file_name":"detour.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} {"seq_id":"13784577907","text":"import os\nimport logging\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom wavepy.waves import DatPrep\n\nLOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\nlogging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\n\nsample_rate = 4096.\n\ndtype = 'bias'\nfdir = './data/IMR_'+dtype\nsave_dir = './Plots/'+dtype\n\nif not os.path.exists(save_dir):\n\tos.makedirs(save_dir)\n\ntrain_file = fdir+'/IMR_hp_training_input.dat'\nval_file = fdir+'/IMR_hp_validation_input.dat'\ntest_file = fdir+'/IMR_hp_test_input.dat'\n\ndatprep = DatPrep(fdir)\n\ntrain, val = datprep.load_split(train_file, val_file)\ntrain, val = datprep.tolist_split()\ntrain, val = datprep.tofloat_split()\ntest = datprep.load(test_file)\ntest = datprep.tolist()\ntest = datprep.tofloat()\n\ni1 = train[0]\ni2 = val[-1]\ni3 = test[int(len(test)/2)]\n\ntrain_tgt = fdir+'/IMR_hp_training_target.dat'\nval_tgt = fdir+'/IMR_hp_validation_target.dat'\ntest_tgt = fdir+'/IMR_hp_test_target.dat'\n\ntraint, valt = datprep.load_split(train_tgt, val_tgt)\ntraint, valt = datprep.tolist_split()\ntraint, valt = 
datprep.tofloat_split()\ntraint, valt = datprep.nonzero_split()\ntestt = datprep.load(test_tgt)\ntestt = datprep.tolist()\ntestt = datprep.tofloat()\ntestt = datprep.nonzero()\n\nt1 = traint[0]\nt2 = valt[-1]\nt3 = testt[int(len(testt)/2)]\n\ntrain_m = fdir+'/mass_info_training.dat'\nval_m = fdir+'/mass_info_validation.dat'\ntest_m = fdir+'/mass_info_test.dat'\n\ntrain_mass = np.genfromtxt(train_m).T\nval_mass = np.genfromtxt(val_m).T\ntest_mass = np.genfromtxt(test_m).T\n\nm1 = train_mass[3][0]\nm2 = val_mass[3][-1]\nm3 = test_mass[3][len(test)/2]\n\nfor i in range(len(traint)):\n\ttrain[i] = len(train[i])\n\ttraint[i] = len(traint[i])\n\nfor j in range(len(valt)):\n\tval[j] = len(val[j])\n\tvalt[j] = len(valt[j])\n\nfor k in range(len(testt)):\n\ttest[k] = len(test[k])\n\ttestt[k] = len(testt[k])\n\ninputs = train + val + test\ntargets = traint + valt + testt\nmass = np.hstack((train_mass, val_mass, test_mass))\n\nf, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(8,3))\nax1.hist(inputs, 20)\nax1.grid(which='both', alpha=0.5, ls='dashed')\nax1.set_ylabel(\"Number\")\nax1.set_xlabel(\"Data size of I waveform\")\nax2.hist(targets, 20)\nax2.grid(which='both', alpha=0.5, ls='dashed')\nax2.set_xlabel(\"Data size of M and R waveform\")\nax3.hist(mass[3], 20)\nax3.grid(which='both', alpha=0.5, ls='dashed')\nax3.set_xlabel(\"Chirp mass\")\nplt.tight_layout()\nf.subplots_adjust(wspace=0)\nplt.setp(ax2.get_yticklabels(), visible=False)\nplt.setp(ax3.get_yticklabels(), visible=False)\nplt.savefig(save_dir+\"/number_distribution\")\nlogging.info(\"Plot is saved at %s.png\" % (save_dir+\"/number_distribution\"))\n\noverlap=1\n\nminp_x = np.arange(0,len(i1))/sample_rate\nmtgt_x = (len(i1)-overlap+np.arange(0,len(t1)))/sample_rate\nplt.figure()\nplt.gca().axes.get_yaxis().set_ticks([])\nplt.grid(which='both', axis='x', alpha=0.5, ls='--')\nplt.plot(minp_x, i1, color='g',\n\t\tlabel=\"inputs(I)\")\nplt.plot(mtgt_x, t1, \n\t\tcolor='b', label=\"targets(M, R)\")\nplt.annotate(r'$\\mathcal{M}=%.2fM_{\\odot}$'%m1, xy=(0.6, 0),\n\t\txytext=(0.6, -0.5e-20),\n\t\tarrowprops=dict(facecolor='black', shrink=0.05))\n\nuinp_x = np.arange(0,len(i2))/sample_rate\nutgt_x = (len(i2)-overlap+np.arange(0,len(t2)))/sample_rate\nuant_x = utgt_x[-1]+0.01\nutxt_x = utgt_x[-1]+0.11\nplt.plot(uinp_x,\n\t\tnp.array(i2)+1e-20, color='g')\nplt.plot(utgt_x, \n\t\tnp.array(t2)+1e-20, color='b')\nplt.annotate(r'$\\mathcal{M}=%.2fM_{\\odot}$'%m2, xy=(uant_x, 1e-20),\n\t\txytext=(utxt_x, 1e-20),\n\t\tarrowprops=dict(facecolor='black', shrink=0.05))\n\nbinp_x = np.arange(0,len(i3))/sample_rate\nbtgt_x = (len(i3)-overlap+np.arange(0,len(t3)))/sample_rate\nbant_x = btgt_x[-1]+0.01\nbtxt_x = btgt_x[-1]+0.11\nplt.plot(binp_x,\n\t\tnp.array(i3)-1e-20, color='g')\nplt.plot(btgt_x,\n\t\tnp.array(t3)-1e-20, color='b')\nplt.annotate(r'$\\mathcal{M}=%.2fM_{\\odot}$'%m3, xy=(bant_x, -1e-20),\n\t\txytext=(btxt_x, -1e-20),\n\t\tarrowprops=dict(facecolor='black', shrink=0.05))\nplt.xlabel(\"Time\")\nplt.legend()\nplt.savefig(save_dir+\"/Strain_inp_tgt\")\nlogging.info(\"Plot is saved at %s.png\" % (save_dir+\"/Strain_inp_tgt\"))\n\nplt.figure()\nplt.grid(which='both', alpha=0.5, ls='--')\nplt.scatter(mass[3], inputs, color='g', marker='.', label=\"input\")\nplt.scatter(mass[3], targets, color='b', marker='.', label=\"targets\")\nplt.yscale('log')\nplt.ylabel(\"log(Data size)\")\nplt.xlabel(\"Chirp Mass\")\nplt.legend()\nplt.savefig(save_dir+\"/CM_vs_N\")\nlogging.info(\"Plot is saved at %s.png\" % 
(save_dir+\"/CM_vs_N\"))\n\nplt.figure()\nplt.grid(which='both', alpha=0.5, ls='--')\nplt.scatter(mass[0], inputs, color='g', marker='.', label=\"input\")\nplt.scatter(mass[0], targets, color='b', marker='.', label=\"targets\")\nplt.yscale('log')\nplt.ylabel(\"log(Data size)\")\nplt.xlabel(\"Mass1\")\nplt.legend()\nplt.savefig(save_dir+\"/M1_vs_N\")\nlogging.info(\"Plot is saved at %s.png\" % (save_dir+\"/M1_vs_N\"))\n\nplt.figure()\nplt.grid(which='both', alpha=0.5, ls='--')\nplt.scatter(mass[1], inputs, color='g', marker='.', label=\"input\")\nplt.scatter(mass[1], targets, color='b', marker='.', label=\"targets\")\nplt.yscale('log')\nplt.ylabel(\"log(Data size)\")\nplt.xlabel(\"Mass2\")\nplt.legend()\nplt.savefig(save_dir+\"/M2_vs_N\")\nlogging.info(\"Plot is saved at %s.png\" % (save_dir+\"/M2_vs_N\"))\n\nplt.figure()\nplt.grid(which='both', alpha=0.5, ls='--')\nplt.scatter(mass[0], mass[1], marker='.')\nplt.xlabel(\"Mass1\")\nplt.ylabel(\"Mass2\")\nplt.savefig(save_dir+\"/M1_vs_M2\")\nlogging.info(\"Plot is saved at %s.png\" % (save_dir+\"/M1_vs_M2\"))\n","repo_name":"GooLee0123/GWFGen_public","sub_path":"data_statistic_plot.py","file_name":"data_statistic_plot.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43479294377","text":"\"\"\"##Import Libraries\"\"\"\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split \nfrom sklearn import preprocessing\nfrom sklearn import linear_model\nimport tkinter\nimport matplotlib\nmatplotlib.use('TkAgg')\n\n\"\"\"##Data Preprocessing\"\"\"\n\ndef prepare_data(df,forecast_col,forecast_out,test_size):\n label = df[forecast_col].shift(-forecast_out);#creating new column called label with the last 5 rows are nan\n X = np.array(df[[forecast_col]]); #creating the feature array\n X = preprocessing.scale(X) #processing the feature array\n X_lately = X[-forecast_out:] #creating the column i want to use later in the predicting method\n X = X[:-forecast_out] # X that will contain the training and testing\n label.dropna(inplace=True); #dropping na values\n y = np.array(label) # assigning Y\n X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=test_size) #cross validation \n\n response = [X_train,X_test , Y_train, Y_test , X_lately];\n return response;\n\n\ndef linear_regression_fun(df, name):\n \"\"\"##Ploting\"\"\"\n\n # df['Date'] = pd.to_datetime(df.Date,format='%Y-%m-%d')\n # df.index = df['Date']\n # plt.figure(figsize=(16,8))\n # plt.plot(df['Close'], label='Close Price history')\n\n \"\"\"##Split Datasets\"\"\"\n\n forecast_col = 'Close'#choosing which column to forecast\n forecast_out = 5 #how far to forecast \n test_size = 0.2; #the size of my test set\n X_train, X_test, Y_train, Y_test , X_lately =prepare_data(df,forecast_col,forecast_out,test_size)\n print(X_lately) ## This is our predicting test data\n\n \"\"\"##Use Linear Regression Model\"\"\"\n\n learner = linear_model.LinearRegression()\n learner.fit(X_train,Y_train); #training the linear regression model\n score=learner.score(X_test,Y_test);#testing the linear regression model\n forecast= learner.predict(X_lately); #set that will contain the forecasted data\n print(score)\n print(forecast)\n\n \"\"\"##Predicted Value Score Plot\"\"\"\n\n # plt.figure(figsize=forecast.shape)\n # plt.plot(forecast, label='Predicted price')\n plt.title(name)\n plt.plot(X_lately,forecast, label =\"predicted close 
price\")\n #plt.plot(X_lately,label=\"Previous day close price\")\n plt.ylabel(\"Predicted Price\")\n plt.xlabel(\"Previous Day Close Price\")\n plt.show()\n\n\n\"\"\"##Data Load\"\"\"\n\nfor i in range(3):\n if i==0:\n df = pd.read_excel(r'Apple.xlsx')\n print(df.head())\n linear_regression_fun(df, \"Apple\");\n elif i == 1:\n df = pd.read_excel(r'Google.xlsx')\n print(df.head())\n linear_regression_fun(df, \"Google\")\n else:\n df = pd.read_excel(r'Microsoft.xlsx')\n print(df.head())\n linear_regression_fun(df, \"Microsoft\")\n\n\n\n","repo_name":"princexoleo/stock_predictions_with_linear_regression","sub_path":"stock_predicted_linear_regression.py","file_name":"stock_predicted_linear_regression.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27780446223","text":"import random\nimport numpy as np\nimport torch\nimport os\nfrom pathlib import Path\nimport shutil\n\nfrom sklearn.utils import class_weight\n\n\nclass EarlyStopping:\n def __init__(self, device, config, test_set, test_generator, monitor='val_loss', mode='auto', verbose=False,\n delta=0):\n \"\"\"\n Args:\n \"\"\"\n print('Set EarlyStopping')\n self.config = config\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n self.path = config['best_model_name'] + '.pt'\n self.path_final = config['best_model_name'] + '_state' + '.pt'\n random.seed(1)\n self.monitor = monitor\n\n self.device = device\n self.test_set = test_set\n self.test_generator = test_generator\n self.test_filenames = test_set.get_filenames()\n self.store_threshold = config['STORE_THRESHOLD']\n\n if mode == 'min':\n self.monitor_op = np.less\n elif mode == 'max':\n self.monitor_op = np.greater\n else:\n if 'loss' in self.monitor:\n self.monitor_op = np.greater\n else:\n self.monitor_op = np.less\n\n def __call__(self, score, model, val_acc):\n\n current = score\n\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(score, model)\n if self.monitor_op(current - self.delta, self.best_score):\n self.counter += 1\n print(f'EarlyStopping counter: {self.counter} out of {self.config[\"PATIENCE\"]}')\n if self.counter >= self.config['PATIENCE']:\n self.early_stop = True\n else:\n self.save_checkpoint(score, model)\n if val_acc > self.config['STORE_THRESHOLD']:\n print(\"{} larger than {}\".format(val_acc, self.config['STORE_THRESHOLD']))\n y_test_pred = []\n for i, (inputs) in enumerate(self.test_generator):\n inputs = [i.float().to(self.device) for i in inputs]\n model.eval()\n if '_daux' in self.config['head']:\n outputs, _, _ = model(*inputs)\n elif '_aux' in self.config['head']:\n outputs, _ = model(*inputs)\n else:\n outputs = model(*inputs)\n _, y_pred = torch.max(outputs, 1)\n y_test_pred.append(y_pred.cpu().tolist())\n\n self.export(y_test_pred, self.test_filenames)\n\n self.best_score = score\n self.counter = 0\n\n return self.best_score\n\n def save_checkpoint(self, score, model):\n \"\"\"Saves model when validation loss decrease.\"\"\"\n if self.verbose:\n print(f'Validation {self.monitor} ({self.best_score:.6f} --> {score:.6f}). 
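# --- Added example (not part of the original record) ---
# prepare_data() in the stock record above builds supervised labels by
# shifting the 'Close' column up by forecast_out rows, so each row is paired
# with the price forecast_out days later. A minimal sketch of that idea with
# a hypothetical toy series:
import pandas as pd

df = pd.DataFrame({'Close': [10.0, 11.0, 12.0, 13.0, 14.0, 15.0]})
forecast_out = 2
label = df['Close'].shift(-forecast_out)    # last forecast_out rows become NaN
X = df[['Close']].to_numpy()[:-forecast_out]
y = label.dropna().to_numpy()
print(X.ravel())  # [10. 11. 12. 13.]  -> features
print(y)          # [12. 13. 14. 15.]  -> price two steps ahead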
Saving model ...')\n        torch.save(model.state_dict(), self.path)\n\n    def restore(self, model):\n        # load best model weights \n        print(\"Restore best performing model {}\".format(self.best_score))\n        # restored = \n        model.load_state_dict(torch.load(self.path))\n        torch.save(model, self.path_final)\n        return model  # restored\n\n    def export(self, test_prediction, test_filenames):\n        export_path = os.path.join('../fumo/', self.path.split('/')[-1])\n\n        dirpath = Path(export_path)\n        if dirpath.exists() and dirpath.is_dir():\n            shutil.rmtree(dirpath)\n\n        if not os.path.exists(export_path):\n            os.makedirs(export_path)\n\n        test_flat = [item for sublist in test_prediction for item in sublist]\n        test_flat = self.test_set.get_real_labels_fromListoLabels(test_flat)\n        for i, file in enumerate(test_filenames):\n            with open(os.path.join(export_path, file.split('.')[0] + '.txt'), \"w\") as file:\n                file.write(str(test_flat[i]))\n\n\n# functions \ndef class_weights(y_train):\n    return class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)\n\n\ndef export_predictions(test_prediction, test_filenames, data_set, name):\n    export_path = os.path.join('../fumo/', name)\n\n    dirpath = Path(export_path)\n    if dirpath.exists() and dirpath.is_dir():\n        shutil.rmtree(dirpath)\n\n    if not os.path.exists(export_path):\n        os.makedirs(export_path)\n\n    test_flat = [item for sublist in test_prediction for item in sublist]\n    test_flat = data_set.get_real_labels_fromListoLabels(test_flat)\n    print(test_flat)\n    for i, file in enumerate(test_filenames):\n        with open(os.path.join(export_path, file.split('.')[0] + '.txt'), \"w\") as file:\n            file.write(str(test_flat[i]))\n\n\ndef freeze_unfreeze(UNFREEZE, model, layers_from_bottom, layers_from_top):\n    if UNFREEZE:\n        print('**Unfreeze model')\n    else:\n        print('**Freeze model')\n    print(\"if added from front or back\")\n\n    if layers_from_bottom > 0:\n        print(len(list(model.children())), 'children are available')\n        for layer_no, child in enumerate(model.children(), 1):\n            if layer_no <= layers_from_bottom:\n                print('UNFREEZE: ', UNFREEZE, 'child modules no:', layer_no, len(list(child.parameters())))\n                for param in child.parameters():\n                    param.requires_grad = UNFREEZE  # requires_grad follows UNFREEZE: True unfreezes the layer, False freezes it\n            elif layer_no <= layers_from_bottom + 1:\n                print('first non affected layer:', child)\n\n    if layers_from_top > 0:\n        print(len(list(model.children())), 'children are available')\n        for layer_no, child in enumerate(model.children(), 1):\n            if layer_no <= layers_from_top:\n                print('UNFREEZE: ', UNFREEZE, 'child modules no:', layer_no, len(list(child.parameters())))\n                for param in child.parameters():\n                    param.requires_grad = UNFREEZE  # requires_grad follows UNFREEZE: True unfreezes the layer, False freezes it\n            elif layer_no <= layers_from_top + 1:\n                print('first non affected layer:', child)\n\n    return model\n","repo_name":"lstappen/XAWARE","sub_path":"x_aware/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"10587671683","text":"# -*- coding: UTF-8 -*-\nfrom . 
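# --- Added example (not part of the original record) ---
# The EarlyStopping class above implements the usual patience pattern: stop
# once the monitored metric has gone PATIENCE consecutive checks without
# improving. A minimal framework-free sketch of the same idea (toy losses):
def early_stop(losses, patience=3, delta=0.0):
    best, counter = float('inf'), 0
    for epoch, loss in enumerate(losses):
        if loss < best - delta:        # improvement: remember it, reset counter
            best, counter = loss, 0
        else:                          # no improvement: count it
            counter += 1
            if counter >= patience:
                return epoch           # index at which training would stop
    return None

print(early_stop([1.0, 0.8, 0.79, 0.81, 0.82, 0.83]))  # -> 5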
import VERSION as _VER\nfrom .Wikimedia_Commons import main as _wm_c_main\nfrom argparse import ArgumentParser as _ArgParser\nfrom functools import partial as _part\nfrom sys import modules as _mods\nfrom typing import Callable as _Call\n\n\ndef parser(parent: _Call[..., _ArgParser] | None = None):\n prog0 = _mods[__name__].__package__\n prog = prog0 if prog0 else __name__\n del prog0\n parser = (_ArgParser if parent is None else parent)(\n prog=f\"python -m {prog}\",\n description=\"archive data\",\n add_help=True,\n allow_abbrev=False,\n exit_on_error=False,\n )\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"{prog} v{_VER}\",\n help=\"print version and exit\",\n )\n subparsers = parser.add_subparsers(\n required=True,\n )\n _wm_c_main.parser(\n _part(subparsers.add_parser, _wm_c_main.__package__.replace(f\"{prog}.\", \"\"))\n )\n return parser\n","repo_name":"polyipseity/pyarchivist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"20852598642","text":"import constants\nimport math\nfrom game.scripting.action import Action\nfrom game.shared.point import Point\nfrom game.casting.projectile import Projectile\n\nclass HandleMouseButtonPressed(Action):\n\n def __init__(self, mouse_service, who_plays):\n self._mouse_service = mouse_service\n # self._who_plays = who_plays\n\n def execute(self, cast, script, scene_manager):\n player = scene_manager.who_plays\n if self._mouse_service.is_mouse_button_left_pressed():\n scene_manager.projecile_projections = []\n click_position = self._mouse_service.get_click_position()\n xc = click_position.get_x()\n yc = click_position.get_y()\n #print(f\"x: {xc}, y: {yc}\")\n if player == constants.ID_PLAYER1:\n # create projectile\n tank1 = cast.get_actors(\"tanks\")[0]\n position_t = tank1.get_position()\n xp = int(position_t.get_x() + constants.WIDTH_PLAYER1 / 2)\n yp = int(position_t.get_y() - constants.HEIGHT_PLAYER1 / 2) \n position_p = Point(xp, yp)\n h = math.sqrt(math.pow(yc - yp, 2) + math.pow(xc - xp, 2))\n theta = int(math.asin((yc - yp) / h) * 180 / math.pi)\n if xc > xp:\n theta = - theta\n else:\n theta = 180 + theta\n #elif xc < xp and yc < yp:\n # theta = 180 + theta\n #elif xc < xp and yc > yp:\n # theta = 180 - theta\n color = tank1.get_color()\n projectile = Projectile(position_p, constants.PROJECTILE_RADIUS, constants.PROJECTILE_EXAMPLE_V0, theta)\n projectile.set_color(color)\n scene_manager.who_plays = constants.ID_PLAYER2\n #cast.add_actor(\"projectiles\", projectile)\n else:\n # create projectile\n tank2 = cast.get_actors(\"tanks\")[1]\n position_t = tank2.get_position()\n xp = int(position_t.get_x() + constants.WIDTH_PLAYER2 / 2)\n yp = int(position_t.get_y() - constants.HEIGHT_PLAYER2 / 2) \n position_p = Point(xp, yp)\n h = math.sqrt(math.pow(yc - yp, 2) + math.pow(xc - xp, 2))\n theta = int(math.asin((yc - yp) / h) * 180 / math.pi)\n if xc > xp:\n theta = - theta\n else:\n theta = 180 + theta\n #elif xc < xp and yc < yp:\n # theta = 180 + theta\n #elif xc < xp and yc > yp:\n # theta = 180 - theta\n color = tank2.get_color()\n projectile = Projectile(position_p, constants.PROJECTILE_RADIUS, constants.PROJECTILE_EXAMPLE_V0, theta)\n projectile.set_color(color)\n scene_manager.who_plays = constants.ID_PLAYER1\n #position = projectile.get_position()\n #xp = position.get_x()\n #yp = position.get_y()\n #print(f\"x: {xp}, y: {yp}\")\n 
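# --- Added note (not part of the original record; appended alongside the tank-game snippet above) ---
# The firing angle above is derived with math.asin((yc - yp) / h) followed by
# a manual quadrant fix comparing xc and xp. math.atan2 resolves the quadrant
# from the signs of both deltas directly, which removes the special cases.
# A minimal check with hypothetical tank and click coordinates (the game's
# own screen-coordinate convention would still need to be applied on top):
import math

xp, yp = 100.0, 100.0          # muzzle position
xc, yc = 50.0, 150.0           # mouse click
theta = math.degrees(math.atan2(yc - yp, xc - xp))
print(round(theta))            # -> 135, no quadrant special-casing needed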
cast.add_actor(\"projectiles\", projectile)","repo_name":"Giyip/cse210-06","sub_path":"tank_game/game/scripting/handle_mouse_button_pressed.py","file_name":"handle_mouse_button_pressed.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8782033328","text":"import os\nfrom db_mgt.db_exec import DBExec\nfrom db_mgt.page_tables import Page, PageManager\nfrom db_mgt.group_tables import Group, GroupMembers, GroupTableManager\nfrom utilities.miscellaneous import get_temp_file_name, extract_fields_to_dict\nimport csv\nfrom lxml import etree, html\nimport lxml\nfrom flask import send_file, render_template\nfrom utilities.sst_exceptions import SsTSystemError\nfrom datetime import datetime as dt\n\n\ndef manage_group_functions(db_exec: DBExec, form):\n \"\"\"Functions to manage groups.\"\"\"\n \"\"\"\n Route: '/sysadmin/manage_groups' => manage_groups\n Template: manage_groups.jinja2\n Form: manage_groups_form.py\n Processor: manage_groups.py\n \"\"\"\n function_to_execute = form.work_function.data\n group_name = form.group_name.data\n owner = form.group_owner.data\n purpose = form.group_purpose.data\n member_name = form.member_name.data\n result_template = 'sysadmin/display_group_and_members.jinja2'\n # gr_cg gr_del gr_am gr_rm gr_lg gr_lm\n try:\n group_mgr = db_exec.create_group_manager()\n user_mgr = db_exec.create_user_manager()\n if function_to_execute == 'gr_cg': # 'Create New Group'\n if group_mgr.get_group_id_from_name(group_name):\n form.errors['group_name'] = [f'A group named {group_name} already exists']\n return False\n owner_id = user_mgr.get_user_id_from_name(owner)\n group = Group(owner=owner_id, group_name=group_name, group_purpose=purpose, status='active')\n group_mgr.create_group(group)\n group_mgr.add_member_to_group(group.id, owner_id)\n return True\n elif function_to_execute == 'gr_del': # Delete Existing Group\n group_id = group_mgr.get_group_id_from_name(group_name)\n if not group_id:\n form.errors['group_name'] = ['Group Does Not Exist']\n return False\n group_mgr.remove_group(group_id)\n return True\n elif function_to_execute == 'gr_am': # Add Member to an existing group\n group_id = group_mgr.get_group_id_from_name(group_name)\n if not group_id:\n form.errors['group_name'] = ['Group Does Not Exist']\n return False\n member_id = user_mgr.get_user_id_from_name(member_name)\n if not member_id:\n form.errors['member_name'] = [f'Member: {member_name} not found']\n return False\n res = group_mgr.add_member_to_group(group_id, member_id)\n if not res:\n form.errors['Already There'] = [f'{member_name} is already a member of {group_name}']\n return res\n elif function_to_execute == 'gr_rm': # Remove member from an existing group\n group_id = group_mgr.get_group_id_from_name(group_name)\n if not group_id:\n form.errors['group_name'] = ['Group Does Not Exist']\n return False\n member_id = user_mgr.get_user_id_from_name(member_name)\n if not member_id:\n form.errors['member_name'] = [f'Member: {member_name} not found']\n return False\n res = group_mgr.remove_member_from_group(group_id, member_id)\n if not res:\n raise SsTSystemError(f'Database did not find member or group after prior check for existence')\n return True\n elif function_to_execute == 'gr_lg': # List Groups - simple web page for now\n groups = group_mgr.get_all_groups()\n group_fields = group_mgr.get_table_fields('sstgroup')\n res_groups = []\n for a_group in groups:\n members = group_mgr.get_group_members(db_exec, 
a_group.id)\n dct = extract_fields_to_dict(a_group, group_fields)\n owner_id = dct['owner']\n dct['owner'] = user_mgr.get_user_name_from_id(owner_id)\n dct['nbr_members'] = len(members)\n dct['created'] = dct['created'].strftime('%m/%d/%Y')\n res_groups.append(dct)\n context = {'function': 'gr_lg',\n 'fields': ['id', 'group name', 'owner', 'group purpose', 'status', 'created', 'nbr of members'],\n 'values': res_groups}\n result = render_template(result_template, **context)\n return result\n elif function_to_execute == 'gr_lm': # List Members - simple web page for now\n member_fields = group_mgr.get_table_fields('sstgroup_member')\n group_id = group_mgr.get_group_id_from_name(group_name)\n members = group_mgr.get_group_members(db_exec, group_id)\n res_members = []\n for member in members:\n dct = extract_fields_to_dict(member, member_fields)\n dct['member_name'] = member.username\n res_members.append(dct)\n context = {'function': 'gr_lm',\n 'group_name': group_name,\n 'fields': ['member_name'],\n 'values': res_members}\n result = render_template(result_template, **context)\n return result\n else:\n form.errors['work_function'] = ['Selected Work Function Not Yet Implemented']\n return False\n except Exception as e:\n # TODO: handle error/log, and return useful message to user\n form.errors['work_function'] = ['manage groups - Exception occurred processing page']\n form.errors['work_function'] = [e.args]\n return False\n","repo_name":"donox/ssflask2","sub_path":"ssfl/sysadmin/manage_groups.py","file_name":"manage_groups.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"72668499342","text":"\r\nimport pygame as pg\r\nimport os\r\n\r\nWINDOW = (1720, 1040)\r\nDEFAULT_SPEED = 1\r\nRAY_LENGTH = 230\r\nRAY_JUMP_LENGTH = 25\r\nDRAW_RAYS = True\r\nKEEP_DIRECTION = True\r\nCAR_START_POS = (1370.0, 280.0)\r\nCAR_START_ANGLE = 180\r\n# CAR_START_POS = (1095.0, 110.0)\r\nINPUT_NUM = 24\r\nAI_TRAIN_SESSION = True\r\nAI_DISPLAY_RAYS = True\r\n# map colors - ROAD, GRASS, DISTANCE LINE\r\n\r\n# COLOR1 = (255, 255, 255, 255) # ROAD\r\n# COLOR2 = (100, 116, 55, 255) # GRASS\r\n# COLOR3 = (226, 40, 29, 255) # DISTANCE LINE\r\nCOLOR4 = (255, 204, 0) # RAYS COLOR\r\nCOLOR5 = (102, 204, 255) # DIRECTION RAY COLOR\r\n\r\nCOLOR1 = (255, 255, 255, 255) # ROAD\r\nCOLOR2 = (0, 0, 0, 255) # GRASS\r\nCOLOR3 = (0, 255, 255, 255) # DISTANCE LINE\r\n\r\n# road_map_sense_test_drive.png\r\n\r\nC_DIR = os.path.dirname(os.path.abspath(__file__))\r\n\r\n# MAP_BORDER = pg.image.load(os.path.join(\r\n# C_DIR, \"ai_images\", \"road_map_sense_test_drive.png\"))\r\n\r\nMAP_BORDER = pg.image.load(os.path.join(\r\n C_DIR, \"ai_images\", \"set_road_train6.png\"))\r\n# MAP_BORDER_SEMAFOR = pg.image.load(os.path.join(\r\n# C_DIR, \"ai_images\", \"set_road_train6.png\"))\r\n\r\n# MAP_BORDER = pg.image.load(os.path.join(\r\n# C_DIR, \"ai_images\", \"road_map_sense_TEST.png\"))\r\n# MAP_BORDER_SEMAFOR = pg.image.load(os.path.join(\r\n# C_DIR, \"ai_images\", \"road_map_sense_TEST.png\"))\r\n\r\nMAP_VIEW = pg.image.load(os.path.join(\r\n C_DIR, \"ai_images\", \"set_road_train6_view.png\"))\r\nCAR_SPRITE = pg.image.load(os.path.join(\r\n C_DIR, \"ai_images\", \"small_ai_car.png\"))\r\n\r\nDT = 0.016 # fixed DT during training is a better option due to possible skipping of 
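# --- Added example (not part of the original record) ---
# manage_group_functions() above selects behaviour from string codes
# ('gr_cg', 'gr_del', 'gr_am', ...) with one long if/elif chain. A common
# alternative is a dict that maps each code to a handler; a minimal sketch
# with hypothetical handler names (not the project's real API):
def create_group(form):
    return f"created {form['group_name']}"

def delete_group(form):
    return f"deleted {form['group_name']}"

HANDLERS = {'gr_cg': create_group, 'gr_del': delete_group}

def dispatch(code, form):
    handler = HANDLERS.get(code)
    if handler is None:
        return f"unknown work function: {code}"  # mirrors the form.errors fallback
    return handler(form)

print(dispatch('gr_cg', {'group_name': 'editors'}))  # -> created editors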
frames\r\n","repo_name":"goranjosko/car_neat_ai","sub_path":"ai_settings.py","file_name":"ai_settings.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73863711182","text":"\"\"\"LeetCode problem 53. Maximum Subarray\n\nLink: https://leetcode.com/problems/maximum-subarray/\nDifficulty: Easy\nStatus: Accepted (01/22/2022 15:21) 849 ms 28.7 MB\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def maxSubArray(self, nums: List[int]) -> int:\n        # Kadane's algorithm: 'found' is the best sum ending at i, 'last_max' the best overall\n        found = -1000000\n        last_max = -1000000\n        for i in range(len(nums)):\n            if found + nums[i] < nums[i]:\n                found = nums[i]\n            else:\n                found += nums[i]\n\n            if last_max < found:\n                last_max = found\n\n        return last_max\n","repo_name":"bgrinko/leetcode","sub_path":"problems/0053E.py","file_name":"0053E.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37505097507","text":"from typing import List\n\n\nclass Solution:\n    def multiply(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:\n        \n        def pack(matrix):\n            # collect only the non-zero cells into a dict keyed by (row, col)\n            memo = {}\n            \n            for row in range(len(matrix)):\n                for col in range(len(matrix[0])):\n                    if (matrix[row][col] != 0):\n                        memo[(row, col)] = matrix[row][col]\n            \n            return memo\n        \n        a_matrix = pack(A)\n        b_matrix = pack(B)\n        \n        output = [[0 for col in range(len(B[0]))] for row in range(len(A))]\n        for (row, col) in a_matrix:\n            for k in range(len(A[0])):\n                if (col, k) in b_matrix:\n                    output[row][k] += (a_matrix[(row, col)] * b_matrix[(col, k)])\n        \n        return output\n","repo_name":"vigneshkulo/leetCode","sub_path":"python3/SparseMatrix.py","file_name":"SparseMatrix.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21109773462","text":"# Aidan Roth and Christopher Roseberry\nclass DFA:\n    Q = [] # set of all states in DFA\n    alphabet = []\n    delta = [] # transitions\n    startState = 0\n    acceptingStates = []\n\ndef nextState(currentState, symbol, modValue):\n    # appending digit 'symbol' to the decimal number sends state s to (10*s + symbol) mod modValue\n    return ((10*currentState) + symbol) % modValue\n\ndef MinString(numStates, alphabet, inverseAlpha, state_table, acceptingStates):\n\n    isEmpty = not alphabet\n    if isEmpty:\n        return \"No Solution\"\n    queue = []\n    parent = [0 for i in range(numStates)]\n    label = [0 for i in range(numStates)]\n    visited = [0 for i in range(numStates)]\n    next = 0\n    current = 0\n    prev = 0\n    break_flag = False\n\n    for i in range(len(alphabet)):\n        next = state_table[0][alphabet[i]]\n        visited[next] = True\n        queue.append(next)\n        parent[next] = prev\n        label[next] = alphabet[i]\n\n    while queue:\n        current = queue[0]\n        queue.pop(0)\n        for i in range(len(alphabet)):\n            next = state_table[current][alphabet[i]]\n            for j in range(len(acceptingStates)):\n                if next == acceptingStates[j]:\n                    break_flag = True\n                    break\n            if break_flag:\n                parent[next] = current\n                label[next] = alphabet[i]\n                break\n            elif not visited[next]:\n                visited[next] = True\n                parent[next] = current\n                label[next] = alphabet[i]\n                queue.append(next)\n        if break_flag:\n            break\n\n    if not break_flag:\n        return \"No Solution\"\n    else:\n        temp_string = output = \"\"\n        while parent[next] != 0:\n            temp_string += inverseAlpha[label[next]]\n            next = parent[next]\n        temp_string += inverseAlpha[label[next]]\n        j = len(temp_string) - 1\n        while j >= 0:\n            output += temp_string[j]\n            j -= 1\n        return output\n\n\ndef smallestMultiple(k, permitted_digits):\n\n    queue = []\n    visited 
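# --- Added note (not part of the original record) ---
# The maxSubArray record above is Kadane's algorithm. The same recurrence is
# often written with max(), which makes the two choices at each step explicit:
def max_subarray(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)     # extend the current run or start fresh at x
        best = max(best, cur)
    return best

print(max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # -> 6, from [4, -1, 2, 1]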
= [False for i in range(k)]\n label = [0 for i in range(k)]\n parent = [0 for i in range(k)]\n Next = 0\n current = 0\n break_flag = False\n for i in permitted_digits:\n Next = nextState(0, i, k)\n visited[Next] = True\n queue.append(Next)\n parent[Next] = 0\n label[Next] = i\n\n while queue:\n placeholder = 5\n current = queue[0]\n queue.pop(0)\n for i in permitted_digits:\n Next = nextState(current, i, k)\n if Next == 0:\n break_flag = True\n if break_flag:\n parent[Next] = current\n label[Next] = i\n break\n elif not visited[Next]:\n visited[Next] = True\n parent[Next] = current\n label[Next] = i\n queue.append(Next)\n if break_flag:\n break\n\n if not break_flag:\n return \"No Solution\"\n else:\n temp_string = output = \"\"\n while parent[Next] != 0:\n temp_string += str(label[Next])\n Next = parent[Next]\n temp_string += str(label[Next])\n j = len(temp_string) -1\n while j >= 0:\n output += temp_string[j]\n j -= 1\n\n return output\n\ndef main():\n\n k = int(input(\"Enter value for k, the number of states in the DFA: \"))\n dfa = DFA()\n dfa.alphabet = input(\"\\nEnter the permitted numbers separated by spaces : \").strip().split()\n for i in range(len(dfa.alphabet)):\n dfa.alphabet[i] = int(dfa.alphabet[i])\n\n print(\"Smallest Multiple: \", smallestMultiple(k, dfa.alphabet))\n\n symbols = [\"a\", \"b\", \"c\", \"d\"]\n alphabet_values = [0, 1, 2, 3]\n accepting_states = [4]\n states = [[1, 2, 3, 5], [1, 2, 3, 4], [1, 5, 3, 5], [5, 2, 3, 4], [5, 5, 5, 4], [5, 5, 5, 5]]\n print(\"The shortest string in the DFA is: \", MinString(len(states), alphabet_values, symbols, states, accepting_states))\n\n symbols = [\"v\", \"w\", \"x\", \"y\", \"z\"]\n alphabet_values.append(4)\n accepting_states = [6]\n states = [[1, 1, 2, 2, 3], [1, 4, 7, 7, 7], [7, 7, 2, 2, 5], [1, 7, 7, 5, 3], [7, 4, 5, 5, 6], [1, 5, 3, 5, 5], [6, 6, 6, 6, 6], [7,7,7,7,7]]\n print(\"The shortest string in the DFA is: \", MinString(len(states), alphabet_values, symbols, states, accepting_states))\n\n return\n\n\nmain()","repo_name":"Aidanroth/DFA-Substring-Project","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31913682414","text":"from operator import itemgetter\n\nimport graphene\nfrom django.db import transaction\nfrom graphene import InputObjectType, Mutation, Field, ObjectType\nfrom graphene_django.types import DjangoObjectType\nfrom graphql_jwt.decorators import login_required\nfrom rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \\\n CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \\\n DENY, FeatureCollectionDataType, resolver_for_dict_field, create_paginated_type_mixin, \\\n get_paginator\nfrom rescape_graphene import increment_prop_until_unique, enforce_unique_props\nfrom rescape_graphene.django_helpers.pagination import resolve_paginated_for_type, pagination_allowed_filter_arguments\nfrom rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, delete_if_marked_for_delete, \\\n update_or_create_with_revision, top_level_allowed_filter_arguments, allowed_filter_arguments\nfrom rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \\\n DjangoObjectTypeRevisionedMixin\nfrom rescape_graphene.schema_models.geojson.types.feature_collection import 
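# --- Added example (not part of the original record) ---
# smallestMultiple() above searches the k states "current value mod k" with
# BFS: appending digit d moves state s to (10*s + d) % k, and reaching state 0
# means a multiple of k has been built. A compact re-derivation of the same
# idea, with a guard against a leading zero (which the original omits):
from collections import deque

def smallest_multiple(k, digits):
    seen, q = set(), deque()
    for d in sorted(digits):
        if d != 0:                      # a number may not start with 0
            q.append((d % k, str(d)))
    while q:
        state, num = q.popleft()
        if state == 0:
            return num                  # BFS: first hit is the shortest answer
        if state in seen:
            continue
        seen.add(state)
        for d in sorted(digits):
            q.append(((10 * state + d) % k, num + str(d)))
    return None

print(smallest_multiple(7, [1, 0]))  # -> '1001' (7 * 143)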
feature_collection_data_type_fields\nfrom rescape_python_helpers import ramda as R\n\nfrom rescape_region.model_helpers import get_region_model\nfrom rescape_region.models.region import Region\nfrom .region_data_schema import RegionDataType, region_data_fields\n\nraw_region_fields = dict(\n id=dict(create=DENY, update=REQUIRE),\n key=dict(create=REQUIRE, unique_with=increment_prop_until_unique(Region, None, 'key', R.pick(['deleted']))),\n name=dict(create=REQUIRE),\n # This refers to the RegionDataType, which is a representation of all the json fields of Region.data\n data=dict(graphene_type=RegionDataType, fields=region_data_fields, default=lambda: dict()),\n # This is the OSM geojson\n geojson=dict(\n graphene_type=FeatureCollectionDataType,\n fields=feature_collection_data_type_fields\n ),\n **reversion_and_safe_delete_types\n)\n\n\nclass RegionType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):\n id = graphene.Int(source='pk')\n\n class Meta:\n model = get_region_model()\n\n\n# Modify data field to use the resolver.\n# I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying\n# Django model to generate the fields\nRegionType._meta.fields['data'] = Field(\n RegionDataType,\n resolver=resolver_for_dict_field\n)\n\n# Modify the geojson field to use the geometry collection resolver\nRegionType._meta.fields['geojson'] = Field(\n FeatureCollectionDataType,\n resolver=resolver_for_dict_field\n)\n\nregion_fields = merge_with_django_properties(RegionType, raw_region_fields)\n\n# Paginated version of RegionType\n(RegionPaginatedType, region_paginated_fields) = itemgetter('type', 'fields')(\n create_paginated_type_mixin(RegionType, region_fields)\n)\n\n\nclass RegionQuery(ObjectType):\n regions = graphene.List(\n RegionType,\n **top_level_allowed_filter_arguments(region_fields, RegionType)\n )\n regions_paginated = Field(\n RegionPaginatedType,\n **pagination_allowed_filter_arguments(region_paginated_fields, RegionPaginatedType)\n )\n\n @staticmethod\n def _resolve_regions(info, **kwargs):\n return region_resolver('filter', **kwargs)\n\n @login_required\n def resolve_regions(self, info, **kwargs):\n return RegionQuery._resolve_regions(info, **kwargs)\n\n @login_required\n def resolve_regions_paginated(self, info, **kwargs):\n return resolve_paginated_for_type(\n RegionPaginatedType,\n RegionQuery._resolve_regions,\n **kwargs\n )\n\ndef region_resolver(manager_method, **kwargs):\n \"\"\"\n\n Resolves the regions for model get_region_model()\n :param manager_method: 'filter', 'get', or 'count'\n :param kwargs: Filter arguments for the Region\n :return:\n \"\"\"\n\n q_expressions = process_filter_kwargs(get_region_model(), **R.merge(dict(deleted__isnull=True), kwargs))\n return getattr(get_region_model().objects, manager_method)(\n *q_expressions\n )\n\n\nregion_mutation_config = dict(\n class_name='Region',\n crud={\n CREATE: 'createRegion',\n UPDATE: 'updateRegion'\n },\n resolve=guess_update_or_create\n)\n\n\nclass UpsertRegion(Mutation):\n \"\"\"\n Abstract base class for mutation\n \"\"\"\n region = Field(RegionType)\n\n @transaction.atomic\n @login_required\n def mutate(self, info, region_data=None):\n\n deleted_region_response = delete_if_marked_for_delete(Region, UpsertRegion, 'region', region_data)\n if deleted_region_response:\n return deleted_region_response\n\n # We must merge in existing region.data if we are updating data\n if R.has('id', region_data) and R.has('data', region_data):\n # New data gets priority, but this is a deep 
merge.\n region_data['data'] = R.merge_deep(\n Region.objects.get(id=region_data['id']).data,\n region_data['data']\n )\n\n # Make sure that all props are unique that must be, either by modifying values or erring.\n modified_region_data = enforce_unique_props(region_fields, region_data)\n update_or_create_values = input_type_parameters_for_update_or_create(region_fields, modified_region_data)\n region, created = update_or_create_with_revision(Region, update_or_create_values)\n\n return UpsertRegion(region=region)\n\n\nclass CreateRegion(UpsertRegion):\n \"\"\"\n Create Region mutation class\n \"\"\"\n\n class Arguments:\n region_data = type('CreateRegionInputType', (InputObjectType,),\n input_type_fields(region_fields, CREATE, RegionType))(required=True)\n\n\nclass UpdateRegion(UpsertRegion):\n \"\"\"\n Update Region mutation class\n \"\"\"\n\n class Arguments:\n region_data = type('UpdateRegionInputType', (InputObjectType,),\n input_type_fields(region_fields, UPDATE, RegionType))(required=True)\n\n\nclass RegionMutation(graphene.ObjectType):\n create_region = CreateRegion.Field()\n update_region = UpdateRegion.Field()\n\n\ngraphql_update_or_create_region = graphql_update_or_create(region_mutation_config, region_fields)\ngraphql_query_regions = graphql_query(RegionType, region_fields, 'regions')\n\n\ndef graphql_query_regions_limited(region_fields):\n return graphql_query(RegionType, region_fields, 'regions')\n\n\ngraphql_query_regions_paginated = graphql_query(\n RegionPaginatedType,\n region_paginated_fields,\n 'regionsPaginated'\n)\n","repo_name":"rescapes/rescape-region","sub_path":"rescape_region/schema_models/scope/region/region_schema.py","file_name":"region_schema.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"15611981139","text":"import numpy as np\nimport imageio\nfrom scipy import misc\nimport os\nimport _pickle\nfrom keras.utils import to_categorical\nfrom PIL import Image\nimport numpy as np\nimport PIL\nimport Augmentor\n\n\nclass DataReaderMitSceneParsing(object):\n\n def __init__(self):\n self.data_dir = \"/home/milton/dataset/segmentation/mitsceneparsing\"\n self.train_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"images\", \"training\")\n self.validation_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"images\", \"validation\")\n self.test_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"images\", \"testing\")\n self.train_masks_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"annotations\", \"training\")\n self.validation_masks_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"annotations\", \"validation\")\n self.num_channels = 3\n self.image_height = 224\n self.image_width = 224\n self.num_classes = 151\n self.num_threads = 8\n self.resize_train_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"images\", \"224\", \"training\")\n self.resize_train_masks_dir = os.path.join(self.data_dir, \"ADEChallengeData2016\", \"annotations\", \"224\",\n \"training\")\n\n\n def resize_images(self, image_size):\n if not os.path.exists(self.resize_train_dir):\n os.makedirs(self.resize_train_dir)\n\n if not os.path.exists(self.resize_train_masks_dir):\n os.makedirs(self.resize_train_masks_dir)\n\n # for file_name in os.listdir(self.train_dir):\n # file_path = os.path.join(self.train_dir, file_name)\n # save_file_path = os.path.join(self.resize_train_dir, file_name)\n #\n # if 
os.path.exists(file_path):\n # #print(file_path);\n # image=Image.open(file_path)\n # image_resize = image.resize((224, 224), Image.ANTIALIAS)\n # #print(save_file_path)\n # image_resize.save(save_file_path)\n\n\n print(\"resizing mask.\")\n #https://stackoverflow.com/questions/23135552/resize-ground-truth-images-without-changing-the-labels\n for file_name in os.listdir(self.train_masks_dir):\n mask_file = os.path.join(self.train_masks_dir, os.path.basename(file_name).split(\".\")[0] + \".png\")\n mask_save_file_path = os.path.join(self.resize_train_masks_dir, os.path.basename(file_name).split(\".\")[0] + \".jpg\")\n\n # augmentor can not use diffrent mask from image\n if os.path.exists(mask_file):\n #print(mask_file)\n image_mask=Image.open(mask_file)\n #image__maskresize = image_mask.resize((224, 224))\n # print(save_file_path)\n #print(np.unique(image__maskresize))\n #print( np.unique(image_mask))\n #assert np.sum(np.unique(image__maskresize) == np.sum(np.unique(image_mask)))\n image_mask.save(mask_save_file_path)\n\n print(\"ended\")\n\n\n\n def get_train_files(self):\n train_files=[]\n train_mask_files=[]\n for file_name in os.listdir(self.train_dir):\n file_path = os.path.join(self.train_dir, file_name)\n if os.path.exists(file_path):\n train_files.append(file_path)\n mask_file = os.path.join(self.train_masks_dir, os.path.basename(file_path).split(\".\")[0]+\".png\")\n #print(mask_file)\n if os.path.exists(mask_file):\n train_mask_files.append(mask_file)\n return train_files, train_mask_files\n\n\n def get_validation_files(self):\n train_files=[]\n train_mask_files=[]\n for file_name in os.listdir(self.validation_dir):\n file_path = os.path.join(self.validation_dir, file_name)\n if os.path.exists(file_path):\n train_files.append(file_path)\n mask_file = os.path.join(self.validation_masks_dir, os.path.basename(file_path).split(\".\")[0]+\".png\")\n #print(mask_file)\n if os.path.exists(mask_file):\n train_mask_files.append(mask_file)\n return train_files, train_mask_files\n\n\n def get_test_files(self):\n train_files=[]\n train_mask_files=[]\n for file_name in os.listdir(self.test_dir):\n file_path = os.path.join(self.test_dir, file_name)\n print(file_path)\n if os.path.exists(file_path):\n train_files.append(file_path)\n return train_files\n\n\nif __name__ == '__main__':\n obj=DataReaderMitSceneParsing()\n #obj.resize_images(224)\n p = Augmentor.Pipeline(obj.train_dir)\n ##Point to a directory containing ground truth data.\n ##Images with the same file names will be added as ground truth data\n ##and augmented in parallel to the original data.\n p.ground_truth(obj.resize_train_masks_dir)\n # Add operations to the pipeline as normal:\n p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)\n p.flip_left_right(probability=0.5)\n p.zoom_random(probability=0.5, percentage_area=0.8)\n p.flip_top_bottom(probability=0.5)\n p.sample(50)\n\n #obj.resize_images(224)\n # train_images , train_masks = obj.get_validation_files()\n # mask=Image.open(train_masks[10])\n # print(\"max pixel mask {}\".format(np.unique(mask)))\n # mask_resized=mask.resize((224,224))\n # print(np.asarray(mask_resized))\n # print(\"max pixel mask {}\".format(np.unique(mask_resized)))\n #\n # print(\"{},{}\".format(len(train_images), 
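# --- Added note (not part of the original record) ---
# The commented-out mask resize above is the crux of the StackOverflow link
# cited in the code: label masks must be resized with nearest-neighbour
# sampling, because interpolating resamplers can invent class ids that do not
# exist in the ground truth. A minimal sketch with a tiny synthetic mask:
import numpy as np
from PIL import Image

mask = Image.fromarray(np.array([[0, 0, 7, 7],
                                 [0, 0, 7, 7]], dtype=np.uint8))
bad = mask.resize((8, 4), Image.BILINEAR)   # may blend 0 and 7 into new values
good = mask.resize((8, 4), Image.NEAREST)   # label set stays a subset of {0, 7}
print(np.unique(np.array(good)))            # -> [0 7]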
len(train_masks)))\n\n\n\n\n","repo_name":"miltonbd/ai-artist","sub_path":"segmentation/mitsceneparsing/data_reader_mitsceneparsing.py","file_name":"data_reader_mitsceneparsing.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"23711678655","text":"\"\"\"\nEWVtuber\nProject name: virtual live-streamer (VTuber) software\nCopyright: Beijing Enlight Media Co., Ltd. (北京光线传媒股份有限公司)\nTechnical support: Beijing Enlight Media Co., Ltd.\nEditor:fengtao\nMails:fengtao23@mails.ucas.ac.cn\n\"\"\"\nimport time\n\nfrom ViewController.ewbase_controller import BaseController\nfrom EWMedia.mediaplayer import EWMediaPlayer\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import Qt\nfrom Platform.bilibili_livedanmaku import bilibiliDanmaku\nimport requests\nimport json\nimport API.vtuber_api\nfrom Utils.thread import *\nfrom bilibili_api import Credential\n\n\nclass Msg(object):\n    user: str = ''\n    uid: str = ''\n    message: str = ''\n    status: int = 0  # 0 - not answered, 1 - fetching an answer, 2 - answered\n    thread: Thread = None\n    knowledge: str = None\n    response: str = None\n    vid: str = None\n\n\nclass ShuzirenController(BaseController):\n    player: EWMediaPlayer = None\n    scripts_array = []\n\n    def setup_ui(self):\n        self.window.shuziren_video_button.clicked.connect(self.play_video)\n        # img = QPixmap('Assets/Icon/icon.png').scaled(100,100,aspectRatioMode=Qt.KeepAspectRatio)\n        # self.window.image_label.setPixmap(img)\n        # self.window.image_label.setFixedSize(100,100)\n        self.window.shuziren_login_button.clicked.connect(self.login_bilibili)\n        self.window.shuziren_qrcode_label.setHidden(True)\n        self.get_scripts_lists()\n        self.window.shuziren_script_comboBox.currentIndexChanged.connect(self.script_title_changes)\n        self.window.shuziren_script_listWidget.currentRowChanged.connect(self.script_content_changes)\n        self.window.shuziren_script_button.clicked.connect(self.insert_script)\n        self.window.shuziren_roomid_button.clicked.connect(self.connect_bilibili_server)\n        self.hide_qrcode(True)\n\n    t = None\n    # list of messages waiting for a reply\n    reply_queue: list = []\n    is_autonext = False\n\n    def push_queue(self, message: Msg):\n        self.reply_queue.insert(0, message)\n        if len(self.reply_queue) > 3:\n            lst = self.reply_queue\n            self.reply_queue = [lst[0], lst[1], lst[2]]\n        # print(self.reply_queue)\n\n    def pop_queue(self):\n        if len(self.reply_queue) == 0:\n            return None\n        msg = self.reply_queue[0]\n        self.reply_queue.remove(msg)\n        return msg\n\n    def reload_queue(self):\n        self.window.shuziren_queue1_browser.setText('')\n        self.window.shuziren_queue2_browser.setText('')\n        try:\n            msg = self.reply_queue[0]\n            message = f'>> {msg.user}:{msg.message}'\n            if msg.response is not None:\n                message = f'>> {msg.user}:{msg.message}\\n>> AI:{msg.response}'\n            self.window.shuziren_queue1_browser.setText(message)\n            msg = self.reply_queue[1]\n            message = f'>> {msg.user}:{msg.message}'\n            if msg.response is not None:\n                message = f'>> {msg.user}:{msg.message}\\n>> AI:{msg.response}'\n            self.window.shuziren_queue2_browser.setText(message)\n        except IndexError:\n            print('IndexError')\n        self.check_autoreply()\n\n    player_thread = None\n\n    is_replying = False\n\n    def check_autoreply(self):\n        if self.player_thread is not None or self.is_replying is True:\n            return\n        if self.is_autonext:\n            self.reply()\n\n\n    def reply(self):\n        msg = self.pop_queue()\n        if msg is None:\n            return\n        else:\n            self.is_replying = True\n        # print(msg.message)\n        self.reload_queue()\n        user_answer = f'>> {msg.user}:{msg.message}'\n\n        reply = f'>> AI正在思考中...'  # UI text: \"AI is thinking...\"\n        message = f'{user_answer}\\n{reply}'\n        self.window.shuziren_replying_browser.setText(message)\n        # t = 
threading.Thread(target=self.thinking(msg), name='thinking')\n # t.start()\n # t.join(15)\n # asyncio.run(self.thinking(msg=msg))\n self.msg = msg\n self.send_request(msg=msg)\n\n request_thread = None\n\n def send_request(self, msg: Msg):\n if self.request_thread is not None:\n print('请等待问答完成')\n return\n t = Thread()\n self.request_thread = t\n t.start(func=self.thinking)\n\n def thinking(self):\n msg = self.msg\n user_answer = f'>> {msg.user}:{msg.message}'\n question = msg.message\n\n if msg.response is None:\n content = requests.get(f'http://172.23.0.191:10010/get_answer?question={question}')\n response_dic = json.loads(content.text)\n response = response_dic['a']\n msg.response = response\n msg.vid = response_dic['vid']\n self.insert_vid(msg.vid)\n self.request_thread.get_mainloop(message=response, func=self.ai_response)\n else:\n self.request_thread.get_mainloop(message=msg.response, func=self.ai_response)\n\n def ai_response(self, response: str):\n print(f'ai_response:{response}')\n msg = self.msg\n user_answer = f'>> {msg.user}:{msg.message}'\n reply = f'>> AI:{response}'\n message = f'{user_answer}\\n{reply}'\n\n self.window.shuziren_replying_browser.setText(message)\n\n msg = None\n\n def play_video(self):\n if self.player is None:\n self.player = EWMediaPlayer(widget=self.window.video_widget)\n # self.player.load_content_filepath('Tmp/1.mp4')\n self.player.load_stream('rtmp://172.23.0.199:1935/live/stream')\n self.player.play()\n self.is_autonext = True\n\n # 话术\n def get_scripts_lists(self):\n try:\n url = API.vtuber_api.shuziren_script_lists()\n content = requests.get(url)\n response = content.text\n response_dic = json.loads(response)\n self.scripts_array = response_dic['lists']\n self.window.shuziren_script_comboBox.clear()\n for item in response_dic['lists']:\n self.window.shuziren_script_comboBox.addItem(item['title'])\n self.script_title_changes()\n except:\n pass\n\n def script_title_changes(self):\n try:\n index = self.window.shuziren_script_comboBox.currentIndex()\n script = self.scripts_array[index]\n scripts = script['content']\n lists = scripts['lists']\n self.window.shuziren_script_listWidget.clear()\n for video in lists:\n text = video['content']\n video_id = video['video_id']\n self.window.shuziren_script_listWidget.addItem(text)\n self.script_content_changes()\n except IndexError:\n pass\n\n def script_content_changes(self):\n try:\n index_row = self.window.shuziren_script_listWidget.currentIndex().row()\n index_section = self.window.shuziren_script_comboBox.currentIndex()\n script = self.scripts_array[index_section]\n scripts = script['content']\n lists = scripts['lists']\n item = lists[index_row]\n text = item['content']\n self.window.shuziren_script_textBrowser.setText(text)\n except IndexError:\n self.window.shuziren_script_textBrowser.setText('')\n\n # 插入话术队列\n def insert_script(self):\n index_row = self.window.shuziren_script_listWidget.currentIndex().row()\n index_section = self.window.shuziren_script_comboBox.currentIndex()\n script = self.scripts_array[index_section]\n scripts = script['content']\n lists = scripts['lists']\n item = lists[index_row]\n text = item['content']\n video_id = item['video_id']\n url = API.vtuber_api.shuziren_script_insert()\n response = requests.get(f'{url}?video_id={video_id}')\n print(response.text)\n\n # 登录\n bilibili_manager = None\n is_login = False\n bilibili_credential = None\n\n def login_bilibili(self):\n if self.is_login is True:\n self.is_login = False\n self.window.shuziren_nickname_label.setText('未登录')\n 
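# --- Added example (not part of the original record) ---
# The controller above hands each incoming danmaku message to a worker thread
# and pushes the answer back when the HTTP QA request finishes. A framework-
# free sketch of that producer/worker pattern using only the standard library:
import queue
import threading

tasks = queue.Queue()

def worker():
    while True:
        question = tasks.get()
        if question is None:              # sentinel: shut the worker down
            break
        answer = f"echo: {question}"      # stand-in for the HTTP QA request
        print(answer)                     # stand-in for updating the UI
        tasks.task_done()

t = threading.Thread(target=worker, daemon=True)
t.start()
tasks.put("hello")
tasks.join()
tasks.put(None)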
self.window.shuziren_login_button.setText('登录')\n self.bilibili_credential = None\n manager: bilibiliDanmaku = self.bilibili_manager\n manager.clear_credential_cache()\n manager.destroy()\n self.bilibili_manager = None\n return\n # print('1')\n self.bilibili_manager = bilibiliDanmaku()\n self.hide_qrcode(False)\n credential, nickname = self.bilibili_manager.login_ui(qrcode_widget=self.window.shuziren_qrcode_label,\n target=self)\n\n def login_status_changed(self, status: str):\n self.window.shuziren_qrcodestatus_label.setText(status)\n self.window.shuziren_qrcodestatus_label.setStyleSheet(\"color: green\")\n\n def login_finished(self, info):\n nickname = info['nickname']\n self.bilibili_credential = info['credential']\n self.window.shuziren_nickname_label.setText(nickname)\n self.hide_qrcode(True)\n self.window.shuziren_login_button.setText('退出登录')\n self.is_login = True\n\n def connect_bilibili_server(self):\n if self.bilibili_credential is None:\n print('未登录')\n return\n roomid = self.window.cartoon_roomid_lineedit.text()\n print(roomid)\n manager: bilibiliDanmaku = self.bilibili_manager\n manager.start_server_connect(roomid=roomid, credential=self.bilibili_credential, target=self)\n\n def did_recieve_danmaku(self, danmaku):\n nickname = danmaku['nickname']\n uid = danmaku['uid']\n message = danmaku['message']\n print(message)\n msg = Msg()\n msg.user = nickname\n msg.message = message\n self.window.shuziren_queue1_browser.setText(message)\n self.push_queue(msg)\n self.reload_queue()\n self.add_message(msg)\n\n message_thread_lst = []\n thread_tag: int = 100\n\n def add_message(self, msg: Msg):\n t = Thread()\n t.tag = self.thread_tag\n self.thread_tag += 1\n self.message_thread_lst.insert(0, t)\n msg.thread = t\n t.start(func=self.run_message_request)\n\n def run_message_request(self):\n t = self.message_thread_lst[0]\n msg = None\n for obj in self.reply_queue:\n if obj.thread is None:\n continue\n if obj.thread.tag == t.tag:\n msg = obj\n if msg is None:\n return\n question = msg.message\n content = requests.get(f'http://172.23.0.191:10010/get_answer?question={question}')\n response_dic = json.loads(content.text)\n response = response_dic['a']\n msg.response = response\n msg.vid = response_dic['vid']\n self.insert_vid(msg.vid)\n t.get_mainloop(message=response, func=self.did_message_response)\n msg.thread = None\n self.message_thread_lst.remove(t)\n\n def did_message_response(self, response):\n self.reload_queue()\n\n def insert_vid(self, vid: str):\n try:\n requests.get(f'http://172.23.0.191:10010/insert_script?video_id={vid}')\n except:\n pass\n\n def hide_qrcode(self, is_hidden: bool):\n self.window.shuziren_qrcode_label.setHidden(is_hidden)\n self.window.shuziren_qrcodecancel_button.setHidden(is_hidden)\n self.window.shuziren_login_button.setHidden(not is_hidden)\n self.window.shuziren_qrcodestatus_label.setHidden(is_hidden)\n","repo_name":"Jacobs12/EWVtuber","sub_path":"ViewController/shuziren_controller.py","file_name":"shuziren_controller.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17254454704","text":"import frappe\nfrom lxml import etree\nfrom werkzeug.wrappers import Response\nimport requests\nfrom requests import Request\nfrom requests.auth import HTTPBasicAuth\nimport json\nfrom urllib.parse import urlparse, parse_qs\n\ndef get_orders(start_date, end_date):\n\torders = frappe.get_all('Sales Order', fields=['name', 'transaction_date', 'status', 'modified', 
'currency', 'grand_total', 'customer', \n\t\t\t\t\t\t\t\t\t'customer_address', 'shipping_address_name'], \n\t\t\t\t\t\t\t\t\tfilters={\"delivery_status\": (\"in\", (\"Not Delivered\", \"Partly Delivered\")), \"billing_status\": \"Fully Billed\", \n\t\t\t\t\t\t\t\t\t\"modified\": (\"between\", (start_date, end_date))})\n\treturn orders\n\n@frappe.whitelist(allow_guest=True)\ndef test():\n\t'''response = requests.get('https://ssapi.shipstation.com/stores',\n\t\t\t\tauth=('', ''))\n\tdata = {\"resource_url\": \"https://ssapi6.shipstation.com/shipments?batchId=190671332&includeShipmentItems=False\", \"resource_type\": \"SHIP_NOTIFY\" }\n\tresponse = requests.post('http://deverp.metactical.com/api/method/metactical.api.shipstation.orders_shipped_webhook?settingid=8f3a7e2cac',\n\t\t\t\tjson=data)\t\t\t\t\n\tprint(response)\n\tprint(response.json())'''\n\tdelivery_note = frappe.get_doc('Delivery Note', 'MAT-DN-2023-00001')\n\tsettings = frappe.get_doc(\"Shipstation Settings\", 'eb788d04eb')\n\treturn order_json(delivery_note, False, settings)\n\t\n\t\n@frappe.whitelist()\ndef sync_shipping_status():\n\tsettings = get_settings()\n\tresponse = requests.get('https://ssapi.shipstation.com/shipments?shipDateStart=2022-08-07',\n\t\t\t\tauth=(settings[0].api_key, settings[0].get_password('api_secret')))\n\tshipments = response.json()\n\tfrappe.set_user(settings[0].shipstation_user)\n\tfor shipment in shipments.get('shipments'):\n\t\texists = frappe.db.exists('Delivery Note', {'pick_list': shipment.get('orderKey'), 'docstatus': 0})\n\t\tif exists:\n\t\t\tweight_display = ''\n\t\t\tsize = ''\n\t\t\tweight = shipment.get('weight')\n\t\t\tif weight_display != '':\n\t\t\t\tweight_display =+ ' | '\n\t\t\tweight_display += str(weight.get('value')) + ' ' + weight.get('units')\n\t\t\tdimensions = shipment.get('dimensions')\n\t\t\tif size != '':\n\t\t\t\tsize += ' | '\n\t\t\tsize += str(dimensions.get('length')) + 'l x ' + str(dimensions.get('width')) + 'w x ' + str(dimensions.get('height')) + 'h'\n\t\t\t\n\t\t\t#For carrier mapping\n\t\t\ttransporter = ''\n\t\t\tfor row in settings[0].transporter_mapping:\n\t\t\t\tif row.carrier_code == shipment.get('carrierCode'):\n\t\t\t\t\ttransporter = row.transporter\n\t\t\tpick_list = shipment.get('orderNumber')\n\t\t\tshipDate = shipment.get('shipDate')\n\t\t\ttrackingNumber = shipment.get('trackingNumber')\n\t\t\tshipmentCost = shipment.get('shipmentCost')\n\t\t\t\n\t\t\t#Update delivery note\n\t\t\texisting_delivery = frappe.db.get_value('Delivery Note', {'pick_list': pick_list})\n\t\t\tdelivery_note = frappe.get_doc('Delivery Note', existing_delivery)\n\t\t\tdelivery_note.update({\n\t\t\t\t'lr_date': shipDate,\n\t\t\t\t'lr_no': trackingNumber,\n\t\t\t\t'transporter': transporter,\n\t\t\t\t'ais_shipment_cost': shipmentCost,\n\t\t\t\t'ais_package_weight': weight_display,\n\t\t\t\t'ais_package_size': size,\n\t\t\t\t'ais_updated_by_shipstation': 1,\n\t\t\t\t'ignore_pricing_rule': 1\n\t\t\t})\n\t\t\tdelivery_note.submit()\n\t\t\t\t\n\t\t\t#Delete order from other shipstation accounts\n\t\t\t'''for row in delivery_note.get('ais_shipstation_order_ids'):\n\t\t\t\tif row.settings_id != settingid[0]:\n\t\t\t\t\tshipstation_settings = frappe.get_doc('Shipstation Settings', row.settings_id)\n\t\t\t\t\tif shipstation_settings.disabled != 1:\n\t\t\t\t\t\tresponse = requests.delete('https://ssapi.shipstation.com/orders/' + row.shipstation_order_id,\n\t\t\t\t\t\t\tauth=(shipstation_settings.api_key, 
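# --- Added note (not part of the original record) ---
# The ShipStation calls above all authenticate with HTTP Basic auth; requests
# accepts a plain (user, password) tuple as shorthand for HTTPBasicAuth. A
# minimal sketch of one such poll, with placeholder credentials (the endpoint
# and shipDateStart parameter are taken from the record above):
import requests

API_KEY, API_SECRET = "my-key", "my-secret"   # hypothetical credentials
resp = requests.get("https://ssapi.shipstation.com/shipments",
                    params={"shipDateStart": "2022-08-07"},
                    auth=(API_KEY, API_SECRET))
if resp.status_code == 200:
    for shipment in resp.json().get("shipments", []):
        print(shipment.get("trackingNumber"))
else:
    print("request failed:", resp.status_code, resp.text)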
shipstation_settings.get_password('api_secret')))'''\n\n@frappe.whitelist(allow_guest=True)\ndef connect():\n\tresponse = requests.get('https://ssapi.shipstation.com/orders',\n\t\t\t\tauth=('', ''))\t\n\tprint(response)\n\tprint(response.json())\n\t\n@frappe.whitelist()\ndef create_shipstation_orders(order_no=None, is_cancelled=False):\n\t#order_no = 'MAT-DN-2022-00071'\n\tif order_no is not None:\n\t\torder = frappe.get_doc('Delivery Note', order_no)\n\t\tif order.get('is_return') == 1:\n\t\t\treturn\n\t\tsource = None\n\t\tif order.get('source') is not None:\n\t\t\tsource = order.get('source')\n\t\tshipstation_settings = get_settings(source)\n\t\tif len(shipstation_settings) == 0:\n\t\t\treturn\n\t\t\n\t\t#Determine already set orderIDs (from previous requests)\n\t\torderIds = []\n\t\tfor row in order.ais_shipstation_order_ids:\n\t\t\torderIds.append(row.settings_id)\n\t\t\n\t\tfor settings in shipstation_settings:\n\t\t\tdata = order_json(order, is_cancelled, settings)\n\t\t\torders_url = 'https://ssapi.shipstation.com/orders/createorder'\n\t\t\tresponse = requests.post(orders_url,\n\t\t\t\t\t\tauth=(settings.api_key, settings.get_password('api_secret')),\n\t\t\t\t\t\tjson=data)\n\t\t\tif response.status_code == 200:\n\t\t\t\t#To prevent adding orderIds multiple times\n\t\t\t\tif settings.name not in orderIds: \n\t\t\t\t\tsorder = response.json()\n\t\t\t\t\t#frappe.db.set_value('Delivery Note', order_no, \"ais_shipstation_orderid\", sorder.get('orderId'))\n\t\t\t\t\torder_table = frappe.new_doc('Shipstation Order ID', order, 'ais_shipstation_order_ids')\n\t\t\t\t\torder_table.update({\n\t\t\t\t\t\t\t\t\t'settings_id': settings.name,\n\t\t\t\t\t\t\t\t\t'shipstation_order_id': sorder.get('orderId')\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\torder_table.save()\n\t\t\telse:\n\t\t\t\t#Add it to Shipstation API requests for troubleshooting\n\t\t\t\tnew_req = frappe.new_doc('Shipstation API Requests')\n\t\t\t\tnew_req.update({\n\t\t\t\t\t\"resource_url\": orders_url,\n\t\t\t\t\t\"resource_type\": 'CREATE_ORDER',\n\t\t\t\t\t\"result\": response.text,\n\t\t\t\t\t\"reference_type\": \"Delivery Note\",\n\t\t\t\t\t\"reference_name\": order_no\n\t\t\t\t})\n\t\t\t\tnew_req.insert(ignore_permissions=True)\n\t\t\n\t\n\t\n\ndef order_json(order, is_cancelled, settings):\n\t#order = frappe.get_doc('Delivery Note', order_no)\n\t\n\t#Order no is either pick list name or delivery note name\n\torder_no = None\n\tif order.pick_list and order.pick_list is not None:\n\t\torder_no = order.pick_list\n\telse:\n\t\torder_no = order.name\n\t\t\n\torderStatus = \"awaiting_shipment\"\n\tif is_cancelled:\n\t\torderStatus = \"cancelled\"\n\t\n\t#For shipping and taxes charges\n\tshipping_settings = settings.shipping_charges_specified\n\tshipping_item = None\n\tshipping_charges = 0\n\ttaxes = 0\n\tif shipping_settings == 'In Item Table':\n\t\tshipping_item = settings.shipping_item\n\t\ttaxes = order.total_taxes_and_charges\n\telif shipping_settings == 'In Charges Table':\n\t\tshipping_item = settings.shipping_charge\n\t\tfor charge in order.taxes:\n\t\t\tif charge.account_head == shipping_item:\n\t\t\t\tshipping_charges = charge.tax_amount_after_discount_amount\n\t\t\telse:\n\t\t\t\ttaxes = taxes + float(charge.tax_amount_after_discount_amount)\n\t\n\t#For address\n\t'''customer_address = {\n\t\t'address_line1': None,\n\t\t'address_line2': None,\n\t\t'city': None,\n\t\t'state': None,\n\t\t'pincode': None,\n\t\t'phone': None,\n\t\t'email_id': None\n\t}'''\n\tcustomer_address = {}\n\tcustomer_country = None\n\tif 
order.customer_address and order.customer_address is not None:\n\t\tcustomer_address = frappe.get_doc('Address', order.customer_address)\n\t\tcustomer_country = frappe.get_value('Country', customer_address.country, \"code\")\n\t\tcustomer_country = customer_country.upper()\n\t\n\t#Get shipping address, if none, use customer address\n\tshipping_address = {}\n\tshipping_country = None\n\tif order.shipping_address_name and order.shipping_address_name is not None:\n\t\tshipping_address = frappe.get_doc('Address', order.shipping_address_name)\n\t\tshipping_country = frappe.get_value('Country', shipping_address.country, \"code\")\n\t\tshipping_country = shipping_country.upper()\n\telif order.customer_address and order.customer_address is not None:\n\t\tshipping_address = customer_address\n\t\tshipping_country = frappe.get_value('Country', shipping_address.country, \"code\")\n\t\tshipping_country = shipping_country.upper()\n\t\t\n\t#For stores\n\tstoreId = None\n\tif order.source and order.source is not None:\n\t\tfor store in settings.store_mapping:\n\t\t\tif order.source == store.source:\n\t\t\t\tstoreId = store.store_id\n\t\t\n\titems = []\n\tfor item in order.items:\n\t\t#Check if it is a shipping item\n\t\tif shipping_settings == 'In Item Table' and item.item_code == shipping_item:\n\t\t\tshipping_charges = item.amount\n\t\telse:\n\t\t\trow = {}\n\t\t\trow.update({\n\t\t\t\t\"lineItemKey\": item.name,\n\t\t\t\t\"sku\": item.item_code,\n\t\t\t\t\"name\": item.item_name,\n\t\t\t\t\"imageUrl\": item.get('image'),\n\t\t\t\t\"weight\": None,\n\t\t\t\t\"quantity\": int(item.qty),\n\t\t\t\t\"unitPrice\": float(item.rate),\n\t\t\t\t\"taxAmount\": None,\n\t\t\t\t\"shippingAmount\": None,\n\t\t\t\t\"warehouseLocation\": None,\n\t\t\t\t\"options\": None,\n\t\t\t\t\"productId\": None,\n\t\t\t\t\"fulfillmentSku\": None,\n\t\t\t\t\"adjustment\": False,\n\t\t\t\t\"upc\": None\n\t\t\t})\n\t\t\titems.append(row)\n\tdata = {}\n\t\n\t# Get customer and shipping name from the addresses\n\tcustomer_name = \"{} {}\".format(customer_address.get(\"ifw_first_name\"), customer_address.get(\"ifw_last_name\"))\n\tshipping_name = \"{} {}\".format(shipping_address.get(\"ifw_first_name\"), shipping_address.get(\"ifw_last_name\"))\n\t\n\tif customer_name.strip() == \"\":\n\t\tcustomer_name = order.customer\n\t\t\n\tif shipping_name.strip() == \"\":\n\t\tshipping_name = order.customer\n\t\n\tdata.update({\n\t\t\"orderNumber\": order_no,\n\t\t\"orderKey\": order_no,\n\t\t\"orderDate\": str(order.posting_date),\n\t\t\"paymentDate\": None,\n\t\t\"shipByDate\": \"\",\n\t\t\"orderStatus\": orderStatus,\n\t\t\"customerUsername\": order.customer,\n\t\t\"customerEmail\": customer_address.get('email_id'),\n\t\t\"billTo\": {\n\t\t\t\"name\": customer_name,\n\t\t\t\"company\": '',\n\t\t\t\"street1\": customer_address.get('address_line1'),\n\t\t\t\"street2\": customer_address.get('address_line2'),\n\t\t\t\"street3\": '',\n\t\t\t\"city\": customer_address.get('city'),\n\t\t\t\"state\": customer_address.get('state'),\n\t\t\t\"postalCode\": customer_address.get('pincode'),\n\t\t\t\"country\": customer_country,\n\t\t\t\"phone\": customer_address.get('phone'),\n\t\t\t\"residential\": None\n\t\t},\n\t\t\"shipTo\": {\n\t\t\t\"name\": shipping_name,\n\t\t\t\"company\": \"\",\n\t\t\t\"street1\": shipping_address.get('address_line1'),\n\t\t\t\"street2\": shipping_address.get('address_line2'),\n\t\t\t\"street3\": '',\n\t\t\t\"city\": shipping_address.get('city'),\n\t\t\t\"state\": shipping_address.get('state'),\n\t\t\t\"postalCode\": 
shipping_address.get('pincode'),\n\t\t\t\"country\": shipping_country,\n\t\t\t\"phone\": shipping_address.get('phone'),\n\t\t\t\"residential\": None\n\t\t},\n\t\t\"items\": items,\n\t\t\"amountPaid\": order.grand_total,\n\t\t\"taxAmount\": float(taxes),\n\t\t\"shippingAmount\": float(shipping_charges),\n\t\t\"customerNotes\": None,\n\t\t\"internalNotes\": None,\n\t\t\"gift\": False,\n\t\t\"giftMessage\": None,\n\t\t\"paymentMethod\": None,\n\t\t\"requestedShippingService\": None,\n\t\t\"carrierCode\": None,\n\t\t\"serviceCode\": None,\n\t\t\"packageCode\": None,\n\t\t\"confirmation\": \"none\",\n\t\t\"shipDate\": None,\n\t\t\"weight\": None,\n\t\t\"dimensions\": None,\n\t\t\"advancedOptions\": {\n\t\t\t\"storeId\": storeId\n\t\t}\n\t})\n\treturn data\n\ndef get_settings(source=None, settingid=None):\n\tsettings = []\n\tif source is not None:\n\t\tparents = frappe.db.sql('''SELECT ssm.parent FROM `tabShipstation Store Map` AS ssm \n\t\t\t\t\tLEFT JOIN \n\t\t\t\t\t\t`tabShipstation Settings` AS ss ON ssm.parent = ss.name\n\t\t\t\t\tWHERE \n\t\t\t\t\t\tssm.source = %(source)s AND ss.disabled = 0''', {\"source\": source}, as_dict=1)\n\t\tif len(parents) > 0:\n\t\t\tfor parent in parents:\n\t\t\t\tret = frappe.get_doc('Shipstation Settings', parent.parent)\n\t\t\t\tsettings.append(ret)\n\t\t\t\n\tif settingid is not None:\n\t\tret = frappe.get_doc('Shipstation Settings', settingid)\n\t\tsettings.append(ret)\n\t\t\n\tif len(settings) == 0 and settingid is None:\n\t\tdefault = frappe.db.get_value('Shipstation Settings', {\"is_default\": 1, \"disabled\": 0})\n\t\tif default:\n\t\t\tret = frappe.get_doc('Shipstation Settings', default)\n\t\t\tsettings.append(ret)\n\t\t\n\treturn settings\n\t\n@frappe.whitelist(allow_guest=True)\ndef orders_shipped_webhook():\n\turl = urlparse(frappe.request.url)\n\tparams = parse_qs(url.query)\n\tsettingid = params.get(\"settingid\")\n\tdata = json.loads(frappe.request.data)\n\tresource_url = data.get(\"resource_url\")\n\tresource_type = data.get(\"resource_type\")\n\tif settingid is not None:\n\t\tsettings = get_settings(settingid=settingid[0])\n\t\tif len(settings) > 0:\n\t\t\tfrappe.set_user(settings[0].shipstation_user)\n\t\t\t#Log the request\n\t\t\tnew_req = frappe.get_doc({\n\t\t\t\t\"doctype\": \"Shipstation API Requests\",\n\t\t\t\t\"resource_url\": resource_url,\n\t\t\t\t\"resource_type\": resource_type,\n\t\t\t\t\"settingid\": settingid[0]\n\t\t\t})\n\t\t\tif resource_type == 'SHIP_NOTIFY':\n\t\t\t\tresponse = requests.get(resource_url,\n\t\t\t\t\t\t\tauth=(settings[0].api_key, settings[0].get_password('api_secret')))\n\t\t\t\tnew_req.update({\n\t\t\t\t\t\"result\": json.dumps(response.json())\n\t\t\t\t})\n\t\t\t\tshipments = response.json()\n\t\t\t\tweight_display = ''\n\t\t\t\tsize = ''\n\t\t\t\tfor shipment in shipments.get('shipments'):\n\t\t\t\t\tweight = shipment.get('weight')\n\t\t\t\t\tif weight_display != '':\n\t\t\t\t\t\tweight_display =+ ' | '\n\t\t\t\t\tweight_display += str(weight.get('value')) + ' ' + weight.get('units')\n\t\t\t\t\tdimensions = shipment.get('dimensions')\n\t\t\t\t\tif size != '':\n\t\t\t\t\t\tsize += ' | '\n\t\t\t\t\tsize += str(dimensions.get('length')) + 'l x ' + str(dimensions.get('width')) + 'w x ' + str(dimensions.get('height')) + 'h'\n\t\t\t\t\t\n\t\t\t\t\t#For carrier mapping\n\t\t\t\t\ttransporter = ''\n\t\t\t\t\tfor row in settings[0].transporter_mapping:\n\t\t\t\t\t\tif row.carrier_code == shipment.get('carrierCode'):\n\t\t\t\t\t\t\ttransporter = row.transporter\n\t\t\t\t\tpick_list = 
shipment.get('orderNumber')\n\t\t\t\t\tshipDate = shipment.get('shipDate')\n\t\t\t\t\ttrackingNumber = shipment.get('trackingNumber')\n\t\t\t\t\tshipmentCost = shipment.get('shipmentCost')\n\t\t\t\t\t\n\t\t\t\t\t#Update delivery note\n\t\t\t\t\texisting_delivery = frappe.db.get_value('Delivery Note', {'pick_list': pick_list})\n\t\t\t\t\tif existing_delivery:\n\t\t\t\t\t\tdelivery_note = frappe.get_doc('Delivery Note', existing_delivery)\n\t\t\t\t\t\tdelivery_note.update({\n\t\t\t\t\t\t\t'lr_date': shipDate,\n\t\t\t\t\t\t\t'lr_no': trackingNumber,\n\t\t\t\t\t\t\t'transporter': transporter,\n\t\t\t\t\t\t\t'ais_shipment_cost': shipmentCost,\n\t\t\t\t\t\t\t'ais_package_weight': weight_display,\n\t\t\t\t\t\t\t'ais_package_size': size,\n\t\t\t\t\t\t\t'ais_updated_by_shipstation': 1,\n\t\t\t\t\t\t\t'ignore_pricing_rule': 1\n\t\t\t\t\t\t})\n\t\t\t\t\t\tdelivery_note.submit()\n\t\t\t\t\t\t\n\t\t\t\t\t\t#Add reference to Shipstation API Requests\n\t\t\t\t\t\tnew_req.update({\n\t\t\t\t\t\t\t'reference_type': 'Delivery Note',\n\t\t\t\t\t\t\t'reference_name': existing_delivery\n\t\t\t\t\t\t})\n\t\t\t\t\t\t\n\t\t\t\t\t\t#Delete order from other shipstation accounts\n\t\t\t\t\t\tfor row in delivery_note.get('ais_shipstation_order_ids'):\n\t\t\t\t\t\t\tif row.settings_id != settingid[0]:\n\t\t\t\t\t\t\t\tshipstation_settings = frappe.get_doc('Shipstation Settings', row.settings_id)\n\t\t\t\t\t\t\t\tif shipstation_settings.disabled != 1:\n\t\t\t\t\t\t\t\t\tresponse = requests.delete('https://ssapi.shipstation.com/orders/' + row.shipstation_order_id,\n\t\t\t\t\t\t\t\t\t\tauth=(shipstation_settings.api_key, shipstation_settings.get_password('api_secret')))\n\t\t\t\t\t\t\t\n\t\t\tnew_req.insert(ignore_if_duplicate=True)\n\t\n\t\n@frappe.whitelist(allow_guest=True)\ndef shipstation_xml():\n\troot = etree.Element(\"Orders\")\n\tout = etree.tostring(root, pretty_print=True)\n\tresponse = Response()\n\tresponse.mimetype = \"text/xml\"\n\tresponse.charset = \"utf-8\"\n\tresponse.data = out\n\treturn response\n\t\n@frappe.whitelist()\ndef get_shipment():\n\tresponse = requests.get('https://ssapi6.shipstation.com/shipments?batchId=187980859&includeShipmentItems=False',\n\t\t\t\tauth=('249b9201157349939742f12101a8cc80', '1d7b6409ba6e41e1aeae73b97384613d'))\n\tprint(response.status_code)\n\tprint(response.json())\n\tshipments = response.json()\n\tfor shipment in shipments.get('shipments'):\n\t\texisting_delivery = frappe.db.get_value('Delivery Note', {'po_no': shipment.get('orderNumber'), 'docstatus': 0})\n\t\tif existing_delivery:\n\t\t\tdelivery_note = frappe.get_doc('Delivery Note', existing_delivery)\n\t\t\tdelivery_note.update({\n\t\t\t\t'lr_date': shipment.get('shipDate'),\n\t\t\t\t'lr_no': shipment.get('trackingNumber')\n\t\t\t})\n\t\t\tdelivery_note.save()\n\t\t\t\ndef delete_order(order_no):\n\t#order_no = 'MAT-DN-2021-00030'\n\torder = frappe.get_doc('Delivery Note', order_no)\n\tfor row in order.get('ais_shipstation_order_ids'):\n\t\tsettings = frappe.get_doc('Shipstation Settings', row.settings_id)\n\t\tif settings.disabled == 0:\n\t\t\tresponse = requests.delete('https://ssapi.shipstation.com/orders/' + row.shipstation_order_id,\n\t\t\t\tauth=(settings.api_key, settings.get_password('api_secret')))\n","repo_name":"GPD-ERP/metactical","sub_path":"metactical/api/shipstation.py","file_name":"shipstation.py","file_ext":"py","file_size_in_byte":15750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7814792869","text":"#!/usr/bin/python3\n\ndef 
codegen(postfix):\n    \"\"\"\n    This function takes a list as a parameter, then writes the expression to a\n    file, evaluating it as a stack\n    \"\"\"\n    with open(\"a.out\", \"w\") as file:\n        i = 1\n        cpt_t = 0\n        file.write(\"#!/usr/bin/python3\\n\\n\")\n        for elem in postfix:\n            if elem in ['VRAI', 'FAUX']:\n                if elem == \"VRAI\":\n                    boolean = \"True\"\n                else:\n                    boolean = \"False\"\n                file.write(\"t{} = {}\\n\".format(i, boolean))\n                cpt_t += 1\n                i += 1\n            elif elem in [\"OU\", \"ET\"]:\n                if elem == \"OU\":\n                    file.write(\"t{} = t{} or t{}\\n\".format(cpt_t-1, cpt_t-1, cpt_t))\n                elif elem == \"ET\":\n                    file.write(\"t{} = t{} and t{}\\n\".format(cpt_t-1, cpt_t-1, cpt_t))\n\n                cpt_t -= 1\n                i -= 1\n\n            elif elem == \"NON\":\n                file.write(\"t{} = not t{}\\n\".format(cpt_t, cpt_t))\n\n        file.write(\"print(t{})\".format(cpt_t))\n","repo_name":"BatBapt/traducteur_simple","sub_path":"bool_exp/codegen.py","file_name":"codegen.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4176137321","text":"from traceback import extract_tb\nimport sys\n\n\nclass ErrorHandler(object):\n    \"\"\"\n    Development mode error handler. Formats an exception traceback for\n    generating an rpc error response suitable for displaying it on the client.\n    \"\"\"\n\n    def __init__(self, root, user_filenames):\n        \"\"\"\n        :param root: the root directory of the rpc package. This handler will\n        strip this path from errors that occur under this package.\n        :type root: str\n        :param user_filenames: The file paths of the configured services.\n        :type user_filenames: list of str\n        \"\"\"\n        self.filename_mapping = {}\n        for user_filename in user_filenames:\n            if user_filename.endswith('.pyc'):\n                user_filename = user_filename[:-1]\n\n            if user_filename.startswith(root):\n                mapped_filename = user_filename[len(root):]\n            else:\n                mapped_filename = user_filename\n\n            self.filename_mapping[user_filename] = mapped_filename\n\n    def format_trace(self, traceback):\n        \"\"\"\n        Format a traceback for sending it to the client. 
Only show application\n errors, stripping frames belonging to system or 3rd party libs.\n \"\"\"\n formatted = []\n for filename, line_number, function_name, code in extract_tb(traceback):\n if filename in self.filename_mapping:\n formatted.append(dict(filename=self.filename_mapping[filename],\n line=line_number, function=function_name,\n code=code))\n return formatted\n\n def get_error_response(self):\n \"\"\"Build an rpc error response based on the latest exception.\"\"\"\n exception_type, value, traceback = sys.exc_info()\n return dict(success=False,\n error=dict(type=exception_type.__name__,\n message=value.message,\n traceback=self.format_trace(traceback))\n )\n\n","repo_name":"rdarder/rpc","sub_path":"rpc/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73698529421","text":"import numpy as np\n\nclass BackPropagationAlgo:\n\n def Sigmoid(self, net):\n F = 1 / (1 + np.exp(-net))\n return F\n\n def TanH(self, net):\n F = (1 - np.exp(-net)) / (1 + np.exp(-net))\n return F\n\n def StartBackpropagationAlgo(self, TrainData, NumHiddenLayers, neuron_in_hidden_layers, ActivationFun, eta, epochs, BiasCheck):\n # create_random weights\n weights = []\n input_class = 5\n for i in range(NumHiddenLayers):\n rand_matrix = np.random.uniform(-1, 1, size=(neuron_in_hidden_layers[i], input_class + int(BiasCheck)))\n input_class = neuron_in_hidden_layers[i]\n weights.append(rand_matrix)\n\n weights.append(np.random.rand(3, input_class + int(BiasCheck))) # output weights\n\n for epoch in range(epochs):\n for i in range(len(TrainData)):\n x = TrainData[i][0:-1]\n y = TrainData[i][-1]\n NeuronsOutputs = self.Feedforward(x, weights, ActivationFun, BiasCheck)\n weights = self.BackPropagate(x, y, weights, NeuronsOutputs, ActivationFun, eta, BiasCheck, NumHiddenLayers)\n\n return weights\n\n def Feedforward(self, x, weights, active_func, bias_check):\n neurons_outputs = [] # list of neurons outputs from activation function\n for i in range(len(weights)):\n AllNetValues = np.dot(weights[i], x) # list\n Y_Activaton = []\n if bias_check and (i != len(weights) - 1):\n Y_Activaton.append(1)\n for n in AllNetValues:\n if active_func == 1:\n output = self.Sigmoid(n)\n Y_Activaton.append(output)\n else:\n output = self.TanH(n)\n Y_Activaton.append(output)\n\n x = Y_Activaton\n neurons_outputs.append(Y_Activaton)\n\n return neurons_outputs\n\n def OutputLayerError(self, y, revesoutput, ActivationFun):\n error = []\n for i in range(len(y)):\n y_acual = y[i] # y_acual\n y_predc = revesoutput[i] # y_predc\n\n if ActivationFun:\n e = (y_acual - y_predc) * y_predc * (1 - y_predc)\n error.append(e)\n else:\n e = (y_acual - y_predc) * (1 - (y_predc*y_predc))\n error.append(e)\n return error\n\n def HiddenLayerError(self, NumHiddenLayer, Nexterror, NodeWeights, revesoutput, ActivationFun, bias):\n error = []\n for i in range(NumHiddenLayer):\n if bias == 1:\n NodeWeights[i] = NodeWeights[i][:, 1:]\n\n shape = NodeWeights[i].shape\n w = np.reshape(NodeWeights[i], (shape[1], shape[0]))\n e = np.dot(w, Nexterror)\n\n Y_Act = []\n if ActivationFun:\n for c in range(len(revesoutput[i+1])):\n tmp = revesoutput[i+1][c] * (1-revesoutput[i+1][c])\n Y_Act.append(tmp)\n\n if bias == 1:\n Y_Act.pop(0)\n\n Final_E = e*Y_Act\n error.append(Final_E)\n else:\n for c in range(len(revesoutput[i+1])):\n tmp = 1 - (revesoutput[i + 1][c] * revesoutput[i + 1][c])\n Y_Act.append(tmp)\n\n if bias == 1:\n 
Y_Act.pop(0)\n\n                Final_E = e * Y_Act\n                error.append(Final_E)\n            Nexterror = Final_E\n        return error\n\n    def BackPropagate(self, x, y, weights, outputs, activate_func, eta, bias_check, NumHiddenLayer):\n        output_layer = outputs[-1]\n        finalf = []\n        for value in output_layer:\n            e = np.max(outputs[-1])\n            if value == e:\n                finalf.append(1)\n            else:\n                finalf.append(0)\n\n        check = 0\n        for i, j in zip(y, finalf):\n            if i == j:\n                check += 1\n\n        if len(y) != check:\n            output2 = outputs.copy() # output reversed\n            output2.reverse()\n\n            # here start calc error in the output layer\n            OutputNodeError = self.OutputLayerError(y, output2[0], activate_func)\n\n            weight2 = weights.copy() # weight reversed\n            weight2.reverse()\n            NodeWeights = weight2.copy()\n\n            output2.append(x)\n\n            # here start calc error in the hidden layers\n            Nodes_error = self.HiddenLayerError(NumHiddenLayer, OutputNodeError, NodeWeights, output2, activate_func, bias_check)\n            count=0\n            for Layer in range(len(output2) - 1): # start from the hidden layer which is before the output layer\n                if Layer == 0:\n                    for j in range(len(output2[Layer + 1])):\n                        XInput = output2[Layer + 1][j]\n                        for k in range(len(output2[Layer])):\n                            weight2[Layer][k, j] = weight2[Layer][k, j] + eta * OutputNodeError[k] * XInput\n                else:\n                    LayerError = []\n                    LayerErrorTmp = Nodes_error[count]\n                    for g in range(len(LayerErrorTmp)):\n                        LayerError.append(LayerErrorTmp[g])\n\n                    PrvLayer = len(output2[Layer])\n\n                    if bias_check == 1:\n                        LayerError.append(1)\n                        PrvLayer = PrvLayer - 1\n\n                    for j in range(len(output2[Layer + 1])):\n                        XInput = output2[Layer + 1][j]\n                        for k in range(PrvLayer):\n                            weight2[Layer][k, j] = weight2[Layer][k, j] + eta * LayerError[k] * XInput\n                    count += 1\n            #print(\"grab it and run\")\n\n            weights = weight2.copy()\n            weights.reverse()\n\n        return weights","repo_name":"EssamSheriff/Backpropagation-algorithm","sub_path":"backpropagationalgo.py","file_name":"backpropagationalgo.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30255886442","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__Lisence__ = \"BSD\"\n__maintainer__ = \"Justin Furuness\"\n__email__ = \"jfuruness@gmail.com, agorbenko97@gmail.com\"\n__status__ = \"Development\"\n\nfrom math import e\nimport random\n\nfrom matplotlib.pyplot import Circle, text\n\nclass Anim_User:\n    \"\"\"Animated User\"\"\"\n\n    patch_radius = 1\n    patch_padding = .25\n    # Changing colors, or even removing color, made it slower\n    og_face_color = \"g\"\n    disconnected_location = (-10, -10)\n    # Needs different locs for disconnected and detected\n    # Because based on location we animate\n    detected_location = (-20, -20)\n\n    def __init__(self, id, og_anim_bucket, high_res=False):\n        \"\"\"Stores user values\"\"\"\n\n        self.high_res = high_res\n        # Used to differentiate users\n        self.id = id\n        # Used to track suspicions\n        self.suspicions = []\n        # Used to track location\n        self.points = []\n\n        if og_anim_bucket:\n            center_x = og_anim_bucket.patch_center()\n        else:\n            center_x = self.disconnected_location[0]\n\n        self.patch = Circle((center_x, 5),\n                            Anim_User.patch_radius,\n                            fc=Anim_User.og_face_color)\n\n        if self.high_res:\n            self.text = text(center_x,\n                             5,\n                             self.id,\n                             horizontalalignment=\"center\",\n                             verticalalignment=\"center\")\n\n    @property\n    def anim_objects(self):\n        \"\"\"Animation objects used by the animation\"\"\"\n\n        return [self.patch, self.text] if self.high_res else [self.patch]\n\n    @staticmethod\n    def patch_length():\n        \"\"\"Returns animation object 
length\"\"\"\n\n return Anim_User.patch_radius * 2 + Anim_User.patch_padding * 2\n\n def add_to_anim(self, ax, zorder):\n \"\"\"Adds user patches to animation\"\"\"\n\n # Add user patch\n self.patch.center = self.points[0]\n ax.add_patch(self.patch)\n self.patch.set_zorder(zorder)\n self.patch.set_facecolor(self.og_face_color)\n if self.high_res:\n # Add text. X is already set properly.\n self.text.set_y(self.points[0][1])\n self.text.set_zorder(zorder + 1)\n\n return zorder + 2\n\n def animate(self,\n frame, # Frame\n frames_per_round, # Frames per round\n track_sus, # Track suspicion\n *args,\n ):\n\n detected_loc = self.detected_location\n disconnected_loc = self.disconnected_location\n\n current_pt, future_pt = self._get_points(frame, frames_per_round)\n # If the points aren't the same or we're in the middle of a round\n if current_pt != future_pt or frame % frames_per_round != 0:\n self._move_user(current_pt, future_pt, frame, frames_per_round)\n\n # At the start of the round\n if frame % frames_per_round == 0:\n self._take_action(current_pt, future_pt)\n self._update_sus(track_sus, frame, frames_per_round)\n\n def _get_points(self, frame, frames_per_round):\n # Gets current point\n current_point = self.points[frame // frames_per_round]\n # Gets future point\n future_point = self.points[(frame // frames_per_round) + 1]\n return current_point, future_point\n\n def _move_user(self,\n cur_pt, # Current point\n future_pt, # Future point\n f, # Frame\n fpr # Frames per round\n ):\n\n next_point = self._get_next_point(cur_pt, future_pt, f, fpr)\n # Set the center\n self.patch.center = next_point\n if self.high_res:\n self.text.set_x(next_point[0])\n self.text.set_y(next_point[1])\n\n def _get_next_point(self,\n cur_pt, # Current point\n future_pt, # Future point\n f, # Frame\n fpr # Frames per round\n ):\n \"\"\"Gets next point using math equation\n\n probably distance along two points or something like that\n \"\"\"\n\n\n # Frames left in round\n remainder = f - ((f // fpr) * fpr)\n\n # Get the next point for x\n next_point_x1_contr = cur_pt[0] * ((fpr - remainder) / fpr)\n next_point_x2_contr = future_pt[0] * (remainder / fpr)\n\n # Get the next point for y\n next_point_y1_contr = cur_pt[1] * ((fpr - remainder) / fpr)\n next_point_y2_contr = future_pt[1] * (remainder / fpr)\n\n # Next point for the frame, not for the round\n # inbetween current and future point\n return (next_point_x1_contr + next_point_x2_contr,\n next_point_y1_contr + next_point_y2_contr)\n\n def _take_action(self, cur_pt, future_pt):\n\n detected_loc = self.detected_location\n disconnected_loc = self.disconnected_location\n\n # If we're going to the detected location\n if future_pt == detected_loc and cur_pt != detected_loc:\n self._become_detected()\n elif future_pt == disconnected_loc and cur_pt != disconnected_loc:\n self._become_disconnected()\n\n def _update_sus(self, track_sus, frame, frames_per_round):\n if track_sus and self.high_res:\n text = f\"{self.suspicions[(frame//frames_per_round) + 1]:.1f}\"\n self.text.set_text(f\"{self.id:2.0f}:{text}\")\n\n def _become_detected(self):\n \"\"\"Sets animation to detected\"\"\"\n\n if self.high_res:\n self.text.set_text(\"Detected\")\n self.patch.set_facecolor(\"grey\")\n\n def _become_disconnected(self):\n\n if self.high_res:\n self.text.set_text(\"Disconnected\")\n 
self.patch.set_facecolor(\"purple\")\n","repo_name":"jfuruness/lib_ddos_simulator","sub_path":"lib_ddos_simulator/animations/anim_user.py","file_name":"anim_user.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18293859944","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom Actress.items import ActressItem\n\n\nclass ActressSpiderSpider(scrapy.Spider):\n    name = 'actress_spider'\n    allowed_domains = ['javbus.com']\n    start_urls = [\n        'https://www.javbus.com/actresses',\n        'https://www.javbus.com/uncensored/actresses'\n    ]\n\n    def parse(self, response):\n        hrefs = response.xpath('//a[contains(@class, \"avatar-box\")]/@href').extract()\n        for href in hrefs:\n            yield scrapy.Request(url=href, callback=self.parse_detail)\n\n        next_page = response.xpath('//a[@id=\"next\"]/@href').extract_first()\n        if next_page is not None:\n            next_url = response.urljoin(next_page)\n            yield scrapy.Request(url=next_url, callback=self.parse)\n\n    def parse_detail(self, response):\n        item = ActressItem()\n        name = response.xpath('//span[contains(@class, \"pb10\")]/text()').extract_first()\n\n        # The information below is inaccurate, handle it later\n        # birth = response.xpath('//div[@class=\"photo-info\"]//p[1]/text()').extract_first()\n        # age = response.xpath('//div[@class=\"photo-info\"]//p[2]/text()').extract_first()\n        # # waist\n        # waist = response.xpath('//div[@class=\"photo-info\"]//p[6]/text()').extract_first()\n        # # cup size\n        # cup = response.xpath('//div[@class=\"photo-info\"]//p[4]/text()').extract_first()\n        # # bust\n        # bust = response.xpath('//div[@class=\"photo-info\"]//p[5]/text()').extract_first()\n        # # hips\n        # hips = response.xpath('//div[@class=\"photo-info\"]//p[7]/text()').extract_first()\n        # # height\n        # height = response.xpath('//div[@class=\"photo-info\"]//p[3]/text()').extract_first()\n        # the actress's film titles\n        film_list = response.xpath(\"//a[@class='movie-box']//span/text()\").extract()\n        # film product codes\n        tags = response.xpath(\"//a[@class='movie-box']//date[1]/text()\").extract()\n        # film release dates\n        dates = response.xpath(\"//a[@class='movie-box']//date[2]/text()\").extract()\n\n        item['name'] = name\n        # item['age'] = age\n        # item['birth'] = birth\n        # item['waist'] = waist\n        # item['hips'] = hips\n        # item['cup'] = cup\n        # item['bust'] = bust\n        # item['height'] = height\n\n        films = []\n        for film in film_list:\n            # strip newlines and other stray characters\n            afilm = film.strip('\\t \\n \\r '' /')\n            if afilm != '':\n                films.append(afilm)\n        for j in range(len(films)):\n            item['film'] = films[j]\n            item['tag'] = tags[j]\n            item['date'] = dates[j]\n            yield item\n        next_page = response.xpath(\"//a[@id='next']/@href\").extract_first()\n        if next_page:\n            next_url = response.urljoin(next_page)\n            yield scrapy.Request(url=next_url, callback=self.parse_detail)\n\n\n\n\n","repo_name":"Gaterny/ActressSearch","sub_path":"Actress/Actress/spiders/actress_spider.py","file_name":"actress_spider.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32200355567","text":"import time\nimport datetime\nfrom tkinter import filedialog\nfrom tkinter import *\nimport re\n\n\ndef progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n    \"\"\"\n    Call in a loop to create terminal progress bar\n    @params:\n        iteration   - Required  : current iteration (Int)\n        total       - Required  : total iterations (Int)\n        prefix      - Optional  : prefix string (Str)\n        suffix      - Optional  : suffix string (Str)\n        decimals    - Optional  : 
positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n \"\"\"\n total = len(iterable)\n # Progress Bar Printing Function\n def printProgressBar (iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()\n\n\ndef get_run_data():\n # get seq run id next\n platform = \"ClearLabs\"\n seq_run_id = \"\"\n ask = True\n while ask:\n seq_run_id = input(\"\\nPlease copy/paste the seq_run_id value from the ClearLabs website below\\nExample: Run BB1L12.2021-06-16.01\\n--> \")\n \n # check that input is valid\n if not re.search(\"Run BB\\dL\\d{2}.\\d{4}-\\d{2}-\\d{2}.\\d{2}\", seq_run_id):\n print(\"Invalid input, try again.\")\n else:\n ask = False\n \n # now, pull meaningful information out of supplied data\n machine_num = seq_run_id[8:10]\n run_date = datetime.datetime.strptime(seq_run_id[11:21], '%Y-%m-%d').strftime(\"%m/%d/%Y\")\n day_run_num = int(seq_run_id[-2:])\n\n # get the run data from clearlabs21\n ask = True\n print(\"\\nPlease copy/paste all run data from the clearlabs website below\\n\")\n c = 0\n pos_dict = {\"A\":1, \"B\":2, \"C\":3, \"D\":4, \"E\":5, \"F\":6, \"G\":7, \"H\":8}\n run_data = {\"hsn\":[], \"position\":[], \"avg_depth\":[], \"percent_cvg\":[]}\n while c < 224:\n u_input = input(\"\")\n if c % 7 == 0: # it is a seq_run_position\n # format input first\n pos = (int(u_input[-1])*8 - 8) + pos_dict[u_input[0]]\n run_data[\"position\"].append(pos)\n elif c % 7 == 1: # it is an hsn\n hsn = \"\"\n if re.search(\"\\d{7}..\", u_input):\n hsn = u_input[0:-2]\n else:\n hsn = u_input\n run_data[\"hsn\"].append(hsn)\n elif c % 7 == 3: # it is depth\n depth = u_input.replace(\"x\", \"\")\n run_data[\"avg_depth\"].append(int(depth))\n elif c % 7 == 4: # it is coverage\n coverage = u_input.replace(\"%\", \"\")\n coverage = float(coverage)/100\n run_data[\"percent_cvg\"].append(coverage)\n else:\n pass\n c += 1\n \n return run_data, machine_num, run_date, day_run_num, platform ;\n\n\ndef get_path():\n time.sleep(1)\n print(\"Opening dialog box...\")\n time.sleep(1)\n root = Tk()\n root.withdraw()\n path_read = filedialog.askopenfilename()\n return path_read\n\n\ndef get_path_folder():\n time.sleep(1)\n print(\"Opening dialog box...\")\n time.sleep(1)\n root = Tk()\n root.withdraw()\n path = filedialog.askdirectory()\n return path\n\n","repo_name":"jbarnell1/KHEL_WGS_SARS_CoV_2","sub_path":"khel_wgs_sc2/workflow/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13403025623","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 4 19:23:24 2019\r\n\r\n@author: jasmine\r\n\"\"\"\r\n\r\nimport requests\r\nimport json\r\nfrom time import sleep\r\nimport pandas as pd\r\n\r\nmovies = pd.read_csv('movieids.csv')\r\nmovies = movies['tconst'].tolist()\r\n\r\nTitle = []\r\nRated = []\r\nReleased = []\r\nAwards = []\r\nimdbVotes = []\r\nDVD = []\r\nBoxOffice = []\r\nProduction = []\r\nimdbRating = [None] * 
len(movies)\r\nRottenTomatoes = [None] * len(movies)\r\nMetacritic = [None] * len(movies)\r\nimdbID = []\r\n\r\n\r\nfor i in movies:\r\n print(movies.index(i)+1,'/',len(movies))\r\n url = 'http://www.omdbapi.com/?i='+ str(i) +'&apikey=a79ea526'\r\n response = requests.get(url)\r\n data = response.json()\r\n try:\r\n BoxOffice.append(data['BoxOffice'])\r\n except KeyError: \r\n BoxOffice.append('N/A') \r\n try:\r\n Title.append(data['Title'])\r\n except KeyError:\r\n Title.append('N/A')\r\n try:\r\n Rated.append(data['Rated'])\r\n except KeyError:\r\n Rated.append('N/A')\r\n try:\r\n Released.append(data['Released'])\r\n except KeyError:\r\n Released.append('N/A')\r\n try:\r\n Awards.append(data['Awards'])\r\n except KeyError:\r\n Awards.append('N/A')\r\n try:\r\n imdbVotes.append(data['imdbVotes'])\r\n except KeyError:\r\n imdbVotes.append('N/A')\r\n try:\r\n DVD.append(data['DVD'])\r\n except KeyError:\r\n DVD.append('N/A')\r\n try:\r\n Production.append(data['Production'])\r\n except KeyError:\r\n Production.append('N/A')\r\n try:\r\n imdbID.append(data['imdbID'])\r\n except KeyError:\r\n imdbID.append('N/A')\r\n try: \r\n for j in range(len(data['Ratings'])):\r\n if data['Ratings'][j]['Source'] == 'Internet Movie Database':\r\n imdbRating[movies.index(i)] = data['Ratings'][j]['Value']\r\n if data['Ratings'][j]['Source'] == 'Rotten Tomatoes': \r\n RottenTomatoes[movies.index(i)] = data['Ratings'][j]['Value']\r\n if data['Ratings'][j]['Source'] == 'Metacritic': \r\n Metacritic[movies.index(i)] = data['Ratings'][j]['Value']\r\n except KeyError:\r\n imdbRating[movies.index(i)] = 'N/A'\r\n RottenTomatoes[movies.index(i)] = 'N/A'\r\n Metacritic[movies.index(i)] = 'N/A' \r\n sleep(2)\r\n\r\n\r\nmoviedf = pd.DataFrame(list(zip(imdbID, Title, Rated, Released, Awards, \r\n imdbVotes, DVD, BoxOffice, Production, imdbRating, \r\n RottenTomatoes, Metacritic)), \r\n columns = ['tconst','Title', 'Rated', 'Released', 'Awards',\r\n 'imdbVotes', 'DVD', 'BoxOffice', 'Production',\r\n 'imdbRating', 'RottenTomatoes', 'Metacritic'])\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n\r\n","repo_name":"JasmineYuer/BI-reporting-visualization-","sub_path":"Code/WebScraping Code.py","file_name":"WebScraping Code.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35434978417","text":"\nfrom genericpath import samefile\nfrom brownie import accounts\nfrom scripts.helpful import get_account\n\n\nfrom scripts.helpful import get_account, OPENSEA_URL\nfrom brownie import SimpleCollectible\n\nsample_token_uri = \"https://ipfs.io/ipfs/Qmd9MCGtdVz2miNumBHDbvj8bigSgTwnr4SbyH6DNnpWdt?filename=0-PUG.json\"\n\n\ndef deploy_create():\n account = get_account()\n\n simple = SimpleCollectible.deploy({\"from\": account})\n tx = simple.createCollectible(sample_token_uri, {\"from\": account})\n tx.wait(1)\n\n print(\n f\"Awesome, you can view your NFT at {OPENSEA_URL.format(simple.address, simple.tokenCounter() - 1)}\"\n )\n print(\"Please wait up to 20 minutes, and hit the refresh metadata button. 
\")\n\n    return simple\n\n\ndef main():\n    deploy_create()\n","repo_name":"isfang/learning-eth","sub_path":"nft/scripts/simple/deploy_create.py","file_name":"deploy_create.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18266056103","text":"import math\n\ndef bins():\n    #dictionary of ASCII letters to binary\n    asc = {' ': '00100000', 'A': '01000001', 'B': '01000010', 'C': '01000011', 'D': '01000100', 'E': '01000101', 'F': '01000110', 'G': '01000111', 'H': '01001000', 'I': '01001001', 'J': '01001010', 'K': '01001011', 'L': '01001100', 'M': '01001101', 'N': '01001110', 'O': '01001111', 'P': '01010000', 'Q': '01010001', 'R': '01010010', 'S': '01010011', 'T': '01010100', 'U': '01010101', 'V': '01010110', 'W': '01010111', 'X': '01011000', 'Y': '01011001', 'Z': '01011010', 'a': '01100001', 'b': '01100010', 'c': '01100011', 'd': '01100100', 'e': '01100101', 'f': '01100110', 'g': '01100111', 'h': '01101000', 'i': '01101001', 'j': '01101010', 'k': '01101011', 'l': '01101100', 'm': '01101101', 'n': '01101110', 'o': '01101111', 'p': '01110000', 'q': '01110001', 'r': '01110010', 's': '01110011', 't': '01110100', 'u': '01110101', 'v': '01110110', 'w': '01110111', 'x': '01111000', 'y': '01111001', 'z': '01111010', '!': '00100001', '?': '00111111'}\n\n    useri = input(\"Enter a value to be converted to binary: \")\n\n    #if the user inputs an integer or float, the input will be converted to a float, otherwise it is a string. \n    try:\n        useri = float(useri)\n        \n        #intstring and floatstring will be appended to with the binary as the function executes\n        intstring = []\n        floatstring = []\n        string = ''\n\n        if useri > 0:\n            \n            def bina(bi):\n                \n                #This separates the integer and float. The float is stored in flo, and the integer is stored in bi.\n                flo = useri%1\n                bi = useri - flo\n\n                # returns the binary value of the integer. \n                while bi > 0:\n                    r = bi%2\n                    if r == 0:\n                        bi = bi/2\n                    elif r == 1:\n                        bi = (bi-1)/2\n                    #removes the decimal point from the result. \n                    r = math.floor(r)\n                    #appends each iteration to the integer list.\n                    intstring.append(r)\n                #reverses the list order as the function operates in reverse order. \n                intstring.reverse()\n                #makes the integer list into a string for viewing purposes. \n                news = ''.join(map(str, intstring))\n\n                #converts the float value into binary. \n                while flo != 0:\n                    #converts float into binary by multiplying the float by 2, if the float is greater than or equal to 1, then 1 is subtracted from the number until the number is zero, and the binary value of 1 is appended to the float list. \n                    if flo >= 1:\n                        floatstring.append(1)\n                        flo = (flo-1) * 2\n\n                    #if the float is less than one then the result is multiplied by 2 again until it equals one, and a binary value of 0 is appended to the float list.\n                    elif flo < 1:\n                        floatstring.append(0)\n                        flo = flo *2\n\n                #appends a decimal point to the float list.\n                floatstring.insert(0, '.')\n                #converts the float list to a string for viewing. \n                new = ''.join(map(str, floatstring))\n                print(news + new)\n        bina(useri)\n\n    # if the value entered by the user is not a number, then it is a string, so the ASCII to binary function is called. \n    except ValueError:\n        def binasc(word):\n            #word2 is appended to with the binary results of the word. \n            word2 = ''\n            #The ASCII word is iterated through. \n            for letter in word:\n                #if the ascii value is in the ascii to binary dictionary, then the binary value of the ascii key is appended to the word2 string. 
\n                if letter in asc.keys(): \n                    word2 += asc.get(letter) + ' '\n            print(word2)\n            #print()\n        binasc(useri)\n\n\n\n\n\n\n","repo_name":"monty123456789/coding2","sub_path":"final/bin_conversion.py","file_name":"bin_conversion.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17254917386","text":"from sqlalchemy import Column, Integer, create_engine, ForeignKey, Float, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, backref, sessionmaker\n\nengine = create_engine('sqlite:///database.db')\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nBase = declarative_base()\n\nclass Supplier(Base):\n    __tablename__ = \"suppliers\"\n\n    id= Column( Integer(), primary_key=True)\n    name= Column(String(), unique=True)\n    address= Column(String(), unique=True)\n    products= relationship('Product', backref=backref('supplier'))\n\n    def __repr__(self):\n        return f'Supplier(id={self.id}, ' + \\\n            f'Name={self.name}, ' + \\\n            f'Address={self.address})'\n    \n    @classmethod\n    def search_supplier_by_id(cls, supplier_id):\n        supplier = session.query(cls).filter(cls.id == supplier_id).first()\n        return supplier\n\nclass Product(Base):\n    __tablename__ = \"products\"\n\n    id= Column( Integer(), primary_key=True)\n    name= Column(String())\n    unit_price= Column(Float())\n    supplier_id= Column(Integer(), ForeignKey('suppliers.id'))\n\n    def __repr__(self):\n        return f'Product(id={self.id}, ' + \\\n            f'Name={self.name}, ' + \\\n            f'Supplier_id={self.supplier_id})'\n    \n    @classmethod\n    def find_product_by_id(cls, product_id):\n        return session.query(cls).filter(Product.id==product_id).first()\n    \n    @classmethod\n    def find_product_by_name(cls, product_name):\n        return session.query(cls).filter(Product.name==product_name).all()\n    \n    @classmethod\n    def add_product(cls, name, unit_price, supplier_id):\n        item = cls(name=name, unit_price=unit_price, supplier_id=supplier_id)\n        session.add(item)\n        session.commit()\n        return item\n    \n    @classmethod\n    def delete_item_by_id(cls, product_id):\n        product = session.query(cls).filter(Product.id == product_id).first()\n        session.delete(product)\n        session.commit()\n    \n    @classmethod\n    def update_price(cls,product_id,new_unit_price):\n        product = session.query(cls).filter(Product.id == product_id).first()\n        if product:\n            product.unit_price = new_unit_price\n            session.commit()\n        else:\n            print(f\"Product with id {product_id} not found.\")","repo_name":"jbigishiro/Phase3-python-CLI-project-Supplier-App","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40411518695","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec  4 20:04:30 2020\n\n@author: yuesun\n\"\"\"\n\nimport numpy as np\nfrom Point import *\nimport matplotlib.pyplot as plt\n\nclass Node:\n    #define tree node\n    \n    def __init__(self,x,y,w,h,points,level):\n        self.x=x\n        self.y=y\n        self.width=w\n        self.height=h\n        self.level=level\n        self.points=points#array of points\n        self.children=[]\n        #children is a list of nodes\n        self.outmultipole=None\n        self.inmultipole=None\n        self.center=Point(x+w/2.0,y+h/2.0)\n    \n    def isleaf(self):\n        return len(self.children)==0\n    \n    \n    def getPoints(self):\n        return self.points\n    \n    def hasChildren(self):\n        return len(self.children)!=0\n    \n    \n    \n    def addChildren(self):\n        return\n    \n    \n    def 
contains(self,x,y,w,h,points):\n        ptList=[]\n        for p in points:\n            if p.x>=x and p.x<x+w and p.y>=y and p.y<y+h:\n                ptList.append(p)\n        return ptList\n    \n    def split(self,threshold):\n        if len(self.points)>threshold:\n            w=self.width/2.0\n            h=self.height/2.0\n            #3,5\n            #2,4\n            x0=self.x\n            y0=self.y\n            \n            p=self.contains(x0,y0,w,h,self.points)\n            n1=Node(x0,y0,w,h,p,self.level+1)\n            n1.split(threshold)\n            \n            p=self.contains(x0,y0+h,w,h,self.points)\n            n2=Node(x0,y0+h,w,h,p,self.level+1)\n            n2.split(threshold)\n            \n            p=self.contains(x0+w,y0,w,h,self.points)\n            n3=Node(x0+w,y0,w,h,p,self.level+1)\n            n3.split(threshold)\n            \n            p=self.contains(x0+w,y0+h,w,h,self.points)\n            n4=Node(x0+w,y0+h,w,h,p,self.level+1)\n            n4.split(threshold)\n            \n            self.children=[n1,n2,n3,n4]\n    \n    \nclass QTree:\n    def __init__(self,k,points,n):\n        \n        self.threshold=k\n        self.points=points\n        self.root=Node(0,0,n,n,self.points,0)\n        \n        self.buildTree(self.threshold)\n        \n    def buildTree(self,threshold):\n        self.root.split(threshold)\n        \n    def getRoot(self):\n        return self.root\n    \n    def graph(self):\n        fig = plt.figure(figsize=(12, 8))\n        plt.title(\"Quadtree\")\n        ax = fig.add_subplot(111)\n        c = self.find_children(self.root)\n        areas = set()\n        for el in c:\n            areas.add(el.width*el.height)\n        \n        for n in c:\n            ax.add_patch(plt.Rectangle((n.x, n.y), n.width, n.height, fill=False))\n        \n        x = [point.x for point in self.points]\n        y = [point.y for point in self.points]\n        plt.plot(x, y, 'ro')\n        plt.show()\n        return\n    \n    \n    def find_children(self,node):\n        if not node.children:\n            return [node]\n        else:\n            children = []\n            for child in node.children:\n                children += (self.find_children(child))\n            return children\n    \n    \n    \n    \n    \n    \nif __name__=='__main__':\n    #draw a quad tree, n is number of points, l is the range of the random number\n    n=500\n    l=100\n    particles=[Particle(x,y,1) for x,y in np.random.randint(1,l,(n,2))]\n    qt=QTree(5,particles,l)\n    qt.graph()","repo_name":"syalexandra/Test","sub_path":"adaptive_quadtree.py","file_name":"adaptive_quadtree.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29124588194","text":"from transformers import pipeline\nimport streamlit as st\nfrom VocabWord import VocabWord\nimport torch\nimport torch.nn.functional as F\nfrom DeckHandler import DeckHandler\nimport deepl\n\n#text gen model declarations\nmodel_name = \"gagan3012/k2t-new\"\n\ntext2text_generator = pipeline(\"text2text-generation\", model=model_name)\n\n#translator declaration\ntranslator = deepl.Translator(\"b92d0f2a-2c86-3f15-b9e9-4f44fc65fd02:fx\")\n\n#deck type declarations, maybe add additional word types?\nnouns = DeckHandler()\nverbs = DeckHandler()\nadjectives = DeckHandler()\n\n#temp words that are added just for testing, remove later\n\n\n\n#creates and outputs a new sentence with the key2text model\n#change to correct version of keytotext in comment above\n#needs to be changed so that it can also get in adjectives sometimes, maybe use a switch + randomizer to change between adjective and verb, or just use both and check which it actually uses\ndef Sgen(Cnoun, Cverb, Cadj):\n    noIdeas = text2text_generator(Cnoun, Cverb, Cadj)\n    st.text(Cnoun +\"\"+ Cverb + \"\"+ Cadj)\n    return noIdeas\n\ndef app():\n    ### need to figure out how to make the text go inside of the box maybe\n    TheSentanceToGuess = str(goGetterDone())\n    TheSentanceToGuess = TheSentanceToGuess[21:len(TheSentanceToGuess)-3]\n    result = translator.translate_text(TheSentanceToGuess, target_lang=\"JA\") \n    translated_text = result.text \n    st.text(translated_text)\n    st.text(TheSentanceToGuess)\n    testing = 
st.text_input(\"Guess:\")\n\n    st.button(\"Next word (temp)\")\n# bad name, fix later; generates the next sentence\n#need to do testing on what is best to input for program\ndef goGetterDone():\n    return Sgen(nouns.getNextReview(), verbs.getNextReview(), adjectives.getNextReview())\n\n","repo_name":"ArchaicAccount/Ap-Research-Final-Product","sub_path":"PlaceWhereYouDoReviews.py","file_name":"PlaceWhereYouDoReviews.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5767040976","text":"from django.contrib.auth.decorators import login_required\r\nfrom django.shortcuts import get_object_or_404, redirect, render\r\n\r\nfrom users.decorators import members_required\r\nfrom accounts.models import Member\r\nfrom campaigns.models import Campaign\r\n\r\n@login_required\r\n@members_required\r\ndef member_index(request):\r\n    template_name = 'members/index.html'\r\n    context = {}\r\n    member = request.user.member\r\n\r\n    campaigns = Campaign.objects.filter(initiator=member)[:5]\r\n\r\n    context[\"campaigns\"] = campaigns\r\n    context[\"member\"] = member\r\n\r\n    return render(request, template_name, context)\r\n\r\n@login_required\r\n@members_required\r\ndef initiator_campaign_list(request):\r\n    template_name = \"campaigns/initiator_list.html\"\r\n    context = {}\r\n    member = request.user.member\r\n\r\n    campaigns = Campaign.objects.filter(initiator=member)\r\n    \r\n    context[\"campaigns\"] = campaigns\r\n    context[\"member\"] = member\r\n    return render(request, template_name, context)\r\n\r\n@login_required\r\n@members_required\r\ndef public_campaign_list(request, slug):\r\n    template_name = \"campaigns/public_list.html\"\r\n    context = {}\r\n\r\n    if slug is None:\r\n        return redirect('not-found')\r\n    \r\n    member = Member.objects.get(slug=slug)\r\n\r\n    if member is None:\r\n        return redirect('not-found')\r\n\r\n    campaigns = Campaign.objects.filter(initiator=member)\r\n    context[\"campaigns\"] = campaigns\r\n    context[\"member\"] = member\r\n    return render(request, template_name, context)","repo_name":"earth-emoji/love","sub_path":"src/accounts/views/member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74326041423","text":"#! /usr/bin/python3.8\nimport csv\nfrom typing import List\n\n\nclass Solution(object):\n    def __init__(self, filename):\n        self.filename = filename\n\n    def _process_step(\n        self,\n        grid: List[List[List[int]]],\n        step: List[int],\n        on_lights: int,\n        hi_bound: int,\n    ) -> int:\n        \"\"\"Turns lights on or off according to step and returns # of lights on.\n\n        Args:\n            grid: 3-D grid containing current light statuses (on/off).\n            step: List containing in order: [on/off status, low x bound,\n                high x bound, low y bound, high y bound, low z bound, high z\n                bound].\n            on_lights: Initial number of lights on on the grid.\n            hi_bound: Higher bound of grid dimensions for all axes. 
Scaled so that\n                lo_bound maps to 0.\n\n        Returns:\n            Number of lights that are on after processing current step.\n        \"\"\"\n        on, x_lo, x_hi, y_lo, y_hi, z_lo, z_hi = step\n\n        for x in range(max(x_lo, 0), min(x_hi, hi_bound) + 1):\n            for y in range(max(y_lo, 0), min(y_hi, hi_bound) + 1):\n                for z in range(max(z_lo, 0), min(z_hi, hi_bound) + 1):\n                    if grid[x][y][z] == 0 and on:\n                        grid[x][y][z] = on\n                        on_lights += 1\n                    elif grid[x][y][z] == 1 and not on:\n                        grid[x][y][z] = 0\n                        on_lights -= 1\n\n        return on_lights\n\n    def solve(self, lo_bound: int, hi_bound: int):\n        with open(self.filename) as f:\n            csv_reader = csv.reader(f)\n            size = hi_bound - lo_bound\n            grid = [\n                [[0 for _ in range(size + 1)] for _ in range(size + 1)]\n                for _ in range(size + 1)\n            ]\n            steps = []\n\n            for row in csv_reader:\n                on_str, x_str = row[0].split(\" \")\n                on = 1 if on_str == \"on\" else 0\n                x_bound_str = x_str.split(\"=\")[1].split(\"..\")\n                x_lo, x_hi = [int(num) for num in x_bound_str]\n                y_bound_str = row[1].split(\"=\")[1].split(\"..\")\n                y_lo, y_hi = [int(num) for num in y_bound_str]\n                z_bound_str = row[2].split(\"=\")[1].split(\"..\")\n                z_lo, z_hi = [int(num) for num in z_bound_str]\n                step = [on, x_lo, x_hi, y_lo, y_hi, z_lo, z_hi]\n\n                for val_idx in range(1, len(step)):\n                    step[val_idx] -= lo_bound\n                steps.append(step)\n\n            on_lights = 0\n            # Scaling hi_bound based on lo_bound scaling to 0\n            hi_bound -= lo_bound\n\n            for step in steps:\n                on_lights = self._process_step(grid, step, on_lights, hi_bound)\n            print(steps[0:3])\n\n        print(on_lights)\n\n\nif __name__ == \"__main__\":\n    sol = Solution(\"input.csv\")\n    sol.solve(-50, 50)\n","repo_name":"danguan/aoc2021","sub_path":"day22/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4287307097","text":"import pandas\nimport plotly.graph_objects as go\nfrom Models.candle import Candle\nfrom tools.fibonacci_retracement import FibonacciRetracement\nfrom tools.order_block import OrderBlock, OrderBlockStatus\nfrom tools.point import Point\n\n\nclass SignalDetector:\n    def __init__(self, order_block: OrderBlock, pullback_zone_df: pandas.DataFrame, timeframe):\n        self.order_block = order_block\n        self.pullback_zone_df = pullback_zone_df\n        self.timeframe = timeframe\n\n    def get_price_by_fib_level(self, fibo_level) -> float:\n        # Find swing start and end\n        if self.order_block.is_bullish:\n            swing_start = self.order_block.get_top_left()\n            end_time = self.pullback_zone_df.index[-1]\n            price = self.pullback_zone_df.iloc[0]['High']\n            swing_end = Point(end_time, price)\n        else:\n            swing_start = self.order_block.bottom_left\n            end_time = self.pullback_zone_df.index[-1]\n            price = self.pullback_zone_df.iloc[0]['Low']\n            swing_end = Point(end_time, price)\n\n        # Get fibo level price\n        fibonacci_retracement = FibonacciRetracement(swing_start, swing_end)\n        fibo_level_price = fibonacci_retracement.get_level_price(fibo_level)\n        return fibo_level_price\n\n    def get_signal_price_level(self) -> float:\n        WANTED_FIBO_LEVEL = 80\n        return self.get_price_by_fib_level(WANTED_FIBO_LEVEL)\n\n    def get_signal_point(self) -> Point:\n        # Get the price threshold to send signal\n        price_to_send_signal = self.get_signal_price_level()\n\n        # Find the time of the candle touched the \"signal level\"\n        for candle_time, row in self.pullback_zone_df.iterrows():\n            candle = Candle(candle_time, row, self.timeframe)\n\n            if (self.order_block.is_bullish and candle.low_price <= price_to_send_signal) or 
\\\n                    (not self.order_block.is_bullish and candle.high_price >= price_to_send_signal):\n                signal_point = Point(candle.time, price_to_send_signal)\n                return signal_point\n\n        return None\n\n    def is_last_candle_reached_signal_price(self, chart: go.Figure = None) -> bool:\n        signal_point = self.get_signal_point()\n\n        if signal_point is None:\n            return False\n\n        # Get the last candle\n        last_candle_datetime = self.pullback_zone_df.index[-1]\n        is_to_send_signal = signal_point.datetime == last_candle_datetime\n\n        if chart:\n            signal_point.plot(chart, 'Orange', 10)\n\n        return is_to_send_signal\n","repo_name":"OfekDayan/SMC_Trading_Bot","sub_path":"signal_detector.py","file_name":"signal_detector.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"1644247513","text":"def f():\n    l_a = 2\n    l_b = '마이콜'\n    print(locals())\n\n\nclass MyClass:\n    x = 10\n    y = 10\n\n\ng_a = 1\ng_b = '둘리'\nprint(globals())\n\nf()\n\n# 1. a user-defined function\nf.k = 'hello'\nprint(f.__dict__)\n\n# 2.\nMyClass.z = 10\nprint(MyClass.__dict__)\n\n\n# built-in functions: no symbol table -> extension forbidden\n# print(print.__dict__)\n# print.z = 10\n\n# built-in classes: have a symbol table but cannot be extended\n# str.z = 10\n# print(str.__dict__)\n\n# object created from a built-in class\n# no symbol table -> no extension\n# g_a.z = 10\n# print(g_a.__dict__)\n\n# object created from a user-defined class\n# has a symbol table -> can be extended\no = MyClass()\no.z = 10\nprint(o.__dict__)\n","repo_name":"bitcafe24-byungkwan/python_practice","sub_path":"ch2.3/symbol_table.py","file_name":"symbol_table.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6391992687","text":"class TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution(object):\n    def findBottomLeftValue(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        if root is None:\n            return None\n        leftMostValue = root.val\n        nodeList = [root]\n        while len(nodeList) > 0:\n            tmpNodeList = []\n            for node in nodeList:\n                if node.left is not None:\n                    tmpNodeList.append(node.left)\n                if node.right is not None:\n                    tmpNodeList.append(node.right)\n            if len(tmpNodeList) > 0:\n                leftMostValue = tmpNodeList[0].val\n            nodeList = tmpNodeList\n        return leftMostValue\n","repo_name":"mengyx-work/CS_algorithm_scripts","sub_path":"leetcode/LC_513. Find Bottom Left Tree Value.py","file_name":"LC_513. 
Find Bottom Left Tree Value.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"35638364012","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 12 17:48:13 2021\n\n@author: svc_ccg\n\"\"\"\n\n\nimport os\nimport time\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport scipy.signal\nimport scipy.ndimage\nimport scipy.optimize\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype']=42\nimport matplotlib.pyplot as plt\nfrom numba import njit\nimport fileIO\n\n\n\ndef loadDatData(filePath,mode='r'):\n    totalChannels = 136\n    probeChannels = 128    \n    data = np.memmap(filePath,dtype='int16',mode=mode)    \n    data = np.reshape(data,(int(data.size/totalChannels),-1)).T\n    analogInData = {name: data[ch+probeChannels] for ch,name in enumerate(('vsync',\n                                                                           'photodiode',\n                                                                           'rotaryEncoder',\n                                                                           'cam1Exposure',\n                                                                           'cam2Exposure',\n                                                                           'led1',\n                                                                           'led2'))}\n    return data[:probeChannels],analogInData\n\n\ndef filterDatData(filePath,highpass=300,commonRef=True,ledArtifactDur=6):\n    t = time.perf_counter()\n    \n    probeData,analogInData = loadDatData(filePath,mode='r+')\n    sampleRate = 30000\n    totalSamples = probeData.shape[1]\n    \n    # mask led artifacts\n    if ledArtifactDur:\n        led1Onsets,led2Onsets = [np.array(findSignalEdges(analogInData[ch],edgeType='rising',thresh=5000,refractory=5)) for ch in ('led1','led2')]\n        ledOnsets = np.union1d(led1Onsets,led2Onsets).astype(int)\n        x = np.arange(ledArtifactDur)\n        for i in ledOnsets-1:\n            for ch in probeData:\n                if i < totalSamples-ledArtifactDur:\n                    ch[i:i+ledArtifactDur] = np.interp(x,[0,ledArtifactDur],ch[[i,i+ledArtifactDur]])\n                else:\n                    ch[i:] = ch[i]\n        print('masked '+str(len(ledOnsets))+' led artifacts')\n    \n    if highpass or commonRef:\n        if highpass:\n            Wn = highpass/(sampleRate/2) # cutoff freq normalized to nyquist\n            b,a = scipy.signal.butter(2,Wn,btype='highpass')\n        \n        chunkSamples = int(15*sampleRate)\n        offset = 0\n        while offset < totalSamples:\n            d = probeData[:,offset:offset+chunkSamples]\n            \n            # highpass filter\n            if highpass:\n                d[:,:] = scipy.signal.filtfilt(b,a,d,axis=1)\n            \n            # common reference median filter\n            if commonRef:\n                d -= np.median(d,axis=0).astype(d.dtype)\n            \n            print('filtered '+str(offset)+' of '+str(totalSamples)+' samples')\n            offset += chunkSamples\n    \n    # flush results (overwrites existing data)\n    print('flushing to disk')\n    del(probeData)\n    del(analogInData)\n    \n    print('completed in '+str(time.perf_counter()-t)+' s')\n    \n\n@njit\ndef findSignalEdges(signal,edgeType,thresh,refractory):\n    \"\"\"\n    signal: typically a large memmap array (loop through values rather than load all into memory)\n    edgeType: 'rising' or 'falling'\n    thresh: difference between current and previous value\n    refractory: samples after detected edge to ignore\n    \"\"\"\n    edges = []\n    lastVal = signal[0]\n    lastEdge = -refractory\n    for i in range(1,signal.size):\n        val = signal[i]\n        if i-lastEdge>refractory and ((edgeType=='rising' and val-lastVal>thresh) or (edgeType=='falling' and val-lastVal<thresh)):\n            edges.append(i)\n            lastEdge = i\n        lastVal = val\n    return edges\n\n\ndef findNegThenPosCrossing(signal,negThresh,posThresh):\n    # NOTE: hypothetical name and loop structure -- the original definition of this\n    # helper was lost to extraction garbling; only its final return statements\n    # survived, so the body below is a minimal inferred reconstruction\n    for i in range(signal.size):\n        for j in range(1,signal.size-i):\n            if signal[i] < negThresh and signal[i+j] > posThresh:\n                return i,i+j\n    return None,None\n\n\ndef getPsth(spikes,startTimes,windowDur,binSize=0.01,avg=True):\n    bins = np.arange(0,windowDur+binSize,binSize)\n    counts = np.zeros((len(startTimes),bins.size-1))    \n    for i,start in enumerate(startTimes):\n        counts[i] = np.histogram(spikes[(spikes>=start) & (spikes<=start+windowDur)]-start,bins)[0]\n    if avg:\n        counts = counts.mean(axis=0)\n    counts /= binSize\n    return counts, bins[:-1]+binSize/2\n\n\ndef 
getSdf(spikes,startTimes,windowDur,sampInt=0.001,filt='exponential',filtWidth=0.005,avg=True):\n t = np.arange(0,windowDur+sampInt,sampInt)\n counts = np.zeros((startTimes.size,t.size-1))\n for i,start in enumerate(startTimes):\n counts[i] = np.histogram(spikes[(spikes>=start) & (spikes<=start+windowDur)]-start,t)[0]\n if filt in ('exp','exponential'):\n filtPts = int(5*filtWidth/sampInt)\n expFilt = np.zeros(filtPts*2)\n expFilt[-filtPts:] = scipy.signal.exponential(filtPts,center=0,tau=filtWidth/sampInt,sym=False)\n expFilt /= expFilt.sum()\n sdf = scipy.ndimage.filters.convolve1d(counts,expFilt,axis=1)\n else:\n sdf = scipy.ndimage.filters.gaussian_filter1d(counts,filtWidth/sampInt,axis=1)\n if avg:\n sdf = sdf.mean(axis=0)\n sdf /= sampInt\n return sdf,t[:-1]\n \n \ndef getSyncData():\n # get analog sync data acquired with NidaqRecorder\n syncPath = fileIO.getFile('Select sync file',fileType='*.hdf5')\n syncFile = h5py.File(syncPath,'r')\n syncData = syncFile['AnalogInput']\n syncSampleRate = syncData.attrs.get('sampleRate')\n channelNames = syncData.attrs.get('channelNames')\n vsync = syncData[:,channelNames=='vsync'][:,0]\n photodiode = syncData[:,channelNames=='photodiode'][:,0]\n led = syncData[:,channelNames=='led'][:,0]\n syncTime = np.arange(1/syncSampleRate,(syncData.shape[0]+1)/syncSampleRate,1/syncSampleRate)\n \n syncFile.close()\n \n frameSamples = np.array(findSignalEdges(vsync,edgeType='falling',thresh=-0.5,refractory=2))\n \n behavDataPath = fileIO.getFile('',fileType='*.hdf5')\n behavData = h5py.File(behavDataPath,'r')\n \n psychopyFrameIntervals = behavData['frameIntervals'][:]\n frameRate = round(1/np.median(psychopyFrameIntervals))\n \n assert(frameSamples.size==psychopyFrameIntervals.size+1)\n \n ntrials = behavData['trialEndFrame'].size\n stimStart = behavData['trialStimStartFrame'][:ntrials]\n trialOpenLoopFrames = behavData['trialOpenLoopFrames'][:ntrials]\n assert(np.unique(trialOpenLoopFrames).size==1)\n openLoopFrames = trialOpenLoopFrames[0]\n responseWindowFrames = behavData['maxResponseWaitFrames'][()]\n optoOnset = behavData['trialOptoOnset'][:ntrials]\n targetFrames = behavData['trialTargetFrames'][:ntrials]\n maskFrames = behavData['trialMaskFrames'][:ntrials]\n maskOnset = behavData['trialMaskOnset'][:ntrials]\n \n behavData.close()\n \n optoOnsetToPlot = 0\n opto = optoOnset==optoOnsetToPlot\n \n stimDur = []\n for st in stimStart:\n stimDur.append(psychopyFrameIntervals[st+1:st+3].sum())\n stimDur = np.array(stimDur)\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n samples = np.arange(frameSamples[0]-100,frameSamples[0]+201)\n t = (samples-frameSamples[0])/syncSampleRate\n ax.plot(t,vsync[samples],color='k',label='vsync')\n ax.plot(t,photodiode[samples],color='0.5',label='photodiode')\n for side in ('right','top'):\n ax.spines[side].set_visible(False)\n ax.tick_params(direction='out',top=False,right=False)\n ax.set_xlabel('Time from first frame (s)')\n ax.legend()\n plt.tight_layout()\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ind = frameSamples[stimStart[np.where(opto)[0][0]]]\n samples = np.arange(ind-1500,ind+3001)\n t = (samples-ind)/syncSampleRate\n ax.plot(t,vsync[samples],color='k',label='vsync')\n ax.plot(t,photodiode[samples],color='0.5',label='photodiode')\n ax.plot(t,led[samples],color='b',label='led')\n for side in ('right','top'):\n ax.spines[side].set_visible(False)\n ax.tick_params(direction='out',top=False,right=False)\n ax.set_xlim([-0.005,0.01])\n ax.set_xlabel('Time from trial start (s)')\n ax.legend()\n 
plt.tight_layout()\n    \n\ndef fitCurve(func,x,y,initGuess=None,bounds=None):\n    return scipy.optimize.curve_fit(func,x,y,p0=initGuess,bounds=bounds)[0]\n    \n\ndef calcLogisticDistrib(x,a,b,m,s):\n    # a: amplitude, b: offset, m: x at 50% max y, s: scale\n    return a * (1 / (1 + np.exp(-(x - m) / s))) + b\n\ndef inverseLogistic(y,a,b,m,s):\n    return m - s * np.log((a / (y - b)) - 1)\n\n\ndef calcWeibullDistrib(x,a,b,j,k):\n    # a: amplitude, b: offset, j: shape, k: scale\n    return a * (1 - np.exp(-(x / j) ** k)) + b\n\ndef inverseWeibull(y,a,b,j,k):\n    return j * (-np.log(1 - ((y - b) / a))) ** (1/k)\n    \n\nclass MaskTaskData():\n    \n    def __init__(self):\n        self.behav = False\n        self.rf = False\n        self.ephys = False\n        self.frameDisplayLag = 2\n        self.earlyMoveFrames = 15\n    \n    \n    def loadBehavData(self,filePath=None):\n        if filePath is None:\n            self.behavDataPath = fileIO.getFile('Select behavior data file',fileType='*.hdf5')\n        else:\n            self.behavDataPath = filePath\n        if len(self.behavDataPath)==0:\n            return\n        self.behav = True\n        print('\\n'+self.behavDataPath)\n        \n        behavData = h5py.File(self.behavDataPath,'r')\n        self.rigName = behavData['rigName'].asstr()[()]\n        self.behavFrameIntervals = behavData['frameIntervals'][:]\n        self.frameRate = round(1/np.median(self.behavFrameIntervals))\n        if self.ephys and self.behavFrameIntervals.size+1>self.frameSamples.size:\n            self.ntrials = np.sum(behavData['trialEndFrame'][:]<self.frameSamples.size)\n        else:\n            self.ntrials = behavData['trialEndFrame'].size\n        # [a block loading the remaining trial parameters from behavData was lost\n        #  to extraction garbling and could not be recovered; the guard below is a\n        #  minimal inferred reconstruction]\n        if 'keysPressed' in behavData and len(behavData['keysPressed']) > 0:\n            self.keysPressed = behavData['keysPressed'].asstr()[:]\n        if 'showVisibilityRating' in behavData and behavData['showVisibilityRating'][()]:\n            self.visRating = behavData['visRating'].asstr()[:self.ntrials]\n            self.visRatingScore = np.zeros(self.visRating.size)\n            self.visRatingScore[['1' in v for v in self.visRating]] = -1\n            self.visRatingScore[['3' in v for v in self.visRating]] = 1\n            self.visRatingStartFrame = behavData['visRatingStartFrame'][:self.ntrials]\n            self.visRatingEndFrame = behavData['visRatingEndFrame'][:self.ntrials]\n        self.useContrastStaircase = behavData['useContrastStaircase'][()] if 'useContrastStaircase' in behavData else False\n        \n        behavData.close()\n        \n        self.findLongFrameTrials()\n        self.findEngagedTrials()\n        self.getWheelPos()\n        self.findEarlyMoveTrials()\n        self.calcReactionTime()\n    \n    \n    def findLongFrameTrials(self):\n        self.longFrameTrials = np.zeros(self.ntrials,dtype=bool)\n        self.targetDur = np.full(self.ntrials,np.nan)\n        self.maskOnsetDur = self.targetDur.copy()\n        self.optoOnsetDur = self.targetDur.copy()\n        tol = 0.5/self.frameRate\n        for i,s in enumerate(self.stimStart):\n            if self.trialType[i] in ('targetOnly','targetOnlyOpto','mask','maskOpto'):\n                self.targetDur[i] = self.behavFrameIntervals[s:s+self.targetFrames[i]].sum()\n                if self.targetDur[i] > self.targetFrames[i]/self.frameRate+tol:\n                    self.longFrameTrials[i] = True\n            if self.trialType[i] in ('mask','maskOpto'):\n                self.maskOnsetDur[i] = self.behavFrameIntervals[s:s+self.maskOnset[i]].sum()\n                if self.maskOnsetDur[i] > self.maskOnset[i]/self.frameRate+tol:\n                    self.longFrameTrials[i] = True\n            if not np.isnan(self.optoOnset[i]):\n                self.optoOnsetDur[i] = self.behavFrameIntervals[s:s+int(self.optoOnset[i])].sum()\n                if self.optoOnsetDur[i] > self.optoOnset[i]/self.frameRate+tol:\n                    self.longFrameTrials[i] = True\n        print(str(round(100*np.sum(self.behavFrameIntervals > 1/self.frameRate+tol)/self.behavFrameIntervals.size,2))+' % frames long')\n        print(str(self.longFrameTrials.sum())+' / '+str(self.ntrials)+' trials had long frames')\n    \n    \n    def findEngagedTrials(self,engagedThresh=10):\n        self.engaged = np.ones(self.ntrials,dtype=bool)\n        
trials = (self.trialType!='catch') & np.isnan(self.optoOnset)\n        for i in range(self.ntrials):\n            r = self.responseDir[:i+1][trials[:i+1]]\n            if len(r)>engagedThresh:\n                if all(np.isnan(r[-engagedThresh:])):\n                    self.engaged[i] = False\n        print(str(self.engaged.sum())+' / '+str(self.ntrials)+' trials engaged')\n    \n    \n    def getWheelPos(self,preFrames=0,postFrames=0):    \n        deltaWheel = np.zeros((self.ntrials,preFrames+self.openLoopFrames+self.responseWindowFrames+postFrames))\n        for i,s in enumerate(self.stimStart):\n            d = self.deltaWheelPos[s-preFrames:s-preFrames+self.openLoopFrames+self.responseWindowFrames+postFrames]\n            deltaWheel[i,:len(d)] = d\n        self.wheelPos = np.cumsum(deltaWheel,axis=1)\n        self.wheelPos *= self.wheelRadius\n    \n    \n    def findEarlyMoveTrials(self,earlyMoveThresh=None):\n        if earlyMoveThresh is None:\n            earlyMoveThresh = self.maxQuiescentMoveDist\n        self.earlyMove = np.any(self.wheelPos[:,:self.earlyMoveFrames]>earlyMoveThresh,axis=1)\n        print(str(self.earlyMove.sum())+' / '+str(self.ntrials)+' trials early move')\n    \n    \n    def calcReactionTime(self,moveInitThresh=0.2):\n        self.reactionTime = np.full(self.ntrials,np.nan)\n        self.movementVelocity = np.full(self.ntrials,np.nan)\n        if self.rigName == 'human':\n            for i,(s,r) in enumerate(zip(self.stimStart+self.frameDisplayLag,self.responseFrame)):\n                self.reactionTime[i] = self.behavFrameIntervals[s+1:r].sum()*1000\n            if hasattr(self,'visRating'):\n                self.visRatingReactionTime = np.full(self.ntrials,np.nan)\n                for i,(s,r) in enumerate(zip(self.visRatingStartFrame+self.frameDisplayLag,self.visRatingEndFrame)):\n                    self.visRatingReactionTime[i] = self.behavFrameIntervals[s+1:r].sum()*1000    \n        else:\n            wp = self.wheelPos-self.wheelPos[:,self.earlyMoveFrames][:,None]\n            wp[:,:self.earlyMoveFrames] = 0\n            for i,(w,s) in enumerate(zip(wp,self.stimStart+self.frameDisplayLag)):\n                frameIntervals = self.behavFrameIntervals[s:s+w.size]\n                frameIntervals[0] = 0\n                t = np.cumsum(frameIntervals)\n                t *= 1000\n                tinterp = np.arange(t[-1])\n                winterp = np.interp(tinterp,t,np.absolute(w[:t.size]))\n                respInd = np.where(winterp>=self.wheelRewardDistance)[0]\n                if len(respInd)>0:\n                    belowThresh = np.where(winterp[:respInd[0]]<moveInitThresh)[0]\n                    if len(belowThresh)>0:\n                        initInd = belowThresh[-1]+1\n                        self.reactionTime[i] = tinterp[initInd]\n                        self.movementVelocity[i] = 1000*(self.wheelRewardDistance-moveInitThresh)/(tinterp[respInd[0]]-tinterp[initInd])\n    \n    \n    def loadRFData(self,filePath=None):\n        if filePath is None:\n            self.rfDataPath = fileIO.getFile('Select rf mapping data file',fileType='*.hdf5')\n        else:\n            self.rfDataPath = filePath\n        if len(self.rfDataPath)==0:\n            return\n        self.rf = True\n        rfData = h5py.File(self.rfDataPath,'r')\n        self.rfFrameIntervals = rfData['frameIntervals'][:]\n        if not self.behav:\n            self.frameRate = round(1/np.median(self.rfFrameIntervals))\n        if 'stimStartFrame' in rfData:\n            self.rfStimStart = rfData['stimStartFrame'][:-1]\n        else:\n            trialStartFrame = np.concatenate(([0],np.cumsum(rfData['preFrames']+rfData['trialStimFrames'][:-1]+rfData['postFrames'])))\n            self.rfStimStart = trialStartFrame+rfData['preFrames']\n        self.rfStimStart += self.frameSamples.size-(self.rfFrameIntervals.size+1)\n        rfTrials = self.rfStimStart.size\n        self.rfStimPos = rfData['trialGratingCenter'][:rfTrials]\n        self.rfStimContrast = rfData['trialGratingContrast'][:rfTrials]\n        self.rfOris = rfData['gratingOri'][:rfTrials]\n        self.rfStimOri = rfData['trialGratingOri'][:rfTrials]\n        self.rfStimFrames = rfData['trialStimFrames'][:rfTrials]\n    \n    \n    def loadEphysData(self,led=False):\n        self.datFilePath = fileIO.getFile('Select probe dat 
file',fileType='*.dat')\n        if len(self.datFilePath)==0:\n            return\n        self.ephys = True\n\n        probeData,analogInData = loadDatData(self.datFilePath)\n        \n        self.sampleRate = 30000\n        self.totalSamples = probeData.shape[1]\n        \n        self.frameSamples = np.array(findSignalEdges(analogInData['vsync'],edgeType='falling',thresh=-5000,refractory=2))\n        \n        if led:\n            self.led1Onsets,self.led2Onsets = [np.array(findSignalEdges(analogInData[ch],edgeType='rising',thresh=5000,refractory=5)) for ch in ('led1','led2')]\n        \n        fig = plt.figure()\n        ax = fig.add_subplot(1,1,1)\n        samples = np.arange(self.frameSamples[0]-1500,self.frameSamples[0]+3001)\n        t = (samples-self.frameSamples[0])/self.sampleRate\n        ax.plot(t,analogInData['vsync'][samples],color='k',label='vsync')\n        ax.plot(t,analogInData['photodiode'][samples],color='0.5',label='photodiode')\n        for side in ('right','top'):\n            ax.spines[side].set_visible(False)\n        ax.tick_params(direction='out',top=False,right=False)\n        ax.set_xlabel('Time from first frame (s)')\n        ax.legend()\n        plt.tight_layout()\n        \n    \n    def loadKilosortData(self,dirPath=None):\n        if dirPath is None:\n            self.kilosortDirPath = fileIO.getDir('Select directory containing kilosort data')\n        else:\n            self.kilosortDirPath = dirPath\n        if len(self.kilosortDirPath)==0:\n            return\n        kilosortData = {key: np.load(os.path.join(self.kilosortDirPath,key+'.npy')) for key in ('spike_clusters',\n                                                                                               'spike_times',\n                                                                                               'templates',\n                                                                                               'spike_templates',\n                                                                                               'channel_positions',\n                                                                                               'amplitudes')}\n        clusterIDs = pd.read_csv(os.path.join(self.kilosortDirPath,'cluster_KSLabel.tsv'),sep='\\t')\n        unitIDs = np.unique(kilosortData['spike_clusters'])\n        self.units = {}\n        for u in unitIDs:\n            uind = np.where(kilosortData['spike_clusters']==u)[0]\n            u = str(u)\n            self.units[u] = {}\n            self.units[u]['label'] = clusterIDs[clusterIDs['cluster_id']==int(u)]['KSLabel'].tolist()[0]\n            self.units[u]['samples'] = kilosortData['spike_times'][uind].flatten()\n            \n            #choose 1000 spikes with replacement, then average their templates together\n            chosen_spikes = np.random.choice(uind,1000)\n            chosen_templates = kilosortData['spike_templates'][chosen_spikes].flatten()\n            self.units[u]['template'] = np.mean(kilosortData['templates'][chosen_templates],axis=0)\n            \n            peakChan = np.unravel_index(np.argmin(self.units[u]['template']),self.units[u]['template'].shape)[1]\n            self.units[u]['peakChan'] = peakChan\n            self.units[u]['position'] = kilosortData['channel_positions'][peakChan]\n            self.units[u]['amplitudes'] = kilosortData['amplitudes'][uind]\n            \n            template = self.units[u]['template'][:,peakChan]\n            if any(np.isnan(template)):\n                self.units[u]['peakToTrough'] = np.nan\n            else:\n                peakInd = np.argmin(template)\n                self.units[u]['peakToTrough'] = np.argmax(template[peakInd:])/(self.sampleRate/1000)\n        \n        self.sortedUnits = np.array(list(self.units.keys()))[np.argsort([self.units[u]['peakChan'] for u in self.units])]\n        self.findIsiViolations()\n        self.getGoodUnits()\n        \n    \n    def findIsiViolations(self,minIsi=0,refracPeriod=0.0015):\n        totalTime = self.totalSamples/self.sampleRate\n        for u in self.units:\n            spikeTimes = self.units[u]['samples']/self.sampleRate\n            duplicateSpikes = np.where(np.diff(spikeTimes)<=minIsi)[0]+1\n            spikeTimes = np.delete(spikeTimes,duplicateSpikes)\n            isis = np.diff(spikeTimes)\n            numSpikes = len(spikeTimes)\n            numViolations = sum(isis<refracPeriod)\n            self.units[u]['fpRate'] = numViolations*totalTime/(2*numSpikes**2*(refracPeriod-minIsi))\n    \n    \n    def getGoodUnits(self,minRate=0.1):\n        totalTime = self.totalSamples/self.sampleRate\n        self.goodUnits = [u for u in self.sortedUnits if self.units[u]['label']=='good' and self.units[u]['samples'].size/totalTime>minRate]\n    \n    def saveToHdf5(self,filePath=None):\n        fileIO.objToHDF5(self,filePath)\n    \n    \n    def loadFromHdf5(self,filePath=None):\n        
fileIO.hdf5ToObj(self,filePath)\n\n\n","repo_name":"samgale/Masking","sub_path":"maskTaskAnalysisUtils.py","file_name":"maskTaskAnalysisUtils.py","file_ext":"py","file_size_in_byte":24143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5120980985","text":"import pywikibot as pwb\n\nsite = pwb.Site()\n\naffixes = [\n    'bo', 'dis', 'ek', 'eks', 'fi', 'ge', 'mal', 'mis', 'pra', 're', 'aĉ',\n    'ad', 'aĵ', 'an', 'ar', 'ĉj', 'ebl', 'ec', 'eg', 'ej', 'em', 'end',\n    'er', 'estr', 'et', 'id', 'ig', 'iĝ', 'il', 'ind', 'ing', 'in', 'ism',\n    'ist', 'nj', 'obl', 'on', 'op', 'uj', 'ul', 'um',\n]\nfor affix in affixes:\n    page = pwb.Page(site, f'Utilisateur:Darmo117/Étymologies manquantes/{affix}')\n    page.text = '{{colonnes|\\n'\n\n    ctg = pwb.Category(site, title='espéranto')\n    articles = ctg.articles()\n    i = 1\n    for article in articles:\n        if affix in article.title():\n            tps = article.raw_extracted_templates\n            if 'ébauche-étym' in [t[0] for t in tps]:\n                title = article.title()\n                print(title)\n                page.text += f'* [[{title}]]\\n'\n                i += 1\n            if i == 25:\n                page.text = page.text.replace('}}', '')\n                page.text += '}}'\n                page.save()\n                i = 1\n    page.text = page.text.replace('}}', '')\n    page.text += '}}'\n    page.save()\n","repo_name":"Darmo117/Wiktionary","sub_path":"test/py.py","file_name":"py.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"2339881593","text":"from django.conf import settings\n\nADVISOR_TYPE = 'ADVISOR_CHECK'\n\nNOT_APPROVED = 'not_approved'\n\nSIGNUP_WITH_EMAIL = 'signup_with_email'\n\n'''\nconstants for pagination in the search\n'''\nCARDS_PER_PAGE = 15\nPAGE_RANGE = 5\nSTART_PAGES = 10\n\n# Advisor check registration number fields\nIRDA_REG_FIELD = 'irda_urn'\nSEBI_REG_FIELD = 'reg_no'\nAMFI_REG_FIELD = 'arn'\nCA_REG_FIELD = 'reg_id'\nBSE_REG_FIELD = 'bse_clearing_number'\nMY_REG_FIELD = 'licence_number'\nSG_REG_FIELD = 'member_number'\nUS_REG_FIELD = 'lic_id'\n\n# Advisor table registration number fields\nIRDA_NUMBER = 'irda_number'\nSEBI_NUMBER = 'sebi_number'\nAMFI_NUMBER = 'amfi_number'\n\n# Claimed status\nCLAIMED_STATUS_VERIFIED = 'verified'\nCLAIMED_STATUS_NOT_VERIFIED = 'not_verified'\n\n# Category types\nCATEGORY_OTHER = 'other'\n\n# Page Titles\nSEARCH = 'Search'\nADVISOR_CHECK = 'Advisor Check'\n\n# page types\nADVISOR_PROFILE = 'profile'\nADVISOR_REPUTE = 'repute'\n\n# PAGE URLS\n''' need to pass -> %(id, category_type) '''\nAD_CHK_PROFILE_URL = settings.DEFAULT_DOMAIN_URL+'/advisor_check/profile/%s/%s/'\n''' need to pass -> %(batchcode) '''\nABOTMI_PROFILE_URL = settings.DEFAULT_DOMAIN_URL+'/profile/%s/'\n''' need to pass -> %(batchcode) '''\nABOTMI_REPUTE_URL = settings.DEFAULT_DOMAIN_URL+'/repute_index/%s/'\n\n# Action Type\nVIEW = 'view'\n","repo_name":"venki208/abotmi","sub_path":"abotmi/advisor_check/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7721853364","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.linalg import norm\n\nclass Kmeans():\n    def __init__(self,k,max_iterations):\n        self.k=k\n        self.max_iterations = max_iterations\n\n    \"\"\" calculate euclidean distance \"\"\"\n    def euclidian_distance(self,a,b):\n        dis = norm(a-b)\n        return dis\n\n    \"\"\" initial random centroids \"\"\"\n    def initial_random_centroids(self,X):\n        
initial_centroids_index = np.random.choice(\n            X.shape[0], self.k)\n\n        initial_centroids = X[initial_centroids_index, :]\n        return initial_centroids\n\n    \"\"\" Predict \"\"\"\n    def predict(self,X):\n        prev_centroids = self.initial_random_centroids(X)\n        distance_matrix = np.empty(\n            (X.shape[0], self.k))\n\n        clustering_id = np.empty(X.shape[0])\n\n        for _ in range(self.max_iterations):\n            \"\"\" calculating clustering id \"\"\"\n            for i, record in enumerate(X):\n                for j, centroid in enumerate(prev_centroids):\n                    distance_matrix[i, j] = self.euclidian_distance(record, centroid)\n\n                clustering_id[i] = np.argmin(distance_matrix[i])\n\n\n\n            \"\"\" calculate new centroids \"\"\"\n            new_centroids = np.empty((self.k, X.shape[1]))\n            for k in range(self.k):\n                new_centroids[k] = np.mean(\n                    X[np.where(clustering_id == k)], axis=0)\n\n            diff = new_centroids - prev_centroids\n\n            if not np.any(diff):\n                break\n            prev_centroids = new_centroids\n\n        return clustering_id, new_centroids\n\n\n# main method\nif __name__ == \"__main__\":\n    records = pd.read_csv('Mall_Customers.csv')\n    records_for_clustering = records.iloc[:, 3:].values\n    number_of_clusters = 5\n    max_iter =300\n\n    kmean_obj = Kmeans(number_of_clusters, max_iter)\n    clustering_id, centroids = kmean_obj.predict(records_for_clustering)\n\n\n    colors = ['r', 'g', 'b', 'y', 'c', 'm']\n    fig, ax = plt.subplots()\n    for i in range(number_of_clusters):\n        points = np.array([records_for_clustering[j] for j in range(\n            len(records_for_clustering)) if clustering_id[j] == i])\n        ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])\n\n    ax.scatter(centroids[:, 0],\n               centroids[:, 1], marker='*', s=200, c='#050505')\n\n    fig.savefig('clustering.png', dpi=fig.dpi)\n\n\n\n\n\n\n\n\n","repo_name":"sujitkoley9/Machine-Learning-algorithm-from-scratch","sub_path":"K-means/k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16425975052","text":"import sys\nimport numpy as np\nimport pylab as pl\nimport lib.animation as anim\nimport matplotlib.pyplot as plt\nimport new_plot_scripts.plotting_functions as pf\nimport new_sim_scripts.simulation_functions as sf\nimport glob\n\n\n\nif __name__ == \"__main__\":\n\n    nrowE = ncolE = 120\n    nrowI = ncolI = 60\n    nE = nrowE * ncolE\n    nI = nrowI * ncolI\n    nN = nE + nI\n\n    offsetE = 1\n    offsetI = nE + 1\n\n    for asymmetry in ['EI']:\n        for r in [1.0]:\n            for nmbE in [250., 300., 400., 500., 550., 600.]:\n                for nmbI in [nmbE]:\n                \n                    print(asymmetry, r, nmbE, nmbI)\n\n                    address = '/home/bhalla/shreyal/data/EI_brunel_{}/EI_{}_size3_std7-9-7-9_g8_J10_nmE0I0p0bE{}bI{}_base0_taumE10I10_Vth-50_ratio{}/'.format(asymmetry, asymmetry, int(nmbE), int(nmbI), r)\n                    nfiles = 1\n                    t_warmup = 100.\n                    t_sim = 1000.\n                    t_reset = 500.\n                    nfiles = 1 \n\n                    ts, gids = pf.get_spiketimes(address, nfiles)\n\n                    ot = np.argsort(ts)\n                    ts = ts[ot]\n                    gids = gids[ot]\n\n                    gidxE = gids - offsetE < nE\n                    tsE, gidsE = ts[gidxE], gids[gidxE] # Excitatory population\n                    tsI, gidsI = ts[~gidxE], gids[~gidxE] # Inhibitory population\n\n                    tres = 10.\n                    \n                    print(\"Read spiketimes.\")\n\n                    #text_all = [\n                    #        ['Warming up', 0., 100.],\n                    #        ['Low uniform input', 100., 600.],\n                    #        ['Reset', 600., 1100.],\n                    #        ['High uniform input', 1100., 1600.],\n                    #        ['Reset', 1600., 2100.],\n                    #        ['Directed Input', 2100., 2600.]\n                    #        ]\n\n                    #a = pf.make_animation_old_label(tsE, gidsE - offsetE, nrowE, ncolE, nE, 
tres, text_all)\n #print(\"Animation made.\")\n #print(\"Saving animation...\")\n #a.save(address+'activity_anim.mp4', fps=5, writer='ffmpeg', extra_args=['-vcodec', 'libx264'])\n #print(\"Animation saved.\")\n #plt.show()\n\n #offsetE = 1\n #gidxE = ids - offsetE < nE\n #tsE, gidsE = ts[gidxE], ids[gidxE] # Excitatory population\n #tsI, gidsI = ts[~gidxE], ids[~gidxE] # Inhibitory population\n #tres = 10.\n tw, ts, tr = t_warmup, t_sim, t_reset\n text_all = [\n ['Warming up', 0., tw],\n ['Sparse uniform', tw, tw + ts],\n # ['Low uniform input', tw, tw + ts],\n # ['Reset', tw + ts, tw + ts + tr],\n # ['High uniform input', tw + ts + tr, tw + 2*ts + tr],\n # ['Reset', tw + 2*ts + tr, tw + 2*ts + 2*tr],\n # ['Directed Input', tw + 2*ts + 2*tr, tw + 3*ts + 2*tr],\n # ['Reset', tw + 3*ts + 2*tr, tw + 3*ts + 3*tr],\n # ['Sparse uniform input', tw + 3*ts + 3*tr, tw + 4*ts + 3*tr],\n # ['Reset', tw + 4*ts + 3*tr, tw + 4*ts + 4*tr],\n # ['Temporal - Random', tw + 4*ts + 4*tr, tw + 5*ts + 4*tr],\n # ['Reset', tw + 5*ts + 4*tr, tw + 5*ts + 5*tr],\n # ['Temporal - Interpolated', tw + 5*ts + 5*tr, tw + 6*ts + 5*tr]\n ]\n\n #a = pf.make_animation_old_label(tsE, gidsE - offsetE, nrowE, ncolE, nE, tres, text_all)\n a = pf.make_animation_old_label(tsI, gidsI - offsetI, nrowI, ncolI, nI, tres, text_all)\n print(\"Animation made.\")\n print(\"Saving animation...\")\n a.save(address + 'activity_anim_I.mp4', fps=5, writer='ffmpeg', extra_args=['-vcodec', 'libx264'])\n print(\"Animation saved.\\n\")\n","repo_name":"ShreyaLakhera/sequences_LCRN","sub_path":"new_plot_scripts/plot_animate_label.py","file_name":"plot_animate_label.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16424143098","text":"import tensorflow as tf\nimport reader\nimport params\nimport utils as u\n\n\nbatch = reader.get_justOne_batch(100,\"training\")\noh = tf.one_hot(batch[1], params.OUT_CLASSES, dtype=tf.int32)\n\nx_image = batch[0]\ny_ = oh\ncoords_ = batch[2]\n\n#CONVOLUTIONAL LAYERS\nx_image = tf.reshape(x_image, [-1, 128,128,3])\nsq1 = u.create_fire_module(x_image,16,64,64,3)\nmp1 = u.max_pool_2x2(sq1) #down to 64x64\n\nsq2 = u.create_fire_module(mp1, 16,64,64,128)\nsq3 = u.create_fire_module(sq2, 16,64,64,128)\nsq4 = u.create_fire_module(sq3, 32,128,128,128)\n\nmp4 = u.max_pool_2x2(sq4) #down to 32x32\n\nsq5 = u.create_fire_module(mp4, 32,128,128,256)\nsq6 = u.create_fire_module(sq5, 48,192,192,256)\nsq7 = u.create_fire_module(sq6, 48,192,192,384)\nsq8 = u.create_fire_module(sq7, 64,256,256,384)\n\nmp8 = u.max_pool_2x2(sq8)#down to 16x16\n\nsq9 = u.create_fire_module(mp8, 64,256,256,512)\n\nactivations = u.get_activations(sq9, 16, 512)\n\nout = tf.nn.softmax(activations)\n\n#Regressor\n\nkeep_prob = tf.placeholder(tf.float32)\nreg_sq = u.create_fire_module(sq8,8,2,2,512)\nfinal_count = tf.cast((params.IMAGE_SIZE/4)**2*4,tf.int32)\nh_sq8_flat = tf.reshape(reg_sq,[-1, final_count])\n\nW_reg1 = u.weight_variable([final_count, params.FC_NODES])\nb_reg1 = u.bias_variable([params.FC_NODES])\n\nh_reg1 = tf.nn.relu(tf.matmul(h_sq8_flat, W_reg1) + b_reg1)\nh_reg1_drop = tf.nn.dropout(h_reg1, keep_prob)\n\nW_reg2 = u.weight_variable([params.FC_NODES, 2])\nb_reg2 = u.bias_variable([2])\n\ncoord_predict = tf.sigmoid(tf.matmul(h_reg1_drop, W_reg2) + b_reg2)\n\nregression_loss = tf.nn.l2_loss(coords_ - coord_predict)\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(activations,y_))\nloss = cross_entropy + 
regression_loss\n\ntrain_step = tf.train.AdamOptimizer(0.001).minimize(loss)\ncorrect_prediction = tf.equal(tf.argmax(out,1), tf.argmax(y_,1))\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\nprint(\"Model constructed!\")\n\nsess = tf.Session()\n\ncoordinate = tf.train.Coordinator()\nthreads = tf.train.start_queue_runners(sess=sess, coord=coordinate)\nsess.run(tf.global_variables_initializer())\n\n\n\nprint(\"Variables initialized!\")\n\nfor i in range(200):\n if i%1 == 0:\n\n train_accuracy = sess.run(accuracy, feed_dict = {keep_prob:1.0})\n pos_acc = sess.run(regression_loss, feed_dict = {keep_prob:1.0})\n print(\"step %d, class accuracy: %g, position loss: %g\" % (i, train_accuracy, pos_acc))\n\n sess.run(train_step, feed_dict = {keep_prob:1.0})\n\n\ntest_accuracy = sess.run(accuracy, feed_dict = {keep_prob:1.0})\n\nprint(\"Done! accuracy on test set: %g\" % (test_accuracy))\n\nimport os\nsaver = tf.train.Saver()\nif not os.path.exists('./networks/'):\n os.makedirs('./networks/')\nsaver.save(sess, './networks/squeeze1.cpt')\n","repo_name":"ollenorelius/tensorflow_learning","sub_path":"shapes/justOne_squeeze/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70861333583","text":"import sys\nimport json\nimport osmium\n\nfrom tools.utilities import dump\n\nways = []\nnodes = {}\n\ndef tag(entity,name):\n return entity.tags[name] if (name in entity.tags) else None\n\nclass Node():\n def __init__(self,ref,location):\n self.ref = ref\n self.lat = location.lat\n self.lon = location.lon\n self.ways = []\n def to_json(self):\n return {\n 'ref': self.ref,\n 'lat': self.lat,\n 'lon': self.lon,\n 'ways': self.ways\n }\n\nclass Way():\n def __init__(self,way):\n self.id = way.uid\n self.name = tag(way,'name')\n self.access = tag(way,'access')\n self.oneway = tag(way,'oneway')\n self.highway = tag(way,'highway')\n self.cycleway = tag(way,'cycleway')\n self.crossing = tag(way,'crossing')\n self.nodes = [ node.ref for node in way.nodes ]\n def update_nodes(self,locations):\n for (ref,location) in zip(self.nodes,locations):\n if (ref not in nodes):\n nodes[ref] = Node(ref,location)\n yield nodes[ref]\n def to_json(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'access': self.access,\n 'oneway': self.oneway,\n 'highway': self.highway,\n 'cycleway': self.cycleway,\n 'crossing': self.crossing,\n 'nodes': self.nodes\n }\n\nclass RouteHandler(osmium.SimpleHandler):\n\n def __init__(self,idx):\n osmium.SimpleHandler.__init__(self)\n self.idx = idx\n\n def way(self, way):\n way = Way(way)\n if (way.highway):\n self.highway(way)\n\n def highway(self, way):\n way.id = len(ways)\n ways.append(way)\n locations = [self.idx.get(node) for node in way.nodes]\n for node in way.update_nodes(locations):\n node.ways.append(way.id)\n\ndef main():\n\n idx = osmium.index.create_map('sparse_file_array,data/node-cache.nodecache')\n locations = osmium.NodeLocationsForWays(idx)\n locations.ignore_errors()\n\n osm_file = 'data/greater-london-latest.osm.pbf'\n\n node_reader = osmium.io.Reader(osm_file, osmium.osm.osm_entity_bits.NODE)\n osmium.apply(node_reader, locations)\n node_reader.close()\n\n handler = RouteHandler(idx)\n\n way_reader = osmium.io.Reader(osm_file, osmium.osm.osm_entity_bits.WAY)\n osmium.apply(way_reader, locations, handler)\n way_reader.close()\n\n dump(nodes,'data/nodes.json') \n dump(ways,'data/ways.json') 
\n","repo_name":"pedalpath/pedalpath-process","sub_path":"process/osm/import_osm.py","file_name":"import_osm.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7135279892","text":"from typing import List\n\ndef get_all_predecessors(word: str) -> List[str]:\n possible_predecessors = []\n for idx, ch in enumerate(word):\n candidate = word[:idx] + word[idx + 1:]\n if candidate:\n # print(f'the candidate: {candidate}')\n possible_predecessors.append(candidate)\n return possible_predecessors\n\nclass Solution:\n def longestStrChain(self, words: List[str]) -> int:\n \n ordered_words = sorted(words, key=lambda x: len(x))\n \n children_chain = {}\n longest_chain_count = 0\n \n for word in ordered_words:\n predecessors = get_all_predecessors(word)\n \n for predecessor in predecessors:\n if predecessor in children_chain:\n new_chain = children_chain[predecessor][:]\n new_chain.append(word)\n longest_chain_count = max(len(new_chain), longest_chain_count)\n if word in children_chain:\n children_chain[word] = new_chain if len(new_chain) > len(children_chain[word]) else children_chain[word]\n else:\n children_chain[word] = new_chain\n \n if not word in children_chain:\n children_chain[word] = [[word]]\n longest_chain_count = max(1, longest_chain_count)\n \n return longest_chain_count\n ","repo_name":"pykita/leetcode","sub_path":"longest_string_chain/nikitas.py","file_name":"nikitas.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"6716258650","text":"import numpy as np\nimport pandas as pd\nimport html5lib\nfrom flask import redirect, url_for, render_template, request, session, jsonify\nfrom sqlalchemy import desc, func\nfrom website import app, db\nfrom website.usage_class import Member\nfrom website.functions import update_stocks, is_number, count_profit\nfrom website.models import Stocks, Members, Orders, StockSchema\nfrom datetime import datetime\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\tprint(str(datetime.now().isoformat(' ', 'seconds')))\n\tmember = Members.query.filter_by(id=1).one()\n\tif(request.method=='POST'):\n\t\tif request.form[\"action\"] == \"My stocks\":\n\t\t\treturn redirect(url_for('my_stocks'))\n\t\tif request.form[\"action\"] == \"Home\":\n\t\t\tschema = StockSchema(many=True)\n\t\t\tstocks_list = schema.dump(Stocks.query.all())\n\t\t\tMember.stocks = stocks_list\n\t\t\tMember.table_ordered = 0\n\t\t\treturn render_template('website.html', stocks = stocks_list, money = member.money, bought_stocks=Member.bought_stocks)\n\telse:\n\t\tupdate_stocks()\n\t\tMember.bought_stocks.clear()\n\t\t\n\t\tschema = StockSchema(many=True)\n\t\ta = schema.dump(Stocks.query.all())\n\t\tfor j,item in enumerate(a):\n\t\t\tb = Orders.query.with_entities(func.sum(Orders.owned).label(\"mySum\")).filter_by(stock_id=item['id']).first()\n\t\t\tif(b.mySum):\n\t\t\t\tMember.bought_stocks.update({ item['id']:{'quantity': int(b.mySum), 'value': round(item['price']*int(b.mySum),3), 'profit': 0}})\n\t\t\t\tMember.bought_stocks[item['id']]['profit'] = count_profit(item['id'])\n\t\t\n\t\tstocks_list = schema.dump(Stocks.query.all())\n\t\tMember.stocks = stocks_list\n\t\tMember.table_ordered = 0\n\t\treturn render_template('website.html', stocks = stocks_list, money = member.money, bought_stocks=Member.bought_stocks)\n\n@app.route('/mystocks', methods=['GET','POST'])\ndef 
my_stocks():\n\tif(request.method=='POST'):\n\t\tif request.form[\"action\"] == \"Home\":\n\t\t\treturn redirect(url_for('index'))\n\telse:\n\t\tmember = Members.query.filter_by(id=1).one()\n\n\t\tschema = StockSchema(many=True)\n\t\ta = schema.dump(Stocks.query.all())\n\t\tfor j,item in enumerate(a):\n\t\t\tb = Orders.query.with_entities(func.sum(Orders.owned).label(\"mySum\")).filter_by(stock_id=item['id']).first()\n\t\t\tif(b.mySum):\n\t\t\t\tMember.bought_stocks.update({ item['id']:{'quantity': int(b.mySum), 'value': round(item['price']*int(b.mySum),3), 'profit': 0}})\n\t\t\t\tMember.bought_stocks[item['id']]['profit'] = count_profit(item['id'])\n\n\t\tstocks=[]\n\t\tfor value in Member.bought_stocks:\n\t\t\tstocks.append(Stocks.query.filter_by(id=value).first())\n\t\tschema = StockSchema(many=True)\n\t\tstocks_list = schema.dump(stocks)\n\t\tMember.stocks = stocks_list\n\t\tMember.table_ordered = 0\n\t\treturn render_template('website.html', stocks = stocks_list, money = member.money, bought_stocks=Member.bought_stocks)\n\n@app.route('/process', methods=['POST'])\ndef counter():\n\tmember = Members.query.filter_by(id=1).one()\n\tids = int(request.form['id'])\n\t\n\tif(is_number(request.form['quantity'])):\n\t\tinput = int(request.form['quantity'])\n\t\tif(input<0 or input>1000000):\n\t\t\tif ids in Member.bought_stocks:\n\t\t\t\treturn jsonify({'money': member.money, 'quantity' : Member.bought_stocks[ids]['quantity'], 'value' : Member.bought_stocks[ids]['value'], 'profit': Member.bought_stocks[ids]['profit']})\n\t\t\telse:\n\t\t\t\treturn jsonify({'money': member.money, 'quantity' : 0, 'value' : 0, 'profit': 0})\n\telse:\n\t\tif ids in Member.bought_stocks:\n\t\t\treturn jsonify({'money': member.money, 'quantity' : Member.bought_stocks[ids]['quantity'], 'value' : Member.bought_stocks[ids]['value'], 'profit': Member.bought_stocks[ids]['profit']})\n\t\telse:\n\t\t\treturn jsonify({'money': member.money, 'quantity' : 0, 'value' : 0, 'profit': 0})\n\t\n\tupdate_stocks()\n\tstock = Stocks.query.filter_by(id=ids).first()\n\tprice = stock.price\n\n\tquantity = 0\n\ta = Orders.query.with_entities(func.sum(Orders.owned).label(\"mySum\")).filter_by(stock_id=ids).one()\n\tif a.mySum:\n\t\tquantity = int(a.mySum)\n\telse:\n\t\tquantity = 0\n\n\tmaximum=int(member.money/price)\n\tif(int(request.form['buy_sell'])==0):\n\t\tif(maximum!=0):\n\t\t\tMember.bought_stocks.update({ids:{'quantity': 0, 'value': 0, 'profit': 0}})\n\t\t\tif(input>maximum):\n\t\t\t\tinput = maximum\n\t\telse:\n\t\t\tif ids in Member.bought_stocks:\n\t\t\t\treturn jsonify({'money': member.money, 'quantity' : Member.bought_stocks[ids]['quantity'], 'value' : Member.bought_stocks[ids]['value'], 'profit': Member.bought_stocks[ids]['profit']})\n\t\t\telse:\n\t\t\t\treturn jsonify({'money': member.money, 'quantity' : 0, 'value' : 0, 'profit': 0})\n\t\t\n\t\tquantity = quantity + input\n\t\tMember.bought_stocks[ids]['value'] = round(price*quantity,3)\n\t\tMember.bought_stocks[ids]['quantity'] = Member.bought_stocks[ids]['quantity'] + input\n\t\tvalue = round(price*quantity,3)\n\t\tmember.money = round(member.money - price*input,4)\n\t\tnew = Orders(1,input,input,ids,price,False,str(datetime.now().isoformat(' ', 'seconds')))\n\t\tdb.session.add(new)\n\t\tdb.session.commit()\n\t\treturn jsonify({'money': member.money, 'quantity' : quantity, 'value' : value, 'profit': count_profit(ids)})\n\t\n\telse:\n\t\tif(input<quantity):\n\t\t\tquantity = quantity - input\n\t\t\tMember.bought_stocks[ids]['quantity'] = Member.bought_stocks[ids]['quantity'] - input\n\t\t\tvalue = round(price*quantity,3)\n\t\t\tmember.money = round(member.money + price*input,4)\n\t\t\tnew = Orders(1,input,0,ids,price,True)\n\t\t\tdb.session.add(new)\n\t\t\tleft = input\n\t\t\twhile(left!=0):\n\t\t\t\tupd = Orders.query.filter(Orders.owned>0).filter_by(stock_id=ids).order_by('order_id').first()\n\t\t\t\tif(left>=upd.owned):\n\t\t\t\t\tleft = left - 
upd.owned\n\t\t\t\t\tupd.owned = 0\n\t\t\t\telse:\n\t\t\t\t\tupd.owned = upd.owned - left\n\t\t\t\t\tleft = 0\n\t\t\t\tdb.session.commit()\n\t\t\tMember.bought_stocks[ids]['value']=value\n\t\t\treturn jsonify({'money': member.money, 'quantity' : quantity, 'value' : value, 'profit': count_profit(ids)})\n\n\t\telse:\n\t\t\tif(quantity!=0):\n\t\t\t\tinput=quantity\n\t\t\t\ttozero = Orders.query.filter(Orders.owned>0).filter_by(stock_id=ids).order_by('order_id').all()\n\t\t\t\tfor item in tozero:\n\t\t\t\t\titem.owned = 0\n\t\t\t\tquantity = 0\n\t\t\t\tvalue = 0\n\t\t\t\tprofit = 0\n\t\t\t\tmember.money = round(member.money + price*input,4)\n\t\t\t\tdel Member.bought_stocks[ids]\n\t\t\t\tnew = Orders(1,input,0,ids,price,True)\n\t\t\t\tdb.session.add(new)\n\t\t\t\tdb.session.commit()\n\t\t\t\treturn jsonify({'money': member.money, 'quantity' : quantity, 'value' : value, 'profit': profit})\n\t\t\telse:\n\t\t\t\treturn jsonify({'money': member.money, 'quantity' : 0, 'value' : 0, 'profit': 0})\n\n\n@app.route('/charts')\ndef charts():\n\tmember = Members.query.filter_by(id=1).one()\n\treturn render_template('charts.html', money = member.money)\n\n@app.route('/_upd_charts')\ndef upd_charts():\n\tschema = StockSchema(many=True)\n\ta = schema.dump(Stocks.query.all())\n\tfor j,item in enumerate(a):\n\t\tb = Orders.query.with_entities(func.sum(Orders.owned).label(\"mySum\")).filter_by(stock_id=item['id']).first()\n\t\tif(b.mySum):\n\t\t\tMember.bought_stocks.update({ item['id']:{'quantity': int(b.mySum), 'value': round(item['price']*int(b.mySum),3), 'profit': 0}})\n\t\t\tMember.bought_stocks[item['id']]['profit'] = count_profit(item['id'])\n\t\n\tstocks=[]\n\tfor value in Member.bought_stocks:\n\t\tstocks.append(Stocks.query.filter_by(id=value).first())\n\tschema = StockSchema(many=True)\n\tstocks_list = schema.dump(stocks)\n\tMember.stocks = stocks_list\n\tlabels = []\n\tvalues = []\n\tfor stock in stocks_list:\n\t\tlabels.append(stock['name'])\n\tdf= pd.DataFrame(Member.bought_stocks)\n\tvalues = df.loc['value'].sort_values().tolist()\n\t\n\treturn jsonify({'labels': labels, 'values': values})\n\n\n@app.route('/_update', methods = ['POST'])\ndef update():\n\tupdate_stocks()\n\tif(Member.bought_stocks):\n\t\tfor i,item in enumerate(Member.stocks):\n\t\t\tif item['id'] in Member.bought_stocks:\n\t\t\t\tMember.bought_stocks[item['id']]['value']=round(Member.bought_stocks[item['id']]['quantity']*Member.stocks[i]['price'],3)\n\t\t\t\tMember.bought_stocks[item['id']]['profit']=count_profit(item['id'])\n\treturn jsonify(stocks = Member.stocks, bought_stocks=Member.bought_stocks )\n\n@app.route('/order_table', methods = ['POST'])\ndef order():\n\tif(request.method=='POST'):\n\t\tnumber = request.form['name']\n\t\tMember.table_ordered = int(number)\n\t\tif(Member.table_ordered==0):\n\t\t\tMember.stocks = Stocks.query.all()\n\t\telif(Member.table_ordered==1):\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['name'])\n\t\t\t\tMember.top_down = False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['name'], reverse=True)\n\t\t\t\tMember.top_down = True\n\t\telif(Member.table_ordered==2):\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['price'])\n\t\t\t\tMember.top_down=False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['price'], reverse=True)\n\t\t\t\tMember.top_down = 
True\n\t\telif(Member.table_ordered==3):\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['change'])\n\t\t\t\tMember.top_down=False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['change'], reverse=True)\n\t\t\t\tMember.top_down = True\n\t\telif(Member.table_ordered==4):\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['perc'])\n\t\t\t\tMember.top_down=False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['perc'], reverse=True)\n\t\t\t\tMember.top_down = True\n\t\telif(Member.table_ordered==5):\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['opening'])\n\t\t\t\tMember.top_down=False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['opening'], reverse=True)\n\t\t\t\tMember.top_down = True\n\t\telif(Member.table_ordered==6):\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['stock_max'])\n\t\t\t\tMember.top_down=False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['stock_max'], reverse=True)\n\t\t\t\tMember.top_down = True\n\t\telse:\n\t\t\tif(Member.top_down==True):\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['stock_min'])\n\t\t\t\tMember.top_down=False\n\t\t\telse:\n\t\t\t\tMember.stocks = sorted(Member.stocks, key=lambda k: k['stock_min'], reverse=True)\n\t\t\t\tMember.top_down = True\n\t\tschema = StockSchema(many=True)\n\t\tstocks_list = schema.dump(Member.stocks)\n\t\treturn jsonify(stocks = stocks_list, bought_stocks = Member.bought_stocks)","repo_name":"Grzegorz-T/Python_web","sub_path":"website/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":10043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5892163789","text":"import pandas as pd\nimport numpy as np\nimport re \n\n# import necessary libraries\nimport os\nimport glob\nimport shutil\n\n# Import data into DB\nfrom sqlalchemy import create_engine, false\nfrom sqlalchemy.sql import text as sa_text\n\n# Database Connection\nconn_file = open(os.getcwd() +\"\\db_connection.ini\", mode=\"r\")\nconn_string = conn_file.read()\nconn_file.close()\n\n# Open database connection\ndb = create_engine(conn_string)\n\nsource_pdf_directory = \"\"\ntarget_pdf_directory = \"\"\ncsv_directory = \"\"\n\n# Read files directory\n# config_file = open(os.getcwd() +\"\\config.ini\", mode=\"r\")\n# config_params = config_file.read()\n# for line in config_params:\n# \tprint(line)\n# \tparam, value = line.split(\":\")\n# \tif (param == \"pdf_source_dir\"):\n# \t\tsource_pdf_directory = value.strip()\n# \tif (param == \"pdf_target_dir\"):\n# \t target_pdf_directory = value.strip()\n\t# if (param == \"pdf_target_dir\"):\n\t# csv_directory = value.strip()\n\ndef validate_email(email): \n if re.match(r\"[^@]+@[^@]+\\.[^@]+\", email): \n return True \n return False \n\ncols = [\"last_name\",\"first_name\",\"email_address\",\"dt_date\",\"year\"]\n\n# 2021\ndf_2021a = pd.DataFrame(columns=cols)\nindx = 0\n\ninv_data_2021a =[]\n\n#2021_Immunitaetsbescheinigungen.csv\nwith open(\"D:\\python-project\\csv_merger\\csv\\\\2021_Immunitaetsbescheinigungen.csv\", encoding=\"utf8\") as fp:\n Lines = fp.readlines()\n for line in Lines[1:]:\n words = line.split(\";\")\n if (len(words) < 4 or len(words) > 4):\n inv_data_2021a.append(line)\n else:\n dt_date=words[0].strip()\n 
last_name=words[1].strip()\n first_name=words[2].strip()\n email_address=words[3].strip()\n year = \"2021\"\n if (len(last_name)>0 and len(first_name)>0 and validate_email(email_address)== True):\n new_row = {'last_name': last_name, 'first_name': first_name, 'email_address':email_address, 'dt_date': dt_date, \"year\": year}\n df_2021a.loc[indx] = new_row\n indx = indx + 1\n #print(\"date=\",dt_date,\"last_name=\",last_name,\"first_name=\",first_name,\"email_address=\",email_address)\n else:\n inv_data_2021a.append(line)\n \n print(\"Sample Data: 2021_Immunitaetsbescheinigungen.csv\")\n print(df_2021a.head())\n\n\n#2021_Unterstuetzer.csv\ndf_2021b = pd.DataFrame(columns=cols)\ninv_data_2021b = []\nindx = 0\n\nwith open(\"D:\\python-project\\csv_merger\\csv\\\\2021_Unterstuetzer.csv\", encoding=\"utf8\") as fp:\n Lines = fp.readlines()\n for line in Lines[1:]:\n words = line.split(\";\")\n if (len(words) < 3 or len(words) > 3):\n inv_data_2021b.append(line)\n else:\n last_name=words[0].strip()\n first_name=words[1].strip()\n dt_date=words[2].strip()\n email_address=\"\"\n year = \"2021\"\n if (len(last_name)>0 and len(first_name)>0 ):\n new_row = {'last_name': last_name, 'first_name': first_name, 'email_address':email_address, 'dt_date': dt_date, \"year\": year}\n df_2021b.loc[indx] = new_row\n indx = indx + 1\n else:\n inv_data_2021b.append(line) \n\n print(\"Sample Data: 2021_Unterstuetzer.csv\")\n print(df_2021b.head())\n \n \n# 2022\ndf_2022a = pd.DataFrame(columns=cols)\nindx = 0\n\ninv_data_2022a =[]\n\n#2022_Immunitaetsbescheinigungen.csv\nwith open(\"D:\\python-project\\csv_merger\\csv\\\\2022_Immunitaetsbescheinigungen.csv\", encoding=\"utf8\") as fp:\n Lines = fp.readlines()\n for line in Lines[1:]:\n words = line.split(\";\")\n if (len(words) < 4 or len(words) > 4):\n inv_data_2022a.append(line)\n else:\n dt_date=words[0].strip()\n last_name=words[1].strip()\n first_name=words[2].strip()\n email_address=words[3].strip()\n year = \"2022\"\n if (len(last_name)>0 and len(first_name)>0 and validate_email(email_address)== True):\n new_row = {'last_name': last_name, 'first_name': first_name, 'email_address':email_address, 'dt_date': dt_date, \"year\": year}\n df_2022a.loc[indx] = new_row\n indx = indx + 1\n #print(\"date=\",dt_date,\"last_name=\",last_name,\"first_name=\",first_name,\"email_address=\",email_address)\n else:\n inv_data_2022a.append(line)\n \n print(\"Sample Data: 2022_Immunitaetsbescheinigungen.csv\")\n print(df_2022a.head())\n\n\n#2022_Unterstuetzer.csv\ndf_2022b = pd.DataFrame(columns=cols)\ninv_data_2022b = []\nindx = 0\n\nwith open(\"D:\\python-project\\csv_merger\\csv\\\\2022_Unterstuetzer.csv\", encoding=\"utf8\") as fp:\n Lines = fp.readlines()\n for line in Lines[1:]:\n words = line.split(\";\")\n if (len(words) < 3 or len(words) > 3):\n inv_data_2022b.append(line)\n else:\n last_name=words[0].strip()\n first_name=words[1].strip()\n dt_date=words[2].strip()\n email_address=\"\"\n year = \"2022\"\n if (len(last_name)>0 and len(first_name)>0 ):\n new_row = {'last_name': last_name, 'first_name': first_name, 'email_address':email_address, 'dt_date': dt_date, \"year\": year}\n df_2022b.loc[indx] = new_row\n indx = indx + 1\n else:\n inv_data_2022b.append(line) \n\n print(\"Sample Data: 2022_Unterstuetzer.csv\")\n print(df_2022b.head())\n\n\n#data_type={'last_name': create_engine.types.UnicodeText(), 'first_name': create_engine.types.UnicodeText(), 'email': create_engine.types.UnicodeText(), 'dt_date': create_engine.types.UnicodeText(), 'year': 
create_engine.types.UnicodeText()}\n\n#Load data into staging\nprint(\"Loading data ..............................\")\ntry:\n    # Open Connection\n    conn = db.connect()\n    \n    # Load data into staging\n    df_2021a.to_sql(name = \"2021a\", con=conn, if_exists='replace', index=False)\n    df_2021b.to_sql(name = \"2021b\", con=conn, if_exists='replace', index=False)\n    df_2022a.to_sql(name = \"2022a\", con=conn, if_exists='replace', index=False)\n    df_2022b.to_sql(name = \"2022b\", con=conn, if_exists='replace', index=False)\n\n    conn.autocommit = True\n    conn.close()\n\nexcept Exception as e:\n    print(str(e))\n    conn.close()\n\n    # print(\"printing invalid records\")\n    # for inv_line in inv_data_2021b:\n    #     print(inv_line)","repo_name":"obaidulsarker/csv_merger","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12921357058","text":"import math\nimport cv2\nimport numpy as np\n\n\ndef draw_points(img, pts1, pts2):\n    \"\"\"\n    stereo omnidirectional image\n    :param img:\n    :param pts1:\n    :param pts2:\n    :return:\n    \"\"\"\n\n    width = img.shape[1]\n    height = img.shape[0]\n    br = height / 2\n    for i in range(pts1.shape[0]):\n        point1 = pts1[i, :]\n        ep1 = equirectangular(point1) + 1.0\n        ep1[0] *= width / 2\n        ep1[1] *= br / 2\n\n        point2 = pts2[i, :]\n        ep2 = equirectangular(point2) + 1.0\n        ep2[0] *= width / 2\n        ep2[1] *= br / 2\n        ep2[1] += br\n\n        cv2.line(img, ep2.astype(np.uint16), ep1.astype(np.uint16), [0, 255, 255])\n        cv2.drawMarker(img, ep1.astype(np.uint16), [255, 0, 0])\n        cv2.drawMarker(img, ep2.astype(np.uint16), [0, 255, 0])\n\n    cv2.line(img, (0, int(br)), (width, int(br)), (255, 255, 255))\n\n\ndef equirectangular(src):\n    \"\"\"\n    transform 3d-point to 2d-equirectangular\n    :param src: 3d point\n    :return: 2d point on to equirectangular\n    \"\"\"\n    longitude = math.atan2(src[1], src[0])\n    latitude = math.atan2(src[2], math.sqrt(src[0] ** 2 + src[1] ** 2))\n\n    x = longitude / math.pi\n    y = (latitude * 2) / math.pi\n\n    ret = np.zeros(2)\n    ret[0] = x\n    ret[1] = y\n    return ret\n\n\ndef gen_points(cp):\n    \"\"\"\n    generate random points (-10.0 to 10.0)\n    :param cp: points quantity\n    :return: points\n    \"\"\"\n    np.random.seed(seed=32)\n    points = np.random.rand(cp, 3)\n    points = (points - 0.5) * 20.0\n    return points\n\n\ndef rotation_mat(rot):\n    \"\"\"\n    generate rotation matrix\n    :param rot:\n    :return: rotation matrix\n    \"\"\"\n    px = rot[0]\n    py = rot[1]\n    pz = rot[2]\n\n    x = np.array([[1, 0, 0],\n                  [0, np.cos(px), np.sin(px)],\n                  [0, -np.sin(px), np.cos(px)]])\n    y = np.array([[np.cos(py), 0, -np.sin(py)],\n                  [0, 1, 0],\n                  [np.sin(py), 0, np.cos(py)]])\n    z = np.array([[np.cos(pz), np.sin(pz), 0],\n                  [-np.sin(pz), np.cos(pz), 0],\n                  [0, 0, 1]])\n    return z @ y @ x\n\n\ndef transform(src, pos, rot):\n    \"\"\"\n    transform point\n    :param src:\n    :param pos:\n    :param rot:\n    :return: transformed point\n    \"\"\"\n    tmp = src.copy()\n\n    rot_mat = rotation_mat(rot)\n    tmp += pos\n    tmp = tmp @ rot_mat\n\n    return tmp\n\n\ndef projection(pos, rot, src):\n    \"\"\"\n    project a points onto a unit-sphere\n    :param pos: sphere center\n    :param rot: sphere rotation\n    :param src: source points\n    :return: projected points\n    \"\"\"\n    pts = src.copy()\n    points = np.zeros((pts.shape[0], 3))  # output array\n    for i in range(pts.shape[0]):\n        tmp = transform(pts, pos, rot)\n        points[i, :] = tmp[i, :] / np.linalg.norm(tmp[i, :])  # normalize\n    return points\n\n\ndef skew_symmetric(src):\n    return np.array([[0, -src[2], 
src[1]],\n [src[2], 0, -src[0]],\n [-src[1], src[0], 0]])\n\n\ndef find_corners(img, pattern_size):\n ret, corner = cv2.findChessboardCorners(img, pattern_size)\n\n pts = np.zeros((corner.shape[0], corner.shape[2]))\n pts[:, 0] = corner[:, 0, 0]\n pts[:, 1] = corner[:, 0, 1]\n\n corner = normalize_point(img, pts)\n return corner\n\n\ndef draw_point_on_img(img, corners):\n xg = img.shape[1] / 2\n yg = img.shape[0] / 2\n\n color = img.copy()\n for pt in corners:\n ip = (int((pt[0] + 1.0) * xg), int((pt[1] + 1.0) * yg))\n cv2.circle(color, ip, 20, [0, 0, 255], thickness=10)\n return color\n\n\ndef equi_to_xyz(src):\n longitude = src[0] * math.pi\n latitude = (src[1] * math.pi) / 2.0\n x = math.cos(latitude) * math.cos(longitude)\n y = math.cos(latitude) * math.sin(longitude)\n z = math.sin(latitude)\n return np.array([x, y, z])\n\n\ndef normalize_point(img, pts):\n shape = img.shape\n npts = pts.copy()\n\n npts[:, 0] = npts[:, 0] / (shape[1] / 2) - 1.0\n npts[:, 1] = npts[:, 1] / (shape[0] / 2) - 1.0\n return npts\n\n\ndef equi_to_xyz_array(corners):\n pts = np.zeros((corners.shape[0], 3))\n for i in range(corners.shape[0]):\n pts[i] = equi_to_xyz(corners[i])\n\n return pts\n\n\ndef find_essential_mat(pts1, pts2):\n emat = np.zeros((pts1.shape[0], 9))\n\n for i in range(pts1.shape[0]):\n tmp = np.array([pts1[i, 0] * pts2[i, 0], pts1[i, 0] * pts2[i, 1], pts1[i, 0] * pts2[i, 2],\n pts1[i, 1] * pts2[i, 0], pts1[i, 1] * pts2[i, 1], pts1[i, 1] * pts2[i, 2],\n pts1[i, 2] * pts2[i, 0], pts1[i, 2] * pts2[i, 1], pts1[i, 2] * pts2[i, 2]])\n emat[i, :] = tmp\n\n u, s, vh = np.linalg.svd(emat)\n vec = vh[8, :].reshape(3, 3)\n\n ue, se, vhe = np.linalg.svd(vec)\n ret = ue @ np.diag([1, 1, 0]) @ vhe\n # print(ret)\n # print(emat @ ret.reshape(9, -1))\n\n return ret\n\n\ndef decompose_essential_mat(e, pts, pts2):\n eet = e @ e.transpose()\n u, s, vh = np.linalg.svd(eet)\n t1 = vh[2, :]\n\n sst1 = skew_symmetric(t1)\n sum_ab = 0\n for i in range(pts.shape[0]):\n a = sst1 @ pts[i]\n b = e @ pts2[i]\n sum_ab += a.transpose() @ b\n\n t2 = -t1\n if sum_ab >= 0:\n e1 = e\n else:\n e1 = -e\n\n k = -sst1 @ e1\n uk, sk, vhk = np.linalg.svd(k)\n r = uk @ np.diag([1, 1, np.linalg.det(uk @ vhk)]) @ vhk\n\n sum_ab = 0\n for i in range(pts.shape[0]):\n a = np.cross(r.transpose() @ t1, r.transpose() @ pts[i])\n b = np.cross(r.transpose() @ pts[i], pts2[i])\n sum_ab += a.transpose() @ b\n\n if sum_ab > 0:\n t = t1\n else:\n t = t2\n\n return r, t\n\n\ndef rotate_equi(img_size, rot):\n width = img_size[0]\n height = img_size[1]\n\n x, y = np.meshgrid(np.linspace(-1.0, 1.0, width),\n np.linspace(-1.0, 1.0, height))\n\n longitude = x * math.pi\n latitude = (y * math.pi) / 2\n\n nx = np.cos(latitude) * np.cos(longitude)\n ny = np.cos(latitude) * np.sin(longitude)\n nz = np.sin(latitude)\n\n rotm = np.zeros([nx.shape[0], nx.shape[1], 3])\n mix = np.dstack([np.dstack([nx, ny]), nz])\n for i in range(height):\n for j in range(width):\n rotm[i, j, :] = rot @ mix[i, j, :]\n\n nxr = rotm[:, :, 0]\n nyr = rotm[:, :, 1]\n nzr = rotm[:, :, 2]\n\n longitude_r = np.arctan2(nyr, nxr)\n latitude_r = np.arctan2(nzr, np.sqrt(nxr * nxr + nyr * nyr))\n ex = longitude_r / math.pi\n ey = 2 * latitude_r / math.pi\n\n ex = (ex + 1.0) * (width / 2.0)\n ey = (ey + 1.0) * (height / 2.0)\n\n return ex.astype(np.float32), ey.astype(np.float32)\n\n\ndef direction_to_rotate(direction):\n xaxis = np.cross(direction, np.array([0, 1, 0]))\n yaxis = np.cross(direction, xaxis)\n\n r = np.array([\n [xaxis[0], xaxis[1], xaxis[2]],\n [yaxis[0], yaxis[1], 
yaxis[2]],\n [direction[0], direction[1], direction[2]]\n ])\n\n # return utils.rotation_mat([0, np.radians(90.0), np.radians(180.0)]) @ r\n return r\n\n\ndef load_rt(path):\n print(\"load calibration data\")\n cal = np.load(path)\n r = cal[\"arr_0\"]\n t = cal[\"arr_1\"]\n return r, t\n\n\ndef fisheye_to_equi(img_size, fisheye_size, aperture, polar=True):\n map_x = np.zeros(img_size)\n map_y = np.zeros(img_size)\n\n pol = 1\n if not polar:\n pol = -1\n\n for i in range(img_size[1]):\n for j in range(img_size[0]):\n\n # normalize coordinate\n x = (i - (img_size[1] / 2)) / (img_size[1] / 2)\n y = (j - (img_size[0] / 2)) / (img_size[0] / 2)\n\n # equirectangular to 3d vector\n longitude = pol * x * math.pi\n latitude = pol * y * math.pi / 2\n px = math.cos(latitude) * math.cos(longitude)\n py = math.cos(latitude) * math.sin(longitude)\n pz = math.sin(latitude)\n\n # 3d vector to 2d fisheye\n r = 2 * math.atan2(math.sqrt((px ** 2) + (pz ** 2)), py) / aperture\n theta = math.atan2(px, pz)\n tx = r * math.cos(theta)\n ty = r * math.sin(theta)\n\n # normalized coordinate to image space\n tmp = fisheye_size / 2\n map_x[j, i] = (tx * tmp) + tmp\n map_y[j, i] = (ty * tmp) + tmp\n\n return map_x.astype(np.float32), map_y.astype(np.float32)\n","repo_name":"3ccd/omni-calib-py","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25487726944","text":"import unittest\nfrom operator import not_\n\nfrom loadsbroker.util import retry\n\n\nclass TestRetry(unittest.TestCase):\n\n def test_retry_on_result(self):\n attempts = []\n\n @retry(attempts=4, on_result=not_)\n def foo():\n attempts.append(None)\n return len(attempts) == 3\n\n self.assertTrue(foo())\n self.assertEqual(len(attempts), 3)\n\n def test_retry_on_result_propagate(self):\n attempts = []\n\n @retry(attempts=3, on_result=not_)\n def foo():\n attempts.append(None)\n return False\n\n self.assertFalse(foo())\n self.assertEqual(len(attempts), 3)\n\n def test_retry_on_exception(self):\n attempts = []\n exc = ValueError\n\n @retry(attempts=2,\n on_exception=lambda e: isinstance(e, ValueError))\n def foo():\n attempts.append(None)\n l = len(attempts)\n if l == 1:\n raise exc\n elif l == 2:\n return \"foo\"\n assert False\n\n self.assertEqual(foo(), \"foo\")\n self.assertEqual(len(attempts), 2)\n\n attempts.clear()\n exc = ZeroDivisionError\n with self.assertRaises(ZeroDivisionError):\n foo()\n self.assertEqual(len(attempts), 1)\n\n def test_retry_on_exception_propagate(self):\n attempts = []\n\n @retry(attempts=4,\n on_exception=lambda e: isinstance(e, ZeroDivisionError))\n def foo():\n attempts.append(None)\n 1/0\n\n with self.assertRaises(ZeroDivisionError):\n foo()\n self.assertEqual(len(attempts), 4)\n","repo_name":"loads/loads-broker","sub_path":"loadsbroker/tests/test_units/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} +{"seq_id":"41914088282","text":"import firebase_admin\n\nfrom firebase_admin import credentials\n\nfrom firebase_admin import firestore\n\nimport csv\n\nfrom google.cloud import storage\n\nimport os\n\nimport pandas as pd\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'gcp.json'\n\ncred = credentials.Certificate(\"firestore.json\")\n\nfirebase_admin.initialize_app(cred)\n\n\n\ndb = firestore.client()\n\ndef userLoginVisualization(request):\n\n lst = 
[]\n\n    docs = db.collection(u'customerFeedback').stream()\n\n\n\n    for doc in docs:\n\n        lst.append(doc.to_dict())\n\n    df = pd.DataFrame(lst)\n\n    bucketName = \"csci5410_visualization\"\n\n    fileName = \"userloginDetails.csv\"\n\n    client = storage.Client()\n\n    bucket = client.bucket(bucketName) \n\n    csvFile = df.to_csv(index=False)\n\n    blob = bucket.blob(fileName)\n\n    with blob.open(\"wt\") as f:\n\n        f.write(csvFile)\n\n    return \"succeeded\"","repo_name":"parth-gondaliya/B-B-Serverless","sub_path":"src/cloudFunctions/visualizationModule/UserLogin.py","file_name":"UserLogin.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30417127818","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\"\"\"\nProblem: https://leetcode-cn.com/problems/top-k-frequent-elements/submissions/\n\nEither bucket sort or heap sort works\n\nAuthors: fanzijian\nDate: 2020-04-16 23:50:56\n\n\"\"\"\nclass Solution(object):\n    def topKFrequent(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        N = len(nums)\n        nums_map = {}\n        for n in nums:\n            if n not in nums_map:\n                nums_map[n] = 0\n            nums_map[n] += 1\n\n        bulket = [[] for i in range(N+1)]\n        for n in nums_map:\n            bulket[nums_map[n]].append(n)\n        # print bulket\n        rst = []\n        for i in xrange(0, N+1, 1):\n            idx = N - i\n            # print idx\n            rst.extend(bulket[idx])\n            if len(rst) >= k:\n                return rst[:k]\n        return []\n","repo_name":"fanzijian/leet-code-practice","sub_path":"src/code/code_347.py","file_name":"code_347.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"33790000456","text":"\"\"\"\nPierre-Charles Dussault\nMarch 17, 2021.\n\nRoll 2 D6 dice, calculating the sum of their values. 
Display the frequencies of\neach resulting sum with matplotlib.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom die import Die\n\n\ndef main():\n\n    die_1 = Die()\n    die_2 = Die()\n    num_of_rolls = 50000\n    max_result = die_1.num_sides + die_2.num_sides\n    results = [die_1.roll() + die_2.roll() for each_roll in\n               range(num_of_rolls)]\n\n    # For each possible result\n    x = [i for i in range(2, max_result+1)]\n    # Calculate its frequency\n    frequencies = [results.count(value) for value in x]\n\n    fig, ax = plt.subplots()\n    ax.bar(x, frequencies)\n    ax.set_title('Frequency of Sum Results when Throwing Two D6 Dice '\n                 '50,000 Times')\n    ax.set_xlabel('Result')\n    ax.set_ylabel('Frequency')\n    plt.savefig('d6_d6.png')\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"PC-DUSS/PCC_Exercises","sub_path":"Data_analysis/die/dice_mpl.py","file_name":"dice_mpl.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33353386409","text":"#Program that reads 5 values and puts them in ascending order without using sorted\r\n#It must report the position in the list where the value was added\r\nlista = [] # Start with an empty list to hold the 5 values entered by the user\r\nfor c in range(0, 5): # I want 5 values, so the range is 0 to 5, since the 5 does not count\r\n    n = int(input('Enter a value: ')) # Ask the user to type a value on each loop of the for\r\n    if c == 0 or n > lista[-1]: # Check with an if on each pass whether n should be added at the end of the list.\r\n        # The first value typed, at c == 0, always goes to the end of the list, since it is the first one.\r\n        # After that, a value only goes to the end if n is greater than the value at position -1, which is the last in the list\r\n        lista.append(n)\r\n        print('Value added at the end of the list...')\r\n    else: # If the first if is not true, I need to check which position n should take\r\n        pos = 0 # start at position 0\r\n        while pos < len(lista): # And I need to go up to position 4 (walk from 0 to 4 within the list)\r\n            if n <= lista[pos]: # Check position 0 first, and add +1 to the position whenever the if is not\r\n                # true at the previous position\r\n                lista.insert(pos, n) #When the position where n must be inserted is found,\r\n                # it goes to position pos and inserts the value n\r\n                print(f'Value added at position {pos}') # And it tells me the position where the value n went in\r\n                break # After inserting, it stops the while and goes back to the for to ask for another value n\r\n                # and check the position again\r\n            pos += 1 # When the if is not true, that is, the position does not hold a value greater than or equal to n,\r\n            # I add +1 to search the next position, and so on up to position 4\r\nprint('~' * 30)\r\nprint(f'The values entered, in ascending order, were {lista}')","repo_name":"pennapatrick/Mundo-03-Python","sub_path":"Exercicios/ex080.py","file_name":"ex080.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22339746311","text":"import json\nimport csv\nfrom pathlib import Path\n\nroot_path = Path(__file__).parent / \"..\\\\..\\\\..\\\\\"\n\nfrom pathlib import Path\n\ndef saveInCSV(resultArray):\n    repositories_file = str(root_path) + '\\\\repositories.csv'\n    input_in_path = Path(__file__).parent / repositories_file\n    data_file = open(str(input_in_path), 'w', newline='', encoding='utf-8')\n    csv_writer = csv.writer(data_file)\n    count = 0\n    for rep in 
resultArray:\n\n rep['totalPullRequests'] = None\n\n if count == 0:\n # Writing headers of CSV file\n header = rep.keys()\n csv_writer.writerow(header)\n count += 1\n \n rep['owner'] = rep['owner']['login']\n rep['mergedPullRequests'] = rep['mergedPullRequests']['totalCount']\n rep['closedPullRequests'] = rep['closedPullRequests']['totalCount']\n rep['totalPullRequests'] = str(int(rep['mergedPullRequests'])+int(rep['closedPullRequests']))\n\n # Writing data of CSV file\n csv_writer.writerow(rep.values())\n \n data_file.close()\n\ndef saveJsonResult(resultArray):\n repositories_file = str(root_path) + '\\\\repositories.json'\n input_in_path = Path(__file__).parent / repositories_file\n with open(str(input_in_path), 'w', encoding='utf-8') as f:\n json.dump(resultArray, f, ensure_ascii=False, indent=4)\n\n\ndef saveRepositoriesInCSV(resultArray):\n saveJsonResult(resultArray)\n saveInCSV(resultArray)\n","repo_name":"Lucas-Angelo/SoftwareExperimentationLab","sub_path":"Lab03/src/utils/csv/saveRepositoriesCSV.py","file_name":"saveRepositoriesCSV.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24200317157","text":"\n\n#just testing stuff here\n\nimport time\nfrom ppadb.client import Client as AdbClient\n\n'''\n\nsetCameraControlFocus(camera:ATEMConstant, focus:int) -> None\n Args:\n camera: see ATEMCameras\n focus (int): 0-65535\n \n \nsetCameraControlVideomode(camera:ATEMConstant, fps:int, resolution:int, interlaced:int)\nsetCameraControlWhiteBalance\n\n\n'''\n\nclient = AdbClient(host=\"127.0.0.1\", port=5037) # Default is \"127.0.0.1\" and 5037\ndevices = client.devices()\n\nif len(devices) == 0:\n print('No devices')\n quit()\n\ndevice = devices[0]\n\nprint(f'Connected to {device}')\n\ndevice.shell('input touchscreen tap 370 1150')\ntime.sleep(1)\n\ndevice.shell('input tap 27')","repo_name":"CoponatRecords/AtemMini","sub_path":"Dji.py","file_name":"Dji.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25109381137","text":"#!/usr/bin/python\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'status': ['preview'],\n                    'supported_by': 'community',\n                    'version': '1.0'}\n\nDOCUMENTATION = '''\n---\nmodule: cloudformation_facts\nshort_description: Obtain facts about an AWS CloudFormation stack\ndescription:\n    - Gets information about an AWS CloudFormation stack\nrequirements:\n  - boto3 >= 1.0.0\n  - python >= 2.6\nversion_added: \"2.2\"\nauthor: Justin Menga (@jmenga)\noptions:\n    stack_name:\n        description:\n          - The name or id of the CloudFormation stack\n        required: true\n    all_facts:\n        description:\n            - Get all stack information for the stack\n        required: false\n        default: false\n    stack_events:\n        description:\n            - Get stack events for the stack\n        required: false\n        default: false\n    stack_template:\n        description:\n            - Get stack template body for the stack\n        required: false\n        default: false\n    stack_resources:\n        description:\n            - Get stack resources for the stack\n        required: false\n        default: false\n    stack_policy:\n        description:\n            - Get stack policy for the stack\n        required: false\n        default: false\nextends_documentation_fragment:\n    - aws\n    - ec2\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Get summary information about a stack\n- cloudformation_facts:\n    stack_name: my-cloudformation-stack\n\n# Facts are published in ansible_facts['cloudformation'][<stack_name>]\n- debug:\n    msg: '{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}'\n\n# Get all stack information about a stack\n- cloudformation_facts:\n    stack_name: my-cloudformation-stack\n    all_facts: true\n\n# Get stack resource and stack policy information about a stack\n- cloudformation_facts:\n    stack_name: my-cloudformation-stack\n    stack_resources: true\n    stack_policy: true\n\n# Example dictionary outputs for stack_outputs, stack_parameters and stack_resources:\n\"stack_outputs\": {\n    \"ApplicationDatabaseName\": \"dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com\",\n    ...\n},\n\"stack_parameters\": {\n    \"DatabaseEngine\": \"mysql\",\n    \"DatabasePassword\": \"****\",\n    ...\n},\n\"stack_resources\": {\n    \"AutoscalingGroup\": \"dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7\",\n    \"AutoscalingSecurityGroup\": \"sg-abcd1234\",\n    \"ApplicationDatabase\": \"dazvlpr01xj55a\",\n    \"EcsTaskDefinition\": \"arn:aws:ecs:ap-southeast-2:123456789:task-definition/dev-someapp-EcsTaskDefinition-1F2VM9QB0I7K9:1\"\n    ...\n}\n'''\n\nRETURN = '''\nstack_description:\n    description: Summary facts about the stack\n    returned: always\n    type: dict\nstack_outputs:\n    description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter\n    returned: always\n    type: dict\nstack_parameters:\n    description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter\n    returned: always\n    type: dict\nstack_events:\n    description: All stack events for the stack\n    returned: only if all_facts or stack_events is true\n    type: list of events\nstack_policy:\n    description: Describes the stack policy for the stack\n    returned: only if all_facts or stack_policy is true\n    type: dict\nstack_template:\n    description: Describes the stack template for the stack\n    returned: only if all_facts or stack_template is true\n    type: dict\nstack_resource_list:\n    description: Describes stack resources for the stack\n    returned: only if all_facts or stack_resources is true\n    type: list of resources\nstack_resources:\n    
description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter\n    returned: only if all_facts or stack_resources is true\n    type: dict\n'''\n\ntry:\n    import boto3\n    import botocore\n    HAS_BOTO3 = True\nexcept ImportError:\n    HAS_BOTO3 = False\n\nfrom ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec\nfrom ansible.module_utils.basic import AnsibleModule\nfrom functools import partial\nimport json\nimport traceback\n\nclass CloudFormationServiceManager:\n    \"\"\"Handles CloudFormation Services\"\"\"\n\n    def __init__(self, module):\n        self.module = module\n\n        try:\n            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)\n            self.client = boto3_conn(module, conn_type='client',\n                                     resource='cloudformation', region=region,\n                                     endpoint=ec2_url, **aws_connect_kwargs)\n        except botocore.exceptions.NoRegionError:\n            self.module.fail_json(msg=\"Region must be specified as a parameter, in AWS_DEFAULT_REGION environment variable or in boto configuration file\")\n        except Exception as e:\n            self.module.fail_json(msg=\"Can't establish connection - \" + str(e), exception=traceback.format_exc())\n\n    def describe_stack(self, stack_name):\n        try:\n            func = partial(self.client.describe_stacks,StackName=stack_name)\n            response = self.paginated_response(func, 'Stacks')\n            if response:\n                return response[0]\n            self.module.fail_json(msg=\"Error describing stack - an empty response was returned\")\n        except Exception as e:\n            self.module.fail_json(msg=\"Error describing stack - \" + str(e), exception=traceback.format_exc())\n\n    def list_stack_resources(self, stack_name):\n        try:\n            func = partial(self.client.list_stack_resources,StackName=stack_name)\n            return self.paginated_response(func, 'StackResourceSummaries')\n        except Exception as e:\n            self.module.fail_json(msg=\"Error listing stack resources - \" + str(e), exception=traceback.format_exc())\n\n    def describe_stack_events(self, stack_name):\n        try:\n            func = partial(self.client.describe_stack_events,StackName=stack_name)\n            return self.paginated_response(func, 'StackEvents')\n        except Exception as e:\n            self.module.fail_json(msg=\"Error describing stack events - \" + str(e), exception=traceback.format_exc())\n\n    def get_stack_policy(self, stack_name):\n        try:\n            response = self.client.get_stack_policy(StackName=stack_name)\n            stack_policy = response.get('StackPolicyBody')\n            if stack_policy:\n                return json.loads(stack_policy)\n            return dict()\n        except Exception as e:\n            self.module.fail_json(msg=\"Error getting stack policy - \" + str(e), exception=traceback.format_exc())\n\n    def get_template(self, stack_name):\n        try:\n            response = self.client.get_template(StackName=stack_name)\n            return response.get('TemplateBody')\n        except Exception as e:\n            self.module.fail_json(msg=\"Error getting stack template - \" + str(e), exception=traceback.format_exc())\n\n    def paginated_response(self, func, result_key, next_token=None):\n        '''\n        Returns expanded response for paginated operations.\n        The 'result_key' is used to define the concatenated results that are combined from each paginated response.\n        '''\n        args=dict()\n        if next_token:\n            args['NextToken'] = next_token\n        response = func(**args)\n        result = response.get(result_key)\n        next_token = response.get('NextToken')\n        if not next_token:\n            return result\n        return result + self.paginated_response(func, result_key, next_token)\n\ndef to_dict(items, key, value):\n    ''' Transforms a list of items to a Key/Value dictionary '''\n    if items:\n        return dict(zip([i[key] for i in items], [i[value] for i in items]))\n    else:\n        return dict()\n\n
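# Illustrative usage of to_dict (hypothetical values): to_dict([{'OutputKey': 'Url', 'OutputValue': 'http://example'}], 'OutputKey', 'OutputValue') -> {'Url': 'http://example'}\n\n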
def main():\n    argument_spec = ec2_argument_spec()\n    argument_spec.update(dict(\n        stack_name=dict(required=True, type='str' ),\n        all_facts=dict(required=False, default=False, type='bool'),\n        stack_policy=dict(required=False, default=False, type='bool'),\n        stack_events=dict(required=False, default=False, type='bool'),\n        stack_resources=dict(required=False, default=False, type='bool'),\n        stack_template=dict(required=False, default=False, type='bool'),\n    ))\n\n    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)\n\n    if not HAS_BOTO3:\n        module.fail_json(msg='boto3 is required.')\n\n    # Describe the stack\n    service_mgr = CloudFormationServiceManager(module)\n    stack_name = module.params.get('stack_name')\n    result = {\n        'ansible_facts': { 'cloudformation': { stack_name:{} } }\n    }\n    facts = result['ansible_facts']['cloudformation'][stack_name]\n    facts['stack_description'] = service_mgr.describe_stack(stack_name)\n\n    # Create stack output and stack parameter dictionaries\n    if facts['stack_description']:\n        facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')\n        facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), 'ParameterKey', 'ParameterValue')\n\n    # normalize stack description API output\n    facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])\n    # camel2snake doesn't handle NotificationARNs properly, so let's fix that\n    facts['stack_description']['notification_arns'] = facts['stack_description'].pop('notification_ar_ns', [])\n\n    # Create optional stack outputs\n    all_facts = module.params.get('all_facts')\n    if all_facts or module.params.get('stack_resources'):\n        facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)\n        facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')\n    if all_facts or module.params.get('stack_template'):\n        facts['stack_template'] = service_mgr.get_template(stack_name)\n    if all_facts or module.params.get('stack_policy'):\n        facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)\n    if all_facts or module.params.get('stack_events'):\n        facts['stack_events'] = service_mgr.describe_stack_events(stack_name)\n\n    result['changed'] = False\n    module.exit_json(**result)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ansible/ansible-modules-extras","sub_path":"cloud/amazon/cloudformation_facts.py","file_name":"cloudformation_facts.py","file_ext":"py","file_size_in_byte":11217,"program_lang":"python","lang":"en","doc_type":"code","stars":944,"dataset":"github-code","pt":"47"} +{"seq_id":"2511086562","text":"def pascals_triangle(n):\n    if type(n) is not int:\n        raise TypeError('You must input an int')\n    # Start with the first row; each later row is built from the previous one\n    new_arr = [[1],]\n    for index in range(n-1):\n        new_arr.append(next_row(new_arr[index]))\n    return new_arr\n\ndef next_row(arr):\n    # keep the leading 1, sum each adjacent pair, then keep the trailing 1\n    new_arr = [arr[0]]\n    for index, num in enumerate(arr[:-1]):\n        new_arr.append(num+arr[index+1])\n    new_arr.append(arr[-1])\n    return new_arr\n\nprint(pascals_triangle(10))\n\n
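# Illustrative check: pascals_triangle(4) returns [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]\n\n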
# -------------------------------------------------------------------------\n\n\n# def PascalTriangle(n):\n\n#     triangle_row = [1]\n#     y = [0]\n#     for x in range(n):\n#         print(triangle_row)\n#         triangle_row=[left+right for left,right in zip(triangle_row+y, y+triangle_row)]\n#     return n>=1\n\n# PascalTriangle(10)\n","repo_name":"Alex2Pena/data-structures-and-algorithms","sub_path":"Python/code_challenges/pascals_triangle/pascals_triangle.py","file_name":"pascals_triangle.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32878916288","text":"class Account:\n\n    def __init__(self, owner, balance):\n\n        self.owner = owner\n        self.balance = balance\n\n    def __str__(self):\n\n        return f\"\\nAccount holder: {self.owner}\\nBalance: {self.balance}\\n\"\n\n    def deposit(self, amount):\n\n        self.balance += amount\n\n    def withdrawal(self, amount):\n\n        if(amount > self.balance):\n            print(\"\\n\\tNot enough money\\n\\tCurrent balance: {}\\n\"\n                  .format(self.balance))\n        else:\n            self.balance -= amount\n\n\ndef another_operation():\n\n    again = input(\n        \"Another operation? [ y / any key to exit ]: \").upper().strip()\n\n    return again == \"Y\"\n\n\ndef main():\n\n    client = Account('Bruno', 200)\n\n    print(\"=\" * 18)\n    print(\"\\tBank v1\")\n    print(\"=\" * 18)\n    print(str(client))\n\n    while True:\n\n        print(\"\\nWhat do you want to do?\")\n        print(\"[ 1 ] > Deposit \\n[ 2 ] > Withdrawals\\n[ any ] exit\")\n        option = input(\"\\n> \").strip()  # keep the raw string: int() would crash on the advertised \"any key\" exit\n\n        if option == \"1\":\n            client.deposit(int(input(\"Deposit amount: \")))\n        elif option == \"2\":\n            client.withdrawal(int(input(\"Withdrawal amount: \")))\n        else:\n            break\n\n        if not another_operation():\n            break\n\n    print(\"\\n\\tEND\")\n    print(\"=\" * 18)\n    print(str(client))\n    print(\"=\" * 18)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"btrentini/python_explore","sub_path":"bank_account_test/bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11135562750","text":"\n############################## Store Item Demand Forecasting #############################\n\n############################## Libraries and Utilities ###################################\nimport time\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport lightgbm as lgb\nimport warnings\nimport plotly.express as px\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom lightgbm import LGBMRegressor\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, cross_validate,\\\n    validation_curve, train_test_split\nfrom sklearn.model_selection import TimeSeriesSplit, GridSearchCV\nfrom sklearn.metrics import make_scorer\n\nfrom helpers.eda import *\nfrom helpers.data_prep import *\n\npd.set_option('display.max_columns', None)\npd.set_option('display.width', 500)\nwarnings.filterwarnings('ignore')\n\n######################### Loading the data ########################\n\ntrain = pd.read_csv(\"datasets/demand_forecasting/train.csv\", parse_dates=[\"date\"])\ntest = pd.read_csv(\"datasets/demand_forecasting/test.csv\", parse_dates=[\"date\"])\n# presumably the Kaggle submission template was intended here, not train.csv a second time\nsample_sub = pd.read_csv(\"datasets/demand_forecasting/sample_submission.csv\")\ndf = pd.concat([train, test], sort=False)\n\n
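# The concatenated frame keeps the test rows (their sales are NaN); they are split back out before the final fit.\n\n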
train[\"date\"].max(), test[\"date\"].min(), test[\"date\"].max()\n\n# How is the sales distribution?\ndf[[\"sales\"]].describe([0.10, 0.30, 0.50, 0.70, 0.80, 0.90, 0.95, 0.99]).T\n\n# How many unique stores?\ndf[\"store\"].nunique()\n\n# How many of each product have been sold?\ndf[\"item\"].value_counts()\n\n# Top 5 most expensive sales\na = df[\"sales\"].sort_values(ascending=False).head().index\nfor index in a:\n print(df[df.index == index])\n\n# Are there an equal number of unique items in each store??\ndf.groupby([\"store\"])[\"item\"].nunique()\n\n# Sales statistics in store breakdown\ndf.groupby(\"store\").agg({\"sales\": [\"sum\", \"mean\", \"median\", \"std\"]})\ndf.groupby(\"item\").agg({\"sales\": [\"sum\", \"mean\", \"median\", \"std\"]})\n\nsns.lineplot(x=\"date\",y=\"sales\", legend=\"full\",data=train)\nplt.show()\n\ndf.plot(x=\"date\", y=\"sales\", alpha=0.5)\nplt.show()\n\n################################### FEATURE ENGINEERING ##################################\n\n######################### Date Features ########################\n\ndef create_date_features(dataframe):\n dataframe['month'] = dataframe.date.dt.month\n dataframe['day_of_month'] = dataframe.date.dt.day\n dataframe['day_of_year'] = dataframe.date.dt.dayofyear\n dataframe['week_of_year'] = dataframe.date.dt.weekofyear\n dataframe['day_of_week'] = dataframe.date.dt.dayofweek\n dataframe['year'] = dataframe.date.dt.year\n dataframe[\"is_wknd\"] = dataframe.date.dt.weekday // 4\n dataframe['is_month_start'] = dataframe.date.dt.is_month_start.astype(int)\n dataframe['is_month_end'] = dataframe.date.dt.is_month_end.astype(int)\n return df\n\ndf = create_date_features(df)\n\ncheck_df(df)\n\ndf.groupby([\"store\", \"month\"]).agg({\"sales\": [\"sum\", \"mean\", \"median\", \"std\"]})\n\n######################### Random Noise ########################\n\ndef random_noise(dataframe):\n return np.random.normal(scale=1.6, size=(len(dataframe),))\n\na = np.random.normal(scale=1.6, size=(len(df)))\n\na = pd.DataFrame(a)\na.quantile([0, 0.1, 0.15, 0.25, 0.50, 0.75, 0.8, 0.84, 0.9, 0.95, 0.99, 1])\n\nsns.distplot(a)\nplt.show()\n\n######################### Lag/Shifted Features ########################\n\ndf.sort_values(by=['store', 'item', 'date'], axis=0, inplace=True)\n\ncheck_df(df)\n\npd.DataFrame({\"sales\": df[\"sales\"].values[0:10],\n \"lag1\": df[\"sales\"].shift(1).values[0:10],\n \"lag2\": df[\"sales\"].shift(2).values[0:10],\n \"lag3\": df[\"sales\"].shift(3).values[0:10],\n \"lag4\": df[\"sales\"].shift(4).values[0:10]})\n\n\ndef lag_features(dataframe, lags):\n for lag in lags:\n dataframe['sales_lag_' + str(lag)] = dataframe.groupby([\"store\", \"item\"])['sales'].transform(\n lambda x: x.shift(lag)) + random_noise(dataframe)\n return dataframe\n\n\ndf = lag_features(df, [91, 98, 105, 112, 119, 126, 182, 364, 546, 728])\n\ncheck_df(df)\n\n\n######################### Rolling Mean Features ########################\n\n\npd.DataFrame({\"sales\": df[\"sales\"].values[0:10],\n \"roll2\": df[\"sales\"].rolling(window=2).mean().values[0:10],\n \"roll3\": df[\"sales\"].rolling(window=3).mean().values[0:10],\n \"roll5\": df[\"sales\"].rolling(window=5).mean().values[0:10]})\n\n\npd.DataFrame({\"sales\": df[\"sales\"].values[0:10],\n \"roll2\": df[\"sales\"].shift(1).rolling(window=2).mean().values[0:10],\n \"roll3\": df[\"sales\"].shift(1).rolling(window=3).mean().values[0:10],\n \"roll5\": df[\"sales\"].shift(1).rolling(window=5).mean().values[0:10]})\n\n\ndef roll_mean_features(dataframe, windows):\n for window in 
######################### Rolling Mean Features ########################\n\n\npd.DataFrame({\"sales\": df[\"sales\"].values[0:10],\n              \"roll2\": df[\"sales\"].rolling(window=2).mean().values[0:10],\n              \"roll3\": df[\"sales\"].rolling(window=3).mean().values[0:10],\n              \"roll5\": df[\"sales\"].rolling(window=5).mean().values[0:10]})\n\n\npd.DataFrame({\"sales\": df[\"sales\"].values[0:10],\n              \"roll2\": df[\"sales\"].shift(1).rolling(window=2).mean().values[0:10],\n              \"roll3\": df[\"sales\"].shift(1).rolling(window=3).mean().values[0:10],\n              \"roll5\": df[\"sales\"].shift(1).rolling(window=5).mean().values[0:10]})\n\n\ndef roll_mean_features(dataframe, windows):\n    for window in windows:\n        dataframe['sales_roll_mean_' + str(window)] = dataframe.groupby([\"store\", \"item\"])['sales'].transform(\n            lambda x: x.shift(1).rolling(window=window, min_periods=10, win_type=\"triang\").mean()) + random_noise(dataframe)\n    return dataframe\n\n\ndf = roll_mean_features(df, [365, 546])\ndf.tail()\n\n\n\n######################### Exponentially Weighted Mean Features ########################\n\npd.DataFrame({\"sales\": df[\"sales\"].values[0:10],\n              \"roll2\": df[\"sales\"].shift(1).rolling(window=2).mean().values[0:10],\n              \"ewm099\": df[\"sales\"].shift(1).ewm(alpha=0.99).mean().values[0:10],\n              \"ewm095\": df[\"sales\"].shift(1).ewm(alpha=0.95).mean().values[0:10],\n              \"ewm07\": df[\"sales\"].shift(1).ewm(alpha=0.7).mean().values[0:10],\n              \"ewm01\": df[\"sales\"].shift(1).ewm(alpha=0.1).mean().values[0:10]})\n\n\ndef ewm_features(dataframe, alphas, lags):\n    for alpha in alphas:\n        for lag in lags:\n            dataframe['sales_ewm_alpha_' + str(alpha).replace(\".\", \"\") + \"_lag_\" + str(lag)] = \\\n                dataframe.groupby([\"store\", \"item\"])['sales'].transform(lambda x: x.shift(lag).ewm(alpha=alpha).mean())\n    return dataframe\n\nalphas = [0.95, 0.9, 0.8, 0.7, 0.5]\nlags = [91, 98, 105, 112, 180, 270, 365, 546, 728]\n\ndf = ewm_features(df, alphas, lags)\n\ncheck_df(df)\n\n######################### One-Hot Encoding ########################\n\ndf = pd.get_dummies(df, columns=['store', 'item', 'day_of_week', 'month'])\n\n######################### Converting sales to log(1+sales) ########################\n\ndf['sales'] = np.log1p(df[\"sales\"].values)\ncheck_df(df)\n\n###################################################### Model #####################################################\n\n######################### Custom Cost Function ########################\n\n# MAE: mean absolute error\n# MAPE: mean absolute percentage error\n# SMAPE: Symmetric mean absolute percentage error (adjusted MAPE)\n\ndef smape(preds, target):\n    n = len(preds)\n    masked_arr = ~((preds == 0) & (target == 0))\n    preds, target = preds[masked_arr], target[masked_arr]\n    num = np.abs(preds - target)\n    denom = np.abs(preds) + np.abs(target)\n    smape_val = (200 * np.sum(num / denom)) / n\n    return smape_val\n\ndef lgbm_smape(preds, train_data):\n    labels = train_data.get_label()\n    smape_val = smape(np.expm1(preds), np.expm1(labels))\n    return 'SMAPE', smape_val, False\n\n
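# Illustrative check: smape(np.array([110.]), np.array([100.])) -> 200 * (10 / 210) / 1 ≈ 9.52\n\n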
\"colsample_bytree\": [0.8, 1],\n \"max_depth\":[10, 14]}\n\nrf_best_grid = GridSearchCV(lgb_model,\n lightgbm_params,\n cv=tscv,\n scoring=make_scorer(smape),\n n_jobs=-1,\n verbose=True).fit(X_train, Y_train)\n\nlgb_final = lgb_model.set_params(**rf_best_grid.best_params_,\n random_state=1).fit(X_train, Y_train)\n\nprint(\"Train SMAPE:\", \"{:,.4f}\".format(smape(np.expm1(Y_train), np.expm1(lgb_final.predict(X_train)))), \"\\n\")\nprint(\"Train SMAPE:\", \"{:,.4f}\".format(smape(np.expm1(Y_val), np.expm1(lgb_final.predict(X_val)))), \"\\n\")\n\ndef plot_importance(model, features, num=25):\n feature_imp = pd.DataFrame({'Value': model.feature_importances_, 'Feature': features.columns})\n plt.figure(figsize=(15, 15))\n sns.set(font_scale=1)\n sns.barplot(x=\"Value\", y=\"Feature\", data=feature_imp.sort_values(by=\"Value\",\n ascending=False)[0:num])\n plt.title('Features')\n plt.tight_layout()\n plt.show()\n\nplot_importance(lgb_final, X_train)\n\n################################# Analyzing Model Complexity with Learning Curves ################################\n\ndef val_curve_params(model, X, y, param_name, param_range, scoring=\"roc_auc\", cv=tscv):\n train_score, test_score = validation_curve(\n model, X=X, y=y, param_name=param_name, param_range=param_range, scoring=scoring, cv=cv)\n\n mean_train_score = smape(np.expm1(train))\n mean_test_score = smape(np.expm1(test))\n\n plt.plot(param_range, mean_train_score,\n label=\"Training Score\", color='b')\n\n plt.plot(param_range, mean_test_score,\n label=\"Validation Score\", color='g')\n\n plt.title(f\"Validation Curve for {type(model).__name__}\")\n plt.xlabel(f\"Number of {param_name}\")\n plt.ylabel(f\"{scoring}\")\n plt.tight_layout()\n plt.legend(loc='best')\n plt.show()\n\nlgb_model = LGBMRegressor(random_state=17)\n\n\nlightgbm_params = [[\"learning_rate\", [0.01, 0.001]],\n [\"n_estimators\", [100, 1500]],\n [\"colsample_bytree\", [0.8, 1]],\n [\"max_dept\", [10, 14]]]\n\nfor i in range(len(lightgbm_params)):\n val_curve_params(lgb_model, X_train, Y_train, lightgbm_params[i][0], lightgbm_params[i][1], \"neg_root_mean_squared_error\")\n\n######################### LightGBM Model ########################\n\n# LightGBM parameters\nlgb_params = {'metric': {'mae'},\n 'num_leaves': 10,\n 'learning_rate': 0.02,\n 'feature_fraction': 0.8,\n 'max_depth': 5,\n 'verbose': 0,\n 'num_boost_round': 1000,\n 'early_stopping_rounds': 200,\n 'nthread': -1}\n\n\n# metric mae: l1, absolute loss, mean_absolute_error, regression_l1\n# l2, square loss, mean_squared_error, mse, regression_l2, regression\n# rmse, root square loss, root_mean_squared_error, l2_root\n# mape, MAPE loss, mean_absolute_percentage_error\n\nlgbtrain = lgb.Dataset(data=X_train, label=Y_train, feature_name=cols)\nlgbval = lgb.Dataset(data=X_val, label=Y_val, reference=lgbtrain, feature_name=cols)\n\nmodel = lgb.train(lgb_params, lgbtrain,\n valid_sets=[lgbtrain, lgbval],\n num_boost_round=lgb_params['num_boost_round'],\n early_stopping_rounds=lgb_params['early_stopping_rounds'],\n feval=lgbm_smape,\n verbose_eval=100)\n\ny_pred_val = model.predict(X_val, num_iteration=model.best_iteration)\n\nsmape(np.expm1(y_pred_val), np.expm1(Y_val))\n\n\n######################### Feature Importance ########################\n\n\ndef plot_lgb_importances(model, plot=False, num=10):\n\n gain = model.feature_importance('gain')\n feat_imp = pd.DataFrame({'feature': model.feature_name(),\n 'split': model.feature_importance('split'),\n 'gain': 100 * gain / gain.sum()}).sort_values('gain', 
######################### Feature Importance ########################\n\n\ndef plot_lgb_importances(model, plot=False, num=10):\n\n    gain = model.feature_importance('gain')\n    feat_imp = pd.DataFrame({'feature': model.feature_name(),\n                             'split': model.feature_importance('split'),\n                             'gain': 100 * gain / gain.sum()}).sort_values('gain', ascending=False)\n    if plot:\n        plt.figure(figsize=(10, 10))\n        sns.set(font_scale=1)\n        sns.barplot(x=\"gain\", y=\"feature\", data=feat_imp[0:num])  # honor the num argument when plotting\n        plt.title('feature')\n        plt.tight_layout()\n        plt.show()\n    else:\n        print(feat_imp.head(num))\n\n\nplot_lgb_importances(model, num=30)\nplot_lgb_importances(model, num=30, plot=True)\n\n######################### Final Model ########################\n\ntrain = df.loc[~df.sales.isna()]\nY_train = train['sales']\nX_train = train[cols]\n\ntest = df.loc[df.sales.isna()]\nX_test = test[cols]\n\nlgb_params = {'metric': {'mae'},\n              'num_leaves': 10,\n              'learning_rate': 0.02,\n              'feature_fraction': 0.8,\n              'max_depth': 5,\n              'verbose': 0,\n              'nthread': -1,\n              \"num_boost_round\": model.best_iteration}\n\n\n# LightGBM dataset\nlgbtrain_all = lgb.Dataset(data=X_train, label=Y_train, feature_name=cols)\n\nmodel = lgb.train(lgb_params, lgbtrain_all, num_boost_round=model.best_iteration)\n\ntest_preds = model.predict(X_test, num_iteration=model.best_iteration)\n\n\nsubmission_df = test.loc[:, ['id', 'sales']]\nsubmission_df['sales'] = np.expm1(test_preds)\nsubmission_df['id'] = submission_df.id.astype(int)\n\nsubmission_df.to_csv('submission_demand.csv', index=False)\nsubmission_df.head(20)","repo_name":"mehmettuzcu/store_sales_time_series_forecasting","sub_path":"store_ıtem_demand_forecasting.py","file_name":"store_ıtem_demand_forecasting.py","file_ext":"py","file_size_in_byte":13835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3570265995","text":"#!/usr/bin/env python3\n\nimport time  # used by thistime()\n\nimport PySimpleGUI as sg\nfrom gar_modul import GarantexIo, CalculateFin_1\n\ngi = GarantexIo.GarantexIo()\n\ndef thistime():\n    named_tuple = time.localtime()  # get a struct_time\n    time_string = time.strftime(\"%m/%d/%Y, %H:%M:%S\", named_tuple)\n    return time_string\nss = 1\nlayout = [[sg.Output(size=(60,10))],\n          [sg.Button('Go'), sg.Button('Pause')] ]\n\nwindow = sg.Window('Window Title', layout)\n\nwhile True: # Event Loop\n    event, values = window.read(timeout=1000)\n    if event == sg.WIN_CLOSED:\n        break  # fall through to window.close() below\n\n    if event == 'Pause':\n        quit()\n    \n    print(thistime())\nwindow.close()\n","repo_name":"donhua/garantexPY","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"11046319374","text":"from app import app\nfrom flask import request, render_template, jsonify\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nfrom skimage import io as skio\nfrom skimage.color import rgb2gray\nfrom skimage.transform import resize\n\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), 'mnist'))\nimport model\n\n#Restore Convolutional MNIST Model\nx = tf.placeholder(\"float\", [None, 784])\nsess = tf.Session()\n\nwith tf.variable_scope(\"convolutional\"):\n    keep_prob = tf.placeholder(\"float\")\n    h_conv1, h_pool1, h_conv2, h_pool2, y2, variables = model.convolutional(x, keep_prob)\n# y2, variables = model.convolutional(x, keep_prob)\nsaver = tf.train.Saver(variables)\nstored_model_location = os.path.dirname(__file__) + '/mnist/convolutional.ckpt'\nsaver.restore(sess, stored_model_location)\n\n#Run input through convolutional model\n\ndef convolutional_layer1(input):\n    return sess.run(h_conv1, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()\n\ndef convolutional_layer1_pooled(input):\n    return sess.run(h_pool1, feed_dict={x: input, keep_prob: 
1.0}).flatten().tolist()\n\ndef convolutional_layer2(input):\n return sess.run(h_conv2, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()\n\ndef convolutional_layer2_pooled(input):\n return sess.run(h_pool2, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()\n\ndef convolutional_prediction(input):\n return sess.run(y2, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()\n\nALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'bmp'])\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.lower().rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n@app.route('/')\n\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route('/process_photo', methods=['POST'])\ndef process_photo():\n request.get_data()\n file = request.files['digitPhoto']\n if file and allowed_file(file.filename):\n # get image from bmp file\n im = Image.open(file)\n pixels = np.array(im)\n print(pixels)\n # upload pixels to tensorflow model and output a digit\n return render_template('report.html') # how to pass tensorflow results to this template?\n return render_template('upload-error.html') # need to do generic error handling here\n\n@app.route('/api/mnist', methods=['GET'])\ndef mnist():\n output = \"API Calls\\n\"\n output += \"Send bmp image file associated with 'image' key in POST request\\n\"\n output += \"Apply up to convolution layer 1: /api/mnist/layer1\\n\"\n output += \"Apply up to convolution layer 1 and pooling: /api/mnist/layer1pooled\\n\"\n output += \"Apply up to convolution layer 2: /api/mnist/layer2\\n\"\n output += \"Apply up to convolution layer 2 and pooling: /api/mnist/layer2pooled\\n\"\n output += \"Apply entire model: /api/mnist/prediction\"\n return output\n\n@app.route('/api/mnist/layer1', methods=['POST'])\ndef layer1():\n input = resize(rgb2gray(np.invert(skio.imread(request.files['image']))),(28,28)).reshape(1,784)\n output = convolutional_layer1(input)\n return jsonify(results=output)\n\n@app.route('/api/mnist/layer1pooled', methods=['POST'])\ndef layer1pooled():\n input = resize(rgb2gray(np.invert(skio.imread(request.files['image']))),(28,28)).reshape(1,784)\n output = convolutional_layer1_pooled(input)\n return jsonify(results=output)\n\n@app.route('/api/mnist/layer2', methods=['POST'])\ndef layer2():\n input = resize(rgb2gray(np.invert(skio.imread(request.files['image']))),(28,28)).reshape(1,784)\n output = convolutional_layer2(input)\n return jsonify(results=output)\n\n@app.route('/api/mnist/layer2pooled', methods=['POST'])\ndef layer2pooled():\n input = resize(rgb2gray(np.invert(skio.imread(request.files['image']))),(28,28)).reshape(1,784)\n output = convolutional_layer2_pooled(input)\n return jsonify(results=output)\n\n@app.route('/api/mnist/prediction', methods=['POST'])\ndef prediction():\n input = resize(rgb2gray(np.invert(skio.imread(request.files['image']))),(28,28)).reshape(1,784)\n output = convolutional_prediction(input)\n return jsonify(results=output)\n","repo_name":"rlaboulaye/mnist","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"3115650549","text":"import rootpy\nfrom rootpy.extern import argparse\n\nfrom .categories import CATEGORIES\nfrom .massregions import DEFAULT_LOW_MASS, DEFAULT_HIGH_MASS\nfrom .variables import VARIABLES\nfrom .regions import REGIONS\nfrom .defaults import FAKES_REGION, TARGET_REGION\n\n\nclass formatter_class(argparse.ArgumentDefaultsHelpFormatter,\n 
argparse.RawTextHelpFormatter):\n pass\n\n\ndef base_parser():\n return argparse.ArgumentParser(formatter_class=formatter_class)\n\n\ndef general_parser(parser=None, multi_years=False, multi_categories=False):\n if parser is None:\n parser = base_parser()\n parser.add_argument('--systematics', action='store_true', default=False,\n help=\"enable systematics\")\n\n if multi_years:\n parser.add_argument('--years', type=int, default=[2012], choices=(2011, 2012),\n nargs='*',\n help='years')\n else:\n parser.add_argument('--year', type=int, default=2012, choices=(2011, 2012),\n help='the year')\n\n if multi_categories:\n parser.add_argument('--categories', default=['mva'],\n choices=CATEGORIES.keys(),\n nargs='*',\n help='category definitions')\n else:\n parser.add_argument('--categories', default='mva',\n choices=CATEGORIES.keys(),\n help='category definitions')\n\n parser.add_argument('--category-names', nargs=\"+\", default=None,\n help='category names')\n parser.add_argument('--controls', default='mva_workspace_controls',\n help='control definitions')\n parser.add_argument('--unblind', action='store_true', default=False,\n help='plot the data in the signal region of the classifier output')\n parser.add_argument('--masses', default='125')\n parser.add_argument('--suffix', default=None, nargs='?',\n help='suffix to add to any output files or plots')\n parser.add_argument('--output-suffix', default=None, nargs='?',\n help='suffix to add to any output files or plots')\n parser.add_argument('--systematics-components', default=None,\n help='only include the following systematics in plots Example: '\n 'TES_TRUE_INSITU_UP,QCD_SHAPE_UP')\n return parser\n\n\ndef analysis_parser(parser=None):\n if parser is None:\n parser = base_parser()\n parser.add_argument('--random-mu', action='store_true', default=False,\n help='set mu (signal strength) to a random number')\n parser.add_argument('--mu', default=1., type=float,\n help='set mu (signal strength)')\n parser.add_argument('--no-embedding', action='store_false', default=True,\n dest='embedding',\n help='use ALPGEN Z->tau+tau instead of embedding')\n parser.add_argument('--fakes-region', choices=REGIONS.keys(),\n default=FAKES_REGION,\n help='fakes shape region')\n parser.add_argument('--target-region', choices=REGIONS.keys(),\n default=TARGET_REGION,\n help='target signal region')\n parser.add_argument('--constrain-norms',\n action='store_true', default=False)\n parser.add_argument('--decouple-qcd-shape',\n action='store_true', default=False)\n parser.add_argument('--no-qcd-shape-systematic',\n dest='qcd_shape_systematic',\n action='store_false', default=True)\n parser.add_argument('--no-ggf-weight',\n dest='ggf_weight',\n action='store_false', default=True)\n return parser\n\n\ndef mass_parser(parser=None):\n if parser is None:\n parser = base_parser()\n parser.add_argument('--low-mass-cut', type=int,\n default=DEFAULT_LOW_MASS,\n help='the low mass window cut. '\n 'Norms of Z and QCD are fit below this and '\n 'the signal region of the classifier output is above this')\n parser.add_argument('--high-mass-cut', type=int,\n default=DEFAULT_HIGH_MASS,\n help='the high mass window cut. 
'\n                        'Norms of Z and QCD are fit above this and '\n                        'the signal region of the classifier output is below this')\n    parser.add_argument('--no-sideband-in-control',\n                        dest='high_sideband_in_control',\n                        action='store_false',\n                        default=True,\n                        help='Exclude the high mass sideband in the mass control and include '\n                             'it in the signal region')\n    return parser\n\n\ndef plotting_parser(parser=None):\n    if parser is None:\n        parser = base_parser()\n    parser.add_argument('--plots', nargs='*',\n                        help='only draw these plots. see the keys in variables.py')\n    parser.add_argument('--plot-cut', default=None, nargs='?',\n                        help='extra cut to be applied on the plots, but excluded from the '\n                             'QCD/Z normalization and training and classifier output')\n    parser.add_argument('--plot-expr', default=None, nargs='?',\n                        help='expression to plot, instead of predefined ones in variables.py')\n    parser.add_argument('--plot-name', default=None, nargs='?',\n                        help='name of expr')\n    parser.add_argument('--plot-min', type=float, default=0, nargs='?',\n                        help='minimum of expr')\n    parser.add_argument('--plot-max', type=float, default=1, nargs='?',\n                        help='maximum of expr')\n    parser.add_argument('--plot-bins', type=int, default=20, nargs='?',\n                        help='number of bins to plot expr in')\n    parser.add_argument('--no-weight', action='store_true', default=False,\n                        help='do not apply correction weights')\n    parser.add_argument('--output-formats', default=['png'], nargs='+',\n                        choices=('png', 'eps', 'pdf'),\n                        help='output formats')\n    parser.add_argument('--no-data', action='store_true', default=False,\n                        help='do not display data on the plot')\n    parser.add_argument('--show-ratio', action='store_true', default=False,\n                        help='Draw plot with a ratio plot below the main plot')\n    return parser\n\n\ndef get_parser(actions=True):\n    parser = general_parser()\n    analysis_parser(parser)\n    mass_parser(parser)\n    plotting_parser(parser)\n    if actions:\n        parser.add_argument('actions', nargs='*',\n                            choices=[\n                                'stability',\n                                'validate',\n                                'weights',\n                                '2d',\n                                'plotevolving',\n                                'money',\n                                'scatter',\n                                'correlate',\n                                'evaluate',\n                                'workspace',\n                                'ntup',\n                                'ntuptruth',\n                                'top10',\n                                'overlap',\n                                'cuts_notmva',\n                                'massplot'],\n                            default=[],\n                            help='only perform these actions')\n    return parser\n","repo_name":"htautau/hhana","sub_path":"mva/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":6913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"8061436194","text":"# by Anissa and Pratyusha\n# It's cleaner to put all the imports at the beginning of the file\n\nimport json\nimport csv\n\n# Read superheroes.json (in this folder)\n\nwith open('superheroes.json', 'r') as f:\n\tsuperheroes = json.load(f)\n\t\n# print(superheroes)\n# Creates an empty array called powers\n# powers = []\n\n# Write a header to the CSV file\nwith open('superheroes.csv', 'w') as f:\n\twriter = csv.writer(f)\n\theaders = ['name', 'age', 'secretIdentity', \n\t\t'powers', 'squadName', 'homeTown', 'formed', \n\t\t'secretBase', 'active']\n\twriter.writerow(headers)\n\n\t# Loop through the members of the squad and write one CSV row per member\n\tmembers = superheroes['members']\n\tfor member in members: \n\n\t\t# Define variables \n\t\tname = member['name']\n\t\tage = member['age']\n\t\tsecret_identity = member['secretIdentity']\n\t\tpowers = member['powers']\n\t\tsquad_name = superheroes['squadName']\n\t\thome_town = superheroes['homeTown']\n\t\tformed = superheroes['formed']\n\t\tsecret_base = 
superheroes['secretBase']\n\t\tactive = superheroes['active']\n\n\t\t# Write one row of member data to the CSV file\n\t\trow = [name, age, secret_identity, powers, squad_name,home_town,formed,secret_base,active]\n\t\twriter.writerow(row)\n","repo_name":"aabdeljelil/python-playground","sub_path":"superheroes.py","file_name":"superheroes.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29702034277","text":"import functools\nfrom copy import copy\n\nimport torch\nfrom fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\nfrom fairscale.optim.oss import OSS\nfrom torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\ntry:\n    from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy as auto_wrap_policy\nexcept ImportError:\n    from torch.distributed.fsdp.wrap import default_auto_wrap_policy as auto_wrap_policy\n\nfrom atorch.auto.opt_lib.optimization import Optimization\nfrom atorch.auto.opt_lib.utils import find_modules, to_module_class_by_name\nfrom atorch.distributed.distributed import local_rank, parallel_group, parallel_group_size\nfrom atorch.modules.distributed_modules.materialize_modules import materialize_modules_to_device\nfrom atorch.utils.meta_model_utils import is_meta\nfrom atorch.utils.version import torch_version\n\n\nclass Zero1Optimization(Optimization):\n    def __init__(self):\n        super().__init__(name=\"zero1\", group=\"zero\", is_tunable=False, is_distributed=True)\n\n    def tune(self, model_context, config=None, strategy=None, apply_transform=True, time_limit=None):\n        if apply_transform:\n            model_context = self.transform(model_context, config)\n        return True, config, model_context\n\n    def transform(self, model_context, config=None):\n        \"\"\"Transform the optimizer to use Fairscale Zero1\n        Args:\n            model_context: ModelContext instance\n            config(dict): config for Fairscale Zero1\n        Returns:\n            transformed ModelContext instance\n        \"\"\"\n        # skip zero1 optimization when user did not pass optim_func\n        if model_context.optim_func is None:\n            return model_context\n        new_optim_args = {}\n        new_optim_args[\"optim\"] = model_context.optim_func\n        new_optim_args[\"group\"] = parallel_group(\"data\")\n        new_optim_args.update(model_context.optim_args)\n\n        model_context.optim_func = OSS\n        model_context.optim_args = new_optim_args\n\n        return model_context\n\n\nclass Zero2Optimization(Optimization):\n    def __init__(self):\n        super().__init__(name=\"zero2\", group=\"zero\", is_tunable=False, is_distributed=True)\n\n    def tune(self, model_context, config=None, strategy=None, apply_transform=True, time_limit=None):\n        if apply_transform:\n            model_context = self.transform(model_context, config)\n        return True, config, model_context\n\n    def transform(self, model_context, config=None):\n        \"\"\"Transform the optimizer to use Fairscale Zero2\n        Args:\n            model_context: ModelContext instance\n            config(dict): config for Fairscale Zero2\n        Returns:\n            transformed ModelContext instance\n        \"\"\"\n        # skip zero2 optimization when user did not pass optim_func\n        if model_context.optim_func is None:\n            return model_context\n        config = copy(config) or {}\n        not_use_fsdp = config.pop(\"not_use_fsdp\", False)\n        if not_use_fsdp or torch_version() < (1, 12, 0) or not torch.cuda.is_available():\n            # use fairscale zero2 with OSS\n            new_optim_args = {}\n            new_optim_args[\"optim\"] = model_context.optim_func\n            new_optim_args[\"group\"] = parallel_group(\"zero\") or parallel_group(\"data\")\n            
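# fairscale ZeRO-2 path: OSS shards optimizer state across ranks, and the ShardedDDP post-wrapper reduces each gradient only to the rank that owns its shard\n            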
new_optim_args.update(model_context.optim_args)\n\n model_context.optim_func = OSS\n model_context.optim_args = new_optim_args\n model_context.add_wrapper(\n \"zero2\",\n Zero2Optimization.apply_wrapper,\n wrapper_config=config,\n is_pre_wrapper=False,\n )\n else:\n # use fsdp zero2\n from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy\n\n config[\"sharding_strategy\"] = ShardingStrategy.SHARD_GRAD_OP\n model_context.add_wrapper(\n \"fsdp\",\n FSDPOptimization.apply_wrapper,\n wrapper_config=config,\n is_pre_wrapper=True,\n )\n\n return model_context\n\n @staticmethod\n def apply_wrapper(model_context, wrapper_name, wrapper_config=None):\n \"\"\"Zero2 must be used after optimizer is created, it's a post wrapper\"\"\"\n if torch.cuda.is_available():\n torch.cuda.set_device(local_rank())\n device = torch.device(type=\"cuda\", index=local_rank())\n model_context.model.to(device)\n wrapper_config = wrapper_config or {}\n mixed_with_ddp = parallel_group_size(\"data\") and parallel_group_size(\"data\") > 1 and parallel_group(\"zero\")\n\n if torch_version() < (1, 12, 0) and mixed_with_ddp:\n raise ValueError(\"Zero + ddp only support pytorch 1.12.0 or later.\")\n model_context.model = ShardedDDP(\n model_context.model,\n model_context.optim,\n parallel_group(\"zero\") or parallel_group(\"data\"),\n **wrapper_config,\n )\n\n return model_context\n\n\nclass FSDPOptimization(Optimization):\n def __init__(self):\n super().__init__(name=\"fsdp\", group=\"zero\", is_tunable=False, is_distributed=True)\n\n def distributed_only(self, config=None):\n # cpu offload can be used in non-distributed mode\n if config is not None and config.get(\"cpu_offload\", False) is True:\n return False\n return True\n\n def tune(self, model_context, config=None, strategy=None, apply_transform=True, time_limit=None):\n if apply_transform:\n model_context = self.transform(model_context, config)\n return True, config, model_context\n\n def transform(self, model_context, config=None):\n \"\"\"Transform use FSDP\n Args:\n model_context: ModelContext instance\n config(dict): FSDP parameters and optional atorch specific configs below.\n atorch_wrap_cls:\n tuple/list of module classes to wrap with fsdp.\n atorch_ignored_cls:\n tuple of module classes, modules whose are instances of these classes would be ignored.\n atorch_size_based_min_num_params (default 1e5):\n if atorch_wrap_cls not exist, use size_based_auto_wrap_policy with this min_num_params.\n Returns:\n transformed ModelContext instance\n \"\"\"\n if not torch.cuda.is_available():\n raise ValueError(\"FSDP only support GPU !\")\n model_context.add_wrapper(\n \"fsdp\", FSDPOptimization.apply_wrapper, wrapper_config=copy(config), is_pre_wrapper=True\n )\n\n return model_context\n\n @staticmethod\n def apply_wrapper(model_context, wrapper_name, wrapper_config=None):\n \"\"\"FSDP must be created before optimizer is created, it's a pre wrapper\"\"\"\n torch.cuda.set_device(local_rank())\n wrapper_config = wrapper_config or {}\n # atorch_wrap_cls or atorch_size_based_min_num_params\n if \"atorch_wrap_cls\" in wrapper_config and torch_version() >= (1, 12, 1):\n wrap_cls = wrapper_config[\"atorch_wrap_cls\"]\n del wrapper_config[\"atorch_wrap_cls\"]\n # atorch_wrap_cls may contain string for module name, convert to module class.\n wrap_cls = to_module_class_by_name(model_context.model, wrap_cls)\n from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy\n\n wrap_policy = functools.partial(transformer_auto_wrap_policy, 
transformer_layer_cls=set(wrap_cls))\n wrapper_config[\"auto_wrap_policy\"] = wrap_policy\n else:\n policy_param_name = \"auto_wrap_policy\" if torch_version() >= (1, 12, 0) else \"fsdp_auto_wrap_policy\"\n if \"atorch_size_based_min_num_params\" in wrapper_config:\n min_num_params = wrapper_config[\"atorch_size_based_min_num_params\"]\n del wrapper_config[\"atorch_size_based_min_num_params\"]\n else:\n min_num_params = 1e5\n wrapper_config[policy_param_name] = functools.partial(auto_wrap_policy, min_num_params=min_num_params)\n\n if torch_version() >= (1, 12, 0):\n # ignore embedding\n if \"atorch_ignored_cls\" in wrapper_config:\n ignored_cls = wrapper_config[\"atorch_ignored_cls\"]\n del wrapper_config[\"atorch_ignored_cls\"]\n ignored_cls = to_module_class_by_name(model_context.model, ignored_cls)\n ignore_modules = find_modules(model_context.model, ignored_cls)\n if ignore_modules:\n wrapper_config[\"ignored_modules\"] = set(ignore_modules)\n # default to use \"backward_prefetch\"\n if \"backward_prefetch\" not in wrapper_config:\n from torch.distributed.fsdp import BackwardPrefetch\n\n wrapper_config[\"backward_prefetch\"] = BackwardPrefetch.BACKWARD_PRE\n cpu_offload = wrapper_config.get(\"cpu_offload\", False)\n if not cpu_offload:\n # Initialize modules' params on gpu and set device id\n # support meta\n if is_meta(model_context.model):\n wrapper_config[\"param_init_fn\"] = functools.partial(\n materialize_modules_to_device, device=local_rank()\n )\n # set device to gpu\n wrapper_config.setdefault(\"device_id\", local_rank())\n else:\n if \"sync_module_states\" in wrapper_config:\n # cpu_offload do not support sync_module_states\n wrapper_config.pop(\"sync_module_states\")\n from torch.distributed.fsdp import CPUOffload\n\n wrapper_config[\"cpu_offload\"] = CPUOffload(offload_params=True)\n\n fsdp_clz = FSDP\n pg = parallel_group(\"zero\") or parallel_group(\"data\")\n extra_config = {}\n hybrid_with_ddp = (\n parallel_group_size(\"zero\")\n and parallel_group_size(\"data\")\n and parallel_group_size(\"zero\") > 1\n and parallel_group_size(\"data\") > 1\n )\n if hybrid_with_ddp:\n if torch_version() == (1, 12, 1):\n from atorch.data_parallel.zero_ddp_mix_112 import FSDPWithDDP\n\n fsdp_clz = FSDPWithDDP\n elif torch_version() < (2, 0):\n raise ValueError(f\"Pytorch version {torch_version()} does not support FSDP + DDP hybrid sharding.\")\n else:\n # pytorch version >= (2, 0), use fsdp HYBRID_SHARD\n from torch.distributed.fsdp.api import ShardingStrategy\n\n extra_config[\"sharding_strategy\"] = ShardingStrategy.HYBRID_SHARD\n pg = (parallel_group(\"zero\"), parallel_group(\"data\"))\n\n if wrapper_config.pop(\"parse_fsdp\", None) is not None:\n cb = wrapper_config.pop(\"parse_cb\", None)\n from atorch.utils.parse_fsdp_mapping import ParseFSDP\n\n fsdp_clz = ParseFSDP(fsdp_clz, fsdp_clz is FSDP, cb)\n\n wrapper_config.update(extra_config)\n model_context.model = fsdp_clz(\n model_context.model,\n pg,\n **wrapper_config,\n )\n if torch_version() < (1, 12, 0):\n model_context.model.to(local_rank())\n return model_context\n","repo_name":"SylviaSyp/test","sub_path":"atorch/atorch/auto/opt_lib/zero_optimization.py","file_name":"zero_optimization.py","file_ext":"py","file_size_in_byte":11317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10110376846","text":"# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\n\nVERSION = '0.6.1'\nAPPNAME = 'ndn-tools'\nGIT_TAG_PREFIX = 'ndn-tools-'\n\nfrom 
waflib import Utils, Context, Logs\nimport os, subprocess\n\ndef options(opt):\n    opt.load(['compiler_cxx', 'gnu_dirs'])\n    opt.load(['default-compiler-flags', 'coverage', 'sanitizers', 'boost',\n              'sphinx_build'],\n             tooldir=['.waf-tools'])\n\n    opt.add_option('--with-tests', action='store_true', default=False,\n                   dest='with_tests', help='''Build unit tests''')\n\n    opt.recurse('tools')\n\ndef configure(conf):\n    conf.load(['compiler_cxx', 'gnu_dirs',\n               'default-compiler-flags', 'sphinx_build', 'boost'])\n\n    if 'PKG_CONFIG_PATH' not in os.environ:\n        os.environ['PKG_CONFIG_PATH'] = Utils.subst_vars('${LIBDIR}/pkgconfig', conf.env)\n    conf.check_cfg(package='libndn-cxx', args=['--cflags', '--libs'],\n                   uselib_store='NDN_CXX', mandatory=True)\n\n    boost_libs = 'system filesystem program_options regex thread log log_setup'\n    if conf.options.with_tests:\n        conf.env['WITH_TESTS'] = True\n        conf.define('WITH_TESTS', 1)\n        boost_libs += ' unit_test_framework'\n    conf.check_boost(lib=boost_libs, mt=True)\n\n    conf.recurse('tools')\n\n    conf.check_compiler_flags()\n\n    # Loading \"late\" to prevent tests from being compiled with profiling flags\n    conf.load('coverage')\n    conf.load('sanitizers')\n\n    conf.msg('Tools to build', ', '.join(conf.env['BUILD_TOOLS']))\n\ndef build(bld):\n    version(bld)\n\n    bld(features='subst',\n        source='core/version.cpp.in',\n        target='core/version.cpp',\n        name='version.cpp',\n        VERSION_BUILD=VERSION)\n\n    bld.objects(target='core-objects',\n                source=bld.path.ant_glob(['core/*.cpp']) + ['core/version.cpp'],\n                use='NDN_CXX BOOST',\n                includes='.',\n                export_includes='.')\n\n    bld.recurse('tools')\n    bld.recurse('tests')\n    bld.recurse('manpages')\n\ndef version(ctx):\n    # don't execute more than once\n    if getattr(Context.g_module, 'VERSION_BASE', None):\n        return\n\n    Context.g_module.VERSION_BASE = Context.g_module.VERSION\n    Context.g_module.VERSION_SPLIT = VERSION_BASE.split('.')\n\n    # first, try to get a version string from git\n    gotVersionFromGit = False\n    try:\n        cmd = ['git', 'describe', '--always', '--match', '%s*' % GIT_TAG_PREFIX]\n        out = subprocess.check_output(cmd, universal_newlines=True).strip()\n        if out:\n            gotVersionFromGit = True\n            if out.startswith(GIT_TAG_PREFIX):\n                # slice off the prefix; lstrip() would strip a character set, not the prefix\n                Context.g_module.VERSION = out[len(GIT_TAG_PREFIX):]\n            else:\n                # no tags matched\n                Context.g_module.VERSION = '%s-commit-%s' % (VERSION_BASE, out)\n    except (OSError, subprocess.CalledProcessError):\n        pass\n\n    versionFile = ctx.path.find_node('VERSION')\n    if not gotVersionFromGit and versionFile is not None:\n        try:\n            Context.g_module.VERSION = versionFile.read()\n            return\n        except EnvironmentError:\n            pass\n\n    # version was obtained from git, update VERSION file if necessary\n    if versionFile is not None:\n        try:\n            if versionFile.read() == Context.g_module.VERSION:\n                # already up-to-date\n                return\n        except EnvironmentError as e:\n            Logs.warn('%s exists but is not readable (%s)' % (versionFile, e.strerror))\n    else:\n        versionFile = ctx.path.make_node('VERSION')\n\n    try:\n        versionFile.write(Context.g_module.VERSION)\n    except EnvironmentError as e:\n        Logs.warn('%s is not writable (%s)' % (versionFile, e.strerror))\n\ndef dist(ctx):\n    version(ctx)\n\ndef distcheck(ctx):\n    version(ctx)\n","repo_name":"andredxc/ICNInstallation","sub_path":"mini-ndn/ndn-tools/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35118093803","text":"import sys\n\n\nN, K = map(int, sys.stdin.readline().split())\n
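# Josephus simulation: rotate K-1 people to the back of the queue, then remove the one now at the front\n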
numbers = [i for i in range(1, N + 1)]\nanswer = \"<\"\n\nwhile len(numbers) > 0:\n    cnt = K - 1\n    while cnt > 0:\n        pop_number = numbers.pop(0)\n        numbers.append(pop_number)\n        cnt -= 1\n    answer += str(numbers.pop(0)) + \", \"\nanswer = answer.rstrip(\", \")\nanswer += \">\"\nsys.stdout.write(answer)\n\n# Reference\n# https://st-lab.tistory.com/197\n","repo_name":"Gyusik-Choi/algorithm","sub_path":"baekjoon/11866_요세푸스 문제 0/B_11866_2.py","file_name":"B_11866_2.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18696286569","text":"# Created by Raju Kumar Mishra \n# Book PySpark Recipes\n# Chapter 9\n# Recipe 9-2. Create a Sparse Vector.\n# Run following PySpark code lines, line by line in PySpark shell\n\nfrom pyspark.mllib.linalg import SparseVector\nsparseDataList = [1.0,3.2]\nsparseDataVector = SparseVector(8,[0,7],sparseDataList)\nsparseDataVector\nsparseDataVector[1]\nsparseDataVector[7]\nsparseDataVector.numNonzeros()\nsparseDataList1 = [3.0,1.4,2.5,1.2]\nsparseDataVector1 = SparseVector(8,[0,3,4,6],sparseDataList1)\nsquaredDistance = sparseDataVector.squared_distance(sparseDataVector1)\nsquaredDistance\n","repo_name":"Apress/pyspark-recipes","sub_path":"code_mishra/chapter9/runLineByLine/recipe9-2.py","file_name":"recipe9-2.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"} +{"seq_id":"1847716355","text":"import datetime\n\nimport jpholiday\nimport workdays\n\n# Get the holidays between 2019/01/01 and 2021/12/31\nholidays = [\n    holiday_info[0]\n    for holiday_info in jpholiday.between(\n        start_date=datetime.datetime(2019, 1, 1),\n        end_date=datetime.datetime(2021, 12, 31),\n    )\n]\nprint(\"---------- 祝日 ----------\\n\")\n# Print the holidays we fetched\nfor holiday in holidays:\n    print(holiday)\n\n# 2 business days before 2020/11/24, using the holidays fetched above\nprint(\"---------- 2020/11/24から2営業日前 ----------\")\nstart_date = datetime.datetime(2020, 11, 24)\nprint(workdays.workday(start_date, days=-2, holidays=holidays))\n","repo_name":"nanato12/practice","sub_path":"python/workspace/workdate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"3016957201","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'feedback'\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('result', views.result, name='result'),\n    path('assessments', views.assessments, name='assessments'),\n    path('assessments/', views.assessment_detail, name='assessment_detail')\n]","repo_name":"Jcuperus/code_assessment","sub_path":"feedback_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30389929224","text":"from pathlib import Path\n\nimport grpc\n\nfrom takasho.packer import packer\nfrom takasho.schema.common_featureset.player_api import loot_box_pb2\nfrom takasho.schema.common_featureset.player_api import loot_box_pb2_grpc\n\n\nclass LootBoxV3(loot_box_pb2_grpc.LootBoxV3Servicer):\n    \n    def GetAvailableV1(self, request, context):\n        response = loot_box_pb2.LootBoxV3GetAvailableV1.Response()\n        if request.exclude_pickup_response:\n            if not request.page_token:\n                p = Path(__file__).with_name('loot_box.hex')\n            else:\n
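                # each page token selects a pre-recorded serialized response dump (loot_box.<token>.hex)\n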
                p = Path(__file__).with_name('loot_box.' + request.page_token + '.hex')\n            with p.open() as f:\n                response = loot_box_pb2.LootBoxV3GetAvailableV1.Response. \\\n                    FromString(bytes.fromhex(f.read()))\n        return response\n\n\ndef add_LootBoxV3Servicer_to_server(servicer, server):\n    rpc_method_handlers = {\n        'GetAvailableV1': grpc.unary_unary_rpc_method_handler(\n            servicer.GetAvailableV1,\n            request_deserializer=lambda x: loot_box_pb2.LootBoxV3GetAvailableV1.Request.FromString(packer.unpack(x)),\n            response_serializer=lambda x: packer.pack(loot_box_pb2.LootBoxV3GetAvailableV1.Response.SerializeToString(x)),\n        ),\n        'PurchaseV1': grpc.unary_unary_rpc_method_handler(\n            servicer.PurchaseV1,\n            request_deserializer=lambda x: loot_box_pb2.LootBoxV3PurchaseV1.Request.FromString(packer.unpack(x)),\n            response_serializer=lambda x: packer.pack(loot_box_pb2.LootBoxV3PurchaseV1.Response.SerializeToString(x)),\n        ),\n        'GetProbabilityV1': grpc.unary_unary_rpc_method_handler(\n            servicer.GetProbabilityV1,\n            request_deserializer=lambda x: loot_box_pb2.LootBoxV3GetProbabilityV1.Request.FromString(packer.unpack(x)),\n            response_serializer=lambda x: packer.pack(loot_box_pb2.LootBoxV3GetProbabilityV1.Response.SerializeToString(x)),\n        ),\n        'GetDetailV1': grpc.unary_unary_rpc_method_handler(\n            servicer.GetDetailV1,\n            request_deserializer=lambda x: loot_box_pb2.LootBoxV3GetDetailV1.Request.FromString(packer.unpack(x)),\n            response_serializer=lambda x: packer.pack(loot_box_pb2.LootBoxV3GetDetailV1.Response.SerializeToString(x)),\n        ),\n    }\n    generic_handler = grpc.method_handlers_generic_handler(\n        'takasho.schema.common_featureset.player_api.LootBoxV3', rpc_method_handlers)\n    server.add_generic_rpc_handlers((generic_handler,))\n\n","repo_name":"RainbowUnicorn7297/dankagu-local","sub_path":"takasho/schema/common_featureset/player_api/loot_box.py","file_name":"loot_box.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"} +{"seq_id":"42205407133","text":"import hashlib\n\nfrom datetime import datetime\n\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.auth.hashers import make_password\n\nfrom financeiro.models import Registro\n\n\nIGNORAR_STATUS = ['Anotacao De Credito']\n\n\ndef processa_linha(cnt, linha):\n    \"\"\"\n    Extracts the statement fields from the given bank-statement line.\n    \"\"\"\n    possui_data_e_num_doc = False\n\n    ano=mes=dia=num_doc=historico=origem=_txt=saldo_no_dia = None\n\n    retorno = \"\"\n\n    try:\n        ano, mes, dia = int(linha[6:10]), int(linha[3:5]), int(linha[0:2])\n        data = datetime(ano, mes, dia).strftime('%Y-%m-%d')\n        num_doc = int(linha[60:71].replace('.', ''))\n        possui_data_e_num_doc = True\n    except ValueError:\n        pass\n\n    if(possui_data_e_num_doc):\n        historico = linha[20:48].strip()\n        if(historico not in IGNORAR_STATUS):\n            origem = linha[50:56].strip()\n            num_doc = int(linha[60:71].replace('.', ''))\n            valor = float(linha[73:87].replace(',', '#').replace('.', '') \\\n                .replace('#', '.'))\n            saldo_no_dia = float(linha[89:].replace(',', '#').replace('.', '') \\\n                .replace('#', '.'))\n            _txt = data+historico+origem+str(num_doc)+str(valor)+str(saldo_no_dia)\n            # retorno = f\"data: {data} | hist: {historico} | orig: {origem} | num: {num_doc} | vlr: {valor} | sld: {saldo_no_dia}\"\n            retorno = f\"{dia}.{mes}.{ano} {historico:28} {origem: >6} {num_doc: >11} {valor: >14}\"\n\n            m = Registro()\n            m.posicao=data\n            m.historico=historico\n            m.origem=origem\n            m.numero_documento=num_doc\n            m.valor=valor\n            
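# _txt concatenates the row's identifying fields; its MD5 (computed below) serves as a de-duplication key for re-imported statements\n            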
m.saldo_no_dia=saldo_no_dia\n m.hash = hashlib.md5(_txt.encode()).hexdigest()\n\n if(Registro.objects.filter(hash=m.hash)):\n retorno = '[ ] ' + retorno\n else:\n retorno = '[ok] ' + retorno\n m.save()\n\n return retorno\n\ndef upload(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n\n results = []\n with open(fs.path(filename)) as arq:\n for cnt, line in enumerate(arq):\n r = processa_linha(cnt, line)\n if r != None:\n results.append(r)\n\n return render(request, 'financeiro/upload_receipts.html', {\n 'uploaded_file_url': uploaded_file_url,\n 'results': results\n })\n return render(request, 'financeiro/upload_receipts.html')\n","repo_name":"alduxx/csa","sub_path":"financeiro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72764076944","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 8 14:39:25 2022\r\n\r\nImplementation of IEEE C37.118 (2018) Annex D standard PMU algorithms\r\nBuilt to interface with \"pmuClass\"\r\n\r\n- takes inputs of sampling rate (fs), nominal freq (f0), report rate (RR), and \r\nclass (p or m)\r\n- All filters described in Annex D are implemented (f0=[50,60] and all combinations of RR)\r\n\r\n@author: Dylan Tarter\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport math\r\nfrom scipy import signal\r\nimport matplotlib.pyplot as plt\r\n\r\nclass ieeeClass:\r\n def __init__(self, fs, f0, RR, tclass, name = 'None'):\r\n self._fs = fs\r\n self._f0 = f0\r\n self._RR = RR\r\n self._tclass = tclass.lower() #force to lower case for later checks\r\n \r\n if(name == 'None'):\r\n self._name = 'IEEE '+self.tclass.upper()+'-Class'\r\n else:\r\n self._name = name\r\n \r\n \"\"\"Build the filters before-hand to reduce computations on run\"\"\"\r\n if(tclass == 'p'):\r\n \"\"\"From Annex D.6, equation D.5\"\"\"\r\n N = (fs/f0 - 1) * 2\r\n k = np.arange(-N/2,N/2+1)\r\n w = 1-2/(N+2)*abs(k)\r\n elif(self._tclass == 'm'):\r\n \"\"\"From Annex D.7, Table D.1\"\"\"\r\n if(self._f0 == 50):\r\n if(self._RR == 10):\r\n Ffr, N = 1.779, 806\r\n elif(self._RR == 25):\r\n Ffr, N = 4.355, 338\r\n elif(self._RR == 50):\r\n Ffr, N = 7.75, 142\r\n elif(self._RR == 100):\r\n Ffr, N = 14.1, 66\r\n else:\r\n print('[PMU/IEEE] M Class filter at 50Hz nominal only supports reports at (10,25,50,100)') \r\n elif(self._f0 == 60):\r\n if(self._RR == 10):\r\n Ffr, N = 1.78, 968\r\n elif(self._RR == 12):\r\n Ffr, N = 2.125, 816\r\n elif(self._RR == 15):\r\n Ffr, N = 2.64, 662\r\n elif(self._RR == 20):\r\n Ffr, N = 3.5, 502\r\n elif(self._RR == 30):\r\n Ffr, N = 5.02, 306\r\n elif(self._RR == 60):\r\n Ffr, N = 8.19, 164\r\n elif(self._RR == 120):\r\n Ffr, N = 16.25, 70 \r\n else:\r\n print('[PMU/IEEE] M Class filter at 60Hz nominal only supports reports at (10,12,15,20,30,60,120)') \r\n else:\r\n print('[PMU/IEEE] M Class filter only supports nominal frequencies of 60 or 50Hz')\r\n \r\n \"\"\"From Annex D.7, equation D.7\"\"\"\r\n k = np.arange(-N/2,N/2+1)\r\n h = np.hamming(N+1) #hamming filter of order N+1\r\n w = np.sin(2*math.pi*(2*Ffr)/self._fs*k) / (2*math.pi*(2*Ffr)/self._fs*k) * h\r\n #w = np.sinc(2*math.pi*(2*Ffr)/self._fs*k) * h\r\n w[int(N/2)] = 1\r\n else:\r\n print('[PMU/IEEE] Filter class not recognized. Try p or m.')\r\n \r\n \"\"\"From D.2, Equation D.1 and D.2. 
The summation across x happens later, in __estimate().\"\"\"\r\n w0 = 2*math.pi*f0\r\n G = sum(w) # D.2\r\n E = np.exp(-1j*k/fs*w0) # DFT\r\n # filter coefficients to be used in the summation\r\n self._filter = math.sqrt(2)/G * np.exp(1j*N/2/fs*w0) * w * np.flip(E)\r\n \r\n def __name__(self):\r\n return self._name\r\n \r\n @property\r\n def fs(self):\r\n \"\"\"Return the pmu's assumed sampling rate.\"\"\"\r\n return self._fs\r\n @property\r\n def f0(self):\r\n \"\"\"Return the pmu's assumed nominal frequency.\"\"\"\r\n return self._f0\r\n @property\r\n def RR(self):\r\n \"\"\"Return the pmu's report rate.\"\"\"\r\n return self._RR\r\n @property\r\n def tclass(self):\r\n \"\"\"Return the pmu's class type (p or m).\"\"\"\r\n return self._tclass \r\n \r\n @property\r\n def plotFilter(self):\r\n \r\n \"\"\"A debugging helper that plots the filter's magnitude and angle.\"\"\"\r\n ax1 = plt.subplot()\r\n l1, = ax1.plot(abs(self._filter), color='red')\r\n ax2 = ax1.twinx()\r\n l2, = ax2.plot(np.angle(self._filter), color='orange')\r\n plt.legend([l1, l2], [\"Filter Magnitude\", \"Filter Angle/Phase\"])\r\n plt.show()\r\n \r\n def run(self, t, x):\r\n \"\"\"\r\n Called to estimate phasors of timestamps t and measurements x.\r\n Will output timestamps, phasors, frequency, and ROCOF at the report rate.\r\n \r\n If x is 1D, it will output just a phasor, with NaN for frequency and ROCOF.\r\n If x has 3 measurements it will assume 3-phase, and give 3 phasors + the positive sequence\r\n as well as frequency and ROCOF estimations.\r\n If x has 3+3 measurements it will output 6 single-phase phasors and 2 positive-sequence phasors\r\n in the order of [Xa,Xb,Xc,Xp,Ya,Yb,Yc,Yp], and will only use X to calculate frequency and ROCOF.\r\n \"\"\"\r\n if(x.size == x.shape[0]): # if .size and .shape[0] are the same it is 1D\r\n X = self.__estimate(t,x,self._fs,self._f0,self._filter)\r\n F = np.empty(np.size(x,0), dtype=complex) * np.nan # cannot calculate F w/o 3 phases\r\n DF = np.empty(np.size(x,0), dtype=complex) * np.nan # cannot calculate ROCOF w/o 3 phases\r\n else:\r\n # builds an empty NaN array shaped like the input but with additional positive seq columns\r\n X = np.empty((np.size(x,0),np.size(x,1)+math.floor(np.size(x,1)/3)), dtype=complex) * np.nan\r\n \r\n j = 0 # iterator for the output columns\r\n for i in range(np.size(x,1)): # for each input column write an output\r\n X[:,j] = self.__estimate(t,x[:,i],self._fs,self._f0,self._filter)\r\n j += 1\r\n if((i+1) % 3 == 0): # for every 3 measurements, add a positive sequence calculation\r\n X[:,j] = ((0.5 - 1j*math.sqrt(3)/2)*X[:,j-3] + (0.5 + 1j*math.sqrt(3)/2)*X[:,j-2] - X[:,j-1]) / (1.5 - 1j*3*math.sqrt(3)/2)\r\n j += 1 # make sure to skip past where the pos seq. is stored.\r\n \r\n # only use the first 3-phase system to calculate frequency; 
others are assumed to be same frequency\r\n \"\"\"Annex D.4 Equations D.3 and D.4\"\"\"\r\n F = signal.lfilter([1, 0,-1],1,self.__unwrap(np.angle(X[:,3])))/(4*math.pi*1/self._fs) + self._f0\r\n DF = signal.lfilter([1,-2, 1],1,self.__unwrap(np.angle(X[:,3])))/(2*math.pi/self._fs/self._fs)\r\n \r\n # account for group delay of the filters\r\n GrpDel = 1\r\n F = np.concatenate((F [GrpDel:],np.empty(GrpDel)*np.nan))\r\n DF = np.concatenate((DF[GrpDel:],np.empty(GrpDel)*np.nan))\r\n \r\n \"\"\"If P class, fix the magnitude using Annex D.6, Equation D.6\"\"\"\r\n if(self._tclass == 'p'):\r\n for i in range(np.size(X,1)):\r\n XM = abs(X[:,i]);\r\n XM = XM / np.sin(math.pi*(self._f0 + 1.625*(self._f0-F))/(2*self._f0))\r\n X[:,i] = XM*np.cos(np.angle(X[:,i])) + 1j*XM*np.sin(np.angle(X[:,i]))\r\n \r\n return X,F,DF\r\n \r\n @staticmethod\r\n def __estimate(t,x,fs,f0,b):\r\n \"\"\"From D.2, Equation D.1 and D.2. convolution with X\"\"\"\r\n w0 = 2*math.pi*f0;\r\n N = np.size(b) - 1;\r\n X = signal.lfilter(b,1,x); # this part does the summation/convolution\r\n \r\n X[:N] = np.NaN; # filter start-up\r\n XM = abs(X);\r\n XA = ((np.angle(X) - t*w0) + math.pi) % (2*math.pi) - math.pi #wrapToPi\r\n \r\n \"\"\"From D.3, group delay\"\"\"\r\n GrpDel = int(N/2); #GroupDelay = N/2\r\n XM = np.concatenate((XM[GrpDel:],np.empty(GrpDel)*np.nan))\r\n XA = np.concatenate((XA[GrpDel:],np.empty(GrpDel)*np.nan))\r\n \r\n return XM*np.cos(XA) + 1j*XM*np.sin(XA) # angle to complex\r\n \r\n @staticmethod \r\n def __unwrap(array): # unwrap from pi, ignoring NaN's\r\n array[~np.isnan(array)] = np.unwrap(array[~np.isnan(array)])\r\n return array","repo_name":"pnnl/OEDISI","sub_path":"ieeeClass.py","file_name":"ieeeClass.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"10492862712","text":"import math\nimport collections\n\nimport numpy as np\nimport numpy.random as random\n\nfrom common.batch_generator import BatchGenerator\n\n\nclass HeterogeneousBatchGenerator:\n def __init__(self,\n tf_records,\n is_training,\n partition,\n are_test_labels_available,\n name_to_metadata,\n input_type_list,\n output_type_list,\n batch_size,\n buffer_size,\n path_list_dict,\n use_autopad=False,\n augmentation_configuration=None,\n use_homogeneous_batches=False):\n self.tf_records = tf_records\n self.is_training = is_training\n self.partition = partition\n self.are_test_labels_available = are_test_labels_available\n self.name_to_metadata = name_to_metadata\n self.input_type_list = input_type_list\n self.output_type_list = output_type_list\n self.batch_size = batch_size\n self.buffer_size = buffer_size\n self.path_list_dict = path_list_dict\n self.use_autopad = use_autopad\n self.augmentation_configuration = augmentation_configuration\n self.use_homogeneous_batches = use_homogeneous_batches\n\n self.batch_generators = dict()\n\n print(self.path_list_dict.keys())\n\n original_modality_availability = \"orig\"\n\n use_modality_partition = dict()\n use_modality_partition[\"voice\"] = False\n use_modality_partition[\"breath\"] = False\n use_modality_partition[\"cough\"] = False\n for input_type in self.input_type_list:\n if \"voice\" in input_type:\n use_modality_partition[\"voice\"] = True\n if \"v\" not in original_modality_availability:\n original_modality_availability += \"_v\"\n if \"breath\" in input_type:\n use_modality_partition[\"breath\"] = True\n if \"b\" not in original_modality_availability:\n original_modality_availability += 
\"_b\"\n if \"cough\" in input_type:\n use_modality_partition[\"cough\"] = True\n if \"c\" not in original_modality_availability:\n original_modality_availability += \"_c\"\n\n self.batch_generator_config = collections.defaultdict(dict)\n\n self.sizes = dict()\n self.steps_per_epoch = dict()\n for modality_combination, path_list in self.path_list_dict.items():\n name_to_metadata_eff = dict()\n for name, metadata in name_to_metadata.items():\n if \"logmel_spectrogram\" not in name:\n name_to_metadata_eff[name] = metadata\n else:\n if (\"voice\" in name) and (use_modality_partition[\"voice\"]) and (\"voice\" in modality_combination):\n name_to_metadata_eff[name] = metadata\n if (\"breath\" in name) and (use_modality_partition[\"breath\"]) and (\"breath\" in modality_combination):\n name_to_metadata_eff[name] = metadata\n if (\"cough\" in name) and (use_modality_partition[\"cough\"]) and (\"cough\" in modality_combination):\n name_to_metadata_eff[name] = metadata\n\n num_available_modalities = 0\n\n input_type_list_eff = list()\n modality_combination_eff_2 = \"\"\n for input_type in self.input_type_list:\n if (\"voice\" in input_type) and (use_modality_partition[\"voice\"]) and (\"voice\" in modality_combination):\n input_type_list_eff.append(input_type)\n if \"support\" not in input_type:\n num_available_modalities += 1\n modality_combination_eff_2 += \"_voice\"\n if (\"breath\" in input_type) and (use_modality_partition[\"breath\"]) and (\"breath\" in modality_combination):\n input_type_list_eff.append(input_type)\n if \"support\" not in input_type:\n num_available_modalities += 1\n modality_combination_eff_2 += \"_breath\"\n if (\"cough\" in input_type) and (use_modality_partition[\"cough\"]) and (\"cough\" in modality_combination):\n input_type_list_eff.append(input_type)\n if \"support\" not in input_type:\n num_available_modalities += 1\n modality_combination_eff_2 += \"_cough\"\n\n print(modality_combination)\n print(input_type_list_eff)\n print(num_available_modalities)\n\n if num_available_modalities == 1:\n modality_combination_eff = \"single\"\n elif num_available_modalities == 2:\n modality_combination_eff = \"double\"\n elif num_available_modalities == 3:\n modality_combination_eff = \"triple\"\n else:\n raise ValueError\n\n if self.use_homogeneous_batches:\n if \"pos\" in modality_combination:\n asthma_str = \"_pos\"\n elif \"neg\" in modality_combination:\n asthma_str = \"_neg\"\n else:\n asthma_str = \"\"\n else:\n asthma_str = \"\"\n\n modality_combination_clean = modality_combination_eff + modality_combination_eff_2\n modality_combination_eff = original_modality_availability + modality_combination_eff + modality_combination_eff_2 + asthma_str\n\n self.batch_generator_config[modality_combination_eff][\"modality_combination_clean\"] = modality_combination_clean\n self.batch_generator_config[modality_combination_eff][\"name_to_metadata\"] = name_to_metadata_eff\n self.batch_generator_config[modality_combination_eff][\"input_type_list\"] = input_type_list_eff\n if \"path_list\" not in self.batch_generator_config[modality_combination_eff].keys():\n self.batch_generator_config[modality_combination_eff][\"path_list\"] = path_list\n else:\n self.batch_generator_config[modality_combination_eff][\"path_list\"].extend(path_list)\n\n for modality_combination_eff in self.batch_generator_config.keys():\n print()\n # dataset, \\\n # iterator, \\\n # next_element, \\\n # init_op\n self.batch_generators[modality_combination_eff] = \\\n BatchGenerator(tf_records_folder=self.tf_records,\n 
is_training=self.is_training,\n partition=self.partition,\n are_test_labels_available=self.are_test_labels_available,\n name_to_metadata=self.batch_generator_config[modality_combination_eff][\"name_to_metadata\"],\n input_type_list=self.batch_generator_config[modality_combination_eff][\"input_type_list\"],\n output_type_list=self.output_type_list,\n batch_size=self.batch_size,\n buffer_size=15 * self.batch_size,\n path_list=self.batch_generator_config[modality_combination_eff][\"path_list\"],\n augmentation_configuration=self.augmentation_configuration).get_tf_dataset()\n self.sizes[modality_combination_eff] = len(self.batch_generator_config[modality_combination_eff][\"path_list\"])\n self.steps_per_epoch[modality_combination_eff] = math.ceil(len(self.batch_generator_config[modality_combination_eff][\"path_list\"]) / batch_size)\n\n def get_tf_dataset(self):\n print(self.sizes)\n print(self.steps_per_epoch)\n return self.batch_generators\n\n def heterogeneous_generation(self, sess, shuffle):\n num_modality_combinations = len(self.sizes.keys())\n modality_combinations = sorted(self.steps_per_epoch.keys())\n\n init_op_list = [self.batch_generators[k][3] for k in modality_combinations]\n next_element_list = [self.batch_generators[k][2] for k in modality_combinations]\n\n for init_op in init_op_list:\n sess.run(init_op)\n\n counts = np.zeros((num_modality_combinations,), dtype=np.float32)\n total_steps_per_epoch = 0\n for t, modality_combination in enumerate(modality_combinations):\n counts[t] = self.steps_per_epoch[modality_combination]\n total_steps_per_epoch += self.steps_per_epoch[modality_combination]\n c = 0\n for step in range(total_steps_per_epoch):\n probabilities = counts / counts.sum()\n if shuffle:\n while True:\n c = random.choice(num_modality_combinations, p=probabilities)\n if counts[c] > 0:\n break\n counts[c] -= 1\n else:\n if counts[c] > 0:\n pass\n else:\n c += 1\n counts[c] -= 1\n\n modality_combination = modality_combinations[c]\n\n yield self.batch_generator_config[modality_combination][\"modality_combination_clean\"], sess.run(next_element_list[c])\n","repo_name":"glam-imperial/asthma-within-class-barlow","sub_path":"asthma_barlow/covid19_sounds/neurips21/heterogeneous_batch_generator.py","file_name":"heterogeneous_batch_generator.py","file_ext":"py","file_size_in_byte":9247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18764779353","text":"#basic encryption project\n\nkeys = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\", \"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\nalphabet = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\", \"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\nlowalphabet = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\", \"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n\nfor i in range(26):\n x = input(\"What do you want the key for \" + alphabet[i] + \" to be?\")\n keys[i] = x\n\ndef loopMain(): \n ask = input(\"To encrypt a message write E. 
To check ur key write K.\")\n if ask == \"E\":\n msg = input(\"Write your message: \")\n chars = list(msg)\n enc = \"\"\n for char in chars:\n cap = char\n if char in alphabet:\n num = alphabet.index(char)\n enc += keys[num] + \" \"\n elif char in lowalphabet:\n num = lowalphabet.index(char)\n enc += keys[num] + \" \"\n else:\n enc += char + \" \"\n \n print(enc)\n elif ask == \"K\":\n for i in range(26):\n print(alphabet[i] + \": \" + keys[i])\n else:\n loopMain() \n\nwhile True:\n loopMain()\n\n\n","repo_name":"Gaurav-Ban22/basencrypt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"74753588942","text":"import pandas as pd\nfrom nltk.util import ngrams \nfrom collections import Counter\n\nclass NGramExtractor(object):\n\tdef __init__(self, dataframe=None, top_n=10):\n\t\tself.df = pd.read_csv(\"data/inp_and_gt_name_near_food_no_inform.csv\")\n\t\ttext = \"\"\n\n\t\tutterances = self.df[\"target_language\"].to_list()\n\n\t\tfor utt in utterances:\n\t\t\ttext += utt\n\n\t\tself.all_bigrams = Counter(ngrams(text.split(), 2))\n\t\tself.all_trigrams = Counter(ngrams(text.split(), 3))\n\t\tself.top_n = top_n\n\n\tdef islice(self, iterable, *args):\n\t # islice('ABCDEFG', 2) --> A B\n\t # islice('ABCDEFG', 2, 4) --> C D\n\t # islice('ABCDEFG', 2, None) --> C D E F G\n\t # islice('ABCDEFG', 0, None, 2) --> A C E G\n\t s = slice(*args)\n\t start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1\n\t it = iter(range(start, stop, step))\n\t try:\n\t nexti = next(it)\n\t except StopIteration:\n\t # Consume *iterable* up to the *start* position.\n\t for i, element in zip(range(start), iterable):\n\t pass\n\t return\n\t try:\n\t for i, element in enumerate(iterable):\n\t if i == nexti:\n\t yield element\n\t nexti = next(it)\n\t except StopIteration:\n\t # Consume to *stop*.\n\t for i, element in zip(range(i + 1, stop), iterable):\n\t pass\n\n\tdef take(self, n, iterable):\n\t \"Return first n items of the iterable as a list\"\n\t return list(self.islice(iterable, n))\n\n\tdef get_top_bigrams(self, head_word):\n\t\ttop_bigrams = {k: v for k, v in self.all_bigrams.items() if k[0]==head_word}\n\n\t\ttop_bigrams = self.take(self.top_n, top_bigrams.items())\n\n\t\tbigrams_to_return = []\n\t\tfor bigram in top_bigrams:\n\t\t\tbigram_txt = bigram[0][0]+\" \"+bigram[0][1]\n\t\t\tbigrams_to_return.append(bigram_txt)\n\n\t\treturn bigrams_to_return\n\n\tdef get_top_trigrams(self, head_word):\n\t\ttop_trigrams = {k: v for k, v in self.all_trigrams.items() if k[0]==head_word}\n\n\t\ttop_trigrams = self.take(self.top_n, top_trigrams.items())\n\t\ttrigrams_to_return = []\n\t\tfor trigram in top_trigrams:\n\t\t\ttrigram_txt = trigram[0][0]+\" \"+trigram[0][1]\n\t\t\ttrigrams_to_return.append(trigram_txt)\n\n\t\treturn trigrams_to_return\n\n\n\n# extractor = NGramExtractor()\n# print(extractor.get_top_trigrams(\"x-vow-cuisine-food\"))\n\n","repo_name":"StefanL19/Imitation_Learning","sub_path":"utils_ngram.py","file_name":"utils_ngram.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4623839395","text":"\n\n# List Comprehension\n# new_list = [new_item for item in list]\n# new_list_if = [new_item for item in list if test]\n\n# new_dict = {new_key:new_value for item in list}\n# new_dict = {new_key:new_value for k,v in dict.items()}\n# new_dict_if = 
{new_key:new_value for k,v in dict.items() if test}\n\n# numbers = [1, 2, 3]\n#\n# new_list = [n + 1 for n in numbers]\n#\n# #For Loop\n# numbers = [1, 2, 3]\n# new_list = []\n# for n in numbers:\n# add_1 = n + 1\n# new_list.append(add_1)\n\n\nstudent_dict = {\n 'students': ['Tad', 'Tex', 'Ted'],\n 'score': [77, 87, 94]\n}\n\nimport pandas\n\nstudent_data_frame = pandas.DataFrame(student_dict)\n# print(student_data_frame)\n\n# for index , row in student_data_frame.iterrows():\n# print(row)\n\n# for index , row in student_data_frame.iterrows():\n# print(row.students)\n\nfor index , row in student_data_frame.iterrows():\n print(row.score)\n\n\n\n","repo_name":"rugby8724/100_Days_of_Code","sub_path":"twenty_six_NATO_phonetic_alphabet/prac_list_comp.py","file_name":"prac_list_comp.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70293308302","text":"class Solution(object):\n def findMaxConsecutiveOnes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = 0\n count = 0\n for i in range(len(nums)):\n if nums[i] == 1:\n count += 1\n res = max(res, count)\n else:\n count = 0\n return res","repo_name":"CrazyCoder4Carrot/leetcode","sub_path":"python/451-500/485. Max Consecutive Ones.py","file_name":"485. Max Consecutive Ones.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"71723945742","text":"import unittest\nimport numpy as np\n\n\ndef knapsack01(value, weight, capacity):\n\t\"\"\"\n\tknapsack01 solves a 0-1 knapsack problem.\n\tinput: values (value) and weights (weight) of items to put into a knapsack of size capacity\n\toutput: the set of item indices (counting from 0) that maximizes the value of the items put\n\tinto the knapsack, together with that maximum value\n\t\"\"\"\n\tif not isinstance(capacity, int):\n\t\traise ValueError('knapsack_size should be an integer')\n\n\tn = len(value) # the number of items\n\t# m[i, j] = maximum value that can be attained with weight <= j using the first i items\n\tm = np.zeros(shape=(n + 1, capacity + 1), dtype=int) # row 0 (no items) is already all zeros\n\n\tfor i in range(1, n+1): # items\n\t\tfor j in range(1, capacity+1): # sizes\n\t\t\tif weight[i-1] > j:\n\t\t\t\tm[i, j] = m[i - 1, j]\n\t\t\telse:\n\t\t\t\tm[i, j] = max(m[i - 1, j], m[i - 1, j - weight[i-1]] + value[i-1])\n\n\tmax_val = m[-1, -1]\n\titems = set()\n\n\twhile max_val > 0 and i > 0:\n\t\tif max_val - value[i-1] in m[i - 1, :]:\n\t\t\tmax_val = max_val - value[i-1]\n\t\t\titems.add(i-1)\n\t\t\ti -= 1\n\t\telse:\n\t\t\ti -= 1\n\treturn items, m[-1, -1]\n\n\nclass TestKnapsack(unittest.TestCase):\n\n\n\tdef tests_big(self):\n\t\tvalues = []\n\t\tweights = []\n\t\twith open('input_random_19_100_1000.txt') as infile:\n\t\t\tline = infile.readline()\n\t\t\tline = line.strip('\\n').split(' ')\n\t\t\tknapsack_size, num_item = [int(item) for item in line]\n\t\t\tfor line in infile.readlines():\n\t\t\t\tline = line.strip('\\n').split(' ')\n\t\t\t\tval, weight = [int(item) for item in line]\n\t\t\t\tvalues.append(val)\n\t\t\t\tweights.append(weight)\n\t\tif num_item != len(values):\n\t\t\traise ValueError\n\n\t\titems, max_val = knapsack01(values, weights, knapsack_size)\n\t\tself.assertEqual(max_val, 1580)\n\n\tdef tests_small(self):\n\t\tvalues = []\n\t\tweights = []\n\t\twith open('small_test_case.txt') as infile:\n\t\t\tline = infile.readline()\n\t\t\tline = 
line.strip('\\n').split(' ')\n\t\t\tknapsack_size, num_item = [int(item) for item in line]\n\t\t\tfor line in infile.readlines():\n\t\t\t\tline = line.strip('\\n').split(' ')\n\t\t\t\tval, weight = [int(item) for item in line]\n\t\t\t\tvalues.append(val)\n\t\t\t\tweights.append(weight)\n\t\tif num_item != len(values):\n\t\t\traise ValueError\n\n\t\titems, max_val = knapsack01(values, weights, knapsack_size)\n\t\tself.assertEqual(items, set([2, 3]))\n\t\tself.assertEqual(max_val, 8)\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"chandanws/mooc_book","sub_path":"Algorithms Specialization - Greedy Algorithms, Minimum Spanning Trees, and Dynamic Programming/WEEK4/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33795740767","text":"from django.core.management.base import BaseCommand\n\nfrom editor.models import Template, TemplateLink, TemplateContextParagraph, Rule\n\nfrom editor.management.commands.template_fixtures import TEMPLATES\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n rules = Rule.objects.all()\n rules_as_dicts = []\n for rule in rules:\n rules_as_dicts.append(\n {\n 'room': rule.room,\n 'index': rule.index,\n 'name': rule.name,\n 'content': rule.content,\n }\n )\n\n Template.objects.all().delete()\n\n rules.delete()\n\n try:\n for t_index, t in enumerate(TEMPLATES):\n created_template = Template.objects.create(\n category=t['category'],\n index=t_index,\n name=t['name'],\n context=t['context'],\n )\n for link_index, link in enumerate(t['links']):\n TemplateLink.objects.create(\n template=created_template,\n index=link_index,\n type=link['type'],\n locale=link.get('locale', None),\n key=link.get('key', None),\n value_type=link.get('value_type', 'string'),\n predefined_choices=link.get('predefined_choices', ''),\n default_value=link.get('default_value', None),\n )\n\n context_paragraphs = t.get('context_paragraphs', [])\n for cp_index, cp in enumerate(context_paragraphs):\n TemplateContextParagraph.objects.create(\n template=created_template,\n index=cp_index,\n key=cp['key'],\n type=cp['type'],\n )\n except Exception as e:\n formatted_exception = \"{}: {}\".format(type(e).__name__, e)\n print(\"Something bad happened ({})\".format(formatted_exception))\n\n for rule in rules_as_dicts:\n Rule.objects.create(\n room=rule['room'],\n index=rule['index'],\n name=rule['name'],\n content=rule['content'],\n )\n","repo_name":"just-escape/justrelax","sub_path":"backend/storage/editor/management/commands/reload_templates.py","file_name":"reload_templates.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12524154198","text":"# coding:utf-8\n'''\nGiven two strings A and B, both made up of letters, where B is shorter than A:\nwhat is the fastest way to decide whether every letter of B appears in A?\n\nApproaches:\n sort first, then compare (code omitted)\n compare directly\n'''\n\n\ndef f(str1, str2):\n '''\n :param str1: A\n :param str2: B\n :return: bool\n method 1\n '''\n flag = True\n for i in str2:\n if i not in str1:\n flag = False\n break\n return flag\n\n\ndef f1(str1, str2):\n '''\n :param str1: A\n :param str2: B\n :return: bool\n method 2: brute-force (BF) matching of B as a substring of A\n '''\n s2_index = 0\n for s in str1:\n if s == str2[s2_index]:\n s2_index += 1\n if s2_index == len(str2):\n return True\n else:\n s2_index = 0\n return False\n\n\ndef kmp_match(s, p):\n m = len(s)\n n = len(p)\n cur = 0 # start pointer cur\n table = partial_table(p)\n while cur <= m - n: # only try alignments in the first m - n positions\n for i in 
range(n):\n if s[i + cur] != p[i]:\n cur += max(i - table[i - 1], 1) # with the partial-match table we can shift several positions at once instead of one by one\n break\n else: # in a for loop, the else clause runs only if no break fired\n # (whenever we exit via break, this else part is skipped)\n return True\n return False\n\n\n# partial-match table\ndef partial_table(p):\n '''partial_table(\"ABCDABD\") -> [0, 0, 0, 0, 1, 2, 0]'''\n prefix = set()\n postfix = set()\n ret = [0]\n for i in range(1, len(p)):\n prefix.add(p[:i])\n postfix = {p[j:i + 1] for j in range(1, i + 1)}\n ret.append(len((prefix & postfix or {''}).pop()))\n return ret\n\n\nif __name__ == '__main__':\n str1 = \"abcdfe\"\n str2 = \"bwc\"\n print(kmp_match(str1, str2))\n","repo_name":"RichieSong/algorithm","sub_path":"算法/字符串/字符串包含.py","file_name":"字符串包含.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29202097775","text":"from mcpi import minecraft\nimport block\n\nfrom plot import *\nfrom fast_query import *\nfrom utils import *\nfrom building import *\nfrom heightmap import *\n\nfrom brush import *\nimport socket\nimport random\n\nVILLAGE_BOUNDS_INCREMENT = 30\nDEBUG_DRAW_HEIGHT = 100\n\nclass Village:\n\n\tdef __init__(self, mc: minecraft.Minecraft):\n\t\tself.mc = mc\n\t\tself.centre = mc.player.getTilePos()\n\t\tself.bounds = Rectangle(\n\t\t\tVector2(self.centre.x - VILLAGE_BOUNDS_INCREMENT, self.centre.z - VILLAGE_BOUNDS_INCREMENT),\n\t\t\tVector2(self.centre.x + VILLAGE_BOUNDS_INCREMENT, self.centre.z + VILLAGE_BOUNDS_INCREMENT))\n\n\t\tself.DEBUG_DRAW_HEIGHT = DEBUG_DRAW_HEIGHT\n\t\tself.mc.postToChat(f'Generating height map {self.bounds}.')\n\t\tself.map = Heightmap(self.mc, self.bounds)\n\t\tself.mc.postToChat('Height map complete.')\n\t\t\n\t\t# village generation parameters\n\t\tself.num_buildings = random.randrange(5, 10)\n\t\tself.num_special_buildings = random.randrange(3, 7)\n\t\tself.plot_smooth_border = random.randrange(4, 6)\n\t\tself.plot_smooth_iterations = 3\n\t\tself.plot_size_range = Vector2(5, 20)\n\n\t\tself.plots = []\n\n\tdef construct(self):\n\t\tfor _ in range(0, self.num_buildings):\n\t\t\tnew_plot = Plot(self.mc, self)\n\t\t\tself.plots.append(new_plot)\n\t\tself.mc.postToChat('Houses constructed.')\n\t\tfor _ in range(0, self.num_special_buildings):\n\t\t\tif SPECIAL_BUILDINGS:\n\t\t\t\tnew_plot = Plot(self.mc, self, building=False)\n\t\t\t\tself.plots.append(new_plot)\n\t\tself.mc.postToChat('Additional structures constructed.')\n\t\tself.mc.postToChat(\"Building complete.\")\n\n\n\tdef expand_bounds(self, amount: int = VILLAGE_BOUNDS_INCREMENT, all_directions = False):\n\t\tif not all_directions:\n\t\t\tdirection = random.choice([Facing.NORTH, Facing.SOUTH, Facing.EAST, Facing.WEST])\n\n\t\t\tif direction is Facing.NORTH:\n\t\t\t\tself.mc.postToChat('Expanding village to north.')\n\t\t\t\t# important to update new area first, as it references old bounds\n\t\t\t\tnew_area = Rectangle(Vector2(self.bounds.x0, self.bounds.z0 - amount),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1, self.bounds.z0))\n\t\t\t\tself.bounds = Rectangle(Vector2(self.bounds.x0, self.bounds.z0 - amount),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1, self.bounds.z1))\n\t\t\telif direction is Facing.SOUTH:\n\t\t\t\tself.mc.postToChat('Expanding village to south.')\n\t\t\t\tnew_area = Rectangle(Vector2(self.bounds.x0, self.bounds.z1),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1, self.bounds.z1 + amount))\n\t\t\t\tself.bounds = Rectangle(Vector2(self.bounds.x0, 
self.bounds.z0),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1, self.bounds.z1 + amount))\n\t\t\telif direction is Facing.EAST:\t# expand east\n\t\t\t\tself.mc.postToChat('Expanding village to east.')\n\t\t\t\tnew_area = Rectangle(Vector2(self.bounds.x1, self.bounds.z0),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1 + amount, self.bounds.z1))\n\t\t\t\tself.bounds = Rectangle(Vector2(self.bounds.x0, self.bounds.z0),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1 + amount, self.bounds.z1))\n\t\t\telse:\t# expand west\n\t\t\t\tself.mc.postToChat('Expanding village to west.')\n\t\t\t\tnew_area = Rectangle(Vector2(self.bounds.x0 - amount, self.bounds.z0),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x0, self.bounds.z1))\n\t\t\t\tself.bounds = Rectangle(Vector2(self.bounds.x0 - amount, self.bounds.z0),\n\t\t\t\t\t\t\t\t\t\tVector2(self.bounds.x1, self.bounds.z1))\n\t\telse:\n\t\t\tnew_area = self.bounds.expanded_by(amount)\n\t\t\tself.bounds = new_area\n\n\t\tself.map.update_bounds(new_area, self.bounds)\n\t\tself.mc.postToChat(f'New bounds are {self.bounds}.')\n\n\tdef debug_draw(self):\n\t\t# mark the village centre\n\t\tself.mc.setBlock(self.centre.x, DEBUG_DRAW_HEIGHT + 10, self.centre.z, block.DIAMOND_BLOCK)\n\t\tself.mc.setBlock(self.centre.x, DEBUG_DRAW_HEIGHT + 11, self.centre.z, block.DIAMOND_BLOCK)\n\t\tself.mc.setBlock(self.centre.x, DEBUG_DRAW_HEIGHT + 12, self.centre.z, block.DIAMOND_BLOCK)\n\t\t# mark the village bounds\n\t\tBrush.draw_rect(self.mc,\n\t\t\t\t\t\tself.bounds.small_corner.asX_Z(DEBUG_DRAW_HEIGHT + 10),\n\t\t\t\t\t\tself.bounds.large_corner.asX_Z(DEBUG_DRAW_HEIGHT + 10),\n\t\t\t\t\t\tblock.GOLD_BLOCK)\n\t\t# mark plot bounds\n\t\tfor plot in self.plots:\n\t\t\tplot.debug_draw(self.mc)\n\n\t\t\t\t\nif __name__ == \"__main__\":\n\ttry:\n\t\tmc = minecraft.Minecraft.create()\n\texcept socket.error as e:\n\t\tprint(\"Cannot connect to Minecraft server\")\n\t\traise e\n\n\tif True:\n\t\ta_lovely_village = Village(mc)\n\t\ta_lovely_village.construct()\n\t# a_lovely_village.debug_draw()\n\n\tif False:\n\t\tfor plot in a_lovely_village.plots:\n\t\t\tlocation = plot.building.door_coords\n\t\t\tprint(location)\n\t# village pathing test\n\tif False:\n\t\t# General setup\n\t\ta_lovely_village = Village(mc)\n\t\ta_lovely_village.construct()\n\n\t\tpx, py, pz = a_lovely_village.centre\n\t\tsz = 70\n\n\t\ttest_cuboid = Cuboid((px - sz, px + sz + 1), (0, 1), (pz - sz, pz + sz + 1))\n\t\thm = fq_heights_and_surface_id_filtered(test_cuboid)\n\t\tmc.postToChat(\"Map created.\")\n\n\t\tdoor_locations = []\n\t\tfor plot in a_lovely_village.plots:\n\t\t\tdoor_coords = Vector3(plot.building.get_door_coords()[0], plot.building.get_door_coords()[1],\n\t\t\t\t\t\t\t\t plot.building.get_door_coords()[2])\n\t\t\tdoor_coords_to_append = (door_coords.x, door_coords.z)\n\t\t\tdoor_locations.append(door_coords_to_append)\n\t\t\tprint(door_coords_to_append)\n\t\t\tmc.postToChat(\"door location added:\")\n\t\t\tmc.postToChat(door_coords_to_append)\n\n\t\tcurrent_location = door_locations.pop()\n\t\twhile len(door_locations):\n\t\t\tnext = door_locations.pop()\n\t\t\tpath_finder(current_location, next, hm, mc)\n\n\tif False:\n\t\tposition = mc.player.getTilePos()\n\t\ttop_left = Vector3(position.x - 10, position.y, position.z - 10)\n\t\tbottom_right = Vector3(position.x + 10, position.y, position.z + 10)\n\t\tbuilding = Building(mc, top_left, bottom_right, 
4)\n\t\tprint(building.door_location)\n","repo_name":"GeordieEK/Village-Generator","sub_path":"village.py","file_name":"village.py","file_ext":"py","file_size_in_byte":5539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21066838457","text":"\"\"\"Thin wrappers around google-api-client-python talking to sheets/drive API.\"\"\"\n\nimport re\n\nimport apiclient\n\n__all__ = ['build_service',\n 'iterfiles',\n 'spreadsheet',\n 'values'\n 'quote']\n\nSERVICES = {'sheets': {'serviceName': 'sheets', 'version': 'v4'},\n 'drive': {'serviceName': 'drive', 'version': 'v3'}}\n\nSHEET = 'application/vnd.google-apps.spreadsheet'\n\nFILEORDER = 'folder,name,createdTime'\n\nIS_ALPHANUMERIC_A1 = re.compile(r'[a-zA-Z]{1,3}' # last column 'ZZZ' (18_278)\n r'\\d{1,}').fullmatch\n\n\ndef build_service(name=None, **kwargs):\n \"\"\"Return a service endpoint for interacting with a Google API.\"\"\"\n if name is not None:\n for kw, value in SERVICES[name].items():\n kwargs.setdefault(kw, value)\n if 'cache_discovery' not in kwargs:\n try:\n from oauth2client import __version__ as o2c_version\n except ImportError: # pragma: no cover\n pass\n else:\n # ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth\n if o2c_version == '4' or o2c_version.startswith('4.'):\n kwargs['cache_discovery'] = False\n\n return apiclient.discovery.build(**kwargs)\n\n\ndef iterfiles(service, *, name=None, mimeType=SHEET, order=FILEORDER): # noqa: N803\n \"\"\"Fetch and yield ``(id, name)`` pairs for Google drive files.\"\"\"\n params = {'orderBy': order, 'pageToken': None}\n q = []\n if name is not None:\n q.append(f\"name='{name}'\")\n if mimeType is not None:\n q.append(f\"mimeType='{mimeType}'\")\n if q:\n params['q'] = ' and '.join(q)\n\n while True:\n request = service.files().list(**params)\n response = request.execute()\n for f in response['files']:\n yield f['id'], f['name']\n try:\n params['pageToken'] = response['nextPageToken']\n except KeyError:\n return\n\n\ndef spreadsheet(service, id):\n \"\"\"Fetch and return spreadsheet meta data with Google sheets API.\"\"\"\n request = service.spreadsheets().get(spreadsheetId=id)\n try:\n response = request.execute()\n except apiclient.errors.HttpError as e:\n if e.resp.status == 404:\n raise KeyError(id)\n else: # pragma: no cover\n raise\n return response\n\n\ndef values(service, id, ranges):\n \"\"\"Fetch and return spreadsheet cell values with Google sheets API.\"\"\"\n params = {'majorDimension': 'ROWS',\n 'valueRenderOption': 'UNFORMATTED_VALUE',\n 'dateTimeRenderOption': 'FORMATTED_STRING',\n 'spreadsheetId': id,\n 'ranges': ranges}\n request = service.spreadsheets().values().batchGet(**params)\n response = request.execute()\n return response['valueRanges']\n\n\ndef quote(worksheet_name: str) -> str:\n \"\"\"Return ``worksheet_name``, single-quote if needed.\n\n >>> quote('spam')\n 'spam'\n\n >>> quote('spam spam')\n 'spam spam'\n\n >>> quote('DKC3')\n \"'DKC3'\"\n \"\"\"\n if IS_ALPHANUMERIC_A1(worksheet_name):\n # https://developers.google.com/sheets/api/guides/concepts#expandable-1\n return f\"'{worksheet_name}'\"\n return worksheet_name\n","repo_name":"xflr6/gsheets","sub_path":"gsheets/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"47"} +{"seq_id":"37086014337","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# модели машинного обучения\n# \n# Евгений 
Борисов \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n\nimport sys\nimport logging\n\nimport numpy as np\n# import numpy.random as rng\n\nfrom .base import Loss\n\n\n# CCE: t log(s)\nclass CCE(Loss): # CategoricalCrossEntropy\n \n def _estimate(self,output,target):\n o = output.flatten()\n t = target.flatten()\n with np.errstate(divide='ignore',invalid='ignore'):\n lg = np.where(o>0., np.log(o), 0.)\n return -t.dot( lg.T )/len(target)\n\n def _gradient(self,input_data,target): \n p = self._model._partial(input_data)\n o = self._model.score(input_data)[:,np.newaxis,:]\n t = target[:,np.newaxis,:]\n with np.errstate(divide='ignore',invalid='ignore'):\n d = np.where(o!=0.,(1./o)*p , 1. )\n g = (d*t).sum(axis=0)/len(target)\n return self._norm(g)\n \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \nif __name__ == '__main__': sys.exit(0)\n\n\n\n","repo_name":"mechanoid5/ml_lib","sub_path":"lib/loss/cce.py","file_name":"cce.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43758804060","text":"import adafruit_gps\nimport serial\nimport time\nimport json\n\nclass GPS:\n def __init__(self, UART_PORT, mqttClient):\n self.uart = serial.Serial(UART_PORT, baudrate=9600, timeout=10)\n self.gps = adafruit_gps.GPS(self.uart, debug=False)\n self.mqttClient = mqttClient\n self.latitude = 0\n self.longitude = 0\n\n def update_coords(self, license_plate_number):\n last_print = time.monotonic()\n last_update = time.time()\n while True:\n self.gps.update()\n current_print = time.monotonic()\n if current_print - last_print >= 1.0:\n last_print = current_print\n if not self.gps.has_fix:\n print(\"GPS waiting for fix\")\n continue\n\n self.longitude = self.gps.longitude\n self.latitude = self.gps.latitude\n# print(f\"Longitude: {self.longitude} latitude: {self.latitude}\" )\n current_update = time.time()\n if(current_update - last_update >=5):\n self.mqttClient.publish(f\"location/{license_plate_number}\", json.dumps({\"coords\": {\"longitude\": self.longitude, \"latitude\": self.latitude}, \"license_plate_number\":license_plate_number}))\n print(f\"Longitude: {self.longitude} latitude: {self.latitude}\" )\n last_update = current_update\n \n\n def get_coords(self):\n return {\"longitude\": self.longitude, \"latitude\": self.latitude}\n\n\n","repo_name":"ngqaza-g/mabuza","sub_path":"rpi_code/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31740640096","text":"from examples.pubsub.ps_init import pubsub\nfrom machine import UART\n\nprint(\"ESP32 + serial display pubsub test\")\n\nuart = UART(2, 115200) #UART2 > #U2TXD(SERVO1/PWM1_PIN) # 9600/115200\n# uart.write('C') #test quick clear display \n\ndef display_send(bleuart):\n uart.write(bleuart)\n\npubsub.subscribe('bleuart', display_send)\n","repo_name":"octopusengine/octopuslab","sub_path":"esp32-micropython/examples/pubsub/ps_serial_disp.py","file_name":"ps_serial_disp.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"47"} +{"seq_id":"2049019819","text":"def reverse(string): \n string = \"\".join(reversed(string)) \n return string \nl1=[\"No1\",\"Iam2\", \"notcolorblind3\",\"myworld4 \",\"isblackandwhite5\",\"trytokeep6\"]\nl=[\"No 1\",\"Iam 2\", \"notcolorblind 
33\",\"myworld44 \",\"isblackandwhite 5\",\"trytokeep 6\"]\nnum=[]\nfor i in range(len(l)):\n\tif (i+1)%2 == 0:\n\t\tprint(l[i])\nfor i in range(1,len(l)):\n\tif (i+1)%3==0:\n\t\tl[i]=l[i].upper()\n\t\tprint(l[i])\nl2=[reverse(i) for i in l]\nprint(l2)\n\nnewstring=\" \".join(l) \nprint(newstring)\n\nfor i in range(len(newstring)):\n for j in newstring[i].split():\n if(j.isdigit()):\n num.append(j)\nprint(num)\n\n#returns a list for each string\n\n","repo_name":"mohitkumartoshniwal/Scripting-Lab-5th-Sem","sub_path":"Part a/11/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72384374221","text":"\nfrom sandbox.util.PathDefaults import PathDefaults\n\nimport unittest\nimport logging\nimport sys\nimport apgl\nfrom apgl.egograph.SvmEgoSimulator import SvmEgoSimulator\nfrom apgl.generator import *\nfrom apgl.graph import * \n\n@apgl.skipIf(not apgl.checkImport('svm'), 'No module svm')\nclass SvmEgoSimulatorTest(unittest.TestCase):\n def setUp(self):\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n dataDir = PathDefaults.getDataDir() + \"infoDiffusion/\"\n #dataDir = \"/home/charanpal/Private/Postdoc/Code/APGL/data/\"\n matFileName = dataDir + \"EgoAlterTransmissions.mat\"\n self.svmEgoSimulator = SvmEgoSimulator(matFileName)\n\n def testSampleExamples(self):\n self.svmEgoSimulator.sampleExamples(100)\n\n self.assertEquals(self.svmEgoSimulator.examplesList.getNumSampledExamples(), 100)\n\n def testModelSelection(self):\n Cs = [1.0, 2.0]\n kernel = \"linear\"\n kernelParams = [0.0]\n errorCosts = [0.1, 0.2]\n folds = 5\n sampleSize = 1000\n \n CVal, kernelParamVal, errorCost, error = self.svmEgoSimulator.modelSelection(Cs, kernel, kernelParams, errorCosts, folds, sampleSize)\n\n self.assertTrue(CVal in Cs)\n self.assertTrue(kernelParamVal in kernelParams)\n self.assertTrue(errorCost in errorCosts)\n self.assertTrue(error >= 0.0 and error < 1.0)\n\n def testEvaluateClassifier(self):\n CVal = 1.0\n kernel = \"linear\"\n kernelParamVal = 0.0\n errorCost = 0.5\n folds = 6\n sampleSize = 1000\n invert = False\n\n (means, vars) = self.svmEgoSimulator.evaluateClassifier(CVal, kernel, kernelParamVal, errorCost, folds, sampleSize, invert)\n\n\n def testTrainClassifier(self):\n CVal = 1.0\n kernel = \"linear\"\n kernelParamVal = 0.0\n errorCost = 0.5\n folds = 6\n sampleSize = 1000\n\n self.svmEgoSimulator.trainClassifier(CVal, kernel, kernelParamVal, errorCost, sampleSize)\n\n def testGenerateRandomGraph(self):\n egoFileName = PathDefaults.getDataDir() + \"infoDiffusion/EgoData.csv\"\n alterFileName = PathDefaults.getDataDir() + \"infoDiffusion/AlterData.csv\"\n numVertices = 1000\n infoProb = 0.1\n\n \n p = 0.1\n neighbours = 10\n generator = SmallWorldGenerator(p, neighbours)\n graph = SparseGraph(VertexList(numVertices, 0))\n graph = generator.generate(graph)\n\n self.svmEgoSimulator.generateRandomGraph(egoFileName, alterFileName, infoProb, graph)\n\n def testRunSimulation(self):\n egoFileName = PathDefaults.getDataDir() + \"infoDiffusion/EgoData.csv\"\n alterFileName = PathDefaults.getDataDir() + \"infoDiffusion/AlterData.csv\"\n numVertices = 1000\n infoProb = 0.1\n p = 0.1\n neighbours = 10\n\n generator = SmallWorldGenerator(p, neighbours)\n graph = SparseGraph(VertexList(numVertices, 0))\n graph = generator.generate(graph)\n \n CVal = 1.0\n kernel = \"linear\"\n kernelParamVal = 0.0\n errorCost = 0.5\n folds = 6\n sampleSize = 1000\n\n maxIterations = 5\n\n 
self.svmEgoSimulator.trainClassifier(CVal, kernel, kernelParamVal, errorCost, sampleSize)\n self.svmEgoSimulator.generateRandomGraph(egoFileName, alterFileName, infoProb, graph)\n self.svmEgoSimulator.runSimulation(maxIterations)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"charanpald/wallhack","sub_path":"wallhack/egograph/test/SvmEgoSimulatorTest.py","file_name":"SvmEgoSimulatorTest.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"28824577726","text":"#############################################################################\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#############################################################################\n#\n# Project Name : Simulated MPEG DASH service\n#\n# Author : Alex Ashley\n#\n#############################################################################\n\nimport urllib\n\nfrom drm.base import DrmBase\n\n\nclass Marlin(DrmBase):\n MPD_SYSTEM_ID = '5e629af5-38da-4063-8977-97ffbd9902d4'\n\n def generate_manifest_context(self, stream, keys, cgi_params, la_url=None, locations=None):\n if la_url is None:\n la_url = cgi_params.get('marlin_la_url')\n if la_url is not None:\n la_url = urllib.unquote_plus(la_url)\n else:\n la_url = stream.marlin_la_url\n return {\n 'MarlinContentIds': True,\n 'laurl': la_url,\n 'scheme_id': self.dash_scheme_id(),\n }\n\n def generate_pssh(self, representation, keys):\n raise RuntimeError('generate_pssh has not been implemented for Marlin')\n\n def dash_scheme_id(self):\n \"\"\"\n Returns the DASH schemeIdUri for Marlin\n \"\"\"\n return \"urn:uuid:{0}\".format(self.MPD_SYSTEM_ID)\n\n @classmethod\n def is_supported_scheme_id(cls, uri):\n uri = uri.lower()\n if not uri.startswith(\"urn:uuid:\"):\n return False\n return uri[9:] == cls.MPD_SYSTEM_ID\n","repo_name":"asrashley/dash-live","sub_path":"src/drm/marlin.py","file_name":"marlin.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41878330037","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .algorithms.shift_cipher import ShiftCipher\nfrom .algorithms.monoalphabetic_cipher import MonoalphabeticCipher\nfrom .algorithms.affine_cipher import AffineCipher\nfrom .algorithms.substitution_cipher import SubstitutionCipher\nfrom .algorithms.playfair_cipher import PlayfairCipher\nfrom .algorithms.vigenere_cipher import VigenereCipher\nfrom .algorithms.rail_fence_cipher import RailFenceCipher\nfrom .algorithms.row_transposition_cipher import RowTranspositionCipher\n\n\n# Create your views here.\nclass EncryptView(APIView):\n def post(self, request):\n algorithm = request.data['algorithm']\n plaintext = request.data['input']\n key = request.data['key']\n ciphertext = 'default'\n if(str(algorithm) == \"shift\"):\n 
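# each branch below instantiates the matching cipher class from the request's\n algorithm name and delegates the work to its encrypt() method\n 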
cipher = ShiftCipher(key)\n ciphertext = cipher.encrypt(plaintext)\n elif(str(algorithm) == \"mono\"):\n cipher = MonoalphabeticCipher(key)\n ciphertext = cipher.encrypt(plaintext)\n elif(str(algorithm) == \"affine\"):\n a, b = (int(v) for v in key.strip('()').split(',')) # key given as \"(7,3)\"\n cipher = AffineCipher(a, b)\n ciphertext = cipher.encrypt(plaintext)\n elif(str(algorithm) == \"substitution\"):\n cipher = SubstitutionCipher(key)\n ciphertext = cipher.encrypt(plaintext)\n elif(str(algorithm) == \"playfair\"):\n cipher = PlayfairCipher(key)\n ciphertext = cipher.encrypt(plaintext)\n elif(str(algorithm) == \"vigenere\"):\n cipher = VigenereCipher(key)\n ciphertext = cipher.encrypt(plaintext)\n elif(str(algorithm) == \"rail\"):\n cipher = RailFenceCipher()\n ciphertext = cipher.encrypt(plaintext, int(key))\n elif(str(algorithm) == \"row\"):\n cipher = RowTranspositionCipher(key)\n ciphertext = cipher.encrypt(plaintext)\n return Response({\"ciphertext\": ciphertext}, status=status.HTTP_200_OK)\n \nclass DecryptView(APIView):\n def post(self, request):\n algorithm = request.data['algorithm']\n ciphertext = request.data['input']\n key = request.data['key']\n plaintext = 'default'\n if(str(algorithm) == \"shift\"):\n cipher = ShiftCipher(key)\n plaintext = cipher.decrypt(ciphertext)\n elif(str(algorithm) == \"mono\"):\n cipher = MonoalphabeticCipher(key)\n plaintext = cipher.decrypt(ciphertext)\n elif(str(algorithm) == \"affine\"):\n a, b = (int(v) for v in key.strip('()').split(',')) # key given as \"(7,3)\"\n cipher = AffineCipher(a, b)\n plaintext = cipher.decrypt(ciphertext)\n elif(str(algorithm) == \"substitution\"):\n cipher = SubstitutionCipher(key)\n plaintext = cipher.decrypt(ciphertext)\n elif(str(algorithm) == \"playfair\"):\n cipher = PlayfairCipher(key)\n plaintext = cipher.decrypt(ciphertext)\n elif(str(algorithm) == \"vigenere\"):\n cipher = VigenereCipher(key)\n plaintext = cipher.decrypt(ciphertext)\n elif(str(algorithm) == \"rail\"):\n cipher = RailFenceCipher()\n plaintext = cipher.decrypt(ciphertext, int(key))\n elif(str(algorithm) == \"row\"):\n cipher = RowTranspositionCipher(key)\n plaintext = cipher.decrypt(ciphertext)\n return Response({\"plaintext\": plaintext}, status=status.HTTP_200_OK)\n","repo_name":"devkhedr/cipher-algorithms","sub_path":"backend/cipher_algorithms_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"19005370327","text":"import cv2\nimport keyboard\nimport imageio\nimport torch\nimport numpy as np\nimport random\nfrom transforms3d.axangles import axangle2mat\nimport pickle\nfrom PIL import Image\nimport torchvision\nimport time\nimport os,sys\n\nimport config\nimport constants\nfrom config import args\nfrom utils import save_obj\n\n \ndef get_video_bn(video_file_path):\n basename = os.path.basename(video_file_path)\n for ext in constants.video_exts:\n basename = basename.replace(ext, '')\n return basename\n\ndef save_meshes(reorganize_idx, outputs, output_dir, smpl_faces):\n vids_org = np.unique(reorganize_idx)\n for idx, vid in enumerate(vids_org):\n verts_vids = np.where(reorganize_idx==vid)[0]\n img_path = outputs['meta_data']['imgpath'][verts_vids[0]]\n obj_name = os.path.join(output_dir, '{}'.format(os.path.basename(img_path))).replace('.mp4','').replace('.jpg','').replace('.png','')+'.obj'\n for subject_idx, batch_idx in enumerate(verts_vids):\n save_obj(outputs['verts'][batch_idx].detach().cpu().numpy().astype(np.float16), \\\n                
smpl_faces,obj_name.replace('.obj', '_{}.obj'.format(subject_idx)))\n\n\n\nclass OpenCVCapture:\n def __init__(self, video_file=None, show=False):\n if video_file is None:\n self.cap = cv2.VideoCapture(int(args().cam_id))\n else:\n self.cap = cv2.VideoCapture(video_file)\n self.length = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)\n self.whether_to_show=show\n\n\n def read(self, return_rgb=True):\n flag, frame = self.cap.read()\n if not flag:\n return None\n if self.whether_to_show:\n cv2.imshow('webcam',cv2.resize(frame, (240,320)))\n cv2.waitKey(1)\n if return_rgb:\n frame = np.flip(frame, -1).copy() # BGR to RGB\n return frame\n\nclass Image_Reader:\n def __init__(self, image_folder):\n self.image_folder = image_folder\n self.image_list = os.listdir(self.image_folder)\n self.current_num=0\n\n def read(self):\n frame = cv2.imread(os.path.join(self.image_folder,self.image_list[self.current_num]))\n self.current_num+=1\n if self.current_num==len(self.image_list):\n self.current_num=0\n return np.flip(frame, -1).copy() # BGR to RGB\n\n\nclass Time_counter():\n def __init__(self,thresh=0.1):\n self.thresh=thresh\n self.runtime = 0\n self.frame_num = 0\n\n def start(self):\n self.start_time = time.time()\n\n def count(self, frame_num=1):\n time_cost = time.time()-self.start_time\n if time_cost 1:\n self.ref_att = self.addAnimEnumParam(\"fkref\", \"Fk Ref\", 0, self.settings[\"fkrefarray\"].split(\",\"))\n\n if self.settings[\"ikrefarray\"]:\n ref_names = self.settings[\"ikrefarray\"].split(\",\")\n if len(ref_names) > 1:\n self.ikref_att = self.addAnimEnumParam(\"ikref\", \"Ik Ref\", 0, self.settings[\"ikrefarray\"].split(\",\"))\n\n if self.settings[\"upvrefarray\"]:\n ref_names = self.settings[\"upvrefarray\"].split(\",\")\n if len(ref_names) > 1:\n self.upvref_att = self.addAnimEnumParam(\"upvref\", \"UpV Ref\", 0, self.settings[\"upvrefarray\"].split(\",\"))\n\n # Setup ------------------------------------------\n # Eval Fcurve\n self.st_value = fcu.getFCurveValues(self.settings[\"st_profile\"], self.divisions)\n self.sq_value = fcu.getFCurveValues(self.settings[\"sq_profile\"], self.divisions)\n\n self.st_att = [ self.addSetupParam(\"stretch_%s\"%i, \"Stretch %s\"%i, \"double\", self.st_value[i], -1, 0) for i in range(self.divisions) ]\n self.sq_att = [ self.addSetupParam(\"squash_%s\"%i, \"Squash %s\"%i, \"double\", self.sq_value[i], 0, 1) for i in range(self.divisions) ]\n\n self.resample_att = self.addSetupParam(\"resample\", \"Resample\", \"bool\", True)\n self.absolute_att = self.addSetupParam(\"absolute\", \"Absolute\", \"bool\", False)\n\n def addOperators(self):\n\n # Visibilities -------------------------------------\n # fk\n fkvis_node = nod.createReverseNode(self.blend_att)\n\n for shp in self.fk0_ctl.getShapes():\n pm.connectAttr(fkvis_node+\".outputX\", shp.attr(\"visibility\"))\n for shp in self.fk0_roll_ctl.getShapes():\n pm.connectAttr(fkvis_node+\".outputX\", shp.attr(\"visibility\"))\n for shp in self.fk1_ctl.getShapes():\n pm.connectAttr(fkvis_node+\".outputX\", shp.attr(\"visibility\"))\n \n fkvis2_node = nod.createReverseNode(self.blend2_att)\n for shp in self.fk2_ctl.getShapes():\n pm.connectAttr(fkvis2_node+\".outputX\", shp.attr(\"visibility\"))\n\n # ik\n for shp in self.upv_ctl.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n for shp in self.ikcns_ctl.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n for shp in self.ik_ctl.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n\n # Controls ROT order 
-----------------------------------\n att.setRotOrder(self.fk0_ctl, \"YZX\")\n att.setRotOrder(self.fk0_roll_ctl, \"YZX\")\n att.setRotOrder(self.fk1_ctl, \"XYZ\")\n att.setRotOrder(self.fk2_ctl, \"YZX\")\n # att.setRotOrder(self.ik_ctl, \"ZYX\")\n att.setRotOrder(self.ik_ctl, \"XYZ\")\n\n\n # IK Solver -----------------------------------------\n out = [self.bone0, self.bone1, self.ctrn_loc, self.eff_npo]\n \n #self.fk_ctl = [self.fk0_roll_ctl, self.fk1_ctl, self.fk2_mtx]\n node = aop.gear_ikfk2bone_op(out, self.root, self.ik_ref, self.upv_ctl, self.fk_ctl[0], self.fk_ctl[1], self.fk2_mtx, self.length0, self.length1, self.negate)\n\n pm.connectAttr(self.blend_att, node+\".blend\")\n pm.connectAttr(self.roll_att, node+\".roll\")\n pm.connectAttr(self.scale_att, node+\".scaleA\")\n pm.connectAttr(self.scale_att, node+\".scaleB\")\n pm.connectAttr(self.maxstretch_att, node+\".maxstretch\")\n pm.connectAttr(self.slide_att, node+\".slide\")\n pm.connectAttr(self.softness_att, node+\".softness\")\n pm.connectAttr(self.reverse_att, node+\".reverse\")\n\n # auto upvector -------------------------------------\n\n \n if self.negate:\n node = aop.aimCns(self.upv_auv, self.ik_ctl, axis=\"-xy\", wupType=4, wupVector=[0,1,0], wupObject=self.upv_auv, maintainOffset=False)\n else:\n node = aop.aimCns(self.upv_auv, self.ik_ctl, axis=\"xy\", wupType=4, wupVector=[0,1,0], wupObject=self.upv_auv, maintainOffset=False)\n pb_node = pm.createNode(\"pairBlend\")\n pb_node.attr(\"rotInterpolation\").set (1)\n\n pm.connectAttr(self.upv_auv.attr(\"rotate\"), pb_node+\".inRotate2\")\n pm.connectAttr(pb_node+\".outRotate\", self.upv_mtx.attr(\"rotate\"))\n pm.connectAttr(self.auv_att, pb_node+\".weight\")\n\n \n\n\n # fk2_npo position constraint to effector------------------------\n node = aop.gear_mulmatrix_op(self.eff_npo.attr(\"worldMatrix\"), self.fk2_npo.attr(\"parentInverseMatrix\"))\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n pm.connectAttr(dm_node+\".outputTranslate\", self.fk2_npo.attr(\"translate\"))\n # fk2_npo rotation constraint to bone1 (bugfixed) ------------------------\n node = aop.gear_mulmatrix_op(self.bone1.attr(\"worldMatrix\"), self.fk2_npo.attr(\"parentInverseMatrix\"))\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n pm.connectAttr(dm_node+\".outputRotate\", self.fk2_npo.attr(\"rotate\"))\n \n \n # hand ikfk blending from fk ref to ik ref (serious bugfix)--------------------------------\n node = aop.gear_mulmatrix_op(self.fk_ref.attr(\"worldMatrix\"), self.eff_loc.attr(\"parentInverseMatrix\"))\n dm_node = pm.createNode(\"decomposeMatrix\")\n pb_node = pm.createNode(\"pairBlend\")\n pb_node.attr(\"rotInterpolation\").set (1)\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n pm.connectAttr(dm_node+\".outputRotate\", pb_node+\".inRotate1\")\n pm.connectAttr(self.blend2_att, pb_node+\".weight\")\n pm.connectAttr(pb_node+\".outRotate\", self.eff_loc.attr(\"rotate\"))\n node = aop.gear_mulmatrix_op(self.ik_ref.attr(\"worldMatrix\"), self.eff_loc.attr(\"parentInverseMatrix\"))\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n pm.connectAttr(dm_node+\".outputRotate\", pb_node+\".inRotate2\")\n\n\n # Twist references ---------------------------------\n node = aop.gear_mulmatrix_op(self.mid_ctl.attr(\"worldMatrix\"), self.tws1_npo.attr(\"parentInverseMatrix\"))\n dm_node = 
pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n pm.connectAttr(dm_node+\".outputTranslate\", self.tws1_npo.attr(\"translate\"))\n pm.connectAttr(dm_node+\".outputRotate\", self.tws1_npo.attr(\"rotate\"))\n pm.connectAttr(dm_node+\".outputScale\", self.tws1_npo.attr(\"scale\"))\n\n\n node = aop.gear_mulmatrix_op(self.eff_loc.attr(\"worldMatrix\"), self.tws2_npo.attr(\"parentInverseMatrix\"))\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n pm.connectAttr(dm_node+\".outputTranslate\", self.tws2_npo.attr(\"translate\"))\n pm.connectAttr(dm_node+\".outputRotate\", self.tws2_npo.attr(\"rotate\"))\n\n # orientConstraint(self.eff_loc, self.tws2_rot, maintainOffset=False)\n node = aop.gear_mulmatrix_op(self.eff_loc.attr(\"worldMatrix\"), self.tws2_rot.attr(\"parentInverseMatrix\"))\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(node+\".output\", dm_node+\".inputMatrix\")\n att.setRotOrder(self.tws2_rot, \"XYZ\")\n pm.connectAttr(dm_node+\".outputRotate\", self.tws2_rot+\".rotate\")\n\n self.tws0_rot.setAttr(\"sx\", .001)\n self.tws2_rot.setAttr(\"sx\", .001)\n\n add_node = nod.createAddNode(self.roundness_att, .001)\n pm.connectAttr(add_node+\".output\", self.tws1_rot.attr(\"sx\"))\n\n pm.connectAttr(self.armpit_roll_att, self.tws0_rot+\".rotateX\")\n\n #Roll Shoulder--use aimconstraint withour uovwctor to solve the stable twist\n \n if self.negate:\n node = aop.aimCns(self.tws0_loc, self.mid_ctl, axis=\"-xy\", wupType=4, wupVector=[0,1,0], wupObject=self.tws0_npo, maintainOffset=False)\n else:\n node = aop.aimCns(self.tws0_loc, self.mid_ctl, axis=\"xy\", wupType=4, wupVector=[0,1,0], wupObject=self.tws0_npo, maintainOffset=False)\n \n\n # Volume -------------------------------------------\n distA_node = nod.createDistNode(self.tws0_loc, self.tws1_loc)\n distB_node = nod.createDistNode(self.tws1_loc, self.tws2_loc)\n add_node = nod.createAddNode(distA_node+\".distance\", distB_node+\".distance\")\n div_node = nod.createDivNode(add_node+\".output\", self.root.attr(\"sx\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(self.root.attr(\"worldMatrix\"), dm_node+\".inputMatrix\")\n\n div_node2 = nod.createDivNode(div_node+\".outputX\", dm_node+\".outputScaleX\")\n self.volDriver_att = div_node2+\".outputX\"\n\n # Divisions ----------------------------------------\n # at 0 or 1 the division will follow exactly the rotation of the controler.. 
and we won't have this nice tangent + roll\n        for i, div_cns in enumerate(self.div_cns):\n\n            if i < (self.settings[\"div0\"]+1):\n                perc = i*.5 / (self.settings[\"div0\"]+1.0)\n            elif i < (self.settings[\"div0\"] + 2):\n                perc = .49\n            elif i < (self.settings[\"div0\"] + 3):\n                perc = .50\n            elif i < (self.settings[\"div0\"] + 4):\n                perc = .51\n            else:\n                perc = .5 + (i-self.settings[\"div0\"]-3.0)*.5 / (self.settings[\"div1\"]+1.0)\n\n            perc = max(.001, min(.990, perc))\n\n            # Roll\n            if self.negate:\n                node = aop.gear_rollsplinekine_op(div_cns, [self.tws2_rot, self.tws1_rot, self.tws0_rot], 1-perc, 40)\n            else:\n                node = aop.gear_rollsplinekine_op(div_cns, [self.tws0_rot, self.tws1_rot, self.tws2_rot], perc, 40)\n\n            pm.connectAttr(self.resample_att, node+\".resample\")\n            pm.connectAttr(self.absolute_att, node+\".absolute\")\n\n            # Squash n Stretch\n            node = aop.gear_squashstretch2_op(div_cns, None, pm.getAttr(self.volDriver_att), \"x\")\n            pm.connectAttr(self.volume_att, node+\".blend\")\n            pm.connectAttr(self.volDriver_att, node+\".driver\")\n            pm.connectAttr(self.st_att[i], node+\".stretch\")\n            pm.connectAttr(self.sq_att[i], node+\".squash\")\n\n        # match IK/FK ref\n        pm.parentConstraint(self.bone0, self.match_fk0_off, mo=True)\n        pm.parentConstraint(self.bone1, self.match_fk1_off, mo=True)\n\n        return\n\n    # =====================================================\n    # CONNECTOR\n    # =====================================================\n    ## Set the relation between objects from guide to rig.\\n\n    # @param self\n    # TODO: replace bone0 and control objects by loc connections\n    def setRelation(self):\n        self.relatives[\"root\"] = self.div_cns[0]\n        self.relatives[\"elbow\"] = self.div_cns[self.settings[\"div0\"] + 2]\n        self.relatives[\"wrist\"] = self.div_cns[-1]\n        self.relatives[\"eff\"] = self.eff_loc\n\n        self.jointRelatives[\"root\"] = 0\n        self.jointRelatives[\"elbow\"] = self.settings[\"div0\"] + 2\n        self.jointRelatives[\"wrist\"] = len(self.div_cns)-1\n        self.jointRelatives[\"eff\"] = -1\n\n    ## standard connection definition.\n    # @param self\n    def connect_standard(self):\n        self.connect_standardWithIkRef()\n        # fk isolation connection\n        self.connect_standardWithRotRef(self.settings[\"fkrefarray\"], self.fk_cns)","repo_name":"chuckbruno/mgear","sub_path":"scripts/mgear/maya/shifter/component/arm_ms_2jnt_01/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":23331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"43230563444","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom project import Project\nfrom clause import Clause\n\nclass Verification(object):\n\n    def __init__(self):\n        pass\n\n    def checkSpaces(self, project):\n        \"\"\"Return (windows, orphans): clauses whose required child or parent links are missing.\"\"\"\n        doc2Clause = project.getDocument2ClausesDict()\n        windows = []\n        orphans = []\n        for documentName in doc2Clause:\n            document = project.getDocument(documentName)\n            for clauseId in doc2Clause[documentName]:\n                clause = document.getClause(clauseId)\n                clauseType = clause.getType()\n                parentsNeeded = {}\n                childrenNeeded = {}\n                possibleChildren = clauseType.getPossibleChildrenList()\n                for child in possibleChildren:\n                    childrenNeeded[child] = clauseType.getChildMinCard(child)\n                childLinks = clause.getChildClausesList()\n                for childId in childLinks:\n                    child = clause.getChildLinkClause(childId)\n                    childTypeName = child.getType().getName()\n                    if childTypeName in possibleChildren:\n                        childrenNeeded[childTypeName] -= 1\n                for need in childrenNeeded.keys():\n                    
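# a positive count left over means a required child link is still missing\n                    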
if childrenNeeded[need] > 0:\n                        windows.append((clauseId, need))\n\n                possibleParents = clauseType.getPossibleParentsList()\n                for parent in possibleParents:\n                    parentsNeeded[parent] = clauseType.getParentMinCard(parent)\n                parentLinks = clause.getParentClausesList()\n                for parentId in parentLinks:\n                    parent = clause.getParentLinkClause(parentId)\n                    parentTypeName = parent.getType().getName()\n                    if parentTypeName in possibleParents:\n                        parentsNeeded[parentTypeName] -= 1\n                for need in parentsNeeded.keys():\n                    if parentsNeeded[need] > 0:\n                        # assumption: a clause with a missing required parent counts as an orphan\n                        orphans.append((clauseId, need))\n        return (windows, orphans)\n","repo_name":"fbarden/ProjectManager","sub_path":"verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5839291838","text":"from typing import Optional\nimport joblib\nfrom fastapi import FastAPI, Request\nfrom starlette.responses import Response\nfrom pydantic import BaseModel\nimport pandas as pd\nimport numpy as np\n\n\nclass Item(BaseModel):\n    uuid: str\n    account_amount_added_12_24m: Optional[int] = None\n    account_days_in_dc_12_24m: Optional[float] = None\n    account_days_in_rem_12_24m: Optional[float] = None\n    account_days_in_term_12_24m: Optional[float] = None\n    account_incoming_debt_vs_paid_0_24m: Optional[float] = None\n    account_status: Optional[float] = None\n    account_worst_status_0_3m: Optional[float] = None\n    account_worst_status_12_24m: Optional[float] = None\n    account_worst_status_3_6m: Optional[float] = None\n    account_worst_status_6_12m: Optional[float] = None\n    age: Optional[int] = None\n    avg_payment_span_0_12m: Optional[float] = None\n    avg_payment_span_0_3m: Optional[float] = None\n    merchant_category: Optional[str] = None\n    merchant_group: Optional[str] = None\n    has_paid: Optional[bool] = None\n    max_paid_inv_0_12m: Optional[float] = None\n    max_paid_inv_0_24m: Optional[float] = None\n    name_in_email: Optional[str] = None\n    num_active_div_by_paid_inv_0_12m: Optional[float] = None\n    num_active_inv: Optional[int] = None\n    num_arch_dc_0_12m: Optional[int] = None\n    num_arch_dc_12_24m: Optional[int] = None\n    num_arch_ok_0_12m: Optional[int] = None\n    num_arch_ok_12_24m: Optional[int] = None\n    num_arch_rem_0_12m: Optional[int] = None\n    num_arch_written_off_0_12m: Optional[float] = None\n    num_arch_written_off_12_24m: Optional[float] = None\n    num_unpaid_bills: Optional[int] = None\n    status_last_archived_0_24m: Optional[int] = None\n    status_2nd_last_archived_0_24m: Optional[int] = None\n    status_3rd_last_archived_0_24m: Optional[int] = None\n    status_max_archived_0_6_months: Optional[int] = None\n    status_max_archived_0_12_months: Optional[int] = None\n    status_max_archived_0_24_months: Optional[int] = None\n    recovery_debt: Optional[int] = None\n    sum_capital_paid_account_0_12m: Optional[int] = None\n    sum_capital_paid_account_12_24m: Optional[int] = None\n    sum_paid_inv_0_12m: Optional[int] = None\n    time_hours: Optional[float] = None\n    worst_status_active_inv: Optional[float] = None\n\n\napp = FastAPI()\n\nmodel = joblib.load('model_best.pkl')\n\n@app.get(\"/\")\ndef read_root():\n    return {\"message\": \"Welcome from the API\"}\n\n\n@app.post(\"/prediction/\")\nasync def get_prediction(input_pred: Item):\n\tinput_dict = input_pred.dict()\n\tnp_ar = np.array(list(input_dict.values()))\n\t
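# note: going through np.array coerces the mixed values to one generic dtype; pd.DataFrame([input_dict]) would keep native column types\n\tdf_pred = 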
pd.DataFrame(np.transpose(np_ar.reshape(len(np_ar), 1)), columns=input_dict.keys())\n\tresult = model.predict_proba(df_pred)\n\treturn {\"uuid\": input_dict[\"uuid\"], \"pd\": result[0][1]}\n\n","repo_name":"pauagustin/test_aws_endpoint","sub_path":"fastapi/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6844264998","text":"import six\n\nfrom girder import events\nfrom girder.exceptions import ValidationException\nfrom girder.utility import setting_utilities\nfrom . import constants\nfrom .resource import ResourceExt\n\n\n@setting_utilities.validator(constants.PluginSettings.PROVENANCE_RESOURCES)\ndef validateProvenanceResources(doc):\n val = doc['value']\n\n if val:\n if not isinstance(val, six.string_types):\n raise ValidationException('Provenance Resources must be a string.', 'value')\n # accept comma or space separated lists\n resources = val.replace(',', ' ').strip().split()\n # reformat to a comma-separated list\n doc['value'] = ','.join(resources)\n\n\ndef load(info):\n ext = ResourceExt(info)\n events.bind('model.setting.save.after', 'provenanceMain', ext.bindModels)\n events.bind('provenance.initialize', 'provenanceMain', ext.bindModels)\n events.trigger('provenance.initialize', info={})\n events.bind('model.file.save', 'provenanceMain', ext.fileSaveHandler)\n events.bind('model.file.save.created', 'provenanceMain', ext.fileSaveCreatedHandler)\n events.bind('model.file.remove', 'provenance', ext.fileRemoveHandler)\n","repo_name":"ShenQianwithC/girder-pv","sub_path":"plugins/provenance/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38047818389","text":"Mydict = {\n \"Name\":\"Ten\",\n \"Age\" :[\"Tuoi\",\"Thoi ki\"],\n \"School\":\"Truong\",\n \"Class\":[\"Lop\", \"Phan loai\"],\n \"ID\" :\"Ma dinh danh\"\n }\nprint(Mydict)\nprint(len(Mydict))\nMydict[\"KTRB\"] = \"Ky thuat Robot\"\nprint(Mydict)\n\ndef Dict_SearchingKey(key, Dict):\n if key in Dict:\n print('{} from {}'.format(Dict[key], key))\n else:\n print('Can not find this word!!!')\n return 0;\n\nDict_SearchingKey(\"Age\", Mydict)\n\n\ndef Dict_SearchingValue(val,Dict):\n for key, value in Dict.items():\n if val == value:\n print(key)\n\nprint(\"Search 'Truong' : \\n\")\nDict_SearchingValue(\"Truong\", Mydict)\n\ndef Del_Key(key, Dict):\n if key in Dict:\n del Dict[key]\n print(Dict)\n else:\n print(\"Tu khong ton tai\")\n\nDel_Key(\"Name\", Mydict)\n\n","repo_name":"Hoang242225/PythonCourse","sub_path":"B4Tuples/Cau2.py","file_name":"Cau2.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31707273337","text":"from dcard import Api\nimport json\n\ndef export_json(filename : str, file : dict) -> None:\n with open(f'./data/{filename}.json', \"w\", encoding=\"utf8\") as outfile:\n json.dump(file, outfile, ensure_ascii=False)\n\ndef main():\n api = Api()\n\n popular_forums = api.get_popular_forums()\n sensity_forums = api.get_sensity_forums()\n\n api.close()\n\n # write data to the json file\n export_json(\"popular_forums\",\n {\"popular_forums\" : popular_forums})\n\n export_json(\"sensity_forums\",\n {\"sensity_forums\" : sensity_forums})\n\n print(\"Writed...\")\n\nif __name__ == \"__main__\":\n 
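# standard entry-point guard: fetch the forum lists and export them once when run as a script\n    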
main()\n","repo_name":"Lin-jun-xiang/python-dcard","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32065134795","text":"import sys\nfrom collections import Counter\nclass Person(object):\n def __init__(self, name):\n self.name = name\n\n def get_details(self):\n \n return self.name\n def get_grade(self,score):\n\n grep = Counter(score)\n \n c = grep.most_common()\n totalpaas = 0\n totalfail = 0\n string = []\n for i in c:\n x,y = i\n string.append(x+\": \"+str(y))\n if x == \"D\":\n totalfail += y\n else:\n totalpaas +=y\n \n if \"student\" == self.name:\n\n print(\"Pass: {}, Fail: {}\".format(totalpaas,totalfail))\n else:\n print(\", \".join(string))\nperson1 = Person(sys.argv[1])\n\nperson1.get_grade(sys.argv[2])","repo_name":"creazy-code/evangelist","sub_path":"python/test/totalScore.py","file_name":"totalScore.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39221898628","text":"from flask import (\n Blueprint, flash, jsonify, redirect, render_template, request, send_from_directory, session, url_for\n)\nfrom flask_cors import cross_origin\nfrom downloader.linkedin_downloader import lk_downloader\nimport os\nimport socket\n\n\nbp = Blueprint('downloader', __name__)\n\ndef get_server_path():\n \"\"\"\n A function that gets the path of a server\n for locating a filename\n \"\"\"\n local_ip = socket.gethostbyname(socket.gethostname())\n # Check if the IP address in the URL matches the local IP address\n if '127.0.0.1' in local_ip or '127.0.1.1' in local_ip:\n path = os.path.expanduser(\"~\")+\"/Downloads/\"\n else:\n path = os.path.expanduser(\"~\")+\"/\"\n return path\n\n\n@bp.route('/', methods = ['GET', 'POST'])\n@cross_origin(origins=\"https://linkedinsave.xyz\")\ndef index():\n \"\"\"\n An index function that accepts user input as a url\n to a linkedin post and redirects to a success route\n \"\"\"\n path = get_server_path()\n # post request condition\n if request.method == 'POST' and request.headers.get('X-Requested-With'):\n # get user input \n linkedin_post = request.form['url']\n # call lk_downloader and pass in user input\n filename = lk_downloader(linkedin_post, path)\n return jsonify({'fileName': filename})\n # get request condition\n else:\n return render_template('index.html')\n \n@bp.route('/download/', methods = ['GET'])\n@cross_origin(origins=\"https://linkedinsave.xyz\")\ndef download(filename):\n \"\"\"\n An function that allows for video download\n \"\"\"\n path = get_server_path()\n print(path, filename)\n try:\n return send_from_directory(path, filename, as_attachment=True), os.remove((path+filename))\n except Exception as e:\n print(e)\n flash(\"Video file not found. 
flash(\"Video file not found. Please copy and paste the link again.\", \"danger\")\n        # the index endpoint is namespaced by the blueprint name\n        return redirect(url_for('downloader.index'))\n","repo_name":"izudada/linkedin_video_downloader","sub_path":"downloader/my_downloader.py","file_name":"my_downloader.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70241399502","text":"# This module stores the environment specific information\n\nfrom utils.jsonParser import EnvironmentJsonParser\nfrom environments.environmentWrapper import EnvironmentWrapper\n\n# The classes for the Kaggle environments\nfrom environments.kaggle.hungry_geese.hungryGeese import HungryGeese\nfrom environments.kaggle.rock_paper_scissor.rockPaperScissor import RockPaperScissor\nfrom environments.kaggle.halite.haliteTwoSigma import HaliteTwoSigma\nfrom environments.kaggle.connectX.connectX import ConnectX\nfrom environments.kaggle.santa_candy_cane.santaCandyCane import SantaCandyCane\nfrom environments.kaggle.gfootball.googleResearchFootball import GoogleResearchFootball\n\n\n#########################################################################################\n# Reference list of all the environments and their JSON files\n# NOTE: The path to the JSON file must be relative to the root of the project directory,\n# e.g.\n# environments/json/<environment_name>.json\n#\nENV_LIST = {\n    HungryGeese: 'environments/json/hungry_geese.json',\n    RockPaperScissor: 'environments/json/rock_paper_scissor.json',\n    HaliteTwoSigma: 'environments/json/halite.json',\n    ConnectX: 'environments/json/connect_x.json',\n    SantaCandyCane: 'environments/json/santa_candy_cane.json',\n    GoogleResearchFootball: 'environments/json/gfootball.json'\n}\n\n# This is a dummy environment used to display the information about the application\nDUMMY_ENVIRONMENT = None\nDUMMY_ENVIRONMENT_JSON = 'environments/json/_default_lab.json'\n\n#########################################################################################\n\n# List of supported environments (and their mappings to the references) so far.\n# It is filled depending on the value of the supported flag in the environment's JSON file\nENV_SUPPORTED_LIST = []\nENV_MAP = {}\n\n\ndef build_dummy_environment():\n    parser = EnvironmentJsonParser(DUMMY_ENVIRONMENT_JSON)\n    try:\n        parser.parse()\n    except Exception:\n        print('Unable to create dummy environment. Exiting.')\n        exit(-1)\n\n    global DUMMY_ENVIRONMENT\n    DUMMY_ENVIRONMENT = EnvironmentWrapper(\n        environment=None,\n        title=parser.getTitle(),\n        subtitle=parser.getSubtitle(),\n        description=parser.getDescription(),\n        link=parser.getLink(),\n        min_agents=parser.getMinAgents(),\n        max_agents=parser.getMaxAgents(),\n        supported=False\n    )\n\n\ndef registerEnvironments():\n    \"\"\" Registers every environment from ENV_LIST (a later entry with the same title overwrites an earlier one) \"\"\"\n\n    build_dummy_environment()\n\n    for env, json in ENV_LIST.items():\n        parser = EnvironmentJsonParser(json)\n\n        # Try parsing the JSON file -- it might throw errors.\n        # In such cases, simply skip the environment\n        try:\n            parser.parse()\n        except Exception:\n            
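# a malformed JSON file should not abort registration of the remaining environments\n            print('Skipping the environment ... 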
')\n continue\n\n # Parsing successful -- Extract the contents\n title = parser.getTitle()\n subtitle = parser.getSubtitle()\n description = parser.getDescription()\n link = parser.getLink()\n supported = parser.isEnvironmentSupported()\n min_agents = parser.getMinAgents()\n max_agents = parser.getMaxAgents()\n\n # Add this environment to the supported list if it is supported.\n # Then go ahead and build the mapping between environment names and the wrapped environment\n if supported:\n ENV_SUPPORTED_LIST.append(title)\n\n ENV_MAP[title] = EnvironmentWrapper(environment=env,\n title=title,\n subtitle=subtitle,\n description=description,\n link=link,\n min_agents=min_agents,\n max_agents=max_agents,\n supported=supported)\n","repo_name":"0xd3ba/kaggle-simulations-lab","sub_path":"config/environmentConfig.py","file_name":"environmentConfig.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}