blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c259f5026a586e6ea50ad764940a3a142ae65202 | c7f4387733c95ced53dae485f36618a88f18ea45 | /Uri/1061.py | 3e14cd823da4b724d092a9f5fbb6458bae7fd7b6 | [] | no_license | douradodev/Uri | 25d7636b5d5553fafdbd61a38d7c465c4cb79c0c | e879ebca7a87de674d69d739617c4207156ce349 | refs/heads/main | 2023-06-03T18:53:11.749866 | 2021-06-22T12:40:11 | 2021-06-22T12:40:11 | 379,264,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | def main():
# --- interior of main() (URI Online Judge 1061, "Event Time") ---
# Reads a start-day line (day number is the 2nd token, e.g. "Dia 5"),
# a start time "HH : MM : SS", then an end-day line and end time, and
# prints the elapsed duration using manual borrow/carry arithmetic.
begin_day = input().split()
begin_time= input().split(' : ')
end_day = input().split()
end_time= input().split(' : ')
# Convert the "HH : MM : SS" fields into (hour, minute, second) int tuples.
begin_time = int(begin_time[0]), int(begin_time[1]), int(begin_time[2])
end_time = int(end_time[0]), int(end_time[1]), int(end_time[2])
total_day = 0
total_time = [0,0,0]  # elapsed [hours, minutes, seconds]
total_day = int(end_day[1]) - int(begin_day[1])
# Seconds column: borrow 60 when negative; dif_time carries 1 into minutes.
if end_time[2] - begin_time[2] < 0:
total_time[2] = end_time[2] + 60 - begin_time[2]
dif_time = 1
else:
total_time[2] = end_time[2] - begin_time[2]
dif_time = 0
# Minutes column: subtract the carry from seconds, borrow 60 when negative.
if (end_time[1] - dif_time) - begin_time[1] < 0:
total_time[1] = (end_time[1] - dif_time + 60) - begin_time[1]
dif_time = 1
else:
total_time[1] = (end_time[1] - dif_time) - begin_time[1]
dif_time = 0
# Hours column: subtract the carry from minutes; borrowing 24 costs one day.
if (end_time[0] - dif_time) - begin_time[0] < 0:
total_time[0] = (end_time[0] - dif_time + 24) - begin_time[0]
total_day -= 1
else:
total_time[0] = (end_time[0] - dif_time) - begin_time[0]
print('{} dia(s)\n{} hora(s)\n{} minuto(s)\n{} segundo(s)'.format(total_day, total_time[0], total_time[1], total_time[2]))
main() | [
"victorhenrique01000@gmail.com"
] | victorhenrique01000@gmail.com |
1522254803b17907540e7f62b7738bd022e97f1f | ce083128fa87ca86c65059893aa8882d088461f5 | /python/sistema-de-contatos/.venv/lib/python2.7/site-packages/toolz/__init__.py | 43226df7316aa0545101101540d51ff04f94c368 | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 314 | py | from .itertoolz import *
from .functoolz import *
from .dicttoolz import *
from .recipes import *
from .compatibility import map, filter
from . import sandbox
from functools import partial, reduce
sorted = sorted
# Aliases
comp = compose
functoolz._sigs.create_signature_registry()
__version__ = '0.8.0'
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br |
a299a1c5458db60b310cd56594e34e8b29f23903 | 6237fb1efc6f92581b81fb2739e33b6c67cd7ec1 | /plugin.video.sportsmania/default.py | f2e4c385b0815ce8470e4638b32e5db4c69ca5ff | [] | no_license | moga1061/My-Repo | d4900a99983ddb037e18b1df774f849d1357d9ec | 570a5bea5607ababac9c6b0440e68319230ecfdc | refs/heads/master | 2020-12-29T02:06:38.227694 | 2015-03-19T15:26:54 | 2015-03-19T15:26:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,982 | py | import xbmc, xbmcgui, xbmcaddon, xbmcplugin, urllib, re, string, os, time, json, urllib2, cookielib, md5, mknet
addon_id = 'plugin.video.sportsmania'
art = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id + '/resources/art/'))
selfAddon = xbmcaddon.Addon(id=addon_id)
user = selfAddon.getSetting('snusername')
passw = selfAddon.getSetting('snpassword')
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
fanart = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id , 'fanart.jpg'))
icon = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id, 'icon.png'))
cookie_file = os.path.join(os.path.join(datapath,''), 'snhdcookie.lwp')
net = mknet.Net()
# Log in to sportsmania.eu (vBulletin forum) and persist the session
# cookies to the module-level cookie_file, then load them into the shared
# `net` HTTP client.  Uses the module globals `user`, `passw`, `net`.
# NOTE(review): the srDomain argument is never used inside this function.
def setCookie(srDomain):
import hashlib
m = hashlib.md5()
# vBulletin's login form expects an MD5 hash of the password in addition
# to the plain-text field; both are posted below.
m.update(passw)
net.http_POST('http://sportsmania.eu/login.php?do=login/?COLLCC=1',{'vb_login_username':user,'vb_login_password':passw,'vb_login_md5password':m.hexdigest(),'vb_login_md5password_utf':m.hexdigest(),'do':'login','securitytoken':'guest','url':'http://sportsmania.eu//view.php?pg=navigation','s':''})
# Save the freshly obtained session cookies, then re-attach them to `net`.
net.save_cookies(cookie_file)
net.set_cookies(cookie_file)
if user == '' or passw == '':
dialog = xbmcgui.Dialog()
ret = dialog.yesno('Sports Mania', 'Please enter your account details','or register if you dont have an account','at http://sportsmania.eu','Cancel','Login')
if ret == 1:
keyb = xbmc.Keyboard('', 'Enter Username')
keyb.doModal()
if (keyb.isConfirmed()):
username = keyb.getText()
keyb = xbmc.Keyboard('', 'Enter Password:')
keyb.doModal()
if (keyb.isConfirmed()):
password = keyb.getText()
selfAddon.setSetting('snusername',username)
selfAddon.setSetting('snpassword',password)
def MainMenu():
setCookie('http://sportsmania.eu/view.php?pg=navigation')
net.set_cookies(cookie_file)
response = net.http_GET('http://sportsmania.eu/forum.php')
if '<li class="welcomelink">Welcome, <a href="member.php?' in response.content:
addDir('[COLOR cyan]----Calendar----[/COLOR]','url',3,icon,fanart)
addLink('','url','mode',icon,fanart)
addDir('[COLOR greenyellow]Free[/COLOR] Streams','channels',1,icon,fanart)
setCookie('http://sportsmania.eu/view.php?pg=navigation')
net.set_cookies(cookie_file)
response = net.http_GET('http://sportsmania.eu/view.php?pg=navigation')
if '>ACTIVE<'in response.content:
addDir('[COLOR red]Elite[/COLOR] Streams','channels',1,icon,fanart)
addDir('[COLOR red]Elite[/COLOR] VOD','vod_channels',1,icon,fanart)
addLink('','url','mode',icon,fanart)
addLink('Twitter Feed','url',5,icon,fanart)
addLink('Sports Mania Support','url',4,icon,fanart)
addLink('','url','mode',icon,fanart)
addLink('','url','mode',icon,fanart)
addLink('','url','mode',icon,fanart)
addLink('[COLOR blue][I]To Subscribe to Elite streams please visit http://sportsmania.eu/payments.php[/I][/COLOR]','url','mode',icon,fanart)
else:addLink('[COLOR greenyellow]Click here to login[/COLOR]','url',6,icon,fanart)
xbmc.executebuiltin('Container.SetViewMode(50)')
def refresh():
xbmc.executebuiltin('Container.Refresh')
def StreamMenu(name,url):
net.set_cookies(cookie_file)
channelurl='http://sportsmania.eu/apis/channels.php'
response = net.http_GET(channelurl)
link=json.loads(response.content)
data=link [url]
for field in data:
channel_name= field["channel_name"]
channel_title= field["channel_title"]
channel_online= field["channel_online"]
channel_url= field["channel_url"]
channel_description= field["channel_description"]
premium= field["premium"]
if channel_online == '1':channel_online = '[COLOR green][B] (Online)[/B][/COLOR]'
else:channel_online = '[COLOR red] (Offline)[/COLOR]'
if channel_title =='':
channel_title=channel_name.replace('Channel','Channel ')
namestring = channel_title+channel_online
print name
if 'Free' in name:
if premium == '0':
addLink(namestring,channel_url,2,icon,fanart)
else:addLink(namestring,channel_url,2,icon,fanart)
xbmc.executebuiltin('Container.SetViewMode(51)')
# Add the stream URL as a playable directory item and start playback.
# NOTE(review): `name` is not a parameter here -- it is the module-level
# global set by the get_params() block at the bottom of the file.
def PlayStream(url):
ok=True
liz=xbmcgui.ListItem(name, iconImage=icon,thumbnailImage=icon); liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
try:
xbmc.Player ().play(url, liz, False)
return ok
# NOTE(review): bare except silently swallows ALL playback errors
# (including KeyboardInterrupt); nothing is returned on failure.
except:
pass
def schedule(url):
net.set_cookies(cookie_file)
response = net.http_GET('http://sportsmania.eu/calendar.php?c=1&do=displayweek')
link = response.content
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace(' ','')
month=re.findall('<h2 class="blockhead">([^<]+?)</h2>',link)
match=re.findall('<h3><span class=".+?">([^<]+?)</span><span class="daynum" style=".+?" onclick=".+?">(\d+)</span></h3><ul class="blockrow eventlist">(.+?)</ul>',link)
for day,num,data in match:
addLink('[COLOR greenyellow][B]'+day+' '+num+'[/B][/COLOR]','url','mode',icon,fanart)
match2=re.findall('<span class="eventtime">(.+?)</span><a href=".+?" title="">(.+?)</a>',data)
for time,title in match2:
title = title.replace('amp;','')
addLink('[COLOR gold]'+time+'[/COLOR] '+title,'url','mode',icon,fanart)
xbmc.executebuiltin('Container.SetViewMode(51)')
def suppop():
dialog = xbmcgui.Dialog()
dialog.ok('[COLOR greenyellow]Contact Us[/COLOR]', 'Via Our Support Section Link On The Streams Page ','Via Facebook - www.facebook.com/sportsnationhdtv','Via Twitter - @Sportsmania005')
# Open a Kodi window and poll until its label/text controls are available,
# then fill them with `heading` and `text`.
def showText(heading, text):
# 10147 is presumably Kodi's text-viewer window id -- TODO confirm.
# NOTE(review): `id` shadows the Python builtin of the same name.
id = 10147
xbmc.executebuiltin('ActivateWindow(%d)' % id)
xbmc.sleep(100)
win = xbmcgui.Window(id)
# The window's controls may not exist immediately after activation, so
# retry up to 50 times (10 ms apart); return as soon as both calls succeed.
retry = 50
while (retry > 0):
try:
xbmc.sleep(10)
retry -= 1
win.getControl(1).setLabel(heading)
win.getControl(5).setText(text)
return
# NOTE(review): after 50 failed attempts the function gives up silently.
except:
pass
def twitter():
text = ''
twit = 'https://script.google.com/macros/s/AKfycbyBcUa5TlEQudk6Y_0o0ZubnmhGL_-b7Up8kQt11xgVwz3ErTo/exec?560774536678088704'
response = net.http_GET(twit)
link = response.content
link = link.replace('/n','')
link = link.encode('ascii', 'ignore').decode('ascii').decode('ascii').replace(''','\'').replace(' ','').replace('…','').replace('amp;','')
match=re.compile("<title>(.+?)</title>.+?<pubDate>(.+?)</pubDate>",re.DOTALL).findall(link)[1:]
for status, dte in match:
dte = dte[:-15]
dte = '[COLOR greenyellow][B]'+dte+'[/B][/COLOR]'
text = text+dte+'\n'+status+'\n'+'\n'
showText('[COLOR greenyellow][B]@sportsmania005[/B][/COLOR]', text)
quit()
# Fetch `url` with a spoofed desktop Firefox User-Agent and return the raw
# response body as a byte string (Python 2 urllib2).
def get_url(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
# Close explicitly since no context manager is used.
response.close()
return link
# Add a folder item to the current plugin directory listing.  Builds the
# plugin:// callback URL that get_params() will later decode.
# NOTE(review): `url` and `name` are URL-quoted but `description` is
# concatenated via str() without quoting -- '&' or '=' in it would corrupt
# the query string.
def addDir(name,url,mode,iconimage,fanart,description=''):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&description="+str(description)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, 'plot': description } )
liz.setProperty('fanart_image', fanart)
# isFolder=True is the only difference from the sibling addLink() below.
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addLink(name,url,mode,iconimage,fanart,description=''):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&description="+str(description)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, 'plot': description } )
liz.setProperty('fanart_image', fanart)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
return ok
# Show a Kodi toast notification.  All four arguments must be strings.
# NOTE(review): values are interpolated into the builtin call without any
# escaping -- a ',' or ')' in `title`/`message` breaks the command.
def notification(title, message, ms, nart):
xbmc.executebuiltin("XBMC.notification(" + title + "," + message + "," + ms + "," + nart + ")")
def get_params():
    """Parse this plugin invocation's query string into a dict.

    Kodi calls the addon as ``plugin://...?name=...&mode=...`` and passes
    the query string (including the leading '?') as ``sys.argv[2]``.

    Returns:
        dict: parameter name -> raw value.  Always a dict; an empty dict
        when no parameters were supplied (the previous version returned a
        list in that case).

    Fixes over the original boilerplate: the trailing '/' was stripped from
    a variable that was never used afterwards (so values could end in '/'),
    and the strip removed TWO characters instead of one.
    """
    param = {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        # Drop the leading '?' and any trailing '/' before splitting.
        cleanedparams = paramstring.replace('?', '')
        if cleanedparams.endswith('/'):
            cleanedparams = cleanedparams[:-1]
        for pair in cleanedparams.split('&'):
            splitparams = pair.split('=')
            # Ignore malformed fragments that are not exactly "key=value".
            if len(splitparams) == 2:
                param[splitparams[0]] = splitparams[1]
    return param
params=get_params(); url=None; name=None; mode=None; path=None; iconimage=None
try: name=urllib.unquote_plus(params["name"])
except: pass
try: url=urllib.unquote_plus(params["url"])
except: pass
try: mode=int(params["mode"])
except: pass
try:iconimage=urllib.unquote_plus(params["iconimage"])
except: pass
try: plot=urllib.unquote_plus(params["plot"])
except: pass
try: title=urllib.unquote_plus(params["title"])
except: pass
try: path=urllib.unquote_plus(params["path"])
except: pass
if mode==None or url==None or len(url)<1:MainMenu()
elif mode==1:StreamMenu(name,url)
elif mode==2:PlayStream(url)
elif mode==3:schedule(url)
elif mode==4:suppop()
elif mode==5:twitter()
elif mode==6:refresh()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| [
"tcz009@hotmail.co.uk"
] | tcz009@hotmail.co.uk |
02cd1a8335a5bb3b571f1771e9a15260965e9062 | 2bc3d01380d4b67b720972e81c2a6362f82ee654 | /server.py | 2312ae9dcb638d163cdf0b91be22e97420da6a57 | [] | no_license | ntoonio/Valnatt | 145e4fbc21f02bf579fed096e64e0ad4ba2fc5e7 | 8c1e1b5bc81a3e363199f8e0c1dfb3c40b308bf0 | refs/heads/master | 2020-03-11T14:28:06.843890 | 2018-09-04T11:44:45 | 2018-09-04T11:44:45 | 130,055,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | # -*- coding: utf-8 -*-
import getVotes
import getParties
import getRegions
import json
from flask import Flask, request, send_from_directory
app = Flask(__name__)
@app.route("/")
def indexRequest():
return "<h1>Hej!</h1>"
@app.route("/<path:reqPath>")
def jsRequest(reqPath):
if reqPath.endswith("/"):
reqPath += "index.html"
response = send_from_directory("pages/", reqPath)
return response
@app.route("/getVotes/")
def getVotesRequest():
electionType = request.args.get("election")
region = request.args.get("region")
response = app.response_class(
response= json.dumps(getVotes.getVotes(electionType, region), ensure_ascii=False),
mimetype="application/json"
)
return response
@app.route("/getParties/")
def getPartiesRequest():
electionType = request.args.get("election")
region = request.args.get("region")
response = app.response_class(
response= json.dumps(getParties.getParties(electionType, region), ensure_ascii=False),
mimetype="application/json"
)
return response
@app.route("/getRegions/")
def getRegionsRequest():
region = request.args.get("region")
rType = request.args.get("type")
if rType == "riksdagskrets":
regions = getRegions.getRiksdagskrets()
elif rType == "kommun":
regions = getRegions.getKommuner(region)
elif rType == "kommunKrets":
regions = getRegions.getKommunKretsar(region)
elif rType == "kommunDistrikt":
regions = getRegions.getKommunDistrikt(region)
else:
regions = {"error": "Not an allowed region type"}
response = app.response_class(
response= json.dumps(regions, ensure_ascii=False),
mimetype="application/json"
)
return response
if __name__ == "__main__":
app.run(debug=True) | [
"ntoonio@gmail.com"
] | ntoonio@gmail.com |
91e509fd52450a4ec57391030561780e171a2470 | 095ff3a7db143af55ab380274037533284cdde43 | /source_cloudsearch/lambda_function.py | 1882c48dbd01703c2a2541688b037129cbc4b910 | [] | no_license | ww2or3ww/near-near-map-function-search | 88e47c36b0f7b66c2ca27cc3efb494c89b42044f | 88cda0d4f8838869440b3d2891ac34ad698cabb4 | refs/heads/development | 2023-05-13T11:10:41.993164 | 2021-01-01T03:27:26 | 2021-01-01T03:27:26 | 261,432,873 | 0 | 0 | null | 2021-06-02T02:50:42 | 2020-05-05T10:49:54 | Python | UTF-8 | Python | false | false | 4,315 | py | import json
import os
import requests
from urllib.parse import urljoin
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
API_ADDRESS_CLOUDSEARCH = "" if("API_ADDRESS_CLOUDSEARCH" not in os.environ) else os.environ["API_ADDRESS_CLOUDSEARCH"]
def lambda_handler(event, context):
try:
logger.info("-----")
types = event["queryStringParameters"]["type"]
latlon = event["queryStringParameters"]["latlon"]
logger.info("type={0}, latlon={1}".format(types, latlon))
url = "{0}/2013-01-01/search?".format(API_ADDRESS_CLOUDSEARCH)
#urlPrm = "q={0}".format(types) + "&expr.distance=haversin({0},latlon.latitude,latlon.longitude)&return=distance,".format(latlon)
urlPrm = "q={0}&q.options={{fields:['type']}}".format(types) + "&expr.distance=haversin({0},latlon.latitude,latlon.longitude)&return=distance,".format(latlon)
urlField = "type,title,tel,address,latlon,image,candelivery,reservation,candrivethru,cantakeout,facebook,twitter,instagram,homepage,media,media1,media2,media3,media4,media5"
urlEtc = "&sort=distance asc &size=15"
url = url + urlPrm + urlField + urlEtc
logger.info(url)
response = requests.get(url)
response.encoding = response.apparent_encoding
content = response.content.decode("utf-8")
jsn = json.loads(content)
hits = jsn["hits"]["hit"]
result = []
for mark in hits:
tmp = {}
latlon = mark["fields"]["latlon"].split(",")
tmp["type"] = types
tmp["position"] = { "lat": float(latlon[0]), "lng": float(latlon[1]) }
tmp["title"] = mark["fields"]["title"]
tmp["tel"] = mark["fields"]["tel"]
tmp["address"] = mark["fields"]["address"]
if "image" in mark["fields"]:
tmp["image"] = urljoin("https://near-near-map.s3-ap-northeast-1.amazonaws.com/", mark["fields"]["image"])
if "facebook" in mark["fields"]:
tmp["facebook"] = mark["fields"]["facebook"]
if "twitter" in mark["fields"]:
tmp["twitter"] = mark["fields"]["twitter"]
if "instagram" in mark["fields"]:
tmp["instagram"] = mark["fields"]["instagram"]
if "homepage" in mark["fields"]:
tmp["homepage"] = mark["fields"]["homepage"]
if "reservation" in mark["fields"]:
tmp["reservation"] = int(mark["fields"]["reservation"])
if "candelivery" in mark["fields"]:
tmp["canDelivery"] = True if int(mark["fields"]["candelivery"]) == 1 else False
else:
tmp["canDelivery"] = False
if "cantakeout" in mark["fields"]:
tmp["canTakeout"] = True if int(mark["fields"]["cantakeout"]) == 1 else False
else:
tmp["canTakeout"] = False
if "candrivethru" in mark["fields"]:
tmp["canDriveThru"] = True if int(mark["fields"]["candrivethru"]) == 1 else False
else:
tmp["canDriveThru"] = False
if "media" in mark["fields"]:
tmp["media"] = mark["fields"]["media"]
if "media1" in mark["fields"]:
tmp["media1"] = mark["fields"]["media1"]
tmp["media"] = mark["fields"]["media1"]
if "media2" in mark["fields"]:
tmp["media2"] = mark["fields"]["media2"]
if "media3" in mark["fields"]:
tmp["media3"] = mark["fields"]["media3"]
if "media4" in mark["fields"]:
tmp["media4"] = mark["fields"]["media4"]
if "media5" in mark["fields"]:
tmp["media5"] = mark["fields"]["media5"]
result.append(tmp)
return {
"headers": {
"Access-Control-Allow-Origin" : "*",
"Access-Control-Allow-Credentials": "true"
},
'statusCode': 200,
"body": json.dumps(result, ensure_ascii=False, indent=2)
}
except Exception as e:
logger.exception(e)
return {
"statusCode": 500,
"body": "error"
}
| [
"w2or3w@gmail.com"
] | w2or3w@gmail.com |
d17fc95127131914a39bebb4baeb10f344ec6e06 | 3e6c10f13f3d70def2d67681475a3b99a354dd81 | /WebcamStream/resource.py | 686ce2958e2810717fb3256c89ed28de80fc54b9 | [] | no_license | wastedMynd/pythonProject | 11204ed93cc79f60f3f071015565ce49009835ae | 52be40d1b71708d4f24040a6bb17002d863cf2c6 | refs/heads/master | 2022-12-19T04:04:00.581103 | 2020-09-30T19:29:10 | 2020-09-30T19:29:10 | 287,287,748 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | def gen(camera):
while True:
frame = camera.get_frame()
yield (
b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' +
frame +
b'\r\n'
)
| [
"kin.afro@gmail.com"
] | kin.afro@gmail.com |
46c7ec22e04673371751fa9f59abf5a306202f3d | c2907af96097a003f7dfce6718e35299f8c62564 | /setup.py | 73648bd9aa15edc6531e739ea51738303a89cecf | [] | no_license | markcutajar/rllabs | d52fcc8104350e3b5431f6feeb78fef850a8f9e9 | bf5676054c3ac5f2d56387d84987989824e28512 | refs/heads/master | 2023-03-16T07:01:34.782183 | 2021-02-28T21:46:05 | 2021-02-28T21:46:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from setuptools import setup, find_packages
setup(
name='config',
packages=find_packages(),
) | [
"mcut94@gmail.com"
] | mcut94@gmail.com |
680806512eab14a55d2e056a2269f2faa910c457 | f9c8af20349f2fe4a73be4d038826caff87e0ab1 | /Problem Solving/Basic/staircase.py | 0559c2cbfb0eaf8579ceebaa0a06a9603c427ce8 | [] | no_license | danylo-boiko/HackerRank | 0ea14716328ac37377716df7c2fa997805d3f9bf | 1a5bb3462c59e9d8f4d675838a32c55e593f4b8a | refs/heads/main | 2023-08-14T12:36:18.179156 | 2021-10-05T23:13:29 | 2021-10-05T23:13:29 | 400,652,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # https://www.hackerrank.com/challenges/staircase/problem
# !/bin/python3
def staircase(n):
    """Print an n-step staircase of '#' characters, right-aligned to width n."""
    rows = [('#' * height).rjust(n) for height in range(1, n + 1)]
    print('\n'.join(rows))
if __name__ == '__main__':
n = int(input().strip())
staircase(n)
| [
"danielboyko02@gmail.com"
] | danielboyko02@gmail.com |
811034c0c35cd0fccb11bcb53b93c1b425c7d22f | 928f919a3f138917f3dd6ff3696cd496d86ba4c2 | /message-thrift-py-service/message/api/MessageService.py | f73e9bab97c68bdd45137370c494e9cb1c8f30f3 | [] | no_license | kongdou/micro-service-project | 0b3552d8c4a1d44d5b31d49de13ac86308b71700 | a81c18ddb5931951974b92e0c55ffc2ebfaf47c4 | refs/heads/master | 2020-04-03T08:49:32.595519 | 2018-11-01T15:11:56 | 2018-11-01T15:11:56 | 155,145,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 14,758 | py | #
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(object):
def sendMobileMessage(self, mobile, message):
"""
Parameters:
- mobile
- message
"""
pass
def sendMailMessage(self, email, message):
"""
Parameters:
- email
- message
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def sendMobileMessage(self, mobile, message):
"""
Parameters:
- mobile
- message
"""
self.send_sendMobileMessage(mobile, message)
return self.recv_sendMobileMessage()
def send_sendMobileMessage(self, mobile, message):
self._oprot.writeMessageBegin('sendMobileMessage', TMessageType.CALL, self._seqid)
args = sendMobileMessage_args()
args.mobile = mobile
args.message = message
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_sendMobileMessage(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = sendMobileMessage_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "sendMobileMessage failed: unknown result")
def sendMailMessage(self, email, message):
"""
Parameters:
- email
- message
"""
self.send_sendMailMessage(email, message)
return self.recv_sendMailMessage()
def send_sendMailMessage(self, email, message):
self._oprot.writeMessageBegin('sendMailMessage', TMessageType.CALL, self._seqid)
args = sendMailMessage_args()
args.email = email
args.message = message
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_sendMailMessage(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = sendMailMessage_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "sendMailMessage failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["sendMobileMessage"] = Processor.process_sendMobileMessage
self._processMap["sendMailMessage"] = Processor.process_sendMailMessage
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_sendMobileMessage(self, seqid, iprot, oprot):
args = sendMobileMessage_args()
args.read(iprot)
iprot.readMessageEnd()
result = sendMobileMessage_result()
try:
result.success = self._handler.sendMobileMessage(args.mobile, args.message)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("sendMobileMessage", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_sendMailMessage(self, seqid, iprot, oprot):
args = sendMailMessage_args()
args.read(iprot)
iprot.readMessageEnd()
result = sendMailMessage_result()
try:
result.success = self._handler.sendMailMessage(args.email, args.message)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("sendMailMessage", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class sendMobileMessage_args(object):
"""
Attributes:
- mobile
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'mobile', 'UTF8', None, ), # 1
(2, TType.STRING, 'message', 'UTF8', None, ), # 2
)
def __init__(self, mobile=None, message=None,):
self.mobile = mobile
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.mobile = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('sendMobileMessage_args')
if self.mobile is not None:
oprot.writeFieldBegin('mobile', TType.STRING, 1)
oprot.writeString(self.mobile.encode('utf-8') if sys.version_info[0] == 2 else self.mobile)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 2)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class sendMobileMessage_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('sendMobileMessage_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class sendMailMessage_args(object):
"""
Attributes:
- email
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'email', 'UTF8', None, ), # 1
(2, TType.STRING, 'message', 'UTF8', None, ), # 2
)
def __init__(self, email=None, message=None,):
self.email = email
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.email = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('sendMailMessage_args')
if self.email is not None:
oprot.writeFieldBegin('email', TType.STRING, 1)
oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 2)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class sendMailMessage_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('sendMailMessage_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| [
"zhao.xiao.jie@outlook.com"
] | zhao.xiao.jie@outlook.com |
517f21fac637648cd6e23eefffb68fd925e0fc0c | 3bb1aad442fa6202bfb44977ef1aab8c81790cdc | /CNN.py | 068467537ae6e17aab2300eb89d1775865078e80 | [] | no_license | peymanbey/faceKeyPointDetection | 5bca6caf04f98aaca6f99f3a675d20a88906c86d | 1594c296cc83241edcbcba0ffbd4ea9ae45c9a8f | refs/heads/master | 2021-01-19T02:54:48.578222 | 2016-12-14T20:41:29 | 2016-12-14T20:41:29 | 52,149,913 | 2 | 5 | null | null | null | null | UTF-8 | Python | false | false | 8,758 | py | from utils import data_set, shared_dataset, build_update_functions, early_stop_train
import numpy as np
from lasagne.layers import InputLayer, DenseLayer, NonlinearityLayer, count_params
from lasagne.layers import DropoutLayer, get_all_layers, batch_norm, ElemwiseSumLayer
from lasagne.layers import Pool2DLayer as PoolLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.nonlinearities import identity, rectify
import theano.tensor as T
import cPickle as pickle
def single_conv_layer(input_layer, **kwargs):
complex_layer = ConvLayer(incoming=input_layer,**kwargs)
# complex_layer = PoolLayer(complex_layer, pool_size=2, stride=2, mode='average_exc_pad')
return complex_layer
def build_model_vanila_CNN(X, channel = 1,stride=1):
# TODO: set according to daniels guide
conv1filters = 64
conv2filters = 64
conv3filters = 128
conv4filters = 256
net = {}
non_linear_function = rectify
net['input'] = InputLayer((None, channel, 96, 96), input_var=X)
net['conv1'] = single_conv_layer(net['input'],
num_filters=conv1filters,
filter_size=3,
stride=stride,
pad=1,
nonlinearity=non_linear_function,
flip_filters=False)
net['conv2'] = single_conv_layer(net['conv1'],
num_filters=conv2filters,
filter_size=2,
stride=stride,
pad=1,
nonlinearity=non_linear_function,
flip_filters=False)
net['conv2'] = PoolLayer(net['conv2'], pool_size=2, stride=2, mode='average_exc_pad')
net['conv3'] = single_conv_layer(net['conv2'],
num_filters=conv3filters,
filter_size=2,
stride=stride,
pad=1,
nonlinearity=non_linear_function,
flip_filters=False)
#
net['conv4'] = single_conv_layer(net['conv3'],
num_filters=conv4filters,
filter_size=3,
stride=stride,
pad=1,
nonlinearity=non_linear_function,
flip_filters=False)
net['conv4'] = PoolLayer(net['conv4'], pool_size=2, stride=2, mode='average_exc_pad')
# net['fc5'] = DenseLayer(net['conv4'], num_units=512, nonlinearity=non_linear_function)
net['fc5'] = ConvLayer(incoming=net['conv4'],
num_filters=500,
filter_size=1,
stride=1,
pad=0,
nonlinearity=non_linear_function,
flip_filters=False)
# net['fc5'] = DropoutLayer(net['fc5'], p=0.5)
# net['fc5'] = ConvLayer(incoming=net['fc5'],
# num_filters=500,
# filter_size=1,
# stride=1,
# pad=0,
# nonlinearity=non_linear_function,
# flip_filters=False)
# net['fc5'] = DropoutLayer(net['fc5'], p=0.3)
net['fc6'] = DenseLayer(net['fc5'], num_units=30, nonlinearity=identity)
net['prob'] = NonlinearityLayer(net['fc6'], nonlinearity=identity)
return net
def build_CNN_nopool(in_shape,
num_filter,
fil_size,
strides,
num_out,
nlin_func=rectify,
in_var=None):
# build a CNN
net = InputLayer(input_var=in_var,
shape=in_shape)
for i in xrange(len(fil_size)):
net = batch_norm(ConvLayer(net,
num_filters=num_filter[i],
filter_size=fil_size[i],
stride=strides[i],
pad=1,
nonlinearity=nlin_func,
flip_filters=False))
net = DenseLayer(incoming=net,
num_units=num_out,
nonlinearity=identity)
return net
#def resnet_base(net,
# n_f,
# f_size,
# strides,
# num_out,
# nlin_func=rectify,
# in_var=None):
#
# temp = ConvLayer(net, num_filters=n_f, filter_size=3, stride=1, pad=1, nonlinearity=identity, flip_filters=False )
# temp = ConvLayer(temp, num_filters=n_f, filter_size=1, stride=1, pad=0, nonlinearity=identity, flip_filters=False )
#
#
# return net
if __name__ == "__main__":
# path to train and testing data
PATH_train = "../data/training.csv"
PATH_test = "../data/test.csv"
# load data
print 'loading data \n'
data = data_set(path_train=PATH_train, path_test=PATH_test)
print 'sobel stacking image'
data.stack_origi_sobel()
# augmentation
# data.augment()
# center data
# print 'center alexnet \n'
# data.center_alexnet()
# print 'center Xs VGG Style, X doesnt have missing values \n'
# data.center_VGG()
# generate test validation split
data.split_trainval()
train_set_x = data.X
valid_set_x = data.X_val
train_set_y = data.y
valid_set_y = data.y_val
n_ch = train_set_x.shape[1]
print 'shape of train X', train_set_x.shape, 'and y', train_set_y.shape,'\n'
print 'shape of validation X', valid_set_x.shape, 'and y', valid_set_y.shape, '\n'
# build the mask matrix for missing values, load it into theano shared variable
# build masks where 0 values correspond to nan values
temp = np.isnan(train_set_y)
train_MASK = np.ones(temp.shape)
train_MASK[temp] = 0
# still have to replace nan with something to avoid propagation in theano
train_set_y[temp] = -1000
temp = np.isnan(valid_set_y)
val_MASK = np.ones(temp.shape)
val_MASK[temp] = 0
# still have to replace nan with something to avoid propagation in theano
valid_set_y[temp] = -1000
# load into theano shared variable
print 'load data to gpu \n'
train_set_x, train_set_y = shared_dataset(train_set_x, train_set_y)
valid_set_x, valid_set_y = shared_dataset(valid_set_x, valid_set_y)
val_MASK, train_MASK = shared_dataset(val_MASK, train_MASK)
X = T.ftensor4('X')
y = T.matrix('y')
batch_size = 32
l2 = .0002
learn_rate = 1e-3
#####################################################
# # Continue a previous run
# with open("results_backup.p", "rb") as f:
# best_network_params, best_val_loss_, best_epoch_,train_loss_history_, val_loss_history_, network = pickle.load(f)
# # extract input var
# print 'extract input var \n'
# X = get_all_layers(network)[0].input_var
#####################################################
# # VGG run
# net = build_model_vanila_CNN(X=X, channel= n_ch, stride=1 )
# network = net['prob']
#####################################################
# FULLCCN run
network = build_CNN_nopool(in_shape = (None, n_ch,96,96),
num_filter = [64,64,128,128,128,128],
fil_size = [ 3, 1, 3, 3, 3, 12],
strides = [ 1, 1, 2, 2, 2, 1],
num_out = 30,
nlin_func=rectify,
in_var=X)
print "num_params", count_params(network)
#####################################################
train_fn, val_fn = build_update_functions(train_set_x=train_set_x, train_set_y=train_set_y,
valid_set_x=valid_set_x,valid_set_y= valid_set_y,
y= y,X= X,network=network,
val_MASK=val_MASK, train_MASK=train_MASK,
learning_rate=learn_rate,batch_size=batch_size,l2_reg=l2)
print 'compile done successfully \n'
# call early_stop_train function
early_stop_train(train_set_x, train_set_y,
valid_set_x, valid_set_y,
network, train_fn, val_fn,
batch_size=batch_size) | [
"p.beyranvand@gmail.com"
] | p.beyranvand@gmail.com |
3056ee16f7416b3552aad2023ab5e45bf0fe7280 | c81fe584dca70292c44367194c662487c890ff7f | /alibi/api/defaults.py | c6e49ce5b9a674fe9cfcf4f3e04201bd15e5d698 | [
"Apache-2.0"
] | permissive | alexcombessie/alibi | f3d7ea022ed4a013c5f1f7accc705b8749a3633c | 6fa5c3968e387259c8d5e781e509fbcc09c90a79 | refs/heads/master | 2023-04-20T04:01:09.620927 | 2021-05-14T09:35:29 | 2021-05-14T09:35:29 | 366,784,442 | 0 | 0 | Apache-2.0 | 2021-05-12T16:41:13 | 2021-05-12T16:41:12 | null | UTF-8 | Python | false | false | 4,980 | py | """
This module defines the default metadata and data dictionaries for each explanation method.
Note that the "name" field is automatically populated upon initialization of the corresponding
Explainer class.
"""
# Anchors
DEFAULT_META_ANCHOR = {"name": None,
"type": ["blackbox"],
"explanations": ["local"],
"params": {}}
"""
Default anchor metadata.
"""
DEFAULT_DATA_ANCHOR = {"anchor": [],
"precision": None,
"coverage": None,
"raw": None} # type: dict
"""
Default anchor data.
"""
DEFAULT_DATA_ANCHOR_IMG = {"anchor": [],
"segments": None,
"precision": None,
"coverage": None,
"raw": None} # type: dict
"""
Default anchor image data.
"""
# CEM
DEFAULT_META_CEM = {"name": None,
"type": ["blackbox", "tensorflow", "keras"],
"explanations": ["local"],
"params": {}}
"""
Default CEM metadata.
"""
DEFAULT_DATA_CEM = {"PN": None,
"PP": None,
"PN_pred": None,
"PP_pred": None,
"grads_graph": None,
"grads_num": None,
"X": None,
"X_pred": None
} # type: dict
"""
Default CEM data.
"""
# Counterfactuals
DEFAULT_META_CF = {"name": None,
"type": ["blackbox", "tensorflow", "keras"],
"explanations": ["local"],
"params": {}}
"""
Default counterfactual metadata.
"""
DEFAULT_DATA_CF = {"cf": None,
"all": [],
"orig_class": None,
"orig_proba": None,
"success": None} # type: dict
"""
Default counterfactual data.
"""
# CFProto
DEFAULT_META_CFP = {"name": None,
"type": ["blackbox", "tensorflow", "keras"],
"explanations": ["local"],
"params": {}}
"""
Default counterfactual prototype metadata.
"""
DEFAULT_DATA_CFP = {"cf": None,
"all": [],
"orig_class": None,
"orig_proba": None,
"id_proto": None
} # type: dict
"""
Default counterfactual prototype metadata.
"""
# KernelSHAP
KERNEL_SHAP_PARAMS = [
'link',
'group_names',
'grouped',
'groups',
'weights',
'summarise_background',
'summarise_result',
'transpose',
'kwargs',
]
"""
KernelShap parameters updated and return in metadata['params'].
"""
DEFAULT_META_KERNEL_SHAP = {
"name": None,
"type": ["blackbox"],
"task": None,
"explanations": ["local", "global"],
"params": dict.fromkeys(KERNEL_SHAP_PARAMS)
} # type: dict
"""
Default KernelShap metadata.
"""
DEFAULT_DATA_KERNEL_SHAP = {
"shap_values": [],
"expected_value": [],
"categorical_names": {},
"feature_names": [],
"raw": {
"raw_prediction": None,
"prediction": None,
"instances": None,
"importances": {},
}
} # type: dict
"""
Default KernelShap data.
"""
# ALE
DEFAULT_META_ALE = {
"name": None,
"type": ["blackbox"],
"explanations": ["global"],
"params": {}
} # type: dict
"""
Default ALE metadata.
"""
DEFAULT_DATA_ALE = {
"ale_values": [],
"constant_value": None,
"ale0": [],
"feature_values": [],
"feature_names": None,
"target_names": None,
"feature_deciles": None
} # type: dict
"""
Default ALE data.
"""
# TreeShap
TREE_SHAP_PARAMS = [
'model_output',
'summarise_background',
'summarise_result',
'approximate',
'interactions',
'explain_loss',
'algorithm',
'kwargs'
]
"""
TreeShap parameters updated and return in metadata['params'].
"""
DEFAULT_META_TREE_SHAP = {
"name": None,
"type": ["whitebox"],
"task": None, # updates with 'classification' or 'regression'
"explanations": ["local", "global"],
"params": dict.fromkeys(TREE_SHAP_PARAMS)
} # type: dict
"""
Default TreeShap metadata.
"""
DEFAULT_DATA_TREE_SHAP = {
"shap_values": [],
"shap_interaction_values": [],
"expected_value": [],
"categorical_names": {},
"feature_names": [],
"raw": {
"raw_prediction": None,
"loss": None,
"prediction": None,
"instances": None,
"labels": None,
"importances": {},
}
} # type: dict
"""
Default TreeShap data.
"""
# Integrated gradients
DEFAULT_META_INTGRAD = {
"name": None,
"type": ["whitebox"],
"explanations": ["local"],
"params": {}
} # type: dict
"""
Default IntegratedGradients metadata.
"""
DEFAULT_DATA_INTGRAD = {
"attributions": None,
"X": None,
"baselines": None,
"predictions": None,
"deltas": None
} # type: dict
"""
Default IntegratedGradients data.
"""
| [
"noreply@github.com"
] | noreply@github.com |
88890e99af2180a2a755d3c8e097e85249847f22 | 782331de990cedc6b1e616cbcd53e65e635449ee | /code/gui_me_code.py | d474a49c73c7042e9940416c9c22b0b709aa0244 | [
"MIT"
] | permissive | masonrogers14/speedbot | 7462b62b05a621d35fada68106931ffdd5bc181a | f71f4b4dbdb60b68c111e9c9bb935c0e7b6644c6 | refs/heads/master | 2020-12-14T04:57:40.908610 | 2020-01-20T16:32:55 | 2020-01-20T16:32:55 | 234,648,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 2 10:17:50 2019
@author: Mason
"""
from PyQt5 import QtCore, QtWidgets
import gui_code as gc
import jump_classes as jc
uiME = None; MainWindow = None; ui = None; events = None
def setup_me(tempUIME, tempMainWindow, tempUI, tempEvents):
#Initialize global variables
global uiME, MainWindow, ui, events
uiME=tempUIME; MainWindow=tempMainWindow; ui=tempUI; events=tempEvents
#Prepare Pushbutton handling
uiME.pushButton.clicked.connect(reorder_events)
uiME.pushButton_2.clicked.connect(delete_event)
#Initialize event list widget
_translate = QtCore.QCoreApplication.translate
for name in events:
item = QtWidgets.QListWidgetItem()
item.setText(_translate("Dialog", name))
uiME.listWidget.addItem(item)
if isinstance(events[name], jc.Rest) or isinstance(events[name], jc.Freestyle):
item.setFlags(QtCore.Qt.ItemFlags(4))
#Delete event from directory
def delete_event():
name = uiME.listWidget.currentItem().text()
for i, eventName in enumerate(events):
if eventName == name:
del events[eventName]
uiME.listWidget.takeItem(i)
break
gc.update_libr()
#Reorder event list
def reorder_events():
while uiME.listWidget.item(0):
name = uiME.listWidget.takeItem(0).text()
event = events.pop(name)
events[name] = event
gc.update_libr()
MainWindow.ME.close()
| [
"Mason@dhcp-18-20-190-187.dyn.mit.edu"
] | Mason@dhcp-18-20-190-187.dyn.mit.edu |
9bf5fa02f6351b3f46596cb21455d8c9b6cfaf90 | 36e369fa4f09955a76796a46d5364502e3445266 | /server/server/__main__.py | 460050f492a672ab9c99e6d8d738e37bb7169748 | [] | no_license | darrenswhite/plagiarism-detection-plugin | 480b56b2c04c1d26aa3ab5913ce12a3abf3de3b2 | 1398f44c9cac0fe553a9e7e7e5e0be3ebbdb1ed0 | refs/heads/master | 2021-03-27T10:29:46.258155 | 2020-07-24T14:52:51 | 2020-07-24T14:52:51 | 120,503,426 | 0 | 0 | null | 2020-07-24T14:53:20 | 2018-02-06T18:20:58 | Python | UTF-8 | Python | false | false | 377 | py | # This file is used for executing the module when using "python -m server"
import sys
if __package__ is None and not hasattr(sys, 'frozen'):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
from server import main
if __name__ == '__main__':
main()
| [
"darrenswhite95@gmail.com"
] | darrenswhite95@gmail.com |
757ad5797b4182e0b1dc39f8fd424e66c7e6df6b | 23307f8e889f232724756bb26b1def1f0ba3323b | /fairseq/tasks/speech_to_text.py | 9388047a5e92e1c66236022de664b0480b9862be | [] | no_license | krisjeong/fairseq_data | 9395cb574d91147c95b6f08eecd814e4cb2fdad8 | f29e7dae3c2be3a908e795bfc952cc845b80280d | refs/heads/master | 2023-07-12T22:21:22.349970 | 2021-08-18T06:20:11 | 2021-08-18T06:20:11 | 397,152,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,214 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os.path as op
from argparse import Namespace
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
)
from fairseq.tasks import FairseqTask, register_task
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(FairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=6000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
dict_path = op.join(args.data, data_cfg.vocab_filename)
if not op.isfile(dict_path):
raise FileNotFoundError(f"Dict not found: {dict_path}")
tgt_dict = Dictionary.load(dict_path)
logger.info(
f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
)
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
return cls(args, tgt_dict)
def build_criterion(self, args):
from fairseq import criterions
if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
raise ValueError(
'Please set "--ignore-prefix-size 1" since '
"target language ID token is prepended as BOS."
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return None
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_channels
return super(SpeechToTextTask, self).build_model(args)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
raise ValueError(
'Please set "--prefix-size 1" since '
"target language ID token is prepended as BOS."
)
lang_token_ids = {
i
for s, i in self.tgt_dict.indices.items()
if SpeechToTextDataset.is_lang_tag(s)
}
extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_tokenizer(self, args):
logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
def build_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
@classmethod
def build_dataset_for_inference(cls, audio_paths, n_frames):
return SpeechToTextDataset("interactive", False, {}, audio_paths, n_frames)
| [
"krisjeong00@gmail.com"
] | krisjeong00@gmail.com |
d932b6575f84114548c86880689a28bd8eb32410 | 026051fd279d7d9fe5852ab060933ff75201f702 | /Web_Development/django_project/manage.py | 3b86e4c0b0548e52b13dee56fe55108495228681 | [] | no_license | cutnada/Python-Notes | 2a211f3c15c1c83c38fb7c1f549741a3726a1ea9 | 37445db1d871911ec0045a1cebce4982e657b574 | refs/heads/master | 2022-09-23T07:38:36.322331 | 2019-07-19T12:08:43 | 2019-07-19T12:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'd_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
fa350fdb3e72dd4791fd8ec26ddfb37adacabbf3 | c84a3895e6fdcaff5a9f97abe9c3efbecbad535f | /trader/connector/bitmex/trader.py | 8a25600d285d7b0035c8a73b3cceaf9e557e151c | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | cal97g/siis | 5a171eb34dd3f7ae6e19d8065ff1e2f8b6251319 | adc06e48e5df6ffd7bed6ee6b79d0aa3cfe80e0d | refs/heads/master | 2020-07-23T18:11:57.267225 | 2019-09-05T01:00:37 | 2019-09-05T01:00:37 | 207,663,001 | 0 | 1 | null | 2019-09-10T21:05:25 | 2019-09-10T21:05:25 | null | UTF-8 | Python | false | false | 22,885 | py | # @date 2018-08-21
# @author Frederic SCHERMA
# @license Copyright (c) 2018 Dream Overflow
# Trader/autotrader connector for bitmex.com
import time
import base64
import uuid
import copy
import requests
from datetime import datetime
from notifier.notifiable import Notifiable
from notifier.signal import Signal
from trader.trader import Trader
from trader.market import Market
from .account import BitMexAccount
from trader.position import Position
from trader.order import Order
from connector.bitmex.connector import Connector
from config import config
import logging
logger = logging.getLogger('siis.trader.bitmex')
class BitMexTrader(Trader):
"""
BitMex real or testnet trader based on the BitMexWatcher.
@todo verify than on_order_updated is working without the temporary fixture now it has signal from watchers
"""
REST_OR_WS = False # True if REST API sync else do with the state returned by WS events
def __init__(self, service):
super().__init__("bitmex.com", service)
self._watcher = None
self._account = BitMexAccount(self)
self._last_position_update = 0
self._last_order_update = 0
def connect(self):
super().connect()
# retrieve the ig.com watcher and take its connector
self.lock()
self._watcher = self.service.watcher_service.watcher(self._name)
if self._watcher:
self.service.watcher_service.add_listener(self)
self.unlock()
if self._watcher and self._watcher.connected:
self.on_watcher_connected(self._watcher.name)
def disconnect(self):
super().disconnect()
self.lock()
if self._watcher:
self.service.watcher_service.remove_listener(self)
self._watcher = None
self.unlock()
def on_watcher_connected(self, watcher_name):
super().on_watcher_connected(watcher_name)
# markets, orders and positions
self.lock()
# fetch tradable markets
if '*' in self.configured_symbols():
# all symbols from the watcher
symbols = self._watcher.instruments
else:
# only configured symbols
symbols = self.configured_symbols()
for symbol in symbols:
self.market(symbol, True)
self.__fetch_orders()
self.__fetch_positions()
self.unlock()
# initial account update
self.account.update(self._watcher.connector)
def on_watcher_disconnected(self, watcher_name):
super().on_watcher_disconnected(watcher_name)
def market(self, market_id, force=False):
"""
Fetch from the watcher and cache it. It rarely changes so assume it once per connection.
@param force Force to update the cache
"""
market = self._markets.get(market_id)
if (market is None or force) and self._watcher is not None:
try:
market = self._watcher.fetch_market(market_id)
self._markets[market_id] = market
except Exception as e:
logger.error("fetch_market: %s" % repr(e))
return None
return market
@property
def authenticated(self):
return self.connected and self._watcher.connector.authenticated
@property
def connected(self):
return self._watcher is not None and self._watcher.connector is not None and self._watcher.connector.connected
def pre_update(self):
super().pre_update()
if self._watcher is None:
self.connect()
elif self._watcher.connector is None or not self._watcher.connector.connected:
# wait for the watcher be connected
retry = 0
while self._watcher.connector is None or not self._watcher.connector.connected:
retry += 1
if retry >= int(5 / 0.01):
self._watcher.connect()
# and wait 0.5 second to be connected
time.sleep(0.5)
# don't waste the CPU
time.sleep(0.01)
def update(self):
"""
Here we use the WS API so its only a simple sync we process here.
"""
if not super().update():
return False
if self._watcher is None or not self._watcher.connected:
return True
if BitMexTrader.REST_OR_WS:
# account data update
try:
self.lock()
self.__fetch_account()
except Exception as e:
import traceback
logger.error(traceback.format_exc())
finally:
self.unlock()
# positions
try:
self.lock()
self.__fetch_positions()
now = time.time()
self._last_update = now
except Exception as e:
import traceback
logger.error(traceback.format_exc())
finally:
self.unlock()
# orders
try:
self.lock()
self.__fetch_orders()
now = time.time()
self._last_update = now
except Exception as e:
import traceback
logger.error(traceback.format_exc())
finally:
self.unlock()
return True
def post_update(self):
super().post_update()
# don't wast the CPU 5 ms loop
time.sleep(0.005)
@Trader.mutexed
def create_order(self, order):
if not self.has_market(order.symbol):
logger.error("%s does not support market %s in order %s !" % (self.name, order.symbol, order.order_id))
return
if not self._activity:
return False
postdict = {
'symbol': order.symbol,
'clOrdID': order.ref_order_id,
}
qty = order.quantity
# short means negative quantity
if order.direction == Position.SHORT:
qty = -qty
exec_inst = []
# order type
# @todo Order.ORDER_STOP_LIMIT
if order.order_type == Order.ORDER_MARKET:
postdict['ordType'] = 'Market'
postdict['orderQty'] = qty
elif order.order_type == Order.ORDER_LIMIT:
postdict['ordType'] = 'Limit'
postdict['orderQty'] = qty
postdict['price'] = order.price
# only possible with limit order
if order.post_only:
exec_inst.append("ParticipateDoNotInitiate")
elif order.order_type == Order.ORDER_STOP:
postdict['ordType'] = 'Stop'
postdict['orderQty'] = qty
postdict['stopPx'] = order.stop_price
elif order.order_type == Order.ORDER_STOP_LIMIT:
postdict['ordType'] = 'StopLimit'
postdict['orderQty'] = qty
postdict['price'] = order.price
postdict['stopPx'] = order.stop_price
elif order.order_type == Order.ORDER_TAKE_PROFIT:
postdict['ordType'] = 'MarketIfTouched'
postdict['orderQty'] = qty
postdict['stopPx'] = order.stop_price
elif order.order_type == Order.ORDER_TAKE_PROFIT_LIMIT:
postdict['ordType'] = 'LimitIfTouched'
postdict['orderQty'] = qty
postdict['price'] = order.price
postdict['stopPx'] = order.stop_price
else:
postdict['ordType'] = 'Market'
postdict['orderQty'] = qty
# execution price
if order.price_type == Order.PRICE_LAST:
exec_inst.append('LastPrice')
elif order.price_type == Order.PRICE_INDEX:
exec_inst.append('IndexPrice')
elif order.price_type == Order.PRICE_MARK:
exec_inst.append('MarkPrice')
if order.reduce_only:
exec_inst.append("ReduceOnly")
# exec_inst.append("Close") # distinct for reduce only but close imply reduceOnly
# close implies a qty or a side
if exec_inst:
postdict['execInst'] = ','.join(exec_inst)
logger.info("Trader %s order %s %s @%s %s" % (self.name, order.direction_to_str(), order.symbol, order.price, order.quantity))
try:
result = self._watcher.connector.request(path="order", postdict=postdict, verb='POST', max_retries=15)
except Exception as e:
logger.error(str(e))
return False
if result and result.get('ordRejReason'):
logger.error("%s rejected order %s from %s %s - cause : %s !" % (
self.name, order.direction_to_str(), order.quantity, order.symbol, result['ordRejReason']))
return False
# store the order with its order id
order.set_order_id(result['orderID'])
order.created_time = self._parse_datetime(result.get('timestamp')).timestamp()
order.transact_time = self._parse_datetime(result.get('transactTime')).timestamp()
self._orders[order.order_id] = order
return True
@Trader.mutexed
def cancel_order(self, order_or_id):
# DELETE endpoint=order
if type(order_or_id) is str:
order = self._orders.get(order_or_id)
else:
order = order_or_id
if not self._activity:
return False
if order is None:
return False
order_id = order.order_id if order else order_or_id
symbol = order.symbol or ""
postdict = {
'orderID': order_id,
}
try:
result = self._watcher.connector.request(path="order", postdict=postdict, verb='DELETE', max_retries=15)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
# no longer exist, accepts as ok
return True
else:
logger.error(str(e))
return False
except Exception as e:
logger.error(str(e))
return False
# if result and result.get('ordRejReason'):
# logger.error("%s rejected cancel order %s from %s - cause : %s !" % (
# self.name, order_id, symbol, result['ordRejReason']))
# return False
return True
@Trader.mutexed
def close_position(self, position_id, market=True, limit_price=None):
if not self._activity:
return False
position = self._positions.get(position_id)
if position is None or not position.is_opened():
return False
if not self.has_market(position.symbol):
logger.error("%s does not support market %s on close position %s !" % (
self.name, position.symbol, position.position_id))
return False
ref_order_id = "siis_" + base64.b64encode(uuid.uuid4().bytes).decode('utf8').rstrip('=\n')
# keep for might be useless in this case
order.set_ref_order_id(ref_order_id)
order = Order(self, position.symbol)
order.set_position_id(position.position_id)
order.quantity = position.quantity
order.direction = -position.direction # neg direction
postdict = {
'symbol': order.symbol,
'clOrdID': ref_order_id,
'execInst': 'Close',
# 'execInst': 'ReduceOnly,Close' # @todo why rejected with ReduceOnly ?
}
# short mean negative quantity
if order.direction == Position.SHORT:
qty = -qty
# fully close (using Close and need 'side' when qty is not defined)
# qty = None
# order type
if market:
order.order_type = Order.ORDER_MARKET
postdict['ordType'] = "Market"
postdict['orderQty'] = qty
else:
order.order_type = Order.ORDER_LIMIT
order.price = limit_price
postdict['ordType'] = "Limit"
postdict['price'] = order.price
postdict['orderQty'] = qty
if qty is None:
postdict['side'] = "Buy" if order.direction > 0 else "Sell"
try:
result = self._watcher.connector.request(path="order", postdict=postdict, verb='POST', max_retries=15)
except Exception as e:
logger.error(str(e))
return False
if result and result.get('ordRejReason'):
logger.error("%s rejected closing order %s from %s %s - cause : %s !" % (
self.name, order.direction_to_str(), order.quantity, order.symbol, result['ordRejReason']))
return False
# store the order with its order id
order.set_order_id(result['orderID'])
# and store the order
self._orders[order.order_id] = order
# set position closing until we get confirmation on a next update
position.closing(limit_price)
return True
@Trader.mutexed
def modify_position(self, position_id, stop_loss_price=None, take_profit_price=None):
"""Not supported"""
return False
def positions(self, market_id):
self.lock()
position = self._positions.get(market_id)
if position:
positions = [copy.copy(position)]
else:
positions = []
self.unlock()
return positions
#
# slots
#
@Trader.mutexed
def on_order_updated(self, market_id, order_data, ref_order_id):
market = self._markets.get(order_data['symbol'])
if market is None:
# not interested by this market
return
try:
# @todo temporary substitution
self.__update_orders()
except Exception as e:
logger.error(repr(e))
#
# private
#
def _parse_datetime(self, date_str):
return datetime.strptime(date_str or '1970-01-01 00:00:00.000Z', "%Y-%m-%dT%H:%M:%S.%fZ") # .%fZ")
#
# protected
#
def __fetch_account(self):
# @todo use REST API to fetch account state
self._account.update(self._watcher.connector)
def __fetch_positions(self):
# @todo use REST API to fetch open positions
for symbol, market in self._markets.items():
return self.__update_positions(symbol, market)
    def __fetch_orders(self):
        """Fetch and refresh the open orders from the websocket data.

        Thin wrapper kept for symmetry with __fetch_positions.
        """
        # @todo use REST API to fetch open orders
        return self.__update_orders()
def __update_positions(self, symbol, market):
if not self.connected:
return
# position for each configured market
for symbol, market in self._markets.items():
pos = self._watcher.connector.ws.position(symbol)
position = None
if self._positions.get(symbol):
position = self._positions.get(symbol)
elif pos['isOpen']:
# insert the new position
position = Position(self)
position.set_position_id(symbol)
position.set_key(self.service.gen_key())
quantity = abs(float(pos['currentQty']))
direction = Position.SHORT if pos['currentQty'] < 0 else Position.LONG
position.entry(direction, symbol, quantity)
position.leverage = pos['leverage']
position.entry_price = pos['avgEntryPrice']
position.created_time = datetime.strptime(pos['openingTimestamp'], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp() # .%fZ")
# id is symbol
self._positions[symbol] = position
elif (not pos['isOpen'] or pos['currentQty'] == 0) and self._positions.get(symbol):
# no more position
del self._positions[symbol]
if position:
# absolute value because we work with positive quantity + direction information
position.quantity = abs(float(pos['currentQty']))
position.direction = Position.SHORT if pos['currentQty'] < 0 else Position.LONG
position.leverage = pos['leverage']
# position.market_close = pos['market_close']
position.entry_price = pos['avgEntryPrice']
position.created_time = datetime.strptime(pos['openingTimestamp'], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp() # .%fZ")
# XBt to XBT
# ratio = 1.0
# if pos['currency'] == 'XBt':
# ratio = 1.0 / 100000000.0
# don't want them because they are in XBt or XBT
# position.profit_loss = (float(pos['unrealisedPnl']) * ratio)
# position.profit_loss_rate = float(pos['unrealisedPnlPcnt'])
# # must be updated using the market taker fee
# position.profit_loss_market = (float(pos['unrealisedPnl']) * ratio)
# position.profit_loss_market_rate = float(pos['unrealisedPnlPcnt'])
# compute profit loss in base currency
# @todo disabled for now util fix contract_size and value_per_pip calculation
# position.update_profit_loss(market)
def __update_orders(self):
if not self.connected:
return
# filters only siis managed orders
src_orders = self._watcher.connector.ws.open_orders("") # "siis_")
# first delete older orders
order_rm_list = []
for k, order in self._orders.items():
found = False
for src_order in src_orders:
src_order_id = src_order['clOrdID'] or src_order['orderID']
if order.order_id == src_order['clOrdID'] or order.order_id == src_order['orderID']:
found = True
break
if not found:
order_rm_list.append(order.order_id)
for order_id in order_rm_list:
del self._orders[order_id]
# insert or update active orders
for src_order in src_orders:
found = False
src_order_id = src_order['clOrdID'] or src_order['orderID']
order = self._orders.get(src_order_id)
if order is None:
# insert
order = Order(self, src_order['symbol'])
order.set_order_id(src_order_id)
self._orders[order.order_id] = order
else:
order = self._orders.get(src_order_id)
# logger.info(src_order)
# probably modifier or when leavesQty is update the ordStatus must change
# if src_order['ordStatus'] != "New":
# continue
# update
order.direction = Position.LONG if src_order['side'] == 'Buy' else Position.SHORT
# 'orderQty' (ordered qty), 'cumQty' (cumulative done), 'leavesQty' (remaning)
order.quantity = src_order.get('leavesQty', src_order.get('orderQty', 0))
if src_order.get('transactTime'):
order.transact_time = self._parse_datetime(src_order.get('transactTime')).timestamp()
if src_order['ordType'] == "Market":
order.order_type = Order.ORDER_MARKET
elif src_order['ordType'] == "Limit":
order.order_type = Order.ORDER_LIMIT
order.price = src_order.get('price')
elif src_order['ordType'] == "Stop":
order.order_type = Order.ORDER_STOP
order.stop_price = src_order.get('stopPx')
elif src_order['ordType'] == "StopLimit":
order.order_type = Order.ORDER_STOP_LIMIT
order.price = src_order.get('price')
order.stop_price = src_order.get('stopPx')
elif src_order['ordType'] == "MarketIfTouched":
order.order_type = Order.ORDER_TAKE_PROFIT
order.stop_price = src_order.get('stopPx')
elif src_order['ordType'] == "LimitIfTouched":
order.order_type = Order.ORDER_TAKE_PROFIT_LIMIT
order.price = src_order.get('price')
order.stop_price = src_order.get('stopPx')
if src_order['timeInForce'] == 'GoodTillCancel':
order.time_in_force = Order.TIME_IN_FORCE_GTC
elif src_order['timeInForce'] == 'ImmediateOrCancel':
order.time_in_force = Order.TIME_IN_FORCE_IOC
elif src_order['timeInForce'] == 'FillOrKill':
order.time_in_force = Order.TIME_IN_FORCE_FOK
else:
order.time_in_force = Order.TIME_IN_FORCE_GTC
# triggered, ordRejReason, currency
# @todo
# execution options
exec_inst = src_order['execInst'].split(',')
# taker or maker fee
if 'ParticipateDoNotInitiate' in exec_inst:
order.post_only = True
else:
order.post_only = False
# close reduce only
if 'Close' in exec_inst:
# close only order (must be used with reduce only, only reduce a position, and close opposites orders)
order.close_only = True
else:
order.close_only = False
# close reduce only
if 'ReduceOnly' in exec_inst:
# reduce only order (only reduce a position)
order.reduce_only = True
else:
order.redeuce_only = False
# execution price
if 'LastPrice' in exec_inst:
order.price_type = Order.PRICE_LAST
elif 'IndexPrice' in exec_inst:
order.price_type = Order.PRICE_MARK
elif 'MarkPrice' in exec_inst:
order.price_type = Order.PRICE_INDEX
# {'orderID': 'f1b0e6b1-3459-9fc8-d948-911d5032a521', 'clOrdID': '', 'clOrdLinkID': '', 'account': 513190, 'symbol': 'XBTUSD', 'side': 'Buy', 'simpleOrderQty': None,
# 'orderQty': 500, 'price': 7092.5, 'displayQty': None, 'stopPx': None, 'pegOffsetValue': None, 'pegPriceType': '', 'currency': 'USD', 'settlCurrency': 'XBt',
# 'ordType': 'Limit', 'timeInForce': 'GoodTillCancel', 'execInst': 'ParticipateDoNotInitiate', 'contingencyType': '', 'exDestination': 'XBME', 'ordStatus': 'New',
# 'triggered': '', 'workingIndicator': True, 'ordRejReason': '', 'simpleLeavesQty': 0.0705, 'leavesQty': 500, 'simpleCumQty': 0, 'cumQty': 0, 'avgPx': None,
# 'multiLegReportingType': 'SingleSecurity', 'text': 'Amended price: Amend from www.bitmex.com\nSubmission from www.bitmex.com', 'transactTime': '2018-09-01T21:09:09.688Z',
# 'timestamp': '2018-09-01T21:09:09.688Z'}
| [
"frederic.scherma@gmail.com"
] | frederic.scherma@gmail.com |
43c06f8278a5366020f9d1faef6d11fbe0df03ae | 82ebc6142f7044f8e908ffd6b2dc9e699191fd36 | /users/serializers.py | a7c000728c4688b5ce63c1f4c258ca68ee3a3d0d | [] | no_license | 32dantey/shopbuild | 4f775209e5b320364a8a845583c0d3c77f9844ee | 745b6cf73c8da52ed93b8bfe49055624dfa0aea2 | refs/heads/master | 2023-08-25T17:29:23.470994 | 2021-11-14T14:17:05 | 2021-11-14T14:17:05 | 427,917,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer exposing a minimal view of Django's built-in User model."""

    class Meta:
        # only non-sensitive fields of auth.User are serialized
        model = User
        fields = ['username', 'email', 'is_staff']
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
a09e1fca395152b9a322a0ac515257f8df319da7 | 81c713f21ba0bbc5d2a82c6739ff08ace13cc4ce | /PyGo/Go.py | 55612c0ad094a145bfc218efe1afa9203f365ee4 | [] | no_license | Shua1Chao/18S103138 | 29aa4e9aa737aacdd84bb962a2973b27706ab492 | 8bd2e85371d17b36d3a4ecf1a5f719aeb61c6615 | refs/heads/master | 2020-06-27T04:54:54.895821 | 2019-07-31T12:41:07 | 2019-07-31T12:41:07 | 199,849,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from GoGame import GoGame
from GoGame import BoardSize
from GoDisplay import GoDisplay
import sys
import pygame
# build a large-board game, dump its initial state to stdout,
# then hand it to the graphical display loop
game = GoGame(BoardSize.Large)
print(game.to_string())
gameDisplay = GoDisplay(game)
gameDisplay.start() | [
"491922401@qq.com"
] | 491922401@qq.com |
aa29aa9dd6c0b5f6833afd90f618c86c2bebc4b7 | 0386591b51fdbf5759faef6afb8729b64a3f1589 | /layerserver/widgets/creationuser.py | 0d14842f70435682d0eb6129fb35fbba132c0939 | [
"BSD-3-Clause"
] | permissive | giscube/giscube-admin | 1e155402e094eb4db1f7ca260a8d1402e27a31df | 4ce285a6301f59a8e48ecf78d58ef83c3827b5e0 | refs/heads/main | 2023-07-11T17:23:56.531443 | 2023-02-06T15:12:31 | 2023-02-06T15:12:31 | 94,087,469 | 7 | 1 | BSD-3-Clause | 2023-07-07T13:22:09 | 2017-06-12T11:12:56 | Python | UTF-8 | Python | false | false | 371 | py | from .base import BaseWidget
class CreationUserWidget(BaseWidget):
    """Widget that stamps the authenticated user's name on object creation.

    The target field must be declared read-only, since its value is always
    derived from the request and never supplied by the user.
    """
    base_type = 'string'

    @staticmethod
    def create(request, validated_data, widget):
        # record who created the object under the widget's field name
        field = widget['name']
        validated_data[field] = request.user.username

    @staticmethod
    def is_valid(cleaned_data):
        # a server-generated value only makes sense on a read-only field
        if cleaned_data['readonly']:
            return None
        return BaseWidget.ERROR_READONLY_REQUIRED
| [
"abusquets@gmail.com"
] | abusquets@gmail.com |
db2c4c8a768349cb60470463d4f49679f5f70077 | 95576660c9afd77f671f7fcff98ca7e34af6dca9 | /project/yolo more.py | 56cfd1e4e8e8f704d14e1fcef2110246d8fb1ab8 | [] | no_license | PrafulAradhyamth/Yolo | e5cd4d12137390e4c59865aa1214eb87a8ff7cea | 3e6d08f4c3b6949f8914b52bd9eae8185b169921 | refs/heads/master | 2022-12-27T06:35:49.222795 | 2020-09-12T05:59:51 | 2020-09-12T05:59:51 | 294,881,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | import cv2
import matplotlib.pyplot as plt
import numpy as np
import pyttsx3
# load the pretrained YOLOv3 network (weights + config must sit next to the script)
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
# one COCO class label per line
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
# names of the unconnected output layers (indices are 1-based)
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# one random BGR color per class, used when drawing boxes
colors = np.random.uniform(0, 255, size=(len(classes), 3))
# default webcam as the video source
vidfeed = cv2.VideoCapture(0)
def getFrame(sec):
    """Grab the frame at `sec` seconds, run YOLO detection on it, draw and
    announce the detections, and show the annotated image.

    Returns the capture's success flag so the caller can use it as a loop
    condition (falsy once the stream stops yielding frames).
    """
    vidfeed.set(cv2.CAP_PROP_POS_MSEC, sec*1000)
    hasFrames, image = vidfeed.read()
    if hasFrames:
        # forward pass: 416x416 blob, 1/255 scaling, BGR->RGB swap
        blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(output_layers)
        img = cv2.resize(image, None, fx=1, fy=1, interpolation=cv2.INTER_AREA)
        height, width, channels = img.shape
        # collect candidate detections above the confidence threshold
        class_ids = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # detection is (cx, cy, w, h) normalized to the frame size
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # top-left corner of the box
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)
        # non-maximum suppression drops overlapping boxes
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        print(indexes)
        font = cv2.FONT_HERSHEY_PLAIN
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                print(label)
                # announce the detected class out loud
                engine = pyttsx3.init()
                engine.say(label)
                engine.runAndWait()
                # fix: color by detected class, not by box index, which can
                # exceed len(colors) and does not identify the class
                color = colors[class_ids[i]]
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                cv2.putText(img, label, (x, y + 30), font, 3, color, 3)
        cv2.imshow("image", img)
        return hasFrames
# sample the stream every 0.25 s until the capture stops yielding frames
sec = 0
frameRate = 0.25  # seconds between sampled frames
count=1
success = getFrame(sec)
while success:
    count = count + 1
    sec = sec + frameRate
    sec = round(sec, 2)
    success = getFrame(sec)
"noreply@github.com"
] | noreply@github.com |
40ba18cb7fe2eda9948ef8ce918e76f44ed9cdac | f6e3040ba1bf5ebf85ccc3691d62d929711c5ab7 | /personal_portfolio/settings.py | 8da8602337553126fc5aa542ae118e1b001446c3 | [] | no_license | YdvBikAsh/django3-portfolio | 946e07d9dfacec94c6ee00d56a699cbdf774755b | f31216a31208800bf32fbb5a6db2c902bf10172d | refs/heads/master | 2022-11-30T15:48:57.542321 | 2020-08-15T04:25:19 | 2020-08-15T04:25:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,428 | py | """
Django settings for personal_portfolio project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable (or local_settings.py) instead.
SECRET_KEY = 'h6icws4g5q-=hje3m_xor#yyz9*+pg&v-@*8z$qdn00@u84a8k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# only the production host may serve this site
ALLOWED_HOSTS = ['YDbikash.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'portfolio',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'personal_portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'personal_portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL ='/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
try:
from .local_settings import *
except ImportError:
print("Looks like no local file.You must be on production")
| [
"yadavbeecash33@gmail.com"
] | yadavbeecash33@gmail.com |
815579dd6d44ae403fc012a4f99d9bb8a607d842 | 4aec44fe50fa5c40f80c45bfb160d2fa7a98a0a9 | /students/jsward/lesson07/assignment/linear.py | f11fc105aa5a6d00df68e15542e76269dc162e4d | [] | no_license | UWPCE-PythonCert-ClassRepos/220-Advanced-Summer-2019 | 4e51fde79921e6e75f590bef223bc1b0f118ef41 | 6ffd7b4ab8346076d3b6cc02ca1ebca3bf028697 | refs/heads/master | 2022-12-13T01:22:01.063023 | 2019-09-22T10:21:37 | 2019-09-22T10:21:37 | 194,944,978 | 4 | 18 | null | 2022-12-08T01:22:40 | 2019-07-02T22:51:07 | HTML | UTF-8 | Python | false | false | 3,005 | py | # They are not, in fact, constants...
# pylint: disable=C0103
# pylint: disable=W0703
"""
Lesson 7: Linear
Relational concept Mongo DB equivalent
Database Database
Tables Collections
Rows Documents
Index Index
"""
import cProfile
import csv
import datetime
import logging
import sys
import time
from pymongo import MongoClient
from pymongo import errors as pymongo_errors
# log to both stdout and a timestamped file with the same format
log_format = "%(asctime)s\t%(message)s"
formatter = logging.Formatter(log_format)
# NOTE(review): the ':' characters in the filename are invalid on Windows --
# confirm this script only targets POSIX systems
file_handler = logging.FileHandler("mongo_{}.log".format(datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")))
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
# local MongoDB instance; all collections live in the "assignment" database
mongo_client = MongoClient("mongodb://localhost:27017")
assignment_db = mongo_client["assignment"]
def import_data(data_dir, *files):
    """Import one or more CSV files into MongoDB, one collection per file.

    :param data_dir: directory containing the CSV files
    :param files: CSV file names; the collection name is the file name
        without its extension
    :return: list of tuples (records_inserted, records_before,
        records_after, elapsed_seconds), one per file, in input order
    """
    list_of_tuples = []
    for file_path in files:
        processed = 0
        # collection is named after the file, e.g. "customers.csv" -> "customers"
        collection_name = file_path.split(".")[0]
        try:
            # count pre-existing documents (linear scan of the collection)
            count_prior = sum(1 for _ in assignment_db[collection_name].find())
        except Exception:
            logger.info("No existing records found in collection %s", collection_name)
            count_prior = 0
        with open("/".join([data_dir, file_path])) as file:
            reader = csv.reader(file, delimiter=",")
            header = False  # becomes the list of column names after the first row
            start_time = time.time()
            for row in reader:
                if not header:
                    # first row holds the column names; strip a possible UTF-8 BOM
                    header = [h.strip("\ufeff") for h in row]
                else:
                    # map column names to this row's values
                    data = {header[i]: v for i, v in enumerate(row)}
                    try:
                        assignment_db[collection_name].insert_one(data)
                        processed += 1
                        logger.debug("Inserted record %s into collection %s", data, collection_name)
                    except pymongo_errors.ServerSelectionTimeoutError as ex:
                        # server unreachable: abandon the rest of this file
                        logger.error("Timeout or connection refused when connecting to MongoDB: %s", ex)
                        break
                    except Exception as ex:
                        # any other insert failure: skip this record, keep going
                        logger.error("Error inserting record %s into table %s in MongoDB %s Error: %s",
                                     data, assignment_db.name, mongo_client, ex)
                        continue
            end_time = time.time()
            list_of_tuples.append(tuple([processed, count_prior, (count_prior + processed), (end_time - start_time)]))
            logger.info("Inserted %s records into collection %s in %s", processed, collection_name, (end_time - start_time))
            logger.info("Collection now contains %s records", (count_prior + processed))
    return list_of_tuples
if __name__ == "__main__":
    # default run: load the two sample CSVs from the local data directory
    import_data('data', 'customers.csv', 'products.csv')
    # print(results)
| [
"james@Jamess-MacBook-Pro.local"
] | james@Jamess-MacBook-Pro.local |
691a94982dc700efbbb5c794b5cbfbf7c32a3c7b | 8159f252c93f6c0668fa814dff6b1c52d48c6cfd | /ch02_test_bigram.py | 077349fcaf79717f0dca52ae5ee69ac284325248 | [] | no_license | hirobo/nlptutorial_myscript | 67772b9c9011e256664c35149ae5e62350f72b4b | 4269d5f38d76ba1fdb809d6ebb3fcc2947a2199f | refs/heads/master | 2016-08-10T23:53:19.117274 | 2016-04-05T17:07:32 | 2016-04-05T17:07:32 | 55,445,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # coding: utf-8
from collections import defaultdict
import math
import ch01_test_unigram
def load_model(model_file):
    """Load a bigram model file into a dict of probabilities.

    The file format is identical to the unigram model, so the unigram
    loader is reused.
    """
    # format is the same as the unigram model
    return ch01_test_unigram.load_model(model_file)
def test_bigram(model_file, test_file):
    """Compute the per-word entropy of a bigram model on a test corpus.

    Uses linearly interpolated smoothing of the unigram (lambda_1) and
    bigram (lambda_2) probabilities; unknown words fall back to 1/V.

    :param model_file: path to the bigram model file
    :param test_file: path to tokenized test text, one sentence per line
    :return: average negative log2 probability per word (bits)
    """
    H = 0  # accumulated negative log-likelihood
    W = 0  # word count
    lambda_1 = 0.95  # unigram interpolation weight
    lambda_2 = 0.95  # bigram interpolation weight
    probs = load_model(model_file)
    V = len(probs)  # vocabulary size used for unknown-word smoothing
    # fix: close the corpus deterministically (the file handle was leaked)
    with open(test_file, "r") as corpus:
        for line in corpus:
            words = line.strip().split()
            words.insert(0, "<s>")
            words.append("</s>")
            for i in range(1, len(words)):  # begins after <s>
                w_i1 = words[i-1]
                w_i = words[i]
                n_gram = "%s %s" % (w_i1, w_i)
                if w_i not in probs:
                    probs[w_i] = 0
                if n_gram not in probs:
                    probs[n_gram] = 0
                P1 = lambda_1*probs[w_i] + (1 - lambda_1)/V  # smoothed 1-gram
                P2 = lambda_2*probs[n_gram] + (1 - lambda_2)*P1  # smoothed 2-gram
                H += -math.log(P2, 2)
                W += 1
    return H / W
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', dest='model_file', default="output/wiki-en-train_bigram.model", help='input model file')
    parser.add_argument('-t', '--test', dest='test_file', default="../data/wiki-en-test.word", help='input test data')
    args = parser.parse_args()
    # fix: local variable was misspelled "entroty"
    entropy = test_bigram(args.model_file, args.test_file)
    print("entropy = %f" % (entropy))
"hiroko.hirobo@gmail.com"
] | hiroko.hirobo@gmail.com |
4ad5a4ee7546234cfab7fe73fe7792776a900554 | eb9b1197192e79b5deb60cee45a9d90f3a442ce7 | /env/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py | b073af6af00b05eea644d24669c324f50e2edc83 | [
"MIT"
] | permissive | lindsaygrizzard/ArtUP | c8f2c816fde14ff554330673fe3852523537768d | 3eb4a5c4aa1b9708b37f8a15edc5782ffd6d7588 | refs/heads/master | 2020-05-16T22:20:19.545464 | 2016-02-15T03:47:18 | 2016-02-15T03:47:18 | 35,400,410 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 62,971 | py | # mssql/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
Auto Increment Behavior
-----------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on an integer primary key.
SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
described at :paramref:`.Column.autoincrement`; this means
that by default, the first integer primary key column in a :class:`.Table`
will be considered to be the identity column and will generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``autoincrement=False`` on all integer primary key columns::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
.. note::
An INSERT statement which refers to an explicit value for such
a column is prohibited by SQL Server, however SQLAlchemy will detect this
and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
time. As this is not a high performing process, care should be taken to
set the ``autoincrement`` flag appropriately for columns that will not
actually require IDENTITY behavior.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the parameters of the ``IDENTITY`` value is supported
using the :class:`.schema.Sequence` object. While this object normally
represents an explicit "sequence" for supporting backends, on SQL Server it is
re-purposed to specify behavior regarding the identity column, including
support of the "start" and "increment" values::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah', start=100, increment=10),
primary_key=True),
Column('name', String(20))
).create(some_engine)
would yield:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core :func:`.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This
is an auxilliary use case suitable for testing and bulk insert scenarios.
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
.. versionadded:: 0.8 Character collations are now part of the base string
types.
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_large_type_deprecation:
Large Text/Binary Type Deprecation
----------------------------------
Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server
in a future release. SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user. The
behavior of this flag is as follows:
* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
respectively. This is a new behavior as of the addition of this flag.
* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NTEXT``, ``TEXT``, and ``IMAGE``,
respectively. This is the long-standing behavior of these types.
* The flag begins with the value ``None``, before a database connection is
established. If the dialect is used to render DDL without the flag being
set, it is interpreted the same as ``False``.
* On first connection, the dialect detects if SQL Server version 2012 or greater
is in use; if the flag is still at ``None``, it sets it to ``True`` or
``False`` based on whether 2012 or greater is detected.
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
:class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain
fixed and always output exactly that type.
.. versionadded:: 1.0.0
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`. and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
.. versionadded:: 0.8
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
.. versionadded:: 0.9.2
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. versionadded:: 0.8
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. versionadded:: 0.8
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
Enabling Snapshot Isolation
---------------------------
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
* reflection of indexes does not work with versions older than
SQL Server 2005
"""
import datetime
import operator
import re
from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, util as sql_util
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
from . import information_schema as ischema
# http://sqlserverbuilds.blogspot.com/
# major version numbers of SQL Server releases, compared against
# dialect.server_version_info to gate version-dependent behavior
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
# T-SQL reserved words; identifiers matching these are bracket-quoted
# by MSIdentifierPreparer
RESERVED_WORDS = set(
    ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
     'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
     'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
     'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
     'containstable', 'continue', 'convert', 'create', 'cross', 'current',
     'current_date', 'current_time', 'current_timestamp', 'current_user',
     'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
     'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
     'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
     'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
     'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
     'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
     'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
     'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
     'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
     'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
     'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
     'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
     'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
     'reconfigure', 'references', 'replication', 'restore', 'restrict',
     'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
     'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
     'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
     'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
     'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
     'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
     'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
     'writetext',
     ])
class REAL(sqltypes.REAL):
    """The SQL Server REAL datatype, forced to FLOAT(24) precision."""
    __visit_name__ = 'REAL'
    def __init__(self, **kw):
        # REAL is a synonym for FLOAT(24) on SQL server
        kw['precision'] = 24
        super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
    """The MSSQL TINYINT type."""
    __visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
    """Date type that binds plain ``date`` objects as full datetimes
    and converts string or datetime results back to ``date``."""
    def bind_processor(self, dialect):
        def process(value):
            # exact type check: datetime.datetime values pass through
            if type(value) == datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            else:
                return value
        return process
    # matches "YYYY-MM-DD" style strings returned by some DBAPIs
    _reg = re.compile(r"(\d+)-(\d+)-(\d+)")
    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.date()
            elif isinstance(value, util.string_types):
                return datetime.date(*[
                    int(x or 0)
                    for x in self._reg.match(value).groups()
                ])
            else:
                return value
        return process
class TIME(sqltypes.TIME):
    """The MSSQL TIME type, with optional fractional-second precision
    rendered as TIME(n) in DDL."""
    def __init__(self, precision=None, **kwargs):
        self.precision = precision
        super(TIME, self).__init__()
    # fixed epoch date used when a bare time value must be sent to the
    # DBAPI as a full datetime
    __zero_date = datetime.date(1900, 1, 1)
    def bind_processor(self, dialect):
        def process(value):
            if isinstance(value, datetime.datetime):
                value = datetime.datetime.combine(
                    self.__zero_date, value.time())
            elif isinstance(value, datetime.time):
                value = datetime.datetime.combine(self.__zero_date, value)
            return value
        return process
    # matches "HH:MM:SS[.ffffff]" strings returned by some DBAPIs
    _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
    def result_processor(self, dialect, coltype):
        def process(value):
            if isinstance(value, datetime.datetime):
                return value.time()
            elif isinstance(value, util.string_types):
                return datetime.time(*[
                    int(x or 0)
                    for x in self._reg.match(value).groups()])
            else:
                return value
        return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
    """DateTime type using the date-coercing bind processor."""
    pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
    """The MSSQL SMALLDATETIME type."""
    __visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
    """The MSSQL DATETIME2 type."""
    __visit_name__ = 'DATETIME2'
    def __init__(self, precision=None, **kw):
        super(DATETIME2, self).__init__(**kw)
        # fractional-second precision; rendered as DATETIME2(n) in DDL
        self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
    """The MSSQL DATETIMEOFFSET type."""
    __visit_name__ = 'DATETIMEOFFSET'
    def __init__(self, precision=None, **kwargs):
        # rendered as DATETIMEOFFSET(n) in DDL when set
        self.precision = precision
class _StringType(object):
    """Base for MSSQL string types."""
    def __init__(self, collation=None):
        # 'collation' is rendered as a COLLATE clause in DDL via
        # MSTypeCompiler._extend()
        super(_StringType, self).__init__(collation=collation)
class NTEXT(sqltypes.UnicodeText):
    """MSSQL NTEXT type, for variable-length unicode text up to 2^30
    characters."""
    # NOTE: when deprecate_large_types is enabled the compiler emits
    # NVARCHAR(max) for unicode text instead of NTEXT
    __visit_name__ = 'NTEXT'
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
    """The MSSQL VARBINARY type.
    This type extends both :class:`.types.VARBINARY` and
    :class:`.types.LargeBinary`. In "deprecate_large_types" mode,
    the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
    on SQL Server.
    .. versionadded:: 1.0.0
    .. seealso::
        :ref:`mssql_large_type_deprecation`
    """
    __visit_name__ = 'VARBINARY'
class IMAGE(sqltypes.LargeBinary):
    """The MSSQL IMAGE type."""
    __visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
    """The MSSQL BIT type, used to represent booleans."""
    __visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
    """The MSSQL MONEY type."""
    __visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
    """The MSSQL SMALLMONEY type."""
    __visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    """The MSSQL UNIQUEIDENTIFIER type."""
    __visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
    """The MSSQL SQL_VARIANT type."""
    __visit_name__ = 'SQL_VARIANT'
# old names; retained as aliases for backwards compatibility
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
# lowercase INFORMATION_SCHEMA type names mapped to SQLAlchemy types,
# used when reflecting table columns
ischema_names = {
    'int': INTEGER,
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'varchar': VARCHAR,
    'nvarchar': NVARCHAR,
    'char': CHAR,
    'nchar': NCHAR,
    'text': TEXT,
    'ntext': NTEXT,
    'decimal': DECIMAL,
    'numeric': NUMERIC,
    'float': FLOAT,
    'datetime': DATETIME,
    'datetime2': DATETIME2,
    'datetimeoffset': DATETIMEOFFSET,
    'date': DATE,
    'time': TIME,
    'smalldatetime': SMALLDATETIME,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'bit': BIT,
    'real': REAL,
    'image': IMAGE,
    'timestamp': TIMESTAMP,
    'money': MONEY,
    'smallmoney': SMALLMONEY,
    'uniqueidentifier': UNIQUEIDENTIFIER,
    'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
    """Type compiler for MSSQL: renders COLLATE clauses, '(max)'
    lengths, and degrades DATE/TIME to DATETIME on older servers."""
    def _extend(self, spec, type_, length=None):
        """Extend a string-type declaration with standard SQL
        COLLATE annotations.
        """
        if getattr(type_, 'collation', None):
            collation = 'COLLATE %s' % type_.collation
        else:
            collation = None
        if not length:
            length = type_.length
        if length:
            spec = spec + "(%s)" % length
        return ' '.join([c for c in (spec, collation)
                         if c is not None])
    def visit_FLOAT(self, type_, **kw):
        precision = getattr(type_, 'precision', None)
        if precision is None:
            return "FLOAT"
        else:
            return "FLOAT(%(precision)s)" % {'precision': precision}
    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"
    def visit_DATETIMEOFFSET(self, type_, **kw):
        if type_.precision:
            return "DATETIMEOFFSET(%s)" % type_.precision
        else:
            return "DATETIMEOFFSET"
    def visit_TIME(self, type_, **kw):
        precision = getattr(type_, 'precision', None)
        if precision:
            return "TIME(%s)" % precision
        else:
            return "TIME"
    def visit_DATETIME2(self, type_, **kw):
        precision = getattr(type_, 'precision', None)
        if precision:
            return "DATETIME2(%s)" % precision
        else:
            return "DATETIME2"
    def visit_SMALLDATETIME(self, type_, **kw):
        return "SMALLDATETIME"
    def visit_unicode(self, type_, **kw):
        return self.visit_NVARCHAR(type_, **kw)
    def visit_text(self, type_, **kw):
        # Text renders as VARCHAR(max) under deprecate_large_types
        if self.dialect.deprecate_large_types:
            return self.visit_VARCHAR(type_, **kw)
        else:
            return self.visit_TEXT(type_, **kw)
    def visit_unicode_text(self, type_, **kw):
        # UnicodeText renders as NVARCHAR(max) under deprecate_large_types
        if self.dialect.deprecate_large_types:
            return self.visit_NVARCHAR(type_, **kw)
        else:
            return self.visit_NTEXT(type_, **kw)
    def visit_NTEXT(self, type_, **kw):
        return self._extend("NTEXT", type_)
    def visit_TEXT(self, type_, **kw):
        return self._extend("TEXT", type_)
    def visit_VARCHAR(self, type_, **kw):
        # unbounded VARCHAR renders as VARCHAR(max)
        return self._extend("VARCHAR", type_, length=type_.length or 'max')
    def visit_CHAR(self, type_, **kw):
        return self._extend("CHAR", type_)
    def visit_NCHAR(self, type_, **kw):
        return self._extend("NCHAR", type_)
    def visit_NVARCHAR(self, type_, **kw):
        return self._extend("NVARCHAR", type_, length=type_.length or 'max')
    def visit_date(self, type_, **kw):
        # fall back to DATETIME below SQL Server 2008
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_, **kw)
        else:
            return self.visit_DATE(type_, **kw)
    def visit_time(self, type_, **kw):
        # fall back to DATETIME below SQL Server 2008
        if self.dialect.server_version_info < MS_2008_VERSION:
            return self.visit_DATETIME(type_, **kw)
        else:
            return self.visit_TIME(type_, **kw)
    def visit_large_binary(self, type_, **kw):
        # LargeBinary renders as VARBINARY(max) under deprecate_large_types
        if self.dialect.deprecate_large_types:
            return self.visit_VARBINARY(type_, **kw)
        else:
            return self.visit_IMAGE(type_, **kw)
    def visit_IMAGE(self, type_, **kw):
        return "IMAGE"
    def visit_VARBINARY(self, type_, **kw):
        return self._extend(
            "VARBINARY",
            type_,
            length=type_.length or 'max')
    def visit_boolean(self, type_, **kw):
        return self.visit_BIT(type_)
    def visit_BIT(self, type_, **kw):
        return "BIT"
    def visit_MONEY(self, type_, **kw):
        return "MONEY"
    def visit_SMALLMONEY(self, type_, **kw):
        return 'SMALLMONEY'
    def visit_UNIQUEIDENTIFIER(self, type_, **kw):
        return "UNIQUEIDENTIFIER"
    def visit_SQL_VARIANT(self, type_, **kw):
        return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
    """Execution context handling IDENTITY_INSERT toggling and
    lastrowid retrieval for SQL Server."""
    # True while IDENTITY_INSERT has been switched ON for an INSERT
    # that supplies an explicit value for the IDENTITY column
    _enable_identity_insert = False
    # True when lastrowid must be fetched with a follow-up SELECT
    _select_lastrowid = False
    _result_proxy = None
    _lastrowid = None
    def _opt_encode(self, statement):
        # pre-encode the statement when the DBAPI cannot accept
        # unicode statements directly
        if not self.dialect.supports_unicode_statements:
            return self.dialect._encoder(statement)[0]
        else:
            return statement
    def pre_exec(self):
        """Activate IDENTITY_INSERT if needed."""
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None
            if insert_has_sequence:
                # IDENTITY_INSERT is needed only when the statement
                # supplies an explicit value for the IDENTITY column,
                # either via compiled params or statement params
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0] or \
                    (
                        self.compiled.statement.parameters and (
                            (
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters[0]
                            ) or (
                                not
                                self.compiled.statement._has_multi_parameters
                                and
                                seq_column.key in
                                self.compiled.statement.parameters
                            )
                        )
                    )
            else:
                self._enable_identity_insert = False
            self._select_lastrowid = insert_has_sequence and \
                not self.compiled.returning and \
                not self._enable_identity_insert and \
                not self.executemany
            if self._enable_identity_insert:
                self.root_connection._cursor_execute(
                    self.cursor,
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s ON" %
                        self.dialect.identifier_preparer.format_table(tbl)),
                    (),
                    self)
    def post_exec(self):
        """Disable IDENTITY_INSERT if enabled."""
        conn = self.root_connection
        if self._select_lastrowid:
            if self.dialect.use_scope_identity:
                conn._cursor_execute(
                    self.cursor,
                    "SELECT scope_identity() AS lastrowid", (), self)
            else:
                conn._cursor_execute(self.cursor,
                                     "SELECT @@identity AS lastrowid",
                                     (),
                                     self)
            # fetchall() ensures the cursor is consumed without closing it
            row = self.cursor.fetchall()[0]
            self._lastrowid = int(row[0])
        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            # OUTPUT rows must be fully buffered before further cursor use
            self._result_proxy = engine.FullyBufferedResultProxy(self)
        if self._enable_identity_insert:
            conn._cursor_execute(
                self.cursor,
                self._opt_encode(
                    "SET IDENTITY_INSERT %s OFF" %
                    self.dialect.identifier_preparer. format_table(
                        self.compiled.statement.table)),
                (),
                self)
    def get_lastrowid(self):
        # value captured by post_exec()
        return self._lastrowid
    def handle_dbapi_exception(self, e):
        # best-effort restore of IDENTITY_INSERT after a failure
        if self._enable_identity_insert:
            try:
                self.cursor.execute(
                    self._opt_encode(
                        "SET IDENTITY_INSERT %s OFF" %
                        self.dialect.identifier_preparer. format_table(
                            self.compiled.statement.table)))
            except Exception:
                pass
    def get_result_proxy(self):
        if self._result_proxy:
            return self._result_proxy
        else:
            return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
    """Statement compiler with SQL Server specializations:
    TOP / ROW_NUMBER() pagination, OUTPUT for RETURNING, and aliasing
    of schema-qualified tables."""
    returning_precedes_values = True
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond',
            'microseconds': 'microsecond'
        })
    def __init__(self, *args, **kwargs):
        # cache of aliases generated for schema-qualified tables;
        # see _schema_aliased_table()
        self.tablealiases = {}
        super(MSSQLCompiler, self).__init__(*args, **kwargs)
    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"
    def visit_current_date_func(self, fn, **kw):
        return "GETDATE()"
    def visit_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)
    def visit_char_length_func(self, fn, **kw):
        return "LEN%s" % self.function_argspec(fn, **kw)
    def visit_concat_op_binary(self, binary, operator, **kw):
        # T-SQL uses + for string concatenation
        return "%s + %s" % \
            (self.process(binary.left, **kw),
             self.process(binary.right, **kw))
    def visit_true(self, expr, **kw):
        return '1'
    def visit_false(self, expr, **kw):
        return '0'
    def visit_match_op_binary(self, binary, operator, **kw):
        return "CONTAINS (%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw))
    def get_select_precolumns(self, select, **kw):
        """MS-SQL puts TOP, its version of LIMIT, here"""
        s = ""
        if select._distinct:
            s += "DISTINCT "
        if select._simple_int_limit and not select._offset:
            # ODBC drivers and possibly others
            # don't support bind params in the SELECT clause on SQL Server.
            # so have to use literal here.
            s += "TOP %d " % select._limit
        if s:
            return s
        else:
            return compiler.SQLCompiler.get_select_precolumns(
                self, select, **kw)
    def get_from_hint_text(self, table, text):
        return text
    def get_crud_hint_text(self, table, text):
        return text
    def limit_clause(self, select, **kw):
        # Limit in mssql is after the select keyword
        return ""
    def visit_select(self, select, **kwargs):
        """Look for ``LIMIT`` and OFFSET in a select statement, and if
        so tries to wrap it in a subquery with ``row_number()`` criterion.
        """
        if (
            (
                not select._simple_int_limit and
                select._limit_clause is not None
            ) or (
                select._offset_clause is not None and
                not select._simple_int_offset or select._offset
            )
        ) and not getattr(select, '_mssql_visit', None):
            # to use ROW_NUMBER(), an ORDER BY is required.
            if not select._order_by_clause.clauses:
                raise exc.CompileError('MSSQL requires an order_by when '
                                       'using an OFFSET or a non-simple '
                                       'LIMIT clause')
            _order_by_clauses = select._order_by_clause.clauses
            limit_clause = select._limit_clause
            offset_clause = select._offset_clause
            kwargs['select_wraps_for'] = select
            select = select._generate()
            # flag so the wrapped SELECT isn't transformed again
            select._mssql_visit = True
            select = select.column(
                sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
                .label("mssql_rn")).order_by(None).alias()
            mssql_rn = sql.column('mssql_rn')
            # outer SELECT excludes the synthetic row-number column
            limitselect = sql.select([c for c in select.c if
                                      c.key != 'mssql_rn'])
            if offset_clause is not None:
                limitselect.append_whereclause(mssql_rn > offset_clause)
                if limit_clause is not None:
                    limitselect.append_whereclause(
                        mssql_rn <= (limit_clause + offset_clause))
            else:
                limitselect.append_whereclause(
                    mssql_rn <= (limit_clause))
            return self.process(limitselect, **kwargs)
        else:
            return compiler.SQLCompiler.visit_select(self, select, **kwargs)
    def _schema_aliased_table(self, table):
        # return (and cache) an alias for a schema-qualified table,
        # else None
        if getattr(table, 'schema', None) is not None:
            if table not in self.tablealiases:
                self.tablealiases[table] = table.alias()
            return self.tablealiases[table]
        else:
            return None
    def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
        if mssql_aliased is table or iscrud:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)
        # alias schema-qualified tables
        alias = self._schema_aliased_table(table)
        if alias is not None:
            return self.process(alias, mssql_aliased=table, **kwargs)
        else:
            return super(MSSQLCompiler, self).visit_table(table, **kwargs)
    def visit_alias(self, alias, **kwargs):
        # translate for schema-qualified table aliases
        kwargs['mssql_aliased'] = alias.original
        return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % \
            (field, self.process(extract.expr, **kw))
    def visit_savepoint(self, savepoint_stmt):
        return "SAVE TRANSACTION %s" % \
            self.preparer.format_savepoint(savepoint_stmt)
    def visit_rollback_to_savepoint(self, savepoint_stmt):
        return ("ROLLBACK TRANSACTION %s"
                % self.preparer.format_savepoint(savepoint_stmt))
    def visit_column(self, column, add_to_result_map=None, **kwargs):
        if column.table is not None and \
                (not self.isupdate and not self.isdelete) or \
                self.is_subquery():
            # translate for schema-qualified table aliases
            t = self._schema_aliased_table(column.table)
            if t is not None:
                converted = expression._corresponding_column_or_error(
                    t, column)
                if add_to_result_map is not None:
                    add_to_result_map(
                        column.name,
                        column.name,
                        (column, column.name, column.key),
                        column.type
                    )
                return super(MSSQLCompiler, self).\
                    visit_column(converted, **kwargs)
        return super(MSSQLCompiler, self).visit_column(
            column, add_to_result_map=add_to_result_map, **kwargs)
    def visit_binary(self, binary, **kwargs):
        """Move bind parameters to the right-hand side of an operator, where
        possible.
        """
        if (
            isinstance(binary.left, expression.BindParameter)
            and binary.operator == operator.eq
            and not isinstance(binary.right, expression.BindParameter)
        ):
            return self.process(
                expression.BinaryExpression(binary.right,
                                            binary.left,
                                            binary.operator),
                **kwargs)
        return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
    def returning_clause(self, stmt, returning_cols):
        # RETURNING is rendered as T-SQL OUTPUT, referencing the
        # "inserted" / "deleted" pseudo-tables
        if self.isinsert or self.isupdate:
            target = stmt.table.alias("inserted")
        else:
            target = stmt.table.alias("deleted")
        adapter = sql_util.ClauseAdapter(target)
        columns = [
            self._label_select_column(None, adapter.traverse(c),
                                      True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]
        return 'OUTPUT ' + ', '.join(columns)
    def get_cte_preamble(self, recursive):
        # SQL Server finds it too inconvenient to accept
        # an entirely optional, SQL standard specified,
        # "RECURSIVE" word with their "WITH",
        # so here we go
        return "WITH"
    def label_select_column(self, select, column, asfrom):
        if isinstance(column, expression.Function):
            return column.label(None)
        else:
            return super(MSSQLCompiler, self).\
                label_select_column(select, column, asfrom)
    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
        # SQLAlchemy doesn't use
        return ''
    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)
        # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""
    def update_from_clause(self, update_stmt,
                           from_table, extra_froms,
                           from_hints,
                           **kw):
        """Render the UPDATE..FROM clause specific to MSSQL.
        In MSSQL, if the UPDATE statement involves an alias of the table to
        be updated, then the table itself must be added to the FROM list as
        well. Otherwise, it is optional. Here, we add it regardless.
        """
        return "FROM " + ', '.join(
            t._compiler_dispatch(self, asfrom=True,
                                 fromhints=from_hints, **kw)
            for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
    """A subclass of MSSQLCompiler which disables the usage of bind
    parameters where not allowed natively by MS-SQL.
    A dialect may use this compiler on a platform where native
    binds are used.
    """
    ansi_bind_rules = True
    def visit_in_op_binary(self, binary, operator, **kw):
        # render IN lists with inline literals rather than binds
        kw['literal_binds'] = True
        return "%s IN %s" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw)
        )
    def visit_notin_op_binary(self, binary, operator, **kw):
        # render NOT IN lists with inline literals rather than binds
        kw['literal_binds'] = True
        return "%s NOT IN %s" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw)
        )
    def render_literal_value(self, value, type_):
        """
        For date and datetime values, convert to a string
        format acceptable to MSSQL. That seems to be the
        so-called ODBC canonical date format which looks
        like this:
        yyyy-mm-dd hh:mi:ss.mmm(24h)
        For other data types, call the base class implementation.
        """
        # datetime and date are both subclasses of datetime.date
        if issubclass(type(value), datetime.date):
            # SQL Server wants single quotes around the date string.
            return "'" + str(value) + "'"
        else:
            return super(MSSQLStrictCompiler, self).\
                render_literal_value(value, type_)
class MSDDLCompiler(compiler.DDLCompiler):
    """DDL compiler handling IDENTITY columns, CLUSTERED options and
    INCLUDE columns for SQL Server."""
    def get_column_specification(self, column, **kwargs):
        colspec = (
            self.preparer.format_column(column) + " "
            + self.dialect.type_compiler.process(
                column.type, type_expression=column)
        )
        # render NULL / NOT NULL explicitly when nullability is known
        if column.nullable is not None:
            if not column.nullable or column.primary_key or \
                    isinstance(column.default, sa_schema.Sequence):
                colspec += " NOT NULL"
            else:
                colspec += " NULL"
        if column.table is None:
            raise exc.CompileError(
                "mssql requires Table-bound columns "
                "in order to generate DDL")
        # install an IDENTITY clause if we have either an explicit
        # Sequence default or an implicit autoincrement column
        if isinstance(column.default, sa_schema.Sequence):
            if column.default.start == 0:
                start = 0
            else:
                start = column.default.start or 1
            colspec += " IDENTITY(%s,%s)" % (start,
                                             column.default.increment or 1)
        elif column is column.table._autoincrement_column:
            colspec += " IDENTITY(1,1)"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        return colspec
    def visit_create_index(self, create, include_schema=False):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        # handle clustering option
        if index.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "
        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table),
                ', '.join(
                    self.sql_compiler.process(expr,
                                              include_table=False,
                                              literal_binds=True) for
                    expr in index.expressions)
            )
        # handle other included columns (mssql_include option)
        if index.dialect_options['mssql']['include']:
            inclusions = [index.table.c[col]
                          if isinstance(col, util.string_types) else col
                          for col in
                          index.dialect_options['mssql']['include']
                          ]
            text += " INCLUDE (%s)" \
                % ', '.join([preparer.quote(c.name)
                             for c in inclusions])
        return text
    def visit_drop_index(self, drop):
        return "\nDROP INDEX %s ON %s" % (
            self._prepared_index_name(drop.element, include_schema=False),
            self.preparer.format_table(drop.element.table)
        )
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "
        # mssql_clustered option
        if constraint.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_unique_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        text += "UNIQUE "
        # mssql_clustered option
        if constraint.dialect_options['mssql']['clustered']:
            text += "CLUSTERED "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer using T-SQL bracket quoting."""
    reserved_words = RESERVED_WORDS
    def __init__(self, dialect):
        # brackets instead of the ANSI double-quote delimiters
        super(MSIdentifierPreparer, self).__init__(
            dialect, initial_quote='[', final_quote=']')
    def _escape_identifier(self, value):
        # no escaping is applied inside bracketed identifiers
        return value
    def quote_schema(self, schema, force=None):
        """Prepare a quoted table and schema name, quoting each
        dot-separated component individually."""
        return '.'.join(
            self.quote(part, force) for part in schema.split('.'))
def _db_plus_owner_listing(fn):
    """Decorator for schema-listing reflection methods: resolve the
    ``schema`` argument into (dbname, owner) and run the wrapped
    function with the connection switched to that database."""
    def wrap(dialect, connection, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        args = (dialect, connection, dbname, owner, schema)
        return _switch_db(dbname, connection, fn, *args, **kw)
    return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
    """Decorator for per-table reflection methods: resolve the
    ``schema`` argument into (dbname, owner) and run the wrapped
    function with the connection switched to that database."""
    def wrap(dialect, connection, tablename, schema=None, **kw):
        dbname, owner = _owner_plus_db(dialect, schema)
        args = (dialect, connection, tablename, dbname, owner, schema)
        return _switch_db(dbname, connection, fn, *args, **kw)
    return update_wrapper(wrap, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.scalar("select db_name()")
connection.execute("use %s" % dbname)
try:
return fn(*arg, **kw)
finally:
if dbname:
connection.execute("use %s" % current_db)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return schema.split(".", 1)
else:
return None, schema
class MSDialect(default.DefaultDialect):
    name = 'mssql'
    supports_default_values = True
    supports_empty_insert = False
    execution_ctx_cls = MSExecutionContext
    # use SCOPE_IDENTITY() rather than @@IDENTITY for lastrowid;
    # see MSExecutionContext.post_exec()
    use_scope_identity = True
    max_identifier_length = 128
    # default owner/schema used when none is specified
    schema_name = "dbo"
    colspecs = {
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate,
        sqltypes.Time: TIME,
    }
    ischema_names = ischema_names
    supports_native_boolean = False
    supports_unicode_binds = True
    postfetch_lastrowid = True
    server_version_info = ()
    statement_compiler = MSSQLCompiler
    ddl_compiler = MSDDLCompiler
    type_compiler = MSTypeCompiler
    preparer = MSIdentifierPreparer
    # dialect-specific keyword arguments accepted on schema constructs
    # (e.g. mssql_clustered, mssql_include)
    construct_arguments = [
        (sa_schema.PrimaryKeyConstraint, {
            "clustered": False
        }),
        (sa_schema.UniqueConstraint, {
            "clustered": False
        }),
        (sa_schema.Index, {
            "clustered": False,
            "include": None
        })
    ]
    def __init__(self,
                 query_timeout=None,
                 use_scope_identity=True,
                 max_identifier_length=None,
                 schema_name="dbo",
                 deprecate_large_types=None, **opts):
        # query_timeout is coerced to int; 0 when unset
        self.query_timeout = int(query_timeout or 0)
        self.schema_name = schema_name
        self.use_scope_identity = use_scope_identity
        self.max_identifier_length = int(max_identifier_length or 0) or \
            self.max_identifier_length
        # None means "decide from server version"; see
        # _setup_version_attributes()
        self.deprecate_large_types = deprecate_large_types
        super(MSDialect, self).__init__(**opts)
    def do_savepoint(self, connection, name):
        """Emit a savepoint, ensuring a transaction is in progress first."""
        # give the DBAPI a push
        connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
        super(MSDialect, self).do_savepoint(connection, name)
    def do_release_savepoint(self, connection, name):
        # SQL Server does not support RELEASE SAVEPOINT
        pass
    def initialize(self, connection):
        """Run base initialization, then apply version-dependent flags."""
        super(MSDialect, self).initialize(connection)
        self._setup_version_attributes()
    def _setup_version_attributes(self):
        # warn when the reported major version does not look like any
        # known SQL Server release
        if self.server_version_info[0] not in list(range(8, 17)):
            # FreeTDS with version 4.2 seems to report here
            # a number like "95.10.255". Don't know what
            # that is. So emit warning.
            util.warn(
                "Unrecognized server version info '%s'. Version specific "
                "behaviors may not function properly. If using ODBC "
                "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
                "is configured in the FreeTDS configuration." %
                ".".join(str(x) for x in self.server_version_info))
        if self.server_version_info >= MS_2005_VERSION and \
                'implicit_returning' not in self.__dict__:
            self.implicit_returning = True
        if self.server_version_info >= MS_2008_VERSION:
            self.supports_multivalues_insert = True
        if self.deprecate_large_types is None:
            # default: deprecate TEXT/NTEXT/IMAGE on SQL Server 2012+
            self.deprecate_large_types = \
                self.server_version_info >= MS_2012_VERSION
    def _get_default_schema_name(self, connection):
        # sys.database_principals is not available before SQL 2005
        if self.server_version_info < MS_2005_VERSION:
            return self.schema_name
        query = sql.text("""
            SELECT default_schema_name FROM
            sys.database_principals
            WHERE principal_id=database_principal_id()
        """)
        default_schema_name = connection.scalar(query)
        if default_schema_name is not None:
            return util.text_type(default_schema_name)
        else:
            return self.schema_name
    @_db_plus_owner
    def has_table(self, connection, tablename, dbname, owner, schema):
        """Check for the table's existence via INFORMATION_SCHEMA."""
        columns = ischema.columns
        whereclause = columns.c.table_name == tablename
        if owner:
            whereclause = sql.and_(whereclause,
                                   columns.c.table_schema == owner)
        s = sql.select([columns], whereclause)
        c = connection.execute(s)
        return c.first() is not None
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema names from INFORMATION_SCHEMA.SCHEMATA."""
        s = sql.select([ischema.schemata.c.schema_name],
                       order_by=[ischema.schemata.c.schema_name]
                       )
        schema_names = [r[0] for r in connection.execute(s)]
        return schema_names
    @reflection.cache
    @_db_plus_owner_listing
    def get_table_names(self, connection, dbname, owner, schema, **kw):
        """Return BASE TABLE names for the given owner."""
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'BASE TABLE'
                       ),
                       order_by=[tables.c.table_name]
                       )
        table_names = [r[0] for r in connection.execute(s)]
        return table_names
    @reflection.cache
    @_db_plus_owner_listing
    def get_view_names(self, connection, dbname, owner, schema, **kw):
        """Return VIEW names for the given owner."""
        tables = ischema.tables
        s = sql.select([tables.c.table_name],
                       sql.and_(
                           tables.c.table_schema == owner,
                           tables.c.table_type == 'VIEW'
                       ),
                       order_by=[tables.c.table_name]
                       )
        view_names = [r[0] for r in connection.execute(s)]
        return view_names
    @reflection.cache
    @_db_plus_owner
    def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        """Reflect non-primary-key indexes via the sys.* catalogs."""
        # using system catalogs, don't support index reflection
        # below MS 2005
        if self.server_version_info < MS_2005_VERSION:
            return []
        rp = connection.execute(
            sql.text("select ind.index_id, ind.is_unique, ind.name "
                     "from sys.indexes as ind join sys.tables as tab on "
                     "ind.object_id=tab.object_id "
                     "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                     "where tab.name = :tabname "
                     "and sch.name=:schname "
                     "and ind.is_primary_key=0",
                     bindparams=[
                         sql.bindparam('tabname', tablename,
                                       sqltypes.String(convert_unicode=True)),
                         sql.bindparam('schname', owner,
                                       sqltypes.String(convert_unicode=True))
                     ],
                     typemap={
                         'name': sqltypes.Unicode()
                     }
                     )
        )
        # first pass: one record per index
        indexes = {}
        for row in rp:
            indexes[row['index_id']] = {
                'name': row['name'],
                'unique': row['is_unique'] == 1,
                'column_names': []
            }
        # second pass: attach member column names to each index
        rp = connection.execute(
            sql.text(
                "select ind_col.index_id, ind_col.object_id, col.name "
                "from sys.columns as col "
                "join sys.tables as tab on tab.object_id=col.object_id "
                "join sys.index_columns as ind_col on "
                "(ind_col.column_id=col.column_id and "
                "ind_col.object_id=tab.object_id) "
                "join sys.schemas as sch on sch.schema_id=tab.schema_id "
                "where tab.name=:tabname "
                "and sch.name=:schname",
                bindparams=[
                    sql.bindparam('tabname', tablename,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ],
                typemap={'name': sqltypes.Unicode()}
            ),
        )
        for row in rp:
            if row['index_id'] in indexes:
                indexes[row['index_id']]['column_names'].append(row['name'])
        return list(indexes.values())
    @reflection.cache
    @_db_plus_owner
    def get_view_definition(self, connection, viewname,
                            dbname, owner, schema, **kw):
        """Return the CREATE VIEW source text for *viewname*.

        Joins ``sys.sql_modules`` to ``sys.views``/``sys.schemas`` to find
        the definition of the view owned by *owner*.  Returns None when the
        view does not exist (``scalar()`` on an empty result).
        """
        rp = connection.execute(
            sql.text(
                "select definition from sys.sql_modules as mod, "
                "sys.views as views, "
                "sys.schemas as sch"
                " where "
                "mod.object_id=views.object_id and "
                "views.schema_id=sch.schema_id and "
                "views.name=:viewname and sch.name=:schname",
                bindparams=[
                    sql.bindparam('viewname', viewname,
                                  sqltypes.String(convert_unicode=True)),
                    sql.bindparam('schname', owner,
                                  sqltypes.String(convert_unicode=True))
                ]
            )
        )
        # ``rp`` is always truthy here; kept as-is to avoid behavior change.
        if rp:
            view_def = rp.scalar()
            return view_def
    @reflection.cache
    @_db_plus_owner
    def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
        """Reflect the columns of *tablename* as a list of reflection dicts.

        Base column data (name, type, nullability, default, collation)
        comes from INFORMATION_SCHEMA; identity/autoincrement data is then
        layered on top via ``sp_columns`` and ``ident_seed``/``ident_incr``.
        """
        # Get base columns
        columns = ischema.columns
        if owner:
            whereclause = sql.and_(columns.c.table_name == tablename,
                                   columns.c.table_schema == owner)
        else:
            whereclause = columns.c.table_name == tablename
        s = sql.select([columns], whereclause,
                       order_by=[columns.c.ordinal_position])
        c = connection.execute(s)
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            (name, type, nullable, charlen,
             numericprec, numericscale, default, collation) = (
                 row[columns.c.column_name],
                 row[columns.c.data_type],
                 row[columns.c.is_nullable] == 'YES',
                 row[columns.c.character_maximum_length],
                 row[columns.c.numeric_precision],
                 row[columns.c.numeric_scale],
                 row[columns.c.column_default],
                 row[columns.c.collation_name]
             )
            coltype = self.ischema_names.get(type, None)
            kwargs = {}
            # String/binary types take a length; -1 means (max) in SQL Server.
            if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
                           MSNText, MSBinary, MSVarBinary,
                           sqltypes.LargeBinary):
                if charlen == -1:
                    charlen = 'max'
                kwargs['length'] = charlen
                if collation:
                    kwargs['collation'] = collation
            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" %
                    (type, name))
                coltype = sqltypes.NULLTYPE
            else:
                # REAL takes no precision/scale; all other numerics do.
                if issubclass(coltype, sqltypes.Numeric) and \
                        coltype is not MSReal:
                    kwargs['scale'] = numericscale
                    kwargs['precision'] = numericprec
                coltype = coltype(**kwargs)
            cdict = {
                'name': name,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': False,
            }
            cols.append(cdict)
        # autoincrement and identity
        colmap = {}
        for col in cols:
            colmap[col['name']] = col
        # We also run an sp_columns to check for identity columns:
        # NOTE(review): tablename/owner are %-interpolated into the SQL
        # string rather than bound as parameters -- safe only because they
        # come from reflection metadata, but worth parameterizing.
        cursor = connection.execute("sp_columns @table_name = '%s', "
                                    "@table_owner = '%s'"
                                    % (tablename, owner))
        ic = None
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            # sp_columns result set: row[3] is COLUMN_NAME, row[5] TYPE_NAME.
            (col_name, type_name) = row[3], row[5]
            if type_name.endswith("identity") and col_name in colmap:
                ic = col_name
                colmap[col_name]['autoincrement'] = True
                colmap[col_name]['sequence'] = dict(
                    name='%s_identity' % col_name)
                break
        cursor.close()
        # Identity seed/increment are only queryable on MS 2005 and later.
        if ic is not None and self.server_version_info >= MS_2005_VERSION:
            table_fullname = "%s.%s" % (owner, tablename)
            cursor = connection.execute(
                "select ident_seed('%s'), ident_incr('%s')"
                % (table_fullname, table_fullname)
            )
            row = cursor.first()
            if row is not None and row[0] is not None:
                colmap[ic]['sequence'].update({
                    'start': int(row[0]),
                    'increment': int(row[1])
                })
        return cols
    @reflection.cache
    @_db_plus_owner
    def get_pk_constraint(self, connection, tablename,
                          dbname, owner, schema, **kw):
        """Reflect the primary-key constraint of *tablename*.

        Returns ``{'constrained_columns': [...], 'name': constraint_name}``;
        the column list is empty and the name None when the table has no
        primary key.
        """
        pkeys = []
        TC = ischema.constraints
        C = ischema.key_constraints.alias('C')
        # Primary key constraints
        s = sql.select([C.c.column_name,
                        TC.c.constraint_type,
                        C.c.constraint_name],
                       sql.and_(TC.c.constraint_name == C.c.constraint_name,
                                TC.c.table_schema == C.c.table_schema,
                                C.c.table_name == tablename,
                                C.c.table_schema == owner)
                       )
        c = connection.execute(s)
        constraint_name = None
        for row in c:
            # The join also yields FK/UNIQUE rows; keep only PRIMARY KEY.
            if 'PRIMARY' in row[TC.c.constraint_type.name]:
                pkeys.append(row[0])
                if constraint_name is None:
                    constraint_name = row[C.c.constraint_name.name]
        return {'constrained_columns': pkeys, 'name': constraint_name}
@reflection.cache
@_db_plus_owner
def get_foreign_keys(self, connection, tablename,
dbname, owner, schema, **kw):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias('C')
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,
R.c.table_schema, R.c.table_name, R.c.column_name,
RR.c.constraint_name, RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule],
sql.and_(C.c.table_name == tablename,
C.c.table_schema == owner,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by=[RR.c.constraint_name, R.c.ordinal_position]
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
fknm, scols, rcols = (None, [], [])
def fkey_rec():
return {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec['name'] = rfknm
if not rec['referred_table']:
rec['referred_table'] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec['referred_schema'] = rschema
local_cols, remote_cols = \
rec['constrained_columns'],\
rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
| [
"lindsaygrizzard@gmail.com"
] | lindsaygrizzard@gmail.com |
4ee7c2582572d9c3047f0d40ba2dfdeebec648d4 | d83b03e5ede166cc4b21acf9bb600b7e5c0186cb | /flask/bin/pip | 63fe7d5cd33486c89ab2b45e2e504204a5a8d853 | [] | no_license | amanankur/book_transport | 810f46448c343b1a06df4aeca7ad3a86c55e403c | c6b66c14d17066405631ab9941a5e1fabd456d97 | refs/heads/master | 2021-01-01T05:33:37.307928 | 2015-09-13T22:27:07 | 2015-09-13T22:27:07 | 42,414,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/Users/amanankur/projects/microblog/flask/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Console-script shim generated by pip: strip the "-script.pyw"/".exe"
    # wrapper suffix from argv[0] so pip reports a clean program name, then
    # delegate to pip's real entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"amanankur1110@gmail.com"
] | amanankur1110@gmail.com | |
44025f8e0a8f02def9c3f49323a3d5c96e90b93b | 91ea407d5750c04646a988b46c52ace04af86928 | /models/basic_conv.py | 17e923bf31e0be889fce780d8b1e95555af5c15f | [] | no_license | Quantum-Entropy/PoseGraph | 0d8c39211a9c532e82bd1ff66faeb5d90d0c74cc | a9f91a5aab6336c20413975b0a893f6ed58047cf | refs/heads/master | 2023-02-28T17:03:35.012596 | 2021-02-08T03:40:41 | 2021-02-08T03:40:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | from __future__ import absolute_import, division
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicGraphConv(nn.Module):
    """
    Basic Graph Convolution Layer.

    Node features are projected with two learnable weight matrices -- one
    for each node's own contribution and one for its neighbours -- and the
    two terms are mixed through a softmax-normalised, mask-constrained
    learnable edge weighting derived from the fixed adjacency.
    """
    def __init__(self, in_features, out_features, adj, bias=True):
        super(BasicGraphConv, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # W[0] projects the self term, W[1] the neighbour term.
        self.W = nn.Parameter(torch.zeros(size=(2, in_features, out_features), dtype=torch.float))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.adj = adj
        self.m = (self.adj > 0)
        # One learnable logit per edge present in the adjacency mask.
        self.e = nn.Parameter(torch.zeros(1, len(self.m.nonzero()), dtype=torch.float))
        nn.init.constant_(self.e.data, 1)
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features, dtype=torch.float))
            stdv = 1. / math.sqrt(self.W.size(2))
            self.bias.data.uniform_(-stdv, stdv)
        else:
            self.register_parameter('bias', None)
    def forward(self, input):
        device = input.device
        self_term = torch.matmul(input, self.W[0].to(device))
        neigh_term = torch.matmul(input, self.W[1].to(device))
        # Masked-out edges get a huge negative logit so softmax assigns
        # them (numerically) zero weight.
        logits = -9e15 * torch.ones_like(self.adj).to(device)
        logits[self.m] = self.e.to(device)
        weights = F.softmax(logits, dim=1)
        eye = torch.eye(weights.size(0), dtype=torch.float).to(device)
        # Diagonal weights gate the self term, off-diagonal the neighbours.
        mixed = torch.matmul(weights * eye, self_term) \
            + torch.matmul(weights * (1 - eye), neigh_term)
        if self.bias is None:
            return mixed
        return mixed + self.bias.view(1, 1, -1).to(device)
    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
| [
"yiranupup@gmail.com"
] | yiranupup@gmail.com |
ba6b29d49bdbf778c8073b6ef9f093ca22f74bfe | ceae98e18e66d07ae3e86c45b6fae1766dad6259 | /controllers/unit_test/quad_unittest.py | 6c47471dbdb306660a64e2a9f994b247fa3628f5 | [] | no_license | wallischau/tempproject | b20e1b8f1d797e066b16601d9b3ec13d7d677c90 | 0d84ca2d4a4fbdbe125b99a33b3fe30b15d11da1 | refs/heads/master | 2020-03-21T01:15:57.400391 | 2018-07-05T18:36:06 | 2018-07-05T18:36:06 | 137,933,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | import unittest
from ..quadattenuator import QuadAttenuator
class QuadAttenuator_Test(unittest.TestCase):
    """Hardware-in-the-loop tests for the QuadAttenuator controller.

    NOTE(review): ``quad`` is constructed at class-definition (import)
    time and talks to a real device at 10.3.65.153, so this suite only
    passes on the lab network -- confirm before wiring it into CI.
    """
    quad = QuadAttenuator('10.3.65.153')
    def test_get_info(self):
        # Device serial/name reported by get_info() starts with "QA".
        fields = self.quad.get_info()
        self.assertTrue('QA' in fields['dev_name'])
    def test_get_atten(self):
        self.assertTrue(self.quad.get_atten(1) > 0)
    def test_get_atten_db_max(self):
        self.assertTrue(self.quad.get_atten_db_max() > 0)
    def test_get_atten_rf_count(self):
        self.assertTrue(self.quad.get_atten_rf_count() > 0)
    def test_get_dev_name(self):
        print(self.quad.get_dev_name())
        self.assertTrue(self.quad.get_dev_name() != '')
    def test_get_dev_serial(self):
        self.assertTrue('QA' in self.quad.get_dev_serial())
    def test_get_dev_type(self):
        self.assertTrue(self.quad.get_dev_type() != '')
    def test_get_ether_mac(self):
        # MAC address is colon-separated.
        self.assertTrue(':' in self.quad.get_ether_mac())
    def test_get_ip_static_address(self):
        self.assertTrue('.' in self.quad.get_ip_static_address())
    def test_get_ip_static_gateway(self):
        self.assertTrue('.' in self.quad.get_ip_static_gateway())
    def test_get_ip_static_subnet(self):
        self.assertTrue('.' in self.quad.get_ip_static_subnet())
    def test_get_version_firmware(self):
        self.assertTrue('.' in self.quad.get_version_firmware())
    def test_set_atten(self):
        # Round-trip: set an attenuation value, read it back via get_info().
        self.quad.set_atten(1, 1.5)
        fields = self.quad.get_info()
        self.assertTrue(fields['atten1'] == '1.5')
        self.quad.set_atten(1, 2.0)
        fields = self.quad.get_info()
        self.assertTrue(fields['atten1'] == '2.0')
if __name__ == '__main__':
    # Discover and run every test_* method defined in this module.
    unittest.main()
| [
"wchau8899@gmail.com"
] | wchau8899@gmail.com |
d6d3bcf5c2af59efd9530bb1120bca225c9cc3d5 | cfb8b2403d64ca6aa2a336c67159144c55bf58e8 | /module4/zyBooksChpt5/halfArrow.py | e10912482ae4c3deb1dde76fa122f34002a24c7e | [] | no_license | DomfeLacre/zyBooksPython_CS200 | 77bd43f89fbeec7589e50842365100cc9ce284f8 | 959dcc12dec808b39a42a25adee69dbf152bac96 | refs/heads/master | 2020-08-06T22:51:05.160759 | 2018-05-15T01:38:38 | 2018-05-15T01:38:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # zyBooks Chpt.5 Exercise 5.13 Drawing a half arrow (Python 3)
# Draw a half arrow: a rectangular base followed by a right-angled head
# whose rows shrink from arrow_head_width stars down to a single star.
print ('Enter arrow base height: ')
arrow_base_height = int(input())
print ('Enter arrow base width: ')
arrow_base_width = int(input())
print ('Enter arrow head width: ')
arrow_head_width = int(input())
# The head must be strictly wider than the base or the arrow has no tip.
while arrow_head_width <= arrow_base_width:
    print ('Please enter an arrow head width that is larger than the arrow base: ')
    arrow_head_width = int(input())
# Base: arrow_base_height rows, each arrow_base_width stars wide.
# (The original no-op ``arrow_head_width = arrow_head_width`` was removed.)
for _ in range(arrow_base_height):
    print('*' * arrow_base_width)
# Head: one row per width, counting down from arrow_head_width to 1.
for width in range(arrow_head_width, 0, -1):
    print('*' * width)
| [
"chad@thewickk.com"
] | chad@thewickk.com |
0a3c43e80309e905dfeb62af395d1d634b1f8e89 | cf52e9223630410fe0b4ab5ac89cd7d09c61ad66 | /finance/biz/client_bank_account.py | 2eed05fe690fd66e7a2ae262504fa31ff60ddc90 | [] | no_license | ydtg1993/shaibao-server-python | f6ba2afecc62e1be77f238e90b415eaedbe5f762 | a4cb2794d9a9c1ecfaa324a6ad0787a80db2c8ee | refs/heads/master | 2022-12-13T11:24:05.459781 | 2019-12-14T13:53:21 | 2019-12-14T13:53:21 | 228,029,404 | 0 | 0 | null | 2022-12-08T06:16:48 | 2019-12-14T13:42:14 | JavaScript | UTF-8 | Python | false | false | 195 | py | from finance.models.bank_account import BankAccount
def search():
    # Stub: look up a client's bank accounts.  Not implemented yet.
    pass
def add():
    # Stub: attach a new bank account to a client.
    pass
def remove():
    # Stub: detach a bank account from a client.
    pass
# NOTE(review): this second ``search`` definition shadows the first one
# above, making it unreachable -- one of the two was probably meant to
# have a different name; confirm intent before renaming.
def search():
    # Stub: duplicate of the first ``search`` (see note above).
    pass
def switch():
    # Stub: switch a client's active/default bank account.
    pass
| [
"ydtg1993@outlook.com"
] | ydtg1993@outlook.com |
17e40ff1b093bbdab0f8dfb815287979849dcf2b | d2a51bd4765d1fbcb769ae9cea7c9e528e7acf1f | /hw1/hw1.py | 230fe0ea95fa0883d3ba754e2478db7b073550d7 | [] | no_license | nba556677go/ML2019SPRING | 272d94ff199e707c6678c9417d83f4acda565145 | 9c00f5a3d096ce78fc9c15fc4a76432a2b926e93 | refs/heads/master | 2022-09-09T16:13:09.934493 | 2019-06-07T04:03:16 | 2019-06-07T04:03:16 | 172,427,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py |
import numpy as np
import math
import sys
import csv
# PM2.5 prediction script: loads a pre-trained linear model, parses the
# test CSV (18 measurement rows per station block, 9 hourly readings
# each), and writes id/value predictions to the output CSV.
testing_data_path = sys.argv[1]
output_file_path = sys.argv[2]
# Linear regression weights trained offline (bias term in w[0]).
w = np.load('model-5.52295.npy')
test_x = []
count = 0
#feature_list = [5,6,7,8,9,12]
# Use all 18 measurement types as features.
feature_list = [i for i in range(18)]
#feature_list =[3,4,5,6,7,8,9,10,12,13]
#feature_list = ["NMHC", "NO" ,"NO2" , "NOx" , "O3" ,"RAINFALL" ,"PM10" , "PM2.5" , "SO2"]
#feature_list = ["PM2.5"]
with open(testing_data_path , 'r') as f:
    lines = f.readlines()
    for line in lines :
        line = line.rstrip('\n').split(',')
        #print(line)
        #add bias
        # Every 18 lines starts a new test sample.
        if count % 18 ==0 :
            test_x.append([])
        # Columns 2..10 are the 9 hourly readings; 'NR' (no rain) -> 0.
        if count % 18 in feature_list:
            for i in range(2 , 11):
                if line[i] != 'NR':
                    test_x[count//18].append(float(line[i]))
                else :
                    test_x[count//18].append(float(0))
        count += 1
#print(test_x)
test_x = np.array(test_x)
#print(test_x.shape)
#test_x = feature1(test_x , feature_list)
#concatanate x**2
#test_x = np.concatenate((test_x , test_x**2) , axis=1)
#add bias
# Prepend a column of ones so w[0] acts as the bias term.
test_x = np.concatenate( (np.ones((test_x.shape[0] , 1)) , test_x), axis = 1)
ans = []
predict = np.dot(test_x , w)
for i in range(len(predict)):
    ans.append(["id_"+ str(i), predict[i] ] )
# Emit the submission file: header row then one (id, value) row per sample.
with open(output_file_path , "w") as f:
    subwriter = csv.writer(f , delimiter = ',')
    subwriter.writerow(["id" , "value"])
    for i in range(len(ans)):
        subwriter.writerow(ans[i])
#print(test_x.shape)
"b04507025@ntu.edu.tw"
] | b04507025@ntu.edu.tw |
bc99ce65235a3ffa79223116c532a78ee3ef3d86 | 4273f162abb12ef1939271c2aabee9547ac6afee | /studio_usd_pipe/resource/push/maya/uv/extractor_thumbnail.py | 8f6da8661609560730437f9504ee9bfc291638a7 | [] | no_license | xiyuhao/subins_tutorials | 2717c47aac0adde099432e5dfd231606bf45a266 | acbe4fe16483397e9b0f8e240ca23bdca652b92d | refs/heads/master | 2023-07-28T13:42:41.445399 | 2021-09-12T11:02:37 | 2021-09-12T11:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | NAME = 'Extract UV Thumbnail'
# Plugin metadata -- presumably consumed by the pipeline's extractor
# registry alongside NAME above (confirm against the plugin loader).
ORDER = 1
VALID = True
TYPE = 'extractor'
KEY = 'uv_thumbnail'
OWNER = 'Subin Gopi'
COMMENTS = 'To create uv thumbnail file'
VERSION = '0.0.0'
MODIFIED = 'April 19, 2020'
def execute(output_path=None, **kwargs):
    """Create the UV thumbnail image for an asset.

    Expects ``kwargs['thumbnail']`` (path of the source image) and
    ``kwargs['caption']`` (stem of the output file name).  Returns a
    ``(success, [path], message)`` tuple like the sibling extractors.
    """
    import os
    from studio_usd_pipe.core import common
    from studio_usd_pipe.utils import maya_asset
    source_image = kwargs['thumbnail']
    if not os.path.isfile(source_image):
        return False, [source_image], 'not found input thumbnail!...'
    target_image = os.path.join(output_path, '{}.png'.format(kwargs['caption']))
    # data_exists(..., True) presumably checks/obtains write permission for
    # the target path -- confirm against studio_usd_pipe.core.common.
    if not common.data_exists(target_image, True):
        return False, [target_image], 'not able to save thumbnail!...'
    result = maya_asset.create_thumbnail(source_image, target_image)
    return True, [result], 'success!...'
| [
"subing85@gmail.com"
] | subing85@gmail.com |
f49da3128a76bc15d196a17fba356f2e9ff87149 | 5ee644f78fb22a5af93b52d870f6b3b4c90d6453 | /Python/models/official/nlp/bert/tokenization_test.py | f405819f9ceeb50aac4970e4d0cb5b629726f75f | [
"Apache-2.0"
] | permissive | ZeinaMaan/masters_repo | 247b2cd86abdd78fd7802ed57411b1dabbf4e49d | 01058969acc26be7e0cdbd1993e9c788c8ac0965 | refs/heads/master | 2023-05-03T03:02:58.534777 | 2021-05-26T00:11:21 | 2021-05-26T00:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,003 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
import tensorflow as tf
from official.nlp.bert import tokenization
class TokenizationTest(tf.test.TestCase):
  """Tokenization test.
  The implementation is forked from
  https://github.com/google-research/bert/blob/master/tokenization_test.py."
  """
  def test_full_tokenizer(self):
    # End-to-end: write a tiny vocab file, tokenize, then map to ids.
    # "##" marks WordPiece continuation sub-tokens.
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
      if six.PY2:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
      else:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens
                                   ]).encode("utf-8"))
      vocab_file = vocab_writer.name
    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)
    # Accents are stripped and text lower-cased before WordPiece splitting.
    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
  def test_chinese(self):
    # CJK characters are split into individual tokens.
    tokenizer = tokenization.BasicTokenizer()
    self.assertAllEqual(
        tokenizer.tokenize(u"ah\u535A\u63A8zz"),
        [u"ah", u"\u535A", u"\u63A8", u"zz"])
  def test_basic_tokenizer_lower(self):
    tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
        ["hello", "!", "how", "are", "you", "?"])
    self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
  def test_basic_tokenizer_no_lower(self):
    # With do_lower_case=False the original casing is preserved.
    tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
        ["HeLLo", "!", "how", "Are", "yoU", "?"])
  def test_wordpiece_tokenizer(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]
    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i
    tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
    self.assertAllEqual(tokenizer.tokenize(""), [])
    self.assertAllEqual(
        tokenizer.tokenize("unwanted running"),
        ["un", "##want", "##ed", "runn", "##ing"])
    # A word containing an out-of-vocab piece collapses to [UNK].
    self.assertAllEqual(
        tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
  def test_convert_tokens_to_ids(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]
    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i
    self.assertAllEqual(
        tokenization.convert_tokens_to_ids(
            vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
  def test_is_whitespace(self):
    # \u00A0 is a non-breaking space and counts as whitespace.
    self.assertTrue(tokenization._is_whitespace(u" "))
    self.assertTrue(tokenization._is_whitespace(u"\t"))
    self.assertTrue(tokenization._is_whitespace(u"\r"))
    self.assertTrue(tokenization._is_whitespace(u"\n"))
    self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
    self.assertFalse(tokenization._is_whitespace(u"A"))
    self.assertFalse(tokenization._is_whitespace(u"-"))
  def test_is_control(self):
    # \t/\r/\n are treated as whitespace, not control characters.
    self.assertTrue(tokenization._is_control(u"\u0005"))
    self.assertFalse(tokenization._is_control(u"A"))
    self.assertFalse(tokenization._is_control(u" "))
    self.assertFalse(tokenization._is_control(u"\t"))
    self.assertFalse(tokenization._is_control(u"\r"))
    self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
  def test_is_punctuation(self):
    self.assertTrue(tokenization._is_punctuation(u"-"))
    self.assertTrue(tokenization._is_punctuation(u"$"))
    self.assertTrue(tokenization._is_punctuation(u"`"))
    self.assertTrue(tokenization._is_punctuation(u"."))
    self.assertFalse(tokenization._is_punctuation(u"A"))
    self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
  # Run all TokenizationTest cases via the TensorFlow test runner.
  tf.test.main()
| [
"sarmadtanveer@gmail.com"
] | sarmadtanveer@gmail.com |
dd384ca5264931a9c225d9165e50e3e1a69d3935 | ff644162d29c5de65b472c86ae1d8494179f5f13 | /histogram_equalization.py | eef361f096a24f6892fb83f8e4948382f3ad9f06 | [] | no_license | alrivero/histogram_equalization | caadd88437bd5afca7ed855c49ea0de9d3a50c70 | dd76c3e5b3f175beef9dff5af8d2857b3630e617 | refs/heads/master | 2022-12-19T07:10:34.296611 | 2020-09-17T22:43:45 | 2020-09-17T22:43:45 | 296,457,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,736 | py | import cv2
import numpy as np
from math import floor
def compute_cdf(histogram, img_dim):
    """Return the normalized cumulative distribution of *histogram*.

    Args:
        histogram: per-intensity pixel counts (e.g. cv2.calcHist output);
            np.cumsum flattens it, so any input shape is accepted.
        img_dim: image shape tuple; only the first two entries
            (height, width) are used for normalization.

    Returns:
        1-D float array of length ``histogram.size``; its last entry is
        1.0 for a histogram that covers every pixel.
    """
    # np.cumsum already allocates a fresh (flattened) array, so the
    # former defensive np.copy() was redundant and is removed.
    cdf = np.cumsum(histogram)
    return cdf / (img_dim[0] * img_dim[1])
def compute_hist_equalizer(img):
    """Build a per-channel histogram-equalization lookup table for *img*.

    Channels are handled in OpenCV's B, G, R order; the result is a uint8
    array of shape (1, 256, 3) suitable for cv2.LUT.
    """
    scale = np.repeat(255, 256)
    lookups = []
    for channel in range(3):
        hist = cv2.calcHist([img], [channel], None, [256], [0, 256])
        # Map each intensity through its normalized CDF, scaled to 0..255.
        lookups.append(compute_cdf(hist, img.shape) * scale)
    return np.dstack(lookups).astype(np.uint8)
def gather_block_equalizers(img, block_size):
    """Compute a histogram-equalization lookup table for every image tile.

    Tiles the image row-major into blocks of *block_size* (height, width)
    and returns a nested list of lookup tables indexed as
    ``[block_row][block_col]``.
    """
    grid = []
    for top in range(0, img.shape[0], block_size[0]):
        row_tables = []
        for left in range(0, img.shape[1], block_size[1]):
            tile = img[top:top + block_size[0], left:left + block_size[1]]
            row_tables.append(compute_hist_equalizer(tile))
        grid.append(row_tables)
    return grid
def block_histogram_equalization(img, block_div):
    """Equalize *img* tile by tile on a block_div[0] x block_div[1] grid.

    Each tile gets its own lookup table, then is equalized in place on a
    copy of the image; the copy is returned.

    NOTE(review): when the image dimensions are not divisible by
    *block_div*, the trailing remainder rows/columns are never written
    back (the loops below only cover the first block_div tiles), and
    gather_block_equalizers() computes extra partial-tile tables that go
    unused -- confirm whether that edge behavior is intended.
    """
    block_size = (
        int(img.shape[0]/block_div[0]),
        int(img.shape[1]/block_div[1]))
    img = img.copy()
    block_equalizers = gather_block_equalizers(img, block_size)
    for i in range(0, block_div[0]):
        for j in range(0, block_div[1]):
            img_block = img[
                i*block_size[0]:(i+1)*block_size[0],
                j*block_size[1]:(j+1)*block_size[1]]
            equalized_block = cv2.LUT(img_block, block_equalizers[i][j])
            img[
                i*block_size[0]:(i+1)*block_size[0],
                j*block_size[1]:(j+1)*block_size[1]] = equalized_block
    return img
def global_histogram_equalization(img):
    """Histogram-equalize a BGR uint8 image over the whole frame.

    Returns a new equalized image; *img* itself is left untouched.
    """
    # The previous body duplicated compute_hist_equalizer() line for line
    # (its extra np.ndarray.flatten() calls were no-ops on the already
    # 1-D CDFs), so delegate to the shared helper instead.
    return cv2.LUT(img, compute_hist_equalizer(img))
"alfredo.rivero@outlook.com"
] | alfredo.rivero@outlook.com |
f9753db2629117a89fe194bb4578912ece1a39d0 | 5b1300ce5b9c2095957967e09a1213c2a506195f | /codes/list/recurve.py | a3a4e1e0d867782d54d7809e83936cbf7d65f2e6 | [] | no_license | zqiang3/getmore | 18e6131e0432125b56b9f2dc51467c27ce572215 | 1d8766f16caf0d4ee2bc4757625fa8bd9bd1d44e | refs/heads/master | 2020-05-30T00:48:51.273662 | 2019-05-30T20:03:16 | 2019-05-30T20:03:16 | 189,466,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | class Node(object):
    def __init__(self, value):
        # Singly linked list node: payload plus pointer to the next node.
        self.next = None
        self.value = value
class List:
    """Minimal singly linked list with head insertion.

    NOTE: ``display`` uses the Python 2 print statement, so this module
    is Python 2 only.
    """
    def __init__(self):
        self.head = None
    def insert(self, value):
        # Prepend: the newest element becomes the head, so traversal
        # order is the reverse of insertion order.
        newnode = Node(value)
        newnode.next = self.head
        self.head = newnode
    def display(self):
        # Print one value per line from head to tail.
        cursor = self.head
        while cursor != None:
            print cursor.value
            cursor = cursor.next
def check_recurve(la):
    """Return True when linked list *la* reads the same in both directions.

    An empty list counts as False.  Works by building a reversed copy and
    comparing the two lists element by element.
    """
    if la.head is None:
        return False
    # insert() prepends, so copying front-to-back yields a reversed list.
    reversed_copy = List()
    node = la.head
    while node is not None:
        reversed_copy.insert(node.value)
        node = node.next
    # Walk both lists in lockstep; any mismatch means not a palindrome.
    fwd = la.head
    bwd = reversed_copy.head
    while fwd is not None:
        if fwd.value != bwd.value:
            return False
        fwd = fwd.next
        bwd = bwd.next
    return True
def reverse_list(root):
    """Reverse a singly linked list in place and return the new head.

    Empty and single-node lists are returned unchanged.
    """
    if root is None or root.next is None:
        return root
    prev = None
    node = root
    # Standard three-pointer reversal: flip one link per iteration.
    while node is not None:
        following = node.next
        node.next = prev
        prev = node
        node = following
    return prev
def check_recurve_v2(la):
    """Palindrome check that reverses the second half in place (O(1) extra).

    NOTE: destructive -- the second half of *la* is reversed and the list
    is left in that broken state; callers must not reuse *la* afterwards.
    """
    if la.head is None:
        return False
    if la.head.next is None:
        return True
    # Count the nodes.
    length = 0
    cur = la.head
    while cur:
        length += 1
        cur = cur.next
    # Advance to the start of the second half; for odd lengths the middle
    # node lands in the reversed half and compares against itself's mirror.
    # Floor division keeps this correct on both Python 2 and Python 3 --
    # the original used "/", which yields a float under Python 3 and
    # breaks range().  (A dead ``mid``/odd-length adjustment that was
    # never read has been removed.)
    half = length // 2
    mid_cur = la.head
    for _ in range(half):
        mid_cur = mid_cur.next
    # Reverse the second half in place and compare against the first half.
    tail = reverse_list(mid_cur)
    front = la.head
    while tail is not None:
        if front.value != tail.value:
            return False
        front = front.next
        tail = tail.next
    return True
#====================
# Ad-hoc smoke test: build the palindrome 2 3 5 3 2 (insert() prepends),
# display it, then run the in-place palindrome check.
al = List()
al.insert(2)
al.insert(3)
al.insert(5)
al.insert(3)
al.insert(2)
al.display()
print check_recurve_v2(al)
| [
"zqiang3@126.com"
] | zqiang3@126.com |
0716e9b663e856dbfcba23874219d938f3e669a9 | e54b736fbded93258cf1ebef5931634400986013 | /apps/analyzer.py | 3d67242a663d220c512eac84908b3741d9959e77 | [] | no_license | uniray7/jagereye_ng | 0842c9b627a9ba8345e973cfdc5a7e7024b94983 | 2fb0fa0f213e844732edfc2d383ada9402e60906 | refs/heads/master | 2020-03-11T02:40:20.482329 | 2018-04-18T10:52:34 | 2018-04-18T10:52:34 | 129,725,729 | 0 | 0 | null | 2018-04-16T10:27:34 | 2018-04-16T10:27:34 | null | UTF-8 | Python | false | false | 12,036 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import asyncio
import time, datetime
from dask.distributed import LocalCluster, Client
from multiprocessing import Process, Pipe, TimeoutError
from concurrent.futures import ThreadPoolExecutor
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from intrusion_detection import IntrusionDetector
from jagereye_ng import video_proc as vp
from jagereye_ng import gpu_worker
from jagereye_ng.api import APIConnector
from jagereye_ng.io.streaming import VideoStreamReader, ConnectionBrokenError
from jagereye_ng.io.notification import Notification
from jagereye_ng import logging
class HotReconfigurationError(Exception):
    """Raised when a running analyzer is reconfigured without being
    stopped first."""

    def __str__(self):
        # This message is surfaced verbatim to API clients.
        return ("Hot re-configuring analyzer is not allowed, please"
                " stop analyzer first before updating it.")
def create_pipeline(anal_id, pipelines, frame_size):
    """Instantiate pipeline stage objects from their config descriptions.

    Each entry of *pipelines* is a dict with a "type" and a "params"
    mapping; only the "IntrusionDetection" type is supported so far,
    unknown types are silently skipped.
    """
    stages = []
    for conf in pipelines:
        if conf["type"] != "IntrusionDetection":
            continue
        params = conf["params"]
        stages.append(IntrusionDetector(anal_id,
                                        params["roi"],
                                        params["triggers"],
                                        frame_size))
    return stages
class Driver(object):
    """Runs a driver function in a daemonized child process and exposes a
    duplex pipe for exchanging control/status messages with it."""

    def __init__(self):
        self._driver_process = None
        self._sig_parent = None
        self._sig_child = None

    def start(self, func, *argv):
        """Spawn the child process running ``func(signal, *argv)``."""
        self._sig_parent, self._sig_child = Pipe()
        self._driver_process = Process(
            target=Driver.run_driver_func,
            args=(func,
                  self._sig_child,
                  argv))
        # Daemonize so a crashed parent does not leave orphan processes.
        self._driver_process.daemon = True
        self._driver_process.start()

    def terminate(self, timeout=5):
        """Wait up to *timeout* seconds for the child to exit, force-kill
        it if it has not, then tear down the pipe state."""
        assert self._driver_process is not None, "It's an error to attempt to \
            terminate a driver before it has been started."
        try:
            # Process.join() does NOT raise on timeout -- it simply returns
            # with the process still alive.  (The previous code caught
            # multiprocessing.TimeoutError here, which join() never raises,
            # so hung drivers were never force-terminated.)
            self._driver_process.join(timeout)
            if self._driver_process.is_alive():
                logging.error("The driver was not terminated for some reason "
                              "(exitcode: {}), force to terminate it."
                              .format(self._driver_process.exitcode))
                self._driver_process.terminate()
                time.sleep(0.1)
        finally:
            self._sig_parent.close()
            self._sig_parent = None
            self._sig_child = None
            self._driver_process = None

    def poll(self, timeout=None):
        """Return True when a message from the child is ready; False when
        the driver has not been started."""
        if self._sig_parent is not None:
            if timeout is not None:
                return self._sig_parent.poll(timeout)
            else:
                return self._sig_parent.poll()
        else:
            return False

    def send(self, msg):
        """Send *msg* to the child over the control pipe."""
        self._sig_parent.send(msg)

    def recv(self):
        """Blocking receive of the next message from the child."""
        return self._sig_parent.recv()

    @staticmethod
    def run_driver_func(driver_func, signal, *argv):
        # Child-process entry point: run the driver and always close its
        # pipe end so the parent sees EOF even when the driver crashes.
        try:
            driver_func(signal, *argv[0])
        finally:
            signal.close()
class Analyzer():
    """State machine wrapping one analysis job run in a child process.

    Configuration (name/source/pipelines) may only be changed while the
    analyzer is not running; start()/stop() drive the underlying Driver.
    """
    STATUS_CREATED = "created"
    STATUS_RUNNING = "running"
    STATUS_SRC_DOWN = "source_down"
    STATUS_STOPPED = "stopped"

    def __init__(self, cluster, anal_id, name, source, pipelines):
        self._cluster = cluster
        self._id = anal_id
        self._name = name
        self._source = source
        self._pipelines = pipelines
        self._status = Analyzer.STATUS_CREATED
        self._driver = Driver()

    def _assert_not_running(self):
        # Configuration may only change while the driver process is down.
        if self._status == Analyzer.STATUS_RUNNING:
            raise HotReconfigurationError()

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._assert_not_running()
        self._name = new_name

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, new_source):
        self._assert_not_running()
        self._source = new_source

    @property
    def pipelines(self):
        return self._pipelines

    @pipelines.setter
    def pipelines(self, new_pipelines):
        self._assert_not_running()
        self._pipelines = new_pipelines

    def get_status(self):
        """Drain any status report pushed by the driver process and return
        the current status string."""
        if self._driver.poll():
            report = self._driver.recv()
            if report == "source_down":
                self._status = Analyzer.STATUS_SRC_DOWN
            elif report == "internal_error":
                self._status = Analyzer.STATUS_STOPPED
        return self._status

    def start(self):
        """Launch the driver process; no-op when already running."""
        if self._status == Analyzer.STATUS_RUNNING:
            return
        self._driver.start(analyzer_main_func,
                           self._cluster,
                           self._id,
                           self._name,
                           self._source,
                           self._pipelines)
        self._status = Analyzer.STATUS_RUNNING

    def stop(self):
        """Ask the driver to stop and tear it down; no-op when not running."""
        if self._status != Analyzer.STATUS_RUNNING:
            return
        self._driver.send("stop")
        self._driver.terminate()
        self._status = Analyzer.STATUS_STOPPED
def setup_db_client(host="mongodb://localhost:27017"):
    """Connect to MongoDB at *host* and verify the server is reachable.

    Raises ConnectionFailure (after logging it) when the server does not
    answer; otherwise returns the connected MongoClient.
    """
    client = MongoClient(host)
    try:
        # "ismaster" is a cheap command; a failure here means there is no
        # live connection to the server.
        client.admin.command("ismaster")
    except ConnectionFailure as err:
        logging.error("Mongo server is not available: {}".format(err))
        raise
    return client
def analyzer_main_func(signal, cluster, anal_id, name, source, pipelines):
    """Child-process entry point for one analyzer.

    Reads frames from the video source in batches, runs motion detection
    and the configured pipeline stages on them, pushes notifications and
    persists events until a "stop" message arrives on *signal* or the
    source connection breaks.
    """
    logging.info("Starts running Analyzer: {}".format(name))
    executor = ThreadPoolExecutor(max_workers=1)
    notification = Notification(["nats://localhost:4222"])
    try:
        db_client = setup_db_client()
    except ConnectionFailure:
        # Without a database there is nothing useful to do; release the
        # worker thread before bailing out (the old code leaked it here).
        executor.shutdown()
        return

    def save_event(event):
        # Runs on the executor thread: persist one detected event.
        logging.info("Saving event: {}".format(str(event)))
        try:
            db_client["jager_test"]["events"].insert_one({
                "analyzerId": anal_id,
                "type": event.name,
                "timestamp": event.timestamp,
                "date": datetime.datetime.fromtimestamp(event.timestamp),
                "content": event.content
            })
        except Exception as e:
            logging.error("Failed to save event: {}".format(e))

    # Pre-bind everything the finally block releases so an exception during
    # setup cannot trigger a NameError/AttributeError during cleanup (the
    # previous code could hit unbound ``src_reader``/``dask``, and called
    # .release() on raw config dicts when ``pipelines`` had not yet been
    # replaced by stage objects).
    dask = None
    src_reader = None
    stages = []
    try:
        # TODO: Get the address of scheduler from the configuration
        # file.
        dask = Client(cluster.scheduler_address)
        src_reader = VideoStreamReader()
        src_reader.open(source["url"])
        video_info = src_reader.get_video_info()
        stages = create_pipeline(
            anal_id,
            pipelines,
            video_info["frame_size"])
        while True:
            frames = src_reader.read(batch_size=5)
            motion = vp.detect_motion(frames)
            results = [p.run(frames, motion) for p in stages]
            for event in results:
                if event is not None:
                    notification_msg = {
                        "type": event.name,
                        "analyzerId": anal_id,
                        "name": name
                    }
                    notification_msg.update(event.content)
                    notification.push("Analyzer", notification_msg)
                    # Persist asynchronously so the read loop is not
                    # blocked by database latency.
                    executor.submit(save_event, event)
            if signal.poll() and signal.recv() == "stop":
                break
    except ConnectionBrokenError:
        logging.error("Error occurred when trying to connect to source {}"
                      .format(source["url"]))
        # TODO: Should push a notification of this error
        signal.send("source_down")
    finally:
        if src_reader is not None:
            src_reader.release()
        for stage in stages:
            stage.release()
        if dask is not None:
            dask.close()
        executor.shutdown()
        logging.info("Analyzer terminated: {}".format(name))
class AnalyzerManager(APIConnector):
    """API-facing registry of Analyzer objects keyed by id.

    Each ``on_*`` method implements one CRUD/lifecycle verb of the
    "analyzer" API channel; request-validation failures are reported to
    callers as RuntimeError.
    """
    def __init__(self, cluster, io_loop, nats_hosts=None):
        super().__init__("analyzer", io_loop, nats_hosts)
        self._cluster = cluster
        self._analyzers = dict()
    def on_create(self, params):
        """Create and immediately start an analyzer from *params*."""
        logging.info("Creating Analyzer, params: {}".format(params))
        try:
            sid = params["id"]
            name = params["name"]
            source = params["source"]
            pipelines = params["pipelines"]
            # Create analyzer object
            self._analyzers[sid] = Analyzer(
                self._cluster, sid, name, source, pipelines)
            # Start analyzer
            self._analyzers[sid].start()
        except KeyError as e:
            raise RuntimeError("Invalid request format: {}".format(e.args[0]))
        except ConnectionBrokenError:
            raise RuntimeError("Failed to establish connection to {}"
                               .format(source["url"]))
        # NOTE(review): this catch-all only logs -- any other failure is
        # silently swallowed and the caller sees success while the entry
        # may be half-created; confirm whether that is intentional.
        except Exception as e:
            logging.error(e)
    def _get_analyzer_status(self, sid):
        # Helper shared by on_read: raises for unknown ids.
        if sid not in self._analyzers:
            raise RuntimeError("Analyzer not found: {}".format(sid))
        return self._analyzers[sid].get_status()
    def on_read(self, params):
        """Return status for one analyzer id or a dict for a list of ids."""
        logging.info("Getting Analyzer information, params: {}".format(params))
        if isinstance(params, list):
            result = dict()
            for sid in params:
                result[sid] = self._get_analyzer_status(sid)
        else:
            # TODO: check if params is an ObjectID
            result = self._get_analyzer_status(params)
        return result
    def on_update(self, update):
        """Apply partial config changes; refused while the analyzer runs."""
        logging.info("Updating Analyzer, params: {}".format(update))
        try:
            sid = update["id"]
            params = update["params"]
            analyzer = self._analyzers[sid]
            if "name" in params:
                analyzer.name = params["name"]
            if "source" in params:
                analyzer.source = params["source"]
            if "pipelines" in params:
                analyzer.pipelines = params["pipelines"]
        except KeyError as e:
            raise RuntimeError("Invalid request format: missing "
                               "field '{}'.".format(e.args[0]))
        except HotReconfigurationError as e:
            raise RuntimeError(str(e))
    def _delete_analyzer(self, sid):
        # Stop the driver process before dropping the registry entry.
        self._analyzers[sid].stop()
        del self._analyzers[sid]
    def on_delete(self, params):
        """Stop and remove one analyzer id or a list of ids."""
        logging.info("Deleting Analyzer: {}".format(params))
        try:
            # TODO: Need to make sure the allocated resources for
            # analyzer "sid" also been deleted completely
            if isinstance(params, list):
                for sid in params:
                    self._delete_analyzer(sid)
            else:
                sid = params
                self._delete_analyzer(sid)
        except KeyError:
            # NOTE(review): "foramt" is a typo, but it is a runtime string
            # seen by API clients -- fix alongside the client side.
            raise RuntimeError("Invalid request foramt")
    def on_start(self, sid):
        """Start a previously created analyzer."""
        logging.info("Starting Analyzer: {}".format(sid))
        if sid not in self._analyzers:
            raise RuntimeError("Analyzer not found")
        else:
            self._analyzers[sid].start()
    def on_stop(self, sid):
        """Stop a running analyzer without removing it."""
        logging.info("Stopping Analyzer: {}".format(sid))
        if sid not in self._analyzers:
            raise RuntimeError("Analyzer not found")
        else:
            self._analyzers[sid].stop()
if __name__ == "__main__":
cluster = LocalCluster(n_workers=0)
# Add GPU workers
# TODO: Get the number of GPU from configuration file
cluster.start_worker(name="GPU_WORKER-1", resources={"GPU": 1})
with cluster, Client(cluster.scheduler_address) as client:
# Initialize GPU workers
results = client.run(gpu_worker.init_worker, ".")
assert all([v == "OK" for _, v in results.items()]), "Failed to initialize GPU workers"
# Start analyzer manager
io_loop = asyncio.get_event_loop()
manager = AnalyzerManager(cluster, io_loop, ["nats://localhost:4222"])
io_loop.run_forever()
io_loop.close()
| [
"wtchou1209@gmail.com"
] | wtchou1209@gmail.com |
069ccad9aaaa0e7b13d4b0c7e69ca03e953c533d | fbf9d596f55099ad2e4d319df1ae634f085f39c5 | /hw/show.py | 046394238d25e00306e9de466a8b197a12a25618 | [] | no_license | wym037935/flanker | d29150d7313b132b2ba071197f6e59f61bf3ca1e | c8ca436960481d47fe5b3c631dfe4fbed591d45c | refs/heads/master | 2021-01-10T01:54:06.890653 | 2016-03-14T15:25:19 | 2016-03-14T15:25:19 | 53,789,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | #coding=utf-8
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render_to_response
import MySQLdb
import re
import os
import database
import init
import newtask
import thread
import subprocess
from apscheduler.schedulers.blocking import BlockingScheduler
@csrf_exempt
def show(request):
init.first()
try:
task = ''
res = ''
if request.POST.has_key('tasklist'):
task = str(request.POST['tasklist'])
fw = open('result_store.sql','w')
fw.write('use python;\n')
fw.write('select * from %s;\n' % task)
fw.close()
cmd = "mysql -h localhost -u root -pflanker < result_store.sql > result_store.res"
child = subprocess.Popen(cmd, shell = True)
child.wait()
fr = open('result_store.res','r')
res = fr.read()
fr.close()
ll = database.getlist('show tables')
return render_to_response('showpage.html', {'tasklist': ll,'cons': res})
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
ferr = open("error.txt","w")
ferr.write("Mysql Error %d: %s" % (e.args[0], e.args[1]))
ferr.close()
| [
"wym6110@gmail.com"
] | wym6110@gmail.com |
02745d439ba2092e295b7fc772592e5856fe923b | 812ec66111f3f71acbc6ca98a557edee419e3e76 | /detectors/s3fd/__init__.py | e203b65f8fa459b3046415bfda680eaaf8c85421 | [
"MIT"
] | permissive | AlanSavio25/AVSR-Dataset-Pipeline | 7a35d922607901f0fa70593d1e34b89f3276e993 | 6e6d44eca6133c2e0223e9be8d011be0b68c73d1 | refs/heads/main | 2023-07-07T00:01:28.025308 | 2021-08-11T16:47:27 | 2021-08-11T16:47:27 | 389,610,550 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | import time
import numpy as np
import cv2
import torch
from torchvision import transforms
from .nets import S3FDNet
from .box_utils import nms_
PATH_WEIGHT = './detectors/s3fd/weights/sfd_face.pth'
img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')
class S3FD():
def __init__(self, device='cuda'):
tstamp = time.time()
self.device = device
print('[S3FD] loading with', self.device)
self.net = S3FDNet(device=self.device).to(self.device)
state_dict = torch.load(PATH_WEIGHT, map_location=self.device)
self.net.load_state_dict(state_dict)
self.net.eval()
print('[S3FD] finished loading (%.4f sec)' % (time.time() - tstamp))
def detect_faces(self, image, conf_th=0.8, scales=[1]):
w, h = image.shape[1], image.shape[0]
bboxes = np.empty(shape=(0, 5))
with torch.no_grad():
for s in scales:
scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
scaled_img = np.swapaxes(scaled_img, 1, 2)
scaled_img = np.swapaxes(scaled_img, 1, 0)
scaled_img = scaled_img[[2, 1, 0], :, :]
scaled_img = scaled_img.astype('float32')
scaled_img -= img_mean
scaled_img = scaled_img[[2, 1, 0], :, :]
x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
y = self.net(x)
detections = y.data
scale = torch.Tensor([w, h, w, h])
for i in range(detections.size(1)):
j = 0
while detections[0, i, j, 0] > conf_th:
score = detections[0, i, j, 0]
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
bbox = (pt[0], pt[1], pt[2], pt[3], score)
bboxes = np.vstack((bboxes, bbox))
j += 1
keep = nms_(bboxes, 0.1)
bboxes = bboxes[keep]
return bboxes
| [
"s1768177@hessdalen.inf.ed.ac.uk"
] | s1768177@hessdalen.inf.ed.ac.uk |
8c3bb4ff9108f69d52a356f2f3f50384b9c530d6 | da44df49207eb26aa5c3e4e01b4d4ee882ae72c6 | /venv/Lib/site-packages/botocore/__init__.py | a8298b3e4d4640b23254ec63c18db3abb6b0a5f6 | [] | no_license | juliojose3000/ObjectDetentionServerPython | 221fd89cd152fce26afbf518ee63e2e5b693c26e | 65f9182def9685495e859416276475184d35ff21 | refs/heads/master | 2020-09-01T05:35:29.308485 | 2019-11-10T17:31:17 | 2019-11-10T17:31:17 | 218,890,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,486 | py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.13.1'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acryonym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
| [
"juliojose3000@gmail.com"
] | juliojose3000@gmail.com |
fe721a5d634410d1e7eae1f657adedf3d2a421f4 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/keras/preprocessing/image.py | f2a6b9eb3dcc6002673a3e3a13516299983498ad | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:dd9edf94ef6b932c96aa6c9f40e3c19503ccfed4c5a10d0871bd11259eafd357
size 21747
| [
"github@cuba12345"
] | github@cuba12345 |
b68be730fe886ebea5a66fb439c78439510f4794 | e7a46c0f63e7595a533ab58a7db07b1c12ef6092 | /begpyprog/integr.py | 6504b69f2a91ac9fcea08095da70da492eb0ce9f | [] | no_license | sockduct/Books | 263ab81b72e39a11acc83b698c76b41104d8bd20 | 954039ff4abf51bbfec05944e5175cefe232a68f | refs/heads/master | 2021-01-10T03:37:47.340931 | 2016-10-29T12:34:58 | 2016-10-29T12:34:58 | 55,922,532 | 0 | 1 | null | 2016-10-29T12:34:59 | 2016-04-10T21:06:00 | HTML | UTF-8 | Python | false | false | 970 | py | ####################################################################################################
'''
Simple program to convert a string of integers separated by commas into an integer list
'''
# Imports
import sys
from BadInput import BadInput
__version__ = '0.0.1'
def parse(input):
curlst = input.replace(' ', '')
curlst = curlst.split(',')
try:
newlst = [int(i) for i in curlst]
except ValueError as e:
raise BadInput(curlst)
#except ValueError as e:
# newlst = None
# print 'Skipping invalid input - {}'.format(str(curlst))
#except Exception as e:
# print 'Unhandled except - {}, aborting...'.format(str(e))
# sys.exit(-2)
return newlst
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: {} <string of integers separated by commas> [<str2> ...]'.format(
sys.argv[0])
sys.exit(-1)
for elmt in sys.argv[1:]:
print parse(elmt)
| [
"james.r.small@outlook.com"
] | james.r.small@outlook.com |
dd5fb141bcd9f73f628e6e312f3c1f15a25b8810 | 7bc6fcbb3da7bcc32e11286a5d22718c4c9f8b33 | /data_collection/gazette/spiders/sp_sao_roque.py | ba8b30302a0ff864ce82a0afc7def0fe6860de62 | [
"MIT"
] | permissive | tiagofer/querido-diario | c6db924f6963788679c76489abd1922fab6da92f | feef1d36d540b052ec0b178015872a215352ba80 | refs/heads/main | 2023-01-31T00:07:31.814159 | 2020-11-20T02:33:03 | 2020-11-20T02:33:03 | 310,901,697 | 1 | 0 | MIT | 2020-11-07T17:55:15 | 2020-11-07T17:55:15 | null | UTF-8 | Python | false | false | 275 | py | from gazette.spiders.instar_base import BaseInstarSpider
class SpSaoRoqueSpider(BaseInstarSpider):
TERRITORY_ID = "3550605"
name = "sp_sao_roque"
allowed_domains = ["saoroque.sp.gov.br"]
start_urls = ["https://www.saoroque.sp.gov.br/portal/diario-oficial"]
| [
"noreply@github.com"
] | noreply@github.com |
967dc456ae8754460e5768a8eb7b68d269bb5fd9 | d4bbbb07826fd11d071624761c3a452e431cec8f | /models/process_data.py | 398631223021b2ea0a47c8b791f81c6922aaaaa5 | [
"MIT"
] | permissive | planetnest/epl-prediction | ecb88fb1b9fbea8d93637a547fb559b004f29bb7 | ffd4eb626d18829df49e07663ef74cd3735ca9d3 | refs/heads/master | 2021-07-06T19:07:14.132246 | 2017-09-27T23:45:15 | 2017-09-27T23:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,335 | py | import os.path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from .preprocess import process_data
DATASET_DIR = '../datasets/'
DATA_FILES = ['epl-2015-2016.csv', 'epl-2016-2017.csv', 'epl-2017-2018.csv']
CURR_SEASON_DATA = os.path.join(DATASET_DIR, DATA_FILES[-1])
USELESS_ROWS = ['Referee', 'Div', 'Date', 'HomeTeam', 'AwayTeam']
def load_data():
dataset = pd.read_csv(CURR_SEASON_DATA)
dataset.drop(USELESS_ROWS, axis=1, inplace=True)
for d_file in DATA_FILES[:-1]:
d_file = os.path.join(DATASET_DIR, d_file)
data = pd.read_csv(d_file)
data.drop(USELESS_ROWS, axis=1, inplace=True)
dataset = pd.concat([dataset, data])
return dataset
def get_remaining_features(home, away):
df = pd.read_csv(CURR_SEASON_DATA)
# Home team and Away team
home_team = df['HomeTeam'].values
away_team = df['AwayTeam'].values
# Get the indexes for home and away team
home_idx = get_index(home_team.tolist(), home)
away_idx = get_index(away_team.tolist(), away)
# Drop string columns
df.drop(['Div', 'Date', 'HomeTeam', 'AwayTeam', 'FTR', 'HTR', 'Referee'], axis=1, inplace=True)
# Get rows where the home and away team shows up respectively
home_data = df.values[home_idx]
away_data = df.values[away_idx]
return np.average(home_data, axis=0), np.average(away_data, axis=0)
def get_index(teams, value):
value = value.title()
indexes = [i for i, team in enumerate(teams) if team == value]
return indexes
def preprocess_features(X):
# init new output dataframe
"""
Cleans up any non-numeric data.
:param X:
Features to be cleaned.
:return: output `pd.DataFrame`
A new pandas DataFrame object with clean numeric values.
"""
output = pd.DataFrame(index=X.index)
# investigate each feature col for data
for col, col_data in X.iteritems():
# if data is categorical, convert to dummy variables
if col_data.dtype == object:
print('obj lets get dummies')
col_data = pd.get_dummies(col_data, prefix=col)
# collect the converted cols
output = output.join(col_data)
return output
def process(filename=None, test_size=None, train_size=None):
"""
Process data into training and testing set.
:param filename: str or None (default is None)
The path to the `csv` file which contains the dataset. If
set to None, it will load all the datasets.
:param test_size: float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
:param train_size: float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
:return: X_train, X_test, y_train, y_test
`np.ndarray` o
"""
if filename:
data = pd.read_csv(filename)
else:
data = load_data()
print(data.columns.values)
# FTR = full time result
X_all = data.drop(['FTR'], axis=1)
y_all = data['FTR']
X_all = process_data(X_all)
# Split into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
test_size=test_size, train_size=train_size,
random_state=42, stratify=y_all)
return np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)
if __name__ == '__main__':
# home_data, away_data = get_remaining_features(home='arsenal', away='chelsea')
# print(home_data, '\n')
# print(away_data)
# data = load_data()
# print(data.tail(3))
X_train, X_test, y_train, y_test = process(filename=None)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
| [
"javafolabi@gmail.com"
] | javafolabi@gmail.com |
84fe21610da001e48a5e0167042b87fd278622ed | 19a58949170a819cd9d427b4ec121dc5bae87035 | /post/api/serializers.py | c3457dcc242b7d5c7bd7917f0e49962a0609c5f4 | [] | no_license | Shamsullo/social_media | 54f1cf3a5dccd93813e5ee4cd40875f68288c177 | dbf6cfab5a699a5e51bcd6a0228d42dcf5201b2d | refs/heads/main | 2023-04-13T20:20:30.235865 | 2021-04-23T19:12:30 | 2021-04-23T19:12:30 | 360,502,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | from datetime import datetime
from django.db.models import Count
from rest_framework import serializers
from ..models import Post, PostLike
class PostSerializer(serializers.ModelSerializer):
# author = serializers.SerializerMethodField(read_only=True)
likes = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Post
fields = ['id', 'author', 'content', 'image', 'likes', 'timestamp']
def get_likes(self, post):
return post.likes.count()
# def get_author(self, obj):
# return self.context.get('request').user.id
class SinglePostAnalyticsSerializer(serializers.ModelSerializer):
likes = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Post
fields = ['id', 'author', 'likes']
def get_likes(self, post):
params = self.context.get('request').GET
date_from = datetime.strptime(params['date_from'],
'%Y-%m-%d').astimezone()
date_to = datetime.strptime(params['date_to'],
'%Y-%m-%d').astimezone()
likes_by_day = (
PostLike.objects
.filter(post=post, timestamp__gte=date_from,timestamp__lte=date_to)
.values('timestamp__day')
.annotate(likes=Count('timestamp__day'))
.values('timestamp__date', 'likes')
)
return likes_by_day
| [
"shamsulloismatov@gmail.com"
] | shamsulloismatov@gmail.com |
e95aec4dfc086b6babea0099f94c9fd140043879 | 743c897a6e6094759cad6c28169482089e454ca5 | /agent/ddqn_prioritized_replay_agent.py | 76c8c31b597adedb420c7efc1bb2e2eeef599d51 | [] | no_license | frandres/Udacity_DeepRL_p1_navigation | 3c9fb72c5fd45d50ff08a19b11dec3940f814d98 | 3654cd33aa12c6817edd9d5a9aeac3a55ba2d286 | refs/heads/main | 2023-04-03T14:21:01.416803 | 2021-05-02T07:58:22 | 2021-05-02T07:58:22 | 363,205,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,958 | py | import numpy as np
import random
from collections import namedtuple
from agent.dqn_model import QNetwork
import torch
import torch.optim as optim
from torch import nn
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # default minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def annealing_generator(start: float,
end: float,
factor: float):
decreasing = start > end
eps = start
while True:
yield eps
f = max if decreasing else min
eps = f(end, factor*eps)
class Agent():
'''
DDQN Agent for solving the navegation system project.
Skeleton adapted from Udacity exercise sample code.
'''
"""Interacts with and learns from the environment."""
def __init__(self,
state_size,
action_size,
hyperparams,
seed=13):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.epsilon_gen = annealing_generator(start=hyperparams['eps_start'],
end=hyperparams['eps_end'],
factor=hyperparams['eps_decay'])
self.beta_gen = annealing_generator(start=hyperparams['beta_start'],
end=hyperparams['beta_end'],
factor=hyperparams['beta_factor'])
# Q-Network
self.qnetwork_local = QNetwork(state_size,
action_size,
hyperparams['topology'],
seed).to(device)
self.qnetwork_target = QNetwork(state_size,
action_size,
hyperparams['topology'],
seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.batch_size = hyperparams.get('batch_size', BATCH_SIZE)
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE,
self.batch_size,
seed,
per_epsilon=hyperparams.get(
'per_epsilon'),
per_alpha=hyperparams.get('per_alpha'))
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.criterion = nn.MSELoss(reduce=False)
def step(self,
state: torch.Tensor,
action: int,
reward: float,
next_state: torch.Tensor,
done: bool):
'''
Function to be called after every interaction between the agent
and the environment.
Updates the memory and learns.
'''
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > self.batch_size:
self.learn(GAMMA)
def act(self,
state: torch.Tensor,
training: bool = True):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
training (bool): whether the agent is training or not.
"""
eps = next(self.epsilon_gen)
self.beta = next(self.beta_gen)
rand = random.random()
if training and rand < eps:
# eps greedy exploration.
return random.choice(np.arange(self.action_size))
else:
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
return np.argmax(action_values.cpu().data.numpy())
def learn(self, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
gamma (float): discount factor
"""
"""Update value parameters using given batch of experience tuples.
Params
======
gamma (float): discount factor
"""
memory_indices, priorities, experiences = self.memory.sample()
states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
self.optimizer.zero_grad()
output = self.qnetwork_local.forward(states).gather(1, actions)
# Build the targets
# Use the local network for calculating the indices of the max.
self.qnetwork_local.eval()
with torch.no_grad():
local_estimated_action_values = self.qnetwork_local(next_states)
local_network_max_indices = torch.max(
local_estimated_action_values, dim=1)[1].reshape(-1, 1)
# Use the target network for using the estimated value.
self.qnetwork_target.eval()
with torch.no_grad():
target_estimated_action_values = self.qnetwork_target(next_states)
estimated_max_value = target_estimated_action_values.gather(
1, local_network_max_indices)
labels = rewards + (1-dones)*gamma*estimated_max_value
self.memory.update_batches(memory_indices, (output-labels))
beta = self.beta
bias_correction = ((1/len(self.memory))*(1/priorities))**beta
bias_correction = bias_correction/torch.max(bias_correction)
loss = (self.criterion(output, labels)*bias_correction).mean()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self,
local_model: nn.Module,
target_model: nn.Module,
tau: float):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
This is an alterative to the original formulation of the DQN
paper, in which the target agent is updated with the local
model every X steps.
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(
tau*local_param.data + (1.0-tau)*target_param.data)
class SumTree(object):
'''
SumTree for efficiently performing weighted sampling.
Adapted from https://pylessons.com/CartPole-PER/
'''
# Here we initialize the tree with all nodes = 0, and initialize the data with all values = 0
def __init__(self, capacity):
# Number of leaf nodes (final nodes) that contains experiences
self.capacity = capacity
# Generate the tree with all nodes values = 0
# To understand this calculation (2 * capacity - 1) look at the schema below
# Remember we are in a binary node (each node has max 2 children) so 2x size of leaf (capacity) - 1 (root node)
# Parent nodes = capacity - 1
# Leaf nodes = capacity
self.tree = np.zeros(2 * capacity - 1)
self.data_pointer = 0 # Pointer to the next leave to update.
# Contains the experiences (so the size of data is capacity)
self.data = [None]*capacity
def add(self, priority, data):
# Look at what index we want to put the experience
tree_index = self.data_pointer + (self.capacity - 1)
""" tree:
0
/ \
0 0
/ \ / \
tree_index 0 0 0 We fill the leaves from left to right
"""
# Update data frame
self.data[self.data_pointer] = data
# Update the leaf
self.update(tree_index, priority)
# Add 1 to data_pointer
self.data_pointer += 1
# If we're above the capacity, we go back to first index (we overwrite)
if self.data_pointer >= self.capacity:
self.data_pointer = 0
def update(self, tree_index, priority):
# Change = new priority score - former priority score
change = priority - self.tree[tree_index]
self.tree[tree_index] = priority
# then propagate the change through tree
# this method is faster than the recursive loop
while tree_index != 0:
tree_index = (tree_index - 1) // 2
self.tree[tree_index] += change
def get_leaf(self, v):
parent_index = 0
while True:
left_child_index = 2 * parent_index + 1
right_child_index = left_child_index + 1
# If we reach bottom, end the search
if left_child_index >= len(self.tree):
leaf_index = parent_index
break
else: # downward search, always search for a higher priority node
if v <= self.tree[left_child_index]:
parent_index = left_child_index
else:
v -= self.tree[left_child_index]
parent_index = right_child_index
data_index = leaf_index - self.capacity + 1
return leaf_index, self.tree[leaf_index], self.data[data_index]
@property
def total_priority(self):
return self.tree[0] # Returns the root node
@property
def maximum_priority(self):
return np.max(self.tree[-self.capacity:]) # Returns the root node
def __len__(self):
"""Return the current size of internal memory."""
return np.sum(~(self.tree[-self.capacity:] == 0))
class PrioritizedReplayBuffer:
"""Fixed-size buffer to store experience tuples.
Leverages a SumTree for efficiently sampling."""
def __init__(self,
buffer_size,
batch_size,
seed,
per_epsilon: float = None,
per_alpha: float = None,):
"""Initialize a PrioritizedReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.tree = SumTree(buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=[
"state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
self.per_epsilon = per_epsilon or 0.0001
self.per_alpha = per_alpha or 0
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
maximum_priority = self.tree.maximum_priority + \
self.per_epsilon # TODO use clipped abs error?
if maximum_priority == 0:
maximum_priority = 1
self.tree.add(maximum_priority, e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = []
indices = []
priorities = []
# We divide the priority into buckets and sample from each of those
segments = self.tree.total_priority/self.batch_size
values = []
for i in range(self.batch_size):
value = random.uniform(i*segments, (i+1)*segments)
leaf_index, priority, data = self.tree.get_leaf(value)
experiences.append(data)
indices.append(leaf_index)
priorities.append(priority)
values.append(value)
states = torch.from_numpy(
np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(
np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(
np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack(
[e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack(
[e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return indices, torch.Tensor(priorities), (states, actions, rewards, next_states, dones)
def update_batches(self, indices, errors):
for index, error in zip(indices, errors.detach().numpy()):
self.tree.update(
index, (abs(error)+self.per_epsilon)**self.per_alpha)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.tree)
| [
"francisco.rodriguez@glovoapp.com"
] | francisco.rodriguez@glovoapp.com |
95a41e8e1bf20627b0ddfce2c69ca3075ee1ef0b | afd6cd04f45720ec0271acc2af23aa508583cde1 | /bloom_database/lib/bloom_database/models/models.py | 4b96c11579e6c5f6ff32da0819701965f7a91598 | [] | no_license | misodope/BloomCreditChallenge | 15e8518ab156b3d2540f2803ef46dc150f45fb03 | 98567354c5e88c31700dce8bce3f214316a9212e | refs/heads/main | 2023-06-30T05:24:57.546938 | 2021-08-09T15:55:33 | 2021-08-09T15:55:33 | 392,828,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,466 | py | from . import db
from sqlalchemy.dialects import postgresql
from sqlalchemy import Integer, Text
class CreditRecords(db.Model):
__tablename__ = 'credit_records'
uuid = db.Column(Text, primary_key=True)
name = db.Column(Text)
ssn = db.Column(Text)
x0001 = db.Column(Integer)
x0002 = db.Column(Integer)
x0003 = db.Column(Integer)
x0004 = db.Column(Integer)
x0005 = db.Column(Integer)
x0006 = db.Column(Integer)
x0007 = db.Column(Integer)
x0008 = db.Column(Integer)
x0009 = db.Column(Integer)
x0010 = db.Column(Integer)
x0011 = db.Column(Integer)
x0012 = db.Column(Integer)
x0013 = db.Column(Integer)
x0014 = db.Column(Integer)
x0015 = db.Column(Integer)
x0016 = db.Column(Integer)
x0017 = db.Column(Integer)
x0018 = db.Column(Integer)
x0019 = db.Column(Integer)
x0020 = db.Column(Integer)
x0021 = db.Column(Integer)
x0022 = db.Column(Integer)
x0023 = db.Column(Integer)
x0024 = db.Column(Integer)
x0025 = db.Column(Integer)
x0026 = db.Column(Integer)
x0027 = db.Column(Integer)
x0028 = db.Column(Integer)
x0029 = db.Column(Integer)
x0030 = db.Column(Integer)
x0031 = db.Column(Integer)
x0032 = db.Column(Integer)
x0033 = db.Column(Integer)
x0034 = db.Column(Integer)
x0035 = db.Column(Integer)
x0036 = db.Column(Integer)
x0037 = db.Column(Integer)
x0038 = db.Column(Integer)
x0039 = db.Column(Integer)
x0040 = db.Column(Integer)
x0041 = db.Column(Integer)
x0042 = db.Column(Integer)
x0043 = db.Column(Integer)
x0044 = db.Column(Integer)
x0045 = db.Column(Integer)
x0046 = db.Column(Integer)
x0047 = db.Column(Integer)
x0048 = db.Column(Integer)
x0049 = db.Column(Integer)
x0050 = db.Column(Integer)
x0051 = db.Column(Integer)
x0052 = db.Column(Integer)
x0053 = db.Column(Integer)
x0054 = db.Column(Integer)
x0055 = db.Column(Integer)
x0056 = db.Column(Integer)
x0057 = db.Column(Integer)
x0058 = db.Column(Integer)
x0059 = db.Column(Integer)
x0060 = db.Column(Integer)
x0061 = db.Column(Integer)
x0062 = db.Column(Integer)
x0063 = db.Column(Integer)
x0064 = db.Column(Integer)
x0065 = db.Column(Integer)
x0066 = db.Column(Integer)
x0067 = db.Column(Integer)
x0068 = db.Column(Integer)
x0069 = db.Column(Integer)
x0070 = db.Column(Integer)
x0071 = db.Column(Integer)
x0072 = db.Column(Integer)
x0073 = db.Column(Integer)
x0074 = db.Column(Integer)
x0075 = db.Column(Integer)
x0076 = db.Column(Integer)
x0077 = db.Column(Integer)
x0078 = db.Column(Integer)
x0079 = db.Column(Integer)
x0080 = db.Column(Integer)
x0081 = db.Column(Integer)
x0082 = db.Column(Integer)
x0083 = db.Column(Integer)
x0084 = db.Column(Integer)
x0085 = db.Column(Integer)
x0086 = db.Column(Integer)
x0087 = db.Column(Integer)
x0088 = db.Column(Integer)
x0089 = db.Column(Integer)
x0090 = db.Column(Integer)
x0091 = db.Column(Integer)
x0092 = db.Column(Integer)
x0093 = db.Column(Integer)
x0094 = db.Column(Integer)
x0095 = db.Column(Integer)
x0096 = db.Column(Integer)
x0097 = db.Column(Integer)
x0098 = db.Column(Integer)
x0099 = db.Column(Integer)
x0100 = db.Column(Integer)
x0101 = db.Column(Integer)
x0102 = db.Column(Integer)
x0103 = db.Column(Integer)
x0104 = db.Column(Integer)
x0105 = db.Column(Integer)
x0106 = db.Column(Integer)
x0107 = db.Column(Integer)
x0108 = db.Column(Integer)
x0109 = db.Column(Integer)
x0110 = db.Column(Integer)
x0111 = db.Column(Integer)
x0112 = db.Column(Integer)
x0113 = db.Column(Integer)
x0114 = db.Column(Integer)
x0115 = db.Column(Integer)
x0116 = db.Column(Integer)
x0117 = db.Column(Integer)
x0118 = db.Column(Integer)
x0119 = db.Column(Integer)
x0120 = db.Column(Integer)
x0121 = db.Column(Integer)
x0122 = db.Column(Integer)
x0123 = db.Column(Integer)
x0124 = db.Column(Integer)
x0125 = db.Column(Integer)
x0126 = db.Column(Integer)
x0127 = db.Column(Integer)
x0128 = db.Column(Integer)
x0129 = db.Column(Integer)
x0130 = db.Column(Integer)
x0131 = db.Column(Integer)
x0132 = db.Column(Integer)
x0133 = db.Column(Integer)
x0134 = db.Column(Integer)
x0135 = db.Column(Integer)
x0136 = db.Column(Integer)
x0137 = db.Column(Integer)
x0138 = db.Column(Integer)
x0139 = db.Column(Integer)
x0140 = db.Column(Integer)
x0141 = db.Column(Integer)
x0142 = db.Column(Integer)
x0143 = db.Column(Integer)
x0144 = db.Column(Integer)
x0145 = db.Column(Integer)
x0146 = db.Column(Integer)
x0147 = db.Column(Integer)
x0148 = db.Column(Integer)
x0149 = db.Column(Integer)
x0150 = db.Column(Integer)
x0151 = db.Column(Integer)
x0152 = db.Column(Integer)
x0153 = db.Column(Integer)
x0154 = db.Column(Integer)
x0155 = db.Column(Integer)
x0156 = db.Column(Integer)
x0157 = db.Column(Integer)
x0158 = db.Column(Integer)
x0159 = db.Column(Integer)
x0160 = db.Column(Integer)
x0161 = db.Column(Integer)
x0162 = db.Column(Integer)
x0163 = db.Column(Integer)
x0164 = db.Column(Integer)
x0165 = db.Column(Integer)
x0166 = db.Column(Integer)
x0167 = db.Column(Integer)
x0168 = db.Column(Integer)
x0169 = db.Column(Integer)
x0170 = db.Column(Integer)
x0171 = db.Column(Integer)
x0172 = db.Column(Integer)
x0173 = db.Column(Integer)
x0174 = db.Column(Integer)
x0175 = db.Column(Integer)
x0176 = db.Column(Integer)
x0177 = db.Column(Integer)
x0178 = db.Column(Integer)
x0179 = db.Column(Integer)
x0180 = db.Column(Integer)
x0181 = db.Column(Integer)
x0182 = db.Column(Integer)
x0183 = db.Column(Integer)
x0184 = db.Column(Integer)
x0185 = db.Column(Integer)
x0186 = db.Column(Integer)
x0187 = db.Column(Integer)
x0188 = db.Column(Integer)
x0189 = db.Column(Integer)
x0190 = db.Column(Integer)
x0191 = db.Column(Integer)
x0192 = db.Column(Integer)
x0193 = db.Column(Integer)
x0194 = db.Column(Integer)
x0195 = db.Column(Integer)
x0196 = db.Column(Integer)
x0197 = db.Column(Integer)
x0198 = db.Column(Integer)
x0199 = db.Column(Integer)
x0200 = db.Column(Integer)
| [
"misodope@cap-rx.com"
] | misodope@cap-rx.com |
94dbeb2675acafae474f9db4dcc4d4115a25d94f | ecae7275fd43ec93ca5771083e05ae864685faf9 | /list/list_multiplication.py | e7375b7967c1e5ea8c97cb6557e6b9a2c5eae460 | [] | no_license | shamoldas/pythonBasic | 104ca8d50099c2f511802db1f161f6d050f879cc | 3a7252a15f6d829f55700ec2ff7f7d153f3ec663 | refs/heads/main | 2023-01-09T06:38:55.357476 | 2020-11-11T12:27:31 | 2020-11-11T12:27:31 | 311,960,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | import numpy as np
a = [1,2,3,4]
b = [2,3,4,5]
d=a*b
print(d)
c=np.multiply(a,b)
print(c)
| [
"noreply@github.com"
] | noreply@github.com |
8bf6f30a0b6898775a955c99c1135e2fb41fbb1c | 9f46d82b1bbb561d663fbdbaa14331b9193fb18d | /buses/migrations/0002_auto_20200903_0438.py | eba7853d4e57eefbd553a172fc37a6f95240605f | [] | no_license | ziaurjoy/simple-class-based | 32012b56bb727ca5891d3938b024cdda4c4f30c8 | 9fd881d83e2e573c7974caeefc89bb7b03a78a05 | refs/heads/master | 2022-12-07T23:50:03.114676 | 2020-09-07T14:11:06 | 2020-09-07T14:11:06 | 293,546,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | # Generated by Django 3.1 on 2020-09-03 04:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('buses', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bussescompany',
options={'verbose_name_plural': 'bus companis'},
),
migrations.CreateModel(
name='Bus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('serial_number', models.CharField(db_index=True, max_length=15)),
('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buses.bussescompany')),
],
),
]
| [
"ziaurjoy802@gmail.com"
] | ziaurjoy802@gmail.com |
55a76b7c36242321864d93bf1b2295f2d8f36aec | 49fab88db7591c9d57d129a43b29f95f4c8ec7a7 | /geo-graph-part-clean/main.py | 76f91be69a24f2d3cd3c8d74d78bf32cd82ecc01 | [] | no_license | thomasxu14/geo-graph-part-clean | ee09c1eb04037d882df459e8ae77c30caec017e5 | 9f20ff0694f49f822a12648f212717b608fb457e | refs/heads/master | 2020-03-22T03:56:02.337574 | 2018-07-02T15:31:53 | 2018-07-02T15:31:53 | 139,461,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,548 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 11:28:34 2018
@author: A671118
"""
import igraph as ig
import coarse
import random
from graph_utils import visualize, delete_isolated
import data.graph_generator as gen
import numpy as np
from LP import partitionLP
import refining as ref
nData = 100
nTasks = 30
edgeProb = 0.10
outgoingEdges = 2
avgDataDep = 4
stddDataDep = 2
tolerance = 0.01
g = gen.generate_realistic(nData, nTasks, avgDataDep, stddDataDep)
# g = gen.generate_graph(nData, nTasks, edgeProb)
#g = gen.generate_barabasi(nData + nTasks, outgoingEdges)
# from data.testgraph3 import *
# delete_isolated(g)
volumes = g.vs["volume"]
maxVolume = max(volumes)
nVertices = len(g.vs)
nEdges = len(g.es)
visual_style = {}
original_layout = g.layout("fr")
visual_style["layout"] = original_layout
visual_style["vertex_size"] = [8 * (1 + volumes[i]/maxVolume) for i in range(nVertices)]
visual_style["bbox"] = (500, 500)
visual_style["margin"] = 20
visualize(g, visual_style, original_graph=True)
ig.plot(g, "viz/graph.png", **visual_style)
ig.plot(g, **visual_style)
nClusters = 10
clusters = range(nClusters)
costexp = 30
coststdd = 30
cost = [[0 for c1 in range(nClusters)] for c2 in range(nClusters)]
for c1 in range(nClusters):
for c2 in range(c1+1, nClusters):
cost[c1][c2] = max(round(random.gauss(costexp, coststdd),2),2.)
cost[c2][c1] = cost[c1][c2]
cost = np.array(cost)
print("Cost matrix:\n%s" % cost)
vertexLimit = 30
graphs = coarse.coarsed_graphs(g, nClusters, quad_assign=True, group_isolated=True)
cg = graphs[-1]
print("Coarsest graphs volumes: %s" % cg.vs["volume"])
volumes = cg.vs["volume"]
maxVolume = max(volumes)
loadPerCluster = sum(volumes) * (1+tolerance) / nClusters
loadLimits = [loadPerCluster for i in range(nClusters)]
nVertices = len(cg.vs)
vertices = range(nVertices)
nEdges = len(cg.es)
visual_style = {}
coarse_layout = cg.layout("fr")
visual_style["layout"] = coarse_layout
visual_style["vertex_size"] = [8 * (1 + volumes[i]/maxVolume) for i in range(nVertices)]
visual_style["bbox"] = (500, 500)
visual_style["margin"] = 20
print("The graph has been coarsened %s times." % (len(graphs)-1))
ig.plot(cg, "viz/coarsest_graph.png", **visual_style)
ig.plot(cg, **visual_style)
# cluster_color = [[random.randint(0,255) for i in range(3)] for cl in range(nClusters)]
cluster_color = ["#%06x" % random.randint(0, 0xFFFFFF) for cl in range(nClusters)]
print("Colors: %s" % cluster_color)
placement = partitionLP(cost, cg, tolerance, relaxed=0, quad_assign=True)
partition = []
for vertex in range(len(cg.vs)):
maxProb = 0
maxPlacement = -1
for c in range(nClusters):
if placement[vertex][c] > maxProb:
maxProb = placement[vertex][c]
maxPlacement = c
partition.append(maxPlacement)
checkCost = ref.edgeCut(cg, partition, cost, nClusters)
print("Verifying total cost: " + str(checkCost))
visual_style["vertex_size"] = 20
visual_style["bbox"] = (600, 600)
visual_style["margin"] = 60
volumes = cg.vs["volume"]
maxVolume = max(volumes)
visual_style["vertex_size"] = [10 + volumes[i]*50/(2*maxVolume) for i in range(len(cg.vs))]
visualize(cg, visual_style, partition=partition, color_dict=cluster_color, colored=True)
ig.plot(cg, "viz/initial_partition.png", **visual_style)
ig.plot(cg, **visual_style)
partitions = [partition]
partCosts = [checkCost]
for i in range(len(graphs)-1, 0, -1):
fg = graphs[i-1]
cg = graphs[i]
newPart = coarse.uncoarsen(cg, fg, partitions[0])
volumes = fg.vs["volume"]
maxVolume = max(volumes)
newPart = ref.K_L(fg, newPart, nClusters, cost, loadLimits, more_balanced=True)
partitions.insert(0, newPart)
visual_style = {}
layout = fg.layout("fr")
visual_style["layout"]=layout
visual_style["vertex_size"] = [8 * (1 + volumes[i]/maxVolume) for i in range(len(fg.vs))]
visualize(fg, visual_style, partition=newPart, color_dict=cluster_color, original_graph=(i==1), colored=True)
ig.plot(fg, "viz/partition%s.png" % i, **visual_style)
print("Free Space: %s" % ref.freeSpace(fg, newPart, loadLimits))
partCosts.insert(0, ref.edgeCut(fg, newPart, cost, nClusters))
print("\nFinal partition: %s" % partitions[0])
visual_style["layout"] = original_layout
ig.plot(g, **visual_style)
print("Final cost: %s" % ref.edgeCut(g, partitions[0], cost, nClusters))
print("Partial costs: %s" % partCosts) | [
"noreply@github.com"
] | noreply@github.com |
b6aa8f21a3a33363272b6cea39b75dea74785535 | 557d35f398f8bfdedae9ec747336ccf7eef872ae | /kg.py | 91045ed92ee663fbaeae9d13b907556bd3c4c235 | [] | no_license | walxc1218/git | 1ffc3ae82a46b8c987cbbf61f6acd364a7f007a7 | 4a043689da2222a1b42ac6a40a508d43e2dabc18 | refs/heads/master | 2020-04-19T16:46:11.264043 | 2019-01-30T08:07:30 | 2019-01-30T08:07:30 | 168,314,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | b = input("请输入一个字符串:")
print(b[0])
print(b[len(b)-1])
if len(b) % 2 == 1:
print(le)
| [
"jialeiboy@163.com"
] | jialeiboy@163.com |
314e10e1247fac1da48844e2bdf3687af9894a4c | 7548442e3c36039fe87a3a3b5e5056000a92f061 | /first_project/settings.py | a1cc9ff086886f64ed1f8d665f20d12a1c91e9fe | [] | no_license | fastaro/ffwebsite | 8ee5738fbd08f66807af2fe6b6af4c8aecdf8b39 | b6de58ee55582cdc7979d9f7913223d0d5aa41ab | refs/heads/master | 2023-04-16T17:19:25.004277 | 2021-04-26T13:59:52 | 2021-04-26T13:59:52 | 361,766,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | """
Django settings for first_project project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR = os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'btd2yma5)cb@)3f28n!_ur$$l$ryb1*a-(dzl7+ht^h_7ba0s&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['https://thawing-temple-77223.herokuapp.com/', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'livereload',
'first_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'first_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'first_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
django_heroku.settings(locals())
| [
"robertfustero@gmail.com"
] | robertfustero@gmail.com |
37c437109dc0a5eb263881371b0d2111b53e300d | 4bed9370c2ca2355bbcacd900f00934707a27151 | /ChestRayXNet/code/data_preproces.py | daca354457950e66ebda3e2fffe3d78bdd884579 | [] | no_license | NoicFank/ChestRayXNet | 0bd529d90cf45e038cb674c4e26e5610a36f33d2 | 07b1d9169b3e9d596d06e6ff53049ef878bc4f63 | refs/heads/master | 2020-03-12T20:23:05.883865 | 2018-04-23T14:02:20 | 2018-04-23T14:02:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,102 | py | """Provides utilities to preprocess images for networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def preprocess_for_train(image, height, width, scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not effect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it would converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image.set_shape([None, None, 3])
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(image, [height, width],
align_corners=True) ## remember in original this option is False
image = tf.squeeze(image, [0])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(image)
base_color_scale = tf.constant([255.0])
image = tf.subtract(image, base_color_scale)
# the mean and std of ImageNet is as fellow:
total_mean = tf.constant([0.485, 0.456, 0.406])
std = tf.constant([0.229, 0.224, 0.225])
# in chest x ray dataset, the total mean and std is as fellow:
# total_mean = tf.constant([126.973])
# std = tf.constant([66.0])
image = tf.subtract(image, total_mean)
image = tf.div(image, std)
return image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would cropt the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it would converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(image, [height, width],
align_corners=True) ## remember in original this option is False
image = tf.squeeze(image, [0])
## keep the mean and std the seem as train:
base_color_scale = tf.constant([255.0])
image = tf.subtract(image, base_color_scale)
# the mean and std of ImageNet is as fellow:
total_mean = tf.constant([0.485, 0.456, 0.406])
std = tf.constant([0.229, 0.224, 0.225])
image = tf.subtract(image, total_mean)
image = tf.div(image, std)
return image
def preprocess_image(image, height, width,
is_training=False,
bbox=None,
fast_mode=True):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image.
height: integer, image expected height.
width: integer, image expected width.
is_training: Boolean. If true it would transform an image for train,
otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
# image = tf.image.resize_image_with_crop_or_pad(image, height, width)
if is_training:
return preprocess_for_train(image, height, width)
else:
return preprocess_for_eval(image, height, width)
| [
"rubans@Rubans.local"
] | rubans@Rubans.local |
e411a3f2ff7be97ff72496885a1285324ae4b0cd | b40d1a26ea04a19ec0da7bf55db84b7ee36cc898 | /leetcode.com/python/895_Maximum_Frequency_Stack.py | b083d8b11400faa43d9540631aac9d70eb9f35a3 | [
"MIT"
] | permissive | partho-maple/coding-interview-gym | 5e8af7d404c28d4b9b52e5cffc540fd51d8025cf | 20ae1a048eddbc9a32c819cf61258e2b57572f05 | refs/heads/master | 2022-09-11T16:36:01.702626 | 2022-03-14T08:39:47 | 2022-03-14T08:39:47 | 69,802,909 | 862 | 438 | MIT | 2022-08-18T06:42:46 | 2016-10-02T14:51:31 | Python | UTF-8 | Python | false | false | 797 | py | from collections import defaultdict
import heapq
class FreqStack(object):
def __init__(self):
self.counter = defaultdict(int)
self.stackIdx = -1 # initially the stack is empty
self.maxHeap = []
def push(self, x):
"""
:type x: int
:rtype: None
"""
self.counter[x] += 1
self.stackIdx += 1
heapq.heappush(self.maxHeap, (-self.counter[x], -self.stackIdx, x))
def pop(self):
"""
:rtype: int
"""
topElement = heapq.heappop(self.maxHeap)
count, idx, x = -topElement[0], -topElement[1], topElement[2]
self.counter[x] -= 1
return x
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop() | [
"partho.biswas@aurea.com"
] | partho.biswas@aurea.com |
a6e979ce1a312de10484182a1911c6a85077783f | 1a89fa74c121a4556c49dde9a205acad0259834d | /patterns/pattern.py | 44c3fc9f8473b5de701a245a3924a08aa5d65f17 | [
"MIT"
] | permissive | cassm/pyzzazz | d0dcfc5bd10c024355119269f21b1d23a0cdac8b | a21a03a670a14847dcc35bac068596613ecd4b1d | refs/heads/master | 2022-06-19T01:54:00.586439 | 2022-05-22T19:16:06 | 2022-05-22T19:16:06 | 179,993,183 | 3 | 1 | null | 2019-05-17T20:52:34 | 2019-04-07T16:34:03 | Python | UTF-8 | Python | false | false | 270 | py | class Pattern:
def set_vars(self, args):
pass
def update(self, leds, time, palette_handler, palette_name):
pass
def cache_positions(self, leds):
pass
def get_pixel_colours(self, leds, time, palette, palette_name):
pass
| [
"ingeniousmammal@gmail.com"
] | ingeniousmammal@gmail.com |
a07d7cf0536d204bd1cda0df92806034ba497890 | e8e8a8c4923ce564f7f7cfc1aab11d6adf711bdf | /Web/DashBoard/main.py | 07398f276c546b129bcdec2ccecaab4465e284a5 | [] | no_license | st2257st2257/Chess | 4b6f15cc5cd3970a14fd8b4515ed8a3b51478163 | 9bd8099f8fbb6e58187fcf1f10a8a0e7a835789a | refs/heads/main | 2023-02-23T23:33:23.596199 | 2020-12-17T10:25:32 | 2020-12-17T10:25:32 | 312,585,090 | 4 | 2 | null | 2020-12-04T21:12:24 | 2020-11-13T13:36:15 | Python | UTF-8 | Python | false | false | 5,098 | py | # import pygame
import pygame
from numpy import pi
import numpy
import random
import socket
import sqlite3
conn = None
cursor = None
vis = 0
def init():
global vis, conn, cursor
conn = sqlite3.connect('example.db')
if vis:
print("...Start connection...")
cursor = conn.cursor()
def cancel():
global vis
conn.commit()
conn.close()
if vis:
print("\n...End connection...")
def get_users():
global vis, conn, cursor
cursor.execute("SELECT * FROM chess_players WHERE id = (SELECT MAX(id) FROM chess_players);");
answer = cursor.fetchone()
return str(answer[0])
def get_games():
global vis, conn, cursor
cursor.execute("SELECT * FROM chess WHERE id = (SELECT MAX(id) FROM chess);");
answer = cursor.fetchone()
return str(answer[0])
def get_request():
global vis, conn, cursor
cursor.execute("SELECT * FROM History WHERE id = (SELECT MAX(id) FROM History);");
answer = cursor.fetchone()
return answer[0]
def get_ping():
global vis, conn, cursor
cursor.execute("SELECT * FROM History WHERE id = (SELECT MAX(id) FROM History);");
answer = cursor.fetchone()
if answer[4] == None:
return 0
else:
return answer[4]
def show_diagram(value, name, min_v, max_v, x, y, r, font_size = 12):
new_font = pygame.font.SysFont('arial', font_size)
angle = pi*int(value)/(max_v - min_v)
pygame.draw.arc(screen, WHITE,[x, y, r*2, r*2], 0, pi, 3)
pygame.draw.line(screen, GREEN, [x + r, y + r], [x + r - r*numpy.cos(angle),
y + r - r*numpy.sin(angle)], 5)
text_1 = new_font.render(str(name) + ": " + str(value), False, (255, 255, 255))
screen.blit(text_1, [x + r/2, y + r/2])
text_2 = new_font.render(str((max_v-min_v)/2), False, (255, 255, 255))
screen.blit(text_2, [x + 5*r/6, y - r/4])
text_3 = new_font.render(str(min_v), False, (255, 255, 255))
screen.blit(text_3, [x, y + 7*r/6])
text_4 = new_font.render(str(max_v), False, (255, 255, 255))
screen.blit(text_4, [x + r*2 - len(str(max_v)*5), y + 7*r/6])
# initialize game engine
pygame.init()
f2 = pygame.font.SysFont('arial', 24)
f3 = pygame.font.SysFont('arial', 36)
f1 = pygame.font.SysFont('arial', 12)
f4 = pygame.font.SysFont('arial', 30)
f5 = pygame.font.SysFont('arial', 18)
window_width=480
window_height=640
animation_increment=10
clock_tick_rate=20
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
# Open a window
size = (window_width, window_height)
screen = pygame.display.set_mode(size)
# Set title to the window
pygame.display.set_caption("DASH BOARD")
dead=False
clock = pygame.time.Clock()
background_image = pygame.image.load("background_4").convert()
angle = pi/4
while(dead==False):
init()
# getting local IP
a = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)),
s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
for event in pygame.event.get():
if event.type == pygame.QUIT:
dead = True
b_b_image = pygame.image.load("b_1_1.jpg").convert()
background = pygame.Surface((window_width-150, window_height-150))
background.blit(b_b_image, [0, 0])
adjusted_IP_text = f5.render("Adjusted IP: ", False, (255, 255, 255))
adjusted_IP = f5.render("192.168.1.74", False, (255, 255, 255))
real_IP_text = f5.render(" Real IP: ", False, (255, 255, 255))
real_IP = f5.render(a, False, (255, 255, 255))
background.blit(adjusted_IP, [150, 420])
background.blit(real_IP, [150, 450])
background.blit(adjusted_IP_text, [50, 420])
background.blit(real_IP_text, [50, 450])
# total users value
text_u1 = f2.render("Total", False, (255, 255, 255))
text_u2 = f2.render("Users: ", False, (255, 255, 255))
text_g1 = f2.render("Total", False, (255, 255, 255))
text_g2 = f2.render("Games: ", False, (255, 255, 255))
background.blit(text_u1, [10, 10])
background.blit(text_u2, [10, 30])
text_value_u = f3.render(str(get_users()), False, (255, 255, 255))
background.blit(text_value_u, [80, 20])
# total games value
background.blit(text_g1, [190, 10])
background.blit(text_g2, [190, 30])
text_value_u = f3.render(str(get_games()), False, (255, 255, 255))
background.blit(text_value_u, [270, 20])
screen.blit(background_image, [0, 0])
screen.blit(background, [75, 75])
pygame.draw.rect(screen, BLACK, [75, 75, window_width-150 , window_height-150], 3)
# boards
show_diagram(get_request(), "Requests", 0, 100000, 150, 250, 100, 18)
show_diagram(get_ping(), "Ping", 0, 5, 270, 400, 50, 12)
show_diagram(get_games(), "Party", 0, 1000, 120, 400, 50, 12)
pygame.display.flip()
clock.tick(clock_tick_rate)
cancel()
| [
"noreply@github.com"
] | noreply@github.com |
2039099e81f34c9b58cb20f1d92e9e1ff970b34b | d67ae9fb7fa47cbcc7e26fb21a18dcb8afc82167 | /easypap-se/plots/runningSableLil.py | a735a5b6db1d9c67fc9b75af027a269efb352671 | [] | no_license | tfurelaud/PAP | 1cc5f3ba94882fcd13dd8ba95e5b63c7be02b110 | ee11c79607e4aa6140e0333872b9ae9bc0e75b44 | refs/heads/master | 2023-01-02T12:59:30.071338 | 2020-10-28T20:40:48 | 2020-10-28T20:40:48 | 308,101,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | #!/usr/bin/env python3
from graphTools import *
from expTools import *
import os
#8 THREADS seq alea 480
easyspap_options = {}
easyspap_options["--kernel "] = ["sable"]
easyspap_options["--variant "] = ["seq -a alea -s 3840"]
omp_icv = {}
omp_icv["OMP_NUM_THREADS="] = [24]
omp_icv["OMP_SCHEDULE="] = ["static"]
execute('./run', omp_icv, easyspap_options, nbrun=1)
easyspap_options = {}
easyspap_options["--kernel "] = ["sable"]
easyspap_options["--variant "] = ["tiled_stable_omp -a alea -s 3840 -g 120"]
omp_icv = {}
omp_icv["OMP_NUM_THREADS="] = [24]
omp_icv["OMP_SCHEDULE="] = ["static"]
execute('./run', omp_icv, easyspap_options, nbrun=1)
#8 THREADS tiled alea 480 | [
"thomas.furelaud@gmail.com"
] | thomas.furelaud@gmail.com |
21122cdf9ef4f1ff2f14dc0b9b81ae7c9dd2aacb | 87152c554687b972b1c951bd93ae27603d66d801 | /74.py | 1f111496d760f1ce002cd8381227d7e97341bd09 | [] | no_license | logeswari-j/python | a983cfcd1b7791f827e3aa47e711fd358b41a49a | 6e965a60468bfab28bd81ce898978af3fbf191a1 | refs/heads/master | 2020-06-07T14:27:02.918526 | 2019-08-03T04:54:44 | 2019-08-03T04:54:44 | 193,041,241 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | import math
aa=float(input())
aa=math.ceil(aa)
print(aa)
| [
"noreply@github.com"
] | noreply@github.com |
0822c6d268c72e35841f6ce36cf3344327461159 | 328b2a4c2a2bfa83367c96afae39a1689b98f033 | /cash_task_app/views.py | 649f9be9871135db228edd03d072769bfbaef8dd | [] | no_license | vincesalazar/Cash-Task | a75699783cb6503a828e0c2a7d63c2f298ee18cf | 4b51828a43f44b1acc03a55bd05cd35f28ad07ff | refs/heads/master | 2022-10-05T05:13:49.892979 | 2020-06-12T06:21:25 | 2020-06-12T06:21:25 | 266,043,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,555 | py | from django.shortcuts import render, redirect
from django.contrib import messages
import bcrypt
from time_man_app.models import User, Task, Collection
from cash_task_app.models import Job
from django.core.paginator import Paginator
"""
TEMPLATES
"""
def index(request):
    """Landing page with the login / registration forms."""
    return render(request, "cash/index.html")
def homepage(request):
    """Main feed: requires an active session; lists all users and all jobs."""
    if 'user_id' not in request.session:
        messages.error(request, 'Must be logged in')
        return redirect('/')
    logged_in = User.objects.get(id=request.session["user_id"])
    context = {
        "all_users": User.objects.all(),
        "user": logged_in,
        "jobs": Job.objects.all(),
    }
    return render(request, "cash/homepage.html", context)
def createJobPage(request):
    """Render the job-creation form.

    NOTE(review): unlike the other pages in this module this view does not
    check for an active session -- confirm anonymous access is intended.
    """
    return render(request, "cash/createJobPage.html")
def pagination(request):
    """Paginated listing of all users, two per page (selected via ?page=N)."""
    paginator = Paginator(User.objects.all(), 2)  # Show 2 users per page.
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, "cash/pagination.html", {"page_obj": page_obj})
def userPage(request, email):
    """Profile page for the user identified by *email*.

    Requires an active session; passes both the viewed user and the
    logged-in user so the template can check ownership of objects.
    """
    if 'user_id' not in request.session:
        messages.error(request, 'Must be logged in')
        return redirect('/')
    user = User.objects.get(email = email)
    loggedUser = User.objects.get(id = request.session['user_id'])
    context = {
        "user": user,
        "loggedUser": loggedUser, #so i can check belongings
    }
    # NOTE(review): template path lacks the "cash/" prefix used everywhere
    # else in this module -- confirm the template really lives at the root.
    return render(request, "userpage.html", context)
""""""
"""
LOGIN / REGISTER / LOGOUT
"""
def register(request):
    """Validate the signup form, create the User (bcrypt-hashed password)
    plus a default "General" task collection, then start a session.
    """
    post = request.POST
    errors = User.objects.basic_validator(post)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/cashtask')
    # E-mails are stored lower-cased so the uniqueness check is case-insensitive.
    lowerCaseEmail = post['email'].lower()
    if User.objects.filter(email = lowerCaseEmail).exists():
        messages.error(request, "That email already exists")
        return redirect('/cashtask')
    capitalizedFirstName = post['first_name'].capitalize()
    capitalizedLastName = post['last_name'].capitalize()
    password = post['password']
    # Only the bcrypt hash is persisted, never the plain-text password.
    pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
    user = User.objects.create(
        first_name = capitalizedFirstName,
        last_name = capitalizedLastName,
        email = lowerCaseEmail,
        password = pw_hash
    )
    # Every new user starts with a default collection.
    Collection.objects.create(
        title = "General",
        desc = "Things that just need to get done.",
        user = user
    )
    request.session['user_id'] = user.id
    return redirect('/cashtask/homepage')
def login(request):
    """Check the submitted credentials and start a session on success."""
    post = request.POST
    lowerEmail = post['email'].lower()
    try:
        user = User.objects.get(email = lowerEmail)
    # NOTE(review): bare except -- narrowing to User.DoesNotExist would
    # avoid masking unrelated errors.
    except:
        messages.error(request, "Please check your password or email.")
        return redirect('/cashtask')
    if bcrypt.checkpw(request.POST['password'].encode(), user.password.encode()):
        request.session["user_id"] = user.id
        return redirect('/cashtask/homepage')
    else:
        messages.error(request, "please check your email and password.")
        return redirect('/cashtask')
def logout(request):
    """Empty the session (logging the user out) and return to the landing page."""
    request.session.clear()
    return redirect('/')
"""
JOB PROCESS
"""
def createJob(request):
    """POST: create a Job owned by the logged-in user, then go to the feed."""
    print("INSIDE CREATE JOB")
    if 'user_id' not in request.session:
        messages.error(request, 'Must be logged in')
        return redirect('/')
    if request.method == "POST":
        post = request.POST
        loggedInUser = User.objects.get(id = request.session["user_id"])
        Job.objects.create(title = post["title"].capitalize(), description = post["description"], price = post["price"], city = post["city"].capitalize(), state = post["state"].upper(), user = loggedInUser)
        return redirect("/cashtask/homepage")
    # NOTE(review): a non-POST request wipes the whole session and redirects
    # to '/homepage' (no '/cashtask' prefix, unlike the rest of this module)
    # -- confirm this logout-on-GET behaviour is intentional.
    else:
        request.session.clear()
        return redirect('/homepage')
# unfinished
def updateJob(request, id):
    """Update an existing job's fields; only the owner may edit.

    Bug fix: the original fell off the end after ``job.save()`` and returned
    None, which makes Django raise "The view didn't return an HttpResponse".
    We now redirect back to the homepage after a successful save.
    """
    if 'user_id' not in request.session:
        messages.error(request, 'Must be logged in')
        return redirect('/')
    if request.method == "POST":
        job = Job.objects.get(id = id)
        # Only the job's owner may modify it.
        if job.user_id != request.session["user_id"]:
            return redirect("/")
        post = request.POST
        job.title = post["title"]
        job.description = post["description"]
        job.price = post["price"]
        job.city = post["city"]
        job.state = post["state"]
        job.save()
        return redirect("/cashtask/homepage")
    # Non-POST requests are bounced to the landing page, as before.
    return redirect("/")
# unfinished
def pinJob(request, id):
    """Toggle the current user's pin on a job.

    Bug fix: the "add" branch previously fell off the end of the function
    (returned None), which is an error for a Django view; both branches now
    redirect to /homepage, matching the original remove-branch redirect.
    """
    if 'user_id' not in request.session:
        messages.error(request, 'Must be logged in')
        return redirect('/')
    user = User.objects.get(id = request.session["user_id"])
    job = Job.objects.get(id = id)
    if job.pinned.filter(user_id = request.session['user_id']).exists():
        job.pinned.remove(user)
    else:
        job.pinned.add(user)
    return redirect("/homepage")
# unfinished
def deleteJob(request, id):
    """Delete a job; only its owner may do so.

    Bug fix: the delete branch previously returned None (a Django view must
    return an HttpResponse); after a successful delete we now redirect to
    the homepage.
    """
    if 'user_id' not in request.session:
        messages.error(request, 'Must be logged in')
        return redirect('/')
    user = User.objects.get(id = request.session["user_id"])
    job = Job.objects.get(id = id)
    if user.id != job.user_id:
        return redirect("/")
    job.delete()
    return redirect("/cashtask/homepage")
| [
"noreply@github.com"
] | noreply@github.com |
7c5f10be6bb29de0efad4fe84a70e7dd2449fd64 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D01B/MSCONSD01BUN.py | eb4f55884635f28f92c53726be35f53eb089349d | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,763 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD01BUN import recorddefs
# UN/EDIFACT MSCONS (metered services consumption report) message layout,
# directory D01B.  Each dict names a segment tag with its min/max repeat
# counts; nested LEVEL lists describe segment groups.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 1, MAX: 9},
        {ID: 'CUX', MIN: 0, MAX: 9},
        {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
        ]},
        {ID: 'NAD', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
            ]},
            {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                {ID: 'COM', MIN: 0, MAX: 9},
            ]},
        ]},
        # UNS separates the header section from the detail section.
        {ID: 'UNS', MIN: 1, MAX: 1},
        {ID: 'NAD', MIN: 1, MAX: 99999, LEVEL: [
            {ID: 'LOC', MIN: 1, MAX: 99999, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
                {ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 9},
                ]},
                {ID: 'CCI', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 99},
                ]},
                {ID: 'LIN', MIN: 0, MAX: 99999, LEVEL: [
                    {ID: 'PIA', MIN: 0, MAX: 9},
                    {ID: 'IMD', MIN: 0, MAX: 9},
                    {ID: 'PRI', MIN: 0, MAX: 9},
                    {ID: 'NAD', MIN: 0, MAX: 9},
                    {ID: 'MOA', MIN: 0, MAX: 9},
                    {ID: 'QTY', MIN: 1, MAX: 9999, LEVEL: [
                        {ID: 'DTM', MIN: 0, MAX: 9},
                        {ID: 'STS', MIN: 0, MAX: 9},
                    ]},
                    {ID: 'CCI', MIN: 0, MAX: 99, LEVEL: [
                        {ID: 'MEA', MIN: 0, MAX: 99},
                        {ID: 'DTM', MIN: 0, MAX: 9},
                    ]},
                ]},
            ]},
        ]},
        {ID: 'CNT', MIN: 0, MAX: 99},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
2c0bdac1bb4f9a3ff2e6e7d54b86516a03ea5aa3 | d8ffc7077b962cef053def9e1bf391b000dd133f | /Linear/input_data.py | 8cd7edb1f9c401a36b05605c7a011c168e0c391c | [] | no_license | nrgdoublex/Statistical-Machine-Learning-Project | 85a5c907bcb32eea27a8f2dc320e57cf938f89b1 | 3523e5342894a2df7049eddea798133ec81ff78a | refs/heads/master | 2021-01-12T17:07:56.031580 | 2017-08-16T17:58:20 | 2017-08-16T17:58:20 | 71,514,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import numpy as np
import matrix_operation as mo
test = False
def read_trainingdata_MS(feature_dim):
    """Load training_data.txt (CSV), split it into features x and target y,
    and min-max normalise both unless the module-level `test` flag is set.

    Returns (x, y, x_min, x_max, y_min, y_max); the min/max values are
    needed later to scale the test set with the same parameters.
    """
    # 'with' guarantees the file is closed (the original leaked the handle);
    # building an explicit list of floats also works under Python 3, where
    # map() returns a lazy iterator rather than a list.
    training_matrix = []
    with open("training_data.txt") as training:
        for line in training:
            training_matrix.append([float(v) for v in line.rstrip('\n').split(',')])
    x = np.array(mo.col_submatrix(training_matrix, 0, feature_dim))
    y = np.array(mo.getcolumnvector(training_matrix, feature_dim))
    if not test:
        # Column-wise min-max scaling of the features, scalar scaling of y.
        x_min = np.min(x, axis=0)
        x_max = np.max(x, axis=0)
        x = (x - x_min) / (x_max - x_min)
        y_min = np.min(y)
        y_max = np.max(y)
        y = (y - y_min) / (y_max - y_min)
    else:
        x_min = x_max = y_min = y_max = 0
    return x, y, x_min, x_max, y_min, y_max
def read_testingdata_MS(feature_dim, x_min, x_max, y_min, y_max):
    """Load testing_data.txt and scale it with the *training* min/max values
    so both sets share the same normalisation.
    """
    # 'with' guarantees the file is closed (the original leaked the handle);
    # an explicit list of floats also works under Python 3's lazy map().
    testing_matrix = []
    with open("testing_data.txt") as testing:
        for line in testing:
            testing_matrix.append([float(v) for v in line.rstrip('\n').split(',')])
    x_test = np.array(mo.col_submatrix(testing_matrix, 0, feature_dim))
    y_test = np.array(mo.getcolumnvector(testing_matrix, feature_dim))
    if not test:
        x_test = (x_test - x_min) / (x_max - x_min)
        y_test = (y_test - y_min) / (y_max - y_min)
    return x_test, y_test
"nrgdoublex@gmail.com"
] | nrgdoublex@gmail.com |
cf8be04dc2453ed60a1c1ed76234c4441a6026e2 | a4ec840eae9ba57a93f3a884186578c2dd29bb68 | /funcs_test_harrismatch.py | 54cf2b6bee7107bff09760f7d81be8a7169c288f | [] | no_license | ian4hu/jump-assistant | e7bbc3b29e43e113e63643dd6397ec363338904f | ca5bdbd561677e0e705d37f818212b488692af7f | refs/heads/master | 2021-09-02T10:40:35.343522 | 2018-01-01T23:52:03 | 2018-01-01T23:52:03 | 115,862,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | from pylab import *
from numpy import *
from PIL import Image
import funcs as harris
from PCV.tools.imtools import imresize
"""
This is the Harris point matching example in Figure 2-2.
"""
# NOTE(review): Python 2 script (print statement below; the '/' divisions
# when halving the image sizes also rely on Python 2 integer semantics).
im1 = array(Image.open("./a.png").convert("L"))
im2 = array(Image.open("./a1.png").convert("L"))
# resize to make matching faster
im1 = imresize(im1,(im1.shape[1]/2,im1.shape[0]/2))
im2 = imresize(im2,(im2.shape[1]/2,im2.shape[0]/2))
# Harris responses, corner points and descriptors for both images
# (descriptor window half-width 5).
wid = 5
harrisim = harris.compute_harris_response(im1,5)
filtered_coords1 = harris.get_harris_points(harrisim,wid+1)
d1 = harris.get_descriptors(im1,filtered_coords1,wid)
harrisim = harris.compute_harris_response(im2,5)
filtered_coords2 = harris.get_harris_points(harrisim,wid+1)
d2 = harris.get_descriptors(im2,filtered_coords2,wid)
print 'starting matching'
matches = harris.match_twosided(d1,d2)
# Plot the two images side by side with lines joining matched points.
figure()
gray()
harris.plot_matches(im1,im2,filtered_coords1,filtered_coords2,matches)
show()
"jixuhu@ctrip.com"
] | jixuhu@ctrip.com |
de91d6fc4ed104ef9cb0733fa0725e3637e56a7e | 88e60d4c463355775a2f1d5959edf91965ded6c2 | /mysqrt.py | 015fe07630af1a689186297a384a94d1f704fe3e | [] | no_license | 924235317/leetcode | 5a6a35c827cec301f6a9829405b9f4ee148acdad | d6b887b1f2fd4713f60e732712f989eeffa9938b | refs/heads/master | 2020-07-27T19:46:41.699959 | 2020-05-26T03:09:44 | 2020-05-26T03:09:44 | 109,564,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py |
def mySqrt(x: int) -> int:
    """Return floor(sqrt(x)) for a non-negative int, via binary search."""
    if x < 2:          # 0 and 1 are their own integer square roots
        return x
    lo, hi = 0, x // 2  # floor(sqrt(x)) <= x//2 for every x >= 2
    while lo < hi:
        # Upper-biased midpoint so the search always makes progress.
        mid = (lo + hi + 1) // 2
        square = mid * mid
        if square > x:
            hi = mid - 1
        elif square < x:
            lo = mid
        else:
            return mid
    return lo


if __name__ == "__main__":
    print(mySqrt(10))
"liuao03@baidu.com"
] | liuao03@baidu.com |
63932968068bc9e0d03dad37b567321d1d050ad4 | c13878ed7dbace40080028bd537d70898cfd303e | /format_data.py | 3700e0f93b4e52d645a0deeb08c0e71804246023 | [
"MIT"
] | permissive | 7568/ORVP | f1b3950375547a42cb0fc8c97c4f994f282a4a31 | 558943bdf416c5a5c21047918928cca58c2c6134 | refs/heads/main | 2023-08-23T04:18:03.380638 | 2021-10-02T08:25:42 | 2021-10-02T08:25:42 | 403,240,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,127 | py | # -*- coding: UTF-8 -*-
"""
Created by louis at 2021/9/13
Description:
"""
import os
import gc
import glob
import torch
from pandas import DataFrame
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pandas as pd
import time
from itertools import islice
from torch.utils.data import Dataset, DataLoader
from multiprocessing import Pool
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
import logging
import resource
import shutil
import os
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(filename='pytorch-baseline.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',
datefmt=datefmt, level=logging.DEBUG)
# import tqdm
tqdm.pandas()
import warnings
from multiprocessing import cpu_count
def get_path_dict(f, v):
    """Map each stock id in *v* to its first parquet file under
    ``{f}/stock_id={id}``; ids without a parquet file are skipped.
    """
    f_dict = {}
    for stock_id in tqdm(v):
        stock_dir = f'{f}/stock_id={stock_id}'
        matches = glob.glob(os.path.join(stock_dir, '*.parquet'))
        if matches:
            f_dict[stock_id] = matches[0]
    return f_dict
# train_idx, valid_idx = train_test_split(train_ds['row_id'], shuffle=True, test_size=0.1, random_state=SEED)
# ds: train.csv里面的数据 f_dict:是 book_train.parquet 里面的数据
# ds: the rows from train.csv; f_dict: paths into book_train.parquet
def process_optiver_ds(ds, f_dict, skip_cols, t_dict):
    """For every (stock, time_id) build a 600-row feature frame (book rows
    re-indexed onto the full 0-599 second grid, left-joined with trades,
    NaNs replaced by 0) and collect the matching target from *ds*.

    Returns (x, y): lists of feature arrays and target Series.
    """
    x = []
    y = []
    full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
    full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
    for stock_id, stock_fnmame in tqdm(f_dict.items()):
        trade_train_ = t_dict.get(stock_id)
        trade_train_ = pd.read_parquet(trade_train_)
        optiver_ds = pd.read_parquet(stock_fnmame)
        time_ids = optiver_ds['time_id'].unique()
        for time_id in time_ids:
            optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
            optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
            optiver_ds_ = pd.merge(optiver_ds_, trade_train_[trade_train_['time_id'] == time_id], how='left',
                                   on='seconds_in_bucket')
            # optiver_ds_.drop(skip_cols)
            # NOTE(review): drop() is not in-place and its result is
            # discarded here, so time_id_x/time_id_y are NOT removed.
            optiver_ds_.drop(['time_id_x', 'time_id_y'], axis=1)
            optiver_ds_ = np.nan_to_num(optiver_ds_)
            row_id = str(stock_id) + '-' + time_id.astype(str)
            r = ds[ds['row_id'] == row_id]['target']
            x.append(optiver_ds_)
            y.append(r)
    return x, y
def chunks(data, SIZE=10000):
    """Yield successive sub-dicts of *data*, each with at most SIZE keys,
    preserving the original key order.
    """
    key_iter = iter(data)
    for _ in range(0, len(data), SIZE):
        yield {key: data[key] for key in islice(key_iter, SIZE)}
def process_book_train_chunk(chunk_ds):
    """Pool worker: run process_optiver_ds on one chunk of the training books
    (uses module-level train_ds / book_skip_columns / trade_train_dict).
    """
    return process_optiver_ds(train_ds, chunk_ds, book_skip_columns, trade_train_dict)
def process_book_test_chunk(chunk_ds):
    """Pool worker: run process_optiver_ds on one chunk of the test books
    (uses module-level test_ds / book_skip_columns / trade_test_dict).
    """
    return process_optiver_ds(test_ds, chunk_ds, book_skip_columns, trade_test_dict)
'''
# 将样本分成4块,每块里面有28条数据
book_train_chunks = [i for i in chunks(book_train_dict, int(len(book_train_dict) / NTHREADS))]
# trade_train_chunks = [i for i in chunks(trade_train_dict, int(len(trade_train_dict) / NTHREADS))]
z = 1 if len(book_test_dict) < NTHREADS else NTHREADS
book_test_chunks = [i for i in chunks(book_test_dict, int(len(book_test_dict) / z))]
# trade_test_chunks = [i for i in chunks(trade_test_dict, int(len(trade_test_dict) / z))]
pool = Pool(NTHREADS) # 创建进程池,最大进程数为 NTHREADS
r = pool.map(process_book_train_chunk, book_train_chunks)
pool.close()
a1, a2 = zip(*r)
pool = Pool(NTHREADS) # 创建进程池,最大进程数为 NTHREADS
r = pool.map(process_book_test_chunk, book_test_chunks)
pool.close()
t_a1, t_a2 = zip(*r)
np_train = a1
np_target = a2'''
# Scaler
# transformers = []
# for i in tqdm(range(np_train.shape[1])):
# a = np.nan_to_num(np_train[train_idx])
# b = np.nan_to_num(np_train[valid_idx])
#
# transformer = StandardScaler() # StandardScaler is very useful!
# np_train[train_idx] = transformer.fit_transform(a)
# np_train[valid_idx] = transformer.transform(b)
# transformers.append(transformer) # Save Scalers for the inference stage
class LSTMModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.

    Three-headed regressor: a Conv1d stack, a bidirectional LSTM, and a
    self-attention-weighted MLP decoder over the concatenated features.
    forward() returns all three heads' outputs.

    Assumes input of shape (batch, 600, input_features_num) -- the Conv1d
    stack uses in_channels=600 and the first Linear uses
    input_features_num (a module-level global) -- TODO confirm.
    """

    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.1):
        super(LSTMModel, self).__init__()
        # self.drop = nn.Dropout(dropout)
        # self.encoder = nn.Embedding(ntoken, ninp)
        # Bidirectional LSTM over [encoded features ++ raw input].
        self.rnn = nn.LSTM(ninp + input_features_num, nhid + input_features_num, nlayers, dropout=dropout,
                           batch_first=True, bidirectional=True)
        # Regression head on the LSTM's last-step output (2x for bidirectional).
        self.regress_rnn = nn.Linear(2 * nhid + 2 * input_features_num, 1)
        # Final decoder over the concatenated conv + LSTM features.
        self.decoder = nn.Sequential(
            nn.Linear(3 * nhid + 2 * input_features_num, nhid + input_features_num),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(nhid + input_features_num, ntoken),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(ntoken, 1),
        )
        # Produces softmax weights with the same width as the concatenated
        # features; applied element-wise in forward().
        self.self_attention = nn.Sequential(
            nn.Linear(3 * nhid + 2 * input_features_num, 10 * (nhid + input_features_num)),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(10 * (nhid + input_features_num), 10 * (nhid + input_features_num)),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(10 * (nhid + input_features_num), 3 * nhid + 2 * input_features_num),
            nn.Softmax(dim=1)
        )
        # self.decoder_1 = nn.Linear(nhid, ntoken)
        # self.decoder_2 = nn.Linear(ntoken, 1)
        # Conv1d stack over the 600 time steps (treated as channels);
        # kernel_size=3 with no padding shrinks the feature length by 2 per layer.
        self.conv1d_relu_stack = nn.Sequential(
            nn.Conv1d(in_channels=600, out_channels=1200, kernel_size=3),
            nn.Dropout(0.1),
            nn.ReLU(), # 9
            nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
            nn.Dropout(0.2),
            nn.ReLU(), # 7
            nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
            nn.Dropout(0.2),
            nn.ReLU(), # 5
            nn.Conv1d(in_channels=1200, out_channels=600, kernel_size=3),
            nn.Dropout(0.1),
            nn.ReLU(), # 3
            nn.Conv1d(in_channels=600, out_channels=nhid, kernel_size=3),
            nn.ReLU(), # 1
        )
        # Regression head on the conv features.
        self.regress_conv = nn.Linear(nhid, 1)
        # Per-time-step feature encoder (applied to the last input dim).
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(input_features_num, ntoken),
            nn.Dropout(0.1),
            nn.ReLU(),
            nn.Linear(ntoken, ninp),
            nn.Dropout(0.2),
            nn.ReLU(),
            nn.Linear(ninp, ninp),
            nn.Dropout(0.2),
            nn.ReLU(),
        )
        self.ninp = ninp
        self.nhid = nhid
        self.nlayers = nlayers

    def forward(self, input):
        """Return (conv head, rnn head, attention-decoder head) outputs."""
        # emb = self.drop(self.encoder(input))
        cov_logits = self.conv1d_relu_stack(input)
        cov_logits = cov_logits.view(cov_logits.shape[0], cov_logits.shape[1])
        regress_conv_out = self.regress_conv(cov_logits)
        logits = self.linear_relu_stack(input)
        # Skip connection: append the raw input to the encoded features.
        logits = torch.cat((logits, input), 2)
        # logits = logits.view(1, len(logits), -1)
        output, hidden = self.rnn(logits)
        # Keep only the last time step of the LSTM output.
        output = output[:, -1, :]
        regress_rnn_out = self.regress_rnn(output)
        new_logits = torch.cat((cov_logits, output), 1)
        attention_output = self.self_attention(new_logits)
        # output = self.drop(output)
        # Element-wise reweighting of the concatenated features.
        new_logits = torch.mul(new_logits, attention_output)
        decoded_out = self.decoder(new_logits)
        # decoded_2 = self.decoder_2(decoded_1)
        return regress_conv_out, regress_rnn_out, decoded_out

    def init_hidden(self, bsz):
        """Fresh zero (h, c) state for a batch of size *bsz*."""
        weight = next(self.parameters())
        return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                weight.new_zeros(self.nlayers, bsz, self.nhid))
# dataloader = DataLoader(transformed_dataset, batch_size=4,
# shuffle=True, num_workers=0)
def rmspe(y_true, y_pred):
    """Root mean squared percentage error of *y_pred* against *y_true*."""
    pct_error = (y_true - y_pred) / y_true
    return np.sqrt(np.mean(np.square(pct_error)))
def RMSPELoss(y_pred, y_true):
    """Differentiable RMSPE for torch tensors.

    The .clone() keeps the returned tensor detached from in-place reuse
    while remaining part of the autograd graph.
    """
    ratio = (y_true - y_pred) / y_true
    return torch.sqrt(torch.mean(ratio ** 2)).clone()
def do_process(optiver_ds, full_seconds_in_bucket, trade__, time_id):
    """Build the per-(stock, time_id) feature frame: book rows re-indexed
    onto the full 0-599 seconds_in_bucket grid, left-joined with the trade
    rows, NaNs filled with 0, then every non-constant column z-scored.

    Returns None when the book has no rows for this time_id.
    """
    optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
    if optiver_ds_.size == 0:
        return None
    optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
    optiver_ds_ = pd.merge(optiver_ds_, trade__[trade__['time_id'] == time_id], how='left',
                           on='seconds_in_bucket')
    # optiver_ds_.drop(skip_cols)
    optiver_ds_ = optiver_ds_.drop(['time_id_x', 'time_id_y', 'seconds_in_bucket'], axis=1).fillna(0)
    # optiver_ds_ = np.nan_to_num(optiver_ds_)
    # TODO: standardise every column
    for i in range(optiver_ds_.shape[1]):
        # NOTE(review): the next line selects column 0 and discards the
        # result -- it looks like a leftover no-op.
        optiver_ds_[lambda df: df.columns[0]]
        # Skip all-zero / constant columns to avoid dividing by a zero std.
        if np.sum(optiver_ds_[lambda df: df.columns[i]]) != 0 and np.std(optiver_ds_[lambda df: df.columns[i]]) != 0:
            a = (optiver_ds_[lambda df: df.columns[i]] - np.mean(optiver_ds_[lambda df: df.columns[i]])) / np.std(optiver_ds_[lambda df: df.columns[i]])
            optiver_ds_[lambda df: df.columns[i]] = a
    return optiver_ds_
def process_train_bach(arg):
    """Pool worker: build the feature frame for one (stock_id, time_id) pair
    and persist both it and the matching target row as parquet files.

    NOTE(review): do_process may return None (empty book slice); the
    to_parquet call below would then raise -- confirm this cannot occur
    for training time_ids.
    """
    # input_0 = []
    # target_0 = []
    stock_id = arg['stock_id']
    time_id = arg['time_id']
    optiver_ds = arg['optiver_ds']
    full_seconds_in_bucket = arg['full_seconds_in_bucket']
    trade_train_ = arg['trade_train_']
    optiver_ds_ = do_process(optiver_ds, full_seconds_in_bucket, trade_train_, time_id)
    row_id = str(stock_id) + '-' + time_id.astype(str)
    np_target = train_ds[train_ds['row_id'] == row_id]
    # output directory for this stock (created beforehand by train_bach)
    path = f"{DATA_PATH}formated_data/{stock_id}/"
    optiver_ds_.to_parquet(f'{path}/{time_id}.parquet')
    np_target.to_parquet(f'{path}/{time_id}_target.parquet')
    return optiver_ds_, np_target
def process_test_bach(time_id, ARGS):
    """Build the feature frame for one test (stock_id, time_id) pair;
    returns None when the book has no rows for this time_id.
    """
    optiver_ds = ARGS['optiver_ds']
    full_seconds_in_bucket = ARGS['full_seconds_in_bucket']
    trade_test_ = ARGS['trade_test_']
    optiver_ds_ = do_process(optiver_ds, full_seconds_in_bucket, trade_test_, time_id)
    return optiver_ds_
def train_bach(epoch):
    """Pre-process every stock in book_train_dict: for each time_id build
    the merged book+trade feature frame (process_train_bach, run in an
    8-worker pool) and write it under {DATA_PATH}formated_data/{stock_id}/.
    """
    full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}  # seconds_in_bucket tops out at 600 but is sparse in the raw data; this full index fills the gaps
    full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
    # lstmmodel.zero_grad()
    # pool = Pool(30)  # create a process pool capped at NTHREADS workers
    for stock_id, stock_fnmame in book_train_dict.items():
        print(stock_id)
        path = f"{DATA_PATH}formated_data/{stock_id}/"
        if not os.path.exists(path):
            os.makedirs(path)
        trade_train_parquet = trade_train_dict.get(stock_id)
        trade_train_ = pd.read_parquet(trade_train_parquet)
        book_train = pd.read_parquet(stock_fnmame)
        time_ids = book_train['time_id'].unique()
        # hidden = lstmmodel.init_hidden(1)
        _start = time.time()
        # One argument dict per time_id, fanned out to the worker pool.
        param = []
        for time_id in time_ids:
            ARGS_ = dict(optiver_ds=book_train, full_seconds_in_bucket=full_seconds_in_bucket,
                         trade_train_=trade_train_, stock_id=stock_id, time_id=time_id)
            param.append(ARGS_)
            # process_train_bach(ARGS_)
        with Pool(8) as p:
            p.map(process_train_bach, param)
        # input_0, target_0 = zip(*r)
        # print(time.time() - _start)
        # ARGS = dict(optiver_ds=optiver_ds, full_seconds_in_bucket=full_seconds_in_bucket, trade_train_=trade_train_,
        #             stock_id=stock_id)
        # input_0=[]
        # target_0=[]
        # for time_id in time_ids:
        #     input_0_, target_0_ = process_train_bach(time_id, ARGS)
        #     input_0.append(input_0_)
        #     target_0.append(target_0_)
        # input_0, target_0 = pool.apply(func=process_train_bach, args=(time_ids,optiver_ds,full_seconds_in_bucket,trade_train_,stock_id))
        # input_0, target_0 = process_train_bach(time_ids,optiver_ds,full_seconds_in_bucket,trade_train_,stock_id)
        print(time.time() - _start)
        # logging.debug(f'epoch = {epoch} , stock_id = {stock_id} , time_id = {time_id} , loss = {loss_2.item()}')
        # snapshot = tracemalloc.take_snapshot()
        # top_stats = snapshot.statistics('lineno')
        # print("[ Top 10 ]")
        # for stat in top_stats[:10]:
        #     print(stat)
        # logging.debug('========================================================')
    # after every epoch, evaluate on the validation set
    # with torch.no_grad():
    #     test()
# idx = np.arange(np_train.shape[0])
# train_idx, valid_idx = train_test_split(idx, shuffle=True, test_size=0.1, random_state=SEED)
def start_train():
    """Entry point: run a single pre-processing pass over the training data."""
    train_bach(0)
def predict():
    """Generate submission.csv: one volatility prediction per test row.

    NOTE(review): ``torch.load_state_dict`` is not a module-level function;
    this line will raise AttributeError.  The usual pattern is to build an
    LSTMModel instance and call ``model.load_state_dict(torch.load(...))``.
    NOTE(review): LSTMModel.forward returns THREE values, but only two are
    unpacked below -- this would raise ValueError once the model loads.
    NOTE(review): trade_test_id is taken from book_train_dict; the name
    suggests trade_test_dict (or book_test_dict) was intended -- confirm.
    """
    lstmmodel = torch.load_state_dict(torch.load('train_out/model_weights_24.pth'))
    full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
    full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
    # lstmmodel.zero_grad()
    loss_all = []
    # pool = Pool(30)  # create a process pool capped at NTHREADS workers
    target = []
    for index, row in test_ds.iterrows():
        # print(row['stock_id'])
        stock_id = row['stock_id']
        trade_test_id = book_train_dict.get(stock_id)
        trade_test_ = pd.read_parquet(trade_test_id)
        optiver_ds = pd.read_parquet(book_test_dict.get(stock_id))
        time_id = row['time_id']
        ARGS = dict(optiver_ds=optiver_ds, full_seconds_in_bucket=full_seconds_in_bucket, trade_test_=trade_test_,
                    stock_id=stock_id)
        input_0 = process_test_bach(time_id, ARGS)
        # No book data for this time_id: fall back to a 0 prediction.
        if input_0 is None:
            target.append(0)
            continue
        # Add a leading batch dimension before feeding the model.
        input_0 = input_0[None, :, :]
        input_1 = torch.tensor(input_0, dtype=torch.float32, requires_grad=True).to(device)
        with torch.no_grad():
            output_2, _ = lstmmodel(input_1)
        target.append(output_2.item())
    test_ds['target'] = target
    # print(test_ds)
    test_ds[['row_id', 'target']].to_csv('submission.csv', index=False)
if __name__ == '__main__':
    logging.debug('-------- start -----------')
    # print("number of CPU cores: {}".format(cpu_count()))
    NTHREADS = cpu_count()
    SEED = 42
    TRAIN_BATCH_SIZE = 3
    TEST_BATCH_SIZE = 256
    EPOCH_ACCOUNT = 250
    # Dataset root; the commented alternatives are other machines' paths.
    # DATA_PATH = '../input/optiver-realized-volatility-prediction'
    DATA_PATH = '/home/data/optiver-realized-volatility-prediction/'
    # DATA_PATH = '/home/szu/liyu/data/optiver-realized-volatility-prediction/'
    BOOK_TRAIN_PATH = DATA_PATH + 'book_train.parquet'
    TRADE_TRAIN_PATH = DATA_PATH + 'trade_train.parquet'
    BOOK_TEST_PATH = DATA_PATH + 'book_test.parquet'
    TRADE_TEST_PATH = DATA_PATH + 'trade_test.parquet'
    CHECKPOINT = './model_checkpoint/model_01'
    train_ds = pd.read_csv(os.path.join(DATA_PATH, 'train.csv'))
    test_ds = pd.read_csv(os.path.join(DATA_PATH, 'test.csv'))
    print(f'Train ds shape: {train_ds.shape}')
    print(f'Test ds shape: {test_ds.shape}')
    # row_id matches the "{stock_id}-{time_id}" keys used by the workers.
    train_ds['row_id'] = train_ds['stock_id'].astype(str) + '-' + train_ds['time_id'].astype(str)
    book_train_dict = get_path_dict(BOOK_TRAIN_PATH, train_ds['stock_id'].unique())
    trade_train_dict = get_path_dict(TRADE_TRAIN_PATH, train_ds['stock_id'].unique())
    book_test_dict = get_path_dict(BOOK_TEST_PATH, test_ds['stock_id'].unique())
    trade_test_dict = get_path_dict(TRADE_TEST_PATH, test_ds['stock_id'].unique())
    book_skip_columns = trade_skip_columns = ['time_id', 'row_id', 'target']
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = torch.device("cpu")
    print(device)
    input_features_num = 11
    # NOTE(review): the actual training entry point is commented out.
    # start_train()
    print('============= finish =============')
| [
"18508227568@163.com"
] | 18508227568@163.com |
64fb01142f1af659deff75c1e992308d52b79ffa | abb5c57aca959927a8337cdc59df978337108524 | /slovnik.py | 8fc1481c08af330979ecf73e9f92194e12121f0a | [] | no_license | b-mi/python-tests | eb225482831b7863a0380971cca5a6ecd90f7260 | c29c4aed61a32a2507686fbb7ed507e3b7b8fb23 | refs/heads/main | 2023-05-20T11:51:13.095536 | 2021-06-06T15:36:08 | 2021-06-06T15:36:08 | 374,215,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | slovnik = {}
# Dictionary demo: print the dict before and after inserting a key, then
# walk its items one "key => value" line at a time.
food = {"ham": "yes", "eggs": "yes", "spam": "no"}
print(food)
food["yoghurt"] = "no"
print(food)
for k, v in food.items():
    # Fix: dropped the stray trailing semicolon (un-idiomatic in Python).
    print(f"{k} => {v}")
| [
"site@arxa.eu"
] | site@arxa.eu |
a49dd69ff0cdd8b1de22a754b9217b4cc52d5f80 | deaccb91a53ce50cde2805883e89c60fc2e3de58 | /modified/neutronserver/neutron/services/instantvpn/driver/instantvpn_smtpdriver.py | 38cca24e6f59af7226ec8e334332dc18b16ed494 | [] | no_license | VikramChoudhary/instantvpn | 11353641db0d5dbcfdbfe831c295c6100d4b2ddf | 3d070fe4768fa8cdc2376b14c81e498093c92455 | refs/heads/master | 2021-01-10T14:34:03.744474 | 2015-06-01T07:06:03 | 2015-06-01T07:06:03 | 36,642,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Somebody
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
import itertools
import smtplib
def send_mail(subject, text):
    """Send a plain-text notification mail through Gmail's SMTP server.

    Bug fix: the original did ``",".join("vikram...")`` -- joining a *string*
    iterates its characters, producing a garbled ``To:`` header like
    ``v,i,k,...``.  join() needs a list of addresses.
    """
    recipients = ["vikram.choudhary@huawei.com"]
    msg = MIMEMultipart()
    msg['From'] = "vikschw@gmail.com"
    msg['To'] = ",".join(recipients)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    mailServer = smtplib.SMTP("smtp.gmail.com",
                              587)
    mailServer.ehlo()
    # Upgrade the connection to TLS before authenticating.
    mailServer.starttls()
    mailServer.ehlo()
    # TODO: Enable lesssecure option @
    # https://www.google.com/settings/security/lesssecureapps
    # Otherwise you will face authentication issues
    mailServer.login("vikschw@gmail.com",
                     "xxxxxxxx")
    mailServer.sendmail("vikschw@gmail.com",
                        recipients,
                        msg.as_string())
    mailServer.close()
def _prepare_message(data):
def _build_line(key, value):
return "%s: %s\n" % (key, value)
message_lines = itertools.imap(_build_line,
data.keys(),
data.values())
return "".join(message_lines)
def notify_instantvpn_create(instvpn_data):
    """E-mail a formatted dump of a create-instantvpn request."""
    subject = "[INSTVPN] Create instantvpn request:%s" % instvpn_data['name']
    send_mail(subject, _prepare_message(instvpn_data))
def notify_instantvpn_delete(instvpn_data):
    """E-mail a short notice that an instantvpn was deleted."""
    subject = "[INSTVPN] Delete instantvpn request:%s" % instvpn_data['name']
    message = "Request coming from tenant:%s" % instvpn_data['tenant_id']
    send_mail(subject, message)
| [
"vikram.choudhary@huawei.com"
] | vikram.choudhary@huawei.com |
b455760f7be5f2c85f2e420ebba2c429ac85fc71 | cf22dbba3dd94d761d3e8269c49e14c2b5b72635 | /Python1/newLine.py | 4854f3b48bdb9e668cc760b5397a5df7df167760 | [] | no_license | derdelush/project1 | 7647c15a64d74cdc06707b86d05e6b78579c544b | b768f9cb66c7eaf7652be733c9106e8fbe01ffcc | refs/heads/master | 2022-09-11T17:48:44.108552 | 2020-06-01T16:22:40 | 2020-06-01T16:22:40 | 261,424,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | equation = "2 + 2 ="
# Demonstrates printing multiple values, combining them into one string,
# and embedding \n newlines inside a single string literal.
answer = 4
print(equation, answer)
output = "{} {}".format(equation, answer)
print(output)
line1 = "These data types, \nare really fundamental\n when it comes to writing\n useful programs!"
print(line1)
| [
"noreply@github.com"
] | noreply@github.com |
9b6263e9ba80bbfbc22bd0697bf35aaebf5fef39 | 2cdde02ee35e7e9eb23a3d832263ecb6274e8d85 | /Solutins/perceptron.py | bab6ac6ad32f19fd7e4605d9bc95042ad5b422ca | [] | no_license | akash72/LeetCode-Practice | fee69aef986572ab842f03275d6f905a6089a396 | 0278d6446305b300d3aba36c0fc8444bcb024154 | refs/heads/master | 2020-05-09T20:33:23.687919 | 2019-04-15T04:29:40 | 2019-04-15T04:29:40 | 181,411,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,582 | py | import numpy as np
from numpy import linalg
def linear_kernel(x1, x2):
    """Linear kernel: the plain inner product <x1, x2>."""
    return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
    """Inhomogeneous polynomial kernel (1 + <x, y>) ** p."""
    base = np.dot(x, y) + 1
    return base ** p
def gaussian_kernel(x, y, sigma=5.0):
    """RBF kernel exp(-||x - y||^2 / (2 * sigma^2))."""
    sq_dist = linalg.norm(x - y) ** 2
    return np.exp(-sq_dist / (2 * (sigma ** 2)))
class Perceptron(object):
    """Classic perceptron: linear decision function sign(w.x + b), trained
    with the mistake-driven update rule.
    """

    def __init__(self, T=1):
        self.T = T  # number of passes over the training data

    def fit(self, X, y):
        """Run T epochs of the perceptron rule over the samples in order."""
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        for _epoch in range(self.T):
            for idx in range(n_samples):
                # Update weights and bias only when this sample is misclassified.
                if self.predict(X[idx])[0] != y[idx]:
                    self.w += y[idx] * X[idx]
                    self.b += y[idx]

    def project(self, X):
        """Signed score w.x + b for each row of X."""
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        """Class labels sign(w.x + b); accepts a single sample or a batch."""
        X = np.atleast_2d(X)
        return np.sign(self.project(X))
class KernelPerceptron(object):
    """Kernelised perceptron: the decision function is a kernel expansion
    sum_i alpha_i * y_i * k(x_i, x) over the training points; alpha_i counts
    how often point i was misclassified during training.
    """
    def __init__(self, kernel=linear_kernel, T=1):
        self.kernel = kernel
        self.T = T  # number of training epochs
    def fit(self, X, y):
        """Train the mistake counters alpha, then keep only support vectors."""
        n_samples, n_features = X.shape
        #np.hstack((X, np.ones((n_samples, 1))))
        self.alpha = np.zeros(n_samples, dtype=np.float64)
        # Gram matrix
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i,j] = self.kernel(X[i], X[j])
        # Mistake-driven updates: bump alpha_i whenever point i is misclassified.
        for t in range(self.T):
            for i in range(n_samples):
                if np.sign(np.sum(K[:,i] * self.alpha * y)) != y[i]:
                    self.alpha[i] += 1.0
        # Support vectors
        sv = self.alpha > 1e-5
        # NOTE(review): `ind` is computed but never used.
        ind = np.arange(len(self.alpha))[sv]
        self.alpha = self.alpha[sv]
        self.sv = X[sv]
        self.sv_y = y[sv]
        print ("%d support vectors out of %d points" % (len(self.alpha),n_samples))
    def project(self, X):
        """Raw kernel-expansion scores for each row of X."""
        y_predict = np.zeros(len(X))
        for i in range(len(X)):
            s = 0
            for a, sv_y, sv in zip(self.alpha, self.sv_y, self.sv):
                s += a * sv_y * self.kernel(X[i], sv)
            y_predict[i] = s
        print(y_predict)
        return y_predict
    def predict(self, X):
        """Class labels: the sign of the kernel-expansion score."""
        X = np.atleast_2d(X)
        n_samples, n_features = X.shape
        #np.hstack((X, np.ones((n_samples, 1))))
        return np.sign(self.project(X))
if __name__ == "__main__":
    import pylab as pl
    # Demo harness: synthetic 2-D Gaussian data plus plotting helpers.
    def gen_lin_separable_data():
        # generate training data in the 2-d case
        mean1 = np.array([0, 2])
        mean2 = np.array([2, 0])
        cov = np.array([[0.8, 0.6], [0.6, 0.8]])
        X1 = np.random.multivariate_normal(mean1, cov, 100)
        y1 = np.ones(len(X1))
        X2 = np.random.multivariate_normal(mean2, cov, 100)
        y2 = np.ones(len(X2)) * -1
        return X1, y1, X2, y2
    # Two interleaved pairs of Gaussian clusters: not linearly separable.
    def gen_non_lin_separable_data():
        mean1 = [-1, 2]
        mean2 = [1, -1]
        mean3 = [4, -4]
        mean4 = [-4, 4]
        cov = [[1.0,0.8], [0.8, 1.0]]
        X1 = np.random.multivariate_normal(mean1, cov, 50)
        X1 = np.vstack((X1, np.random.multivariate_normal(mean3, cov, 50)))
        y1 = np.ones(len(X1))
        X2 = np.random.multivariate_normal(mean2, cov, 50)
        X2 = np.vstack((X2, np.random.multivariate_normal(mean4, cov, 50)))
        y2 = np.ones(len(X2)) * -1
        return X1, y1, X2, y2
    # Same means as the separable case but a wider covariance -> overlap.
    def gen_lin_separable_overlap_data():
        # generate training data in the 2-d case
        mean1 = np.array([0, 2])
        mean2 = np.array([2, 0])
        cov = np.array([[1.5, 1.0], [1.0, 1.5]])
        X1 = np.random.multivariate_normal(mean1, cov, 100)
        y1 = np.ones(len(X1))
        X2 = np.random.multivariate_normal(mean2, cov, 100)
        y2 = np.ones(len(X2)) * -1
        return X1, y1, X2, y2
    # First 90 points of each class form the training split.
    def split_train(X1, y1, X2, y2):
        X1_train = X1[:90]
        y1_train = y1[:90]
        X2_train = X2[:90]
        y2_train = y2[:90]
        X_train = np.vstack((X1_train, X2_train))
        y_train = np.hstack((y1_train, y2_train))
        return X_train, y_train
    # Remaining 10 points of each class form the test split.
    def split_test(X1, y1, X2, y2):
        X1_test = X1[90:]
        y1_test = y1[90:]
        X2_test = X2[90:]
        y2_test = y2[90:]
        X_test = np.vstack((X1_test, X2_test))
        y_test = np.hstack((y1_test, y2_test))
        return X_test, y_test
    # Scatter the two classes and draw the learned separating line.
    def plot_margin(X1_train, X2_train, clf):
        def f(x, w, b, c=0):
            # given x, return y such that [x,y] in on the line
            # w.x + b = c
            return (-w[0] * x - b + c) / w[1]
        pl.plot(X1_train[:,0], X1_train[:,1], "ro")
        pl.plot(X2_train[:,0], X2_train[:,1], "bo")
        # w.x + b = 0
        a0 = -4; a1 = f(a0, clf.w, clf.b)
        b0 = 4; b1 = f(b0, clf.w, clf.b)
        pl.plot([a0,b0], [a1,b1], "k")
        pl.axis("tight")
        pl.show()
    # Scatter the classes, highlight support vectors, contour the boundary.
    def plot_contour(X1_train, X2_train, clf):
        pl.plot(X1_train[:,0], X1_train[:,1], "ro")
        pl.plot(X2_train[:,0], X2_train[:,1], "bo")
        pl.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c="g")
        X1, X2 = np.meshgrid(np.linspace(-6,6,50), np.linspace(-6,6,50))
        X = np.array([[x1, x2] for x1, x2 in zip(np.ravel(X1), np.ravel(X2))])
        Z = clf.project(X).reshape(X1.shape)
        pl.contour(X1, X2, Z, [0.0], colors='k', linewidths=1, origin='lower')
        pl.axis("tight")
        pl.show()
    # End-to-end demo of the linear Perceptron on separable data.
    def test_linear():
        X1, y1, X2, y2 = gen_lin_separable_data()
        #X1, y1, X2, y2 = gen_lin_separable_overlap_data()
        X_train, y_train = split_train(X1, y1, X2, y2)
        X_test, y_test = split_test(X1, y1, X2, y2)
        clf = Perceptron(T=3)
        clf.fit(X_train, y_train)
        y_predict = clf.predict(X_test)
        correct = np.sum(y_predict == y_test)
        print ("%d out of %d predictions correct" % (correct, len(y_predict)))
        plot_margin(X_train[y_train==1], X_train[y_train==-1], clf)
    # End-to-end demo of the KernelPerceptron (Gaussian kernel).
    def test_kernel():
        X1, y1, X2, y2 = gen_non_lin_separable_data()
        X_train, y_train = split_train(X1, y1, X2, y2)
        X_test, y_test = split_test(X1, y1, X2, y2)
        clf = KernelPerceptron(gaussian_kernel, T=20)
        clf.fit(X_train, y_train)
        y_predict = clf.predict(X_test)
        correct = np.sum(y_predict == y_test)
        print ("%d out of %d predictions correct" % (correct, len(y_predict)))
        plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)

    test_linear()
"noreply@github.com"
] | noreply@github.com |
6a3ed7899e201a6c2834904751cb35c888707708 | 5da064a2d8566074444c29adf9404532c3a88e9a | /Assignment1-ETL/bobhe/utils/convert_date_time_format.py | 498edbd7ce0339df4b3d8b3f8c6d076ce16fa9e1 | [] | permissive | BinYuOnCa/Algo-ETL | 92e28c3dada05d419d891069ffda2ed5f1d52438 | 952ab6f3a5fa26121a940fe91ea7eb909c6dea54 | refs/heads/main | 2023-04-03T21:49:59.710323 | 2021-04-14T17:20:47 | 2021-04-14T17:20:47 | 330,030,143 | 1 | 37 | MIT | 2021-04-14T17:20:48 | 2021-01-15T21:51:46 | Jupyter Notebook | UTF-8 | Python | false | false | 418 | py | from datetime import datetime
from datetime import date
def convert_unix_date_format(dt):
"""
conver YYYY-MM-DD to unix datetime fromat
:param dt: (str) YYYY-MM-DD
:return: (int) unix datetime format
"""
return int(datetime.strptime(dt, "%Y-%m-%d").timestamp())
def get_today_date():
"""
get today date
:param:
:return: (str) YYYY-MM-DD
"""
return str(date.today())
| [
"bobgxp@gmail.com"
] | bobgxp@gmail.com |
a27fe1917e7622ce4fd80485d888d7e0e79c03a4 | 7bc3cb63ef0f6046b22a736c206ae61bbacb68c4 | /src/timesheet/forms.py | 13376c90f68a1bf6c0c09d97d4aabb5f0f3e5578 | [] | no_license | jwoods02/Mosaic-Island-Timesheet-System | f3b5ec7bdf20cc08e1616436e75771dd6f1d14cf | 7e9aa9436151aac3e85d210dbdd1c425971e00ac | refs/heads/master | 2021-03-27T10:57:03.558394 | 2016-01-28T09:47:23 | 2016-01-28T09:47:23 | 48,131,068 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from django import forms
from .models import RowControl, Entry, Department, Activity
class RowControlForm(forms.ModelForm):
class Meta:
model = RowControl
fields = ['month_control_record', 'department', 'activity', 'notes']
class EntryForm(forms.ModelForm):
class Meta:
model = Entry
fields = ['row_control', 'date', 'hours']
def clean(self):
cleaned_data = self.cleaned_data
row_control = cleaned_data['row_control']
date = cleaned_data['date']
hours = cleaned_data['hours']
if not self.instance:
if Entry.objects.get(row_control=row_control, date=date):
raise forms.ValidationError("Entry for this day and row already exists.")
if hours > 24:
raise forms.ValidationError("Cannot work more than 24 hours in a day.")
if hours <= 0:
raise forms.ValidationError("Hours field must be more than 0.")
# Always return cleaned data
return cleaned_data
| [
"james@jwoods.me"
] | james@jwoods.me |
95a097ec8b8543de52dbe26616b63804d1678bd2 | a7975a2fa9f4af97be6a1ecbf2e6ba16f6691088 | /node-python-roulette-master/old/list_resourcegroups.py | 0bc1c7254f19998672528ba08d44f75c0d450707 | [] | no_license | MohamedJuned/aws | 855afd10605964de854cbfce8311690cc5a95856 | 0a107fd0fc41bc7fc09eedfc4f19f1f4ecef76af | refs/heads/master | 2020-04-29T11:22:04.962760 | 2019-03-17T12:34:52 | 2019-03-17T12:34:52 | 176,095,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | import os
import traceback
import sys
import psycopg2 #postgres library. command to install pip install psycopg2
import json
from collections import defaultdict
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
#from azure.mgmt.compute.models import DiskCreateOption
from haikunator import Haikunator
haikunator = Haikunator()
client_id = ''
secret = ''
tenant = ''
i=0
confs=json.load(open("config.json"))
entry=confs["data"]
for d in entry:
database=d["database"]
user=d["user"]
password=d["password"]
host=d["host"]
port=d["port"]
conn = psycopg2.connect(database=database, user=user, password=password, host=host, port=port)
cur = conn.cursor()
#Database table extraction and copy the content.
cur.execute("SELECT subscription_id FROM subscriptions")
rows = cur.fetchall()
for j in rows:
cur.execute("SELECT client_id,secret,tenant FROM subscriptions WHERE subscription_id='%s' " %j)
rows = cur.fetchall()
for row in rows:
c = row[i]
s = row[i+1]
t = row[i+2]
credentials = ServicePrincipalCredentials(
client_id = c,
secret = s,
tenant = t
)
subscription_id=''.join(j)
resource_client = ResourceManagementClient(credentials, subscription_id)
compute_client = ComputeManagementClient(credentials, subscription_id)
network_client = NetworkManagementClient(credentials, subscription_id)
###########
# Prepare #
###########
i=0
arr=[]
fin=[]
for item in resource_client.resource_groups.list():
if item is not None:
print item
# data=item.encode('ascii')
# data=json.dumps(item.__dict__)
# val=json.loads(data)
# s = json.dumps(val, indent=4, sort_keys=True)
# print data
# arr.append(i)
# i=i+1
# # item.name= item.name.encode('ascii')
# name={"name":item.name.encode('ascii')}
# loc={"location":item.location.encode('ascii')}
# # print name
# cp=name.copy()
# # print cp
# cp.update(loc)
# # print cp
# fin.append(str(cp))
# else:
# # print "nodata"
# lst=tuple(fin)
# dic=zip(arr,lst)
# dat=dict(dic)
# convert =json.dumps(dat, indent=4, sort_keys=False)
# convert=convert.replace("'",'"')
# # convert =convert.dumps(dat, indent=4, sort_keys=False)
# print convert
| [
"md.junedulla@gmail.com"
] | md.junedulla@gmail.com |
6f5980258752082c35aaff63112e57d84ac32d19 | 21fec19cb8f74885cf8b59e7b07d1cd659735f6c | /chapter_13/getone-urllib.py | 879783dfb46bea3276181cea113fd47ade1bf7c0 | [
"MIT"
] | permissive | bimri/programming_python | ec77e875b9393179fdfb6cbc792b3babbdf7efbe | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | refs/heads/master | 2023-09-02T12:21:11.898011 | 2021-10-26T22:32:34 | 2021-10-26T22:32:34 | 394,783,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | "Using urllib to Download Files"
'''
Python urllib.request module: given an Internet address
string—a URL, or Universal Resource Locator—this module opens a connection
to the specified server and returns a file-like object ready to be read with normal file
object method calls (e.g., read, readline).
We can use such a higher-level interface to download anything with an address on the
Web—files published by FTP sites (using URLs that start with ftp://); web pages and
output of scripts that live on remote servers (using http:// URLs); and even local files
(using file:// URLs).
'''
#!/usr/local/bin/python
"""
A Python script to download a file by FTP by its URL string; use higher-level
urllib instead of ftplib to fetch file; urllib supports FTP, HTTP, client-side
HTTPS, and local files, and handles proxies, redirects, cookies, and more;
urllib also allows downloads of html pages, images, text, etc.; see also
Python html/xml parsers for web pages fetched by urllib in Chapter 19;
"""
import os, getpass
from urllib.request import urlopen # socket-based web tools
filename = 'monkeys.jpg' # remote/local filename
password = getpass.getpass('Pswd?')
remoteaddr = 'ftp://lutz:%s@ftp.rmi.net/%s;type=i' % (password, filename)
print('Downloading', remoteaddr)
# this works too:
# urllib.request.urlretrieve(remoteaddr, filename)
remotefile = urlopen(remoteaddr) # return input file-like object
localfile = open(filename, 'wb') # where to store data locally
localfile.write(remotefile.read())
localfile.close()
remotefile.close()
'''
Technically speaking, urllib.request supports a variety of Internet protocols (HTTP,
FTP, and local files). Unlike ftplib, urllib.request is generally used for reading remote
objects, not for writing or uploading them (though the HTTP and FTP protocols support
file uploads too). As with ftplib, retrievals must generally be run in threads if
blocking is a concern.
'''
| [
"bimri@outlook.com"
] | bimri@outlook.com |
0bf62614546ffaaceb1eccb62147ec59466e6958 | 86970d48696260e5e4781fd32df3fb8b9d7586fc | /app/resources/stats.py | 91e54d478916493a2cc55c097b297b6e223a87c1 | [
"Apache-2.0"
] | permissive | pieterlukasse/rest_api | ee867f001aaf162559c2800ab8092c855ee1af09 | 2a25de7ade58b77a79ccd5fd690987624e5d9fc6 | refs/heads/master | 2020-12-29T04:20:23.904522 | 2018-02-20T12:48:23 | 2018-02-20T12:48:23 | 123,285,941 | 0 | 0 | null | 2018-02-28T13:04:09 | 2018-02-28T13:04:09 | null | UTF-8 | Python | false | false | 661 | py |
from app.common.auth import is_authenticated
from app.common.rate_limit import rate_limit
from app.common.response_templates import CTTVResponse
from flask import current_app
from flask.ext import restful
from flask.ext.restful import abort
import time
__author__ = 'andreap'
class Stats(restful.Resource):
@is_authenticated
@rate_limit
def get(self):
'''
get counts and statistics fro the availabkle data
'''
start_time = time.time()
es = current_app.extensions['esquery']
res = es.get_stats()
return CTTVResponse.OK(res,
took=time.time() - start_time)
| [
"andreap@ebi.ac.uk"
] | andreap@ebi.ac.uk |
0f6536464144f51c0061d20f423decaaf971937d | b3d2a42cc62b1a7b62d37a010974a4e3ecd3239f | /agents/actor.py | 269b41c44f49ea315cfb756379043546ba08e8a2 | [] | no_license | op317q/RL-Quadcopter-2 | 7e3dedf6737283b9f645313d54f891beb50a6e20 | 2e3783c3d02b5d46ddffaeb5ca2b7be001ffab39 | refs/heads/master | 2022-07-11T10:35:59.663026 | 2019-10-31T00:56:57 | 2019-10-31T00:56:57 | 218,449,302 | 0 | 0 | null | 2022-06-21T23:14:41 | 2019-10-30T05:22:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,248 | py | from keras import layers, models, optimizers, initializers, regularizers
from keras import backend as K
class Actor:
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, action_low, action_high):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
action_low (array): Min value of each action dimension
action_high (array): Max value of each action dimension
"""
self.state_size = state_size
self.action_size = action_size
self.action_low = action_low
self.action_high = action_high
self.action_range = self.action_high - self.action_low
# Initialize any other variables here
self.build_model()
def build_model(self):
"""Build an actor (policy) network that maps states -> actions."""
# Define input layer (states)
states = layers.Input(shape=(self.state_size,), name='states')
# Add hidden layers
net = layers.Dense(units=32, activation='relu')(states)
net = layers.Dense(units=400, activation='relu')(net)
net = layers.Dense(units=300, activation='relu')(net)
raw_actions = layers.Dense(units=self.action_size, activation='tanh',
name='raw_actions')(net)
# Scale [0, 1] output for each action dimension to proper range
actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
name='actions')(raw_actions)
# Create Keras model
self.model = models.Model(inputs=states, outputs=actions)
# Define loss function using action value (Q value) gradients
action_gradients = layers.Input(shape=(self.action_size,))
loss = K.mean(-action_gradients * actions)
# Define optimizer and training function
optimizer = optimizers.Adam(lr=0.0001)
updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
self.train_fn = K.function(
inputs=[self.model.input, action_gradients, K.learning_phase()],
outputs=[],
updates=updates_op)
| [
"op317q@att.com"
] | op317q@att.com |
d8ebc2e90f08ce24e537c39ea79a7c75a1287255 | 74f2e6326095d747673f6098513c0fa1d0161fa9 | /na_galimberti/__manifest__.py | 03b9afb0b028d886853c51edaecc8eb943262683 | [] | no_license | Galimberti/odoo12 | 1f326f7e3a0b2b6785e0208c1ba1a7ad40212e63 | 846f1015f35f601747d885881e74c2bcaa692cbe | refs/heads/master | 2020-05-20T11:46:17.628836 | 2019-05-08T09:38:44 | 2019-05-08T09:38:44 | 185,557,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- encoding: utf-8 -*-
{
'name': 'NexApp Galimberti',
'version': '1',
'depends': [
'account',
'base_geolocalize',
'contacts',
'crm',
'mail',
'purchase',
'sale_management',
'sale_crm',
'stock',
'sale_stock',
'web_google_maps',
'web_notify',
],
'author': "Manuel Pagani",
'website': 'http://www.nexapp.it',
'category': 'NexApp',
'sequence': 1,
'data': [
'data/zap_parameters.xml',
'security/ir.model.access.csv',
'views/category.xml',
'views/crm.xml',
'views/email.xml',
# 'views/pacchi.xml',
'views/product.xml',
'views/purchase.xml',
'views/res_partner.xml',
'views/sale.xml',
'views/scripts.xml',
'views/sequence.xml',
'views/stock.xml',
'views/tendina.xml',
'views/value_temp.xml',
],
'installable': True,
'application': True,
} | [
"mpagani@nexapp.it"
] | mpagani@nexapp.it |
3d48d25fcba7435e461ef9bfa788273f1d6a4285 | 2a5e289bda0ca06bee73ed89c629654466bbe37f | /common/lockfile.py | 5fa9d7fef5c55f998ce894d9c319e9470ff2ed35 | [] | no_license | eocampo2000/factelec | b7121e52fcfb592475cfe078ba59eb0ac4069424 | 6773a6f5822efec409f21d8150362c240e0e15dc | refs/heads/master | 2020-07-03T11:56:34.950423 | 2016-11-18T23:19:00 | 2016-11-18T23:19:00 | 73,839,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | '''
Created on Jun 27, 2012
@author: eocampo
New Style class
'''
__version__ = '20120923'
# flock.py
import os
import sys
import utils.fileutils as fu
import utils.strutils as su
import proc.process as ps
class LockFile(object):
'''Class to handle creating and removing (pid) lockfiles'''
def __init__(self, lckFn, log, pid = os.getpid()):
self.pid = pid
self.lckFn = lckFn
self.log = log
self.lock = False
self.log.debug('Initializing: %s for pid %s' % (self.lckFn,self.pid))
# Try to get a lock. Returns True if lock is acquired, otherwise false.
def getLock(self):
# Check if a valid process lock exists !
rc = self._chkIfValidLock()
if rc is True:
self.log.warn('Cannot Create %s. Process is currently running!!' % self.lckFn)
return False
rc = fu.createFile(self.lckFn,str(self.pid))
if rc == 0 :
self.log.info('Created lock %s for PID = %s' % (self.lckFn,self.pid))
self.lock = True
return True
else :
self.log.error('Could not create Lock %s for PID = %s' % (self.lckFn,self.pid))
return False
# This method checks if a lock is valid.
def _chkIfValidLock(self):
rc = True
# Check if there is a lock file.
if fu.fileExists(self.lckFn):
sPid = fu.readFile(self.lckFn)
pid = su.toInt(sPid)
self.log.info('Lock File %s EXISTS. sPid = %s , pid = %s' % (self.lckFn,sPid,pid))
# Check if file has a valid PID (number)
if pid is None :
rc = fu.delFile(self.lckFn)
if rc == 0 :
self.log.info('Removed File %s' % (self.lckFn))
else:
self.log.error('Could not removed File %s' % (self.lckFn))
return False
# If pid is a valid number, check if the process is running ...
rc = ps.isProcRunning(pid,self.log)
self.log.debug('isProcRunning returned %s' % (rc))
return rc
# No lock file exists.
else: return False
# Release the lock file
def relLock(self):
if self.lock:
try:
fu.delFile(self.lckFn)
self.log.info('Released lock: %s for pid %s' % (self.lckFn,self.pid))
return 0
except:
self.log.error("==EXCEP %s %s" % (sys.exc_type,sys.exc_value))
return 1
else:
self.log.warn('No lock to release for: %s for pid %s' % (self.lckFn,self.pid))
return 0
# Destructor
def __del__(self):
# Do not remove an existing valid lock file !
rc=self.relLock()
self.log.debug('Automatic Lock Cleanup for: %s for pid %s rc = %s' % (self.lckFn,self.pid,rc))
| [
"eocampo2000@hotmail.com"
] | eocampo2000@hotmail.com |
c531e8963a8bdd1fd5685361f3d120b112d7931c | f0acc407f95b758fa734f5ed5f6506a8b20d2706 | /docs_src/parameter_types/bool/tutorial004_an.py | 1cb42fcc86f69fbffbf6fb0cd4576c958c05ba79 | [
"MIT"
] | permissive | shnups/typer | ede6d86c5b169e8caa7823b0552f8531ed041f84 | e0b207f3f577cb2e59fdd60da39686a2f5ed0e77 | refs/heads/master | 2023-08-31T01:54:21.168547 | 2023-08-01T09:36:09 | 2023-08-01T09:36:09 | 313,047,732 | 0 | 0 | MIT | 2020-11-15T14:22:06 | 2020-11-15T14:22:05 | null | UTF-8 | Python | false | false | 276 | py | import typer
from typing_extensions import Annotated
def main(in_prod: Annotated[bool, typer.Option(" /--demo", " /-d")] = True):
if in_prod:
print("Running in production")
else:
print("Running demo")
if __name__ == "__main__":
typer.run(main)
| [
"noreply@github.com"
] | noreply@github.com |
2abbedbe62a9a485cb164c84c4b9060e5596384d | 9d0b3029a2c10683e6c7bda94887154857bfe634 | /Beginner/URI_1959.py | fc2d39f56b9fa4201baedf6f921b534d62bcecec | [
"MIT"
] | permissive | rbshadow/Python_URI | 559b25a559fbe955c0e1fe6bdc1c39e30f5c18a9 | 4f7df8cdea0eba5c550bb3016b1a7ab6dc723d56 | refs/heads/master | 2020-02-26T15:53:55.367919 | 2018-10-04T00:43:31 | 2018-10-04T00:43:31 | 70,911,515 | 3 | 0 | MIT | 2018-02-15T17:35:56 | 2016-10-14T13:13:13 | Python | UTF-8 | Python | false | false | 122 | py | def math():
n, L = map(int, input().split())
res = (n * L)
print(res)
if __name__ == '__main__':
math()
| [
"iqbal2053@diu.edu.bd"
] | iqbal2053@diu.edu.bd |
8c549578be2ffe70b1219f0eaee724f0e01297a6 | 55b877071f1dc660dae32bcb7eee6933327318f4 | /match/urls.py | b736c62099924e6a1059ad28704c98e57b9dd878 | [] | no_license | dnd-hack/backend | 61a18c3f35e8b447b6dfe65e1611b513250a7926 | 0a908b125141a217217dfe160a1367e30aed2c76 | refs/heads/master | 2023-09-02T18:26:41.110359 | 2021-11-20T07:25:36 | 2021-11-20T07:25:36 | 429,919,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
from .views import *
router = DefaultRouter()
router.register('group', views.GroupViewSet)
urlpatterns = [
path('', include(router.urls)),
path('joined_member/', JoinMember.as_view()),
path('match_list/', MatchList.as_view()),
path('filter_match/', FilterMatch.as_view()),
] | [
"julysein723@gmail.com"
] | julysein723@gmail.com |
4430c8ca579d7e8d75862ff789f16ec07bb3e5ff | d3f708811f4a3a84b6241ec388ebe0bb9f8583cd | /Lab2.4/apicem.py | 52519bf95d022ffc154109fd909b7b8171499d6c | [] | no_license | mk-99/p4ne | 4938b2dfab2916936dd318d54d1e0ebec4e2fb56 | 16990b36248a26e760f66421e2748e5a520ce412 | refs/heads/master | 2022-09-03T21:14:56.123319 | 2022-08-04T16:46:17 | 2022-08-04T16:46:17 | 66,036,817 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | import requests, json, pprint
from flask import Flask
from flask import render_template, jsonify
# controller = "devnetapi.cisco.com/sandbox/apic_em"
controller = "sandboxapicem.cisco.com"
def new_ticket():
url = 'https://' + controller + '/api/v1/ticket'
payload = {"username": "devnetuser",
"password": "Cisco123!"
}
header = {"content-type": "application/json"}
response = requests.post(url, data=json.dumps(payload),
headers=header, verify=False)
return response.json()['response']['serviceTicket']
def get_hosts(ticket):
url = "https://" + controller + "/api/v1/host"
header = {"content-type": "application/json",
"X-Auth-Token":ticket
}
response = requests.get(url, headers=header, verify=False)
return response.json()
def get_devices(ticket):
url = "https://" + controller + "/api/v1/network-device"
header = {"content-type": "application/json",
"X-Auth-Token": ticket
}
response = requests.get(url, headers=header, verify=False)
return response.json()
def get_topo(ticket):
url = "https://" + controller + "/api/v1/topology/physical-topology"
header = {"content-type": "application/json",
"X-Auth-Token": ticket
}
response = requests.get(url, headers=header, verify=False)
return response.json()
app = Flask(__name__)
@app.route("/")
def index():
return render_template("topology.html")
@app.route("/api/topology")
def topology():
ticket = new_ticket()
return jsonify(get_topo(ticket)['response'])
if __name__ == '__main__':
ticket = new_ticket()
print("Hosts = ")
pprint.pprint(get_hosts(ticket))
print("Devices = ")
pprint.pprint(get_devices(ticket))
print("Topology = ")
pprint.pprint(get_topo(ticket))
app.run(debug=True)
| [
"mklochkov@gmail.com"
] | mklochkov@gmail.com |
1d4682439a3ec9cebb7221e6ed9577f7be10a86c | 41cd61226440c7f0a6fcf77f7e4f65e65c28aaa1 | /wg_auto/a1_inject/sql_injection/intro.py | 2a1bcbc73989354939d03940f43f8d0cb3c7b42d | [] | no_license | xx-zhang/webgoat_auto | 6d99d98148e180b6eacf46c5d2b4de81b552fb1e | 8d47d6af68530940987a272224e9c21f870bf402 | refs/heads/master | 2023-04-03T22:24:54.675321 | 2021-04-16T09:23:30 | 2021-04-16T09:23:30 | 358,497,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | # coding:utf-8
from wg_auto.base import request_wrap
def sql2_test(q="select department from employees where first_name='Bob' and last_name='Franco'"):
__url = '/SqlInjection/attack2'
return request_wrap(method='post', url=__url, data={"query": q})
def sql3_test(q="update employees set department='Sales' where "
"first_name='Tobi' and last_name='Barnett'"):
__url = '/SqlInjection/attack3'
return request_wrap(method='post', url=__url, data={"query": q})
def sql4_test(q='alter table employees add phone varchar(20)'):
__url = '/SqlInjection/attack4'
return request_wrap(method='post', url=__url, data={"query": q})
def sql5_test(q='grant alter table to UnauthorizedUser'):
__url = '/SqlInjection/attack5'
return request_wrap(method='post', url=__url, data={"query": q})
def sql9_test():
__url = "/SqlInjection/assignment5a"
data = {"account": "Smith'", "operator": "or", "injection": "'1'='1"}
return request_wrap(method='post', url=__url, data=data)
def sql10_test():
__url = "/SqlInjection/assignment5b"
data = {"login_count": "1", "userid": "1 or 1=1"}
return request_wrap(method='post', url=__url, data=data)
def sql11_test():
__url = "/SqlInjection/attack8"
data = {"name": "Smith", "auth_tan": "1' or '1'='1"}
return request_wrap(method='post', url=__url, data=data)
def sql12_test():
__url = "/SqlInjection/attack9"
# data = {"name": "Smith", "auth_tan": "3SL99A' or '1'='1"}
data = {"name": "Smith", "auth_tan": "1' or 1=1;update employees set salary = 90000 where last_name = 'Smith';--+"}
return request_wrap(method='post', url=__url, data=data)
def sql13_test():
__url = "/SqlInjection/attack10"
data = {"action_string": "1' or 1=1;drop table access_log;--"}
return request_wrap(method='post', url=__url, data=data)
| [
"you@example.com"
] | you@example.com |
35ea7aee6833455e696b851f5b9719929a46676f | a764f739cb663faca21f84ae0fcffc06c29de491 | /webapp/manage.py | ed1b33804aafe5ed79627d1a35bc2c73c716bb22 | [] | no_license | iViolinSolo/SJ-KnowledgeGraphMS | 8e990c03de699fe96aced6ffd337b371f4e28362 | 60106eb08243bd0420dbd5d5bdfa3c92084c171f | refs/heads/master | 2021-05-31T23:58:50.729343 | 2016-01-30T12:07:23 | 2016-01-30T12:07:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,235 | py | # -*- coding: utf-8 -*-
from flask import Flask, render_template, redirect, url_for, request, flash, session
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from tj.db.util.cayley_util import CayleyUtil
from tj.util.import_util import import_excel_new_version
from forms import AddRelationForm
import json
import threading
import os
import functools
import inspect
from datetime import timedelta
import traceback
from tj.util.file.file_util import create_file_with_time
is_login = str('is_login')
app = Flask(__name__)
app.config['SECRET_KEY'] = 'jiu bu gao su ni'
app.config['UPLOAD_FOLDER'] = './FileUpload'
bootstrap = Bootstrap(app)
manager = Manager(app)
@app.before_request
def make_session_permanent():
'''setting for session expire span, now we set it to 3min
'''
# app.logger.info("@app.before_request invoke, refresh session expire time")
expire_span = 5#5 minutes
session.permanent = True
app.permanent_session_lifetime = timedelta(minutes=expire_span)
def check_is_login(next_url = None):
def check_is_login_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# func_args = inspect.getcallargs(func, *args, **kwargs)
# print func_args
# if func_args.get('username') != 'admin':
# raise Exception('permission denied')
app.logger.debug('check_is_login')
if not session.get(is_login):
flash('Plz login first')
app.logger.error('must login first')
return redirect(url_for('login', next = next_url if next_url else None))
return func(*args, **kwargs)
return wrapper
return check_is_login_decorator
@app.route('/sjkg/login', methods=['GET','POST'])
def login():
if request.method == 'GET':
return render_template('session/login.html')
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
app.logger.info('IMPORT: Attemp login... with username:{} password:{}'.format(username, password))
next_url = request.args.get('next')
app.logger.info('login module: next_url is {}'.format(next_url))
# app.logger.info('root_url is {}\n and script_root is {}\n url_for is {}'.format(request.url_root, request.script_root, url_for('login')))
if not username and not password:
flash('Field can not be blank! Try again.')
return redirect(url_for('login', next = next_url if next_url else None))
elif username == "admin" and password == 'nicai':
session[is_login] = True
flash('Login Success~ Welcome back !')
app.logger.info('login success, redirect to {}'.format(next_url))
# return redirect(url_for('home'))
return redirect(next_url if next_url else url_for('home'))
else:
flash('Wrong username or password! Try again.')
return redirect(url_for('login', next = next_url if next_url else None))
@app.route('/sjkg/logout', methods=['GET'])
def logout():
if session.get(is_login):
flash('Logout Success~')
session.pop(is_login, None)
return redirect(url_for('home'))
@app.route('/sjkg/card')
def card():
# session[is_login] = True
return render_template("card/cardbase.html")
@app.route('/sjkg/control', methods=['GET', 'POST'])
@check_is_login(next_url = '/sjkg/control')
def control():
add_relation_form = AddRelationForm()
cayley_util = CayleyUtil()
if add_relation_form.validate_on_submit():
form_subject = add_relation_form.subject.data.encode("utf-8")
form_predicate = add_relation_form.predicate.data.encode("utf-8")
form_object = add_relation_form.object.data.encode("utf-8")
cayley_util.insert_quads_triple(form_subject, form_predicate, form_object)
return redirect(url_for("control"))
gremlin_query = "g.V().Tag(\"Subject\").Out(null, \"Predicate\").Tag(\"Object\").All()"
gremlin_result = json.loads(cayley_util.query(gremlin_query))
return render_template("control/controlbase.html", triples=gremlin_result["result"],
add_relation_form=add_relation_form, status=request.args.get("status"))
@app.route('/sjkg/control/relation/delete', methods=['POST'])
@check_is_login(next_url = '/sjkg/control')
def control_relation_delete():
# subject = request.args.get("subject").encode("utf-8")
# object = request.args.get("object").encode("utf-8")
# predicate = request.args.get("predicate").encode("utf-8")
subject = request.json["subject"].encode("utf-8")
object = request.json["object"].encode("utf-8")
predicate = request.json["predicate"].encode("utf-8")
print subject, object, predicate
result = False
if subject and object and predicate:
cayley_util = CayleyUtil()
try:
result = cayley_util.delete_quads_triple(subject, predicate, object)
except Exception as e:
app.logger.error(traceback.format_exc())
return json.dumps({"result": result})
@app.route('/sjkg/entity')
def entity():
# if session.get(is_login):
# session.pop(is_login, None)
return render_template("entity/entitybase.html")
@app.route('/sjkg/entity/<name>')
def entity_name(name):
return render_template("entity/entityname.html", name=name)
@app.route('/sjkg/home')
@app.route('/sjkg')
# @check_is_login
def home():
'''bind both this two url to such a one method'''
# return redirect(url_for("hehe"))
return render_template("home.html")
# @app.route('/sjkg/home')
# def hehe():
# return render_template("home.html")
def fetch_relations_by_entity(name):
result_rlts = []
cayley_util = CayleyUtil()
#get result relations origin data
rlts_origin_data = cayley_util.find_relations_from_node(name)
if not rlts_origin_data:
#handle condition if not exist the entityname
return None, None
# print 'all changes....'
app.logger.info("show data in rlts_origin_data query by entity name:{0}",name)
_print_rlts_odata(rlts_origin_data)
#doing sorting using lambda expr
rlts_origin_data.sort(key=lambda x: x['relation'], reverse=False)
app.logger.info("show data in rlts_origin_data after sort")
_print_rlts_odata(rlts_origin_data)
for item_rlts in rlts_origin_data:
if not item_rlts['relation'].startswith("attribute:"):
continue
predicate = item_rlts['relation'].replace("attribute:", "").split("/")
p_len = len(predicate)
real_concept_1_subject = predicate[p_len - 1]
result_rlts.append({"sect_title": real_concept_1_subject, "sect_text": item_rlts['target'], "margin_left": (p_len - 1) * 50})
rest_attrs, rest_non_attrs = _process_rlts_odata(rlts_origin_data)
app.logger.error(rest_attrs)
app.logger.error(rest_non_attrs)
# return result_rlts
return rest_attrs, rest_non_attrs
def _process_rlts_odata(rlts_origin_data):
'''generate section indicator in num, generate two kinds of model
'''
rest_non_attrs = []#None atrributes
rest_attrs = []#attributes
attrs_no = [0,0,0,0]
for item_rlts in rlts_origin_data:
item_rlts__source = item_rlts['source']
item_rlts__relation = item_rlts['relation']
item_rlts__id = item_rlts['id']
item_rlts__target = item_rlts['target']
if item_rlts__relation.startswith("attribute:"):
#means atrribute if curren entity
item_rlts__relation = item_rlts__relation.replace("attribute:","")#del no useful attribute: prefix
hierachy_dirs_list = item_rlts__relation.split('/')
hir_len = len(hierachy_dirs_list)
sect_title = hierachy_dirs_list[hir_len-1]
margin_left = hir_len - 1
#there has some error, not always appear
while(len(attrs_no)<hir_len):
#ensure len equals
attrs_no.append(0)
attrs_no[hir_len-1] += 1
attrs_no[hir_len:]=[]#del not useful list num
no = ".".join(str(t) for t in attrs_no)
# for index in range(hir_len,len(attrs_no)):
# attrs_no[index] = [1]
rest_attrs.append({"sect_title":sect_title,"sect_text":item_rlts__target,"margin_left":margin_left,"no":no})
else:
#means not the atrribute
rest_non_attrs.append({'subject':item_rlts__source, 'predicate':item_rlts__relation, 'object':item_rlts__target})
return rest_attrs, rest_non_attrs
def _print_rlts_odata(rlts_origin_data):
    '''Private helper: log every relation record at DEBUG level.

    Logs a warning and returns early when the record list is empty or None.
    NOTE(review): str.format(...).decode('utf-8') only exists on Python 2
    (py3 str has no .decode) -- this module appears to target Python 2.
    '''
    if not rlts_origin_data:
        app.logger.warning('Attention: rlts_origin_data is None')
        return
    for item in rlts_origin_data:
        source = item['source']
        relation = item['relation']
        id = item['id']
        target = item['target']
        info = "source:{}, relation:{}, id:{}, target:{}".format(source, relation, id, target).decode('utf-8')
        app.logger.debug(info)
        # print info
    return
@app.route('/sjkg/submitCard', methods=['POST', 'GET'])
@check_is_login(next_url = '/sjkg/submitCard')
def submitCard():
    '''Handle the form POST from /sjkg/card and render the entity card.

    Flashes an error and redirects back to the card page when the input is
    empty or the entity is unknown.
    NOTE(review): a plain GET falls through and returns None (Flask errors
    on a None response); presumably the page is only reached via POST.
    '''
    if request.method == "POST":
        entity_name = request.form["entity_name"].encode("utf-8")
        if not entity_name:
            app.logger.warning("No Input in form['entity_name'] from card page, alert~")
            flash("Field content can not be empty! Try again!")
            return redirect(url_for('card'))
        # result_rlts = fetch_relations_by_entity(entity_name)
        rest_attrs, rest_non_attrs = fetch_relations_by_entity(entity_name)
        if not rest_attrs and not rest_non_attrs:
            # both lists empty means no such entity was found
            app.logger.warning("No such entity: {0}".format(entity_name))
            flash("Can not find entity: {0}, Check it!".format(entity_name))
            return redirect(url_for('card'))
        else:
            return render_template("card/card.html", data={"rest_attrs": rest_attrs, "rest_non_attrs":rest_non_attrs}, name=entity_name)
@app.route('/sjkg/search')
def search():
    '''Find all paths between two entities up to a depth limit.

    Query params: entity1, entity2 and level (search depth, default 6).
    The special value "大于6" ("greater than 6") requests a deeper search.
    '''
    if request.method == "GET":
        entity1 = request.args.get("entity1", "").encode("utf-8")
        entity2 = request.args.get("entity2", "").encode("utf-8")
        level = request.args.get("level", 6)
        if level == "大于6":
            level = 7
        level = int(level)
        # any requested depth above 6 is widened to 10
        level = level if level <= 6 else 10
        if entity1 == "" or entity2 == "":
            flash('test:Nothing in it')
            return render_template("home.html")
        else:
            cayley_util = CayleyUtil()
            paths, relations = cayley_util.find_all_paths(entity1, entity2, [], [], None, level)
            relation_dict = {"relations": relations}
            return render_template("search/searchbase.html", relations=json.dumps(relation_dict), paths=paths)
@app.route('/sjkg/ajax/entity/<name>', methods=['POST'])
def ajax_entity(name):
    """AJAX endpoint: return the relations of entity *name* from Cayley."""
    return CayleyUtil().find_relations(name)
# Accepted Excel upload extensions (compared lower-cased).
ALLOWED_EXTENSIONS = set(['xls', 'xlsx'])

def allowed_file(filename):
    """Return True if *filename* has an allowed Excel extension.

    The comparison is case-insensitive, so 'report.XLSX' is accepted too
    (the original check was case-sensitive and rejected upper-case
    extensions). Names without any '.' are rejected.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/sjkg/excel_import', methods=['POST'])
@check_is_login(next_url = '/sjkg/control')
def excel_import():
    '''Accept an Excel upload and import it into the graph in a background thread.'''
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            # file_location = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
            file_location = create_file_with_time(app.config['UPLOAD_FOLDER'], file.filename)
            file.save(file_location)
            # run the import in the background so the request returns immediately
            t = threading.Thread(target=import_excel_new_version, args=(file_location,))
            t.start()
            return redirect(url_for("control", status="上传成功!请等待一段时间!"))
        elif file and not allowed_file(file.filename):
            return redirect(url_for("control", status="文件类型错误!"))
    # fallback for non-POST requests or a missing/invalid file
    return redirect(url_for("control", status="错误!请检查文件格式!"))
@app.route('/sjkg/upload_pic', methods=['POST'])
@check_is_login(next_url = '/sjkg/control')
def upload_pic():
    '''Save an uploaded picture and attach it to an entity as "attribute:图片".'''
    if request.method == 'POST':
        file = request.files['file']
        entity_name = request.form.get('entity').encode('utf-8')
        # NOTE(review): the raw client filename is used unchanged -- consider
        # sanitizing it (e.g. werkzeug secure_filename) before writing to disk
        file_location = os.path.join(os.getcwd(), 'static/pic', file.filename)
        file.save(file_location)
        cayley_util = CayleyUtil()
        url_prefix = '/sjkg/pic/' + file.filename
        cayley_util.insert_quads_triple(entity_name, 'attribute:图片'.encode('utf-8'), url_prefix.encode('utf-8'))
    return redirect(url_for("control", status="添加图片成功!"))
@app.route('/sjkg/upload_vid', methods=['POST'])
@check_is_login(next_url = '/sjkg/control')
def upload_vid():
    '''Save an uploaded video and attach it to an entity as "attribute:视频".'''
    if request.method == 'POST':
        file = request.files['file']
        entity_name = request.form.get('entity').encode('utf-8')
        # NOTE(review): raw client filename used unchanged -- consider sanitizing
        file_location = os.path.join(os.getcwd(), 'static/vid', file.filename)
        file.save(file_location)
        cayley_util = CayleyUtil()
        url_prefix = '/sjkg/vid/' + file.filename
        cayley_util.insert_quads_triple(entity_name, 'attribute:视频'.encode('utf-8'), url_prefix.encode('utf-8'))
    return redirect(url_for("control", status="添加视频成功!"))
@app.route('/sjkg/pic/<name>', methods=['GET'])
def show_pic(name):
    """Render the picture page for the uploaded file *name*.

    The page heading is the file name without its extension. Using
    rsplit keeps names containing extra dots intact ('a.b.jpg' -> 'a.b')
    and does not raise on dot-less names; the original used
    str.index('.') which cut at the first dot and raised ValueError
    when no dot was present.
    """
    head = name.rsplit('.', 1)[0]
    url = '/static/pic/' + name
    return render_template('pic.html', head=head, url=url)
@app.route('/sjkg/vid/<name>', methods=['GET'])
def show_vid(name):
    """Render the video page for the uploaded file *name*.

    The heading is the file name without its extension; rsplit keeps
    names with extra dots intact and does not raise when there is no dot
    (the original str.index('.') cut at the first dot and raised
    ValueError for dot-less names).
    """
    head = name.rsplit('.', 1)[0]
    url = '/static/vid/' + name
    return render_template('vid.html', head=head, url=url)
#----------------------------------------
# main func snippet
#----------------------------------------
if __name__ == '__main__':
    # Development server; host 0.0.0.0 exposes it on all interfaces.
    app.run(debug=True, port=5000, host="0.0.0.0")
| [
"violiniselegant@gmail.com"
] | violiniselegant@gmail.com |
a40a61da4b281943142d8d4709eff02cb23d9dca | 2ca3b6cc4f9145438e283d4e055e55fff550ec90 | /flask/hello.py | 68f2d487bdd7eac37fde1aad5bf11e7ee96000bc | [] | no_license | manuck/python_practice | e39a7e3be41608dd9bf8a7bdb9228a22ceb652b6 | 7adbefbe616f305430c75e896d817ec8e7f938d3 | refs/heads/master | 2020-04-12T02:45:06.427693 | 2018-12-21T01:15:31 | 2018-12-21T01:15:31 | 162,252,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | from flask import Flask, render_template
import datetime
import random
app = Flask(__name__)

@app.route("/")
def hello():
    """Render the landing page."""
    landing_page = render_template("index.html")
    return landing_page
@app.route("/ssafy")
def ssafy():
    """Return a short greeting string."""
    greeting_text = "방가방가룽~"
    return greeting_text
@app.route("/isitchristmas")
def christmas():
    """Return 'ㅇ' (yes) on December 25th, otherwise 'ㄴ' (no).

    Fixes two defects of the original: it compared the class attribute
    ``datetime.datetime.month`` (a descriptor, never equal to 12) instead
    of the current date, and it returned None on every day that was not
    in December, which makes Flask raise an error.
    """
    today = datetime.datetime.now()
    if today.month == 12 and today.day == 25:
        return "ㅇ"
    return "ㄴ"
# variable routing
@app.route("/greeting/<string:name>")
def greeting(name):
    """Greet *name* (string variable-routing demo)."""
    message = f"{name} 안녕하십니까? 인사 오지게 박습니다."
    return message
@app.route("/cube/<int:num>")
def cube(num):
    """Return num**3 as a string (int variable-routing demo)."""
    return f"{num ** 3}"
@app.route("/dinner")
def dinner():
    """Pick tonight's dinner at random and render it with the full menu."""
    options = ["햄버거", "수육", "치킨"]
    chosen = random.choice(options)
    return render_template("dinner.html", dinner=chosen, menu=options)
@app.route("/music")
def music():
    """Pick a random song and render it with the whole playlist."""
    playlist = ["아이유-이름에게", "멜로망스-욕심", "태연-기억을 걷는 시간"]
    picked = random.choice(playlist)
    return render_template("music.html", music=picked, mlist=playlist)
"snc9000@naver.com"
] | snc9000@naver.com |
6354aa7b7ba28bde4173cb4edc107ac16ee3a368 | a3050fc7faa04e3b54aef166d1cda8d1159f3f7e | /SpiderPractice/scrapy_edu/qianmu/qianmu/pipelines.py | 1b7c23fed951c07cab49b5c7d144da90ed4f8c3c | [] | no_license | cdsire/mySpider | 5cb3f340a9819d51d4b59dd819cace0fcf582d57 | 24a7f7217be77ecae970eeda81cf510883defd65 | refs/heads/master | 2020-03-07T21:38:32.444089 | 2018-04-02T11:37:55 | 2018-04-02T11:37:55 | 127,732,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class QianmuPipeline(object):
    """Scrapy item pipeline; currently a pass-through (no processing)."""
    def process_item(self, item, spider):
        # Return the item unchanged so later pipelines/exporters receive it.
        return item
| [
"po@ubantu.com"
] | po@ubantu.com |
e2da485c57742e78b5fda5b4a27556d22741da6f | 53db61f70e15d276a89c9f3a3fd1765fe6caa193 | /hw5Files/bayesnets.py | 1403441c6418058782b28fed840a11643f164436 | [] | no_license | KyleMortek/pythonAI | ac369493aa2c9e3d0d78c0b6a089fffa4ac67c3d | b37256924178b2b51980c802ad324e413adcd140 | refs/heads/master | 2020-03-28T13:12:13.520742 | 2019-01-31T17:32:54 | 2019-01-31T17:32:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,113 | py | """
Probability models. (Chapter 13-15)
Nils Napp
Based on AIMA code
"""
import random
from numbers import Number
class ProbDist:
    """A discrete probability distribution for one random variable.

    Name the random variable in the constructor, then assign and query
    probability of values.

    >>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
    0.25
    >>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500})
    >>> P['lo'], P['med'], P['hi']
    (0.125, 0.375, 0.5)
    """

    def __init__(self, varname='?', freqs=None, sample_space=None, vals=None):
        """Create the distribution.

        If `freqs` (a value -> frequency dict) is given, the distribution
        is normalized to sum to 1. Alternatively supply `sample_space`
        (list/tuple or space-delimited string) together with `vals`, a
        matching sequence of probabilities.
        """
        self.prob = {}
        self.prob_vec = []  # reserved; not yet used
        self.varname = varname
        self.values = []
        if freqs:
            for (v, p) in freqs.items():
                self[v] = p
            self.normalize()
            self.sample_space = self.prob.keys()
        elif sample_space:
            assert isinstance(vals, (list, tuple)), "'vals' must be a list or tuple of probabilities"
            assert isinstance(sample_space, (str, list, tuple)), "'sample_space' must be string or list"
            if isinstance(sample_space, str):
                sample_space = sample_space.split()
            assert isinstance(sample_space, (list, tuple))
            assert len(sample_space) == len(vals), "Number of keys and values does not match"
            for k, p in zip(sample_space, vals):
                assert isinstance(p, Number), "Elements in vals must be numbers"
                # go through __setitem__ so self.values stays in sync
                # (bug fix: self.prob used to be written directly here,
                # leaving self.values empty on this code path)
                self[k] = p
            self.sample_space = sample_space

    def __getitem__(self, val):
        """Given a value, return P(value); unknown values have probability 0."""
        try:
            return self.prob[val]
        except KeyError:
            return 0

    def __setitem__(self, val, p):
        """Set P(val) = p, remembering the value in self.values."""
        if val not in self.values:
            self.values.append(val)
        self.prob[val] = p

    def normalize(self):
        """Scale the probabilities so they sum to 1; returns self.

        Raises ZeroDivisionError if the sum of the values is 0.
        """
        total = sum(self.prob.values())
        if not isclose(total, 1.0):
            for val in self.prob:
                self.prob[val] /= total
        return self

    def show_approx(self, numfmt='{:.3g}'):
        """Show the probabilities rounded and sorted by key, for the
        sake of portable doctests."""
        return ', '.join([('{}: ' + numfmt).format(v, p)
                          for (v, p) in sorted(self.prob.items())])

    def __repr__(self):
        return "P({})".format(self.varname)
#---VVV---VVV---VVV------ Helper Functions
def event_values(event, variables):
    """Return a tuple with the value each variable takes in *event*.

    *event* may already be a tuple (returned unchanged when its length
    matches the number of variables) or a mapping from variable to value.

    >>> event_values({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
    (8, 10)
    >>> event_values((1, 2), ['C', 'A'])
    (1, 2)
    """
    already_a_tuple = isinstance(event, tuple) and len(event) == len(variables)
    if already_a_tuple:
        return event
    return tuple(event[var] for var in variables)
def normalize(dist):
    """Scale the numbers in *dist* by a constant so they sum to 1.0.

    Dict distributions are normalized in place (and returned); lists and
    tuples produce a new list.
    """
    if not isinstance(dist, dict):
        total = sum(dist)
        return [n / total for n in dist]
    total = sum(dist.values())
    for key in dist:
        dist[key] = dist[key] / total
        assert 0 <= dist[key] <= 1, "Probabilities must be between 0 and 1."
    return dist
try:  # math.isclose was added in Python 3.5; but we might be in 3.4
    from math import isclose
except ImportError:
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        """Return true if numbers a and b are close to each other.

        Fallback mirroring math.isclose's signature for pre-3.5 Pythons.
        """
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
#------------------
class BayesNet:
    """Bayesian network: a DAG of BayesNode objects."""

    def __init__(self, node_specs=[]):
        """Build a net from *node_specs*; each spec is passed to BayesNode.

        Nodes must be ordered with parents before children.
        NOTE(review): the mutable default argument is shared across calls;
        it is only read here, but callers should still pass their own list.
        """
        self.nodes = []
        self.variables = []
        for node_spec in node_specs:
            self.add(node_spec)

    def add(self, node_spec):
        """Add a node to the net. Its parents must already be in the
        net, and its variable must not."""
        node = BayesNode(*node_spec)
        assert node.variable not in self.variables
        assert all((parent in self.variables) for parent in node.parents)
        self.nodes.append(node)
        self.variables.append(node.variable)
        # wire up both directions of the parent/child links
        for parent in node.parents:
            parent_node=self.variable_node(parent)
            parent_node.child_nodes.append(node)
            node.parent_nodes.append(parent_node)

    def variable_node(self, var):
        """Return the node for the variable named var (raises if missing)."""
        for n in self.nodes:
            if n.variable == var:
                return n
        raise Exception("No such variable: {}".format(var))

    def variable_values(self, var):
        """Return the domain (sample space) of var."""
        n=self.variable_node(var)
        return n.sample_space

    def _check_names(self):
        """Check that the parent domain names and the cpt tuples in the children match.

        Placeholder -- not implemented yet.
        """
        pass

    def _check_cpt_keys(self,var):
        """Placeholder for CPT key validation -- not implemented yet."""
        pass

    def __repr__(self):
        return 'BayesNet({0!r})'.format(self.nodes)
class BayesNode:
    """A conditional probability distribution, P(X | parents). Part of a BayesNet."""

    def __init__(self, X :str, sample_space, parents, cpt :dict):
        """Build one node of a Bayesian network.

        * 'X' is the variable name.
        * 'sample_space' is a list/tuple or space-delimited string of the
          possible values (the values cannot themselves be tuples).
          Special case: an empty string or None is treated as (True, False).
        * 'parents' is a list/tuple or space-delimited string of parent names.
        * 'cpt' maps a tuple of parent-value assignments to a probability
          vector over 'sample_space':
              {(v1, v2, ...): (p1, p2, ...), ...}
          Each key must have one entry per parent, each vector one entry per
          sample-space value, and each vector must sum to 1. Shorthands:
            - root node (no parents): a bare vector, e.g. (0.1, 0.9)
            - single parent: plain (non-tuple) keys, e.g. {True: (0.2, 0.8), ...}

        Examples:

        Root node, three outcomes:
        >> BayesNode('BaseRV','val1 val2 val3','',(0.1, 0.8, 0.1))

        Root node using the default binary RV:
        >> BayesNode('BaseRV','','',(0.1, 0.9))

        Binary RV with one binary parent:
        >> BayesNode('OneParentRV','','MyParent',{True:(0.2,0.8), False:(0.8,0.2)})

        Two-parent RV (keys are tuples of parent values):
        >> BayesNode('RV','a b','p1 p2',{('x','y'):(0.2,0.8), ...})
        """
        """
        Check and create the sample space
        """
        if sample_space=='' or sample_space==None:
            sample_space=(True,False)
        if isinstance(sample_space,str):
            sample_space=sample_space.split()
        assert isinstance(sample_space,(list,tuple)), "'sample_space' has wrong type"
        """
        Check the parents
        """
        if isinstance(parents, str):
            parents = parents.split()
        """
        Parse and setup the cpt
        first convert into tuple keys
        then check and normalize the distributions
        """
        if isinstance(cpt, (list, tuple)): # no parents, 0-tuple
            assert len(parents)==0, 'Can only use tuple notation for root nodes'
            cpt = {(): cpt}
        elif isinstance(cpt, dict):
            # one parent, 1-tuple: wrap plain keys into 1-tuples
            if cpt and not isinstance(list(cpt.keys())[0], tuple):
                assert len(parents)==1, 'Can only use non-tuple keys for one-parent nodes'
                cptNew={}
                for v,p in cpt.items():
                    assert isinstance(p,(list,tuple)) and len(sample_space)==len(p) , 'distribution of wrong length or type'
                    cptNew[(v,)]=p
                cpt=cptNew
        assert isinstance(cpt, dict)
        #check prob dist and types for dict if it was passed through
        for vs, p in cpt.items():
            assert isinstance(vs, tuple) and len(vs) == len(parents)
            assert isinstance(p,(list,tuple)) and len(sample_space)==len(p)
            assert all(0 <= pv <= 1 for pv in p), 'vector entires pi must be 0<=pi<=1'
            assert abs(sum(p)-1) < 0.00001, 'Probability must sum to 1: ' + str(p) + '->' + str(sum(p))
            cpt[vs]=p
        """
        dictionary for looking up the index of samples in the distribuiton
        """
        idx=dict(zip(sample_space,range(len(sample_space))))
        """
        assign the node properies
        """
        self.variable = X
        self.parents = parents
        self.sample_space= tuple(sample_space)
        self.idx=idx
        self.cpt = cpt
        self.child_nodes = []
        self.parent_nodes=[]

    def p(self, value, evidence):
        """Return the conditional probability
        P(X=value | parents=parent_values), where parent_values
        are the values of parents in evidence. (evidence must assign each
        parent a value, but could include more variables)
        """
        assert value in self.sample_space, "'value' is not in sample space"
        return self.cpt[event_values(evidence, self.parents)][self.idx[value]]

    def sample(self, event):
        """Sample from the distribution for this variable conditioned
        on event's values for parent_variables.
        """
        '''
        In Python 3.6 and up you can use random.choices
        Here we do the lookup using the uniform random number generator
        If you do a lot of sampling, then the cumulative sums should
        be pre-copmtued and stored during initialization
        '''
        # inverse-CDF sampling: walk the distribution until the running
        # sum passes a uniform random draw
        r=random.random()
        acc=0
        i=0
        for p in self.cpt[event_values(event, self.parents)]:
            acc += p
            if r <= acc:
                break
            i += 1
        assert i<len(self.sample_space), "random index is out of bounds. Make sure conditional probability talbe sumsto 1"
        return self.sample_space[i]

    def __repr__(self):
        return repr((self.variable, ' '.join(self.parents)))
| [
"kmortek@buffalo.edu"
] | kmortek@buffalo.edu |
7177569b14c063b92ba3d33a07cd863c3c8cf26e | d66d1979914d11f218b79e56758faae9347542fd | /telegram_markov.py | 6ecbc564345c7c61daf680e79f5bff78791cdf8f | [] | no_license | ValeriaVol/projects | c6db4f20c94df0327001e2f5a3644d980b3ff24e | 0e79864af7207bdc364111e816d0a68abd87d338 | refs/heads/master | 2020-05-22T18:37:32.513839 | 2019-12-09T11:53:11 | 2019-12-09T11:53:11 | 186,476,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import pickle
import pandas as pd
import markovify
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# Movie-plot corpus used to train the Markov-chain text generator.
df = pd.read_csv("wiki_movie_plots_deduped.csv")

def Model(msg):
    """Generate one random movie-plot sentence with a Markov chain.

    NOTE(review): *msg* is unused, and the markovify model is rebuilt from
    the whole corpus on every call, which is slow -- presumably it could be
    built once at import time; verify before changing.
    """
    corpus = list(df.Plot)
    example = markovify.Text(corpus)
    plot = example.make_sentence()
    return plot
# Telegram bot wiring.
# NOTE(review): the secret bot token is hard-coded in source; it should be
# loaded from an environment variable or config file and rotated.
updater = Updater(token='851227046:AAH5_p3gvo2hoClaj6CSwSzIMdYleva8gHs')
dispatcher = updater.dispatcher
def startCommand(bot, update):
    """Handle /start: send a short description of what the bot does."""
    intro = 'Generate a crazy movie plot with Markov Chain'
    bot.send_message(chat_id=update.message.chat_id, text=intro)
def textMessage(bot, update):
    """Handle any text message: reply with a generated plot plus a keyboard."""
    reply_markup = telegram.ReplyKeyboardMarkup([['Press']])
    generated_plot = Model(update.message.text)
    bot.send_message(chat_id=update.message.chat_id, text=generated_plot,
                     reply_markup=reply_markup)
# Register the handlers and start long-polling the Telegram Bot API.
start_command_handler = CommandHandler('start', startCommand)
text_message_handler = MessageHandler(Filters.text, textMessage)
dispatcher.add_handler(start_command_handler)
dispatcher.add_handler(text_message_handler)
updater.start_polling(clean=True)
updater.idle()  # block until interrupted
| [
"noreply@github.com"
] | noreply@github.com |
34c6e82241951242631ee6d1911dfe903c5385f7 | 9f930df50f28e6cbc74089057fb4418460a7f657 | /registrations/migrations/0014_auto_20160114_1326.py | b864677ac0b537caa2270c8df03d09e5df250be0 | [
"MIT"
] | permissive | xxyyzz/apogee-2016 | 2c57b3c48334e798cab560d6525567da9b2ede61 | c55f6427bbe246060aacbeb831e1519fb051a1b1 | refs/heads/master | 2021-05-16T11:55:21.525340 | 2017-09-07T19:05:00 | 2017-09-07T19:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Re-declare the status field of Paper and Project with three round choices.

    Stored values are '1'/'2'/'3'; the default is Round 1.
    """

    dependencies = [
        ('registrations', '0013_auto_20151217_1104'),
    ]

    operations = [
        migrations.AlterField(
            model_name='paper',
            name='status',
            field=models.CharField(max_length=2, choices=[('1', 'Round 1'), ('2', 'Round 2'), ('3', 'Round 3')], default='1'),
        ),
        migrations.AlterField(
            model_name='project',
            name='status',
            field=models.CharField(max_length=2, choices=[('1', 'Round 1'), ('2', 'Round 2'), ('3', 'Round 3')], default='1'),
        ),
    ]
| [
"prateek.g1509@gmail.com"
] | prateek.g1509@gmail.com |
574e6ebe552a5a31852ca844355d2fbf29fe83c7 | 18e94e29ad968c8b9b6550ad8b060600b099d243 | /unet3d/predictor.py | 0932f5b52b8850125d7343e6ab04b352fcf7d7bb | [
"MIT"
] | permissive | zzz123xyz/pytorch-3dunet | cf8de5f1b0c86f88aaecbbf4e965ca883ce9cdbb | 5bab6968b23cff5c6ae456b343285bfa9b104294 | refs/heads/master | 2020-06-22T01:04:13.943289 | 2020-05-26T15:54:41 | 2020-05-26T15:54:41 | 197,595,039 | 0 | 0 | MIT | 2019-07-18T13:43:38 | 2019-07-18T13:43:37 | null | UTF-8 | Python | false | false | 20,847 | py | import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from datasets.hdf5 import SliceBuilder
from unet3d.utils import get_logger
from unet3d.utils import unpad
logger = get_logger('UNet3DTrainer')
class _AbstractPredictor:
def __init__(self, model, loader, output_file, config, **kwargs):
self.model = model
self.loader = loader
self.output_file = output_file
self.config = config
self.predictor_config = kwargs
@staticmethod
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
@staticmethod
def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
def predict(self):
raise NotImplementedError
class StandardPredictor(_AbstractPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM
    use `LazyPredictor` instead.

    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """

    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

    def predict(self):
        """Run the model over all patches and write averaged probability maps to H5."""
        out_channels = self.config['model'].get('out_channels')
        if out_channels is None:
            out_channels = self.config['model']['dt_out_channels']

        # optionally keep only one channel of the network output
        prediction_channel = self.config.get('prediction_channel', None)
        if prediction_channel is not None:
            logger.info(f"Using only channel '{prediction_channel}' from the network output")

        device = self.config['device']
        output_heads = self.config['model'].get('output_heads', 1)

        logger.info(f'Running prediction on {len(self.loader)} batches...')

        # dimensionality of the output predictions
        volume_shape = self._volume_shape(self.loader.dataset)
        if prediction_channel is None:
            prediction_maps_shape = (out_channels,) + volume_shape
        else:
            # single channel prediction map
            prediction_maps_shape = (1,) + volume_shape
        logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')

        avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
        logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')

        # create destination H5 file
        h5_output_file = h5py.File(self.output_file, 'w')
        # allocate prediction and normalization arrays
        logger.info('Allocating prediction and normalization arrays...')
        prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
                                                                              output_heads, h5_output_file)

        # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
        self.model.eval()
        # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
        self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # send batch to device
                batch = batch.to(device)

                # forward pass
                predictions = self.model(batch)

                # wrap predictions into a list if there is only one output head from the network
                if output_heads == 1:
                    predictions = [predictions]

                # for each output head
                for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
                                                                          normalization_masks):
                    # convert to numpy array
                    prediction = prediction.cpu().numpy()

                    # for each batch sample
                    for pred, index in zip(prediction, indices):
                        # save patch index: (C,D,H,W)
                        if prediction_channel is None:
                            channel_slice = slice(0, out_channels)
                        else:
                            channel_slice = slice(0, 1)

                        index = (channel_slice,) + index

                        if prediction_channel is not None:
                            # use only the 'prediction_channel'
                            logger.info(f"Using channel '{prediction_channel}'...")
                            pred = np.expand_dims(pred[prediction_channel], axis=0)

                        logger.info(f'Saving predictions for slice:{index}...')

                        if avoid_block_artifacts:
                            # unpad in order to avoid block artifacts in the output probability maps
                            u_prediction, u_index = unpad(pred, index, volume_shape)
                            # accumulate probabilities into the output prediction array
                            prediction_map[u_index] += u_prediction
                            # count voxel visits for normalization
                            normalization_mask[u_index] += 1
                        else:
                            # accumulate probabilities into the output prediction array
                            prediction_map[index] += pred
                            # count voxel visits for normalization
                            normalization_mask[index] += 1

        # average overlapping patches and save the results to the H5 file
        self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
        # close the output H5 file
        h5_output_file.close()

    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        """Allocate one in-memory prediction/normalization array pair per head."""
        # initialize the output prediction arrays
        prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
        # initialize normalization mask in order to average out probabilities of overlapping patches
        normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        """Average accumulated probabilities per voxel and write them to H5."""
        # save probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
                                                                          prediction_datasets):
            # divide by the visit counts to average overlapping patches
            prediction_map = prediction_map / normalization_mask
            if dataset.mirror_padding:
                pad_width = dataset.pad_width
                logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
                prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]

            logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
            output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
class LazyPredictor(StandardPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
    is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM.

    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """

    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)

    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        """Allocate chunked H5 datasets (instead of RAM arrays) per output head."""
        # allocate datasets for probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        prediction_maps = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
                                       compression='gzip')
            for dataset_name in prediction_datasets]

        # allocate datasets for normalization masks
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
        normalization_masks = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
                                       compression='gzip')
            for dataset_name in normalization_datasets]

        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        """Normalize the H5-resident maps slice by slice, then drop the masks."""
        if dataset.mirror_padding:
            # NOTE(review): logger.warn is deprecated; logger.warning is the supported spelling
            logger.warn(
                f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')

        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')

        # normalize the prediction_maps inside the H5
        for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
                                                                                                 normalization_masks,
                                                                                                 prediction_datasets,
                                                                                                 normalization_datasets):
            # split the volume into parts and load each into memory separately
            logger.info(f'Normalizing {prediction_dataset}...')

            z, y, x = prediction_map.shape[1:]
            # take slices which are 1/27 of the original volume
            patch_shape = (z // 3, y // 3, x // 3)
            for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
                logger.info(f'Normalizing slice: {index}')
                prediction_map[index] /= normalization_mask[index]
                # make sure to reset the slice that has been visited already in order to avoid 'double' normalization
                # when the patches overlap with each other
                normalization_mask[index] = 1

            logger.info(f'Deleting {normalization_dataset}...')
            del output_file[normalization_dataset]
class EmbeddingsPredictor(_AbstractPredictor):
"""
Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.
The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings
with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
"""
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
self.iou_threshold = iou_threshold
self.noise_label = noise_label
self.clustering = clustering
assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
logger.info(f'IoU threshold: {iou_threshold}')
self.clustering_name = clustering
self.clustering = self._get_clustering(clustering, kwargs)
def predict(self):
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} patches...')
# dimensionality of the the output segmentation
volume_shape = self._volume_shape(self.loader.dataset)
logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')
logger.info('Allocating segmentation array...')
# initialize the output prediction arrays
output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
# initialize visited_voxels arrays
visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly
self.model.eval()
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# logger.info(f'Predicting embeddings for slice:{index}')
# send batch to device
batch = batch.to(device)
# forward pass
embeddings = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
embeddings = [embeddings]
for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
visited_voxels_arrays):
# convert to numpy array
prediction = prediction.cpu().numpy()
# iterate sequentially because of the current simple stitching that we're using
for pred, index in zip(prediction, indices):
# convert embeddings to segmentation with hdbscan clustering
segmentation = self._embeddings_to_segmentation(pred)
# stitch patches
self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)
# save results
with h5py.File(self.output_file, 'w') as output_file:
prediction_datasets = self._get_output_dataset_names(output_heads,
prefix=f'segmentation/{self.clustering_name}')
for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")
def _embeddings_to_segmentation(self, embeddings):
"""
Cluster embeddings vectors with HDBSCAN and return the segmented volume.
Args:
embeddings (ndarray): 4D (CDHW) embeddings tensor
Returns:
3D (DHW) segmentation
"""
# shape of the output segmentation
output_shape = embeddings.shape[1:]
# reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()
logger.info('Clustering embeddings...')
# perform clustering and reshape in order to get the segmentation volume
start = time.time()
clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
logger.info(
f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
return clusters
    def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
        """
        Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels
        merge the segmented patch (`segmentation`) into the `output_segmentation`

        Note: `segmentation` is modified in place (labels are shifted and
        relabeled) before being written into `output_segmentation`.

        Args:
            segmentation (ndarray): segmented patch
            index (tuple): position of the patch inside `output_segmentation` volume
            output_segmentation (ndarray): current state of the output segmentation
            visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited
                voxels will be marked by a number greater than 0
        """
        # index may arrive as a list of slices; tuple form is required for ndarray indexing
        index = tuple(index)
        # get new unassigned label
        max_label = np.max(output_segmentation) + 1
        # make sure there are no clashes between current segmentation patch and the output_segmentation
        # but keep the noise label
        noise_mask = segmentation == self.noise_label
        # shift every patch label above the labels already used globally
        segmentation += int(max_label)
        # restore the noise label, which must stay constant across patches
        segmentation[noise_mask] = self.noise_label
        # get the overlap mask in the current patch
        overlap_mask = visited_voxels_array[index] > 0
        # get the new labels inside the overlap_mask
        new_labels = np.unique(segmentation[overlap_mask])
        merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
        # relabel new segmentation with the merged labels
        for current_label, new_label in merged_labels:
            segmentation[segmentation == new_label] = current_label
        # update the output_segmentation
        output_segmentation[index] = segmentation
        # visit the patch
        visited_voxels_array[index] += 1
    def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
        """
        Pair labels from the new patch with labels already present in the
        current segmentation when their regions overlap sufficiently.

        For each label in `new_labels` (noise excluded) the most frequent
        overlapping label of `current_segmentation` is found; the pair is kept
        when the Jaccard index (IoU) exceeds `self.iou_threshold`.

        Returns:
            list of (current_label, new_label) tuples to be merged
        """
        def _most_frequent_label(labels):
            # label occurring most often in the given (flattened) region
            unique, counts = np.unique(labels, return_counts=True)
            ind = np.argmax(counts)
            return unique[ind]
        result = []
        # iterate over new_labels and merge regions if the IoU exceeds a given threshold
        for new_label in new_labels:
            # skip 'noise' label assigned by hdbscan
            if new_label == self.noise_label:
                continue
            new_label_mask = new_segmentation == new_label
            # get only the most frequent overlapping label
            most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
            # skip 'noise' label
            if most_frequent_label == self.noise_label:
                continue
            current_label_mask = current_segmentation == most_frequent_label
            # compute Jaccard index
            iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
                                                                                           current_label_mask).sum()
            if iou > self.iou_threshold:
                # merge labels
                result.append((most_frequent_label, new_label))
        return result
def _get_clustering(self, clustering_alg, kwargs):
logger.info(f'Using {clustering_alg} for clustering')
if clustering_alg == 'hdbscan':
min_cluster_size = kwargs.get('min_cluster_size', 50)
min_samples = kwargs.get('min_samples', None),
metric = kwargs.get('metric', 'euclidean')
cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')
logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
else:
bandwidth = kwargs['bandwidth']
logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
# use fast MeanShift with bin seeding
return MeanShift(bandwidth=bandwidth, bin_seeding=True)
| [
"zyx321zzz@gmail.com"
] | zyx321zzz@gmail.com |
3e253e722e54bbe8f9ba24c1e6f62976f365e4f0 | 4cbb2067f7c816acaa30dc9f13d80a591ecd70ad | /setup.py | c9d8d68faf56b65e1b64e3569ed122bdb4bfdacc | [] | no_license | apparazzi/kryptoflow-serving | 0022344d68c232cba917d65c6cfed6b0f1036672 | 805f40fb7f1c20f756f54e69f8698007c252ec55 | refs/heads/master | 2020-03-21T14:36:20.117009 | 2018-06-19T02:55:03 | 2018-06-19T02:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for kryptoflow_serving.
This file was generated with PyScaffold 3.0.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: http://pyscaffold.org/
"""
import sys
from setuptools import setup, find_packages
# Add here console scripts and other entry points in ini-style format
entry_points = """
[console_scripts]
# script_name = kryptoflow_serving.module:function
# For example:
# fibonacci = kryptoflow_serving.skeleton:run
"""
def setup_package():
    """Invoke setuptools.setup with this project's static configuration.

    Packaging metadata not given here is expected to come from setup.cfg
    (PyScaffold layout); only the dynamic bits are passed explicitly.
    """
    setup(entry_points=entry_points,
          version='0.12',
          tests_require=['pytest', 'pytest-cov', 'pytest-runner'],
          packages=find_packages(exclude=['docs', 'tests'], include=['kryptoflow_serving']))
if __name__ == "__main__":
setup_package()
| [
"carlo.mazzaferro@gmail.com"
] | carlo.mazzaferro@gmail.com |
db619209d99e9c11e7884096814e36d0ecfb565e | bdfd3937f6222157d436dbdc7d7efad2b1b3f8f6 | /appengine/logging/writing_logs/main_test.py | 339caa4ef66206268f60e87b58a9339a9577a20d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | b-fong/python-docs-samples | 81f089db6f4378cb7cfd278d3c8f9fb198aeb504 | 493f850306f7860a85948365ba4ee70500bec0d6 | refs/heads/master | 2020-12-25T08:37:37.864777 | 2016-02-17T22:54:50 | 2016-02-17T22:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testing import AppEngineTest
import webtest
from . import main
class TestWritingLogs(AppEngineTest):
    """Integration test for the App Engine writing-logs sample app."""

    def setUp(self):
        """Wrap the WSGI app so requests can be issued in-process."""
        super(TestWritingLogs, self).setUp()
        self.app = webtest.TestApp(main.app)

    def test_get(self):
        """GET / should succeed and render the sample page."""
        response = self.app.get('/')
        self.assertEqual(response.status_int, 200)
        self.assertTrue('Logging example' in response.text)
| [
"jon.wayne.parrott@gmail.com"
] | jon.wayne.parrott@gmail.com |
90bf70ca65b9533d94cc48452db642e4e2e6e5d8 | 3b9f83d768cf411c8a06e69b5e73c57652f17da0 | /server/wsgi.py | e444db59a5ef96c0ac140e11f6c82b34136055d9 | [] | no_license | ichoukou/JuziServices | be8efcf3489cdc6e5497d424ca3293becaba18e3 | 9217f1d596214603877057f32924373bf1b785ad | refs/heads/master | 2020-07-19T18:53:21.536588 | 2019-01-04T09:00:56 | 2019-01-04T09:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | # coding: utf8
# Copyright 2017 Stephen.Z. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
WSGI config for chatbot_rest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")
application = get_wsgi_application()
| [
"15242200221@163.com"
] | 15242200221@163.com |
8f086c8011e8050bec4d7f6614aa3937ffc6c47c | 00b0d20508d27807ad999d90aa3012cff8626be0 | /cchain/processors/exceptions.py | 73bb5dd4021bed4c2ae771bfdfea83f536f5aa30 | [] | no_license | krisb78/couch-chain | 5789ab81b8497dcdf0d5673adcb01ad0cced5771 | 240411dbdb715cc12981836fa1813065bb9708ab | refs/heads/master | 2023-04-18T07:55:54.938950 | 2023-04-10T19:57:45 | 2023-04-10T19:57:45 | 29,787,766 | 1 | 3 | null | 2022-01-21T18:57:32 | 2015-01-24T18:58:38 | Python | UTF-8 | Python | false | false | 107 | py | class ProcessingError(Exception):
"""Raised when there was an error while processing changes.
"""
| [
"krzysztof.bandurski@gmail.com"
] | krzysztof.bandurski@gmail.com |
a3d1bc0e4278dc91ff2dc1dc70906472fdd4ecfa | b195cbe44b84f2f56e49fbce3afeee5f82d27b0c | /chrvis-server/home/admin.py | 43c28bc546ca8a6d8f10db3410571723b7bb9954 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | orionpax00/ChrVis | 72b1774f55e5d48ff3b25f5eb71dad96411830dd | 0589feef56cc0e2d5266898defb837c5dbb1201d | refs/heads/master | 2022-08-09T12:08:28.371140 | 2020-01-16T16:01:06 | 2020-01-16T16:01:06 | 188,672,741 | 2 | 1 | Apache-2.0 | 2022-07-15T20:31:41 | 2019-05-26T11:10:33 | Jupyter Notebook | UTF-8 | Python | false | false | 122 | py | from django.contrib import admin
from .models import Document
# Register your models here.
admin.site.register(Document)
| [
"durgesh123.iitr@gmail.com"
] | durgesh123.iitr@gmail.com |
a4aa24ec5521424963907b8bcfa6a69be7b6b532 | 3e42a1027e40da3c0aceb5f96879a915c3e97b7a | /scraping/migrations/0001_initial.py | 90065d035815f4b6522f69a80a981df8c3ccda34 | [] | no_license | SmartBoY10/scrapling_service | 775a06f4a7c842a9944f3e6e59817e5783c81c6e | 0e772b4abbee28f69cd5b11df478838b5e00f9c7 | refs/heads/main | 2022-12-24T02:45:53.771161 | 2020-10-01T18:43:48 | 2020-10-01T18:43:48 | 300,363,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # Generated by Django 3.0.3 on 2020-10-01 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the 'scraping' app: creates the
    # City model with a display name and an optional (blank) slug.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # slug may be left empty; presumably filled in by a pre-save
                # hook elsewhere — TODO confirm against the model definition
                ('slug', models.SlugField(blank=True)),
            ],
        ),
    ]
| [
"qurol.abduajalilov99@gmail.com"
] | qurol.abduajalilov99@gmail.com |
cf33a7a9d0b7505b10b4181e0638317e524ace1d | 124f40a026d1ec3854972200f0d2402edd86f96a | /Сортировка_выборкойy.py | 96e03138067fa2fc9bff91e9c4305196a2853778 | [] | no_license | doharo2001/Python_algorithms | 0305a3bba8fbbd64ad356138621979366fedf33c | 26fe3347def820bae7b2762650a2af4151335dfd | refs/heads/main | 2023-07-07T17:52:33.015694 | 2021-08-22T12:43:55 | 2021-08-22T12:43:55 | 398,783,828 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | # Сортировка выборкой
# Selection sort.
# Idea: treat the list as two parts — a sorted prefix on the left and an
# unsorted suffix on the right. On every pass, find the smallest element in
# the unsorted part and swap it into the next slot of the sorted part,
# until only one element remains unsorted.
nums = [5, 7, 6, 9, 8, 2, 4, 3, 1]
print("Было: ", nums)
size = len(nums)
for fill in range(size):
    # locate the minimum of the unsorted tail nums[fill:]
    smallest = fill
    for candidate in range(fill + 1, size):
        if nums[candidate] < nums[smallest]:
            smallest = candidate
    # move it to the boundary of the sorted prefix
    nums[fill], nums[smallest] = nums[smallest], nums[fill]
print("Стало: ", nums)
"doharo2001@gmail.com"
] | doharo2001@gmail.com |
66034e4237f03e3feea6cf0c1cb3a5d2f84b4f3e | 7f81c7b4110640f73b769b6a41e9ef3ae2495611 | /bert_multitask_learning/__init__.py | e9e702d00da1d9e2c6bc914b6a59975fe2a14257 | [
"Apache-2.0"
] | permissive | ml2457/bert-multitask-learning | 26464c6d1ad94e7aeebd93d02f2604298ebde5db | 993c1e6ca279e90e12ce4a684260219b18bbea70 | refs/heads/master | 2023-02-10T14:05:27.643723 | 2021-01-10T15:22:11 | 2021-01-10T15:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | __version__ = "0.6.10"
from .read_write_tfrecord import *
from .input_fn import *
from .model_fn import *
from .params import *
from .top import *
from .run_bert_multitask import *
from .utils import *
from .preproc_decorator import preprocessing_fn
from . import predefined_problems
from .special_tokens import *
| [
"junpang.yip@gmail.com"
] | junpang.yip@gmail.com |
f78963add4b60ef66c7ce35ce18852ad3a6e9be9 | 33daf4c69a8f46d7ad8d93eaa73fc60e36fd022d | /gestion/asignaciones/20150908-todos-cuerpos/procesar_tabla.py~ | 6817418317f466cb6fa5e7e4a9ff2c5abf0fe429 | [] | no_license | OscarMaestre/estructurado | 81cfc9412b77d5015be1bebf66785c357746d8e2 | 7649747e48128cb9c17dee937574e9490fcc9087 | refs/heads/master | 2021-01-10T15:05:47.695362 | 2016-04-28T07:30:50 | 2016-04-28T07:30:50 | 53,923,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,671 | #!/usr/bin/env python3
import re
import sys
import os
NUM_SUBDIRECTORIOS_ANTERIORES=1
SEPARADOR=os.sep
RUTA_PAQUETE_BD=(".."+SEPARADOR) * NUM_SUBDIRECTORIOS_ANTERIORES
DIRECTORIO= RUTA_PAQUETE_BD + "db_nombramientos"
#aqui = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, DIRECTORIO)
import GestorDB
import ListaCampos
archivo=sys.argv[1]
re_dni="[0-9]{7,8}[A-Z]"
#especialidad="[PWB0]59[0-9][0-9]{3}"
re_especialidad="\- [PWB0]59([0-9]{4})"
re_codigo_centro="[0-9]{8}"
re_codigo_centro_ciudad_real="^13[0-9]{6}$"
re_fecha="[0-9]{2}/[0-9]{2}/[0-9]{4}"
def linea_contiene_patron(patron, linea):
    """Return True when the regex *patron* matches anywhere inside *linea*."""
    return re.search(patron, linea) is not None
def extraer_patron(patron, linea):
    """Return the first substring of *linea* matching regex *patron*.

    Prints a diagnostic and returns None when there is no match.
    """
    concordancia = re.search(patron, linea)
    if concordancia is None:
        print ("No concordancia")
        return None
    return concordancia.group(0)
def extraer_codigo_centro(linea):
    """Return the first 8-digit school code found in *linea* (None if absent)."""
    return extraer_patron(re_codigo_centro, linea)
def extraer_localidad(linea):
    """Return the locality name stored in fixed-width columns 9-50 of *linea*."""
    return linea[9:51].strip()
def extraer_dni(linea):
    """Return the first DNI (7-8 digits followed by an uppercase letter) in *linea*.

    The whole line is scanned. The original code also computed an unused
    slice ``linea[51:60]`` (dead local), which has been removed.
    """
    return extraer_patron(re_dni, linea)
def extraer_nombre(linea):
    """Return the person's name: the text after the first '-' from column 49 on.

    When no '-' is present an 'Error:'-prefixed string is returned instead.
    """
    resto = linea[49:]
    separador = resto.find("-")
    if separador == -1:
        return "Error:" + resto
    return resto[separador + 2:].strip()
cadena_sql="""insert into asignaciones_18092015 values
(
*C1*'{0}'*C1*,
*C2*'{1}'*C2*,
*C3*'{2}'*C3*,
*C4*'{3}'*C4*,
*C5*'{4}'*C5*,
*C6*'{5}'*C6*,
*C7*'{6}'*C7*,
*C8*'{7}'*C8*
);
"""
def generar_linea_sql(lista_campos):
    """Build the two UPDATE statements for one adjudication record.

    Centres whose code is not in Ciudad Real province are collapsed to the
    sentinel code '9888' before the SQL is generated.
    """
    dni = lista_campos[0]
    fecha_fin = lista_campos[7]
    cod_centro = lista_campos[3]
    if not linea_contiene_patron(re_codigo_centro_ciudad_real, cod_centro):
        cod_centro = "9888"
    sentencias = [
        "update gaseosa set cod_centro='" + cod_centro + "' where dni='" + dni + "';\n",
        "update gaseosa set auxiliar='HACIENDO SUSTITUCION HASTA " + fecha_fin + "' where dni='" + dni + "';\n",
    ]
    return "".join(sentencias)
def generar_linea_sql2(lista_campos):
    """Join the record's fields with ':' (simple inspection/debug helper)."""
    return ":".join(lista_campos)
# Main driver: parse the fixed-width adjudication listing given on the command
# line, print the per-record UPDATE statements, and bulk-insert the records.
# NOTE: 'archivo' is rebound from the file *name* to the open file object here.
archivo=open(archivo,"r")
lineas=archivo.readlines()
total_lineas=len(lineas)
codigo_especialidad=""
lista_inserts_sql3=[]
for i in range(0, total_lineas):
    linea=lineas[i]
    lista_campos=[]
    lista_campos_para_insertar=ListaCampos.ListaCampos()
    # a speciality header line updates the code used for the records below it
    if (linea_contiene_patron(re_especialidad, linea)):
        codigo_especialidad=extraer_patron(re_especialidad, linea)
    # a DNI line starts a two-line record: person data here, centre/dates on the next line
    if (linea_contiene_patron(re_dni, linea)):
        linea_limpia=linea.strip()
        codigo_centro=extraer_codigo_centro(linea_limpia)
        localidad=extraer_localidad(linea_limpia)
        dni = extraer_dni(linea_limpia)
        nombre = extraer_nombre(linea_limpia)
        linea_siguiente=lineas[i+1]
        nombre_centro=linea_siguiente[0:51].strip()
        trozo_fecha1=linea_siguiente[72:132]
        fecha_1=extraer_patron(re_fecha, trozo_fecha1)
        trozo_fecha2=linea_siguiente[133:]
        fecha_2=extraer_patron(re_fecha, trozo_fecha2)
        lista_campos=[dni, nombre, codigo_especialidad, codigo_centro, nombre_centro, localidad, fecha_1, fecha_2]
        linea_sql=generar_linea_sql(lista_campos)
        lista_campos_para_insertar.anadir("nif", dni, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("nombre_completo", nombre, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("fecha_inicio", fecha_1, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("fecha_fin", fecha_2, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("procedimiento", "Adjudicacion 08-09-2015", ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("especialidad", codigo_especialidad, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("codigo_centro", codigo_centro, ListaCampos.ListaCampos.CADENA)
        print (linea_sql)
        #print cadena_sql.format(codigo_especialidad, codigo_centro, localidad, dni, nombre, nombre_centro, fecha_1, fecha_2)
        # NOTE(review): this 'i=i+1' has no effect — 'for i in range(...)'
        # reassigns i on the next iteration, so the continuation line is NOT
        # actually skipped (it presumably was meant to be); confirm intent.
        i=i+1
    # NOTE(review): this append runs for EVERY line, including lines that
    # matched nothing, so empty ListaCampos inserts are also generated —
    # verify that generar_insert handles the empty case as intended.
    lista_inserts_sql3.append(lista_campos_para_insertar.generar_insert("nombramientos"))
archivo.close()
GestorDB.BD_RESULTADOS.ejecutar_sentencias(lista_inserts_sql3)
| [
"profesor.oscar.gomez@gmail.com"
] | profesor.oscar.gomez@gmail.com | |
27f95d2e43a12b557a7ce14431e53368ee5452e3 | ae2c06e5e01132bdc9d2fd3d78a558d0d92ad85b | /qa/rpc-tests/txn_clone.py | ae4587bfcb6ddc9583a7f010648f3f06e894a30e | [
"MIT"
] | permissive | communitycoin1/cc | 1cd8c2736a626635ffb0c59fab7a0c7ead3b0fb3 | d539107da36b68ccebe320acaa4bfd1819751825 | refs/heads/master | 2020-03-21T16:13:08.729536 | 2018-06-27T21:25:11 | 2018-06-27T21:25:11 | 138,757,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,017 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Test that wallet accounting stays correct when a transaction is
    replaced in the chain by an equivalent malleated clone.

    The network starts split; tx1 is cloned by re-signing with a different
    sighash type, the clone is mined on the other half, and after
    reconnecting the balances must reflect the clone instead of tx1."""

    def add_options(self, parser):
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        """Create tx1/tx2, malleate a clone of tx1 by hand-editing its raw
        hex, mine the clone on the disconnected half, rejoin and verify
        confirmations and account balances."""
        # All nodes should start with 12,500 COMMUNITYCOIN:
        starting_balance = 12500
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.001)

        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)

        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)

        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)

        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)

        # 3 hex manipulations on the clone are required
        # (offsets below are in hex characters, hence the factor of 2)

        # manipulation 1. sequence is at version+#inputs+input+sigstub
        posseq = 2*(4+1+36+1)
        seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
        # splice the sequence back in, byte-reversed to little-endian
        clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]

        # manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 400 COMMUNITYCOIN serialized is 00902f5009000000
        pos0 = 2*(4+1+36+1+4+1)
        hex400 = "00902f5009000000"
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or
            rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
            output0 = clone_raw[pos0 : pos0 + output_len]
            output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]

        # manipulation 3. locktime is after outputs
        poslt = pos0 + 2 * output_len
        ltbe = '%08x' % rawtx1["locktime"]
        clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]

        # Use a different signature hash type to sign.  This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)

        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 500DASH for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 500
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 12190 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Verify expected confirmations: tx1 was displaced by its clone
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)

        # Check node0's total balance; should be same as before the clone, + 1000 COMMUNITYCOIN for 2 matured,
        # less possible orphaned matured subsidy
        expected += 1000
        if (self.options.mine_block):
            expected -= 500
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)

        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 12190 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                                                      - 12190
                                                      + fund_foo_tx["fee"]
                                                      - 290
                                                      + fund_bar_tx["fee"]
                                                      + 1000)

        # Node1's "from0" account balance
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| [
"eddie@es.home"
] | eddie@es.home |
0725b208bc125d2ab8e98b94ffeab22dd91dd229 | 4aaa2f751fe129e2fb479b52467bb9a13ffc970e | /day6.py | 6f03853c18dc4275d162b6d358b4d64f26d332a5 | [] | no_license | jillesme/AdventOfCode2017 | b43fb604149ea055c26c76c2401bc5c6c63e6cdf | eb5156cba860622abe68dd553e41803ca3685b72 | refs/heads/master | 2021-08-28T17:25:01.932872 | 2017-12-12T22:58:45 | 2017-12-12T22:58:45 | 112,797,928 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | input = '5 1 10 0 1 7 13 14 3 12 8 10 7 12 0 6'
def challenge_one(input):
    """Count redistribution cycles until a memory-bank state repeats.

    Advent of Code 2017, day 6, part 1.

    Args:
        input: whitespace-separated bank sizes (the puzzle input uses tabs).

    Returns:
        Number of redistribution steps performed before a configuration
        is seen for the second time.
    """
    # BUGFIX/generalization: split() handles tabs AND spaces; the original
    # split('\t') produced a single unparsable token for space-separated input
    # (such as the module-level sample input above).
    memory = [int(n) for n in input.split()]
    track = []
    steps = 0
    while memory not in track:
        steps += 1
        track.append(memory[:])
        # pick the fullest bank (ties resolved to the lowest index) ...
        highest_index = memory.index(max(memory))
        highest_memory = memory[highest_index]
        # ... empty it ...
        memory[highest_index] -= highest_memory
        # ... and deal its blocks out one at a time, wrapping around
        for n in range(1, highest_memory + 1):
            memory[(highest_index + n) % len(memory)] += 1
    return steps
def challenge_two(input):
    """Length of the redistribution loop (Advent of Code 2017, day 6, part 2).

    Args:
        input: whitespace-separated bank sizes (the puzzle input uses tabs).

    Returns:
        How many cycles separate the first repeated configuration from its
        earlier occurrence (i.e. the size of the infinite loop).
    """
    # BUGFIX/generalization: split() handles tabs AND spaces; the original
    # split('\t') broke on space-separated input.
    memory = [int(n) for n in input.split()]
    track = []
    while memory not in track:
        track.append(memory[:])
        highest_index = memory.index(max(memory))
        highest_memory = memory[highest_index]
        # empty the fullest bank and redistribute its blocks round-robin
        memory[highest_index] -= highest_memory
        for n in range(1, highest_memory + 1):
            memory[(highest_index + n) % len(memory)] += 1
    # distance from the first occurrence of the repeated state to the end
    return len(track) - track.index(memory)
| [
"jilles.soeters@udemy.com"
] | jilles.soeters@udemy.com |
7b726e78d17e8a9f49a6de856f02862ac492efba | bd2dc60667bf96b0960f86647e245642bb8af532 | /python/BLOSEM_tutorial/EVControllerMsgFed.py | 1e9c35d7af185522f85f40418aec07b076cb92c5 | [
"BSD-3-Clause"
] | permissive | activeshadow/HELICS-Examples | 4a25bf35845199d66056fdd3e59f229b607828f1 | 750cd111eb11efc681d2575b4919759bdce38e51 | refs/heads/master | 2021-06-06T04:12:57.628736 | 2021-05-12T04:57:49 | 2021-05-12T04:57:49 | 149,702,075 | 0 | 0 | null | 2018-09-21T03:01:28 | 2018-09-21T03:01:28 | null | UTF-8 | Python | false | false | 6,388 | py | """
Created on 8/27/2020
This is a simple EV charge controller federate that manages the charging at
a set of charging terminals in a hypothetical EV garage. It receives periodic
SOC messages from each EV (associated with a particular charging terminal)
and sends back a message indicating whether the EV should continue charging
or not (based on whether it is full).
@author: Allison M. Campbell
allison.m.campbell@pnnl.gov
"""
import helics as h
import logging
import numpy as np
import sys
import time
import matplotlib.pyplot as plt
import pandas as pd
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def destroy_federate(fed):
    '''
    As part of ending a HELICS co-simulation it is good housekeeping to
    formally destroy a federate. Doing so informs the rest of the
    federation that it is no longer a part of the co-simulation and they
    should proceed without it (if applicable). Generally this is done
    when the co-simulation is complete and all federates end execution
    at more or less the same wall-clock time.

    :param fed: Federate to be destroyed
    :return: (none)
    '''
    # NOTE(review): the returned status is never inspected; failures to
    # finalize are silently ignored — confirm that is acceptable here
    status = h.helicsFederateFinalize(fed)
    h.helicsFederateFree(fed)
    h.helicsCloseLibrary()
    print("EVController: Federate finalized")
if __name__ == "__main__":
    # Entry point: create the controller federate, answer every EV SOC
    # message with a keep-charging/stop instruction, then plot the results.

    ##############  Registering  federate from json  ##########################
    fed = h.helicsCreateMessageFederateFromConfig("EVControllerconfig.json")
    federate_name = h.helicsFederateGetName(fed)
    logging.info(f'Created federate {federate_name}')

    #### Register endpoint #####
    # Only one endpoint for the controller
    endid = h.helicsFederateGetEndpointByIndex(fed, 0)
    end_name = h.helicsEndpointGetName(endid)
    logger.info("Registered Endpoint ---> {}".format(end_name))

    ##############  Entering Execution Mode  ##################################
    h.helicsFederateEnterExecutingMode(fed)
    logger.info('Entered HELICS execution mode')

    hours = 24*7 # one week
    total_interval = int(60 * 60 * hours)
    grantedtime = -1

    # It is common in HELICS for controllers to have slightly weird timing
    #   Generally, controllers only need to produce new control values when
    #   their inputs change. Because of this, it is common to have them
    #   request a time very far in the future (helics_time_maxtime) and
    #   when a signal arrives, they will be granted a time earlier than
    #   that, recalculate the control output and request a very late time
    #   again.

    # There appears to be a bug related to maxtime in HELICS 2.4 that can
    #   can be avoided by using a slightly smaller version of maxtime
    #   (helics_time_maxtime is the largest time that HELICS can internally
    #   represent and is an approximation for a point in time very far in
    #   in the future).
    fake_max_time = h.helics_time_maxtime/1000
    starttime = fake_max_time
    logger.debug(f'Requesting initial time {starttime}')
    grantedtime = h.helicsFederateRequestTime (fed, starttime)
    logger.debug(f'Granted time {grantedtime}')

    t = grantedtime
    time_sim = []
    soc = {}

    while t < total_interval:

        # In HELICS, when multiple messages arrive at an endpoint they
        # queue up and are popped off one-by-one with the
        #   "helicsEndpointHasMessage" API call. When that API doesn't
        #   return a message, you've processed them all.
        while h.helicsEndpointHasMessage(endid):

            # Get the SOC from the EV/charging terminal in question
            msg = h.helicsEndpointGetMessageObject(endid)
            currentsoc = h.helicsMessageGetString(msg)
            source = h.helicsMessageGetOriginalSource(msg)
            logger.debug(f'Received message from endpoint {source}'
                         f' at time {t}'
                         f' with SOC {currentsoc}')

            # Send back charging command based on current SOC
            #   Our very basic protocol:
            #       If the SOC is less than soc_full keep charging (send "1")
            #       Otherwise, stop charging (send "0")
            soc_full = 0.9
            if float(currentsoc) <= soc_full:
                instructions = 1
            else:
                instructions = 0
            message = str(instructions)
            h.helicsEndpointSendMessageRaw(endid, source, message)
            logger.debug(f'Sent message to endpoint {source}'
                         f' at time {t}'
                         f' with payload {instructions}')

            # Store SOC for later analysis/graphing
            if source not in soc:
                soc[source] = []
            soc[source].append(float(currentsoc))
            time_sim.append(t)

        # Since we've dealt with all the messages that are queued, there's
        #   nothing else for the federate to do until/unless another
        #   message comes in. Request a time very far into the future
        #   and take a break until/unless a new message arrives.
        logger.debug(f'Requesting time {fake_max_time}')
        grantedtime = h.helicsFederateRequestTime (fed, fake_max_time)
        logger.info(f'Granted time: {grantedtime}')
        t = grantedtime

    # Close out co-simulation execution cleanly now that we're done.
    destroy_federate(fed)

    # Printing out final results graphs for comparison/diagnostic purposes.
    xaxis = np.array(time_sim)/3600
    y = []
    for key in soc:
        y.append(np.array(soc[key]))

    # NOTE(review): the plotting below hard-codes exactly 5 charging ports
    # (5 subplots, y[0]..y[4]) — it will fail if fewer than 5 EVs reported.
    plt.figure()

    fig, axs = plt.subplots(5, sharex=True, sharey=True)
    fig.suptitle('SOC at each charging port')

    axs[0].plot(xaxis, y[0], color='tab:blue', linestyle='-')
    axs[0].set_yticks(np.arange(0,1.25,0.5))
    axs[0].set(ylabel='EV1')
    axs[0].grid(True)

    axs[1].plot(xaxis, y[1], color='tab:blue', linestyle='-')
    axs[1].set(ylabel='EV2')
    axs[1].grid(True)

    axs[2].plot(xaxis, y[2], color='tab:blue', linestyle='-')
    axs[2].set(ylabel='EV3')
    axs[2].grid(True)

    axs[3].plot(xaxis, y[3], color='tab:blue', linestyle='-')
    axs[3].set(ylabel='EV4')
    axs[3].grid(True)

    axs[4].plot(xaxis, y[4], color='tab:blue', linestyle='-')
    axs[4].set(ylabel='EV5')
    axs[4].grid(True)
    plt.xlabel('time (hr)')
    #for ax in axs():
    #    ax.label_outer()
    plt.show()
| [
"trevor.hardy@pnnl.gov"
] | trevor.hardy@pnnl.gov |
bcc44dd356766361c249296518c49be6feaab444 | a077400e92e995c1b939c6d4a822e868bacea8ad | /plotting.py | a9ecf5e3dae77dc3cc688933b016e57ba7ad8563 | [] | no_license | aryatejas2/dashcam-footage-accident-prediction | 7ea3fa5f6e6be3f6253e473f4abab3b7d27a79c6 | 2341d8d2440de831ce7046211221a6786499ba28 | refs/heads/master | 2023-07-30T11:45:54.676536 | 2021-09-17T18:07:17 | 2021-09-17T18:07:17 | 407,627,847 | 0 | 0 | null | 2021-09-17T18:07:18 | 2021-09-17T17:32:31 | Python | UTF-8 | Python | false | false | 611 | py | """
@author: Tejas Arya (ta2763)
@author: Amritha Venkataramana (axv3602)
"""
import matplotlib.pyplot as plt
# Per-batch training results: each entry is a (loss, accuracy-fraction) pair.
arr = [[0.847466766834259, 0.46666666865348816],
       [0.6447384357452393, 0.5333333611488342],
       [0.7249559164047241, 0.6000000238418579]]

# Unpack into separate series; accuracy is converted to a percentage.
loss = [batch_loss for batch_loss, _ in arr]
accuracy = [batch_acc * 100 for _, batch_acc in arr]
y = list(range(len(loss)))

# Loss curve: a connecting line plus red point markers at each batch.
plt.plot(y, loss)
plt.plot(y, loss, 'r.')
plt.xlabel('batches')
plt.ylabel('loss')
plt.show()
plt.close()

# Accuracy curve on a fresh figure (pyplot creates one implicitly after close()).
plt.xlabel('batches')
plt.ylabel('accuracy')
plt.plot(y, accuracy, 'r.')
plt.plot(y, accuracy)
plt.show()
"noreply@github.com"
] | noreply@github.com |
f932861d369b33babe2d491bf2a81d7802552474 | 7e6be6d1e8d85795313410a739537ce0a2ce641a | /Mining-Demo/tweetgatherer.py | 326e5f6cc49a24071cc39867d964c9c7ad9ca757 | [] | no_license | ianiemeka14/data-mining-project | 4951379c4c97c7625c6cb4139724cc3dbbca7126 | 826a9004fb76bec34491303148f7cd1c55ffc201 | refs/heads/master | 2020-07-26T15:13:04.351917 | 2019-09-16T01:20:19 | 2019-09-16T01:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | #Import the text pre-process function and a custom stream Listener, as well as API keys. and other needed files
from textpreprocess import preprocess
from streamlistener import MyListener
from Authorize import api
import tweepy
from tweepy import Stream
from tweepy.streaming import StreamListener
from Authorize import auth
import json
import termfrequencies
from termfrequencies import stop
import operator
from collections import Counter
# Time limit set to 20 seconds, if undefined defaults to 60
#tweetstream = tweepy.Stream(auth, MyListener(time_limit=20))
#tweetstream.filter(track=['#python'])
from collections import defaultdict
# remember to include the other import from the previous post
# Co-occurrence matrix: com[w1][w2] counts how often the (sorted) word pair
# (w1, w2) appears together in the same tweet.
com = defaultdict(lambda: defaultdict(int))
fname = 'tweetstream.json'
search_word = 'python'  # term whose co-occurring words we also want to count
count_search = Counter()

# Single pass over the tweet stream (one JSON document per line).
# BUG FIX: the original called readline()/json.loads twice per iteration,
# which skipped the first tweet, re-parsed every other line, and crashed on
# the empty string returned at EOF. It also iterated `f` again AFTER the
# `with` block had closed it, raising "I/O operation on closed file".
# Both analyses are now done in this one loop.
with open(fname, 'r') as f:
    for line in f:
        line = line.strip()
        if not line:
            # Skip blank lines (e.g. trailing newline at EOF).
            continue
        tweet = json.loads(line)
        # Keep only meaningful terms: drop stopwords, hashtags and mentions.
        terms_only = [term for term in preprocess(tweet['text'])
                      if term not in stop
                      and not term.startswith(('#', '@'))]
        # Build co-occurrence matrix over all unordered term pairs.
        for i in range(len(terms_only) - 1):
            for j in range(i + 1, len(terms_only)):
                w1, w2 = sorted([terms_only[i], terms_only[j]])
                if w1 != w2:
                    com[w1][w2] += 1
        # Count terms that co-occur with the search word.
        if search_word in terms_only:
            count_search.update(terms_only)

com_max = []
# For each term, look for the most common co-occurrent terms.
for t1 in com:
    t1_max_terms = sorted(com[t1].items(), key=operator.itemgetter(1), reverse=True)[:5]
    for t2, t2_count in t1_max_terms:
        com_max.append(((t1, t2), t2_count))
# Get the most frequent co-occurrences overall.
terms_max = sorted(com_max, key=operator.itemgetter(1), reverse=True)
print(terms_max[:5])

print("Co-occurrence for %s:" % search_word)
print(count_search.most_common(20))
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.