code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Generated with StiffnessType
#
from enum import Enum
from enum import auto
class StiffnessType(Enum):
""""""
LINEAR = auto()
NON_LINEAR = auto()
def label(self):
if self == StiffnessType.LINEAR:
return "Linear"
if self == StiffnessType.NON_LINEAR:
return "Non linear" | [
"enum.auto"
] | [((130, 136), 'enum.auto', 'auto', ([], {}), '()\n', (134, 136), False, 'from enum import auto\n'), ((154, 160), 'enum.auto', 'auto', ([], {}), '()\n', (158, 160), False, 'from enum import auto\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.views import View
from .forms import DismissNotificationForm
from .models import Notification
class DismissNotificationView(LoginRequiredMixin, View):
http_method_names = ['post', 'get']
def get(self, request, **kwargs):
return self.post(request, **kwargs)
def post(self, request, **kwargs):
form = DismissNotificationForm(kwargs)
if not form.is_valid():
# Notification.pk missing or invalid
return HttpResponse(status=422)
else:
try:
notification = Notification.objects.get(
pk=form.cleaned_data.get('pk'))
# the request.user must be the notify user, or an agent of the
# notify org.
if (
'user' in str(notification.notify_content_type)
and notification.notify_id != request.user.id
) or (
'organization' in str(notification.notify_content_type)
and notification.notify_id
not in [org.pk for org in request.user.agent_organizations.all()]
):
raise Notification.DoesNotExist()
notification.dismissed = True
notification.save()
except Notification.DoesNotExist:
raise Http404("Notification does not exist")
if request.GET.get('next'):
return redirect(request.GET.get('next'))
else:
return redirect(reverse('home'))
| [
"django.http.HttpResponse",
"django.http.Http404",
"django.urls.reverse"
] | [((650, 674), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(422)'}), '(status=422)\n', (662, 674), False, 'from django.http import Http404, HttpResponse\n'), ((1728, 1743), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (1735, 1743), False, 'from django.urls import reverse\n'), ((1557, 1595), 'django.http.Http404', 'Http404', (['"""Notification does not exist"""'], {}), "('Notification does not exist')\n", (1564, 1595), False, 'from django.http import Http404, HttpResponse\n')] |
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import requests
import time
def get_corp_code():
url = "http://comp.fnguide.com/XML/Market/CompanyList.txt"
resp = requests.get(url)
resp.encoding = "utf-8-sig"
data = resp.json()
comp = data['Co']
df = pd.DataFrame(data=comp)
cond = df['gb'] == '701'
df2 = df[cond].copy()
return df2
def get_closing_accounts_day(code):
url = f"https://comp.fnguide.com/SVO2/ASP/SVD_main.asp?pGB=1&gicode=A{code}"
selector = "#compBody > div.section.ul_corpinfo > div.corp_group1 > p > span.stxt.stxt3"
resp = requests.get(url)
html = resp.text
soup = BeautifulSoup(html, "html5lib")
tags = soup.select(selector)
return tags[0].text
def get_gpa(code):
url = f"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?pGB=1&gicode=A{code}&cID=&MenuYn=Y&ReportGB=&NewMenuID=103&stkGb=701"
try:
dfs = pd.read_html(url)
# gross profit
df = dfs[0]
df2 = df.set_index(df.columns[0])
gp = df2.filter(regex="^2020").loc["매출총이익"].values[0]
gp = float(gp) * 100000000
# asset
df3 = dfs[2]
df4 = df3.set_index(df3.columns[0])
asset = df4.filter(regex="^2020").loc["자산"].values[0]
asset = float(asset) * 100000000
result = gp / asset
except:
result = np.nan
return result
def get_cap(code):
url = f"http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A{code}&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701"
try:
dfs = pd.read_html(url)
cap = float(dfs[0].iloc[4, 1])
cap = cap * 100000000
except:
cap = np.nan
return cap
def get_pbr(code):
url = f"https://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A{code}"
try:
dfs = pd.read_html(url)
df5 = dfs[10]
df6 = df5.set_index(df5.columns[0])
pbr = df6["Annual"].filter(regex="^2020").loc["PBR"].values[0]
except:
pbr = 0
return pbr
if __name__ == "__main__":
df = get_corp_code()
columns = ['code', 'name', 'day', 'gp/a', 'pbr', 'cap']
data = []
for i in range(len(df)):
s = df.iloc[i]
acode = s['cd']
code = acode[1:]
name = s['nm']
day = get_closing_accounts_day(code)
gpa = get_gpa(code)
pbr = get_pbr(code)
cap = get_cap(code)
data.append((code, name, day, gpa, pbr, cap))
print(i, code, name, day, gpa, pbr, cap)
if (i+1) % 100 == 0:
df1 = pd.DataFrame(data=data, columns=columns)
df1.to_excel(f"./data/data_{i+1}.xlsx")
data = []
time.sleep(0.3)
# last companies
df1 = pd.DataFrame(data=data, columns=columns)
df1.to_excel(f"./data/data_last.xlsx") | [
"pandas.read_html",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"pandas.DataFrame"
] | [((194, 211), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (206, 211), False, 'import requests\n'), ((298, 321), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'comp'}), '(data=comp)\n', (310, 321), True, 'import pandas as pd\n'), ((615, 632), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (627, 632), False, 'import requests\n'), ((667, 698), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (680, 698), False, 'from bs4 import BeautifulSoup\n'), ((2761, 2801), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'columns'}), '(data=data, columns=columns)\n', (2773, 2801), True, 'import pandas as pd\n'), ((933, 950), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (945, 950), True, 'import pandas as pd\n'), ((1575, 1592), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (1587, 1592), True, 'import pandas as pd\n'), ((1838, 1855), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (1850, 1855), True, 'import pandas as pd\n'), ((2701, 2716), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2711, 2716), False, 'import time\n'), ((2577, 2617), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'columns'}), '(data=data, columns=columns)\n', (2589, 2617), True, 'import pandas as pd\n')] |
import numpy as np
import argparse
from simple_algo_utils import *
if __name__=='__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,description=None)
parser.add_argument('--example_n', default=1, type=int, help=None)
parser.add_argument('--verbose', default=0, type=int, help=None)
args = vars(parser.parse_args())
example_n = args['example_n']
verbose = args['verbose']
AVAILABLE_EXAMPLES = [1,2]
SECTION_LABELS = [None,1,1]
PAGES = [None, 223, 225]
T_MAX_LIST = [None,3,9]
try:
print('Example from <NAME>\' textbook section 4.2.3.%s page %s'%(
str(SECTION_LABELS[example_n]),str(PAGES[example_n])))
print('example_n : %s'%(str(example_n)))
except:
print('EXAMPLE NOT FOUND.')
print('The available example numbers for --example_n are %s'%(str(AVAILABLE_EXAMPLES)))
exit()
print("==== SETTOPOLOGY ====")
Nj = 4 # no. of neighbors
J = np.array([ [1,0], [0,1], [-1,0], [0,-1]]) # (X,Y) coordinates of neighbors
assert(J.shape[0]==Nj)
print(J, '\n')
print("==== SETBSG ====")
# the group used was \mathbb{Z}_4
if example_n in [1]:
L = 10
elif example_n in [2]:
L = 20
nBSG = 4
BSG = setBSG(nBSG, Nj)
print('BSG:\n',BSG,'\n')
print("==== SETG0 ====")
age = np.zeros(shape=(L,L))
if example_n in [1]:
G0 = np.array([[0,0,0,0],[3,3,3,3],[0,0,3,3],[0,0,0,0]])
elif example_n in [2]:
G0 = [[0,0,0,0], [2,0,0,0],]
for i in range(3,1+9): G0.append([i,0,i,0])
G0.append([0,0,10,0])
G0 = np.array(G0)
nG0 = G0.shape[0]-1 # no of generators, excluding emtpy generator
alpha = range(nG0+1)
print('G0:\n',G0,'\n')
print('alpha:\n',alpha,'\n')
print("==== SETEXTG0 =====")
GE = setEXTG0(nBSG, G0, Nj, BSG)
print('GE:\n',GE,'\n')
print("==== SETCINIT ====")
CE = np.ones(shape=(L,L))
if example_n in [1]:
CE[4,4] = 5
elif example_n in [2]:
CE[10,4:9] = 37
print('CE:\n',CE.astype(int),'\n')
print('==== DEVELOP ===')
T_MAX = T_MAX_LIST[example_n]
P = 1
if example_n in [1,2]:
split_mode = None
if example_n==2: split_mode = 'split2'
CE, age = develop(T_MAX, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
if example_n == 2:
print('\nAPPLY SURGERY HERE')
CE[2:7,4:9] = 1
CE[7,4:9] = 21
# print(CE.astype(int))
print_in_alphabet(CE, nBSG, ALPH=None)
CE, age = develop(2, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
print('\nAPPLY MORE SURGERY HERE')
CE[4:6,4:9] = 1
CE[9:12,4:9] = 1
CE[8,4:9] = 29
# print(CE.astype(int))
print_in_alphabet(CE, nBSG, ALPH=None)
CE, age = develop(2, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
print('\nLAST PART')
CE = np.ones(shape=(20,20))
CE[4,4:10] = [13,13,14,13,13,13]
CE[5,4:10] = 19
CE[6,4:10] = 21
CE[7,4:10] = 27
CE[8,4:10] = 29
CE[6,8] = 29
CE[8,5] = 13
print_in_alphabet(CE, nBSG, ALPH=None)
print(CE.astype(int))
CE, age = develop(1, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
| [
"numpy.array",
"numpy.zeros",
"numpy.ones",
"argparse.ArgumentParser"
] | [((107, 207), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': 'None'}), '(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=None)\n', (130, 207), False, 'import argparse\n'), ((1010, 1054), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [-1, 0], [0, -1]]'], {}), '([[1, 0], [0, 1], [-1, 0], [0, -1]])\n', (1018, 1054), True, 'import numpy as np\n'), ((1394, 1416), 'numpy.zeros', 'np.zeros', ([], {'shape': '(L, L)'}), '(shape=(L, L))\n', (1402, 1416), True, 'import numpy as np\n'), ((1976, 1997), 'numpy.ones', 'np.ones', ([], {'shape': '(L, L)'}), '(shape=(L, L))\n', (1983, 1997), True, 'import numpy as np\n'), ((1454, 1520), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [3, 3, 3, 3], [0, 0, 3, 3], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [3, 3, 3, 3], [0, 0, 3, 3], [0, 0, 0, 0]])\n', (1462, 1520), True, 'import numpy as np\n'), ((1665, 1677), 'numpy.array', 'np.array', (['G0'], {}), '(G0)\n', (1673, 1677), True, 'import numpy as np\n'), ((3220, 3243), 'numpy.ones', 'np.ones', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (3227, 3243), True, 'import numpy as np\n')] |
from torchtext.data import Filed, TabularDataset, BucketIterator
def tokenize(x): return x.split()
quote = Field(sequential=True, use_vocab=True, tokenize=tokenize, lower=True)
score = Field(sequential=False, use_vocab=False)
fields = {
'quote': ('q', quote),
'score': ('s', score)
}
train_data, test_data = TabularDataset.splits(
path='mydata',
train='train.json',
test='test.json',
format='json',
fields=fields
)
quote.build_vocab(train_data,
max_size=10000,
min_freq=1)
train_iterator, test_iterator = BucketIterator.splits(
(train_data,test_data),
batch_size=2,
device='cuda'
)
for batch in train_iterator:
print(batch.q)
print(batch.s)
| [
"torchtext.data.BucketIterator.splits",
"torchtext.data.TabularDataset.splits"
] | [((323, 431), 'torchtext.data.TabularDataset.splits', 'TabularDataset.splits', ([], {'path': '"""mydata"""', 'train': '"""train.json"""', 'test': '"""test.json"""', 'format': '"""json"""', 'fields': 'fields'}), "(path='mydata', train='train.json', test='test.json',\n format='json', fields=fields)\n", (344, 431), False, 'from torchtext.data import Filed, TabularDataset, BucketIterator\n'), ((579, 654), 'torchtext.data.BucketIterator.splits', 'BucketIterator.splits', (['(train_data, test_data)'], {'batch_size': '(2)', 'device': '"""cuda"""'}), "((train_data, test_data), batch_size=2, device='cuda')\n", (600, 654), False, 'from torchtext.data import Filed, TabularDataset, BucketIterator\n')] |
# Seenbot module.
from datetime import datetime
import json
from michiru import db, personalities
from michiru.modules import command, hook
_ = personalities.localize
## Module information.
__name__ = 'seenbot'
__author__ = 'Shiz'
__license__ = 'WTFPL'
__desc__ = 'Tells when someone was last seen.'
## Database stuff and constants.
db.table('seen', {
'id': db.ID,
'server': (db.STRING, db.INDEX),
'nickname': (db.STRING, db.INDEX),
'action': db.INT,
'data': db.STRING,
'time': db.DATETIME
})
# The action values. Fake enum.
class Actions:
JOIN = 0x1
PART = 0x2
QUIT = 0x3
KICK = 0x4
KICKED = 0x5
NICKCHANGE = 0x6
NICKCHANGED = 0x7
MESSAGE = 0x8
NOTICE = 0x9
TOPICCHANGE = 0x10
CTCP = 0x11
## Utility functions.
def timespan(date, current=None, reach=2):
""" Calculate human readable timespan. """
if current is None:
current = datetime.now()
timespans = [
('millennium', 'millennia', 60*60*24*365*1000),
('century', 'centuries', 60*60*24*365*100),
('decennium', 'decennia', 60*60*24*365*10),
('year', 'years', 60*60*24*365),
('month', 'months', 60*60*24*30),
('week', 'weeks', 60*60*24*7),
('day', 'days', 60*60*24),
('hour', 'hours', 60*60),
('minute', 'minutes', 60),
('second', 'seconds', 1)
]
message = None
reachstart = None
delta = int((current - date).total_seconds())
for i, (singular, plural, seconds) in enumerate(timespans):
# Is our time at least one 'unit' worth of this span?
if delta >= seconds:
# Get the number of units it's worth, and the remainder.
n, delta = divmod(delta, seconds)
# Append to message.
if message is not None:
message += ', '
else:
reachstart = i
message = ''
message += '{n} {noun}'.format(n=n, noun=plural if n >= 2 else singular)
# Stop if we reached our precision limit.
if reachstart is not None and reach is not None and i - reachstart + 1 >= reach:
break
if message is None:
message = 'just now'
else:
message += ' ago'
return message
def log(server, nick, what, **data):
""" Remove earlier entries for `nick` from database and insert new log entry. """
db.from_('seen').where('nickname', nick.lower()).and_('server', server).delete()
db.to('seen').add({
'server': server,
'nickname': nick.lower(),
'action': what,
'data': json.dumps(data),
'time': datetime.now()
})
def meify(bot, nick):
if bot.nickname == nick:
return 'me'
return bot.highlight(nick)
## Commands and hooks.
@command(r'seen (\S+)$')
@command(r'have you seen (\S+)(?: lately)?\??$')
def seen(bot, server, target, source, message, parsed, private, admin):
nick = parsed.group(1)
# Weed out the odd cases.
if nick == source:
yield from bot.message(target, _(bot, 'Asking for yourself?', serv=server, nick=nick))
return
elif nick == bot.nickname:
yield from bot.message(target, _(bot, "I'm right here.", serv=server, nick=nick))
return
# Do we have an entry for this nick?
entry = db.from_('seen').where('nickname', nick.lower()).and_('server', server).single('action', 'data', 'time')
if not entry:
yield from bot.message(target, _(bot, "I don't know who {nick} is.", serv=server, nick=meify(bot, nick)))
return
message = 'I saw {nick} {timeago}, {action}'
submessage = None
action, raw_data, raw_time = entry
data = json.loads(raw_data)
time = datetime.strptime(raw_time, db.DATETIME_FORMAT)
# Huge if/else chain incoming.
if action == Actions.JOIN:
submessage = _(bot, 'joining {chan}.', serv=server, **data)
elif action == Actions.PART:
submessage = _(bot, 'leaving {chan}, with reason "{reason}".', serv=server, **data)
elif action == Actions.QUIT:
submessage = _(bot, 'disconnecting with reason "{reason}".', serv=server, **data)
elif action == Actions.KICK:
submessage = _(bot, 'kicking {target} from {chan} with reason "{reason}".', serv=server, **data)
elif action == Actions.KICKED:
submessage = _(bot, 'getting kicked from {chan} by {kicker} with reason "{reason}".', serv=server, **data)
elif action == Actions.NICKCHANGE:
submessage = _(bot, 'changing nickname to {newnick}.', serv=server, **data)
elif action == Actions.NICKCHANGED:
submessage = _(bot, 'changing nickname from {oldnick}.', serv=server, **data)
elif action == Actions.MESSAGE:
submessage = _(bot, 'telling {chan} "<{nick}> {message}".', serv=server, nick=nick, **data)
elif action == Actions.NOTICE:
submessage = _(bot, 'noticing {chan} "*{nick}* {message}".', serv=server, nick=nick **data)
elif action == Actions.TOPICCHANGE:
submessage = _(bot, 'changing topic for {chan} to "{topic}".', serv=server, **data)
elif action == Actions.CTCP:
submessage = _(bot, 'CTCPing {target} ({message}).', serv=server, **data)
else:
submessage = _(bot, 'doing something.', serv=server, **data)
message = _(bot, message, action=submessage, nick=meify(bot, nick), serv=server, rawtime=time, timeago=timespan(time))
yield from bot.message(target, message)
@hook('chat.join')
def join(bot, server, channel, who):
log(server, who, Actions.JOIN, chan=channel)
@hook('chat.part')
def part(bot, server, channel, who, reason):
log(server, who, Actions.PART, chan=channel, reason=reason)
@hook('chat.disconnect')
def quit(bot, server, who, reason):
log(server, who, Actions.QUIT, reason=reason)
@hook('chat.kick')
def kick(bot, server, channel, target, by, reason):
log(server, by, Actions.KICK, chan=channel, target=meify(bot, target), reason=reason)
log(server, target, Actions.KICKED, chan=channel, kicker=meify(bot, by), reason=reason)
@hook('chat.nickchange')
def nickchange(bot, server, who, to):
log(server, who, Actions.NICKCHANGE, newnick=to)
log(server, to, Actions.NICKCHANGED, oldnick=who)
@hook('chat.message')
def message(bot, server, target, who, message, private, admin):
if not private:
log(server, who, Actions.MESSAGE, chan=target, message=message)
@hook('chat.notice')
def notice(bot, server, target, who, message, private, admin):
if not private:
log(server, who, Actions.NOTICE, chan=target, message=message)
@hook('chat.topicchange')
def topicchange(bot, server, channel, who, topic):
log(server, who, Actions.TOPICCHANGE, chan=channel, topic=topic)
@hook('irc.ctcp')
def ctcp(bot, server, target, who, message):
log(server, who, Actions.CTCP, target=meify(bot, target), message=message)
## Boilerplate.
def load():
return True
def unload():
pass
| [
"json.loads",
"michiru.modules.hook",
"datetime.datetime.strptime",
"michiru.db.from_",
"michiru.db.table",
"json.dumps",
"datetime.datetime.now",
"michiru.modules.command",
"michiru.db.to"
] | [((339, 505), 'michiru.db.table', 'db.table', (['"""seen"""', "{'id': db.ID, 'server': (db.STRING, db.INDEX), 'nickname': (db.STRING, db.\n INDEX), 'action': db.INT, 'data': db.STRING, 'time': db.DATETIME}"], {}), "('seen', {'id': db.ID, 'server': (db.STRING, db.INDEX), 'nickname':\n (db.STRING, db.INDEX), 'action': db.INT, 'data': db.STRING, 'time': db.\n DATETIME})\n", (347, 505), False, 'from michiru import db, personalities\n'), ((2893, 2916), 'michiru.modules.command', 'command', (['"""seen (\\\\S+)$"""'], {}), "('seen (\\\\S+)$')\n", (2900, 2916), False, 'from michiru.modules import command, hook\n'), ((2918, 2966), 'michiru.modules.command', 'command', (['"""have you seen (\\\\S+)(?: lately)?\\\\??$"""'], {}), "('have you seen (\\\\S+)(?: lately)?\\\\??$')\n", (2925, 2966), False, 'from michiru.modules import command, hook\n'), ((5561, 5578), 'michiru.modules.hook', 'hook', (['"""chat.join"""'], {}), "('chat.join')\n", (5565, 5578), False, 'from michiru.modules import command, hook\n'), ((5667, 5684), 'michiru.modules.hook', 'hook', (['"""chat.part"""'], {}), "('chat.part')\n", (5671, 5684), False, 'from michiru.modules import command, hook\n'), ((5796, 5819), 'michiru.modules.hook', 'hook', (['"""chat.disconnect"""'], {}), "('chat.disconnect')\n", (5800, 5819), False, 'from michiru.modules import command, hook\n'), ((5908, 5925), 'michiru.modules.hook', 'hook', (['"""chat.kick"""'], {}), "('chat.kick')\n", (5912, 5925), False, 'from michiru.modules import command, hook\n'), ((6162, 6185), 'michiru.modules.hook', 'hook', (['"""chat.nickchange"""'], {}), "('chat.nickchange')\n", (6166, 6185), False, 'from michiru.modules import command, hook\n'), ((6333, 6353), 'michiru.modules.hook', 'hook', (['"""chat.message"""'], {}), "('chat.message')\n", (6337, 6353), False, 'from michiru.modules import command, hook\n'), ((6512, 6531), 'michiru.modules.hook', 'hook', (['"""chat.notice"""'], {}), "('chat.notice')\n", (6516, 6531), False, 'from michiru.modules import command, hook\n'), ((6688, 6712), 'michiru.modules.hook', 'hook', (['"""chat.topicchange"""'], {}), "('chat.topicchange')\n", (6692, 6712), False, 'from michiru.modules import command, hook\n'), ((6835, 6851), 'michiru.modules.hook', 'hook', (['"""irc.ctcp"""'], {}), "('irc.ctcp')\n", (6839, 6851), False, 'from michiru.modules import command, hook\n'), ((3794, 3814), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (3804, 3814), False, 'import json\n'), ((3826, 3873), 'datetime.datetime.strptime', 'datetime.strptime', (['raw_time', 'db.DATETIME_FORMAT'], {}), '(raw_time, db.DATETIME_FORMAT)\n', (3843, 3873), False, 'from datetime import datetime\n'), ((922, 936), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (934, 936), False, 'from datetime import datetime\n'), ((2587, 2600), 'michiru.db.to', 'db.to', (['"""seen"""'], {}), "('seen')\n", (2592, 2600), False, 'from michiru import db, personalities\n'), ((2707, 2723), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2717, 2723), False, 'import json\n'), ((2741, 2755), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2753, 2755), False, 'from datetime import datetime\n'), ((2501, 2517), 'michiru.db.from_', 'db.from_', (['"""seen"""'], {}), "('seen')\n", (2509, 2517), False, 'from michiru import db, personalities\n'), ((3419, 3435), 'michiru.db.from_', 'db.from_', (['"""seen"""'], {}), "('seen')\n", (3427, 3435), False, 'from michiru import db, personalities\n')] |
from soccerpy.modules.Fixture.base_fixture import BaseFixture
from soccerpy.modules.Fundamentals.fixtures import Fixture
from soccerpy.modules.Fundamentals.head2head import Head2Head
class FixturesSpecific(BaseFixture):
def __init__(self, data, headers, request):
super().__init__(headers, request)
self.fixture = Fixture(data['fixture'], self.r)
self.head2head = Head2Head(data['head2head'], self.r)
| [
"soccerpy.modules.Fundamentals.fixtures.Fixture",
"soccerpy.modules.Fundamentals.head2head.Head2Head"
] | [((336, 368), 'soccerpy.modules.Fundamentals.fixtures.Fixture', 'Fixture', (["data['fixture']", 'self.r'], {}), "(data['fixture'], self.r)\n", (343, 368), False, 'from soccerpy.modules.Fundamentals.fixtures import Fixture\n'), ((394, 430), 'soccerpy.modules.Fundamentals.head2head.Head2Head', 'Head2Head', (["data['head2head']", 'self.r'], {}), "(data['head2head'], self.r)\n", (403, 430), False, 'from soccerpy.modules.Fundamentals.head2head import Head2Head\n')] |
#!/usr/bin/env python2.7
# encoding: utf8
import os
import sys
import time
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
sys.path.append(os.path.realpath(__file__ + '/..'))
import udf
from abstract_performance_test import AbstractPerformanceTest
class SetEmitStartOnlyRPeformanceTest(AbstractPerformanceTest):
def setUp(self):
self.create_schema()
self.generate_data_linear(500)
self.query(udf.fixindent('''
CREATE R SET SCRIPT START_ONLY(
intVal INT) EMITS (count_value INT) AS
run <- function(ctx){
}
'''))
self.query("commit")
def tearDown(self):
self.cleanup(self.schema)
def test_consume_next(self):
self.run_test(1000, 3, 2.0, "SELECT START_ONLY(1)")
if __name__ == '__main__':
udf.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
| [
"os.path.realpath",
"udf.main",
"udf.fixindent"
] | [((93, 137), 'os.path.realpath', 'os.path.realpath', (["(__file__ + '/../../../lib')"], {}), "(__file__ + '/../../../lib')\n", (109, 137), False, 'import os\n'), ((155, 189), 'os.path.realpath', 'os.path.realpath', (["(__file__ + '/..')"], {}), "(__file__ + '/..')\n", (171, 189), False, 'import os\n'), ((861, 871), 'udf.main', 'udf.main', ([], {}), '()\n', (869, 871), False, 'import udf\n'), ((440, 651), 'udf.fixindent', 'udf.fixindent', (['"""\n CREATE R SET SCRIPT START_ONLY(\n intVal INT) EMITS (count_value INT) AS\n run <- function(ctx){\n }\n """'], {}), '(\n """\n CREATE R SET SCRIPT START_ONLY(\n intVal INT) EMITS (count_value INT) AS\n run <- function(ctx){\n }\n """\n )\n', (453, 651), False, 'import udf\n')] |
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "http://xsdtesting"
@dataclass
class A:
a: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://xsdtesting",
}
)
@dataclass
class B:
b: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://xsdtesting",
}
)
@dataclass
class Base:
"""
documentation documentation.
"""
class Meta:
name = "base"
e1: Optional[A] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://xsdtesting",
"required": True,
}
)
e2: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://xsdtesting",
}
)
@dataclass
class Doc(Base):
class Meta:
name = "doc"
namespace = "http://xsdtesting"
| [
"dataclasses.field"
] | [((154, 241), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': 'http://xsdtesting'}"}), "(default=None, metadata={'type': 'Element', 'namespace':\n 'http://xsdtesting'})\n", (159, 241), False, 'from dataclasses import dataclass, field\n'), ((343, 430), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': 'http://xsdtesting'}"}), "(default=None, metadata={'type': 'Element', 'namespace':\n 'http://xsdtesting'})\n", (348, 430), False, 'from dataclasses import dataclass, field\n'), ((619, 724), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': 'http://xsdtesting', 'required': True}"}), "(default=None, metadata={'type': 'Element', 'namespace':\n 'http://xsdtesting', 'required': True})\n", (624, 724), False, 'from dataclasses import dataclass, field\n'), ((817, 904), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element', 'namespace': 'http://xsdtesting'}"}), "(default=None, metadata={'type': 'Element', 'namespace':\n 'http://xsdtesting'})\n", (822, 904), False, 'from dataclasses import dataclass, field\n')] |
import json
import os
import sys
sys.path.append(".") # Assume script run in project root directory
from multiprocessing import Process, Queue
import argparse
from ai2thor.controller import BFSController
from datasets.offline_sscontroller import SSController
def parse_arguments():
parser = argparse.ArgumentParser(description="scrape all possible images from ai2thor scene")
parser.add_argument(
"--out_dir",
type=str,
default='/home/chenjunting/Robothor_data',
help="path to store scraped images",
)
parser.add_argument(
"--num_process",
type=int,
default=12,
help="number of processes launched to scrape images parallelly",
)
parser.add_argument(
"--scenes",
type=str,
default=None,
help="specify scenes to scrape, in the format of 'scene1,scene2,...'"
)
parser.add_argument("--state_decimal", type=int, default=3, help="decimal of key in state data: e.g. images.hdf5")
args = parser.parse_args()
return args
def search_and_save(in_queue, out_dir, rank, gpus=[0,1,2,3]):
gpu_id = gpus[rank % len(gpus)]
os.system("export CUDA_VISIBLE_DEVICES={}".format(gpu_id))
while not in_queue.empty():
try:
scene_name = in_queue.get(timeout=3)
except:
return
c = None
# try:
sub_out_dir = os.path.join(out_dir, scene_name)
if not os.path.exists(sub_out_dir):
os.mkdir(sub_out_dir)
print('starting:', scene_name)
c = SSController(
grid_size=0.125,
grid_file=os.path.join(sub_out_dir, 'grid.json'),
graph_file=os.path.join(sub_out_dir, 'graph.json'),
metadata_file=os.path.join(sub_out_dir, 'metadata.json'),
images_file=os.path.join(sub_out_dir, 'images.hdf5'),
# depth_file=os.path.join(sub_out_dir, 'depth.hdf5'), # no depth data allowed in robothor-challenge
grid_assumption=False,
rotate_by=30,
state_decimal=3,
ai2thor_args={
'start_unity':True,
'width':640,
'height':480,
'agentMode':'bot',
'gridSize':0.125,
})
# c.start()
c.search_all_closed(scene_name)
c.stop()
# except AssertionError as e:
# print('Error is', e)
# print('Error in scene {}'.format(scene_name))
# if c is not None:
# c.stop()
# continue
def main():
args = parse_arguments()
out_dir = args.out_dir
num_processes = args.num_process
queue = Queue()
if args.scenes:
scenes = args.scenes.split(',')
else: # all scenes in robothor
scenes = ["FloorPlan_Train{}_{}".format(i, j) for i in range(1,13) for j in range(1,6)]
for scene in scenes:
queue.put(scene)
processes = []
for i in range(num_processes):
p = Process(target=search_and_save, args=(queue, out_dir, i))
p.start()
processes.append(p)
for p in processes:
p.join()
if __name__ == "__main__":
main() | [
"os.path.exists",
"argparse.ArgumentParser",
"multiprocessing.Process",
"os.path.join",
"os.mkdir",
"multiprocessing.Queue",
"sys.path.append"
] | [((33, 53), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (48, 53), False, 'import sys\n'), ((297, 386), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""scrape all possible images from ai2thor scene"""'}), "(description=\n 'scrape all possible images from ai2thor scene')\n", (320, 386), False, 'import argparse\n'), ((2692, 2699), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (2697, 2699), False, 'from multiprocessing import Process, Queue\n'), ((1401, 1434), 'os.path.join', 'os.path.join', (['out_dir', 'scene_name'], {}), '(out_dir, scene_name)\n', (1413, 1434), False, 'import os\n'), ((3008, 3065), 'multiprocessing.Process', 'Process', ([], {'target': 'search_and_save', 'args': '(queue, out_dir, i)'}), '(target=search_and_save, args=(queue, out_dir, i))\n', (3015, 3065), False, 'from multiprocessing import Process, Queue\n'), ((1450, 1477), 'os.path.exists', 'os.path.exists', (['sub_out_dir'], {}), '(sub_out_dir)\n', (1464, 1477), False, 'import os\n'), ((1491, 1512), 'os.mkdir', 'os.mkdir', (['sub_out_dir'], {}), '(sub_out_dir)\n', (1499, 1512), False, 'import os\n'), ((1631, 1669), 'os.path.join', 'os.path.join', (['sub_out_dir', '"""grid.json"""'], {}), "(sub_out_dir, 'grid.json')\n", (1643, 1669), False, 'import os\n'), ((1694, 1733), 'os.path.join', 'os.path.join', (['sub_out_dir', '"""graph.json"""'], {}), "(sub_out_dir, 'graph.json')\n", (1706, 1733), False, 'import os\n'), ((1761, 1803), 'os.path.join', 'os.path.join', (['sub_out_dir', '"""metadata.json"""'], {}), "(sub_out_dir, 'metadata.json')\n", (1773, 1803), False, 'import os\n'), ((1829, 1869), 'os.path.join', 'os.path.join', (['sub_out_dir', '"""images.hdf5"""'], {}), "(sub_out_dir, 'images.hdf5')\n", (1841, 1869), False, 'import os\n')] |
import unittest
class TestCodeString(unittest.TestCase):
def test___new__(self):
# code_string = CodeString(string, uncomplete, imports)
assert False # TODO: implement your test here
class TestCombineTwoCodeStrings(unittest.TestCase):
def test_combine_two_code_strings(self):
# self.assertEqual(expected, combine_two_code_strings(template, cs1, cs2))
assert False # TODO: implement your test here
class TestCombineStringAndCodeString(unittest.TestCase):
def test_combine_string_and_code_string(self):
# self.assertEqual(expected, combine_string_and_code_string(template, s, cs))
assert False # TODO: implement your test here
class TestCombineCodeStringAndString(unittest.TestCase):
def test_combine_code_string_and_string(self):
# self.assertEqual(expected, combine_code_string_and_string(template, cs, s))
assert False # TODO: implement your test here
class TestCombine(unittest.TestCase):
def test_combine(self):
# self.assertEqual(expected, combine(cs1, cs2, template))
assert False # TODO: implement your test here
class TestJoin(unittest.TestCase):
def test_join(self):
# self.assertEqual(expected, join(char, code_strings))
assert False # TODO: implement your test here
class TestPutinto(unittest.TestCase):
def test_putinto(self):
# self.assertEqual(expected, putinto(cs, template, imports))
assert False # TODO: implement your test here
class TestAddimport(unittest.TestCase):
def test_addimport(self):
# self.assertEqual(expected, addimport(cs, imp))
assert False # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((1715, 1730), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1728, 1730), False, 'import unittest\n')] |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Create the cachedb configuration which consists of
* An cachmanager server to run three daemons for
* cache-write
* cache-miss
* cache-delayed write
* Lambdas
* SNS topics
* SQS queues
This will most likely be merged into production once it is finished.
"""
from lib.cloudformation import CloudFormationConfiguration, Arg, Ref
from lib.userdata import UserData
from lib.names import AWSNames
from lib.external import ExternalCalls
from lib import aws
from lib import scalyr
from lib import constants as const
from update_lambda_fcn import load_lambdas_on_s3
import boto3
def create_config(session, domain, keypair=None, user_data=None):
"""
Create the CloudFormationConfiguration object.
Args:
session: amazon session object
domain: domain of the stack being created
keypair: keypair used to by instances being created
user_data (UserData): information used by the endpoint instance and vault. Data will be run through the CloudFormation Fn::Join template intrinsic function so other template intrinsic functions used in the user_data will be parsed and executed.
Returns: the config for the Cloud Formation stack
"""
# Prepare user data for parsing by CloudFormation.
if user_data is not None:
parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
else:
parsed_user_data = user_data
names = AWSNames(domain)
config = CloudFormationConfiguration("cachedb", domain, const.REGION)
vpc_id = config.find_vpc(session)
# Create several subnets for all the lambdas to use.
lambda_azs = aws.azs_lookup(session, lambda_compatible_only=True)
internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal)
print("AZs for lambda: " + str(lambda_azs))
lambda_subnets = []
for i in range(const.LAMBDA_SUBNETS):
key = 'LambdaSubnet{}'.format(i)
lambda_subnets.append(Ref(key))
config.add_subnet(key, names.subnet('lambda{}'.format(i)), az=lambda_azs[i % len(lambda_azs)][0])
config.add_route_table_association(key + "RTA",
internal_route_table_id,
Ref(key))
# Lookup the External Subnet, Internal Security Group IDs that are
# needed by other resources
internal_subnet_id = aws.subnet_id_lookup(session, names.subnet("internal"))
config.add_arg(Arg.Subnet("InternalSubnet",
internal_subnet_id,
"ID of Internal Subnet to create resources in"))
internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal)
config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
internal_sg_id,
"ID of internal Security Group"))
role = aws.role_arn_lookup(session, "lambda_cache_execution")
config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
"IAM role for multilambda." + domain))
index_bucket_name = names.cuboid_bucket
if not aws.s3_bucket_exists(session, index_bucket_name):
config.add_s3_bucket("cuboidBucket", index_bucket_name)
config.add_s3_bucket_policy(
"cuboidBucketPolicy", index_bucket_name,
['s3:GetObject', 's3:PutObject'],
{ 'AWS': role})
delete_bucket_name = names.delete_bucket
if not aws.s3_bucket_exists(session, delete_bucket_name):
config.add_s3_bucket("deleteBucket", delete_bucket_name)
config.add_s3_bucket_policy(
"deleteBucketPolicy", delete_bucket_name,
['s3:GetObject', 's3:PutObject'],
{ 'AWS': role})
creating_tile_bucket = False
tile_bucket_name = names.tile_bucket
if not aws.s3_bucket_exists(session, tile_bucket_name):
creating_tile_bucket = True
config.add_s3_bucket("tileBucket", tile_bucket_name)
config.add_s3_bucket_policy(
"tileBucketPolicy", tile_bucket_name,
['s3:GetObject', 's3:PutObject'],
{ 'AWS': role})
ingest_bucket_name = names.ingest_bucket
if not aws.s3_bucket_exists(session, ingest_bucket_name):
config.add_s3_bucket("ingestBucket", ingest_bucket_name)
config.add_s3_bucket_policy(
"ingestBucketPolicy", ingest_bucket_name,
['s3:GetObject', 's3:PutObject'],
{ 'AWS': role})
config.add_ec2_instance("CacheManager",
names.cache_manager,
aws.ami_lookup(session, "cachemanager.boss"),
keypair,
subnet=Ref("InternalSubnet"),
public_ip=False,
type_=const.CACHE_MANAGER_TYPE,
security_groups=[Ref("InternalSecurityGroup")],
user_data=parsed_user_data,
role="cachemanager")
lambda_bucket = aws.get_lambda_s3_bucket(session)
config.add_lambda("MultiLambda",
names.multi_lambda,
Ref("LambdaCacheExecutionRole"),
s3=(aws.get_lambda_s3_bucket(session),
"multilambda.{}.zip".format(domain),
"lambda_loader.handler"),
timeout=120,
memory=1024,
security_groups=[Ref('InternalSecurityGroup')],
subnets=lambda_subnets,
runtime='python3.6')
if creating_tile_bucket:
config.add_lambda_permission(
'tileBucketInvokeMultiLambda', names.multi_lambda,
principal='s3.amazonaws.com', source={
'Fn::Join': [':', ['arn', 'aws', 's3', '', '', tile_bucket_name]]}, #DP TODO: move into constants
depends_on=['tileBucket', 'MultiLambda']
)
else:
config.add_lambda_permission(
'tileBucketInvokeMultiLambda', names.multi_lambda,
principal='s3.amazonaws.com', source={
'Fn::Join': [':', ['arn', 'aws', 's3', '', '', tile_bucket_name]]},
depends_on='MultiLambda'
)
# Add topic to indicating that the object store has been write locked.
# Now using "production mailing list" instead of separate write lock topic.
#config.add_sns_topic('WriteLock',
# names.write_lock_topic,
# names.write_lock,
# []) # TODO: add subscribers
return config
def generate(session, domain):
"""Create the configuration and save it to disk"""
config = create_config(session, domain)
config.generate()
def create(session, domain):
"""Create the configuration, and launch it"""
names = AWSNames(domain)
user_data = UserData()
user_data["system"]["fqdn"] = names.cache_manager
user_data["system"]["type"] = "cachemanager"
user_data["aws"]["cache"] = names.cache
user_data["aws"]["cache-state"] = names.cache_state
user_data["aws"]["cache-db"] = "0"
user_data["aws"]["cache-state-db"] = "0"
user_data["aws"]["s3-flush-queue"] = aws.sqs_lookup_url(session, names.s3flush_queue)
user_data["aws"]["s3-flush-deadletter-queue"] = aws.sqs_lookup_url(session, names.deadletter_queue)
user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
user_data["aws"]["ingest_bucket"] = names.ingest_bucket
user_data["aws"]["s3-index-table"] = names.s3_index
user_data["aws"]["id-index-table"] = names.id_index
user_data["aws"]["id-count-table"] = names.id_count_index
#user_data["aws"]["sns-write-locked"] = str(Ref('WriteLock'))
mailing_list_arn = aws.sns_topic_lookup(session, const.PRODUCTION_MAILING_LIST)
if mailing_list_arn is None:
msg = "MailingList {} needs to be created before running config".format(const.PRODUCTION_MAILING_LIST)
raise Exception(msg)
user_data["aws"]["sns-write-locked"] = mailing_list_arn
user_data["lambda"]["flush_function"] = names.multi_lambda
user_data["lambda"]["page_in_function"] = names.multi_lambda
keypair = aws.keypair_lookup(session)
try:
pre_init(session, domain)
config = create_config(session, domain, keypair, user_data)
success = config.create(session)
if not success:
raise Exception("Create Failed")
else:
post_init(session, domain)
except:
# DP NOTE: This will catch errors from pre_init, create, and post_init
print("Error detected")
raise
def pre_init(session, domain):
"""Send spdb, bossutils, lambda, and lambda_utils to the lambda build
server, build the lambda environment, and upload to S3.
"""
bucket = aws.get_lambda_s3_bucket(session)
load_lambdas_on_s3(session, domain, bucket)
def post_init(session, domain):
print("post_init")
print('adding tile bucket trigger of multi-lambda')
add_tile_bucket_trigger(session, domain)
# Tell Scalyr to get CloudWatch metrics for these instances.
names = AWSNames(domain)
instances = [names.cache_manager]
scalyr.add_instances_to_scalyr(
session, const.REGION, instances)
def add_tile_bucket_trigger(session, domain):
"""Trigger MultiLambda when file uploaded to tile bucket.
This is done in post-init() because the tile bucket isn't always
created during CloudFormation (it may already exist).
This function's effects should be idempotent because the same id is
used everytime the notification event is added to the tile bucket.
Args:
session (Boto3.Session)
domain (string): VPC domain name.
"""
names = AWSNames(domain)
lambda_name = names.multi_lambda
bucket_name = names.tile_bucket
lam = session.client('lambda')
resp = lam.get_function_configuration(FunctionName=lambda_name)
lambda_arn = resp['FunctionArn']
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
notification = bucket.Notification()
notification.put(NotificationConfiguration={
'LambdaFunctionConfigurations': [
{
'Id': 'tileBucketInvokeMultiLambda',
'LambdaFunctionArn': lambda_arn,
'Events': ['s3:ObjectCreated:*']
}
]
})
def delete(session, domain):
# NOTE: CloudWatch logs for the DNS Lambda are not deleted
names = AWSNames(domain)
aws.route53_delete_records(session, domain, names.cache_manager)
CloudFormationConfiguration("cachedb", domain).delete(session)
| [
"update_lambda_fcn.load_lambdas_on_s3",
"lib.cloudformation.Arg.SecurityGroup",
"lib.aws.azs_lookup",
"lib.cloudformation.Arg.String",
"lib.aws.role_arn_lookup",
"lib.aws.get_lambda_s3_bucket",
"lib.aws.route53_delete_records",
"lib.aws.sg_lookup",
"lib.scalyr.add_instances_to_scalyr",
"lib.aws.rt... | [((2068, 2084), 'lib.names.AWSNames', 'AWSNames', (['domain'], {}), '(domain)\n', (2076, 2084), False, 'from lib.names import AWSNames\n'), ((2098, 2158), 'lib.cloudformation.CloudFormationConfiguration', 'CloudFormationConfiguration', (['"""cachedb"""', 'domain', 'const.REGION'], {}), "('cachedb', domain, const.REGION)\n", (2125, 2158), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((2273, 2325), 'lib.aws.azs_lookup', 'aws.azs_lookup', (['session'], {'lambda_compatible_only': '(True)'}), '(session, lambda_compatible_only=True)\n', (2287, 2325), False, 'from lib import aws\n'), ((2356, 2402), 'lib.aws.rt_lookup', 'aws.rt_lookup', (['session', 'vpc_id', 'names.internal'], {}), '(session, vpc_id, names.internal)\n', (2369, 2402), False, 'from lib import aws\n'), ((3266, 3312), 'lib.aws.sg_lookup', 'aws.sg_lookup', (['session', 'vpc_id', 'names.internal'], {}), '(session, vpc_id, names.internal)\n', (3279, 3312), False, 'from lib import aws\n'), ((3511, 3565), 'lib.aws.role_arn_lookup', 'aws.role_arn_lookup', (['session', '"""lambda_cache_execution"""'], {}), "(session, 'lambda_cache_execution')\n", (3530, 3565), False, 'from lib import aws\n'), ((5645, 5678), 'lib.aws.get_lambda_s3_bucket', 'aws.get_lambda_s3_bucket', (['session'], {}), '(session)\n', (5669, 5678), False, 'from lib import aws\n'), ((7479, 7495), 'lib.names.AWSNames', 'AWSNames', (['domain'], {}), '(domain)\n', (7487, 7495), False, 'from lib.names import AWSNames\n'), ((7513, 7523), 'lib.userdata.UserData', 'UserData', ([], {}), '()\n', (7521, 7523), False, 'from lib.userdata import UserData\n'), ((7853, 7901), 'lib.aws.sqs_lookup_url', 'aws.sqs_lookup_url', (['session', 'names.s3flush_queue'], {}), '(session, names.s3flush_queue)\n', (7871, 7901), False, 'from lib import aws\n'), ((7954, 8005), 'lib.aws.sqs_lookup_url', 'aws.sqs_lookup_url', (['session', 'names.deadletter_queue'], {}), '(session, names.deadletter_queue)\n', (7972, 8005), False, 'from lib import aws\n'), ((8392, 8452), 'lib.aws.sns_topic_lookup', 'aws.sns_topic_lookup', (['session', 'const.PRODUCTION_MAILING_LIST'], {}), '(session, const.PRODUCTION_MAILING_LIST)\n', (8412, 8452), False, 'from lib import aws\n'), ((8830, 8857), 'lib.aws.keypair_lookup', 'aws.keypair_lookup', (['session'], {}), '(session)\n', (8848, 8857), False, 'from lib import aws\n'), ((9460, 9493), 'lib.aws.get_lambda_s3_bucket', 'aws.get_lambda_s3_bucket', (['session'], {}), '(session)\n', (9484, 9493), False, 'from lib import aws\n'), ((9498, 9541), 'update_lambda_fcn.load_lambdas_on_s3', 'load_lambdas_on_s3', (['session', 'domain', 'bucket'], {}), '(session, domain, bucket)\n', (9516, 9541), False, 'from update_lambda_fcn import load_lambdas_on_s3\n'), ((9779, 9795), 'lib.names.AWSNames', 'AWSNames', (['domain'], {}), '(domain)\n', (9787, 9795), False, 'from lib.names import AWSNames\n'), ((9838, 9902), 'lib.scalyr.add_instances_to_scalyr', 'scalyr.add_instances_to_scalyr', (['session', 'const.REGION', 'instances'], {}), '(session, const.REGION, instances)\n', (9868, 9902), False, 'from lib import scalyr\n'), ((10398, 10414), 'lib.names.AWSNames', 'AWSNames', (['domain'], {}), '(domain)\n', (10406, 10414), False, 'from lib.names import AWSNames\n'), ((11133, 11149), 'lib.names.AWSNames', 'AWSNames', (['domain'], {}), '(domain)\n', (11141, 11149), False, 'from lib.names import AWSNames\n'), ((11154, 11218), 'lib.aws.route53_delete_records', 'aws.route53_delete_records', (['session', 'domain', 'names.cache_manager'], {}), '(session, domain, names.cache_manager)\n', (11180, 11218), False, 'from lib import aws\n'), ((3086, 3186), 'lib.cloudformation.Arg.Subnet', 'Arg.Subnet', (['"""InternalSubnet"""', 'internal_subnet_id', '"""ID of Internal Subnet to create resources in"""'], {}), "('InternalSubnet', internal_subnet_id,\n 'ID of Internal Subnet to create resources in')\n", (3096, 3186), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((3332, 3427), 'lib.cloudformation.Arg.SecurityGroup', 'Arg.SecurityGroup', (['"""InternalSecurityGroup"""', 'internal_sg_id', '"""ID of internal Security Group"""'], {}), "('InternalSecurityGroup', internal_sg_id,\n 'ID of internal Security Group')\n", (3349, 3427), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((3585, 3671), 'lib.cloudformation.Arg.String', 'Arg.String', (['"""LambdaCacheExecutionRole"""', 'role', "('IAM role for multilambda.' + domain)"], {}), "('LambdaCacheExecutionRole', role, 'IAM role for multilambda.' +\n domain)\n", (3595, 3671), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((3755, 3803), 'lib.aws.s3_bucket_exists', 'aws.s3_bucket_exists', (['session', 'index_bucket_name'], {}), '(session, index_bucket_name)\n', (3775, 3803), False, 'from lib import aws\n'), ((4074, 4123), 'lib.aws.s3_bucket_exists', 'aws.s3_bucket_exists', (['session', 'delete_bucket_name'], {}), '(session, delete_bucket_name)\n', (4094, 4123), False, 'from lib import aws\n'), ((4425, 4472), 'lib.aws.s3_bucket_exists', 'aws.s3_bucket_exists', (['session', 'tile_bucket_name'], {}), '(session, tile_bucket_name)\n', (4445, 4472), False, 'from lib import aws\n'), ((4774, 4823), 'lib.aws.s3_bucket_exists', 'aws.s3_bucket_exists', (['session', 'ingest_bucket_name'], {}), '(session, ingest_bucket_name)\n', (4794, 4823), False, 'from lib import aws\n'), ((5169, 5213), 'lib.aws.ami_lookup', 'aws.ami_lookup', (['session', '"""cachemanager.boss"""'], {}), "(session, 'cachemanager.boss')\n", (5183, 5213), False, 'from lib import aws\n'), ((5780, 5811), 'lib.cloudformation.Ref', 'Ref', (['"""LambdaCacheExecutionRole"""'], {}), "('LambdaCacheExecutionRole')\n", (5783, 5811), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((2589, 2597), 'lib.cloudformation.Ref', 'Ref', (['key'], {}), '(key)\n', (2592, 2597), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((2872, 2880), 'lib.cloudformation.Ref', 'Ref', (['key'], {}), '(key)\n', (2875, 2880), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((5295, 5316), 'lib.cloudformation.Ref', 'Ref', (['"""InternalSubnet"""'], {}), "('InternalSubnet')\n", (5298, 5316), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((11223, 11269), 'lib.cloudformation.CloudFormationConfiguration', 'CloudFormationConfiguration', (['"""cachedb"""', 'domain'], {}), "('cachedb', domain)\n", (11250, 11269), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((5480, 5508), 'lib.cloudformation.Ref', 'Ref', (['"""InternalSecurityGroup"""'], {}), "('InternalSecurityGroup')\n", (5483, 5508), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n'), ((5839, 5872), 'lib.aws.get_lambda_s3_bucket', 'aws.get_lambda_s3_bucket', (['session'], {}), '(session)\n', (5863, 5872), False, 'from lib import aws\n'), ((6098, 6126), 'lib.cloudformation.Ref', 'Ref', (['"""InternalSecurityGroup"""'], {}), "('InternalSecurityGroup')\n", (6101, 6126), False, 'from lib.cloudformation import CloudFormationConfiguration, Arg, Ref\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
try:
print('trying installed module')
from kafka_client_decorators import KafkaDecorator
except:
print('installed module failed, trying from path')
import sys
sys.path.insert(1, '../')
from kafka_client_decorators import KafkaDecorator
kc = KafkaDecorator( )
#<EMAIL>(zookeeper_hosts='localhost:2181' )
@kc.host(hosts='localhost:9092' )
class A:
def __init__(self, testA, cls):
self.a = testA
self.cls = cls
pass
@kc.balanced_consumer('test1', consumer_group='testgroup3', auto_commit_enable=True, managed=True, consumer_timeout_ms=1000)
def get(self, msg):
print ( f'{self.a} Receive offset {msg.offset} key {msg.partition_key} message: { msg.value }' )
self.send( msg.value )
@kc.simple_consumer('test2', consumer_group='testgroup4', auto_commit_enable=True, consumer_timeout_ms=1000)
def get2(self, msg):
print ( f'{self.a} Receive offset {msg.offset}, message: { msg.value }' )
self.cls.stop(self)
@kc.producer('test2')
def send(self, msg):
pass
@kc.producer('test1')
def sendKey(self, msg, key ):
pass
class B:
def __init__(self):
pass
def stop(self, conn):
conn.stop()
a = A('Example', B())
a.start()
a.sendKey( 'Hello'.encode('utf-8'), partition_key='world'.encode('utf-8') )
a.wait()
| [
"kafka_client_decorators.KafkaDecorator",
"sys.path.insert"
] | [((310, 326), 'kafka_client_decorators.KafkaDecorator', 'KafkaDecorator', ([], {}), '()\n', (324, 326), False, 'from kafka_client_decorators import KafkaDecorator\n'), ((223, 248), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../"""'], {}), "(1, '../')\n", (238, 248), False, 'import sys\n')] |
"""Add passcode claimed field
Revision ID: d46ec0214eb2
Revises: <PASSWORD>
Create Date: 2019-08-26 13:18:28.719962
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'd46ec0214eb2'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity', sa.Column('pass_code_claimed', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('entity', 'pass_code_claimed')
# ### end Alembic commands ###
| [
"sqlalchemy.Boolean",
"alembic.op.drop_column"
] | [((602, 647), 'alembic.op.drop_column', 'op.drop_column', (['"""entity"""', '"""pass_code_claimed"""'], {}), "('entity', 'pass_code_claimed')\n", (616, 647), False, 'from alembic import op\n'), ((448, 460), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (458, 460), True, 'import sqlalchemy as sa\n')] |
from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd
from torch.utils.data import DataLoader
import h5py
import os
import torch
def get_data_loader(args,type="train"):
if args.dataset == 'pascal':
# load data
load_dir = "data/VOC2012/"
print("load data from file:{} ".format(os.path.join(load_dir,type+"_"+str(args.poison_rate)+".h5")))
h5_store = h5py.File(os.path.join(load_dir,type+"_"+str(args.poison_rate)+".h5"),"r")
image = h5_store['image'][:]
target = h5_store['target'][:]
h5_store.close()
dataset = pascal.VOCSegmentation_posion(torch.from_numpy(image),torch.from_numpy(target))
if type == "train":
return DataLoader(dataset,batch_size=args.batch_size,shuffle=True)
else:
return DataLoader(dataset,batch_size=args.batch_size,shuffle=False)
else:
print("only support pascal dataset~")
raise
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
else:
raise NotImplementedError
| [
"dataloaders.datasets.combine_dbs.CombineDBs",
"torch.from_numpy",
"dataloaders.datasets.pascal.VOCSegmentation",
"torch.utils.data.DataLoader",
"dataloaders.datasets.coco.COCOSegmentation",
"dataloaders.datasets.sbd.SBDSegmentation",
"dataloaders.datasets.cityscapes.CityscapesSegmentation"
] | [((1046, 1089), 'dataloaders.datasets.pascal.VOCSegmentation', 'pascal.VOCSegmentation', (['args'], {'split': '"""train"""'}), "(args, split='train')\n", (1068, 1089), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1108, 1149), 'dataloaders.datasets.pascal.VOCSegmentation', 'pascal.VOCSegmentation', (['args'], {'split': '"""val"""'}), "(args, split='val')\n", (1130, 1149), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1406, 1479), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n', (1416, 1479), False, 'from torch.utils.data import DataLoader\n'), ((1501, 1573), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n', (1511, 1573), False, 'from torch.utils.data import DataLoader\n'), ((632, 655), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (648, 655), False, 'import torch\n'), ((656, 680), 'torch.from_numpy', 'torch.from_numpy', (['target'], {}), '(target)\n', (672, 680), False, 'import torch\n'), ((729, 790), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=args.batch_size, shuffle=True)\n', (739, 790), False, 'from torch.utils.data import DataLoader\n'), ((822, 884), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=args.batch_size, shuffle=False)\n', (832, 884), False, 'from torch.utils.data import DataLoader\n'), ((1199, 1248), 'dataloaders.datasets.sbd.SBDSegmentation', 'sbd.SBDSegmentation', (['args'], {'split': "['train', 'val']"}), "(args, split=['train', 'val'])\n", (1218, 1248), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1273, 1339), 'dataloaders.datasets.combine_dbs.CombineDBs', 'combine_dbs.CombineDBs', (['[train_set, sbd_train]'], {'excluded': '[val_set]'}), '([train_set, sbd_train], excluded=[val_set])\n', (1295, 1339), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1726, 1780), 'dataloaders.datasets.cityscapes.CityscapesSegmentation', 'cityscapes.CityscapesSegmentation', (['args'], {'split': '"""train"""'}), "(args, split='train')\n", (1759, 1780), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1799, 1851), 'dataloaders.datasets.cityscapes.CityscapesSegmentation', 'cityscapes.CityscapesSegmentation', (['args'], {'split': '"""val"""'}), "(args, split='val')\n", (1832, 1851), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1871, 1924), 'dataloaders.datasets.cityscapes.CityscapesSegmentation', 'cityscapes.CityscapesSegmentation', (['args'], {'split': '"""test"""'}), "(args, split='test')\n", (1904, 1924), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((1990, 2063), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n', (2000, 2063), False, 'from torch.utils.data import DataLoader\n'), ((2085, 2157), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n', (2095, 2157), False, 'from torch.utils.data import DataLoader\n'), ((2180, 2253), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n', (2190, 2253), False, 'from torch.utils.data import DataLoader\n'), ((2373, 2415), 'dataloaders.datasets.coco.COCOSegmentation', 'coco.COCOSegmentation', (['args'], {'split': '"""train"""'}), "(args, split='train')\n", (2394, 2415), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((2434, 2474), 'dataloaders.datasets.coco.COCOSegmentation', 'coco.COCOSegmentation', (['args'], {'split': '"""val"""'}), "(args, split='val')\n", (2455, 2474), False, 'from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd\n'), ((2540, 2613), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n', (2550, 2613), False, 'from torch.utils.data import DataLoader\n'), ((2635, 2707), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n', (2645, 2707), False, 'from torch.utils.data import DataLoader\n')] |
from .Setup import EngineSetup
from Core.GlobalExceptions import Exceptions
from Services.NetworkRequests import requests
from Services.Utils.Utils import Utils
class ClipDownloader(EngineSetup):
def run(self):
try:
self.download()
except:
self.status.raiseError(Exceptions.NetworkError)
self.status.setDone()
self.syncStatus()
def download(self):
response = requests.get(self.setup.downloadInfo.getUrl(), stream=True)
if response.status_code == 200:
self.progress.totalByteSize = int(response.headers.get("Content-Length", 0))
self.progress.totalSize = Utils.formatByteSize(self.progress.totalByteSize)
self.status.setDownloading()
self.syncStatus()
try:
with open(self.setup.downloadInfo.getAbsoluteFileName(), "wb") as file:
loopCount = 0
for data in response.iter_content(1024):
file.write(data)
self.progress.byteSize += len(data)
self.progress.size = Utils.formatByteSize(self.progress.byteSize)
if loopCount % 1024 == 0:
self.syncProgress()
loopCount += 1
self.syncProgress()
except:
self.status.raiseError(Exceptions.FileSystemError)
else:
if self.progress.byteSize != self.progress.totalByteSize:
raise
else:
raise
def cancel(self):
pass | [
"Services.Utils.Utils.Utils.formatByteSize"
] | [((663, 712), 'Services.Utils.Utils.Utils.formatByteSize', 'Utils.formatByteSize', (['self.progress.totalByteSize'], {}), '(self.progress.totalByteSize)\n', (683, 712), False, 'from Services.Utils.Utils import Utils\n'), ((1130, 1174), 'Services.Utils.Utils.Utils.formatByteSize', 'Utils.formatByteSize', (['self.progress.byteSize'], {}), '(self.progress.byteSize)\n', (1150, 1174), False, 'from Services.Utils.Utils import Utils\n')] |
import logging
from vespid import setup_logger
logger = setup_logger(__name__)
import pandas as pd
import numpy as np
from tqdm import tqdm
def calculate_interdisciplinarity_score(
membership_vectors
):
'''
Given a set of entities and
one vector for each representing the (ordered) strength
of membership of that entity across a set of clusters,
calculate the level of interdisciplinarity for each entity.
NOTE: length of membership_vectors should be the same for
all entities for an accurate calculation.
Parameters
----------
membership_vectors: numpy array of shape (n_samples, n_clusters)
that indicates how strongly each sample/entity belongs to
a cluster (e.g. membership_vectors[0] = [0.1, 0.2, 0.3, 0.4]
would indicate the strongest association for sample 0 with
cluster 3 and the weakest with cluster 0).
Returns
-------
numpy array of float scores of shape (n_samples,) in the range
[0.0, 1.0].
'''
num_clusters = membership_vectors.shape[1]
# (N / N-1) * (1 - max(P)) * (1 - stdev(P))
id_scores = (num_clusters / (num_clusters - 1)) * \
(1 - membership_vectors.max(axis=1)) * \
(1 - membership_vectors.std(axis=1))
# In some instances, the score can go higher than 1.0
# Make sure that doesn't happen but we alert on it
over_max = id_scores[id_scores > 1.0].sum()
if over_max > 0:
logger.warn(f"Found {over_max} instances in which score is above 1.0. "
"Forcing these to be 1.0...")
id_scores[id_scores > 1.0] = 1.0
return id_scores
def interdisciplinarity_from_citation_clusters(
graph,
year,
cluster_attribute='clusterID'
):
'''
Uses Cypher query with Neo4j instance (enriched with paper cluster labels
e.g. from HDBSCAN clustering) to determine how interdisciplinary
papers' references and citations are. Uses a similar scoring
logic as what is used in vespid.models.clustering with
HDBSCAN soft clustering probabilities.
Parameters
----------
graph: Neo4jConnectionHandler object. Used for querying the
graph for citation information.
year: int. Indicates the maximum year of publication of
interest.
cluster_attribute: str. Indicates the node attribute to use
for determining the cluster membership of the node
(e.g. 'cluster_id_2019').
Returns
-------
pandas DataFrame with columns ['paperID', 'id_score'] of
length n_nodes, with id_score being interdisciplinarity
scores of shape (n_nodes,)
'''
def fill_out_vector(cluster_identifiers, cluster_values, num_total_clusters):
'''
Takes a partial membership vector and fills out the missing
elements with zeros, placing the nonzero elements properly.
Parameters
----------
cluster_identifiers: numpy array of ints. Indicates which clusters
map to the values given in ``cluster_values`` (and thus must
be the same length as ``cluster_values``) for the node
in question.
cluster_values: numpy array of float. Indicates the strength
of membership the entity has to each cluster for the node
in question.
num_total_clusters: int. Indicates how many clusters there
are in the total solution. Must be greater than or
equal to the values provided in ``cluster_identifiers``.
Returns
-------
numpy array of shape (num_total_clusters,) representing
the cluster membership strengths/probabilities of the
node.
'''
if len(cluster_identifiers) != len(cluster_values):
raise ValueError("cluster_identifiers and cluster_values "
f"must be of the same length, but got {len(cluster_identifiers)} "
f"and {len(cluster_values)}, resp.")
if num_total_clusters < np.max(cluster_identifiers):
raise ValueError(f"num_total_clusters ({num_total_clusters}) "
"must not be less than the maximum "
f"cluster_identifiers value ({np.max(cluster_identifiers)})")
if len(cluster_identifiers) > len(np.unique(cluster_identifiers)):
raise ValueError("cluster_identifiers contains duplicate values")
# Build out an all-zeros vector of the proper length
cluster_vector = np.zeros(num_total_clusters)
# Fill in the right zeros to reflect cluster membership values
cluster_vector[cluster_identifiers] = cluster_values
return cluster_vector
# Query in the same fashion as what is used to generate BW centrality scores
# Effectively insures that all papers are either published in `year` or
# are referenced by ones published in `year`
# also ignores publications that lack a cluster ID or are noise (clusterID = -1)
query = f"""
MATCH (p:Publication)<-[c:CITED_BY]-(m:Publication)
WHERE c.publicationDate.year = {year}
AND m.publicationDate.year <= {year}
AND p.{cluster_attribute} IS NOT NULL
AND toInteger(p.{cluster_attribute}) > -1
AND m.{cluster_attribute} IS NOT NULL
AND toInteger(m.{cluster_attribute}) > -1
WITH DISTINCT p AS p, COUNT(c) AS NumTotalCitations
MATCH (p)<-[c:CITED_BY]-(m:Publication)
WHERE c.publicationDate.year = {year}
AND m.publicationDate.year <= {year}
AND m.{cluster_attribute} IS NOT NULL
AND toInteger(m.{cluster_attribute}) > -1
WITH p,
NumTotalCitations,
toInteger(m.{cluster_attribute}) AS CitationClusterLabel,
COUNT(m) AS NumCitationsInCluster
RETURN p.id AS paperID,
p.publicationDate.year AS Year,
toInteger(p.{cluster_attribute}) AS PrimaryClusterLabel,
CitationClusterLabel,
toFloat(NumCitationsInCluster) / NumTotalCitations AS FractionalMembership
"""
df = graph.cypher_query_to_dataframe(query, verbose=False)
logger.debug(f"Years covered by network-ID-scoring query are {df['Year'].min()} to {df['Year'].max()}")
# Which papers didn't have a membership value for the cluster they're assigned to?
# AKA which ones failed to have any citations/references from within their own cluster?
df['PrimaryLabelMatchesCitation'] = df['PrimaryClusterLabel'] == df['CitationClusterLabel']
num_zero_primary_membership = \
df['paperID'].nunique() - df.loc[df['PrimaryLabelMatchesCitation'], 'paperID'].nunique()
fraction_zero_primary_membership = round(num_zero_primary_membership / df['paperID'].nunique() * 100, 2)
if num_zero_primary_membership > 0:
logger.warn(f"No citations from host cluster found for "
f"{num_zero_primary_membership} ({fraction_zero_primary_membership}%) papers! "
"This suggests that the clustering solution may not be very good or "
"that the citation network was undersampled")
query = f"""
MATCH (p:Publication)
WHERE p.{cluster_attribute} IS NOT NULL
AND p.publicationDate.year = {year}
RETURN MAX(toInteger(p.{cluster_attribute}))
"""
# cluster labels are zero-indexed, so need +1
num_clusters = graph.cypher_query_to_dataframe(query, verbose=False).iloc[0,0] + 1
tqdm.pandas(desc="Building full cluster membership vectors from citation-based membership per paper")
# Group membership into list for each paper
cluster_vectors = df.groupby('paperID', sort=False).agg(list).progress_apply(
lambda row: fill_out_vector(
row['CitationClusterLabel'],
row['FractionalMembership'],
num_clusters
),
axis=1
)
id_scores = calculate_interdisciplinarity_score(
np.array(cluster_vectors.tolist())
)
output = pd.DataFrame({
'paperID': df['paperID'].unique(),
'scoreInterDNetwork': id_scores
})
#TODO: maybe additional weighting from dendrogram distance/cluster exemplar-exemplar distance?
return output | [
"numpy.unique",
"vespid.setup_logger",
"numpy.max",
"numpy.zeros",
"tqdm.tqdm.pandas"
] | [((56, 78), 'vespid.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (68, 78), False, 'from vespid import setup_logger\n'), ((7366, 7477), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {'desc': '"""Building full cluster membership vectors from citation-based membership per paper"""'}), "(desc=\n 'Building full cluster membership vectors from citation-based membership per paper'\n )\n", (7377, 7477), False, 'from tqdm import tqdm\n'), ((4473, 4501), 'numpy.zeros', 'np.zeros', (['num_total_clusters'], {}), '(num_total_clusters)\n', (4481, 4501), True, 'import numpy as np\n'), ((3997, 4024), 'numpy.max', 'np.max', (['cluster_identifiers'], {}), '(cluster_identifiers)\n', (4003, 4024), True, 'import numpy as np\n'), ((4267, 4297), 'numpy.unique', 'np.unique', (['cluster_identifiers'], {}), '(cluster_identifiers)\n', (4276, 4297), True, 'import numpy as np\n'), ((4192, 4219), 'numpy.max', 'np.max', (['cluster_identifiers'], {}), '(cluster_identifiers)\n', (4198, 4219), True, 'import numpy as np\n')] |
import sys
import threading
import time
import serial
import binascii
from linptech.packet import Packet
from linptech.constant import SerialConfig
import logging
try:
import queue
except ImportError:
import Queue as queue
logging.getLogger().setLevel(logging.ERROR)
class LinptechSerial(threading.Thread):
"""
- 实例化线程进行串口发送和读取
send_queue 发送指令队列
receive_queue 接收指令队列
"""
def __init__(self, port, receive):
super(LinptechSerial, self).__init__()
self.stop_flag = threading.Event()
self.buffer = ""
# Setup packet queues
self.send_queue = queue.Queue()
self.receive_queue = queue.Queue()
# Set the receive method,对外接口,接收指令
self.receive = receive
# Internal variable for the Base ID of the module.
self.port = port
self.ser = serial.Serial(self.port, 57600, timeout=0.1)
self.restart_num=0
def restart(self):
self.stop_flag.set()
self.ser.close()
while self.stop_flag.is_set():
time.sleep(2)
try:
number = self.ser.inWaiting()
self.stop_flag.clear()
self.run()
except Exception as e:
self.restart_num+=1
logging.error("reconnect serialport %d",self.restart_num)
try:
self.stop_flag.set()
self.ser = serial.Serial(self.port, 57600, timeout=0.1)
except Exception as e:
logging.error(e)
def send(self, data):
"""对外接口,发送指令"""
try:
packet=Packet.create(data)
self.send_queue.put(packet)
logging.debug("send_queue=%s" % self.send_queue.qsize())
return True
except Exception as e:
logging.error("send error:%s",e)
def get_from_send_queue(self):
""" Get message from transmit queue, if one exists """
try:
packet = self.send_queue.get(block=False)
return packet
except queue.Empty:
pass
return None
def get_from_receive_queue(self):
"""
get packet from receive queue
and as parameter pass to receive()
"""
while not self.receive_queue.empty():
try:
logging.debug("receive_queue=%s" % self.receive_queue.qsize())
packet=self.receive_queue.get()
data,optional=Packet.parse(packet)
self.receive(data,optional)
except queue.Empty:
pass
def process_buffer(self,buffer):
if len(buffer) > 2*max(SerialConfig.RECEIVE_LEN_LIST):
try:
index = buffer.find("550",2*min(SerialConfig.RECEIVE_LEN_LIST))
if int(index/2) in SerialConfig.RECEIVE_LEN_LIST :
prev_buffer=buffer[0:index]
if Packet.check(prev_buffer):
self.receive_queue.put(prev_buffer)
self.process_buffer(buffer[index:])
except Exception as e:
logging.error("process buffer error:%s" % e)
elif len(buffer)/2 in SerialConfig.RECEIVE_LEN_LIST and Packet.check(buffer):
self.receive_queue.put(buffer)
def run(self):
"""
run when self.start()
threading.Thread function
"""
logging.debug('LinptechSerial started')
while not self.stop_flag.is_set():
for i in range(10):
time.sleep(SerialConfig.SEND_INTERVAL)
try:
number = self.ser.inWaiting()
# print(number)
if number >= min(SerialConfig.RECEIVE_LEN_LIST):
self.buffer += str(binascii.b2a_hex(self.ser.read(number)),encoding="utf-8")
logging.debug("numner=%s,self.buffer=%s" % (number,self.buffer))
self.ser.flushInput()
# 多组数据同时进入,进行递归分割
self.process_buffer(self.buffer)
self.buffer=""
self.get_from_receive_queue()
except Exception as e:
logging.error("run serial read data error:%s" % e)
self.restart()
# # If there's messages in transmit queue,send them
packet = self.get_from_send_queue()
if packet:
try:
logging.debug("send_packet=%s",packet)
self.ser.write(binascii.unhexlify(packet))
except Exception as e:
logging.error("run serial write data error:%s" % e)
self.restart()
if __name__=="__main__":
logging.getLogger().setLevel(logging.DEBUG)
print(SerialConfig.RECEIVE_LEN_LIST)
port ='/dev/tty.SLAB_USBtoUART'
#port ="COM3"
def receive(data,optional):
print(data,optional)
lp_serial=LinptechSerial(port,receive=receive)
lp_serial.setDaemon(True)
lp_serial.start()
while lp_serial.is_alive():
time.sleep(5)
| [
"logging.getLogger",
"logging.debug",
"linptech.packet.Packet.parse",
"time.sleep",
"threading.Event",
"serial.Serial",
"linptech.packet.Packet.create",
"Queue.Queue",
"logging.error",
"binascii.unhexlify",
"linptech.packet.Packet.check"
] | [((226, 245), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (243, 245), False, 'import logging\n'), ((476, 493), 'threading.Event', 'threading.Event', ([], {}), '()\n', (491, 493), False, 'import threading\n'), ((557, 570), 'Queue.Queue', 'queue.Queue', ([], {}), '()\n', (568, 570), True, 'import Queue as queue\n'), ((594, 607), 'Queue.Queue', 'queue.Queue', ([], {}), '()\n', (605, 607), True, 'import Queue as queue\n'), ((755, 799), 'serial.Serial', 'serial.Serial', (['self.port', '(57600)'], {'timeout': '(0.1)'}), '(self.port, 57600, timeout=0.1)\n', (768, 799), False, 'import serial\n'), ((2748, 2787), 'logging.debug', 'logging.debug', (['"""LinptechSerial started"""'], {}), "('LinptechSerial started')\n", (2761, 2787), False, 'import logging\n'), ((4071, 4084), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4081, 4084), False, 'import time\n'), ((920, 933), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (930, 933), False, 'import time\n'), ((1334, 1353), 'linptech.packet.Packet.create', 'Packet.create', (['data'], {}), '(data)\n', (1347, 1353), False, 'from linptech.packet import Packet\n'), ((3764, 3783), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3781, 3783), False, 'import logging\n'), ((1488, 1521), 'logging.error', 'logging.error', (['"""send error:%s"""', 'e'], {}), "('send error:%s', e)\n", (1501, 1521), False, 'import logging\n'), ((2018, 2038), 'linptech.packet.Packet.parse', 'Packet.parse', (['packet'], {}), '(packet)\n', (2030, 2038), False, 'from linptech.packet import Packet\n'), ((2609, 2629), 'linptech.packet.Packet.check', 'Packet.check', (['buffer'], {}), '(buffer)\n', (2621, 2629), False, 'from linptech.packet import Packet\n'), ((2852, 2890), 'time.sleep', 'time.sleep', (['SerialConfig.SEND_INTERVAL'], {}), '(SerialConfig.SEND_INTERVAL)\n', (2862, 2890), False, 'import time\n'), ((1072, 1130), 'logging.error', 'logging.error', (['"""reconnect serialport %d"""', 'self.restart_num'], {}), "('reconnect serialport %d', self.restart_num)\n", (1085, 1130), False, 'import logging\n'), ((2367, 2392), 'linptech.packet.Packet.check', 'Packet.check', (['prev_buffer'], {}), '(prev_buffer)\n', (2379, 2392), False, 'from linptech.packet import Packet\n'), ((2506, 2550), 'logging.error', 'logging.error', (["('process buffer error:%s' % e)"], {}), "('process buffer error:%s' % e)\n", (2519, 2550), False, 'import logging\n'), ((3546, 3585), 'logging.debug', 'logging.debug', (['"""send_packet=%s"""', 'packet'], {}), "('send_packet=%s', packet)\n", (3559, 3585), False, 'import logging\n'), ((1181, 1225), 'serial.Serial', 'serial.Serial', (['self.port', '(57600)'], {'timeout': '(0.1)'}), '(self.port, 57600, timeout=0.1)\n', (1194, 1225), False, 'import serial\n'), ((3104, 3169), 'logging.debug', 'logging.debug', (["('numner=%s,self.buffer=%s' % (number, self.buffer))"], {}), "('numner=%s,self.buffer=%s' % (number, self.buffer))\n", (3117, 3169), False, 'import logging\n'), ((3348, 3398), 'logging.error', 'logging.error', (["('run serial read data error:%s' % e)"], {}), "('run serial read data error:%s' % e)\n", (3361, 3398), False, 'import logging\n'), ((3605, 3631), 'binascii.unhexlify', 'binascii.unhexlify', (['packet'], {}), '(packet)\n', (3623, 3631), False, 'import binascii\n'), ((3665, 3716), 'logging.error', 'logging.error', (["('run serial write data error:%s' % e)"], {}), "('run serial write data error:%s' % e)\n", (3678, 3716), False, 'import logging\n'), ((1258, 1274), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (1271, 1274), False, 'import logging\n')] |
from datetime import date
from typing import Type
import pytest
import sympy
from sympy import Interval, oo
from nettlesome.entities import Entity
from nettlesome.predicates import Predicate
from nettlesome.quantities import Comparison, Q_, Quantity
class TestComparisons:
def test_comparison_with_wrong_comparison_symbol(self):
with pytest.raises(ValueError):
_ = Comparison(
content="the height of {} was {}",
sign=">>",
expression=Q_("160 centimeters"),
)
def test_comparison_interval(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("20 miles"),
)
assert comparison.interval == Interval(20, oo, left_open=True)
def test_comparison_not_equal(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign="!=",
expression=Q_("20 miles"),
)
assert comparison.interval == sympy.Union(
Interval(0, 20, right_open=True), Interval(20, oo, left_open=True)
)
class TestPredicates:
def test_no_sign_allowed_for_predicate(self):
with pytest.raises(TypeError):
Predicate(
"the date when $work was created was",
sign=">=",
expression=date(1978, 1, 1),
)
def test_term_positions(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
assert predicate.term_positions() == {
"organizer1": {0, 1},
"organizer2": {0, 1},
"player1": {2, 4},
"game": {3},
"player2": {2, 4},
}
def test_term_positions_with_repetition(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."
)
assert predicate.term_positions() == {
"organizer1": {0, 1},
"organizer2": {0, 1},
"game": {2},
}
def test_term_permutations(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
assert predicate.term_index_permutations() == [
(0, 1, 2, 3, 4),
(0, 1, 4, 3, 2),
(1, 0, 2, 3, 4),
(1, 0, 4, 3, 2),
]
def test_term_permutations_with_repetition(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."
)
assert predicate.term_index_permutations() == [
(0, 1, 2),
(1, 0, 2),
]
def test_convert_false_statement_about_quantity_to_obverse(self, make_predicate):
assert make_predicate["p7_obverse"].truth is True
assert make_predicate["p7_obverse"].quantity == Q_(35, "foot")
assert make_predicate["p7"].truth is True
assert make_predicate["p7"].sign == "<="
assert "sign='<='" in repr(make_predicate["p7"])
assert make_predicate["p7_obverse"].sign == "<="
def test_quantity_type(self, make_predicate):
assert isinstance(make_predicate["p7"].quantity, Quantity)
def test_string_for_date_as_expression(self):
copyright_date_range = Comparison(
content="the date when $work was created was",
sign=">=",
expression=date(1978, 1, 1),
)
assert str(copyright_date_range).endswith("1978-01-01")
def test_quantity_string(self, make_predicate):
assert str(make_predicate["p7"].quantity) == "35 foot"
def test_predicate_content_comparison(self, make_predicate):
assert make_predicate["p8_exact"].content == make_predicate["p7"].content
def test_expression_comparison(self, make_predicate):
assert str(make_predicate["p7"].quantity_range) == "no more than 35 foot"
assert str(make_predicate["p9"].quantity_range) == "no more than 5 foot"
def test_predicate_has_no_expression_comparison(self, make_predicate):
with pytest.raises(AttributeError):
make_predicate["p1"].expression_comparison() == ""
def test_context_slots(self, make_predicate):
assert len(make_predicate["p7"]) == 2
def test_str_for_predicate_with_number_quantity(self, make_predicate):
assert "distance between $place1 and $place2 was at least 20" in str(
make_predicate["p8_int"]
)
assert "distance between $place1 and $place2 was at least 20.0" in str(
make_predicate["p8_float"]
)
assert "distance between $place1 and $place2 was at least 20 foot" in str(
make_predicate["p8"]
)
def test_template_singular_by_default(self):
predicate = Predicate(content="$people were in $city")
assert str(predicate.template) == 'StatementTemplate("$people was in $city")'
@pytest.mark.parametrize(
"context, expected",
[
(
[Entity(name="the book", plural=False)],
"<the book> was names, towns,",
),
(
[Entity(name="the book's listings", plural=True)],
"<the book's listings> were names, towns,",
),
],
)
def test_make_str_plural(self, context, expected):
phrase = (
"$thing were names, towns, and telephone numbers of telephone subscribers"
)
predicate = Predicate(content=phrase)
with_context = predicate._content_with_terms(context)
assert with_context.startswith(expected)
def test_str_not_equal(self, make_predicate):
assert (
"the distance between $place1 and $place2 was not equal to 35 foot"
in str(make_predicate["p7_not_equal"])
)
def test_negated_method(self, make_predicate):
assert make_predicate["p7"].negated().means(make_predicate["p7_opposite"])
assert make_predicate["p3"].negated().means(make_predicate["p3_false"])
class TestSameMeaning:
def test_predicate_equality(self, make_predicate):
assert make_predicate["p1"].means(make_predicate["p1_again"])
def test_predicate_inequality(self, make_predicate, watt_factor):
assert not make_predicate["p2"].means(make_predicate["p2_reflexive"])
def test_error_predicate_means_fact(self, make_predicate, watt_factor):
with pytest.raises(TypeError):
make_predicate["p2"].means(watt_factor["f2"])
def test_obverse_predicates_equal(self, make_predicate):
assert make_predicate["p7"].means(make_predicate["p7_obverse"])
def test_equal_float_and_int(self, make_predicate):
"""
These now evaluate equal even though their equal quantities are different types
"""
assert make_predicate["p8_int"].means(make_predicate["p8_float"])
def test_same_meaning_float_and_int(self, make_predicate):
"""
The Predicate means method considers equal quantities of different types to have the same meaning.
"""
assert make_predicate["p8_int"].means(make_predicate["p8_float"])
def test_no_equality_with_inconsistent_dimensionality(self, make_predicate):
assert not make_predicate["p9"].means(make_predicate["p9_acres"])
def test_different_truth_value_prevents_equality(self, make_predicate):
assert not make_predicate["p_murder"].means(make_predicate["p_murder_whether"])
assert not make_predicate["p_murder_false"].means(
make_predicate["p_murder_whether"]
)
assert not make_predicate["p_murder_false"].means(make_predicate["p_murder"])
def test_predicate_does_not_mean_fact(self, make_predicate, watt_factor):
with pytest.raises(TypeError):
make_predicate["p8"].means(watt_factor["f8"])
def test_term_placeholders_do_not_change_result(self):
left = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
right = Predicate(
content="$promoter1 and $promoter2 planned for $player1 to play $chess with $player2."
)
assert left.means(right)
def test_term_positions_change_result(self):
left = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
right = Predicate(
content="$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."
)
assert not left.means(right)
class TestImplication:
def test_greater_than_because_of_quantity(self, make_predicate):
assert make_predicate["p8_meters"] > make_predicate["p8"]
assert make_predicate["p8_meters"] != make_predicate["p8"]
def test_greater_float_and_int(self, make_predicate):
assert make_predicate["p8_higher_int"] > make_predicate["p8_float"]
assert make_predicate["p8_int"] < make_predicate["p8_higher_int"]
def test_any_truth_value_implies_none(self, make_predicate):
assert make_predicate["p_murder"] > make_predicate["p_murder_whether"]
assert make_predicate["p_murder_false"] > make_predicate["p_murder_whether"]
def test_no_implication_by_exact_quantity(self, make_predicate):
assert not make_predicate["p_quantity=3"] > make_predicate["p_quantity>5"]
def test_no_implication_of_exact_quantity(self, make_predicate):
assert not make_predicate["p_quantity>5"] > make_predicate["p_quantity=3"]
def test_no_implication_by_greater_or_equal_quantity(self, make_predicate):
assert not make_predicate["p_quantity>=4"] > make_predicate["p_quantity>5"]
def test_no_implication_of_greater_or_equal_quantity(self):
less = Comparison(content="The number of mice was", sign=">", expression=4)
more = Comparison(content="The number of mice was", sign=">=", expression=5)
assert not less.implies(more)
def test_no_contradiction_inconsistent_dimensions(self):
equal = Comparison(
content="${defendant}'s sentence was", sign="=", expression="8 years"
)
less = Comparison(
content="${defendant}'s sentence was", sign="<=", expression="10 parsecs"
)
assert not equal.contradicts(less)
assert not equal.implies(less)
def test_equal_implies_greater_or_equal(self, make_predicate):
assert make_predicate["p9_exact"] > make_predicate["p9"]
def test_implication_with_not_equal(self, make_predicate):
assert make_predicate["p7_opposite"] > make_predicate["p7_not_equal"]
def test_no_implication_with_inconsistent_dimensionality(self, make_predicate):
assert not make_predicate["p9"] >= make_predicate["p9_acres"]
assert not make_predicate["p9"] <= make_predicate["p9_acres"]
def test_implication_with_no_truth_value(self, make_predicate):
assert not make_predicate["p2_no_truth"] > make_predicate["p2"]
assert make_predicate["p2"] > make_predicate["p2_no_truth"]
def test_predicate_cannot_imply_factor(self, make_predicate, watt_factor):
assert not make_predicate["p7_true"] > watt_factor["f7"]
def test_implication_due_to_dates(self):
copyright_date_range = Comparison(
content="the date when $work was created was",
sign=">=",
expression=date(1978, 1, 1),
)
copyright_date_specific = Comparison(
content="the date when $work was created was",
sign="=",
expression=date(1980, 6, 20),
)
assert copyright_date_specific.implies(copyright_date_range)
class TestContradiction:
def test_predicate_no_contradictions(self, make_predicate):
assert not make_predicate["p7"].contradicts(make_predicate["p7_true"])
assert not make_predicate["p1"].contradicts(make_predicate["p1_again"])
assert not make_predicate["p3"].contradicts(make_predicate["p7"])
def test_contradiction_by_exact(self, make_predicate):
assert make_predicate["p8_exact"].contradicts(make_predicate["p8_less"])
def test_contradiction_of_exact(self, make_predicate):
assert make_predicate["p8_less"].contradicts(make_predicate["p8_exact"])
def test_contradiction_by_equal_quantity(self, make_predicate):
assert make_predicate["p_quantity=3"].contradicts(
make_predicate["p_quantity>5"]
)
def test_contradiction_of_equal_quantity(self, make_predicate):
assert make_predicate["p_quantity>5"].contradicts(
make_predicate["p_quantity=3"]
)
def test_no_contradiction_by_greater_or_equal_quantity(self, make_predicate):
assert not make_predicate["p_quantity>=4"].contradicts(
make_predicate["p_quantity>5"]
)
def test_no_contradiction_of_greater_or_equal_quantity(self, make_predicate):
assert not make_predicate["p_quantity>5"].contradicts(
make_predicate["p_quantity>=4"]
)
def test_error_predicate_contradict_factor(self, make_predicate, watt_factor):
with pytest.raises(TypeError):
make_predicate["p7_true"].contradicts(watt_factor["f7"])
def test_no_contradiction_with_no_truth_value(self, make_predicate):
assert not make_predicate["p2_no_truth"].contradicts(make_predicate["p2"])
assert not make_predicate["p2"].contradicts(make_predicate["p2_no_truth"])
def test_no_contradiction_with_inconsistent_dimensionality(self, make_predicate):
assert not make_predicate["p9"].contradicts(make_predicate["p9_acres"])
assert not make_predicate["p9_acres"].contradicts(make_predicate["p9"])
def test_contradiction_with_quantity(self, make_predicate):
assert make_predicate["p8_less"].contradicts(make_predicate["p8_meters"])
def test_contradictory_date_ranges(self):
later = Comparison(
content="the date $dentist became a licensed dentist was",
sign=">",
expression=date(2010, 1, 1),
)
earlier = Comparison(
content="the date $dentist became a licensed dentist was",
sign="<",
expression=date(1990, 1, 1),
)
assert later.contradicts(earlier)
assert earlier.contradicts(later)
def test_no_contradiction_without_truth_value(self):
later = Comparison(
content="the date $dentist became a licensed dentist was",
sign=">",
expression=date(2010, 1, 1),
truth=None,
)
earlier = Comparison(
content="the date $dentist became a licensed dentist was",
sign="<",
expression=date(1990, 1, 1),
)
assert not later.contradicts(earlier)
assert not earlier.contradicts(later)
def test_no_contradiction_date_and_time_period(self):
later = Comparison(
content="the date $dentist became a licensed dentist was",
sign=">",
expression=date(2010, 1, 1),
)
earlier = Comparison(
content="the date $dentist became a licensed dentist was",
sign="<",
expression="2000 years",
)
assert not later.contradicts(earlier)
assert not earlier.contradicts(later)
def test_no_contradiction_irrelevant_quantities(self):
more_cows = Comparison(
content="the number of cows $person owned was",
sign=">",
expression=10,
)
fewer_horses = Comparison(
content="the number of horses $person owned was",
sign="<",
expression=3,
)
assert not more_cows.contradicts(fewer_horses)
assert not fewer_horses.contradicts(more_cows)
def test_no_contradiction_of_predicate(self):
more_cows = Comparison(
content="the number of cows $person owned was",
sign=">",
expression=10,
)
no_cows = Predicate(content="the number of cows $person owned was", truth=False)
assert not more_cows.contradicts(no_cows)
assert not no_cows.contradicts(more_cows)
class TestQuantities:
def test_does_not_exclude_other_quantity(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("20 miles"),
)
comparison_opposite = Comparison(
content="the distance between $place1 and $place2 was",
sign="<",
expression=Q_("30 miles"),
)
assert not comparison.contradicts(comparison_opposite)
def test_convert_quantity_of_Comparison(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("20 miles"),
)
comparison_km = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("30 kilometers"),
)
assert comparison > comparison_km
def test_quantity_comparison_to_predicate(self):
distance = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression="20 miles",
)
predicate = Predicate(content="the distance between $place1 and $place2 was")
assert not distance >= predicate
| [
"nettlesome.quantities.Q_",
"sympy.Interval",
"pytest.raises",
"datetime.date",
"nettlesome.quantities.Comparison",
"nettlesome.entities.Entity",
"nettlesome.predicates.Predicate"
] | [((1519, 1627), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $player1 to play $game with $player2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $player1 to play $game with $player2.'\n )\n", (1528, 1627), False, 'from nettlesome.predicates import Predicate\n'), ((1924, 2038), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2.'\n )\n", (1933, 2038), False, 'from nettlesome.predicates import Predicate\n'), ((2260, 2368), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $player1 to play $game with $player2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $player1 to play $game with $player2.'\n )\n", (2269, 2368), False, 'from nettlesome.predicates import Predicate\n'), ((2638, 2752), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2.'\n )\n", (2647, 2752), False, 'from nettlesome.predicates import Predicate\n'), ((5007, 5049), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$people were in $city"""'}), "(content='$people were in $city')\n", (5016, 5049), False, 'from nettlesome.predicates import Predicate\n'), ((5704, 5729), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': 'phrase'}), '(content=phrase)\n', (5713, 5729), False, 'from nettlesome.predicates import Predicate\n'), ((8158, 8266), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $player1 to play $game with $player2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $player1 to play $game with $player2.'\n )\n", (8167, 8266), False, 'from nettlesome.predicates import Predicate\n'), ((8295, 8402), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$promoter1 and $promoter2 planned for $player1 to play $chess with $player2."""'}), "(content=\n '$promoter1 and $promoter2 planned for $player1 to play $chess with $player2.'\n )\n", (8304, 8402), False, 'from nettlesome.predicates import Predicate\n'), ((8513, 8621), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $player1 to play $game with $player2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $player1 to play $game with $player2.'\n )\n", (8522, 8621), False, 'from nettlesome.predicates import Predicate\n'), ((8650, 8764), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."""'}), "(content=\n '$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2.'\n )\n", (8659, 8764), False, 'from nettlesome.predicates import Predicate\n'), ((10031, 10099), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""The number of mice was"""', 'sign': '""">"""', 'expression': '(4)'}), "(content='The number of mice was', sign='>', expression=4)\n", (10041, 10099), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((10115, 10184), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""The number of mice was"""', 'sign': '""">="""', 'expression': '(5)'}), "(content='The number of mice was', sign='>=', expression=5)\n", (10125, 10184), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((10301, 10387), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""${defendant}\'s sentence was"""', 'sign': '"""="""', 'expression': '"""8 years"""'}), '(content="${defendant}\'s sentence was", sign=\'=\', expression=\n \'8 years\')\n', (10311, 10387), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((10420, 10510), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""${defendant}\'s sentence was"""', 'sign': '"""<="""', 'expression': '"""10 parsecs"""'}), '(content="${defendant}\'s sentence was", sign=\'<=\', expression=\n \'10 parsecs\')\n', (10430, 10510), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((15374, 15483), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""the date $dentist became a licensed dentist was"""', 'sign': '"""<"""', 'expression': '"""2000 years"""'}), "(content='the date $dentist became a licensed dentist was', sign=\n '<', expression='2000 years')\n", (15384, 15483), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((15698, 15785), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""the number of cows $person owned was"""', 'sign': '""">"""', 'expression': '(10)'}), "(content='the number of cows $person owned was', sign='>',\n expression=10)\n", (15708, 15785), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((15852, 15940), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""the number of horses $person owned was"""', 'sign': '"""<"""', 'expression': '(3)'}), "(content='the number of horses $person owned was', sign='<',\n expression=3)\n", (15862, 15940), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((16165, 16252), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""the number of cows $person owned was"""', 'sign': '""">"""', 'expression': '(10)'}), "(content='the number of cows $person owned was', sign='>',\n expression=10)\n", (16175, 16252), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((16314, 16384), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""the number of cows $person owned was"""', 'truth': '(False)'}), "(content='the number of cows $person owned was', truth=False)\n", (16323, 16384), False, 'from nettlesome.predicates import Predicate\n'), ((17496, 17599), 'nettlesome.quantities.Comparison', 'Comparison', ([], {'content': '"""the distance between $place1 and $place2 was"""', 'sign': '""">"""', 'expression': '"""20 miles"""'}), "(content='the distance between $place1 and $place2 was', sign='>',\n expression='20 miles')\n", (17506, 17599), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((17663, 17728), 'nettlesome.predicates.Predicate', 'Predicate', ([], {'content': '"""the distance between $place1 and $place2 was"""'}), "(content='the distance between $place1 and $place2 was')\n", (17672, 17728), False, 'from nettlesome.predicates import Predicate\n'), ((350, 375), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (363, 375), False, 'import pytest\n'), ((798, 830), 'sympy.Interval', 'Interval', (['(20)', 'oo'], {'left_open': '(True)'}), '(20, oo, left_open=True)\n', (806, 830), False, 'from sympy import Interval, oo\n'), ((1273, 1297), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1286, 1297), False, 'import pytest\n'), ((3078, 3092), 'nettlesome.quantities.Q_', 'Q_', (['(35)', '"""foot"""'], {}), "(35, 'foot')\n", (3080, 3092), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((4290, 4319), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (4303, 4319), False, 'import pytest\n'), ((6654, 6678), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6667, 6678), False, 'import pytest\n'), ((7999, 8023), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8012, 8023), False, 'import pytest\n'), ((13399, 13423), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (13412, 13423), False, 'import pytest\n'), ((734, 748), 'nettlesome.quantities.Q_', 'Q_', (['"""20 miles"""'], {}), "('20 miles')\n", (736, 748), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((1020, 1034), 'nettlesome.quantities.Q_', 'Q_', (['"""20 miles"""'], {}), "('20 miles')\n", (1022, 1034), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((1109, 1141), 'sympy.Interval', 'Interval', (['(0)', '(20)'], {'right_open': '(True)'}), '(0, 20, right_open=True)\n', (1117, 1141), False, 'from sympy import Interval, oo\n'), ((1143, 1175), 'sympy.Interval', 'Interval', (['(20)', 'oo'], {'left_open': '(True)'}), '(20, oo, left_open=True)\n', (1151, 1175), False, 'from sympy import Interval, oo\n'), ((3623, 3639), 'datetime.date', 'date', (['(1978)', '(1)', '(1)'], {}), '(1978, 1, 1)\n', (3627, 3639), False, 'from datetime import date\n'), ((11658, 11674), 'datetime.date', 'date', (['(1978)', '(1)', '(1)'], {}), '(1978, 1, 1)\n', (11662, 11674), False, 'from datetime import date\n'), ((11836, 11853), 'datetime.date', 'date', (['(1980)', '(6)', '(20)'], {}), '(1980, 6, 20)\n', (11840, 11853), False, 'from datetime import date\n'), ((14319, 14335), 'datetime.date', 'date', (['(2010)', '(1)', '(1)'], {}), '(2010, 1, 1)\n', (14323, 14335), False, 'from datetime import date\n'), ((14493, 14509), 'datetime.date', 'date', (['(1990)', '(1)', '(1)'], {}), '(1990, 1, 1)\n', (14497, 14509), False, 'from datetime import date\n'), ((14807, 14823), 'datetime.date', 'date', (['(2010)', '(1)', '(1)'], {}), '(2010, 1, 1)\n', (14811, 14823), False, 'from datetime import date\n'), ((15005, 15021), 'datetime.date', 'date', (['(1990)', '(1)', '(1)'], {}), '(1990, 1, 1)\n', (15009, 15021), False, 'from datetime import date\n'), ((15328, 15344), 'datetime.date', 'date', (['(2010)', '(1)', '(1)'], {}), '(2010, 1, 1)\n', (15332, 15344), False, 'from datetime import date\n'), ((16707, 16721), 'nettlesome.quantities.Q_', 'Q_', (['"""20 miles"""'], {}), "('20 miles')\n", (16709, 16721), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((16888, 16902), 'nettlesome.quantities.Q_', 'Q_', (['"""30 miles"""'], {}), "('30 miles')\n", (16890, 16902), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((17175, 17189), 'nettlesome.quantities.Q_', 'Q_', (['"""20 miles"""'], {}), "('20 miles')\n", (17177, 17189), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((17350, 17369), 'nettlesome.quantities.Q_', 'Q_', (['"""30 kilometers"""'], {}), "('30 kilometers')\n", (17352, 17369), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((510, 531), 'nettlesome.quantities.Q_', 'Q_', (['"""160 centimeters"""'], {}), "('160 centimeters')\n", (512, 531), False, 'from nettlesome.quantities import Comparison, Q_, Quantity\n'), ((1431, 1447), 'datetime.date', 'date', (['(1978)', '(1)', '(1)'], {}), '(1978, 1, 1)\n', (1435, 1447), False, 'from datetime import date\n'), ((5237, 5274), 'nettlesome.entities.Entity', 'Entity', ([], {'name': '"""the book"""', 'plural': '(False)'}), "(name='the book', plural=False)\n", (5243, 5274), False, 'from nettlesome.entities import Entity\n'), ((5371, 5418), 'nettlesome.entities.Entity', 'Entity', ([], {'name': '"""the book\'s listings"""', 'plural': '(True)'}), '(name="the book\'s listings", plural=True)\n', (5377, 5418), False, 'from nettlesome.entities import Entity\n')] |
import numpy as np
def Linear_Fit(array_A, array_B):
"""
Returns slope and y-intercept of the line of best fit
"""
array_A = np.array(array_A)
array_B = np.array(array_B)
#Pair arrays then sort them for easier fit
zipped_list = zip(array_A[~np.isnan(array_A)], array_B[~np.isnan(array_B)])
sorted_list = sorted(zipped_list)
sorted_a, sorted_b = zip(*sorted_list)
m, b = np.polyfit(sorted_a, sorted_b, 1)
return m, b
| [
"numpy.array",
"numpy.isnan",
"numpy.polyfit"
] | [((142, 159), 'numpy.array', 'np.array', (['array_A'], {}), '(array_A)\n', (150, 159), True, 'import numpy as np\n'), ((174, 191), 'numpy.array', 'np.array', (['array_B'], {}), '(array_B)\n', (182, 191), True, 'import numpy as np\n'), ((421, 454), 'numpy.polyfit', 'np.polyfit', (['sorted_a', 'sorted_b', '(1)'], {}), '(sorted_a, sorted_b, 1)\n', (431, 454), True, 'import numpy as np\n'), ((275, 292), 'numpy.isnan', 'np.isnan', (['array_A'], {}), '(array_A)\n', (283, 292), True, 'import numpy as np\n'), ((304, 321), 'numpy.isnan', 'np.isnan', (['array_B'], {}), '(array_B)\n', (312, 321), True, 'import numpy as np\n')] |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from app.api import gas_price_prediction, airbnb_predict
app = FastAPI(
title='RESFEBER CARTER DS API',
description=""" Awesome Data Science Team.
\n**INSTRUCTIONS**
\n- To use the API, click on a *post* method below.
\n- Click on "Try it out" on the right side
\n- Use the default values or enter your own values
\n- Click on "Execute"
\n- You will get a prediction below
\n**Note:** If you enter a value that the model is not expecting, you will get an error message along with what the error is
\n ***MOST IMPORTANTLY -- HAVE FUN***""",
version='0.1',
docs_url='/',
)
app.include_router(gas_price_prediction.router)
app.include_router(airbnb_predict.router)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
if __name__ == '__main__':
uvicorn.run(app)
| [
"fastapi.FastAPI",
"uvicorn.run"
] | [((159, 764), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""RESFEBER CARTER DS API"""', 'description': '""" Awesome Data Science Team.\n \n**INSTRUCTIONS** \n \n- To use the API, click on a *post* method below. \n \n- Click on "Try it out" on the right side\n \n- Use the default values or enter your own values\n \n- Click on "Execute" \n \n- You will get a prediction below\n \n**Note:** If you enter a value that the model is not expecting, you will get an error message along with what the error is\n \n \n ***MOST IMPORTANTLY -- HAVE FUN***"""', 'version': '"""0.1"""', 'docs_url': '"""/"""'}), '(title=\'RESFEBER CARTER DS API\', description=\n """ Awesome Data Science Team.\n \n**INSTRUCTIONS** \n \n- To use the API, click on a *post* method below. \n \n- Click on "Try it out" on the right side\n \n- Use the default values or enter your own values\n \n- Click on "Execute" \n \n- You will get a prediction below\n \n**Note:** If you enter a value that the model is not expecting, you will get an error message along with what the error is\n \n \n ***MOST IMPORTANTLY -- HAVE FUN***"""\n , version=\'0.1\', docs_url=\'/\')\n', (166, 764), False, 'from fastapi import FastAPI\n'), ((1051, 1067), 'uvicorn.run', 'uvicorn.run', (['app'], {}), '(app)\n', (1062, 1067), False, 'import uvicorn\n')] |
# OpenCV: Image processing
import cv2
import time
import popupWindow as detectionWindow
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
# numpy: numerical computation
import numpy as np
import core.utils as utils
# Tensorflow: deep learning
import tensorflow as tf
# Allow for GPU memory growth to prevent "Out of memory" errors
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
import sma as sma
# YOLOV3 itself
from core.yolov3 import YOLOv3, decode
def startDetection(window, minConfidence, videoPath):
popUpFlag = False
video_path = videoPath
# Number of classes, one class for each element
num_classes = 80
input_size = 704
min_confidence = minConfidence / 100
# Layer to be used as an entry point into a Network (a graph of layers).
# Tuple with height, width and depth used to reshape arrays.
# This is used for reshaping in Keras.
input_layer = tf.keras.layers.Input([input_size, input_size, 3])
# (TO DO: see how it does it)
feature_maps = YOLOv3(input_layer)
bbox_tensors = []
for i, fm in enumerate(feature_maps):
bbox_tensor = decode(fm, i)
bbox_tensors.append(bbox_tensor)
# Model groups layers into an object with training and inference features.
# input: input_layer
# output: bbox_tensors
model = tf.keras.Model(input_layer, bbox_tensors)
# load weights from file
utils.load_weights(model, "./data/weights/handgun.weights")
# Prints a string summary of the network.
# model.summary()
# Load video from file with openCV
vid = cv2.VideoCapture(video_path)
runFlag = True
while runFlag:
# Get a frame from the video
# Returns a bool (True/False).
# If frame is read correctly, it will be True.
# So you can check end of the video by checking this return value.
return_value, frame = vid.read()
if not return_value:
raise ValueError("No image!")
# thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
# print(thistuple[:2]) => ('apple', 'banana')
# shape holds heigth, width and number of channels
# Gets width and height of the frame
frame_size = frame.shape[:2]
# np.copy(frame) => Return an array copy of the given object.
# Resizes frame to network input size => def image_preporcess(image, target_size, gt_boxes=None):
image_data = utils.image_preporcess(np.copy(frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
# Performs the prediction on the frame (TO DO: see how it does it)
pred_bbox = model.predict_on_batch(image_data)
# Changes tensor shape, similar to transposing a matrix
# href: https://www.tensorflow.org/api_docs/python/tf/reshape
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
# Concatenates tensors along one dimension axis = 0 => axis = y
pred_bbox = tf.concat(pred_bbox, axis=0)
# (TO DO: see how it does it)
bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, min_confidence)
# (TO DO: see how it does it)
bboxes = utils.nms(bboxes, 0.45, method='nms')
# Draws boundingbox in image
# (TO DO: see how it does it)
image = utils.draw_bbox(frame, bboxes)
window.imageDisplay.setPixmap(QtGui.QPixmap(utils.convert_cv_qt(image.image)))
# HERE check if detected class is handgun
if(image.classDetected == 'handgun' and image.calculatedSma >= 0.95):
if (popUpFlag == False):
popUpFlag = True
popUpFlag = callPopUpWindow(window, image.image)
if popUpFlag == "Alarm":
popUpFlag = True
window.title.setText("Alarm triggered - Detection saved to PC")
window.title.setStyleSheet("color : red")
# window.title.setPointSize(25)
window.setStyleSheet("""QMainWindow{border: 6px solid red;}""")
# Breaks while loop on 'q' press
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
def callPopUpWindow(self, detection):
dialog = detectionWindow.DetectionWindow(self)
dialog.setImage(detection)
dialog.show()
if dialog.exec_() == QtWidgets.QDialog.Accepted:
return dialog.returnValue
| [
"core.utils.load_weights",
"tensorflow.keras.layers.Input",
"core.yolov3.YOLOv3",
"core.utils.draw_bbox",
"numpy.copy",
"popupWindow.DetectionWindow",
"tensorflow.shape",
"tensorflow.config.experimental.set_memory_growth",
"core.yolov3.decode",
"tensorflow.concat",
"core.utils.postprocess_boxes"... | [((494, 545), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (538, 545), True, 'import tensorflow as tf\n'), ((1219, 1269), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['[input_size, input_size, 3]'], {}), '([input_size, input_size, 3])\n', (1240, 1269), True, 'import tensorflow as tf\n'), ((1324, 1343), 'core.yolov3.YOLOv3', 'YOLOv3', (['input_layer'], {}), '(input_layer)\n', (1330, 1343), False, 'from core.yolov3 import YOLOv3, decode\n'), ((1631, 1672), 'tensorflow.keras.Model', 'tf.keras.Model', (['input_layer', 'bbox_tensors'], {}), '(input_layer, bbox_tensors)\n', (1645, 1672), True, 'import tensorflow as tf\n'), ((1707, 1766), 'core.utils.load_weights', 'utils.load_weights', (['model', '"""./data/weights/handgun.weights"""'], {}), "(model, './data/weights/handgun.weights')\n", (1725, 1766), True, 'import core.utils as utils\n'), ((1887, 1915), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (1903, 1915), False, 'import cv2\n'), ((4629, 4666), 'popupWindow.DetectionWindow', 'detectionWindow.DetectionWindow', (['self'], {}), '(self)\n', (4660, 4666), True, 'import popupWindow as detectionWindow\n'), ((1432, 1445), 'core.yolov3.decode', 'decode', (['fm', 'i'], {}), '(fm, i)\n', (1438, 1445), False, 'from core.yolov3 import YOLOv3, decode\n'), ((3326, 3354), 'tensorflow.concat', 'tf.concat', (['pred_bbox'], {'axis': '(0)'}), '(pred_bbox, axis=0)\n', (3335, 3354), True, 'import tensorflow as tf\n'), ((3411, 3485), 'core.utils.postprocess_boxes', 'utils.postprocess_boxes', (['pred_bbox', 'frame_size', 'input_size', 'min_confidence'], {}), '(pred_bbox, frame_size, input_size, min_confidence)\n', (3434, 3485), True, 'import core.utils as utils\n'), ((3542, 3579), 'core.utils.nms', 'utils.nms', (['bboxes', '(0.45)'], {'method': '"""nms"""'}), "(bboxes, 0.45, method='nms')\n", (3551, 3579), True, 'import core.utils as utils\n'), ((3672, 3702), 'core.utils.draw_bbox', 'utils.draw_bbox', (['frame', 'bboxes'], {}), '(frame, bboxes)\n', (3687, 3702), True, 'import core.utils as utils\n'), ((589, 640), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (629, 640), True, 'import tensorflow as tf\n'), ((2779, 2793), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (2786, 2793), True, 'import numpy as np\n'), ((4530, 4553), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4551, 4553), False, 'import cv2\n'), ((3760, 3792), 'core.utils.convert_cv_qt', 'utils.convert_cv_qt', (['image.image'], {}), '(image.image)\n', (3779, 3792), True, 'import core.utils as utils\n'), ((4482, 4496), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4493, 4496), False, 'import cv2\n'), ((3196, 3207), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3204, 3207), True, 'import tensorflow as tf\n')] |
"""
Cadquery Extensions
name: extensions.py
by: Gumyr
date: August 2nd 2021
desc:
This python module provides extensions to the native cadquery code base.
Hopefully future generations of cadquery will incorporate this or similar
functionality.
license:
Copyright 2021 Gumyr
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from math import sin, cos, radians
from typing import Union, Tuple
import cadquery as cq
VectorLike = Union[Tuple[float, float], Tuple[float, float, float], cq.Vector]
"""
Assembly extensions: rotate(), translate()
"""
def _translate(self, vec: VectorLike):
"""
Moves the current assembly (without making a copy) by the specified translation vector
:param vec: The translation vector
"""
self.loc = self.loc * cq.Location(cq.Vector(vec))
return self
cq.Assembly.translate = _translate
def _rotate(self, axis: VectorLike, angle: float):
"""
Rotates the current assembly (without making a copy) around the axis of rotation
by the specified angle
:param axis: The axis of rotation (starting at the origin)
:type axis: a 3-tuple of floats
:param angle: the rotation angle, in degrees
:type angle: float
"""
self.loc = self.loc * cq.Location(cq.Vector(0, 0, 0), cq.Vector(axis), angle)
return self
cq.Assembly.rotate = _rotate
"""
Vector extensions: rotateX(), rotateY(), rotateZ(), pointToVector()
"""
def _vector_rotate_x(self, angle: float) -> cq.Vector:
""" cq.Vector rotate angle in degrees about x-axis """
return cq.Vector(
self.x,
self.y * cos(radians(angle)) - self.z * sin(radians(angle)),
self.y * sin(radians(angle)) + self.z * cos(radians(angle)),
)
cq.Vector.rotateX = _vector_rotate_x
def _vector_rotate_y(self, angle: float) -> cq.Vector:
""" cq.Vector rotate angle in degrees about y-axis """
return cq.Vector(
self.x * cos(radians(angle)) + self.z * sin(radians(angle)),
self.y,
-self.x * sin(radians(angle)) + self.z * cos(radians(angle)),
)
cq.Vector.rotateY = _vector_rotate_y
def _vector_rotate_z(self, angle: float) -> cq.Vector:
""" cq.Vector rotate angle in degrees about z-axis """
return cq.Vector(
self.x * cos(radians(angle)) - self.y * sin(radians(angle)),
self.x * sin(radians(angle)) + self.y * cos(radians(angle)),
self.z,
)
cq.Vector.rotateZ = _vector_rotate_z
def _point_to_vector(self, plane: str, offset: float = 0.0) -> cq.Vector:
""" map a 2D point on the XY plane to 3D space on the given plane at the offset """
if not isinstance(plane, str) or plane not in ["XY", "XZ", "YZ"]:
raise ValueError("plane " + str(plane) + " must be one of: XY,XZ,YZ")
if plane == "XY":
mapped_point = cq.Vector(self.x, self.y, offset)
elif plane == "XZ":
mapped_point = cq.Vector(self.x, offset, self.y)
else: # YZ
mapped_point = cq.Vector(offset, self.x, self.y)
return mapped_point
cq.Vector.pointToVector = _point_to_vector
"""
Vertex extensions: __add__(), __sub__(), __str__()
"""
def __vertex_add__(
self, other: Union[cq.Vertex, cq.Vector, Tuple[float, float, float]]
) -> cq.Vertex:
""" Add a Vector or tuple of floats to a Vertex """
if isinstance(other, cq.Vertex):
new_vertex = cq.Vertex.makeVertex(
self.X + other.X, self.Y + other.Y, self.Z + other.Z
)
elif isinstance(other, (cq.Vector, tuple)):
new_other = cq.Vector(other)
new_vertex = cq.Vertex.makeVertex(
self.X + new_other.x, self.Y + new_other.y, self.Z + new_other.z
)
else:
raise TypeError(
"Vertex addition only supports Vertex,Vector or tuple(float,float,float) as input"
)
return new_vertex
cq.Vertex.__add__ = __vertex_add__
def __vertex_sub__(self, other: Union[cq.Vertex, cq.Vector, tuple]) -> cq.Vertex:
""" Subtract a Vector or tuple of floats to a Vertex """
if isinstance(other, cq.Vertex):
new_vertex = cq.Vertex.makeVertex(
self.X - other.X, self.Y - other.Y, self.Z - other.Z
)
elif isinstance(other, (cq.Vector, tuple)):
new_other = cq.Vector(other)
new_vertex = cq.Vertex.makeVertex(
self.X - new_other.x, self.Y - new_other.y, self.Z - new_other.z
)
else:
raise TypeError(
"Vertex subtraction only supports Vertex,Vector or tuple(float,float,float) as input"
)
return new_vertex
cq.Vertex.__sub__ = __vertex_sub__
def __vertex_str__(self) -> str:
""" Display a Vertex """
return f"Vertex: ({self.X}, {self.Y}, {self.Z})"
cq.Vertex.__str__ = __vertex_str__
def _vertex_to_vector(self) -> cq.Vector:
""" Convert a Vertex to a Vector """
return cq.Vector(self.toTuple())
cq.Vertex.toVector = _vertex_to_vector
| [
"cadquery.Vector",
"cadquery.Vertex.makeVertex",
"math.radians"
] | [((3310, 3343), 'cadquery.Vector', 'cq.Vector', (['self.x', 'self.y', 'offset'], {}), '(self.x, self.y, offset)\n', (3319, 3343), True, 'import cadquery as cq\n'), ((3854, 3928), 'cadquery.Vertex.makeVertex', 'cq.Vertex.makeVertex', (['(self.X + other.X)', '(self.Y + other.Y)', '(self.Z + other.Z)'], {}), '(self.X + other.X, self.Y + other.Y, self.Z + other.Z)\n', (3874, 3928), True, 'import cadquery as cq\n'), ((4568, 4642), 'cadquery.Vertex.makeVertex', 'cq.Vertex.makeVertex', (['(self.X - other.X)', '(self.Y - other.Y)', '(self.Z - other.Z)'], {}), '(self.X - other.X, self.Y - other.Y, self.Z - other.Z)\n', (4588, 4642), True, 'import cadquery as cq\n'), ((1311, 1325), 'cadquery.Vector', 'cq.Vector', (['vec'], {}), '(vec)\n', (1320, 1325), True, 'import cadquery as cq\n'), ((1771, 1789), 'cadquery.Vector', 'cq.Vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1780, 1789), True, 'import cadquery as cq\n'), ((1791, 1806), 'cadquery.Vector', 'cq.Vector', (['axis'], {}), '(axis)\n', (1800, 1806), True, 'import cadquery as cq\n'), ((3391, 3424), 'cadquery.Vector', 'cq.Vector', (['self.x', 'offset', 'self.y'], {}), '(self.x, offset, self.y)\n', (3400, 3424), True, 'import cadquery as cq\n'), ((3464, 3497), 'cadquery.Vector', 'cq.Vector', (['offset', 'self.x', 'self.y'], {}), '(offset, self.x, self.y)\n', (3473, 3497), True, 'import cadquery as cq\n'), ((4019, 4035), 'cadquery.Vector', 'cq.Vector', (['other'], {}), '(other)\n', (4028, 4035), True, 'import cadquery as cq\n'), ((4057, 4147), 'cadquery.Vertex.makeVertex', 'cq.Vertex.makeVertex', (['(self.X + new_other.x)', '(self.Y + new_other.y)', '(self.Z + new_other.z)'], {}), '(self.X + new_other.x, self.Y + new_other.y, self.Z +\n new_other.z)\n', (4077, 4147), True, 'import cadquery as cq\n'), ((4733, 4749), 'cadquery.Vector', 'cq.Vector', (['other'], {}), '(other)\n', (4742, 4749), True, 'import cadquery as cq\n'), ((4771, 4861), 'cadquery.Vertex.makeVertex', 'cq.Vertex.makeVertex', (['(self.X - new_other.x)', '(self.Y - new_other.y)', '(self.Z - new_other.z)'], {}), '(self.X - new_other.x, self.Y - new_other.y, self.Z -\n new_other.z)\n', (4791, 4861), True, 'import cadquery as cq\n'), ((2116, 2130), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2123, 2130), False, 'from math import sin, cos, radians\n'), ((2147, 2161), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2154, 2161), False, 'from math import sin, cos, radians\n'), ((2185, 2199), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2192, 2199), False, 'from math import sin, cos, radians\n'), ((2216, 2230), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2223, 2230), False, 'from math import sin, cos, radians\n'), ((2437, 2451), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2444, 2451), False, 'from math import sin, cos, radians\n'), ((2468, 2482), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2475, 2482), False, 'from math import sin, cos, radians\n'), ((2523, 2537), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2530, 2537), False, 'from math import sin, cos, radians\n'), ((2554, 2568), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2561, 2568), False, 'from math import sin, cos, radians\n'), ((2775, 2789), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2782, 2789), False, 'from math import sin, cos, radians\n'), ((2806, 2820), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2813, 2820), False, 'from math import sin, cos, radians\n'), ((2844, 2858), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2851, 2858), False, 'from math import sin, cos, radians\n'), ((2875, 2889), 'math.radians', 'radians', (['angle'], {}), '(angle)\n', (2882, 2889), False, 'from math import sin, cos, radians\n')] |
# -*- coding: utf-8 -*-
from pyramid.events import ContextFound
from pkg_resources import iter_entry_points
from pyramid.interfaces import IRequest
from openprocurement.api.interfaces import IContentConfigurator
from openprocurement.auctions.core.models import IAuction
from openprocurement.auctions.core.design import add_design
from openprocurement.auctions.core.utils import set_logging_context, extract_auction, register_auction_procurementMethodType, isAuction, auction_from_data
from openprocurement.auctions.core.adapters import AuctionConfigurator
def includeme(config):
add_design()
config.add_subscriber(set_logging_context, ContextFound)
config.scan("openprocurement.auctions.core.views")
# auction procurementMethodType plugins support
config.add_route_predicate('auctionsprocurementMethodType', isAuction)
config.registry.auction_procurementMethodTypes = {}
config.add_request_method(extract_auction, 'auction', reify=True)
config.add_request_method(auction_from_data)
config.add_directive('add_auction_procurementMethodType', register_auction_procurementMethodType)
config.registry.registerAdapter(AuctionConfigurator, (IAuction, IRequest),
IContentConfigurator)
plugins = config.registry.app_meta(['plugins'])
for entry_point in iter_entry_points('openprocurement.auctions.core.plugins'):
if not plugins or entry_point.name in plugins:
plugin = entry_point.load()
plugin(config)
| [
"pkg_resources.iter_entry_points",
"openprocurement.auctions.core.design.add_design"
] | [((585, 597), 'openprocurement.auctions.core.design.add_design', 'add_design', ([], {}), '()\n', (595, 597), False, 'from openprocurement.auctions.core.design import add_design\n'), ((1332, 1390), 'pkg_resources.iter_entry_points', 'iter_entry_points', (['"""openprocurement.auctions.core.plugins"""'], {}), "('openprocurement.auctions.core.plugins')\n", (1349, 1390), False, 'from pkg_resources import iter_entry_points\n')] |
from __future__ import print_function
import os
import keras
from keras.layers import Dense,Flatten,Conv2D,MaxPooling2D,Activation,Input,Concatenate,Dropout,GlobalAveragePooling2D
from keras.models import Model
import time
from keras.datasets import cifar10
from keras.optimizers import SGD
from keras.utils import get_file
from keras.preprocessing import image
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
#SqueezeNet
#backend=tf channels_last (rows,cols,channels)
def fire_module(input,squeeze_filters,expand_filters):
squeeze=Conv2D(squeeze_filters,
kernel_size=(1,1),
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
data_format='channels_last'
)(input)
relu_squeeze=Activation('relu')(squeeze)
expand1=Conv2D(expand_filters,
kernel_size=(1,1),
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
data_format='channels_last'
)(relu_squeeze)
relu_expand1=Activation('relu')(expand1)
expand2=Conv2D(expand_filters,
kernel_size=(3,3),
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
data_format='channels_last'
)(relu_squeeze)
relu_expand2=Activation('relu')(expand2)
merge=Concatenate(axis=3)([relu_expand1,relu_expand2])
output=merge
return output
def SqueezeNet(input_shape,num_classes,weight=None):
input=Input(shape=input_shape)
conv_1=Conv2D(96,
kernel_size=(7,7),
strides=2,
padding='same',
kernel_initializer='glorot_uniform'
)(input)
pool_1=MaxPooling2D(pool_size=(3,3),
strides=2)(conv_1)
fire_2=fire_module(pool_1,16,64)
fire_3=fire_module(fire_2,16,64)
fire_4=fire_module(fire_3,32,128)
pool_4=MaxPooling2D(pool_size=(3,3),
strides=2)(fire_4)
fire_5=fire_module(pool_4,32,128)
fire_6=fire_module(fire_5,48,192)
fire_7=fire_module(fire_6,48,192)
fire_8=fire_module(fire_7,64,256)
pool_8=MaxPooling2D(pool_size=(3,3),
strides=2)(fire_8)
fire_9=fire_module(pool_8,64,256)
drop=Dropout(0.5)(fire_9)
conv_10=Conv2D(num_classes,
kernel_size=(1,1),
strides=1,
padding='same',
kernel_initializer='glorot_uniform'
)(drop)
relu_11=Activation('relu')(conv_10)
avgpool=GlobalAveragePooling2D()(relu_11)
flatten=Flatten()(relu_11)
dense=Dense(64)(flatten)
relu_dense=Activation('relu')(dense)
dense=Dense(2)(relu_dense)
softmax1=Activation('softmax')(dense)
softmax=Activation('softmax')(avgpool)
print(softmax)
output=softmax
model=Model(input=input,output=output)
return model
def main():
t0=time.time()
batch_size = 32
num_classes = 10
epochs = 20
data_augmentation = True
print('start')
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape)
x_train_n = np.zeros((x_train.shape[0], 224, 224, 3),dtype = 'float16')
x_test_n = np.zeros((x_test.shape[0], 224, 224, 3),dtype = 'float16')
for i in range(x_train.shape[0]):
if i%5000==0:
print(i)
data=x_train[i]
img=image.array_to_img(data)
img2=img.resize((224,224))
data2=image.img_to_array(img2)
x_train_n[i,:]=data2
for i in range(x_test.shape[0]):
if i%2000==0:
print(i)
data=x_test[i]
img=image.array_to_img(data)
img2=img.resize((224,224))
data2=image.img_to_array(img2)
x_test_n[i,:]=data2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train_n /= 255.0
x_test_n /= 255.0
model=SqueezeNet((224,224,3),10)
model.summary()
print('wow')
print(time.time()-t0)
sgd=SGD(lr=0.01,decay=0.0002,momentum=0.9)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train_n, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test_n, y_test),
shuffle=True)
if __name__=='__main__':
main() | [
"keras.preprocessing.image.img_to_array",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.datasets.cifar10.load_data",
"keras.layers.MaxPooling2D",
"keras.layers.Concatenate",
"keras.utils.to_categorical",
"numpy.zeros",
"keras.layers.Input",
"keras.optimizers.SGD",
"keras.models.Model",
... | [((1747, 1771), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1752, 1771), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3171, 3204), 'keras.models.Model', 'Model', ([], {'input': 'input', 'output': 'output'}), '(input=input, output=output)\n', (3176, 3204), False, 'from keras.models import Model\n'), ((3262, 3273), 'time.time', 'time.time', ([], {}), '()\n', (3271, 3273), False, 'import time\n'), ((3434, 3453), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (3451, 3453), False, 'from keras.datasets import cifar10\n'), ((3499, 3557), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0], 224, 224, 3)'], {'dtype': '"""float16"""'}), "((x_train.shape[0], 224, 224, 3), dtype='float16')\n", (3507, 3557), True, 'import numpy as np\n'), ((3575, 3632), 'numpy.zeros', 'np.zeros', (['(x_test.shape[0], 224, 224, 3)'], {'dtype': '"""float16"""'}), "((x_test.shape[0], 224, 224, 3), dtype='float16')\n", (3583, 3632), True, 'import numpy as np\n'), ((4156, 4204), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (4182, 4204), False, 'import keras\n'), ((4219, 4266), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (4245, 4266), False, 'import keras\n'), ((4431, 4471), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(0.0002)', 'momentum': '(0.9)'}), '(lr=0.01, decay=0.0002, momentum=0.9)\n', (4434, 4471), False, 'from keras.optimizers import SGD\n'), ((615, 755), 'keras.layers.Conv2D', 'Conv2D', (['squeeze_filters'], {'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""', 'data_format': '"""channels_last"""'}), "(squeeze_filters, kernel_size=(1, 1), strides=1, padding='same',\n kernel_initializer='glorot_uniform', data_format='channels_last')\n", (621, 755), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((897, 915), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (907, 915), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((938, 1077), 'keras.layers.Conv2D', 'Conv2D', (['expand_filters'], {'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""', 'data_format': '"""channels_last"""'}), "(expand_filters, kernel_size=(1, 1), strides=1, padding='same',\n kernel_initializer='glorot_uniform', data_format='channels_last')\n", (944, 1077), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1226, 1244), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1236, 1244), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1267, 1406), 'keras.layers.Conv2D', 'Conv2D', (['expand_filters'], {'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""', 'data_format': '"""channels_last"""'}), "(expand_filters, kernel_size=(3, 3), strides=1, padding='same',\n kernel_initializer='glorot_uniform', data_format='channels_last')\n", (1273, 1406), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1555, 1573), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1565, 1573), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1594, 1613), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (1605, 1613), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1784, 1882), 'keras.layers.Conv2D', 'Conv2D', (['(96)'], {'kernel_size': '(7, 7)', 'strides': '(2)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""'}), "(96, kernel_size=(7, 7), strides=2, padding='same',\n kernel_initializer='glorot_uniform')\n", (1790, 1882), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1993, 2034), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)'}), '(pool_size=(3, 3), strides=2)\n', (2005, 2034), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2194, 2235), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)'}), '(pool_size=(3, 3), strides=2)\n', (2206, 2235), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2436, 2477), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)'}), '(pool_size=(3, 3), strides=2)\n', (2448, 2477), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2559, 2571), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2566, 2571), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2593, 2700), 'keras.layers.Conv2D', 'Conv2D', (['num_classes'], {'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""'}), "(num_classes, kernel_size=(1, 1), strides=1, padding='same',\n kernel_initializer='glorot_uniform')\n", (2599, 2700), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2816, 2834), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2826, 2834), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2857, 2881), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (2879, 2881), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2908, 2917), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2915, 2917), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2938, 2947), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (2943, 2947), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2973, 2991), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2983, 2991), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3010, 3018), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (3015, 3018), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3045, 3066), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3055, 3066), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3089, 3110), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3099, 3110), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3758, 3782), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['data'], {}), '(data)\n', (3776, 3782), False, 'from keras.preprocessing import image\n'), ((3834, 3858), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img2'], {}), '(img2)\n', (3852, 3858), False, 'from keras.preprocessing import image\n'), ((4009, 4033), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['data'], {}), '(data)\n', (4027, 4033), False, 'from keras.preprocessing import image\n'), ((4085, 4109), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img2'], {}), '(img2)\n', (4103, 4109), False, 'from keras.preprocessing import image\n'), ((4406, 4417), 'time.time', 'time.time', ([], {}), '()\n', (4415, 4417), False, 'import time\n')] |
# Generated by Django 3.2.9 on 2021-12-02 09:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'ordering': ('name',), 'verbose_name_plural': 'categories'},
),
]
| [
"django.db.migrations.AlterModelOptions"
] | [((213, 333), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""category"""', 'options': "{'ordering': ('name',), 'verbose_name_plural': 'categories'}"}), "(name='category', options={'ordering': ('name',\n ), 'verbose_name_plural': 'categories'})\n", (241, 333), False, 'from django.db import migrations\n')] |
'''
Created on 24.01.2018
@author: gregor
'''
import pandas as pd
from ipet.Key import ProblemStatusCodes, SolverStatusCodes, ObjectiveSenseCode
from ipet import Key
from ipet.misc import getInfinity as infty
from ipet.misc import isInfinite as isInf
import numpy as np
import logging
import sqlite3
logger = logging.getLogger(__name__)
DEFAULT_RELTOL = 1e-4
DEFAULT_FEASTOL = 1e-6
class SolufileMarkers:
OPT = "=opt="
INF = "=inf="
BEST = "=best="
UNKN = "=unkn="
BESTDUAL = "=bestdual="
FEAS = "=feas="
class DataBaseMarkers:
OPT = "opt"
INF = "inf"
BEST = "best"
class Validation:
'''
Validation of experiments by using external solution information
'''
__primalidx__ = 0
__dualidx__ = 1
__feas__ = 1e99
__infeas__ = 1e100
def __init__(self, solufilename : str = None, tol : float = DEFAULT_RELTOL, feastol : float = DEFAULT_FEASTOL):
'''
Validation constructor
Parameters
----------
solufilename : str
string with absolute or relative path to a solu file with reference information
tol : float
relative objective tolerance
feastol : float
relative feasibility tolerance
'''
if solufilename:
if solufilename.endswith(".solu"):
self.referencedict = self.readSoluFile(solufilename)
self.objsensedict = {}
else:
self.referencedict, self.objsensedict = self.connectToDataBase(solufilename)
logger.debug("Data base connection finished, {} items".format(len(self.referencedict.items())))
else:
self.referencedict, self.objsensedict = {}, {}
self.tol = tol
self.inconsistentset = set()
self.feastol = feastol
def set_tol(self, tol : float):
"""sets this validation's tol attribute
Parameters
----------
tol : float
new value for the tol for this validation
"""
self.tol = tol
def set_feastol(self, feastol : float):
"""sets this validation's feastol attribute
Parameters
----------
feastol : float
new value for the feastol for this validation
"""
self.feastol = feastol
def connectToDataBase(self, databasefilename):
"""connects this validation to a data base
"""
soludict = {}
objsensedict = {}
with sqlite3.connect(databasefilename) as conn:
c = conn.cursor()
c.execute('SELECT DISTINCT name, objsense,primbound,dualbound,status FROM instances')
for name, objsense, primbound, dualbound, status in c:
if name in soludict:
logger.warning("Warning: Duplicate name {} with different data in data base".format(name))
infotuple = [None, None]
if status == DataBaseMarkers.OPT:
infotuple[self.__primalidx__] = infotuple[self.__dualidx__] = primbound
elif status == DataBaseMarkers.BEST:
if primbound is not None:
infotuple[self.__primalidx__] = primbound
if dualbound is not None:
infotuple[self.__dualidx__] = dualbound
elif status == DataBaseMarkers.INF:
infotuple[self.__primalidx__] = self.__infeas__
objsensedict[name] = ObjectiveSenseCode.MAXIMIZE if objsense == "max" else ObjectiveSenseCode.MINIMIZE
soludict[name] = tuple(infotuple)
return soludict, objsensedict
def readSoluFile(self, solufilename : str) -> dict:
"""parse entire solu file into a dictionary with problem names as keys
Parameters:
-----------
solufilename : str
name of .solu file containing optimal or best known bounds for instances
Return
------
dict
dictionary with problem names as keys and best known primal and dual bounds for validation as entries.
"""
soludict = dict()
with open(solufilename, "r") as solufile:
for line in solufile:
if line.strip() == "":
continue
spline = line.split()
marker = spline[0]
problemname = spline[1]
infotuple = list(soludict.get(problemname, (None, None)))
if marker == SolufileMarkers.OPT:
infotuple[self.__primalidx__] = infotuple[self.__dualidx__] = float(spline[2])
elif marker == SolufileMarkers.BEST:
infotuple[self.__primalidx__] = float(spline[2])
elif marker == SolufileMarkers.BESTDUAL:
infotuple[self.__dualidx__] = float(spline[2])
elif marker == SolufileMarkers.FEAS:
infotuple[self.__primalidx__] = self.__feas__
elif marker == SolufileMarkers.INF:
infotuple[self.__primalidx__] = self.__infeas__
soludict[problemname] = tuple(infotuple)
return soludict
def getPbValue(self, pb : float, objsense : int) -> float:
"""returns a floating point value computed from a given primal bound
"""
if pd.isnull(pb):
pb = infty() if objsense == ObjectiveSenseCode.MINIMIZE else -infty()
return pb
def getDbValue(self, db : float, objsense : int) -> float :
"""returns a floating point value computed from a given primal bound
"""
if pd.isnull(db):
db = -infty() if objsense == ObjectiveSenseCode.MINIMIZE else infty()
return db
def isInconsistent(self, problemname : str) -> bool:
"""are there inconsistent results for this problem
Parameters
----------
problemname : str
name of a problem
Returns
-------
bool
True if inconsistent results were detected for this instance, False otherwise
"""
return problemname in self.inconsistentset
def isSolFeasible(self, x : pd.Series):
"""check if the solution is feasible within tolerances
"""
#
# respect solution checker output, if it exists
#
if x.get(Key.SolCheckerRead) is not None:
#
# if this column is not None, the solution checker output exists for at least some of the problems
# such that it is reasonable to assume that it should exist for all parsed problems
#
# recall that we explicitly assume that there has been a solution reported when this function is called
# if the solution checker failed to read in the solution, or the solution checker crashed and did
# not report the result of the check command, the solution was most likely infeasible.
#
if not pd.isnull(x.get(Key.SolCheckerRead)) and x.get(Key.SolCheckerRead):
if not pd.isnull(x.get(Key.SolCheckerFeas)) and x.get(Key.SolCheckerFeas):
return True
else:
return False
else:
return False
# compute the maximum violation of constraints, LP rows, bounds, and integrality
maxviol = max((x.get(key, 0.0) for key in
[Key.ViolationBds, Key.ViolationCons, Key.ViolationInt, Key.ViolationLP]))
return maxviol <= self.feastol
def isSolInfeasible(self, x : pd.Series):
"""check if the solution is infeasible within tolerances
Parameters
----------
x : Series or dict
series or dictionary representing single instance information
"""
#
# respect solution checker output, if it exists
#
if x.get(Key.SolCheckerRead) is not None:
if not pd.isnull(x.get(Key.SolCheckerRead)) and x.get(Key.SolCheckerRead):
if not pd.isnull(x.get(Key.SolCheckerFeas)) and x.get(Key.SolCheckerFeas):
return False
else:
return True
# compute the maximum violation of constraints, LP rows, bounds, and integrality
maxviol = max((x.get(key, 0.0) for key in [Key.ViolationBds, Key.ViolationCons, Key.ViolationInt, Key.ViolationLP]))
# if no violations have been recorded, no solution was found, and the solution is not infeasible.
if pd.isnull(maxviol):
return False
return maxviol > self.feastol
def getReferencePb(self, problemname : str) -> float:
"""get the reference primal bound for this instance
Parameters
----------
problemname : str
base name of a problem to access the reference data
Returns
-------
float or None
either a finite floating point value, or None
"""
reference = self.referencedict.get(problemname, (None, None))
if self.isUnkn(reference) or self.isInf(reference) or self.isFeas(reference):
return None
else:
return reference[self.__primalidx__]
def getReferenceDb(self, problemname : str) -> float:
"""get the reference primal bound for this instance
Parameters
----------
problemname : str
base name of a problem to access the reference data
Returns
-------
float or None
either a finite floating point value, or None
"""
reference = self.referencedict.get(problemname, (None, None))
if self.isUnkn(reference) or self.isInf(reference) or self.isFeas(reference):
return None
else:
return reference[self.__dualidx__]
def getObjSense(self, problemname : str, x : pd.Series):
"""get the objective sense of a problem
"""
if problemname in self.objsensedict:
return self.objsensedict[problemname]
elif not pd.isnull(x.get(Key.ObjectiveSense, None)):
return x.get(Key.ObjectiveSense)
else:
logger.warning("No objective sense for {}, assuming minimization".format(problemname))
return ObjectiveSenseCode.MINIMIZE
def validateSeries(self, x : pd.Series) -> str:
"""
validate the results of a problem
Parameters:
----------
x : Series
Data series that represents problem information parsed by a solver
"""
# print("{x.ProblemName} {x.PrimalBound} {x.DualBound} {x.SolverStatus}".format(x=x))
problemname = x.get(Key.ProblemName)
sstatus = x.get(Key.SolverStatus)
if not problemname:
return ProblemStatusCodes.Unknown
if pd.isnull(sstatus):
return ProblemStatusCodes.FailAbort
else:
#
# check feasibility
#
pb = x.get(Key.PrimalBound)
if self.isSolInfeasible(x) or not (pd.isnull(pb) or isInf(pb) or self.isLE(x.get(Key.ObjectiveLimit, -1e20), pb) or self.isSolFeasible(x)):
return ProblemStatusCodes.FailSolInfeasible
#
# check reference consistency
#
psc = self.isReferenceConsistent(x)
if psc != ProblemStatusCodes.Ok:
return psc
#
# report inconsistency among solvers.
#
elif self.isInconsistent(problemname):
return ProblemStatusCodes.FailInconsistent
return Key.solverToProblemStatusCode(sstatus)
def isInf(self, referencetuple : tuple) -> bool:
"""is this an infeasible reference?
Parameters:
-----------
referencetuple : tuple
tuple containing a primal and dual reference bound
Return:
-------
bool
True if reference bound is infeasible, False otherwise
"""
return referencetuple[self.__primalidx__] == self.__infeas__
def isFeas(self, referencetuple):
"""is this a feasible reference?
Parameters:
-----------
referencetuple : tuple
tuple containing a primal and dual reference bound
Return:
-------
bool
True if reference bound is feasible, False otherwise
"""
return referencetuple[self.__primalidx__] == self.__feas__
def isUnkn(self, referencetuple):
"""is this a reference tuple of an unknown instance?
"""
return referencetuple[self.__primalidx__] is None and referencetuple[self.__dualidx__] is None
def collectInconsistencies(self, df : pd.DataFrame):
"""collect individual results for primal and dual bounds and collect inconsistencies.
Parameters
----------
df : DataFrame
joined data of an experiment with several test runs
"""
# problems with inconsistent primal and dual bounds
self.inconsistentset = set()
self.bestpb = dict()
self.bestdb = dict()
df.apply(self.updateInconsistency, axis = 1)
def isPbReferenceConsistent(self, pb : float, referencedb : float, objsense : int) -> bool:
"""compare primal bound consistency against reference bound
Returns
-------
bool
True if the primal bound value is consistent with the reference dual bound
"""
if objsense == ObjectiveSenseCode.MINIMIZE:
if not self.isLE(referencedb, pb):
return False
else:
if not self.isGE(referencedb, pb):
return False
return True
def isDbReferenceConsistent(self, db : float, referencepb : float, objsense : int) -> bool:
"""compare dual bound consistency against reference bound
Returns
-------
bool
True if the dual bound value is consistent with the reference primal bound
"""
if objsense == ObjectiveSenseCode.MINIMIZE:
if not self.isGE(referencepb, db):
return False
else:
if not self.isLE(referencepb, db):
return False
return True
def isReferenceConsistent(self, x : pd.Series) -> str :
"""Check consistency with solution information
"""
problemname = x.get(Key.ProblemName)
pb = x.get(Key.PrimalBound)
db = x.get(Key.DualBound)
obs = self.getObjSense(problemname, x)
sstatus = x.get(Key.SolverStatus)
reference = self.referencedict.get(problemname, (None, None))
logger.debug("Checking against reference {} for problem {}".format(reference, problemname))
referencepb = self.getPbValue(reference[self.__primalidx__], obs)
referencedb = self.getDbValue(reference[self.__dualidx__], obs)
if self.isUnkn(reference):
return ProblemStatusCodes.Ok
elif self.isInf(reference):
if sstatus != SolverStatusCodes.Infeasible and not pd.isnull(pb) and not isInf(pb):
return ProblemStatusCodes.FailSolOnInfeasibleInstance
elif self.isFeas(reference):
if sstatus == SolverStatusCodes.Infeasible:
return ProblemStatusCodes.FailDualBound
else:
pb = self.getPbValue(pb, obs)
db = self.getDbValue(db, obs)
if not self.isPbReferenceConsistent(pb, referencedb, obs):
return ProblemStatusCodes.FailObjectiveValue
if sstatus == SolverStatusCodes.Infeasible and abs(referencepb) < infty():
return ProblemStatusCodes.FailDualBound
if not self.isDbReferenceConsistent(db, referencepb, obs):
return ProblemStatusCodes.FailDualBound
return ProblemStatusCodes.Ok
def updateInconsistency(self, x : pd.Series):
"""
method that is applied to every row of a data frame to update inconsistency information
"""
problemname = x.get(Key.ProblemName)
pb = x.get(Key.PrimalBound)
db = x.get(Key.DualBound)
obs = self.getObjSense(problemname, x)
if pd.isnull(obs):
obs = ObjectiveSenseCode.MINIMIZE
if not problemname:
return
#
# for inconsistency checks, we only consider problems that are consistent
# with the reference information.
#
if self.isReferenceConsistent(x) != ProblemStatusCodes.Ok:
return
# do not trust versions/settings/solvers that returned an infeasible solution
if self.isSolInfeasible(x) or (not pd.isnull(pb) and not self.isSolFeasible(x)):
return
pb = self.getPbValue(pb, obs)
db = self.getDbValue(db, obs)
bestpb = self.bestpb.get(problemname, np.inf if obs == ObjectiveSenseCode.MINIMIZE else -np.inf)
bestpb = min(bestpb, pb) if obs == ObjectiveSenseCode.MINIMIZE else max(bestpb, pb)
bestdb = self.bestdb.get(problemname, -np.inf if obs == ObjectiveSenseCode.MINIMIZE else np.inf)
if x.get(Key.SolverStatus) == SolverStatusCodes.Infeasible:
db = infty() if obs == ObjectiveSenseCode.MINIMIZE else -infty()
bestdb = max(bestdb, db) if obs == ObjectiveSenseCode.MINIMIZE else min(bestdb, db)
if (obs == ObjectiveSenseCode.MINIMIZE and not self.isLE(bestdb, bestpb)) or (obs == ObjectiveSenseCode.MAXIMIZE and not self.isGE(bestdb, bestpb)):
self.inconsistentset.add(problemname)
else:
self.bestdb[problemname] = bestdb
self.bestpb[problemname] = bestpb
def validate(self, d : pd.DataFrame):
"""validates the solutions against external information and inconsistencies
Validation scans data twice:
1) Collection of inconsistencies (contradicting primal and dual bounds)
2) Comparison against external validation information from solu file
Parameters
d : DataFrame
joined data from an experiment.
"""
logger.info("Validating with a (gap) tolerance of {} and a feasibility tolerance of {}.".format(self.tol, self.feastol))
#
# 1) collect inconsistencies
#
self.collectInconsistencies(d)
#
# 2) validate everything considering inconsistencies and validation info from reference information.
#
return d.apply(self.validateSeries, axis = 1)
def isGE(self, a : float, b : float) -> bool:
"""tolerance comparison of a >= b
"""
return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)
def isLE(self, a : float, b : float) -> bool:
"""tolerance comparison of a <= b
"""
return self.isGE(b, a)
| [
"logging.getLogger",
"pandas.isnull",
"sqlite3.connect",
"ipet.Key.solverToProblemStatusCode",
"ipet.misc.isInfinite",
"ipet.misc.getInfinity"
] | [((311, 338), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (328, 338), False, 'import logging\n'), ((5397, 5410), 'pandas.isnull', 'pd.isnull', (['pb'], {}), '(pb)\n', (5406, 5410), True, 'import pandas as pd\n'), ((5677, 5690), 'pandas.isnull', 'pd.isnull', (['db'], {}), '(db)\n', (5686, 5690), True, 'import pandas as pd\n'), ((8611, 8629), 'pandas.isnull', 'pd.isnull', (['maxviol'], {}), '(maxviol)\n', (8620, 8629), True, 'import pandas as pd\n'), ((10938, 10956), 'pandas.isnull', 'pd.isnull', (['sstatus'], {}), '(sstatus)\n', (10947, 10956), True, 'import pandas as pd\n'), ((11731, 11769), 'ipet.Key.solverToProblemStatusCode', 'Key.solverToProblemStatusCode', (['sstatus'], {}), '(sstatus)\n', (11760, 11769), False, 'from ipet import Key\n'), ((16386, 16400), 'pandas.isnull', 'pd.isnull', (['obs'], {}), '(obs)\n', (16395, 16400), True, 'import pandas as pd\n'), ((2501, 2534), 'sqlite3.connect', 'sqlite3.connect', (['databasefilename'], {}), '(databasefilename)\n', (2516, 2534), False, 'import sqlite3\n'), ((5429, 5436), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (5434, 5436), True, 'from ipet.misc import getInfinity as infty\n'), ((5766, 5773), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (5771, 5773), True, 'from ipet.misc import getInfinity as infty\n'), ((17388, 17395), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (17393, 17395), True, 'from ipet.misc import getInfinity as infty\n'), ((5486, 5493), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (5491, 5493), True, 'from ipet.misc import getInfinity as infty\n'), ((5710, 5717), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (5715, 5717), True, 'from ipet.misc import getInfinity as infty\n'), ((16858, 16871), 'pandas.isnull', 'pd.isnull', (['pb'], {}), '(pb)\n', (16867, 16871), True, 'import pandas as pd\n'), ((17440, 17447), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (17445, 17447), True, 'from ipet.misc import getInfinity as infty\n'), ((11169, 11182), 'pandas.isnull', 'pd.isnull', (['pb'], {}), '(pb)\n', (11178, 11182), True, 'import pandas as pd\n'), ((11186, 11195), 'ipet.misc.isInfinite', 'isInf', (['pb'], {}), '(pb)\n', (11191, 11195), True, 'from ipet.misc import isInfinite as isInf\n'), ((15247, 15260), 'pandas.isnull', 'pd.isnull', (['pb'], {}), '(pb)\n', (15256, 15260), True, 'import pandas as pd\n'), ((15269, 15278), 'ipet.misc.isInfinite', 'isInf', (['pb'], {}), '(pb)\n', (15274, 15278), True, 'from ipet.misc import isInfinite as isInf\n'), ((15810, 15817), 'ipet.misc.getInfinity', 'infty', ([], {}), '()\n', (15815, 15817), True, 'from ipet.misc import getInfinity as infty\n')] |
from copy import deepcopy
from hashlib import sha256
import os
import unittest
from google.protobuf.timestamp_pb2 import Timestamp
from blindai.pb.securedexchange_pb2 import (
Payload,
)
from blindai.client import (
RunModelResponse,
UploadModelResponse,
)
from blindai.dcap_attestation import Policy
from blindai.utils.errors import SignatureError, AttestationError
from .covidnet import get_input, get_model
exec_run = os.path.join(os.path.dirname(__file__), "exec_run.proof")
exec_upload = os.path.join(os.path.dirname(__file__), "exec_upload.proof")
tmp_path = os.path.join(os.path.dirname(__file__), "tmp_exec.proof")
policy_file = os.path.join(os.path.dirname(__file__), "policy.toml")
class TestProof(unittest.TestCase):
def test_parse_run(self):
response = RunModelResponse()
response.load_from_file(exec_run)
self.assertTrue(response.is_signed())
response2 = RunModelResponse()
with open(exec_run, "rb") as file:
response2.load_from_bytes(file.read())
self.assertEqual(response.payload, response2.payload)
self.assertEqual(response.signature, response2.signature)
self.assertEqual(response.attestation, response2.attestation)
self.assertEqual(response.output, response2.output)
response3 = RunModelResponse()
response3.load_from_bytes(response.as_bytes())
self.assertEqual(response.payload, response3.payload)
self.assertEqual(response.signature, response3.signature)
self.assertEqual(response.attestation, response3.attestation)
self.assertEqual(response.output, response3.output)
response3.save_to_file(tmp_path)
response4 = RunModelResponse()
response4.load_from_file(tmp_path)
self.assertEqual(response.payload, response4.payload)
self.assertEqual(response.signature, response4.signature)
self.assertEqual(response.attestation, response4.attestation)
self.assertEqual(response.output, response4.output)
def test_parse_upload(self):
response = UploadModelResponse()
response.load_from_file(exec_upload)
self.assertTrue(response.is_signed())
response2 = UploadModelResponse()
with open(exec_upload, "rb") as file:
response2.load_from_bytes(file.read())
self.assertEqual(response.payload, response2.payload)
self.assertEqual(response.signature, response2.signature)
self.assertEqual(response.attestation, response2.attestation)
response3 = UploadModelResponse()
response3.load_from_bytes(response.as_bytes())
self.assertEqual(response.payload, response3.payload)
self.assertEqual(response.signature, response3.signature)
self.assertEqual(response.attestation, response3.attestation)
response3.save_to_file(tmp_path)
response4 = UploadModelResponse()
response4.load_from_file(tmp_path)
self.assertEqual(response.payload, response4.payload)
self.assertEqual(response.signature, response4.signature)
self.assertEqual(response.attestation, response4.attestation)
def test_validate_run(self):
response = RunModelResponse()
response.load_from_file(exec_run)
policy = Policy.from_file(policy_file)
response.validate(
get_input(),
policy=policy,
)
# Not signed
response2 = deepcopy(response)
response2.signature = None
response2.attestation = None
with self.assertRaises(SignatureError):
response2.validate(
get_input(),
policy=policy,
)
# Quote validation
response2 = deepcopy(response)
response2.attestation.quote += b"a"
with self.assertRaises(AttestationError):
response2.validate(
get_input(),
policy=policy,
)
response2 = deepcopy(response)
response2.attestation.enclave_held_data += b"a"
with self.assertRaises(AttestationError):
response2.validate(
get_input(),
policy=policy,
)
# Payload validation
response2 = deepcopy(response)
payload = Payload.FromString(response2.payload)
payload.run_model_payload.output[0] += 0.1
response2.payload = payload.SerializeToString()
with self.assertRaises(SignatureError):
response2.validate(
get_input(),
policy=policy,
)
# Input validation
response2 = deepcopy(response)
data = deepcopy(get_input())
data[4] += 1
with self.assertRaises(SignatureError):
response2.validate(
data,
policy=policy,
)
# Using file
response.validate(
get_input(),
policy_file=policy_file,
)
def test_validate_upload(self):
response = UploadModelResponse()
response.load_from_file(exec_upload)
policy = Policy.from_file(policy_file)
model_hash = sha256(get_model()).digest()
response.validate(
model_hash,
policy=policy,
)
# Not signed
response2 = deepcopy(response)
response2.signature = None
response2.attestation = None
with self.assertRaises(SignatureError):
response2.validate(
model_hash,
policy=policy,
)
# Quote validation
response2 = deepcopy(response)
response2.attestation.quote += b"a"
with self.assertRaises(AttestationError):
response2.validate(
model_hash,
policy=policy,
)
response2 = deepcopy(response)
response2.attestation.enclave_held_data += b"a"
with self.assertRaises(AttestationError):
response2.validate(
model_hash,
policy=policy,
)
# Payload validation
response2 = deepcopy(response)
payload = Payload.FromString(response2.payload)
payload.send_model_payload.model_hash = (
b"1" + payload.send_model_payload.model_hash[1:]
)
response2.payload = payload.SerializeToString()
with self.assertRaises(SignatureError):
response2.validate(
model_hash,
policy=policy,
)
# Input validation
response2 = deepcopy(response)
new_hash = model_hash[:5] + b"1" + model_hash[6:]
with self.assertRaises(SignatureError):
response2.validate(
new_hash,
policy=policy,
)
# Using file
response.validate(
model_hash,
policy_file=policy_file,
)
| [
"blindai.client.RunModelResponse",
"blindai.client.UploadModelResponse",
"os.path.dirname",
"blindai.pb.securedexchange_pb2.Payload.FromString",
"copy.deepcopy",
"blindai.dcap_attestation.Policy.from_file"
] | [((450, 475), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (465, 475), False, 'import os\n'), ((522, 547), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (537, 547), False, 'import os\n'), ((594, 619), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (609, 619), False, 'import os\n'), ((666, 691), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (681, 691), False, 'import os\n'), ((795, 813), 'blindai.client.RunModelResponse', 'RunModelResponse', ([], {}), '()\n', (811, 813), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((924, 942), 'blindai.client.RunModelResponse', 'RunModelResponse', ([], {}), '()\n', (940, 942), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((1317, 1335), 'blindai.client.RunModelResponse', 'RunModelResponse', ([], {}), '()\n', (1333, 1335), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((1712, 1730), 'blindai.client.RunModelResponse', 'RunModelResponse', ([], {}), '()\n', (1728, 1730), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((2086, 2107), 'blindai.client.UploadModelResponse', 'UploadModelResponse', ([], {}), '()\n', (2105, 2107), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((2221, 2242), 'blindai.client.UploadModelResponse', 'UploadModelResponse', ([], {}), '()\n', (2240, 2242), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((2560, 2581), 'blindai.client.UploadModelResponse', 'UploadModelResponse', ([], {}), '()\n', (2579, 2581), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((2898, 2919), 'blindai.client.UploadModelResponse', 'UploadModelResponse', ([], {}), '()\n', (2917, 2919), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((3215, 3233), 'blindai.client.RunModelResponse', 'RunModelResponse', ([], {}), '()\n', (3231, 3233), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((3293, 3322), 'blindai.dcap_attestation.Policy.from_file', 'Policy.from_file', (['policy_file'], {}), '(policy_file)\n', (3309, 3322), False, 'from blindai.dcap_attestation import Policy\n'), ((3456, 3474), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (3464, 3474), False, 'from copy import deepcopy\n'), ((3750, 3768), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (3758, 3768), False, 'from copy import deepcopy\n'), ((3990, 4008), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (3998, 4008), False, 'from copy import deepcopy\n'), ((4271, 4289), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (4279, 4289), False, 'from copy import deepcopy\n'), ((4308, 4345), 'blindai.pb.securedexchange_pb2.Payload.FromString', 'Payload.FromString', (['response2.payload'], {}), '(response2.payload)\n', (4326, 4345), False, 'from blindai.pb.securedexchange_pb2 import Payload\n'), ((4656, 4674), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (4664, 4674), False, 'from copy import deepcopy\n'), ((5058, 5079), 'blindai.client.UploadModelResponse', 'UploadModelResponse', ([], {}), '()\n', (5077, 5079), False, 'from blindai.client import RunModelResponse, UploadModelResponse\n'), ((5143, 5172), 'blindai.dcap_attestation.Policy.from_file', 'Policy.from_file', (['policy_file'], {}), '(policy_file)\n', (5159, 5172), False, 'from blindai.dcap_attestation import Policy\n'), ((5355, 5373), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (5363, 5373), False, 'from copy import deepcopy\n'), ((5648, 5666), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (5656, 5666), False, 'from copy import deepcopy\n'), ((5887, 5905), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (5895, 5905), False, 'from copy import deepcopy\n'), ((6168, 6186), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (6176, 6186), False, 'from copy import deepcopy\n'), ((6205, 6242), 'blindai.pb.securedexchange_pb2.Payload.FromString', 'Payload.FromString', (['response2.payload'], {}), '(response2.payload)\n', (6223, 6242), False, 'from blindai.pb.securedexchange_pb2 import Payload\n'), ((6622, 6640), 'copy.deepcopy', 'deepcopy', (['response'], {}), '(response)\n', (6630, 6640), False, 'from copy import deepcopy\n')] |
from setuptools import find_packages, setup
PACKAGE_NAME = "up-bank-api"
VERSION = "0.3.2"
PROJECT_URL = "https://github.com/jcwillox/up-bank-api"
PROJECT_AUTHOR = "<NAME>"
DOWNLOAD_URL = f"{PROJECT_URL}/archive/{VERSION}.zip"
PACKAGES = find_packages()
with open("README.md", "r", encoding="UTF-8") as file:
LONG_DESCRIPTION = file.read()
if __name__ == "__main__":
setup(
name=PACKAGE_NAME,
version=VERSION,
url=PROJECT_URL,
download_url=DOWNLOAD_URL,
author=PROJECT_AUTHOR,
author_email="",
packages=PACKAGES,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
python_requires=">=3.7",
install_requires=["requests>=2.14.0"],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| [
"setuptools.find_packages",
"setuptools.setup"
] | [((239, 254), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (252, 254), False, 'from setuptools import find_packages, setup\n'), ((378, 880), 'setuptools.setup', 'setup', ([], {'name': 'PACKAGE_NAME', 'version': 'VERSION', 'url': 'PROJECT_URL', 'download_url': 'DOWNLOAD_URL', 'author': 'PROJECT_AUTHOR', 'author_email': '""""""', 'packages': 'PACKAGES', 'long_description': 'LONG_DESCRIPTION', 'long_description_content_type': '"""text/markdown"""', 'python_requires': '""">=3.7"""', 'install_requires': "['requests>=2.14.0']", 'classifiers': "['Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']"}), "(name=PACKAGE_NAME, version=VERSION, url=PROJECT_URL, download_url=\n DOWNLOAD_URL, author=PROJECT_AUTHOR, author_email='', packages=PACKAGES,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n 'text/markdown', python_requires='>=3.7', install_requires=[\n 'requests>=2.14.0'], classifiers=[\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'])\n", (383, 880), False, 'from setuptools import find_packages, setup\n')] |
#main file to run the scripts that generate the data based off of the
#guassian .out files
#and Sauron forged in secret a master ring...
from IPython import get_ipython;
get_ipython().magic('reset -sf')
import pandas as pd
import glob
import os
from rdkit import Chem
from Environmental_PAH_Mutagenicity.read_ccris_data import convert_xml_xlsx
# from Environmental_PAH_Mutagenicity.read_ccris_data import CIRconvert
from Environmental_PAH_Mutagenicity.gethlenergy import gethlenergy
from Environmental_PAH_Mutagenicity.get_padel_data import get_padel_data
# from Environmental_PAH_Mutagenicity.gethlenergy import gethlenergy
from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_neut_energy
from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_posneg_energy
#a handy script for creating the guassian inputs from smiles codes
#not used in this file
def initalize_guass_input(filename, smiles):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
AllChem.EmbedMolecule(m,randomSeed=0xf00d)
# AllChem.MMFFOptimizeMolecule(m)
coord_3d = Chem.MolToMolBlock(m)
coordArray = coord_3d.splitlines()
actualCoords = []
for c in coordArray:
if len(c) == 69:
chunks = c.split()
actualCoords.extend([chunks[3],' ',chunks[0], ' ',chunks[1], ' ',chunks[2], '\n'])
#I want to overwrite the entire file, so I need to open it back up in write mode.
list_of_lines = list(range(0,8))
list_of_lines[0] = "%Nproc=6\n"
list_of_lines[1] = "%mem=20GB\n"
list_of_lines[2] = "#b3lyp/6-311g(d,p) opt=maxcycle=640 freq nosymm scf=(maxcycle=999) gfinput iop(6/7=3) "
list_of_lines[3] = '\n'
list_of_lines[4] = '\n'
list_of_lines.insert(5, (smiles + "\n"))
list_of_lines[6] = "\n0 1\n"
list_of_lines[7] = "".join(actualCoords)
list_of_lines[8] = '\n \n \n'
a_file = open(filename, "w")
a_file.writelines(list_of_lines)
a_file.close()
return
# testDF = convert_xml_xlsx("test_CCRIS_data.xml")
runningList = []
bigDataFrame = pd.DataFrame()
path = r'C:\ResearchWorkingDirectory\MutagenicityDataMar\completed'
verticals_path = r'C:\ResearchWorkingDirectory\MutagenicityDataMar\posneg'
i = 0
for filename in glob.glob(os.path.join(path,'*.OUT')):
#get the smiles from the file, if we already have this smiles, skip and go to next.
#Use this to get the appropriate verticals
short_filename = filename.split('\\')[-1]
base_filename = short_filename.split('.')[0]
if os.path.exists(os.path.join(verticals_path, (base_filename +'_pos.out') )):
ip_eng = read_posneg_energy(verticals_path, (base_filename +'_pos.out'))
else:
print((base_filename +'_pos.out') + ' not found')
if os.path.exists(os.path.join(verticals_path, (base_filename +'_neg.out') )):
ea_eng = read_posneg_energy(verticals_path, (base_filename +'_neg.out'))
else:
print((base_filename +'_neg.out') + ' not found')
prevLine = []
curSmiles = []
with open(filename)as searchfile:
i = i+1
print(i)
#************************
p = 'not found'
c = 'not found'
t = 'not found'
for line in searchfile:
#***************************************************************
#Get the smiles code that we have in the top of every input card
left,sep,right=line.partition('Symbolic Z-matrix:')
#as long as the smiles code is in the line this code will find it.
#there can be other stuff there also, as long as the smiles code
#is seperated by at least one space.
if sep:
# print(sep)
# print(prevLine)
p = prevLine2. split()[-1]
if set(p).issubset(r' CcOo=()-[]123456789+@#H\//') and ('c' in set(p) or 'C' in set(p) ):
# print(p)
curSmiles = p
# print(prevLine)
else:
print('issue with ', p, 'filename ', filename)
prevLine2 = prevLine
prevLine = line
#check curSmiles
mol = Chem.rdmolfiles.MolFromSmiles(curSmiles)
curSmiles = Chem.rdmolfiles.MolToSmiles(mol)
# if curSmiles in runningList:
# continue
paData = get_padel_data(filename)
#Use this to get the appropriate verticals
short_filename = filename.split('\\')[-1]
base_filename = short_filename.split('.')[0]
ip_eng = 0
ea_eng = 0
if os.path.exists(os.path.join(verticals_path, (base_filename +'_pos.out') )):
ip_eng = read_posneg_energy(verticals_path, (base_filename +'_pos.out'))
else:
print((base_filename +'_pos.out') + ' not found')
if os.path.exists(os.path.join(verticals_path, (base_filename +'_neg.out') )):
ea_eng = read_posneg_energy(verticals_path, (base_filename +'_neg.out'))
else:
print((base_filename +'_neg.out') + ' not found')
HLdata = gethlenergy(path, short_filename)
paData['HOMO'] = HLdata[0]
paData['LUMO'] = HLdata[1]
#add the IP and EA data right here
neutral_eng = read_neut_energy(path, filename)
paData['Neutral'] = neutral_eng
paData['IP'] = ip_eng
paData['EA'] = ea_eng
bigDataFrame = bigDataFrame.append(paData)
runningList.append(curSmiles)
#merge this into the existnig data frame based on the smiles code
# short_filename = filename.split(r'\\')[-1]
# idx = final_Data.index[final_Data['SMILES']==curSmiles].tolist()
# mw = Chem.Descriptors.ExactMolWt(mol)
# if len(idx)>0:
# final_Data.at[idx[0], 'HOMO'] = HLdata[0]
# final_Data.at[idx[0], 'LUMO'] = HLdata[1]
# final_Data.at[idx[0], 'filename'] = short_filename
# else:
# pass
# print(curSmiles)
# print(filename.split(r'\\')[-1])
# # print('molecular weight is ', mw)
bigDataFrame['HLgap'] = (bigDataFrame['LUMO']-bigDataFrame['HOMO'])*27.2114
bigDataFrame['IP_eV'] = (bigDataFrame['Neutral'] - bigDataFrame['IP'])*27.2114
bigDataFrame['EA_eV'] = (bigDataFrame['Neutral'] - bigDataFrame['EA'])*27.2114
endcols = list(bigDataFrame.columns)
endcols.remove('SMILES')
# endcols.remove('result')
endcols.remove('HOMO')
endcols.remove('LUMO')
endcols.remove('IP_eV')
endcols.remove('EA_eV')
endcols.remove('HLgap')
# endcols.remove('result')
#move some columsn to the front
columnList = ['SMILES','HOMO', 'LUMO', 'IP_eV', 'EA_eV', 'HLgap'] + endcols
bigDataFrame2 = bigDataFrame[ columnList]
#this the reference data. Use this to get name and CAS. CAS is very useful later
# bonus_data = pd.read_excel(r"C:\ResearchWorkingDirectory\Environmental_PAH_Mutagenicity\Final_Data\beforeMarchReset\TA98TA100_RawData3.xlsx", sheet_name = 'Sheet1')
bonus_data = pd.read_excel(r"C:\ResearchWorkingDirectory\Environmental_PAH_Mutagenicity\Final_Data\mutagenicity_data.xlsx", sheet_name = 'Sheet1')
final_results = bonus_data[['SMILES', 'name', 'CAS', 'result']]
# bigDataFrame2 = current_data.merge(final_results, on='SMILES', how='inner')
bigDataFrame2 = bigDataFrame2.merge(final_results, on='SMILES', how='inner')
endcols = list(bigDataFrame2.columns)
endcols.remove('Neutral')
endcols.remove('result')
endcols.remove('IP')
endcols.remove('EA')
endcols.remove('filename')
endcols.remove('CAS')
endcols.remove('name')
columnList = ['Neutral','IP', 'EA', 'filename', 'CAS', 'name', 'result'] + endcols
bigDataFrame2 = bigDataFrame2[ columnList]
#Write off the final data
# writer = pd.ExcelWriter('mutagen_data.xlsx')
# bigDataFrame2.to_excel(writer,'Sheet1')
# writer.save()
# writer.close()
| [
"IPython.get_ipython",
"Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_posneg_energy",
"rdkit.Chem.AddHs",
"rdkit.Chem.MolToMolBlock",
"Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_neut_energy",
"rdkit.Chem.MolFromSmiles",
"os.path.join",
"Environmental_PAH_Mutagenicity.get_padel... | [((2157, 2171), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2169, 2171), True, 'import pandas as pd\n'), ((7175, 7319), 'pandas.read_excel', 'pd.read_excel', (['"""C:\\\\ResearchWorkingDirectory\\\\Environmental_PAH_Mutagenicity\\\\Final_Data\\\\mutagenicity_data.xlsx"""'], {'sheet_name': '"""Sheet1"""'}), "(\n 'C:\\\\ResearchWorkingDirectory\\\\Environmental_PAH_Mutagenicity\\\\Final_Data\\\\mutagenicity_data.xlsx'\n , sheet_name='Sheet1')\n", (7188, 7319), True, 'import pandas as pd\n'), ((952, 978), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (970, 978), False, 'from rdkit import Chem\n'), ((987, 1000), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['m'], {}), '(m)\n', (997, 1000), False, 'from rdkit import Chem\n'), ((1118, 1139), 'rdkit.Chem.MolToMolBlock', 'Chem.MolToMolBlock', (['m'], {}), '(m)\n', (1136, 1139), False, 'from rdkit import Chem\n'), ((2350, 2377), 'os.path.join', 'os.path.join', (['path', '"""*.OUT"""'], {}), "(path, '*.OUT')\n", (2362, 2377), False, 'import os\n'), ((4409, 4449), 'rdkit.Chem.rdmolfiles.MolFromSmiles', 'Chem.rdmolfiles.MolFromSmiles', (['curSmiles'], {}), '(curSmiles)\n', (4438, 4449), False, 'from rdkit import Chem\n'), ((4466, 4498), 'rdkit.Chem.rdmolfiles.MolToSmiles', 'Chem.rdmolfiles.MolToSmiles', (['mol'], {}), '(mol)\n', (4493, 4498), False, 'from rdkit import Chem\n'), ((4585, 4609), 'Environmental_PAH_Mutagenicity.get_padel_data.get_padel_data', 'get_padel_data', (['filename'], {}), '(filename)\n', (4599, 4609), False, 'from Environmental_PAH_Mutagenicity.get_padel_data import get_padel_data\n'), ((5311, 5344), 'Environmental_PAH_Mutagenicity.gethlenergy.gethlenergy', 'gethlenergy', (['path', 'short_filename'], {}), '(path, short_filename)\n', (5322, 5344), False, 'from Environmental_PAH_Mutagenicity.gethlenergy import gethlenergy\n'), ((5470, 5502), 'Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_neut_energy', 'read_neut_energy', (['path', 'filename'], {}), '(path, filename)\n', (5486, 5502), False, 'from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_neut_energy\n'), ((180, 193), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (191, 193), False, 'from IPython import get_ipython\n'), ((2646, 2702), 'os.path.join', 'os.path.join', (['verticals_path', "(base_filename + '_pos.out')"], {}), "(verticals_path, base_filename + '_pos.out')\n", (2658, 2702), False, 'import os\n'), ((2729, 2791), 'Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_posneg_energy', 'read_posneg_energy', (['verticals_path', "(base_filename + '_pos.out')"], {}), "(verticals_path, base_filename + '_pos.out')\n", (2747, 2791), False, 'from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_posneg_energy\n'), ((2898, 2954), 'os.path.join', 'os.path.join', (['verticals_path', "(base_filename + '_neg.out')"], {}), "(verticals_path, base_filename + '_neg.out')\n", (2910, 2954), False, 'import os\n'), ((2989, 3051), 'Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_posneg_energy', 'read_posneg_energy', (['verticals_path', "(base_filename + '_neg.out')"], {}), "(verticals_path, base_filename + '_neg.out')\n", (3007, 3051), False, 'from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_posneg_energy\n'), ((4820, 4876), 'os.path.join', 'os.path.join', (['verticals_path', "(base_filename + '_pos.out')"], {}), "(verticals_path, base_filename + '_pos.out')\n", (4832, 4876), False, 'import os\n'), ((4903, 4965), 'Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_posneg_energy', 'read_posneg_energy', (['verticals_path', "(base_filename + '_pos.out')"], {}), "(verticals_path, base_filename + '_pos.out')\n", (4921, 4965), False, 'from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_posneg_energy\n'), ((5072, 5128), 'os.path.join', 'os.path.join', (['verticals_path', "(base_filename + '_neg.out')"], {}), "(verticals_path, base_filename + '_neg.out')\n", (5084, 5128), False, 'import os\n'), ((5163, 5225), 'Environmental_PAH_Mutagenicity.read_IP_EA_functions.read_posneg_energy', 'read_posneg_energy', (['verticals_path', "(base_filename + '_neg.out')"], {}), "(verticals_path, base_filename + '_neg.out')\n", (5181, 5225), False, 'from Environmental_PAH_Mutagenicity.read_IP_EA_functions import read_posneg_energy\n')] |
import torch
class LDA:
"""
Fisher's discriminant class.
Attributes
----------
n_features : int
Number of features
n_classes : int
Number of classes
evals_ : torch.Tensor
LDA eigenvalues
evecs_ : torch.Tensor
LDA eigenvectors
S_b_ : torch.Tensor
Between scatter matrix
S_w_ : torch.Tensor
Within scatter matrix
sw_reg : float
Regularization to S_w matrix
"""
def __init__(self, harmonic_lda = False):
"""
Create a LDA object
Parameters
----------
harmonic_lda : bool
Harmonic variant of LDA
"""
# initialize attributes
self.harmonic_lda = harmonic_lda
self.evals_ = None
self.evecs_ = None
self.S_b_ = None
self.S_w_ = None
self.n_features = None
self.n_classes = None
# Regularization
self.sw_reg = 1e-6
def compute_LDA(self, H, label, save_params=True):
"""
Performs LDA and saves parameters.
Parameters
----------
H : array-like of shape (n_samples, n_features)
Training data.
label : array-like of shape (n_samples,)
Classes labels.
save_params: bool, optional
Whether to store parameters in model
Returns
-------
evals : array of shape (n_classes-1)
LDA eigenvalues.
"""
# device
device = H.device
# sizes
N, d = H.shape
self.n_features = d
# classes
classes = torch.unique(label)
n_classes = len(classes)
self.n_classes = n_classes
# Mean centered observations for entire population
H_bar = H - torch.mean(H, 0, True)
# Total scatter matrix (cov matrix over all observations)
S_t = H_bar.t().matmul(H_bar) / (N - 1)
# Define within scatter matrix and compute it
S_w = torch.Tensor().new_zeros((d, d)).to(device)
if self.harmonic_lda:
S_w_inv = torch.Tensor().new_zeros((d, d)).to(device)
# Loop over classes to compute means and covs
for i in classes:
# check which elements belong to class i
H_i = H[torch.nonzero(label == i).view(-1)]
# compute mean centered obs of class i
H_i_bar = H_i - torch.mean(H_i, 0, True)
# count number of elements
N_i = H_i.shape[0]
if N_i == 0:
continue
# LDA
S_w += H_i_bar.t().matmul(H_i_bar) / ((N_i - 1) * n_classes)
# HLDA
if self.harmonic_lda:
inv_i = H_i_bar.t().matmul(H_i_bar) / ((N_i - 1) * n_classes)
S_w_inv += inv_i.inverse()
if self.harmonic_lda:
S_w = S_w_inv.inverse()
# Compute S_b from total scatter matrix
S_b = S_t - S_w
# Regularize S_w
S_w = S_w + self.sw_reg * torch.diag(
torch.Tensor().new_ones((d)).to(device)
)
# -- Generalized eigenvalue problem: S_b * v_i = lambda_i * Sw * v_i --
# (1) use cholesky decomposition for S_w
L = torch.cholesky(S_w, upper=False)
# (2) define new matrix using cholesky decomposition
L_t = torch.t(L)
L_ti = torch.inverse(L_t)
L_i = torch.inverse(L)
S_new = torch.matmul(torch.matmul(L_i, S_b), L_ti)
# (3) find eigenvalues and vectors of S_new
eigvals, eigvecs = torch.symeig(S_new, eigenvectors=True)
# sort
eigvals, indices = torch.sort(eigvals, 0, descending=True)
eigvecs = eigvecs[:, indices]
# (4) return to original eigenvectors
eigvecs = torch.matmul(L_ti, eigvecs)
# normalize them
for i in range(eigvecs.shape[1]): # TODO maybe change in sum along axis?
norm = eigvecs[:, i].pow(2).sum().sqrt()
eigvecs[:, i].div_(norm)
# set the first component positive
eigvecs.mul_(torch.sign(eigvecs[0, :]).unsqueeze(0).expand_as(eigvecs))
# keep only C-1 eigvals and eigvecs
eigvals = eigvals[: n_classes - 1]
eigvecs = eigvecs[:, : n_classes - 1]
if save_params:
self.evals_ = eigvals
self.evecs_ = eigvecs
self.S_b_ = S_b
self.S_w_ = S_w
return eigvals, eigvecs
| [
"torch.sort",
"torch.unique",
"torch.mean",
"torch.cholesky",
"torch.Tensor",
"torch.sign",
"torch.t",
"torch.nonzero",
"torch.matmul",
"torch.symeig",
"torch.inverse"
] | [((1622, 1641), 'torch.unique', 'torch.unique', (['label'], {}), '(label)\n', (1634, 1641), False, 'import torch\n'), ((3232, 3264), 'torch.cholesky', 'torch.cholesky', (['S_w'], {'upper': '(False)'}), '(S_w, upper=False)\n', (3246, 3264), False, 'import torch\n'), ((3341, 3351), 'torch.t', 'torch.t', (['L'], {}), '(L)\n', (3348, 3351), False, 'import torch\n'), ((3367, 3385), 'torch.inverse', 'torch.inverse', (['L_t'], {}), '(L_t)\n', (3380, 3385), False, 'import torch\n'), ((3400, 3416), 'torch.inverse', 'torch.inverse', (['L'], {}), '(L)\n', (3413, 3416), False, 'import torch\n'), ((3556, 3594), 'torch.symeig', 'torch.symeig', (['S_new'], {'eigenvectors': '(True)'}), '(S_new, eigenvectors=True)\n', (3568, 3594), False, 'import torch\n'), ((3637, 3676), 'torch.sort', 'torch.sort', (['eigvals', '(0)'], {'descending': '(True)'}), '(eigvals, 0, descending=True)\n', (3647, 3676), False, 'import torch\n'), ((3780, 3807), 'torch.matmul', 'torch.matmul', (['L_ti', 'eigvecs'], {}), '(L_ti, eigvecs)\n', (3792, 3807), False, 'import torch\n'), ((1790, 1812), 'torch.mean', 'torch.mean', (['H', '(0)', '(True)'], {}), '(H, 0, True)\n', (1800, 1812), False, 'import torch\n'), ((3446, 3468), 'torch.matmul', 'torch.matmul', (['L_i', 'S_b'], {}), '(L_i, S_b)\n', (3458, 3468), False, 'import torch\n'), ((2403, 2427), 'torch.mean', 'torch.mean', (['H_i', '(0)', '(True)'], {}), '(H_i, 0, True)\n', (2413, 2427), False, 'import torch\n'), ((1995, 2009), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (2007, 2009), False, 'import torch\n'), ((2288, 2313), 'torch.nonzero', 'torch.nonzero', (['(label == i)'], {}), '(label == i)\n', (2301, 2313), False, 'import torch\n'), ((2091, 2105), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (2103, 2105), False, 'import torch\n'), ((4070, 4095), 'torch.sign', 'torch.sign', (['eigvecs[0, :]'], {}), '(eigvecs[0, :])\n', (4080, 4095), False, 'import torch\n'), ((3039, 3053), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (3051, 3053), False, 'import torch\n')] |
import pytest
import datetime
from gps_time.core import GPSTime
from gps_time.leapseconds import LeapSeconds
@pytest.mark.parametrize("year,leap_seconds", [
(1981, 0), (1982, 1), (1983, 2), (1984, 3), (1986, 4), (1989, 5),
(1991, 6), (1992, 7), (1993, 8), (1995, 10), (1997, 11), (1998, 12),
(2000, 13), (2007, 14), (2010, 15), (2013, 16), (2016, 17), (2020, 18),
(2021, 18)
])
def test_get_leap_seconds(year, leap_seconds):
date = datetime.datetime(year, 1, 1, 0, 0, 0)
calculated_ls = LeapSeconds.get_leap_seconds(
GPSTime.from_datetime(date))
assert leap_seconds == calculated_ls
@pytest.mark.parametrize("year,leap_second_date,leap_second", [
(1980, GPSTime(77, 259200), 1), (2021, None, None),
(2000, GPSTime(1356, 0.0), 14)
])
def test_get_next_leap_seconds(year, leap_second_date, leap_second):
date = datetime.datetime(year, 1, 1, 0, 0, 0)
out = LeapSeconds.get_next_leap_second(
GPSTime.from_datetime(date))
if leap_second_date is not None:
date, calculated_ls = out
assert leap_second_date == date
assert leap_second == calculated_ls
else:
assert out is None
if __name__ == "__main__":
gps_time = GPSTime.from_datetime(datetime.datetime(2000, 1, 1))
# gps_time2 = gps_time + datetime.datetime(1990, 1, 6)
# GPSTime(0, 0) - 1
#print(time.tow2zcount(0, 0, 1980))
# print(LeapSeconds.get_leap_seconds(gps_time))
date = datetime.datetime(1980, 1, 1, 0, 0, 0)
calculated_ls = LeapSeconds.get_next_leap_second(gps_time)
print(calculated_ls)
| [
"datetime.datetime",
"pytest.mark.parametrize",
"gps_time.leapseconds.LeapSeconds.get_next_leap_second",
"gps_time.core.GPSTime.from_datetime",
"gps_time.core.GPSTime"
] | [((114, 393), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""year,leap_seconds"""', '[(1981, 0), (1982, 1), (1983, 2), (1984, 3), (1986, 4), (1989, 5), (1991, 6\n ), (1992, 7), (1993, 8), (1995, 10), (1997, 11), (1998, 12), (2000, 13),\n (2007, 14), (2010, 15), (2013, 16), (2016, 17), (2020, 18), (2021, 18)]'], {}), "('year,leap_seconds', [(1981, 0), (1982, 1), (1983, \n 2), (1984, 3), (1986, 4), (1989, 5), (1991, 6), (1992, 7), (1993, 8), (\n 1995, 10), (1997, 11), (1998, 12), (2000, 13), (2007, 14), (2010, 15),\n (2013, 16), (2016, 17), (2020, 18), (2021, 18)])\n", (137, 393), False, 'import pytest\n'), ((456, 494), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(year, 1, 1, 0, 0, 0)\n', (473, 494), False, 'import datetime\n'), ((863, 901), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(year, 1, 1, 0, 0, 0)\n', (880, 901), False, 'import datetime\n'), ((1458, 1496), 'datetime.datetime', 'datetime.datetime', (['(1980)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1980, 1, 1, 0, 0, 0)\n', (1475, 1496), False, 'import datetime\n'), ((1517, 1559), 'gps_time.leapseconds.LeapSeconds.get_next_leap_second', 'LeapSeconds.get_next_leap_second', (['gps_time'], {}), '(gps_time)\n', (1549, 1559), False, 'from gps_time.leapseconds import LeapSeconds\n'), ((553, 580), 'gps_time.core.GPSTime.from_datetime', 'GPSTime.from_datetime', (['date'], {}), '(date)\n', (574, 580), False, 'from gps_time.core import GPSTime\n'), ((954, 981), 'gps_time.core.GPSTime.from_datetime', 'GPSTime.from_datetime', (['date'], {}), '(date)\n', (975, 981), False, 'from gps_time.core import GPSTime\n'), ((1241, 1270), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (1258, 1270), False, 'import datetime\n'), ((700, 719), 'gps_time.core.GPSTime', 'GPSTime', (['(77)', '(259200)'], {}), '(77, 259200)\n', (707, 719), False, 'from gps_time.core import GPSTime\n'), ((756, 774), 'gps_time.core.GPSTime', 'GPSTime', (['(1356)', '(0.0)'], {}), '(1356, 0.0)\n', (763, 774), False, 'from gps_time.core import GPSTime\n')] |
#!/usr/bin/env python3
from collections import namedtuple, defaultdict
from os.path import join as join_path, dirname, abspath
from copy import deepcopy
from pegparse import create_parser_from_file, ASTWalker
EBNF_FILE = join_path(dirname(abspath(__file__)), 'c-like.ebnf')
CodeBlock = namedtuple('CodeBlock', ['entrance', 'exits'])
LivenessAnalysis = namedtuple('LivenessAnalysis', ['source', 'lines', 'edges'])
class LineOfCode:
def __init__(self, line_num, column, source, var, used):
self.line_num = line_num
self.column = column
self.source = source
self.var = var
self.used = used
@property
def var_name(self):
return '{}_{}_{}'.format(self.var, self.line_num, self.column)
class DataflowWalker(ASTWalker):
def __init__(self):
super().__init__(create_parser_from_file(EBNF_FILE), 'Program')
self._reset()
def _reset(self):
self.edges = []
self.lines = {}
self.line_num = 1
def parse(self, text, term=None):
super().parse(text, term)
result = LivenessAnalysis(text, self.lines, self.edges)
self._reset()
return result
def _parse_Program(self, ast, results):
for before, after in zip(results[:-1], results[1:]):
for exit_line in before.exits:
self.edges.append([exit_line.line_num, after.entrance.line_num])
return CodeBlock(results[0].entrance, results[-1].exits)
def _parse_IfElse(self, ast, results):
condition, true_block, false_block = results
condition.source = 'if ' + condition.source
self.edges.append([condition.line_num, true_block.entrance.line_num])
self.edges.append([condition.line_num, false_block.entrance.line_num])
return CodeBlock(condition, [*true_block.exits, *false_block.exits])
def _parse_If(self, ast, results):
condition, true_block = results
condition.source = 'if ' + condition.source
self.edges.append([condition.line_num, true_block.entrance.line_num])
return CodeBlock(condition, [condition, *true_block.exits])
def _parse_Loop(self, ast, results):
condition, body = results
condition.source = 'while ' + condition.source
self.edges.append([condition.line_num, body.entrance.line_num])
for exit_line in body.exits:
self.edges.append([exit_line.line_num, condition.line_num])
return CodeBlock(condition, [condition])
def _parse_Condition(self, ast, results):
line = LineOfCode(self.line_num, ast.column, ast.match, None, set(results))
self.line_num += 1
self.lines[line.line_num] = line
return line
def _parse_BasicBlock(self, ast, results):
for before, after in zip(results[:-1], results[1:]):
self.edges.append([before.line_num, after.line_num])
return CodeBlock(results[0], [results[-1]])
def _parse_Assignment(self, ast, results):
if len(ast.first_descendant('AssignmentOperator').match) > 1:
used = set(results)
else:
used = set(results[1:])
line = LineOfCode(self.line_num, ast.column, ast.match, results[0], used)
self.line_num += 1
self.lines[line.line_num] = line
return line
def _parse_ExpressionStatement(self, ast, results):
line = LineOfCode(self.line_num, ast.column, ast.match, None, set(results))
self.line_num += 1
self.lines[line.line_num] = line
return line
def _parse_Variable(self, ast, results):
return ast.match
def control_flow_graph(analysis):
lines = []
lines.append('digraph {')
for line_num, line in sorted(analysis.lines.items()):
lines.append(' {} [label="{}: {}"]'.format(line_num, line_num, line.source))
for src, dest in analysis.edges:
lines.append(' {} -> {}'.format(src, dest))
lines.append('}')
return '\n'.join(lines)
def upwards_exposure(analysis):
upwards = defaultdict(set)
for line_num, line in sorted(analysis.lines.items(), reverse=True):
upwards[line_num] |= line.used
changed = True
while changed:
changed = False
for line_num, line in sorted(analysis.lines.items(), reverse=True):
for var in upwards[line_num]:
for src, dest in analysis.edges:
if dest == line_num and var != analysis.lines[src].var and var not in upwards[src]:
upwards[src].add(var)
changed = True
return upwards
def local_definitions(analysis):
db = defaultdict(set)
for line_num, line in analysis.lines.items():
if line.var:
db[line_num] = set([line.var_name])
return db
def available_definitions(analysis):
variables = defaultdict(set)
for line in analysis.lines.values():
if line.var:
variables[line.var].add(line.var_name)
pb = defaultdict(set)
for line_num, line in analysis.lines.items():
pb[line_num] = set.union(set(), *(values for key, values in variables.items() if key != line.var))
return pb
def reachability(analysis):
db = local_definitions(analysis)
pb = available_definitions(analysis)
r_s = [defaultdict(set)]
a_s = [defaultdict(set)]
changed = True
iteration = 0
while changed:
changed = False
r = deepcopy(r_s[-1])
a = deepcopy(a_s[-1])
iteration += 1
# update a
for line_num in analysis.lines.keys():
old_a = a[line_num]
a[line_num] = db[line_num].union(r[line_num].intersection(pb[line_num]))
if old_a != a[line_num]:
changed = True
# update r
for line_num in analysis.lines.keys():
old_r = r[line_num]
new_r = set()
for pred, succ in analysis.edges:
if succ == line_num:
new_r |= a[pred]
r[line_num] = new_r
if old_r != new_r:
changed = True
if changed:
r_s.append(r)
a_s.append(a)
return list(zip(r_s, a_s))
def liveness(analysis):
u = upwards_exposure(analysis)
r, _ = reachability(analysis)[-1]
l = defaultdict(set)
for line_num in sorted(analysis.lines.keys()):
for reachable_var in r[line_num]:
for exposed_var in u[line_num]:
if reachable_var.startswith(exposed_var + '_'):
l[line_num].add(reachable_var)
return l
def main():
import sys
with open(sys.argv[1]) as fd:
source = fd.read()
analysis = DataflowWalker().parse(source)
print(control_flow_graph(analysis))
if __name__ == '__main__':
main()
| [
"pegparse.create_parser_from_file",
"collections.namedtuple",
"collections.defaultdict",
"copy.deepcopy",
"os.path.abspath"
] | [((290, 336), 'collections.namedtuple', 'namedtuple', (['"""CodeBlock"""', "['entrance', 'exits']"], {}), "('CodeBlock', ['entrance', 'exits'])\n", (300, 336), False, 'from collections import namedtuple, defaultdict\n'), ((356, 416), 'collections.namedtuple', 'namedtuple', (['"""LivenessAnalysis"""', "['source', 'lines', 'edges']"], {}), "('LivenessAnalysis', ['source', 'lines', 'edges'])\n", (366, 416), False, 'from collections import namedtuple, defaultdict\n'), ((4029, 4045), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4040, 4045), False, 'from collections import namedtuple, defaultdict\n'), ((4638, 4654), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4649, 4654), False, 'from collections import namedtuple, defaultdict\n'), ((4843, 4859), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4854, 4859), False, 'from collections import namedtuple, defaultdict\n'), ((4982, 4998), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4993, 4998), False, 'from collections import namedtuple, defaultdict\n'), ((6298, 6314), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (6309, 6314), False, 'from collections import namedtuple, defaultdict\n'), ((242, 259), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (249, 259), False, 'from os.path import join as join_path, dirname, abspath\n'), ((5289, 5305), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (5300, 5305), False, 'from collections import namedtuple, defaultdict\n'), ((5318, 5334), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (5329, 5334), False, 'from collections import namedtuple, defaultdict\n'), ((5428, 5445), 'copy.deepcopy', 'deepcopy', (['r_s[-1]'], {}), '(r_s[-1])\n', (5436, 5445), False, 'from copy import deepcopy\n'), ((5458, 5475), 'copy.deepcopy', 'deepcopy', (['a_s[-1]'], {}), '(a_s[-1])\n', (5466, 5475), False, 'from copy import deepcopy\n'), ((833, 867), 'pegparse.create_parser_from_file', 'create_parser_from_file', (['EBNF_FILE'], {}), '(EBNF_FILE)\n', (856, 867), False, 'from pegparse import create_parser_from_file, ASTWalker\n')] |
import sys
import sdl2
import sdl2.ext
GREY = sdl2.ext.Color(200, 200, 200)
RED = sdl2.ext.Color(255, 0, 0)
GREEN = sdl2.ext.Color(0, 255, 0)
def onInput(ui, event):
print("Input: ", ui, event)
# print(dir(event))
# print(event.key)#<sdl2.events.SDL_KeyboardEvent
# print(event.text)#sdl2.events.SDL_TextInputEvent
# print(dir(event.text))
print(event.text.text)
def onEditing(ui, event):
print("Editing: ", ui, event)
def run():
sdl2.ext.init()
window = sdl2.ext.Window("PySDL2でTextEntryを作る", size=(800, 600))
window.show()
factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
uifactory = sdl2.ext.UIFactory(factory)
entry = uifactory.from_color(sdl2.ext.TEXTENTRY, color=GREY, size=(300,50))
entry.position = 50, 50
entry.input += onInput
entry.editing += onEditing
spriterenderer = factory.create_sprite_render_system(window)
uiprocessor = sdl2.ext.UIProcessor()
running = True
while running:
events = sdl2.ext.get_events()
for event in events:
if event.type == sdl2.SDL_QUIT:
running = False
break
uiprocessor.dispatch([entry], event)
spriterenderer.render((entry, ))
if __name__ == "__main__":
sys.exit(run())
| [
"sdl2.ext.UIFactory",
"sdl2.ext.Color",
"sdl2.ext.init",
"sdl2.ext.SpriteFactory",
"sdl2.ext.get_events",
"sdl2.ext.Window",
"sdl2.ext.UIProcessor"
] | [((47, 76), 'sdl2.ext.Color', 'sdl2.ext.Color', (['(200)', '(200)', '(200)'], {}), '(200, 200, 200)\n', (61, 76), False, 'import sdl2\n'), ((83, 108), 'sdl2.ext.Color', 'sdl2.ext.Color', (['(255)', '(0)', '(0)'], {}), '(255, 0, 0)\n', (97, 108), False, 'import sdl2\n'), ((117, 142), 'sdl2.ext.Color', 'sdl2.ext.Color', (['(0)', '(255)', '(0)'], {}), '(0, 255, 0)\n', (131, 142), False, 'import sdl2\n'), ((461, 476), 'sdl2.ext.init', 'sdl2.ext.init', ([], {}), '()\n', (474, 476), False, 'import sdl2\n'), ((490, 545), 'sdl2.ext.Window', 'sdl2.ext.Window', (['"""PySDL2でTextEntryを作る"""'], {'size': '(800, 600)'}), "('PySDL2でTextEntryを作る', size=(800, 600))\n", (505, 545), False, 'import sdl2\n'), ((583, 624), 'sdl2.ext.SpriteFactory', 'sdl2.ext.SpriteFactory', (['sdl2.ext.SOFTWARE'], {}), '(sdl2.ext.SOFTWARE)\n', (605, 624), False, 'import sdl2\n'), ((641, 668), 'sdl2.ext.UIFactory', 'sdl2.ext.UIFactory', (['factory'], {}), '(factory)\n', (659, 668), False, 'import sdl2\n'), ((919, 941), 'sdl2.ext.UIProcessor', 'sdl2.ext.UIProcessor', ([], {}), '()\n', (939, 941), False, 'import sdl2\n'), ((998, 1019), 'sdl2.ext.get_events', 'sdl2.ext.get_events', ([], {}), '()\n', (1017, 1019), False, 'import sdl2\n')] |
import json
import logging
import os
logger = logging.getLogger(__name__)
class DataManager:
# Get the value of a key from the loaded guild/user
def get(self, data_key):
return self.guild_data[data_key] if data_key in self.guild_data else None
# Load a guild/user using ctx from JSON file
def load(self, ctx):
self.id = ctx.guild.id if ctx.guild else ctx.author.id
self.id_type = "guild" if ctx.guild else "user"
if not os.path.isfile(f"data/{self.id_type}s/{self.id}.json"):
with open(f"data/{self.id_type}s/{self.id}.json", "w+") as f:
self.guild_data = {}
json.dump(self.guild_data, f)
logger.info(f"Created new file for {self.id_type} {self.id}.")
return
with open(f"data/{self.id_type}s/{self.id}.json", "r") as f:
self.guild_data = json.load(f)
logger.info(f"Loaded {self.id_type} {self.id} from file.")
# Remove a key from the specified data key in the current guild/user
def remove(self, data_key, key):
if data_key in self.guild_data:
self.guild_data[data_key].pop(key)
with open(f"data/{self.id_type}s/{self.id}.json", "w") as f:
json.dump(self.guild_data, f)
logger.info(f"Removed {self.id_type} data for {self.id}.")
# Update the data of the specified data key in the current guild/user
def update(self, data_key, guild_data):
if data_key in self.guild_data:
self.guild_data[data_key].update(guild_data)
else:
self.guild_data[data_key] = guild_data
with open(f"data/{self.id_type}s/{self.id}.json", "w") as f:
json.dump(self.guild_data, f)
logger.info(f"Updated {self.id_type} data for {self.id}.")
data_manager = DataManager()
| [
"logging.getLogger",
"json.load",
"json.dump",
"os.path.isfile"
] | [((47, 74), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (64, 74), False, 'import logging\n'), ((472, 526), 'os.path.isfile', 'os.path.isfile', (['f"""data/{self.id_type}s/{self.id}.json"""'], {}), "(f'data/{self.id_type}s/{self.id}.json')\n", (486, 526), False, 'import os\n'), ((878, 890), 'json.load', 'json.load', (['f'], {}), '(f)\n', (887, 890), False, 'import json\n'), ((1708, 1737), 'json.dump', 'json.dump', (['self.guild_data', 'f'], {}), '(self.guild_data, f)\n', (1717, 1737), False, 'import json\n'), ((655, 684), 'json.dump', 'json.dump', (['self.guild_data', 'f'], {}), '(self.guild_data, f)\n', (664, 684), False, 'import json\n'), ((1245, 1274), 'json.dump', 'json.dump', (['self.guild_data', 'f'], {}), '(self.guild_data, f)\n', (1254, 1274), False, 'import json\n')] |
"""Run coveralls only on travis."""
import os
import subprocess
import click
def echo_call(cmd):
click.echo('calling: {}'.format(' '.join(cmd)), err=True)
@click.command()
def main():
"""Run coveralls only on travis."""
if os.getenv('TRAVIS'):
cmd = ['coveralls']
echo_call(cmd)
subprocess.call(cmd)
if __name__ == '__main__':
main()
| [
"click.command",
"subprocess.call",
"os.getenv"
] | [((164, 179), 'click.command', 'click.command', ([], {}), '()\n', (177, 179), False, 'import click\n'), ((239, 258), 'os.getenv', 'os.getenv', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (248, 258), False, 'import os\n'), ((319, 339), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (334, 339), False, 'import subprocess\n')] |
from app import db
class GenLoc(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
sublocs = db.relationship('SubLoc', backref='general_location', lazy='dynamic')
events = db.relationship('Event', backref='general_location', lazy='dynamic')
def __repr__(self):
return '<General location %r>' % (self.name)
class SubLoc(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
gen_loc = db.Column(db.Integer, db.ForeignKey('gen_loc.id'))
events = db.relationship('Event', backref='sublocation', lazy='dynamic')
def __repr__(self):
return '<Sublocation %r>' % (self.name)
class EventType(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
subtypes = db.relationship('EventSubtype', backref='general_event_type', lazy='dynamic')
events = db.relationship('Event', backref='event_type', lazy='dynamic')
def __repr__(self):
return '<Event type %r>' % (self.name)
class EventSubtype(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
gen_type = db.Column(db.Integer, db.ForeignKey('event_type.id'))
events = db.relationship('Event', backref='event_specifics', lazy='dynamic')
def __repr__(self):
return '<Event subtype %r>' % (self.name)
class Event(db.Model):
id = db.Column(db.Integer, primary_key=True)
gen_loc = db.Column(db.Integer, db.ForeignKey('gen_loc.id'))
gen_type = db.Column(db.Integer, db.ForeignKey('event_type.id'))
subtype = db.Column(db.Integer, db.ForeignKey('event_subtype.id'))
subloc = db.Column(db.Integer, db.ForeignKey('sub_loc.id'))
synopsis = db.Column(db.String(500))
timestamp = db.Column(db.DateTime)
def __repr__(self):
return '<Event %r>' % (self.id)
| [
"app.db.String",
"app.db.Column",
"app.db.ForeignKey",
"app.db.relationship"
] | [((50, 89), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (59, 89), False, 'from app import db\n'), ((134, 203), 'app.db.relationship', 'db.relationship', (['"""SubLoc"""'], {'backref': '"""general_location"""', 'lazy': '"""dynamic"""'}), "('SubLoc', backref='general_location', lazy='dynamic')\n", (149, 203), False, 'from app import db\n'), ((214, 282), 'app.db.relationship', 'db.relationship', (['"""Event"""'], {'backref': '"""general_location"""', 'lazy': '"""dynamic"""'}), "('Event', backref='general_location', lazy='dynamic')\n", (229, 282), False, 'from app import db\n'), ((384, 423), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (393, 423), False, 'from app import db\n'), ((529, 592), 'app.db.relationship', 'db.relationship', (['"""Event"""'], {'backref': '"""sublocation"""', 'lazy': '"""dynamic"""'}), "('Event', backref='sublocation', lazy='dynamic')\n", (544, 592), False, 'from app import db\n'), ((691, 730), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (700, 730), False, 'from app import db\n'), ((776, 853), 'app.db.relationship', 'db.relationship', (['"""EventSubtype"""'], {'backref': '"""general_event_type"""', 'lazy': '"""dynamic"""'}), "('EventSubtype', backref='general_event_type', lazy='dynamic')\n", (791, 853), False, 'from app import db\n'), ((864, 926), 'app.db.relationship', 'db.relationship', (['"""Event"""'], {'backref': '"""event_type"""', 'lazy': '"""dynamic"""'}), "('Event', backref='event_type', lazy='dynamic')\n", (879, 926), False, 'from app import db\n'), ((1027, 1066), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1036, 1066), False, 'from app import db\n'), ((1176, 1243), 'app.db.relationship', 'db.relationship', (['"""Event"""'], {'backref': '"""event_specifics"""', 'lazy': '"""dynamic"""'}), "('Event', backref='event_specifics', lazy='dynamic')\n", (1191, 1243), False, 'from app import db\n'), ((1340, 1379), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1349, 1379), False, 'from app import db\n'), ((1688, 1710), 'app.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (1697, 1710), False, 'from app import db\n'), ((108, 121), 'app.db.String', 'db.String', (['(50)'], {}), '(50)\n', (117, 121), False, 'from app import db\n'), ((442, 455), 'app.db.String', 'db.String', (['(50)'], {}), '(50)\n', (451, 455), False, 'from app import db\n'), ((490, 517), 'app.db.ForeignKey', 'db.ForeignKey', (['"""gen_loc.id"""'], {}), "('gen_loc.id')\n", (503, 517), False, 'from app import db\n'), ((749, 762), 'app.db.String', 'db.String', (['(50)'], {}), '(50)\n', (758, 762), False, 'from app import db\n'), ((1085, 1098), 'app.db.String', 'db.String', (['(50)'], {}), '(50)\n', (1094, 1098), False, 'from app import db\n'), ((1134, 1164), 'app.db.ForeignKey', 'db.ForeignKey', (['"""event_type.id"""'], {}), "('event_type.id')\n", (1147, 1164), False, 'from app import db\n'), ((1413, 1440), 'app.db.ForeignKey', 'db.ForeignKey', (['"""gen_loc.id"""'], {}), "('gen_loc.id')\n", (1426, 1440), False, 'from app import db\n'), ((1476, 1506), 'app.db.ForeignKey', 'db.ForeignKey', (['"""event_type.id"""'], {}), "('event_type.id')\n", (1489, 1506), False, 'from app import db\n'), ((1541, 1574), 'app.db.ForeignKey', 'db.ForeignKey', (['"""event_subtype.id"""'], {}), "('event_subtype.id')\n", (1554, 1574), False, 'from app import db\n'), ((1608, 1635), 'app.db.ForeignKey', 'db.ForeignKey', (['"""sub_loc.id"""'], {}), "('sub_loc.id')\n", (1621, 1635), False, 'from app import db\n'), ((1659, 1673), 'app.db.String', 'db.String', (['(500)'], {}), '(500)\n', (1668, 1673), False, 'from app import db\n')] |
import os
import argparse
import re
def main(args):
line1_regex = re.compile(r'^iter: (?P<iter>\d{1,10}) \/ \d{1,10}, total loss: (?P<tot_loss>\d{1,10}.\d{1,10})')
line2_regex = re.compile(r'^ >>> loss_cls \(detector\)\: (?P<loss_cls>\d{1,10}.\d{1,10})')
line3_regex = re.compile(r'^ >>> loss_box \(detector\)\: (?P<loss_box>\d{1,10}.\d{1,10})')
with open(args.output, "w") as out_file:
out_file.write("iter tot_loss loss_cls loss_box\n")
with open(args.input, "r") as in_file:
line_index = 0
iter, tot_loss, loss_cls, loss_box = 0, 0, 0, 0
for line in in_file:
if line_index == 0:
m = line1_regex.match(line)
if m:
iter = m.groupdict()['iter']
tot_loss = m.groupdict()['tot_loss']
line_index += 1
else:
line_index = 0
elif line_index == 1:
m = line2_regex.match(line)
if m:
loss_cls = m.groupdict()['loss_cls']
line_index += 1
else:
line_index = 0
elif line_index == 2:
m = line3_regex.match(line)
if m:
loss_box = m.groupdict()['loss_box']
out_file.write("%s %s %s %s\n" % (iter, tot_loss,
loss_cls, loss_box))
line_index = 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse DRL_RPN output')
parser.add_argument('-i', '--input', default=None, type=str,
help='input log file')
parser.add_argument('-o', '--output', default=None, type=str,
help='output as $iter $tot_loss $loss_cls $loss_box')
main(parser.parse_args()) | [
"argparse.ArgumentParser",
"re.compile"
] | [((71, 182), 're.compile', 're.compile', (['"""^iter: (?P<iter>\\\\d{1,10}) \\\\/ \\\\d{1,10}, total loss: (?P<tot_loss>\\\\d{1,10}.\\\\d{1,10})"""'], {}), "(\n '^iter: (?P<iter>\\\\d{1,10}) \\\\/ \\\\d{1,10}, total loss: (?P<tot_loss>\\\\d{1,10}.\\\\d{1,10})'\n )\n", (81, 182), False, 'import re\n'), ((187, 272), 're.compile', 're.compile', (['"""^ >>> loss_cls \\\\(detector\\\\)\\\\: (?P<loss_cls>\\\\d{1,10}.\\\\d{1,10})"""'], {}), "('^ >>> loss_cls \\\\(detector\\\\)\\\\: (?P<loss_cls>\\\\d{1,10}.\\\\d{1,10})'\n )\n", (197, 272), False, 'import re\n'), ((282, 367), 're.compile', 're.compile', (['"""^ >>> loss_box \\\\(detector\\\\)\\\\: (?P<loss_box>\\\\d{1,10}.\\\\d{1,10})"""'], {}), "('^ >>> loss_box \\\\(detector\\\\)\\\\: (?P<loss_box>\\\\d{1,10}.\\\\d{1,10})'\n )\n", (292, 367), False, 'import re\n'), ((1641, 1700), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse DRL_RPN output"""'}), "(description='Parse DRL_RPN output')\n", (1664, 1700), False, 'import argparse\n')] |
import unittest
class Solution:
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
version1 = [int(s) for s in version1.split('.')]
version2 = [int(s) for s in version2.split('.')]
for v1, v2 in zip(version1, version2):
if v1 < v2:
return -1
elif v1 > v2:
return 1
if len(version1) < len(version2):
return 0 if all(v == 0 for v in version2[len(version1):]) else -1
elif len(version1) > len(version2):
return 0 if all(v == 0 for v in version1[len(version2):]) else 1
return 0
class Test(unittest.TestCase):
def test(self):
self._test('0.1.345', '0.1.346', -1)
self._test('0.1', '1.1', -1)
self._test('0.1', '0.1', 0)
self._test('0.1', '0.1.1', -1)
self._test('0.1', '0.1.0', 0)
def _test(self, version1, version2, expected):
actual = Solution().compareVersion(version1, version2)
self.assertEqual(expected, actual)
actual = Solution().compareVersion(version2, version1)
self.assertEqual(-expected, actual)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((1253, 1268), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1266, 1268), False, 'import unittest\n')] |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import requests
from bs4 import BeautifulSoup
from PIL import Image
from io import BytesIO
from getpass import getpass
import re
import sys
import json
import argparse
import yaml
from six.moves import input
class EmojiRegister(object):
def run(self):
self.session_create()
self.get_token_and_name()
self.channellist = self.get_channellist()
self.setemoji(self.channelname)
def set_config(self, imagefile, emojiname, channelname):
self.imagefile = imagefile
self.emojiname = emojiname
self.channelname = channelname
def set_login(self, config):
self.teamname = config["teamname"] if "teamname" in config else input("TeamName: ")
self.email = config["email"] if "email" in config else input("E-mail: ")
self.password = config["password"] if "password" in config else getpass("Password: ")
if "channel" in config:
self.channelname = config["channel"]
self.baseurl = "https://" + self.teamname + ".slack.com/"
def session_create(self):
self.s = requests.Session()
r = self.s.get(self.baseurl)
soup = BeautifulSoup(r.text, "lxml")
formdata = soup.find("form", attrs={"id": "signin_form"})
params = {"email": self.email, "password": self.password}
for i in formdata.find_all("input", attrs={"type": "hidden"}):
params[i["name"]] = i["value"]
self.s.post(self.baseurl, data=params)
def get_token_and_name(self):
try:
messagehtml = self.s.get("https://api.slack.com/custom-integrations/legacy-tokens")
messagesoup = BeautifulSoup(messagehtml.text, "lxml")
self.token = messagesoup.find("input")["value"]
userinfo = json.loads(
self.s.post("https://slack.com/api/users.list", data={"token": self.token}).text)
self.username = [user["name"] for user in userinfo["members"] if
"email" in user["profile"] and user["profile"]["email"] == self.email][0]
except:
print("You need to generate token")
exit(1)
def get_channellist(self):
return json.loads(
self.s.post("https://slack.com/api/channels.list", data={"token": self.token}).text)["channels"]
def channnel_post(self, channelname):
channelid = ""
try:
channelid = [channel for channel in self.channellist if channel["name"] == channelname][0]["id"]
except:
print("Channel not found")
exit(1)
sendtext = '@{} 新しい絵文字を登録しました "{}" > :{}:'.format(self.username, self.emojiname, self.emojiname)
self.s.post("https://slack.com/api/chat.postMessage",
data={"channel": channelid, "token": self.token, "link_names": "true", "username": "Dr.EMOJI",
"as_user": "false", "text": sendtext})
def setemoji(self, channelname):
emojilist = self.get_emoji_list()
if self.emojiname in emojilist:
print("emoji already exist")
else:
emojitxt = self.s.get(self.baseurl + "admin/emoji")
soup = BeautifulSoup(emojitxt.text, "lxml")
form = soup.find("form", attrs={"action": "/customize/emoji"})
params = {"name": self.emojiname, "mode": "data"}
for i in form.find_all("input", attrs={"type": "hidden"}):
if "value" in i.attrs:
params[i["name"]] = i["value"]
img = Image.open(self.imagefile)
img.thumbnail((128, 128), Image.ANTIALIAS)
image = BytesIO()
img.save(image, img.format)
files = {"img": (self.imagefile, image.getvalue())}
res = self.s.post(self.baseurl + "/customize/emoji", files=files, data=params)
if res:
emojilist = self.get_emoji_list()
if self.emojiname in emojilist:
print("Save Emoji")
self.channnel_post(channelname)
else:
print("Failed")
def get_emoji_list(self):
emojilist = json.loads(self.s.post("https://slack.com/api/emoji.list", data={"token": self.token}).text)
return emojilist["emoji"]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Slack Emoji Save Script")
parser.add_argument("imagefile", action="store", type=str)
parser.add_argument("--name", "-n", action="store", type=str)
parser.add_argument("--channel", "-ch", default="emoji")
parser.add_argument("--config", "-c", action="store")
args = parser.parse_args()
args.imagefile = args.imagefile.replace("\\", "/")
if not args.name:
args.name = re.split("[./]", args.imagefile)[-2]
print("emoji name > ", args.name)
emojiregster = EmojiRegister()
emojiregster.set_config(args.imagefile, args.name, args.channel)
if args.config:
with open(args.config) as f:
config = yaml.load(f)
else:
config = {}
emojiregster.set_login(config)
emojiregster.run()
| [
"re.split",
"PIL.Image.open",
"requests.Session",
"argparse.ArgumentParser",
"six.moves.input",
"io.BytesIO",
"yaml.load",
"getpass.getpass",
"bs4.BeautifulSoup"
] | [((4402, 4464), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Slack Emoji Save Script"""'}), "(description='Slack Emoji Save Script')\n", (4425, 4464), False, 'import argparse\n'), ((1157, 1175), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1173, 1175), False, 'import requests\n'), ((1228, 1257), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (1241, 1257), False, 'from bs4 import BeautifulSoup\n'), ((767, 786), 'six.moves.input', 'input', (['"""TeamName: """'], {}), "('TeamName: ')\n", (772, 786), False, 'from six.moves import input\n'), ((850, 867), 'six.moves.input', 'input', (['"""E-mail: """'], {}), "('E-mail: ')\n", (855, 867), False, 'from six.moves import input\n'), ((940, 961), 'getpass.getpass', 'getpass', (['"""Password: """'], {}), "('Password: ')\n", (947, 961), False, 'from getpass import getpass\n'), ((1721, 1760), 'bs4.BeautifulSoup', 'BeautifulSoup', (['messagehtml.text', '"""lxml"""'], {}), "(messagehtml.text, 'lxml')\n", (1734, 1760), False, 'from bs4 import BeautifulSoup\n'), ((3254, 3290), 'bs4.BeautifulSoup', 'BeautifulSoup', (['emojitxt.text', '"""lxml"""'], {}), "(emojitxt.text, 'lxml')\n", (3267, 3290), False, 'from bs4 import BeautifulSoup\n'), ((3607, 3633), 'PIL.Image.open', 'Image.open', (['self.imagefile'], {}), '(self.imagefile)\n', (3617, 3633), False, 'from PIL import Image\n'), ((3709, 3718), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3716, 3718), False, 'from io import BytesIO\n'), ((4842, 4874), 're.split', 're.split', (['"""[./]"""', 'args.imagefile'], {}), "('[./]', args.imagefile)\n", (4850, 4874), False, 'import re\n'), ((5104, 5116), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (5113, 5116), False, 'import yaml\n')] |
from API_client.python.lib.dataset import dataset
import dh_py_access.lib.datahub as datahub
from dh_py_access import package_api
import datetime
server = 'http://api.planetos.com/v1/datasets/'
API_key = open('APIKEY').read().strip()
today = datetime.datetime.today()
two_days_ago = today - datetime.timedelta(days=1)
two_days_ago_str = datetime.datetime.strftime(two_days_ago, '%Y-%m-%dT') + '12:00:00'
dh=datahub.datahub_main(API_key)
ds = dataset('noaa_rbsn_timeseries',dh)
fmi_hirlam_surface=dataset('fmi_hirlam_surface',dh)
metno_harmonie_metcoop=dataset('metno_harmonie_metcoop',dh)
gfs=dataset('noaa_gfs_pgrb2_global_forecast_recompute_0.25degree',dh)
sample_var_names = {fmi_hirlam_surface:'Temperature_height_above_ground',
metno_harmonie_metcoop:'air_temperature_2m',
gfs:'tmp_m'}
obs_data = ds.get_station_data_as_pandas(['26233'],variables='temperature',start = two_days_ago_str)
longitude= 25.60
latitude = 58.36
sample_point_data = [(k,k.get_json_data_in_pandas(**{'var':v,'lon':longitude,'lat':latitude,'count':1000,'reftime_start':two_days_ago_str,'reftime_end':two_days_ago_str})) for k,v in sample_var_names.items()] | [
"dh_py_access.lib.datahub.datahub_main",
"datetime.datetime.today",
"datetime.timedelta",
"datetime.datetime.strftime",
"API_client.python.lib.dataset.dataset"
] | [((243, 268), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (266, 268), False, 'import datetime\n'), ((410, 439), 'dh_py_access.lib.datahub.datahub_main', 'datahub.datahub_main', (['API_key'], {}), '(API_key)\n', (430, 439), True, 'import dh_py_access.lib.datahub as datahub\n'), ((445, 480), 'API_client.python.lib.dataset.dataset', 'dataset', (['"""noaa_rbsn_timeseries"""', 'dh'], {}), "('noaa_rbsn_timeseries', dh)\n", (452, 480), False, 'from API_client.python.lib.dataset import dataset\n'), ((500, 533), 'API_client.python.lib.dataset.dataset', 'dataset', (['"""fmi_hirlam_surface"""', 'dh'], {}), "('fmi_hirlam_surface', dh)\n", (507, 533), False, 'from API_client.python.lib.dataset import dataset\n'), ((556, 593), 'API_client.python.lib.dataset.dataset', 'dataset', (['"""metno_harmonie_metcoop"""', 'dh'], {}), "('metno_harmonie_metcoop', dh)\n", (563, 593), False, 'from API_client.python.lib.dataset import dataset\n'), ((597, 663), 'API_client.python.lib.dataset.dataset', 'dataset', (['"""noaa_gfs_pgrb2_global_forecast_recompute_0.25degree"""', 'dh'], {}), "('noaa_gfs_pgrb2_global_forecast_recompute_0.25degree', dh)\n", (604, 663), False, 'from API_client.python.lib.dataset import dataset\n'), ((292, 318), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (310, 318), False, 'import datetime\n'), ((340, 393), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['two_days_ago', '"""%Y-%m-%dT"""'], {}), "(two_days_ago, '%Y-%m-%dT')\n", (366, 393), False, 'import datetime\n')] |
import numpy as np
from scipy.stats import binom
# import modules needed for logging
import logging
import os
logger = logging.getLogger(__name__) # module logger
def cummin(x):
"""A python implementation of the cummin function in R"""
for i in range(1, len(x)):
if x[i-1] < x[i]:
x[i] = x[i-1]
return x
def bh_fdr(pval):
"""A python implementation of the Benjamani-Hochberg FDR method.
This code should always give precisely the same answer as using
p.adjust(pval, method="BH") in R.
Parameters
----------
pval : list or array
list/array of p-values
Returns
-------
pval_adj : np.array
adjusted p-values according the benjamani-hochberg method
"""
pval_array = np.array(pval)
sorted_order = np.argsort(pval_array)
original_order = np.argsort(sorted_order)
pval_array = pval_array[sorted_order]
# calculate the needed alpha
n = float(len(pval))
pval_adj = np.zeros(int(n))
i = np.arange(1, n+1, dtype=float)[::-1] # largest to smallest
pval_adj = np.minimum(1, cummin(n/i * pval_array[::-1]))[::-1]
return pval_adj[original_order]
def frequency_test(mut_of_interest,
total_mut,
residues_of_interest,
residues_at_risk):
"""Perform a binomial test on the frequency of missense mutations within
given pre-defined residues within the gene.
Parameters
----------
mut_of_interest : {list, np.array}
number of mutations that are deemed "of interest"
total_mut : {list, np.array}
total number of mutations
residues_of_interest : {list, np.array}
contains the number of residues of interest for a mutation.
residues_at_risk : {list, np.array}
contains the number of residues at risk for a mutation.
Returns
-------
p_values : np.array
p-value for each gene for binomial test
"""
# initialize input
p_values = np.zeros(len(mut_of_interest))
mut = np.asarray(mut_of_interest)
N = np.asarray(total_mut)
residues_of_interest = np.asarray(residues_of_interest)
residues_at_risk = np.asarray(residues_at_risk, dtype=float)
residues_at_risk[residues_at_risk==0] = np.nan # fill zeros to avoid divide by zero
# calculate the background probability of mutation occurring at
# the residues of interest
P = residues_of_interest.astype(float) / residues_at_risk
# iterate through each gene to calculate p-value
logger.info('Calculating binomial test p-values . . .')
for k in range(len(mut)):
if not np.isnan(P[k]):
p_val = binomial_test(mut[k], N[k], P[k])
else:
# catch case for nan element
p_val = 1.0
p_values[k] = p_val
logger.info('Finished calculating binomial test p-values.')
return p_values
def binomial_test(n, N, P):
"""Perform binomial test on the observed n being higher than expected.
Specifically, N residues are at risk and of those there are n mutations
occurred at the Np residues of interest. Given the background probability of
a mutation at a specific residue, the p-value is calculated as the probability
of observing n or greater mutations. Since N is large and n is small,
it is computationally more efficient to take 1 - Pr(i<=n-1).
Parameters
----------
n : int
number of observed mutations
N : int
number of residues at risk
P : float
background probability that a mutation would occur at a single residue
Returns
-------
pval : np.array
p-value for binomial test
"""
if n <= 0:
return 1.0
pval = binom.sf(n-1, N, P)
return pval
| [
"logging.getLogger",
"numpy.asarray",
"scipy.stats.binom.sf",
"numpy.argsort",
"numpy.array",
"numpy.isnan",
"numpy.arange"
] | [((121, 148), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'import logging\n'), ((763, 777), 'numpy.array', 'np.array', (['pval'], {}), '(pval)\n', (771, 777), True, 'import numpy as np\n'), ((797, 819), 'numpy.argsort', 'np.argsort', (['pval_array'], {}), '(pval_array)\n', (807, 819), True, 'import numpy as np\n'), ((841, 865), 'numpy.argsort', 'np.argsort', (['sorted_order'], {}), '(sorted_order)\n', (851, 865), True, 'import numpy as np\n'), ((2037, 2064), 'numpy.asarray', 'np.asarray', (['mut_of_interest'], {}), '(mut_of_interest)\n', (2047, 2064), True, 'import numpy as np\n'), ((2073, 2094), 'numpy.asarray', 'np.asarray', (['total_mut'], {}), '(total_mut)\n', (2083, 2094), True, 'import numpy as np\n'), ((2122, 2154), 'numpy.asarray', 'np.asarray', (['residues_of_interest'], {}), '(residues_of_interest)\n', (2132, 2154), True, 'import numpy as np\n'), ((2178, 2219), 'numpy.asarray', 'np.asarray', (['residues_at_risk'], {'dtype': 'float'}), '(residues_at_risk, dtype=float)\n', (2188, 2219), True, 'import numpy as np\n'), ((3728, 3749), 'scipy.stats.binom.sf', 'binom.sf', (['(n - 1)', 'N', 'P'], {}), '(n - 1, N, P)\n', (3736, 3749), False, 'from scipy.stats import binom\n'), ((1007, 1039), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {'dtype': 'float'}), '(1, n + 1, dtype=float)\n', (1016, 1039), True, 'import numpy as np\n'), ((2630, 2644), 'numpy.isnan', 'np.isnan', (['P[k]'], {}), '(P[k])\n', (2638, 2644), True, 'import numpy as np\n')] |
# __main__.py
import argparse
import os
from configparser import ConfigParser
from canvas_client.client import Client
from canvas_client import util
#TODO add progress bar
config_path = os.path.join(".", "config.json")
if not os.path.isfile(config_path):
init = util.query_yes_no( "No config file found in the current folder.\n" +\
"Would you like to initialize it now?")
if not init:
print("Please go to a folder with a valid config file\
or initialize one here")
exit(0)
else:
with open(config_path, 'w') as f:
f.write( '{\n' +
'\t"url" : "<canvas_url>",\n'+
'\t"access_token" : "<acces_token>",\n'+
'\t"course_id" : <course_id>,\n'+
'\t"labs" : {\n'+
'\t\t"<1st_assignment_name (choose a name)>: {"assignment_id": <id>},\n'+
'\t\t"<2nd_assignment_name>: {"assignment_id": <id>}\n'+
'\t}\n'+
'}\n')
print("Config file created. Please fill it properly.")
exit(0)
config = util.load_json('./config.json')
labs = list(config['labs'].keys())
parser = argparse.ArgumentParser(description='Canvas grader')
parser.add_argument('lab', choices=labs,
help='The lab which you want to test')
parser.add_argument('-d', '--download_submissions', action='store_true',
help='Download the subissions')
parser.add_argument('-u', '--upload', action='store_true',
help='Upload grades and comments to canvas from excel.')
args = parser.parse_args()
def main():
if args.download_submissions:
ac_grader = Client(args.lab)
ac_grader.download_submissions()
elif args.upload:
ac_grader = Client(args.lab)
#upload grades to canvas
ac_grader.upload_grades_from_excel()
else:
print("No optional argument selected. Pleas select one.")
if __name__ == "__main__":
main() | [
"canvas_client.util.query_yes_no",
"canvas_client.util.load_json",
"argparse.ArgumentParser",
"canvas_client.client.Client",
"os.path.join",
"os.path.isfile"
] | [((203, 235), 'os.path.join', 'os.path.join', (['"""."""', '"""config.json"""'], {}), "('.', 'config.json')\n", (215, 235), False, 'import os\n'), ((1263, 1294), 'canvas_client.util.load_json', 'util.load_json', (['"""./config.json"""'], {}), "('./config.json')\n", (1277, 1294), False, 'from canvas_client import util\n'), ((1343, 1395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Canvas grader"""'}), "(description='Canvas grader')\n", (1366, 1395), False, 'import argparse\n'), ((244, 271), 'os.path.isfile', 'os.path.isfile', (['config_path'], {}), '(config_path)\n', (258, 271), False, 'import os\n'), ((291, 402), 'canvas_client.util.query_yes_no', 'util.query_yes_no', (["('No config file found in the current folder.\\n' +\n 'Would you like to initialize it now?')"], {}), "('No config file found in the current folder.\\n' +\n 'Would you like to initialize it now?')\n", (308, 402), False, 'from canvas_client import util\n'), ((1870, 1886), 'canvas_client.client.Client', 'Client', (['args.lab'], {}), '(args.lab)\n', (1876, 1886), False, 'from canvas_client.client import Client\n'), ((1973, 1989), 'canvas_client.client.Client', 'Client', (['args.lab'], {}), '(args.lab)\n', (1979, 1989), False, 'from canvas_client.client import Client\n')] |
# coding=utf8
from email.header import Header
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.utils import parseaddr, formataddr
import smtplib
from cgtk_config import studio_config
def format_addr(s):
name, addr = parseaddr(s)
return formataddr((
Header(name, 'utf-8').encode(),
addr.encode('utf-8') if isinstance(addr, unicode) else addr))
def send_mail(to_addr, subject_string, body, image=None):
email_cfg = studio_config.get('email')
from_addr = email_cfg['from']
password = email_cfg['password']
smtp_server = email_cfg['smtp_server']
body_string = body
msg_root = MIMEMultipart('related')
msg_root['From'] = format_addr('cgtk-%s<%s>' % (email_cfg["name"], from_addr))
msg_root['To'] = format_addr('user<%s>' % to_addr)
msg_root['Subject'] = Header(subject_string, 'utf-8').encode()
if image is not None:
msg_text = MIMEText(body_string + '<br><img src="cid:image1">', 'html')
msg_root.attach(msg_text)
fp = open(image, 'rb')
msg_image = MIMEImage(fp.read())
fp.close()
msg_image.add_header('Content-ID', '<image1>')
msg_root.attach(msg_image)
else:
msg_text = MIMEText(body_string, 'html')
msg_root.attach(msg_text)
smtp_port = int(email_cfg['port'])
if email_cfg['ssl'] == 'yes':
server = smtplib.SMTP_SSL(smtp_server, smtp_port)
else:
server = smtplib.SMTP(smtp_server, smtp_port)
# server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg_root.as_string())
server.quit()
if __name__ == "__main__":
send_mail("<EMAIL>", "CGTK Test", "Hello, this is a test", image=None)
| [
"smtplib.SMTP",
"smtplib.SMTP_SSL",
"email.utils.parseaddr",
"cgtk_config.studio_config.get",
"email.mime.multipart.MIMEMultipart",
"email.header.Header",
"email.mime.text.MIMEText"
] | [((308, 320), 'email.utils.parseaddr', 'parseaddr', (['s'], {}), '(s)\n', (317, 320), False, 'from email.utils import parseaddr, formataddr\n'), ((531, 557), 'cgtk_config.studio_config.get', 'studio_config.get', (['"""email"""'], {}), "('email')\n", (548, 557), False, 'from cgtk_config import studio_config\n'), ((710, 734), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', (['"""related"""'], {}), "('related')\n", (723, 734), False, 'from email.mime.multipart import MIMEMultipart\n'), ((985, 1045), 'email.mime.text.MIMEText', 'MIMEText', (['(body_string + \'<br><img src="cid:image1">\')', '"""html"""'], {}), '(body_string + \'<br><img src="cid:image1">\', \'html\')\n', (993, 1045), False, 'from email.mime.text import MIMEText\n'), ((1290, 1319), 'email.mime.text.MIMEText', 'MIMEText', (['body_string', '"""html"""'], {}), "(body_string, 'html')\n", (1298, 1319), False, 'from email.mime.text import MIMEText\n'), ((1445, 1485), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['smtp_server', 'smtp_port'], {}), '(smtp_server, smtp_port)\n', (1461, 1485), False, 'import smtplib\n'), ((1513, 1549), 'smtplib.SMTP', 'smtplib.SMTP', (['smtp_server', 'smtp_port'], {}), '(smtp_server, smtp_port)\n', (1525, 1549), False, 'import smtplib\n'), ((899, 930), 'email.header.Header', 'Header', (['subject_string', '"""utf-8"""'], {}), "(subject_string, 'utf-8')\n", (905, 930), False, 'from email.header import Header\n'), ((353, 374), 'email.header.Header', 'Header', (['name', '"""utf-8"""'], {}), "(name, 'utf-8')\n", (359, 374), False, 'from email.header import Header\n')] |
import io
from flask import (
Blueprint,
render_template,
abort,
current_app,
make_response
)
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
client = Blueprint('client', __name__, template_folder='templates', static_url_path='/static')
@client.route('/<int:points>', methods=['GET'])
def home(points):
title = current_app.config['TITLE']
plot = plot_points(points)
return render_template('index.html', title=title, plot=plot)
def plot_points(points):
"""Generate a plot with a varying number of randomly generated points
Args:
points (int): a number of points to plot
Returns: An svg plot with <points> data points
"""
# data for plotting
data = np.random
data = np.random.rand(points, 2)
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.scatter(data[:,0], data[:,1])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(f'There are {points} data points!')
ax.grid(True)
img = io.StringIO()
fig.savefig(img, format='svg')
#clip off the xml headers from the image
svg_img = '<svg' + img.getvalue().split('<svg')[1]
return svg_img
| [
"flask.render_template",
"numpy.random.rand",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"io.StringIO",
"flask.Blueprint"
] | [((261, 351), 'flask.Blueprint', 'Blueprint', (['"""client"""', '__name__'], {'template_folder': '"""templates"""', 'static_url_path': '"""/static"""'}), "('client', __name__, template_folder='templates', static_url_path=\n '/static')\n", (270, 351), False, 'from flask import Blueprint, render_template, abort, current_app, make_response\n'), ((496, 549), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': 'title', 'plot': 'plot'}), "('index.html', title=title, plot=plot)\n", (511, 549), False, 'from flask import Blueprint, render_template, abort, current_app, make_response\n'), ((824, 849), 'numpy.random.rand', 'np.random.rand', (['points', '(2)'], {}), '(points, 2)\n', (838, 849), True, 'import numpy as np\n'), ((861, 869), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (867, 869), False, 'from matplotlib.figure import Figure\n'), ((874, 891), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (886, 891), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((1090, 1103), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1101, 1103), False, 'import io\n')] |
"""
Requirements:
-----------
pyngrok==5.0.5
mlflow==1.15.0
pandas==1.2.3
numpy==1.19.3
scikit-learn==0.24.1
Examples of usege can be found in the url below:
https://nbviewer.jupyter.org/github/abreukuse/ml_utilities/blob/master/examples/experiments_management.ipynb
"""
import os
import mlflow
from pyngrok import ngrok
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
def generate_mlflow_ui():
"""
Creates a remote mlflow user interface with ngrok.
"""
get_ipython().system_raw("mlflow ui --port 5000 &")
ngrok.kill()
ngrok_tunnel = ngrok.connect(addr="5000", proto="http", bind_tls=True)
print("MLflow Tracking UI:", ngrok_tunnel.public_url, end='\n\n')
def __log_metrics(metrics,
metric_name,
y_train,
y_test,
y_estimate_train,
y_estimate_test):
"""
Record a particular metric score in mlflow.
It will be called from the __logging function.
---------------------------------------
Parameters
metrics: Dictionary containing the metrics names as keys and the metrics fucnctions as values.
metrics_name: String representing the name of the metric.
y_train and y_test: A numpy array or pandas series with the true train and test target values.
y_estimate_train and y_estimate_test: A numpy array or pandas series with the predicted train and test target values.
Return four variables representing the metrics names and values.
"""
# metric name
score_name_train = f'train_{metric_name}'
score_name_test = f'test_{metric_name}'
# metric score
score_train = metrics[metric_name](y_train, y_estimate_train)
score_test = metrics[metric_name](y_test, y_estimate_test)
if metric_name == 'rmse':
score_train = np.sqrt(score_train)
score_test = np.sqrt(score_test)
# metric log
mlflow.log_metric(score_name_train, score_train)
mlflow.log_metric(score_name_test, score_test)
return score_name_train, score_train, score_name_test, score_test
def __logging(metrics,
y_train,
y_test,
y_pred_train,
y_pred_test,
y_proba_train=None,
y_proba_test=None):
"""
Creates a dictionary with all the metrics from train and test.
It will be called from the simple_split function.
--------------------------------------------------------
Parameters
metrics: Dictionary containing the metrics names as keys and the metrics fucnctions as values.
y_train and y_test: The true target values from train and test.
y_pred_train and y_pred_test: Array with the estimate results from the algorithms.
y_proba_train and y_proba_test: An array with the probability results from the algorithms.
Returns a dictionary with metrics names and metrics results.
"""
metrics_scores = {}
log_metrics_results = None
for metric_name in metrics.keys():
args = [metrics, metric_name, y_train, y_test]
if metric_name not in ['auc', 'log_loss']:
log_metrics_results = __log_metrics(*args, y_pred_train, y_pred_test)
else:
log_metrics_results = __log_metrics(*args, y_proba_train, y_proba_test)
# Unpacking
score_name_train = log_metrics_results[0]
score_train = log_metrics_results[1]
score_name_test = log_metrics_results[2]
score_test = log_metrics_results[3]
# Store the scores in a dictionary
metrics_scores.setdefault(score_name_train, score_train)
metrics_scores.setdefault(score_name_test, score_test)
return metrics_scores
def data_artifacts(X_train):
"""
Creates and stores data artifacts like a sample of the data, the features and indices.
---------------------------------------------------
Parameter
X_train: The pandas data frame right before it enters the algorithm in the last but one step the pipeline.
"""
os.makedirs('artifacts_temp', exist_ok=True)
features = list(X_train.columns)
indices = list(X_train.index)
with open('artifacts_temp/features.txt', 'w') as features_txt:
features_txt.write(str(features))
with open('artifacts_temp/indices.txt', 'w') as indices_txt:
indices_txt.write(str(indices))
X_train.head(10).to_csv('artifacts_temp/X_train_sample.csv', index=False)
mlflow.log_artifacts('artifacts_temp')
def simple_split(*, task,
pipeline,
X,
y,
test_size,
metrics,
random_state,
inverse=None):
"""
Split the data in train and test sets.
-------------------------------------
Parameters
task: String indicating if it is a 'classification' or 'regression' task.
pipeline: The sklearn pipeline to run.
X: Dataframe with all the variables.
y: Target.
test_size: Size of the test data. It can be a float representing a percentage or an interger.
metrics: Sictionary containing the metrics names as keys and the metrics fucnctions as values.
random_state: Random number generator for the split in data.
inverse: A function with the inverse transformation applied in the target.
Returns a dictionary with metrics names and metrics results.
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
# Get the last but one state of the training data.
if len(pipeline) > 1:
X_train = pipeline[:-1].fit_transform(X_train)
pipeline[-1].fit(X_train, y_train)
else:
pipeline.fit(X_train, y_train)
# Collect data artifacts
data_artifacts(X_train)
if task == 'classification':
y_pred_train, y_pred_test, y_proba_train, y_proba_test = None, None, None, None
allowed_metrics = ['precision','recall','f1_score','accuracy','auc','log_loss']
if not set(metrics.keys()).issubset(allowed_metrics):
raise ValueError(f'Only these metrics are valid: {allowed_metrics}.')
if any(item in allowed_metrics[-2:] for item in metrics.keys()):
y_proba_train = pipeline[-1].predict_proba(X_train)[:,1]
y_proba_test = pipeline.predict_proba(X_test)[:,1]
if any(item in allowed_metrics[:-2] for item in metrics.keys()):
y_pred_train = pipeline[-1].predict(X_train)
y_pred_test = pipeline.predict(X_test)
metrics_scores = __logging(metrics,
y_train,
y_test,
y_pred_train,
y_pred_test,
y_proba_train,
y_proba_test)
elif task == 'regression':
y_pred_train = pipeline[-1].predict(X_train)
y_pred_test = pipeline.predict(X_test)
if inverse:
targets = [y_train, y_test, y_pred_train, y_pred_test]
y_train, y_test, y_pred_train, y_pred_test = [inverse(target) for target in targets]
allowed_metrics = ['rmse','mae','mape','msle','r2']
if not set(metrics.keys()).issubset(allowed_metrics):
raise ValueError(f'Only these metrics are valid: {allowed_metrics}.')
metrics_scores = __logging(metrics,
y_train,
y_test,
y_pred_train,
y_pred_test)
return metrics_scores
def cross_validation(*, task,
pipeline,
X,
y,
cv_method,
metrics,
inverse=None):
"""
Performs cross validation.
-------------------------
Parameters
pipeline: The sklearn pipeline to run.
X: Dataframe with all the variables.
y: target.
cv_method: When 'cross_validation' is chose. A callble from sklearn e.g {KFold, StratifiedKFold}
metrics: Dictionary containing the metrics names as keys and the metrics fucnctions as values.
inverse: A function with the inverse transformation applied in the target.
Returns a dictionary with metrics names and metrics results.
"""
metrics_scores = {}
X_train = None
splits = cv_method
for metric_name, metric in metrics.items():
for train_index, test_index in splits.split(X, y):
# split
X_train, y_train = X.loc[train_index], y.loc[train_index]
X_test, y_test = X.loc[test_index], y.loc[test_index]
# training
if len(pipeline) > 1:
X_train = pipeline[:-1].fit_transform(X_train)
pipeline[-1].fit(X_train, y_train)
else:
pipeline.fit(X_train, y_train)
# predict
if metric_name in ['auc', 'log_loss']:
y_pred_train = pipeline[-1].predict_proba(X_train)[:,1]
y_pred_test = pipeline.predict_proba(X_test)[:,1]
else:
y_pred_train = pipeline[-1].predict(X_train)
y_pred_test = pipeline.predict(X_test)
# inverse tranformation for target if needed
if task == 'regression' and inverse:
targets = [y_train, y_test, y_pred_train, y_pred_test]
y_train, y_test, y_pred_train, y_pred_test = [inverse(target) for target in targets]
# compute score
score_train = metrics[metric_name](y_train, y_pred_train)
score_test = metrics[metric_name](y_test, y_pred_test)
if metric_name == 'rmse':
score_train = np.sqrt(score_train)
score_test = np.sqrt(score_test)
metrics_scores.setdefault(f'train_{metric_name}', []).append(score_train)
metrics_scores.setdefault(f'test_{metric_name}', []).append(score_test)
# log
for metric_name, scores in metrics_scores.items():
mlflow.log_metric(metric_name, np.mean(scores))
metrics_scores[metric_name] = np.mean(scores)
# Collect data artifacts from the last fold
data_artifacts(X_train)
return metrics_scores
def experiment_manager(task,
pipeline, X, y,
runs,
validation,
hyperparameters,
metrics,
random_state=0,
remote_ui=False,
**kwargs):
"""
This function runs experiments and records the results.
-----------------------------------------------------
Parameters
task: String indicating if it is a 'classification' or 'regression' task.
pipeline: The sklearn pipeline to run.
X: Dataframe with all the variables.
y: Target.
validation: 'simple_split' or 'cross_validation'.
hyperparameters: A function returning a dictionary with all the hyperparameters names as keys
and range values to be tested in each algorithm as values.
metrics: Dictionary containing the metrics names as keys and the metrics fucnctions as values.
Metrics names allowed are: For classification {'precision', 'recall', 'f1_score', 'accuracy', 'auc', 'log_loss'}.
For regression {'rmse', 'mae', 'mape', 'msle', 'r2'}.
random_state: Random number generator for the split in data.
remote_ui: Interact with mlflow inerface remotely or locally. Set 'True' if you are using google colab or other remote platform.
available kwargs: run_label -> For optional labelling in the run name.
test_size -> When 'simple_split' is chose. Float for the size of the test set.
cv_method -> When 'cross_validation' is chose. A callble from sklearn e.g {KFold, StratifiedKFold}
inverse -> A function with the inverse transformation applied in the target.
"""
experiment_name = pipeline[-1].__class__.__name__
mlflow.set_experiment(experiment_name=experiment_name)
experiment = mlflow.get_experiment_by_name(experiment_name)
print(f"Experiment Name: {experiment.name}")
print(f"Experiment_id: {experiment.experiment_id}", end='\n\n')
for run in range(runs):
optional_run_label = kwargs.get('run_label') if kwargs.get('run_label') != None else ''
with mlflow.start_run(run_name=f'Run: {run+1}{optional_run_label}'):
# log hyperpatameters
for hyperpatameter_name, hyperpatameter in hyperparameters().items():
mlflow.log_param(hyperpatameter_name.split('__')[1], hyperpatameter)
# training
pipeline.set_params(**hyperparameters())
# simple split
if validation == 'simple_split':
mlflow.set_tag('random_state_split', random_state)
mlflow.set_tag('test_size', kwargs['test_size'])
metrics_scores = simple_split(task=task,
pipeline=pipeline,
X=X,
y=y,
test_size=kwargs['test_size'],
metrics=metrics,
random_state=random_state,
inverse=kwargs.get('inverse'))
# cross validation
elif validation == 'cross_validation':
mlflow.set_tag('cv', kwargs['cv_method'])
metrics_scores = cross_validation(task=task,
pipeline=pipeline,
X=X,
y=y,
cv_method=kwargs['cv_method'],
metrics=metrics,
inverse=kwargs.get('inverse'))
# Print results
print(f'Run {run+1}', end='\n\n')
print('HYPERPARAMETERS')
for key, value in hyperparameters().items():
print(f'{key[key.find("__")+2:]}: {value}')
print()
print('SCORES')
for key, value in metrics_scores.items():
print(f'{key}: {np.round(value, 3)}')
print()
# mlflow user interface
if remote_ui == True:
return generate_mlflow_ui()
elif remote_ui == False:
print('Type "mlflow ui" in your terminal in order to interact with mlflow user interface.', end='\n\n')
| [
"numpy.mean",
"mlflow.set_tag",
"numpy.sqrt",
"os.makedirs",
"pyngrok.ngrok.kill",
"sklearn.model_selection.train_test_split",
"mlflow.log_metric",
"mlflow.set_experiment",
"mlflow.get_experiment_by_name",
"mlflow.log_artifacts",
"pyngrok.ngrok.connect",
"mlflow.start_run",
"numpy.round"
] | [((574, 586), 'pyngrok.ngrok.kill', 'ngrok.kill', ([], {}), '()\n', (584, 586), False, 'from pyngrok import ngrok\n'), ((606, 661), 'pyngrok.ngrok.connect', 'ngrok.connect', ([], {'addr': '"""5000"""', 'proto': '"""http"""', 'bind_tls': '(True)'}), "(addr='5000', proto='http', bind_tls=True)\n", (619, 661), False, 'from pyngrok import ngrok\n'), ((1942, 1990), 'mlflow.log_metric', 'mlflow.log_metric', (['score_name_train', 'score_train'], {}), '(score_name_train, score_train)\n', (1959, 1990), False, 'import mlflow\n'), ((1995, 2041), 'mlflow.log_metric', 'mlflow.log_metric', (['score_name_test', 'score_test'], {}), '(score_name_test, score_test)\n', (2012, 2041), False, 'import mlflow\n'), ((4069, 4113), 'os.makedirs', 'os.makedirs', (['"""artifacts_temp"""'], {'exist_ok': '(True)'}), "('artifacts_temp', exist_ok=True)\n", (4080, 4113), False, 'import os\n'), ((4510, 4548), 'mlflow.log_artifacts', 'mlflow.log_artifacts', (['"""artifacts_temp"""'], {}), "('artifacts_temp')\n", (4530, 4548), False, 'import mlflow\n'), ((5528, 5598), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (5544, 5598), False, 'from sklearn.model_selection import train_test_split\n'), ((12318, 12372), 'mlflow.set_experiment', 'mlflow.set_experiment', ([], {'experiment_name': 'experiment_name'}), '(experiment_name=experiment_name)\n', (12339, 12372), False, 'import mlflow\n'), ((12390, 12436), 'mlflow.get_experiment_by_name', 'mlflow.get_experiment_by_name', (['experiment_name'], {}), '(experiment_name)\n', (12419, 12436), False, 'import mlflow\n'), ((1858, 1878), 'numpy.sqrt', 'np.sqrt', (['score_train'], {}), '(score_train)\n', (1865, 1878), True, 'import numpy as np\n'), ((1900, 1919), 'numpy.sqrt', 'np.sqrt', (['score_test'], {}), '(score_test)\n', (1907, 1919), True, 'import numpy as np\n'), ((10356, 10371), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10363, 10371), True, 'import numpy as np\n'), ((10301, 10316), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10308, 10316), True, 'import numpy as np\n'), ((12693, 12757), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': 'f"""Run: {run + 1}{optional_run_label}"""'}), "(run_name=f'Run: {run + 1}{optional_run_label}')\n", (12709, 12757), False, 'import mlflow\n'), ((9955, 9975), 'numpy.sqrt', 'np.sqrt', (['score_train'], {}), '(score_train)\n', (9962, 9975), True, 'import numpy as np\n'), ((10005, 10024), 'numpy.sqrt', 'np.sqrt', (['score_test'], {}), '(score_test)\n', (10012, 10024), True, 'import numpy as np\n'), ((13137, 13187), 'mlflow.set_tag', 'mlflow.set_tag', (['"""random_state_split"""', 'random_state'], {}), "('random_state_split', random_state)\n", (13151, 13187), False, 'import mlflow\n'), ((13204, 13252), 'mlflow.set_tag', 'mlflow.set_tag', (['"""test_size"""', "kwargs['test_size']"], {}), "('test_size', kwargs['test_size'])\n", (13218, 13252), False, 'import mlflow\n'), ((13872, 13913), 'mlflow.set_tag', 'mlflow.set_tag', (['"""cv"""', "kwargs['cv_method']"], {}), "('cv', kwargs['cv_method'])\n", (13886, 13913), False, 'import mlflow\n'), ((14713, 14731), 'numpy.round', 'np.round', (['value', '(3)'], {}), '(value, 3)\n', (14721, 14731), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from . import data
# Repositories section
@pytest.mark.parametrize('value', data.NOT_A_DICT)
def test_repositories_section_syntax_type(parser, value):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories: {{ value }}
""", dict(value=value)).assert_failure()
def test_repositories_section_syntax_empty(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories: {}
""").assert_success()
# Repository
@pytest.mark.parametrize('value', data.NOT_A_DICT_OR_STRING)
def test_repository_syntax_type(parser, value):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository: {{ value }}
""", dict(value=value)).assert_failure()
def test_repository_syntax_unsupported(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
unsupported: {}
""").assert_failure()
def test_repository_syntax_empty(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository: {} # "url" is required
""").assert_failure()
# Description
@pytest.mark.parametrize('value', data.NOT_A_STRING)
def test_repository_description_syntax_type(parser, value):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
description: {{ value }}
""", dict(value=value)).assert_failure()
def test_repository_description(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
description: a description
""").assert_success()
# URL
@pytest.mark.parametrize('value', data.NOT_A_STRING)
def test_repository_url_syntax_type(parser, value):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: {{ value }}
""", dict(value=value)).assert_failure()
def test_repository_url_short_form(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository: a url
""").assert_success()
# Credential
@pytest.mark.parametrize('value', data.NOT_A_DICT)
def test_repository_credential_syntax_type(parser, value):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
credential: {{ value }}
""", dict(value=value), import_profile=True).assert_failure()
def test_repository_credential_syntax_unsupported(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
credential:
unsupported: {}
""", import_profile=True).assert_failure()
def test_repository_credential_empty(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
credential: {}
""", import_profile=True).assert_success()
def test_repository_credential_full(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
my_repository:
url: a url
credential:
protocol: a protocol
token_type: a token type
token: a token
keys:
key1: value1
key2: value2
user: a user
""", import_profile=True).assert_success()
# Unicode
def test_repository_unicode(parser):
parser.parse_literal("""
tosca_definitions_version: tosca_simple_yaml_1_0
repositories:
知識庫:
url: 網址
description: 描述
credential:
protocol: 協議
token_type: 類型
token: 代幣
keys:
鍵一: 值
鍵二: 值
user: 用戶
""", import_profile=True).assert_success()
| [
"pytest.mark.parametrize"
] | [((867, 916), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', 'data.NOT_A_DICT'], {}), "('value', data.NOT_A_DICT)\n", (890, 916), False, 'import pytest\n'), ((1308, 1367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', 'data.NOT_A_DICT_OR_STRING'], {}), "('value', data.NOT_A_DICT_OR_STRING)\n", (1331, 1367), False, 'import pytest\n'), ((2010, 2061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', 'data.NOT_A_STRING'], {}), "('value', data.NOT_A_STRING)\n", (2033, 2061), False, 'import pytest\n'), ((2546, 2597), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', 'data.NOT_A_STRING'], {}), "('value', data.NOT_A_STRING)\n", (2569, 2597), False, 'import pytest\n'), ((3021, 3070), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', 'data.NOT_A_DICT'], {}), "('value', data.NOT_A_DICT)\n", (3044, 3070), False, 'import pytest\n')] |
#!/usr/bin/env python3.6
import random
from account import Credentials
from account import User
def create_account(users_name,username,password):
"""
function to create new credentials for a new account
"""
new_account = User(users_name)
new_account = Credentials(username,password)
return new_account
def save_credentials(credentials):
"""
function that saves the user's credentials
"""
credentials.save_credentials()
def del_credentials(credentials):
"""
function that deletes credentials
"""
credentials.del_contact()
def find_credentials(username):
"""
function that finds credentials by username and returns all credentias
"""
return Credentials.find_by_username(username)
def display_credentials():
"""
function that returns all saved credentials
"""
return Credentials.display_credentials()
def copy_credentials(Credentials):
"""
function that return copied credentials, those in clipboard
"""
return Credentials.copy_credentials()
def main():
print("Hello, create a new account. What is your name?")
username = (input())
print(f"Hey {username}, what account would you like to create?")
print('\n')
while True:
print("Use these short codes: ca-create a new account, dc-display credentials, fc-find credentials, ex-exist account_list ")
short_code = input().lower()
if short_code == 'ca':
print("New Account")
print("-"*10)
print("User's_name")
users_name = input()
print("username")
username = input()
print("would you want a generated password? y/N")
answer = input()
if answer == 'y':
chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789~!@#$%^&**()_+=+'
length= int(input('password length?'))
password=''
for i in range(length):
password += random.choice(chars)
print(password)
else:
print("password")
password=input()
save_credentials(create_account(users_name,username,password))
print('\n')
elif short_code == 'dc':
if display_credentials():
print("here is a list of account usernames and passwords")
print('\n')
for credentials in display_credentials():
print(f"username: {credentials.username} \npassword: {credentials.password}")
print('\n')
else:
print("You don't seem to have any accounts yet")
print('\n')
elif short_code == 'fc':
print("Enter the username you want to search for")
search_username = input()
if find_credentials(search_username):
search_credentials = find_credentials(search_username)
print(f"Username {search_credentials.username}")
print(f"Password {search_credentials.password}")
else: print("that username does not exist")
elif short_code == 'ex':
print(f"Bye {username}")
break
else: print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main()
| [
"account.Credentials.display_credentials",
"account.Credentials",
"random.choice",
"account.User",
"account.Credentials.copy_credentials",
"account.Credentials.find_by_username"
] | [((239, 255), 'account.User', 'User', (['users_name'], {}), '(users_name)\n', (243, 255), False, 'from account import User\n'), ((274, 305), 'account.Credentials', 'Credentials', (['username', 'password'], {}), '(username, password)\n', (285, 305), False, 'from account import Credentials\n'), ((716, 754), 'account.Credentials.find_by_username', 'Credentials.find_by_username', (['username'], {}), '(username)\n', (744, 754), False, 'from account import Credentials\n'), ((858, 891), 'account.Credentials.display_credentials', 'Credentials.display_credentials', ([], {}), '()\n', (889, 891), False, 'from account import Credentials\n'), ((1019, 1049), 'account.Credentials.copy_credentials', 'Credentials.copy_credentials', ([], {}), '()\n', (1047, 1049), False, 'from account import Credentials\n'), ((2020, 2040), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (2033, 2040), False, 'import random\n')] |
#//////////////#####///////////////
#
# ANU u6325688 <NAME>
# Supervisor: Dr.<NAME>
#//////////////#####///////////////
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.utils.data
import numpy as np
from GAIL.Discriminator import Discriminator1D
from GAIL.Generator import Generator1D
from GAIL.PPO import PPO
from commons.DataInfo import DataInfo
import gym
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
cudnn.benchmark = True
if torch.cuda.is_available():
map_location=lambda storage, loc: storage.cuda()
else:
map_location='cpu'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class GAIL():
def __init__(self,dataInfo:DataInfo, resultPath)-> None:
self.learnRate = 0.0005
self.entropyBeta = 0.001
self.lossCriterion = nn.BCELoss()
self.dataInfo = dataInfo
self.resultPath = resultPath
self.generator = None
self.generatorOptim = None
self.discriminator = None
self.discriminatorOptim = None
self.datatype = 0
self.lastActions = []
self.env = gym.make(dataInfo.gameName)
self.ppo = None
self.ppoExp = None
#Graphs
self.rwdCounter = []
self.genCounter = []
self.disCounter = []
self.entCounter = []
self.enableOnPolicy = True
def setUpGail(self):
self.generator = Generator1D(self.dataInfo).to(device)
self.generatorOptim = torch.optim.Adam(self.generator.parameters(), lr=self.learnRate)
self.discriminator = Discriminator1D(self.dataInfo).to(device)
self.discriminatorOptim = torch.optim.Adam(self.discriminator.parameters(), lr=self.learnRate)
self.ppoExp = PPO(self.generator,self.learnRate)
def getAction(self,state):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.generator(state).cpu().data.numpy().flatten()
def makeDisInput(self, state, action):
output = action.view(action.shape[0],1)
output = output.type(torch.FloatTensor).to(device)
return torch.cat((state,output),1)
def getGraph(self):
if len(self.rwdCounter) > 0:
plt.plot(range(len(self.rwdCounter)), self.rwdCounter, linestyle='-', marker="X")
plt.xlabel("Iteration")
plt.ylabel("Rewards")
plt.title("GAIL for {}-{} AverageReward={}[{},{}]".format(self.dataInfo.gameName, "LocState", \
str(sum(self.rwdCounter) / len(self.rwdCounter)), \
str(min(self.rwdCounter)),
str(max(self.rwdCounter))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainRwd.png")
plt.close("all")
plt.plot(range(len(self.genCounter)), self.genCounter, linestyle='-')
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.title("GAIL-Generator Loss for {}-{}[{},{}]".format(self.dataInfo.gameName, \
"LocState", \
str(round(min(self.genCounter).item(), 5)), \
str(round(max(self.genCounter).item(), 5))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainGenLoss.png")
plt.close("all")
plt.plot(range(len(self.disCounter)), self.disCounter, linestyle='-')
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.title("GAIL-Discriminator Loss for {}-{}[{},{}]".format(self.dataInfo.gameName, \
"LocState",
str(round(min(self.disCounter).item(), 5)), \
str(round(max(self.disCounter).item(), 5))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainDisLoss.png")
plt.close("all")
plt.plot(range(len(self.entCounter)), self.entCounter, linestyle='-')
plt.xlabel("Batch")
plt.ylabel("Entropy")
plt.title("GAIL Entropy for {}-{}[{},{}]".format(self.dataInfo.gameName, "LocState", \
str(round(min(self.entCounter).item(), 5)), \
str(round(max(self.entCounter).item(), 5))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainEntropy.png")
plt.close("all")
def updateModel(self):
for batchIndex in range(len(self.dataInfo.expertState)):
#read experts' state
batch = self.dataInfo.expertState[batchIndex].size
exp_action = np.zeros((batch, 1))
exp_reward = np.zeros((batch,1))
exp_done = np.zeros((batch,1)) #asume all "not done"
exp_done = (exp_done==0) #Return False for all
exp_state = np.zeros((batch, self.dataInfo.locateShape)) #Location
for j in range(batch):
exp_state[j] = self.dataInfo.expertLocation[batchIndex][j] #Location
exp_action[j] = self.dataInfo.expertAction[batchIndex][j]
exp_state = normalize(exp_state)
exp_state = (torch.from_numpy(exp_state)).type(torch.FloatTensor).to(device)
# exp_state = torch.unsqueeze(exp_state, 0)
exp_action = (torch.from_numpy(exp_action)).type(torch.FloatTensor).to(device)
print("Batch: {}\t generating {} fake data...".format(str(batchIndex), str(batch)))
#Generate action
fake_actionDis, fake_action, fake_entroP = self.generator(exp_state)
exp_score = (self.generator.criticScore).detach()
# Initialise Discriminator
self.discriminatorOptim.zero_grad()
#Train Discriminator with fake(s,a) & expert(s,a)
detach_fake_action = fake_action.detach()
fake_input = self.makeDisInput(exp_state, detach_fake_action)
exp_input = self.makeDisInput(exp_state, exp_action)
print("Calculating loss...")
fake_label = torch.full((batch, 1), 0, device=device)
exp_label = torch.full((batch, 1), 1, device=device)
fake_loss = self.discriminator(fake_input)
fake_loss = self.lossCriterion(fake_loss, fake_label)
exp_loss = self.discriminator(exp_input)
exp_loss = self.lossCriterion(exp_loss, exp_label)
#Update Discriminator based on loss gradient
loss = (fake_loss+exp_loss)-self.entropyBeta*fake_entroP.detach().mean()
loss.backward()
self.discriminatorOptim.step()
#Get PPO Loss
print("PPO....")
exp_state = (Variable(exp_state).data).cpu().numpy() #convert to numpy
exp_action = (Variable(exp_action).data).cpu().numpy()
exp_score = (Variable(exp_score).data).cpu().numpy()
self.ppoExp = PPO(self.generator, self.generatorOptim)
self.ppoExp.importExpertData(exp_state,exp_action,exp_reward,exp_score,exp_done,fake_actionDis)
state, generatorLoss, entropy = self.ppoExp.optimiseGenerator1D()
if torch.isnan(entropy) or loss==0:
break
self.generator.load_state_dict(state)
self.genCounter.append(generatorLoss)
self.disCounter.append(loss)
self.entCounter.append(entropy)
print("--DisLoss {}-- --GenLoss {} --Entropy {}".format(str(loss.detach()), \
str(generatorLoss), str(entropy)))
del self.ppoExp
def train(self, numIteration, enableOnPolicy):
self.enableOnPolicy = str(enableOnPolicy)
for i in range(numIteration):
print("-----------------------Iteration {}------------------------------".format(str(i)))
# GAIL
self.dataInfo.shuffle()
self.dataInfo.sampleData()
self.updateModel()
self.ppo = PPO(self.generator, self.generatorOptim)
self.ppo.tryEnvironment1D()
self.rwdCounter.append(self.ppo.totalReward)
if enableOnPolicy == True:
#PPO
state, loss, entropy = self.ppo.optimiseGenerator1D()
if torch.isnan(entropy) or loss==0:
del self.ppo
continue
else:
self.generator.load_state_dict(state)
del self.ppo
self.getGraph()
def save(self, path, type):
torch.save(self.generator.state_dict(), '{}/{}_generator.pth'.format(path,type))
torch.save(self.discriminator.state_dict(), '{}/{}_discriminator.pth'.format(path,type))
def load(self, path, type):
self.generator.load_state_dict(torch.load('{}/{}_generator.pth'.format(path,type),map_location=map_location))
self.discriminator.load_state_dict(torch.load('{}/{}_discriminator.pth'.format(path,type),map_location=map_location))
| [
"matplotlib.pyplot.ylabel",
"torch.full",
"matplotlib.pyplot.xlabel",
"GAIL.Generator.Generator1D",
"torch.from_numpy",
"matplotlib.pyplot.close",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"numpy.zeros",
"GAIL.PPO.PPO",
"GAIL.Discriminator.Discriminator1D",
"sklearn.preprocessing.normaliz... | [((598, 623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (621, 623), False, 'import torch\n'), ((741, 766), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (764, 766), False, 'import torch\n'), ((950, 962), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (960, 962), True, 'import torch.nn as nn\n'), ((1252, 1279), 'gym.make', 'gym.make', (['dataInfo.gameName'], {}), '(dataInfo.gameName)\n', (1260, 1279), False, 'import gym\n'), ((1882, 1917), 'GAIL.PPO.PPO', 'PPO', (['self.generator', 'self.learnRate'], {}), '(self.generator, self.learnRate)\n', (1885, 1917), False, 'from GAIL.PPO import PPO\n'), ((2248, 2277), 'torch.cat', 'torch.cat', (['(state, output)', '(1)'], {}), '((state, output), 1)\n', (2257, 2277), False, 'import torch\n'), ((3135, 3154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (3145, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3181), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3173, 3181), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3685), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3678, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3792), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (3783, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3801, 3819), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3811, 3819), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4337), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4330, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4425, 4444), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (4435, 4444), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entropy"""'], {}), "('Entropy')\n", (4463, 4474), True, 'import matplotlib.pyplot as plt\n'), ((4875, 4891), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4884, 4891), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2454, 2467), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rewards"""'], {}), "('Rewards')\n", (2490, 2501), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3047), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3040, 3047), True, 'import matplotlib.pyplot as plt\n'), ((5107, 5127), 'numpy.zeros', 'np.zeros', (['(batch, 1)'], {}), '((batch, 1))\n', (5115, 5127), True, 'import numpy as np\n'), ((5153, 5173), 'numpy.zeros', 'np.zeros', (['(batch, 1)'], {}), '((batch, 1))\n', (5161, 5173), True, 'import numpy as np\n'), ((5196, 5216), 'numpy.zeros', 'np.zeros', (['(batch, 1)'], {}), '((batch, 1))\n', (5204, 5216), True, 'import numpy as np\n'), ((5322, 5366), 'numpy.zeros', 'np.zeros', (['(batch, self.dataInfo.locateShape)'], {}), '((batch, self.dataInfo.locateShape))\n', (5330, 5366), True, 'import numpy as np\n'), ((5597, 5617), 'sklearn.preprocessing.normalize', 'normalize', (['exp_state'], {}), '(exp_state)\n', (5606, 5617), False, 'from sklearn.preprocessing import normalize\n'), ((6533, 6573), 'torch.full', 'torch.full', (['(batch, 1)', '(0)'], {'device': 'device'}), '((batch, 1), 0, device=device)\n', (6543, 6573), False, 'import torch\n'), ((6598, 6638), 'torch.full', 'torch.full', (['(batch, 1)', '(1)'], {'device': 'device'}), '((batch, 1), 1, device=device)\n', (6608, 6638), False, 'import torch\n'), ((7387, 7427), 'GAIL.PPO.PPO', 'PPO', (['self.generator', 'self.generatorOptim'], {}), '(self.generator, self.generatorOptim)\n', (7390, 7427), False, 'from GAIL.PPO import PPO\n'), ((8481, 8521), 'GAIL.PPO.PPO', 'PPO', (['self.generator', 'self.generatorOptim'], {}), '(self.generator, self.generatorOptim)\n', (8484, 8521), False, 'from GAIL.PPO import PPO\n'), ((1551, 1577), 'GAIL.Generator.Generator1D', 'Generator1D', (['self.dataInfo'], {}), '(self.dataInfo)\n', (1562, 1577), False, 'from GAIL.Generator import Generator1D\n'), ((1714, 1744), 'GAIL.Discriminator.Discriminator1D', 'Discriminator1D', (['self.dataInfo'], {}), '(self.dataInfo)\n', (1729, 1744), False, 'from GAIL.Discriminator import Discriminator1D\n'), ((7629, 7649), 'torch.isnan', 'torch.isnan', (['entropy'], {}), '(entropy)\n', (7640, 7649), False, 'import torch\n'), ((8770, 8790), 'torch.isnan', 'torch.isnan', (['entropy'], {}), '(entropy)\n', (8781, 8790), False, 'import torch\n'), ((5643, 5670), 'torch.from_numpy', 'torch.from_numpy', (['exp_state'], {}), '(exp_state)\n', (5659, 5670), False, 'import torch\n'), ((5788, 5816), 'torch.from_numpy', 'torch.from_numpy', (['exp_action'], {}), '(exp_action)\n', (5804, 5816), False, 'import torch\n'), ((7171, 7190), 'torch.autograd.Variable', 'Variable', (['exp_state'], {}), '(exp_state)\n', (7179, 7190), False, 'from torch.autograd import Variable\n'), ((7255, 7275), 'torch.autograd.Variable', 'Variable', (['exp_action'], {}), '(exp_action)\n', (7263, 7275), False, 'from torch.autograd import Variable\n'), ((7321, 7340), 'torch.autograd.Variable', 'Variable', (['exp_score'], {}), '(exp_score)\n', (7329, 7340), False, 'from torch.autograd import Variable\n')] |
"""
Lexers
======
Additional Lexers not included in Pygments
"""
import re
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.javascript import JavascriptLexer
from pygments.token import Generic
# =============================================================================
line_re = re.compile('.*?\n')
# =============================================================================
class NodeConsoleLexer(Lexer):
"""
For parsing JavaScript within an interactive Node.js shell, such as:
.. sourcecode:: nodejs
> let a = 3
undefined
> a
3
> let b = '4'
undefined
> b
'4'
> b == a
false
"""
name = 'JavaScript Node.js console session'
aliases = ['nodejs']
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript']
def get_tokens_unprocessed(self, text):
jslexer = JavascriptLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:2])]))
curcode += line[2:]
elif line.startswith('...'):
# node does a nested ... thing depending on depth
code = line.lstrip('.')
lead = len(line) - len(code)
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:lead])]))
curcode += code
else:
if curcode:
yield from do_insertions(insertions,
jslexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
yield from do_insertions([],
jslexer.get_tokens_unprocessed(line))
if curcode:
yield from do_insertions(insertions,
jslexer.get_tokens_unprocessed(curcode))
| [
"pygments.lexers.javascript.JavascriptLexer",
"re.compile"
] | [((308, 327), 're.compile', 're.compile', (['""".*?\n"""'], {}), "('.*?\\n')\n", (318, 327), False, 'import re\n'), ((979, 1010), 'pygments.lexers.javascript.JavascriptLexer', 'JavascriptLexer', ([], {}), '(**self.options)\n', (994, 1010), False, 'from pygments.lexers.javascript import JavascriptLexer\n')] |
from utils import *
from block_descriptor import *
from Crypto.Cipher import AES
import hashlib
import cStringIO
import gzip
import json
import gzip_mod
import os
class Image:
def __init__(self, image_data, read=True):
self.stream = cStringIO.StringIO(image_data)
self.stream_len = len(image_data)
if read:
self.readHeader()
def getBody(self):
cur_pos = self.stream.tell()
self.stream.seek(0xA0)
body = self.stream.read()
self.stream.seek(cur_pos)
return body
def readHeader(self):
# Header size appears to universally be 0xA0 (160) bytes, with 32 bytes alloted per field.
raise Exception('readHeader not implemented for class %s!' % self.__class__.__name__)
def validateType(self):
raise Exception('No validateType implemented for class %s!' % self.__class__.__name__)
def getKeyPair(self):
raise Exception('No getKeyPair implemented for class %s' % self.__class__.__name__)
def decryptImage(self):
# Firmware images for Sercomm devices appear to universally use AES 256 in CBC mode.
if not hasattr(self, 'filesize'):
raise Exception('decryptImage called before readHeader!')
key_pair = self.getKeyPair()
aes = AES.new(key=key_pair['key'], mode=AES.MODE_CBC, IV=key_pair['iv'])
cur_pos = self.stream.tell()
# Seek past the header (remember, always 160 bytes!)
self.stream.seek(0xA0)
plaintext_body = aes.decrypt(self.stream.read())
return plaintext_body[:self.filesize]
class Stage2(Image):
def readHeader(self):
self.device_header = self.stream.read(128)
self.image_digest = self.stream.read(32)
self.blocks = []
def validateType(self):
if not hasattr(self, 'device_header'):
raise Exception('validateType called before readHeader!')
digest = hashlib.new('sha256')
digest.update(self.getBody())
return digest.digest() == self.image_digest
def extractHeader(self):
if not hasattr(self, 'device_header'):
raise Exception('extractHeader called before readHeader!')
try:
open('dev_hdr.bin', 'wb').write(self.device_header)
except IOError:
print('[-] Failed to write device header to file!')
def extractBlocks(self):
cur_pos = self.stream.tell()
self.stream.seek(0xA0)
gzip_stream = gzip.GzipFile(fileobj=self.stream, mode='rb')
while True:
block_name = unnullpad_str(gzip_stream.read(32))
if not block_name:
break
payload_size = int(unnullpad_str(gzip_stream.read(32)))
block_version = unnullpad_str(gzip_stream.read(32))
gzip_stream.read(32) # Padding?
try:
file_name = '%s_%s.bin' % (block_name, block_version)
open(file_name, 'wb').write(gzip_stream.read(payload_size))
self.blocks.append(BlockDescriptor(block_name, block_version, file_name))
print('[+] Wrote block %s version %s to file!' % (block_name, block_version))
except IOError:
print('[-] Failed to write block %s to file!' % block_name)
self.stream.seek(cur_pos)
def readManifest(self):
self.blocks = []
manifest_data = json.loads(open('manifest.json', 'rb').read())
if 'blocks' not in manifest_data:
raise Exception('Invalid firmware manifest provided!')
for block in manifest_data['blocks']:
self.blocks.append(BlockDescriptor(block['block_name'], block['block_version'], block['block_filename']))
def writeManifest(self):
block_manifests = []
for block in self.blocks:
block_manifests.append(block.asDict())
manifest_data = dict(blocks=block_manifests)
try:
open('manifest.json', 'wb').write(json.dumps(manifest_data))
except IOError:
print('[-] Failed to write manifest to file!')
def createImage(self):
self.stream = cStringIO.StringIO()
content_stream = cStringIO.StringIO()
gzip_wrapper = gzip_mod.GzipFile(filename = None, mode = 'wb', fileobj = content_stream, compresslevel = 6)
for block in self.blocks:
print('[+] Writing block with name %s and version %s to stream...' % (block.block_name, block.block_version))
gzip_wrapper.write(nullpad_str(block.block_name, 32))
gzip_wrapper.write(nullpad_str(str(os.path.getsize(block.block_filename)), 32))
gzip_wrapper.write(nullpad_str(block.block_version, 32))
gzip_wrapper.write('\x00' * 32) # Padding
gzip_wrapper.write(open(block.block_filename, 'rb').read())
gzip_wrapper.close()
content_stream.seek(0)
body_digest = hashlib.new('sha256')
body_digest.update(content_stream.read())
content_stream.seek(0)
self.stream.write(open('dev_hdr.bin', 'rb').read())
self.stream.write(body_digest.digest())
self.stream.write(content_stream.read())
self.stream.seek(0)
self.readHeader()
assert self.validateType() # Make sure we can pass our own validation checks
self.stream.seek(0)
return self.stream.read()
class Type1(Image):
def readHeader(self):
self.nullpad = self.stream.read(32)
self.fw_version = unnullpad_str(self.stream.read(32))
self.iv = self.stream.read(32)
self.nullpad2 = self.stream.read(32)
self.filesize = int(unnullpad_str(self.stream.read(32)))
assert self.stream.tell() == 0xA0
def validateType(self):
return self.nullpad == ('\x00' * 32) and self.nullpad2 == ('\x00' * 32)
def getKeyPair(self):
if not hasattr(self, 'fw_version'):
raise Exception('validateType called before readHeader!')
digest_1 = hashlib.new('md5')
digest_1.update(self.nullpad2)
digest_1.update(nullpad_str(self.fw_version, 32))
digest_2 = hashlib.new('md5')
digest_2.update(nullpad_str(str(self.filesize), 32))
digest_2.update(nullpad_str(self.fw_version, 32))
key = digest_1.digest() + digest_2.digest()
return dict(key=key, iv=self.iv[:16])
def createImage(self, fw_version, stage2_image):
self.stream = cStringIO.StringIO()
self.nullpad = '\x00' * 32
self.nullpad2 = '\x00' * 32
self.fw_version = fw_version
self.filesize = len(stage2_image)
self.iv = os.urandom(32)
image_key = self.getKeyPair()
aes = AES.new(key=image_key['key'], mode=AES.MODE_CBC, IV=image_key['iv'][:16]) # NB: Only the first 16 bytes are used
self.stream.write(self.nullpad) # Padding
self.stream.write(nullpad_str(self.fw_version, 32))
self.stream.write(self.iv)
self.stream.write(self.nullpad2) # More padding
self.stream.write(nullpad_str(str(self.filesize), 32))
self.stream.write(aes.encrypt(pkcs7_pad(stage2_image)))
self.stream.seek(0)
self.readHeader()
assert self.validateType()
self.stream.seek(0)
return self.stream.read()
class Type2(Image):
def readHeader(self):
self.image_digest = self.stream.read(32)
self.fw_version = unnullpad_str(self.stream.read(32))
self.key_factor = self.stream.read(32)
self.iv = self.stream.read(32)
self.filesize = int(unnullpad_str(self.stream.read(32)))
assert self.stream.tell() == 0xA0
def validateType(self):
if not hasattr(self, 'fw_version'):
raise Exception('validateType called before readHeader!')
cur_pos = self.stream.tell()
self.stream.seek(32) # Skip original image digest
digest = hashlib.new('sha256')
digest.update(('\x00' * 32) + self.stream.read())
self.stream.seek(cur_pos)
return digest.digest() == self.image_digest
@staticmethod
def keyPermutator(key):
perm_tbl = '26aejsw37bfktx48chmuy59dipvz'
key = bytearray(key)
for i in xrange(len(key)):
key[i] = perm_tbl[key[i] % len(perm_tbl)]
return str(key)
def getKeyPair(self):
digest_1 = hashlib.new('md5')
digest_1.update(self.key_factor)
digest_1.update(self.fw_version)
digest_2 = hashlib.new('md5')
digest_2.update('b7293e8150d1330c6c3d93f2fa81331b')
digest_2.update(self.fw_version)
digest_3 = hashlib.new('md5')
digest_3.update('83f323b7132703029da5f4a9daa72a60')
digest_3.update(self.fw_version)
digest_fin = hashlib.new('md5')
digest_fin.update(digest_1.digest())
digest_fin.update(digest_2.digest())
digest_fin.update(digest_3.digest())
key = Type2.keyPermutator(sercomm_hexdigest(digest_fin.digest()))
return dict(key=key, iv=self.iv[:16])
def createImage(self, fw_version, stage2_image):
self.stream = cStringIO.StringIO()
self.fw_version = fw_version
self.filesize = len(stage2_image)
self.key_factor = os.urandom(32)
#self.iv = os.urandom(32)
self.iv = '\x00' * 32
image_key = self.getKeyPair()
aes = AES.new(key=image_key['key'], mode=AES.MODE_CBC, IV=image_key['iv'][:16]) # NB: Only the first 16 bytes are used
self.stream.write('\x00' * 32) # Null digest for initial digest calculation
self.stream.write(nullpad_str(self.fw_version, 32))
self.stream.write(self.key_factor)
self.stream.write(self.iv)
self.stream.write(nullpad_str(str(self.filesize), 32))
self.stream.write(aes.encrypt(pkcs7_pad(stage2_image)))
self.stream.seek(0)
digest = hashlib.new('sha256')
digest.update(self.stream.read()) # Now overwrite it with the actual image digest
self.stream.seek(0)
self.stream.write(digest.digest())
self.stream.seek(0)
self.readHeader()
assert self.validateType()
self.stream.seek(0)
return self.stream.read()
| [
"os.path.getsize",
"cStringIO.StringIO",
"hashlib.new",
"os.urandom",
"json.dumps",
"gzip_mod.GzipFile",
"gzip.GzipFile",
"Crypto.Cipher.AES.new"
] | [((260, 290), 'cStringIO.StringIO', 'cStringIO.StringIO', (['image_data'], {}), '(image_data)\n', (278, 290), False, 'import cStringIO\n'), ((1337, 1403), 'Crypto.Cipher.AES.new', 'AES.new', ([], {'key': "key_pair['key']", 'mode': 'AES.MODE_CBC', 'IV': "key_pair['iv']"}), "(key=key_pair['key'], mode=AES.MODE_CBC, IV=key_pair['iv'])\n", (1344, 1403), False, 'from Crypto.Cipher import AES\n'), ((1992, 2013), 'hashlib.new', 'hashlib.new', (['"""sha256"""'], {}), "('sha256')\n", (2003, 2013), False, 'import hashlib\n'), ((2554, 2599), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'self.stream', 'mode': '"""rb"""'}), "(fileobj=self.stream, mode='rb')\n", (2567, 2599), False, 'import gzip\n'), ((4257, 4277), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (4275, 4277), False, 'import cStringIO\n'), ((4304, 4324), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (4322, 4324), False, 'import cStringIO\n'), ((4349, 4437), 'gzip_mod.GzipFile', 'gzip_mod.GzipFile', ([], {'filename': 'None', 'mode': '"""wb"""', 'fileobj': 'content_stream', 'compresslevel': '(6)'}), "(filename=None, mode='wb', fileobj=content_stream,\n compresslevel=6)\n", (4366, 4437), False, 'import gzip_mod\n'), ((5043, 5064), 'hashlib.new', 'hashlib.new', (['"""sha256"""'], {}), "('sha256')\n", (5054, 5064), False, 'import hashlib\n'), ((6148, 6166), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (6159, 6166), False, 'import hashlib\n'), ((6286, 6304), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (6297, 6304), False, 'import hashlib\n'), ((6605, 6625), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (6623, 6625), False, 'import cStringIO\n'), ((6799, 6813), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (6809, 6813), False, 'import os\n'), ((6868, 6941), 'Crypto.Cipher.AES.new', 'AES.new', ([], {'key': "image_key['key']", 'mode': 'AES.MODE_CBC', 'IV': "image_key['iv'][:16]"}), "(key=image_key['key'], mode=AES.MODE_CBC, IV=image_key['iv'][:16])\n", (6875, 6941), False, 'from Crypto.Cipher import AES\n'), ((8095, 8116), 'hashlib.new', 'hashlib.new', (['"""sha256"""'], {}), "('sha256')\n", (8106, 8116), False, 'import hashlib\n'), ((8560, 8578), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (8571, 8578), False, 'import hashlib\n'), ((8683, 8701), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (8694, 8701), False, 'import hashlib\n'), ((8825, 8843), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (8836, 8843), False, 'import hashlib\n'), ((8969, 8987), 'hashlib.new', 'hashlib.new', (['"""md5"""'], {}), "('md5')\n", (8980, 8987), False, 'import hashlib\n'), ((9327, 9347), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (9345, 9347), False, 'import cStringIO\n'), ((9456, 9470), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (9466, 9470), False, 'import os\n'), ((9591, 9664), 'Crypto.Cipher.AES.new', 'AES.new', ([], {'key': "image_key['key']", 'mode': 'AES.MODE_CBC', 'IV': "image_key['iv'][:16]"}), "(key=image_key['key'], mode=AES.MODE_CBC, IV=image_key['iv'][:16])\n", (9598, 9664), False, 'from Crypto.Cipher import AES\n'), ((10106, 10127), 'hashlib.new', 'hashlib.new', (['"""sha256"""'], {}), "('sha256')\n", (10117, 10127), False, 'import hashlib\n'), ((4092, 4117), 'json.dumps', 'json.dumps', (['manifest_data'], {}), '(manifest_data)\n', (4102, 4117), False, 'import json\n'), ((4715, 4752), 'os.path.getsize', 'os.path.getsize', (['block.block_filename'], {}), '(block.block_filename)\n', (4730, 4752), False, 'import os\n')] |
from __future__ import print_function, division
from black import out
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.utils import class_weight
class StableBCELoss(torch.nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = - input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, ignore_index=255, reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, output, target):
loss = self.CE(output, target)
return loss
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=None):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
def forward(self, output, target):
if self.ignore_index is not None and self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.CE_loss = nn.CrossEntropyLoss(reduce=False, weight=alpha)
def forward(self, output, target):
logpt = self.CE_loss(output, target)
pt = torch.exp(-logpt)
loss = ((1-pt)**self.gamma) * logpt
if self.size_average:
return loss.mean()
return loss.sum()
"""
====================
Focal Loss
code reference: https://github.com/clcarwin/focal_loss_pytorch
====================
"""
# class FocalLoss(nn.Module):
# def __init__(self, gamma=0, alpha=None, size_average=True):
# super(FocalLoss, self).__init__()
# self.gamma = gamma
# self.alpha = alpha
# if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
# if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
# self.size_average = size_average
# def forward(self, input, target):
# if input.dim()>2:
# input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
# input = input.transpose(1,2) # N,C,H*W => N,H*W,C
# input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
# target = target.view(-1,1)
# logpt = F.log_softmax(input)
# logpt = logpt.gather(1,target)
# logpt = logpt.view(-1)
# pt = Variable(logpt.data.exp())
# if self.alpha is not None:
# if self.alpha.type()!=input.data.type():
# self.alpha = self.alpha.type_as(input.data)
# at = self.alpha.gather(0,target.data.view(-1))
# logpt = logpt * Variable(at)
# loss = -1 * (1-pt)**self.gamma * logpt
# if self.size_average: return loss.mean()
# else: return loss.sum()
class CE_DiceLoss(nn.Module):
def __init__(self, smooth=1, reduction='mean', ignore_index=None, weight=None):
super(CE_DiceLoss, self).__init__()
self.smooth = smooth
self.dice = DiceLoss()
if ignore_index is not None:
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
else:
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction)
def forward(self, output, target):
CE_loss = self.cross_entropy(output, target)
dice_loss = self.dice(output, target)
return CE_loss + dice_loss
class LovaszSoftmax(nn.Module):
def __init__(self, classes='all', per_image=True, ignore_index=None):
super(LovaszSoftmax, self).__init__()
self.smooth = classes
self.per_image = per_image
self.ignore_index = ignore_index
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, ignore=self.ignore_index)
return loss
class StableBCELoss(torch.nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = - input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
<NAME> 2018 ESAT-PSI KU Leuven (MIT License)
https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytorch/lovasz_losses.py
"""
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / float(union)
ious.append(iou)
iou = mean(ious) # mean accross images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / float(union))
ious.append(iou)
ious = [mean(iou) for iou in zip(*ious)] # mean accross images if per_image
return 100 * np.array(ious)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def binary_xloss(logits, labels, ignore=None):
"""
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
"""
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
# --------------------------- MULTICLASS LOSSES ---------------------------
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
return loss
def lovasz_softmax_flat(probas, labels, classes='present'):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if (classes is 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
def xloss(logits, labels, ignore=None):
"""
Cross entropy loss
"""
return F.cross_entropy(logits, Variable(labels), ignore_index=255)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
### data processing ###
def toOneHot(mask, nb_class=10):
"""
Convert label image to onehot encoding
Args:
mask (Image): mask containing pixels labels
nb_class (int): number of class
"""
categorical = torch.from_numpy(np.array(mask)).long()
categorical = F.one_hot(categorical, nb_class)
return categorical.permute(2,0,1).float()
### losses & accuracy ###
def dice_loss(yhat, ytrue, epsilon=1e-6):
"""
Computes a soft Dice Loss
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
epsilon (Float): smoothing value to avoid division by 0
output:
DL value with `mean` reduction
"""
# compute Dice components
intersection = torch.sum(yhat * ytrue, (1,2,3))
cardinal = torch.sum(yhat + ytrue, (1,2,3))
return torch.mean(1. - (2 * intersection / (cardinal + epsilon)))
def tversky_index(yhat, ytrue, alpha=0.3, beta=0.7, epsilon=1e-6):
"""
Computes Tversky index
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight for False positive
beta (Float): weight for False negative
`` alpha and beta control the magnitude of penalties and should sum to 1``
epsilon (Float): smoothing value to avoid division by 0
output:
tversky index value
"""
TP = torch.sum(yhat * ytrue)
FP = torch.sum((1. - ytrue) * yhat)
FN = torch.sum((1. - yhat) * ytrue)
return TP/(TP + alpha * FP + beta * FN + epsilon)
def tversky_loss(yhat, ytrue):
"""
Computes tversky loss given tversky index
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
output:
tversky loss value with `mean` reduction
"""
return torch.mean(1 - tversky_index(yhat, ytrue))
def tversky_focal_loss(yhat, ytrue, alpha=0.7, beta=0.3, gamma=0.75):
"""
Computes tversky focal loss for highly umbalanced data
https://arxiv.org/pdf/1810.07842.pdf
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight for False positive
beta (Float): weight for False negative
`` alpha and beta control the magnitude of penalties and should sum to 1``
gamma (Float): focal parameter
``control the balance between easy background and hard ROI training examples``
output:
tversky focal loss value with `mean` reduction
"""
return torch.mean(torch.pow(1 - tversky_index(yhat, ytrue, alpha, beta), gamma))
def focal_loss(yhat, ytrue, alpha=0.75, gamma=2):
"""
Computes α-balanced focal loss from FAIR
https://arxiv.org/pdf/1708.02002v2.pdf
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight to balance Cross entropy value
gamma (Float): focal parameter
output:
loss value with `mean` reduction
"""
# compute the actual focal loss
focal = -alpha * torch.pow(1. - yhat, gamma) * torch.log(yhat)
f_loss = torch.sum(ytrue * focal, dim=1)
return torch.mean(f_loss)
def iou_accuracy(yhat, ytrue, threshold=0.5, epsilon=1e-6):
"""
Computes Intersection over Union metric
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
threshold (Float): threshold for pixel classification
epsilon (Float): smoothing parameter for numerical stability
output:
iou value with `mean` reduction
"""
intersection = ((yhat>threshold).long() & ytrue.long()).float().sum((1,2,3))
union = ((yhat>threshold).long() | ytrue.long()).float().sum((1,2,3))
return torch.mean(intersection/(union + epsilon)).item()
def make_one_hot(labels, classes):
one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)
target = one_hot.scatter_(1, labels.data, 1)
return target
def get_weights(target):
t_np = target.view(-1).data.cpu().numpy()
classes, counts = np.unique(t_np, return_counts=True)
# cls_w = np.median(counts) / counts
cls_w = class_weight.compute_class_weight(class_weight='balanced', classes=classes, y=t_np)
weights = np.ones(7)
weights[classes] = cls_w
return torch.from_numpy(weights).float().cuda() | [
"torch.sort",
"torch.log",
"numpy.unique",
"numpy.ones",
"torch.nn.CrossEntropyLoss",
"torch.mean",
"sklearn.utils.class_weight.compute_class_weight",
"torch.exp",
"itertools.filterfalse",
"torch.pow",
"numpy.array",
"torch.from_numpy",
"torch.sum",
"torch.nn.functional.one_hot",
"torch.... | [((8712, 8754), 'torch.sort', 'torch.sort', (['errors'], {'dim': '(0)', 'descending': '(True)'}), '(errors, dim=0, descending=True)\n', (8722, 8754), False, 'import torch\n'), ((13562, 13594), 'torch.nn.functional.one_hot', 'F.one_hot', (['categorical', 'nb_class'], {}), '(categorical, nb_class)\n', (13571, 13594), True, 'import torch.nn.functional as F\n'), ((14010, 14044), 'torch.sum', 'torch.sum', (['(yhat * ytrue)', '(1, 2, 3)'], {}), '(yhat * ytrue, (1, 2, 3))\n', (14019, 14044), False, 'import torch\n'), ((14058, 14092), 'torch.sum', 'torch.sum', (['(yhat + ytrue)', '(1, 2, 3)'], {}), '(yhat + ytrue, (1, 2, 3))\n', (14067, 14092), False, 'import torch\n'), ((14103, 14160), 'torch.mean', 'torch.mean', (['(1.0 - 2 * intersection / (cardinal + epsilon))'], {}), '(1.0 - 2 * intersection / (cardinal + epsilon))\n', (14113, 14160), False, 'import torch\n'), ((14665, 14688), 'torch.sum', 'torch.sum', (['(yhat * ytrue)'], {}), '(yhat * ytrue)\n', (14674, 14688), False, 'import torch\n'), ((14698, 14729), 'torch.sum', 'torch.sum', (['((1.0 - ytrue) * yhat)'], {}), '((1.0 - ytrue) * yhat)\n', (14707, 14729), False, 'import torch\n'), ((14738, 14769), 'torch.sum', 'torch.sum', (['((1.0 - yhat) * ytrue)'], {}), '((1.0 - yhat) * ytrue)\n', (14747, 14769), False, 'import torch\n'), ((16394, 16425), 'torch.sum', 'torch.sum', (['(ytrue * focal)'], {'dim': '(1)'}), '(ytrue * focal, dim=1)\n', (16403, 16425), False, 'import torch\n'), ((16438, 16456), 'torch.mean', 'torch.mean', (['f_loss'], {}), '(f_loss)\n', (16448, 16456), False, 'import torch\n'), ((17385, 17420), 'numpy.unique', 'np.unique', (['t_np'], {'return_counts': '(True)'}), '(t_np, return_counts=True)\n', (17394, 17420), True, 'import numpy as np\n'), ((17474, 17561), 'sklearn.utils.class_weight.compute_class_weight', 'class_weight.compute_class_weight', ([], {'class_weight': '"""balanced"""', 'classes': 'classes', 'y': 't_np'}), "(class_weight='balanced', classes=classes,\n y=t_np)\n", (17507, 17561), False, 'from sklearn.utils import class_weight\n'), ((17573, 17583), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (17580, 17583), True, 'import numpy as np\n'), ((1000, 1087), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'ignore_index': 'ignore_index', 'reduction': 'reduction'}), '(weight=weight, ignore_index=ignore_index, reduction=\n reduction)\n', (1019, 1087), True, 'import torch.nn as nn\n'), ((1741, 1765), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (1750, 1765), True, 'import torch.nn.functional as F\n'), ((2302, 2349), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduce': '(False)', 'weight': 'alpha'}), '(reduce=False, weight=alpha)\n', (2321, 2349), True, 'import torch.nn as nn\n'), ((2448, 2465), 'torch.exp', 'torch.exp', (['(-logpt)'], {}), '(-logpt)\n', (2457, 2465), False, 'import torch\n'), ((4994, 5018), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (5003, 5018), True, 'import torch.nn.functional as F\n'), ((7493, 7507), 'numpy.array', 'np.array', (['ious'], {}), '(ious)\n', (7501, 7507), True, 'import numpy as np\n'), ((8860, 8881), 'torch.nn.functional.relu', 'F.relu', (['errors_sorted'], {}), '(errors_sorted)\n', (8866, 8881), True, 'import torch.nn.functional as F\n'), ((8883, 8897), 'torch.autograd.Variable', 'Variable', (['grad'], {}), '(grad)\n', (8891, 8897), False, 'from torch.autograd import Variable\n'), ((11779, 11817), 'torch.sort', 'torch.sort', (['errors', '(0)'], {'descending': '(True)'}), '(errors, 0, descending=True)\n', (11789, 11817), False, 'import torch\n'), ((12677, 12693), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (12685, 12693), False, 'from torch.autograd import Variable\n'), ((12975, 12997), 'itertools.filterfalse', 'ifilterfalse', (['isnan', 'l'], {}), '(isnan, l)\n', (12987, 12997), True, 'from itertools import filterfalse as ifilterfalse\n'), ((16365, 16380), 'torch.log', 'torch.log', (['yhat'], {}), '(yhat)\n', (16374, 16380), False, 'import torch\n'), ((4309, 4396), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'reduction': 'reduction', 'ignore_index': 'ignore_index'}), '(weight=weight, reduction=reduction, ignore_index=\n ignore_index)\n', (4328, 4396), True, 'import torch.nn as nn\n'), ((4439, 4494), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'reduction': 'reduction'}), '(weight=weight, reduction=reduction)\n', (4458, 4494), True, 'import torch.nn as nn\n'), ((8669, 8684), 'torch.autograd.Variable', 'Variable', (['signs'], {}), '(signs)\n', (8677, 8684), False, 'from torch.autograd import Variable\n'), ((16335, 16363), 'torch.pow', 'torch.pow', (['(1.0 - yhat)', 'gamma'], {}), '(1.0 - yhat, gamma)\n', (16344, 16363), False, 'import torch\n'), ((17015, 17059), 'torch.mean', 'torch.mean', (['(intersection / (union + epsilon))'], {}), '(intersection / (union + epsilon))\n', (17025, 17059), False, 'import torch\n'), ((13521, 13535), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (13529, 13535), True, 'import numpy as np\n'), ((11716, 11728), 'torch.autograd.Variable', 'Variable', (['fg'], {}), '(fg)\n', (11724, 11728), False, 'from torch.autograd import Variable\n'), ((17624, 17649), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (17640, 17649), False, 'import torch\n')] |
#!/usr/bin/env python2
"""Basic Snapchat client
Usage:
get_stories.py [-q -z] -u <username> [-p <password> | -a <auth_token>] --gmail=<gmail> --gpasswd=<gpasswd> <path>
Options:
-h --help Show usage
-q --quiet Suppress output
-u --username=<username> Username
-p --password=<password> Password (optional, will prompt if omitted)
--gmail=<gmail> Gmail address
--gpasswd=<gpasswd> Gmail password
-a --auth-token=<auth_token> Auth token from Snapchat session
-z --unzip Unzip files
"""
from __future__ import print_function
import os.path
import sys
from getpass import getpass
import base64
from docopt import docopt
from snapy import get_file_extension, Snapchat
from snapy.utils import unzip_snap_mp4
from zipfile import is_zipfile
def main():
arguments = docopt(__doc__)
quiet = arguments['--quiet']
unzip = arguments['--unzip']
username = arguments['--username']
auth_token = arguments['--auth-token']
gmail = arguments['--gmail']
if arguments['--gpasswd'] is None:
gpasswd = getpass('Gmail password:')
else:
gpasswd = arguments['--gpasswd']
path = arguments['<path>']
if not os.path.isdir(path):
print('No such directory: {0}'.format(arguments['<path>']))
sys.exit(1)
s = Snapchat()
if auth_token:
s.restore_token(username, auth_token, gmail, gpasswd)
else:
if arguments['--password'] is None:
password = getpass('Password:')
else:
password = arguments['--password']
if not s.login(username, password, gmail, gpasswd)['updates_response'].get('logged'):
print('Invalid username or password')
sys.exit(1)
for snap in s.get_friend_stories():
filename = '{0}.{1}'.format(snap['id'],
get_file_extension(snap['media_type']))
abspath = os.path.abspath(os.path.join(path, filename))
if os.path.isfile(abspath):
continue
data = s.get_story_blob(snap['media_id'],
snap['media_key'],
snap['media_iv'])
if data is None:
continue
with open(abspath, 'wb') as f:
f.write(data)
if not quiet:
print('Saved: {0}'.format(abspath))
if is_zipfile(abspath) and unzip:
unzip_snap_mp4(abspath, quiet)
if __name__ == '__main__':
main()
| [
"snapy.Snapchat",
"zipfile.is_zipfile",
"getpass.getpass",
"sys.exit",
"snapy.utils.unzip_snap_mp4",
"docopt.docopt",
"snapy.get_file_extension"
] | [((878, 893), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (884, 893), False, 'from docopt import docopt\n'), ((1375, 1385), 'snapy.Snapchat', 'Snapchat', ([], {}), '()\n', (1383, 1385), False, 'from snapy import get_file_extension, Snapchat\n'), ((1135, 1161), 'getpass.getpass', 'getpass', (['"""Gmail password:"""'], {}), "('Gmail password:')\n", (1142, 1161), False, 'from getpass import getpass\n'), ((1354, 1365), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1362, 1365), False, 'import sys\n'), ((1546, 1566), 'getpass.getpass', 'getpass', (['"""Password:"""'], {}), "('Password:')\n", (1553, 1566), False, 'from getpass import getpass\n'), ((1785, 1796), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1793, 1796), False, 'import sys\n'), ((1922, 1960), 'snapy.get_file_extension', 'get_file_extension', (["snap['media_type']"], {}), "(snap['media_type'])\n", (1940, 1960), False, 'from snapy import get_file_extension, Snapchat\n'), ((2438, 2457), 'zipfile.is_zipfile', 'is_zipfile', (['abspath'], {}), '(abspath)\n', (2448, 2457), False, 'from zipfile import is_zipfile\n'), ((2481, 2511), 'snapy.utils.unzip_snap_mp4', 'unzip_snap_mp4', (['abspath', 'quiet'], {}), '(abspath, quiet)\n', (2495, 2511), False, 'from snapy.utils import unzip_snap_mp4\n')] |
from my_utils.dicts.get_config_item import get_config_item
from rlkit.torch.sac.diayn.diayn_env_replay_buffer import DIAYNEnvReplayBuffer
from diayn.memory.replay_buffer_prioritized import DIAYNEnvReplayBufferEBP
from diayn.energy.calc_energy_1D_pos_dim import calc_energy_1d_pos_dim
from diayn.energy.calc_energy_mcar import calc_energy_mcar
from diayn.memory.replay_buffer_discrete import DIAYNEnvReplayBufferOptDiscrete
def get_replay_buffer(
config: dict,
replay_buffer_kwargs
) -> DIAYNEnvReplayBuffer:
ebp_sampling = get_config_item(
config=config,
key='ebp_sampling',
default=False
)
if config['env_kwargs']['env_id'] == "MountainCarContinuous-v0":
energy_fun = calc_energy_mcar
else:
energy_fun = calc_energy_1d_pos_dim
if ebp_sampling:
replay_buffer = DIAYNEnvReplayBufferEBP(
calc_path_energy_fun=energy_fun,
**replay_buffer_kwargs
)
else:
replay_buffer = DIAYNEnvReplayBufferOptDiscrete(
**replay_buffer_kwargs
)
return replay_buffer
| [
"diayn.memory.replay_buffer_prioritized.DIAYNEnvReplayBufferEBP",
"diayn.memory.replay_buffer_discrete.DIAYNEnvReplayBufferOptDiscrete",
"my_utils.dicts.get_config_item.get_config_item"
] | [((548, 613), 'my_utils.dicts.get_config_item.get_config_item', 'get_config_item', ([], {'config': 'config', 'key': '"""ebp_sampling"""', 'default': '(False)'}), "(config=config, key='ebp_sampling', default=False)\n", (563, 613), False, 'from my_utils.dicts.get_config_item import get_config_item\n'), ((853, 938), 'diayn.memory.replay_buffer_prioritized.DIAYNEnvReplayBufferEBP', 'DIAYNEnvReplayBufferEBP', ([], {'calc_path_energy_fun': 'energy_fun'}), '(calc_path_energy_fun=energy_fun, **replay_buffer_kwargs\n )\n', (876, 938), False, 'from diayn.memory.replay_buffer_prioritized import DIAYNEnvReplayBufferEBP\n'), ((1003, 1058), 'diayn.memory.replay_buffer_discrete.DIAYNEnvReplayBufferOptDiscrete', 'DIAYNEnvReplayBufferOptDiscrete', ([], {}), '(**replay_buffer_kwargs)\n', (1034, 1058), False, 'from diayn.memory.replay_buffer_discrete import DIAYNEnvReplayBufferOptDiscrete\n')] |
from errors import LoxRuntimeError
class Environment:
def __init__(self, parent=None):
self.parent: Environment = parent
self.values = {}
def ancestor(self, distance):
e = self
for i in range(0, distance):
e = e.parent
return e
def define(self, name, value):
self.values[name.lexeme] = value
def get(self, name, fallback=True):
if name.lexeme in self.values:
return self.values[name.lexeme]
if fallback and self.parent is not None:
return self.parent.get(name)
raise LoxRuntimeError(name, f"Undefined Variable '{name.lexeme}'.")
def get_at(self, distance, name):
return self.ancestor(distance).get(name, fallback=False)
def assign(self, name, value, fallback=True):
if name.lexeme in self.values:
self.values[name.lexeme] = value
return value
if fallback and self.parent is not None:
return self.parent.assign(name, value)
raise LoxRuntimeError(name, f"Undefined Variable '{name.lexeme}'.")
def assign_at(self, distance, name, value):
self.ancestor(distance).assign(name, value, fallback=False)
| [
"errors.LoxRuntimeError"
] | [((599, 660), 'errors.LoxRuntimeError', 'LoxRuntimeError', (['name', 'f"""Undefined Variable \'{name.lexeme}\'."""'], {}), '(name, f"Undefined Variable \'{name.lexeme}\'.")\n', (614, 660), False, 'from errors import LoxRuntimeError\n'), ((1041, 1102), 'errors.LoxRuntimeError', 'LoxRuntimeError', (['name', 'f"""Undefined Variable \'{name.lexeme}\'."""'], {}), '(name, f"Undefined Variable \'{name.lexeme}\'.")\n', (1056, 1102), False, 'from errors import LoxRuntimeError\n')] |
# imports
import numpy as np
from rubin_sim.maf.metrics.baseMetric import BaseMetric
# constants
__all__ = ["UseMetric"]
# exception classes
# interface functions
# classes
class UseMetric(BaseMetric): # pylint: disable=too-few-public-methods
"""Metric to classify visits by type of visits"""
def __init__(self, noteCol="note", **kwargs):
self.noteCol = noteCol
super().__init__(col=[noteCol], metricDtype="object", **kwargs)
def run(self, dataSlice, slicePoint=None): # pylint: disable=invalid-name
"""Run the metric.
Parameters
----------
dataSlice : numpy.NDarray
Values passed to metric by the slicer, which the metric will use to calculate
metric values at each slicePoint.
slicePoint : Dict
Dictionary of slicePoint metadata passed to each metric.
E.g. the ra/dec of the healpix pixel or opsim fieldId.
Returns
-------
str
use at each slicePoint.
"""
use_name = None
visible_bands = ("u", "g", "r")
notes = dataSlice[self.noteCol]
if len(notes.shape) == 0:
note = notes
else:
note = notes[0]
assert np.all(notes == note)
note_elems = note.replace(":", ", ").split(", ")
if note_elems[0] == "greedy":
use_name = note_elems[0]
if note_elems[0] == "DD":
use_name = note_elems[1]
if note_elems[0] == "blob":
use_name = "wide with only IR"
for band in visible_bands:
if band in note_elems[1]:
use_name = "wide with u, g, or r"
assert use_name is not None, f"Unrecognized note: {note}"
return use_name
# internal functions & classes
| [
"numpy.all"
] | [((1251, 1272), 'numpy.all', 'np.all', (['(notes == note)'], {}), '(notes == note)\n', (1257, 1272), True, 'import numpy as np\n')] |
import time
import numpy as np
from tqdm import tqdm
from sklearn.decomposition import MiniBatchDictionaryLearning
from .metrics import distance_between_atoms
from .visualizations import show_dictionary_atoms_img
from .plots import plot_reconstruction_error_and_dictionary_distances
def loader(X, batch_size):
for j, i in enumerate(range(0, len(X), batch_size)):
try:
yield j, X[i: i + batch_size]
except IndexError:
yield j, X[i:]
def study_dictionary_convergence_and_reconstruction_for_images(
X: np.ndarray, X_test: np.ndarray, n_atoms=10, batch_size=30, data_nature_changes=[], compute_atoms_distance_every=10, color=True, atom_h=32, atom_w=32, display_intermediate=True):
"""
X: array of shape (num_samples, feature_size)
X_test: array of shape (num_samples, feature_size)
"""
# Initializations
times, reconstruction_errors = [], []
dictionary_atoms_distances, batches_seen = [], []
data_nature_changes_batches = [
size // batch_size for size in data_nature_changes]
data_nature_changes_time = []
# Define an online dictionary learning
clf = MiniBatchDictionaryLearning(n_components=n_atoms,
batch_size=batch_size,
transform_algorithm='lasso_lars',
verbose=False)
former_atoms = np.zeros((n_atoms, X_test.shape[1]))
start = time.time()
# For every batch of image, compute a partial fit of the dictionary
for i, sample in tqdm(loader(X, batch_size), total=X.shape[0] // batch_size):
clf.partial_fit(sample)
# We then measure the reconstruction error
reconstruction_error = np.array([np.linalg.norm(
test_x - clf.transform(test_x).dot(clf.components_)) for test_x in X_test])
reconstruction_errors.append(reconstruction_error)
times.append(time.time() - start)
# We compute the data nature change time if there is any
nb_of_current_changes = len(data_nature_changes_time)
if nb_of_current_changes < len(data_nature_changes):
# Data nature change at current batch
if data_nature_changes[nb_of_current_changes] <= i * batch_size:
data_nature_changes_time.append(time.time() - start)
atoms_distance_computation_cond = i % compute_atoms_distance_every == compute_atoms_distance_every - 1
if atoms_distance_computation_cond:
# We occasionally compute the atoms distances between iterations
distance_from_prev_dict = distance_between_atoms(
former_atoms, clf.components_)
former_atoms = np.copy(clf.components_)
dictionary_atoms_distances.append(distance_from_prev_dict)
batches_seen.append(i)
# We optionally display the learnt atoms
if display_intermediate:
print("=" * 20, "\n", "Batch", i)
print("Distance between current and previous atoms:",
distance_from_prev_dict)
show_dictionary_atoms_img(
clf, color=color, atom_h=atom_h, atom_w=atom_w)
# We eventually plot the reconstruction error and the evolution of atoms distances
reconstruction_errors = np.array(reconstruction_errors).T
dictionary_atoms_distances = np.array(dictionary_atoms_distances)
plot_reconstruction_error_and_dictionary_distances(
times, reconstruction_errors, batches_seen, dictionary_atoms_distances, compute_atoms_distance_every, data_nature_changes_time, data_nature_changes_batches)
| [
"numpy.copy",
"sklearn.decomposition.MiniBatchDictionaryLearning",
"numpy.array",
"numpy.zeros",
"time.time"
] | [((1165, 1290), 'sklearn.decomposition.MiniBatchDictionaryLearning', 'MiniBatchDictionaryLearning', ([], {'n_components': 'n_atoms', 'batch_size': 'batch_size', 'transform_algorithm': '"""lasso_lars"""', 'verbose': '(False)'}), "(n_components=n_atoms, batch_size=batch_size,\n transform_algorithm='lasso_lars', verbose=False)\n", (1192, 1290), False, 'from sklearn.decomposition import MiniBatchDictionaryLearning\n'), ((1421, 1457), 'numpy.zeros', 'np.zeros', (['(n_atoms, X_test.shape[1])'], {}), '((n_atoms, X_test.shape[1]))\n', (1429, 1457), True, 'import numpy as np\n'), ((1471, 1482), 'time.time', 'time.time', ([], {}), '()\n', (1480, 1482), False, 'import time\n'), ((3405, 3441), 'numpy.array', 'np.array', (['dictionary_atoms_distances'], {}), '(dictionary_atoms_distances)\n', (3413, 3441), True, 'import numpy as np\n'), ((3338, 3369), 'numpy.array', 'np.array', (['reconstruction_errors'], {}), '(reconstruction_errors)\n', (3346, 3369), True, 'import numpy as np\n'), ((2721, 2745), 'numpy.copy', 'np.copy', (['clf.components_'], {}), '(clf.components_)\n', (2728, 2745), True, 'import numpy as np\n'), ((1946, 1957), 'time.time', 'time.time', ([], {}), '()\n', (1955, 1957), False, 'import time\n'), ((2331, 2342), 'time.time', 'time.time', ([], {}), '()\n', (2340, 2342), False, 'import time\n')] |
from collections import deque
def cin():
return list(map(int, input().split()))
n, q = cin()
graph = [[] for _ in range(n + 1)]
for i in range(n - 1):
a, b = cin()
graph[a].append(b)
graph[b].append(a)
query = [cin() for _ in range(q)]
dist = [-1 for _ in range(n + 1)]
dist[0] = 0
dist[1] = 0
d = deque()
d.append(1)
while d:
v = d.popleft()
for i in graph[v]:
if dist[i] != -1:
continue
dist[i] = dist[v] + 1
d.append(i)
for i in range(q):
distance = dist[query[i][0]] + dist[query[i][1]]
if distance % 2 == 0:
print('Town')
else:
print('Road')
| [
"collections.deque"
] | [((323, 330), 'collections.deque', 'deque', ([], {}), '()\n', (328, 330), False, 'from collections import deque\n')] |
import sys
import os
# Add basedir to path
script_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(script_dir + "/../")
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from data.utils import read_pickle_from_file
from dscribe.descriptors import ACSF
from dscribe.core.system import System
from data.data import MolecularGraph, load_csv, make_graph
from time import time
DATA_DIR = script_dir + "/../../data/"
SPLIT_DIR = script_dir + "./split/"
GRAPH_DIR = script_dir + "./graph/"
class MolecularGraphDataset(Dataset):
def __init__(self, split,
csv,
mode,
augment=None):
"""Set Dataset for molecular graph
Arguments:
split {str} -- numpy splot
csv {str} -- 'train' or 'test'
mode {str} -- train
Keyword Arguments:
augment {[type]} -- [description] (default: {None})
"""
self.split = split
self.csv = csv
self.mode = mode
self.augment = augment
self.df = pd.read_csv(DATA_DIR + '/%s.csv'%csv)
if split is not None:
self.id = np.load(split,allow_pickle=True)
else:
self.id = self.df.molecule_name.unique()
def __str__(self):
string = ''\
+ '\tmode = %s\n'%self.mode \
+ '\tsplit = %s\n'%self.split \
+ '\tcsv = %s\n'%self.csv \
+ '\tlen = %d\n'%len(self)
return string
def __len__(self):
return len(self.id)
def __getitem__(self, index):
molecule_name = self.id[index]
graph_file = f'{GRAPH_DIR}/{molecule_name}.pickle'
graph = read_pickle_from_file(graph_file)
if 0:
# 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH
mask = np.zeros(len(graph['coupling'].type),np.bool)
for t in ['2JHC' , '2JHN', '2JHH']:
mask += (graph['coupling'].type == COUPLING_TYPE.index(t))
graph['coupling'].id = graph['coupling'].id[mask]
graph['coupling'].contribution = graph['coupling'].contribution[mask]
graph['coupling'].index = graph['coupling'].index[mask]
graph['coupling'].type = graph['coupling'].type[mask]
graph['coupling'].value = graph['coupling'].value[mask]
return graph
def _collate_fn(batch):
graphs = []
targets = []
batch_size = len(batch)
offset = 0
coupling_value = []
coupling_atom_index = []
coupling_type_index = []
coupling_batch_index = []
infor = []
for b in range(batch_size):
graph = batch[b]
graphs.append(graph)
num_coupling = len(graph['coupling'].value)
coupling_value.append(graph['coupling'].value)
coupling_atom_index.append(graph['coupling'].index+offset)
coupling_type_index.append (graph['coupling'].type)
coupling_batch_index.append(np.array([b]*num_coupling))
infor.append(graph['coupling'].id)
offset += len(graph['atom'])
train_input = MoleculaGraph().get_flat_data(graphs)
gnode = []
for i, j in enumerate(train_input[0]):
gnode += [i] * len(j)
gbond = []
for i, j in enumerate(train_input[1]):
gbond += [i] * len(j)
gnode = torch.from_numpy(np.ravel(gnode))
gbond = torch.from_numpy(np.ravel(gbond))
node = torch.from_numpy(np.concatenate(train_input[0])).float()
edge = torch.from_numpy(np.concatenate(train_input[1])).float()
state = torch.from_numpy(np.concatenate(train_input[2])).float()
index1_temp = train_input[3]
index2_temp = train_input[4]
index1 = []
index2 = []
offset_ind = 0
for ind1, ind2 in zip(index1_temp, index2_temp):
index1 += [i + offset_ind for i in ind1]
index2 += [i + offset_ind for i in ind2]
offset_ind += (max(ind1) + 1)
index1 = torch.from_numpy(np.ravel(index1)).long()
index2 = torch.from_numpy(np.ravel(index2)).long()
coupling_value = torch.from_numpy(np.concatenate(coupling_value)).float()
targets = coupling_value
coupling_index = np.concatenate([
np.concatenate(coupling_atom_index),
np.concatenate(coupling_type_index).reshape(-1,1),
np.concatenate(coupling_batch_index).reshape(-1,1),
],-1)
coupling_index = torch.from_numpy(coupling_index).long()
inputs = [node, edge, state, index1, index2, gnode, gbond, coupling_index, infor]
return inputs, targets
if __name__ == "__main__":
dataset = MolecularGraphDataset(split='debug_split_by_mol.1000.npy',
mode = 'train',
csv = 'train',
)
train_dl = DataLoader(dataset, batch_size=16,
shuffle=False, collate_fn=_collate_fn,
num_workers=0)
# print(dataset[0])
start = time()
for inputs, targets in train_dl:
print(time() - start)
start = time()
pass
print('qsdf')
| [
"data.utils.read_pickle_from_file",
"pandas.read_csv",
"torch.from_numpy",
"os.path.dirname",
"numpy.array",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"numpy.ravel",
"sys.path.append",
"numpy.load",
"time.time"
] | [((100, 136), 'sys.path.append', 'sys.path.append', (["(script_dir + '/../')"], {}), "(script_dir + '/../')\n", (115, 136), False, 'import sys\n'), ((73, 98), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (88, 98), False, 'import os\n'), ((4884, 4976), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(16)', 'shuffle': '(False)', 'collate_fn': '_collate_fn', 'num_workers': '(0)'}), '(dataset, batch_size=16, shuffle=False, collate_fn=_collate_fn,\n num_workers=0)\n', (4894, 4976), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5062, 5068), 'time.time', 'time', ([], {}), '()\n', (5066, 5068), False, 'from time import time\n'), ((1152, 1191), 'pandas.read_csv', 'pd.read_csv', (["(DATA_DIR + '/%s.csv' % csv)"], {}), "(DATA_DIR + '/%s.csv' % csv)\n", (1163, 1191), True, 'import pandas as pd\n'), ((1769, 1802), 'data.utils.read_pickle_from_file', 'read_pickle_from_file', (['graph_file'], {}), '(graph_file)\n', (1790, 1802), False, 'from data.utils import read_pickle_from_file\n'), ((3446, 3461), 'numpy.ravel', 'np.ravel', (['gnode'], {}), '(gnode)\n', (3454, 3461), True, 'import numpy as np\n'), ((3492, 3507), 'numpy.ravel', 'np.ravel', (['gbond'], {}), '(gbond)\n', (3500, 3507), True, 'import numpy as np\n'), ((5152, 5158), 'time.time', 'time', ([], {}), '()\n', (5156, 5158), False, 'from time import time\n'), ((1242, 1275), 'numpy.load', 'np.load', (['split'], {'allow_pickle': '(True)'}), '(split, allow_pickle=True)\n', (1249, 1275), True, 'import numpy as np\n'), ((3071, 3099), 'numpy.array', 'np.array', (['([b] * num_coupling)'], {}), '([b] * num_coupling)\n', (3079, 3099), True, 'import numpy as np\n'), ((4286, 4321), 'numpy.concatenate', 'np.concatenate', (['coupling_atom_index'], {}), '(coupling_atom_index)\n', (4300, 4321), True, 'import numpy as np\n'), ((4473, 4505), 'torch.from_numpy', 'torch.from_numpy', (['coupling_index'], {}), '(coupling_index)\n', (4489, 4505), False, 'import torch\n'), ((3537, 3567), 'numpy.concatenate', 'np.concatenate', (['train_input[0]'], {}), '(train_input[0])\n', (3551, 3567), True, 'import numpy as np\n'), ((3605, 3635), 'numpy.concatenate', 'np.concatenate', (['train_input[1]'], {}), '(train_input[1])\n', (3619, 3635), True, 'import numpy as np\n'), ((3674, 3704), 'numpy.concatenate', 'np.concatenate', (['train_input[2]'], {}), '(train_input[2])\n', (3688, 3704), True, 'import numpy as np\n'), ((4051, 4067), 'numpy.ravel', 'np.ravel', (['index1'], {}), '(index1)\n', (4059, 4067), True, 'import numpy as np\n'), ((4106, 4122), 'numpy.ravel', 'np.ravel', (['index2'], {}), '(index2)\n', (4114, 4122), True, 'import numpy as np\n'), ((4169, 4199), 'numpy.concatenate', 'np.concatenate', (['coupling_value'], {}), '(coupling_value)\n', (4183, 4199), True, 'import numpy as np\n'), ((5120, 5126), 'time.time', 'time', ([], {}), '()\n', (5124, 5126), False, 'from time import time\n'), ((4331, 4366), 'numpy.concatenate', 'np.concatenate', (['coupling_type_index'], {}), '(coupling_type_index)\n', (4345, 4366), True, 'import numpy as np\n'), ((4390, 4426), 'numpy.concatenate', 'np.concatenate', (['coupling_batch_index'], {}), '(coupling_batch_index)\n', (4404, 4426), True, 'import numpy as np\n')] |
"""Module state store tests."""
import pytest
from pytest_lazyfixture import lazy_fixture # type: ignore[import]
from opentrons.types import DeckSlotName
from opentrons.protocol_engine import commands, actions
from opentrons.protocol_engine.commands import (
heater_shaker as hs_commands,
temperature_module as temp_commands,
thermocycler as tc_commands,
)
from opentrons.protocol_engine.types import (
DeckSlotLocation,
ModuleDefinition,
ModuleModel,
)
from opentrons.protocol_engine.state.modules import (
ModuleStore,
ModuleState,
HardwareModule,
)
from opentrons.protocol_engine.state.module_substates import (
MagneticModuleId,
MagneticModuleSubState,
HeaterShakerModuleId,
HeaterShakerModuleSubState,
TemperatureModuleId,
TemperatureModuleSubState,
ThermocyclerModuleId,
ThermocyclerModuleSubState,
ModuleSubStateType,
)
def test_initial_state() -> None:
"""It should initialize the module state."""
subject = ModuleStore()
assert subject.state == ModuleState(
slot_by_module_id={},
hardware_by_module_id={},
substate_by_module_id={},
)
@pytest.mark.parametrize(
argnames=["module_definition", "model", "expected_substate"],
argvalues=[
(
lazy_fixture("magdeck_v2_def"),
ModuleModel.MAGNETIC_MODULE_V2,
MagneticModuleSubState(
module_id=MagneticModuleId("module-id"),
model=ModuleModel.MAGNETIC_MODULE_V2,
),
),
(
lazy_fixture("heater_shaker_v1_def"),
ModuleModel.HEATER_SHAKER_MODULE_V1,
HeaterShakerModuleSubState(
module_id=HeaterShakerModuleId("module-id"),
plate_target_temperature=None,
),
),
(
lazy_fixture("tempdeck_v1_def"),
ModuleModel.TEMPERATURE_MODULE_V1,
TemperatureModuleSubState(
module_id=TemperatureModuleId("module-id"),
plate_target_temperature=None,
),
),
(
lazy_fixture("thermocycler_v1_def"),
ModuleModel.THERMOCYCLER_MODULE_V1,
ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("module-id"),
target_block_temperature=None,
target_lid_temperature=None,
),
),
],
)
def test_load_module(
module_definition: ModuleDefinition,
model: ModuleModel,
expected_substate: ModuleSubStateType,
) -> None:
"""It should handle a successful LoadModule command."""
action = actions.UpdateCommandAction(
command=commands.LoadModule.construct( # type: ignore[call-arg]
params=commands.LoadModuleParams(
model=model,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
),
result=commands.LoadModuleResult(
moduleId="module-id",
model=model,
serialNumber="serial-number",
definition=module_definition,
),
)
)
subject = ModuleStore()
subject.handle_action(action)
assert subject.state == ModuleState(
slot_by_module_id={"module-id": DeckSlotName.SLOT_1},
hardware_by_module_id={
"module-id": HardwareModule(
serial_number="serial-number",
definition=module_definition,
)
},
substate_by_module_id={"module-id": expected_substate},
)
@pytest.mark.parametrize(
argnames=["module_definition", "expected_substate"],
argvalues=[
(
lazy_fixture("magdeck_v2_def"),
MagneticModuleSubState(
module_id=MagneticModuleId("module-id"),
model=ModuleModel.MAGNETIC_MODULE_V2,
),
),
(
lazy_fixture("heater_shaker_v1_def"),
HeaterShakerModuleSubState(
module_id=HeaterShakerModuleId("module-id"),
plate_target_temperature=None,
),
),
(
lazy_fixture("tempdeck_v2_def"),
TemperatureModuleSubState(
module_id=TemperatureModuleId("module-id"),
plate_target_temperature=None,
),
),
(
lazy_fixture("thermocycler_v1_def"),
ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("module-id"),
target_block_temperature=None,
target_lid_temperature=None,
),
),
],
)
def test_add_module_action(
module_definition: ModuleDefinition,
expected_substate: ModuleSubStateType,
) -> None:
"""It should be able to add attached modules directly into state."""
action = actions.AddModuleAction(
module_id="module-id",
serial_number="serial-number",
definition=module_definition,
)
subject = ModuleStore()
subject.handle_action(action)
assert subject.state == ModuleState(
slot_by_module_id={"module-id": None},
hardware_by_module_id={
"module-id": HardwareModule(
serial_number="serial-number",
definition=module_definition,
)
},
substate_by_module_id={"module-id": expected_substate},
)
def test_handle_hs_temperature_commands(heater_shaker_v1_def: ModuleDefinition) -> None:
"""It should update `plate_target_temperature` correctly."""
load_module_cmd = commands.LoadModule.construct( # type: ignore[call-arg]
params=commands.LoadModuleParams(
model=ModuleModel.HEATER_SHAKER_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
),
result=commands.LoadModuleResult(
moduleId="module-id",
model=ModuleModel.HEATER_SHAKER_MODULE_V1,
serialNumber="serial-number",
definition=heater_shaker_v1_def,
),
)
set_temp_cmd = hs_commands.SetTargetTemperature.construct( # type: ignore[call-arg]
params=hs_commands.SetTargetTemperatureParams(moduleId="module-id", celsius=42),
result=hs_commands.SetTargetTemperatureResult(),
)
deactivate_cmd = hs_commands.DeactivateHeater.construct( # type: ignore[call-arg]
params=hs_commands.DeactivateHeaterParams(moduleId="module-id"),
result=hs_commands.DeactivateHeaterResult(),
)
subject = ModuleStore()
subject.handle_action(actions.UpdateCommandAction(command=load_module_cmd))
subject.handle_action(actions.UpdateCommandAction(command=set_temp_cmd))
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
module_id=HeaterShakerModuleId("module-id"), plate_target_temperature=42
)
}
subject.handle_action(actions.UpdateCommandAction(command=deactivate_cmd))
assert subject.state.substate_by_module_id == {
"module-id": HeaterShakerModuleSubState(
module_id=HeaterShakerModuleId("module-id"), plate_target_temperature=None
)
}
def test_handle_tempdeck_temperature_commands(
tempdeck_v2_def: ModuleDefinition,
) -> None:
"""It should update Tempdeck's `plate_target_temperature` correctly."""
load_module_cmd = commands.LoadModule.construct( # type: ignore[call-arg]
params=commands.LoadModuleParams(
model=ModuleModel.TEMPERATURE_MODULE_V2,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
),
result=commands.LoadModuleResult(
moduleId="module-id",
model=ModuleModel.TEMPERATURE_MODULE_V2,
serialNumber="serial-number",
definition=tempdeck_v2_def,
),
)
set_temp_cmd = temp_commands.SetTargetTemperature.construct( # type: ignore[call-arg]
params=temp_commands.SetTargetTemperatureParams(
moduleId="module-id", celsius=42.4
),
result=temp_commands.SetTargetTemperatureResult(targetTemperature=42),
)
deactivate_cmd = temp_commands.DeactivateTemperature.construct( # type: ignore[call-arg]
params=temp_commands.DeactivateTemperatureParams(moduleId="module-id"),
result=temp_commands.DeactivateTemperatureResult(),
)
subject = ModuleStore()
subject.handle_action(actions.UpdateCommandAction(command=load_module_cmd))
subject.handle_action(actions.UpdateCommandAction(command=set_temp_cmd))
assert subject.state.substate_by_module_id == {
"module-id": TemperatureModuleSubState(
module_id=TemperatureModuleId("module-id"), plate_target_temperature=42
)
}
subject.handle_action(actions.UpdateCommandAction(command=deactivate_cmd))
assert subject.state.substate_by_module_id == {
"module-id": TemperatureModuleSubState(
module_id=TemperatureModuleId("module-id"), plate_target_temperature=None
)
}
def test_handle_thermocycler_block_temperature_commands(
thermocycler_v1_def: ModuleDefinition,
) -> None:
"""It should update Tempdeck's `plate_target_temperature` correctly."""
load_module_cmd = commands.LoadModule.construct( # type: ignore[call-arg]
params=commands.LoadModuleParams(
model=ModuleModel.THERMOCYCLER_MODULE_V1,
location=DeckSlotLocation(slotName=DeckSlotName.SLOT_1),
),
result=commands.LoadModuleResult(
moduleId="module-id",
model=ModuleModel.THERMOCYCLER_MODULE_V1,
serialNumber="serial-number",
definition=thermocycler_v1_def,
),
)
set_block_temp_cmd = tc_commands.SetTargetBlockTemperature.construct( # type: ignore[call-arg]
params=tc_commands.SetTargetBlockTemperatureParams(
moduleId="module-id", celsius=42.4
),
result=tc_commands.SetTargetBlockTemperatureResult(targetBlockTemperature=42.4),
)
deactivate_block_cmd = tc_commands.DeactivateBlock.construct( # type: ignore[call-arg]
params=tc_commands.DeactivateBlockParams(moduleId="module-id"),
result=tc_commands.DeactivateBlockResult(),
)
set_lid_temp_cmd = tc_commands.SetTargetLidTemperature.construct( # type: ignore[call-arg]
params=tc_commands.SetTargetLidTemperatureParams(
moduleId="module-id", celsius=35.3
),
result=tc_commands.SetTargetLidTemperatureResult(targetLidTemperature=35.3),
)
deactivate_lid_cmd = tc_commands.DeactivateLid.construct( # type: ignore[call-arg]
params=tc_commands.DeactivateLidParams(moduleId="module-id"),
result=tc_commands.DeactivateLidResult(),
)
subject = ModuleStore()
subject.handle_action(actions.UpdateCommandAction(command=load_module_cmd))
subject.handle_action(actions.UpdateCommandAction(command=set_block_temp_cmd))
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("module-id"),
target_block_temperature=42.4,
target_lid_temperature=None,
)
}
subject.handle_action(actions.UpdateCommandAction(command=set_lid_temp_cmd))
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("module-id"),
target_block_temperature=42.4,
target_lid_temperature=35.3,
)
}
subject.handle_action(actions.UpdateCommandAction(command=deactivate_lid_cmd))
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("module-id"),
target_block_temperature=42.4,
target_lid_temperature=None,
)
}
subject.handle_action(actions.UpdateCommandAction(command=deactivate_block_cmd))
assert subject.state.substate_by_module_id == {
"module-id": ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("module-id"),
target_block_temperature=None,
target_lid_temperature=None,
)
}
| [
"opentrons.protocol_engine.commands.thermocycler.DeactivateLidParams",
"opentrons.protocol_engine.commands.thermocycler.DeactivateLidResult",
"opentrons.protocol_engine.commands.temperature_module.DeactivateTemperatureResult",
"opentrons.protocol_engine.commands.heater_shaker.DeactivateHeaterParams",
"opent... | [((1002, 1015), 'opentrons.protocol_engine.state.modules.ModuleStore', 'ModuleStore', ([], {}), '()\n', (1013, 1015), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((3165, 3178), 'opentrons.protocol_engine.state.modules.ModuleStore', 'ModuleStore', ([], {}), '()\n', (3176, 3178), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((4863, 4975), 'opentrons.protocol_engine.actions.AddModuleAction', 'actions.AddModuleAction', ([], {'module_id': '"""module-id"""', 'serial_number': '"""serial-number"""', 'definition': 'module_definition'}), "(module_id='module-id', serial_number=\n 'serial-number', definition=module_definition)\n", (4886, 4975), False, 'from opentrons.protocol_engine import commands, actions\n'), ((5017, 5030), 'opentrons.protocol_engine.state.modules.ModuleStore', 'ModuleStore', ([], {}), '()\n', (5028, 5030), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((6536, 6549), 'opentrons.protocol_engine.state.modules.ModuleStore', 'ModuleStore', ([], {}), '()\n', (6547, 6549), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((8395, 8408), 'opentrons.protocol_engine.state.modules.ModuleStore', 'ModuleStore', ([], {}), '()\n', (8406, 8408), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((10791, 10804), 'opentrons.protocol_engine.state.modules.ModuleStore', 'ModuleStore', ([], {}), '()\n', (10802, 10804), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((1045, 1134), 'opentrons.protocol_engine.state.modules.ModuleState', 'ModuleState', ([], {'slot_by_module_id': '{}', 'hardware_by_module_id': '{}', 'substate_by_module_id': '{}'}), '(slot_by_module_id={}, hardware_by_module_id={},\n substate_by_module_id={})\n', (1056, 1134), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((6577, 6629), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'load_module_cmd'}), '(command=load_module_cmd)\n', (6604, 6629), False, 'from opentrons.protocol_engine import commands, actions\n'), ((6657, 6706), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'set_temp_cmd'}), '(command=set_temp_cmd)\n', (6684, 6706), False, 'from opentrons.protocol_engine import commands, actions\n'), ((6936, 6987), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'deactivate_cmd'}), '(command=deactivate_cmd)\n', (6963, 6987), False, 'from opentrons.protocol_engine import commands, actions\n'), ((8436, 8488), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'load_module_cmd'}), '(command=load_module_cmd)\n', (8463, 8488), False, 'from opentrons.protocol_engine import commands, actions\n'), ((8516, 8565), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'set_temp_cmd'}), '(command=set_temp_cmd)\n', (8543, 8565), False, 'from opentrons.protocol_engine import commands, actions\n'), ((8793, 8844), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'deactivate_cmd'}), '(command=deactivate_cmd)\n', (8820, 8844), False, 'from opentrons.protocol_engine import commands, actions\n'), ((10832, 10884), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'load_module_cmd'}), '(command=load_module_cmd)\n', (10859, 10884), False, 'from opentrons.protocol_engine import commands, actions\n'), ((10912, 10967), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'set_block_temp_cmd'}), '(command=set_block_temp_cmd)\n', (10939, 10967), False, 'from opentrons.protocol_engine import commands, actions\n'), ((11253, 11306), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'set_lid_temp_cmd'}), '(command=set_lid_temp_cmd)\n', (11280, 11306), False, 'from opentrons.protocol_engine import commands, actions\n'), ((11592, 11647), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'deactivate_lid_cmd'}), '(command=deactivate_lid_cmd)\n', (11619, 11647), False, 'from opentrons.protocol_engine import commands, actions\n'), ((11933, 11990), 'opentrons.protocol_engine.actions.UpdateCommandAction', 'actions.UpdateCommandAction', ([], {'command': 'deactivate_block_cmd'}), '(command=deactivate_block_cmd)\n', (11960, 11990), False, 'from opentrons.protocol_engine import commands, actions\n'), ((5842, 6005), 'opentrons.protocol_engine.commands.LoadModuleResult', 'commands.LoadModuleResult', ([], {'moduleId': '"""module-id"""', 'model': 'ModuleModel.HEATER_SHAKER_MODULE_V1', 'serialNumber': '"""serial-number"""', 'definition': 'heater_shaker_v1_def'}), "(moduleId='module-id', model=ModuleModel.\n HEATER_SHAKER_MODULE_V1, serialNumber='serial-number', definition=\n heater_shaker_v1_def)\n", (5867, 6005), False, 'from opentrons.protocol_engine import commands, actions\n'), ((6166, 6238), 'opentrons.protocol_engine.commands.heater_shaker.SetTargetTemperatureParams', 'hs_commands.SetTargetTemperatureParams', ([], {'moduleId': '"""module-id"""', 'celsius': '(42)'}), "(moduleId='module-id', celsius=42)\n", (6204, 6238), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((6255, 6295), 'opentrons.protocol_engine.commands.heater_shaker.SetTargetTemperatureResult', 'hs_commands.SetTargetTemperatureResult', ([], {}), '()\n', (6293, 6295), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((6405, 6461), 'opentrons.protocol_engine.commands.heater_shaker.DeactivateHeaterParams', 'hs_commands.DeactivateHeaterParams', ([], {'moduleId': '"""module-id"""'}), "(moduleId='module-id')\n", (6439, 6461), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((6478, 6514), 'opentrons.protocol_engine.commands.heater_shaker.DeactivateHeaterResult', 'hs_commands.DeactivateHeaterResult', ([], {}), '()\n', (6512, 6514), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((7637, 7793), 'opentrons.protocol_engine.commands.LoadModuleResult', 'commands.LoadModuleResult', ([], {'moduleId': '"""module-id"""', 'model': 'ModuleModel.TEMPERATURE_MODULE_V2', 'serialNumber': '"""serial-number"""', 'definition': 'tempdeck_v2_def'}), "(moduleId='module-id', model=ModuleModel.\n TEMPERATURE_MODULE_V2, serialNumber='serial-number', definition=\n tempdeck_v2_def)\n", (7662, 7793), False, 'from opentrons.protocol_engine import commands, actions\n'), ((7956, 8032), 'opentrons.protocol_engine.commands.temperature_module.SetTargetTemperatureParams', 'temp_commands.SetTargetTemperatureParams', ([], {'moduleId': '"""module-id"""', 'celsius': '(42.4)'}), "(moduleId='module-id', celsius=42.4)\n", (7996, 8032), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((8071, 8133), 'opentrons.protocol_engine.commands.temperature_module.SetTargetTemperatureResult', 'temp_commands.SetTargetTemperatureResult', ([], {'targetTemperature': '(42)'}), '(targetTemperature=42)\n', (8111, 8133), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((8250, 8313), 'opentrons.protocol_engine.commands.temperature_module.DeactivateTemperatureParams', 'temp_commands.DeactivateTemperatureParams', ([], {'moduleId': '"""module-id"""'}), "(moduleId='module-id')\n", (8291, 8313), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((8330, 8373), 'opentrons.protocol_engine.commands.temperature_module.DeactivateTemperatureResult', 'temp_commands.DeactivateTemperatureResult', ([], {}), '()\n', (8371, 8373), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((9507, 9668), 'opentrons.protocol_engine.commands.LoadModuleResult', 'commands.LoadModuleResult', ([], {'moduleId': '"""module-id"""', 'model': 'ModuleModel.THERMOCYCLER_MODULE_V1', 'serialNumber': '"""serial-number"""', 'definition': 'thermocycler_v1_def'}), "(moduleId='module-id', model=ModuleModel.\n THERMOCYCLER_MODULE_V1, serialNumber='serial-number', definition=\n thermocycler_v1_def)\n", (9532, 9668), False, 'from opentrons.protocol_engine import commands, actions\n'), ((9840, 9919), 'opentrons.protocol_engine.commands.thermocycler.SetTargetBlockTemperatureParams', 'tc_commands.SetTargetBlockTemperatureParams', ([], {'moduleId': '"""module-id"""', 'celsius': '(42.4)'}), "(moduleId='module-id', celsius=42.4)\n", (9883, 9919), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((9958, 10030), 'opentrons.protocol_engine.commands.thermocycler.SetTargetBlockTemperatureResult', 'tc_commands.SetTargetBlockTemperatureResult', ([], {'targetBlockTemperature': '(42.4)'}), '(targetBlockTemperature=42.4)\n', (10001, 10030), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((10145, 10200), 'opentrons.protocol_engine.commands.thermocycler.DeactivateBlockParams', 'tc_commands.DeactivateBlockParams', ([], {'moduleId': '"""module-id"""'}), "(moduleId='module-id')\n", (10178, 10200), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((10217, 10252), 'opentrons.protocol_engine.commands.thermocycler.DeactivateBlockResult', 'tc_commands.DeactivateBlockResult', ([], {}), '()\n', (10250, 10252), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((10371, 10448), 'opentrons.protocol_engine.commands.thermocycler.SetTargetLidTemperatureParams', 'tc_commands.SetTargetLidTemperatureParams', ([], {'moduleId': '"""module-id"""', 'celsius': '(35.3)'}), "(moduleId='module-id', celsius=35.3)\n", (10412, 10448), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((10487, 10555), 'opentrons.protocol_engine.commands.thermocycler.SetTargetLidTemperatureResult', 'tc_commands.SetTargetLidTemperatureResult', ([], {'targetLidTemperature': '(35.3)'}), '(targetLidTemperature=35.3)\n', (10528, 10555), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((10666, 10719), 'opentrons.protocol_engine.commands.thermocycler.DeactivateLidParams', 'tc_commands.DeactivateLidParams', ([], {'moduleId': '"""module-id"""'}), "(moduleId='module-id')\n", (10697, 10719), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((10736, 10769), 'opentrons.protocol_engine.commands.thermocycler.DeactivateLidResult', 'tc_commands.DeactivateLidResult', ([], {}), '()\n', (10767, 10769), True, 'from opentrons.protocol_engine.commands import heater_shaker as hs_commands, temperature_module as temp_commands, thermocycler as tc_commands\n'), ((1294, 1324), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""magdeck_v2_def"""'], {}), "('magdeck_v2_def')\n", (1306, 1324), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((1565, 1601), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""heater_shaker_v1_def"""'], {}), "('heater_shaker_v1_def')\n", (1577, 1601), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((1848, 1879), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""tempdeck_v1_def"""'], {}), "('tempdeck_v1_def')\n", (1860, 1879), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((2122, 2157), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""thermocycler_v1_def"""'], {}), "('thermocycler_v1_def')\n", (2134, 2157), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((3701, 3731), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""magdeck_v2_def"""'], {}), "('magdeck_v2_def')\n", (3713, 3731), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((3928, 3964), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""heater_shaker_v1_def"""'], {}), "('heater_shaker_v1_def')\n", (3940, 3964), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((4162, 4193), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""tempdeck_v2_def"""'], {}), "('tempdeck_v2_def')\n", (4174, 4193), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((4389, 4424), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""thermocycler_v1_def"""'], {}), "('thermocycler_v1_def')\n", (4401, 4424), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((2933, 3058), 'opentrons.protocol_engine.commands.LoadModuleResult', 'commands.LoadModuleResult', ([], {'moduleId': '"""module-id"""', 'model': 'model', 'serialNumber': '"""serial-number"""', 'definition': 'module_definition'}), "(moduleId='module-id', model=model, serialNumber=\n 'serial-number', definition=module_definition)\n", (2958, 3058), False, 'from opentrons.protocol_engine import commands, actions\n'), ((3374, 3449), 'opentrons.protocol_engine.state.modules.HardwareModule', 'HardwareModule', ([], {'serial_number': '"""serial-number"""', 'definition': 'module_definition'}), "(serial_number='serial-number', definition=module_definition)\n", (3388, 3449), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((5211, 5286), 'opentrons.protocol_engine.state.modules.HardwareModule', 'HardwareModule', ([], {'serial_number': '"""serial-number"""', 'definition': 'module_definition'}), "(serial_number='serial-number', definition=module_definition)\n", (5225, 5286), False, 'from opentrons.protocol_engine.state.modules import ModuleStore, ModuleState, HardwareModule\n'), ((5768, 5814), 'opentrons.protocol_engine.types.DeckSlotLocation', 'DeckSlotLocation', ([], {'slotName': 'DeckSlotName.SLOT_1'}), '(slotName=DeckSlotName.SLOT_1)\n', (5784, 5814), False, 'from opentrons.protocol_engine.types import DeckSlotLocation, ModuleDefinition, ModuleModel\n'), ((6831, 6864), 'opentrons.protocol_engine.state.module_substates.HeaterShakerModuleId', 'HeaterShakerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (6851, 6864), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((7112, 7145), 'opentrons.protocol_engine.state.module_substates.HeaterShakerModuleId', 'HeaterShakerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (7132, 7145), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((7563, 7609), 'opentrons.protocol_engine.types.DeckSlotLocation', 'DeckSlotLocation', ([], {'slotName': 'DeckSlotName.SLOT_1'}), '(slotName=DeckSlotName.SLOT_1)\n', (7579, 7609), False, 'from opentrons.protocol_engine.types import DeckSlotLocation, ModuleDefinition, ModuleModel\n'), ((8689, 8721), 'opentrons.protocol_engine.state.module_substates.TemperatureModuleId', 'TemperatureModuleId', (['"""module-id"""'], {}), "('module-id')\n", (8708, 8721), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((8968, 9000), 'opentrons.protocol_engine.state.module_substates.TemperatureModuleId', 'TemperatureModuleId', (['"""module-id"""'], {}), "('module-id')\n", (8987, 9000), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((9433, 9479), 'opentrons.protocol_engine.types.DeckSlotLocation', 'DeckSlotLocation', ([], {'slotName': 'DeckSlotName.SLOT_1'}), '(slotName=DeckSlotName.SLOT_1)\n', (9449, 9479), False, 'from opentrons.protocol_engine.types import DeckSlotLocation, ModuleDefinition, ModuleModel\n'), ((11092, 11125), 'opentrons.protocol_engine.state.module_substates.ThermocyclerModuleId', 'ThermocyclerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (11112, 11125), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((11431, 11464), 'opentrons.protocol_engine.state.module_substates.ThermocyclerModuleId', 'ThermocyclerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (11451, 11464), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((11772, 11805), 'opentrons.protocol_engine.state.module_substates.ThermocyclerModuleId', 'ThermocyclerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (11792, 11805), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((12115, 12148), 'opentrons.protocol_engine.state.module_substates.ThermocyclerModuleId', 'ThermocyclerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (12135, 12148), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((1432, 1461), 'opentrons.protocol_engine.state.module_substates.MagneticModuleId', 'MagneticModuleId', (['"""module-id"""'], {}), "('module-id')\n", (1448, 1461), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((1718, 1751), 'opentrons.protocol_engine.state.module_substates.HeaterShakerModuleId', 'HeaterShakerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (1738, 1751), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((1993, 2025), 'opentrons.protocol_engine.state.module_substates.TemperatureModuleId', 'TemperatureModuleId', (['"""module-id"""'], {}), "('module-id')\n", (2012, 2025), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((2273, 2306), 'opentrons.protocol_engine.state.module_substates.ThermocyclerModuleId', 'ThermocyclerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (2293, 2306), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((3795, 3824), 'opentrons.protocol_engine.state.module_substates.MagneticModuleId', 'MagneticModuleId', (['"""module-id"""'], {}), "('module-id')\n", (3811, 3824), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((4032, 4065), 'opentrons.protocol_engine.state.module_substates.HeaterShakerModuleId', 'HeaterShakerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (4052, 4065), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((4260, 4292), 'opentrons.protocol_engine.state.module_substates.TemperatureModuleId', 'TemperatureModuleId', (['"""module-id"""'], {}), "('module-id')\n", (4279, 4292), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((4492, 4525), 'opentrons.protocol_engine.state.module_substates.ThermocyclerModuleId', 'ThermocyclerModuleId', (['"""module-id"""'], {}), "('module-id')\n", (4512, 4525), False, 'from opentrons.protocol_engine.state.module_substates import MagneticModuleId, MagneticModuleSubState, HeaterShakerModuleId, HeaterShakerModuleSubState, TemperatureModuleId, TemperatureModuleSubState, ThermocyclerModuleId, ThermocyclerModuleSubState, ModuleSubStateType\n'), ((2851, 2897), 'opentrons.protocol_engine.types.DeckSlotLocation', 'DeckSlotLocation', ([], {'slotName': 'DeckSlotName.SLOT_1'}), '(slotName=DeckSlotName.SLOT_1)\n', (2867, 2897), False, 'from opentrons.protocol_engine.types import DeckSlotLocation, ModuleDefinition, ModuleModel\n')] |
"""
To check if a point P lies within the N sided polygon:
Step 1: Area of the polygon = sum of area of N-2 triangles formed by the polygon points.
Step 2: Area Covered by P = sum of areas of N triangles formed by P and any two adjasecnt sides of the ploygon.
If areas obtain from step 1 and 2 are equal then P lies with the polygon, else outside.
"""
import sys
def area(point_1, point_2, point_3):
return abs(point_1[0] * (point_2[1] - point_3[1])
+ point_2[0] * (point_3[1] - point_1[1])
+ point_3[0] * (point_1[1] - point_2[1]))
def get_coordinate(data):
l = data.split(",")
return tuple([int(c) for c in l])
def check_within():
points = []
c = int(raw_input("Nth sided polygon:: "))
if c > 4 or c < 3:
print("Program only works for rectangle and triangles")
sys.exit(1)
for i in range(c):
data = raw_input("Enter the polygon point:: ")
point = get_coordinate(data)
points.append(point)
test_data = raw_input("Enter the point to check:: ")
test_point = get_coordinate(test_data)
points.append(test_point)
if c == 3:
poly_area = area(points[0], points[1], points[2])
else:
poly_area = area(points[0], points[1], points[2]) + \
area(points[1], points[2], points[3])
while(True):
point_area = 0
if c == 3:
point_area += area(points[-1], points[0], points[1])
point_area += area(points[-1], points[0], points[2])
point_area += area(points[-1], points[1], points[2])
else:
point_area += area(points[-1], points[0], points[1])
point_area += area(points[-1], points[0], points[2])
point_area += area(points[-1], points[1], points[3])
point_area += area(points[-1], points[2], points[3])
if poly_area == point_area:
print("Point lies with polygon")
else:
print("Point does not lies with ploygon")
print("Lets check another point")
test_data = raw_input("Enter the point to check:: ")
test_point = get_coordinate(test_data)
points[-1] = test_point
def main():
check_within()
if __name__ == '__main__':
main()
| [
"sys.exit"
] | [((839, 850), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (847, 850), False, 'import sys\n')] |
from twitchstreams.models import *
from django.contrib import admin
admin.site.register(Channel)
admin.site.register(Tag)
| [
"django.contrib.admin.site.register"
] | [((69, 97), 'django.contrib.admin.site.register', 'admin.site.register', (['Channel'], {}), '(Channel)\n', (88, 97), False, 'from django.contrib import admin\n'), ((98, 122), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag'], {}), '(Tag)\n', (117, 122), False, 'from django.contrib import admin\n')] |
"""award_details JSON blob and awarded_at date stamp on BriefResponse
Constraint on Brief to allow only one BriefResponse with non-null 'awarded_at'
Revision ID: 960
Revises: 950
Create Date: 2017-08-07 15:22:43.619680
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '960'
down_revision = '950'
def upgrade():
op.add_column(
'brief_responses',
sa.Column('award_details', postgresql.JSON(astext_type=sa.Text()), nullable=True, server_default='{}')
)
op.add_column('brief_responses', sa.Column('awarded_at', sa.DateTime(), nullable=True))
op.create_index(
'idx_brief_responses_unique_awarded_at_per_brief_id',
'brief_responses',
['brief_id'],
unique=True,
postgresql_where=sa.text('awarded_at IS NOT NULL')
)
def downgrade():
op.drop_index('idx_brief_responses_unique_awarded_at_per_brief_id', table_name='brief_responses')
op.drop_column('brief_responses', 'awarded_at')
op.drop_column('brief_responses', 'award_details')
| [
"sqlalchemy.text",
"sqlalchemy.DateTime",
"sqlalchemy.Text",
"alembic.op.drop_column",
"alembic.op.drop_index"
] | [((910, 1011), 'alembic.op.drop_index', 'op.drop_index', (['"""idx_brief_responses_unique_awarded_at_per_brief_id"""'], {'table_name': '"""brief_responses"""'}), "('idx_brief_responses_unique_awarded_at_per_brief_id',\n table_name='brief_responses')\n", (923, 1011), False, 'from alembic import op\n'), ((1012, 1059), 'alembic.op.drop_column', 'op.drop_column', (['"""brief_responses"""', '"""awarded_at"""'], {}), "('brief_responses', 'awarded_at')\n", (1026, 1059), False, 'from alembic import op\n'), ((1064, 1114), 'alembic.op.drop_column', 'op.drop_column', (['"""brief_responses"""', '"""award_details"""'], {}), "('brief_responses', 'award_details')\n", (1078, 1114), False, 'from alembic import op\n'), ((638, 651), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (649, 651), True, 'import sqlalchemy as sa\n'), ((847, 880), 'sqlalchemy.text', 'sa.text', (['"""awarded_at IS NOT NULL"""'], {}), "('awarded_at IS NOT NULL')\n", (854, 880), True, 'import sqlalchemy as sa\n'), ((523, 532), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (530, 532), True, 'import sqlalchemy as sa\n')] |
import pygame
change = int(input('Digite o Número da música que você deseja tocar: [1/2/3/4]: '))
if change == (1000-999):
print("Está tocando: 'Diego e <NAME> - Pisadinha'")
music1= pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('pis.mp3')
pygame.mixer.music.play()
pygame.event.wait()
input()
elif change == (100/50):
print("Está tocando: 'HUNGRIA - Amor e fé'")
music2 = pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('aef.mp3')
pygame.mixer.music.play()
pygame.event.wait()
input()
elif change == (4860/1620):
print("Está tocando: 'Ilusão - Cracolândia' ")
music3 = pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('ccl.mp3')
pygame.mixer.music.play()
pygame.event.wait()
input()
elif change == 4:
print("Está tocando: 'Samahnta - <NAME>'")
music4 = pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('nep.mp3')
pygame.mixer.music.play()
pygame.event.wait()
input()
else:
print('None')
| [
"pygame.mixer.init",
"pygame.init",
"pygame.event.wait",
"pygame.mixer.music.load",
"pygame.mixer.music.play"
] | [((193, 212), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (210, 212), False, 'import pygame\n'), ((217, 230), 'pygame.init', 'pygame.init', ([], {}), '()\n', (228, 230), False, 'import pygame\n'), ((235, 269), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""pis.mp3"""'], {}), "('pis.mp3')\n", (258, 269), False, 'import pygame\n'), ((274, 299), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (297, 299), False, 'import pygame\n'), ((304, 323), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (321, 323), False, 'import pygame\n'), ((424, 443), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (441, 443), False, 'import pygame\n'), ((448, 461), 'pygame.init', 'pygame.init', ([], {}), '()\n', (459, 461), False, 'import pygame\n'), ((466, 500), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""aef.mp3"""'], {}), "('aef.mp3')\n", (489, 500), False, 'import pygame\n'), ((505, 530), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (528, 530), False, 'import pygame\n'), ((535, 554), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (552, 554), False, 'import pygame\n'), ((660, 679), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (677, 679), False, 'import pygame\n'), ((684, 697), 'pygame.init', 'pygame.init', ([], {}), '()\n', (695, 697), False, 'import pygame\n'), ((702, 736), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""ccl.mp3"""'], {}), "('ccl.mp3')\n", (725, 736), False, 'import pygame\n'), ((741, 766), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (764, 766), False, 'import pygame\n'), ((771, 790), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (788, 790), False, 'import pygame\n'), ((882, 901), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (899, 901), False, 'import pygame\n'), ((906, 919), 'pygame.init', 'pygame.init', ([], {}), '()\n', (917, 919), False, 'import pygame\n'), ((924, 958), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""nep.mp3"""'], {}), "('nep.mp3')\n", (947, 958), False, 'import pygame\n'), ((963, 988), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (986, 988), False, 'import pygame\n'), ((993, 1012), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (1010, 1012), False, 'import pygame\n')] |
#!/usr/bin/python
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import with_statement
import os
import copy
import itertools
import collections
import pysam
import pypeline.common.fileutils as fileutils
import pypeline.common.utilities as utilities
import pypeline.common.sequences as sequtils
import pypeline.common.text as text
from pypeline.common.formats.fasta import \
FASTA
from pypeline.common.formats.msa import \
MSA
from pypeline.node import \
NodeError, \
Node, \
MetaNode
from pypeline.common.bedtools import \
BEDRecord
class CollectSequencesNode(Node):
def __init__(self, fasta_files, sequences, destination, dependencies=()):
"""
fasta_files -- { taxon_name_1 : filename_1, ... }
sequences -- { interval_name_1, ... }
"""
self._infiles = copy.deepcopy(fasta_files)
self._sequences = utilities.safe_coerce_to_frozenset(sequences)
self._destination = copy.copy(destination)
self._outfiles = [os.path.join(destination, name + ".fasta")
for name in self._sequences]
input_files = list(self._infiles.itervalues())
for filename in self._infiles.itervalues():
input_files.append(filename + ".fai")
desc = "<CollectSequences: %i sequences from %i files -> '%s'>" \
% (len(self._sequences), len(self._infiles), self._destination)
Node.__init__(self,
description=desc,
input_files=input_files,
output_files=self._outfiles,
dependencies=dependencies)
def _setup(self, _config, _temp):
for filename in self._infiles.itervalues():
with open(filename + ".fai") as handle:
sequences = set()
for line in handle:
sequences.add(line.split("\t", 1)[0])
missing_sequences = list(self._sequences - sequences)
if missing_sequences:
if len(missing_sequences) >= 4:
missing_sequences = missing_sequences[:3]
missing_sequences.append("...")
message = ("FASTA file does not contain expected "
"sequences:\n File = %r\n "
"Sequences = %s\n") \
% (filename, ", ".join(missing_sequences))
raise NodeError(message)
def _run(self, _config, temp):
fasta_files = []
for (name, filename) in sorted(self._infiles.iteritems()):
fasta_files.append((name, pysam.Fastafile(filename)))
for sequence_name in sorted(self._sequences):
filename = os.path.join(temp, sequence_name + ".fasta")
with open(filename, "w") as out_handle:
for (sample, fasta_file) in fasta_files:
sequence = fasta_file.fetch(sequence_name)
fasta = FASTA(sample, sequence_name, sequence)
out_handle.write(str(fasta))
def _teardown(self, _config, temp):
for destination in sorted(self._outfiles):
source = fileutils.reroot_path(temp, destination)
fileutils.move_file(source, destination)
class FilterSingletonsNode(Node):
def __init__(self, input_file, output_file, filter_by, dependencies):
self._input_file = input_file
self._output_file = output_file
self._filter_by = dict(filter_by)
for (to_filter, groups) in self._filter_by.items():
# The taxa to be filtered is implied to be part of the group,
# but is not needed when actually carrying out the filtering
groups = utilities.safe_coerce_to_frozenset(groups) \
- utilities.safe_coerce_to_frozenset(to_filter)
if not groups:
raise RuntimeError("Singleton filtering must involve at least "
"one other taxa")
self._filter_by[to_filter] = groups
Node.__init__(self,
description="<FilterSingleton: '%s' -> '%s'>"
% (input_file, output_file),
input_files=[input_file],
output_files=[output_file],
dependencies=dependencies)
def _run(self, _config, temp):
alignment = MSA.from_file(self._input_file)
for (to_filter, groups) in self._filter_by.iteritems():
alignment = alignment.filter_singletons(to_filter, groups)
temp_filename = fileutils.reroot_path(temp, self._output_file)
with open(temp_filename, "w") as handle:
alignment.to_file(handle)
fileutils.move_file(temp_filename, self._output_file)
class FilterSingletonsMetaNode(MetaNode):
def __init__(self, input_files, destination, filter_by, dependencies=()):
subnodes = []
filter_by = dict(filter_by)
for (filename, node) in input_files.iteritems():
output_filename = fileutils.reroot_path(destination, filename)
subnodes.append(FilterSingletonsNode(input_file=filename,
output_file=output_filename,
filter_by=filter_by,
dependencies=node))
MetaNode.__init__(self,
description="<FilterSingleton: %i files -> '%s'>"
% (len(subnodes), destination),
subnodes=subnodes,
dependencies=dependencies)
class ExtractReferenceNode(Node):
def __init__(self, reference, bedfile, outfile, dependencies=()):
self._reference = reference
self._bedfile = bedfile
self._outfile = outfile
description = "<ExtractReference: '%s' -> '%s'>" \
% (reference, outfile)
Node.__init__(self,
description=description,
input_files=[reference, bedfile],
output_files=[outfile],
dependencies=dependencies)
def _run(self, _config, temp):
def _by_name(bed):
return bed.name
fastafile = pysam.Fastafile(self._reference)
seqs = collections.defaultdict(list)
with open(self._bedfile) as bedfile:
bedrecords = text.parse_lines_by_contig(bedfile, BEDRecord)
for (contig, beds) in sorted(bedrecords.iteritems()):
beds.sort(key=lambda bed: (bed.contig, bed.name, bed.start))
for (gene, gene_beds) in itertools.groupby(beds, _by_name):
gene_beds = tuple(gene_beds)
sequence = self._collect_sequence(fastafile, gene_beds)
seqs[(contig, gene)] = sequence
temp_file = os.path.join(temp, "sequences.fasta")
with open(temp_file, "w") as out_file:
for ((_, gene), sequence) in sorted(seqs.items()):
FASTA(gene, None, sequence).write(out_file)
fileutils.move_file(temp_file, self._outfile)
@classmethod
def _collect_sequence(cls, fastafile, beds):
sequence = []
for bed in beds:
fragment = fastafile.fetch(bed.contig, bed.start, bed.end)
if len(fragment) != (bed.end - bed.start):
cls._report_failure(bed, fragment)
sequence.append(fragment)
sequence = "".join(sequence)
if any((bed.strand == "-") for bed in beds):
assert all((bed.strand == "-") for bed in beds)
sequence = sequtils.reverse_complement(sequence)
return sequence
@classmethod
def _report_failure(cls, bed, fragment):
message = "Failed to extract region from " \
"reference sequence at %s:%i-%i; got " \
"%i bp, but expected %i bp." \
% (bed.contig, bed.start, bed.end,
len(fragment), (bed.end - bed.start))
raise NodeError(message)
| [
"pysam.Fastafile",
"pypeline.common.formats.fasta.FASTA",
"pypeline.node.NodeError",
"pypeline.common.sequences.reverse_complement",
"pypeline.common.fileutils.move_file",
"pypeline.common.formats.msa.MSA.from_file",
"pypeline.common.utilities.safe_coerce_to_frozenset",
"itertools.groupby",
"os.path... | [((1899, 1925), 'copy.deepcopy', 'copy.deepcopy', (['fasta_files'], {}), '(fasta_files)\n', (1912, 1925), False, 'import copy\n'), ((1952, 1997), 'pypeline.common.utilities.safe_coerce_to_frozenset', 'utilities.safe_coerce_to_frozenset', (['sequences'], {}), '(sequences)\n', (1986, 1997), True, 'import pypeline.common.utilities as utilities\n'), ((2026, 2048), 'copy.copy', 'copy.copy', (['destination'], {}), '(destination)\n', (2035, 2048), False, 'import copy\n'), ((2493, 2616), 'pypeline.node.Node.__init__', 'Node.__init__', (['self'], {'description': 'desc', 'input_files': 'input_files', 'output_files': 'self._outfiles', 'dependencies': 'dependencies'}), '(self, description=desc, input_files=input_files, output_files\n =self._outfiles, dependencies=dependencies)\n', (2506, 2616), False, 'from pypeline.node import NodeError, Node, MetaNode\n'), ((5150, 5335), 'pypeline.node.Node.__init__', 'Node.__init__', (['self'], {'description': '("<FilterSingleton: \'%s\' -> \'%s\'>" % (input_file, output_file))', 'input_files': '[input_file]', 'output_files': '[output_file]', 'dependencies': 'dependencies'}), '(self, description="<FilterSingleton: \'%s\' -> \'%s\'>" % (\n input_file, output_file), input_files=[input_file], output_files=[\n output_file], dependencies=dependencies)\n', (5163, 5335), False, 'from pypeline.node import NodeError, Node, MetaNode\n'), ((5492, 5523), 'pypeline.common.formats.msa.MSA.from_file', 'MSA.from_file', (['self._input_file'], {}), '(self._input_file)\n', (5505, 5523), False, 'from pypeline.common.formats.msa import MSA\n'), ((5684, 5730), 'pypeline.common.fileutils.reroot_path', 'fileutils.reroot_path', (['temp', 'self._output_file'], {}), '(temp, self._output_file)\n', (5705, 5730), True, 'import pypeline.common.fileutils as fileutils\n'), ((5826, 5879), 'pypeline.common.fileutils.move_file', 'fileutils.move_file', (['temp_filename', 'self._output_file'], {}), '(temp_filename, self._output_file)\n', (5845, 5879), True, 'import pypeline.common.fileutils as fileutils\n'), ((7053, 7186), 'pypeline.node.Node.__init__', 'Node.__init__', (['self'], {'description': 'description', 'input_files': '[reference, bedfile]', 'output_files': '[outfile]', 'dependencies': 'dependencies'}), '(self, description=description, input_files=[reference,\n bedfile], output_files=[outfile], dependencies=dependencies)\n', (7066, 7186), False, 'from pypeline.node import NodeError, Node, MetaNode\n'), ((7383, 7415), 'pysam.Fastafile', 'pysam.Fastafile', (['self._reference'], {}), '(self._reference)\n', (7398, 7415), False, 'import pysam\n'), ((7431, 7460), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7454, 7460), False, 'import collections\n'), ((7996, 8033), 'os.path.join', 'os.path.join', (['temp', '"""sequences.fasta"""'], {}), "(temp, 'sequences.fasta')\n", (8008, 8033), False, 'import os\n'), ((8213, 8258), 'pypeline.common.fileutils.move_file', 'fileutils.move_file', (['temp_file', 'self._outfile'], {}), '(temp_file, self._outfile)\n', (8232, 8258), True, 'import pypeline.common.fileutils as fileutils\n'), ((9176, 9194), 'pypeline.node.NodeError', 'NodeError', (['message'], {}), '(message)\n', (9185, 9194), False, 'from pypeline.node import NodeError, Node, MetaNode\n'), ((2075, 2117), 'os.path.join', 'os.path.join', (['destination', "(name + '.fasta')"], {}), "(destination, name + '.fasta')\n", (2087, 2117), False, 'import os\n'), ((3825, 3869), 'os.path.join', 'os.path.join', (['temp', "(sequence_name + '.fasta')"], {}), "(temp, sequence_name + '.fasta')\n", (3837, 3869), False, 'import os\n'), ((4271, 4311), 'pypeline.common.fileutils.reroot_path', 'fileutils.reroot_path', (['temp', 'destination'], {}), '(temp, destination)\n', (4292, 4311), True, 'import pypeline.common.fileutils as fileutils\n'), ((4324, 4364), 'pypeline.common.fileutils.move_file', 'fileutils.move_file', (['source', 'destination'], {}), '(source, destination)\n', (4343, 4364), True, 'import pypeline.common.fileutils as fileutils\n'), ((6147, 6191), 'pypeline.common.fileutils.reroot_path', 'fileutils.reroot_path', (['destination', 'filename'], {}), '(destination, filename)\n', (6168, 6191), True, 'import pypeline.common.fileutils as fileutils\n'), ((7531, 7577), 'pypeline.common.text.parse_lines_by_contig', 'text.parse_lines_by_contig', (['bedfile', 'BEDRecord'], {}), '(bedfile, BEDRecord)\n', (7557, 7577), True, 'import pypeline.common.text as text\n'), ((8763, 8800), 'pypeline.common.sequences.reverse_complement', 'sequtils.reverse_complement', (['sequence'], {}), '(sequence)\n', (8790, 8800), True, 'import pypeline.common.sequences as sequtils\n'), ((4823, 4865), 'pypeline.common.utilities.safe_coerce_to_frozenset', 'utilities.safe_coerce_to_frozenset', (['groups'], {}), '(groups)\n', (4857, 4865), True, 'import pypeline.common.utilities as utilities\n'), ((4886, 4931), 'pypeline.common.utilities.safe_coerce_to_frozenset', 'utilities.safe_coerce_to_frozenset', (['to_filter'], {}), '(to_filter)\n', (4920, 4931), True, 'import pypeline.common.utilities as utilities\n'), ((7763, 7796), 'itertools.groupby', 'itertools.groupby', (['beds', '_by_name'], {}), '(beds, _by_name)\n', (7780, 7796), False, 'import itertools\n'), ((3534, 3552), 'pypeline.node.NodeError', 'NodeError', (['message'], {}), '(message)\n', (3543, 3552), False, 'from pypeline.node import NodeError, Node, MetaNode\n'), ((3719, 3744), 'pysam.Fastafile', 'pysam.Fastafile', (['filename'], {}), '(filename)\n', (3734, 3744), False, 'import pysam\n'), ((4070, 4108), 'pypeline.common.formats.fasta.FASTA', 'FASTA', (['sample', 'sequence_name', 'sequence'], {}), '(sample, sequence_name, sequence)\n', (4075, 4108), False, 'from pypeline.common.formats.fasta import FASTA\n'), ((8160, 8187), 'pypeline.common.formats.fasta.FASTA', 'FASTA', (['gene', 'None', 'sequence'], {}), '(gene, None, sequence)\n', (8165, 8187), False, 'from pypeline.common.formats.fasta import FASTA\n')] |
import cv2
import numpy as np
from random import randrange
#generated by:<NAME>
trained_face_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
webcam=cv2.VideoCapture(0)
while True:
successful_frame_read , frame = webcam.read()
grayscaled_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
face_coordinates = trained_face_data.detectMultiScale(grayscaled_img)
for(x,y,w,h) in face_coordinates:
cv2.rectangle(frame,(x,y),(x+w,y+h),(randrange(256),randrange(256),randrange(256)),10)
#detect face train
cv2.imshow('Hasan Saleh Face Detector',frame)
key=cv2.waitKey(1)
if key==81 or key==113:
break
exit()
| [
"random.randrange",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.waitKey"
] | [((100, 160), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (121, 160), False, 'import cv2\n'), ((168, 187), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (184, 187), False, 'import cv2\n'), ((271, 310), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (283, 310), False, 'import cv2\n'), ((549, 595), 'cv2.imshow', 'cv2.imshow', (['"""Hasan Saleh Face Detector"""', 'frame'], {}), "('Hasan Saleh Face Detector', frame)\n", (559, 595), False, 'import cv2\n'), ((603, 617), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (614, 617), False, 'import cv2\n'), ((467, 481), 'random.randrange', 'randrange', (['(256)'], {}), '(256)\n', (476, 481), False, 'from random import randrange\n'), ((482, 496), 'random.randrange', 'randrange', (['(256)'], {}), '(256)\n', (491, 496), False, 'from random import randrange\n'), ((497, 511), 'random.randrange', 'randrange', (['(256)'], {}), '(256)\n', (506, 511), False, 'from random import randrange\n')] |
"""
Implementation of a Compiler for INE5426 - UFSC Authors:
<NAME> (18100539)
<NAME> (18102721)
<NAME> (18100547)
"""
import ply.yacc as yacc
from draguilexer import tokens
def p_program(p):
'''program : statement
| funclist
| empty
'''
pass
def p_funclist(p):
'''funclist : funcdef _funclist
'''
pass
def p__funclist(p):
'''_funclist : funclist
| empty
'''
pass
def p_funcdef(p):
'''funcdef : DEFINE IDENT LPAREN paramlist RPAREN LBRACES statelist RBRACES
'''
pass
def p_paramlist(p):
'''paramlist : INT IDENT _paramlist
| FLOAT IDENT _paramlist
| STRING IDENT _paramlist
| empty
'''
pass
def p__paramlist(p):
'''_paramlist : COMMA paramlist
| empty
'''
pass
def p_statement(p):
'''statement : vardecl SEMICOLON
| atribstat SEMICOLON
| printstat SEMICOLON
| readstat SEMICOLON
| returnstat SEMICOLON
| ifstat
| forstat
| LBRACES statelist RBRACES
| BREAK SEMICOLON
| SEMICOLON
'''
pass
def p_vardecl(p):
'''vardecl : INT IDENT vardecl_line
| FLOAT IDENT vardecl_line
| STRING IDENT vardecl_line
'''
pass
def p_vardecl_line(p):
'''vardecl_line : LBRACKET INT_CONSTANT RBRACKET vardecl_line
| empty
'''
pass
def p_atribstat(p):
'''atribstat : lvalue ASSIGN _atribstat
'''
pass
def p__atribstat(p):
'''_atribstat : PLUS _atribstat_help
| MINUS _atribstat_help
| __atribstat
| IDENT ___atribstat
| allocexpression
'''
pass
def p__atribstat_help(p):
'''_atribstat_help : IDENT lvalue_line term_line numexpression_line _expression
| __atribstat
'''
pass
def p___atribstat(p):
'''__atribstat : INT_CONSTANT term_line numexpression_line _expression
| FLOAT_CONSTANT term_line numexpression_line _expression
| STRING_CONSTANT term_line numexpression_line _expression
| NULL term_line numexpression_line _expression
| LPAREN numexpression RPAREN term_line numexpression_line _expression
'''
pass
def p____atribstat(p):
'''___atribstat : lvalue_line term_line numexpression_line _expression
| LPAREN paramlistcall RPAREN
'''
pass
def p_funccall(p):
'''funccall : IDENT LPAREN paramlistcall RPAREN
'''
pass
#p[0] = "ident()"
def p_paramlistcall(p):
'''paramlistcall : IDENT _paramlistcall
| empty
'''
pass
def p__paramlistcall(p):
'''_paramlistcall : COMMA paramlistcall
| empty
'''
pass
def p_printstat(p):
'''printstat : PRINT expression
'''
pass
def p_readstat(p):
'''readstat : READ lvalue
'''
pass
def p_returnstat(p):
'''returnstat : RETURN
'''
pass
def p_ifstat(p):
'''ifstat : IF LPAREN expression RPAREN LBRACES statelist RBRACES _ifstat
'''
pass
def p__ifstat(p):
'''_ifstat : ELSE statement
| empty
'''
pass
def p_forstat(p):
'''forstat : FOR LPAREN atribstat SEMICOLON expression SEMICOLON atribstat RPAREN statement
'''
pass
def p_statelist(p):
'''statelist : statement _statelist
'''
pass
def p__statelist(p):
'''_statelist : statelist
| empty
'''
pass
def p_allocexpression(p):
'''allocexpression : NEW _allocexpression
'''
pass
def p__allocexpression(p):
'''_allocexpression : INT allocexpression_line
| FLOAT allocexpression_line
| STRING allocexpression_line
'''
pass
def p_allocexpression_line(p):
'''allocexpression_line : LBRACKET numexpression RBRACKET _allocexpression_line
'''
pass
def p__allocexpression_line(p):
'''_allocexpression_line : allocexpression_line
| empty
'''
pass
def p_expression(p):
'''expression : numexpression _expression
'''
pass
def p__expression(p):
'''_expression : LESS_THAN numexpression
| GREATER_THAN numexpression
| LESS_EQUAL_THAN numexpression
| GREATER_EQUAL_THAN numexpression
| EQUAL_TO numexpression
| NOT_EQUAL_TO numexpression
| empty
'''
pass
def p_numexpression(p):
'''numexpression : term numexpression_line
'''
pass
def p_numexpression_line(p):
'''numexpression_line : PLUS term numexpression_line
| MINUS term numexpression_line
| empty
'''
pass
def p_term(p):
'''term : unaryexpr term_line
'''
pass
def p_term_line(p):
'''term_line : TIMES unaryexpr term_line
| DIVIDE unaryexpr term_line
| MODULO unaryexpr term_line
| empty
'''
pass
def p_unaryexpr(p):
'''unaryexpr : factor
| PLUS factor
| MINUS factor
'''
pass
def p_factor(p):
'''factor : INT_CONSTANT
| FLOAT_CONSTANT
| STRING_CONSTANT
| NULL
| lvalue
| LPAREN numexpression RPAREN
'''
pass
def p_lvalue(p):
'''lvalue : IDENT lvalue_line
'''
pass
def p_lvalue_line(p):
'''lvalue_line : LBRACKET numexpression RBRACKET lvalue_line
| empty
'''
pass
def p_empty(p):
'''empty :'''
pass
# Error rule for syntax errors
def p_error(p):
print(f"Syntax error in input: {p.value} ({p.type})")
print("Current sentence form")
print(f"{parser.symstack} \n\n")
raise SyntaxError
# Build the parser
parser = yacc.yacc()
| [
"ply.yacc.yacc"
] | [((6029, 6040), 'ply.yacc.yacc', 'yacc.yacc', ([], {}), '()\n', (6038, 6040), True, 'import ply.yacc as yacc\n')] |
from multiprocessing.pool import ThreadPool as Pool
from typing import Any, Hashable
import pandas as pd
from rockflow.common.datatime_helper import GmtDatetimeCheck
from rockflow.common.logo import Public, Etoro
from rockflow.operators.common import is_none_us_symbol
from rockflow.operators.const import GLOBAL_DEBUG, DEFAULT_POOL_SIZE
from rockflow.operators.oss import OSSOperator
class LogoBatchOperator(OSSOperator):
def __init__(self,
from_key: str,
key: str,
pool_size: int = DEFAULT_POOL_SIZE,
**kwargs) -> None:
super().__init__(**kwargs)
self.from_key = from_key
self.key = key
self.pool_size = pool_size
@property
def symbols(self) -> pd.DataFrame:
return pd.read_csv(self.get_object(self.from_key))
def object_not_update_for_a_week(self, key: str):
if not self.object_exists(key):
return True
elif GLOBAL_DEBUG:
return False
try:
return GmtDatetimeCheck(
self.last_modified(key), weeks=1
).check
except Exception as e:
self.log.error(f"error: {str(e)}")
return True
def save_one(self, line: tuple[Hashable, pd.Series], cls):
index = line[0]
symbol = line[1]['yahoo']
if is_none_us_symbol(symbol):
return
self.log.debug(f"index: {index}, symbol: {symbol}")
obj = cls(
symbol=symbol,
prefix=self.key,
proxy=self.proxy
)
if self.object_not_update_for_a_week(obj.oss_key):
r = obj.get()
if not r:
return
self.put_object(obj.oss_key, r.content)
@property
def cls(self):
raise NotImplementedError()
def execute(self, context: Any):
self.log.info(f"symbol: {self.symbols}")
# self.symbols.apply(
# self.save_one,
# axis=1,
# args=(self.cls,)
# )
with Pool(self.pool_size) as pool:
pool.map(
lambda x: self.save_one(x, self.cls), self.symbols.iterrows()
)
class PublicLogoBatchOperator(LogoBatchOperator):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
@property
def cls(self):
return Public
class PublicLogoBatchOperatorDebug(PublicLogoBatchOperator):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
@property
def symbols(self) -> pd.DataFrame:
return super().symbols[:100]
class EtoroLogoBatchOperator(LogoBatchOperator):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
@property
def cls(self):
return Etoro
class EtoroLogoBatchOperatorDebug(EtoroLogoBatchOperator):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
@property
def symbols(self) -> pd.DataFrame:
return super().symbols[:100]
| [
"rockflow.operators.common.is_none_us_symbol",
"multiprocessing.pool.ThreadPool"
] | [((1362, 1387), 'rockflow.operators.common.is_none_us_symbol', 'is_none_us_symbol', (['symbol'], {}), '(symbol)\n', (1379, 1387), False, 'from rockflow.operators.common import is_none_us_symbol\n'), ((2058, 2078), 'multiprocessing.pool.ThreadPool', 'Pool', (['self.pool_size'], {}), '(self.pool_size)\n', (2062, 2078), True, 'from multiprocessing.pool import ThreadPool as Pool\n')] |
import pandas as pd
from databases.connection import connector_mysql, create_table, create_sale
# CONNECT TO DATABASE MYSQL
mydb = connector_mysql()
# CREATE TABLE SALES
create_table()
mycursor = mydb.cursor()
# LOAD FILE CSV
data = pd.read_csv('sales_data_sample.csv', sep=";", encoding="latin1")
data = data.fillna(0)
# FORMAT DATE TO ORDERDATE
data['ORDERDATECLEAN'] = pd.to_datetime(data['ORDERDATE'])
# ASSIGN FIELD YEAR TO CSV
data['YEAR'] = data['ORDERDATECLEAN'].dt.strftime('%Y')
# ASSIGN FIELD MONTH TO CSV
data['MONTH'] = data['ORDERDATECLEAN'].dt.strftime('%m')
# ASSIGN FIELD SALES TO CSV
data['SALES'] = data['QUANTITYORDERED'] * data['PRICEEACH']
for index, row in data.iterrows():
create_sale(row)
| [
"pandas.read_csv",
"databases.connection.create_sale",
"databases.connection.create_table",
"databases.connection.connector_mysql",
"pandas.to_datetime"
] | [((133, 150), 'databases.connection.connector_mysql', 'connector_mysql', ([], {}), '()\n', (148, 150), False, 'from databases.connection import connector_mysql, create_table, create_sale\n'), ((173, 187), 'databases.connection.create_table', 'create_table', ([], {}), '()\n', (185, 187), False, 'from databases.connection import connector_mysql, create_table, create_sale\n'), ((238, 302), 'pandas.read_csv', 'pd.read_csv', (['"""sales_data_sample.csv"""'], {'sep': '""";"""', 'encoding': '"""latin1"""'}), "('sales_data_sample.csv', sep=';', encoding='latin1')\n", (249, 302), True, 'import pandas as pd\n'), ((378, 411), 'pandas.to_datetime', 'pd.to_datetime', (["data['ORDERDATE']"], {}), "(data['ORDERDATE'])\n", (392, 411), True, 'import pandas as pd\n'), ((712, 728), 'databases.connection.create_sale', 'create_sale', (['row'], {}), '(row)\n', (723, 728), False, 'from databases.connection import connector_mysql, create_table, create_sale\n')] |
from .exception import AuthFailedException
from .client import default_client
def init(client=default_client):
"""
Init configuration for SocketIO client.
Returns:
Event client that will be able to set listeners.
"""
from socketIO_client import SocketIO, BaseNamespace
from . import get_event_host
from gazu.client import make_auth_header
path = get_event_host(client)
event_client = SocketIO(path, None, headers=make_auth_header())
main_namespace = event_client.define(BaseNamespace, "/events")
event_client.main_namespace = main_namespace
event_client.on('error', connect_error)
return event_client
def connect_error(data):
print("The connection failed!")
return data
def add_listener(event_client, event_name, event_handler):
"""
Set a listener that reacts to a given event.
"""
event_client.main_namespace.on(event_name, event_handler)
return event_client
def run_client(event_client):
"""
Run event client (it blocks current thread). It listens to all events
configured.
"""
try:
event_client.wait()
except TypeError:
raise AuthFailedException
return event_client
| [
"gazu.client.make_auth_header"
] | [((461, 479), 'gazu.client.make_auth_header', 'make_auth_header', ([], {}), '()\n', (477, 479), False, 'from gazu.client import make_auth_header\n')] |
#-------------------------------------------------------------------------------
# Project: Paldb
# Name: Paldb
# Purpose:
# Author: zhaozhongyu
# Created: 2/9/2017 4:23 PM
# Copyright: (c) "zhaozhongyu" "2/9/2017 4:23 PM"
# Licence: <your licence>
# -*- coding:utf-8 -*-
#-------------------------------------------------------------------------------
from Paldb.ipml import ReaderIpml, WriterIpml
class PalDB:
def __init__(self):
print("PalDB init.")
def createWriter(file):
print("PalDB createWriter.")
return WriterIpml.WriterIpml(file)
def createReader(file):
print("PalDB createReader.")
return ReaderIpml.ReaderIpml(file)
| [
"Paldb.ipml.ReaderIpml.ReaderIpml",
"Paldb.ipml.WriterIpml.WriterIpml"
] | [((569, 596), 'Paldb.ipml.WriterIpml.WriterIpml', 'WriterIpml.WriterIpml', (['file'], {}), '(file)\n', (590, 596), False, 'from Paldb.ipml import ReaderIpml, WriterIpml\n'), ((667, 694), 'Paldb.ipml.ReaderIpml.ReaderIpml', 'ReaderIpml.ReaderIpml', (['file'], {}), '(file)\n', (688, 694), False, 'from Paldb.ipml import ReaderIpml, WriterIpml\n')] |
import connexion
from openapi_server import encoder
from flask import redirect
ARGUMENTS = {
'title': 'OpenAPI for NCATS Biomedical Translator Reasoners'
}
PORT=8080
def main(name:str):
"""
Sets up and runs the web application.
Usage in server/openapi_server/__main__.py:
from reasoner import main
if __name__ == '__main__':
main(__name__)
"""
app = connexion.App(name, specification_dir='./openapi/')
app.app.json_encoder = encoder.JSONEncoder
app.app.json_encoder.include_nulls = True
api = app.add_api(
'openapi.yaml',
arguments=ARGUMENTS
)
app.add_error_handler(404, lambda e: redirect('{}/ui/'.format(api.base_path)))
app.run(port=PORT)
| [
"connexion.App"
] | [((406, 457), 'connexion.App', 'connexion.App', (['name'], {'specification_dir': '"""./openapi/"""'}), "(name, specification_dir='./openapi/')\n", (419, 457), False, 'import connexion\n')] |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pysemeels.tools.generate_hdf5_file
.. moduleauthor:: <NAME> <<EMAIL>>
Generate HDF5 file from Hitachi EELS data.
"""
###############################################################################
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import os.path
import logging
# Third party modules.
import numpy as np
# Local modules.
# Project modules.
from pysemeels.hitachi.eels_su.elv_text_file import ElvTextParameters
from pysemeels.hitachi.eels_su.elv_file import ElvFile
from pysemeels.tools.hdf5_file_labels import *
# Globals and constants variables.
class GenerateHdf5File(object):
def __init__(self, hdf5_file):
self.hdf5_file = hdf5_file
def add_spectrum(self, file_path, name=None):
if name is None:
basename, _extension = os.path.splitext(os.path.basename(file_path))
name = basename
spectrum_group = self.hdf5_file.create_group(name)
elv_text_file_path, _extension = os.path.splitext(file_path)
elv_text_file_path += '.txt'
with open(elv_text_file_path, 'r', encoding="UTF-16", errors='ignore') as elv_text_file:
elv_text_parameters = ElvTextParameters()
elv_text_parameters.read(elv_text_file)
spectrum_group.attrs[HDF5_MODEL] = elv_text_parameters.model
spectrum_group.attrs[HDF5_SAMPLE_HEIGHT] = elv_text_parameters.sample_height_mm
spectrum_group.attrs[HDF5_FILE_PATH] = elv_text_parameters.file_name
spectrum_group.attrs[HDF5_COMMENT] = elv_text_parameters.comment
spectrum_group.attrs[HDF5_DATE] = elv_text_parameters.date
spectrum_group.attrs[HDF5_TIME] = elv_text_parameters.time
spectrum_group.attrs[HDF5_ACCELERATING_VOLTAGE_V] = elv_text_parameters.accelerating_voltage_V
spectrum_group.attrs[HDF5_ENERGY_WIDTH_eV] = elv_text_parameters.energy_width_eV
spectrum_group.attrs[HDF5_ENERGY_LOSS] = elv_text_parameters.energy_loss_eV
spectrum_group.attrs[HDF5_ACQUISITION_SPEED] = elv_text_parameters.speed_us
with open(file_path, 'r', encoding="ANSI", errors='ignore') as elv_text_file:
elv_file = ElvFile()
elv_file.read(elv_text_file)
self.compare_attribute(spectrum_group, HDF5_DATE, elv_file.date)
self.compare_attribute(spectrum_group, HDF5_TIME, elv_file.time)
self.compare_attribute(spectrum_group, HDF5_COMMENT, elv_file.comment)
self.compare_attribute(spectrum_group, HDF5_ACQUISITION_SPEED, elv_file.dose)
self.compare_attribute(spectrum_group, HDF5_ENERGY_LOSS, elv_file.le)
spectrum_group.attrs[HDF5_RAW] = elv_file.raw
self.compare_attribute(spectrum_group, HDF5_ENERGY_WIDTH_eV, elv_file.energy_width)
spectrum_group.attrs[HDF5_DUAL_DET_POSITION] = elv_file.dual_det_position
spectrum_group.attrs[HDF5_DUAL_DET_POST] = elv_file.dual_det_post
spectrum_group.attrs[HDF5_DUAL_DET_CENTER] = elv_file.dual_det_center
spectrum_group.attrs[HDF5_Q1] = elv_file.q1
spectrum_group.attrs[HDF5_Q1S] = elv_file.q1s
spectrum_group.attrs[HDF5_Q2] = elv_file.q2
spectrum_group.attrs[HDF5_Q2S] = elv_file.q2s
spectrum_group.attrs[HDF5_Q3] = elv_file.q3
spectrum_group.attrs[HDF5_H1] = elv_file.h1
spectrum_group.attrs[HDF5_H1S] = elv_file.h1s
spectrum_group.attrs[HDF5_H2] = elv_file.h2
spectrum_group.attrs[HDF5_H2S] = elv_file.h2s
spectrum_group.attrs[HDF5_H4] = elv_file.h4
spectrum_group.attrs[HDF5_ELV_X] = elv_file.elv_x
spectrum_group.attrs[HDF5_ELV_Y] = elv_file.elv_y
spectrum_group.attrs[HDF5_SPECTRUM_ALIGNMENT_X] = elv_file.spectrum_alignment_x
spectrum_group.attrs[HDF5_SPECTRUM_ALIGNMENT_Y] = elv_file.spectrum_alignment_y
spectrum_group.attrs[HDF5_DET_SPEC_ALIGNMENT_X] = elv_file.det_spec_alignment_x
spectrum_group.attrs[HDF5_DET_SPEC_ALIGNMENT_Y] = elv_file.det_spec_alignment_y
spectrum_group.attrs[HDF5_DET_MAP_ALIGNMENT_X] = elv_file.det_map_alignment_x
spectrum_group.attrs[HDF5_DET_MAP_ALIGNMENT_Y] = elv_file.det_map_alignment_y
spectrum_group.attrs[HDF5_MAGNIFICATION] = elv_file.mag
data = np.zeros((1023, 5))
data[:, 0] = elv_file.energies_eV[:-1]
data[:, 1] = elv_file.counts[:-1]
data[:, 2] = elv_file.raw_counts[:-1]
data[:, 3] = elv_file.gain_corrections[:-1]
data[:, 4] = elv_file.dark_currents[:-1]
spectrum_data_set = spectrum_group.create_dataset(HDF5_SPECTRUM, data=data)
data = np.arange(1, 1023+1)
spectrum_channel_data_set = spectrum_group.create_dataset(HDF5_SPECTRUM_CHANNELS, data=data)
spectrum_data_set.dims.create_scale(spectrum_channel_data_set, HDF5_SPECTRUM_CHANNEL)
spectrum_data_set.dims[0].attach_scale(spectrum_channel_data_set)
data_types = [HDF5_SPECTRUM_ENERGIES_eV, HDF5_SPECTRUM_COUNTS, HDF5_SPECTRUM_RAW_COUNTS,
HDF5_SPECTRUM_GAIN_CORRECTIONS, HDF5_SPECTRUM_DARK_CURRENTS]
max_size = max([len(data_type) for data_type in data_types])
data = np.array(data_types, dtype="S{}".format(max_size+1))
spectrum_types_data_set = spectrum_group.create_dataset(HDF5_SPECTRUM_DATA_TYPES, data=data)
spectrum_data_set.dims.create_scale(spectrum_types_data_set, HDF5_SPECTRUM_DATA_TYPE)
spectrum_data_set.dims[1].attach_scale(spectrum_types_data_set)
def compare_attribute(self, spectrum_group, attribute_name, attribute_value):
if attribute_name in spectrum_group.attrs:
if attribute_value != spectrum_group.attrs[attribute_name]:
logging.error("{} is not the same in .txt and .elv files".format(attribute_name))
else:
spectrum_group.attrs[attribute_name] = attribute_value
| [
"numpy.zeros",
"pysemeels.hitachi.eels_su.elv_file.ElvFile",
"pysemeels.hitachi.eels_su.elv_text_file.ElvTextParameters",
"numpy.arange"
] | [((1859, 1878), 'pysemeels.hitachi.eels_su.elv_text_file.ElvTextParameters', 'ElvTextParameters', ([], {}), '()\n', (1876, 1878), False, 'from pysemeels.hitachi.eels_su.elv_text_file import ElvTextParameters\n'), ((2883, 2892), 'pysemeels.hitachi.eels_su.elv_file.ElvFile', 'ElvFile', ([], {}), '()\n', (2890, 2892), False, 'from pysemeels.hitachi.eels_su.elv_file import ElvFile\n'), ((5076, 5095), 'numpy.zeros', 'np.zeros', (['(1023, 5)'], {}), '((1023, 5))\n', (5084, 5095), True, 'import numpy as np\n'), ((5461, 5483), 'numpy.arange', 'np.arange', (['(1)', '(1023 + 1)'], {}), '(1, 1023 + 1)\n', (5470, 5483), True, 'import numpy as np\n')] |
import torch
class CELoss(torch.nn.Module):
def __init__(self):
super(CELoss, self).__init__()
def forward(self, y_pred, y_true):
y_pred = torch.clamp(y_pred, 1e-9, 1 - 1e-9)
return -(y_true * torch.log(y_pred)).sum(dim=1).mean()
| [
"torch.log",
"torch.clamp"
] | [((167, 204), 'torch.clamp', 'torch.clamp', (['y_pred', '(1e-09)', '(1 - 1e-09)'], {}), '(y_pred, 1e-09, 1 - 1e-09)\n', (178, 204), False, 'import torch\n'), ((229, 246), 'torch.log', 'torch.log', (['y_pred'], {}), '(y_pred)\n', (238, 246), False, 'import torch\n')] |
# MinimalDX version 0.1.4 (https://www.github.com/dmey/minimal-dx).
# Copyright 2018-2020 <NAME> and <NAME>. Licensed under MIT.
import subprocess
import os
from collections import namedtuple
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style('ticks')
def main(mode, with_eplus_psychro=False):
'''
Compares the results from the simplified coil implementation and the original version in EnergyPlus
'''
# Check environment to determine folder structure
if os.name == 'nt':
path_to_exe = os.path.join('..', 'build', 'tests', mode, 'Debug')
elif os.name == 'posix' or 'mac':
path_to_exe = os.path.join('..', 'build', 'tests', mode)
else:
raise Exception('Error: this program cannot run on your system')
# Define the path to the 2 programs called by run_tests.py
path_to_make_test_data_program = os.path.join(path_to_exe, 'make_test_data_' + mode)
if with_eplus_psychro:
path_to_tests_program = os.path.join(path_to_exe, 'test_' + mode + '_eplus')
eplus_suffix = ' with EPlus Psychro'
eplus_suffix_folder = '-with-eplus-psychro'
else:
path_to_tests_program = os.path.join(path_to_exe, 'test_' + mode + '')
eplus_suffix = ''
eplus_suffix_folder = ''
# Define the path to the output folder and check if exists else create it
path_to_output_folder = os.path.join('outputs', mode + eplus_suffix_folder)
if not os.path.exists(path_to_output_folder):
os.makedirs(path_to_output_folder)
# Define the path to the figure folder and check if exists else create it
path_to_figures = os.path.join(path_to_output_folder, 'figures')
if not os.path.exists(path_to_figures):
os.makedirs(path_to_figures)
# Create the test dataset
path_to_test_dataset = os.path.join(path_to_output_folder, 'test_data_' + mode + '.csv')
subprocess.check_call([path_to_make_test_data_program, path_to_test_dataset])
# Read input dataset to use later
input_dataset = pd.read_csv(path_to_test_dataset, delim_whitespace=True, na_values='Infinity')
# Call implementations and read outputs
Impl = namedtuple('Implementation', ['name', 'exe_path'])
impls = [
Impl('MinimalDX', os.path.abspath(path_to_tests_program))
#Impl('SimDXCoolingCoil', os.path.abspath(path_to_tests_program))
]
ref_impl = Impl('EnergyPlus', os.path.abspath(path_to_tests_program))
outputs = {}
for impl in [ref_impl] + impls:
out_path = os.path.join(path_to_output_folder, impl.name + '.csv')
with open('tests.log', 'w') as f:
subprocess.check_call([impl.exe_path, path_to_test_dataset, out_path, impl.name], stdout=f)
outputs[impl.name] = pd.read_csv(out_path, delim_whitespace=True, na_values='Infinity')
# determine output variables and row count from ref impl
Var = namedtuple('Variable', ['key', 'name', 'unit'])
out_vars = [
Var(name, *name.split('|'))
for name in outputs[ref_impl.name].columns.values
]
for out_var in out_vars:
df = pd.DataFrame({
impl.name: outputs[impl.name][out_var.key]
for impl in [ref_impl] + impls
})
fig, ax = plt.subplots()
sns.boxplot(data=df, orient="v", color='white', width=.25, ax=ax)
plt.setp(ax.artists, edgecolor='k', facecolor='w')
plt.setp(ax.lines, color='k')
ax.set(xlabel='Model Name', ylabel=out_var.name + ' in ' + out_var.unit)
sns.despine(offset=5, trim=True, ax=ax)
fig.tight_layout()
fig.savefig(os.path.join(path_to_figures, 'BoxPlot' + impl.name + out_var.name + '.png'))
# Plot boxplot of relative error against reference implementation
# Use absolute error metric to compare the new implementation with ASHRAE psychrometric
# routines as we expect the differences to be large
# Switch to relative error (expressed as a percentage of the reference implementation)
# when comparing the new implementation that uses EnergyPlus psychrometric routines as
# the two implementation should yield almost identical results
# https://en.wikipedia.org/wiki/Approximation_error
if impl.name == 'MinimalDX-ABS':
error_type = ' absolute error '
error_unit = out_var.unit
error = pd.DataFrame({
impl.name + ' - ' + ref_impl.name: (outputs[impl.name][out_var.key] - outputs[ref_impl.name][out_var.key])
for impl in impls
})
elif impl.name == 'MinimalDX':
error_type = ' relative error '
error_unit = '%'
error = pd.DataFrame({
impl.name + ' - ' + ref_impl.name: ((outputs[impl.name][out_var.key] - outputs[ref_impl.name][out_var.key]) \
/ outputs[ref_impl.name][out_var.key])*100
for impl in impls
})
max_diff = error.max()[0]
if max_diff != 0.0:
print('-------------------------- Max Diff----------------------')
print('max_diff: ', error.max())
print('\n')
idxmax = error.idxmax()
print('Input data that produce max diff', input_dataset.iloc[idxmax])
print('\n\n')
min_diff = error.min()[0]
if min_diff != 0.0:
print('-------------------------- Min Diff----------------------')
print('min_diff: ', error.min())
print('\n')
idxmin = error.idxmin()
print('Input data that produce min diff: ', input_dataset.iloc[idxmin])
print('\n\n')
fig, ax = plt.subplots()
sns.boxplot(data=error, orient="v", color='white', width=.25, ax=ax)
plt.setp(ax.artists, edgecolor='k', facecolor='w')
plt.setp(ax.lines, color='k')
ax.set(xlabel='Model Name', ylabel=out_var.name + error_type + 'in ' + error_unit)
sns.despine(offset=5, trim=True, ax=ax)
fig.tight_layout()
fig.savefig(os.path.join(path_to_figures, 'BoxPlotError' + impl.name + out_var.name + '.png'))
fig, ax = plt.subplots()
ax.scatter(error.index.values, error, color='k', s=0.01)
ax.set_xlabel('Run Number in 1')
ax.set_ylabel(out_var.name + error_type + 'in '+ error_unit)
ax.set_ylim([ min_diff, max_diff])
sns.despine(offset=5, trim=True, ax=ax)
fig.tight_layout()
fig.savefig(os.path.join(path_to_figures, 'ScatterPlotError' + impl.name + out_var.name + '.png'))
# Free memory at the end of each iteration
plt.close()
if __name__ == "__main__":
main('cooling')
main('cooling', with_eplus_psychro=True) | [
"os.path.exists",
"matplotlib.pyplot.setp",
"collections.namedtuple",
"pandas.read_csv",
"subprocess.check_call",
"matplotlib.use",
"os.makedirs",
"pandas.DataFrame",
"seaborn.despine",
"os.path.join",
"seaborn.set_style",
"seaborn.boxplot",
"matplotlib.pyplot.close",
"os.path.abspath",
... | [((211, 232), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (225, 232), False, 'import matplotlib\n'), ((326, 348), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (339, 348), True, 'import seaborn as sns\n'), ((952, 1003), 'os.path.join', 'os.path.join', (['path_to_exe', "('make_test_data_' + mode)"], {}), "(path_to_exe, 'make_test_data_' + mode)\n", (964, 1003), False, 'import os\n'), ((1468, 1519), 'os.path.join', 'os.path.join', (['"""outputs"""', '(mode + eplus_suffix_folder)'], {}), "('outputs', mode + eplus_suffix_folder)\n", (1480, 1519), False, 'import os\n'), ((1714, 1760), 'os.path.join', 'os.path.join', (['path_to_output_folder', '"""figures"""'], {}), "(path_to_output_folder, 'figures')\n", (1726, 1760), False, 'import os\n'), ((1900, 1965), 'os.path.join', 'os.path.join', (['path_to_output_folder', "('test_data_' + mode + '.csv')"], {}), "(path_to_output_folder, 'test_data_' + mode + '.csv')\n", (1912, 1965), False, 'import os\n'), ((1970, 2047), 'subprocess.check_call', 'subprocess.check_call', (['[path_to_make_test_data_program, path_to_test_dataset]'], {}), '([path_to_make_test_data_program, path_to_test_dataset])\n', (1991, 2047), False, 'import subprocess\n'), ((2107, 2185), 'pandas.read_csv', 'pd.read_csv', (['path_to_test_dataset'], {'delim_whitespace': '(True)', 'na_values': '"""Infinity"""'}), "(path_to_test_dataset, delim_whitespace=True, na_values='Infinity')\n", (2118, 2185), True, 'import pandas as pd\n'), ((2242, 2292), 'collections.namedtuple', 'namedtuple', (['"""Implementation"""', "['name', 'exe_path']"], {}), "('Implementation', ['name', 'exe_path'])\n", (2252, 2292), False, 'from collections import namedtuple\n'), ((2975, 3022), 'collections.namedtuple', 'namedtuple', (['"""Variable"""', "['key', 'name', 'unit']"], {}), "('Variable', ['key', 'name', 'unit'])\n", (2985, 3022), False, 'from collections import namedtuple\n'), ((613, 664), 'os.path.join', 'os.path.join', (['""".."""', '"""build"""', '"""tests"""', 'mode', '"""Debug"""'], {}), "('..', 'build', 'tests', mode, 'Debug')\n", (625, 664), False, 'import os\n'), ((1063, 1115), 'os.path.join', 'os.path.join', (['path_to_exe', "('test_' + mode + '_eplus')"], {}), "(path_to_exe, 'test_' + mode + '_eplus')\n", (1075, 1115), False, 'import os\n'), ((1255, 1301), 'os.path.join', 'os.path.join', (['path_to_exe', "('test_' + mode + '')"], {}), "(path_to_exe, 'test_' + mode + '')\n", (1267, 1301), False, 'import os\n'), ((1531, 1568), 'os.path.exists', 'os.path.exists', (['path_to_output_folder'], {}), '(path_to_output_folder)\n', (1545, 1568), False, 'import os\n'), ((1578, 1612), 'os.makedirs', 'os.makedirs', (['path_to_output_folder'], {}), '(path_to_output_folder)\n', (1589, 1612), False, 'import os\n'), ((1772, 1803), 'os.path.exists', 'os.path.exists', (['path_to_figures'], {}), '(path_to_figures)\n', (1786, 1803), False, 'import os\n'), ((1813, 1841), 'os.makedirs', 'os.makedirs', (['path_to_figures'], {}), '(path_to_figures)\n', (1824, 1841), False, 'import os\n'), ((2488, 2526), 'os.path.abspath', 'os.path.abspath', (['path_to_tests_program'], {}), '(path_to_tests_program)\n', (2503, 2526), False, 'import os\n'), ((2601, 2656), 'os.path.join', 'os.path.join', (['path_to_output_folder', "(impl.name + '.csv')"], {}), "(path_to_output_folder, impl.name + '.csv')\n", (2613, 2656), False, 'import os\n'), ((3184, 3278), 'pandas.DataFrame', 'pd.DataFrame', (['{impl.name: outputs[impl.name][out_var.key] for impl in [ref_impl] + impls}'], {}), '({impl.name: outputs[impl.name][out_var.key] for impl in [\n ref_impl] + impls})\n', (3196, 3278), True, 'import pandas as pd\n'), ((3327, 3341), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3339, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3416), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'orient': '"""v"""', 'color': '"""white"""', 'width': '(0.25)', 'ax': 'ax'}), "(data=df, orient='v', color='white', width=0.25, ax=ax)\n", (3361, 3416), True, 'import seaborn as sns\n'), ((3424, 3474), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.artists'], {'edgecolor': '"""k"""', 'facecolor': '"""w"""'}), "(ax.artists, edgecolor='k', facecolor='w')\n", (3432, 3474), True, 'import matplotlib.pyplot as plt\n'), ((3483, 3512), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.lines'], {'color': '"""k"""'}), "(ax.lines, color='k')\n", (3491, 3512), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3641), 'seaborn.despine', 'sns.despine', ([], {'offset': '(5)', 'trim': '(True)', 'ax': 'ax'}), '(offset=5, trim=True, ax=ax)\n', (3613, 3641), True, 'import seaborn as sns\n'), ((5809, 5823), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5821, 5823), True, 'import matplotlib.pyplot as plt\n'), ((5832, 5901), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'error', 'orient': '"""v"""', 'color': '"""white"""', 'width': '(0.25)', 'ax': 'ax'}), "(data=error, orient='v', color='white', width=0.25, ax=ax)\n", (5843, 5901), True, 'import seaborn as sns\n'), ((5909, 5959), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.artists'], {'edgecolor': '"""k"""', 'facecolor': '"""w"""'}), "(ax.artists, edgecolor='k', facecolor='w')\n", (5917, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5968, 5997), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.lines'], {'color': '"""k"""'}), "(ax.lines, color='k')\n", (5976, 5997), True, 'import matplotlib.pyplot as plt\n'), ((6097, 6136), 'seaborn.despine', 'sns.despine', ([], {'offset': '(5)', 'trim': '(True)', 'ax': 'ax'}), '(offset=5, trim=True, ax=ax)\n', (6108, 6136), True, 'import seaborn as sns\n'), ((6286, 6300), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6298, 6300), True, 'import matplotlib.pyplot as plt\n'), ((6527, 6566), 'seaborn.despine', 'sns.despine', ([], {'offset': '(5)', 'trim': '(True)', 'ax': 'ax'}), '(offset=5, trim=True, ax=ax)\n', (6538, 6566), True, 'import seaborn as sns\n'), ((6761, 6772), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6770, 6772), True, 'import matplotlib.pyplot as plt\n'), ((725, 767), 'os.path.join', 'os.path.join', (['""".."""', '"""build"""', '"""tests"""', 'mode'], {}), "('..', 'build', 'tests', mode)\n", (737, 767), False, 'import os\n'), ((2333, 2371), 'os.path.abspath', 'os.path.abspath', (['path_to_tests_program'], {}), '(path_to_tests_program)\n', (2348, 2371), False, 'import os\n'), ((2711, 2807), 'subprocess.check_call', 'subprocess.check_call', (['[impl.exe_path, path_to_test_dataset, out_path, impl.name]'], {'stdout': 'f'}), '([impl.exe_path, path_to_test_dataset, out_path, impl.\n name], stdout=f)\n', (2732, 2807), False, 'import subprocess\n'), ((2836, 2902), 'pandas.read_csv', 'pd.read_csv', (['out_path'], {'delim_whitespace': '(True)', 'na_values': '"""Infinity"""'}), "(out_path, delim_whitespace=True, na_values='Infinity')\n", (2847, 2902), True, 'import pandas as pd\n'), ((3689, 3765), 'os.path.join', 'os.path.join', (['path_to_figures', "('BoxPlot' + impl.name + out_var.name + '.png')"], {}), "(path_to_figures, 'BoxPlot' + impl.name + out_var.name + '.png')\n", (3701, 3765), False, 'import os\n'), ((4463, 4610), 'pandas.DataFrame', 'pd.DataFrame', (["{(impl.name + ' - ' + ref_impl.name): (outputs[impl.name][out_var.key] -\n outputs[ref_impl.name][out_var.key]) for impl in impls}"], {}), "({(impl.name + ' - ' + ref_impl.name): (outputs[impl.name][\n out_var.key] - outputs[ref_impl.name][out_var.key]) for impl in impls})\n", (4475, 4610), True, 'import pandas as pd\n'), ((6184, 6269), 'os.path.join', 'os.path.join', (['path_to_figures', "('BoxPlotError' + impl.name + out_var.name + '.png')"], {}), "(path_to_figures, 'BoxPlotError' + impl.name + out_var.name +\n '.png')\n", (6196, 6269), False, 'import os\n'), ((6614, 6703), 'os.path.join', 'os.path.join', (['path_to_figures', "('ScatterPlotError' + impl.name + out_var.name + '.png')"], {}), "(path_to_figures, 'ScatterPlotError' + impl.name + out_var.name +\n '.png')\n", (6626, 6703), False, 'import os\n'), ((4788, 4986), 'pandas.DataFrame', 'pd.DataFrame', (["{(impl.name + ' - ' + ref_impl.name): ((outputs[impl.name][out_var.key] -\n outputs[ref_impl.name][out_var.key]) / outputs[ref_impl.name][out_var.\n key] * 100) for impl in impls}"], {}), "({(impl.name + ' - ' + ref_impl.name): ((outputs[impl.name][\n out_var.key] - outputs[ref_impl.name][out_var.key]) / outputs[ref_impl.\n name][out_var.key] * 100) for impl in impls})\n", (4800, 4986), True, 'import pandas as pd\n')] |
import sys
import click
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_probability as tfp
import statsmodels.api as sm
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from abc import ABC, abstractmethod
from pathlib import Path
from tensorflow.keras.losses import BinaryCrossentropy
from bore.models import DenseMaximizableSequential
from bore_experiments.datasets import make_classification_dataset
from bore_experiments.plotting.utils import GOLDEN_RATIO, WIDTH, pt_to_in
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
K.set_floatx("float64")
# shortcuts
tfd = tfp.distributions
OUTPUT_DIR = "figures/"
class DensityRatioBase(ABC):
def __call__(self, X, y=None):
return self.ratio(X, y)
@abstractmethod
def logit(self, X, y=None):
pass
def ratio(self, X, y=None):
return tf.exp(self.logit(X, y))
def prob(self, X, y=None):
"""
Probability of sample being from P_{top}(x) vs. P_{bot}(x).
"""
return tf.sigmoid(self.logit(X, y))
class DensityRatioMarginals(DensityRatioBase):
def __init__(self, top, bot):
self.top = top
self.bot = bot
def logit(self, X, y=None):
return self.top.log_prob(X) - self.bot.log_prob(X)
def make_dataset(self, num_samples, rate=0.5, dtype=tf.float64, seed=None):
num_top = int(num_samples * rate)
num_bot = num_samples - num_top
_X_top = self.top.sample(sample_shape=(num_top, 1), seed=seed)
_X_bot = self.bot.sample(sample_shape=(num_bot, 1), seed=seed)
X_top = tf.cast(_X_top, dtype=dtype).numpy()
X_bot = tf.cast(_X_bot, dtype=dtype).numpy()
return X_top, X_bot
class MLPDensityRatioEstimator(DensityRatioBase):
def __init__(self, num_layers=2, num_units=32, activation="tanh",
seed=None, *args, **kwargs):
self.model = DenseMaximizableSequential(input_dim=1, output_dim=1,
num_layers=num_layers,
num_units=num_units,
layer_kws=dict(activation=activation))
def logit(self, X, y=None):
# TODO: time will tell whether squeezing the final axis
# makes things easier.
return K.squeeze(self.model(X), axis=-1)
def compile(self, optimizer, metrics=["accuracy"], *args, **kwargs):
self.model.compile(optimizer=optimizer,
loss=BinaryCrossentropy(from_logits=True),
metrics=metrics, *args, **kwargs)
def fit(self, X_top, X_bot, *args, **kwargs):
X, y = make_classification_dataset(X_top, X_bot)
return self.model.fit(X, y, *args, **kwargs)
def evaluate(self, X_top, X_bot, *args, **kwargs):
X, y = make_classification_dataset(X_top, X_bot)
return self.model.evaluate(X, y, *args, **kwargs)
def gamma_relative_density_ratio(ratio, gamma):
denom = gamma + (1-gamma) / ratio
return 1 / denom
@click.command()
@click.argument("name")
@click.option('--gamma', '-g', type=float, default=1/3)
@click.option("--output-dir", default=OUTPUT_DIR,
type=click.Path(file_okay=False, dir_okay=True),
help="Output directory.")
@click.option('--transparent', is_flag=True)
@click.option('--context', default="paper")
@click.option('--style', default="ticks")
@click.option('--palette', default="deep")
@click.option('--width', '-w', type=float, default=pt_to_in(WIDTH))
@click.option('--height', '-h', type=float)
@click.option('--aspect', '-a', type=float, default=GOLDEN_RATIO)
@click.option('--dpi', type=float)
@click.option('--extension', '-e', multiple=True, default=["png"])
@click.option("--seed", default=8888)
def main(name, gamma, output_dir, transparent, context, style, palette,
width, height, aspect, dpi, extension, seed):
num_features = 1 # dimensionality
num_train = 1000 # nbr training points in synthetic dataset
# x_min, x_max = -6.0, 6.0
x_min, x_max = -5.0, 5.0
num_index_points = 512 # nbr of index points
if height is None:
height = width / aspect
# figsize = size(width, aspect)
figsize = (width, height)
suffix = f"{width*dpi:.0f}x{height*dpi:.0f}"
rc = {
"figure.figsize": figsize,
"font.serif": ["Times New Roman"],
"text.usetex": True,
}
sns.set(context=context, style=style, palette=palette, font="serif", rc=rc)
output_path = Path(output_dir).joinpath(name)
output_path.mkdir(parents=True, exist_ok=True)
random_state = np.random.RandomState(seed)
# /preamble
X_grid = np.linspace(x_min, x_max, num_index_points) \
.reshape(-1, num_features)
p = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
components_distribution=tfd.Normal(loc=[2.0, -3.0], scale=[1.0, 0.5]))
q = tfd.Normal(loc=0.0, scale=2.0)
r = DensityRatioMarginals(top=p, bot=q)
X_p, X_q = r.make_dataset(num_train, rate=gamma, seed=seed)
X_train, y_train = make_classification_dataset(X_p, X_q)
kde_lesser = sm.nonparametric.KDEUnivariate(X_p)
kde_lesser.fit(bw="normal_reference")
kde_greater = sm.nonparametric.KDEUnivariate(X_q)
kde_greater.fit(bw="normal_reference")
# Build DataFrame
rows = []
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=r.top.prob(X_grid).numpy().squeeze(axis=-1),
density=r"$\ell(x)$", kind=r"$\textsc{exact}$"))
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=r.bot.prob(X_grid).numpy().squeeze(axis=-1),
density=r"$g(x)$", kind=r"$\textsc{exact}$"))
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=kde_lesser.evaluate(X_grid.ravel()),
density=r"$\ell(x)$", kind=r"$\textsc{kde}$"))
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=kde_greater.evaluate(X_grid.ravel()),
density=r"$g(x)$", kind=r"$\textsc{kde}$"))
frames = map(pd.DataFrame, rows)
data = pd.concat(frames, axis="index", ignore_index=True, sort=True)
fig, ax = plt.subplots()
sns.lineplot(x='x', y='y', hue="density", style="kind", data=data, ax=ax)
ax.set_prop_cycle(None)
ax.set_ylim(-0.025, None)
ax.set_xlim(1.1*X_grid.min(), 1.1*X_grid.max())
sns.rugplot(X_p.squeeze(), height=0.02, alpha=0.2, ax=ax)
sns.rugplot(X_q.squeeze(), height=0.02, alpha=0.2, ax=ax)
ax.set_xlabel(r'$x$')
ax.set_ylabel('density')
plt.tight_layout()
for ext in extension:
fig.savefig(output_path.joinpath(f"densities_{context}_{suffix}.{ext}"),
dpi=dpi, transparent=transparent)
plt.show()
classifiers = dict(
svm=SVC(C=10.0, kernel="rbf", probability=True, tol=1e-9),
rf=RandomForestClassifier(n_estimators=16, max_depth=3, random_state=random_state),
xgb=xgb.XGBClassifier(n_estimators=16, max_depth=3, use_label_encoder=False, random_state=random_state)
# mlp=
)
# base_clf = RandomForestClassifier(random_state=random_state)
# clf = CalibratedClassifierCV(base_estimator=base_clf, method="isotonic") \
# .fit(X_train, y_train)
r_mlp = MLPDensityRatioEstimator(num_layers=3, num_units=32, activation="elu")
r_mlp.compile(optimizer="adam", metrics=["accuracy"])
r_mlp.fit(X_p, X_q, epochs=500, batch_size=64)
# Build DataFrame
# rows = []
# # exact
# # rows.append({'x': X_grid.squeeze(axis=-1),
# # 'y': r.ratio(X_grid).numpy().squeeze(axis=-1),
# # 'kind': r"$\textsc{exact}$", r'$\gamma$': r"$0$"})
# rows.append({'x': X_grid.squeeze(axis=-1),
# 'y': gamma_relative_density_ratio(r.ratio(X_grid), gamma=gamma) \
# .numpy().squeeze(axis=-1),
# 'kind': r"$\textsc{exact}$", r'$\gamma$': r"$\frac{1}{4}$", "exact": True})
# # kde
# # rows.append({'x': X_grid.squeeze(axis=-1),
# # 'y': kde_lesser.evaluate(X_grid.ravel()) / kde_greater.evaluate(X_grid.ravel()),
# # 'kind': r"$\textsc{kde}$", r'$\gamma$': r"$0$"})
# rows.append({'x': X_grid.squeeze(axis=-1),
# 'y': gamma_relative_density_ratio(kde_lesser.evaluate(X_grid.ravel()) / kde_greater.evaluate(X_grid.ravel()), gamma),
# 'kind': r"$\textsc{kde}$", r'$\gamma$': r"$\frac{1}{4}$", "exact": False})
# # cpe
# for clf_name, clf in classifiers.items():
# clf = clf.fit(X_train, y_train)
# rows.append({'x': X_grid.squeeze(axis=-1),
# 'y': clf.predict_proba(X_grid).T[1] / gamma,
# 'kind': rf"$\textsc{{cpe}}$ (\textsc{{{clf_name}}})",
# r'$\gamma$': r"$\frac{1}{3}$", "exact": False})
# data = pd.concat(map(pd.DataFrame, rows), axis="index", ignore_index=True,
# sort=True)
fig, ax = plt.subplots()
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(r.ratio(X_grid), gamma=gamma).numpy().squeeze(axis=-1),
label=r"$\textsc{exact}$")
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(kde_lesser.evaluate(X_grid.ravel()) /
kde_greater.evaluate(X_grid.ravel()),
gamma=gamma),
alpha=0.8, label=r"$\textsc{kde}$")
ax.plot(X_grid.squeeze(axis=-1), r_mlp.prob(X_grid) / gamma,
alpha=0.8, label=r"$\textsc{{cpe}}$ (\textsc{mlp})")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$r_{\gamma}(x)$")
ax.set_xlim(1.1*X_grid.min(), 1.1*X_grid.max())
ax.legend()
plt.tight_layout()
for ext in extension:
fig.savefig(output_path.joinpath(f"ratios_mlp_{context}_{suffix}.{ext}"),
dpi=dpi, transparent=transparent)
plt.show()
for clf_name, clf in classifiers.items():
clf = clf.fit(X_train, y_train)
fig, ax = plt.subplots()
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(r.ratio(X_grid), gamma=gamma).numpy().squeeze(axis=-1),
label=r"$\textsc{exact}$")
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(kde_lesser.evaluate(X_grid.ravel()) /
kde_greater.evaluate(X_grid.ravel()),
gamma=gamma),
alpha=0.8, label=r"$\textsc{kde}$")
ax.plot(X_grid.squeeze(axis=-1), clf.predict_proba(X_grid).T[1] / gamma,
alpha=0.8, label=rf"$\textsc{{cpe}}$ (\textsc{{{clf_name}}})")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$r_{\gamma}(x)$")
ax.set_xlim(1.1*X_grid.min(), 1.1*X_grid.max())
ax.legend()
plt.tight_layout()
for ext in extension:
fig.savefig(output_path.joinpath(f"ratios_{clf_name}_{context}_{suffix}.{ext}"),
dpi=dpi, transparent=transparent)
plt.show()
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| [
"tensorflow.cast",
"numpy.random.RandomState",
"seaborn.set",
"pathlib.Path",
"click.option",
"bore_experiments.datasets.make_classification_dataset",
"numpy.linspace",
"click.command",
"click.argument",
"statsmodels.api.nonparametric.KDEUnivariate",
"tensorflow.keras.losses.BinaryCrossentropy",... | [((717, 740), 'tensorflow.keras.backend.set_floatx', 'K.set_floatx', (['"""float64"""'], {}), "('float64')\n", (729, 740), True, 'import tensorflow.keras.backend as K\n'), ((3229, 3244), 'click.command', 'click.command', ([], {}), '()\n', (3242, 3244), False, 'import click\n'), ((3246, 3268), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (3260, 3268), False, 'import click\n'), ((3270, 3326), 'click.option', 'click.option', (['"""--gamma"""', '"""-g"""'], {'type': 'float', 'default': '(1 / 3)'}), "('--gamma', '-g', type=float, default=1 / 3)\n", (3282, 3326), False, 'import click\n'), ((3479, 3522), 'click.option', 'click.option', (['"""--transparent"""'], {'is_flag': '(True)'}), "('--transparent', is_flag=True)\n", (3491, 3522), False, 'import click\n'), ((3524, 3566), 'click.option', 'click.option', (['"""--context"""'], {'default': '"""paper"""'}), "('--context', default='paper')\n", (3536, 3566), False, 'import click\n'), ((3568, 3608), 'click.option', 'click.option', (['"""--style"""'], {'default': '"""ticks"""'}), "('--style', default='ticks')\n", (3580, 3608), False, 'import click\n'), ((3610, 3651), 'click.option', 'click.option', (['"""--palette"""'], {'default': '"""deep"""'}), "('--palette', default='deep')\n", (3622, 3651), False, 'import click\n'), ((3721, 3763), 'click.option', 'click.option', (['"""--height"""', '"""-h"""'], {'type': 'float'}), "('--height', '-h', type=float)\n", (3733, 3763), False, 'import click\n'), ((3765, 3829), 'click.option', 'click.option', (['"""--aspect"""', '"""-a"""'], {'type': 'float', 'default': 'GOLDEN_RATIO'}), "('--aspect', '-a', type=float, default=GOLDEN_RATIO)\n", (3777, 3829), False, 'import click\n'), ((3831, 3864), 'click.option', 'click.option', (['"""--dpi"""'], {'type': 'float'}), "('--dpi', type=float)\n", (3843, 3864), False, 'import click\n'), ((3866, 3931), 'click.option', 'click.option', (['"""--extension"""', '"""-e"""'], {'multiple': '(True)', 'default': "['png']"}), "('--extension', '-e', multiple=True, default=['png'])\n", (3878, 3931), False, 'import click\n'), ((3933, 3969), 'click.option', 'click.option', (['"""--seed"""'], {'default': '(8888)'}), "('--seed', default=8888)\n", (3945, 3969), False, 'import click\n'), ((4612, 4687), 'seaborn.set', 'sns.set', ([], {'context': 'context', 'style': 'style', 'palette': 'palette', 'font': '"""serif"""', 'rc': 'rc'}), "(context=context, style=style, palette=palette, font='serif', rc=rc)\n", (4619, 4687), True, 'import seaborn as sns\n'), ((4810, 4837), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4831, 4837), True, 'import numpy as np\n'), ((5303, 5340), 'bore_experiments.datasets.make_classification_dataset', 'make_classification_dataset', (['X_p', 'X_q'], {}), '(X_p, X_q)\n', (5330, 5340), False, 'from bore_experiments.datasets import make_classification_dataset\n'), ((5359, 5394), 'statsmodels.api.nonparametric.KDEUnivariate', 'sm.nonparametric.KDEUnivariate', (['X_p'], {}), '(X_p)\n', (5389, 5394), True, 'import statsmodels.api as sm\n'), ((5456, 5491), 'statsmodels.api.nonparametric.KDEUnivariate', 'sm.nonparametric.KDEUnivariate', (['X_q'], {}), '(X_q)\n', (5486, 5491), True, 'import statsmodels.api as sm\n'), ((6340, 6401), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '"""index"""', 'ignore_index': '(True)', 'sort': '(True)'}), "(frames, axis='index', ignore_index=True, sort=True)\n", (6349, 6401), True, 'import pandas as pd\n'), ((6417, 6431), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6429, 6431), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6510), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""x"""', 'y': '"""y"""', 'hue': '"""density"""', 'style': '"""kind"""', 'data': 'data', 'ax': 'ax'}), "(x='x', y='y', hue='density', style='kind', data=data, ax=ax)\n", (6449, 6510), True, 'import seaborn as sns\n'), ((6808, 6826), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6824, 6826), True, 'import matplotlib.pyplot as plt\n'), ((6994, 7004), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7002, 7004), True, 'import matplotlib.pyplot as plt\n'), ((9248, 9262), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9260, 9262), True, 'import matplotlib.pyplot as plt\n'), ((10007, 10025), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10023, 10025), True, 'import matplotlib.pyplot as plt\n'), ((10194, 10204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10202, 10204), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2890), 'bore_experiments.datasets.make_classification_dataset', 'make_classification_dataset', (['X_top', 'X_bot'], {}), '(X_top, X_bot)\n', (2876, 2890), False, 'from bore_experiments.datasets import make_classification_dataset\n'), ((3016, 3057), 'bore_experiments.datasets.make_classification_dataset', 'make_classification_dataset', (['X_top', 'X_bot'], {}), '(X_top, X_bot)\n', (3043, 3057), False, 'from bore_experiments.datasets import make_classification_dataset\n'), ((10312, 10326), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10324, 10326), True, 'import matplotlib.pyplot as plt\n'), ((11153, 11171), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11169, 11171), True, 'import matplotlib.pyplot as plt\n'), ((11363, 11373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11371, 11373), True, 'import matplotlib.pyplot as plt\n'), ((3394, 3436), 'click.Path', 'click.Path', ([], {'file_okay': '(False)', 'dir_okay': '(True)'}), '(file_okay=False, dir_okay=True)\n', (3404, 3436), False, 'import click\n'), ((3703, 3718), 'bore_experiments.plotting.utils.pt_to_in', 'pt_to_in', (['WIDTH'], {}), '(WIDTH)\n', (3711, 3718), False, 'from bore_experiments.plotting.utils import GOLDEN_RATIO, WIDTH, pt_to_in\n'), ((4707, 4723), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (4711, 4723), False, 'from pathlib import Path\n'), ((4868, 4911), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'num_index_points'], {}), '(x_min, x_max, num_index_points)\n', (4879, 4911), True, 'import numpy as np\n'), ((7042, 7096), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(10.0)', 'kernel': '"""rbf"""', 'probability': '(True)', 'tol': '(1e-09)'}), "(C=10.0, kernel='rbf', probability=True, tol=1e-09)\n", (7045, 7096), False, 'from sklearn.svm import SVC\n'), ((7108, 7187), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(16)', 'max_depth': '(3)', 'random_state': 'random_state'}), '(n_estimators=16, max_depth=3, random_state=random_state)\n', (7130, 7187), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7201, 7304), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'n_estimators': '(16)', 'max_depth': '(3)', 'use_label_encoder': '(False)', 'random_state': 'random_state'}), '(n_estimators=16, max_depth=3, use_label_encoder=False,\n random_state=random_state)\n', (7218, 7304), True, 'import xgboost as xgb\n'), ((1759, 1787), 'tensorflow.cast', 'tf.cast', (['_X_top'], {'dtype': 'dtype'}), '(_X_top, dtype=dtype)\n', (1766, 1787), True, 'import tensorflow as tf\n'), ((1812, 1840), 'tensorflow.cast', 'tf.cast', (['_X_bot'], {'dtype': 'dtype'}), '(_X_bot, dtype=dtype)\n', (1819, 1840), True, 'import tensorflow as tf\n'), ((2683, 2719), 'tensorflow.keras.losses.BinaryCrossentropy', 'BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2701, 2719), False, 'from tensorflow.keras.losses import BinaryCrossentropy\n')] |
"""
imgLog.py - experimental log for imgFolder
initial: 2019-10-04
"""
import os
import pandas as pd
if ('np' not in dir()): import numpy as np
from imlib.imgfolder import ImgFolder
__author__ = '<NAME> <<EMAIL>>'
__version__ = '1.0.0'
class ImgLog(ImgFolder):
""" imgFolder for channel experiment images """
def __init__(self, dirname, sort=False, debug=False):
""" initialization """
super().__init__(dirname, sort=sort, debug=debug)
self._logfname = dirname + '/log.xlsx'
if not self.loadLog():
self._data['wall1'] = self._image._meta['wall1']
self._data['wall2'] = self._image._meta['wall2']
self._data['isChannel'] = False
self._data['range1'] = self._image._meta['range1']
self._data['range2'] = self._image._meta['range2']
self._data['hasRange'] = False
self._data['fangle'] = 0. # frame angle
self._data['mangle'] = 0. # migration angle
self._data['p'] = 0. # pressure
self._data['u'] = 0. # longitudinal velocity
self._data['mag'] = '40x' # magnification
self._data['filter'] = '' # filter
self._data['exp'] = 0. # exposure
self._data['location'] = '' # location
self._data['D'] = 0. # diffusion constant
self._data['Pe'] = 0. # Peclet number
self._data = self._data.astype(
{'D':'float', 'Pe':'float', 'mangle':'float',
'hasRange':'bool', 'isChannel':'bool',
'exp':'float', 'range1':'int', 'range2':'int',
'wall1':'int', 'wall2':'int'})
def __repr__(self):
""" show print out message """
msg = super().__repr__()
return msg
def __getitem__(self, fileID):
self._image = super().__getitem__(fileID)
p = self._data.at[fileID, 'p']
if isinstance(p, str) and (p.find(',') > -1):
p = float(p.replace(',', '.'))
self._data.at[fileID, 'p'] = p
if isinstance(p, float) or isinstance(p, np.int64):
u = 143.9518*p + 44.0784 # TODO in case of condenser chip
self._data.at[fileID, 'u'] = u
if self._debug: print('... (p, u) = {}, {}'.format(p, u))
self._data.at[fileID, 'exp'] = self._image._meta['exposuretime']
self._image.set_expInfo(magnification=self._data.at[fileID, 'mag'],
velocity=self._data.at[fileID, 'u'], p=p,
fangle=self._data.at[fileID, 'fangle'])
return self._image
# manage log excel sheet
def saveLog(self):
""" save log sheet """
with pd.ExcelWriter(self._logfname) as writer:
if self._debug: print('... save to {}'.format(self._logfname))
self._data.to_excel(writer)
def loadLog(self):
""" load log sheet """
if os.path.isfile(self._logfname):
if self._debug: print('... load from {}'.format(self._logfname))
self._data = pd.read_excel(self._logfname, index_col=0)
return True
else:
return False
# image analysis
def set_log(self, colname, values, ranges=[]):
""" set log values for specific colname """
if len(ranges) == 0:
ranges = range(len(self._data))
for i in ranges:
self._data.at[i, colname] = values
if self._debug: print('{}: [{}] - {}'.format(i, colname, values))
def detect_channel(self, fileID=-1, show=True):
""" find wall information and save in object """
if fileID > -1:
self._image = self.getfile(fileID)
res = self._image.detect_channel(show=show)
if len(res) > 3:
self._data.at[self._curidx, 'range1'] = res[2]
self._data.at[self._curidx, 'range2'] = res[3]
self._data.at[self._curidx, 'hasRange'] = True
if len(res) > 1:
self._data.at[self._curidx, 'wall1'] = res[0]
self._data.at[self._curidx, 'wall2'] = res[1]
self._data.at[self._curidx, 'isChannel'] = True
if len(res) == 1:
self._data.at[self._curidx, 'isChannel'] = False
return res
def analysis_10x(self, fileID, bfileID=-1, wallinfo=[], p=-1, method='gaussian', update=True, padding=0):
""" find angle and diffusion constant in 10x flu. and bright field images """
angle = 0.0
if p > -1:
self._data.at[self._curidx, 'p'] = p
if len(wallinfo) == 4:
self._data.loc[fileID, ['wall1', 'wall2', 'range1', 'range2']] = wallinfo
print('... fileID: [{}] use wallinfo: {}, ranges: {}'.format(fileID, wallinfo[:2], wallinfo[2:]))
else:
if bfileID > -1:
wallinfo = self.detect_channel(fileID=bfileID)
wallinfo[0] = wallinfo[0] + padding
wallinfo[1] = wallinfo[1] - padding
if len(wallinfo) == 3:
self._data.loc[fileID, ['wall1', 'wall2', 'range1']] = wallinfo
elif len(wallinfo) == 4:
self._data.loc[fileID, ['wall1', 'wall2', 'range1', 'range2']] = wallinfo
else:
print('... no wall. Is this [{}] correct image?'.format(bfileID))
return
img = self.__getitem__(bfileID)
angle = img.detect_angles(show=False)
print('... fileID: [{}] use wallinfo: {}, ranges: {}, frame angle: {}'.format(fileID, wallinfo[:2], wallinfo[2:], angle))
# set image information
self._image = self.__getitem__(fileID)
self._image.set_wallinfo(self._data.loc[fileID, ['wall1', 'wall2', 'range1', 'range2']])
self._image.set_expInfo(magnification=self._data.at[fileID, 'mag'],
velocity=self._data.at[fileID, 'u'], p=self._data.at[fileID, 'p'],
fangle=self._data.at[fileID, 'fangle'])
# calculate peak positions
self._image.fitlines_x(method=method, update=update)
self._image.showfit_sigmas()
self._image.showfit_angles()
# save results
self._data.at[fileID, 'mangle'] = self._image._meta['mangle']
self._data.at[fileID, 'D'] = self._image._meta['D']
self._data.at[fileID, 'Pe'] = self._image._meta['Pe']
if angle > 0:
self._data.at[fileID, 'fangle'] = angle
def analysis_all(self, blist, flist, method='gaussian', update=False):
""" analaysis migration angle of files in flist with wall info from
blist """
if isinstance(blist, int):
blist = np.zeros_like(np.array(flist)) + blist
for i in range(len(flist)):
self.analysis_10x(flist[i], bfileID=blist[i], padding=5,
update=update, method=method)
self.saveLog()
def showinfo(self, colname='mag', condition='10x'):
""" show panda data with condition """
return self._data[self._data[colname] == condition]
# vim:foldmethod=indent:foldlevel=0
| [
"os.path.isfile",
"numpy.array",
"pandas.ExcelWriter",
"pandas.read_excel"
] | [((2968, 2998), 'os.path.isfile', 'os.path.isfile', (['self._logfname'], {}), '(self._logfname)\n', (2982, 2998), False, 'import os\n'), ((2744, 2774), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['self._logfname'], {}), '(self._logfname)\n', (2758, 2774), True, 'import pandas as pd\n'), ((3102, 3144), 'pandas.read_excel', 'pd.read_excel', (['self._logfname'], {'index_col': '(0)'}), '(self._logfname, index_col=0)\n', (3115, 3144), True, 'import pandas as pd\n'), ((6749, 6764), 'numpy.array', 'np.array', (['flist'], {}), '(flist)\n', (6757, 6764), True, 'import numpy as np\n')] |
# say action
import sys, time
from actionproxy import ActionProxy
ACTION_NAME = 'say'
class SayActionProxy(ActionProxy):
def __init__(self, actionname):
ActionProxy.__init__(self, actionname)
def __del__(self):
ActionProxy.__del__(self)
def action_thread(self, params):
v = params.split('_')
saystr = v[0]
if len(v)>1:
lang = v[1]
else:
lang = 'en'
tm = 1 + saystr.count(' ')
print("Say [%s]: %s (time:%d)" %(lang,saystr,tm))
cnt = 0
dt = 0.5
while self.do_run and cnt < tm:
time.sleep(dt)
cnt += dt
print('---end say---')
if __name__ == "__main__":
params = None
if (len(sys.argv)>1):
params = sys.argv[1]
a = SayActionProxy(ACTION_NAME)
if params is not None:
a.execute(params) # blocking, CTRL-C to interrupt
else:
a.run_server() # blocking, CTRL-C to interrupt
| [
"actionproxy.ActionProxy.__init__",
"actionproxy.ActionProxy.__del__",
"time.sleep"
] | [((172, 210), 'actionproxy.ActionProxy.__init__', 'ActionProxy.__init__', (['self', 'actionname'], {}), '(self, actionname)\n', (192, 210), False, 'from actionproxy import ActionProxy\n'), ((244, 269), 'actionproxy.ActionProxy.__del__', 'ActionProxy.__del__', (['self'], {}), '(self)\n', (263, 269), False, 'from actionproxy import ActionProxy\n'), ((625, 639), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (635, 639), False, 'import sys, time\n')] |
from datetime import datetime
import logging
from weconnect.addressable import AddressableAttribute, AddressableList
from weconnect.elements.generic_settings import GenericSettings
from weconnect.util import robustTimeParse
LOG = logging.getLogger("weconnect")
class ChargingProfiles(GenericSettings):
def __init__(
self,
vehicle,
parent,
statusId,
fromDict=None,
fixAPI=True,
):
self.profiles = AddressableList(localAddress='profiles', parent=self)
self.timeInCar = AddressableAttribute(localAddress='timeInCar', parent=self, value=None, valueType=datetime)
super().__init__(vehicle=vehicle, parent=parent, statusId=statusId, fromDict=fromDict, fixAPI=fixAPI)
def update(self, fromDict, ignoreAttributes=None):
ignoreAttributes = ignoreAttributes or []
LOG.debug('Update charging profiles from dict')
if 'value' in fromDict:
if 'profiles' in fromDict['value'] and fromDict['value']['profiles'] is not None:
for profile in fromDict['value']['profiles']:
LOG.warning('Charging profiles are not yet implemented %s', profile)
else:
self.profiles.clear()
self.profiles.enabled = False
if 'timeInCar' in fromDict['value']:
self.timeInCar.setValueWithCarTime(robustTimeParse(
fromDict['value']['timeInCar']), lastUpdateFromCar=None, fromServer=True)
else:
self.timeInCar.enabled = False
else:
self.profiles.clear()
self.profiles.enabled = False
self.timeInCar.enabled = False
super().update(fromDict=fromDict, ignoreAttributes=(ignoreAttributes + ['profiles', 'timeInCar']))
def __str__(self):
string = super().__str__()
if self.timeInCar.enabled:
string += f'\n\tTime in Car: {self.timeInCar.value.isoformat()}' # pylint: disable=no-member
string += f' (captured at {self.carCapturedTimestamp.value.isoformat()})' # pylint: disable=no-member
string += f'\n\tProfiles: {len(self.profiles)} items'
for profile in self.profiles:
string += f'\n\t\t{profile}'
return string
| [
"logging.getLogger",
"weconnect.addressable.AddressableAttribute",
"weconnect.util.robustTimeParse",
"weconnect.addressable.AddressableList"
] | [((232, 262), 'logging.getLogger', 'logging.getLogger', (['"""weconnect"""'], {}), "('weconnect')\n", (249, 262), False, 'import logging\n'), ((464, 517), 'weconnect.addressable.AddressableList', 'AddressableList', ([], {'localAddress': '"""profiles"""', 'parent': 'self'}), "(localAddress='profiles', parent=self)\n", (479, 517), False, 'from weconnect.addressable import AddressableAttribute, AddressableList\n'), ((543, 638), 'weconnect.addressable.AddressableAttribute', 'AddressableAttribute', ([], {'localAddress': '"""timeInCar"""', 'parent': 'self', 'value': 'None', 'valueType': 'datetime'}), "(localAddress='timeInCar', parent=self, value=None,\n valueType=datetime)\n", (563, 638), False, 'from weconnect.addressable import AddressableAttribute, AddressableList\n'), ((1388, 1435), 'weconnect.util.robustTimeParse', 'robustTimeParse', (["fromDict['value']['timeInCar']"], {}), "(fromDict['value']['timeInCar'])\n", (1403, 1435), False, 'from weconnect.util import robustTimeParse\n')] |
#!/usr/bin/env python3
import os
import ctypes
import platform
import logging
logger = logging.getLogger(__name__)
def load_dll():
dl_path_env = os.getenv("CENTAURUS_DL_PATH", "")
if platform.uname()[0] == "Windows":
dl_path = os.path.join(dl_path_env, "libpycentaurus.dll")
elif platform.uname()[0] == "Linux":
dl_path = os.path.join(dl_path_env, "libpycentaurus.so")
return ctypes.CDLL(dl_path, mode=ctypes.RTLD_GLOBAL)
CoreLib = load_dll()
EnumMachinesCallback = ctypes.CFUNCTYPE(None, ctypes.c_wchar_p, ctypes.c_int)
class Grammar(object):
"""EBNF grammar definition."""
CoreLib.GrammarCreate.restype = ctypes.c_void_p
CoreLib.GrammarCreate.argtypes = [ctypes.c_char_p]
CoreLib.GrammarDestroy.argtypes = [ctypes.c_void_p]
#corelib.GrammarAddRule.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_wchar_p]
CoreLib.GrammarPrint.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]
CoreLib.GrammarEnumMachines.argtypes = [ctypes.c_void_p, EnumMachinesCallback]
CoreLib.GrammarOptimize.argtypes = [ctypes.c_void_p]
def __init__(self, filename):
self.handle = CoreLib.GrammarCreate(filename.encode('utf-8'))
self.actions = []
self.ids = {}
CoreLib.GrammarEnumMachines(self.handle, EnumMachinesCallback(self.enum_machines_callback))
self.names = [''] * len(self.ids)
for name, id in self.ids.items():
self.names[id - 1] = name
def enum_machines_callback(self, name, id):
self.ids[name] = id
def __del__(self):
CoreLib.GrammarDestroy(self.handle)
def print(self, filename):
CoreLib.GrammarPrint(self.handle, filename.encode('utf-8'), 4)
def optimize(self):
CoreLib.GrammarOptimize(self.handle)
def get_machine_id(self, name):
return self.ids[name]
def get_machine_name(self, id):
return self.names[id - 1]
def get_machine_num(self):
return len(self.ids)
class Parser(object):
CoreLib.ParserCreate.restype = ctypes.c_void_p
CoreLib.ParserCreate.argtypes = [ctypes.c_void_p, ctypes.c_bool]
CoreLib.ParserDestroy.argtypes = [ctypes.c_void_p]
def __init__(self, grammar, dry = False):
self.handle = CoreLib.ParserCreate(grammar.handle, dry)
def __del__(self):
CoreLib.ParserDestroy(self.handle)
class Chaser(object):
CoreLib.ChaserCreate.restype = ctypes.c_void_p
CoreLib.ChaserCreate.argtypes = [ctypes.c_void_p]
CoreLib.ChaserDestroy.argtypes = [ctypes.c_void_p]
def __init__(self, grammar):
self.handle = CoreLib.ChaserCreate(grammar.handle)
def __del__(self):
CoreLib.ChaserDestroy(self.handle)
class SymbolEntry(ctypes.Structure):
_fields_ = [('id', ctypes.c_int),
('start', ctypes.c_long),
('end', ctypes.c_long)]
ReductionListener = ctypes.CFUNCTYPE(ctypes.c_long, ctypes.POINTER(SymbolEntry), ctypes.POINTER(ctypes.c_long), ctypes.c_int)
TransferListener = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int)
class BaseRunner(object):
CoreLib.RunnerDestroy.argtypes = [ctypes.c_void_p]
CoreLib.RunnerStart.argtypes = [ctypes.c_void_p]
CoreLib.RunnerWait.argtypes = [ctypes.c_void_p]
CoreLib.RunnerRegisterListener.argtypes = [ctypes.c_void_p, ReductionListener, TransferListener]
CoreLib.RunnerGetWindow.restype = ctypes.c_void_p
CoreLib.RunnerGetWindow.argtypes = [ctypes.c_void_p]
def __init__(self, handle):
self.handle = handle
def __del__(self):
CoreLib.RunnerDestroy(self.handle)
def start(self):
CoreLib.RunnerStart(self.handle)
def wait(self):
CoreLib.RunnerWait(self.handle)
def attach(self, listener, xferlistener):
self.listener = ReductionListener(listener)
self.xferlistener = TransferListener(xferlistener)
CoreLib.RunnerRegisterListener(self.handle, self.listener, self.xferlistener)
def get_window(self):
return CoreLib.RunnerGetWindow(self.handle)
class Stage1Runner(BaseRunner):
CoreLib.Stage1RunnerCreate.restype = ctypes.c_void_p
CoreLib.Stage1RunnerCreate.argtypes = [ctypes.c_char_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int]
def __init__(self, filename, parser, bank_size, bank_num):
super(Stage1Runner, self).__init__(CoreLib.Stage1RunnerCreate(filename.encode('utf-8'), parser.handle, bank_size, bank_num))
class Stage2Runner(BaseRunner):
CoreLib.Stage2RunnerCreate.restype = ctypes.c_void_p
CoreLib.Stage2RunnerCreate.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_int, ctypes.c_int]
def __init__(self, filename, bank_size, bank_num, master_pid):
super(Stage2Runner, self).__init__(CoreLib.Stage2RunnerCreate(filename.encode('utf-8'), bank_size, bank_num, master_pid))
class Stage3Runner(BaseRunner):
CoreLib.Stage3RunnerCreate.restype = ctypes.c_void_p
CoreLib.Stage3RunnerCreate.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_int, ctypes.c_int]
def __init__(self, filename, bank_size, bank_num, master_pid):
super(Stage3Runner, self).__init__(CoreLib.Stage3RunnerCreate(filename.encode('utf-8'), bank_size, bank_num, master_pid))
| [
"logging.getLogger",
"ctypes.CFUNCTYPE",
"ctypes.POINTER",
"os.getenv",
"os.path.join",
"platform.uname",
"ctypes.CDLL"
] | [((89, 116), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (106, 116), False, 'import logging\n'), ((503, 557), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['None', 'ctypes.c_wchar_p', 'ctypes.c_int'], {}), '(None, ctypes.c_wchar_p, ctypes.c_int)\n', (519, 557), False, 'import ctypes\n'), ((3006, 3056), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['None', 'ctypes.c_int', 'ctypes.c_int'], {}), '(None, ctypes.c_int, ctypes.c_int)\n', (3022, 3056), False, 'import ctypes\n'), ((152, 186), 'os.getenv', 'os.getenv', (['"""CENTAURUS_DL_PATH"""', '""""""'], {}), "('CENTAURUS_DL_PATH', '')\n", (161, 186), False, 'import os\n'), ((411, 456), 'ctypes.CDLL', 'ctypes.CDLL', (['dl_path'], {'mode': 'ctypes.RTLD_GLOBAL'}), '(dl_path, mode=ctypes.RTLD_GLOBAL)\n', (422, 456), False, 'import ctypes\n'), ((2913, 2940), 'ctypes.POINTER', 'ctypes.POINTER', (['SymbolEntry'], {}), '(SymbolEntry)\n', (2927, 2940), False, 'import ctypes\n'), ((2942, 2971), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_long'], {}), '(ctypes.c_long)\n', (2956, 2971), False, 'import ctypes\n'), ((246, 293), 'os.path.join', 'os.path.join', (['dl_path_env', '"""libpycentaurus.dll"""'], {}), "(dl_path_env, 'libpycentaurus.dll')\n", (258, 293), False, 'import os\n'), ((194, 210), 'platform.uname', 'platform.uname', ([], {}), '()\n', (208, 210), False, 'import platform\n'), ((353, 399), 'os.path.join', 'os.path.join', (['dl_path_env', '"""libpycentaurus.so"""'], {}), "(dl_path_env, 'libpycentaurus.so')\n", (365, 399), False, 'import os\n'), ((303, 319), 'platform.uname', 'platform.uname', ([], {}), '()\n', (317, 319), False, 'import platform\n')] |
import os
import bpy
import bpy_extras
from ..core import animation_lists
from ..core import detection_manager
class DetectFaceShapes(bpy.types.Operator):
bl_idname = "rsl.detect_face_shapes"
bl_label = "Auto Detect"
bl_description = "Automatically detect face shape keys for supported naming schemes"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
obj = context.object
if not hasattr(obj.data, 'shape_keys') or not hasattr(obj.data.shape_keys, 'key_blocks'):
self.report({'ERROR'}, 'This mesh has no shapekeys!')
return {'CANCELLED'}
for shape_name_key in animation_lists.face_shapes:
setattr(obj, 'rsl_face_' + shape_name_key, detection_manager.detect_shape(obj, shape_name_key))
return {'FINISHED'}
class DetectActorBones(bpy.types.Operator):
bl_idname = "rsl.detect_actor_bones"
bl_label = "Auto Detect"
bl_description = "Automatically detect actor bones for supported naming schemes"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
obj = context.object
for bone_name_key in animation_lists.actor_bones.keys():
setattr(obj, 'rsl_actor_' + bone_name_key, detection_manager.detect_bone(obj, bone_name_key))
return {'FINISHED'}
class SaveCustomBonesRetargeting(bpy.types.Operator):
bl_idname = "rsl.save_custom_bones_retargeting"
bl_label = "Save Custom Bones"
bl_description = "This saves the currently selected bones and they will then get automatically detected"
bl_options = {'INTERNAL'}
def execute(self, context):
# Save the bone list if the user changed anything
detection_manager.save_retargeting_to_list()
return {'FINISHED'}
class ImportCustomBones(bpy.types.Operator, bpy_extras.io_utils.ImportHelper):
bl_idname = "rsl.import_custom_schemes"
bl_label = "Import Custom Scheme"
bl_description = "Import a custom naming scheme"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
files: bpy.props.CollectionProperty(type=bpy.types.OperatorFileListElement, options={'HIDDEN', 'SKIP_SAVE'})
directory: bpy.props.StringProperty(maxlen=1024, subtype='FILE_PATH', options={'HIDDEN', 'SKIP_SAVE'})
filter_glob: bpy.props.StringProperty(default='*.json;', options={'HIDDEN'})
def execute(self, context):
import_count = 0
if self.directory:
for f in self.files:
file_name = f.name
if not file_name.endswith('.json'):
continue
detection_manager.import_custom_list(self.directory, file_name)
import_count += 1
# If this operator is called with no directory but a filepath argument, import that
elif self.filepath:
detection_manager.import_custom_list(os.path.dirname(self.filepath), os.path.basename(self.filepath))
import_count += 1
detection_manager.save_to_file_and_update()
if not import_count:
self.report({'ERROR'}, 'No files were imported.')
return {'FINISHED'}
self.report({'INFO'}, 'Successfully imported new naming schemes.')
return {'FINISHED'}
class ExportCustomBones(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
bl_idname = "rsl.export_custom_schemes"
bl_label = "Export Custom Scheme"
bl_description = "Export your custom naming schemes"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
filename_ext = ".json"
filter_glob: bpy.props.StringProperty(default='*.json;', options={'HIDDEN'})
def execute(self, context):
file_name = detection_manager.export_custom_list(self.filepath)
if not file_name:
self.report({'ERROR'}, 'You don\'t have any custom naming schemes!')
return {'FINISHED'}
self.report({'INFO'}, 'Exported custom naming schemes as "' + file_name + '".')
return {'FINISHED'}
class ClearCustomBones(bpy.types.Operator):
bl_idname = "rsl.clear_custom_bones"
bl_label = "Clear Custom Bones"
bl_description = "Clear all custom bone naming schemes"
bl_options = {'INTERNAL'}
def draw(self, context):
layout = self.layout
layout.separator()
row = layout.row(align=True)
row.scale_y = 0.5
row.label(text='You are about to delete all stored custom bone naming schemes.', icon='ERROR')
row = layout.row(align=True)
row.scale_y = 0.5
row.label(text='Continue?', icon='BLANK1')
layout.separator()
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self, width=400)
def execute(self, context):
detection_manager.delete_custom_bone_list()
self.report({'INFO'}, 'Cleared all custom bone naming schemes!')
return {'FINISHED'}
class ClearCustomShapes(bpy.types.Operator):
bl_idname = "rsl.clear_custom_shapes"
bl_label = "Clear Custom Shapekeys"
bl_description = "Clear all custom shape naming schemes"
bl_options = {'INTERNAL'}
def draw(self, context):
layout = self.layout
layout.separator()
row = layout.row(align=True)
row.scale_y = 0.5
row.label(text='You are about to delete all stored custom shape naming schemes.', icon='ERROR')
row = layout.row(align=True)
row.scale_y = 0.5
row.label(text='Continue?', icon='BLANK1')
layout.separator()
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self, width=400)
def execute(self, context):
detection_manager.delete_custom_shape_list()
self.report({'INFO'}, 'Cleared all custom shape naming schemes!')
return {'FINISHED'}
| [
"os.path.dirname",
"bpy.props.StringProperty",
"bpy.props.CollectionProperty",
"os.path.basename"
] | [((2071, 2176), 'bpy.props.CollectionProperty', 'bpy.props.CollectionProperty', ([], {'type': 'bpy.types.OperatorFileListElement', 'options': "{'HIDDEN', 'SKIP_SAVE'}"}), "(type=bpy.types.OperatorFileListElement,\n options={'HIDDEN', 'SKIP_SAVE'})\n", (2099, 2176), False, 'import bpy\n'), ((2188, 2284), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'maxlen': '(1024)', 'subtype': '"""FILE_PATH"""', 'options': "{'HIDDEN', 'SKIP_SAVE'}"}), "(maxlen=1024, subtype='FILE_PATH', options={\n 'HIDDEN', 'SKIP_SAVE'})\n", (2212, 2284), False, 'import bpy\n'), ((2297, 2360), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'default': '"""*.json;"""', 'options': "{'HIDDEN'}"}), "(default='*.json;', options={'HIDDEN'})\n", (2321, 2360), False, 'import bpy\n'), ((3570, 3633), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'default': '"""*.json;"""', 'options': "{'HIDDEN'}"}), "(default='*.json;', options={'HIDDEN'})\n", (3594, 3633), False, 'import bpy\n'), ((2879, 2909), 'os.path.dirname', 'os.path.dirname', (['self.filepath'], {}), '(self.filepath)\n', (2894, 2909), False, 'import os\n'), ((2911, 2942), 'os.path.basename', 'os.path.basename', (['self.filepath'], {}), '(self.filepath)\n', (2927, 2942), False, 'import os\n')] |
from django.contrib.auth import get_user_model
from ninja import Schema
from ninja.orm import create_schema
from typing import Dict, List
UsernameSchemaMixin = create_schema(
get_user_model(),
fields=[get_user_model().USERNAME_FIELD]
)
EmailSchemaMixin = create_schema(
get_user_model(),
fields=[get_user_model().EMAIL_FIELD]
)
UserOut = create_schema(
get_user_model(),
exclude=['password']
)
class LoginIn(UsernameSchemaMixin):
password: str
class RequestPasswordResetIn(EmailSchemaMixin):
pass
class SetPasswordIn(UsernameSchemaMixin):
new_password1: str
new_password2: str
token: str
class ChangePasswordIn(Schema):
old_password: str
new_password1: str
new_password2: str
class ErrorsOut(Schema):
errors: Dict[str, List[str]]
| [
"django.contrib.auth.get_user_model"
] | [((181, 197), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (195, 197), False, 'from django.contrib.auth import get_user_model\n'), ((285, 301), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (299, 301), False, 'from django.contrib.auth import get_user_model\n'), ((377, 393), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (391, 393), False, 'from django.contrib.auth import get_user_model\n'), ((211, 227), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (225, 227), False, 'from django.contrib.auth import get_user_model\n'), ((315, 331), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (329, 331), False, 'from django.contrib.auth import get_user_model\n')] |
from prettytable import PrettyTable
pid = [int(x) for x in input('Enter the process ids: ').split()]
burst = [int(x) for x in input('Enter the burst time: ').split()]
table = PrettyTable(['Process Id', 'Burst Time'])
# Assumption: All processes arrive at time t=0
n = len(pid)
timeQuantum = int(input("Enter the time quantum: "))
print('\n')
queue = []
for i in range(n):
queue.append(['P'+str(pid[i]), burst[i]])
t = 0
gantt = []
wt = [0]*n
tt = [0]*n
while queue:
t += timeQuantum
process_id, process_b = queue.pop(0)
process_burst = process_b - timeQuantum
if process_burst > 0:
queue.append([process_id, process_burst])
gantt.append([process_id, timeQuantum])
else:
tt[int(process_id[1:])-1] = t + process_burst
t += process_burst
gantt.append([process_id,process_b])
for i in range(len(tt)):
wt[i] = tt[i] - burst[i]
print("GANTT CHART\n")
print("|", end="")
print("----------"*len(gantt), end="")
print("|")
for i in gantt:
print("| "+str(i[0])+" ", end="")
print(" |\n|", end="")
print("----------"*len(gantt), end="")
print("|")
print(0,end=" ")
s = 0
for i in gantt:
s = s+i[1]
print(s, end=" "*(10-len(str(s))))
print("\n")
table = PrettyTable()
table.field_names = ["Process id", "Burst time",
"Waiting time", "Turnaround time"]
for i in range(len(pid)):
table.add_row([pid[i], burst[i], wt[i], tt[i]])
print(table)
print('\nThe average waiting time is:', sum(wt))
| [
"prettytable.PrettyTable"
] | [((180, 221), 'prettytable.PrettyTable', 'PrettyTable', (["['Process Id', 'Burst Time']"], {}), "(['Process Id', 'Burst Time'])\n", (191, 221), False, 'from prettytable import PrettyTable\n'), ((1298, 1311), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (1309, 1311), False, 'from prettytable import PrettyTable\n')] |
"""Validates the codecov.yml configuration file."""
import click
import requests
# The exit(1) is used to indicate error in pre-commit
NOT_OK = 1
OK = 0
@click.command()
@click.option(
"--filename", default="codecov.yml", help="Codecov configuration file."
)
def ccv(filename):
"""Open the codecov configuration and check if it is valid.
Parameters
----------
filename : str
Name of the configuration file.
"""
file = open_file(filename)
result = run_request(file)
check_valid(result)
def check_valid(result):
"""Check if the message contains the "Valid!" string from the request call.
Parameters
----------
result : str
Message to be analyzed.
"""
if "Valid!" in result:
print("Valid!")
exit(OK)
else:
print(result)
exit(NOT_OK)
def open_file(filename):
"""Tries to open the configuration file.
Parameters
----------
filename : str
Name of the configuration file.
Returns
-------
bytes
Contents of the configuration file, or the string
zero.
"""
try:
with open(filename, "rb") as myconfig:
file = myconfig.read()
return file
except FileNotFoundError:
print("Configuration file not found.")
exit(NOT_OK)
def run_request(file):
"""Send the configuration to the codecov site.
Parameters
----------
file : string
Contents of the configuration file.
Returns
-------
str
Result of the request.
"""
try:
received = requests.post("https://codecov.io/validate", data=file)
except (
requests.exceptions.ConnectTimeout,
requests.exceptions.HTTPError,
requests.exceptions.ReadTimeout,
requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
):
print("Failed to establish connection. Check your internet.")
exit(NOT_OK)
message = received.content.decode("utf-8")
return message
if __name__ == "__main__":
ccv()
| [
"click.option",
"requests.post",
"click.command"
] | [((157, 172), 'click.command', 'click.command', ([], {}), '()\n', (170, 172), False, 'import click\n'), ((174, 264), 'click.option', 'click.option', (['"""--filename"""'], {'default': '"""codecov.yml"""', 'help': '"""Codecov configuration file."""'}), "('--filename', default='codecov.yml', help=\n 'Codecov configuration file.')\n", (186, 264), False, 'import click\n'), ((1604, 1659), 'requests.post', 'requests.post', (['"""https://codecov.io/validate"""'], {'data': 'file'}), "('https://codecov.io/validate', data=file)\n", (1617, 1659), False, 'import requests\n')] |
#!/usr/bin/env python3
# Import ATC classes
from dataneeded import DataNeeded
from detectionrule import DetectionRule
from loggingpolicy import LoggingPolicy
# from triggers import Triggers
from enrichment import Enrichment
from responseaction import ResponseAction
from responseplaybook import ResponsePlaybook
from pdb import set_trace as bp
# Import ATC Utils
from atcutils import ATCutils
# Others
import glob
import traceback
import sys
ATCconfig = ATCutils.read_yaml_file("config.yml")
class PopulateMarkdown:
"""Class for populating markdown repo"""
def __init__(self, lp=False, dn=False, dr=False, en=False, tg=False,
ra=False, rp=False, auto=False, art_dir=False, atc_dir=False,
lp_path=False, dn_path=False, dr_path=False, en_path=False,
tg_path=False, ra_path=False, rp_path=False):
"""Init"""
# Check if atc_dir provided
if atc_dir:
self.atc_dir = atc_dir
else:
self.atc_dir = '../'+ATCconfig.get('md_name_of_root_directory')+'/'
# Check if art_dir provided
if art_dir:
self.art_dir = art_dir
else:
self.art_dir = ATCconfig.get('triggers_directory')
# Main logic
if auto:
self.logging_policy(lp_path)
self.data_needed(dn_path)
self.enrichment(en_path)
self.triggers(tg_path)
self.response_action(ra_path)
self.response_playbook(rp_path)
self.detection_rule(dr_path)
if lp:
self.logging_policy(lp_path)
if dn:
self.data_needed(dn_path)
if en:
self.enrichment(en_path)
if dr:
self.detection_rule(dr_path)
if ra:
self.response_action(ra_path)
if rp:
self.response_playbook(rp_path)
if tg:
self.triggers(tg_path)
def triggers(self, tg_path):
"""Populate triggers"""
if self.art_dir and self.atc_dir:
r = ATCutils.populate_tg_markdown(art_dir=self.art_dir,
atc_dir=self.atc_dir)
elif self.art_dir:
r = ATCutils.populate_tg_markdown(art_dir=self.art_dir)
elif self.atc_dir:
r = ATCutils.populate_tg_markdown(atc_dir=self.atc_dir)
else:
r = ATCutils.populate_tg_markdown()
return r
def logging_policy(self, lp_path):
"""Desc"""
if lp_path:
lp_list = glob.glob(lp_path + '*.yml')
else:
lp_list = glob.glob('../logging_policies/*.yml')
for lp_file in lp_list:
try:
lp = LoggingPolicy(lp_file)
lp.render_template("markdown")
lp.save_markdown_file(atc_dir=self.atc_dir)
except Exception as e:
print(lp_file + " failed\n\n%s\n\n" % e)
print("Err message: %s" % e)
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
def data_needed(self, dn_path):
"""Desc"""
if dn_path:
dn_list = glob.glob(dn_path + '*.yml')
else:
dn_list = glob.glob('../data_needed/*.yml')
for dn_file in dn_list:
try:
dn = DataNeeded(dn_file)
dn.render_template("markdown")
dn.save_markdown_file(atc_dir=self.atc_dir)
except Exception as e:
print(dn_file + " failed\n\n%s\n\n" % e)
print("Err message: %s" % e)
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
def detection_rule(self, dr_path):
"""Desc"""
if dr_path:
dr_list = glob.glob(dr_path + '*.yml')
else:
dr_list = glob.glob(ATCconfig.get(
'detection_rules_directory') + '/*.yml')
for dr_file in dr_list:
try:
dr = DetectionRule(dr_file)
dr.render_template("markdown")
dr.save_markdown_file(atc_dir=self.atc_dir)
except Exception as e:
print(dr_file + " failed\n\n%s\n\n" % e)
print("Err message: %s" % e)
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
def enrichment(self, en_path):
"""Nothing here yet"""
if en_path:
en_list = glob.glob(en_path + '*.yml')
else:
en_list = glob.glob('../enrichments/*.yml')
for en_file in en_list:
try:
en = Enrichment(en_file)
en.render_template("markdown")
en.save_markdown_file(atc_dir=self.atc_dir)
except Exception as e:
print(en_file + " failed\n\n%s\n\n" % e)
print("Err message: %s" % e)
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
def response_action(self, ra_path):
"""Nothing here yet"""
if ra_path:
ra_list = glob.glob(ra_path + '*.yml')
else:
ra_list = glob.glob('../response_actions/*.yml')
for ra_file in ra_list:
try:
ra = ResponseAction(ra_file)
ra.render_template("markdown")
ra.save_markdown_file(atc_dir=self.atc_dir)
except Exception as e:
print(ra_file + " failed\n\n%s\n\n" % e)
print("Err message: %s" % e)
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
def response_playbook(self, rp_path):
"""Nothing here yet"""
if rp_path:
rp_list = glob.glob(rp_path + '*.yml')
else:
rp_list = glob.glob('../response_playbooks/*.yml')
for rp_file in rp_list:
try:
rp = ResponsePlaybook(rp_file)
rp.render_template("markdown")
rp.save_markdown_file(atc_dir=self.atc_dir)
except Exception as e:
print(rp_file + " failed\n\n%s\n\n" % e)
print("Err message: %s" % e)
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
| [
"detectionrule.DetectionRule",
"loggingpolicy.LoggingPolicy",
"atcutils.ATCutils.read_yaml_file",
"atcutils.ATCutils.populate_tg_markdown",
"responseplaybook.ResponsePlaybook",
"responseaction.ResponseAction",
"dataneeded.DataNeeded",
"enrichment.Enrichment",
"traceback.print_exc",
"glob.glob"
] | [((478, 515), 'atcutils.ATCutils.read_yaml_file', 'ATCutils.read_yaml_file', (['"""config.yml"""'], {}), "('config.yml')\n", (501, 515), False, 'from atcutils import ATCutils\n'), ((2149, 2222), 'atcutils.ATCutils.populate_tg_markdown', 'ATCutils.populate_tg_markdown', ([], {'art_dir': 'self.art_dir', 'atc_dir': 'self.atc_dir'}), '(art_dir=self.art_dir, atc_dir=self.atc_dir)\n', (2178, 2222), False, 'from atcutils import ATCutils\n'), ((2662, 2690), 'glob.glob', 'glob.glob', (["(lp_path + '*.yml')"], {}), "(lp_path + '*.yml')\n", (2671, 2690), False, 'import glob\n'), ((2729, 2767), 'glob.glob', 'glob.glob', (['"""../logging_policies/*.yml"""'], {}), "('../logging_policies/*.yml')\n", (2738, 2767), False, 'import glob\n'), ((3340, 3368), 'glob.glob', 'glob.glob', (["(dn_path + '*.yml')"], {}), "(dn_path + '*.yml')\n", (3349, 3368), False, 'import glob\n'), ((3407, 3440), 'glob.glob', 'glob.glob', (['"""../data_needed/*.yml"""'], {}), "('../data_needed/*.yml')\n", (3416, 3440), False, 'import glob\n'), ((4011, 4039), 'glob.glob', 'glob.glob', (["(dr_path + '*.yml')"], {}), "(dr_path + '*.yml')\n", (4020, 4039), False, 'import glob\n'), ((4744, 4772), 'glob.glob', 'glob.glob', (["(en_path + '*.yml')"], {}), "(en_path + '*.yml')\n", (4753, 4772), False, 'import glob\n'), ((4811, 4844), 'glob.glob', 'glob.glob', (['"""../enrichments/*.yml"""'], {}), "('../enrichments/*.yml')\n", (4820, 4844), False, 'import glob\n'), ((5430, 5458), 'glob.glob', 'glob.glob', (["(ra_path + '*.yml')"], {}), "(ra_path + '*.yml')\n", (5439, 5458), False, 'import glob\n'), ((5497, 5535), 'glob.glob', 'glob.glob', (['"""../response_actions/*.yml"""'], {}), "('../response_actions/*.yml')\n", (5506, 5535), False, 'import glob\n'), ((6127, 6155), 'glob.glob', 'glob.glob', (["(rp_path + '*.yml')"], {}), "(rp_path + '*.yml')\n", (6136, 6155), False, 'import glob\n'), ((6194, 6234), 'glob.glob', 'glob.glob', (['"""../response_playbooks/*.yml"""'], {}), "('../response_playbooks/*.yml')\n", (6203, 6234), False, 'import glob\n'), ((2317, 2368), 'atcutils.ATCutils.populate_tg_markdown', 'ATCutils.populate_tg_markdown', ([], {'art_dir': 'self.art_dir'}), '(art_dir=self.art_dir)\n', (2346, 2368), False, 'from atcutils import ATCutils\n'), ((2843, 2865), 'loggingpolicy.LoggingPolicy', 'LoggingPolicy', (['lp_file'], {}), '(lp_file)\n', (2856, 2865), False, 'from loggingpolicy import LoggingPolicy\n'), ((3516, 3535), 'dataneeded.DataNeeded', 'DataNeeded', (['dn_file'], {}), '(dn_file)\n', (3526, 3535), False, 'from dataneeded import DataNeeded\n'), ((4236, 4258), 'detectionrule.DetectionRule', 'DetectionRule', (['dr_file'], {}), '(dr_file)\n', (4249, 4258), False, 'from detectionrule import DetectionRule\n'), ((4920, 4939), 'enrichment.Enrichment', 'Enrichment', (['en_file'], {}), '(en_file)\n', (4930, 4939), False, 'from enrichment import Enrichment\n'), ((5611, 5634), 'responseaction.ResponseAction', 'ResponseAction', (['ra_file'], {}), '(ra_file)\n', (5625, 5634), False, 'from responseaction import ResponseAction\n'), ((6310, 6335), 'responseplaybook.ResponsePlaybook', 'ResponsePlaybook', (['rp_file'], {}), '(rp_file)\n', (6326, 6335), False, 'from responseplaybook import ResponsePlaybook\n'), ((2416, 2467), 'atcutils.ATCutils.populate_tg_markdown', 'ATCutils.populate_tg_markdown', ([], {'atc_dir': 'self.atc_dir'}), '(atc_dir=self.atc_dir)\n', (2445, 2467), False, 'from atcutils import ATCutils\n'), ((2502, 2533), 'atcutils.ATCutils.populate_tg_markdown', 'ATCutils.populate_tg_markdown', ([], {}), '()\n', (2531, 2533), False, 'from atcutils import ATCutils\n'), ((3165, 3201), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (3184, 3201), False, 'import traceback\n'), ((3835, 3871), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (3854, 3871), False, 'import traceback\n'), ((4558, 4594), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (4577, 4594), False, 'import traceback\n'), ((5239, 5275), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (5258, 5275), False, 'import traceback\n'), ((5934, 5970), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (5953, 5970), False, 'import traceback\n'), ((6635, 6671), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (6654, 6671), False, 'import traceback\n')] |
from mcc_libusb import *
import datetime
import time
import numpy as np
mcc = USB1208FS()
mcc.usbOpen()
#mcc.usbDConfigPort(DIO_PORTA, DIO_DIR_OUT)
#mcc.usbDConfigPort(DIO_PORTB, DIO_DIR_IN)
#mcc.usbDOut(DIO_PORTA, 0)
#num = mcc.usbAIn(1, BP_1_00V)
#print(str(mcc.volts_FS(BP_1_00V, num)))
#channel = np.array([1, 2, 3, 7])
#gain = np.array([SE_10_00V, BP_10_00V, BP_20_00V, BP_1_25V])
#mcc.usbALoadQueue(4, channel, gain)
#mcc.usbReset()
#mcc.usbAIn_Stop()
options = AIN_EXECUTION | AIN_GAIN_QUEUE
sdata = mcc.usbAIn_Scan_SE(0, 0, 50, 1000, options)
print(sdata)
print(mcc.volts_SE(np.average(sdata)))
#mcc.usbALoadQueue(1, np.array([1]), np.array([BP_10_00V]))
#sdata1 = mcc.usbAIn_Scan(1,1,50,1000, AIN_EXECUTION)
#print(sdata1)
#print(mcc.volts_FS(BP_10_00V, np.average(sdata1)))
mcc.usbClose()
'''
while 1:
print("\nUSB 1208FS Testing")
print("----------------")
print("Hit 'b' to blink LED")
print("Hit 'c' to test counter")
print("Hit 'e' to exit")
print("Hit 'd' to test digital I/O");
print("Hit 'g' to test analog input scan (differential).")
print("Hit 'j' to test analog input scan (single ended).")
print("Hit 'i' to test analog input (differential mode)")
print("Hit 'h' to test analog input (single ended)")
print("Hit 'o' to test analog output")
print("Hit 'O' to test analog output scan")
print("Hit 'r' to reset")
print("Hit 'S' to get status")
print("Hit 's' to get serial number")
i = input(">> ")
if i == 'b': #test to see if led blinks
mcc.usbBlink()
elif i == 'e':
mcc.close()
exit(1)
elif i == 'd':
print("\nTesting Digital I/O....")
print("connect pins 21 through 28 <=> 32 through 39")
temp = int(input("Enter a byte number [0-0xff]: "))
mcc.usbDOut(DIO_PORTA, temp)
din = mcc.usbDIn(DIO_PORTB)
print("The number you entered = " + hex(din & 0xff))
elif i == 'i':
print("Testing the analog input differential...")
gain = int(input("Enter gain: "))
channel = int(input("Enter channel [0-7]: "))
value = mcc.usbAIn(channel, gain)
print("Channel: " + str(channel) + ": value = " + str(value))
elif i == 'h':
print("Testing the analog input single ended...")
#channel = input("Entner channel [0-7]: ")
for i in range(0, 100):
start = datetime.datetime.now()
for j in range(0,8):
value = mcc.usbAIn(j, SE_10_00V)
print("Channel: %d: Value = 0x%04X, %.2fV" % (j%8 ,value, mcc.volts_SE(value)))
delta = datetime.datetime.now() - start;
print("%d" % (delta.microseconds))
time.sleep(0.1)
elif i == 'o': #test the analog output
print("Testing the analog output...")
channel = int(input("Enter channel [0-1] => (pin 13-14): "))
value = int(input("Enter a value: "))
mcc.usbAOut(channel, value)
else:
continue
'''
| [
"numpy.average"
] | [((585, 602), 'numpy.average', 'np.average', (['sdata'], {}), '(sdata)\n', (595, 602), True, 'import numpy as np\n')] |
'''
Created on 21.01.2021
@author: wf
'''
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy() | [
"flask_sqlalchemy.SQLAlchemy"
] | [((89, 101), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (99, 101), False, 'from flask_sqlalchemy import SQLAlchemy\n')] |
# Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
import sys
from _sadm import log, version
from _sadm.cmd import flags
from _sadm.web import app, syslog
def _getArgs(argv):
p = flags.new('sadm-web', desc = 'sadm web interface')
# ~ p.add_argument('--address', help = 'bind to ip address (localhost)',
# ~ metavar = 'address', default = 'localhost')
p.add_argument('--port', help = 'bind to tcp port (3478)',
metavar = 'number', type = int, default = 3478)
return flags.parse(p, argv)
def main(argv = None):
if argv is None:
argv = sys.argv[1:] # pragma: no cover
args = _getArgs(argv)
log.info("sadm-web v%s" % version.get())
log.msg("http://%s:%d/" % ('localhost', args.port))
syslog.init()
app.run('localhost', args.port, args.debug)
syslog.close()
log.msg('done!')
return 0
if __name__ == '__main__':
sys.exit(main()) # pragma: no cover
| [
"_sadm.version.get",
"_sadm.web.app.run",
"_sadm.cmd.flags.parse",
"_sadm.web.syslog.init",
"_sadm.cmd.flags.new",
"_sadm.web.syslog.close",
"_sadm.log.msg"
] | [((185, 233), '_sadm.cmd.flags.new', 'flags.new', (['"""sadm-web"""'], {'desc': '"""sadm web interface"""'}), "('sadm-web', desc='sadm web interface')\n", (194, 233), False, 'from _sadm.cmd import flags\n'), ((478, 498), '_sadm.cmd.flags.parse', 'flags.parse', (['p', 'argv'], {}), '(p, argv)\n', (489, 498), False, 'from _sadm.cmd import flags\n'), ((648, 699), '_sadm.log.msg', 'log.msg', (["('http://%s:%d/' % ('localhost', args.port))"], {}), "('http://%s:%d/' % ('localhost', args.port))\n", (655, 699), False, 'from _sadm import log, version\n'), ((701, 714), '_sadm.web.syslog.init', 'syslog.init', ([], {}), '()\n', (712, 714), False, 'from _sadm.web import app, syslog\n'), ((716, 759), '_sadm.web.app.run', 'app.run', (['"""localhost"""', 'args.port', 'args.debug'], {}), "('localhost', args.port, args.debug)\n", (723, 759), False, 'from _sadm.web import app, syslog\n'), ((761, 775), '_sadm.web.syslog.close', 'syslog.close', ([], {}), '()\n', (773, 775), False, 'from _sadm.web import app, syslog\n'), ((777, 793), '_sadm.log.msg', 'log.msg', (['"""done!"""'], {}), "('done!')\n", (784, 793), False, 'from _sadm import log, version\n'), ((632, 645), '_sadm.version.get', 'version.get', ([], {}), '()\n', (643, 645), False, 'from _sadm import log, version\n')] |
import unittest
import invoiced
import responses
class TestTask(unittest.TestCase):
def setUp(self):
self.client = invoiced.Client('api_key')
def test_endpoint(self):
task = invoiced.Task(self.client, 123)
self.assertEqual('/tasks/123', task.endpoint())
@responses.activate
def test_create(self):
responses.add('POST', 'https://api.invoiced.com/tasks',
status=201,
json={"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890})
task = invoiced.Task(self.client)
task = task.create(customer_id=345, user_id=234, name="<NAME>",
action="phone", due_date=1234567890)
self.assertIsInstance(task, invoiced.Task)
self.assertEqual(task.id, 123)
self.assertEqual(task.customer_id, 345)
self.assertEqual(task.name, "<NAME>")
@responses.activate
def test_retrieve(self):
responses.add('GET', 'https://api.invoiced.com/tasks/123',
status=200,
json={"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890})
task = invoiced.Task(self.client)
task = task.retrieve(123)
self.assertIsInstance(task, invoiced.Task)
self.assertEqual(task.id, 123)
self.assertEqual(task.action, "phone")
def test_update_no_params(self):
task = invoiced.Task(self.client, 123)
self.assertFalse(task.save())
@responses.activate
def test_update(self):
responses.add('PATCH', 'https://api.invoiced.com/tasks/123',
status=200,
json={"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890})
task = invoiced.Task(self.client, 123)
task.name = "<NAME>"
self.assertTrue(task.save())
self.assertEqual(task.name, "2nd Call")
@responses.activate
def test_list(self):
responses.add('GET', 'https://api.invoiced.com/tasks',
status=200,
json=[{"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890}],
adding_headers={
'x-total-count': '15',
'link': '<https://api.invoiced.com/tasks?per_page=25&page=1>; rel="self", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="first", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="last"'}) # noqa
task = invoiced.Task(self.client)
tasks, metadata = task.list()
self.assertIsInstance(tasks, list)
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].id, 123)
self.assertIsInstance(metadata, invoiced.List)
self.assertEqual(metadata.total_count, 15)
@responses.activate
def test_delete(self):
responses.add('DELETE', 'https://api.invoiced.com/tasks/123',
status=204)
task = invoiced.Task(self.client, 123)
self.assertTrue(task.delete())
| [
"responses.add",
"invoiced.Task",
"invoiced.Client"
] | [((130, 156), 'invoiced.Client', 'invoiced.Client', (['"""api_key"""'], {}), "('api_key')\n", (145, 156), False, 'import invoiced\n'), ((202, 233), 'invoiced.Task', 'invoiced.Task', (['self.client', '(123)'], {}), '(self.client, 123)\n', (215, 233), False, 'import invoiced\n'), ((350, 541), 'responses.add', 'responses.add', (['"""POST"""', '"""https://api.invoiced.com/tasks"""'], {'status': '(201)', 'json': "{'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>', 'action':\n 'phone', 'due_date': 1234567890}"}), "('POST', 'https://api.invoiced.com/tasks', status=201, json={\n 'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>',\n 'action': 'phone', 'due_date': 1234567890})\n", (363, 541), False, 'import responses\n'), ((649, 675), 'invoiced.Task', 'invoiced.Task', (['self.client'], {}), '(self.client)\n', (662, 675), False, 'import invoiced\n'), ((1059, 1253), 'responses.add', 'responses.add', (['"""GET"""', '"""https://api.invoiced.com/tasks/123"""'], {'status': '(200)', 'json': "{'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>', 'action':\n 'phone', 'due_date': 1234567890}"}), "('GET', 'https://api.invoiced.com/tasks/123', status=200, json\n ={'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>',\n 'action': 'phone', 'due_date': 1234567890})\n", (1072, 1253), False, 'import responses\n'), ((1361, 1387), 'invoiced.Task', 'invoiced.Task', (['self.client'], {}), '(self.client)\n', (1374, 1387), False, 'import invoiced\n'), ((1613, 1644), 'invoiced.Task', 'invoiced.Task', (['self.client', '(123)'], {}), '(self.client, 123)\n', (1626, 1644), False, 'import invoiced\n'), ((1743, 1938), 'responses.add', 'responses.add', (['"""PATCH"""', '"""https://api.invoiced.com/tasks/123"""'], {'status': '(200)', 'json': "{'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>', 'action':\n 'phone', 'due_date': 1234567890}"}), "('PATCH', 'https://api.invoiced.com/tasks/123', status=200,\n json={'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>',\n 'action': 'phone', 'due_date': 1234567890})\n", (1756, 1938), False, 'import responses\n'), ((2047, 2078), 'invoiced.Task', 'invoiced.Task', (['self.client', '(123)'], {}), '(self.client, 123)\n', (2060, 2078), False, 'import invoiced\n'), ((2252, 2704), 'responses.add', 'responses.add', (['"""GET"""', '"""https://api.invoiced.com/tasks"""'], {'status': '(200)', 'json': "[{'id': 123, 'user_id': 234, 'customer_id': 345, 'name': '<NAME>', 'action':\n 'phone', 'due_date': 1234567890}]", 'adding_headers': '{\'x-total-count\': \'15\', \'link\':\n \'<https://api.invoiced.com/tasks?per_page=25&page=1>; rel="self", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="first", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="last"\'\n }'}), '(\'GET\', \'https://api.invoiced.com/tasks\', status=200, json=[{\n \'id\': 123, \'user_id\': 234, \'customer_id\': 345, \'name\': \'<NAME>\',\n \'action\': \'phone\', \'due_date\': 1234567890}], adding_headers={\n \'x-total-count\': \'15\', \'link\':\n \'<https://api.invoiced.com/tasks?per_page=25&page=1>; rel="self", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="first", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="last"\'\n })\n', (2265, 2704), False, 'import responses\n'), ((2879, 2905), 'invoiced.Task', 'invoiced.Task', (['self.client'], {}), '(self.client)\n', (2892, 2905), False, 'import invoiced\n'), ((3238, 3311), 'responses.add', 'responses.add', (['"""DELETE"""', '"""https://api.invoiced.com/tasks/123"""'], {'status': '(204)'}), "('DELETE', 'https://api.invoiced.com/tasks/123', status=204)\n", (3251, 3311), False, 'import responses\n'), ((3350, 3381), 'invoiced.Task', 'invoiced.Task', (['self.client', '(123)'], {}), '(self.client, 123)\n', (3363, 3381), False, 'import invoiced\n')] |
#!/usr/bin/env python3
'''
FILE: event_aux_data.py
DESCRIPTION: This script contains the wrapper functions for the sealog-
server event_aux_data routes.
BUGS:
NOTES:
AUTHOR: <NAME>
COMPANY: OceanDataTools.org
VERSION: 0.1
CREATED: 2021-01-01
REVISION:
LICENSE INFO: This code is licensed under MIT license (see LICENSE.txt for details)
Copyright (C) OceanDataTools.org 2021
'''
import json
import logging
import requests
from .settings import API_SERVER_URL, HEADERS, EVENT_AUX_DATA_API_PATH
def get_event_aux_data_by_cruise(cruise_uid, datasource=None, api_server_url=API_SERVER_URL, headers=HEADERS):
'''
Return the aux_data records for the given cruise_uid and optional
datasource.
'''
try:
url = api_server_url + EVENT_AUX_DATA_API_PATH + '/bycruise/' + cruise_uid
if datasource is not None:
url += '&datasource=' + datasource
req = requests.get(url, headers=headers)
if req.status_code != 404:
event_aux_data = json.loads(req.text)
logging.debug(json.dumps(event_aux_data))
return event_aux_data
except Exception as error:
logging.debug(str(error))
raise error
return None
def get_event_aux_data_by_lowering(lowering_uid, datasource='', limit=0, api_server_url=API_SERVER_URL, headers=HEADERS):
'''
Return the aux_data records for the given lowering_uid and optional
datasource.
'''
try:
url = api_server_url + EVENT_AUX_DATA_API_PATH + '/bylowering/' + lowering_uid
querystring = []
if datasource != '':
querystring.append('datasource=' + datasource)
if limit > 0:
querystring.append('limit=' + str(limit))
if len(querystring) > 0:
url += '?' + '&'.join(querystring)
logging.info(url)
req = requests.get(url, headers=headers)
event_aux_data = json.loads(req.text)
logging.debug(json.dumps(event_aux_data))
return event_aux_data
except Exception as error:
logging.debug(str(error))
raise error
| [
"json.loads",
"json.dumps",
"logging.info",
"requests.get"
] | [((962, 996), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (974, 996), False, 'import requests\n'), ((1882, 1899), 'logging.info', 'logging.info', (['url'], {}), '(url)\n', (1894, 1899), False, 'import logging\n'), ((1915, 1949), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1927, 1949), False, 'import requests\n'), ((1976, 1996), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (1986, 1996), False, 'import json\n'), ((1062, 1082), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (1072, 1082), False, 'import json\n'), ((2019, 2045), 'json.dumps', 'json.dumps', (['event_aux_data'], {}), '(event_aux_data)\n', (2029, 2045), False, 'import json\n'), ((1109, 1135), 'json.dumps', 'json.dumps', (['event_aux_data'], {}), '(event_aux_data)\n', (1119, 1135), False, 'import json\n')] |
import nox
@nox.session(python=['3.7', '3.8', '3.9', '3.10', 'pypy3.7', 'pypy3.8', 'pypy3.9'])
def unittest(session):
session.install('.[test]')
session.run('pytest')
| [
"nox.session"
] | [((13, 99), 'nox.session', 'nox.session', ([], {'python': "['3.7', '3.8', '3.9', '3.10', 'pypy3.7', 'pypy3.8', 'pypy3.9']"}), "(python=['3.7', '3.8', '3.9', '3.10', 'pypy3.7', 'pypy3.8',\n 'pypy3.9'])\n", (24, 99), False, 'import nox\n')] |
from inspect import signature
def add_doc(func):
t = ', '.join(signature(func).parameters)
func.__doc__ = func.__doc__.format(t)
return func
def foo(a, b):
"""Hi, I'm the doc
{}
bloo bloo"""
add_doc(foo)
print(help(foo))
| [
"inspect.signature"
] | [((69, 84), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (78, 84), False, 'from inspect import signature\n')] |
import re
def read_raw(path):
return "".join(open(path).readlines())
def ints(input):
# return a list of ints being separated by non-numerical characters
if input.endswith(".txt"):
return ints(read_raw(input))
else:
return [int(num) for num in re.split("\D+", input) if num != '']
def lines(input):
# return lines separated by \n.
if input.endswith(".txt"):
return lines(read_raw(input))
else:
return input.split("\n")
def blocks(input):
# return blocks separated by \n\n
if input.endswith(".txt"):
return blocks(read_raw(input))
else:
return input.split("\n\n")
def neighbors(grid, with_diagonals = True):
# assign neighbors to each element in the grid
for y in range(len(grid)):
for x in range(len(grid[y])):
neighbors = []
for ny in range(max(0, y-1), y+2):
for nx in range(max(0, x-1), x+2):
if not (x == nx and y == ny):
if with_diagonals or x == nx or y == ny:
try:
neighbors.append(grid[ny][nx])
except IndexError:
pass
grid[y][x].neighbors = neighbors
def flat_grid(grid):
# return each element in 2-D grid
return [item for row in grid for item in row]
| [
"re.split"
] | [((288, 311), 're.split', 're.split', (['"""\\\\D+"""', 'input'], {}), "('\\\\D+', input)\n", (296, 311), False, 'import re\n')] |
import cv2 as cv
import numpy as np
from PIL import Image
import os
import time
import os
import concurrent.futures
#used for resizing, it will resize the image maintaining aspect ratio
# to the smallest dimension, my images were 5000 by 1000, so it gets shrunk to
# 40 px tall and an unknown width.
size = (1920, 40)
# only run on files with this file extension.
ext = '.jpeg'
# folder to run on.
path = os.getcwd()
startTime = 0
def get_boundaries(name, debug=False):
# crop out unecessary whitespace
src = cv.imread(cv.samples.findFile(name),1)
src_gray = cv.blur(src, (3,3))
threshold = 100
leftmost = src.shape[1]
rightmost = 0
canny_output = cv.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv.findContours(canny_output, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
offset = int(src.shape[1]*0.03)
# Get the mass centers
mc = [None]*len(contours)
for i in range(len(contours)):
# Get the moment
moment = cv.moments(contours[i])
# add 1e-5 to avoid division by zero
mc[i] = (moment['m10'] / (moment['m00'] + 1e-5), moment['m01'] / (moment['m00'] + 1e-5))
# Draw contours
#minarea = 1000
for i, j in enumerate(contours):
val = int(mc[i][0])
area = cv.contourArea(contours[i])
#val = int(i[0][0][0])
if leftmost > val and val > offset:# and area > minarea:
leftmost = val
if rightmost < val and val < src.shape[1]-offset:# and area > minarea:
rightmost = val
if debug >= 2:
print('val: ',end='')
print(val, end = '')
print(' '.join(['Number', str(i), 'lm', str(leftmost),
'rm', str(rightmost)]))
leftmost -= offset
rightmost += offset
# Calculate the area with the moments 00 and compare with the result of the OpenCV function
if debug >= 3:
for i in range(len(contours)):
print(' * Contour[{0}]. Area: {1}. Length: {2}.'.format(i, cv.contourArea(contours[i]), cv.arcLength(contours[i], True), contours[i]))
return leftmost, 0, rightmost, src.shape[0]
def run_on(path, debug=False):
global startTime
startTime = int(time.time())
op = []
count = 0
namelist = []
for root, dirs, files in os.walk(path):
for name in files:
if ext in name:
namelist.append(name)
with concurrent.futures.ThreadPoolExecutor() as executor:
for name, result in zip(namelist, executor.map(get_boundaries, namelist)):
if debug >= 1: print('Time Elapsed:',
str(int(time.time())-startTime),
'secs. Image:', count, 'Name:', name)
shrink_and_crop(name, result, size, debug)
count+=1
print('Done in {} seconds!'.format(str(int(time.time())-startTime)))
def shrink_and_crop(name, cropbox, maxsize, debug=False):
im = Image.open(name)
if debug >= 2: print(im, cropbox)
try:
im = im.crop(cropbox)
if im.size[1] > maxsize[1]:
im.thumbnail(maxsize)
if debug >= 2: print(im)
im.save(name)
#im.save('did it work1.jpg')
except ValueError:
print('Did not run on:',name)
run_on(path, debug=1)
| [
"PIL.Image.open",
"cv2.arcLength",
"cv2.samples.findFile",
"os.getcwd",
"cv2.contourArea",
"cv2.blur",
"cv2.moments",
"cv2.findContours",
"cv2.Canny",
"time.time",
"os.walk"
] | [((425, 436), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (434, 436), False, 'import os\n'), ((600, 620), 'cv2.blur', 'cv.blur', (['src', '(3, 3)'], {}), '(src, (3, 3))\n', (607, 620), True, 'import cv2 as cv\n'), ((709, 753), 'cv2.Canny', 'cv.Canny', (['src_gray', 'threshold', '(threshold * 2)'], {}), '(src_gray, threshold, threshold * 2)\n', (717, 753), True, 'import cv2 as cv\n'), ((773, 844), 'cv2.findContours', 'cv.findContours', (['canny_output', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(canny_output, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (788, 844), True, 'import cv2 as cv\n'), ((2370, 2383), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2377, 2383), False, 'import os\n'), ((3042, 3058), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (3052, 3058), False, 'from PIL import Image\n'), ((555, 580), 'cv2.samples.findFile', 'cv.samples.findFile', (['name'], {}), '(name)\n', (574, 580), True, 'import cv2 as cv\n'), ((1021, 1044), 'cv2.moments', 'cv.moments', (['contours[i]'], {}), '(contours[i])\n', (1031, 1044), True, 'import cv2 as cv\n'), ((1314, 1341), 'cv2.contourArea', 'cv.contourArea', (['contours[i]'], {}), '(contours[i])\n', (1328, 1341), True, 'import cv2 as cv\n'), ((2280, 2291), 'time.time', 'time.time', ([], {}), '()\n', (2289, 2291), False, 'import time\n'), ((2068, 2095), 'cv2.contourArea', 'cv.contourArea', (['contours[i]'], {}), '(contours[i])\n', (2082, 2095), True, 'import cv2 as cv\n'), ((2097, 2128), 'cv2.arcLength', 'cv.arcLength', (['contours[i]', '(True)'], {}), '(contours[i], True)\n', (2109, 2128), True, 'import cv2 as cv\n'), ((2945, 2956), 'time.time', 'time.time', ([], {}), '()\n', (2954, 2956), False, 'import time\n'), ((2722, 2733), 'time.time', 'time.time', ([], {}), '()\n', (2731, 2733), False, 'import time\n')] |
from __future__ import print_function
import os
import time
import torch
import torchvision.transforms as transforms
from Dataset import DeblurDataset
from torch.utils.data import DataLoader
from utils import *
from network import *
from Dataset import DeblurDataset, RealImage
def test(args):
device = torch.device("cuda" if torch.cuda.is_available() and args.gpu >= 0 else "cpu")
model_G = Generator(args, device)
model_D = Classifier(args, device)
if torch.cuda.device_count() > 1 and args.gpu >= 0:
print("Let's use", torch.cuda.device_count(), "GPUs!")
else:
print("Let's use CPUs!")
model_G = nn.DataParallel(model_G)
model_D = nn.DataParallel(model_D)
print('===> Loading models')
net_g_path = "checkpoint/netG"
net_d_path = "checkpoint/netD"
if not find_latest_model(net_g_path) or not find_latest_model(net_d_path):
print(" [!] Load failed...")
raise Exception('No model to load for testing!')
else:
print(" [*] Load SUCCESS")
model_path_G = find_latest_model(net_g_path)
checkpointG = torch.load(model_path_G, map_location=device)
model_G.load_state_dict(checkpointG['model_state_dict'])
model_path_D = find_latest_model(net_d_path)
checkpointD = torch.load(model_path_D, map_location=device)
model_D.load_state_dict(checkpointD['model_state_dict'])
netG = model_G.to(device)
netD = model_D.to(device)
netG.eval()
netD.eval()
print("====> Loading data")
############################
# For DeblurMicroscope dataset
###########################
f_test = open("./dataset/test_instance_names.txt", "r")
test_data = f_test.readlines()
test_data = [line.rstrip() for line in test_data]
f_test.close()
test_data_loader = DataLoader(DeblurDataset(test_data, args, False), batch_size=1, shuffle=False)
all_psnr = []
all_ssim = []
start_time = time.time()
netG_S2B = BlurModel(args, device)
with torch.no_grad():
for batch in test_data_loader:
real_B, real_S, img_name = batch[0], batch[1], batch[2]
real_B, real_S = real_B.to(device), real_S.to(device)
# B = (B, 1, 64, 64), S = (B, 1, 256, 256)
pred_S = netG(real_B)
pred_S0 = pred_S[0]
pred_S1 = pred_S[1]
pred_S = pred_S[-1]
real_B1 = F.interpolate(pred_S0, (args.fine_size * 2, args.fine_size * 2), mode="bilinear")
real_B2 = F.interpolate(pred_S1, (args.fine_size * 4, args.fine_size * 4), mode="bilinear")
recov_B = netG_S2B(real_S)
recov_B = recov_B[0]
pred_label = netD(pred_S)
cur_psnr, cur_ssim = compute_metrics(real_S, pred_S)
all_psnr.append(cur_psnr)
all_ssim.append(cur_ssim)
if img_name[0][-2:] == '01':
img_roi = pred_label.detach().squeeze(0).cpu()
img_roi = (img_roi * 2 - 1.)
save_img(img_roi, '{}/roi_'.format(args.valid_dir) + img_name[0])
img_rec = recov_B.detach().squeeze(0).cpu()
save_img(img_rec, '{}/rec_'.format(args.valid_dir) + img_name[0])
img_S = real_B.detach().squeeze(0).cpu()
save_img(img_S, '{}/input0_'.format(args.test_dir) + img_name[0])
img_S = real_B1.detach().squeeze(0).cpu()
save_img(img_S, '{}/input1_'.format(args.valid_dir) + img_name[0])
img_S = real_B2.detach().squeeze(0).cpu()
save_img(img_S, '{}/input2_'.format(args.valid_dir) + img_name[0])
img_S = pred_S0.detach().squeeze(0).cpu()
save_img(img_S, '{}/output0_'.format(args.valid_dir) + img_name[0])
img_S = pred_S1.detach().squeeze(0).cpu()
save_img(img_S, '{}/output1_'.format(args.valid_dir) + img_name[0])
img_S = pred_S.detach().squeeze(0).cpu()
save_img(img_S, '{}/test_'.format(args.test_dir) + img_name[0])
print('test_{}: PSNR = {} dB, SSIM = {}'
.format(img_name[0], cur_psnr, cur_ssim))
total_time = time.time() - start_time
ave_psnr = sum(all_psnr) / len(test_data_loader)
ave_ssim = sum(all_ssim) / len(test_data_loader)
ave_time = total_time / len(test_data_loader)
print("Average PSNR = {}, SSIM = {}, Processing time = {}".format(ave_psnr, ave_ssim, ave_time))
def test_real(args):
if torch.cuda.device_count() > 1 and args.gpu >= 0:
print("Let's use", torch.cuda.device_count(), "GPUs!")
else:
print("Let's use CPUs!")
device = torch.device("cuda" if torch.cuda.is_available() and args.gpu >= 0 else "cpu")
model_G = Generator(args, device)
model_G = nn.DataParallel(model_G)
print('===> Loading models')
net_g_path = "checkpoint/netG"
netG = model_G.to(device)
if not find_latest_model(net_g_path):
print(" [!] Load failed...")
raise Exception('No model to load!')
else:
print(" [*] Load SUCCESS")
model_path_G = find_latest_model(net_g_path)
checkpointG = torch.load(model_path_G, map_location=device)
netG.load_state_dict(checkpointG['model_state_dict'])
netG.eval()
print("====> Loading data")
############################
# For Real Images
###########################
if not os.path.exists(args.input_dir):
raise Exception("Input folder not exist!")
else:
image_dir = args.input_dir
image_filenames = [image_dir + x[0:-4] for x in os.listdir(image_dir) if x[-4:] in set([".png", ".jpg"])]
test_data_loader = DataLoader(RealImage(image_filenames, args, False), batch_size=1, shuffle=False)
start_time = time.time()
with torch.no_grad():
for batch in test_data_loader:
real_B, img_name = batch[0], batch[1]
real_B = real_B.to(device)
pred_S = netG(real_B)
pred_S = pred_S[-1]
img_S = pred_S.detach().squeeze(0).cpu()
save_img(img_S, '{}/result_'.format(args.output_dir) + img_name[0])
total_time = time.time() - start_time
ave_time = total_time / len(test_data_loader)
print("Processing time = {}".format(ave_time))
| [
"os.path.exists",
"Dataset.RealImage",
"os.listdir",
"torch.load",
"torch.cuda.device_count",
"torch.cuda.is_available",
"Dataset.DeblurDataset",
"torch.no_grad",
"time.time"
] | [((1967, 1978), 'time.time', 'time.time', ([], {}), '()\n', (1976, 1978), False, 'import time\n'), ((5822, 5833), 'time.time', 'time.time', ([], {}), '()\n', (5831, 5833), False, 'import time\n'), ((1104, 1149), 'torch.load', 'torch.load', (['model_path_G'], {'map_location': 'device'}), '(model_path_G, map_location=device)\n', (1114, 1149), False, 'import torch\n'), ((1291, 1336), 'torch.load', 'torch.load', (['model_path_D'], {'map_location': 'device'}), '(model_path_D, map_location=device)\n', (1301, 1336), False, 'import torch\n'), ((1845, 1882), 'Dataset.DeblurDataset', 'DeblurDataset', (['test_data', 'args', '(False)'], {}), '(test_data, args, False)\n', (1858, 1882), False, 'from Dataset import DeblurDataset, RealImage\n'), ((2027, 2042), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2040, 2042), False, 'import torch\n'), ((4224, 4235), 'time.time', 'time.time', ([], {}), '()\n', (4233, 4235), False, 'import time\n'), ((5204, 5249), 'torch.load', 'torch.load', (['model_path_G'], {'map_location': 'device'}), '(model_path_G, map_location=device)\n', (5214, 5249), False, 'import torch\n'), ((5462, 5492), 'os.path.exists', 'os.path.exists', (['args.input_dir'], {}), '(args.input_dir)\n', (5476, 5492), False, 'import os\n'), ((5734, 5773), 'Dataset.RealImage', 'RealImage', (['image_filenames', 'args', '(False)'], {}), '(image_filenames, args, False)\n', (5743, 5773), False, 'from Dataset import DeblurDataset, RealImage\n'), ((5843, 5858), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5856, 5858), False, 'import torch\n'), ((6207, 6218), 'time.time', 'time.time', ([], {}), '()\n', (6216, 6218), False, 'import time\n'), ((473, 498), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (496, 498), False, 'import torch\n'), ((549, 574), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (572, 574), False, 'import torch\n'), ((4536, 4561), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4559, 4561), False, 'import torch\n'), ((4612, 4637), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4635, 4637), False, 'import torch\n'), ((5642, 5663), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (5652, 5663), False, 'import os\n'), ((333, 358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (356, 358), False, 'import torch\n'), ((4727, 4752), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4750, 4752), False, 'import torch\n')] |
import csv, sys, os, argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='assign input/output paths for newton decr sample data')
parser.add_argument("-i", "--input", help="newton decrement samples")
parser.add_argument("-o", "--output", help="shifted lambda data")
args = parser.parse_args()
fname = args.input
out = args.output
f = open(fname, "r"); fs = open(out, "w")
lines = f.readlines()
fs.write("lambda, newton_decrement\n")
for line in lines[1:]:
x = line.strip().split(",")
fs.write(str(float(x[0])-0.015897601733878048) + "," + x[1] + "\n")
f.close(); fs.close() | [
"argparse.ArgumentParser"
] | [((71, 168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""assign input/output paths for newton decr sample data"""'}), "(description=\n 'assign input/output paths for newton decr sample data')\n", (94, 168), False, 'import csv, sys, os, argparse\n')] |
import platform
import os
if platform.architecture()[0] == '32bit':
os.environ["PYSDL2_DLL_PATH"] = "./SDL2/x86"
else:
os.environ["PYSDL2_DLL_PATH"] = "./SDL2/x64"
import game_framework
from pico2d import *
import start_state
# fill here
open_canvas(1200, 800, True)
game_framework.run(start_state)
close_canvas() | [
"game_framework.run",
"platform.architecture"
] | [((279, 310), 'game_framework.run', 'game_framework.run', (['start_state'], {}), '(start_state)\n', (297, 310), False, 'import game_framework\n'), ((30, 53), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (51, 53), False, 'import platform\n')] |
import pickle
import pandas as pd
import os
import sklearn
import numpy as np
from flask import Flask, request, Response
from lightgbm import LGBMClassifier
from class_.FraudDetection import FraudDetection
model = pickle.load(open('model/lgbm.pkl', 'rb')) # loading model
app = Flask(__name__) # initialize API
@app.route('/fraudDetection/predict', methods=['POST'])
def fraudDetection_predict():
test_json = request.get_json()
if test_json: # there is data
if isinstance(test_json, dict): # unique example
test_raw = pd.DataFrame(test_json, index=[0])
else: # multiple example
test_raw = pd.DataFrame(test_json, columns=test_json[0].keys())
# instantiate class
detection = FraudDetection()
# data cleaning
df1 = detection.cleaning(df=test_raw)
print('cleaning OK')
# feature engineering
df2 = detection.feature_engineering(df=df1)
print('feature engineering OK')
# data preparation
df3 = detection.preparation(df=df2)
print('data preparation OK')
# feature selection
df4 = detection.feature_selection(df=df3)
print('feature selection OK')
# prediction
df_response = detection.get_prediction(
model=model, original_data=df1, test_data=df4
)
print('prediction OK')
return df_response
else:
return Response('{}', status=200, minetype='application/json')
if __name__ == '__main__':
porta = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=porta)
"""
import json
import requests
# data to json
data = json.dumps(x_test.to_dict(orient='records'))
#url = 'http://127.0.0.1:5000/fraudDetection/predict'
url = 'https://api-fraud.herokuapp.com/fraudDetection/predict' # local host
header = {'content-type': 'application/json'} # set type as json
# request with method POST
response = requests.post(url, data=data, headers=header)
print('Status code: {}'.format(response.status_code))
# json to dataframe
d1 = pd.DataFrame(response.json(), columns=response.json()[0].keys())
d1""" | [
"flask.Flask",
"class_.FraudDetection.FraudDetection",
"os.environ.get",
"flask.request.get_json",
"flask.Response",
"pandas.DataFrame"
] | [((281, 296), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (286, 296), False, 'from flask import Flask, request, Response\n'), ((422, 440), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (438, 440), False, 'from flask import Flask, request, Response\n'), ((1608, 1636), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(5000)'], {}), "('PORT', 5000)\n", (1622, 1636), False, 'import os\n'), ((774, 790), 'class_.FraudDetection.FraudDetection', 'FraudDetection', ([], {}), '()\n', (788, 790), False, 'from class_.FraudDetection import FraudDetection\n'), ((1503, 1558), 'flask.Response', 'Response', (['"""{}"""'], {'status': '(200)', 'minetype': '"""application/json"""'}), "('{}', status=200, minetype='application/json')\n", (1511, 1558), False, 'from flask import Flask, request, Response\n'), ((556, 590), 'pandas.DataFrame', 'pd.DataFrame', (['test_json'], {'index': '[0]'}), '(test_json, index=[0])\n', (568, 590), True, 'import pandas as pd\n')] |
import sys
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.backends import cudnn
from utils.utils import cast
from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_
from .dpcnn import dpcnn
from .prep_text import TextData_Uni, TextData_Lab, TextDataBatches, gen_uni_name, gen_lab_name
from .prep_text_n import TextData_N, gen_n_name
from .prep_text import main as prep_text_main
from .text_utils import get_dlist, load_x_emb, match_vocab
from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index
cudnn.benchmark = True
#--------------------------------------------------------------------
# For a dataset "dataname", the following input files are required.
# dataname-train.tok.txt, dataname-train.cat
# dataname-test.tok.txt, dataname-test.cat
# dataname.catdic
#
# *.tok.txt: tokens delimited by white space. one document per line.
# *.cat: class labels.
# *.catdic: class names used in *.cat. one name per line.
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def prep_data(opt, types):
missing = ''
for type in types:
ds_path = gen_uni_name(opt.dataroot, opt.dataset, type)
ls_path = gen_lab_name(opt.dataroot, opt.dataset, type)
if not os.path.exists(ds_path):
missing += ' %s' % ds_path
if not os.path.exists(ls_path):
missing += ' %s' % ls_path
if len(missing) > 0:
timeLog('----------------------------------------------------------------------')
if opt.dont_write_to_dataroot:
raise Exception("The following files are missing: %s\nTo generate them, turn off 'dont_write_to_dataroot'." % missing)
timeLog('Calling prep_text_main for creating the following files: %s' % missing)
prep_text_main('prep', ['--dataset', opt.dataset, '--dataroot', opt.dataroot ])
timeLog('Done with prep text main ------------------------------')
else:
timeLog('Using existing data files ... ')
#----------------------------------------------------------
def check_opt_(opt):
#--- required attributes
names = [ 'dataroot','dataset','num_dev','x_emb','seed','batch_unit','batch_size','depth','width','dropout','top_dropout','ker_size']
raise_if_absent(opt, names, who='dpcnn_train')
#--- optional attributes
add_if_absent_(opt, ['dont_write_to_dataroot'], False)
add_if_absent_(opt, ['num_train','req_max_len'], -1)
add_if_absent_(opt, ['train_dlist_path','dev_dlist_path'], None)
add_if_absent_(opt, ['csv_fn'], '')
#********************************************************************
def main(opt):
timeLog("dpcnn_train(opt) begins ...")
check_opt_(opt)
logging('Using %s ... ' % ('GPU(s)' if torch.cuda.is_available() else 'CPU'))
reset_logging(opt.csv_fn)
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
#--- load external embeddings
x_embed,x_n_maxes = load_x_emb(opt.x_emb)
#--- prepare data
prep_data(opt, ['train', 'test'])
rs = np.random.get_state()
def prep_uni(type):
return TextData_Uni(pathname=gen_uni_name(opt.dataroot, opt.dataset, type))
def prep_lab(type):
return TextData_Lab(pathname=gen_lab_name(opt.dataroot, opt.dataset, type))
def prep_x_dss(type): # 'x' for extra
if x_n_maxes is None:
return None
return [ TextData_N(gen_n_name(opt.dataroot, opt.dataset, n_max, type)) for n_max in x_n_maxes ]
trn_dlist,dev_dlist = get_dlist(opt.seed, opt.num_train, opt.num_dev, opt.train_dlist_path, opt.dev_dlist_path,
len(prep_uni('train')))
def read_ds_ls_x(dlist): # read data and labels
type='train'
ds = prep_uni(type); ds.shuffle(dlist)
ls = prep_lab(type); ls.shuffle(dlist)
x_dss = prep_x_dss(type)
if x_dss is not None:
for xds in x_dss:
xds.shuffle(dlist)
return ds, ls, x_dss
td_ds,td_ls,x_td = read_ds_ls_x(trn_dlist) # training data
dv_ds,dv_ls,x_dv = read_ds_ls_x(dev_dlist) # validation data
match_vocab(x_embed, td_ds, x_td)
type = 'test'
ts_ds = prep_uni(type); ts_ls = prep_lab(type); x_ts = prep_x_dss(type) # test data
bch_param = {'req_max_len':opt.req_max_len, 'batch_unit':opt.batch_unit, 'batch_size':opt.batch_size}
trn_data = TextDataBatches(td_ds, td_ls, **bch_param, do_shuffle=True, x_dss=x_td)
dev_data = TextDataBatches(dv_ds, dv_ls, **bch_param, do_shuffle=False, x_dss=x_dv)
tst_data = TextDataBatches(ts_ds, ts_ls, **bch_param, do_shuffle=False, x_dss=x_ts)
np.random.set_state(rs)
test_dss = [ {'name':'dev', 'data':dev_data}, {'name':'test', 'data':tst_data} ]
num_classes = td_ls.num_class()
logging('#classes=%d' % num_classes)
if num_classes != dv_ls.num_class() or num_classes != ts_ls.num_class():
raise Exception('Conflict in # of classes: ' +str(num_classes)+','+str(dv_ls.num_class())+','+str(ts_ls.num_class()))
vocab_size = td_ds.vocab_size()
logging('#vocab=%d' % vocab_size)
if vocab_size != dv_ds.vocab_size() or vocab_size != ts_ds.vocab_size():
raise Exception('Conflict in vocabulary sizes: '+str(vocab_size)+','+str(dv_ds.vocab_size())+','+str(ts_ds.vocab_size()))
#--- prepare a model
def initialize_model():
return dpcnn(opt.depth, opt.width, num_classes, vocab_size,
top_dropout=opt.top_dropout, dropout=opt.dropout,
ker_size=opt.ker_size,
x_embed=x_embed) # external embedding
func, params = initialize_model()
#--- training ...
loss_function = F.cross_entropy
def net(sample, is_train=False):
if sample is None:
return loss_function
inputs = cast(sample[0], 'long')
x_inputs = [ cast(data, 'long') for data in sample[2] ] if len(sample) >= 3 else None
output = func(inputs, params, is_train, extra_input=x_inputs)
targets = cast(sample[Target_index], 'long')
return loss_function(output, targets), output
if not is_gulf(opt):
train_base_model(opt, net, params, trn_data, test_dss)
else:
i_func, i_params = initialize_model()
copy_params(src=params, dst=i_params)
def i_net(sample):
is_train = False
inputs = cast(sample[0], 'long')
x_inputs = [ cast(data, 'long') for data in sample[2] ] if len(sample) >= 3 else None
return i_func(inputs, i_params, is_train, extra_input=x_inputs)
train_gulf_model(opt, i_net, i_params, net, params, trn_data, test_dss)
timeLog("dpcnn_train(opt) ends ...")
| [
"utils.utils0.raise_if_absent",
"torch.manual_seed",
"numpy.random.get_state",
"utils.utils0.add_if_absent_",
"numpy.random.set_state",
"utils.utils.cast",
"os.path.exists",
"utils.utils0.timeLog",
"gulf.train_base_model",
"gulf.train_gulf_model",
"gulf.copy_params",
"torch.cuda.is_available",... | [((2349, 2395), 'utils.utils0.raise_if_absent', 'raise_if_absent', (['opt', 'names'], {'who': '"""dpcnn_train"""'}), "(opt, names, who='dpcnn_train')\n", (2364, 2395), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2429, 2483), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['dont_write_to_dataroot']", '(False)'], {}), "(opt, ['dont_write_to_dataroot'], False)\n", (2443, 2483), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2487, 2540), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['num_train', 'req_max_len']", '(-1)'], {}), "(opt, ['num_train', 'req_max_len'], -1)\n", (2501, 2540), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2543, 2608), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['train_dlist_path', 'dev_dlist_path']", 'None'], {}), "(opt, ['train_dlist_path', 'dev_dlist_path'], None)\n", (2557, 2608), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2611, 2646), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['csv_fn']", '""""""'], {}), "(opt, ['csv_fn'], '')\n", (2625, 2646), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2736, 2774), 'utils.utils0.timeLog', 'timeLog', (['"""dpcnn_train(opt) begins ..."""'], {}), "('dpcnn_train(opt) begins ...')\n", (2743, 2774), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2879, 2904), 'utils.utils0.reset_logging', 'reset_logging', (['opt.csv_fn'], {}), '(opt.csv_fn)\n', (2892, 2904), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2909, 2936), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (2926, 2936), False, 'import torch\n'), ((2940, 2964), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (2954, 2964), True, 'import numpy as np\n'), ((3113, 3134), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (3132, 3134), True, 'import numpy as np\n'), ((4659, 4682), 'numpy.random.set_state', 'np.random.set_state', (['rs'], {}), '(rs)\n', (4678, 4682), True, 'import numpy as np\n'), ((4807, 4843), 'utils.utils0.logging', 'logging', (["('#classes=%d' % num_classes)"], {}), "('#classes=%d' % num_classes)\n", (4814, 4843), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((5082, 5115), 'utils.utils0.logging', 'logging', (["('#vocab=%d' % vocab_size)"], {}), "('#vocab=%d' % vocab_size)\n", (5089, 5115), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((6624, 6660), 'utils.utils0.timeLog', 'timeLog', (['"""dpcnn_train(opt) ends ..."""'], {}), "('dpcnn_train(opt) ends ...')\n", (6631, 6660), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((1547, 1633), 'utils.utils0.timeLog', 'timeLog', (['"""----------------------------------------------------------------------"""'], {}), "(\n '----------------------------------------------------------------------')\n", (1554, 1633), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((1800, 1885), 'utils.utils0.timeLog', 'timeLog', (["('Calling prep_text_main for creating the following files: %s' % missing)"], {}), "('Calling prep_text_main for creating the following files: %s' % missing\n )\n", (1807, 1885), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((1973, 2039), 'utils.utils0.timeLog', 'timeLog', (['"""Done with prep text main ------------------------------"""'], {}), "('Done with prep text main ------------------------------')\n", (1980, 2039), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2055, 2096), 'utils.utils0.timeLog', 'timeLog', (['"""Using existing data files ... """'], {}), "('Using existing data files ... ')\n", (2062, 2096), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((5809, 5832), 'utils.utils.cast', 'cast', (['sample[0]', '"""long"""'], {}), "(sample[0], 'long')\n", (5813, 5832), False, 'from utils.utils import cast\n'), ((6009, 6043), 'utils.utils.cast', 'cast', (['sample[Target_index]', '"""long"""'], {}), "(sample[Target_index], 'long')\n", (6013, 6043), False, 'from utils.utils import cast\n'), ((6107, 6119), 'gulf.is_gulf', 'is_gulf', (['opt'], {}), '(opt)\n', (6114, 6119), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((6127, 6181), 'gulf.train_base_model', 'train_base_model', (['opt', 'net', 'params', 'trn_data', 'test_dss'], {}), '(opt, net, params, trn_data, test_dss)\n', (6143, 6181), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((6241, 6278), 'gulf.copy_params', 'copy_params', ([], {'src': 'params', 'dst': 'i_params'}), '(src=params, dst=i_params)\n', (6252, 6278), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((6548, 6619), 'gulf.train_gulf_model', 'train_gulf_model', (['opt', 'i_net', 'i_params', 'net', 'params', 'trn_data', 'test_dss'], {}), '(opt, i_net, i_params, net, params, trn_data, test_dss)\n', (6564, 6619), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((1382, 1405), 'os.path.exists', 'os.path.exists', (['ds_path'], {}), '(ds_path)\n', (1396, 1405), False, 'import os\n'), ((1456, 1479), 'os.path.exists', 'os.path.exists', (['ls_path'], {}), '(ls_path)\n', (1470, 1479), False, 'import os\n'), ((6349, 6372), 'utils.utils.cast', 'cast', (['sample[0]', '"""long"""'], {}), "(sample[0], 'long')\n", (6353, 6372), False, 'from utils.utils import cast\n'), ((2836, 2861), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2859, 2861), False, 'import torch\n'), ((5852, 5870), 'utils.utils.cast', 'cast', (['data', '"""long"""'], {}), "(data, 'long')\n", (5856, 5870), False, 'from utils.utils import cast\n'), ((6395, 6413), 'utils.utils.cast', 'cast', (['data', '"""long"""'], {}), "(data, 'long')\n", (6399, 6413), False, 'from utils.utils import cast\n')] |
# Pass the search string you want
# It search and download the first image(thumbnail) on imgur.com
# Then it will return the name of the file stored in ./images/full/
import subprocess
import re
def search_image(arg):
out = subprocess.check_output(['scrapy', 'crawl', 'imgur', '-a',
'arg='+arg], stderr=subprocess.STDOUT,
cwd='Imgur')
string = out.decode()
return re.findall(r"full.(\S*)'",string)[0]
| [
"subprocess.check_output",
"re.findall"
] | [((242, 358), 'subprocess.check_output', 'subprocess.check_output', (["['scrapy', 'crawl', 'imgur', '-a', 'arg=' + arg]"], {'stderr': 'subprocess.STDOUT', 'cwd': '"""Imgur"""'}), "(['scrapy', 'crawl', 'imgur', '-a', 'arg=' + arg],\n stderr=subprocess.STDOUT, cwd='Imgur')\n", (265, 358), False, 'import subprocess\n'), ((466, 500), 're.findall', 're.findall', (['"""full.(\\\\S*)\'"""', 'string'], {}), '("full.(\\\\S*)\'", string)\n', (476, 500), False, 'import re\n')] |