| code | apis | extract_api |
|---|---|---|
import unittest
import havok
class TestSectionHeaders(unittest.TestCase):
def test_it_can_decompile_the_classname_section_header(self):
""" @test it can decompile the classname section header
Given the file G-6-2.hksc
When the file is passed to the SectionHeader class
Then the section header should be __classnames__ at offset 0x100
"""
with open('../assets/G-6-2.hksc', 'rb') as infile:
infile.seek(64)
section_header = havok.SectionHeader(infile)
self.assertEqual(section_header.name, '__classnames__')
self.assertEqual(section_header.start, 0x100)
def test_it_can_decompile_all_three_section_headers(self):
""" @test it can decompile all three section headers
Given the file Enemy_Assasin_Leader.hkrg
When the file is passed to the SectionHeader class
Then the section headers for __classnames__, __types__ and __data__ should be decompiled
"""
with open('../assets/Enemy_Assasin_Leader.hkrg', 'rb') as infile:
infile.seek(64)
section_header_classnames = havok.SectionHeader(infile)
section_header_types = havok.SectionHeader(infile)
section_header_data = havok.SectionHeader(infile)
self.assertEqual(section_header_classnames.name, '__classnames__')
self.assertEqual(section_header_classnames.start, 0x100)
self.assertEqual(section_header_types.name, '__types__')
self.assertEqual(section_header_types.start, 0x290)
self.assertEqual(section_header_data.name, '__data__')
self.assertEqual(section_header_data.start, 0x290)
def test_the_section_header_tables_decompiles_all_section_headers(self):
""" @test the SectionHeaderTables decompiles all section headers
Given the file 19-13.hknm2
When the file is passed to the SectionHeaderTables class
Then all three section headers are decompiled into the container
"""
with open('../assets/19-13.hknm2', 'rb') as infile:
infile.seek(80)
section_header_tables = havok.SectionHeaderTables(infile)
self.assertEqual(section_header_tables.classnames.name, '__classnames__')
self.assertEqual(section_header_tables.types.name, '__types__')
self.assertEqual(section_header_tables.data.name, '__data__')
|
[
"havok.SectionHeaderTables",
"havok.SectionHeader"
] |
[((502, 529), 'havok.SectionHeader', 'havok.SectionHeader', (['infile'], {}), '(infile)\n', (521, 529), False, 'import havok\n'), ((1141, 1168), 'havok.SectionHeader', 'havok.SectionHeader', (['infile'], {}), '(infile)\n', (1160, 1168), False, 'import havok\n'), ((1204, 1231), 'havok.SectionHeader', 'havok.SectionHeader', (['infile'], {}), '(infile)\n', (1223, 1231), False, 'import havok\n'), ((1266, 1293), 'havok.SectionHeader', 'havok.SectionHeader', (['infile'], {}), '(infile)\n', (1285, 1293), False, 'import havok\n'), ((2167, 2200), 'havok.SectionHeaderTables', 'havok.SectionHeaderTables', (['infile'], {}), '(infile)\n', (2192, 2200), False, 'import havok\n')]
|
from __future__ import absolute_import
from django.views.decorators.csrf import csrf_exempt
from sentry.api.base import Endpoint
from sentry.integrations.pipeline import ensure_integration
from sentry.tasks.integrations import sync_metadata
from .integration import JiraIntegrationProvider
class JiraInstalledEndpoint(Endpoint):
authentication_classes = ()
permission_classes = ()
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
return super(JiraInstalledEndpoint, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
state = request.DATA
data = JiraIntegrationProvider().build_integration(state)
integration = ensure_integration('jira', data)
        # Sync integration metadata from JIRA. This must be executed *after*
        # the integration has been installed on JIRA as the access tokens will
# not work until then.
sync_metadata.apply_async([integration.get_installation()], countdown=10)
return self.respond()
|
[
"sentry.integrations.pipeline.ensure_integration"
] |
[((712, 744), 'sentry.integrations.pipeline.ensure_integration', 'ensure_integration', (['"""jira"""', 'data'], {}), "('jira', data)\n", (730, 744), False, 'from sentry.integrations.pipeline import ensure_integration\n')]
|
#Load packages
import numpy as np
import pandas as pd
import torch
from tqdm.auto import tqdm
import pytorch_lightning as pl
from transformers import AutoTokenizer, AutoModel
#Import package for aspect sentiment prediction
import aspect_based_sentiment_analysis as absa
#Load the ABSA sentiment model
nlp = absa.load()
#List containing the different aspect categories
ASPECTS = ['price','speed','reliability','coverage', 'customer service']
#Load the BERTweet tokenizer
TOKENIZER = AutoTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)
#Load the BERTweet model
BERTWEET_MODEL = AutoModel.from_pretrained("vinai/bertweet-base", from_tf = True, return_dict = True)
class ISP_TweetAspectClassifier(pl.LightningModule):
#Set the aspect classifier
def __init__(self, n_classes=5, n_training_steps=None, n_warmup_steps=None, lr=2e-5):
super().__init__()
self.lr = lr
self.n_warmup_steps = n_warmup_steps
self.n_training_steps = n_training_steps
self.bert = BERTWEET_MODEL
self.classifier = torch.nn.Linear(self.bert.config.hidden_size, n_classes)
self.criterion = torch.nn.BCELoss()
def forward(self, input_ids, attention_mask, labels = None):
output = self.bert(input_ids, attention_mask=attention_mask)
output = self.classifier(output.pooler_output)
output = torch.sigmoid(output)
loss = 0
if labels is not None:
loss = self.criterion(output, labels)
return loss, output
#Load the best model from training
mlc_model = ISP_TweetAspectClassifier.load_from_checkpoint(
"../models/absa-aspect-extraction/bertweet/ae-epoch=19-val_loss=0.33.ckpt",
n_classes=len(ASPECTS)
)
def run(df, col_name, optimal_threshold = 0.3):
"""
Function to perform ABSA on tweets using the multi-label bertweet classifier.
ABSA is a two-part task of aspect extraction and aspect sentiment prediction
Inputs:
- df (pd DataFrame): A pandas dataframe to perform annotation on
        - col_name (str): The specific column in the dataframe containing the tweets to run ABSA on
        - optimal_threshold (float): Probability above which an aspect is treated as detected (default 0.3)
Output:
- absa_df (pd DataFrame): DataFrame containing the tweets and the ABSA results
"""
#List to store detected aspects and their sentiments
df_list = []
#Iterate through all the tweets
for tweet in df[col_name]:
#List to store the aspects detected
aspects_detected = []
#List to store the sentiment values (Positive, Negative or Neutral) for the aspects
detected_sentiments = []
#Encode the tweet
encoding = TOKENIZER.encode_plus(
tweet,
add_special_tokens=True,
max_length=TOKENIZER.model_max_length,
return_token_type_ids=False,
padding="max_length",
return_attention_mask=True,
return_tensors='pt'
)
#Get the model's prediction
_, model_prediction = mlc_model(encoding["input_ids"], encoding["attention_mask"])
model_prediction = model_prediction.detach().numpy()
#Determine the aspects detected using the optimal threshold found during fine-tuning
model_prediction = np.where(model_prediction > optimal_threshold, 1, 0)
#Iterate through the model's predictions for each aspect
for pred_idx in range(len(model_prediction[0])):
#If the aspect was detected
if model_prediction[0][pred_idx] == 1:
#Note it down
aspects_detected.append(ASPECTS[pred_idx])
if aspects_detected:
#Next, carry out sentiment prediction on the aspects detected
sentiment = nlp(tweet,aspects = aspects_detected)
#Iterate through each aspect sentiment predicted results
for senti_result in sentiment.examples:
#Get the sentiment scores
scores = np.array(senti_result.scores)
#Find the max sentiment score (i.e. the predicted sentiment value)
max_score = np.argmax(scores)
#Record the sentiment (string) category for the aspect
if max_score == 2:
detected_sentiments.append("Positive")
elif max_score == 1:
detected_sentiments.append("Negative")
else:
detected_sentiments.append("Neutral")
#Add the detected aspects and sentiments from the sentence to the list
df_list.append([tweet,aspects_detected,detected_sentiments])
else:
df_list.append([tweet,[None],[None]])
absa_df = pd.DataFrame(df_list,
columns=[col_name,'Detected aspects','Predicted sentiment'])
return absa_df
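#--- Usage sketch (illustration, not part of the original script) ---
#The CSV path and column name below are hypothetical; any DataFrame with a
#column of tweet texts can be passed to run() in the same way.
if __name__ == "__main__":
    tweets_df = pd.read_csv("../data/tweets.csv")  # hypothetical input file
    absa_results = run(tweets_df, "tweet_text", optimal_threshold=0.3)
    print(absa_results.head())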
|
[
"pandas.DataFrame",
"torch.nn.BCELoss",
"numpy.argmax",
"transformers.AutoModel.from_pretrained",
"transformers.AutoTokenizer.from_pretrained",
"torch.sigmoid",
"numpy.where",
"aspect_based_sentiment_analysis.load",
"numpy.array",
"torch.nn.Linear"
] |
[((309, 320), 'aspect_based_sentiment_analysis.load', 'absa.load', ([], {}), '()\n', (318, 320), True, 'import aspect_based_sentiment_analysis as absa\n'), ((482, 554), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""vinai/bertweet-base"""'], {'normalization': '(True)'}), "('vinai/bertweet-base', normalization=True)\n", (511, 554), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((598, 683), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['"""vinai/bertweet-base"""'], {'from_tf': '(True)', 'return_dict': '(True)'}), "('vinai/bertweet-base', from_tf=True, return_dict=True\n )\n", (623, 683), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((4910, 4998), 'pandas.DataFrame', 'pd.DataFrame', (['df_list'], {'columns': "[col_name, 'Detected aspects', 'Predicted sentiment']"}), "(df_list, columns=[col_name, 'Detected aspects',\n 'Predicted sentiment'])\n", (4922, 4998), True, 'import pandas as pd\n'), ((1075, 1131), 'torch.nn.Linear', 'torch.nn.Linear', (['self.bert.config.hidden_size', 'n_classes'], {}), '(self.bert.config.hidden_size, n_classes)\n', (1090, 1131), False, 'import torch\n'), ((1157, 1175), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (1173, 1175), False, 'import torch\n'), ((1391, 1412), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (1404, 1412), False, 'import torch\n'), ((3353, 3405), 'numpy.where', 'np.where', (['(model_prediction > optimal_threshold)', '(1)', '(0)'], {}), '(model_prediction > optimal_threshold, 1, 0)\n', (3361, 3405), True, 'import numpy as np\n'), ((4144, 4173), 'numpy.array', 'np.array', (['senti_result.scores'], {}), '(senti_result.scores)\n', (4152, 4173), True, 'import numpy as np\n'), ((4286, 4303), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (4295, 4303), True, 'import numpy as np\n')]
|
from functools import wraps
from time import perf_counter
def timer(func):
"""Print the runtime of the decorated function"""
@wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = perf_counter()
value = func(*args, **kwargs)
end_time = perf_counter()
run_time = end_time - start_time
print('Finished {!r} in {:.4f} secs'.format(func.__name__, run_time))
return value
return wrapper_timer
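# --- Usage sketch (illustration, not part of the original module) ---
# Decorating a small example workload; the function below is made up.
@timer
def sum_to(n):
    """Sum the integers from 0 to n - 1 (example workload)."""
    return sum(range(n))

if __name__ == "__main__":
    sum_to(1_000_000)  # prints e.g.: Finished 'sum_to' in 0.0312 secs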
|
[
"time.perf_counter",
"functools.wraps"
] |
[((137, 148), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (142, 148), False, 'from functools import wraps\n'), ((210, 224), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (222, 224), False, 'from time import perf_counter\n'), ((282, 296), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (294, 296), False, 'from time import perf_counter\n')]
|
from config import config
#########################################################
# flask imports
from flask import Flask, Response, render_template, request, redirect, send_from_directory
#########################################################
#########################################################
# flask socketio imports
from flask_socketio import SocketIO, emit
#########################################################
#########################################################
# local imports
from serverutils.utils import SafeLimitedUniqueQueueList
from serverutils.utils import prettylog
from serverutils.utils import geturl
from serverutils.utils import write_string_to_file
from serverutils.utils import read_string_from_file
from serverutils.utils import dir_listing_as_obj
from serverutils.utils import get_variant_board
import chess
from chess.pgn import read_game
from cbuild.book import get_zobrist_key_hex
#########################################################
#########################################################
# global imports
import time
import os
import sys
import traceback
from urllib.parse import quote
import random
import json
import functools
import io
import uuid
print("importing pyrebase")
import pyrebase
print("initializing firebase")
try:
fbcreds = json.loads(open("firebase/fbcreds.json").read())
firebase = pyrebase.initialize_app(fbcreds)
db = firebase.database()
dbstorage = firebase.storage()
print("initializing firebase done")
except:
print("initializing firebase failed")
print("getting stored config")
try:
storedconfig = db.child("lichguibotconfig").get().val()
write_string_to_file("localconfig.json", storedconfig)
print("getting stored config done, size", len(storedconfig))
except:
print("getting stored config failed")
#traceback.print_exc(file=sys.stderr)
print("importing pyrebase done")
#########################################################
#########################################################
# create app
app = Flask(__name__, static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!'
app.config['UPLOAD_FOLDER'] = 'upload'
app.config['DOWNLOAD_FOLDER'] = 'download'
#########################################################
#########################################################
# mount socket
socketio = SocketIO(app)
#########################################################
#########################################################
# global context
SIMPLE_SERVER_URL = config.simpleserverurl
MAX_CONNS = 3
CONFIDENTIAL_DIRS = [
"firebase",
".git"
]
def sids_overflow_callback(sid):
socketio.emit("siores", {"data": "conn removed"}, room = sid)
prettylog([
"removed SID <{}>".format(sid)
])
connected_sids = SafeLimitedUniqueQueueList(max = MAX_CONNS, overflow_callback = sids_overflow_callback)
class AppState:
def __init__(self):
pass
app_state = AppState()
def my_broadcast(obj):
with app.app_context():
for sid in connected_sids.items:
try:
socketio.emit("siores", obj, room = sid, namespace = "/")
except:
print("emit failed for sid {}".format(sid))
def getchildatpath(path):
try:
parts = path.split("/")
child = db.child(parts[0])
if len(parts) > 1:
            for part in parts[1:]:
child = child.child(part)
return child
except:
#traceback.print_exc(file=sys.stderr)
return None
def storedb(path, dataobj):
try:
child = getchildatpath(path)
if not ( child is None ):
child.set(json.dumps(dataobj))
return "store db ok at {}".format(path)
except:
#traceback.print_exc(file=sys.stderr)
pass
return "store db failed at {}".format(path)
def retrievedb(path):
try:
child = getchildatpath(path)
if not ( child is None ):
val = child.get().val()
obj = json.loads(val)
return ( obj , "retrieve db ok at {} size {}".format(path, len(json.dumps(obj))) )
except:
#traceback.print_exc(file=sys.stderr)
pass
return ( None , "retrieve db failed at {}".format(path) )
def addpositioninfo(board, obj, genmove = None, genboard = None):
moves = board.generate_legal_moves()
movelist = []
for move in moves:
movelist.append({
"uci": move.uci(),
"san": board.san(move)
})
obj["positioninfo"] = {
"movelist": movelist,
"zobristkeyhex": get_zobrist_key_hex(board)
}
if genmove == "reset":
obj["positioninfo"]["genmove"] = "reset"
elif not ( genmove is None ):
obj["positioninfo"]["genmove"] = {
"uci": genmove.uci(),
"san": genboard.san(genmove)
}
def createhistory(pgn):
historyobj = None
try:
pgnio = io.StringIO(pgn)
game = read_game(pgnio)
board = game.board()
positioninfos = []
pinfo = {
"fen": board.fen()
}
addpositioninfo(board, pinfo)
positioninfos.append(pinfo)
for move in game.main_line():
genboard = board.copy()
board.push(move)
pinfo = {
"fen": board.fen()
}
addpositioninfo(board, pinfo, move, genboard)
positioninfos.append(pinfo)
historyobj = {
"positioninfos": positioninfos,
"pgn": pgn,
"uci_variant": board.uci_variant,
"chess960": board.chess960
}
return ( historyobj , "game history created ok" )
except:
traceback.print_exc(file=sys.stderr)
return ( None , "! create game history failed" )
class socket_handler:
def __init__(self, ev):
self.ev = ev
def __call__(self, f):
def wrapped_f(*args):
jsonobj = args[0]
rjsonobj = {}
connected_sids.enqueue(request.sid)
prettylog([
"EV <{}> SID <{}>".format(self.ev,request.sid),
"ARGS {}".format(args).ljust(160)[:160],
"CONNS {}".format(connected_sids.items)
])
if "kind" in jsonobj:
try:
kind = jsonobj["kind"]
if "owner" in jsonobj:
rjsonobj["owner"] = jsonobj["owner"]
if kind == "cmd":
key = jsonobj["key"]
commandjsonstr = json.dumps({"command": jsonobj["data"], "key": key})
rjsonobj["status"] = geturl(SIMPLE_SERVER_URL + "/" + quote(commandjsonstr))
rjsonobj["key"] = key
elif kind == "storebinid":
binid = jsonobj["data"]
write_string_to_file("binid.txt", binid)
elif kind == "storeconfig":
write_string_to_file("localconfig.json", jsonobj["data"])
rjsonobj["kind"] = "configstored"
try:
print("setting config on firebase")
db.child("lichguibotconfig").set(jsonobj["data"])
print("setting config on firebase done")
rjsonobj["status"] = "config stored locally and remotely"
except:
print("setting config on firebase failed")
rjsonobj["status"] = "config stored only locally"
elif kind == "storedb":
try:
path = jsonobj["path"]
dataobj = jsonobj["dataobj"]
rjsonobj["status"] = storedb(path, dataobj)
rjsonobj["path"] = path
except:
traceback.print_exc(file=sys.stderr)
rjsonobj["status"] = "! store db failed at {}".format(path)
elif kind == "retrievedb":
try:
path = jsonobj["path"]
rjsonobj["dataobj"] , rjsonobj["status"] = retrievedb(path)
rjsonobj["path"] = path
except:
traceback.print_exc(file=sys.stderr)
rjsonobj["dataobj"] = None
rjsonobj["status"] = "! retrieve db failed at {}".format(path)
elif kind == "parsepgn":
rjsonobj["historyobj"] = None
try:
data = jsonobj["data"]
rjsonobj["historyobj"] , rjsonobj["status"] = createhistory(data)
except:
traceback.print_exc(file=sys.stderr)
rjsonobj["status"] = "! parse pgn failed"
elif kind == "getlocalconfig":
rjsonobj["kind"] = "setlocalconfig"
try:
print("getting config from firebase")
rjsonobj["data"] = db.child("lichguibotconfig").get().val()
write_string_to_file("localconfig.json", rjsonobj["data"])
print("getting config from firebase done, size", len(rjsonobj["data"]))
except:
print("getting config from firebase failed, falling back to local config")
rjsonobj["data"] = read_string_from_file("localconfig.json", "{}")
elif kind == "mainboardmove":
try:
variantkey = jsonobj["variantkey"]
fen = jsonobj["fen"]
moveuci = jsonobj["moveuci"]
move = chess.Move.from_uci(moveuci)
board = get_variant_board(variantkey)
board.set_fen(fen)
if board.is_legal(move):
genboard = board.copy()
board.push(move)
rjsonobj["kind"] = "setmainboardfen"
rjsonobj["fen"] = board.fen()
rjsonobj["status"] = "making main board move ok"
addpositioninfo(board, rjsonobj, move, genboard)
else:
rjsonobj["kind"] = "setmainboardfen"
rjsonobj["fen"] = fen
rjsonobj["status"] = "! making main board move failed, illegal move"
addpositioninfo(board, rjsonobj)
except:
rjsonobj["status"] = "! making main board move failed, fatal"
traceback.print_exc(file=sys.stderr)
elif kind == "mainboardsetvariant":
try:
variantkey = jsonobj["variantkey"]
board = get_variant_board(variantkey)
if variantkey == "chess960":
board.set_chess960_pos(random.randint(0, 959))
rjsonobj["kind"] = "setmainboardfen"
rjsonobj["fen"] = board.fen()
rjsonobj["status"] = "main board variant selected ok"
addpositioninfo(board, rjsonobj, "reset")
except:
rjsonobj["status"] = "! main board variant selection failed"
traceback.print_exc(file=sys.stderr)
except:
rjsonobj["status"] = "! command error"
emit('siores', {"request": jsonobj, "response": rjsonobj})
f(*args)
return wrapped_f
#########################################################
def randurl():
    return random.randint(10 ** 9, 10 ** 10)
#########################################################
# app routes
@app.route("/")
def index():
print("request root", request.full_path)
return render_template("index.html", randurl = randurl)
@app.route("/read", methods = ["POST"])
def read():
obj = request.get_json()
my_broadcast(obj)
return ""
@app.route("/upload", methods = ["POST"])
def upload():
if 'files' not in request.files:
return Response(json.dumps({
"success": False,
"status": "no file input"
}), content_type = "application/json")
file = request.files['files']
if file:
filename = file.filename
parts = filename.split(".")
savefilename = uuid.uuid1().hex + "." + parts[-1]
savepath = os.path.join(app.config['UPLOAD_FOLDER'], savefilename)
file.save(savepath)
dbstorage.child("upload").child(savefilename).put(savepath)
return Response(json.dumps({
"success": True,
"filename": filename,
"savefilename": savefilename
}), content_type = "application/json")
@app.route("/file/<path:path>")
def serve_static_file(path):
parts = path.split("/")
basedir = parts[0]
if basedir in CONFIDENTIAL_DIRS:
return "sorry, {} directory content is confidential".format(basedir)
return send_from_directory('.', path)
@app.route("/uploads/<path:path>")
def serve_uploaded_file(path):
filepath = os.path.join(app.config['DOWNLOAD_FOLDER'], path)
dbstorage.child("upload").child(path).download(filepath)
return send_from_directory('.', "download/{}".format(path))
@app.route("/dirlist/<path:path>")
def dirlist_of_path(path):
path = functools.reduce(os.path.join, path.split("/")[1:], ".")
return Response(json.dumps(dir_listing_as_obj(path)), content_type = "application/json")
#########################################################
#########################################################
# socketio event handlers
@socketio.on('sioreq')
@socket_handler('sioreq')
def handle_sioreq(json):
pass
#########################################################
#########################################################
# startup
def startup(port = 5000):
socketio.run(app, port = port)
#########################################################
#########################################################
# main
if __name__ == '__main__':
#startup()
pass
#########################################################
|
[
"json.dumps",
"os.path.join",
"flask.request.get_json",
"serverutils.utils.SafeLimitedUniqueQueueList",
"traceback.print_exc",
"random.randint",
"json.loads",
"flask_socketio.emit",
"flask.render_template",
"cbuild.book.get_zobrist_key_hex",
"flask.send_from_directory",
"chess.Move.from_uci",
"io.StringIO",
"chess.pgn.read_game",
"pyrebase.initialize_app",
"serverutils.utils.read_string_from_file",
"uuid.uuid1",
"serverutils.utils.write_string_to_file",
"flask_socketio.SocketIO",
"serverutils.utils.get_variant_board",
"flask.Flask",
"serverutils.utils.dir_listing_as_obj",
"urllib.parse.quote"
] |
[((2051, 2093), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""'}), "(__name__, static_url_path='/static')\n", (2056, 2093), False, 'from flask import Flask, Response, render_template, request, redirect, send_from_directory\n'), ((2356, 2369), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {}), '(app)\n', (2364, 2369), False, 'from flask_socketio import SocketIO, emit\n'), ((2796, 2884), 'serverutils.utils.SafeLimitedUniqueQueueList', 'SafeLimitedUniqueQueueList', ([], {'max': 'MAX_CONNS', 'overflow_callback': 'sids_overflow_callback'}), '(max=MAX_CONNS, overflow_callback=\n sids_overflow_callback)\n', (2822, 2884), False, 'from serverutils.utils import SafeLimitedUniqueQueueList\n'), ((1375, 1407), 'pyrebase.initialize_app', 'pyrebase.initialize_app', (['fbcreds'], {}), '(fbcreds)\n', (1398, 1407), False, 'import pyrebase\n'), ((1670, 1724), 'serverutils.utils.write_string_to_file', 'write_string_to_file', (['"""localconfig.json"""', 'storedconfig'], {}), "('localconfig.json', storedconfig)\n", (1690, 1724), False, 'from serverutils.utils import write_string_to_file\n'), ((12786, 12829), 'random.randint', 'random.randint', (['(1000000000.0)', '(10000000000.0)'], {}), '(1000000000.0, 10000000000.0)\n', (12800, 12829), False, 'import random\n'), ((12968, 13014), 'flask.render_template', 'render_template', (['"""index.html"""'], {'randurl': 'randurl'}), "('index.html', randurl=randurl)\n", (12983, 13014), False, 'from flask import Flask, Response, render_template, request, redirect, send_from_directory\n'), ((13080, 13098), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (13096, 13098), False, 'from flask import Flask, Response, render_template, request, redirect, send_from_directory\n'), ((14212, 14242), 'flask.send_from_directory', 'send_from_directory', (['"""."""', 'path'], {}), "('.', path)\n", (14231, 14242), False, 'from flask import Flask, Response, render_template, request, redirect, send_from_directory\n'), ((14333, 14382), 'os.path.join', 'os.path.join', (["app.config['DOWNLOAD_FOLDER']", 'path'], {}), "(app.config['DOWNLOAD_FOLDER'], path)\n", (14345, 14382), False, 'import os\n'), ((4612, 4638), 'cbuild.book.get_zobrist_key_hex', 'get_zobrist_key_hex', (['board'], {}), '(board)\n', (4631, 4638), False, 'from cbuild.book import get_zobrist_key_hex\n'), ((4967, 4983), 'io.StringIO', 'io.StringIO', (['pgn'], {}), '(pgn)\n', (4978, 4983), False, 'import io\n'), ((4999, 5015), 'chess.pgn.read_game', 'read_game', (['pgnio'], {}), '(pgnio)\n', (5008, 5015), False, 'from chess.pgn import read_game\n'), ((13622, 13677), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'savefilename'], {}), "(app.config['UPLOAD_FOLDER'], savefilename)\n", (13634, 13677), False, 'import os\n'), ((4033, 4048), 'json.loads', 'json.loads', (['val'], {}), '(val)\n', (4043, 4048), False, 'import json\n'), ((5753, 5789), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (5772, 5789), False, 'import traceback\n'), ((12596, 12654), 'flask_socketio.emit', 'emit', (['"""siores"""', "{'request': jsonobj, 'response': rjsonobj}"], {}), "('siores', {'request': jsonobj, 'response': rjsonobj})\n", (12600, 12654), False, 'from flask_socketio import SocketIO, emit\n'), ((13265, 13322), 'json.dumps', 'json.dumps', (["{'success': False, 'status': 'no file input'}"], {}), "({'success': False, 'status': 'no file input'})\n", (13275, 13322), False, 'import json\n'), ((13810, 13895), 'json.dumps', 'json.dumps', 
(["{'success': True, 'filename': filename, 'savefilename': savefilename}"], {}), "({'success': True, 'filename': filename, 'savefilename':\n savefilename})\n", (13820, 13895), False, 'import json\n'), ((14674, 14698), 'serverutils.utils.dir_listing_as_obj', 'dir_listing_as_obj', (['path'], {}), '(path)\n', (14692, 14698), False, 'from serverutils.utils import dir_listing_as_obj\n'), ((3684, 3703), 'json.dumps', 'json.dumps', (['dataobj'], {}), '(dataobj)\n', (3694, 3703), False, 'import json\n'), ((13556, 13568), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (13566, 13568), False, 'import uuid\n'), ((4124, 4139), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (4134, 4139), False, 'import json\n'), ((6659, 6711), 'json.dumps', 'json.dumps', (["{'command': jsonobj['data'], 'key': key}"], {}), "({'command': jsonobj['data'], 'key': key})\n", (6669, 6711), False, 'import json\n'), ((7018, 7058), 'serverutils.utils.write_string_to_file', 'write_string_to_file', (['"""binid.txt"""', 'binid'], {}), "('binid.txt', binid)\n", (7038, 7058), False, 'from serverutils.utils import write_string_to_file\n'), ((6814, 6835), 'urllib.parse.quote', 'quote', (['commandjsonstr'], {}), '(commandjsonstr)\n', (6819, 6835), False, 'from urllib.parse import quote\n'), ((7131, 7188), 'serverutils.utils.write_string_to_file', 'write_string_to_file', (['"""localconfig.json"""', "jsonobj['data']"], {}), "('localconfig.json', jsonobj['data'])\n", (7151, 7188), False, 'from serverutils.utils import write_string_to_file\n'), ((8299, 8335), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (8318, 8335), False, 'import traceback\n'), ((8807, 8843), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (8826, 8843), False, 'import traceback\n'), ((9351, 9387), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (9370, 9387), False, 'import traceback\n'), ((9808, 9866), 'serverutils.utils.write_string_to_file', 'write_string_to_file', (['"""localconfig.json"""', "rjsonobj['data']"], {}), "('localconfig.json', rjsonobj['data'])\n", (9828, 9866), False, 'from serverutils.utils import write_string_to_file\n'), ((10149, 10196), 'serverutils.utils.read_string_from_file', 'read_string_from_file', (['"""localconfig.json"""', '"""{}"""'], {}), "('localconfig.json', '{}')\n", (10170, 10196), False, 'from serverutils.utils import read_string_from_file\n'), ((10530, 10558), 'chess.Move.from_uci', 'chess.Move.from_uci', (['moveuci'], {}), '(moveuci)\n', (10549, 10558), False, 'import chess\n'), ((10595, 10624), 'serverutils.utils.get_variant_board', 'get_variant_board', (['variantkey'], {}), '(variantkey)\n', (10612, 10624), False, 'from serverutils.utils import get_variant_board\n'), ((11628, 11664), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (11647, 11664), False, 'import traceback\n'), ((11859, 11888), 'serverutils.utils.get_variant_board', 'get_variant_board', (['variantkey'], {}), '(variantkey)\n', (11876, 11888), False, 'from serverutils.utils import get_variant_board\n'), ((12463, 12499), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (12482, 12499), False, 'import traceback\n'), ((12015, 12037), 'random.randint', 'random.randint', (['(0)', '(959)'], {}), '(0, 959)\n', (12029, 12037), False, 'import random\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import __test_source
from enum import Enum
class CustomEnum(Enum):
TRACKED_FIELD = "A"
UNTRACKED_field = "B"
untracked_field = "C"
def tracked_index():
d = {}
d[CustomEnum.TRACKED_FIELD] = __test_source()
return d[CustomEnum.TRACKED_FIELD]
def untracked_index_a():
d = {}
d[CustomEnum.untracked_field] = __test_source()
return d[CustomEnum.untracked_field]
def untracked_index_b():
d = {}
d[CustomEnum.UNTRACKED_field] = __test_source()
return d[CustomEnum.UNTRACKED_field]
CONSTANT_A = "A"
CONSTANT_B = {"a": "b"}
untracked_constant = "1"
def tracked_constant_A():
d = {}
d[CONSTANT_A] = __test_source()
return d[CONSTANT_A]
def tracked_constant_B():
d = {}
d[CONSTANT_B] = __test_source()
return d[CONSTANT_B]
def test_untracked_constant():
d = {}
d[untracked_constant] = __test_source()
return d[untracked_constant]
|
[
"builtins.__test_source"
] |
[((405, 420), 'builtins.__test_source', '__test_source', ([], {}), '()\n', (418, 420), False, 'from builtins import __test_source\n'), ((534, 549), 'builtins.__test_source', '__test_source', ([], {}), '()\n', (547, 549), False, 'from builtins import __test_source\n'), ((665, 680), 'builtins.__test_source', '__test_source', ([], {}), '()\n', (678, 680), False, 'from builtins import __test_source\n'), ((849, 864), 'builtins.__test_source', '__test_source', ([], {}), '()\n', (862, 864), False, 'from builtins import __test_source\n'), ((949, 964), 'builtins.__test_source', '__test_source', ([], {}), '()\n', (962, 964), False, 'from builtins import __test_source\n'), ((1062, 1077), 'builtins.__test_source', '__test_source', ([], {}), '()\n', (1075, 1077), False, 'from builtins import __test_source\n')]
|
import sublime, sublime_plugin
import traceback, os, json, io, sys, imp,shlex, tempfile
class evaluate_javascriptCommand(manage_cliCommand):
isNode = True
alsoNonProject = True
def prepare_command(self, **kwargs):
is_line = kwargs.get("is_line") if "is_line" in kwargs else False
view = self.window.active_view()
sel = view.sel()[0]
region_selected = None
str_selected = view.substr(sel).strip()
if is_line:
lines = view.lines(sel)
region_selected = lines[0]
str_selected = view.substr(region_selected)
else:
if not str_selected and region_selected :
region = get_start_end_code_highlights_eval()
region_selected = sublime.Region(region[0], region[1])
lines = view.lines(region_selected)
str_selected = ""
for line in lines:
str_selected += view.substr(view.full_line(line))
elif str_selected:
lines = view.lines(sel)
region_selected = sublime.Region if not region_selected else region_selected
region_selected = sublime.Region(lines[0].begin(), lines[-1:][0].end())
elif not str_selected :
return
if not region_selected :
return
fp = tempfile.NamedTemporaryFile(delete=False)
fp.write(str.encode("console.log('\\n'); console.time('Execution Time');\n"+str_selected+"\nconsole.log('\\n'); console.timeEnd('Execution Time');"))
fp.close()
if sublime.platform() == "windows":
self.command = ["-p", "<", json.dumps(fp.name), "&", "del", "/f", "/q", json.dumps(fp.name)]
else :
self.command = ["-p", "<", shlex.quote(fp.name), ";", "rm", "-rf", shlex.quote(fp.name)]
self._run()
def _run(self):
super(evaluate_javascriptCommand, self)._run()
def is_enabled(self, **args) :
view = self.window.active_view()
if not Util.selection_in_js_scope(view) :
return False
return True
def is_visible(self, **args) :
view = self.window.active_view()
if not Util.selection_in_js_scope(view) :
return False
return True
|
[
"tempfile.NamedTemporaryFile",
"sublime.platform",
"sublime.Region",
"json.dumps",
"shlex.quote"
] |
[((1215, 1256), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (1242, 1256), False, 'import traceback, os, json, io, sys, imp, shlex, tempfile\n'), ((1434, 1452), 'sublime.platform', 'sublime.platform', ([], {}), '()\n', (1450, 1452), False, 'import sublime, sublime_plugin\n'), ((697, 733), 'sublime.Region', 'sublime.Region', (['region[0]', 'region[1]'], {}), '(region[0], region[1])\n', (711, 733), False, 'import sublime, sublime_plugin\n'), ((1500, 1519), 'json.dumps', 'json.dumps', (['fp.name'], {}), '(fp.name)\n', (1510, 1519), False, 'import traceback, os, json, io, sys, imp, shlex, tempfile\n'), ((1545, 1564), 'json.dumps', 'json.dumps', (['fp.name'], {}), '(fp.name)\n', (1555, 1564), False, 'import traceback, os, json, io, sys, imp, shlex, tempfile\n'), ((1610, 1630), 'shlex.quote', 'shlex.quote', (['fp.name'], {}), '(fp.name)\n', (1621, 1630), False, 'import traceback, os, json, io, sys, imp, shlex, tempfile\n'), ((1650, 1670), 'shlex.quote', 'shlex.quote', (['fp.name'], {}), '(fp.name)\n', (1661, 1670), False, 'import traceback, os, json, io, sys, imp, shlex, tempfile\n')]
|
__all__ = ['memoize3']
from collections.abc import Callable
from typing import TypeVar, Union
from weakref import WeakKeyDictionary
A1 = TypeVar('A1')
A2 = TypeVar('A2')
A3 = TypeVar('A3')
R = TypeVar('R')
class _UndefinedType:
...
_undefined = _UndefinedType()
def memoize3(fn: Callable[[A1, A2, A3], R]) -> Callable[[A1, A2, A3], R]:
"""Memoizes the provided three-argument function."""
cache0: list[WeakKeyDictionary] = []
def memoized(a1: A1, a2: A2, a3: A3) -> R:
if len(cache0) == 0:
            cache0.append(WeakKeyDictionary())
cache1 = cache0[0].get(a1)
if cache1 is None:
cache1 = WeakKeyDictionary()
cache0[0][a1] = cache1
cache2 = cache1.get(a2)
if cache2 is None:
cache2 = WeakKeyDictionary()
cache1[a2] = cache2
fn_result: Union[_UndefinedType, R] = cache2.get(a3, _undefined)
if isinstance(fn_result, _UndefinedType):
fn_result = fn(a1, a2, a3)
cache2[a3] = fn_result
return fn_result
return memoized
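# --- Usage sketch (illustration, not part of the original module) ---
# WeakKeyDictionary only accepts weakly referenceable keys, so the demo uses a
# small placeholder class instead of ints or strings.
class _Key:
    def __init__(self, value: int) -> None:
        self.value = value


def _demo_memoize3() -> None:
    calls = []

    def combine(a: _Key, b: _Key, c: _Key) -> int:
        calls.append((a, b, c))
        return a.value + b.value + c.value

    combine_memo = memoize3(combine)
    k1, k2, k3 = _Key(1), _Key(2), _Key(3)
    assert combine_memo(k1, k2, k3) == 6
    assert combine_memo(k1, k2, k3) == 6  # second call is served from the cache
    assert len(calls) == 1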
|
[
"typing.TypeVar",
"weakref.WeakKeyDictionary"
] |
[((139, 152), 'typing.TypeVar', 'TypeVar', (['"""A1"""'], {}), "('A1')\n", (146, 152), False, 'from typing import TypeVar, Union\n'), ((158, 171), 'typing.TypeVar', 'TypeVar', (['"""A2"""'], {}), "('A2')\n", (165, 171), False, 'from typing import TypeVar, Union\n'), ((177, 190), 'typing.TypeVar', 'TypeVar', (['"""A3"""'], {}), "('A3')\n", (184, 190), False, 'from typing import TypeVar, Union\n'), ((195, 207), 'typing.TypeVar', 'TypeVar', (['"""R"""'], {}), "('R')\n", (202, 207), False, 'from typing import TypeVar, Union\n'), ((548, 567), 'weakref.WeakKeyDictionary', 'WeakKeyDictionary', ([], {}), '()\n', (565, 567), False, 'from weakref import WeakKeyDictionary\n'), ((652, 671), 'weakref.WeakKeyDictionary', 'WeakKeyDictionary', ([], {}), '()\n', (669, 671), False, 'from weakref import WeakKeyDictionary\n'), ((788, 807), 'weakref.WeakKeyDictionary', 'WeakKeyDictionary', ([], {}), '()\n', (805, 807), False, 'from weakref import WeakKeyDictionary\n')]
|
import asyncio
from typing import Union
import discord
from discord.ext import commands
from discord_slash import ComponentContext, SlashContext
from discord_slash.model import ButtonStyle
from discord_slash.utils.manage_components import (
create_actionrow,
create_button,
wait_for_component,
)
class AutoSlashEmbedPaginator(object):
def __init__(self, ctx, **kwargs):
self.embeds = None
self.ctx: SlashContext = ctx
self.bot: Union[
discord.Client,
discord.AutoShardedClient,
commands.Bot,
commands.AutoShardedBot,
] = ctx.bot
self.current_page = 0
self.auto_footer = kwargs.get("auto_footer", False)
self.remove_reactions = kwargs.get("remove_reactions", False)
self.control_emojis = ("⏪", "⬅", "🔐", "➡", "⏩")
self.timeout = int(kwargs.get("timeout", 60))
async def run(self, embeds, send_to=None):
if not send_to:
send_to = self.ctx
wait_for = self.ctx.author if send_to == self.ctx else send_to
def check(_button_ctx: ComponentContext):
return _button_ctx.author == wait_for
if not self.embeds:
self.embeds = embeds
if self.auto_footer:
self.embeds[0].set_footer(
text=f"({self.current_page + 1}/{len(self.embeds)})"
)
if len(self.control_emojis) > 5: # because only one row.
raise Exception("Because of Discord limitations, max emojis are 5.")
buttons = [
create_button(ButtonStyle.blue, emoji=emoji, custom_id=str(i))
for i, emoji in enumerate(self.control_emojis)
]
buttons_no_front = [
create_button(
ButtonStyle.blue, emoji=emoji, custom_id=str(i), disabled=i in range(2)
)
for i, emoji in enumerate(self.control_emojis)
]
buttons_no_back = [
create_button(
ButtonStyle.blue,
emoji=emoji,
custom_id=str(i),
disabled=i in range(3, 5),
)
for i, emoji in enumerate(self.control_emojis)
]
_nofront = create_actionrow(
*buttons_no_front
) # "default", first 2 buttons disabled
_noback = create_actionrow(*buttons_no_back) # last 2 buttons disabled
action_row = create_actionrow(*buttons) # no buttons disabled.
if len(self.embeds) > 1:
len_components = _nofront
msg = await send_to.send(embed=self.embeds[0], components=[len_components])
while True:
if self.timeout > 0:
try:
button_ctx: ComponentContext = await wait_for_component(
self.bot,
msg,
components=len_components,
check=check,
timeout=self.timeout,
)
except asyncio.TimeoutError:
await msg.edit(components=[])
break
else:
button_ctx: ComponentContext = await wait_for_component(
self.bot, msg, check=check, components=len_components
) # no timeout
if button_ctx.custom_id == "0": # First page of iterator.
self.current_page = 0
if self.remove_reactions:
try:
await button_ctx.edit_origin(components=[])
except:
pass
else:
if self.auto_footer:
self.embeds[0].set_footer(
text=f"({self.current_page + 1}/{len(self.embeds)})"
)
len_components = _nofront
await button_ctx.edit_origin(
embed=self.embeds[0], components=len_components
)
elif button_ctx.custom_id == "1": # page prior
self.current_page = self.current_page - 1
self.current_page = (
0 if self.current_page < 0 else self.current_page
)
if self.remove_reactions:
try:
await button_ctx.edit_origin(components=[])
except:
pass
else:
if self.auto_footer:
self.embeds[self.current_page].set_footer(
text=f"({self.current_page + 1}/{len(self.embeds)})"
)
len_components = (
action_row if self.current_page != 0 else _nofront
) # Every button is on if the page is not on the first.
await button_ctx.edit_origin(
embed=self.embeds[self.current_page],
components=len_components,
)
elif button_ctx.custom_id == "2": # Locks.
self.current_page = 0
await button_ctx.edit_origin(components=[])
break
elif button_ctx.custom_id == "3": # seeks page after.
self.current_page = self.current_page + 1
self.current_page = (
len(self.embeds) - 1
if self.current_page > len(self.embeds) - 1
else self.current_page
)
if self.remove_reactions:
try:
await button_ctx.edit_origin(components=[])
except:
pass
else:
if self.auto_footer:
self.embeds[self.current_page].set_footer(
text=f"({self.current_page + 1}/{len(self.embeds)})"
)
len_components = (
action_row
if self.current_page != len(self.embeds) - 1
else _noback
) # Every button is on if the page is not on the last.
await button_ctx.edit_origin(
embed=self.embeds[self.current_page],
components=len_components,
)
elif button_ctx.custom_id == "4": # seeks last page.
self.current_page = len(self.embeds) - 1
if self.remove_reactions:
try:
await button_ctx.edit_origin(components=[])
except:
pass
else:
if self.auto_footer:
self.embeds[len(self.embeds) - 1].set_footer(
text=f"({self.current_page + 1}/{len(self.embeds)})"
)
len_components = _noback
await button_ctx.edit_origin(
embed=self.embeds[len(self.embeds) - 1],
components=len_components,
)
else:
            await send_to.send(embed=self.embeds[0])  # There are no pages to scroll to.
class CustomAutoSlashPaginator(AutoSlashEmbedPaginator):
"""
A subclass of AutoSlashEmbedPaginator that lets you choose what emojis you want.
To use this object,you **must** override the button actions, to customise which actions
that you want to pick per emoji. With that said, you will have to implement stopping the
Pagination loop if you set the timeout kwarg to 0.
"""
def __init__(self, ctx, control_emojis, default_run: bool = False, **kwargs):
super().__init__(ctx, **kwargs)
self.control_emojis = control_emojis
self.default_run = default_run
# In the original paginator, the IDs go from 0 to 4.
    # The overridable handler methods below are instead named 1 through 5,
    # which is easier to follow when implementing them.
async def button_1_action(self, button_ctx: ComponentContext):
"""The function that's called when button "0" is clicked"""
raise NotImplementedError
async def button_2_action(self, button_ctx: ComponentContext):
"""The function that's called when button "1" is clicked"""
raise NotImplementedError
async def button_3_action(self, button_ctx: ComponentContext):
"""The function that's called when button "2" is clicked"""
raise NotImplementedError
async def button_4_action(self, button_ctx: ComponentContext):
"""The function that's called when button "3" is clicked"""
raise NotImplementedError
async def button_5_action(self, button_ctx: ComponentContext):
"""The function that's called when button "4" is clicked"""
raise NotImplementedError
async def run(self, embeds, send_to=None):
if self.default_run:
return await super().run(embeds, send_to)
if not send_to:
send_to = self.ctx
wait_for = self.ctx.author if send_to == self.ctx else send_to
def check(_button_ctx: ComponentContext):
return _button_ctx.author == wait_for
if not self.embeds:
self.embeds = embeds
if self.auto_footer:
self.embeds[0].set_footer(
text=f"({self.current_page + 1}/{len(self.embeds)})"
)
if len(self.control_emojis) > 5: # because only one row.
raise Exception("Because of Discord limitations, max emojis are 5.")
buttons = [
create_button(ButtonStyle.blue, emoji=emoji, custom_id=str(i))
for i, emoji in enumerate(self.control_emojis)
]
action_row = create_actionrow(*buttons)
if len(self.embeds) > 1:
msg = await send_to.send(embed=self.embeds[0], components=[action_row])
while True:
if self.timeout > 0:
try:
button_ctx: ComponentContext = await wait_for_component(
self.bot,
msg,
components=action_row,
check=check,
timeout=self.timeout,
)
except asyncio.TimeoutError:
await msg.edit(components=[])
break
else:
button_ctx: ComponentContext = await wait_for_component(
self.bot, msg, check=check, components=action_row
) # no timeout
if button_ctx.custom_id == "0":
await self.button_1_action(button_ctx)
elif button_ctx.custom_id == "1":
await self.button_2_action(button_ctx)
elif button_ctx.custom_id == "2":
await self.button_3_action(button_ctx)
elif button_ctx.custom_id == "3":
await self.button_4_action(button_ctx)
elif button_ctx.custom_id == "4":
await self.button_5_action(button_ctx)
else:
            await send_to.send(embed=self.embeds[0])  # There are no pages to scroll to.
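# --- Usage sketch (illustration; the slash command below is hypothetical) ---
# Inside a slash command, build the embeds and hand them to the paginator, e.g.:
#
#     @slash.slash(name="help")
#     async def help_command(ctx: SlashContext):
#         embeds = [discord.Embed(title=f"Page {i + 1}") for i in range(3)]
#         paginator = AutoSlashEmbedPaginator(ctx, auto_footer=True, timeout=60)
#         await paginator.run(embeds)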
|
[
"discord_slash.utils.manage_components.create_actionrow",
"discord_slash.utils.manage_components.wait_for_component"
] |
[((2223, 2258), 'discord_slash.utils.manage_components.create_actionrow', 'create_actionrow', (['*buttons_no_front'], {}), '(*buttons_no_front)\n', (2239, 2258), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((2338, 2372), 'discord_slash.utils.manage_components.create_actionrow', 'create_actionrow', (['*buttons_no_back'], {}), '(*buttons_no_back)\n', (2354, 2372), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((2422, 2448), 'discord_slash.utils.manage_components.create_actionrow', 'create_actionrow', (['*buttons'], {}), '(*buttons)\n', (2438, 2448), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((10490, 10516), 'discord_slash.utils.manage_components.create_actionrow', 'create_actionrow', (['*buttons'], {}), '(*buttons)\n', (10506, 10516), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((3256, 3329), 'discord_slash.utils.manage_components.wait_for_component', 'wait_for_component', (['self.bot', 'msg'], {'check': 'check', 'components': 'len_components'}), '(self.bot, msg, check=check, components=len_components)\n', (3274, 3329), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((11254, 11323), 'discord_slash.utils.manage_components.wait_for_component', 'wait_for_component', (['self.bot', 'msg'], {'check': 'check', 'components': 'action_row'}), '(self.bot, msg, check=check, components=action_row)\n', (11272, 11323), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((2781, 2880), 'discord_slash.utils.manage_components.wait_for_component', 'wait_for_component', (['self.bot', 'msg'], {'components': 'len_components', 'check': 'check', 'timeout': 'self.timeout'}), '(self.bot, msg, components=len_components, check=check,\n timeout=self.timeout)\n', (2799, 2880), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n'), ((10783, 10878), 'discord_slash.utils.manage_components.wait_for_component', 'wait_for_component', (['self.bot', 'msg'], {'components': 'action_row', 'check': 'check', 'timeout': 'self.timeout'}), '(self.bot, msg, components=action_row, check=check,\n timeout=self.timeout)\n', (10801, 10878), False, 'from discord_slash.utils.manage_components import create_actionrow, create_button, wait_for_component\n')]
|
import tempfile
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.generic import View
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import ParagraphStyle, StyleSheet1
from reportlab.lib.units import mm
from reportlab.platypus import (
BaseDocTemplate, Flowable, Frame, PageTemplate, Paragraph,
)
from pretalx.common.mixins.views import PermissionRequired
from pretalx.submission.models import SubmissionStates
def ellipsize(text, length=200):
if len(text) > length:
return text[:length] + "…"
else:
return text
class SubmissionCard(Flowable):
def __init__(self, submission, styles, width):
super().__init__()
self.submission = submission
self.styles = styles
self.width = width
self.height = min(2.5 * max(submission.get_duration(), 30) * mm, A4[1])
def coord(self, x, y, unit=1):
"""
http://stackoverflow.com/questions/4726011/wrap-text-in-a-table-reportlab
        Helper method to position flowables on the canvas.
"""
x, y = x * unit, self.height - y * unit
return x, y
def draw(self):
self.canv.rect(0, 0, self.width, self.height)
self.canv.rotate(90)
self.canv.setFont("Helvetica", 16)
self.canv.drawString(10 * mm, - 12 * mm, str(self.submission.submission_type.name))
self.canv.rotate(-90)
p = Paragraph(self.submission.title, style=self.styles["Title"])
w, h = p.wrapOn(self.canv, self.width - 30 * mm, 50 * mm)
y = h + 10 * mm
p.drawOn(self.canv, *self.coord(20 * mm, y))
p = Paragraph(", ".join(
[s.get_display_name() for s in self.submission.speakers.all()]
), style=self.styles["Speaker"])
w, h = p.wrapOn(self.canv, self.width - 30 * mm, 50 * mm)
y += h + 2 * mm
p.drawOn(self.canv, *self.coord(20 * mm, y))
p = Paragraph(_('{} minutes, #{}, {}, {}').format(
self.submission.get_duration(),
self.submission.code,
self.submission.content_locale,
self.submission.state
), style=self.styles["Meta"])
w, h = p.wrapOn(self.canv, self.width - 30 * mm, 50 * mm)
y += h + 2 * mm
p.drawOn(self.canv, *self.coord(20 * mm, y))
if self.submission.abstract:
p = Paragraph(ellipsize(self.submission.abstract, 140), style=self.styles["Meta"])
w, h = p.wrapOn(self.canv, self.width - 30 * mm, 50 * mm)
y += h + 2 * mm
p.drawOn(self.canv, *self.coord(20 * mm, y))
if self.submission.notes:
p = Paragraph(ellipsize(self.submission.notes, 140), style=self.styles["Meta"])
w, h = p.wrapOn(self.canv, self.width - 30 * mm, 50 * mm)
y += h + 2 * mm
p.drawOn(self.canv, *self.coord(20 * mm, y))
class SubmissionCards(PermissionRequired, View):
permission_required = 'orga.view_submission_cards'
def get_permission_object(self):
return self.request.event
def get_queryset(self):
return self.request.event.submissions.select_related(
'submission_type'
).prefetch_related(
'speakers'
).filter(
state__in=[SubmissionStates.ACCEPTED, SubmissionStates.CONFIRMED, SubmissionStates.SUBMITTED]
)
def get(self, request, *args, **kwargs):
with tempfile.NamedTemporaryFile(suffix=".pdf") as f:
doc = BaseDocTemplate(
f.name, pagesize=A4,
leftMargin=0, rightMargin=0, topMargin=0, bottomMargin=0
)
doc.addPageTemplates([
PageTemplate(
id='All',
frames=[
Frame(
0, 0, doc.width / 2, doc.height,
leftPadding=0, rightPadding=0, topPadding=0, bottomPadding=0,
id='left'
),
Frame(
doc.width / 2, 0, doc.width / 2, doc.height,
leftPadding=0, rightPadding=0, topPadding=0, bottomPadding=0,
id='right'
)
],
pagesize=A4
)
])
doc.build(self.get_story(doc))
f.seek(0)
r = HttpResponse(content_type='application/pdf')
r.write(f.read())
return r
def get_style(self):
stylesheet = StyleSheet1()
stylesheet.add(ParagraphStyle(name='Normal', fontName='Helvetica', fontSize=12, leading=14))
stylesheet.add(ParagraphStyle(name='Title', fontName='Helvetica-Bold', fontSize=14, leading=16))
stylesheet.add(ParagraphStyle(name='Speaker', fontName='Helvetica-Oblique', fontSize=12, leading=14))
stylesheet.add(ParagraphStyle(name='Meta', fontName='Helvetica', fontSize=10, leading=12))
return stylesheet
def get_story(self, doc):
styles = self.get_style()
story = []
for s in self.get_queryset():
story.append(SubmissionCard(s, styles, doc.width / 2))
return story
|
[
"tempfile.NamedTemporaryFile",
"django.http.HttpResponse",
"reportlab.platypus.Paragraph",
"reportlab.platypus.BaseDocTemplate",
"reportlab.lib.styles.ParagraphStyle",
"reportlab.platypus.Frame",
"django.utils.translation.ugettext",
"reportlab.lib.styles.StyleSheet1"
] |
[((1462, 1522), 'reportlab.platypus.Paragraph', 'Paragraph', (['self.submission.title'], {'style': "self.styles['Title']"}), "(self.submission.title, style=self.styles['Title'])\n", (1471, 1522), False, 'from reportlab.platypus import BaseDocTemplate, Flowable, Frame, PageTemplate, Paragraph\n'), ((4619, 4632), 'reportlab.lib.styles.StyleSheet1', 'StyleSheet1', ([], {}), '()\n', (4630, 4632), False, 'from reportlab.lib.styles import ParagraphStyle, StyleSheet1\n'), ((3469, 3511), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""'}), "(suffix='.pdf')\n", (3496, 3511), False, 'import tempfile\n'), ((3536, 3635), 'reportlab.platypus.BaseDocTemplate', 'BaseDocTemplate', (['f.name'], {'pagesize': 'A4', 'leftMargin': '(0)', 'rightMargin': '(0)', 'topMargin': '(0)', 'bottomMargin': '(0)'}), '(f.name, pagesize=A4, leftMargin=0, rightMargin=0, topMargin\n =0, bottomMargin=0)\n', (3551, 3635), False, 'from reportlab.platypus import BaseDocTemplate, Flowable, Frame, PageTemplate, Paragraph\n'), ((4476, 4520), 'django.http.HttpResponse', 'HttpResponse', ([], {'content_type': '"""application/pdf"""'}), "(content_type='application/pdf')\n", (4488, 4520), False, 'from django.http import HttpResponse\n'), ((4656, 4732), 'reportlab.lib.styles.ParagraphStyle', 'ParagraphStyle', ([], {'name': '"""Normal"""', 'fontName': '"""Helvetica"""', 'fontSize': '(12)', 'leading': '(14)'}), "(name='Normal', fontName='Helvetica', fontSize=12, leading=14)\n", (4670, 4732), False, 'from reportlab.lib.styles import ParagraphStyle, StyleSheet1\n'), ((4757, 4842), 'reportlab.lib.styles.ParagraphStyle', 'ParagraphStyle', ([], {'name': '"""Title"""', 'fontName': '"""Helvetica-Bold"""', 'fontSize': '(14)', 'leading': '(16)'}), "(name='Title', fontName='Helvetica-Bold', fontSize=14, leading=16\n )\n", (4771, 4842), False, 'from reportlab.lib.styles import ParagraphStyle, StyleSheet1\n'), ((4862, 4951), 'reportlab.lib.styles.ParagraphStyle', 'ParagraphStyle', ([], {'name': '"""Speaker"""', 'fontName': '"""Helvetica-Oblique"""', 'fontSize': '(12)', 'leading': '(14)'}), "(name='Speaker', fontName='Helvetica-Oblique', fontSize=12,\n leading=14)\n", (4876, 4951), False, 'from reportlab.lib.styles import ParagraphStyle, StyleSheet1\n'), ((4972, 5046), 'reportlab.lib.styles.ParagraphStyle', 'ParagraphStyle', ([], {'name': '"""Meta"""', 'fontName': '"""Helvetica"""', 'fontSize': '(10)', 'leading': '(12)'}), "(name='Meta', fontName='Helvetica', fontSize=10, leading=12)\n", (4986, 5046), False, 'from reportlab.lib.styles import ParagraphStyle, StyleSheet1\n'), ((1982, 2010), 'django.utils.translation.ugettext', '_', (['"""{} minutes, #{}, {}, {}"""'], {}), "('{} minutes, #{}, {}, {}')\n", (1983, 2010), True, 'from django.utils.translation import ugettext as _\n'), ((3825, 3940), 'reportlab.platypus.Frame', 'Frame', (['(0)', '(0)', '(doc.width / 2)', 'doc.height'], {'leftPadding': '(0)', 'rightPadding': '(0)', 'topPadding': '(0)', 'bottomPadding': '(0)', 'id': '"""left"""'}), "(0, 0, doc.width / 2, doc.height, leftPadding=0, rightPadding=0,\n topPadding=0, bottomPadding=0, id='left')\n", (3830, 3940), False, 'from reportlab.platypus import BaseDocTemplate, Flowable, Frame, PageTemplate, Paragraph\n'), ((4072, 4200), 'reportlab.platypus.Frame', 'Frame', (['(doc.width / 2)', '(0)', '(doc.width / 2)', 'doc.height'], {'leftPadding': '(0)', 'rightPadding': '(0)', 'topPadding': '(0)', 'bottomPadding': '(0)', 'id': '"""right"""'}), "(doc.width / 2, 0, doc.width / 2, doc.height, leftPadding=0,\n 
rightPadding=0, topPadding=0, bottomPadding=0, id='right')\n", (4077, 4200), False, 'from reportlab.platypus import BaseDocTemplate, Flowable, Frame, PageTemplate, Paragraph\n')]
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from w3lib.html import remove_tags
def remove_whitspaces(value):
return value.strip()
class ZenaCrawlerItem(scrapy.Item):
# define the fields for your item here like:
category = scrapy.Field()
headline = scrapy.Field()
text = scrapy.Field(
input_processor=MapCompose(remove_tags, remove_whitspaces),
output_processor=TakeFirst(),
)
pass
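# --- Usage sketch (illustration, not part of the original file) ---
# The processors declared on the fields above only run when the item is filled
# through an ItemLoader; the CSS selectors here are hypothetical.
from scrapy.loader import ItemLoader
from scrapy.selector import Selector

def load_example_item(html: str) -> ZenaCrawlerItem:
    loader = ItemLoader(item=ZenaCrawlerItem(), selector=Selector(text=html))
    loader.add_value("category", "news")
    loader.add_css("headline", "h1::text")
    loader.add_css("text", "div.article-body")  # tags stripped, whitespace trimmed
    return loader.load_item()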
|
[
"scrapy.loader.processors.MapCompose",
"scrapy.loader.processors.TakeFirst",
"scrapy.Field"
] |
[((400, 414), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (412, 414), False, 'import scrapy\n'), ((430, 444), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (442, 444), False, 'import scrapy\n'), ((494, 536), 'scrapy.loader.processors.MapCompose', 'MapCompose', (['remove_tags', 'remove_whitspaces'], {}), '(remove_tags, remove_whitspaces)\n', (504, 536), False, 'from scrapy.loader.processors import Join, MapCompose, TakeFirst\n'), ((563, 574), 'scrapy.loader.processors.TakeFirst', 'TakeFirst', ([], {}), '()\n', (572, 574), False, 'from scrapy.loader.processors import Join, MapCompose, TakeFirst\n')]
|
import typing
import sys
import numpy as np
import numba as nb
@nb.njit((nb.i8[:, :], ), cache=True)
def solve(ab: np.ndarray) -> typing.NoReturn:
n = len(ab)
a, b = ab[:, 0], ab[:, 1]
a.sort()
b.sort()
if n & 1:
s = b[n >> 1] - a[n >> 1] + 1
else:
hi = b[n >> 1] + b[(n >> 1) - 1]
lo = a[n >> 1] + a[(n >> 1) - 1]
s = hi - lo + 1
print(s)
def main() -> typing.NoReturn:
n = int(input())
ab = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(n, 2)
solve(ab)
main()
|
[
"numba.njit",
"sys.stdin.read"
] |
[((69, 104), 'numba.njit', 'nb.njit', (['(nb.i8[:, :],)'], {'cache': '(True)'}), '((nb.i8[:, :],), cache=True)\n', (76, 104), True, 'import numba as nb\n'), ((448, 464), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (462, 464), False, 'import sys\n')]
|
import torch
from torch import nn
from torch.nn import functional as F
class BatchNorm2d(nn.Module):
"""
Fixed version of BatchNorm2d, which has only the scale and bias
"""
def __init__(self, out):
super(BatchNorm2d, self).__init__()
self.register_buffer("scale", torch.ones(out))
self.register_buffer("bias", torch.zeros(out))
# @torch.jit.script_method
def forward(self, x):
scale = self.scale.view(1, -1, 1, 1)
bias = self.bias.view(1, -1, 1, 1)
return x * scale + bias
class BiasAdd(nn.Module):
"""
    Bias-only variant of the fixed BatchNorm2d above: it keeps just the bias term (no scale)
"""
def __init__(self, out):
super(BiasAdd, self).__init__()
self.register_buffer("bias", torch.zeros(out))
# @torch.jit.script_method
def forward(self, x):
bias = self.bias.view(1, -1, 1, 1)
return x + bias
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get("padding", "SAME")
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(
0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
# import pdb; pdb.set_trace()
if self.padding == "VALID":
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=0,
dilation=self.dilation,
groups=self.groups,
)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(
input,
self.weight,
self.bias,
self.stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=self.dilation,
groups=self.groups,
)
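# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# With the default padding="SAME", the output spatial size is ceil(input / stride),
# matching TensorFlow's convention even when asymmetric padding is required.
def _conv2d_tf_same_padding_demo():
    conv = Conv2d_tf(3, 8, kernel_size=3, stride=2)  # padding defaults to "SAME"
    x = torch.randn(1, 3, 7, 7)
    y = conv(x)
    # ceil(7 / 2) == 4, so y.shape is expected to be (1, 8, 4, 4)
    return y.shape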
def box_area(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = torch.clamp(right_bottom - left_top, min=0.0)
return hw[..., 0] * hw[..., 1]
def box_iou(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = box_area(overlap_left_top, overlap_right_bottom)
area0 = box_area(boxes0[..., :2], boxes0[..., 2:])
area1 = box_area(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def nms(box_scores, iou_threshold):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
_, indexes = scores.sort(descending=True)
while len(indexes) > 0:
current = indexes[0]
picked.append(current.item())
if len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[1:]
rest_boxes = boxes[indexes, :]
iou = box_iou(rest_boxes, current_box.unsqueeze(0))
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
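# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Rows of box_scores are [x1, y1, x2, y2, score]. With an IoU threshold of 0.5 the two
# heavily overlapping boxes below collapse to the higher-scoring one; the distant box is kept.
def _nms_demo():
    box_scores = torch.tensor(
        [
            [0.0, 0.0, 10.0, 10.0, 0.9],    # kept (highest score)
            [1.0, 1.0, 10.0, 10.0, 0.8],    # suppressed: IoU with the first box is ~0.81
            [20.0, 20.0, 30.0, 30.0, 0.7],  # kept: no overlap
        ]
    )
    return nms(box_scores, iou_threshold=0.5)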
@torch.jit.script
def decode_boxes(rel_codes, boxes, weights):
# type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
# perform some unpacking to make it JIT-fusion friendly
# rel_codes=rel_codes[0][None]
wx = weights[1]
wy = weights[0]
ww = weights[3]
wh = weights[2]
boxes_x1 = boxes[:, 1].unsqueeze(1).unsqueeze(0)
boxes_y1 = boxes[:, 0].unsqueeze(1).unsqueeze(0)
boxes_x2 = boxes[:, 3].unsqueeze(1).unsqueeze(0)
boxes_y2 = boxes[:, 2].unsqueeze(1).unsqueeze(0)
dx = rel_codes[:, :, 1].unsqueeze(2)
dy = rel_codes[:, :, 0].unsqueeze(2)
dw = rel_codes[:, :, 3].unsqueeze(2)
dh = rel_codes[:, :, 2].unsqueeze(2)
# implementation starts here
widths = boxes_x2 - boxes_x1
heights = boxes_y2 - boxes_y1
ctr_x = boxes_x1 + 0.5 * widths
ctr_y = boxes_y1 + 0.5 * heights
dx = dx / wx
dy = dy / wy
dw = dw / ww
dh = dh / wh
pred_ctr_x = dx * widths + ctr_x
# import pdb; pdb.set_trace()
pred_ctr_y = dy * heights + ctr_y
pred_w = torch.exp(dw) * widths
pred_h = torch.exp(dh) * heights
pred_boxes = torch.cat(
[
pred_ctr_x - 0.5 * pred_w,
pred_ctr_y - 0.5 * pred_h,
pred_ctr_x + 0.5 * pred_w,
pred_ctr_y + 0.5 * pred_h,
],
dim=2,
)
# import pdb; pdb.set_trace()
return pred_boxes
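# --- Added commentary and a tiny check (hedged; not part of the original module) ---
# As written above, `boxes` (anchors) are read in (y1, x1, y2, x2) order, `weights`
# in (wy, wx, wh, ww) order, and the decoded boxes come back as (x1, y1, x2, y2).
# The 10/10/5/5 values below are only an assumed, commonly used choice of weights.
def _decode_boxes_demo():
    anchors = torch.tensor([[0.0, 0.0, 1.0, 1.0]])  # one unit anchor, (y1, x1, y2, x2)
    deltas = torch.zeros(1, 1, 4)                  # zero deltas leave the anchor unchanged
    weights = torch.tensor([10.0, 10.0, 5.0, 5.0])
    return decode_boxes(deltas, anchors, weights)  # expected: tensor([[[0., 0., 1., 1.]]])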
|
[
"torch.ones",
"torch.nn.functional.conv2d",
"torch.cat",
"torch.exp",
"torch.clamp",
"torch.max",
"torch.zeros",
"torch.min",
"torch.nn.functional.pad"
] |
[((2784, 2829), 'torch.clamp', 'torch.clamp', (['(right_bottom - left_top)'], {'min': '(0.0)'}), '(right_bottom - left_top, min=0.0)\n', (2795, 2829), False, 'import torch\n'), ((3197, 3240), 'torch.max', 'torch.max', (['boxes0[..., :2]', 'boxes1[..., :2]'], {}), '(boxes0[..., :2], boxes1[..., :2])\n', (3206, 3240), False, 'import torch\n'), ((3268, 3311), 'torch.min', 'torch.min', (['boxes0[..., 2:]', 'boxes1[..., 2:]'], {}), '(boxes0[..., 2:], boxes1[..., 2:])\n', (3277, 3311), False, 'import torch\n'), ((5463, 5593), 'torch.cat', 'torch.cat', (['[pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h, pred_ctr_x + 0.5 *\n pred_w, pred_ctr_y + 0.5 * pred_h]'], {'dim': '(2)'}), '([pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h, pred_ctr_x +\n 0.5 * pred_w, pred_ctr_y + 0.5 * pred_h], dim=2)\n', (5472, 5593), False, 'import torch\n'), ((2277, 2425), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'self.weight', 'self.bias', 'self.stride'], {'padding': '(padding_rows // 2, padding_cols // 2)', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(input, self.weight, self.bias, self.stride, padding=(padding_rows //\n 2, padding_cols // 2), dilation=self.dilation, groups=self.groups)\n', (2285, 2425), True, 'from torch.nn import functional as F\n'), ((5385, 5398), 'torch.exp', 'torch.exp', (['dw'], {}), '(dw)\n', (5394, 5398), False, 'import torch\n'), ((5421, 5434), 'torch.exp', 'torch.exp', (['dh'], {}), '(dh)\n', (5430, 5434), False, 'import torch\n'), ((299, 314), 'torch.ones', 'torch.ones', (['out'], {}), '(out)\n', (309, 314), False, 'import torch\n'), ((353, 369), 'torch.zeros', 'torch.zeros', (['out'], {}), '(out)\n', (364, 369), False, 'import torch\n'), ((768, 784), 'torch.zeros', 'torch.zeros', (['out'], {}), '(out)\n', (779, 784), False, 'import torch\n'), ((1794, 1906), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'self.weight', 'self.bias', 'self.stride'], {'padding': '(0)', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(input, self.weight, self.bias, self.stride, padding=0, dilation=\n self.dilation, groups=self.groups)\n', (1802, 1906), True, 'from torch.nn import functional as F\n'), ((2220, 2260), 'torch.nn.functional.pad', 'F.pad', (['input', '[0, cols_odd, 0, rows_odd]'], {}), '(input, [0, cols_odd, 0, rows_odd])\n', (2225, 2260), True, 'from torch.nn import functional as F\n')]
|
from textgenrnn import textgenrnn
textgen = textgenrnn('/home/amazonec2/hacker_news.hdf5')
def text_to_stego(ciphertext_to_steg):
stegotext = textgen.generate(interactive=True, temperature=0.2, top_n=2, ciphertext=ciphertext_to_steg)
print(stegotext)
return stegotext
text_to_stego(b'rnaodmdomeodshit')
|
[
"textgenrnn.textgenrnn"
] |
[((45, 91), 'textgenrnn.textgenrnn', 'textgenrnn', (['"""/home/amazonec2/hacker_news.hdf5"""'], {}), "('/home/amazonec2/hacker_news.hdf5')\n", (55, 91), False, 'from textgenrnn import textgenrnn\n')]
|
# encoding: utf-8
import os
import numpy as np
from histolab.slide import Slide
from histolab.tiler import GridTiler, RandomTiler, ScoreTiler
from histolab.scorer import NucleiScorer
from ..fixtures import SVS
from ..util import load_expectation
class DescribeRandomTiler:
def it_locates_tiles_on_the_slide(self, tmpdir):
slide = Slide(SVS.CMU_1_SMALL_REGION, os.path.join(tmpdir, "processed"))
slide.save_scaled_image(10)
random_tiles_extractor = RandomTiler(
tile_size=(512, 512), n_tiles=2, level=0, seed=42, check_tissue=False
)
expectation = load_expectation(
"tiles-location-images/cmu-1-small-region-tiles-location-random",
type_="png",
)
tiles_location_img = random_tiles_extractor.locate_tiles(slide, scale_factor=10)
np.testing.assert_array_almost_equal(
np.asarray(tiles_location_img), expectation
)
class DescribeGridTiler:
def it_locates_tiles_on_the_slide(self, tmpdir):
slide = Slide(SVS.CMU_1_SMALL_REGION, os.path.join(tmpdir, "processed"))
grid_tiles_extractor = GridTiler(
tile_size=(512, 512),
level=0,
check_tissue=False,
)
expectation = load_expectation(
"tiles-location-images/cmu-1-small-region-tiles-location-grid", type_="png"
)
tiles_location_img = grid_tiles_extractor.locate_tiles(slide, scale_factor=10)
np.testing.assert_array_almost_equal(
np.asarray(tiles_location_img), expectation
)
class DescribeScoreTiler:
def it_locates_tiles_on_the_slide(self, tmpdir):
slide = Slide(SVS.CMU_1_SMALL_REGION, os.path.join(tmpdir, "processed"))
scored_tiles_extractor = ScoreTiler(
scorer=NucleiScorer(),
tile_size=(512, 512),
n_tiles=100,
level=0,
check_tissue=False,
)
expectation = load_expectation(
"tiles-location-images/cmu-1-small-region-tiles-location-scored",
type_="png",
)
scored_location_img = scored_tiles_extractor.locate_tiles(
slide, scale_factor=10
)
np.testing.assert_array_almost_equal(
np.asarray(scored_location_img), expectation
)
|
[
"histolab.tiler.RandomTiler",
"numpy.asarray",
"histolab.tiler.GridTiler",
"os.path.join",
"histolab.scorer.NucleiScorer"
] |
[((482, 569), 'histolab.tiler.RandomTiler', 'RandomTiler', ([], {'tile_size': '(512, 512)', 'n_tiles': '(2)', 'level': '(0)', 'seed': '(42)', 'check_tissue': '(False)'}), '(tile_size=(512, 512), n_tiles=2, level=0, seed=42, check_tissue\n =False)\n', (493, 569), False, 'from histolab.tiler import GridTiler, RandomTiler, ScoreTiler\n'), ((1134, 1194), 'histolab.tiler.GridTiler', 'GridTiler', ([], {'tile_size': '(512, 512)', 'level': '(0)', 'check_tissue': '(False)'}), '(tile_size=(512, 512), level=0, check_tissue=False)\n', (1143, 1194), False, 'from histolab.tiler import GridTiler, RandomTiler, ScoreTiler\n'), ((378, 411), 'os.path.join', 'os.path.join', (['tmpdir', '"""processed"""'], {}), "(tmpdir, 'processed')\n", (390, 411), False, 'import os\n'), ((888, 918), 'numpy.asarray', 'np.asarray', (['tiles_location_img'], {}), '(tiles_location_img)\n', (898, 918), True, 'import numpy as np\n'), ((1068, 1101), 'os.path.join', 'os.path.join', (['tmpdir', '"""processed"""'], {}), "(tmpdir, 'processed')\n", (1080, 1101), False, 'import os\n'), ((1526, 1556), 'numpy.asarray', 'np.asarray', (['tiles_location_img'], {}), '(tiles_location_img)\n', (1536, 1556), True, 'import numpy as np\n'), ((1707, 1740), 'os.path.join', 'os.path.join', (['tmpdir', '"""processed"""'], {}), "(tmpdir, 'processed')\n", (1719, 1740), False, 'import os\n'), ((2268, 2299), 'numpy.asarray', 'np.asarray', (['scored_location_img'], {}), '(scored_location_img)\n', (2278, 2299), True, 'import numpy as np\n'), ((1806, 1820), 'histolab.scorer.NucleiScorer', 'NucleiScorer', ([], {}), '()\n', (1818, 1820), False, 'from histolab.scorer import NucleiScorer\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/devtools/resultstore/v2/upload_metadata.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/devtools/resultstore/v2/upload_metadata.proto',
package='google.devtools.resultstore.v2',
syntax='proto3',
serialized_options=b'\n\"com.google.devtools.resultstore.v2P\001ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstore',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n4google/devtools/resultstore/v2/upload_metadata.proto\x12\x1egoogle.devtools.resultstore.v2\"L\n\x0eUploadMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0cresume_token\x18\x02 \x01(\t\x12\x16\n\x0euploader_state\x18\x03 \x01(\x0c\x42q\n\"com.google.devtools.resultstore.v2P\x01ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstoreb\x06proto3'
)
_UPLOADMETADATA = _descriptor.Descriptor(
name='UploadMetadata',
full_name='google.devtools.resultstore.v2.UploadMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.devtools.resultstore.v2.UploadMetadata.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resume_token', full_name='google.devtools.resultstore.v2.UploadMetadata.resume_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uploader_state', full_name='google.devtools.resultstore.v2.UploadMetadata.uploader_state', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=164,
)
DESCRIPTOR.message_types_by_name['UploadMetadata'] = _UPLOADMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UploadMetadata = _reflection.GeneratedProtocolMessageType('UploadMetadata', (_message.Message,), {
'DESCRIPTOR' : _UPLOADMETADATA,
'__module__' : 'google.devtools.resultstore.v2.upload_metadata_pb2'
# @@protoc_insertion_point(class_scope:google.devtools.resultstore.v2.UploadMetadata)
})
_sym_db.RegisterMessage(UploadMetadata)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.descriptor.FileDescriptor"
] |
[((451, 477), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (475, 477), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((495, 1243), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""google/devtools/resultstore/v2/upload_metadata.proto"""', 'package': '"""google.devtools.resultstore.v2"""', 'syntax': '"""proto3"""', 'serialized_options': 'b\'\\n"com.google.devtools.resultstore.v2P\\x01ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstore\'', 'create_key': '_descriptor._internal_create_key', 'serialized_pb': 'b\'\\n4google/devtools/resultstore/v2/upload_metadata.proto\\x12\\x1egoogle.devtools.resultstore.v2"L\\n\\x0eUploadMetadata\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x14\\n\\x0cresume_token\\x18\\x02 \\x01(\\t\\x12\\x16\\n\\x0euploader_state\\x18\\x03 \\x01(\\x0cBq\\n"com.google.devtools.resultstore.v2P\\x01ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstoreb\\x06proto3\''}), '(name=\n \'google/devtools/resultstore/v2/upload_metadata.proto\', package=\n \'google.devtools.resultstore.v2\', syntax=\'proto3\', serialized_options=\n b\'\\n"com.google.devtools.resultstore.v2P\\x01ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstore\'\n , create_key=_descriptor._internal_create_key, serialized_pb=\n b\'\\n4google/devtools/resultstore/v2/upload_metadata.proto\\x12\\x1egoogle.devtools.resultstore.v2"L\\n\\x0eUploadMetadata\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x14\\n\\x0cresume_token\\x18\\x02 \\x01(\\t\\x12\\x16\\n\\x0euploader_state\\x18\\x03 \\x01(\\x0cBq\\n"com.google.devtools.resultstore.v2P\\x01ZIgoogle.golang.org/genproto/googleapis/devtools/resultstore/v2;resultstoreb\\x06proto3\'\n )\n', (521, 1243), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3181, 3370), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""UploadMetadata"""', '(_message.Message,)', "{'DESCRIPTOR': _UPLOADMETADATA, '__module__':\n 'google.devtools.resultstore.v2.upload_metadata_pb2'}"], {}), "('UploadMetadata', (_message.\n Message,), {'DESCRIPTOR': _UPLOADMETADATA, '__module__':\n 'google.devtools.resultstore.v2.upload_metadata_pb2'})\n", (3221, 3370), True, 'from google.protobuf import reflection as _reflection\n'), ((2388, 2813), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""uploader_state"""', 'full_name': '"""google.devtools.resultstore.v2.UploadMetadata.uploader_state"""', 'index': '(2)', 'number': '(3)', 'type': '(12)', 'cpp_type': '(9)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': "b''", 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='uploader_state', full_name=\n 'google.devtools.resultstore.v2.UploadMetadata.uploader_state', index=2,\n number=3, type=12, cpp_type=9, label=1, has_default_value=False,\n default_value=b'', message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, serialized_options=None,\n file=DESCRIPTOR, create_key=_descriptor._internal_create_key)\n", (2415, 2813), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
from flask import Blueprint, render_template
from flask_jwt_extended import jwt_required
from IFSensor.views.view_controller import view_controller
views = Blueprint('views', __name__)
@views.route('/')
@jwt_required(optional=True, locations=["headers", "cookies"])
def home():
return render_template('home.html', controller=view_controller)
@views.route('/system/')
@jwt_required(optional=True, locations=["headers", "cookies"])
def control_panel():
return render_template('control-panel.html', controller=view_controller)
|
[
"flask_jwt_extended.jwt_required",
"flask.Blueprint",
"flask.render_template"
] |
[((157, 185), 'flask.Blueprint', 'Blueprint', (['"""views"""', '__name__'], {}), "('views', __name__)\n", (166, 185), False, 'from flask import Blueprint, render_template\n'), ((206, 267), 'flask_jwt_extended.jwt_required', 'jwt_required', ([], {'optional': '(True)', 'locations': "['headers', 'cookies']"}), "(optional=True, locations=['headers', 'cookies'])\n", (218, 267), False, 'from flask_jwt_extended import jwt_required\n'), ((375, 436), 'flask_jwt_extended.jwt_required', 'jwt_required', ([], {'optional': '(True)', 'locations': "['headers', 'cookies']"}), "(optional=True, locations=['headers', 'cookies'])\n", (387, 436), False, 'from flask_jwt_extended import jwt_required\n'), ((291, 347), 'flask.render_template', 'render_template', (['"""home.html"""'], {'controller': 'view_controller'}), "('home.html', controller=view_controller)\n", (306, 347), False, 'from flask import Blueprint, render_template\n'), ((469, 534), 'flask.render_template', 'render_template', (['"""control-panel.html"""'], {'controller': 'view_controller'}), "('control-panel.html', controller=view_controller)\n", (484, 534), False, 'from flask import Blueprint, render_template\n')]
|
import numpy
import cupy
from cupy import core
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
This function uses the first N elements of `vals`, where N is the number
of true values in `mask`.
Args:
arr (cupy.ndarray): Array to put data into.
mask (array-like): Boolean mask array. Must have the same size as `a`.
vals (array-like): Values to put into `a`. Only the first
N elements are used, where N is the number of True values in
`mask`. If `vals` is smaller than N, it will be repeated, and if
elements of `a` are to be masked, this sequence must be non-empty.
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.place`
"""
# TODO(niboshi): Avoid nonzero which may synchronize the device.
mask = cupy.asarray(mask)
if arr.size != mask.size:
raise ValueError('Mask and data must be the same size.')
vals = cupy.asarray(vals)
mask_indices = mask.ravel().nonzero()[0] # may synchronize
if mask_indices.size == 0:
return
if vals.size == 0:
raise ValueError('Cannot insert from an empty array.')
arr.put(mask_indices, vals, mode='wrap')
def put(a, ind, v, mode='wrap'):
"""Replaces specified elements of an array with given values.
Args:
a (cupy.ndarray): Target array.
ind (array-like): Target indices, interpreted as integers.
v (array-like): Values to place in `a` at target indices.
If `v` is shorter than `ind` it will be repeated as necessary.
mode (str): How out-of-bounds indices will behave. Its value must be
either `'raise'`, `'wrap'` or `'clip'`. Otherwise,
:class:`TypeError` is raised.
.. note::
Default `mode` is set to `'wrap'` to avoid unintended performance drop.
If you need NumPy's behavior, please pass `mode='raise'` manually.
.. seealso:: :func:`numpy.put`
"""
a.put(ind, v, mode=mode)
_putmask_kernel = core.ElementwiseKernel(
'Q mask, raw S values, uint64 len_vals', 'T out',
'''
if (mask) out = (T) values[i % len_vals];
''',
'putmask_kernel'
)
def putmask(a, mask, values):
"""
Changes elements of an array inplace, based on a conditional mask and
input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
Args:
a (cupy.ndarray): Target array.
mask (cupy.ndarray): Boolean mask array. It has to be
the same shape as `a`.
values (cupy.ndarray or scalar): Values to put into `a` where `mask`
is True. If `values` is smaller than `a`, then it will be
repeated.
Examples
--------
>>> x = cupy.arange(6).reshape(2, 3)
>>> cupy.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = cupy.arange(6)
>>> cupy.putmask(x, x>2, cupy.array([-33, -44]))
>>> x
array([ 0, 1, 2, -44, -33, -44])
.. seealso:: :func:`numpy.putmask`
"""
if not isinstance(a, cupy.ndarray):
raise TypeError('`a` should be of type cupy.ndarray')
if not isinstance(mask, cupy.ndarray):
raise TypeError('`mask` should be of type cupy.ndarray')
if not (cupy.isscalar(values) or isinstance(values, cupy.ndarray)):
raise TypeError('`values` should be of type cupy.ndarray')
if not a.shape == mask.shape:
raise ValueError('mask and data must be the same size')
mask = mask.astype(numpy.bool_)
if cupy.isscalar(values):
a[mask] = values
elif not numpy.can_cast(values.dtype, a.dtype):
raise TypeError('Cannot cast array data from'
' {} to {} according to the rule \'safe\''
.format(values.dtype, a.dtype))
elif a.shape == values.shape:
a[mask] = values[mask]
else:
values = values.ravel()
_putmask_kernel(mask, values, len(values), a)
def fill_diagonal(a, val, wrap=False):
"""Fills the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Args:
a (cupy.ndarray): The array, at least 2-D.
val (scalar): The value to be written on the diagonal.
Its type must be compatible with that of the array a.
wrap (bool): If specified, the diagonal is "wrapped" after N columns.
This affects only tall matrices.
Examples
--------
>>> a = cupy.zeros((3, 3), int)
>>> cupy.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
.. seealso:: :func:`numpy.fill_diagonal`
"""
# The followings are imported from the original numpy
if a.ndim < 2:
raise ValueError('array must be at least 2-d')
end = None
if a.ndim == 2:
step = a.shape[1] + 1
if not wrap:
end = a.shape[1] * a.shape[1]
else:
if not numpy.alltrue(numpy.diff(a.shape) == 0):
raise ValueError('All dimensions of input must be of equal length')
step = 1 + numpy.cumprod(a.shape[:-1]).sum()
a.flat[:end:step] = val
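# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# For a tall matrix, wrap=True lets the diagonal wrap around, mirroring
# numpy.fill_diagonal: on a (5, 3) array the value lands at (0, 0), (1, 1), (2, 2),
# row 3 is skipped, and the diagonal resumes at (4, 0).
def _fill_diagonal_wrap_demo():
    a = cupy.zeros((5, 3), dtype=int)
    fill_diagonal(a, 4, wrap=True)
    return a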
def diag_indices(n, ndim=2):
"""Return the indices to access the main diagonal of an array.
Returns a tuple of indices that can be used to access the main
diagonal of an array with ``ndim >= 2`` dimensions and shape
(n, n, ..., n).
Args:
n (int): The size, along each dimension of the arrays for which
the indices are to be returned.
ndim (int): The number of dimensions. default `2`.
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = cupy.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = cupy.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Create indices to manipulate a 3-D array:
>>> d3 = cupy.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = cupy.zeros((2, 2, 2), dtype=int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
<BLANKLINE>
[[0, 0],
[0, 1]]])
.. seealso:: :func:`numpy.diag_indices`
"""
idx = cupy.arange(n)
return (idx,) * ndim
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Args:
arr (cupy.ndarray): At least 2-D.
.. seealso:: :func:`numpy.diag_indices_from`
"""
if not isinstance(arr, cupy.ndarray):
raise TypeError("Argument must be cupy.ndarray")
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not cupy.all(cupy.diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
|
[
"numpy.cumprod",
"cupy.isscalar",
"cupy.asarray",
"cupy.core.ElementwiseKernel",
"numpy.can_cast",
"numpy.diff",
"cupy.arange",
"cupy.diff"
] |
[((2253, 2411), 'cupy.core.ElementwiseKernel', 'core.ElementwiseKernel', (['"""Q mask, raw S values, uint64 len_vals"""', '"""T out"""', '"""\n if (mask) out = (T) values[i % len_vals];\n """', '"""putmask_kernel"""'], {}), '(\'Q mask, raw S values, uint64 len_vals\', \'T out\',\n """\n if (mask) out = (T) values[i % len_vals];\n """, \'putmask_kernel\'\n )\n', (2275, 2411), False, 'from cupy import core\n'), ((1062, 1080), 'cupy.asarray', 'cupy.asarray', (['mask'], {}), '(mask)\n', (1074, 1080), False, 'import cupy\n'), ((1187, 1205), 'cupy.asarray', 'cupy.asarray', (['vals'], {}), '(vals)\n', (1199, 1205), False, 'import cupy\n'), ((3912, 3933), 'cupy.isscalar', 'cupy.isscalar', (['values'], {}), '(values)\n', (3925, 3933), False, 'import cupy\n'), ((7115, 7129), 'cupy.arange', 'cupy.arange', (['n'], {}), '(n)\n', (7126, 7129), False, 'import cupy\n'), ((3641, 3662), 'cupy.isscalar', 'cupy.isscalar', (['values'], {}), '(values)\n', (3654, 3662), False, 'import cupy\n'), ((3974, 4011), 'numpy.can_cast', 'numpy.can_cast', (['values.dtype', 'a.dtype'], {}), '(values.dtype, a.dtype)\n', (3988, 4011), False, 'import numpy\n'), ((7753, 7773), 'cupy.diff', 'cupy.diff', (['arr.shape'], {}), '(arr.shape)\n', (7762, 7773), False, 'import cupy\n'), ((5521, 5540), 'numpy.diff', 'numpy.diff', (['a.shape'], {}), '(a.shape)\n', (5531, 5540), False, 'import numpy\n'), ((5647, 5674), 'numpy.cumprod', 'numpy.cumprod', (['a.shape[:-1]'], {}), '(a.shape[:-1])\n', (5660, 5674), False, 'import numpy\n')]
|
from skimage.segmentation import slic
from skimage.util import img_as_float
from skimage import io
import datetime
from PIL import Image
import numpy as np
imgname="taili"
image = img_as_float(io.imread(imgname+".png"))
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+" Start.")
for numSegments in [8000]:#1000,2000,3000,4000,5000,6000,7000,9000,10000
for cp in [5]:#3,4,6,2
for sig in [6]:#2,4,6,
segments = slic(image, n_segments = numSegments, sigma = sig,compactness=cp)
img=Image.fromarray(np.array(segments, np.uint8))
img.save(imgname+"_%d seg_" % (numSegments)+str(cp)+"_comp"+"_%d_sigma.png" % (sig) , "png")
print(imgname+"_%d bodr" % (numSegments)+str(cp)+"_comp"+"_%d_sigma.png " % (sig)+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S ')+" Output over.")
|
[
"skimage.segmentation.slic",
"datetime.datetime.now",
"numpy.array",
"skimage.io.imread"
] |
[((202, 229), 'skimage.io.imread', 'io.imread', (["(imgname + '.png')"], {}), "(imgname + '.png')\n", (211, 229), False, 'from skimage import io\n'), ((461, 523), 'skimage.segmentation.slic', 'slic', (['image'], {'n_segments': 'numSegments', 'sigma': 'sig', 'compactness': 'cp'}), '(image, n_segments=numSegments, sigma=sig, compactness=cp)\n', (465, 523), False, 'from skimage.segmentation import slic\n'), ((236, 259), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (257, 259), False, 'import datetime\n'), ((562, 590), 'numpy.array', 'np.array', (['segments', 'np.uint8'], {}), '(segments, np.uint8)\n', (570, 590), True, 'import numpy as np\n'), ((795, 818), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (816, 818), False, 'import datetime\n')]
|
# coding: utf-8
import copy
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from flearn.common.strategy import AVG
from flearn.common.trainer import Trainer
class AVGTrainer(Trainer):
def __init__(self, model, optimizer, criterion, device, display=True):
super().__init__(model, optimizer, criterion, device, display)
        # In the original code, are the gradients the values minus the pretrained weights?
self.model_o = copy.deepcopy(self.model.state_dict())
self.mse_criterion = nn.MSELoss()
self.kl_criterion = nn.KLDivLoss()
self.temp = 1
def forward(self, data, target):
data, target = data.to(self.device), target.to(self.device)
# _, _, output = self.model(data)
# (th, sh), (tx, sx), (ty, sy) = self.model(data)
(th_lst, sh_lst), (ty, sy) = self.model(data)
loss_ce = self.criterion(ty, target) + self.criterion(sy, target)
# loss_mse = 0.0
loss_mse = (
self.mse_criterion(th_lst[-1], sh_lst[-1]) / loss_ce
+ self.mse_criterion(th_lst[-2], sh_lst[-2]) / loss_ce
)
def ts_kl_f(a, b):
a_log_soft = F.log_softmax(a / self.temp, dim=1)
b_soft = F.softmax(b / self.temp, dim=1)
return self.kl_criterion(a_log_soft, b_soft)
loss_kl = ts_kl_f(ty, sy) / loss_ce + ts_kl_f(sy, ty) / loss_ce
loss = loss_ce + loss_kl + loss_mse
        # Accuracy is measured on the teacher's output
output = ty
iter_acc = self.metrics(output, target)
return output, loss, iter_acc
# def train(self, data_loader, epochs=1):
# self.model_o = copy.deepcopy(self.model.state_dict())
# return super().train(data_loader, epochs)
class FedKD(AVG):
"""
    Loss over the two models held by each client.
    Only the student (small) model's parameters are uploaded, and they are transmitted
    after SVD compression.
    [1]
    The student and the teacher model are both implemented inside `model`.
"""
# def client(self, trainer, agg_weight=1):
# w_shared = {"agg_weight": agg_weight}
# w_local = trainer.weight
# w_shared["params"] = {
# k: v.cpu() for k, v in w_local.items() if "teacher" not in k
# }
# return w_shared
# https://github.com/wuch15/FedKD/blob/main/run.py
def client(self, trainer, agg_weight=1):
        # Changes with the communication round: the SVD rank k / retained energy
# energy = 0.95+((1+comm_round)/10)*(0.98-0.95)
self.energy = 1 # init_value
w_shared = {"agg_weight": agg_weight}
# w_local = trainer.weight
w_local = trainer.grads
w_shared["params"] = {}
for key, value in w_local.items():
conv_flag = False
params_mat = value.cpu().numpy()
w_shared["params"][key] = params_mat
if "bias" not in key and len(params_mat.shape) > 1:
                # Convolutional layer
if len(params_mat.shape) == 4:
conv_flag = True
c, k, h, w = params_mat.shape
params_mat = params_mat.reshape(c * k, h * w)
U, Sigma, VT = np.linalg.svd(params_mat, full_matrices=False)
threshold = 0
sigma_square_sum = np.sum(np.square(Sigma))
if sigma_square_sum != 0:
for singular_value_num in range(len(Sigma)):
if (
np.sum(np.square(Sigma[:singular_value_num]))
> self.energy * sigma_square_sum
):
threshold = singular_value_num
break
U = U[:, :threshold]
Sigma = Sigma[:threshold]
VT = VT[:threshold, :]
                    # The original code does the dot product on the server, but that increases
                    # communication cost (U, Sigma and V would all have to be sent), so it is done locally here
# con_restruct1 = np.dot(np.dot(U, np.diag(Sigma)), VT)
w_shared["params"][key] = np.dot(np.dot(U, np.diag(Sigma)), VT)
if conv_flag:
w_shared["params"][key] = w_shared["params"][key].reshape(
c, k, h, w
)
return w_shared
def server_ensemble(self, agg_weight_lst, w_local_lst, key_lst=None):
if key_lst == None:
all_local_key_lst = [set(w_local.keys()) for w_local in w_local_lst]
key_lst = reduce(lambda x, y: x & y, all_local_key_lst)
# sum up weights
w_glob = {k: agg_weight_lst[0] * w_local_lst[0][k] for k in key_lst}
for agg_weight, w_local in zip(agg_weight_lst[1:], w_local_lst[1:]):
for k in key_lst:
w_glob[k] += agg_weight * w_local[k]
molecular = np.sum(agg_weight_lst)
for k in w_glob.keys():
w_glob[k] = np.divide(w_glob[k], molecular)
return w_glob
def client_revice(self, trainer, data_glob_d):
w_local = trainer.weight_o
w_glob = data_glob_d["w_glob"]
for key, value in w_glob.items():
real_params_value = value
conv_flag = False
            # Similarly, on the server side another SVD would be needed after the dot and the mean; done locally here instead
if "bias" not in key and len(value.shape) > 1:
                # Convolutional layer
if len(value.shape) == 4:
conv_flag = True
c, k, h, w = value.shape
params_mat = value.reshape(c * k, h * w)
else:
params_mat = value
U, Sigma, VT = np.linalg.svd(params_mat, full_matrices=False)
sigma_square_sum = np.sum(np.square(Sigma))
if sigma_square_sum != 0:
threshold = 0
for singular_value_num in range(len(Sigma)):
if np.sum(
np.square(Sigma[:singular_value_num])
) >= self.energy * np.sum(np.square(Sigma)):
threshold = singular_value_num
break
U = U[:, :threshold]
Sigma = Sigma[:threshold]
VT = VT[:threshold, :]
# t_lst = [u, sigma, v]
real_params_value = np.dot(np.dot(U, np.diag(Sigma)), VT)
if conv_flag:
real_params_value = real_params_value.reshape(c, k, h, w)
w_local[key] = w_local[key] + torch.FloatTensor(real_params_value)
return w_local
if __name__ == "__main__":
from model import ModelFedCon
model_base = ModelFedCon("simple-cnn", out_dim=256, n_classes=10)
d = model_base.state_dict()
conv_m = d["features.conv1.weight"].numpy()
fc_m = d["l1.weight"].numpy()
u, s, v = np.linalg.svd(fc_m, full_matrices=False)
t1_r = np.dot(np.dot(u, np.diag(s)), v)
t1_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t1_r))
print(t1_dist)
t2_r = np.dot(u, np.diag(s), v)
t2_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t2_r))
print(t2_dist)
t3_r = np.matmul(u, np.diag(s), v)
t3_dist = torch.dist(torch.tensor(fc_m), torch.tensor(t3_r))
print(t3_dist)
# u, s, v = np.linalg.svd(conv_m, full_matrices=False)
U, Sigma, VT = np.linalg.svd(
np.reshape(
conv_m,
(
conv_m.shape[0] * conv_m.shape[1],
conv_m.shape[2] * conv_m.shape[3],
),
),
full_matrices=False,
)
con_restruct1 = np.dot(np.dot(U, np.diag(Sigma)), VT)
t4_r = np.reshape(
con_restruct1,
(conv_m.shape[0], conv_m.shape[1], conv_m.shape[2], conv_m.shape[3]),
)
# t4_r = np.dot(np.dot(u, s[:, None, :]), v)
t4_dist = torch.dist(torch.tensor(conv_m), torch.tensor(t4_r))
print(t4_dist)
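    # --- Hedged sketch (added for illustration): a compact variant of the energy-based
    # truncation rule in FedKD.client, applied to the fully-connected weight above with
    # an assumed retained energy of 0.95. Only the leading singular values whose
    # cumulative squared mass reaches the threshold are kept before reconstruction.
    energy = 0.95
    U2, S2, VT2 = np.linalg.svd(fc_m, full_matrices=False)
    cum_energy = np.cumsum(np.square(S2)) / np.sum(np.square(S2))
    keep = int(np.searchsorted(cum_energy, energy)) + 1
    fc_lowrank = np.dot(np.dot(U2[:, :keep], np.diag(S2[:keep])), VT2[:keep, :])
    print(keep, torch.dist(torch.tensor(fc_m), torch.tensor(fc_lowrank)))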
|
[
"numpy.diag",
"numpy.divide",
"torch.nn.MSELoss",
"numpy.sum",
"torch.nn.KLDivLoss",
"numpy.square",
"torch.FloatTensor",
"torch.nn.functional.softmax",
"numpy.linalg.svd",
"numpy.reshape",
"torch.nn.functional.log_softmax",
"functools.reduce",
"model.ModelFedCon",
"torch.tensor"
] |
[((6475, 6527), 'model.ModelFedCon', 'ModelFedCon', (['"""simple-cnn"""'], {'out_dim': '(256)', 'n_classes': '(10)'}), "('simple-cnn', out_dim=256, n_classes=10)\n", (6486, 6527), False, 'from model import ModelFedCon\n'), ((6657, 6697), 'numpy.linalg.svd', 'np.linalg.svd', (['fc_m'], {'full_matrices': '(False)'}), '(fc_m, full_matrices=False)\n', (6670, 6697), True, 'import numpy as np\n'), ((7452, 7552), 'numpy.reshape', 'np.reshape', (['con_restruct1', '(conv_m.shape[0], conv_m.shape[1], conv_m.shape[2], conv_m.shape[3])'], {}), '(con_restruct1, (conv_m.shape[0], conv_m.shape[1], conv_m.shape[2\n ], conv_m.shape[3]))\n', (7462, 7552), True, 'import numpy as np\n'), ((523, 535), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (533, 535), True, 'import torch.nn as nn\n'), ((564, 578), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {}), '()\n', (576, 578), True, 'import torch.nn as nn\n'), ((4624, 4646), 'numpy.sum', 'np.sum', (['agg_weight_lst'], {}), '(agg_weight_lst)\n', (4630, 4646), True, 'import numpy as np\n'), ((6768, 6786), 'torch.tensor', 'torch.tensor', (['fc_m'], {}), '(fc_m)\n', (6780, 6786), False, 'import torch\n'), ((6788, 6806), 'torch.tensor', 'torch.tensor', (['t1_r'], {}), '(t1_r)\n', (6800, 6806), False, 'import torch\n'), ((6849, 6859), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6856, 6859), True, 'import numpy as np\n'), ((6889, 6907), 'torch.tensor', 'torch.tensor', (['fc_m'], {}), '(fc_m)\n', (6901, 6907), False, 'import torch\n'), ((6909, 6927), 'torch.tensor', 'torch.tensor', (['t2_r'], {}), '(t2_r)\n', (6921, 6927), False, 'import torch\n'), ((6973, 6983), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6980, 6983), True, 'import numpy as np\n'), ((7013, 7031), 'torch.tensor', 'torch.tensor', (['fc_m'], {}), '(fc_m)\n', (7025, 7031), False, 'import torch\n'), ((7033, 7051), 'torch.tensor', 'torch.tensor', (['t3_r'], {}), '(t3_r)\n', (7045, 7051), False, 'import torch\n'), ((7174, 7268), 'numpy.reshape', 'np.reshape', (['conv_m', '(conv_m.shape[0] * conv_m.shape[1], conv_m.shape[2] * conv_m.shape[3])'], {}), '(conv_m, (conv_m.shape[0] * conv_m.shape[1], conv_m.shape[2] *\n conv_m.shape[3]))\n', (7184, 7268), True, 'import numpy as np\n'), ((7645, 7665), 'torch.tensor', 'torch.tensor', (['conv_m'], {}), '(conv_m)\n', (7657, 7665), False, 'import torch\n'), ((7667, 7685), 'torch.tensor', 'torch.tensor', (['t4_r'], {}), '(t4_r)\n', (7679, 7685), False, 'import torch\n'), ((1178, 1213), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['(a / self.temp)'], {'dim': '(1)'}), '(a / self.temp, dim=1)\n', (1191, 1213), True, 'import torch.nn.functional as F\n'), ((1235, 1266), 'torch.nn.functional.softmax', 'F.softmax', (['(b / self.temp)'], {'dim': '(1)'}), '(b / self.temp, dim=1)\n', (1244, 1266), True, 'import torch.nn.functional as F\n'), ((4296, 4341), 'functools.reduce', 'reduce', (['(lambda x, y: x & y)', 'all_local_key_lst'], {}), '(lambda x, y: x & y, all_local_key_lst)\n', (4302, 4341), False, 'from functools import reduce\n'), ((4703, 4734), 'numpy.divide', 'np.divide', (['w_glob[k]', 'molecular'], {}), '(w_glob[k], molecular)\n', (4712, 4734), True, 'import numpy as np\n'), ((6727, 6737), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6734, 6737), True, 'import numpy as np\n'), ((7420, 7434), 'numpy.diag', 'np.diag', (['Sigma'], {}), '(Sigma)\n', (7427, 7434), True, 'import numpy as np\n'), ((2990, 3036), 'numpy.linalg.svd', 'np.linalg.svd', (['params_mat'], {'full_matrices': '(False)'}), '(params_mat, full_matrices=False)\n', (3003, 3036), True, 
'import numpy as np\n'), ((5411, 5457), 'numpy.linalg.svd', 'np.linalg.svd', (['params_mat'], {'full_matrices': '(False)'}), '(params_mat, full_matrices=False)\n', (5424, 5457), True, 'import numpy as np\n'), ((6334, 6370), 'torch.FloatTensor', 'torch.FloatTensor', (['real_params_value'], {}), '(real_params_value)\n', (6351, 6370), False, 'import torch\n'), ((3110, 3126), 'numpy.square', 'np.square', (['Sigma'], {}), '(Sigma)\n', (3119, 3126), True, 'import numpy as np\n'), ((5500, 5516), 'numpy.square', 'np.square', (['Sigma'], {}), '(Sigma)\n', (5509, 5516), True, 'import numpy as np\n'), ((3863, 3877), 'numpy.diag', 'np.diag', (['Sigma'], {}), '(Sigma)\n', (3870, 3877), True, 'import numpy as np\n'), ((6154, 6168), 'numpy.diag', 'np.diag', (['Sigma'], {}), '(Sigma)\n', (6161, 6168), True, 'import numpy as np\n'), ((3299, 3336), 'numpy.square', 'np.square', (['Sigma[:singular_value_num]'], {}), '(Sigma[:singular_value_num])\n', (3308, 3336), True, 'import numpy as np\n'), ((5723, 5760), 'numpy.square', 'np.square', (['Sigma[:singular_value_num]'], {}), '(Sigma[:singular_value_num])\n', (5732, 5760), True, 'import numpy as np\n'), ((5811, 5827), 'numpy.square', 'np.square', (['Sigma'], {}), '(Sigma)\n', (5820, 5827), True, 'import numpy as np\n')]
|
import logging as log
from common.config import MILVUS_TABLE, OUT_PATH, OUT_DATA
from indexer.index import milvus_client, search_vectors, get_vector_by_ids
from indexer.tools import connect_mysql, search_by_milvus_id
import numpy as np
import torch
import pickle
import dgl
import json
import random
def get_list_info(conn, cursor, table_name, host, list_ids):
if not table_name:
table_name = MILVUS_TABLE
list_info = {}
list_img = []
for ids in list_ids:
ids = ids[:-4]
info, img = get_ids_info(conn, cursor, table_name, host, int(ids))
title = info["Title"]
year = info["Year"]
list_info[ids] = [title, year, img]
return list_info
def get_ids_info(conn, cursor, table_name, host, ids):
if not table_name:
table_name = MILVUS_TABLE
info = search_by_milvus_id(conn, cursor, table_name, str(ids))
info = json.loads(info[1], strict=False)
img = "http://"+ str(host) + "/getImage?img=" + str(ids)
print("============", img)
return info, img
def do_search(index_client, conn, cursor, img_list, search_id, table_name):
if not table_name:
table_name = MILVUS_TABLE
_, vector_item = get_vector_by_ids(index_client, table_name, search_id)
status, results = search_vectors(index_client, table_name, vector_item)
print("-----milvus search status------", status)
results_ids = []
search_num = len(search_id)
num = 100/search_num
print("-----num:", num)
for results_id in results.id_array:
k = 0
for i in results_id:
if k >= num:
break
img = str(i) +'.jpg'
if img in img_list and i not in search_id:
results_ids.append(img)
k += 1
# print(results_ids)
return results_ids
|
[
"indexer.index.search_vectors",
"indexer.index.get_vector_by_ids",
"json.loads"
] |
[((896, 929), 'json.loads', 'json.loads', (['info[1]'], {'strict': '(False)'}), '(info[1], strict=False)\n', (906, 929), False, 'import json\n'), ((1200, 1254), 'indexer.index.get_vector_by_ids', 'get_vector_by_ids', (['index_client', 'table_name', 'search_id'], {}), '(index_client, table_name, search_id)\n', (1217, 1254), False, 'from indexer.index import milvus_client, search_vectors, get_vector_by_ids\n'), ((1277, 1330), 'indexer.index.search_vectors', 'search_vectors', (['index_client', 'table_name', 'vector_item'], {}), '(index_client, table_name, vector_item)\n', (1291, 1330), False, 'from indexer.index import milvus_client, search_vectors, get_vector_by_ids\n')]
|
from .celery import app as celery_app
import pymysql
__all__ = ('celery_app',)
pymysql.install_as_MySQLdb()
|
[
"pymysql.install_as_MySQLdb"
] |
[((81, 109), 'pymysql.install_as_MySQLdb', 'pymysql.install_as_MySQLdb', ([], {}), '()\n', (107, 109), False, 'import pymysql\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/.
from trac.util import backup_config_file
_svn_components = [
'svn_fs.SubversionConnector',
'svn_prop.SubversionMergePropertyDiffRenderer',
'svn_prop.SubversionMergePropertyRenderer',
'svn_prop.SubversionPropertyRenderer',
]
_old_path = 'trac.versioncontrol.'
_new_path = 'tracopt.versioncontrol.svn.'
def do_upgrade(env, version, cursor):
"""Automatically enable tracopt.versioncontrol.svn.* components,
unless they were explicitly disabled or the new svn components are
already enabled.
"""
enable = [c for c in _svn_components
if env.is_component_enabled(_old_path + c) and
not env.is_component_enabled(_new_path + c)]
if not enable:
return
backup_config_file(env, '.tracopt-svn.bak')
for c in enable:
env.config.set('components', _new_path + c, 'enabled')
env.config.save()
env.log.info("Enabled components %r to cope with the move from %s to %s.",
enable,
_old_path.replace('.', '/'), _new_path.replace('.', '/'))
|
[
"trac.util.backup_config_file"
] |
[((1222, 1265), 'trac.util.backup_config_file', 'backup_config_file', (['env', '""".tracopt-svn.bak"""'], {}), "(env, '.tracopt-svn.bak')\n", (1240, 1265), False, 'from trac.util import backup_config_file\n')]
|
#!/usr/bin/env python3
from aws_cdk import core
from spotzero.spotzero_stack import SpotZeroStack
app = core.App()
SpotZeroStack(app, "SpotZero")
app.synth()
|
[
"aws_cdk.core.App",
"spotzero.spotzero_stack.SpotZeroStack"
] |
[((108, 118), 'aws_cdk.core.App', 'core.App', ([], {}), '()\n', (116, 118), False, 'from aws_cdk import core\n'), ((119, 149), 'spotzero.spotzero_stack.SpotZeroStack', 'SpotZeroStack', (['app', '"""SpotZero"""'], {}), "(app, 'SpotZero')\n", (132, 149), False, 'from spotzero.spotzero_stack import SpotZeroStack\n')]
|
"""BlueprintEntity class"""
from typing import Any
from homeassistant.helpers.entity import Entity, DeviceInfo
from .data_coordinator import DataCoordinator
from .const import ATTRIBUTION, DOMAIN, NAME, VERSION
class PhonieboxEntity(Entity):
coordinator: DataCoordinator
def __init__(self, config_entry, coordinator: DataCoordinator):
self.config_entry = config_entry
self.coordinator = coordinator
self.mqtt_client = coordinator.mqtt_client
@property
def unique_id(self) -> str:
"""Return a unique ID to use for this entity."""
return f"{self.config_entry.entry_id}-{self.entity_id}"
@property
def device_info(self) -> DeviceInfo:
return DeviceInfo(
identifiers={(DOMAIN, self.config_entry.entry_id)},
name=NAME,
model=VERSION,
manufacturer=NAME,
sw_version=self.coordinator.version,
)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the state attributes."""
return {
"attribution": ATTRIBUTION,
"id": self.config_entry.entry_id,
"integration": DOMAIN,
}
|
[
"homeassistant.helpers.entity.DeviceInfo"
] |
[((718, 866), 'homeassistant.helpers.entity.DeviceInfo', 'DeviceInfo', ([], {'identifiers': '{(DOMAIN, self.config_entry.entry_id)}', 'name': 'NAME', 'model': 'VERSION', 'manufacturer': 'NAME', 'sw_version': 'self.coordinator.version'}), '(identifiers={(DOMAIN, self.config_entry.entry_id)}, name=NAME,\n model=VERSION, manufacturer=NAME, sw_version=self.coordinator.version)\n', (728, 866), False, 'from homeassistant.helpers.entity import Entity, DeviceInfo\n')]
|
from collections import defaultdict
from django.db.models import Exists, OuterRef
from ...channel.models import Channel
from ...order.models import Order
from ...shipping.models import ShippingZone
from ..checkout.dataloaders import CheckoutByIdLoader, CheckoutLineByIdLoader
from ..core.dataloaders import DataLoader
from ..order.dataloaders import OrderByIdLoader, OrderLineByIdLoader
from ..shipping.dataloaders import ShippingZoneByIdLoader
class ChannelByIdLoader(DataLoader):
context_key = "channel_by_id"
def batch_load(self, keys):
channels = Channel.objects.in_bulk(keys)
return [channels.get(channel_id) for channel_id in keys]
class ChannelBySlugLoader(DataLoader):
context_key = "channel_by_slug"
def batch_load(self, keys):
channels = Channel.objects.in_bulk(keys, field_name="slug")
return [channels.get(slug) for slug in keys]
class ChannelByCheckoutLineIDLoader(DataLoader):
context_key = "channel_by_checkout_line"
def batch_load(self, keys):
def channel_by_lines(checkout_lines):
checkout_ids = [line.checkout_id for line in checkout_lines]
def channels_by_checkout(checkouts):
channel_ids = [checkout.channel_id for checkout in checkouts]
return ChannelByIdLoader(self.context).load_many(channel_ids)
return (
CheckoutByIdLoader(self.context)
.load_many(checkout_ids)
.then(channels_by_checkout)
)
return (
CheckoutLineByIdLoader(self.context).load_many(keys).then(channel_by_lines)
)
class ChannelByOrderLineIdLoader(DataLoader):
context_key = "channel_by_orderline"
def batch_load(self, keys):
def channel_by_lines(order_lines):
order_ids = [line.order_id for line in order_lines]
def channels_by_checkout(orders):
channel_ids = [order.channel_id for order in orders]
return ChannelByIdLoader(self.context).load_many(channel_ids)
return (
OrderByIdLoader(self.context)
.load_many(order_ids)
.then(channels_by_checkout)
)
return OrderLineByIdLoader(self.context).load_many(keys).then(channel_by_lines)
class ChannelWithHasOrdersByIdLoader(DataLoader):
context_key = "channel_with_has_orders_by_id"
def batch_load(self, keys):
orders = Order.objects.filter(channel=OuterRef("pk"))
channels = Channel.objects.annotate(has_orders=Exists(orders)).in_bulk(keys)
return [channels.get(channel_id) for channel_id in keys]
class ShippingZonesByChannelIdLoader(DataLoader):
context_key = "shippingzone_by_channel"
def batch_load(self, keys):
zone_and_channel_is_pairs = ShippingZone.objects.filter(
channels__id__in=keys
).values_list("pk", "channels__id")
channel_shipping_zone_map = defaultdict(list)
for zone_id, channel_id in zone_and_channel_is_pairs:
channel_shipping_zone_map[channel_id].append(zone_id)
def map_shipping_zones(shipping_zones):
zone_map = {zone.pk: zone for zone in shipping_zones}
return [
[zone_map[zone_id] for zone_id in channel_shipping_zone_map[channel_id]]
for channel_id in keys
]
return (
ShippingZoneByIdLoader(self.context)
.load_many({pk for pk, _ in zone_and_channel_is_pairs})
.then(map_shipping_zones)
)
|
[
"collections.defaultdict",
"django.db.models.OuterRef",
"django.db.models.Exists"
] |
[((2971, 2988), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2982, 2988), False, 'from collections import defaultdict\n'), ((2497, 2511), 'django.db.models.OuterRef', 'OuterRef', (['"""pk"""'], {}), "('pk')\n", (2505, 2511), False, 'from django.db.models import Exists, OuterRef\n'), ((2568, 2582), 'django.db.models.Exists', 'Exists', (['orders'], {}), '(orders)\n', (2574, 2582), False, 'from django.db.models import Exists, OuterRef\n')]
|
# -*- coding: utf-8 -*-
#%% Load NumPy
import numpy as np
# Load the stats module from SciPy
import scipy.stats as st
# Load pandas
import pandas as pd
# Load PyMC
import pymc3 as pm
# Load the pyplot module from Matplotlib
import matplotlib.pyplot as plt
# Load the progress-bar function from tqdm
from tqdm import trange
# Japanese font settings
from matplotlib.font_manager import FontProperties
import sys
if sys.platform.startswith('win'):
FontPath = 'C:\\Windows\\Fonts\\meiryo.ttc'
elif sys.platform.startswith('darwin'):
FontPath = '/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc'
elif sys.platform.startswith('linux'):
FontPath = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'
else:
    print('This Python code does not support the OS you are running.')
sys.exit()
jpfont = FontProperties(fname=FontPath)
#%% Bayesian inference for the mean and variance of a normal distribution using a Gibbs sampler
# Gibbs sampler for the mean and variance of a normal distribution
def gibbs_gaussian(data, iterations, mu0, tau0, nu0, lam0):
"""
    Inputs
        data: data
        iterations: number of iterations
        mu0: mean of the prior for the mean (normal distribution)
        tau0: standard deviation of the prior for the mean (normal distribution)
        nu0: shape parameter of the prior for the variance (inverse gamma distribution)
        lam0: scale parameter of the prior for the variance (inverse gamma distribution)
    Outputs
        runs: Monte Carlo sample
"""
n = data.size
sum_data = data.sum()
mean_data = sum_data / n
variance_data = data.var()
inv_tau02 = 1.0 / tau0**2
mu0_tau02 = mu0 * inv_tau02
a = 0.5 * (n + nu0)
c = n * variance_data + lam0
sigma2 = variance_data
runs = np.empty((iterations, 2))
for idx in trange(iterations):
variance_mu = 1.0 / (n / sigma2 + inv_tau02)
mean_mu = variance_mu * (sum_data / sigma2 + mu0_tau02)
mu = st.norm.rvs(loc=mean_mu, scale=np.sqrt(variance_mu))
b = 0.5 * (n * (mu - mean_data)**2 + c)
sigma2 = st.invgamma.rvs(a, scale=b)
runs[idx, 0] = mu
runs[idx, 1] = sigma2
return runs
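# --- Added commentary (a hedged reading of the update equations above; not in the original) ---
# The loop draws from the standard conjugate full conditionals:
#   mu | sigma^2, data   ~ Normal(m, v),   v = 1 / (n / sigma^2 + 1 / tau0^2),
#                                          m = v * (sum(data) / sigma^2 + mu0 / tau0^2)
#   sigma^2 | mu, data   ~ InvGamma(a, b), a = (n + nu0) / 2,
#                                          b = (n * (mu - mean(data))**2 + n * var(data) + lam0) / 2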
# Compute posterior statistics from the Monte Carlo sample
def mcmc_stats(runs, burnin, prob, batch):
"""
    Inputs
        runs: Monte Carlo sample
        burnin: number of burn-in draws
        prob: interval probability (0 < prob < 1)
        batch: number of segments the random sequence is split into
    Outputs
        DataFrame of posterior statistics
"""
traces = runs[burnin:, :]
n = traces.shape[0] // batch
k = traces.shape[1]
alpha = 100 * (1.0 - prob)
post_mean = np.mean(traces, axis=0)
post_median = np.median(traces, axis=0)
post_sd = np.std(traces, axis=0)
mc_err = [pm.mcse(traces[:, i].reshape((n, batch), order='F')).item(0) \
for i in range(k)]
ci_lower = np.percentile(traces, 0.5 * alpha, axis=0)
ci_upper = np.percentile(traces, 100 - 0.5 * alpha, axis=0)
hpdi = pm.hpd(traces, 1.0 - prob)
rhat = [pm.rhat(traces[:, i].reshape((n, batch), order='F')).item(0) \
for i in range(k)]
stats = np.vstack((post_mean, post_median, post_sd, mc_err,
ci_lower, ci_upper, hpdi.T, rhat)).T
stats_string = ['平均', '中央値', '標準偏差', '近似誤差',
'信用区間(下限)', '信用区間(上限)',
'HPDI(下限)', 'HPDI(上限)', '$\\hat R$']
param_string = ['平均 $\\mu$', '分散 $\\sigma^2$']
return pd.DataFrame(stats, index=param_string, columns=stats_string)
#%% Generate data from a normal distribution
mu = 1.0
sigma = 2.0
n = 50
np.random.seed(99)
data = st.norm.rvs(loc=mu, scale=sigma, size=n)
#%% Run the Gibbs sampler
mu0 = 0.0
tau0 = 1.0
nu0 = 5.0
lam0 = 7.0
prob = 0.95
burnin = 2000
samplesize = 20000
iterations = burnin + samplesize
np.random.seed(123)
runs = gibbs_gaussian(data, iterations, mu0, tau0, nu0, lam0)
#%% Compute posterior statistics
batch = 4
results = mcmc_stats(runs, burnin, prob, batch)
print(results.to_string(float_format='{:,.4f}'.format))
#%% Plot the posterior distributions
fig, ax = plt.subplots(2, 2, num=1, figsize=(8, 3), facecolor='w')
labels = ['$\\mu$', '$\\sigma^2$']
for index in range(2):
mc_trace = runs[burnin:, index]
if index == 0:
x_min = mc_trace.min() - 0.2 * np.abs(mc_trace.min())
x_max = mc_trace.max() + 0.2 * np.abs(mc_trace.max())
x = np.linspace(x_min, x_max, 250)
prior = st.norm.pdf(x, loc=mu0, scale=tau0)
else:
x_min = 0.0
x_max = mc_trace.max() + 0.2 * np.abs(mc_trace.max())
x = np.linspace(x_min, x_max, 250)
prior = st.invgamma.pdf(x, 0.5*nu0, scale=0.5*lam0)
ax[index, 0].set_xlabel('乱数系列', fontproperties=jpfont)
ax[index, 1].set_xlabel('周辺事後分布', fontproperties=jpfont)
posterior = st.gaussian_kde(mc_trace).evaluate(x)
ax[index, 0].plot(mc_trace, 'k-', linewidth=0.1)
ax[index, 0].set_xlim(1, samplesize)
ax[index, 0].set_ylabel(labels[index], fontproperties=jpfont)
ax[index, 1].plot(x, posterior, 'k-', label='事後分布')
ax[index, 1].plot(x, prior, 'k:', label='事前分布')
ax[index, 1].set_xlim(x_min, x_max)
ax[index, 1].set_ylim(0, 1.1*posterior.max())
ax[index, 1].set_ylabel('確率密度', fontproperties=jpfont)
ax[index, 1].legend(loc='best', frameon=False, prop=jpfont)
plt.tight_layout()
plt.savefig('pybayes_fig_gibbs_gaussian.png', dpi=300)
plt.show()
|
[
"sys.platform.startswith",
"numpy.random.seed",
"scipy.stats.norm.rvs",
"numpy.empty",
"scipy.stats.invgamma.rvs",
"numpy.mean",
"scipy.stats.invgamma.pdf",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.font_manager.FontProperties",
"numpy.std",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"tqdm.trange",
"numpy.median",
"scipy.stats.gaussian_kde",
"numpy.percentile",
"pymc3.hpd",
"numpy.vstack",
"sys.exit",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((372, 402), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (395, 402), False, 'import sys\n'), ((734, 764), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'fname': 'FontPath'}), '(fname=FontPath)\n', (748, 764), False, 'from matplotlib.font_manager import FontProperties\n'), ((3132, 3150), 'numpy.random.seed', 'np.random.seed', (['(99)'], {}), '(99)\n', (3146, 3150), True, 'import numpy as np\n'), ((3158, 3198), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'loc': 'mu', 'scale': 'sigma', 'size': 'n'}), '(loc=mu, scale=sigma, size=n)\n', (3169, 3198), True, 'import scipy.stats as st\n'), ((3336, 3355), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3350, 3355), True, 'import numpy as np\n'), ((3571, 3627), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'num': '(1)', 'figsize': '(8, 3)', 'facecolor': '"""w"""'}), "(2, 2, num=1, figsize=(8, 3), facecolor='w')\n", (3583, 3627), True, 'import matplotlib.pyplot as plt\n'), ((4818, 4836), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4834, 4836), True, 'import matplotlib.pyplot as plt\n'), ((4837, 4891), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pybayes_fig_gibbs_gaussian.png"""'], {'dpi': '(300)'}), "('pybayes_fig_gibbs_gaussian.png', dpi=300)\n", (4848, 4891), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4900, 4902), True, 'import matplotlib.pyplot as plt\n'), ((457, 490), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (480, 490), False, 'import sys\n'), ((1427, 1452), 'numpy.empty', 'np.empty', (['(iterations, 2)'], {}), '((iterations, 2))\n', (1435, 1452), True, 'import numpy as np\n'), ((1468, 1486), 'tqdm.trange', 'trange', (['iterations'], {}), '(iterations)\n', (1474, 1486), False, 'from tqdm import trange\n'), ((2208, 2231), 'numpy.mean', 'np.mean', (['traces'], {'axis': '(0)'}), '(traces, axis=0)\n', (2215, 2231), True, 'import numpy as np\n'), ((2250, 2275), 'numpy.median', 'np.median', (['traces'], {'axis': '(0)'}), '(traces, axis=0)\n', (2259, 2275), True, 'import numpy as np\n'), ((2290, 2312), 'numpy.std', 'np.std', (['traces'], {'axis': '(0)'}), '(traces, axis=0)\n', (2296, 2312), True, 'import numpy as np\n'), ((2438, 2480), 'numpy.percentile', 'np.percentile', (['traces', '(0.5 * alpha)'], {'axis': '(0)'}), '(traces, 0.5 * alpha, axis=0)\n', (2451, 2480), True, 'import numpy as np\n'), ((2496, 2544), 'numpy.percentile', 'np.percentile', (['traces', '(100 - 0.5 * alpha)'], {'axis': '(0)'}), '(traces, 100 - 0.5 * alpha, axis=0)\n', (2509, 2544), True, 'import numpy as np\n'), ((2556, 2582), 'pymc3.hpd', 'pm.hpd', (['traces', '(1.0 - prob)'], {}), '(traces, 1.0 - prob)\n', (2562, 2582), True, 'import pymc3 as pm\n'), ((3025, 3086), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {'index': 'param_string', 'columns': 'stats_string'}), '(stats, index=param_string, columns=stats_string)\n', (3037, 3086), True, 'import pandas as pd\n'), ((553, 585), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (576, 585), False, 'import sys\n'), ((1736, 1763), 'scipy.stats.invgamma.rvs', 'st.invgamma.rvs', (['a'], {'scale': 'b'}), '(a, scale=b)\n', (1751, 1763), True, 'import scipy.stats as st\n'), ((2701, 2791), 'numpy.vstack', 'np.vstack', (['(post_mean, post_median, post_sd, mc_err, ci_lower, ci_upper, hpdi.T, rhat)'], {}), '((post_mean, post_median, 
post_sd, mc_err, ci_lower, ci_upper,\n hpdi.T, rhat))\n', (2710, 2791), True, 'import numpy as np\n'), ((3877, 3907), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(250)'], {}), '(x_min, x_max, 250)\n', (3888, 3907), True, 'import numpy as np\n'), ((3924, 3959), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['x'], {'loc': 'mu0', 'scale': 'tau0'}), '(x, loc=mu0, scale=tau0)\n', (3935, 3959), True, 'import scipy.stats as st\n'), ((4064, 4094), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(250)'], {}), '(x_min, x_max, 250)\n', (4075, 4094), True, 'import numpy as np\n'), ((4111, 4158), 'scipy.stats.invgamma.pdf', 'st.invgamma.pdf', (['x', '(0.5 * nu0)'], {'scale': '(0.5 * lam0)'}), '(x, 0.5 * nu0, scale=0.5 * lam0)\n', (4126, 4158), True, 'import scipy.stats as st\n'), ((714, 724), 'sys.exit', 'sys.exit', ([], {}), '()\n', (722, 724), False, 'import sys\n'), ((4299, 4324), 'scipy.stats.gaussian_kde', 'st.gaussian_kde', (['mc_trace'], {}), '(mc_trace)\n', (4314, 4324), True, 'import scipy.stats as st\n'), ((1649, 1669), 'numpy.sqrt', 'np.sqrt', (['variance_mu'], {}), '(variance_mu)\n', (1656, 1669), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.optim
from torch.autograd import Variable
from FrEIA.framework import *
from FrEIA.modules import *
# From FrEIA
from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode
from FrEIA.modules import GLOWCouplingBlock, PermuteRandom
import config as c
def subnet_fc(c_in, c_out):
return nn.Sequential(nn.Linear(c_in, c.hidden_layer_sizes), nn.ReLU(),
nn.Linear(c.hidden_layer_sizes, c_out))
# Set up the conditional node (y)
cond_node = ConditionNode(c.ndim_y)
# Start from input layer
nodes = [InputNode(c.ndim_x, name='input')]
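# Stack N_blocks conditional GLOW coupling blocks, each followed by a fixed random channel permutation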
for k in range(c.N_blocks):
nodes.append(Node(nodes[-1],
GLOWCouplingBlock,
{'subnet_constructor':subnet_fc, 'clamp':2.0},
conditions=cond_node,
name=F'coupling_{k}'))
nodes.append(Node(nodes[-1],
PermuteRandom,
{'seed':k},
name=F'permute_{k}'))
nodes.append(OutputNode(nodes[-1], name='output'))
nodes.append(cond_node)
model = ReversibleGraphNet(nodes, verbose=False)
model.to(c.device)
params_trainable = list(filter(lambda p: p.requires_grad, model.parameters()))
for p in params_trainable:
p.data = c.init_scale * torch.randn(p.data.shape).to(c.device)
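# Per-step decay factor: after n_epochs scheduler steps the learning rate has shrunk by a factor of final_decay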
gamma = (c.final_decay)**(1./c.n_epochs)
optim = torch.optim.Adam(params_trainable, lr=c.lr_init, betas=c.adam_betas, eps=1e-6, weight_decay=c.l2_weight_reg)
# optim = torch.optim.SGD(params_trainable, lr=0.01, momentum=0.9)
weight_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1, gamma=gamma)
#
def scheduler_step():
weight_scheduler.step()
pass
#
|
[
"torch.optim.lr_scheduler.StepLR",
"torch.nn.ReLU",
"FrEIA.framework.InputNode",
"FrEIA.framework.ConditionNode",
"FrEIA.framework.OutputNode",
"torch.randn",
"torch.optim.Adam",
"torch.nn.Linear",
"FrEIA.framework.Node",
"FrEIA.framework.ReversibleGraphNet"
] |
[((549, 572), 'FrEIA.framework.ConditionNode', 'ConditionNode', (['c.ndim_y'], {}), '(c.ndim_y)\n', (562, 572), False, 'from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode\n'), ((1135, 1175), 'FrEIA.framework.ReversibleGraphNet', 'ReversibleGraphNet', (['nodes'], {'verbose': '(False)'}), '(nodes, verbose=False)\n', (1153, 1175), False, 'from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode\n'), ((1419, 1533), 'torch.optim.Adam', 'torch.optim.Adam', (['params_trainable'], {'lr': 'c.lr_init', 'betas': 'c.adam_betas', 'eps': '(1e-06)', 'weight_decay': 'c.l2_weight_reg'}), '(params_trainable, lr=c.lr_init, betas=c.adam_betas, eps=\n 1e-06, weight_decay=c.l2_weight_reg)\n', (1435, 1533), False, 'import torch\n'), ((1615, 1679), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optim'], {'step_size': '(1)', 'gamma': 'gamma'}), '(optim, step_size=1, gamma=gamma)\n', (1646, 1679), False, 'import torch\n'), ((607, 640), 'FrEIA.framework.InputNode', 'InputNode', (['c.ndim_x'], {'name': '"""input"""'}), "(c.ndim_x, name='input')\n", (616, 640), False, 'from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode\n'), ((1065, 1101), 'FrEIA.framework.OutputNode', 'OutputNode', (['nodes[-1]'], {'name': '"""output"""'}), "(nodes[-1], name='output')\n", (1075, 1101), False, 'from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode\n'), ((386, 423), 'torch.nn.Linear', 'nn.Linear', (['c_in', 'c.hidden_layer_sizes'], {}), '(c_in, c.hidden_layer_sizes)\n', (395, 423), True, 'import torch.nn as nn\n'), ((425, 434), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (432, 434), True, 'import torch.nn as nn\n'), ((461, 499), 'torch.nn.Linear', 'nn.Linear', (['c.hidden_layer_sizes', 'c_out'], {}), '(c.hidden_layer_sizes, c_out)\n', (470, 499), True, 'import torch.nn as nn\n'), ((688, 819), 'FrEIA.framework.Node', 'Node', (['nodes[-1]', 'GLOWCouplingBlock', "{'subnet_constructor': subnet_fc, 'clamp': 2.0}"], {'conditions': 'cond_node', 'name': 'f"""coupling_{k}"""'}), "(nodes[-1], GLOWCouplingBlock, {'subnet_constructor': subnet_fc,\n 'clamp': 2.0}, conditions=cond_node, name=f'coupling_{k}')\n", (692, 819), False, 'from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode\n'), ((920, 984), 'FrEIA.framework.Node', 'Node', (['nodes[-1]', 'PermuteRandom', "{'seed': k}"], {'name': 'f"""permute_{k}"""'}), "(nodes[-1], PermuteRandom, {'seed': k}, name=f'permute_{k}')\n", (924, 984), False, 'from FrEIA.framework import InputNode, OutputNode, Node, ReversibleGraphNet, ConditionNode\n'), ((1330, 1355), 'torch.randn', 'torch.randn', (['p.data.shape'], {}), '(p.data.shape)\n', (1341, 1355), False, 'import torch\n')]
|
"""
@file
@brief Various function about programs such as guessing the language of a code
"""
import re
def guess_language_code(code):
"""
Guess the language of a piece of code.
    The result can be: js, xml, cpp, py, sql, vb, css
@param code code
    @return type of language or None if not found, score (in [0,1], 1 is good)
    The algorithm is to compare languages pairwise on the keywords they do not have in common
"""
code = code.replace(" ", " ").replace(
"\r", "").replace("\n", " ").replace("\t", " ")
stripcode = code.strip()
if stripcode.startswith("<html>") or \
stripcode.startswith("<xml") or \
stripcode.startswith("<!DOCTYPE html>"):
return ('xml', 1.0)
exp1 = re.compile("[^a-z]([a-z]{2,8})[^a-z0-9]")
exp2 = re.compile("(</?[a-z]{2,8}( |>))")
keywords = {"py": set(("format with len from numpy enumerate as and or ord range try except " +
"raise for while if else elif with self assert " +
"for in if not import del from map random sys append except in range elif " +
"float str def raise except none").split()),
"sql": set("on outer full as count and or desc asc from select group by order where join inner".split()),
"xml": set("<body> <xml> </body> <script> <script </script> <head> </head> <meta> <meta </meta>".split()),
"css": set("border font background size".split()),
"vb": set("error for sub function while wend then to end next dim set".split()),
"cpp": set(("ord try catch throw try for while if else push for foreach delete vector map if " +
"catch void double string new throw null").split()),
"js": set("try catch throw for while if else push for in if catch var throw new function null".split()),
}
comments = {"py": re.compile("#[^#]"),
"sql": re.compile("--[^-]"),
"css": re.compile("//[/]"),
"vb": re.compile("'' "),
"xml": re.compile("<!--[^-]"),
}
comments["cpp"] = comments["js"] = comments["css"]
mat = {}
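    # For every ordered pair of languages, keep only the keywords unique to each side (plus their comment regexes when those differ)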
for k, v in keywords.items():
for k2, v2 in keywords.items():
if k == k2:
continue
inter = v.intersection(v2)
vd = v - inter
v2d = v2 - inter
mat[k, k2] = (vd, v2d)
if comments[k] != comments[k2]:
mat[k, k2] += (comments[k], comments[k2])
token = exp1.findall(code) + exp2.findall(code)
counts = {}
for k, v in mat.items():
c = [0, 0, 0, 0, [], [], None, None]
for t in token:
if t in v[0]:
c[0] += 1
c[4].append(t)
if t in v[1]:
c[1] += 1
c[5].append(t)
if len(v) > 2:
co1 = v[2].findall(code)
co2 = v[3].findall(code)
c[6] = co1
c[7] = co2
c[2], c[3] = len(co1), len(co2)
counts[k] = c
# ~ for k in sorted(counts) :
#~ print (k,counts[k])
# ~ if sum(counts[k][:4]) == 0 :
#~ print (k, mat[k])
#~ print (token)
# we find a language which wins every battle
better = {}
for k, c in counts.items():
if c[0] + c[2] >= c[1] + c[3]:
better[k[0]] = better.get(k[0], 0) + 1
#print (better)
li = [(v, k) for k, v in better.items()]
li.sort()
if len(li) > 0:
if li[-1][0] == len(keywords) - 1 and (len(li) == 1 or li[-2][0] < len(keywords) - 1):
ans = li[-1][1]
sh = [(v, k) for k, v in counts.items() if k[0] == ans]
co = [((v[0] + v[2]) / sum(v[:4]), k) for v, k in sh]
co.sort()
#print (co)
return (ans, co[0][0])
else:
return None
else:
return None
|
[
"re.compile"
] |
[((811, 852), 're.compile', 're.compile', (['"""[^a-z]([a-z]{2,8})[^a-z0-9]"""'], {}), "('[^a-z]([a-z]{2,8})[^a-z0-9]')\n", (821, 852), False, 'import re\n'), ((864, 898), 're.compile', 're.compile', (['"""(</?[a-z]{2,8}( |>))"""'], {}), "('(</?[a-z]{2,8}( |>))')\n", (874, 898), False, 'import re\n'), ((2018, 2037), 're.compile', 're.compile', (['"""#[^#]"""'], {}), "('#[^#]')\n", (2028, 2037), False, 'import re\n'), ((2062, 2082), 're.compile', 're.compile', (['"""--[^-]"""'], {}), "('--[^-]')\n", (2072, 2082), False, 'import re\n'), ((2107, 2126), 're.compile', 're.compile', (['"""//[/]"""'], {}), "('//[/]')\n", (2117, 2126), False, 'import re\n'), ((2150, 2167), 're.compile', 're.compile', (['"""\'\' """'], {}), '("\'\' ")\n', (2160, 2167), False, 'import re\n'), ((2192, 2214), 're.compile', 're.compile', (['"""<!--[^-]"""'], {}), "('<!--[^-]')\n", (2202, 2214), False, 'import re\n')]
|
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
from scipy.stats import rv_discrete
import numpy as np
__all__ = ['nonuniform', 'gibbs']
# noinspection PyMethodOverriding,PyPep8Naming
class nonuniform_gen(rv_discrete):
"""A nonuniform discrete random variable.
%(before_notes)s
%(example)s
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) <NAME> and <NAME>
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def _argcheck(self, a):
self.a = a
return abs(a.sum() - 1) <= np.finfo(np.float16).eps
def _pmf(self, x, a):
# port discreteLogprob
raise NotImplementedError
def _ppf(self, q, a):
raise NotImplementedError
def _stats(self, a):
raise NotImplementedError
# noinspection PyArgumentList
def _rvs(self, a):
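        # Inverse-transform sampling: each uniform draw is mapped to the number of cumulative probabilities it exceeds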
r = np.random.rand()
if self._size is not None:
r = np.random.rand(self._size)
s = np.zeros(self._size, dtype=np.int32)
cum_prob = np.cumsum(a.ravel())
if self._size is None:
cum_prob2 = cum_prob[0:-1]
s = np.sum(r > cum_prob2)
else:
n = a.size
if n < self._size:
for i in range(n - 1):
s += r > cum_prob[i]
else:
cum_prob2 = cum_prob[0:-1]
for i in range(self._size):
# noinspection PyTypeChecker
s[i] = np.sum(r[i] > cum_prob2)
return s
nonuniform = nonuniform_gen(name='nonuniform', longname='A discrete non-uniform '
'(random integer)')
# noinspection PyMethodOverriding,PyPep8Naming
class gibbs_gen(rv_discrete):
"""A Gibbs distributed discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `gibbs` is::
                       exp(a/t)
        gibbs.pmf(x) = ------------
                       sum(exp(a/t))
%(example)s
"""
def _argcheck(self, t):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct
and 0's where they are not.
"""
return t >= 0
def _nonzero(self, k, t):
return k == k
def _pmf(self, a, t):
values = np.exp(a / t)
# noinspection PyTypeChecker
if np.any(t <= np.finfo(np.float16).eps):
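            # Near-zero temperature: put all probability mass on the maximum-valued entries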
max_value = max(a)
values = np.asarray([val if val == max_value else 0. for val in a])
return values / np.sum(values)
def _ppf(self, a, t):
raise NotImplementedError
def _stats(self, t):
raise NotImplementedError
gibbs = gibbs_gen(name='gibbs', longname='Gibbs distribution '
'(random integer)')
|
[
"numpy.sum",
"six.moves.range",
"numpy.asarray",
"numpy.zeros",
"numpy.finfo",
"numpy.exp",
"numpy.random.rand"
] |
[((1104, 1120), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1118, 1120), True, 'import numpy as np\n'), ((1211, 1247), 'numpy.zeros', 'np.zeros', (['self._size'], {'dtype': 'np.int32'}), '(self._size, dtype=np.int32)\n', (1219, 1247), True, 'import numpy as np\n'), ((2589, 2602), 'numpy.exp', 'np.exp', (['(a / t)'], {}), '(a / t)\n', (2595, 2602), True, 'import numpy as np\n'), ((1172, 1198), 'numpy.random.rand', 'np.random.rand', (['self._size'], {}), '(self._size)\n', (1186, 1198), True, 'import numpy as np\n'), ((1376, 1397), 'numpy.sum', 'np.sum', (['(r > cum_prob2)'], {}), '(r > cum_prob2)\n', (1382, 1397), True, 'import numpy as np\n'), ((2743, 2804), 'numpy.asarray', 'np.asarray', (['[(val if val == max_value else 0.0) for val in a]'], {}), '([(val if val == max_value else 0.0) for val in a])\n', (2753, 2804), True, 'import numpy as np\n'), ((2826, 2840), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2832, 2840), True, 'import numpy as np\n'), ((796, 816), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (804, 816), True, 'import numpy as np\n'), ((1491, 1503), 'six.moves.range', 'range', (['(n - 1)'], {}), '(n - 1)\n', (1496, 1503), False, 'from six.moves import range\n'), ((1632, 1649), 'six.moves.range', 'range', (['self._size'], {}), '(self._size)\n', (1637, 1649), False, 'from six.moves import range\n'), ((1727, 1751), 'numpy.sum', 'np.sum', (['(r[i] > cum_prob2)'], {}), '(r[i] > cum_prob2)\n', (1733, 1751), True, 'import numpy as np\n'), ((2664, 2684), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (2672, 2684), True, 'import numpy as np\n')]
|
# Copyright (c) 2012-2020 Esri R&D Center Zurich
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A copy of the license is available in the repository's LICENSE file.
import unittest
import pyprt
import multiGeneration_test
import otherExporter_test
import pyGeometry_test
import shapeAttributesDict_test
import arcgis_test
class PyPRTTestResult(unittest.TextTestResult):
def startTestRun(self):
pyprt.initialize_prt()
def stopTestRun(self):
pyprt.shutdown_prt()
print('PRT is shut down.')
class PyPRTTestRunner(unittest.TextTestRunner):
def _makeResult(self):
return PyPRTTestResult(self.stream, self.descriptions, self.verbosity)
def test_suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromModule(multiGeneration_test))
suite.addTests(loader.loadTestsFromModule(otherExporter_test))
suite.addTests(loader.loadTestsFromModule(pyGeometry_test))
suite.addTests(loader.loadTestsFromModule(shapeAttributesDict_test))
suite.addTests(loader.loadTestsFromModule(arcgis_test))
return suite
def run_tests():
runner = PyPRTTestRunner(verbosity=3)
runner.run(test_suite())
if __name__ == '__main__':
run_tests()
|
[
"pyprt.initialize_prt",
"pyprt.shutdown_prt",
"unittest.TestLoader",
"unittest.TestSuite"
] |
[((1252, 1273), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1271, 1273), False, 'import unittest\n'), ((1287, 1307), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1305, 1307), False, 'import unittest\n'), ((935, 957), 'pyprt.initialize_prt', 'pyprt.initialize_prt', ([], {}), '()\n', (955, 957), False, 'import pyprt\n'), ((997, 1017), 'pyprt.shutdown_prt', 'pyprt.shutdown_prt', ([], {}), '()\n', (1015, 1017), False, 'import pyprt\n')]
|
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
def calc_mean_std(x):
return (np.mean(x), np.std(x, ddof=1) / np.sqrt(len(x)))
def color_func(p):
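    # Map a p-value to a plot colour: > 0.2 blue, < 0.05 orange, otherwise green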
if p > 0.2:
return 'dodgerblue'
elif p < 0.05:
return 'orange'
else:
return 'seagreen'
def match_i_Huskies_passing_table(filename, match_i):
'''
Match i-th Huskies players passing table
Return: {playername: [origin, destination]}
'''
passing = pd.read_csv(filename)
player_dic = {}
for i in range(len(passing)):
if passing['MatchID'][i] == match_i:
if passing['TeamID'][i] == 'Huskies':
if passing['OriginPlayerID'][i] not in player_dic:
player_dic[passing['OriginPlayerID'][i]] = [1, 0]
else:
player_dic[passing['OriginPlayerID'][i]][0] += 1
if passing['DestinationPlayerID'][i] not in player_dic:
player_dic[passing['DestinationPlayerID'][i]] = [0, 1]
else:
player_dic[passing['DestinationPlayerID'][i]][1] += 1
return player_dic
def match_i_passing_table(filename, team_id, match_i):
'''
Match i-th {TeamID} players passing table
Return: {playername: [origin, destination]}
'''
passing = pd.read_csv(filename)
player_dic = {}
if match_i == 'all':
for i in range(len(passing)):
if passing['TeamID'][i] == team_id:
if passing['OriginPlayerID'][i] not in player_dic:
player_dic[passing['OriginPlayerID'][i]] = [1, 0]
else:
player_dic[passing['OriginPlayerID'][i]][0] += 1
if passing['DestinationPlayerID'][i] not in player_dic:
player_dic[passing['DestinationPlayerID'][i]] = [0, 1]
else:
player_dic[passing['DestinationPlayerID'][i]][1] += 1
else:
for i in range(len(passing)):
if passing['MatchID'][i] == match_i:
if passing['TeamID'][i] == team_id:
if passing['OriginPlayerID'][i] not in player_dic:
player_dic[passing['OriginPlayerID'][i]] = [1, 0]
else:
player_dic[passing['OriginPlayerID'][i]][0] += 1
if passing['DestinationPlayerID'][i] not in player_dic:
player_dic[passing['DestinationPlayerID'][i]] = [0, 1]
else:
player_dic[passing['DestinationPlayerID'][i]][1] += 1
return player_dic
|
[
"pandas.read_csv",
"numpy.mean",
"numpy.std",
"warnings.filterwarnings"
] |
[((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((553, 574), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (564, 574), True, 'import pandas as pd\n'), ((1402, 1423), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1413, 1423), True, 'import pandas as pd\n'), ((178, 188), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (185, 188), True, 'import numpy as np\n'), ((190, 207), 'numpy.std', 'np.std', (['x'], {'ddof': '(1)'}), '(x, ddof=1)\n', (196, 207), True, 'import numpy as np\n')]
|
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.analysis.model.interface import (
TxnColumns as TC,
MarketDataColumns as MC,
PlColumns as PL,
TxnPlColumns as TPL,
)
from tia.analysis.perf import periods_in_year, guess_freq
from tia.util.decorator import lazy_property
from tia.util.fmt import new_dynamic_formatter
__all__ = ["ProfitAndLoss"]
def _dly_to_ltd(frame, dly_cols):
frame = frame.copy()
ilocs = [frame.columns.get_loc(_) for _ in dly_cols]
sums = frame[dly_cols].cumsum()
# BUG when copying a single row, oddly
if len(frame.index) == 1:
frame.iloc[0, ilocs] = sums.iloc[0, list(range(len(dly_cols)))]
else:
frame.iloc[:, ilocs] = sums.iloc[:, list(range(len(dly_cols)))]
return frame
def _ltd_to_dly(frame, ltd_cols):
pl = frame.copy()
ilocs = [frame.columns.get_loc(_) for _ in ltd_cols]
diff = frame[ltd_cols].diff()
# not sure why this is failing
# pl.iloc[1:, ilocs] = diff.iloc[1:]
for i, cidx in enumerate(ilocs):
pl.iloc[1:, cidx] = diff.iloc[1:, i]
return pl
class OpenAverageProfitAndLossCalculator(object):
def compute(self, txns):
"""Compute the long/short live-to-date transaction level profit and loss. Uses an open average calculation"""
txndata = txns.frame
mktdata = txns.pricer.get_eod_frame()
if not isinstance(mktdata.index, pd.DatetimeIndex):
mktdata.to_timestamp(freq="B")
# get the set of all txn dts and mkt data dts
pl = pd.merge(txndata, mktdata.reset_index(), how="outer", on=TPL.DT)
if pl[TC.PID].isnull().all():
ltd_frame = pd.DataFrame(index=pl.index)
ltd_frame[TPL.DT] = pl[PL.DT]
ltd_frame[TPL.POS] = 0
ltd_frame[TPL.PID] = 0
ltd_frame[TPL.TID] = 0
ltd_frame[TPL.TXN_QTY] = np.nan
ltd_frame[TPL.TXN_PX] = np.nan
ltd_frame[TPL.TXN_FEES] = 0
ltd_frame[TPL.TXN_PREMIUM] = 0
ltd_frame[TPL.TXN_INTENT] = 0
ltd_frame[TPL.TXN_ACTION] = 0
ltd_frame[TPL.CLOSE_PX] = pl[TPL.CLOSE_PX]
ltd_frame[TPL.OPEN_VAL] = 0
ltd_frame[TPL.MKT_VAL] = 0
ltd_frame[TPL.TOT_VAL] = 0
ltd_frame[TPL.DVDS] = 0
ltd_frame[TPL.FEES] = 0
ltd_frame[TPL.RPL_GROSS] = 0
ltd_frame[TPL.RPL] = 0
ltd_frame[TPL.UPL] = 0
ltd_frame[TPL.PL] = 0
return ltd_frame
else:
pl.sort([TC.DT, TC.PID, TC.TID], inplace=1)
pl.reset_index(inplace=1, drop=1)
# check that all days can be priced
has_position = pl[TC.PID] > 0
missing_pxs = pl[MC.CLOSE].isnull()
missing = pl[TC.DT][has_position & missing_pxs]
if len(missing) > 0:
msg = "insufficient price data: {0} prices missing for dates {1}"
mdates = ",".join([_.strftime("%Y-%m-%d") for _ in set(missing[:5])])
mdates += len(missing) > 5 and "..." or ""
raise Exception(msg.format(len(missing), mdates))
# Now there is a row for every timestamp. Now compute the pl and fill in where missing data should be
cols = [
TC.DT,
TC.POS,
TC.PID,
TC.TID,
TC.INTENT,
TC.ACTION,
TC.FEES,
TC.QTY,
TC.PX,
TC.PREMIUM,
TC.OPEN_VAL,
]
(
dts,
pos_qtys,
pids,
tids,
intents,
sides,
txn_fees,
txn_qtys,
txn_pxs,
premiums,
open_vals,
) = [pl[c] for c in cols]
dvds, closing_pxs, mkt_vals = [
pl[c] for c in [MC.DVDS, MC.CLOSE, MC.MKT_VAL]
]
            # Ensure only end of day is kept for dividends (join will match dvd to any transaction during day)
dvds = dvds.where(dts != dts.shift(-1), 0)
# fill in pl dates
open_vals.ffill(inplace=1)
open_vals.fillna(0, inplace=1)
pos_qtys.ffill(inplace=1)
pos_qtys.fillna(0, inplace=1)
# pid is the only tricky one, copy only while position is open
inpos = intents.notnull() | (pos_qtys != 0)
pids = np.where(inpos, pids.ffill(), 0)
pl["pid"] = pids.astype(int)
# Zero fill missing
dvds.fillna(0, inplace=1)
tids.fillna(0, inplace=1)
tids = tids.astype(int)
intents.fillna(0, inplace=1)
intents = intents.astype(int)
sides.fillna(0, inplace=1)
sides = sides.astype(int)
txn_fees.fillna(0, inplace=1)
premiums.fillna(0, inplace=1)
# LTD p/l calculation
fees = txn_fees.cumsum()
total_vals = premiums.cumsum()
mkt_vals *= pos_qtys
dvds = (dvds * pos_qtys).cumsum()
rpl_gross = total_vals - open_vals
rpl = rpl_gross + fees + dvds
upl = mkt_vals + open_vals
tpl = upl + rpl
# build the result
data = OrderedDict()
data[TPL.DT] = dts
data[TPL.POS] = pos_qtys
data[TPL.PID] = pids
data[TPL.TID] = tids
data[TPL.TXN_QTY] = txn_qtys
data[TPL.TXN_PX] = txn_pxs
data[TPL.TXN_FEES] = txn_fees
data[TPL.TXN_PREMIUM] = premiums
data[TPL.TXN_INTENT] = intents
data[TPL.TXN_ACTION] = sides
data[TPL.CLOSE_PX] = closing_pxs
data[TPL.OPEN_VAL] = open_vals
data[TPL.MKT_VAL] = mkt_vals
data[TPL.TOT_VAL] = total_vals
data[TPL.DVDS] = dvds
data[TPL.FEES] = fees
data[TPL.RPL_GROSS] = rpl_gross
data[TPL.RPL] = rpl
data[TPL.UPL] = upl
data[TPL.PL] = tpl
ltd_frame = pd.DataFrame(data, columns=list(data.keys()))
return ltd_frame
class TxnProfitAndLossDetails(object):
def __init__(self, txns=None, frame=None, ltd_frame=None):
"""
:param txns: Txns object
"""
if txns is None and frame is None and ltd_frame is None:
raise ValueError("Either {txns, frame, ltd_frame} must be defined")
self.txns = txns
self._frame = frame
self._ltd_frame = ltd_frame
self.ltd_cols = [
TPL.FEES,
TPL.TOT_VAL,
TPL.RPL_GROSS,
TPL.DVDS,
TPL.RPL,
TPL.RPL,
TPL.UPL,
TPL.PL,
]
@property
def ltd_frame(self):
if self._ltd_frame is None:
if self._frame is not None:
self._ltd_frame = _dly_to_ltd(self._frame, self.ltd_cols)
elif self.txns is not None:
self._ltd_frame = OpenAverageProfitAndLossCalculator().compute(
self.txns
)
else:
raise Exception("either txns or pl frame must be defined")
return self._ltd_frame
@property
def frame(self):
if self._frame is None:
ltd = self.ltd_frame
self._frame = _ltd_to_dly(ltd, self.ltd_cols)
return self._frame
def asfreq(self, freq):
frame = self.frame
pl = frame[PL.ALL].set_index(PL.DT)
if freq == "B":
resampled = pl.groupby(pl.index.date).apply(lambda f: f.sum())
resampled.index = pd.DatetimeIndex([i for i in resampled.index])
return ProfitAndLossDetails(resampled)
else:
resampled = pl.resample(freq, how="sum")
return ProfitAndLossDetails(resampled)
# -----------------------------------------------------------
# Resampled data
dly = lazy_property(lambda self: self.asfreq("B"), "dly")
weekly = lazy_property(lambda self: self.asfreq("W"), "weekly")
monthly = lazy_property(lambda self: self.asfreq("M"), "monthly")
quarterly = lazy_property(lambda self: self.asfreq("Q"), "quarterly")
annual = lazy_property(lambda self: self.asfreq("A"), "annual")
def get_pid_mask(self, pid):
return self.frame[TPL.PID] == pid
def truncate(self, before=None, after=None, pid=None):
if before is None and after is None and pid is None:
return self
elif before or after:
sub = self.frame.truncate(before, after)
return TxnProfitAndLossDetails(frame=sub)
else:
mask = self.get_pid_mask(pid)
frame = self.frame
sub = frame.ix[mask.values]
return TxnProfitAndLossDetails(frame=sub)
def iter_by_year(self):
for key, grp in self.frame.groupby(self.frame[TPL.DT].dt.year):
yield key, TxnProfitAndLossDetails(frame=grp)
def subset(self, txns):
"""To perform a subset it is not possible to reuse the frame since it is LTD, so we convert to daily then
compute ltd from daily
:param txns: the update Txns object
:return:
"""
result = TxnProfitAndLossDetails(txns)
# TODO - add reusing calcs. Issue is when removing PIDs, then could be multiple entries per dt
# use daily txn, clear all values where != pid
# determine which Timestamp columns can be removed as an old position may have multiple txns on same day
# recreate ltd from dly
# Need to take care if a dvd occurs at end of day
return result
class ProfitAndLossDetails(object):
def __init__(self, frame=None, ltd_frame=None):
self._frame = frame
self._ltd_frame = ltd_frame
@property
def ltd_frame(self):
ltd = self._ltd_frame
if ltd is None:
if self._frame is None:
raise Exception(
"Both frame and ltd frame are None. At least one must be defined."
)
self._ltd_frame = ltd = _dly_to_ltd(self._frame, PL.LTDS)
return ltd
@property
def frame(self):
obs = self._frame
if obs is None:
if self._ltd_frame is None:
raise Exception(
"Both frame and ltd frames are None. At least one must be defined."
)
self._frame = obs = _ltd_to_dly(self._ltd_frame, PL.LTDS)
return obs
def rolling_frame(self, n):
return pd.rolling_sum(self.frame, n)
def asfreq(self, freq):
"""Resample the p&l at the specified frequency
:param freq:
:return: Pl object
"""
frame = self.frame
if freq == "B":
resampled = frame.groupby(frame.index.date).apply(lambda f: f.sum())
resampled.index = pd.DatetimeIndex([i for i in resampled.index])
return ProfitAndLossDetails(resampled)
else:
resampled = frame.resample(freq, how="sum")
return ProfitAndLossDetails(resampled)
@lazy_property
def drawdown_info(self):
dd = self.drawdowns.to_frame()
last = dd.index[-1]
dd.columns = ["vals"]
dd["nonzero"] = (dd.vals != 0).astype(int)
dd["gid"] = (dd.nonzero.shift(1) != dd.nonzero).astype(int).cumsum()
ixs = (
dd.reset_index()
.groupby(["nonzero", "gid"])[dd.index.name or "index"]
.apply(lambda x: np.array(x))
)
rows = []
if 1 in ixs:
for ix in ixs[1]:
sub = dd.ix[ix]
# need to get t+1 since actually draw down ends on the 0 value
end = dd.index[
dd.index.get_loc(sub.index[-1]) + (last != sub.index[-1] and 1 or 0)
]
rows.append([sub.index[0], end, sub.vals.min(), sub.vals.idxmin()])
f = pd.DataFrame.from_records(
rows, columns=["dd start", "dd end", "maxdd", "maxdd dt"]
)
f["days"] = (f["dd end"] - f["dd start"]).astype("timedelta64[D]")
return f
@lazy_property
def drawdowns(self):
ltd = self.ltd_frame.pl
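        # Drawdown is the live-to-date P&L minus its running maximum (floored at zero), so it is never positive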
maxpl = pd.expanding_max(ltd)
maxpl[maxpl < 0] = 0
dd = ltd - maxpl
return dd
# scalar data
cnt = property(lambda self: self.frame.pl.notnull().astype(int).sum())
mean = lazy_property(lambda self: self.frame.pl.mean(), "mean")
avg = mean
std = lazy_property(lambda self: self.frame.pl.std(), "std")
std_ann = lazy_property(
lambda self: np.sqrt(periods_in_year(self.frame.pl)) * self.std, "std_ann"
)
maxdd = lazy_property(lambda self: self.drawdown_info["maxdd"].min(), "maxdd")
dd_avg = lazy_property(lambda self: self.drawdown_info["maxdd"].mean(), "dd_avg")
min = property(lambda self: self.frame.pl.min())
max = property(lambda self: self.frame.pl.max())
@lazy_property
def maxdd_dt(self):
if self.drawdown_info.empty:
return None
else:
return self.drawdown_info["maxdd dt"].ix[
self.drawdown_info["maxdd"].idxmin()
]
@lazy_property
def summary(self):
d = OrderedDict()
d["avg"] = self.avg
d["std"] = self.std
d["maxdd"] = self.maxdd
d["maxdd dt"] = self.maxdd_dt
d["dd avg"] = self.dd_avg
d["cnt"] = self.cnt
return pd.Series(d, name=self.frame.index.freq or guess_freq(self.frame.index))
def _repr_html_(self):
from tia.util.fmt import new_dynamic_formatter
fmt = new_dynamic_formatter(
method="row", precision=2, pcts=1, trunc_dot_zeros=1, parens=1
)
return fmt(self.summary.to_frame())._repr_html_()
def plot_ltd(
self, ax=None, style="k", label="ltd", show_dd=1, guess_xlabel=1, title=True
):
ltd = self.ltd_frame.pl
ax = ltd.plot(ax=ax, style=style, label=label)
if show_dd:
dd = self.drawdowns
dd.plot(style="r", label="drawdowns", alpha=0.5)
ax.fill_between(dd.index, 0, dd.values, facecolor="red", alpha=0.25)
fmt = lambda x: x
# guess the formatter
if guess_xlabel:
from tia.util.fmt import guess_formatter
from tia.util.mplot import AxesFormat
fmt = guess_formatter(ltd.abs().max(), precision=1)
AxesFormat().Y.apply_format(fmt).apply(ax)
ax.legend(loc="upper left", prop={"size": 12})
# show the actualy date and value
mdt, mdd = self.maxdd_dt, self.maxdd
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.25)
try:
dtstr = "{0}".format(mdt.to_period())
except:
# assume daily
dtstr = "{0}".format(hasattr(mdt, "date") and mdt.date() or mdt)
ax.text(
mdt,
dd[mdt],
"{1} \n {0}".format(fmt(mdd), dtstr).strip(),
ha="center",
va="top",
size=8,
bbox=bbox_props,
)
if title is True:
df = new_dynamic_formatter(precision=1, parens=False, trunc_dot_zeros=True)
total = df(ltd.iloc[-1])
vol = df(self.std)
mdd = df(self.maxdd)
title = "pnl %s vol %s maxdd %s" % (total, vol, mdd)
title and ax.set_title(title, fontdict=dict(fontsize=10, fontweight="bold"))
return ax
def truncate(self, before=None, after=None):
if before is None and after is None:
return self
else:
sub = self.frame.truncate(before, after)
return ProfitAndLossDetails(frame=sub)
class ProfitAndLoss(object):
def __init__(self, dly_details):
self._dly_details = dly_details
dly_details = property(lambda self: self._dly_details)
dly_frame = property(lambda self: self.dly_details.frame)
ltd_dly_frame = property(lambda self: self.dly_details.ltd_frame)
dly = property(lambda self: self.dly_frame.pl)
ltd_dly = property(lambda self: self.ltd_dly_frame.pl)
weekly_details = lazy_property(
lambda self: self.txn_details.weekly, "weekly_details"
)
weekly_frame = property(lambda self: self.weekly_details.frame)
ltd_weekly_frame = property(lambda self: self.weekly_details.ltd_frame)
weekly = property(lambda self: self.weekly_frame.pl)
ltd_weekly = property(lambda self: self.ltd_weekly_frame.pl)
monthly_details = lazy_property(
lambda self: self.txn_details.monthly, "monthly_details"
)
monthly_frame = property(lambda self: self.monthly_details.frame)
ltd_monthly_frame = property(lambda self: self.monthly_details.ltd_frame)
monthly = property(lambda self: self.monthly_frame.pl)
ltd_monthly = property(lambda self: self.ltd_monthly_frame.pl)
quarterly_details = lazy_property(
lambda self: self.txn_details.quarterly, "quarterly_details"
)
quarterly_frame = property(lambda self: self.quarterly_details.frame)
ltd_quarterly_frame = property(lambda self: self.quarterly_details.ltd_frame)
quarterly = property(lambda self: self.quarterly_frame.pl)
ltd_quarterly = property(lambda self: self.ltd_quarterly_frame.pl)
annual_details = lazy_property(
lambda self: self.txn_details.annual, "annual_details"
)
annual_frame = property(lambda self: self.annual_details.frame)
ltd_annual_frame = property(lambda self: self.annual_details.ltd_frame)
annual = property(lambda self: self.annual_frame.pl)
ltd_annual = property(lambda self: self.ltd_annual_frame.pl)
def iter_by_year(self):
for yr, details in self.dly_details.iter_by_year():
yield yr, ProfitAndLoss(details)
def truncate(self, before=None, after=None, pid=None):
if before is None and after is None and pid is None:
return self
else:
details = self.dly_details.truncate(before, after)
return ProfitAndLoss(details)
def report_by_year(
self,
summary_fct=None,
years=None,
ltd=1,
prior_n_yrs=None,
first_n_yrs=None,
ranges=None,
bm_rets=None,
):
"""Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
        :param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
        :param first_n_yrs: integer or list. Include summary for N years of return data after start date
        :param ranges: list of ranges. The range consists of a year start and year end
        :param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(pl):
monthly = pl.monthly_details
dly = pl.dly_details
data = OrderedDict()
data["mpl avg"] = monthly.mean
data["mpl std ann"] = monthly.std_ann
data["maxdd"] = dly.maxdd
data["maxdd dt"] = dly.maxdd_dt
data["avg dd"] = dly.dd_avg
data["best month"] = monthly.max
data["worst month"] = monthly.min
data["best day"] = dly.max
data["worst day"] = dly.min
data["nmonths"] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, pandl in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(pandl)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = "12/31/%s" % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results["first {0}yrs".format(first)] = summary_fct(firstN)
# Ranges
if ranges:
for range in ranges:
yr_start, yr_end = range
rng_rets = self.truncate("1/1/%s" % yr_start, "12/31/%s" % yr_end)
results["{0}-{1}".format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = "1/1/%s" % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results["past {0}yrs".format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results["ltd"] = summary_fct(self)
return pd.DataFrame(results, index=list(results.values())[0].keys()).T
class TxnProfitAndLoss(ProfitAndLoss):
def __init__(self, txns=None, txnpl_details=None):
if txns is None and txnpl_details is None:
raise ValueError("txns or txn_details must be specified")
self.txns = txns
self._txn_details = txnpl_details
        # Don't set the attribute, want lazy property to be called
# ProfitAndLoss.__init__(self, None)
@property
def txn_details(self):
if self._txn_details is None:
self._txn_details = TxnProfitAndLossDetails(self.txns)
return self._txn_details
txn_frame = property(lambda self: self.txn_details.frame)
ltd_txn_frame = property(lambda self: self.txn_details.ltd_frame)
txn = property(lambda self: self.txn_frame.set_index(PL.DT).pl)
ltd_txn = property(lambda self: self.ltd_txn_frame.set_index(PL.DT).pl)
dly_details = lazy_property(lambda self: self.txn_details.dly, "dly_details")
def truncate(self, before=None, after=None, pid=None):
if before is None and after is None and pid is None:
return self
else:
details = self.txn_details.truncate(before, after, pid)
return TxnProfitAndLoss(txnpl_details=details)
def get_pid_mask(self, pid):
return self.txn_details.get_pid_mask(pid)
|
[
"pandas.DataFrame",
"tia.analysis.perf.guess_freq",
"tia.util.decorator.lazy_property",
"numpy.isscalar",
"pandas.expanding_max",
"pandas.rolling_sum",
"pandas.DatetimeIndex",
"tia.util.fmt.new_dynamic_formatter",
"tia.util.mplot.AxesFormat",
"numpy.array",
"pandas.DataFrame.from_records",
"collections.OrderedDict",
"tia.analysis.perf.periods_in_year"
] |
[((16498, 16567), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.weekly)', '"""weekly_details"""'], {}), "(lambda self: self.txn_details.weekly, 'weekly_details')\n", (16511, 16567), False, 'from tia.util.decorator import lazy_property\n'), ((16871, 16942), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.monthly)', '"""monthly_details"""'], {}), "(lambda self: self.txn_details.monthly, 'monthly_details')\n", (16884, 16942), False, 'from tia.util.decorator import lazy_property\n'), ((17256, 17331), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.quarterly)', '"""quarterly_details"""'], {}), "(lambda self: self.txn_details.quarterly, 'quarterly_details')\n", (17269, 17331), False, 'from tia.util.decorator import lazy_property\n'), ((17658, 17727), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.annual)', '"""annual_details"""'], {}), "(lambda self: self.txn_details.annual, 'annual_details')\n", (17671, 17727), False, 'from tia.util.decorator import lazy_property\n'), ((22414, 22477), 'tia.util.decorator.lazy_property', 'lazy_property', (['(lambda self: self.txn_details.dly)', '"""dly_details"""'], {}), "(lambda self: self.txn_details.dly, 'dly_details')\n", (22427, 22477), False, 'from tia.util.decorator import lazy_property\n'), ((10738, 10767), 'pandas.rolling_sum', 'pd.rolling_sum', (['self.frame', 'n'], {}), '(self.frame, n)\n', (10752, 10767), True, 'import pandas as pd\n'), ((12147, 12235), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['rows'], {'columns': "['dd start', 'dd end', 'maxdd', 'maxdd dt']"}), "(rows, columns=['dd start', 'dd end', 'maxdd',\n 'maxdd dt'])\n", (12172, 12235), True, 'import pandas as pd\n'), ((12439, 12460), 'pandas.expanding_max', 'pd.expanding_max', (['ltd'], {}), '(ltd)\n', (12455, 12460), True, 'import pandas as pd\n'), ((13463, 13476), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13474, 13476), False, 'from collections import OrderedDict\n'), ((13851, 13940), 'tia.util.fmt.new_dynamic_formatter', 'new_dynamic_formatter', ([], {'method': '"""row"""', 'precision': '(2)', 'pcts': '(1)', 'trunc_dot_zeros': '(1)', 'parens': '(1)'}), "(method='row', precision=2, pcts=1, trunc_dot_zeros=1,\n parens=1)\n", (13872, 13940), False, 'from tia.util.fmt import new_dynamic_formatter\n'), ((20166, 20179), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20177, 20179), False, 'from collections import OrderedDict\n'), ((1690, 1718), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'pl.index'}), '(index=pl.index)\n', (1702, 1718), True, 'import pandas as pd\n'), ((5419, 5432), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5430, 5432), False, 'from collections import OrderedDict\n'), ((7804, 7850), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[i for i in resampled.index]'], {}), '([i for i in resampled.index])\n', (7820, 7850), True, 'import pandas as pd\n'), ((11075, 11121), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[i for i in resampled.index]'], {}), '([i for i in resampled.index])\n', (11091, 11121), True, 'import pandas as pd\n'), ((15480, 15550), 'tia.util.fmt.new_dynamic_formatter', 'new_dynamic_formatter', ([], {'precision': '(1)', 'parens': '(False)', 'trunc_dot_zeros': '(True)'}), '(precision=1, parens=False, trunc_dot_zeros=True)\n', (15501, 15550), False, 'from tia.util.fmt import new_dynamic_formatter\n'), ((19418, 19436), 
'numpy.isscalar', 'np.isscalar', (['years'], {}), '(years)\n', (19429, 19436), True, 'import numpy as np\n'), ((11709, 11720), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (11717, 11720), True, 'import numpy as np\n'), ((19638, 19651), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19649, 19651), False, 'from collections import OrderedDict\n'), ((12833, 12863), 'tia.analysis.perf.periods_in_year', 'periods_in_year', (['self.frame.pl'], {}), '(self.frame.pl)\n', (12848, 12863), False, 'from tia.analysis.perf import periods_in_year, guess_freq\n'), ((13723, 13751), 'tia.analysis.perf.guess_freq', 'guess_freq', (['self.frame.index'], {}), '(self.frame.index)\n', (13733, 13751), False, 'from tia.analysis.perf import periods_in_year, guess_freq\n'), ((20458, 20482), 'numpy.isscalar', 'np.isscalar', (['first_n_yrs'], {}), '(first_n_yrs)\n', (20469, 20482), True, 'import numpy as np\n'), ((21110, 21134), 'numpy.isscalar', 'np.isscalar', (['prior_n_yrs'], {}), '(prior_n_yrs)\n', (21121, 21134), True, 'import numpy as np\n'), ((14698, 14710), 'tia.util.mplot.AxesFormat', 'AxesFormat', ([], {}), '()\n', (14708, 14710), False, 'from tia.util.mplot import AxesFormat\n')]
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : Dive-into-Deep-Learning
@File : sec0202.py
@Version : v0.1
@Time : 2020-12-27 9:25
@License : (C)Copyright 2018-2020, zYx.Tom
@Reference : 《动手学深度学习》
@Desc : Sec 5.11 Residual Networks (ResNet)
@Summary:
1. Residual blocks add cross-layer data paths, making it possible to train more effective and deeper neural networks
2. ResNet deeply influenced the design of later deep neural networks
"""
import d2lzh as d2l
import mxnet as mx
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn
from tools import beep_end, show_subtitle, show_title, show_figures
# ----------------------------------------------------------------------
def main():
# test_residual_block()
net = nn.Sequential()
net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),
nn.BatchNorm(),
nn.Activation('relu'),
nn.MaxPool2D(pool_size=3, strides=2, padding=1)
)
net.add(resnet_block(64, 2, first_block=True),
resnet_block(128, 2),
resnet_block(256, 2),
resnet_block(512, 2))
net.add(nn.GlobalAvgPool2D(),
nn.Dense(10)
)
data_size = 96
X = nd.random.uniform(shape=(1, 1, data_size, data_size))
print("X.shape:\t", X.shape)
net.initialize()
for layer in net:
X = layer(X)
print(layer.name, "output shape:\t", X.shape)
pass
lr, num_epochs, batch_size, ctx = 0.05, 15, 96, d2l.try_gpu()
net.initialize(init=init.Xavier(), ctx=ctx, force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=data_size)
d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
pass
def resnet_block(num_channels, num_residuals, first_block=False):
blk = nn.Sequential()
for i in range(num_residuals):
if i == 0 and not first_block:
blk.add(Residual(num_channels, use_1x1cov=True, strides=2))
else:
blk.add(Residual(num_channels))
pass
pass
return blk
def test_residual_block():
X = nd.random.uniform(shape=(4, 3, 6, 6))
show_subtitle("5.11.1 残差块")
show_subtitle("输入与输出的形状相同")
blk = Residual(3)
blk.initialize()
print("X.shape=", X.shape, "blk(X).shape=", blk(X).shape)
show_subtitle("输出通道数加倍,输出维度减半")
blk = Residual(6, use_1x1cov=True, strides=2)
blk.initialize()
print("X.shape=", X.shape, "blk(X).shape=", blk(X).shape)
class Residual(nn.Block):
def __init__(self, num_channels, use_1x1cov=False, strides=1, **kwargs):
super(Residual, self).__init__(**kwargs)
self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)
self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
if use_1x1cov:
self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)
else:
self.conv3 = None
self.bn1 = nn.BatchNorm()
self.bn2 = nn.BatchNorm()
pass
def forward(self, X):
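        # Main path: conv-BN-ReLU then conv-BN; the optional 1x1 conv reshapes X so the shortcut can be added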
Y = nd.relu(self.bn1(self.conv1(X)))
Y = self.bn2(self.conv2(Y))
if self.conv3:
X = self.conv3(X)
pass
return nd.relu(Y + X)
pass
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
# 运行结束的提醒
beep_end()
show_figures()
|
[
"d2lzh.load_data_fashion_mnist",
"tools.show_figures",
"tools.beep_end",
"mxnet.gluon.nn.Dense",
"mxnet.gluon.nn.MaxPool2D",
"mxnet.gluon.nn.Conv2D",
"tools.show_subtitle",
"mxnet.gluon.nn.Activation",
"mxnet.gluon.nn.GlobalAvgPool2D",
"mxnet.gluon.nn.Sequential",
"mxnet.gluon.nn.BatchNorm",
"mxnet.nd.random.uniform",
"mxnet.nd.relu",
"mxnet.init.Xavier",
"d2lzh.try_gpu",
"d2lzh.train_ch5"
] |
[((825, 840), 'mxnet.gluon.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (838, 840), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((1295, 1348), 'mxnet.nd.random.uniform', 'nd.random.uniform', ([], {'shape': '(1, 1, data_size, data_size)'}), '(shape=(1, 1, data_size, data_size))\n', (1312, 1348), False, 'from mxnet import autograd, gluon, init, nd\n'), ((1754, 1811), 'd2lzh.load_data_fashion_mnist', 'd2l.load_data_fashion_mnist', (['batch_size'], {'resize': 'data_size'}), '(batch_size, resize=data_size)\n', (1781, 1811), True, 'import d2lzh as d2l\n'), ((1816, 1895), 'd2lzh.train_ch5', 'd2l.train_ch5', (['net', 'train_iter', 'test_iter', 'batch_size', 'trainer', 'ctx', 'num_epochs'], {}), '(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)\n', (1829, 1895), True, 'import d2lzh as d2l\n'), ((1983, 1998), 'mxnet.gluon.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1996, 1998), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((2285, 2322), 'mxnet.nd.random.uniform', 'nd.random.uniform', ([], {'shape': '(4, 3, 6, 6)'}), '(shape=(4, 3, 6, 6))\n', (2302, 2322), False, 'from mxnet import autograd, gluon, init, nd\n'), ((2327, 2354), 'tools.show_subtitle', 'show_subtitle', (['"""5.11.1 残差块"""'], {}), "('5.11.1 残差块')\n", (2340, 2354), False, 'from tools import beep_end, show_subtitle, show_title, show_figures\n'), ((2359, 2386), 'tools.show_subtitle', 'show_subtitle', (['"""输入与输出的形状相同"""'], {}), "('输入与输出的形状相同')\n", (2372, 2386), False, 'from tools import beep_end, show_subtitle, show_title, show_figures\n'), ((2496, 2527), 'tools.show_subtitle', 'show_subtitle', (['"""输出通道数加倍,输出维度减半"""'], {}), "('输出通道数加倍,输出维度减半')\n", (2509, 2527), False, 'from tools import beep_end, show_subtitle, show_title, show_figures\n'), ((3554, 3564), 'tools.beep_end', 'beep_end', ([], {}), '()\n', (3562, 3564), False, 'from tools import beep_end, show_subtitle, show_title, show_figures\n'), ((3569, 3583), 'tools.show_figures', 'show_figures', ([], {}), '()\n', (3581, 3583), False, 'from tools import beep_end, show_subtitle, show_title, show_figures\n'), ((853, 903), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', (['(64)'], {'kernel_size': '(7)', 'strides': '(2)', 'padding': '(3)'}), '(64, kernel_size=7, strides=2, padding=3)\n', (862, 903), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((917, 931), 'mxnet.gluon.nn.BatchNorm', 'nn.BatchNorm', ([], {}), '()\n', (929, 931), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((945, 966), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (958, 966), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((980, 1027), 'mxnet.gluon.nn.MaxPool2D', 'nn.MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '(1)'}), '(pool_size=3, strides=2, padding=1)\n', (992, 1027), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((1207, 1227), 'mxnet.gluon.nn.GlobalAvgPool2D', 'nn.GlobalAvgPool2D', ([], {}), '()\n', (1225, 1227), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((1241, 1253), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (1249, 1253), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((1565, 1578), 'd2lzh.try_gpu', 'd2l.try_gpu', ([], {}), '()\n', (1576, 1578), True, 'import d2lzh as d2l\n'), ((2836, 2902), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', (['num_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'strides': 'strides'}), 
'(num_channels, kernel_size=3, padding=1, strides=strides)\n', (2845, 2902), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((2924, 2973), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', (['num_channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(num_channels, kernel_size=3, padding=1)\n', (2933, 2973), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((3141, 3155), 'mxnet.gluon.nn.BatchNorm', 'nn.BatchNorm', ([], {}), '()\n', (3153, 3155), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((3175, 3189), 'mxnet.gluon.nn.BatchNorm', 'nn.BatchNorm', ([], {}), '()\n', (3187, 3189), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n'), ((3396, 3410), 'mxnet.nd.relu', 'nd.relu', (['(Y + X)'], {}), '(Y + X)\n', (3403, 3410), False, 'from mxnet import autograd, gluon, init, nd\n'), ((1603, 1616), 'mxnet.init.Xavier', 'init.Xavier', ([], {}), '()\n', (1614, 1616), False, 'from mxnet import autograd, gluon, init, nd\n'), ((3022, 3077), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', (['num_channels'], {'kernel_size': '(1)', 'strides': 'strides'}), '(num_channels, kernel_size=1, strides=strides)\n', (3031, 3077), False, 'from mxnet.gluon import data as gdata, loss as gloss, nn\n')]
|
from seedwork.infrastructure.repository import InMemoryRepository
from seedwork.domain.entities import Entity
class Person(Entity):
first_name: str
last_name: str
def test_InMemoryRepository_persist_one():
# arrange
person = Person(first_name="John", last_name="Doe")
repository = InMemoryRepository()
# act
repository.insert(person)
# assert
assert repository.get_by_id(person.id) == person
def test_InMemoryRepository_persist_two():
# arrange
person1 = Person(first_name="John", last_name="Doe")
person2 = Person(first_name="Mary", last_name="Doe")
repository = InMemoryRepository()
# act
repository.insert(person1)
repository.insert(person2)
# assert
assert repository.get_by_id(person1.id) == person1
assert repository.get_by_id(person2.id) == person2
|
[
"seedwork.infrastructure.repository.InMemoryRepository"
] |
[((305, 325), 'seedwork.infrastructure.repository.InMemoryRepository', 'InMemoryRepository', ([], {}), '()\n', (323, 325), False, 'from seedwork.infrastructure.repository import InMemoryRepository\n'), ((624, 644), 'seedwork.infrastructure.repository.InMemoryRepository', 'InMemoryRepository', ([], {}), '()\n', (642, 644), False, 'from seedwork.infrastructure.repository import InMemoryRepository\n')]
|
import argparse
import time
import torch
import wandb
from torch import nn
from torch.autograd import profiler
from torch.nn import CrossEntropyLoss
from torch.nn.utils import prune
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchvision.models import resnet50
from tqdm import tqdm
import simplify
from simplify.utils import set_seed
def profile_model(model, input, rows=10, cuda=False):
with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=cuda) as prof:
with profiler.record_function("model_inference"):
model(input)
return str(prof.key_averages().table(
sort_by="cpu_time_total", row_limit=rows))
def prune_model(model, amount, remove=False):
print("=> Pruning")
remaining_neurons = 0.
for module in model.modules():
if isinstance(module, nn.Conv2d):
prune.ln_structured(module, 'weight', amount=amount, n=2, dim=0)
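            # Count the output channels whose weights are not entirely zero after structured pruning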
w = module.weight.clone().reshape(module.weight.shape[0], -1).abs().sum(dim=1)
remaining_neurons += (w != 0).sum()
if remove:
prune.remove(module, 'weight')
return remaining_neurons
def main(config):
set_seed(0)
device = torch.device('cuda')
batch_size = 128
train_iteration = 10000
prune_iteration = config.prune_every
model = resnet50(False).to(device)
simplify.fuse(model, simplify.utils.get_bn_folding(model))
optimizer = SGD(model.parameters(), lr=0.001, weight_decay=1e-4)
scheduler = CosineAnnealingLR(optimizer, train_iteration, 1e-3)
criterion = CrossEntropyLoss()
total_neurons = 0
remaining_neurons = 0
for module in model.modules():
if isinstance(module, nn.Conv2d):
total_neurons += module.weight.shape[0]
remaining_neurons += module.weight.shape[0]
wandb.init(config=config)
model.eval()
profiled = profile_model(model, torch.randn((batch_size, 3, 224, 224), device=device), rows=1000)
with open('profile.txt', 'w') as f:
f.write('\n\n -- THRESHOLDED --\n')
f.write(profiled)
# Train
num_samples = 0
num_correct = 0
for i in tqdm(range(train_iteration)):
images = torch.randn((batch_size, 3, 224, 224), device=device)
target = torch.randint(0, 1000, (batch_size,), device=device)
        # Prune the network by 10% at each pass
if (i + 1) % prune_iteration == 0:
remaining_neurons = prune_model(model, amount=0.10, remove=config.simplify)
print(f"The current model has {(remaining_neurons / total_neurons) * 100} % of the original neurons")
if config.simplify:
print("Simplifying model")
model.eval()
simplify.simplify(model, torch.zeros(1, 3, 224, 224, device=device), fuse_bn=False, training=True)
model.train()
profiled = profile_model(model, torch.randn((batch_size, 3, 224, 224), device=device), rows=1000)
with open('profile.txt', 'a') as f:
f.write(f'\n\n -- SIMPLIFIED {(remaining_neurons / total_neurons) * 100} --\n')
f.write(profiled)
# torch.cuda.empty_cache()
# Re-init optimizer and scheduler
optimizer = SGD(model.parameters(), lr=0.1, weight_decay=1e-4)
scheduler = CosineAnnealingLR(optimizer, train_iteration, 1e-3, last_epoch=-1)
for _ in range(i):
scheduler.step()
model.train()
with torch.enable_grad():
if device == torch.device("cuda"):
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
starter.record()
else:
start = time.perf_counter()
output = model(images) # FORWARD PASS
if device == torch.device("cuda"):
ender.record()
torch.cuda.synchronize()
else:
end = time.perf_counter()
forward_time = starter.elapsed_time(ender) if device == torch.device("cuda") else end - start
loss = criterion(output, target)
optimizer.zero_grad()
if device == torch.device("cuda"):
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
starter.record()
else:
start = time.perf_counter()
loss.backward() # BACKWARD PASS
if device == torch.device("cuda"):
ender.record()
torch.cuda.synchronize()
else:
end = time.perf_counter()
backward_time = starter.elapsed_time(ender) if device == torch.device("cuda") else end - start
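            # Zero the gradients of pruned (zeroed) weights so they are not revived by the optimizer step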
for param in model.parameters():
param.grad.data.mul_(torch.abs(param.data) > 0)
optimizer.step()
optimizer.zero_grad()
_, predictions = output.max(1)
num_correct += (predictions == target).sum()
num_samples += predictions.size(0)
to_log = {
"Remaining neurons": (remaining_neurons / total_neurons),
"Train Accuracy": float(num_correct) / float(num_samples),
"Forward Time": forward_time,
"Backward Time": backward_time,
"epoch": i
}
current_lr = [group["lr"] for group in optimizer.param_groups]
for j, lr in enumerate(current_lr):
to_log[f"lr{j}"] = lr
wandb.log(to_log)
if (i + 1) % train_iteration == 0:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prune_every', type=int, default=1000)
parser.add_argument('--simplify', action='store_true')
config = parser.parse_args()
main(config)
|
[
"wandb.log",
"torch.cuda.synchronize",
"argparse.ArgumentParser",
"torch.autograd.profiler.record_function",
"torch.randn",
"torch.device",
"simplify.utils.get_bn_folding",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.utils.prune.ln_structured",
"torch.zeros",
"torch.randint",
"torch.cuda.Event",
"torch.autograd.profiler.profile",
"time.perf_counter",
"torch.abs",
"torchvision.models.resnet50",
"torch.enable_grad",
"torch.nn.CrossEntropyLoss",
"torch.nn.utils.prune.remove",
"wandb.init",
"simplify.utils.set_seed"
] |
[((1252, 1263), 'simplify.utils.set_seed', 'set_seed', (['(0)'], {}), '(0)\n', (1260, 1263), False, 'from simplify.utils import set_seed\n'), ((1277, 1297), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1289, 1297), False, 'import torch\n'), ((1590, 1642), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'CosineAnnealingLR', (['optimizer', 'train_iteration', '(0.001)'], {}), '(optimizer, train_iteration, 0.001)\n', (1607, 1642), False, 'from torch.optim.lr_scheduler import CosineAnnealingLR\n'), ((1658, 1676), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (1674, 1676), False, 'from torch.nn import CrossEntropyLoss\n'), ((1924, 1949), 'wandb.init', 'wandb.init', ([], {'config': 'config'}), '(config=config)\n', (1934, 1949), False, 'import wandb\n'), ((6042, 6067), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6065, 6067), False, 'import argparse\n'), ((446, 518), 'torch.autograd.profiler.profile', 'profiler.profile', ([], {'profile_memory': '(True)', 'record_shapes': '(True)', 'use_cuda': 'cuda'}), '(profile_memory=True, record_shapes=True, use_cuda=cuda)\n', (462, 518), False, 'from torch.autograd import profiler\n'), ((1462, 1498), 'simplify.utils.get_bn_folding', 'simplify.utils.get_bn_folding', (['model'], {}), '(model)\n', (1491, 1498), False, 'import simplify\n'), ((2008, 2061), 'torch.randn', 'torch.randn', (['(batch_size, 3, 224, 224)'], {'device': 'device'}), '((batch_size, 3, 224, 224), device=device)\n', (2019, 2061), False, 'import torch\n'), ((2306, 2359), 'torch.randn', 'torch.randn', (['(batch_size, 3, 224, 224)'], {'device': 'device'}), '((batch_size, 3, 224, 224), device=device)\n', (2317, 2359), False, 'import torch\n'), ((2377, 2429), 'torch.randint', 'torch.randint', (['(0)', '(1000)', '(batch_size,)'], {'device': 'device'}), '(0, 1000, (batch_size,), device=device)\n', (2390, 2429), False, 'import torch\n'), ((5912, 5929), 'wandb.log', 'wandb.log', (['to_log'], {}), '(to_log)\n', (5921, 5929), False, 'import wandb\n'), ((541, 584), 'torch.autograd.profiler.record_function', 'profiler.record_function', (['"""model_inference"""'], {}), "('model_inference')\n", (565, 584), False, 'from torch.autograd import profiler\n'), ((907, 971), 'torch.nn.utils.prune.ln_structured', 'prune.ln_structured', (['module', '"""weight"""'], {'amount': 'amount', 'n': '(2)', 'dim': '(0)'}), "(module, 'weight', amount=amount, n=2, dim=0)\n", (926, 971), False, 'from torch.nn.utils import prune\n'), ((1410, 1425), 'torchvision.models.resnet50', 'resnet50', (['(False)'], {}), '(False)\n', (1418, 1425), False, 'from torchvision.models import resnet50\n'), ((3714, 3733), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (3731, 3733), False, 'import torch\n'), ((1163, 1193), 'torch.nn.utils.prune.remove', 'prune.remove', (['module', '"""weight"""'], {}), "(module, 'weight')\n", (1175, 1193), False, 'from torch.nn.utils import prune\n'), ((3531, 3598), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'CosineAnnealingLR', (['optimizer', 'train_iteration', '(0.001)'], {'last_epoch': '(-1)'}), '(optimizer, train_iteration, 0.001, last_epoch=-1)\n', (3548, 3598), False, 'from torch.optim.lr_scheduler import CosineAnnealingLR\n'), ((3773, 3793), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3785, 3793), False, 'import torch\n'), ((3978, 3997), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3995, 3997), False, 'import time\n'), ((4100, 4120), 'torch.device', 'torch.device', (['"""cuda"""'], {}), 
"('cuda')\n", (4112, 4120), False, 'import torch\n'), ((4169, 4193), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4191, 4193), False, 'import torch\n'), ((4234, 4253), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4251, 4253), False, 'import time\n'), ((4503, 4523), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4515, 4523), False, 'import torch\n'), ((4708, 4727), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4725, 4727), False, 'import time\n'), ((4824, 4844), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4836, 4844), False, 'import torch\n'), ((4893, 4917), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4915, 4917), False, 'import torch\n'), ((4958, 4977), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4975, 4977), False, 'import time\n'), ((2889, 2931), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(224)', '(224)'], {'device': 'device'}), '(1, 3, 224, 224, device=device)\n', (2900, 2931), False, 'import torch\n'), ((3058, 3111), 'torch.randn', 'torch.randn', (['(batch_size, 3, 224, 224)'], {'device': 'device'}), '((batch_size, 3, 224, 224), device=device)\n', (3069, 3111), False, 'import torch\n'), ((3828, 3864), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (3844, 3864), False, 'import torch\n'), ((3866, 3902), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (3882, 3902), False, 'import torch\n'), ((4335, 4355), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4347, 4355), False, 'import torch\n'), ((4558, 4594), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (4574, 4594), False, 'import torch\n'), ((4596, 4632), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (4612, 4632), False, 'import torch\n'), ((5060, 5080), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5072, 5080), False, 'import torch\n'), ((5181, 5202), 'torch.abs', 'torch.abs', (['param.data'], {}), '(param.data)\n', (5190, 5202), False, 'import torch\n')]
|
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import logging
import isceobj
from isceobj.Alos2Proc.runSwathMosaic import swathMosaic
from isceobj.Alos2Proc.runSwathMosaic import swathMosaicParameters
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
logger = logging.getLogger('isce.alos2burstinsar.runSwathMosaic')
def runSwathMosaic(self):
'''mosaic subswaths
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
referenceTrack = self._insar.loadTrack(reference=True)
secondaryTrack = self._insar.loadTrack(reference=False)
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
mosaicDir = 'mosaic'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
if self._insar.endingSwath-self._insar.startingSwath+1 == 1:
import shutil
swathDir = 's{}'.format(referenceTrack.frames[i].swaths[0].swathNumber)
if not os.path.isfile(self._insar.interferogram):
os.symlink(os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram)
shutil.copy2(os.path.join('../', swathDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
shutil.copy2(os.path.join('../', swathDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
if not os.path.isfile(self._insar.amplitude):
os.symlink(os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude)
shutil.copy2(os.path.join('../', swathDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
shutil.copy2(os.path.join('../', swathDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
# os.rename(os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram)
# os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
# os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
# os.rename(os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude)
# os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
# os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
#update frame parameters
#########################################################
frame = referenceTrack.frames[i]
infImg = isceobj.createImage()
infImg.load(self._insar.interferogram+'.xml')
#mosaic size
frame.numberOfSamples = infImg.width
frame.numberOfLines = infImg.length
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
frame.startingRange = frame.swaths[0].startingRange
frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate
frame.rangePixelSize = frame.swaths[0].rangePixelSize
#azimuth parameters
frame.sensingStart = frame.swaths[0].sensingStart
frame.prf = frame.swaths[0].prf
frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize
frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval
#update frame parameters, secondary
#########################################################
frame = secondaryTrack.frames[i]
#mosaic size
frame.numberOfSamples = int(frame.swaths[0].numberOfSamples/self._insar.numberRangeLooks1)
frame.numberOfLines = int(frame.swaths[0].numberOfLines/self._insar.numberAzimuthLooks1)
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
frame.startingRange = frame.swaths[0].startingRange
frame.rangeSamplingRate = frame.swaths[0].rangeSamplingRate
frame.rangePixelSize = frame.swaths[0].rangePixelSize
#azimuth parameters
frame.sensingStart = frame.swaths[0].sensingStart
frame.prf = frame.swaths[0].prf
frame.azimuthPixelSize = frame.swaths[0].azimuthPixelSize
frame.azimuthLineInterval = frame.swaths[0].azimuthLineInterval
os.chdir('../')
#save parameter file
self._insar.saveProduct(referenceTrack.frames[i], self._insar.referenceFrameParameter)
self._insar.saveProduct(secondaryTrack.frames[i], self._insar.secondaryFrameParameter)
os.chdir('../')
continue
#choose offsets
numberOfFrames = len(referenceTrack.frames)
numberOfSwaths = len(referenceTrack.frames[i].swaths)
if self.swathOffsetMatching:
#no need to do this as the API support 2-d list
#rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetMatchingReference
azimuthOffsets = self._insar.swathAzimuthOffsetMatchingReference
else:
#rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetGeometricalReference
azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalReference
rangeOffsets = rangeOffsets[i]
azimuthOffsets = azimuthOffsets[i]
#list of input files
inputInterferograms = []
inputAmplitudes = []
for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
swathDir = 's{}'.format(swathNumber)
inputInterferograms.append(os.path.join('../', swathDir, self._insar.interferogram))
inputAmplitudes.append(os.path.join('../', swathDir, self._insar.amplitude))
#note that frame parameters are updated after mosaicking
#mosaic amplitudes
swathMosaic(referenceTrack.frames[i], inputAmplitudes, self._insar.amplitude,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, resamplingMethod=0)
#mosaic interferograms
swathMosaic(referenceTrack.frames[i], inputInterferograms, self._insar.interferogram,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, updateFrame=True, resamplingMethod=1)
create_xml(self._insar.amplitude, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'amp')
create_xml(self._insar.interferogram, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'int')
#update secondary frame parameters here
#no matching for secondary, always use geometry
rangeOffsets = self._insar.swathRangeOffsetGeometricalSecondary
azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalSecondary
rangeOffsets = rangeOffsets[i]
azimuthOffsets = azimuthOffsets[i]
swathMosaicParameters(secondaryTrack.frames[i], rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1)
os.chdir('../')
#save parameter file
self._insar.saveProduct(referenceTrack.frames[i], self._insar.referenceFrameParameter)
self._insar.saveProduct(secondaryTrack.frames[i], self._insar.secondaryFrameParameter)
os.chdir('../')
#mosaic spectral diversity interferograms
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
mosaicDir = 'mosaic'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
if self._insar.endingSwath-self._insar.startingSwath+1 == 1:
import shutil
swathDir = 's{}'.format(referenceTrack.frames[i].swaths[0].swathNumber)
for sdFile in self._insar.interferogramSd:
if not os.path.isfile(sdFile):
os.symlink(os.path.join('../', swathDir, 'spectral_diversity', sdFile), sdFile)
shutil.copy2(os.path.join('../', swathDir, 'spectral_diversity', sdFile+'.vrt'), sdFile+'.vrt')
shutil.copy2(os.path.join('../', swathDir, 'spectral_diversity', sdFile+'.xml'), sdFile+'.xml')
os.chdir('../')
os.chdir('../')
continue
#choose offsets
numberOfFrames = len(referenceTrack.frames)
numberOfSwaths = len(referenceTrack.frames[i].swaths)
if self.swathOffsetMatching:
#no need to do this as the API support 2-d list
#rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetMatchingReference
azimuthOffsets = self._insar.swathAzimuthOffsetMatchingReference
else:
#rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
rangeOffsets = self._insar.swathRangeOffsetGeometricalReference
azimuthOffsets = self._insar.swathAzimuthOffsetGeometricalReference
rangeOffsets = rangeOffsets[i]
azimuthOffsets = azimuthOffsets[i]
#list of input files
inputSd = [[], [], []]
for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
swathDir = 's{}'.format(swathNumber)
for k, sdFile in enumerate(self._insar.interferogramSd):
inputSd[k].append(os.path.join('../', swathDir, 'spectral_diversity', sdFile))
#mosaic spectral diversity interferograms
for inputSdList, outputSdFile in zip(inputSd, self._insar.interferogramSd):
swathMosaic(referenceTrack.frames[i], inputSdList, outputSdFile,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1, updateFrame=False, phaseCompensation=True, pcRangeLooks=5, pcAzimuthLooks=5, filt=True, resamplingMethod=1)
for sdFile in self._insar.interferogramSd:
create_xml(sdFile, referenceTrack.frames[i].numberOfSamples, referenceTrack.frames[i].numberOfLines, 'int')
os.chdir('../')
os.chdir('../')
catalog.printToLog(logger, "runSwathMosaic")
self._insar.procDoc.addAllFromCatalog(catalog)
|
[
"os.makedirs",
"isceobj.Alos2Proc.runSwathMosaic.swathMosaic",
"isceobj.Catalog.createCatalog",
"os.path.isfile",
"isceobj.Alos2Proc.Alos2ProcPublic.create_xml",
"isceobj.createImage",
"isceobj.Alos2Proc.runSwathMosaic.swathMosaicParameters",
"os.path.join",
"os.chdir",
"logging.getLogger"
] |
[((297, 353), 'logging.getLogger', 'logging.getLogger', (['"""isce.alos2burstinsar.runSwathMosaic"""'], {}), "('isce.alos2burstinsar.runSwathMosaic')\n", (314, 353), False, 'import logging\n'), ((427, 482), 'isceobj.Catalog.createCatalog', 'isceobj.Catalog.createCatalog', (['self._insar.procDoc.name'], {}), '(self._insar.procDoc.name)\n', (456, 482), False, 'import isceobj\n'), ((770, 788), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (778, 788), False, 'import os\n'), ((827, 864), 'os.makedirs', 'os.makedirs', (['mosaicDir'], {'exist_ok': '(True)'}), '(mosaicDir, exist_ok=True)\n', (838, 864), False, 'import os\n'), ((873, 892), 'os.chdir', 'os.chdir', (['mosaicDir'], {}), '(mosaicDir)\n', (881, 892), False, 'import os\n'), ((6532, 6732), 'isceobj.Alos2Proc.runSwathMosaic.swathMosaic', 'swathMosaic', (['referenceTrack.frames[i]', 'inputAmplitudes', 'self._insar.amplitude', 'rangeOffsets', 'azimuthOffsets', 'self._insar.numberRangeLooks1', 'self._insar.numberAzimuthLooks1'], {'resamplingMethod': '(0)'}), '(referenceTrack.frames[i], inputAmplitudes, self._insar.\n amplitude, rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1,\n self._insar.numberAzimuthLooks1, resamplingMethod=0)\n', (6543, 6732), False, 'from isceobj.Alos2Proc.runSwathMosaic import swathMosaic\n'), ((6776, 7007), 'isceobj.Alos2Proc.runSwathMosaic.swathMosaic', 'swathMosaic', (['referenceTrack.frames[i]', 'inputInterferograms', 'self._insar.interferogram', 'rangeOffsets', 'azimuthOffsets', 'self._insar.numberRangeLooks1', 'self._insar.numberAzimuthLooks1'], {'updateFrame': '(True)', 'resamplingMethod': '(1)'}), '(referenceTrack.frames[i], inputInterferograms, self._insar.\n interferogram, rangeOffsets, azimuthOffsets, self._insar.\n numberRangeLooks1, self._insar.numberAzimuthLooks1, updateFrame=True,\n resamplingMethod=1)\n', (6787, 7007), False, 'from isceobj.Alos2Proc.runSwathMosaic import swathMosaic\n'), ((7016, 7142), 'isceobj.Alos2Proc.Alos2ProcPublic.create_xml', 'create_xml', (['self._insar.amplitude', 'referenceTrack.frames[i].numberOfSamples', 'referenceTrack.frames[i].numberOfLines', '"""amp"""'], {}), "(self._insar.amplitude, referenceTrack.frames[i].numberOfSamples,\n referenceTrack.frames[i].numberOfLines, 'amp')\n", (7026, 7142), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import create_xml\n'), ((7147, 7278), 'isceobj.Alos2Proc.Alos2ProcPublic.create_xml', 'create_xml', (['self._insar.interferogram', 'referenceTrack.frames[i].numberOfSamples', 'referenceTrack.frames[i].numberOfLines', '"""int"""'], {}), "(self._insar.interferogram, referenceTrack.frames[i].\n numberOfSamples, referenceTrack.frames[i].numberOfLines, 'int')\n", (7157, 7278), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import create_xml\n'), ((7617, 7767), 'isceobj.Alos2Proc.runSwathMosaic.swathMosaicParameters', 'swathMosaicParameters', (['secondaryTrack.frames[i]', 'rangeOffsets', 'azimuthOffsets', 'self._insar.numberRangeLooks1', 'self._insar.numberAzimuthLooks1'], {}), '(secondaryTrack.frames[i], rangeOffsets,\n azimuthOffsets, self._insar.numberRangeLooks1, self._insar.\n numberAzimuthLooks1)\n', (7638, 7767), False, 'from isceobj.Alos2Proc.runSwathMosaic import swathMosaicParameters\n'), ((7768, 7783), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (7776, 7783), False, 'import os\n'), ((8013, 8028), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (8021, 8028), False, 'import os\n'), ((8204, 8222), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (8212, 8222), False, 'import 
os\n'), ((8261, 8298), 'os.makedirs', 'os.makedirs', (['mosaicDir'], {'exist_ok': '(True)'}), '(mosaicDir, exist_ok=True)\n', (8272, 8298), False, 'import os\n'), ((8307, 8326), 'os.chdir', 'os.chdir', (['mosaicDir'], {}), '(mosaicDir)\n', (8315, 8326), False, 'import os\n'), ((11172, 11187), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (11180, 11187), False, 'import os\n'), ((11196, 11211), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (11204, 11211), False, 'import os\n'), ((2763, 2784), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (2782, 2784), False, 'import isceobj\n'), ((4543, 4558), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (4551, 4558), False, 'import os\n'), ((4804, 4819), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (4812, 4819), False, 'import os\n'), ((8959, 8974), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (8967, 8974), False, 'import os\n'), ((8987, 9002), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (8995, 9002), False, 'import os\n'), ((10707, 10986), 'isceobj.Alos2Proc.runSwathMosaic.swathMosaic', 'swathMosaic', (['referenceTrack.frames[i]', 'inputSdList', 'outputSdFile', 'rangeOffsets', 'azimuthOffsets', 'self._insar.numberRangeLooks1', 'self._insar.numberAzimuthLooks1'], {'updateFrame': '(False)', 'phaseCompensation': '(True)', 'pcRangeLooks': '(5)', 'pcAzimuthLooks': '(5)', 'filt': '(True)', 'resamplingMethod': '(1)'}), '(referenceTrack.frames[i], inputSdList, outputSdFile,\n rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self.\n _insar.numberAzimuthLooks1, updateFrame=False, phaseCompensation=True,\n pcRangeLooks=5, pcAzimuthLooks=5, filt=True, resamplingMethod=1)\n', (10718, 10986), False, 'from isceobj.Alos2Proc.runSwathMosaic import swathMosaic\n'), ((11055, 11167), 'isceobj.Alos2Proc.Alos2ProcPublic.create_xml', 'create_xml', (['sdFile', 'referenceTrack.frames[i].numberOfSamples', 'referenceTrack.frames[i].numberOfLines', '"""int"""'], {}), "(sdFile, referenceTrack.frames[i].numberOfSamples, referenceTrack\n .frames[i].numberOfLines, 'int')\n", (11065, 11167), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import create_xml\n'), ((1105, 1146), 'os.path.isfile', 'os.path.isfile', (['self._insar.interferogram'], {}), '(self._insar.interferogram)\n', (1119, 1146), False, 'import os\n'), ((1285, 1350), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', "(self._insar.interferogram + '.vrt')"], {}), "('../', swathDir, self._insar.interferogram + '.vrt')\n", (1297, 1350), False, 'import os\n'), ((1409, 1474), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', "(self._insar.interferogram + '.xml')"], {}), "('../', swathDir, self._insar.interferogram + '.xml')\n", (1421, 1474), False, 'import os\n'), ((1527, 1564), 'os.path.isfile', 'os.path.isfile', (['self._insar.amplitude'], {}), '(self._insar.amplitude)\n', (1541, 1564), False, 'import os\n'), ((1695, 1756), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', "(self._insar.amplitude + '.vrt')"], {}), "('../', swathDir, self._insar.amplitude + '.vrt')\n", (1707, 1756), False, 'import os\n'), ((1811, 1872), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', "(self._insar.amplitude + '.xml')"], {}), "('../', swathDir, self._insar.amplitude + '.xml')\n", (1823, 1872), False, 'import os\n'), ((6284, 6340), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', 'self._insar.interferogram'], {}), "('../', swathDir, self._insar.interferogram)\n", (6296, 6340), False, 'import 
os\n'), ((6377, 6429), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', 'self._insar.amplitude'], {}), "('../', swathDir, self._insar.amplitude)\n", (6389, 6429), False, 'import os\n'), ((1175, 1231), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', 'self._insar.interferogram'], {}), "('../', swathDir, self._insar.interferogram)\n", (1187, 1231), False, 'import os\n'), ((1593, 1645), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', 'self._insar.amplitude'], {}), "('../', swathDir, self._insar.amplitude)\n", (1605, 1645), False, 'import os\n'), ((8598, 8620), 'os.path.isfile', 'os.path.isfile', (['sdFile'], {}), '(sdFile)\n', (8612, 8620), False, 'import os\n'), ((8751, 8819), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', '"""spectral_diversity"""', "(sdFile + '.vrt')"], {}), "('../', swathDir, 'spectral_diversity', sdFile + '.vrt')\n", (8763, 8819), False, 'import os\n'), ((8863, 8931), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', '"""spectral_diversity"""', "(sdFile + '.xml')"], {}), "('../', swathDir, 'spectral_diversity', sdFile + '.xml')\n", (8875, 8931), False, 'import os\n'), ((10499, 10558), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', '"""spectral_diversity"""', 'sdFile'], {}), "('../', swathDir, 'spectral_diversity', sdFile)\n", (10511, 10558), False, 'import os\n'), ((8653, 8712), 'os.path.join', 'os.path.join', (['"""../"""', 'swathDir', '"""spectral_diversity"""', 'sdFile'], {}), "('../', swathDir, 'spectral_diversity', sdFile)\n", (8665, 8712), False, 'import os\n')]
|
import argparse
import json
import sys
def convert(input_json, prefix):
for source_file in input_json.get('source_files', []):
source_file['name'] = prefix + source_file.get('name', '')
parser = argparse.ArgumentParser(
description="Add a prefix path to all CodeClimate coverage files")
parser.add_argument('CC_JSON_FILE', nargs='?', type=argparse.FileType('rb'),
default=sys.stdin)
parser.add_argument('--prefix', required=True)
if __name__ == '__main__':
args = parser.parse_args()
input_json = json.load(args.CC_JSON_FILE)
convert(input_json, args.prefix)
print(json.dumps(input_json)) # noqa - correct behavior
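# Illustrative invocation (file and script names here are hypothetical, not from
# the original project): pipe an existing CodeClimate report through the converter.
#
#   cat codeclimate.json | python add_prefix.py --prefix backend/ > codeclimate.prefixed.json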
|
[
"json.load",
"argparse.ArgumentParser",
"json.dumps",
"argparse.FileType"
] |
[((211, 306), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Add a prefix path to all CodeClimate coverage files"""'}), "(description=\n 'Add a prefix path to all CodeClimate coverage files')\n", (234, 306), False, 'import argparse\n'), ((547, 575), 'json.load', 'json.load', (['args.CC_JSON_FILE'], {}), '(args.CC_JSON_FILE)\n', (556, 575), False, 'import json\n'), ((359, 382), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (376, 382), False, 'import argparse\n'), ((623, 645), 'json.dumps', 'json.dumps', (['input_json'], {}), '(input_json)\n', (633, 645), False, 'import json\n')]
|
import os
import unittest
import tempfile
from django.conf import settings
from django.db import connection, models
from south.db import db
from south.logger import close_logger
class TestLogger(unittest.TestCase):
"""
Tests if the logging is working reasonably. Some tests ignored if you don't
have write permission to the disk.
"""
def setUp(self):
db.debug = False
self.test_path = tempfile.mkstemp(suffix=".south.log")[1]
def test_db_execute_logging_nofile(self):
"Does logging degrade nicely if SOUTH_DEBUG_ON not set?"
settings.SOUTH_LOGGING_ON = False # this needs to be set to False
# to avoid issues where other tests
# set this to True. settings is shared
# between these tests.
db.create_table("test9", [('email_confirmed', models.BooleanField(default=False))])
def test_db_execute_logging_validfile(self):
"Does logging work when passing in a valid file?"
settings.SOUTH_LOGGING_ON = True
settings.SOUTH_LOGGING_FILE = self.test_path
# Check to see if we can make the logfile
try:
fh = open(self.test_path, "w")
except IOError:
# Permission was denied, ignore the test.
return
else:
fh.close()
# Do an action which logs
db.create_table("test10", [('email_confirmed', models.BooleanField(default=False))])
# Close the logged file
close_logger()
try:
os.remove(self.test_path)
except:
# It's a tempfile, it's not vital we remove it.
pass
def test_db_execute_logging_missingfilename(self):
"Does logging raise an error if there is a missing filename?"
settings.SOUTH_LOGGING_ON = True
settings.SOUTH_LOGGING_FILE = None
self.assertRaises(
IOError,
db.create_table,
"test11",
[('email_confirmed', models.BooleanField(default=False))],
)
|
[
"south.logger.close_logger",
"os.remove",
"django.db.models.BooleanField",
"tempfile.mkstemp"
] |
[((1610, 1624), 'south.logger.close_logger', 'close_logger', ([], {}), '()\n', (1622, 1624), False, 'from south.logger import close_logger\n'), ((430, 467), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".south.log"""'}), "(suffix='.south.log')\n", (446, 467), False, 'import tempfile\n'), ((1650, 1675), 'os.remove', 'os.remove', (['self.test_path'], {}), '(self.test_path)\n', (1659, 1675), False, 'import os\n'), ((955, 989), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (974, 989), False, 'from django.db import connection, models\n'), ((1532, 1566), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1551, 1566), False, 'from django.db import connection, models\n'), ((2111, 2145), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2130, 2145), False, 'from django.db import connection, models\n')]
|
# -*- coding: utf-8 -*-
import re
import codecs
import jieba
import pickle
import string
import warnings
import logging
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from gensim import corpora, models
"""
todo: LDA topic model
"""
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
stopwords = codecs.open('/data/rec/rec_lda/model/stop_words_cn.txt', 'r', encoding='utf8').readlines()
stopwords = [w.strip() for w in stopwords]
min_length = 100
train_set = []
doc_mapping = {}
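# prepare_data() expects `documents` to behave like a pandas DataFrame whose rows
# carry 'id', 'name' and 'desc' fields; it tokenizes each sufficiently long
# description with jieba, drops stopwords, and fills the module-level globals
# `train_set` (token lists) and `doc_mapping` (id -> title).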
def prepare_data(documents):
global train_set, doc_mapping
    logging.debug('Start tokenizing the corpora')
punct = re.compile('[%s]' % re.escape(string.punctuation))
# doc_mapping = OrderedDict()
def gen_train_set(patent):
# print("send Series:" + str(patent))
if 'name' in patent.keys():
title = str(patent['name'])
text = str(patent['desc'])
else:
text = ''
# Skip document length < min_length
if len(text) >= min_length:
text = punct.sub("", text) # Remove all punctuations
tokens = jieba.cut(text) # Tokenize the whole text
# print(tokens)
# Lemmatize every word and add to tokens list if the word is not in stopword
train_set.append([word for word in tokens if word not in stopwords])
# Build doc-mapping
# k = str(patent['id']) if 'id' in patent.keys() is True else title
doc_mapping[patent['id']] = title
# tqdm.pandas(tqdm_notebook)
documents.apply(gen_train_set, axis=1)
    logging.debug('Finished tokenizing the corpora, train_set:' + str(len(train_set)))
return len(train_set)
class LDAModel:
def __init__(self, topic_num=900, min_doc=50, iter_num=100, pass_num=100):
# Built-in dictionary for word-parser, and path to corpora
self.stopword = stopwords
warnings.filterwarnings("ignore")
# Hyperparameters for training model
        # Minimum length of a single document
self.min_length = min_doc
# Num_topics in LDA
self.num_topics = topic_num
# Filter out tokens that appear in less than `no_below` documents (absolute number)
self.no_below_this_number = 5
# Filter out tokens that appear in more than `no_above` documents (fraction of total corpus size, *not* absolute number).
self.no_above_fraction_of_doc = 0.33
# Remove topic which weights less than this number
self.remove_topic_so_less = 0.05
# Number of iterations in training LDA model, the less the documents in total, the more the iterations for LDA model to converge
self.num_of_iterations = iter_num
# Number of passes in the model
self.passes = pass_num
# Print all hyperparameters
parameters = {}
parameters['min_length'] = self.min_length
parameters['num_topics'] = self.num_topics
parameters['no_below_this_number'] = self.no_below_this_number
parameters['no_above_fraction_of_doc'] = self.no_above_fraction_of_doc
parameters['remove_topic_so_less'] = self.remove_topic_so_less
parameters['num_of_iterations'] = self.num_of_iterations
parameters['passes'] = self.passes
for k in parameters:
logging.debug("Parameter for {0} is {1}".format(k, parameters[k]))
logging.debug('Finished initializing....')
def __convertListToDict(self, anylist):
'''
This code snippet could be easily done by one-liner dict comprehension:
{key:value for key,value in anylist}
'''
convertedDict = {}
for pair in anylist:
topic = pair[0]
weight = pair[1]
convertedDict[topic] = weight
return convertedDict
def __savePickleFile(self, fileName, objectName):
'''
Serialize objects into pickle files
'''
fileName = '/data/rec/rec_lda/model/' + fileName + '.pickle'
mappingFile = open(fileName, 'wb')
pickle.dump(objectName, mappingFile)
mappingFile.close()
def saveModel(self, lda, doc_mapping, dic, corpus, tail=''):
'''
Saving models and maps for later use
:param lda: the LDA model
:param doc_mapping: index-document mapping
:param corpus: the whole corpus in list[list[tokens]]
:param tail:
'''
logging.debug('Start saving LDA models & maps....')
# Save model output
save_path = '/data/rec/rec_lda/model/final_ldamodel' + tail
lda.save(save_path)
logging.debug('Model saved at {0}'.format(save_path))
# Save dict
save_path = '/data/rec/rec_lda/model/dic.dict' + tail
dic.save(save_path)
# Save the whole corpus
save_path = 'corpus' + tail
self.__savePickleFile(save_path, corpus)
logging.debug('Corpus saved at {0}'.format(save_path))
# Save index to document mapping
save_path = 'documentmapping' + tail
self.__savePickleFile(save_path, doc_mapping)
logging.debug('Document mapping saved at {0}'.format(save_path))
# Save index to link mapping
# save_path = 'linkmapping'
# self.__savePickleFile(save_path, link_mapping)
# print('Link mapping saved at {0}'.format(save_path))
# Save doc to topic matrix
doc_topic_matrix = {}
logging.debug('CORPUS: {}'.format(len(corpus)))
for index, p_id in enumerate(doc_mapping.keys()):
dense_vector = {}
vector = self.__convertListToDict(lda[corpus[index]])
# remove topic that is so irrelevant
for topic in vector:
if vector[topic] > self.remove_topic_so_less:
dense_vector[topic] = vector[topic]
doc_topic_matrix[p_id] = dense_vector
save_path = 'doc_topic_matrix' + tail
self.__savePickleFile(save_path, doc_topic_matrix)
logging.debug('doc to topic mapping saved at {0}'.format(save_path))
logging.debug('Finished saving LDA models & maps....')
def trainModel(self, debug=False):
'''
Train a LDA model, inclusive of 4 steps:
1. Parse the whole corpora into unigram token collections and document mapping (for later use)
2. Filter tokens which are not common (no_below_this_number), and too common (no_above_fraction_of_doc)
3. Indexing the token collections and do TF-IDF transformation
4. Call gensim.models.LdaModel and generate topic distributions of the corpora
'''
global train_set, doc_mapping
logging.debug('Start preparing unigram tokens....')
# Start of preparing list of documents and tokens [[words_in_1st_doc],[words_in_2nd_doc]....], which comprise Bag-Of-Words (BOW)
# Get document_count, tokens, and document-index mapping from the corpora
# Put the training data into gensim.corpora for later use
dic = corpora.Dictionary(train_set)
denominator = len(dic)
# Filtering infrequent words & common stopwords, thus reducing the dimension of terms (which prevents curse of dimensionality)
dic.filter_extremes(no_below=self.no_below_this_number, no_above=self.no_above_fraction_of_doc)
nominator = len(dic)
corpus = [dic.doc2bow(text) for text in train_set] # transform every token into BOW
if debug:
# print('There are {} documents in the pool'.format(len(documents)))
logging.debug("In the corpus there are " + str(denominator) + " raw tokens")
if denominator > 0:
logging.debug("After filtering, in the corpus there are " + str(nominator) + " unique tokens, reduced " + str(
1 - (nominator / denominator)) + "%")
logging.debug('Finished preparing unigram tokens....')
# END
logger.debug('Start training LDA model....')
# Implementing TF-IDF as a vector for each document, and train LDA model on top of that
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
        logger.debug('Applied TF-IDF weighting to the corpus.')
        # Set eval_every=1 so that perplexity is evaluated at every iteration
lda = models.LdaModel(corpus_tfidf, id2word=dic, num_topics=self.num_topics, eval_every=1,
iterations=self.num_of_iterations, passes=self.passes)
corpus_lda = lda[corpus_tfidf]
        # Alternative: apply LDA directly to the raw BoW corpus (without TF-IDF)
# lda = models.LdaModel(corpus, id2word=dic, num_topics=self.num_topics, iterations=self.num_of_iterations, passes=self.passes)
# corpus_lda = lda[corpus]
# Once done training, print all the topics and related words
if debug:
logging.debug('Finished training LDA model.......Here is the list of all topics & their most frequent words')
for i in range(self.num_topics):
logging.debug('Topic {} : {}'.format(str(i), lda.print_topic(i)))
# Exhibit perplexity of current model under specific topic hyperparameter : k. The lower the better
logging.debug('===============================')
perplexity = lda.bound(corpus_lda)
logging.debug('Model perplexity : ' + str(perplexity) + ' when topic k = ' + str(self.num_topics))
return lda, doc_mapping, dic, corpus_tfidf, perplexity
def load(self, tail=''):
# current_working_dir = '/data/rec/rec_lda/'
# os.chdir(current_working_dir)
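        # NOTE: loadPickleFile(), pearson_correlation() and getOrderedDict() used by
        # this class are assumed to come from a project-local utility module that is
        # not included in this file.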
lda_model_path = "/data/rec/rec_lda/model/final_ldamodel" + tail
self.tail = tail
        self.lda = models.LdaModel.load(lda_model_path)
        self.dic = corpora.Dictionary.load('/data/rec/rec_lda/model/dic.dict' + tail)
self.corpus = loadPickleFile('corpus' + tail)
self.max_token_index = self.__get_max_token_index()
self.no_of_recommendation = 10
self.omit_topic_below_this_fraction = 0.1
self.mapping = self.load_mapping()
self.doc_topic_matrix = loadPickleFile('doc_topic_matrix' + tail)
def load_mapping(self):
path_mappingfile = '/data/rec/rec_lda/model/documentmapping' + self.tail + '.pickle'
mappingFile = open(path_mappingfile, 'rb')
mapping = pickle.load(mappingFile)
mappingFile.close()
return mapping
def __get_max_token_index(self):
token_index = 0
for doc in self.corpus:
for pair in doc:
if int(pair[0]) > token_index:
token_index = int(pair[0])
return token_index
def constructDocToTopicMatrix(self, lda, corpus):
'''
        Build a document-to-topic matrix: for every non-empty document in the
        corpus, store its LDA topic distribution as a {topic: weight} dict.
'''
doc_topic_matrix = {}
count = 0
for doc in corpus:
if len(doc) > 0:
count = count + 1
vector = self.__convertListToDict(lda[doc])
doc_topic_matrix[count] = vector
return doc_topic_matrix
def constructDocDictToTopicMatrix(self, weight_dict, verbose=False):
user_topic_vector = {}
length = len(weight_dict)
try:
for seen_topic, weight in weight_dict.items():
# weight = user_dict[seen_doc][seen_topic]
if seen_topic in user_topic_vector:
current_weight = user_topic_vector[seen_topic]
current_weight = current_weight + weight / length
user_topic_vector[seen_topic] = current_weight
else:
user_topic_vector[seen_topic] = weight / length
except Exception as e:
logger.debug(e)
logger.debug('Warning: wrong value of weight_dict:' + str(weight_dict))
lightweight_user_topic_vector = {}
for k, v in user_topic_vector.items():
if v > self.omit_topic_below_this_fraction / 2:
lightweight_user_topic_vector[k] = v
denominator = sum(lightweight_user_topic_vector.values())
for topic in lightweight_user_topic_vector:
lightweight_user_topic_vector[topic] = lightweight_user_topic_vector[topic] / denominator
if verbose:
logger.debug('Topic distribution for current user : {0}'.format(lightweight_user_topic_vector))
logger.debug('Normalized topic distribution for current user : {0}'.format(lightweight_user_topic_vector))
return lightweight_user_topic_vector
def constructUserToTopicMatrix(self, user_dict, verbose=False):
""" Construct user-topic vector(dictionary)
args:
user_dict: a dictionary of user-doc and doc-topic
"""
user_topic_vector = {}
length = len(user_dict)
for seen_doc, seen_topics in user_dict.items():
try:
for seen_topic, weight in seen_topics.items():
# weight = user_dict[seen_doc][seen_topic]
if seen_topic in user_topic_vector:
current_weight = user_topic_vector[seen_topic]
current_weight = current_weight + weight / length
user_topic_vector[seen_topic] = current_weight
else:
user_topic_vector[seen_topic] = weight / length
except Exception as e:
logger.debug(e)
logger.debug('Warning: wrong value of seen_topics:' + str(seen_topics))
# Remove topic less than weight : omit_topic_below_this_fraction/2
lightweight_user_topic_vector = {}
for k, v in user_topic_vector.items():
if v > self.omit_topic_below_this_fraction / 2:
lightweight_user_topic_vector[k] = v
denominator = sum(lightweight_user_topic_vector.values())
for topic in lightweight_user_topic_vector:
lightweight_user_topic_vector[topic] = lightweight_user_topic_vector[topic] / denominator
if verbose:
logger.debug('Topic distribution for current user : {0}'.format(lightweight_user_topic_vector))
logger.debug('Normalized topic distribution for current user : {0}'.format(lightweight_user_topic_vector))
return lightweight_user_topic_vector
def getLink(self, sort, no_of_recommendation):
for i in list(sort.keys())[:no_of_recommendation]:
logger.debug('Recommend document: {0} '.format(self.mapping[i]))
def gen_corpus(self, doc_text):
tokens = jieba.cut(doc_text)
# user_corpus = [self.dic.doc2bow(text) for text in [tokens]]
user_corpus = []
for text in [tokens]:
try:
bow = self.dic.doc2bow(text)
except Exception as e:
logger.warning(e)
try:
bow = self.dic.doc2bow(list(text))
except:
bow = None
# logger.debug('Warning: doc2bow error for text:' + str(text))
if bow is not None:
user_corpus.append(bow)
ret_corpus = None
if len(user_corpus) > 0:
ret_corpus = user_corpus[0]
return ret_corpus
def gen_doc_matrix(self, text):
text_corpus = self.gen_corpus(text)
lightweight_user_topic_vector = {}
if text_corpus is not None:
            # Drop tokens whose ids were never seen in the training dictionary/corpus
t_key = []
for pair in text_corpus:
token_index = int(pair[0])
# logger.debug(token_index)
if token_index <= self.max_token_index:
t_key.append(pair)
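            # Indexing a trained gensim LDA model with a bag-of-words vector returns
            # its topic distribution as a list of (topic_id, weight) pairs.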
text_score = self.lda[t_key]
text_dict = self.__convertListToDict(text_score)
user_topic_vector = {}
for seen_topic in text_dict:
weight = text_dict[seen_topic]
if seen_topic in user_topic_vector:
current_weight = user_topic_vector[seen_topic]
current_weight = current_weight + weight
user_topic_vector[seen_topic] = current_weight
else:
user_topic_vector[seen_topic] = weight
for k, v in user_topic_vector.items():
if v > 0.05:
lightweight_user_topic_vector[k] = v
denominator = sum(lightweight_user_topic_vector.values())
for topic in lightweight_user_topic_vector:
lightweight_user_topic_vector[topic] = lightweight_user_topic_vector[topic] / denominator
return lightweight_user_topic_vector
def predict_matrix(self, user_matrix, prob=0.7):
recommend_dict = {}
# Pearson correlation appears to be the most precise 'distance' metric in this case
for doc in self.doc_topic_matrix:
# sim = cossim(user_topic_matrix, self.doc_topic_matrix[doc]) # cosine similarity
# sim = KLDivergenceSim(user_topic_matrix, self.doc_topic_matrix[doc], self.lda.num_topics) # KLD similarity
sim = pearson_correlation(user_matrix, self.doc_topic_matrix[doc], self.lda.num_topics)
if sim > prob: # 0.7 is arbitrary, subject to developer's judge
recommend_dict[doc] = sim
# sort the dict descending by similarity
sort = getOrderedDict(recommend_dict)
return sort
def predict_text(self, text, prob=0.7):
lightweight_user_topic_vector = self.gen_doc_matrix(text)
sort = self.predict_matrix(lightweight_user_topic_vector, prob)
return sort
def predict_doc(self, doc_dict, verbose=False, prob=0.7):
logger.debug('Doc dict: {}'.format(doc_dict))
one_doc_topic_matrix = self.constructDocDictToTopicMatrix(doc_dict, verbose)
recommend_dict = {}
# Pearson correlation appears to be the most precise 'distance' metric in this case
for doc in self.doc_topic_matrix:
# sim = cossim(user_topic_matrix, self.doc_topic_matrix[doc]) # cosine similarity
# sim = KLDivergenceSim(user_topic_matrix, self.doc_topic_matrix[doc], self.lda.num_topics) # KLD similarity
sim = pearson_correlation(one_doc_topic_matrix, self.doc_topic_matrix[doc], self.lda.num_topics)
if sim > prob and doc not in doc_dict.keys(): # 0.7 is arbitrary, subject to developer's judge
if verbose:
logger.debug('Recommend document {0} of similarity : {1}'.format(doc, sim))
recommend_dict[doc] = sim
# sort the dict descending by similarity
sort = getOrderedDict(recommend_dict)
#recommend_str = str(list(sort.keys())[:self.no_of_recommendation]).replace('[', '').replace(']', '')
if verbose:
for title in doc_dict:
logger.debug('You viewed : {0}'.format(self.mapping[title]))
self.getLink(sort, self.no_of_recommendation)
return sort
def predict(self, user_dict, verbose=False, prob=0.7):
'''
Get recommendations from the user_dict which describes the topic distribution attibutes to a user/item
If verbose = True, return the result in a verbose way.
'''
logger.debug('User dict: {}'.format(user_dict))
user_topic_matrix = self.constructUserToTopicMatrix(user_dict, verbose)
recommend_dict = {}
# Pearson correlation appears to be the most precise 'distance' metric in this case
for doc in self.doc_topic_matrix:
# sim = cossim(user_topic_matrix, self.doc_topic_matrix[doc]) # cosine similarity
# sim = KLDivergenceSim(user_topic_matrix, self.doc_topic_matrix[doc], self.lda.num_topics) # KLD similarity
sim = pearson_correlation(user_topic_matrix, self.doc_topic_matrix[doc], self.lda.num_topics)
if sim > prob and doc not in user_dict.keys(): # 0.7 is arbitrary, subject to developer's judge
if verbose:
logger.debug('Recommend document {0} of similarity : {1}'.format(doc, sim))
recommend_dict[doc] = sim
# sort the dict descending by similarity
sort = getOrderedDict(recommend_dict)
#recommend_str = str(list(sort.keys())[:self.no_of_recommendation]).replace('[', '').replace(']', '')
if verbose:
for title in user_dict:
logger.debug('You viewed : {0}'.format(self.mapping[title]))
self.getLink(sort, self.no_of_recommendation)
return sort
if __name__ == '__main__':
parser = ArgumentParser(description="LDAModel", formatter_class=RawDescriptionHelpFormatter)
# parser.add_argument("-i", "--index", default=True, help="Mapping by index")
parser.add_argument("-d", "--doc", default=True, help="Target documents")
    # args.doc is expected to be a pandas DataFrame with columns: id, name, desc
args = parser.parse_args()
prepare_data(args.doc)
LDAmodel = LDAModel() # instantiate the LDAModel class
    lda, doc_mapping, dic, corpus, perplexity = LDAmodel.trainModel()  # train an LDA model using the assigned corpora
    LDAmodel.saveModel(lda, doc_mapping, dic, corpus)  # save model for later recommendation use
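    # --- Usage sketch (illustrative only, not part of the original pipeline) ---
    # Once the model and matrices above have been saved, recommendations for a new
    # free-text document could be obtained roughly as follows:
    #
    #   recommender = LDAModel()
    #   recommender.load()          # reload final_ldamodel, dic.dict and the pickled matrices
    #   scores = recommender.predict_text("some patent abstract ...", prob=0.7)
    #   for doc_id, sim in list(scores.items())[:10]:
    #       print(doc_id, sim)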
|
[
"pickle.dump",
"logging.debug",
"codecs.open",
"argparse.ArgumentParser",
"logging.basicConfig",
"warnings.filterwarnings",
"jieba.cut",
"gensim.models.TfidfModel",
"gensim.corpora.Dictionary",
"re.escape",
"gensim.models.LdaModel",
"pickle.load",
"logging.getLogger"
] |
[((243, 363), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(lineno)d: %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(lineno)d: %(message)s')\n", (262, 363), False, 'import logging\n'), ((368, 395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'import logging\n'), ((660, 704), 'logging.debug', 'logging.debug', (['"""Start tokenzing the corpora"""'], {}), "('Start tokenzing the corpora')\n", (673, 704), False, 'import logging\n'), ((20678, 20766), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""LDAModel"""', 'formatter_class': 'RawDescriptionHelpFormatter'}), "(description='LDAModel', formatter_class=\n RawDescriptionHelpFormatter)\n", (20692, 20766), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((408, 486), 'codecs.open', 'codecs.open', (['"""/data/rec/rec_lda/model/stop_words_cn.txt"""', '"""r"""'], {'encoding': '"""utf8"""'}), "('/data/rec/rec_lda/model/stop_words_cn.txt', 'r', encoding='utf8')\n", (419, 486), False, 'import codecs\n'), ((1991, 2024), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2014, 2024), False, 'import warnings\n'), ((3475, 3517), 'logging.debug', 'logging.debug', (['"""Finished initializing...."""'], {}), "('Finished initializing....')\n", (3488, 3517), False, 'import logging\n'), ((4139, 4175), 'pickle.dump', 'pickle.dump', (['objectName', 'mappingFile'], {}), '(objectName, mappingFile)\n', (4150, 4175), False, 'import pickle\n'), ((4515, 4566), 'logging.debug', 'logging.debug', (['"""Start saving LDA models & maps...."""'], {}), "('Start saving LDA models & maps....')\n", (4528, 4566), False, 'import logging\n'), ((6166, 6220), 'logging.debug', 'logging.debug', (['"""Finished saving LDA models & maps...."""'], {}), "('Finished saving LDA models & maps....')\n", (6179, 6220), False, 'import logging\n'), ((6753, 6804), 'logging.debug', 'logging.debug', (['"""Start preparing unigram tokens...."""'], {}), "('Start preparing unigram tokens....')\n", (6766, 6804), False, 'import logging\n'), ((7105, 7134), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (['train_set'], {}), '(train_set)\n', (7123, 7134), False, 'from gensim import corpora, models\n'), ((8178, 8203), 'gensim.models.TfidfModel', 'models.TfidfModel', (['corpus'], {}), '(corpus)\n', (8195, 8203), False, 'from gensim import corpora, models\n'), ((8342, 8485), 'gensim.models.LdaModel', 'models.LdaModel', (['corpus_tfidf'], {'id2word': 'dic', 'num_topics': 'self.num_topics', 'eval_every': '(1)', 'iterations': 'self.num_of_iterations', 'passes': 'self.passes'}), '(corpus_tfidf, id2word=dic, num_topics=self.num_topics,\n eval_every=1, iterations=self.num_of_iterations, passes=self.passes)\n', (8357, 8485), False, 'from gensim import corpora, models\n'), ((10323, 10347), 'pickle.load', 'pickle.load', (['mappingFile'], {}), '(mappingFile)\n', (10334, 10347), False, 'import pickle\n'), ((14636, 14655), 'jieba.cut', 'jieba.cut', (['doc_text'], {}), '(doc_text)\n', (14645, 14655), False, 'import jieba\n'), ((737, 766), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (746, 766), False, 'import re\n'), ((1198, 1213), 'jieba.cut', 'jieba.cut', (['text'], {}), '(text)\n', (1207, 1213), False, 'import jieba\n'), ((7944, 7998), 'logging.debug', 'logging.debug', (['"""Finished preparing unigram 
tokens...."""'], {}), "('Finished preparing unigram tokens....')\n", (7957, 7998), False, 'import logging\n'), ((8840, 8959), 'logging.debug', 'logging.debug', (['"""Finished training LDA model.......Here is the list of all topics & their most frequent words"""'], {}), "(\n 'Finished training LDA model.......Here is the list of all topics & their most frequent words'\n )\n", (8853, 8959), False, 'import logging\n'), ((9201, 9249), 'logging.debug', 'logging.debug', (['"""==============================="""'], {}), "('===============================')\n", (9214, 9249), False, 'import logging\n')]
|
#!/usr/bin/env python3
# Copyright 2019 The Kapitan Authors
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <<EMAIL>>
#
# SPDX-License-Identifier: Apache-2.0
"jinja2 tests"
import base64
import unittest
import tempfile
import time
from kapitan.utils import render_jinja2_file
from kapitan.resources import inventory
from kapitan.inputs.jinja2_filters import base64_encode
from kapitan.refs.base import RefController, Revealer
from kapitan.refs.base64 import Base64Ref
from kapitan import cached
from collections import namedtuple
class Jinja2FiltersTest(unittest.TestCase):
def test_sha256(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|sha256 }}".encode("UTF-8"))
f.seek(0)
context = {"text": "this and that"}
output = "e863c1ac42619a2b429a08775a6acd89ff4c2c6b8dae12e3461a5fa63b2f92f5"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_base64_encode(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|b64encode }}".encode("UTF-8"))
f.seek(0)
context = {"text": "this and that"}
output = "dGhpcyBhbmQgdGhhdA=="
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_base64_decode(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|b64decode }}".encode("UTF-8"))
f.seek(0)
context = {"text": "dGhpcyBhbmQgdGhhdA=="}
output = "this and that"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_toml(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|toml }}".encode("UTF-8"))
f.seek(0)
context = {"text": {"foo": ["this", "that"]}}
output = 'foo = [ "this", "that",]\n'
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_yaml(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|yaml }}".encode("UTF-8"))
f.seek(0)
context = {"text": ["this", "that"]}
output = "- this\n- that\n"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_fileglob(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|fileglob }}".encode("UTF-8"))
f.seek(0)
context = {"text": "./tests/*jinja2.py"}
output = "['./tests/test_jinja2.py']"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_bool(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|bool }}".encode("UTF-8"))
f.seek(0)
context = {"text": "yes"}
output = "True"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_to_datetime(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|to_datetime }}".encode("UTF-8"))
f.seek(0)
context = {"text": "2019-03-07 13:37:00"}
output = "2019-03-07 13:37:00"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_strftime(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|strftime }}".encode("UTF-8"))
f.seek(0)
format = "%a, %d %b %Y %H:%M"
context = {"text": format}
output = time.strftime(format)
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_regex_replace(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|regex_replace(pattern='world', replacement='kapitan') }}".encode("UTF-8"))
f.seek(0)
context = {"text": "hello world"}
output = "hello kapitan"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_regex_escape(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|regex_escape }}".encode("UTF-8"))
f.seek(0)
context = {"text": "+s[a-z].*"}
output = "\\+s\\[a\\-z\\]\\.\\*"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_regex_search(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|regex_search('world.*') }}".encode("UTF-8"))
f.seek(0)
context = {"text": "hello world"}
output = "world"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_regex_findall(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|regex_findall('world.*') }}".encode("UTF-8"))
f.seek(0)
context = {"text": "hello world"}
output = "['world']"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_ternary(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|ternary('yes', 'no') }}".encode("UTF-8"))
f.seek(0)
context = {"text": "kapitan == kapitan"}
output = "yes"
self.assertEqual(render_jinja2_file(f.name, context), output)
def test_shuffle(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ text|shuffle }}".encode("UTF-8"))
f.seek(0)
array = [1, 2, 3, 4, 5]
context = {"text": array}
self.assertNotEqual(render_jinja2_file(f.name, context), array)
def test_reveal_maybe_b64encode_tag(self):
"""
creates ?{base64:some_value} and runs reveal_maybe|b64encode jinja2 filters
"""
with tempfile.NamedTemporaryFile() as f:
f.write("{{ my_ref_tag_var|reveal_maybe|b64encode }}".encode("UTF-8"))
f.seek(0)
# new argparse namespace with --reveal and --refs-path values
namespace = namedtuple("Namespace", [])
namespace.reveal = True
namespace.refs_path = tempfile.mkdtemp()
# reveal_maybe uses cached, so inject namespace
cached.args["compile"] = namespace
cached.ref_controller_obj = RefController(cached.args["compile"].refs_path)
cached.revealer_obj = Revealer(cached.ref_controller_obj)
ref_tag = "?{base64:some_value}"
ref_value = b"sitar_rock!"
cached.ref_controller_obj[ref_tag] = Base64Ref(ref_value)
context = {"my_ref_tag_var": ref_tag}
ref_value_b64 = base64.b64encode(ref_value).decode()
self.assertEqual(render_jinja2_file(f.name, context), ref_value_b64)
def test_reveal_maybe_tag_no_reveal_flag(self):
"""
creates ?{base64:some_value} and runs reveal_maybe jinja2 filters without --reveal flag
"""
with tempfile.NamedTemporaryFile() as f:
f.write("{{ my_ref_tag_var|reveal_maybe }}".encode("UTF-8"))
f.seek(0)
# new argparse namespace with --reveal and --refs-path values
namespace = namedtuple("Namespace", [])
namespace.reveal = False
namespace.refs_path = tempfile.mkdtemp()
# reveal_maybe uses cached, so inject namespace
cached.args["compile"] = namespace
cached.ref_controller_obj = RefController(cached.args["compile"].refs_path)
cached.revealer_obj = Revealer(cached.ref_controller_obj)
ref_tag = "?{base64:some_value}"
ref_value = b"sitar_rock!"
cached.ref_controller_obj[ref_tag] = Base64Ref(ref_value)
context = {"my_ref_tag_var": ref_tag}
self.assertEqual(render_jinja2_file(f.name, context), "?{base64:some_value}")
def test_reveal_maybe_no_tag(self):
"""
runs reveal_maybe jinja2 filter on data without ref tags
"""
with tempfile.NamedTemporaryFile() as f:
f.write("{{ my_var|reveal_maybe }}".encode("UTF-8"))
f.seek(0)
# new argparse namespace with --reveal and --refs-path values
namespace = namedtuple("Namespace", [])
namespace.reveal = True
namespace.refs_path = tempfile.mkdtemp()
# reveal_maybe uses cached, so inject namespace
cached.args["compile"] = namespace
cached.ref_controller_obj = RefController(cached.args["compile"].refs_path)
cached.revealer_obj = Revealer(cached.ref_controller_obj)
var_value = "heavy_rock!"
context = {"my_var": var_value}
self.assertEqual(render_jinja2_file(f.name, context), var_value)
class Jinja2ContextVars(unittest.TestCase):
def test_inventory_context(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ inventory.parameters.cluster.name }}".encode("UTF-8"))
cluster_name = "minikube"
target_name = "minikube-es"
inv = inventory(["examples/kubernetes"], target_name)
context = {"inventory": inv}
f.seek(0)
self.assertEqual(render_jinja2_file(f.name, context), cluster_name)
def test_inventory_global_context(self):
with tempfile.NamedTemporaryFile() as f:
target_name = "minikube-es"
f.write(
'{{ inventory_global["%s"].parameters.cluster.name }}'.encode("UTF-8")
% target_name.encode("UTF-8")
)
cluster_name = "minikube"
inv_global = inventory(["examples/kubernetes"], None)
context = {"inventory_global": inv_global}
f.seek(0)
self.assertEqual(render_jinja2_file(f.name, context), cluster_name)
class Jinja2ExternalFilterTest(unittest.TestCase):
def test_custom_filter_jinja2(self):
with tempfile.NamedTemporaryFile() as f:
f.write("{{ inventory.parameters.cluster.name | custom_jinja2_filter }}".encode("UTF-8"))
cluster_name = "minikube"
target_name = "minikube-es"
inv = inventory(["examples/kubernetes"], target_name)
context = {"inventory": inv}
f.seek(0)
actual_output = render_jinja2_file(
f.name, context, "./examples/kubernetes/lib/custom_jinja2_filter.py"
)
expected_output = base64_encode(cluster_name)
self.assertEqual(actual_output, expected_output)
|
[
"tempfile.NamedTemporaryFile",
"kapitan.inputs.jinja2_filters.base64_encode",
"kapitan.utils.render_jinja2_file",
"kapitan.refs.base.RefController",
"time.strftime",
"tempfile.mkdtemp",
"base64.b64encode",
"collections.namedtuple",
"kapitan.resources.inventory",
"kapitan.refs.base64.Base64Ref",
"kapitan.refs.base.Revealer"
] |
[((622, 651), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (649, 651), False, 'import tempfile\n'), ((995, 1024), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1022, 1024), False, 'import tempfile\n'), ((1327, 1356), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1354, 1356), False, 'import tempfile\n'), ((1650, 1679), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1677, 1679), False, 'import tempfile\n'), ((1984, 2013), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2011, 2013), False, 'import tempfile\n'), ((2303, 2332), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2330, 2332), False, 'import tempfile\n'), ((2636, 2665), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2663, 2665), False, 'import tempfile\n'), ((2935, 2964), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2962, 2964), False, 'import tempfile\n'), ((3269, 3298), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3296, 3298), False, 'import tempfile\n'), ((3488, 3509), 'time.strftime', 'time.strftime', (['format'], {}), '(format)\n', (3501, 3509), False, 'import time\n'), ((3632, 3661), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3659, 3661), False, 'import tempfile\n'), ((3998, 4027), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4025, 4027), False, 'import tempfile\n'), ((4329, 4358), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4356, 4358), False, 'import tempfile\n'), ((4658, 4687), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4685, 4687), False, 'import tempfile\n'), ((4986, 5015), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5013, 5015), False, 'import tempfile\n'), ((5311, 5340), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5338, 5340), False, 'import tempfile\n'), ((5746, 5775), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5773, 5775), False, 'import tempfile\n'), ((5986, 6013), 'collections.namedtuple', 'namedtuple', (['"""Namespace"""', '[]'], {}), "('Namespace', [])\n", (5996, 6013), False, 'from collections import namedtuple\n'), ((6084, 6102), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6100, 6102), False, 'import tempfile\n'), ((6251, 6298), 'kapitan.refs.base.RefController', 'RefController', (["cached.args['compile'].refs_path"], {}), "(cached.args['compile'].refs_path)\n", (6264, 6298), False, 'from kapitan.refs.base import RefController, Revealer\n'), ((6333, 6368), 'kapitan.refs.base.Revealer', 'Revealer', (['cached.ref_controller_obj'], {}), '(cached.ref_controller_obj)\n', (6341, 6368), False, 'from kapitan.refs.base import RefController, Revealer\n'), ((6503, 6523), 'kapitan.refs.base64.Base64Ref', 'Base64Ref', (['ref_value'], {}), '(ref_value)\n', (6512, 6523), False, 'from kapitan.refs.base64 import Base64Ref\n'), ((6906, 6935), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (6933, 6935), False, 'import tempfile\n'), ((7136, 7163), 'collections.namedtuple', 'namedtuple', (['"""Namespace"""', '[]'], {}), "('Namespace', [])\n", (7146, 7163), False, 'from collections import namedtuple\n'), ((7235, 7253), 
'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7251, 7253), False, 'import tempfile\n'), ((7402, 7449), 'kapitan.refs.base.RefController', 'RefController', (["cached.args['compile'].refs_path"], {}), "(cached.args['compile'].refs_path)\n", (7415, 7449), False, 'from kapitan.refs.base import RefController, Revealer\n'), ((7484, 7519), 'kapitan.refs.base.Revealer', 'Revealer', (['cached.ref_controller_obj'], {}), '(cached.ref_controller_obj)\n', (7492, 7519), False, 'from kapitan.refs.base import RefController, Revealer\n'), ((7654, 7674), 'kapitan.refs.base64.Base64Ref', 'Base64Ref', (['ref_value'], {}), '(ref_value)\n', (7663, 7674), False, 'from kapitan.refs.base64 import Base64Ref\n'), ((7958, 7987), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7985, 7987), False, 'import tempfile\n'), ((8180, 8207), 'collections.namedtuple', 'namedtuple', (['"""Namespace"""', '[]'], {}), "('Namespace', [])\n", (8190, 8207), False, 'from collections import namedtuple\n'), ((8278, 8296), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (8294, 8296), False, 'import tempfile\n'), ((8445, 8492), 'kapitan.refs.base.RefController', 'RefController', (["cached.args['compile'].refs_path"], {}), "(cached.args['compile'].refs_path)\n", (8458, 8492), False, 'from kapitan.refs.base import RefController, Revealer\n'), ((8527, 8562), 'kapitan.refs.base.Revealer', 'Revealer', (['cached.ref_controller_obj'], {}), '(cached.ref_controller_obj)\n', (8535, 8562), False, 'from kapitan.refs.base import RefController, Revealer\n'), ((8820, 8849), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (8847, 8849), False, 'import tempfile\n'), ((9031, 9078), 'kapitan.resources.inventory', 'inventory', (["['examples/kubernetes']", 'target_name'], {}), "(['examples/kubernetes'], target_name)\n", (9040, 9078), False, 'from kapitan.resources import inventory\n'), ((9281, 9310), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (9308, 9310), False, 'import tempfile\n'), ((9588, 9628), 'kapitan.resources.inventory', 'inventory', (["['examples/kubernetes']", 'None'], {}), "(['examples/kubernetes'], None)\n", (9597, 9628), False, 'from kapitan.resources import inventory\n'), ((9893, 9922), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (9920, 9922), False, 'import tempfile\n'), ((10127, 10174), 'kapitan.resources.inventory', 'inventory', (["['examples/kubernetes']", 'target_name'], {}), "(['examples/kubernetes'], target_name)\n", (10136, 10174), False, 'from kapitan.resources import inventory\n'), ((10266, 10358), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context', '"""./examples/kubernetes/lib/custom_jinja2_filter.py"""'], {}), "(f.name, context,\n './examples/kubernetes/lib/custom_jinja2_filter.py')\n", (10284, 10358), False, 'from kapitan.utils import render_jinja2_file\n'), ((10415, 10442), 'kapitan.inputs.jinja2_filters.base64_encode', 'base64_encode', (['cluster_name'], {}), '(cluster_name)\n', (10428, 10442), False, 'from kapitan.inputs.jinja2_filters import base64_encode\n'), ((902, 937), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (920, 937), False, 'from kapitan.utils import render_jinja2_file\n'), ((1234, 1269), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (1252, 1269), False, 'from kapitan.utils import render_jinja2_file\n'), 
((1566, 1601), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (1584, 1601), False, 'from kapitan.utils import render_jinja2_file\n'), ((1900, 1935), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (1918, 1935), False, 'from kapitan.utils import render_jinja2_file\n'), ((2215, 2250), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (2233, 2250), False, 'from kapitan.utils import render_jinja2_file\n'), ((2552, 2587), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (2570, 2587), False, 'from kapitan.utils import render_jinja2_file\n'), ((2844, 2879), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (2862, 2879), False, 'from kapitan.utils import render_jinja2_file\n'), ((3181, 3216), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (3199, 3216), False, 'from kapitan.utils import render_jinja2_file\n'), ((3539, 3574), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (3557, 3574), False, 'from kapitan.utils import render_jinja2_file\n'), ((3906, 3941), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (3924, 3941), False, 'from kapitan.utils import render_jinja2_file\n'), ((4237, 4272), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (4255, 4272), False, 'from kapitan.utils import render_jinja2_file\n'), ((4565, 4600), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (4583, 4600), False, 'from kapitan.utils import render_jinja2_file\n'), ((4899, 4934), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (4917, 4934), False, 'from kapitan.utils import render_jinja2_file\n'), ((5224, 5259), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (5242, 5259), False, 'from kapitan.utils import render_jinja2_file\n'), ((5533, 5568), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (5551, 5568), False, 'from kapitan.utils import render_jinja2_file\n'), ((6668, 6703), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (6686, 6703), False, 'from kapitan.utils import render_jinja2_file\n'), ((7754, 7789), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (7772, 7789), False, 'from kapitan.utils import render_jinja2_file\n'), ((8675, 8710), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (8693, 8710), False, 'from kapitan.utils import render_jinja2_file\n'), ((9171, 9206), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (9189, 9206), False, 'from kapitan.utils import render_jinja2_file\n'), ((9735, 9770), 'kapitan.utils.render_jinja2_file', 'render_jinja2_file', (['f.name', 'context'], {}), '(f.name, context)\n', (9753, 9770), False, 'from kapitan.utils import render_jinja2_file\n'), ((6602, 
6629), 'base64.b64encode', 'base64.b64encode', (['ref_value'], {}), '(ref_value)\n', (6618, 6629), False, 'import base64\n')]
|
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding
sys.stderr = stderr
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def build_sample_model(vocab_size,emb_size=100,num_layers=1,hidden_size=100,dropout=0.2):
model = Sequential()
model.add(Embedding(vocab_size,emb_size,batch_input_shape=(1,1)))
for i in range(num_layers):
model.add(LSTM(hidden_size,return_sequences=(i<num_layers-1),stateful=True))
model.add(Dropout(dropout))
model.add(Dense(vocab_size))
model.add(Activation('softmax'))
return model
def sample(header, num_chars):
model = build_sample_model(vocab_size)
model.load_weights('generator/weights/weights.h5')
sampled = [char_to_ix[c] for c in header]
for c in header[:-1]:
batch = np.zeros((1, 1))
batch[0, 0] = char_to_ix[c]
model.predict_on_batch(batch)
for i in range(num_chars):
batch = np.zeros((1, 1))
if sampled:
batch[0, 0] = sampled[-1]
else:
batch[0, 0] = np.random.randint(vocab_size)
result = model.predict_on_batch(batch).ravel()
sample = np.random.choice(range(vocab_size), p=result)
if ix_to_char[sample] == "\n":
break
sampled.append(sample)
return ''.join(ix_to_char[c] for c in sampled)
text = open("generator/names.txt").read()
char_to_ix = {ch:i for (i,ch) in enumerate(sorted(list(set(text))))}
ix_to_char = {i:ch for (ch,i) in char_to_ix.items()}
vocab_size = len(char_to_ix)
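# Illustrative usage sketch (not part of the original script): generate a single
# name from a seed character. Assumes the seed character occurs in names.txt and
# that generator/weights/weights.h5 exists on disk.
if __name__ == "__main__":
    print(sample("A", 30))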
|
[
"keras.layers.Activation",
"warnings.filterwarnings",
"keras.layers.LSTM",
"keras.layers.Dropout",
"numpy.zeros",
"keras.layers.Dense",
"numpy.random.randint",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((45, 102), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (68, 102), False, 'import warnings\n'), ((464, 476), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (474, 476), False, 'from keras.models import Sequential, load_model\n'), ((488, 545), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'emb_size'], {'batch_input_shape': '(1, 1)'}), '(vocab_size, emb_size, batch_input_shape=(1, 1))\n', (497, 545), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((694, 711), 'keras.layers.Dense', 'Dense', (['vocab_size'], {}), '(vocab_size)\n', (699, 711), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((724, 745), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (734, 745), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((962, 978), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (970, 978), True, 'import numpy as np\n'), ((1080, 1096), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1088, 1096), True, 'import numpy as np\n'), ((585, 654), 'keras.layers.LSTM', 'LSTM', (['hidden_size'], {'return_sequences': '(i < num_layers - 1)', 'stateful': '(True)'}), '(hidden_size, return_sequences=i < num_layers - 1, stateful=True)\n', (589, 654), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((664, 680), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (671, 680), False, 'from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding\n'), ((1165, 1194), 'numpy.random.randint', 'np.random.randint', (['vocab_size'], {}), '(vocab_size)\n', (1182, 1194), True, 'import numpy as np\n')]
|
import bs4
import lxml
import discord
from urllib.request import urlopen, Request
import urllib
import json
import requests
import random
class Search:
def get_video_link(self, titleli):
return
def search_image(self, titleli):
title = ''
for i in titleli:
title = title + " " + i
enc_location = urllib.parse.quote(title)
hdr = {'User-Agent': 'Mozilla/5.0'}
#url = 'https://imgur.com/search/score?q='+enc_location
url = 'https://www.google.co.kr/search?hl=en&tbm=isch&q=' + enc_location
        # Debugging code.
#print(url)
#print(titleli)
#print(title)
#print(enc_location)
req = Request(url, headers=hdr)
html = urllib.request.urlopen(req)
bsObj = bs4.BeautifulSoup(html,"lxml")
embed = discord.Embed(colour = 0xb4151c)
imgdinfl = bsObj.find_all("img")
#print(imgdinfl)
try:
            randomNum = random.randint(0, (len(imgdinfl) - 1) // 2)  # integer division: randint needs int bounds
imgsrc = imgdinfl[randomNum].get('src')
embed.set_image(url=imgsrc)
print(imgsrc)
except ValueError:
embed.add_field(name="검색된 사진이 없음...",value = "사진이 없습니다.")
return embed
|
[
"urllib.request.Request",
"discord.Embed",
"urllib.request.urlopen",
"urllib.parse.quote",
"bs4.BeautifulSoup"
] |
[((356, 381), 'urllib.parse.quote', 'urllib.parse.quote', (['title'], {}), '(title)\n', (374, 381), False, 'import urllib\n'), ((699, 724), 'urllib.request.Request', 'Request', (['url'], {'headers': 'hdr'}), '(url, headers=hdr)\n', (706, 724), False, 'from urllib.request import urlopen, Request\n'), ((740, 767), 'urllib.request.urlopen', 'urllib.request.urlopen', (['req'], {}), '(req)\n', (762, 767), False, 'import urllib\n'), ((784, 815), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (801, 815), False, 'import bs4\n'), ((831, 861), 'discord.Embed', 'discord.Embed', ([], {'colour': '(11801884)'}), '(colour=11801884)\n', (844, 861), False, 'import discord\n')]
|
"""
An implementation of a logging.Handler for sending messages to Discord
"""
import datetime
import logging
from discord import Color, Embed
from discord.ext import commands
from bot.constants import LOGGING_CHANNEL_ID
LEVEL_COLORS = {
logging.CRITICAL: Color.red(),
logging.ERROR: Color.red(),
logging.WARNING: Color.gold(),
logging.INFO: Color.blurple()
}
class DiscordHandler(logging.Handler):
"""
A class implementing logging.Handler methods to send logs to a Discord channel.
"""
def __init__(self, bot: commands.Bot, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = bot
self.log_channel = self.client.get_channel(LOGGING_CHANNEL_ID)
def _level_to_color(self, level_number: int):
return LEVEL_COLORS.get(level_number)
def emit(self, record):
if not self.client.loop.is_running():
# The event loop is not running (discord is not connected) so
# do not send the message
return
# Create an embed with a title like "Info" or "Error" and a color
# relating to the level of the log message
embed = Embed(title=record.levelname.title(), color=self._level_to_color(record.levelno))
embed.timestamp = datetime.datetime.utcnow()
embed.add_field(name="Message", value=record.msg, inline=False)
embed.add_field(name="Function", value=f"`{record.funcName}`", inline=True)
embed.add_field(name="File name", value=f"`{record.filename}`", inline=True)
embed.add_field(name="Line number", value=record.lineno, inline=True)
if self.log_channel is None:
self.log_channel = self.client.get_channel(LOGGING_CHANNEL_ID)
# Create a task in the event loop to send the logging embed
self.client.loop.create_task(self.log_channel.send(embed=embed))
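# Illustrative wiring sketch (not part of the original module); `bot` is a
# hypothetical commands.Bot instance created elsewhere in the application:
# handler = DiscordHandler(bot, level=logging.WARNING)
# logging.getLogger().addHandler(handler)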
|
[
"discord.Color.blurple",
"datetime.datetime.utcnow",
"discord.Color.red",
"discord.Color.gold"
] |
[((264, 275), 'discord.Color.red', 'Color.red', ([], {}), '()\n', (273, 275), False, 'from discord import Color, Embed\n'), ((296, 307), 'discord.Color.red', 'Color.red', ([], {}), '()\n', (305, 307), False, 'from discord import Color, Embed\n'), ((330, 342), 'discord.Color.gold', 'Color.gold', ([], {}), '()\n', (340, 342), False, 'from discord import Color, Embed\n'), ((362, 377), 'discord.Color.blurple', 'Color.blurple', ([], {}), '()\n', (375, 377), False, 'from discord import Color, Embed\n'), ((1275, 1301), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1299, 1301), False, 'import datetime\n')]
|
import urx
import logging
import time
if __name__ == "__main__":
logging.basicConfig(level=logging.WARN)
# home_pos = [0.0755, -0.2824, 0.3477, -0.0387, -3.0754, 0.4400] # rest position (good to place/remove gripper)
rob = urx.Robot("192.168.56.1")
#rob = urx.Robot("localhost")
rob.set_tcp((0,0,0,0,0,0))
#rob.set_payload(0.5, (0,0,0))
try:
l = 0.05
v = 0.05
a = 0.3
pose = rob.getl() #gives a lists with 6 elements (x, y, z, rx, ry, rz) --> rotation vector
print("robot tcp is at: ", pose)
pose[2] += l
#rob.movej(pose, acc=a, vel=v) # moves each joint given joint goal pos
rob.movej_to_pose(pose, acc=a, vel=v) # move each joint to reach position goal of tool
time.sleep(1)
pose[2] -= l
#rob.movej(pose, acc=a, vel=v)
rob.movej_to_pose(pose, acc=a, vel=v)
pose_final = rob.getl()
print("robot tcp is at (final): ", pose_final)
finally:
rob.close()
|
[
"urx.Robot",
"logging.basicConfig",
"time.sleep"
] |
[((71, 110), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (90, 110), False, 'import logging\n'), ((239, 264), 'urx.Robot', 'urx.Robot', (['"""192.168.56.1"""'], {}), "('192.168.56.1')\n", (248, 264), False, 'import urx\n'), ((773, 786), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (783, 786), False, 'import time\n')]
|
#!/usr/bin/env python
"""
Module implementing an XYZ file object class.
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Apr 17, 2012"
import re
from pymatgen.core.structure import Molecule
class XYZ(object):
"""
Basic class for importing and exporting Molecules or Structures in XYZ
format.
.. note::
Exporting periodic structures in the XYZ format will lose information
about the periodicity. Essentially, only cartesian coordinates are
written in this format and no information is retained about the
lattice.
"""
def __init__(self, mol, coord_precision=6):
"""
Args:
mol:
Input molecule
"""
self._mol = mol
self.precision = coord_precision
@property
def molecule(self):
"""
Returns molecule associated with this XYZ.
"""
return self._mol
@staticmethod
def from_string(contents):
"""
Creates XYZ object from a string.
Args:
contents:
String representing an XYZ file.
Returns:
XYZ object
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
coord_patt = re.compile(
"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)"
)
for i in xrange(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1))
                coords.append(map(float, m.groups()[1:4]))
return XYZ(Molecule(sp, coords))
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename:
XYZ filename
Returns:
XYZ object
"""
with open(filename, "r") as f:
return XYZ.from_string(f.read())
def __str__(self):
output = [str(len(self._mol)), self._mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(self.precision)
for site in self._mol:
output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
return "\n".join(output)
def write_file(self, filename):
"""
Writes XYZ to file.
Args:
filename:
File name of output file.
"""
with open(filename, "w") as f:
f.write(self.__str__())
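# Illustrative usage sketch (not part of the original module), using a small
# hypothetical water molecule:
# water = Molecule(["O", "H", "H"],
#                  [[0.0, 0.0, 0.0], [0.0, 0.757, 0.587], [0.0, -0.757, 0.587]])
# XYZ(water).write_file("water.xyz")
# mol = XYZ.from_file("water.xyz").molecule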
|
[
"pymatgen.core.structure.Molecule",
"re.compile"
] |
[((1430, 1504), 're.compile', 're.compile', (['"""(\\\\w+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)"""'], {}), "('(\\\\w+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)')\n", (1440, 1504), False, 'import re\n'), ((1738, 1758), 'pymatgen.core.structure.Molecule', 'Molecule', (['sp', 'coords'], {}), '(sp, coords)\n', (1746, 1758), False, 'from pymatgen.core.structure import Molecule\n')]
|
# Created by <NAME>
# Date: 13/03/2020
from gym.envs.registration import register
register(
id='Dummy-v0',
entry_point='gym_dummy.envs:DummyEnv',
# timestep_limit=1000,
)
register(
id='Walker2D-v0',
entry_point='gym_dummy.envs:Walker2DEnv',
)
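# Illustrative usage sketch (not part of the original module): once this module is
# imported, the registered environments can be instantiated by id, e.g.
# import gym
# env = gym.make('Dummy-v0')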
|
[
"gym.envs.registration.register"
] |
[((85, 147), 'gym.envs.registration.register', 'register', ([], {'id': '"""Dummy-v0"""', 'entry_point': '"""gym_dummy.envs:DummyEnv"""'}), "(id='Dummy-v0', entry_point='gym_dummy.envs:DummyEnv')\n", (93, 147), False, 'from gym.envs.registration import register\n'), ((187, 255), 'gym.envs.registration.register', 'register', ([], {'id': '"""Walker2D-v0"""', 'entry_point': '"""gym_dummy.envs:Walker2DEnv"""'}), "(id='Walker2D-v0', entry_point='gym_dummy.envs:Walker2DEnv')\n", (195, 255), False, 'from gym.envs.registration import register\n')]
|
from setuptools import setup
setup(name='gym_qubit',
version='0.0.1',
install_requires=['gym>=0.10.5',
'qutip>=4.3.1',
'scipy>=1.0.1',
'numpy>=1.14.5']
)
|
[
"setuptools.setup"
] |
[((30, 157), 'setuptools.setup', 'setup', ([], {'name': '"""gym_qubit"""', 'version': '"""0.0.1"""', 'install_requires': "['gym>=0.10.5', 'qutip>=4.3.1', 'scipy>=1.0.1', 'numpy>=1.14.5']"}), "(name='gym_qubit', version='0.0.1', install_requires=['gym>=0.10.5',\n 'qutip>=4.3.1', 'scipy>=1.0.1', 'numpy>=1.14.5'])\n", (35, 157), False, 'from setuptools import setup\n')]
|
import os
from atve.script import AtveTestCase
from runner import TestAtveTestRunner as TSTR
from nose.tools import with_setup, raises, ok_, eq_
class TestAndroidTestRuner(TSTR):
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_01(self):
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_01.py")
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_02(self):
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_02.py")
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_03(self):
AtveTestCase.set("android.serial", "emulator-5554")
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_03.py")
@with_setup(TSTR.setup, TSTR.teardown)
def test_library_execute_android_success_04(self):
AtveTestCase.set("android.serial", "emulator-5554")
self.script_path = os.path.join(self.script_path, "android")
self.base_library_execute_success("android_04.py")
|
[
"atve.script.AtveTestCase.set",
"os.path.join",
"nose.tools.with_setup"
] |
[((186, 223), 'nose.tools.with_setup', 'with_setup', (['TSTR.setup', 'TSTR.teardown'], {}), '(TSTR.setup, TSTR.teardown)\n', (196, 223), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((413, 450), 'nose.tools.with_setup', 'with_setup', (['TSTR.setup', 'TSTR.teardown'], {}), '(TSTR.setup, TSTR.teardown)\n', (423, 450), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((640, 677), 'nose.tools.with_setup', 'with_setup', (['TSTR.setup', 'TSTR.teardown'], {}), '(TSTR.setup, TSTR.teardown)\n', (650, 677), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((927, 964), 'nose.tools.with_setup', 'with_setup', (['TSTR.setup', 'TSTR.teardown'], {}), '(TSTR.setup, TSTR.teardown)\n', (937, 964), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((306, 347), 'os.path.join', 'os.path.join', (['self.script_path', '"""android"""'], {}), "(self.script_path, 'android')\n", (318, 347), False, 'import os\n'), ((533, 574), 'os.path.join', 'os.path.join', (['self.script_path', '"""android"""'], {}), "(self.script_path, 'android')\n", (545, 574), False, 'import os\n'), ((741, 792), 'atve.script.AtveTestCase.set', 'AtveTestCase.set', (['"""android.serial"""', '"""emulator-5554"""'], {}), "('android.serial', 'emulator-5554')\n", (757, 792), False, 'from atve.script import AtveTestCase\n'), ((820, 861), 'os.path.join', 'os.path.join', (['self.script_path', '"""android"""'], {}), "(self.script_path, 'android')\n", (832, 861), False, 'import os\n'), ((1028, 1079), 'atve.script.AtveTestCase.set', 'AtveTestCase.set', (['"""android.serial"""', '"""emulator-5554"""'], {}), "('android.serial', 'emulator-5554')\n", (1044, 1079), False, 'from atve.script import AtveTestCase\n'), ((1107, 1148), 'os.path.join', 'os.path.join', (['self.script_path', '"""android"""'], {}), "(self.script_path, 'android')\n", (1119, 1148), False, 'import os\n')]
|
import os
import random
import numpy as np
import cv2
from keras.utils import Sequence
# This vvvvv is for example_preprocess function and augs
# from albumentations import (
# HorizontalFlip, VerticalFlip, Flip, Transpose, Rotate, ShiftScaleRotate, RandomScale,
# RandomBrightness, RandomContrast, RandomBrightnessContrast, JpegCompression, Blur,
# MedianBlur, Compose, OneOf
# )
class SegDataGenerator(Sequence):
''' Data generator class for segmentation
Note:
Used as data generator in fit_generator from keras.
        Includes support for augmentations via passing a preprocessing function
        as the preprocessing_function parameter. For an example interface of such a
        function, see the example_preprocess function below.
Args:
input_directory (str): path to the folder where the input images are stored
mask_directory (str): path to the folder where the masks are stored
input_extention (str): extention of the input images files
mask_extention (str): extention of the input masks files
input_shape (tuple/list): target shape of the input images
mask_shape (tuple/list): target shape of the masks
batch_size (int): batch size
        preload_dataset (bool): if True, input images and masks will be loaded to RAM (should be set to False if the dataset is larger than available RAM)
prob_aug (float): probability of getting augmented image
preprocessing_function (func): function that performs preprocessing and augmentation (if needed) (see example_preprocess function)
Attributes:
no public attributes
'''
def __init__(self,
input_directory, mask_directory,
input_extention='.jpg', mask_extention='.png',
input_shape=(256, 256, 3), mask_shape=(256, 256, 1),
batch_size=4, preload_dataset=False, prob_aug=0.5,
preprocessing_function=None):
self._dir = input_directory
self._mask_dir = mask_directory
self._in_shape = input_shape
self._mask_shape = mask_shape
self._fext = input_extention
self._mext = mask_extention
self._batch_size = batch_size
in_files = list(filter(lambda x: x.endswith(self._fext), os.listdir(self._dir)))
in_files.sort()
mask_files = list(filter(lambda x: x.endswith(self._mext), os.listdir(self._mask_dir)))
mask_files.sort()
self._files = list()
for i, name in enumerate(in_files):
self._files.append((name, mask_files[i]))
random.shuffle(self._files)
self._preload = preload_dataset
self._prob_aug = prob_aug
self._data = None
self._masks = None
if (preprocessing_function is not None) and callable(preprocessing_function):
self._preprocess = preprocessing_function
else:
self._preprocess = self._def_preprocess
if self._preload:
self._data = list()
for i, names in enumerate(self._files):
img = cv2.imread(os.path.join(self._dir, names[0]), cv2.IMREAD_UNCHANGED)
mask = cv2.imread(os.path.join(self._mask_dir, names[1]), cv2.IMREAD_UNCHANGED)
self._data.append((img, mask))
def __len__(self):
        return int(np.ceil(len(self._files) / float(self._batch_size)))
def __getitem__(self, idx):
h = 0
w = 1
c = 2
batch_x = np.empty((self._batch_size, self._in_shape[h], self._in_shape[w], self._in_shape[c]), dtype='float32')
batch_y = np.empty((self._batch_size, self._mask_shape[h], self._mask_shape[w], self._mask_shape[c]), dtype='float32')
inter = cv2.INTER_AREA
if self._preload:
for i, imgs in enumerate(self._data[idx*self._batch_size:(idx+1)*self._batch_size]):
if (imgs[0].shape[w] < self._in_shape[w]) or (imgs[0].shape[h] < self._in_shape[h]):
inter = cv2.INTER_CUBIC
batch_img = cv2.resize(imgs[0], dsize=(self._in_shape[w], self._in_shape[h]), interpolation=inter)
batch_mask = cv2.resize(imgs[1], dsize=(self._mask_shape[w], self._mask_shape[h]), interpolation=inter)
batch_img, batch_mask = self._preprocess(batch_img, batch_mask, self._prob_aug)
batch_x[i] = batch_img.astype('float32')
batch_y[i] = batch_mask.astype('float32')
else:
for i, names in enumerate(self._files[idx*self._batch_size:(idx+1)*self._batch_size]):
img = cv2.imread(os.path.join(self._dir, names[0]), cv2.IMREAD_UNCHANGED)
mask = cv2.imread(os.path.join(self._mask_dir, names[1]), cv2.IMREAD_UNCHANGED)
if (img.shape[w] < self._in_shape[w]) or (img.shape[h] < self._in_shape[h]):
inter = cv2.INTER_CUBIC
batch_img = cv2.resize(img, dsize=(self._in_shape[w], self._in_shape[h]), interpolation=inter)
batch_mask = cv2.resize(mask, dsize=(self._mask_shape[w], self._mask_shape[h]), interpolation=inter)
batch_img, batch_mask = self._preprocess(batch_img, batch_mask, self._prob_aug)
batch_x[i] = batch_img.astype('float32')
batch_y[i] = batch_mask.astype('float32')
return batch_x, batch_y
@staticmethod
def _def_preprocess(img, mask, prob_aug):
''' Default preprocessing and augmentation function for SegDataGenerator class
Args:
img (numpy.ndarray): input image as numpy array (loaded using opencv, skimage or other compatible modules)
mask (numpy.ndarray): mask as numpy array (loaded using opencv, skimage or other compatible modules)
prob_aug (float): probability of getting augmented image (if used)
Returns:
tuple: tuple of preprocessed (image, mask)
'''
return img, mask
# vvvvv Example augmentation and preprocessing function vvvvv Albumentation module must be installed
# def example_augs(p=0.5):
# return Compose([
# OneOf([
# Flip(p=0.5),
# Transpose(p=0.2),
# Rotate(limit=90, interpolation=cv2.INTER_CUBIC, p=0.2),
# ShiftScaleRotate(shift_limit=0.125,
# scale_limit=0.25,
# rotate_limit=90,
# interpolation=cv2.INTER_CUBIC, p=0.5),
# RandomScale(scale_limit=0.2, interpolation=cv2.INTER_CUBIC, p=0.2)
# ], p=0.75),
# OneOf([
# RandomBrightness(limit=0.1, p=0.5),
# RandomContrast(limit=0.1, p=0.2),
# RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.1)
# ], p=0.25),
# JpegCompression(quality_lower=90, p=0.1),
# OneOf([
# Blur(blur_limit=3, p=0.1),
# MedianBlur(blur_limit=5, p=0.1)
# ], p=0.1)
# ], p=p)
# def example_preprocess(img, mask, prob_aug):
# ''' Example preprocessing and augmentation function for SegDataGenerator class
# Args:
# img (numpy.ndarray): input image as numpy array (loaded using opencv, skimage or other compatible modules)
# mask (numpy.ndarray): mask as numpy array (loaded using opencv, skimage or other compatible modules)
# prob_aug (float): probability of getting augmented image (if used)
# Returns:
# tuple: tuple of preprocessed (image, mask)
# '''
# augs = example_augs(p=prob_aug)
# data = {'image': img, 'mask': mask}
# augmented = augs(**data)
# aimg = augmented['image']
# amask = augmented['mask']
# aimg_yuv = cv2.cvtColor(aimg, cv2.COLOR_BGR2YUV)
# aimg_hls = cv2.cvtColor(aimg, cv2.COLOR_BGR2HLS)
# clahe = cv2.createCLAHE(clipLimit=2., tileGridSize=(5,5))
# yuv_split = cv2.split(aimg_yuv)
# hls_split = cv2.split(aimg_hls)
# yuv_split[0] = clahe.apply(yuv_split[0])
# aimg = cv2.merge((yuv_split[0], hls_split[2], yuv_split[2]))
# return aimg, amask
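# Illustrative usage sketch (not part of the original module); the directory paths
# and the `model` object are hypothetical placeholders:
# train_gen = SegDataGenerator('data/train/images', 'data/train/masks',
#                               input_shape=(256, 256, 3), mask_shape=(256, 256, 1),
#                               batch_size=4)
# model.fit_generator(train_gen, epochs=10)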
|
[
"random.shuffle",
"numpy.empty",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((2673, 2700), 'random.shuffle', 'random.shuffle', (['self._files'], {}), '(self._files)\n', (2687, 2700), False, 'import random\n'), ((3604, 3711), 'numpy.empty', 'np.empty', (['(self._batch_size, self._in_shape[h], self._in_shape[w], self._in_shape[c])'], {'dtype': '"""float32"""'}), "((self._batch_size, self._in_shape[h], self._in_shape[w], self.\n _in_shape[c]), dtype='float32')\n", (3612, 3711), True, 'import numpy as np\n'), ((3726, 3839), 'numpy.empty', 'np.empty', (['(self._batch_size, self._mask_shape[h], self._mask_shape[w], self.\n _mask_shape[c])'], {'dtype': '"""float32"""'}), "((self._batch_size, self._mask_shape[h], self._mask_shape[w], self.\n _mask_shape[c]), dtype='float32')\n", (3734, 3839), True, 'import numpy as np\n'), ((2357, 2378), 'os.listdir', 'os.listdir', (['self._dir'], {}), '(self._dir)\n', (2367, 2378), False, 'import os\n'), ((2474, 2500), 'os.listdir', 'os.listdir', (['self._mask_dir'], {}), '(self._mask_dir)\n', (2484, 2500), False, 'import os\n'), ((4190, 4280), 'cv2.resize', 'cv2.resize', (['imgs[0]'], {'dsize': '(self._in_shape[w], self._in_shape[h])', 'interpolation': 'inter'}), '(imgs[0], dsize=(self._in_shape[w], self._in_shape[h]),\n interpolation=inter)\n', (4200, 4280), False, 'import cv2\n'), ((4307, 4401), 'cv2.resize', 'cv2.resize', (['imgs[1]'], {'dsize': '(self._mask_shape[w], self._mask_shape[h])', 'interpolation': 'inter'}), '(imgs[1], dsize=(self._mask_shape[w], self._mask_shape[h]),\n interpolation=inter)\n', (4317, 4401), False, 'import cv2\n'), ((5111, 5198), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(self._in_shape[w], self._in_shape[h])', 'interpolation': 'inter'}), '(img, dsize=(self._in_shape[w], self._in_shape[h]), interpolation\n =inter)\n', (5121, 5198), False, 'import cv2\n'), ((5224, 5315), 'cv2.resize', 'cv2.resize', (['mask'], {'dsize': '(self._mask_shape[w], self._mask_shape[h])', 'interpolation': 'inter'}), '(mask, dsize=(self._mask_shape[w], self._mask_shape[h]),\n interpolation=inter)\n', (5234, 5315), False, 'import cv2\n'), ((3195, 3228), 'os.path.join', 'os.path.join', (['self._dir', 'names[0]'], {}), '(self._dir, names[0])\n', (3207, 3228), False, 'import os\n'), ((3287, 3325), 'os.path.join', 'os.path.join', (['self._mask_dir', 'names[1]'], {}), '(self._mask_dir, names[1])\n', (3299, 3325), False, 'import os\n'), ((4769, 4802), 'os.path.join', 'os.path.join', (['self._dir', 'names[0]'], {}), '(self._dir, names[0])\n', (4781, 4802), False, 'import os\n'), ((4861, 4899), 'os.path.join', 'os.path.join', (['self._mask_dir', 'names[1]'], {}), '(self._mask_dir, names[1])\n', (4873, 4899), False, 'import os\n')]
|
import unittest
from src.countingValleys.countingValleys import counting_valleys
class TestCountingValleys(unittest.TestCase):
def test_large_valley(self):
self.assertEqual(counting_valleys("UDDDUDUU"), 1)
|
[
"src.countingValleys.countingValleys.counting_valleys"
] |
[((189, 217), 'src.countingValleys.countingValleys.counting_valleys', 'counting_valleys', (['"""UDDDUDUU"""'], {}), "('UDDDUDUU')\n", (205, 217), False, 'from src.countingValleys.countingValleys import counting_valleys\n')]
|
# Copyright (c) 2021 Cisco Systems, Inc. and its affiliates
# All rights reserved.
# Use of this source code is governed by a BSD 3-Clause License
# that can be found in the LICENSE file.
from typing import Any, Dict
import contextlib
import json
import logging
import os
from pathlib import Path
import sqlite3
from typing import Any, Dict, List, Optional, Union
from swagger_server.events import SecurityEvent
from swagger_server.encoder import JSONEncoder
from swagger_server.errors import StreamDoesNotExist, SubjectNotInStream
from swagger_server.models import Status
CREATE_STREAMS_SQL = """
CREATE TABLE IF NOT EXISTS streams (
client_id TEXT PRIMARY KEY,
stream_data TEXT
)
"""
CREATE_SUBJECTS_SQL = """
CREATE TABLE IF NOT EXISTS subjects (
client_id TEXT,
email TEXT,
status TEXT,
FOREIGN KEY(client_id) REFERENCES streams(client_id),
PRIMARY KEY(client_id, email)
)
"""
CREATE_SETS_SQL = """
CREATE TABLE IF NOT EXISTS SETs (
client_id TEXT NOT NULL,
jti TEXT NOT NULL,
timestamp INTEGER NOT NULL,
event TEXT NOT NULL,
FOREIGN KEY(client_id) REFERENCES streams(client_id),
PRIMARY KEY(client_id, jti)
)
"""
@contextlib.contextmanager
def connection() -> sqlite3.Connection:
"""Yield a connection that is guaranteed to close"""
db_path = os.environ["DB_PATH"]
conn = sqlite3.connect(db_path)
conn.row_factory = sqlite3.Row
try:
yield conn
finally:
conn.close()
def create(drop=False):
if drop:
logging.warning("Dropping database")
db_path = Path(os.environ["DB_PATH"])
db_path.unlink(missing_ok=True)
logging.info("Creating database")
with connection() as conn:
with conn:
conn.execute(CREATE_STREAMS_SQL)
conn.execute(CREATE_SUBJECTS_SQL)
conn.execute(CREATE_SETS_SQL)
def stream_exists(client_id: str) -> bool:
"""Get a client_id info based on a token"""
with connection() as conn:
row = conn.execute(
"SELECT * FROM streams WHERE client_id=?",
(client_id,)
).fetchone()
return row is not None
def save_stream(client_id: str, stream_data: str) -> None:
"""Saves a stream (minus subjects and events) to the db"""
with connection() as conn:
# open a transaction and commit if successful
with conn:
conn.execute(
"REPLACE INTO streams VALUES (?, ?)", (client_id, stream_data)
)
def load_stream(client_id: str) -> Dict[str, Any]:
"""Load the data needed to create a stream from the database"""
with connection() as conn:
row = conn.execute(
"SELECT * FROM streams WHERE client_id=?",
(client_id,)
).fetchone()
if row:
return json.loads(row["stream_data"])
else:
raise StreamDoesNotExist()
def get_stream_ids() -> List[str]:
"""Load the client id for all streams"""
with connection() as conn:
rows = conn.execute("SELECT client_id from streams").fetchall()
return [row["client_id"] for row in rows]
def add_subject(client_id: str, email: str) -> None:
"""Add a subject to a stream"""
with connection() as conn:
with conn:
conn.execute(
"INSERT INTO subjects VALUES (?, ?, ?)",
(client_id, email, Status.enabled.value)
)
def set_subject_status(client_id: str, email: str, status: Status) -> None:
"""Set a subject's status"""
with connection() as conn:
with conn:
conn.execute("""
UPDATE subjects
SET
status = ?
WHERE
client_id = ? AND
email = ?
""", (status.value, client_id, email)
)
if conn.total_changes != 1:
raise SubjectNotInStream(email)
def get_subject_status(client_id: str, email: str) -> Status:
"""Get a subject's status"""
with connection() as conn:
row = conn.execute(
"SELECT * FROM subjects WHERE client_id=? AND email=?",
(client_id, email)
).fetchone()
if row:
return Status(row["status"])
else:
raise SubjectNotInStream(email)
def remove_subject(client_id: str, email: str) -> None:
"""Remove a subject from a stream"""
with connection() as conn:
with conn:
conn.execute(
"DELETE FROM subjects WHERE client_id=? AND email=?",
(client_id, email)
)
def delete_subjects(client_id: str) -> None:
"""Delete all subjects for a stream"""
with connection() as conn:
with conn:
conn.execute(
"DELETE FROM subjects WHERE client_id=?",
(client_id,)
)
def add_set(client_id: str, SET: SecurityEvent) -> None:
"""Add a SET to the stream"""
with connection() as conn:
with conn:
conn.execute(
"INSERT INTO SETs VALUES (?, ?, ?, ?)",
(client_id, SET.jti, SET.iat, JSONEncoder().encode(SET))
)
def delete_SETs(client_id: str, jtis: Optional[List[str]] = None) -> None:
"""Delete SETs from the stream, based on their jtis"""
sql = "DELETE FROM SETs WHERE client_id=?"
if jtis:
qmarks = ",".join(["?"] * len(jtis))
sql += f" AND jti IN ({qmarks})"
with connection() as conn:
with conn:
conn.execute(sql, (client_id, *jtis) if jtis else (client_id, ))
def count_SETs(client_id: str) -> int:
"""How many SETs are in the stream?"""
with connection() as conn:
return conn.execute(
"SELECT COUNT(*) FROM SETs WHERE client_id = ?",
(client_id,)
).fetchone()[0]
def get_SETs(client_id: str,
max_events: Optional[int] = None) -> List[SecurityEvent]:
"""Get up to max_events SETs from the stream"""
if max_events is not None and max_events <= 0:
return []
sql = "SELECT * FROM SETs WHERE client_id=? ORDER BY timestamp"
if max_events is not None:
sql += f" LIMIT {max_events}"
with connection() as conn:
results = conn.execute(sql, (client_id, )).fetchall()
return [
SecurityEvent.parse_obj(json.loads(r["event"]))
for r in results
]
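# Illustrative usage sketch (not part of the original module); the DB path, client
# id and subject address are hypothetical:
# os.environ.setdefault("DB_PATH", "/tmp/transmitter.db")
# create()
# save_stream("client-123", json.dumps({"delivery": "poll"}))
# add_subject("client-123", "user@example.com")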
|
[
"swagger_server.errors.StreamDoesNotExist",
"json.loads",
"logging.warning",
"swagger_server.models.Status",
"swagger_server.errors.SubjectNotInStream",
"logging.info",
"pathlib.Path",
"sqlite3.connect",
"swagger_server.encoder.JSONEncoder"
] |
[((1350, 1374), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (1365, 1374), False, 'import sqlite3\n'), ((1648, 1681), 'logging.info', 'logging.info', (['"""Creating database"""'], {}), "('Creating database')\n", (1660, 1681), False, 'import logging\n'), ((1520, 1556), 'logging.warning', 'logging.warning', (['"""Dropping database"""'], {}), "('Dropping database')\n", (1535, 1556), False, 'import logging\n'), ((1575, 1602), 'pathlib.Path', 'Path', (["os.environ['DB_PATH']"], {}), "(os.environ['DB_PATH'])\n", (1579, 1602), False, 'from pathlib import Path\n'), ((2814, 2844), 'json.loads', 'json.loads', (["row['stream_data']"], {}), "(row['stream_data'])\n", (2824, 2844), False, 'import json\n'), ((2877, 2897), 'swagger_server.errors.StreamDoesNotExist', 'StreamDoesNotExist', ([], {}), '()\n', (2895, 2897), False, 'from swagger_server.errors import StreamDoesNotExist, SubjectNotInStream\n'), ((3913, 3938), 'swagger_server.errors.SubjectNotInStream', 'SubjectNotInStream', (['email'], {}), '(email)\n', (3931, 3938), False, 'from swagger_server.errors import StreamDoesNotExist, SubjectNotInStream\n'), ((4251, 4272), 'swagger_server.models.Status', 'Status', (["row['status']"], {}), "(row['status'])\n", (4257, 4272), False, 'from swagger_server.models import Status\n'), ((4305, 4330), 'swagger_server.errors.SubjectNotInStream', 'SubjectNotInStream', (['email'], {}), '(email)\n', (4323, 4330), False, 'from swagger_server.errors import StreamDoesNotExist, SubjectNotInStream\n'), ((6377, 6399), 'json.loads', 'json.loads', (["r['event']"], {}), "(r['event'])\n", (6387, 6399), False, 'import json\n'), ((5163, 5176), 'swagger_server.encoder.JSONEncoder', 'JSONEncoder', ([], {}), '()\n', (5174, 5176), False, 'from swagger_server.encoder import JSONEncoder\n')]
|
import pytest
from manubot.cite.pubmed import (
get_pmcid_and_pmid_for_doi,
get_pmid_for_doi,
get_pubmed_ids_for_doi,
)
@pytest.mark.parametrize(
("doi", "pmid"),
[
("10.1098/rsif.2017.0387", "29618526"), # in PubMed and PMC
("10.1161/CIRCGENETICS.115.001181", "27094199"), # in PubMed but not PMC
("10.7717/peerj-cs.134", None), # DOI in journal not indexed by PubMed
("10.1161/CIRC", None), # invalid DOI
],
)
def test_get_pmid_for_doi(doi, pmid):
output = get_pmid_for_doi(doi)
assert pmid == output
@pytest.mark.parametrize(
("doi", "id_dict"),
[
("10.1098/rsif.2017.0387", {"PMCID": "PMC5938574", "PMID": "29618526"}),
("10.7554/ELIFE.32822", {"PMCID": "PMC5832410", "PMID": "29424689"}),
("10.1161/CIRCGENETICS.115.001181", {}), # only in PubMed, not in PMC
("10.7717/peerj.000", {}), # Non-existent DOI
("10.peerj.000", {}), # malformed DOI
],
)
def test_get_pmcid_and_pmid_for_doi(doi, id_dict):
output = get_pmcid_and_pmid_for_doi(doi)
assert id_dict == output
@pytest.mark.parametrize(
("doi", "id_dict"),
[
("10.1098/rsif.2017.0387", {"PMCID": "PMC5938574", "PMID": "29618526"}),
("10.7554/ELIFE.32822", {"PMCID": "PMC5832410", "PMID": "29424689"}),
(
"10.1161/CIRCGENETICS.115.001181",
{"PMID": "27094199"},
), # only in PubMed, not in PMC
("10.7717/peerj.000", {}), # Non-existent DOI
],
)
def test_get_pubmed_ids_for_doi(doi, id_dict):
output = get_pubmed_ids_for_doi(doi)
assert id_dict == output
|
[
"manubot.cite.pubmed.get_pubmed_ids_for_doi",
"pytest.mark.parametrize",
"manubot.cite.pubmed.get_pmcid_and_pmid_for_doi",
"manubot.cite.pubmed.get_pmid_for_doi"
] |
[((136, 332), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('doi', 'pmid')", "[('10.1098/rsif.2017.0387', '29618526'), ('10.1161/CIRCGENETICS.115.001181',\n '27094199'), ('10.7717/peerj-cs.134', None), ('10.1161/CIRC', None)]"], {}), "(('doi', 'pmid'), [('10.1098/rsif.2017.0387',\n '29618526'), ('10.1161/CIRCGENETICS.115.001181', '27094199'), (\n '10.7717/peerj-cs.134', None), ('10.1161/CIRC', None)])\n", (159, 332), False, 'import pytest\n'), ((577, 875), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('doi', 'id_dict')", "[('10.1098/rsif.2017.0387', {'PMCID': 'PMC5938574', 'PMID': '29618526'}), (\n '10.7554/ELIFE.32822', {'PMCID': 'PMC5832410', 'PMID': '29424689'}), (\n '10.1161/CIRCGENETICS.115.001181', {}), ('10.7717/peerj.000', {}), (\n '10.peerj.000', {})]"], {}), "(('doi', 'id_dict'), [('10.1098/rsif.2017.0387', {\n 'PMCID': 'PMC5938574', 'PMID': '29618526'}), ('10.7554/ELIFE.32822', {\n 'PMCID': 'PMC5832410', 'PMID': '29424689'}), (\n '10.1161/CIRCGENETICS.115.001181', {}), ('10.7717/peerj.000', {}), (\n '10.peerj.000', {})])\n", (600, 875), False, 'import pytest\n'), ((1109, 1403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('doi', 'id_dict')", "[('10.1098/rsif.2017.0387', {'PMCID': 'PMC5938574', 'PMID': '29618526'}), (\n '10.7554/ELIFE.32822', {'PMCID': 'PMC5832410', 'PMID': '29424689'}), (\n '10.1161/CIRCGENETICS.115.001181', {'PMID': '27094199'}), (\n '10.7717/peerj.000', {})]"], {}), "(('doi', 'id_dict'), [('10.1098/rsif.2017.0387', {\n 'PMCID': 'PMC5938574', 'PMID': '29618526'}), ('10.7554/ELIFE.32822', {\n 'PMCID': 'PMC5832410', 'PMID': '29424689'}), (\n '10.1161/CIRCGENETICS.115.001181', {'PMID': '27094199'}), (\n '10.7717/peerj.000', {})])\n", (1132, 1403), False, 'import pytest\n'), ((526, 547), 'manubot.cite.pubmed.get_pmid_for_doi', 'get_pmid_for_doi', (['doi'], {}), '(doi)\n', (542, 547), False, 'from manubot.cite.pubmed import get_pmcid_and_pmid_for_doi, get_pmid_for_doi, get_pubmed_ids_for_doi\n'), ((1045, 1076), 'manubot.cite.pubmed.get_pmcid_and_pmid_for_doi', 'get_pmcid_and_pmid_for_doi', (['doi'], {}), '(doi)\n', (1071, 1076), False, 'from manubot.cite.pubmed import get_pmcid_and_pmid_for_doi, get_pmid_for_doi, get_pubmed_ids_for_doi\n'), ((1579, 1606), 'manubot.cite.pubmed.get_pubmed_ids_for_doi', 'get_pubmed_ids_for_doi', (['doi'], {}), '(doi)\n', (1601, 1606), False, 'from manubot.cite.pubmed import get_pmcid_and_pmid_for_doi, get_pmid_for_doi, get_pubmed_ids_for_doi\n')]
|
from unittest import TestCase, main, skip
from aocfw import TestCaseMixin
from p2 import Solution
class SolutionTests(TestCase, TestCaseMixin):
solution = Solution
source = "sample.txt"
given = 168
def test_triangular_numbers(self):
self.assertEqual(Solution().get_triangular_number(3), 6)
self.assertEqual(Solution().get_triangular_number(10), 55)
self.assertEqual(Solution().get_triangular_number(1), 1)
self.assertEqual(Solution().get_triangular_number(0), 0)
def test_get_target(self):
data = self.get_parsed_data()
self.assertEqual(Solution().get_target(data), (4, 5))
def test_get_fuel_cost_2(self):
data = self.get_parsed_data()
ans = Solution().get_fuel_cost(data, 2)
self.assertEqual(ans, 206)
def test_get_fuel_cost_5(self):
data = self.get_parsed_data()
ans = Solution().get_fuel_cost(data, 5)
self.assertEqual(ans, 168)
if __name__ == "__main__":
main()
|
[
"unittest.main",
"p2.Solution"
] |
[((996, 1002), 'unittest.main', 'main', ([], {}), '()\n', (1000, 1002), False, 'from unittest import TestCase, main, skip\n'), ((736, 746), 'p2.Solution', 'Solution', ([], {}), '()\n', (744, 746), False, 'from p2 import Solution\n'), ((894, 904), 'p2.Solution', 'Solution', ([], {}), '()\n', (902, 904), False, 'from p2 import Solution\n'), ((278, 288), 'p2.Solution', 'Solution', ([], {}), '()\n', (286, 288), False, 'from p2 import Solution\n'), ((343, 353), 'p2.Solution', 'Solution', ([], {}), '()\n', (351, 353), False, 'from p2 import Solution\n'), ((410, 420), 'p2.Solution', 'Solution', ([], {}), '()\n', (418, 420), False, 'from p2 import Solution\n'), ((475, 485), 'p2.Solution', 'Solution', ([], {}), '()\n', (483, 485), False, 'from p2 import Solution\n'), ((610, 620), 'p2.Solution', 'Solution', ([], {}), '()\n', (618, 620), False, 'from p2 import Solution\n')]
|
from mlagents.torch_utils import torch
from unittest import mock
import pytest
from mlagents.trainers.torch.encoders import (
VectorInput,
Normalizer,
SimpleVisualEncoder,
ResNetVisualEncoder,
NatureVisualEncoder,
)
# This test will also reveal issues with states not being saved in the state_dict.
def compare_models(module_1, module_2):
is_same = True
for key_item_1, key_item_2 in zip(
module_1.state_dict().items(), module_2.state_dict().items()
):
# Compare tensors in state_dict and not the keys.
is_same = torch.equal(key_item_1[1], key_item_2[1]) and is_same
return is_same
def test_normalizer():
input_size = 2
norm = Normalizer(input_size)
# These three inputs should mean to 0.5, and variance 2
# with the steps starting at 1
vec_input1 = torch.tensor([[1, 1]])
vec_input2 = torch.tensor([[1, 1]])
vec_input3 = torch.tensor([[0, 0]])
norm.update(vec_input1)
norm.update(vec_input2)
norm.update(vec_input3)
# Test normalization
for val in norm(vec_input1)[0]:
assert val == pytest.approx(0.707, abs=0.001)
# Test copy normalization
norm2 = Normalizer(input_size)
assert not compare_models(norm, norm2)
norm2.copy_from(norm)
assert compare_models(norm, norm2)
for val in norm2(vec_input1)[0]:
assert val == pytest.approx(0.707, abs=0.001)
@mock.patch("mlagents.trainers.torch.encoders.Normalizer")
def test_vector_encoder(mock_normalizer):
mock_normalizer_inst = mock.Mock()
mock_normalizer.return_value = mock_normalizer_inst
input_size = 64
normalize = False
vector_encoder = VectorInput(input_size, normalize)
output = vector_encoder(torch.ones((1, input_size)))
assert output.shape == (1, input_size)
normalize = True
vector_encoder = VectorInput(input_size, normalize)
new_vec = torch.ones((1, input_size))
vector_encoder.update_normalization(new_vec)
mock_normalizer.assert_called_with(input_size)
mock_normalizer_inst.update.assert_called_with(new_vec)
vector_encoder2 = VectorInput(input_size, normalize)
vector_encoder.copy_normalization(vector_encoder2)
mock_normalizer_inst.copy_from.assert_called_with(mock_normalizer_inst)
@pytest.mark.parametrize("image_size", [(36, 36, 3), (84, 84, 4), (256, 256, 5)])
@pytest.mark.parametrize(
"vis_class", [SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder]
)
def test_visual_encoder(vis_class, image_size):
num_outputs = 128
enc = vis_class(image_size[0], image_size[1], image_size[2], num_outputs)
# Note: NCHW not NHWC
sample_input = torch.ones((1, image_size[2], image_size[0], image_size[1]))
encoding = enc(sample_input)
assert encoding.shape == (1, num_outputs)
|
[
"mlagents.trainers.torch.encoders.Normalizer",
"mlagents.trainers.torch.encoders.VectorInput",
"unittest.mock.Mock",
"mlagents.torch_utils.torch.ones",
"unittest.mock.patch",
"mlagents.torch_utils.torch.equal",
"mlagents.torch_utils.torch.tensor",
"pytest.mark.parametrize",
"pytest.approx"
] |
[((1407, 1464), 'unittest.mock.patch', 'mock.patch', (['"""mlagents.trainers.torch.encoders.Normalizer"""'], {}), "('mlagents.trainers.torch.encoders.Normalizer')\n", (1417, 1464), False, 'from unittest import mock\n'), ((2273, 2358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""image_size"""', '[(36, 36, 3), (84, 84, 4), (256, 256, 5)]'], {}), "('image_size', [(36, 36, 3), (84, 84, 4), (256, 256, 5)]\n )\n", (2296, 2358), False, 'import pytest\n'), ((2355, 2460), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""vis_class"""', '[SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder]'], {}), "('vis_class', [SimpleVisualEncoder,\n ResNetVisualEncoder, NatureVisualEncoder])\n", (2378, 2460), False, 'import pytest\n'), ((700, 722), 'mlagents.trainers.torch.encoders.Normalizer', 'Normalizer', (['input_size'], {}), '(input_size)\n', (710, 722), False, 'from mlagents.trainers.torch.encoders import VectorInput, Normalizer, SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder\n'), ((836, 858), 'mlagents.torch_utils.torch.tensor', 'torch.tensor', (['[[1, 1]]'], {}), '([[1, 1]])\n', (848, 858), False, 'from mlagents.torch_utils import torch\n'), ((876, 898), 'mlagents.torch_utils.torch.tensor', 'torch.tensor', (['[[1, 1]]'], {}), '([[1, 1]])\n', (888, 898), False, 'from mlagents.torch_utils import torch\n'), ((916, 938), 'mlagents.torch_utils.torch.tensor', 'torch.tensor', (['[[0, 0]]'], {}), '([[0, 0]])\n', (928, 938), False, 'from mlagents.torch_utils import torch\n'), ((1182, 1204), 'mlagents.trainers.torch.encoders.Normalizer', 'Normalizer', (['input_size'], {}), '(input_size)\n', (1192, 1204), False, 'from mlagents.trainers.torch.encoders import VectorInput, Normalizer, SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder\n'), ((1534, 1545), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1543, 1545), False, 'from unittest import mock\n'), ((1665, 1699), 'mlagents.trainers.torch.encoders.VectorInput', 'VectorInput', (['input_size', 'normalize'], {}), '(input_size, normalize)\n', (1676, 1699), False, 'from mlagents.trainers.torch.encoders import VectorInput, Normalizer, SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder\n'), ((1843, 1877), 'mlagents.trainers.torch.encoders.VectorInput', 'VectorInput', (['input_size', 'normalize'], {}), '(input_size, normalize)\n', (1854, 1877), False, 'from mlagents.trainers.torch.encoders import VectorInput, Normalizer, SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder\n'), ((1892, 1919), 'mlagents.torch_utils.torch.ones', 'torch.ones', (['(1, input_size)'], {}), '((1, input_size))\n', (1902, 1919), False, 'from mlagents.torch_utils import torch\n'), ((2104, 2138), 'mlagents.trainers.torch.encoders.VectorInput', 'VectorInput', (['input_size', 'normalize'], {}), '(input_size, normalize)\n', (2115, 2138), False, 'from mlagents.trainers.torch.encoders import VectorInput, Normalizer, SimpleVisualEncoder, ResNetVisualEncoder, NatureVisualEncoder\n'), ((2656, 2716), 'mlagents.torch_utils.torch.ones', 'torch.ones', (['(1, image_size[2], image_size[0], image_size[1])'], {}), '((1, image_size[2], image_size[0], image_size[1]))\n', (2666, 2716), False, 'from mlagents.torch_utils import torch\n'), ((1728, 1755), 'mlagents.torch_utils.torch.ones', 'torch.ones', (['(1, input_size)'], {}), '((1, input_size))\n', (1738, 1755), False, 'from mlagents.torch_utils import torch\n'), ((572, 613), 'mlagents.torch_utils.torch.equal', 'torch.equal', (['key_item_1[1]', 'key_item_2[1]'], {}), 
'(key_item_1[1], key_item_2[1])\n', (583, 613), False, 'from mlagents.torch_utils import torch\n'), ((1107, 1138), 'pytest.approx', 'pytest.approx', (['(0.707)'], {'abs': '(0.001)'}), '(0.707, abs=0.001)\n', (1120, 1138), False, 'import pytest\n'), ((1372, 1403), 'pytest.approx', 'pytest.approx', (['(0.707)'], {'abs': '(0.001)'}), '(0.707, abs=0.001)\n', (1385, 1403), False, 'import pytest\n')]
|
import os
import re
from maya import cmds, mel
import pymel.core as pm
import pyblish.api
import pype.api
from pype.hosts.maya import lib
class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must start with: `maya/<Scene>`
        all other tokens are customizable, but sane values for Arnold are:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
<Camera> token is supported also, useful for multiple renderable
cameras per render layer.
        For Redshift, omit the <RenderPass> token. Redshift will append it
        automatically if AOVs are enabled, and if you use multipart EXR
        it doesn't make much sense.
* Frame Padding must be:
* default: 4
    * Animation must be toggled on, in Render Settings - Common tab:
        * vray: Animation on standard or specific
* arnold: Frame / Animation ext: Any choice without "(Single Frame)"
* redshift: Animation toggled on
NOTE:
The repair function of this plugin does not repair the animation
setting of the render settings due to multiple possibilities.
"""
order = pype.api.ValidateContentsOrder
label = "Render Settings"
hosts = ["maya"]
families = ["renderlayer"]
actions = [pype.api.RepairAction]
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'rmanGlobals.imageFileFormat',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
ImagePrefixTokens = {
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'vray': 'maya/<Scene>/<Layer>/<Layer>',
'renderman': '<layer>_<aov>.<f4>.<ext>'
}
    # WARNING: There seems to be a bug in RenderMan when translating the <scene>
    # token: it resolves to something left behind by Maya's default image prefix.
    # So instead of `SceneName_v01` it translates to
    # `SceneName_v01/<RenderLayer>/<RenderLayer>_<RenderPass>`, which means,
    # for example:
    # `SceneName_v01/Main/Main_<RenderPass>`. A possible solution is to define a
    # custom token like <scene_name> that points to the determined scene name.
RendermanDirPrefix = "<ws>/renders/maya/<scene>/<layer>"
R_AOV_TOKEN = re.compile(
r'%a|<aov>|<renderpass>', re.IGNORECASE)
R_LAYER_TOKEN = re.compile(
r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
DEFAULT_PADDING = 4
VRAY_PREFIX = "maya/<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
invalid = self.get_invalid(instance)
assert invalid is False, ("Invalid render settings "
"found for '{}'!".format(instance.name))
@classmethod
def get_invalid(cls, instance):
invalid = False
renderer = instance.data['renderer']
layer = instance.data['setMembers']
cameras = instance.data.get("cameras", [])
# Get the node attributes for current renderer
attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default'])
prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer],
layer=layer)
padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs),
layer=layer)
anim_override = lib.get_attr_in_layer("defaultRenderGlobals.animation",
layer=layer)
if not anim_override:
invalid = True
cls.log.error("Animation needs to be enabled. Use the same "
"frame for start and end to render single frame")
if not prefix.lower().startswith("maya/<scene>"):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't start with: 'maya/<scene>'".format(prefix))
if not re.search(cls.R_LAYER_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't have: '<renderlayer>' or "
"'<layer>' token".format(prefix))
if len(cameras) > 1:
if not re.search(cls.R_CAMERA_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't have: '<camera>' token".format(prefix))
# renderer specific checks
if renderer == "vray":
# no vray checks implemented yet
pass
elif renderer == "redshift":
if re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Do not use AOV token [ {} ] - "
"Redshift automatically append AOV name and "
"it doesn't make much sense with "
"Multipart EXR".format(prefix))
elif renderer == "renderman":
file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")
if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
invalid = True
cls.log.error("Wrong image prefix [ {} ]".format(file_prefix))
if dir_prefix.lower() != cls.RendermanDirPrefix.lower():
invalid = True
cls.log.error("Wrong directory prefix [ {} ]".format(
dir_prefix))
else:
multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
if multipart:
if re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"You can't use '<renderpass>' token "
"with merge AOVs turned on".format(prefix))
else:
if not re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't have: '<renderpass>' or "
"token".format(prefix))
# prefix check
if prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
cls.log.warning("warning: prefix differs from "
"recommended {}".format(
cls.ImagePrefixTokens[renderer]))
if padding != cls.DEFAULT_PADDING:
invalid = True
cls.log.error("Expecting padding of {} ( {} )".format(
cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))
return invalid
@classmethod
def repair(cls, instance):
renderer = instance.data['renderer']
layer_node = instance.data['setMembers']
with lib.renderlayer(layer_node):
default = lib.RENDER_ATTRS['default']
render_attrs = lib.RENDER_ATTRS.get(renderer, default)
# Repair prefix
if renderer != "renderman":
node = render_attrs["node"]
prefix_attr = render_attrs["prefix"]
fname_prefix = cls.ImagePrefixTokens[renderer]
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
# Repair padding
padding_attr = render_attrs["padding"]
cmds.setAttr("{}.{}".format(node, padding_attr),
cls.DEFAULT_PADDING)
else:
# renderman handles stuff differently
cmds.setAttr("rmanGlobals.imageFileFormat",
cls.ImagePrefixTokens[renderer],
type="string")
cmds.setAttr("rmanGlobals.imageOutputDir",
cls.RendermanDirPrefix,
type="string")
|
[
"pype.hosts.maya.lib.RENDER_ATTRS.get",
"maya.cmds.getAttr",
"pype.hosts.maya.lib.get_attr_in_layer",
"maya.cmds.setAttr",
"pype.hosts.maya.lib.renderlayer",
"re.search",
"re.compile"
] |
[((2417, 2467), 're.compile', 're.compile', (['"""%a|<aov>|<renderpass>"""', 're.IGNORECASE'], {}), "('%a|<aov>|<renderpass>', re.IGNORECASE)\n", (2427, 2467), False, 'import re\n'), ((2498, 2551), 're.compile', 're.compile', (['"""%l|<layer>|<renderlayer>"""', 're.IGNORECASE'], {}), "('%l|<layer>|<renderlayer>', re.IGNORECASE)\n", (2508, 2551), False, 'import re\n'), ((2583, 2623), 're.compile', 're.compile', (['"""%c|<camera>"""', 're.IGNORECASE'], {}), "('%c|<camera>', re.IGNORECASE)\n", (2593, 2623), False, 'import re\n'), ((2645, 2684), 're.compile', 're.compile', (['"""%s|<scene>"""', 're.IGNORECASE'], {}), "('%s|<scene>', re.IGNORECASE)\n", (2655, 2684), False, 'import re\n'), ((3345, 3404), 'pype.hosts.maya.lib.RENDER_ATTRS.get', 'lib.RENDER_ATTRS.get', (['renderer', "lib.RENDER_ATTRS['default']"], {}), "(renderer, lib.RENDER_ATTRS['default'])\n", (3365, 3404), False, 'from pype.hosts.maya import lib\n'), ((3422, 3485), 'pype.hosts.maya.lib.get_attr_in_layer', 'lib.get_attr_in_layer', (['cls.ImagePrefixes[renderer]'], {'layer': 'layer'}), '(cls.ImagePrefixes[renderer], layer=layer)\n', (3443, 3485), False, 'from pype.hosts.maya import lib\n'), ((3679, 3747), 'pype.hosts.maya.lib.get_attr_in_layer', 'lib.get_attr_in_layer', (['"""defaultRenderGlobals.animation"""'], {'layer': 'layer'}), "('defaultRenderGlobals.animation', layer=layer)\n", (3700, 3747), False, 'from pype.hosts.maya import lib\n'), ((4238, 4274), 're.search', 're.search', (['cls.R_LAYER_TOKEN', 'prefix'], {}), '(cls.R_LAYER_TOKEN, prefix)\n', (4247, 4274), False, 'import re\n'), ((7204, 7231), 'pype.hosts.maya.lib.renderlayer', 'lib.renderlayer', (['layer_node'], {}), '(layer_node)\n', (7219, 7231), False, 'from pype.hosts.maya import lib\n'), ((7310, 7349), 'pype.hosts.maya.lib.RENDER_ATTRS.get', 'lib.RENDER_ATTRS.get', (['renderer', 'default'], {}), '(renderer, default)\n', (7330, 7349), False, 'from pype.hosts.maya import lib\n'), ((4531, 4568), 're.search', 're.search', (['cls.R_CAMERA_TOKEN', 'prefix'], {}), '(cls.R_CAMERA_TOKEN, prefix)\n', (4540, 4568), False, 'import re\n'), ((4922, 4956), 're.search', 're.search', (['cls.R_AOV_TOKEN', 'prefix'], {}), '(cls.R_AOV_TOKEN, prefix)\n', (4931, 4956), False, 'import re\n'), ((7994, 8089), 'maya.cmds.setAttr', 'cmds.setAttr', (['"""rmanGlobals.imageFileFormat"""', 'cls.ImagePrefixTokens[renderer]'], {'type': '"""string"""'}), "('rmanGlobals.imageFileFormat', cls.ImagePrefixTokens[renderer],\n type='string')\n", (8006, 8089), False, 'from maya import cmds, mel\n'), ((8160, 8246), 'maya.cmds.setAttr', 'cmds.setAttr', (['"""rmanGlobals.imageOutputDir"""', 'cls.RendermanDirPrefix'], {'type': '"""string"""'}), "('rmanGlobals.imageOutputDir', cls.RendermanDirPrefix, type=\n 'string')\n", (8172, 8246), False, 'from maya import cmds, mel\n'), ((5320, 5363), 'maya.cmds.getAttr', 'cmds.getAttr', (['"""rmanGlobals.imageFileFormat"""'], {}), "('rmanGlobals.imageFileFormat')\n", (5332, 5363), False, 'from maya import cmds, mel\n'), ((5389, 5431), 'maya.cmds.getAttr', 'cmds.getAttr', (['"""rmanGlobals.imageOutputDir"""'], {}), "('rmanGlobals.imageOutputDir')\n", (5401, 5431), False, 'from maya import cmds, mel\n'), ((5865, 5910), 'maya.cmds.getAttr', 'cmds.getAttr', (['"""defaultArnoldDriver.mergeAOVs"""'], {}), "('defaultArnoldDriver.mergeAOVs')\n", (5877, 5910), False, 'from maya import cmds, mel\n'), ((5956, 5990), 're.search', 're.search', (['cls.R_AOV_TOKEN', 'prefix'], {}), '(cls.R_AOV_TOKEN, prefix)\n', (5965, 5990), False, 'import re\n'), ((6283, 6317), 're.search', 
're.search', (['cls.R_AOV_TOKEN', 'prefix'], {}), '(cls.R_AOV_TOKEN, prefix)\n', (6292, 6317), False, 'import re\n')]
|
"""https://github.com/kujason/scene_vis"""
import os
import numpy as np
import vtk
class VtkImage:
"""Image
"""
def __init__(self):
self.vtk_actor = vtk.vtkImageActor()
# Need to keep reference to the image
self.image = None
self.vtk_image_data = None
def _save_image_data(self, vtk_image_data):
self.vtk_image_data = vtk_image_data
self.vtk_actor.SetInputData(vtk_image_data)
def set_image(self, image):
"""Setup image actor from image data
Args:
image: RGB image array
"""
# Flip vertically and change BGR->RGB
image = np.copy(image)[::-1, :, ::-1]
# Save reference to image
self.image = np.ascontiguousarray(image, dtype=np.uint8)
# Setup vtkImageImport
height, width = image.shape[0:2]
vtk_image_import = vtk.vtkImageImport()
vtk_image_import.SetDataSpacing(1, 1, 1)
vtk_image_import.SetDataOrigin(0, 0, 0)
vtk_image_import.SetWholeExtent(0, width - 1, 0, height - 1, 0, 0)
vtk_image_import.SetDataExtentToWholeExtent()
vtk_image_import.SetDataScalarTypeToUnsignedChar()
vtk_image_import.SetNumberOfScalarComponents(3)
vtk_image_import.SetImportVoidPointer(self.image)
vtk_image_import.Update()
# Get vtkImageData
vtk_image_data = vtk_image_import.GetOutput()
self._save_image_data(vtk_image_data)
def set_image_path(self, image_path):
"""Setup image actor from image at given path
Args:
image_path: path to image
"""
# Check extension
extension = os.path.splitext(image_path)[1]
if extension == '.png':
# Setup vtk image data
vtk_png_reader = vtk.vtkPNGReader()
vtk_png_reader.SetFileName(image_path)
vtk_png_reader.Update()
vtk_image_data = vtk_png_reader.GetOutput()
else:
raise NotImplementedError('Only .png images are supported, file was', extension)
self._save_image_data(vtk_image_data)
@staticmethod
def center_camera(vtk_renderer, vtk_image_data):
"""Sets camera to fill render window with the image
Args:
vtk_renderer: vtkRenderer
vtk_image_data: vtkImageData to calculate extents for centering
"""
origin = vtk_image_data.GetOrigin()
spacing = vtk_image_data.GetSpacing()
extent = vtk_image_data.GetExtent()
camera = vtk_renderer.GetActiveCamera()
camera.ParallelProjectionOn()
xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]
yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]
# xd = (extent[1] - extent[0] + 1) * spacing[0]
yd = (extent[3] - extent[2] + 1) * spacing[1]
d = camera.GetDistance()
camera.SetParallelScale(0.5 * yd)
camera.SetFocalPoint(xc, yc, 0.0)
camera.SetPosition(xc, yc, d)
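# Hedged usage sketch (added; not part of the original module). `vtk_renderer`
# and the image path are placeholders for an existing vtk.vtkRenderer and a
# real .png file:
#
#     vtk_image = VtkImage()
#     vtk_image.set_image_path('frame.png')
#     vtk_renderer.AddActor(vtk_image.vtk_actor)
#     VtkImage.center_camera(vtk_renderer, vtk_image.vtk_image_data)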
|
[
"vtk.vtkPNGReader",
"numpy.copy",
"vtk.vtkImageActor",
"os.path.splitext",
"vtk.vtkImageImport",
"numpy.ascontiguousarray"
] |
[((175, 194), 'vtk.vtkImageActor', 'vtk.vtkImageActor', ([], {}), '()\n', (192, 194), False, 'import vtk\n'), ((738, 781), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (758, 781), True, 'import numpy as np\n'), ((882, 902), 'vtk.vtkImageImport', 'vtk.vtkImageImport', ([], {}), '()\n', (900, 902), False, 'import vtk\n'), ((652, 666), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (659, 666), True, 'import numpy as np\n'), ((1673, 1701), 'os.path.splitext', 'os.path.splitext', (['image_path'], {}), '(image_path)\n', (1689, 1701), False, 'import os\n'), ((1802, 1820), 'vtk.vtkPNGReader', 'vtk.vtkPNGReader', ([], {}), '()\n', (1818, 1820), False, 'import vtk\n')]
|
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
        fields = ['first_name', 'last_name', 'username', 'passport']
class AirlineSerializer(serializers.ModelSerializer):
class Meta:
model = Airline
fields = "__all__"
def create(self, validated_data):
airline = Airline(**validated_data)
airline.save()
return Airline(**validated_data)
class AirportCreateSerializer(serializers.ModelSerializer):
company = AirlineSerializer()
class Meta:
model = Airport
fields = "__all__"
def create(self, validated_data):
airoport = Airport(**validated_data)
airoport.save()
return Airport(**validated_data)
class AiraportNestedSerializer(serializers.ModelSerializer):
    # doing inheritance
company = AirlineSerializer()
    # refining the field
race = serializers.CharField(source="get_race_display", read_only=True)
class Meta:
model = Airport
fields = "__all__"
class CitySerializer(serializers.ModelSerializer):
class Meta:
model = City
fields = ["name"]
class CityCreateSerializer(serializers.ModelSerializer):
class Meta:
model = City
fields = "__all__"
def create(self, validated_data):
city = City(**validated_data)
city.save()
return City(**validated_data)
class RouteSerializer(serializers.ModelSerializer):
class Meta:
model = Route
fields = "__all__"
# class RouteCreateSerializer(serializers.ModelSerializer):
# class Meta:
# model = Route
# fields = "__all__"
#
# def create(self, validated_data):
# route = Route(**validated_data)
# route.save()
# return Route(**validated_data)
class ArrivalSerializer(serializers.ModelSerializer):
class Meta:
model = Arrival
fields = "__all__"
# class RouteCreateSerializer(serializers.ModelSerializer):
# class Meta:
# model = Route
# fields = "__all__"
#
# def create(self, validated_data):
# route = Route(**validated_data)
# route.save()
# return Route(**validated_data)
class DepartureSerializer(serializers.ModelSerializer):
class Meta:
model = Departure
fields = "__all__"
class FlightSerializer(serializers.ModelSerializer):
class Meta:
model = Flight
fields = "__all__"
class PlaneSerializer(serializers.ModelSerializer):
class Meta:
model = Plane
fields = "__all__"
class BoardSerializer(serializers.ModelSerializer):
class Meta:
model = Board
fields = "__all__"
class FlightAttendantSerializer(serializers.ModelSerializer):
class Meta:
model = FlightAttendant
fields = "__all__"
class PilotSerializer(serializers.ModelSerializer):
class Meta:
model = Pilot
fields = "__all__"
|
[
"rest_framework.serializers.CharField"
] |
[((969, 1033), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""get_race_display"""', 'read_only': '(True)'}), "(source='get_race_display', read_only=True)\n", (990, 1033), False, 'from rest_framework import serializers\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("../../exos/LG/")
import csv
from Array import Array
from ftfy import fix_text
"""
@:param path : The path of the csv file to turn into an array
@:param sep : The separator to use for splitting the csv row data
@:return : Return an array of values
"""
def printCsvFile(path, sep=None):
with open(path, "r") as csvFile:
reader = csv.reader(csvFile) if sep is None else csv.reader(csvFile, delimiter=sep)
Array([elem for elem in reader])\
.map(lambda elem : Array(elem).map(fix_text).toArray()) \
.forEach(print)
#
#
"""
@:param path : The path to the csv file to turn into a dictionary
@:param sep : The separator to use for splitting the csv row data
@:return : Return a dictionary of 'dict[key] = value' with key being the name of the row
"""
def getCsvAsDict(path, sep=None):
csvFile = open(path, "r")
return csv.DictReader(csvFile) if sep is None else csv.DictReader(csvFile, delimiter=sep)
#
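# Hedged usage sketch (added; not from the original script). The file name and
# separator below are placeholders:
#
#     for row in getCsvAsDict("../../exos/rsc/data/csv/activites.csv", sep=";"):
#         print(row)  # each row is a dict keyed by the header line
#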
if __name__ == "__main__":
rscPath = "../../exos/rsc"
csvPath = rscPath + "/data/csv"
    with open(csvPath + "/activites.csv", "r") as csvFile:  # text mode - csv.reader expects str, not bytes
reader = csv.reader(csvFile)
data = Array(list(reader))
data.forEach(print)
#
#
|
[
"sys.path.append",
"csv.DictReader",
"Array.Array",
"csv.reader"
] |
[((57, 90), 'sys.path.append', 'sys.path.append', (['"""../../exos/LG/"""'], {}), "('../../exos/LG/')\n", (72, 90), False, 'import sys\n'), ((971, 994), 'csv.DictReader', 'csv.DictReader', (['csvFile'], {}), '(csvFile)\n', (985, 994), False, 'import csv\n'), ((1015, 1053), 'csv.DictReader', 'csv.DictReader', (['csvFile'], {'delimiter': 'sep'}), '(csvFile, delimiter=sep)\n', (1029, 1053), False, 'import csv\n'), ((1236, 1255), 'csv.reader', 'csv.reader', (['csvFile'], {}), '(csvFile)\n', (1246, 1255), False, 'import csv\n'), ((431, 450), 'csv.reader', 'csv.reader', (['csvFile'], {}), '(csvFile)\n', (441, 450), False, 'import csv\n'), ((471, 505), 'csv.reader', 'csv.reader', (['csvFile'], {'delimiter': 'sep'}), '(csvFile, delimiter=sep)\n', (481, 505), False, 'import csv\n'), ((515, 547), 'Array.Array', 'Array', (['[elem for elem in reader]'], {}), '([elem for elem in reader])\n', (520, 547), False, 'from Array import Array\n'), ((577, 588), 'Array.Array', 'Array', (['elem'], {}), '(elem)\n', (582, 588), False, 'from Array import Array\n')]
|
#!/usr/bin/env python
'''
Plot degree values for a given set of nodes in a simple circle plot.
'''
import numpy as np
import matplotlib.pyplot as plt
import mne
from jumeg import get_jumeg_path
from jumeg.connectivity import plot_degree_circle
import bct
orig_labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
con_fname = get_jumeg_path() + '/data/sample,aparc-con.npy'
con = np.load(con_fname)
con_ = con[0, :, :, 2] + con[0, :, :, 2].T
# compute the degree
degrees = mne.connectivity.degree(con_, threshold_prop=0.2)
fig, ax = plot_degree_circle(degrees, yaml_fname, orig_labels_fname)
|
[
"jumeg.get_jumeg_path",
"numpy.load",
"mne.connectivity.degree",
"jumeg.connectivity.plot_degree_circle"
] |
[((480, 498), 'numpy.load', 'np.load', (['con_fname'], {}), '(con_fname)\n', (487, 498), True, 'import numpy as np\n'), ((574, 623), 'mne.connectivity.degree', 'mne.connectivity.degree', (['con_'], {'threshold_prop': '(0.2)'}), '(con_, threshold_prop=0.2)\n', (597, 623), False, 'import mne\n'), ((635, 693), 'jumeg.connectivity.plot_degree_circle', 'plot_degree_circle', (['degrees', 'yaml_fname', 'orig_labels_fname'], {}), '(degrees, yaml_fname, orig_labels_fname)\n', (653, 693), False, 'from jumeg.connectivity import plot_degree_circle\n'), ((280, 296), 'jumeg.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (294, 296), False, 'from jumeg import get_jumeg_path\n'), ((345, 361), 'jumeg.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (359, 361), False, 'from jumeg import get_jumeg_path\n'), ((425, 441), 'jumeg.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (439, 441), False, 'from jumeg import get_jumeg_path\n')]
|
import os
import time
run_properties_file = open("run.properties", "r")
exec(run_properties_file.read())
run_properties_file.close()
time_delay_each_frame = time_delay_each_frame
shell_keyword_to_clear = shell_keyword_to_clear
while True:
input_data = input("Enter filename: ")
if(os.path.exists(input_data)):
break
else:
print("Invalid Path")
gonna_be_run_file = open(input_data, "r")
run_data = gonna_be_run_file.read()
gonna_be_run_file.close()
run_data_list = run_data.splitlines()
current_line = 0
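# Added note: each frame in the input file is a block of text lines terminated
# by a line containing only "-"; the loop below reads one such block per
# iteration (the optional outline border assumes the width of the first line).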
while True:
current_frame_list = []
while True:
if(run_data_list[current_line] == "-"):
break
else:
current_frame_list.append(run_data_list[current_line])
current_line += 1
if(outline):
print(" ", end="")
for x in range(0, len(current_frame_list[0])):
print("~", end="")
print("")
for x in range(0, len(current_frame_list)):
print("|", end="")
print(current_frame_list[x], end="")
print("|")
print(" ", end="")
for x in range(0, len(current_frame_list[0])):
print("~", end="")
print("")
else:
for x in range(0, len(current_frame_list)):
print(current_frame_list[x])
time.sleep(time_delay_each_frame)
os.system(shell_keyword_to_clear)
current_line += 1
del current_frame_list
|
[
"os.path.exists",
"os.system",
"time.sleep"
] |
[((292, 318), 'os.path.exists', 'os.path.exists', (['input_data'], {}), '(input_data)\n', (306, 318), False, 'import os\n'), ((1170, 1203), 'time.sleep', 'time.sleep', (['time_delay_each_frame'], {}), '(time_delay_each_frame)\n', (1180, 1203), False, 'import time\n'), ((1206, 1239), 'os.system', 'os.system', (['shell_keyword_to_clear'], {}), '(shell_keyword_to_clear)\n', (1215, 1239), False, 'import os\n')]
|
# coding=utf-8
"""
Ingest data from the command-line.
"""
from __future__ import absolute_import
import logging
import os
import uuid
from pathlib import Path
from xml.etree import ElementTree
import click
import rasterio.features
import shapely.affinity
import shapely.geometry
import shapely.ops
import yaml
from osgeo import osr
from rasterio.errors import RasterioIOError
# image boundary imports
# IMAGE BOUNDARY CODE
def safe_valid_region(images, mask_value=None):
try:
return valid_region(images, mask_value)
except (OSError, RasterioIOError):
return None
def valid_region(images, mask_value=None):
mask = None
for fname in images:
# ensure formats match
with rasterio.open(str(fname), 'r') as ds:
transform = ds.affine
img = ds.read(1)
if mask_value is not None:
new_mask = img & mask_value == mask_value
else:
                # TODO: update when sen2cor format write is finalised: new_mask = img != ds.nodata
new_mask = img != 0
if mask is None:
mask = new_mask
else:
mask |= new_mask
shapes = rasterio.features.shapes(mask.astype('uint8'), mask=mask)
shape = shapely.ops.unary_union([shapely.geometry.shape(shape) for shape, val in shapes if val == 1])
type(shapes)
geom = shape.convex_hull
# buffer by 1 pixel
geom = geom.buffer(1, join_style=3, cap_style=3)
# simplify with 1 pixel radius
geom = geom.simplify(1)
# intersect with image bounding box
geom = geom.intersection(shapely.geometry.box(0, 0, mask.shape[1], mask.shape[0]))
# transform from pixel space into CRS space
geom = shapely.affinity.affine_transform(geom, (transform.a, transform.b, transform.d,
transform.e, transform.xoff, transform.yoff))
output = shapely.geometry.mapping(geom)
return geom
def _to_lists(x):
"""
Returns lists of lists when given tuples of tuples
"""
if isinstance(x, tuple):
return [_to_lists(el) for el in x]
return x
def get_size(root, res):
nrows = int(root.findall('./*/Tile_Geocoding/Size[@resolution="'+str(res)+'"]/NROWS')[0].text)
ncols = int(root.findall('./*/Tile_Geocoding/Size[@resolution="'+str(res)+'"]/NCOLS')[0].text)
return {
'nrows': nrows,
'ncols': ncols,
}
def safe_get_grids(image):
try:
return get_grids(image)
except (OSError, RasterioIOError):
return None
def get_grids(image):
src = rasterio.open(str(image), 'r')
shape = src.shape
transform = src.transform
return {
'shape': shape,
'transform': [x for x in transform]
}
def get_geo_ref_points(root):
nrows = int(root.findall('./*/Tile_Geocoding/Size[@resolution="10"]/NROWS')[0].text)
ncols = int(root.findall('./*/Tile_Geocoding/Size[@resolution="10"]/NCOLS')[0].text)
ulx = int(root.findall('./*/Tile_Geocoding/Geoposition[@resolution="10"]/ULX')[0].text)
uly = int(root.findall('./*/Tile_Geocoding/Geoposition[@resolution="10"]/ULY')[0].text)
xdim = int(root.findall('./*/Tile_Geocoding/Geoposition[@resolution="10"]/XDIM')[0].text)
ydim = int(root.findall('./*/Tile_Geocoding/Geoposition[@resolution="10"]/YDIM')[0].text)
return {
'ul': {'x': ulx, 'y': uly},
'ur': {'x': ulx + ncols * abs(xdim), 'y': uly},
'll': {'x': ulx, 'y': uly - nrows * abs(ydim)},
'lr': {'x': ulx + ncols * abs(xdim), 'y': uly - nrows * abs(ydim)},
}
def get_coords(geo_ref_points, spatial_ref):
t = osr.CoordinateTransformation(spatial_ref, spatial_ref.CloneGeogCS())
def transform(p):
lon, lat, z = t.TransformPoint(p['x'], p['y'])
return {'lon': lon, 'lat': lat}
return {key: transform(p) for key, p in geo_ref_points.items()}
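# Added note: get_coords maps each corner produced by get_geo_ref_points
# ('ul', 'ur', 'll', 'lr') through the CRS-to-geographic transform, so the
# result has the shape {'ul': {'lon': ..., 'lat': ...}, 'ur': {...}, ...}.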
def prepare_dataset(path):
root = ElementTree.parse(str(path)).getroot()
level = root.findall('./*/Product_Info/PROCESSING_LEVEL')[0].text
product_type = root.findall('./*/Product_Info/PRODUCT_TYPE')[0].text
ct_time = root.findall('./*/Product_Info/GENERATION_TIME')[0].text
print(level, product_type, ct_time)
# granuleslist = [(granule.get('granuleIdentifier'), [imid.text for imid in granule.findall('IMAGE_FILE')]) for
# granule in
# root.findall('./*/Product_Info/Product_Organisation/Granule_List/Granules')]
# Assume multiple granules
single_granule_archive = False
granules = {granule.get('granuleIdentifier'): [imid.text for imid in granule.findall('IMAGE_ID')]
for granule in root.findall('./*/Product_Info/Product_Organisation/Granule_List/Granules')}
if not granules:
single_granule_archive = True
granules = {granule.get('granuleIdentifier'): [imid.text for imid in granule.findall('IMAGE_FILE')]
for granule in root.findall('./*/Product_Info/Product_Organisation/Granule_List/Granule')}
if not [] in granules.values():
single_granule_archive = True
else:
granules = {granule.get('granuleIdentifier'): [imid.text for imid in granule.findall('IMAGE_ID')]
for granule in root.findall('./*/Product_Info/Product_Organisation/Granule_List/Granule')}
single_granule_archive = False
# current = 0
# list = []
# granules = {}
# for i in granuleslist:
# granules[i[0]] = {}
# for key in granules.keys():
# granulecontent = []
# for j in granuleslist:
# if key in j:
# granulecontent = granulecontent + j[1]
# granules[key] = granulecontent
grouped_images = []
documents = []
for granule_id, images in granules.items():
images_ten_list = []
images_twenty_list = []
images_sixty_list = []
images_classification = []
# gran_path = str(path.parent.joinpath('GRANULE', granule_id, granule_id[:-7].replace('MSI', 'MTD') + '.xml'))
img_data_path = str(path.parent.joinpath('GRANULE', granule_id, 'IMG_DATA'))
gran_path = str(path.parent.joinpath('GRANULE', granule_id, granule_id[:-7].replace('MSI', 'MTD') + '.xml'))
if not Path(gran_path).exists():
gran_path = str(path.parent.joinpath(images[0]))
gran_path = str(Path(gran_path).parents[2].joinpath('MTD_TL.xml'))
root = ElementTree.parse(gran_path).getroot()
if not Path(img_data_path).exists():
# img_data_path = str(Path(gran_path).parents[0].joinpath('IMG_DATA'))
img_data_path = str(Path(path).parent)
if single_granule_archive is False:
img_data_path = img_data_path + str(Path('GRANULE').joinpath(granule_id, 'IMG_DATA'))
root = ElementTree.parse(gran_path).getroot()
sensing_time = root.findall('./*/SENSING_TIME')[0].text
img_data_path = str(path.parent.joinpath('GRANULE', granule_id, 'IMG_DATA'))
img_data_path_r10 = str(path.parent.joinpath('GRANULE', granule_id, 'IMG_DATA', 'R10m'))
img_data_path_r20 = str(path.parent.joinpath('GRANULE', granule_id, 'IMG_DATA', 'R20m'))
img_data_path_r60 = str(path.parent.joinpath('GRANULE', granule_id, 'IMG_DATA', 'R60m'))
for image in images:
# print('IMAGE',image)
# print('img_data_path', img_data_path)
# image = str(Path(image).name)
# print('IMAGE',image)
classification_list = ['SCL']
ten_list = ['B02_10m', 'B03_10m', 'B04_10m', 'B08_10m']
twenty_list = ['B05_20m', 'B06_20m', 'B07_20m', 'B11_20m', 'B12_20m', 'B8A_20m',
'B02_20m', 'B03_20m', 'B04_20m']
sixty_list = ['B01_60m', 'B02_60m', 'B03_60m', 'B04_60m', 'B8A_60m', 'B09_60m',
'B05_60m', 'B06_60m', 'B07_60m', 'B11_60m', 'B12_60m']
for item in classification_list:
if item in image:
# TODO include 60m classification
if '20m' in image:
images_classification.append(os.path.join(str(path.parent), image + ".jp2"))
for item in ten_list:
if item in image:
images_ten_list.append(os.path.join(str(path.parent), image + ".jp2"))
grouped_images.append(os.path.join(str(path.parent), image + ".jp2"))
for item in twenty_list:
if item in image:
images_twenty_list.append(os.path.join(str(path.parent), image + ".jp2"))
grouped_images.append(os.path.join(str(path.parent), image + ".jp2"))
for item in sixty_list:
if item in image:
images_sixty_list.append(os.path.join(str(path.parent), image + ".jp2"))
grouped_images.append(os.path.join(str(path.parent), image + ".jp2"))
station = root.findall('./*/Archiving_Info/ARCHIVING_CENTRE')[0].text
cs_code = root.findall('./*/Tile_Geocoding/HORIZONTAL_CS_CODE')[0].text
spatial_ref = osr.SpatialReference()
pvi_fileName = str(root.findall('./*/PVI_FILENAME')).split("/")[-1]
images_naming = pvi_fileName[0: -7]
spatial_ref.SetFromUserInput(cs_code)
spectral_dict = {image[-11:-4]: {'path': str(Path(image)), 'layer': 1, } for image in grouped_images}
scl_dict = {'SCL_20m': {'path': str(Path(classification)), 'layer': 1, } for classification in
images_classification}
spectral_dict.update(scl_dict)
geo_ref_points = get_geo_ref_points(root)
documents.append({
'id': str(uuid.uuid4()),
'$schema': 'https://schemas.opendatacube.org/dataset',
'product': {'name': 's2_sen2cor_ard_granule_EO3'},
'crs': cs_code,
'grids': {'default': safe_get_grids(images_twenty_list[0]), '10m_res': safe_get_grids(images_ten_list[0]), '60m_res': safe_get_grids(images_sixty_list[0])},
'measurements': { 'B01_60m': {'grid': '60m_res', 'path': spectral_dict['B01_60m']['path']},
'B02_10m': {'grid': '10m_res', 'path': spectral_dict['B02_10m']['path']},
'B03_10m': {'grid': '10m_res', 'path': spectral_dict['B03_10m']['path']},
'B04_10m': {'grid': '10m_res', 'path': spectral_dict['B04_10m']['path']},
'B05_20m': {'path': spectral_dict['B05_20m']['path']},
'B06_20m': {'path': spectral_dict['B06_20m']['path']},
'B07_20m': {'path': spectral_dict['B07_20m']['path']},
'B08_10m': {'grid': '10m_res', 'path': spectral_dict['B08_10m']['path']},
'B8A_20m': {'path': spectral_dict['B8A_20m']['path']},
'B09_60m': {'grid': '60m_res', 'path': spectral_dict['B09_60m']['path']},
'B11_20m': {'path': spectral_dict['B11_20m']['path']},
'B12_20m': {'path': spectral_dict['B12_20m']['path']},
'SCL_20m': {'path': spectral_dict['SCL_20m']['path']}
},
'properties': {'eo:platform': 'Sentinel-2A',
'eo:instrument': 'MSI',
'eo:product_type': product_type,
'datetime': ct_time,
'odc:file_format': 'JPEG2000',
'dea:dataset_maturity': 'final',
'odc:product_family': 'ard'
},
})
return documents
@click.command(
help="Prepare Sentinel 2 L2 sen2cor dataset SR and SC for ingestion into the Data Cube. "
"eg. python sen2cor_prepare.py <input>.SAFE --output <outfile>.yaml")
@click.argument('datasets',
type=click.Path(exists=True, readable=True, writable=False),
nargs=-1)
@click.option('--output', help="Write datasets into this directory",
type=click.Path(exists=False, writable=True, dir_okay=True))
def main(datasets, output):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
for dataset in datasets:
path = Path(dataset).absolute()
if path.is_dir():
# path = Path(path.joinpath(path.stem.replace('PRD_MSIL2A', 'MTD_SAFL2A') + '.xml'))
for file in os.listdir(path):
if file.endswith(".xml"):
if file.startswith("MTD"):
path = Path(os.path.join(path, file))
if path.suffix != '.xml':
raise RuntimeError('want xml')
logging.info("Processing %s", path)
documents = prepare_dataset(path)
output_path = Path(output)
if 'xml' in str(path):
yaml_path = output_path.joinpath(path.parent.name + '.yaml')
else:
yaml_path = output_path.joinpath(path.name + '.yaml')
if documents:
logging.info("Writing %s dataset(s) into %s", len(documents), yaml_path)
with open(yaml_path, 'w') as stream:
yaml.safe_dump_all(documents, stream, sort_keys=False)
else:
logging.info("No datasets discovered. Bye!")
if __name__ == "__main__":
main()
|
[
"xml.etree.ElementTree.parse",
"uuid.uuid4",
"logging.basicConfig",
"click.command",
"logging.info",
"pathlib.Path",
"yaml.safe_dump_all",
"click.Path",
"os.path.join",
"os.listdir",
"osgeo.osr.SpatialReference"
] |
[((11803, 11983), 'click.command', 'click.command', ([], {'help': '"""Prepare Sentinel 2 L2 sen2cor dataset SR and SC for ingestion into the Data Cube. eg. python sen2cor_prepare.py <input>.SAFE --output <outfile>.yaml"""'}), "(help=\n 'Prepare Sentinel 2 L2 sen2cor dataset SR and SC for ingestion into the Data Cube. eg. python sen2cor_prepare.py <input>.SAFE --output <outfile>.yaml'\n )\n", (11816, 11983), False, 'import click\n'), ((12298, 12390), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.INFO)\n", (12317, 12390), False, 'import logging\n'), ((9190, 9212), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (9210, 9212), False, 'from osgeo import osr\n'), ((12859, 12894), 'logging.info', 'logging.info', (['"""Processing %s"""', 'path'], {}), "('Processing %s', path)\n", (12871, 12894), False, 'import logging\n'), ((12961, 12973), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (12965, 12973), False, 'from pathlib import Path\n'), ((12040, 12094), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'readable': '(True)', 'writable': '(False)'}), '(exists=True, readable=True, writable=False)\n', (12050, 12094), False, 'import click\n'), ((12210, 12264), 'click.Path', 'click.Path', ([], {'exists': '(False)', 'writable': '(True)', 'dir_okay': '(True)'}), '(exists=False, writable=True, dir_okay=True)\n', (12220, 12264), False, 'import click\n'), ((12604, 12620), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12614, 12620), False, 'import os\n'), ((13412, 13456), 'logging.info', 'logging.info', (['"""No datasets discovered. Bye!"""'], {}), "('No datasets discovered. 
Bye!')\n", (13424, 13456), False, 'import logging\n'), ((6487, 6515), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['gran_path'], {}), '(gran_path)\n', (6504, 6515), False, 'from xml.etree import ElementTree\n'), ((6865, 6893), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['gran_path'], {}), '(gran_path)\n', (6882, 6893), False, 'from xml.etree import ElementTree\n'), ((12432, 12445), 'pathlib.Path', 'Path', (['dataset'], {}), '(dataset)\n', (12436, 12445), False, 'from pathlib import Path\n'), ((13331, 13385), 'yaml.safe_dump_all', 'yaml.safe_dump_all', (['documents', 'stream'], {'sort_keys': '(False)'}), '(documents, stream, sort_keys=False)\n', (13349, 13385), False, 'import yaml\n'), ((6306, 6321), 'pathlib.Path', 'Path', (['gran_path'], {}), '(gran_path)\n', (6310, 6321), False, 'from pathlib import Path\n'), ((6542, 6561), 'pathlib.Path', 'Path', (['img_data_path'], {}), '(img_data_path)\n', (6546, 6561), False, 'from pathlib import Path\n'), ((6687, 6697), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6691, 6697), False, 'from pathlib import Path\n'), ((9444, 9455), 'pathlib.Path', 'Path', (['image'], {}), '(image)\n', (9448, 9455), False, 'from pathlib import Path\n'), ((9545, 9565), 'pathlib.Path', 'Path', (['classification'], {}), '(classification)\n', (9549, 9565), False, 'from pathlib import Path\n'), ((9787, 9799), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9797, 9799), False, 'import uuid\n'), ((6799, 6814), 'pathlib.Path', 'Path', (['"""GRANULE"""'], {}), "('GRANULE')\n", (6803, 6814), False, 'from pathlib import Path\n'), ((12747, 12771), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (12759, 12771), False, 'import os\n'), ((6421, 6436), 'pathlib.Path', 'Path', (['gran_path'], {}), '(gran_path)\n', (6425, 6436), False, 'from pathlib import Path\n')]
|
from mongoengine import connect
class Connect(object):
@staticmethod
def connect(table="test", username="superuser", password="<PASSWORD>#", authentication_source="admin"):
# return MongoClient("mongodb://superuser:Seltzer123#@localhost:27017/admin?authSource=admin")
connect('test', username='superuser', password='<PASSWORD>#', authentication_source='admin')
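# Hedged usage sketch (added; not part of the original module): establish the
# connection once before using any mongoengine Document models.
#
#     Connect.connect()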
|
[
"mongoengine.connect"
] |
[((296, 392), 'mongoengine.connect', 'connect', (['"""test"""'], {'username': '"""superuser"""', 'password': '"""<PASSWORD>#"""', 'authentication_source': '"""admin"""'}), "('test', username='superuser', password='<PASSWORD>#',\n authentication_source='admin')\n", (303, 392), False, 'from mongoengine import connect\n')]
|
#!/usr/bin/python
import pandas as pd
import covidAnnotator
import sys
import argparse
import os
def main():
all_mutations = pd.read_csv(os.path.dirname(os.path.abspath(__file__))+"/all_mutations.csv")
b117muts = pd.read_csv(os.path.dirname(os.path.abspath(__file__))+"/b117muts.csv")
uniqueIDs = pd.unique(all_mutations["Sequence ID"])
uniqueMuts = pd.unique(all_mutations["nuc name"])
numofSequences = len(uniqueIDs)
mutNucCount = all_mutations['nuc name'].value_counts()
freqs = (mutNucCount / numofSequences * 100)
freqs = freqs.T.to_dict()
freqTable = pd.DataFrame()
freqTable['nuc name'] = uniqueMuts
mapping = dict(all_mutations[['nuc name', 'type']].values)
freqTable['type'] = freqTable['nuc name'].map(mapping)
mapping = dict(all_mutations[['nuc name', 'protein']].values)
freqTable['protein'] = freqTable['nuc name'].map(mapping)
mapping = dict(all_mutations[['nuc name', 'AAMutation']].values)
freqTable['AAMutation'] = freqTable['nuc name'].map(mapping)
count_title = 'Count (' + str(numofSequences) + ')'
freqTable[count_title] = freqTable['nuc name'].map(all_mutations['nuc name'].value_counts())
freqTable['Freq'] = freqTable['nuc name'].map(freqs)
freqTable.sort_values(by=['protein', 'Freq'], ascending=False, inplace=True)
freqTable = freqTable.loc[freqTable['Freq'] >= 2]
mapping = dict(b117muts[['nucleotide', 'lineage original']].values)
freqTable['isUKLineage'] = freqTable['nuc name'].map(mapping)
num_muts = len(freqTable.index) - 1
if num_muts <= 0:
num_muts = 0
geneFreq = freqTable['protein'].value_counts() # + " \ " + str(num_muts)
geneFreq['total'] = num_muts
geneFreq = pd.DataFrame(geneFreq)
if not geneFreq.empty:
geneFreq['Freq'] = geneFreq / num_muts * 100
writer = pd.ExcelWriter("Freq_Table" + ".xlsx", engine='openpyxl')
freqTable.to_excel(writer, sheet_name='Mutations Frequencies', index=False)
geneFreq.to_excel(writer, sheet_name='gene count')
writer.save()
print("Freq_Table" + ".csv is ready")
if __name__ == '__main__':
# userChoose = loop.main()
parser = argparse.ArgumentParser()
parser.add_argument('file')
parser.add_argument("-i", help="insertions files path", nargs=1, type=str,
dest='insPath') # dest = name of variable
parser.add_argument("-n", help="to add 'N' to excel", action='store_true') # dest = name of variable
args = parser.parse_args()
covidAnnotator.main(args)
main()
|
[
"pandas.DataFrame",
"os.path.abspath",
"argparse.ArgumentParser",
"pandas.unique",
"pandas.ExcelWriter",
"covidAnnotator.main"
] |
[((322, 361), 'pandas.unique', 'pd.unique', (["all_mutations['Sequence ID']"], {}), "(all_mutations['Sequence ID'])\n", (331, 361), True, 'import pandas as pd\n'), ((380, 416), 'pandas.unique', 'pd.unique', (["all_mutations['nuc name']"], {}), "(all_mutations['nuc name'])\n", (389, 416), True, 'import pandas as pd\n'), ((614, 628), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (626, 628), True, 'import pandas as pd\n'), ((1764, 1786), 'pandas.DataFrame', 'pd.DataFrame', (['geneFreq'], {}), '(geneFreq)\n', (1776, 1786), True, 'import pandas as pd\n'), ((1885, 1942), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["('Freq_Table' + '.xlsx')"], {'engine': '"""openpyxl"""'}), "('Freq_Table' + '.xlsx', engine='openpyxl')\n", (1899, 1942), True, 'import pandas as pd\n'), ((2221, 2246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2244, 2246), False, 'import argparse\n'), ((2572, 2597), 'covidAnnotator.main', 'covidAnnotator.main', (['args'], {}), '(args)\n', (2591, 2597), False, 'import covidAnnotator\n'), ((168, 193), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os\n'), ((261, 286), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (276, 286), False, 'import os\n')]
|
# Generated by Django 3.1.5 on 2021-03-14 03:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interflowApp', '0004_auto_20210311_1650'),
]
operations = [
migrations.AlterModelOptions(
name='board',
options={'verbose_name': '访客留言', 'verbose_name_plural': '访客留言'},
),
migrations.AddField(
model_name='board',
name='file',
field=models.FileField(default='', upload_to='files', verbose_name='上传文件'),
),
migrations.AlterField(
model_name='board',
name='content',
field=models.TextField(max_length=500, verbose_name='留言内容'),
),
]
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.migrations.AlterModelOptions"
] |
[((240, 351), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""board"""', 'options': "{'verbose_name': '访客留言', 'verbose_name_plural': '访客留言'}"}), "(name='board', options={'verbose_name': '访客留言',\n 'verbose_name_plural': '访客留言'})\n", (268, 351), False, 'from django.db import migrations, models\n'), ((488, 556), 'django.db.models.FileField', 'models.FileField', ([], {'default': '""""""', 'upload_to': '"""files"""', 'verbose_name': '"""上传文件"""'}), "(default='', upload_to='files', verbose_name='上传文件')\n", (504, 556), False, 'from django.db import migrations, models\n'), ((678, 731), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'verbose_name': '"""留言内容"""'}), "(max_length=500, verbose_name='留言内容')\n", (694, 731), False, 'from django.db import migrations, models\n')]
|
from shutil import copyfile
import pandas as pd
import os
from glob import glob
import json
import sys
from tqdm import tqdm
import numpy as np
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('config', './configs/train_config.json', 'Config file with data paths')
flags.DEFINE_string('input_images_dir', '/data/', 'Images directory that has folders "20x images" e.g.')
flags.DEFINE_string('output_dir', './tmp/test_slides/', 'test slides are copied here')
def main(unused_argv):
# read config
with open(FLAGS.config) as json_file:
config = json.load(json_file)
test_fold = int(config['test_fold_index'])
tile_sz = int(config['tile_sz'])
IMG_DIR = FLAGS.input_images_dir
OUT_DIR = FLAGS.output_dir
IMG_20_DIR = os.path.join(IMG_DIR, '20x_images')
IMG_40_DIR = os.path.join(IMG_DIR, '40x_images')
IMG_60_DIR = os.path.join(IMG_DIR, '60x_images')
input_files_20 = glob(IMG_20_DIR + '/*C04.tif')
input_files_40 = glob(IMG_40_DIR + '/*C04.tif')
input_files_60 = glob(IMG_60_DIR + '/*C04.tif')
target_files_20 = glob(IMG_20_DIR + '/*C01.tif') + glob(IMG_20_DIR + '/*C02.tif') + glob(IMG_20_DIR + '/*C03.tif')
target_files_40 = glob(IMG_40_DIR + '/*C01.tif') + glob(IMG_40_DIR + '/*C02.tif') + glob(IMG_40_DIR + '/*C03.tif')
target_files_60 = glob(IMG_60_DIR + '/*C01.tif') + glob(IMG_60_DIR + '/*C02.tif') + glob(IMG_60_DIR + '/*C03.tif')
for (input_files, target_files, size_text) in tqdm(zip(
[input_files_20, input_files_40, input_files_60],
[target_files_20, target_files_40, target_files_60],
['20','40','60'])
):
df = pd.read_csv(os.path.join(config['tile_data_dir'], f'train_{size_text}.csv'))
df = df[df.fold == test_fold]
test_slide_names = list(df.slide_name.values)
input_dir = os.path.join(OUT_DIR, f'{size_text}x_input')
target_dir = os.path.join(OUT_DIR, f'{size_text}x_target')
if not os.path.isdir(OUT_DIR):
os.mkdir(OUT_DIR)
if not os.path.isdir(input_dir):
os.mkdir(input_dir)
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
unique_slide_names = np.unique(np.array([(os.path.basename(fn).split('.')[0][:-9]) for fn in input_files]))
unique_slide_names = [slide_name for slide_name in unique_slide_names if slide_name in test_slide_names]
for unique_slide in unique_slide_names:
slide_input_files = [fn for fn in input_files if unique_slide in fn]
slide_target_files = [fn for fn in target_files if unique_slide in fn]
# copy test fold input files
for slide_input in slide_input_files:
dst = os.path.join(input_dir, os.path.basename(slide_input))
print(f'Copying {slide_input} to {dst}')
copyfile(slide_input, dst)
# copy target files
for slide_target in slide_target_files:
dst = os.path.join(target_dir, os.path.basename(slide_target))
print(f'Copying {slide_target} to {dst}')
copyfile(slide_target, dst)
print('Done')
if __name__ == '__main__':
FLAGS(sys.argv)
app.run(main)
|
[
"os.mkdir",
"json.load",
"os.path.basename",
"os.path.isdir",
"absl.flags.DEFINE_string",
"absl.app.run",
"glob.glob",
"shutil.copyfile",
"os.path.join"
] |
[((211, 306), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""config"""', '"""./configs/train_config.json"""', '"""Config file with data paths"""'], {}), "('config', './configs/train_config.json',\n 'Config file with data paths')\n", (230, 306), False, 'from absl import flags\n'), ((303, 411), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""input_images_dir"""', '"""/data/"""', '"""Images directory that has folders "20x images" e.g."""'], {}), '(\'input_images_dir\', \'/data/\',\n \'Images directory that has folders "20x images" e.g.\')\n', (322, 411), False, 'from absl import flags\n'), ((408, 498), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', '"""./tmp/test_slides/"""', '"""test slides are copied here"""'], {}), "('output_dir', './tmp/test_slides/',\n 'test slides are copied here')\n", (427, 498), False, 'from absl import flags\n'), ((797, 832), 'os.path.join', 'os.path.join', (['IMG_DIR', '"""20x_images"""'], {}), "(IMG_DIR, '20x_images')\n", (809, 832), False, 'import os\n'), ((850, 885), 'os.path.join', 'os.path.join', (['IMG_DIR', '"""40x_images"""'], {}), "(IMG_DIR, '40x_images')\n", (862, 885), False, 'import os\n'), ((903, 938), 'os.path.join', 'os.path.join', (['IMG_DIR', '"""60x_images"""'], {}), "(IMG_DIR, '60x_images')\n", (915, 938), False, 'import os\n'), ((965, 995), 'glob.glob', 'glob', (["(IMG_20_DIR + '/*C04.tif')"], {}), "(IMG_20_DIR + '/*C04.tif')\n", (969, 995), False, 'from glob import glob\n'), ((1017, 1047), 'glob.glob', 'glob', (["(IMG_40_DIR + '/*C04.tif')"], {}), "(IMG_40_DIR + '/*C04.tif')\n", (1021, 1047), False, 'from glob import glob\n'), ((1069, 1099), 'glob.glob', 'glob', (["(IMG_60_DIR + '/*C04.tif')"], {}), "(IMG_60_DIR + '/*C04.tif')\n", (1073, 1099), False, 'from glob import glob\n'), ((3373, 3386), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (3380, 3386), False, 'from absl import app\n'), ((601, 621), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (610, 621), False, 'import json\n'), ((1189, 1219), 'glob.glob', 'glob', (["(IMG_20_DIR + '/*C03.tif')"], {}), "(IMG_20_DIR + '/*C03.tif')\n", (1193, 1219), False, 'from glob import glob\n'), ((1308, 1338), 'glob.glob', 'glob', (["(IMG_40_DIR + '/*C03.tif')"], {}), "(IMG_40_DIR + '/*C03.tif')\n", (1312, 1338), False, 'from glob import glob\n'), ((1427, 1457), 'glob.glob', 'glob', (["(IMG_60_DIR + '/*C03.tif')"], {}), "(IMG_60_DIR + '/*C03.tif')\n", (1431, 1457), False, 'from glob import glob\n'), ((1900, 1944), 'os.path.join', 'os.path.join', (['OUT_DIR', 'f"""{size_text}x_input"""'], {}), "(OUT_DIR, f'{size_text}x_input')\n", (1912, 1944), False, 'import os\n'), ((1966, 2011), 'os.path.join', 'os.path.join', (['OUT_DIR', 'f"""{size_text}x_target"""'], {}), "(OUT_DIR, f'{size_text}x_target')\n", (1978, 2011), False, 'import os\n'), ((1123, 1153), 'glob.glob', 'glob', (["(IMG_20_DIR + '/*C01.tif')"], {}), "(IMG_20_DIR + '/*C01.tif')\n", (1127, 1153), False, 'from glob import glob\n'), ((1156, 1186), 'glob.glob', 'glob', (["(IMG_20_DIR + '/*C02.tif')"], {}), "(IMG_20_DIR + '/*C02.tif')\n", (1160, 1186), False, 'from glob import glob\n'), ((1242, 1272), 'glob.glob', 'glob', (["(IMG_40_DIR + '/*C01.tif')"], {}), "(IMG_40_DIR + '/*C01.tif')\n", (1246, 1272), False, 'from glob import glob\n'), ((1275, 1305), 'glob.glob', 'glob', (["(IMG_40_DIR + '/*C02.tif')"], {}), "(IMG_40_DIR + '/*C02.tif')\n", (1279, 1305), False, 'from glob import glob\n'), ((1361, 1391), 'glob.glob', 'glob', (["(IMG_60_DIR + '/*C01.tif')"], {}), "(IMG_60_DIR + 
'/*C01.tif')\n", (1365, 1391), False, 'from glob import glob\n'), ((1394, 1424), 'glob.glob', 'glob', (["(IMG_60_DIR + '/*C02.tif')"], {}), "(IMG_60_DIR + '/*C02.tif')\n", (1398, 1424), False, 'from glob import glob\n'), ((1713, 1776), 'os.path.join', 'os.path.join', (["config['tile_data_dir']", 'f"""train_{size_text}.csv"""'], {}), "(config['tile_data_dir'], f'train_{size_text}.csv')\n", (1725, 1776), False, 'import os\n'), ((2028, 2050), 'os.path.isdir', 'os.path.isdir', (['OUT_DIR'], {}), '(OUT_DIR)\n', (2041, 2050), False, 'import os\n'), ((2064, 2081), 'os.mkdir', 'os.mkdir', (['OUT_DIR'], {}), '(OUT_DIR)\n', (2072, 2081), False, 'import os\n'), ((2110, 2134), 'os.path.isdir', 'os.path.isdir', (['input_dir'], {}), '(input_dir)\n', (2123, 2134), False, 'import os\n'), ((2148, 2167), 'os.mkdir', 'os.mkdir', (['input_dir'], {}), '(input_dir)\n', (2156, 2167), False, 'import os\n'), ((2196, 2221), 'os.path.isdir', 'os.path.isdir', (['target_dir'], {}), '(target_dir)\n', (2209, 2221), False, 'import os\n'), ((2235, 2255), 'os.mkdir', 'os.mkdir', (['target_dir'], {}), '(target_dir)\n', (2243, 2255), False, 'import os\n'), ((2973, 2999), 'shutil.copyfile', 'copyfile', (['slide_input', 'dst'], {}), '(slide_input, dst)\n', (2981, 2999), False, 'from shutil import copyfile\n'), ((3250, 3277), 'shutil.copyfile', 'copyfile', (['slide_target', 'dst'], {}), '(slide_target, dst)\n', (3258, 3277), False, 'from shutil import copyfile\n'), ((2869, 2898), 'os.path.basename', 'os.path.basename', (['slide_input'], {}), '(slide_input)\n', (2885, 2898), False, 'import os\n'), ((3144, 3174), 'os.path.basename', 'os.path.basename', (['slide_target'], {}), '(slide_target)\n', (3160, 3174), False, 'import os\n'), ((2319, 2339), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (2335, 2339), False, 'import os\n')]
|
import sys
import setuptools
from distutils import sysconfig
cfg_vars = sysconfig.get_config_vars()
for key, value in cfg_vars.items():
if type(value) == str:
cfg_vars[key] = cfg_vars[key].replace("-Wstrict-prototypes", "")
cfg_vars[key] = cfg_vars[key].replace("-Wall", "-w")
cfg_vars[key] = cfg_vars[key].replace("-O3", "")
cfg_vars[key] = cfg_vars[key].replace("-O2", "")
cfg_vars[key] = cfg_vars[key].replace("-DNDEBUG", "-UNDEBUG")
cfg_vars[key] = cfg_vars[key].replace(" -g ", " ")#linux-gnu gotcha
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
from Cython.Build import cythonize
from Cython.Distutils.extension import Extension
from Cython.Distutils import build_ext
## \file setup.py setup.py
# \brief The python script for building proteus
#
# Set the DISTUTILS_DEBUG environment variable to print detailed information while setup.py is running.
#
from proteus import config
from proteus.config import *
###to turn on debugging in c++
##\todo Finishing cleaning up setup.py/setup.cfg, config.py...
PROTEUS_PETSC_EXTRA_LINK_ARGS = getattr(config, 'PROTEUS_PETSC_EXTRA_LINK_ARGS', [])
PROTEUS_PETSC_EXTRA_COMPILE_ARGS = getattr(config, 'PROTEUS_PETSC_EXTRA_COMPILE_ARGS', [])
PROTEUS_CHRONO_CXX_FLAGS = getattr(config, 'PROTEUS_CHRONO_CXX_FLAGS', [])
proteus_install_path = os.path.join(sysconfig.get_python_lib(), 'proteus')
# handle non-system installations
for arg in sys.argv:
if arg.startswith('--root'):
proteus_install_path = proteus_install_path.partition(sys.prefix + '/')[-1]
break
if arg.startswith('--prefix'):
proteus_install_path = proteus_install_path.partition(sys.prefix + '/')[-1]
break
EXTENSIONS_TO_BUILD = [
# Extension("MeshAdaptPUMI.MeshAdaptPUMI",
# sources = ['proteus/MeshAdaptPUMI/MeshAdaptPUMI.pyx', 'proteus/MeshAdaptPUMI/cMeshAdaptPUMI.cpp',
# 'proteus/MeshAdaptPUMI/MeshConverter.cpp', 'proteus/MeshAdaptPUMI/ParallelMeshConverter.cpp',
# 'proteus/MeshAdaptPUMI/MeshFields.cpp', 'proteus/MeshAdaptPUMI/SizeField.cpp',
# 'proteus/MeshAdaptPUMI/DumpMesh.cpp',
# 'proteus/MeshAdaptPUMI/ErrorResidualMethod.cpp','proteus/MeshAdaptPUMI/VMS.cpp','proteus/MeshAdaptPUMI/createAnalyticGeometry.cpp'],
# depends=["proteus/partitioning.h",
# "proteus/partitioning.cpp",
# "proteus/cpartitioning.pyx",
# "proteus/cmeshTools.pxd",
# "proteus/mesh.h",
# 'proteus/mesh.cpp',
# 'proteus/meshio.cpp'],
# define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H)],
# language='c++',
# include_dirs=[numpy.get_include(),'include',
# 'proteus','proteus/MeshAdaptPUMI']+
# PROTEUS_SCOREC_INCLUDE_DIRS,
# library_dirs=PROTEUS_SCOREC_LIB_DIRS,
# libraries=PROTEUS_SCOREC_LIBS,
# extra_compile_args=PROTEUS_SCOREC_EXTRA_COMPILE_ARGS+PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
# extra_link_args=PROTEUS_SCOREC_EXTRA_LINK_ARGS),#+PROTEUS_EXTRA_LINK_ARGS),
Extension("mprans.cPres",['proteus/mprans/cPres.pyx'],
depends=['proteus/mprans/Pres.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],),
Extension("mprans.cPresInit",['proteus/mprans/cPresInit.pyx'],
depends=['proteus/mprans/PresInit.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cPresInc",['proteus/mprans/cPresInc.pyx'],
depends=['proteus/mprans/PresInc.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cAddedMass",['proteus/mprans/cAddedMass.pyx'],
depends=['proteus/mprans/AddedMass.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.SedClosure",['proteus/mprans/SedClosure.pyx'],
depends=['proteus/mprans/SedClosure.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cVOF3P",['proteus/mprans/cVOF3P.pyx'],
depends=['proteus/mprans/VOF3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cVOS3P",['proteus/mprans/cVOS3P.pyx'],
depends=['proteus/mprans/VOS3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cNCLS3P",['proteus/mprans/cNCLS3P.pyx'],
depends=['proteus/mprans/NCLS3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cRDLS3P",['proteus/mprans/cRDLS3P.pyx'],
depends=['proteus/mprans/RDLS3P.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cMCorr3P",
["proteus/mprans/cMCorr3P.pyx"],
depends=["proteus/mprans/MCorr3P.h", 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
define_macros=[('PROTEUS_LAPACK_H',
PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',
PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',
PROTEUS_BLAS_H)],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("richards.cRichards",['proteus/richards/cRichards.pyx'],
depends=['proteus/richards/Richards.h','proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("elastoplastic.cElastoPlastic",
['proteus/elastoplastic/cElastoPlastic.pyx'],
define_macros=[('PROTEUS_LAPACK_H',
PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',
PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',
PROTEUS_BLAS_H)],
depends=['proteus/elastoplastic/ElastoPlastic.h','proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=[numpy.get_include(),'proteus'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("mprans.cRANS3PF",['proteus/mprans/cRANS3PF.pyx'],
depends=['proteus/mprans/RANS3PF.h','proteus/mprans/RANS3PF2D.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.cRANS3PSed",['proteus/mprans/cRANS3PSed.pyx'],
depends=['proteus/mprans/RANS3PSed.h','proteus/mprans/RANS3PSed2D.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("Isosurface",['proteus/Isosurface.pyx'],
language='c',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("BoundaryConditions",['proteus/BoundaryConditions.py'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.BoundaryConditions",['proteus/mprans/BoundaryConditions.py'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.MeshSmoothing",['proteus/mprans/MeshSmoothing.pyx'],
language='c++',
include_dirs=[numpy.get_include(),'proteus',PROTEUS_INCLUDE_DIR],
libraries=['stdc++','m'],
extra_compile_args=["-std=c++11","-mavx"]),
Extension("mprans.cMoveMeshMonitor",['proteus/mprans/cMoveMeshMonitor.pyx'],
language='c++',
include_dirs=[numpy.get_include(),'proteus',PROTEUS_INCLUDE_DIR],
libraries=['stdc++','m'],
extra_compile_args=["-std=c++11","-mavx"]),
Extension("mbd.CouplingFSI",
sources=['proteus/mbd/CouplingFSI.pyx',
'proteus/mbd/CouplingFSI.pxd',
'proteus/mbd/ChVariablesBodyAddedMass.cpp',
'proteus/mbd/ChBodyAddedMass.cpp',
'proteus/mbd/ChronoHeaders.pxd'],
depends=['proteus/mbd/ProtChBody.h',
'proteus/mbd/ProtChMoorings.h'],
language='c++',
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_INCLUDE_DIR,
PROTEUS_CHRONO_INCLUDE_DIR,
PROTEUS_CHRONO_INCLUDE_DIR+'/chrono',
PROTEUS_CHRONO_INCLUDE_DIR+'/chrono/collision/bullet',],
library_dirs=[PROTEUS_CHRONO_LIB_DIR],
libraries=['ChronoEngine',
'stdc++',
'm'],
extra_compile_args=PROTEUS_CHRONO_CXX_FLAGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("WaveTools",['proteus/WaveTools.py'],
depends=['proteus/WaveTools.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("fenton.Fenton",
sources=['proteus/fenton/Fenton.pyx',
'proteus/fenton/Solve.cpp',
'proteus/fenton/Dpythag.cpp',
'proteus/fenton/Dsvbksb.cpp',
'proteus/fenton/Dsvdcmp.cpp',
'proteus/fenton/Inout.cpp',
'proteus/fenton/Subroutines.cpp',
'proteus/fenton/Util.cpp',],
language='c++',
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_INCLUDE_DIR,
PROTEUS_NCURSES_INCLUDE_DIR,],
library_dirs=[PROTEUS_NCURSES_LIB_DIR,],
libraries=['ncurses','stdc++','m'],
extra_compile_args=["-std=c++11"]),
Extension("ADR",['proteus/ADR.pyx'],
depends=['proteus/ADR.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("subsurfaceTransportFunctions",['proteus/subsurfaceTransportFunctions.pyx'],
include_dirs=[numpy.get_include(),'proteus'],
extra_compile_args=PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('cfemIntegrals',
['proteus/cfemIntegrals.pyx',
'proteus/femIntegrals.c',
'proteus/postprocessing.c'],
depends=['proteus/femIntegrals.h'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
include_dirs=[numpy.get_include(),'proteus',
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("csparsity",['proteus/csparsity.pyx', 'proteus/sparsity.cpp'],
depends=['proteus/sparsity.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],),
Extension("cmeshTools",
['proteus/cmeshTools.pyx', 'proteus/mesh.cpp', 'proteus/meshio.cpp'],
language='c++',
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
library_dirs=[PROTEUS_DAETK_LIB_DIR]+PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m',PROTEUS_DAETK_LIB]+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT),
Extension('ctransportCoefficients',
['proteus/ctransportCoefficients.pyx','proteus/transportCoefficients.c'],
include_dirs=[numpy.get_include(),'proteus'],
depends=["proteus/transportCoefficients.h"],
language="c",
libraries=['m']),
Extension('csubgridError',
['proteus/csubgridError.pyx','proteus/subgridError.c'],
depends=["proteus/subgridError.h"],
language="c",
include_dirs=[numpy.get_include(),'proteus'],
libraries=['m'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('cshockCapturing',
['proteus/cshockCapturing.pyx','proteus/shockCapturing.c'],
depends=["proteus/shockCapturing.h"],
language="c",
include_dirs=[numpy.get_include(),'proteus'],
libraries=['m'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('superluWrappers',
['proteus/superluWrappers.pyx'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
language="c",
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_SUPERLU_INCLUDE_DIR],
library_dirs=[PROTEUS_SUPERLU_LIB_DIR,
PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_SUPERLU_LIB,
PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("csmoothers",["proteus/csmoothers.pyx", "proteus/smoothers.c"],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
language="c",
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR,
],
library_dirs=[PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_SUPERLU_LIB_DIR,
PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_SUPERLU_LIB,
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("canalyticalSolutions",["proteus/canalyticalSolutions.pyx", "proteus/analyticalSolutions.c"],
depends=["proteus/analyticalSolutions.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension("clapack",
["proteus/clapack.pyx"],
depends=["proteus/proteus_lapack.h","proteus/proteus_blas.h"],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language="c",
include_dirs=[numpy.get_include(), 'proteus',
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]),
Extension("cpostprocessing",
["proteus/cpostprocessing.pyx","proteus/postprocessing.c"],
depends=["proteus/postprocessing.h","proteus/postprocessing.pxd"],
define_macros=[('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language="c",
include_dirs=[numpy.get_include(), 'proteus',
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]),
Extension('cnumericalFlux',
['proteus/cnumericalFlux.pyx','proteus/numericalFlux.c'],
depends=["proteus/numericalFlux.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension('ctimeIntegration',
['proteus/ctimeIntegration.pyx','proteus/timeIntegration.c'],
depends=["proteus/timeIntegration.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension("cTwophaseDarcyCoefficients",
["proteus/cTwophaseDarcyCoefficients.pyx",
"proteus/SubsurfaceTransportCoefficients.cpp"],
depends=["proteus/SubsurfaceTransportCoefficients.h",
"proteus/pskRelations.h",
"proteus/pskRelations.pxd",
"proteus/densityRelations.h",
"proteus/twophaseDarcyCoefficients.pxd",
"proteus/twophaseDarcyCoefficients.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cSubsurfaceTransportCoefficients",
["proteus/cSubsurfaceTransportCoefficients.pyx","proteus/SubsurfaceTransportCoefficients.cpp"],
depends=["proteus/SubsurfaceTransportCoefficients.pxd",
"proteus/SubsurfaceTransportCoefficients.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cpskRelations",["proteus/cpskRelations.pyx"],
depends=["proteus/pskRelations.pxd",
"proteus/pskRelations.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cpartitioning",["proteus/cpartitioning.pyx",
"proteus/partitioning.cpp",
'proteus/mesh.cpp',
'proteus/meshio.cpp',],
depends=["proteus/partitioning.h",
"proteus/partitioning.cpp",
"proteus/cpartitioning.pyx",
"proteus/cmeshTools.pxd",
"proteus/mesh.h",
'proteus/mesh.cpp',
'proteus/meshio.cpp'],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("flcbdfWrappers",["proteus/flcbdfWrappers.pyx"],
language="c++",
depends=["proteus/flcbdfWrappers.pxd"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
library_dirs=[PROTEUS_DAETK_LIB_DIR]+PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m',PROTEUS_DAETK_LIB]+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
),
Extension("mprans.cCLSVOF",["proteus/mprans/cCLSVOF.pyx"],
depends=["proteus/mprans/CLSVOF.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cNCLS",["proteus/mprans/cNCLS.pyx"],
depends=["proteus/mprans/NCLS.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cMCorr",["proteus/mprans/cMCorr.pyx"],
depends=["proteus/mprans/MCorr.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
define_macros=[('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("mprans.cRANS2P",["proteus/mprans/cRANS2P.pyx"],
depends=["proteus/mprans/RANS2P.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"],
extra_compile_args=PROTEUS_OPT,
language="c++", include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cRANS2P2D",["proteus/mprans/cRANS2P2D.pyx"],
depends=["proteus/mprans/RANS2P2D.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cRDLS",["proteus/mprans/cRDLS.pyx"],
depends=["proteus/mprans/RDLS.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cVOF",["proteus/mprans/cVOF.pyx"],
depends=["proteus/mprans/VOF.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cMoveMesh",["proteus/mprans/cMoveMesh.pyx"],
depends=["proteus/mprans/MoveMesh.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cMoveMesh2D",["proteus/mprans/cMoveMesh2D.pyx"],
depends=["proteus/mprans/MoveMesh2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cSW2D",["proteus/mprans/cSW2D.pyx"],
depends=["proteus/mprans/SW2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+['-g']+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS+['-g']),
Extension("mprans.cSW2DCV",["proteus/mprans/cSW2DCV.pyx"],
depends=["proteus/mprans/SW2DCV.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+['-g']+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS+['-g']),
Extension("mprans.cGN_SW2DCV",["proteus/mprans/cGN_SW2DCV.pyx"],
depends=["proteus/mprans/GN_SW2DCV.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+['-g']+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS+['-g']),
Extension("mprans.cKappa",["proteus/mprans/cKappa.pyx"],
depends=["proteus/mprans/Kappa.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cKappa2D",["proteus/mprans/cKappa2D.pyx"],
depends=["proteus/mprans/Kappa2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cDissipation",["proteus/mprans/cDissipation.pyx"],
depends=["proteus/mprans/Dissipation.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
Extension("mprans.cDissipation2D",["proteus/mprans/cDissipation2D.pyx"],
depends=["proteus/mprans/Dissipation2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
language="c++",
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(), 'proteus']),
]
def setup_given_extensions(extensions):
setup(name='proteus',
version='1.6.1.dev0',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
            'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
],
description='Python tools for multiphysics modeling',
author='The Proteus Developers',
author_email='<EMAIL>',
url='http://proteustoolkit.org',
packages = ['proteus',
'proteus.fenton',
'proteus.mprans',
'proteus.richards',
'proteus.elastoplastic',
'proteus.mbd',
'proteus.test_utils',
'proteus.config',
'proteus.tests',
'proteus.tests.ci',
'proteus.tests.griffiths_lane_6',
'proteus.tests.levelset',
'proteus.tests.linalgebra_tests',
'proteus.tests.LS_with_edgeBased_EV',
'proteus.tests.LS_with_edgeBased_EV.VOF',
'proteus.tests.LS_with_edgeBased_EV.NCLS',
'proteus.tests.BernsteinPolynomials',
'proteus.tests.BernsteinPolynomials.poisson_eqn',
'proteus.tests.elliptic_redist',
'proteus.tests.elliptic_redist.RDLS',
'proteus.tests.elliptic_redist.RDLS3P',
'proteus.tests.surface_tension',
'proteus.tests.surface_tension.rising_bubble_rans3p',
'proteus.tests.CLSVOF',
'proteus.tests.CLSVOF.disc_ICs',
'proteus.tests.CLSVOF.with_RANS2P',
'proteus.tests.CLSVOF.with_RANS3PF',
'proteus.tests.CLSVOF.pure_level_set',
'proteus.TwoPhaseFlow',
'proteus.TwoPhaseFlow.utils',
'proteus.tests.TwoPhaseFlow',
'proteus.tests.SWEs',
'proteus.tests.SWEs.dam_over_bumps',
'proteus.tests.SWEs.oneD_dambreak_flat_bottom',
'proteus.tests.SWEs.paraboloid_with_friction',
'proteus.tests.SWEs.paraboloid_with_friction.oneD',
'proteus.tests.SWEs.paraboloid_with_friction.twoD',
'proteus.tests.SWEs.test_gauges',
'proteus.tests.SWEs.test_reflecting_BCs',
'proteus.tests.matrix_constructor',
'proteus.tests.matrix_constructor.import_modules',
'proteus.MeshAdaptPUMI',
'proteus.tests.MeshAdaptPUMI',
'proteus.tests.MeshAdaptPUMI.gauge_compare.dambreak_Colagrossi_2D',
'proteus.tests.mesh_tests',
'proteus.tests.mesh_tests.import_modules',
'proteus.tests.periodic',
'proteus.tests.periodic.petsc',
'proteus.tests.periodic.comparison_files',
'proteus.tests.poisson_2d',
'proteus.tests.post_processing',
'proteus.tests.post_processing.import_modules',
'proteus.tests.ProjScheme_with_EV',
'proteus.tests.single_phase_gw',
'proteus.tests.solver_tests',
'proteus.tests.solver_tests.import_modules',
'proteus.tests.solver_tests_slow',
'proteus.tests.solver_tests_slow.import_modules',
'proteus.tests.solver_tests_mprans',
'proteus.tests.solver_tests_mprans.import_modules',
'proteus.tests.cylinder2D',
'proteus.tests.cylinder2D.conforming_rans2p',
'proteus.tests.cylinder2D.conforming_rans3p',
'proteus.tests.cylinder2D.ibm_method',
'proteus.tests.cylinder2D.ibm_rans2p',
'proteus.tests.cylinder2D.ibm_rans2p_3D',
'proteus.tests.cylinder2D.sbm_method',
'proteus.tests.cylinder2D.sbm_3Dmesh',
'proteus.tests.HotStart_3P',
'proteus.tests.AddedMass',
'proteus.tests.MoveMeshMonitor',
'proteus.tests.wave_tests',
],
cmdclass = {'build_ext':build_ext},
ext_package='proteus',
ext_modules=extensions,
data_files=[(proteus_install_path,
['proteus/proteus_blas.h',
'proteus/proteus_lapack.h',
'proteus/proteus_superlu.h',
'proteus/ModelFactory.h',
'proteus/CompKernel.h'
]),
(os.path.join(proteus_install_path,'tests'),
['proteus/tests/hex_cube_3x3.xmf',
'proteus/tests/hex_cube_3x3.h5',
'proteus/tests/sparse_mat_ex.mtx']),
(os.path.join(proteus_install_path,'tests','linalgebra_tests'),
['proteus/tests/linalgebra_tests/sparse_mat_1.txt',
'proteus/tests/linalgebra_tests/jac.bin']),
(os.path.join(proteus_install_path,'tests','griffiths_lane_6'),
['proteus/tests/griffiths_lane_6/richards_expected.h5',
'proteus/tests/griffiths_lane_6/elastoplastic_expected.h5']),
(os.path.join(proteus_install_path,'tests','levelset'),
['proteus/tests/levelset/rotation/rotation_c0p1cg_vbdf_2_level_1_expected.h5',
'proteus/tests/levelset/vortex2D/vortex_c0p1cg_vbdf_2_level_1_expected.h5',
'proteus/tests/levelset/vortex/vortex_c0p1cg_bdf_2_level_1_expected.h5']),
(os.path.join(proteus_install_path,'tests','ci','comparison_files'),
['proteus/tests/ci/comparison_files/floating_bar.h5',
'proteus/tests/ci/comparison_files/phi_t_0.000000_000.tgz']),
(os.path.join(proteus_install_path,'tests','LS_with_edgeBased_EV','VOF','comparison_files'),
['proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_SmoothnessBased.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_TaylorGalerkin.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_stab4.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_EV1.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_SUPG.h5',
'proteus/tests/LS_with_edgeBased_EV/VOF/comparison_files/vof_level_3_EV2.h5']),
(os.path.join(proteus_install_path,'tests','LS_with_edgeBased_EV','NCLS','comparison_files'),
['proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_non_saturated_ls.h5',
'proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_pureAdvection_SUPG.h5',
'proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_pureAdvection_EV1.h5',
'proteus/tests/LS_with_edgeBased_EV/NCLS/comparison_files/ncls_level_3_saturated_ls.h5']),
(os.path.join(proteus_install_path,'tests','BernsteinPolynomials','poisson_eqn','comparison_files'),
['proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/2D_poisson_hex_degree2.h5',
'proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/2D_poisson_simplex_degree2.h5',
'proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/3D_poisson_hex_degree2.h5',
'proteus/tests/BernsteinPolynomials/poisson_eqn/comparison_files/3D_poisson_simplex_degree2.h5']),
(os.path.join(proteus_install_path,'tests','surface_tension','rising_bubble_rans3p','comparison_files'),
['proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_2D_supg.h5',
'proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_2D_ev.h5',
'proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_3D_supg.h5',
'proteus/tests/surface_tension/rising_bubble_rans3p/comparison_files/risingBubble_3D_ev.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','disc_ICs','comparison_files'),
['proteus/tests/CLSVOF/disc_ICs/comparison_files/test_case_1.h5',
'proteus/tests/CLSVOF/disc_ICs/comparison_files/test_case_2.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','pure_level_set','comparison_files'),
['proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_1.h5',
'proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_2.h5',
'proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_3.h5',
'proteus/tests/CLSVOF/pure_level_set/comparison_files/clsvof_test_case_4.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','with_RANS2P','comparison_files'),
['proteus/tests/CLSVOF/with_RANS2P/comparison_files/multiphase_2D_falling_bubble.h5']),
(os.path.join(proteus_install_path,'tests','CLSVOF','with_RANS3PF','comparison_files'),
['proteus/tests/CLSVOF/with_RANS3PF/comparison_files/multiphase_2D_falling_bubble.h5',
'proteus/tests/CLSVOF/with_RANS3PF/comparison_files/multiphase_3D_falling_bubble.h5']),
(os.path.join(proteus_install_path,'tests','TwoPhaseFlow','comparison_files'),
['proteus/tests/TwoPhaseFlow/comparison_files/risingBubble.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/damBreak.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/TwoDimBucklingFlow.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/fillingTank.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/marin.h5',
'proteus/tests/TwoPhaseFlow/comparison_files/moses.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','dam_over_bumps','comparison_files'),
['proteus/tests/SWEs/dam_over_bumps/comparison_files/SWEs_dam_over_bumps.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','oneD_dambreak_flat_bottom','comparison_files'),
['proteus/tests/SWEs/oneD_dambreak_flat_bottom/comparison_files/SWEs_oneD_dambreak_flat_bottom.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','paraboloid_with_friction','oneD','comparison_files'),
['proteus/tests/SWEs/paraboloid_with_friction/oneD/comparison_files/SWEs_oneD_paraboloid_with_friction.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','paraboloid_with_friction','twoD','comparison_files'),
['proteus/tests/SWEs/paraboloid_with_friction/twoD/comparison_files/SWEs_twoD_paraboloid_with_friction.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','test_gauges','comparison_files'),
['proteus/tests/SWEs/test_gauges/comparison_files/SWEs_test_gauges.h5']),
(os.path.join(proteus_install_path,'tests','SWEs','test_reflecting_BCs','comparison_files'),
['proteus/tests/SWEs/test_reflecting_BCs/comparison_files/SWEs_test_reflecting_BCs.h5']),
(os.path.join(proteus_install_path,'tests','solver_tests','import_modules'),
['proteus/tests/solver_tests/import_modules/quad_mass_matrix.npy',
'proteus/tests/solver_tests/import_modules/sol_10.npy',
'proteus/tests/solver_tests/import_modules/sol_20_lst.npy',
'proteus/tests/solver_tests/import_modules/input_vec_tppcd.bin',
'proteus/tests/solver_tests/import_modules/tp_pcd_y_output.bin',
'proteus/tests/solver_tests/import_modules/tppcd_y_dirichlet_dof.bin',
'proteus/tests/solver_tests/import_modules/Qp_visc.bin',
'proteus/tests/solver_tests/import_modules/Qp_dens.bin',
'proteus/tests/solver_tests/import_modules/Ap_rho.bin',
'proteus/tests/solver_tests/import_modules/Np_rho.bin',
'proteus/tests/solver_tests/import_modules/saddle_point_small.bin',
'proteus/tests/solver_tests/import_modules/saddle_point_matrix.bin',
'proteus/tests/solver_tests/import_modules/rans2p_step_newton_1.bin',
'proteus/tests/solver_tests/import_modules/rans2p_step_newton_5.bin',
'proteus/tests/solver_tests/import_modules/NSE_cavity_matrix.bin',
'proteus/tests/solver_tests/import_modules/NSE_step_no_slip.bin']),
(os.path.join(proteus_install_path,'tests','mesh_tests','comparison_files'),
['proteus/tests/mesh_tests/comparison_files/poiseulle_xmf.output',
'proteus/tests/mesh_tests/comparison_files/poiseulle_global_xmf.output']),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','comparison_files'),
['proteus/tests/solver_tests_slow/comparison_files/Qp_expected.log',
'proteus/tests/solver_tests_slow/comparison_files/drivenCavityStokes_expected.h5']),
(os.path.join(proteus_install_path,'tests','matrix_constructor','comparison_files'),
['proteus/tests/matrix_constructor/comparison_files/mass_reference_c0p1_2D.txt',
'proteus/tests/matrix_constructor/comparison_files/mass_reference_TH_2D.npy']),
(os.path.join(proteus_install_path,'tests','periodic','petsc'),
['proteus/tests/periodic/petsc/petsc.options.schur.selfp_petsc.amg',
'proteus/tests/periodic/petsc/petsc.options.schur.selfp_petsc.gamg.superlu',
'proteus/tests/periodic/petsc/petsc.options.schur.selfp_petsc.superlu']),
(os.path.join(proteus_install_path,'tests','periodic','comparison_files'),
['proteus/tests/periodic/comparison_files/basic_2d_test.h5',
'proteus/tests/periodic/comparison_files/basic_3d_test.h5']),
(os.path.join(proteus_install_path,'tests','post_processing','import_modules'),
['proteus/tests/post_processing/import_modules/reference_simplex_keep.ele',
'proteus/tests/post_processing/import_modules/reference_simplex_keep.face',
'proteus/tests/post_processing/import_modules/reference_simplex_keep.node',
'proteus/tests/post_processing/import_modules/reference_simplex_keep.poly',
'proteus/tests/post_processing/import_modules/bdm2_3d_face_func_vals.data',
'proteus/tests/post_processing/import_modules/bdm2_3d_interior_func_vals.data',
'proteus/tests/post_processing/import_modules/bdm_bdy_func_values_3dmesh.data',
'proteus/tests/post_processing/import_modules/bdm_func_values_3dmesh.data']),
(os.path.join(proteus_install_path,'tests','post_processing','comparison_files'),
['proteus/tests/post_processing/comparison_files/BDM_Test_File.h5',
'proteus/tests/post_processing/comparison_files/bdm2_ref_proj_mat.txt',
'proteus/tests/post_processing/comparison_files/bdm2_reference_simplex_mat.data',
'proteus/tests/post_processing/comparison_files/bdm2_reference_simplex_rhs.data',
'proteus/tests/post_processing/comparison_files/bdm_bdy_func_values.npy',
'proteus/tests/post_processing/comparison_files/bdm_bdy_func_values_mesh_8.npy',
'proteus/tests/post_processing/comparison_files/bdm_bdy_func_values_trig.npy',
'proteus/tests/post_processing/comparison_files/bdm_func_values.npy',
'proteus/tests/post_processing/comparison_files/bdm_func_values_mesh_8.npy',
'proteus/tests/post_processing/comparison_files/bdm_func_values_trig.npy',
'proteus/tests/post_processing/comparison_files/poisson_bdm1_test.h5',
'proteus/tests/post_processing/comparison_files/test_bdm2_sshaped_region_expected.h5',
'proteus/tests/post_processing/comparison_files/test_bdm_sshaped_region_expected.h5',
'proteus/tests/post_processing/comparison_files/trig_velocity_rep.npy']),
(os.path.join(proteus_install_path,'tests','matrix_constructor','comparison_files'),
['proteus/tests/matrix_constructor/comparison_files/velocity_laplace_C0P2_mesh.npy',
'proteus/tests/matrix_constructor/comparison_files/single_phase_THQuad_4_expected.data']),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','comparison_files'),
['proteus/tests/solver_tests_slow/comparison_files/drivenCavityNSE_LSC_expected.h5',
'proteus/tests/solver_tests_slow/comparison_files/drivenCavityNSE_LSC_expected.xmf',
'proteus/tests/solver_tests_slow/comparison_files/drivenCavityNSE_LSC_expected.log']),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','import_modules'),
['proteus/tests/solver_tests_slow/import_modules/petsc.options.schur_lsc']),
(os.path.join(proteus_install_path,'tests','solver_tests_mprans','comparison_files'),
['proteus/tests/solver_tests_mprans/comparison_files/twp_navier_stokes_cavity_2d.h5',
'proteus/tests/solver_tests_mprans/comparison_files/twp_navier_stokes_cavity_2d.xmf']),
(os.path.join(proteus_install_path,'tests','MeshAdaptPUMI'),
['proteus/tests/MeshAdaptPUMI/cube0.smb',
'proteus/tests/MeshAdaptPUMI/cube.dmg',
'proteus/tests/MeshAdaptPUMI/Couette.null',
'proteus/tests/MeshAdaptPUMI/Couette.msh',
'proteus/tests/MeshAdaptPUMI/Couette2D.msh',
'proteus/tests/MeshAdaptPUMI/Rectangle0.smb',
'proteus/tests/MeshAdaptPUMI/Rectangle1.smb',
'proteus/tests/MeshAdaptPUMI/Rectangle.dmg',
'proteus/tests/MeshAdaptPUMI/TwoQuads0.smb',
'proteus/tests/MeshAdaptPUMI/TwoQuads.dmg']),
(os.path.join(proteus_install_path,'tests','MeshAdaptPUMI','gauge_compare','dambreak_Colagrossi_2D'),
['proteus/tests/MeshAdaptPUMI/gauge_compare/dambreak_Colagrossi_2D/Reconstructed.dmg',
'proteus/tests/MeshAdaptPUMI/gauge_compare/dambreak_Colagrossi_2D/Reconstructed0.smb']),
(os.path.join(proteus_install_path,'tests','poisson_2d'),
['proteus/tests/poisson_2d/square4x4.3dm',
'proteus/tests/poisson_2d/square4x4.bc']),
(os.path.join(proteus_install_path,'tests','cylinder2D','conforming_rans3p','comparison_files'),
['proteus/tests/cylinder2D/conforming_rans3p/comparison_files/T1P1.h5',
'proteus/tests/cylinder2D/conforming_rans3p/comparison_files/T4P2.h5',
'proteus/tests/cylinder2D/conforming_rans3p/comparison_files/T8P2.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_method','comparison_files'),
['proteus/tests/cylinder2D/ibm_method/comparison_files/T1_rans3p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_rans2p','comparison_files'),
['proteus/tests/cylinder2D/ibm_rans2p/comparison_files/T1_ibm_rans2p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_rans2p_3D','comparison_files'),
['proteus/tests/cylinder2D/ibm_rans2p_3D/comparison_files/T1_ibm_3D_rans2p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','sbm_method','comparison_files'),
['proteus/tests/cylinder2D/sbm_method/comparison_files/T1_sbm_rans3p.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','sbm_3Dmesh','comparison_files'),
['proteus/tests/cylinder2D/sbm_3Dmesh/comparison_files/T001_P1_sbm_3Dmesh.h5']),
(os.path.join(proteus_install_path,'tests','cylinder2D','conforming_rans2p','comparison_files'),
['proteus/tests/cylinder2D/conforming_rans2p/comparison_files/T1_rans2p.h5']),
(os.path.join(proteus_install_path,'tests','HotStart_3P','comparison_files'),
['proteus/tests/HotStart_3P/comparison_files/T01P1_hotstart.h5',
'proteus/tests/HotStart_3P/comparison_files/T01P2_hotstart.h5']),
(os.path.join(proteus_install_path,'tests','AddedMass'),
['proteus/tests/AddedMass/petsc.options.superlu_dist']),
(os.path.join(proteus_install_path,'tests','MoveMeshMonitor'),
['proteus/tests/MoveMeshMonitor/petsc.options.asm',
'proteus/tests/MoveMeshMonitor/nodesResult.csv']),
(os.path.join(proteus_install_path,'tests','wave_tests'),
['proteus/tests/wave_tests/data_timeSeries.dat',
'proteus/tests/wave_tests/data_timeSeries.txt',
'proteus/tests/wave_tests/data_timeSeries_err1.csv',
'proteus/tests/wave_tests/data_timeSeries_err2.txt']),
],
scripts = ['scripts/parun','scripts/gf2poly','scripts/gatherArchives.py','scripts/qtm','scripts/waves2xmf','scripts/povgen.py',
'scripts/velocity2xmf','scripts/run_script_garnet','scripts/run_script_diamond',
'scripts/run_script_lonestar','scripts/run_script_ranger','scripts/run_script_mpiexec','scripts/gatherTimes','scripts/clearh5.py',
'scripts/runSWEs.py'],
requires=['numpy']
)
def setup_extensions_in_sequential():
setup_given_extensions(EXTENSIONS_TO_BUILD)
def setup_extensions_in_parallel():
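    # Hand each extension to its own setup() call (one-element lists below),
    # so the C/C++/Cython extensions compile in parallel worker processes.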
import multiprocessing, logging
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.INFO)
multiprocessing.log_to_stderr()
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
EXTENSIONS=[[e] for e in EXTENSIONS_TO_BUILD]
pool.imap(setup_given_extensions, EXTENSIONS)
pool.close()
pool.join()
if "build_ext" in sys.argv:
setup_extensions_in_parallel()
else:
setup_extensions_in_sequential()
|
[
"distutils.sysconfig.get_config_vars",
"distutils.sysconfig.get_python_lib",
"multiprocessing.log_to_stderr",
"numpy.get_include",
"multiprocessing.cpu_count"
] |
[((72, 99), 'distutils.sysconfig.get_config_vars', 'sysconfig.get_config_vars', ([], {}), '()\n', (97, 99), False, 'from distutils import sysconfig\n'), ((1399, 1425), 'distutils.sysconfig.get_python_lib', 'sysconfig.get_python_lib', ([], {}), '()\n', (1423, 1425), False, 'from distutils import sysconfig\n'), ((58940, 58971), 'multiprocessing.log_to_stderr', 'multiprocessing.log_to_stderr', ([], {}), '()\n', (58969, 58971), False, 'import multiprocessing, logging\n'), ((59010, 59041), 'multiprocessing.log_to_stderr', 'multiprocessing.log_to_stderr', ([], {}), '()\n', (59039, 59041), False, 'import multiprocessing, logging\n'), ((59084, 59111), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (59109, 59111), False, 'import multiprocessing, logging\n'), ((3582, 3601), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3599, 3601), False, 'import numpy\n'), ((3890, 3909), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3907, 3909), False, 'import numpy\n'), ((4194, 4213), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (4211, 4213), False, 'import numpy\n'), ((4504, 4523), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (4521, 4523), False, 'import numpy\n'), ((4815, 4834), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (4832, 4834), False, 'import numpy\n'), ((5113, 5132), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (5130, 5132), False, 'import numpy\n'), ((5411, 5430), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (5428, 5430), False, 'import numpy\n'), ((5712, 5731), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (5729, 5731), False, 'import numpy\n'), ((6013, 6032), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (6030, 6032), False, 'import numpy\n'), ((6592, 6611), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (6609, 6611), False, 'import numpy\n'), ((7229, 7248), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (7246, 7248), False, 'import numpy\n'), ((7846, 7865), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (7863, 7865), False, 'import numpy\n'), ((8503, 8522), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (8520, 8522), False, 'import numpy\n'), ((8844, 8863), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (8861, 8863), False, 'import numpy\n'), ((9034, 9053), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9051, 9053), False, 'import numpy\n'), ((9296, 9315), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9313, 9315), False, 'import numpy\n'), ((9517, 9536), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9534, 9536), False, 'import numpy\n'), ((9683, 9702), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9700, 9702), False, 'import numpy\n'), ((9972, 9991), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (9989, 9991), False, 'import numpy\n'), ((10610, 10629), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (10627, 10629), False, 'import numpy\n'), ((11416, 11435), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (11433, 11435), False, 'import numpy\n'), ((11960, 11979), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (11977, 11979), False, 'import numpy\n'), ((12519, 12538), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (12536, 12538), False, 'import numpy\n'), ((12671, 12690), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (12688, 12690), False, 
'import numpy\n'), ((13326, 13345), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (13343, 13345), False, 'import numpy\n'), ((14051, 14070), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (14068, 14070), False, 'import numpy\n'), ((15634, 15653), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (15651, 15653), False, 'import numpy\n'), ((15992, 16011), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (16009, 16011), False, 'import numpy\n'), ((16399, 16418), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (16416, 16418), False, 'import numpy\n'), ((16862, 16881), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (16879, 16881), False, 'import numpy\n'), ((17860, 17879), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (17877, 17879), False, 'import numpy\n'), ((18825, 18844), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (18842, 18844), False, 'import numpy\n'), ((19184, 19203), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (19201, 19203), False, 'import numpy\n'), ((20106, 20125), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (20123, 20125), False, 'import numpy\n'), ((20686, 20705), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (20703, 20705), False, 'import numpy\n'), ((20971, 20990), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (20988, 20990), False, 'import numpy\n'), ((29188, 29207), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (29205, 29207), False, 'import numpy\n'), ((29486, 29505), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (29503, 29505), False, 'import numpy\n'), ((29939, 29958), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (29956, 29958), False, 'import numpy\n'), ((30532, 30551), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (30549, 30551), False, 'import numpy\n'), ((30847, 30866), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (30864, 30866), False, 'import numpy\n'), ((31145, 31164), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (31162, 31164), False, 'import numpy\n'), ((31440, 31459), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (31457, 31459), False, 'import numpy\n'), ((31750, 31769), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (31767, 31769), False, 'import numpy\n'), ((32066, 32085), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (32083, 32085), False, 'import numpy\n'), ((32318, 32337), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (32335, 32337), False, 'import numpy\n'), ((32718, 32737), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (32735, 32737), False, 'import numpy\n'), ((33127, 33146), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (33144, 33146), False, 'import numpy\n'), ((33524, 33543), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (33541, 33543), False, 'import numpy\n'), ((33831, 33850), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (33848, 33850), False, 'import numpy\n'), ((34150, 34169), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (34167, 34169), False, 'import numpy\n'), ((34475, 34494), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (34492, 34494), False, 'import numpy\n'), ((14711, 14730), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (14728, 14730), False, 'import numpy\n'), ((22001, 22020), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (22018, 
22020), False, 'import numpy\n'), ((23545, 23564), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (23562, 23564), False, 'import numpy\n'), ((24952, 24971), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (24969, 24971), False, 'import numpy\n'), ((26753, 26772), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (26770, 26772), False, 'import numpy\n'), ((28145, 28164), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (28162, 28164), False, 'import numpy\n')]
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pathlib import PurePath
from pants.backend.go.target_types import (
GoBinaryMainPackage,
GoBinaryMainPackageField,
GoBinaryMainPackageRequest,
)
from pants.backend.go.util_rules.build_pkg import BuildGoPackageTargetRequest, BuiltGoPackage
from pants.backend.go.util_rules.import_analysis import ImportConfig, ImportConfigRequest
from pants.backend.go.util_rules.link import LinkedGoBinary, LinkGoBinaryRequest
from pants.core.goals.package import (
BuiltPackage,
BuiltPackageArtifact,
OutputPathField,
PackageFieldSet,
)
from pants.core.goals.run import RunFieldSet
from pants.engine.fs import AddPrefix, Digest, MergeDigests
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
@dataclass(frozen=True)
class GoBinaryFieldSet(PackageFieldSet, RunFieldSet):
required_fields = (GoBinaryMainPackageField,)
main: GoBinaryMainPackageField
output_path: OutputPathField
@rule
async def package_go_binary(field_set: GoBinaryFieldSet) -> BuiltPackage:
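    # Resolve and build the main package (plus dependencies), generate the
    # import config, then link everything into a binary at the output path.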
main_pkg = await Get(GoBinaryMainPackage, GoBinaryMainPackageRequest(field_set.main))
built_package = await Get(
BuiltGoPackage, BuildGoPackageTargetRequest(main_pkg.address, is_main=True)
)
main_pkg_a_file_path = built_package.import_paths_to_pkg_a_files["main"]
import_config = await Get(
ImportConfig, ImportConfigRequest(built_package.import_paths_to_pkg_a_files)
)
input_digest = await Get(Digest, MergeDigests([built_package.digest, import_config.digest]))
output_filename = PurePath(field_set.output_path.value_or_default(file_ending=None))
binary = await Get(
LinkedGoBinary,
LinkGoBinaryRequest(
input_digest=input_digest,
archives=(main_pkg_a_file_path,),
import_config_path=import_config.CONFIG_PATH,
output_filename=f"./{output_filename.name}",
description=f"Link Go binary for {field_set.address}",
),
)
renamed_output_digest = await Get(Digest, AddPrefix(binary.digest, str(output_filename.parent)))
artifact = BuiltPackageArtifact(relpath=str(output_filename))
return BuiltPackage(renamed_output_digest, (artifact,))
def rules():
return [*collect_rules(), UnionRule(PackageFieldSet, GoBinaryFieldSet)]
|
[
"pants.engine.unions.UnionRule",
"pants.backend.go.util_rules.import_analysis.ImportConfigRequest",
"pants.backend.go.util_rules.build_pkg.BuildGoPackageTargetRequest",
"pants.engine.fs.MergeDigests",
"pants.backend.go.target_types.GoBinaryMainPackageRequest",
"pants.core.goals.package.BuiltPackage",
"pants.backend.go.util_rules.link.LinkGoBinaryRequest",
"dataclasses.dataclass",
"pants.engine.rules.collect_rules"
] |
[((971, 993), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (980, 993), False, 'from dataclasses import dataclass\n'), ((2387, 2435), 'pants.core.goals.package.BuiltPackage', 'BuiltPackage', (['renamed_output_digest', '(artifact,)'], {}), '(renamed_output_digest, (artifact,))\n', (2399, 2435), False, 'from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact, OutputPathField, PackageFieldSet\n'), ((2481, 2525), 'pants.engine.unions.UnionRule', 'UnionRule', (['PackageFieldSet', 'GoBinaryFieldSet'], {}), '(PackageFieldSet, GoBinaryFieldSet)\n', (2490, 2525), False, 'from pants.engine.unions import UnionRule\n'), ((1295, 1337), 'pants.backend.go.target_types.GoBinaryMainPackageRequest', 'GoBinaryMainPackageRequest', (['field_set.main'], {}), '(field_set.main)\n', (1321, 1337), False, 'from pants.backend.go.target_types import GoBinaryMainPackage, GoBinaryMainPackageField, GoBinaryMainPackageRequest\n'), ((1394, 1453), 'pants.backend.go.util_rules.build_pkg.BuildGoPackageTargetRequest', 'BuildGoPackageTargetRequest', (['main_pkg.address'], {'is_main': '(True)'}), '(main_pkg.address, is_main=True)\n', (1421, 1453), False, 'from pants.backend.go.util_rules.build_pkg import BuildGoPackageTargetRequest, BuiltGoPackage\n'), ((1590, 1652), 'pants.backend.go.util_rules.import_analysis.ImportConfigRequest', 'ImportConfigRequest', (['built_package.import_paths_to_pkg_a_files'], {}), '(built_package.import_paths_to_pkg_a_files)\n', (1609, 1652), False, 'from pants.backend.go.util_rules.import_analysis import ImportConfig, ImportConfigRequest\n'), ((1696, 1754), 'pants.engine.fs.MergeDigests', 'MergeDigests', (['[built_package.digest, import_config.digest]'], {}), '([built_package.digest, import_config.digest])\n', (1708, 1754), False, 'from pants.engine.fs import AddPrefix, Digest, MergeDigests\n'), ((1902, 2142), 'pants.backend.go.util_rules.link.LinkGoBinaryRequest', 'LinkGoBinaryRequest', ([], {'input_digest': 'input_digest', 'archives': '(main_pkg_a_file_path,)', 'import_config_path': 'import_config.CONFIG_PATH', 'output_filename': 'f"""./{output_filename.name}"""', 'description': 'f"""Link Go binary for {field_set.address}"""'}), "(input_digest=input_digest, archives=(\n main_pkg_a_file_path,), import_config_path=import_config.CONFIG_PATH,\n output_filename=f'./{output_filename.name}', description=\n f'Link Go binary for {field_set.address}')\n", (1921, 2142), False, 'from pants.backend.go.util_rules.link import LinkedGoBinary, LinkGoBinaryRequest\n'), ((2464, 2479), 'pants.engine.rules.collect_rules', 'collect_rules', ([], {}), '()\n', (2477, 2479), False, 'from pants.engine.rules import collect_rules, rule\n')]
|
# -*- coding: utf-8 -*-
import time
import unittest
from convertdate import julianday
from convertdate.armenian import (_valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian,
to_jd, to_julian, tostring)
class TestArmenian(unittest.TestCase):
def setUp(self):
self.now = time.localtime()
self.today = julianday.from_gregorian(self.now[0], self.now[1], self.now[2])
def testValidDate(self):
self.assertTrue(_valid_date(1, 1, 1))
self.assertTrue(_valid_date(533, 1, 1, method="sarkawag"))
with self.assertRaises(ValueError):
_valid_date(401, 1, 1, method="sarkawag")
with self.assertRaises(ValueError):
_valid_date(30, 4, 31)
with self.assertRaises(ValueError):
_valid_date(536, 13, 6)
self.assertTrue(_valid_date(536, 13, 6, method="sarkawag"))
def testReflexive(self):
self.assertEqual(self.today, to_jd(*from_jd(self.today)))
self.assertEqual(self.today, to_jd(*from_jd(self.today, "sarkawag"), method="sarkawag"))
for jd in range(2159677, 2488395, 2000):
jd = jd + 0.5
self.assertEqual(jd, to_jd(*from_jd(jd)))
self.assertEqual(jd, to_jd(*from_jd(jd, "sarkawag"), method="sarkawag"))
def testLeap(self):
self.assertEqual(True, leap(600))
self.assertEqual(False, leap(601))
def testGregorian(self):
self.assertEqual((2019, 11, 3), to_gregorian(1469, 4, 14))
self.assertEqual((1469, 4, 14), from_gregorian(2019, 11, 3))
def testMonthLength(self):
self.assertEqual(30, month_length(600, 1))
self.assertEqual(5, month_length(600, 13))
self.assertEqual(6, month_length(600, 13, "sarkawag"))
def testJulian(self):
cases = [
# first date of the calendar
[(1, 1, 1), (552, 7, 11)],
# last day of the year
[(1, 13, 5), (553, 7, 10)],
# leap year moves the calendar
[(4, 13, 5), (556, 7, 9)],
[(5, 1, 1), (556, 7, 10)],
# check month boundaries for an entire year
[(420, 1, 1), (971, 3, 29)],
[(420, 1, 30), (971, 4, 27)],
[(420, 2, 1), (971, 4, 28)],
[(420, 2, 30), (971, 5, 27)],
[(420, 3, 1), (971, 5, 28)],
[(420, 3, 30), (971, 6, 26)],
[(420, 4, 1), (971, 6, 27)],
[(420, 4, 30), (971, 7, 26)],
[(420, 5, 1), (971, 7, 27)],
[(420, 5, 30), (971, 8, 25)],
[(420, 6, 1), (971, 8, 26)],
[(420, 6, 30), (971, 9, 24)],
[(420, 7, 1), (971, 9, 25)],
[(420, 7, 30), (971, 10, 24)],
[(420, 8, 1), (971, 10, 25)],
[(420, 8, 30), (971, 11, 23)],
[(420, 9, 1), (971, 11, 24)],
[(420, 9, 30), (971, 12, 23)],
[(420, 10, 1), (971, 12, 24)],
[(420, 10, 30), (972, 1, 22)],
[(420, 11, 1), (972, 1, 23)],
[(420, 11, 30), (972, 2, 21)],
[(420, 12, 1), (972, 2, 22)],
[(420, 12, 30), (972, 3, 22)],
[(420, 13, 1), (972, 3, 23)],
[(420, 13, 5), (972, 3, 27)],
# check month boundaries around Julian leap year
[(512, 13, 1), (1064, 2, 29)],
[(512, 13, 2), (1064, 3, 1)],
[(513, 1, 1), (1064, 3, 5)],
# check the two calendars in 1084
[(533, 6, 15), (1084, 8, 11)],
]
for a, j in cases:
self.assertEqual(a, from_julian(*j))
for a, j in cases:
self.assertEqual(j, to_julian(*a))
self.assertEqual((533, 1, 1), from_julian(1084, 8, 11, method="sarkawag"))
self.assertEqual((533, 13, 5), from_julian(1085, 8, 10, method="sarkawag"))
self.assertEqual((536, 13, 6), from_julian(1088, 8, 10, method="sarkawag"))
self.assertEqual((537, 1, 1), from_julian(1088, 8, 11, method="sarkawag"))
def testTostring(self):
self.assertEqual('14 trē 1469', tostring(1469, 4, 14))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"convertdate.armenian._valid_date",
"convertdate.armenian.from_julian",
"convertdate.armenian.tostring",
"convertdate.armenian.to_julian",
"convertdate.armenian.leap",
"convertdate.armenian.from_gregorian",
"convertdate.armenian.to_gregorian",
"convertdate.armenian.from_jd",
"convertdate.armenian.month_length",
"convertdate.julianday.from_gregorian",
"time.localtime"
] |
[((4171, 4186), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4184, 4186), False, 'import unittest\n'), ((349, 365), 'time.localtime', 'time.localtime', ([], {}), '()\n', (363, 365), False, 'import time\n'), ((387, 450), 'convertdate.julianday.from_gregorian', 'julianday.from_gregorian', (['self.now[0]', 'self.now[1]', 'self.now[2]'], {}), '(self.now[0], self.now[1], self.now[2])\n', (411, 450), False, 'from convertdate import julianday\n'), ((505, 525), 'convertdate.armenian._valid_date', '_valid_date', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (516, 525), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((552, 593), 'convertdate.armenian._valid_date', '_valid_date', (['(533)', '(1)', '(1)'], {'method': '"""sarkawag"""'}), "(533, 1, 1, method='sarkawag')\n", (563, 593), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((652, 693), 'convertdate.armenian._valid_date', '_valid_date', (['(401)', '(1)', '(1)'], {'method': '"""sarkawag"""'}), "(401, 1, 1, method='sarkawag')\n", (663, 693), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((751, 773), 'convertdate.armenian._valid_date', '_valid_date', (['(30)', '(4)', '(31)'], {}), '(30, 4, 31)\n', (762, 773), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((831, 854), 'convertdate.armenian._valid_date', '_valid_date', (['(536)', '(13)', '(6)'], {}), '(536, 13, 6)\n', (842, 854), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((880, 922), 'convertdate.armenian._valid_date', '_valid_date', (['(536)', '(13)', '(6)'], {'method': '"""sarkawag"""'}), "(536, 13, 6, method='sarkawag')\n", (891, 922), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1387, 1396), 'convertdate.armenian.leap', 'leap', (['(600)'], {}), '(600)\n', (1391, 1396), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1430, 1439), 'convertdate.armenian.leap', 'leap', (['(601)'], {}), '(601)\n', (1434, 1439), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1511, 1536), 'convertdate.armenian.to_gregorian', 'to_gregorian', (['(1469)', '(4)', '(14)'], {}), '(1469, 4, 14)\n', (1523, 1536), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1578, 1605), 'convertdate.armenian.from_gregorian', 'from_gregorian', (['(2019)', '(11)', '(3)'], {}), '(2019, 11, 3)\n', (1592, 1605), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1668, 1688), 'convertdate.armenian.month_length', 'month_length', (['(600)', '(1)'], {}), '(600, 1)\n', (1680, 1688), False, 'from convertdate.armenian import _valid_date, 
from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1718, 1739), 'convertdate.armenian.month_length', 'month_length', (['(600)', '(13)'], {}), '(600, 13)\n', (1730, 1739), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1769, 1802), 'convertdate.armenian.month_length', 'month_length', (['(600)', '(13)', '"""sarkawag"""'], {}), "(600, 13, 'sarkawag')\n", (1781, 1802), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((3750, 3793), 'convertdate.armenian.from_julian', 'from_julian', (['(1084)', '(8)', '(11)'], {'method': '"""sarkawag"""'}), "(1084, 8, 11, method='sarkawag')\n", (3761, 3793), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((3834, 3877), 'convertdate.armenian.from_julian', 'from_julian', (['(1085)', '(8)', '(10)'], {'method': '"""sarkawag"""'}), "(1085, 8, 10, method='sarkawag')\n", (3845, 3877), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((3918, 3961), 'convertdate.armenian.from_julian', 'from_julian', (['(1088)', '(8)', '(10)'], {'method': '"""sarkawag"""'}), "(1088, 8, 10, method='sarkawag')\n", (3929, 3961), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((4001, 4044), 'convertdate.armenian.from_julian', 'from_julian', (['(1088)', '(8)', '(11)'], {'method': '"""sarkawag"""'}), "(1088, 8, 11, method='sarkawag')\n", (4012, 4044), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((4116, 4137), 'convertdate.armenian.tostring', 'tostring', (['(1469)', '(4)', '(14)'], {}), '(1469, 4, 14)\n', (4124, 4137), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((3619, 3634), 'convertdate.armenian.from_julian', 'from_julian', (['*j'], {}), '(*j)\n', (3630, 3634), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((3696, 3709), 'convertdate.armenian.to_julian', 'to_julian', (['*a'], {}), '(*a)\n', (3705, 3709), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((998, 1017), 'convertdate.armenian.from_jd', 'from_jd', (['self.today'], {}), '(self.today)\n', (1005, 1017), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1064, 1095), 'convertdate.armenian.from_jd', 'from_jd', (['self.today', '"""sarkawag"""'], {}), "(self.today, 'sarkawag')\n", (1071, 1095), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1232, 1243), 'convertdate.armenian.from_jd', 'from_jd', (['jd'], {}), '(jd)\n', (1239, 1243), False, 'from convertdate.armenian import 
_valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n'), ((1286, 1309), 'convertdate.armenian.from_jd', 'from_jd', (['jd', '"""sarkawag"""'], {}), "(jd, 'sarkawag')\n", (1293, 1309), False, 'from convertdate.armenian import _valid_date, from_gregorian, from_jd, from_julian, leap, month_length, to_gregorian, to_jd, to_julian, tostring\n')]
|
from euler.big_int import BigInt


def compute() -> int:
    """Count how many n-digit positive integers are also an nth power."""
    result = 0
for i in range(1, 10):
j, n = 1, BigInt(i)
while len(n) == j:
result += 1
j += 1
n *= i
return result
|
[
"euler.big_int.BigInt"
] |
[((117, 126), 'euler.big_int.BigInt', 'BigInt', (['i'], {}), '(i)\n', (123, 126), False, 'from euler.big_int import BigInt\n')]
|
"""
All logic regarding extensions management
"""
import time
import importlib
import threading
import logging
import collections
from collections import defaultdict
import scapy.layers.dot11 as dot11
import scapy.arch.linux as linux
import wifiphisher.common.constants as constants
import wifiphisher.extensions.deauth as deauth_extension
logger = logging.getLogger(__name__)
is_deauth_cont = True
def register_backend_funcs(func):
"""
    Register the given extension function as a backend method
:param func: The instance function needed to register as backend
method
:type func: instancemethod
:return: None
"""
func.is_backendmethod = True
return func
class ExtensionManager(object):
"""
Extension Manager (EM) defines an API for modular
architecture in Wifiphisher.
All extensions that lie under "extensions" directory
and are also defined in EXTENSIONS constant are loaded
and leveraged by EM. Each extension can take advantage
of the second wireless card (the first is used for the
rogue AP), aka run in "Advanced mode".
Each extension needs to be defined as a class that has
the name of the filename in camelcase. For example,
deauth.py would have a Deauth() class. Currently,
extensions need to provide the following methods:
* __init__(self, data): Basic initialization that
      receives a dictionary with data from the main engine.
* get_packet(self, pkt): Method to process individually
each packet captured from the second card (monitor
mode).
    * send_output(self): Method that returns the entry logs
      that we need to output, as a list of strings.
* on_exit(self): Method that frees all the used resources
* each extension can define the backend method as follows:
ex:
@extensions.register_backend_funcs
def psk_verify(self, *list_data):
return list_data
"""
def __init__(self, network_manager):
"""
Init the EM object.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
self._nm = network_manager
self._extensions_str = []
self._extensions = []
self._interface = None
self._socket = None
self._should_continue = True
self._packets_to_send = defaultdict(list)
self._channels_to_hop = []
self._current_channel = "1"
self._listen_thread = threading.Thread(target=self._listen)
self._send_thread = threading.Thread(target=self._send)
self._channelhop_thread = threading.Thread(target=self._channel_hop)
self._shared_data = None
def get_ui_funcs(self):
"""
Returns a list of all the uimethods.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: List Object
:rtype: List
"""
ui_funcs = []
# loop each extension object
for extension in self._extensions:
# loop all the attribute for the extension object
for attr in dir(extension):
if callable(getattr(extension, attr)):
method = getattr(extension, attr)
if hasattr(method, "is_uimethod"):
ui_funcs.append(method)
return ui_funcs
def get_backend_funcs(self):
"""
Returns a list of all the backend methods
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: dict object
:rtype: dict
"""
backend_funcs = {}
for extension in self._extensions:
for attrname in dir(extension):
method = getattr(extension, attrname)
if hasattr(method, 'is_backendmethod'):
# store the method name to extension map
backend_funcs[method.__name__] = extension
return backend_funcs
def _channel_hop(self):
"""
Change the interface's channel every three seconds
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
        .. note: The channel range is between 1 and 13
"""
# set the current channel to the ap channel
self._nm.set_interface_channel(self._interface,
int(self._current_channel))
# if the stop flag not set, change the channel
while self._should_continue:
for channel in self._channels_to_hop:
if self._current_channel != channel:
self._current_channel = channel
# added this check to reduce shutdown time
if self._should_continue:
try:
self._socket.close()
self._nm.set_interface_channel(
self._interface, int(self._current_channel))
self._socket = linux.L2Socket(
iface=self._interface)
# extends the channel hopping time to sniff
# more frames
time.sleep(3)
except BaseException:
continue
else:
break
def set_interface(self, interface):
"""
Sets interface for EM.
:param self: An ExtensionManager object
:type self: ExtensionManager
:param interface: Interface name
:type interface: String
:return: None
:rtype: None
"""
self._interface = interface
self._socket = linux.L2Socket(iface=self._interface)
def set_extensions(self, extensions):
"""
Sets extensions for EM.
:param self: An ExtensionManager object
:type self: ExtensionManager
:param extensions: List of str extension names
:type extensions: List
:return: None
:rtype: None
"""
self._extensions_str = extensions
def init_extensions(self, shared_data):
"""
Init EM extensions. Should be run
when all shared data has been gathered.
:param self: An ExtensionManager object
:type self: ExtensionManager
:param shared_data: Dictionary object
:type shared_data: Dictionary
:return: None
:rtype: None
"""
# Convert shared_data from dict to named tuple
shared_data = collections.namedtuple('GenericDict',
shared_data.keys())(**shared_data)
self._shared_data = shared_data
# Initialize all extensions with the shared data
for extension in self._extensions_str:
mod = importlib.import_module(constants.EXTENSIONS_LOADPATH +
extension)
extension_class = getattr(mod, extension.title())
obj = extension_class(shared_data)
self._extensions.append(obj)
def start_extensions(self):
"""
Starts the two main daemons of EM:
1) Daemon that listens to every packet and
forwards it to each extension for further processing.
2) Daemon that receives special-crafted packets
from extensions and broadcasts them in the air.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
# One daemon is listening for packets...
self._listen_thread.start()
# ...another daemon is sending packets
self._send_thread.start()
# daemon for channel hopping
self.get_channels()
if self._shared_data.is_freq_hop_allowed:
self._channelhop_thread.start()
else:
self._current_channel = self._shared_data.target_ap_channel
def on_exit(self):
"""
Stops both daemons of EM on exit.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
self._should_continue = False
if self._listen_thread.is_alive():
self._listen_thread.join(3)
if self._send_thread.is_alive():
self._send_thread.join(3)
if (self._shared_data is not None
and self._shared_data.is_freq_hop_allowed
and self._channelhop_thread.is_alive()):
self._channelhop_thread.join(3)
# Close socket if it's open
try:
self._socket.close()
except AttributeError:
pass
# Clean resources used by extension modules
for extension in self._extensions:
extension.on_exit()
def get_channels(self):
"""
Gets the channels from each extension.
Merges them to create a list of channels
to hop.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
for extension in self._extensions:
channels_interested = extension.send_channels()
number_of_channels = len(channels_interested)
if channels_interested and number_of_channels > 0:
# Append only new channels (no duplicates)
self._channels_to_hop += list(
set(channels_interested) - set(self._channels_to_hop))
def get_output(self):
"""
Gets the output of each extensions.
Merges them in a list and returns it.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
output = []
for extension in self._extensions:
m_output = extension.send_output()
num_of_lines = len(m_output)
if m_output and num_of_lines > 0:
output += m_output
return output
def _process_packet(self, pkt):
"""
Pass each captured packet to each module.
Gets the packets to send.
:param self: An ExtensionManager object
:type self: ExtensionManager
:param pkt: A Scapy packet object
:type pkt: Scapy Packet
:return: None
:rtype: None
"""
        # reset _packets_to_send for every sniffed frame
self._packets_to_send = defaultdict(list)
channels = [str(ch) for ch in constants.ALL_2G_CHANNELS] + ["*"]
for extension in self._extensions:
ext_pkts = extension.get_packet(pkt)
for channel in channels:
self._packets_to_send[channel] += ext_pkts[channel]
def _stopfilter(self, pkt):
"""
A scapy filter to determine if we need to stop.
:param self: An ExtensionManager object
:type self: ExtensionManager
        :param pkt: A Scapy packet object
        :type pkt: Scapy Packet
:return: True or False
:rtype: Boolean
"""
return not self._should_continue
def _listen(self):
"""
Listening thread. Listens for packets and forwards them
to _process_packet.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
# continue to find clients until told otherwise
while self._should_continue:
dot11.sniff(
iface=self._interface,
prn=self._process_packet,
count=1,
store=0,
stop_filter=self._stopfilter)
def _send(self):
"""
        Sending thread. Continuously broadcasting packets
crafted by extensions.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
"""
while self._should_continue:
for pkt in self._packets_to_send[self._current_channel] + \
self._packets_to_send["*"]:
try:
if is_deauth_cont or not deauth_extension.is_deauth_frame(pkt):
logger.debug("Send pkt with A1:%s A2:%s subtype:%s in channel:%s",
pkt.addr1, pkt.addr2, pkt.subtype,
self._current_channel)
self._socket.send(pkt)
except BaseException:
continue
time.sleep(1)
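# Hypothetical wiring of the manager, for illustration only (the interface
# name, the extension list and the shared_data contents are assumptions):
#
#     em = ExtensionManager(network_manager)
#     em.set_interface("wlan1")
#     em.set_extensions(["deauth"])
#     em.init_extensions(shared_data)
#     em.start_extensions()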
|
[
"threading.Thread",
"scapy.layers.dot11.sniff",
"importlib.import_module",
"scapy.arch.linux.L2Socket",
"time.sleep",
"collections.defaultdict",
"wifiphisher.extensions.deauth.is_deauth_frame",
"logging.getLogger"
] |
[((351, 378), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (368, 378), False, 'import logging\n'), ((2393, 2410), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2404, 2410), False, 'from collections import defaultdict\n'), ((2512, 2549), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._listen'}), '(target=self._listen)\n', (2528, 2549), False, 'import threading\n'), ((2578, 2613), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._send'}), '(target=self._send)\n', (2594, 2613), False, 'import threading\n'), ((2648, 2690), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._channel_hop'}), '(target=self._channel_hop)\n', (2664, 2690), False, 'import threading\n'), ((5832, 5869), 'scapy.arch.linux.L2Socket', 'linux.L2Socket', ([], {'iface': 'self._interface'}), '(iface=self._interface)\n', (5846, 5869), True, 'import scapy.arch.linux as linux\n'), ((10649, 10666), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10660, 10666), False, 'from collections import defaultdict\n'), ((12744, 12757), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12754, 12757), False, 'import time\n'), ((6958, 7024), 'importlib.import_module', 'importlib.import_module', (['(constants.EXTENSIONS_LOADPATH + extension)'], {}), '(constants.EXTENSIONS_LOADPATH + extension)\n', (6981, 7024), False, 'import importlib\n'), ((11684, 11797), 'scapy.layers.dot11.sniff', 'dot11.sniff', ([], {'iface': 'self._interface', 'prn': 'self._process_packet', 'count': '(1)', 'store': '(0)', 'stop_filter': 'self._stopfilter'}), '(iface=self._interface, prn=self._process_packet, count=1, store\n =0, stop_filter=self._stopfilter)\n', (11695, 11797), True, 'import scapy.layers.dot11 as dot11\n'), ((5108, 5145), 'scapy.arch.linux.L2Socket', 'linux.L2Socket', ([], {'iface': 'self._interface'}), '(iface=self._interface)\n', (5122, 5145), True, 'import scapy.arch.linux as linux\n'), ((5321, 5334), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (5331, 5334), False, 'import time\n'), ((12360, 12397), 'wifiphisher.extensions.deauth.is_deauth_frame', 'deauth_extension.is_deauth_frame', (['pkt'], {}), '(pkt)\n', (12392, 12397), True, 'import wifiphisher.extensions.deauth as deauth_extension\n')]
|
""" Line analysis tools
These are intended to be methods generic to emission and absorption
(e.g. Equivalent width)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
from astropy.modeling import models, fitting
def box_ew(spec):
""" Boxcar EW calculation
Observer frame, not rest-frame
Parameters
----------
spec : Tuple of (wave, fx, sig)
Returns
-------
EW, sigEW : EW and error in observer frame
"""
# Note: Tested in test_absline_anly
# Grab
wv,fx,sig = spec
# Cut spectrum
# dwv
dwv = wv - np.roll(wv,1)
dwv[0] = dwv[1]
# Simple boxcar
EW = np.sum( dwv * (1. - fx) )
varEW = np.sum( dwv**2 * sig**2 )
sigEW = np.sqrt(varEW)
# Return
return EW, sigEW
def gaussian_ew(spec, ltype, initial_guesses=None):
""" EW calculation using Gaussian fit
Observer frame, not rest-frame. wvlim and spec must be set!
Parameters
----------
spec : Tuple of (wave, fx, sig)
ltype : string
whether this is for absorption or emission line (see SpectralLine Class)
initial_guesses, optional : Tuple of (amplitude, mean, stddev)
Initial guesses of the Gaussian fit (unitless)
Returns
-------
EW, sigEW : EW and error in observer frame
"""
# Note: Tested in test_absline_anly
# Grab
wv,fx,sig = spec
# dwv
dwv = wv - np.roll(wv,1)
dwv[0] = dwv[1]
# Initial guesses of the Gaussian fit
if initial_guesses is None:
amp_init = np.mean(fx).value/2. #half the mean flux
stddev_init = 3*np.mean(dwv).value #3 pixels
        mean_init = np.mean(wv).value #middle of the wavelength range
elif len(initial_guesses)==3:
amp_init = initial_guesses[0]
mean_init = initial_guesses[1]
stddev_init = initial_guesses[2]
#check whether these values are sensible
if (mean_init < np.min(wv.value)) or (mean_init > np.max(wv.value)):
raise ValueError('gaussian_ew: The initial guess for Gaussian mean is not sensible; check it!')
if (amp_init < 0):
raise ValueError('gaussian_ew: The initial guess for Gaussian amplitude is not sensible; check it!')
if (stddev_init < 0):
raise ValueError('gaussian_ew: The initial guess for Gaussian stddev is not sensible; check it!')
else:
raise ValueError('gaussian_ew: Format of the initial_guesses is incorrect')
# Model initialization
if ltype == 'Abs':
g_init = models.GaussianAbsorption1D(amplitude=amp_init, mean=mean_init, stddev=stddev_init) # This model does not support units
elif ltype == 'Emiss':
g_init = models.Gaussian1D(amplitude=amp_init, mean=mean_init, stddev=stddev_init) # This model does not support units
else:
raise ValueError("gaussian_ew: ltype has to be either 'Abs' or 'Emiss'")
# Fitting algorithm initialization
fit_g = fitting.LevMarLSQFitter()
# Use only good values (i.e. with meaningful errors)
cond = (sig > 0.) & (np.isfinite(sig))
# Actual fit
g = fit_g(g_init, wv[cond], fx[cond], weights=1./sig[cond])
#Check whether the final fit is sensible
fit_info = fit_g.fit_info
if fit_info['param_cov'] is None:
raise ValueError('gaussian_ew: The fit is not sensible! Check initial_guesses')
# Area under curve of Gaussian is [amplitude*stddev*sqrt(2*pi)]
EW = g.amplitude.value * g.stddev.value * np.sqrt(2 * np.pi) #unitless
EW = EW * wv.unit #add the same unit as wv
#error estimation
cov = fit_g.fit_info['param_cov'] #covariance matrix
x = g.parameters[0] # amplitude
y = g.parameters[2] # stddev
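    # Standard error propagation for the product EW ~ amplitude * stddev using the
    # fitted covariance: (sigEW/EW)**2 = Var(A)/A**2 + Var(s)/s**2 + 2*Cov(A,s)/(A*s)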
sigEW = EW * np.sqrt(cov[0,0] / x**2 + cov[2,2] / y**2 + 2 * cov[0,2] / (x*y))
return EW, sigEW
|
[
"numpy.sum",
"numpy.roll",
"astropy.modeling.models.Gaussian1D",
"astropy.modeling.models.GaussianAbsorption1D",
"numpy.isfinite",
"astropy.modeling.fitting.LevMarLSQFitter",
"numpy.min",
"numpy.mean",
"numpy.max",
"numpy.sqrt"
] |
[((697, 721), 'numpy.sum', 'np.sum', (['(dwv * (1.0 - fx))'], {}), '(dwv * (1.0 - fx))\n', (703, 721), True, 'import numpy as np\n'), ((736, 763), 'numpy.sum', 'np.sum', (['(dwv ** 2 * sig ** 2)'], {}), '(dwv ** 2 * sig ** 2)\n', (742, 763), True, 'import numpy as np\n'), ((774, 788), 'numpy.sqrt', 'np.sqrt', (['varEW'], {}), '(varEW)\n', (781, 788), True, 'import numpy as np\n'), ((2991, 3016), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (3014, 3016), False, 'from astropy.modeling import models, fitting\n'), ((632, 646), 'numpy.roll', 'np.roll', (['wv', '(1)'], {}), '(wv, 1)\n', (639, 646), True, 'import numpy as np\n'), ((1457, 1471), 'numpy.roll', 'np.roll', (['wv', '(1)'], {}), '(wv, 1)\n', (1464, 1471), True, 'import numpy as np\n'), ((2566, 2654), 'astropy.modeling.models.GaussianAbsorption1D', 'models.GaussianAbsorption1D', ([], {'amplitude': 'amp_init', 'mean': 'mean_init', 'stddev': 'stddev_init'}), '(amplitude=amp_init, mean=mean_init, stddev=\n stddev_init)\n', (2593, 2654), False, 'from astropy.modeling import models, fitting\n'), ((3099, 3115), 'numpy.isfinite', 'np.isfinite', (['sig'], {}), '(sig)\n', (3110, 3115), True, 'import numpy as np\n'), ((3515, 3533), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3522, 3533), True, 'import numpy as np\n'), ((3761, 3835), 'numpy.sqrt', 'np.sqrt', (['(cov[0, 0] / x ** 2 + cov[2, 2] / y ** 2 + 2 * cov[0, 2] / (x * y))'], {}), '(cov[0, 0] / x ** 2 + cov[2, 2] / y ** 2 + 2 * cov[0, 2] / (x * y))\n', (3768, 3835), True, 'import numpy as np\n'), ((1699, 1710), 'numpy.mean', 'np.mean', (['wv'], {}), '(wv)\n', (1706, 1710), True, 'import numpy as np\n'), ((2730, 2803), 'astropy.modeling.models.Gaussian1D', 'models.Gaussian1D', ([], {'amplitude': 'amp_init', 'mean': 'mean_init', 'stddev': 'stddev_init'}), '(amplitude=amp_init, mean=mean_init, stddev=stddev_init)\n', (2747, 2803), False, 'from astropy.modeling import models, fitting\n'), ((1585, 1596), 'numpy.mean', 'np.mean', (['fx'], {}), '(fx)\n', (1592, 1596), True, 'import numpy as np\n'), ((1650, 1662), 'numpy.mean', 'np.mean', (['dwv'], {}), '(dwv)\n', (1657, 1662), True, 'import numpy as np\n'), ((1960, 1976), 'numpy.min', 'np.min', (['wv.value'], {}), '(wv.value)\n', (1966, 1976), True, 'import numpy as np\n'), ((1994, 2010), 'numpy.max', 'np.max', (['wv.value'], {}), '(wv.value)\n', (2000, 2010), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Validation of an ISA investigation
Eventually, all format independent content- and specification-related validations which
don't interrupt model creation definitely (e.g. when parsing from ISA-tab) should go
here. Then, validations can be performed on whole models (e.g. after parsing or before
writing) and provide a comprehensive list of warnings of different degree.
"""
import re
from typing import Dict, Tuple
import warnings
from ..exceptions import (
AdvisoryIsaValidationWarning,
CriticalIsaValidationWarning,
ModerateIsaValidationWarning,
)
from .helpers import is_ontology_term_ref
from . import models
from .validate_assay_study import _OntologyTermRefValidator
__author__ = "<NAME> <<EMAIL>>"
# Pattern and helper functions for validation ------------------------------------------------------
# DATE_PATTERN = re.compile("^\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01])$")
MAIL_PATTERN = re.compile("^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$")
PHONE_PATTERN = re.compile("^\\+?[\\d /()-]+$") # only checks characters!
DOI_PATTERN = re.compile("^(?:(?:DOI|doi):)?10[.][0-9]{4,}(?:[.][0-9]+)*/\\S+$")
PMID_PATTERN = re.compile("^\\d+$")
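# Illustrative values these patterns accept (examples only, not taken from any spec):
#   MAIL_PATTERN:  "jane.doe+isa@example.org"
#   PHONE_PATTERN: "+49 (030) 1234-567"
#   DOI_PATTERN:   "10.1000/xyz123" or "doi:10.1000/xyz123"
#   PMID_PATTERN:  "23193287"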
def _validate_mail_address(mail_address) -> None:
"""Helper function to validate mail strings"""
if mail_address and not MAIL_PATTERN.match(mail_address):
tpl = "Invalid mail address: {}"
msg = tpl.format(mail_address)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_phone_number(phone_number) -> None:
"""Helper function to validate phone/fax number strings"""
if phone_number and not PHONE_PATTERN.match(phone_number):
tpl = "Invalid phone/fax number: {}"
msg = tpl.format(phone_number)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_doi(doi) -> None:
"""Helper function to validate doi strings"""
if doi and not DOI_PATTERN.match(doi):
tpl = "Invalid doi string: {}"
msg = tpl.format(doi)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_pubmed_id(pubmed_id) -> None:
"""Helper function to validate pubmed id strings"""
if pubmed_id and not PMID_PATTERN.match(pubmed_id):
tpl = "Invalid pubmed_id string: {}"
msg = tpl.format(pubmed_id)
warnings.warn(msg, AdvisoryIsaValidationWarning)
# Validator classes --------------------------------------------------------------------
class InvestigationValidator:
"""
Validator for Investigation
:type investigation: models.InvestigationInfo
:param investigation: The investigation model to validate
"""
def __init__(self, investigation: models.InvestigationInfo):
self._investigation = investigation
self._ontology_validator = _OntologyTermRefValidator(investigation.ontology_source_refs)
self._study_ids = set()
self._study_paths = set()
self._study_titles = set()
self._assay_paths = set()
def validate(self):
"""Validate the investigation"""
self._validate_ontology_sources()
self._validate_sections()
def _validate_ontology_sources(self):
# Check that ontology sources are complete
for source in self._investigation.ontology_source_refs.values():
if not all((source.name, source.file, source.version, source.description)):
tpl = "Incomplete ontology source; found: {}, {}, {}, {}, {}"
msg = tpl.format(
source.name, source.file, source.version, source.description, source.comments
)
warnings.warn(msg, CriticalIsaValidationWarning)
def _validate_sections(self):
self._validate_publications(self._investigation.publications)
self._validate_contacts(self._investigation.contacts)
self._validate_studies()
def _validate_studies(self):
# Check if any study exists
if not self._investigation.studies:
tpl = "No studies declared in investigation: {}"
msg = tpl.format(self._investigation.info.path)
warnings.warn(msg, CriticalIsaValidationWarning)
return
for study in self._investigation.studies:
            # Validate availability of minimal study information (ids, paths, titles)
if not (study.info.identifier and study.info.path):
tpl = (
"Study with incomplete minimal information (ID and path):"
"\nID:\t{}\nTitle:\t{}\nPath:\t{}"
)
msg = tpl.format(study.info.identifier, study.info.title, study.info.path or "")
warnings.warn(msg, CriticalIsaValidationWarning)
if not study.info.title:
tpl = "Study without title:\nID:\t{}\nTitle:\t{}\nPath:\t{}"
msg = tpl.format(study.info.identifier, study.info.title, study.info.path or "")
warnings.warn(msg, ModerateIsaValidationWarning)
# Assure distinct studies, i.e. unique ids, paths and preferably titles
if study.info.identifier in self._study_ids:
tpl = "Study identifier used more than once: {}"
msg = tpl.format(study.info.identifier)
warnings.warn(msg, CriticalIsaValidationWarning)
else:
self._study_ids.add(study.info.identifier)
if study.info.path:
if study.info.path in self._study_paths:
tpl = "Study path used more than once: {}"
msg = tpl.format(study.info.path or "")
warnings.warn(msg, CriticalIsaValidationWarning)
else:
self._study_paths.add(study.info.path)
if study.info.title:
if study.info.title in self._study_titles:
tpl = "Study title used more than once: {}"
msg = tpl.format(study.info.title)
warnings.warn(msg, ModerateIsaValidationWarning)
else:
self._study_titles.add(study.info.title)
# Validate study sections
self._validate_publications(study.publications)
self._validate_contacts(study.contacts)
self._validate_designs(study.designs)
self._validate_factors(study.factors)
self._validate_assays(study.assays, study.info.identifier)
self._validate_protocols(study.protocols)
def _validate_publications(self, publications: Tuple[models.PublicationInfo]):
# Validate format of specific fields in publications
for publication in publications:
_validate_pubmed_id(publication.pubmed_id)
_validate_doi(publication.doi)
if is_ontology_term_ref(publication.status):
self._ontology_validator.validate(publication.status)
def _validate_contacts(self, contacts: Tuple[models.ContactInfo]):
# Validate format of specific fields in contacts
for contact in contacts:
_validate_mail_address(contact.email)
_validate_phone_number(contact.phone)
_validate_phone_number(contact.fax)
if is_ontology_term_ref(contact.role):
self._ontology_validator.validate(contact.role)
def _validate_designs(self, designs: Tuple[models.DesignDescriptorsInfo]):
# Validate format of specific fields in designs
for design in designs:
if is_ontology_term_ref(design.type):
self._ontology_validator.validate(design.type)
def _validate_factors(self, factors: Dict[str, models.FactorInfo]):
# Validate format of specific fields in factors
for factor in factors.values():
if is_ontology_term_ref(factor.type):
self._ontology_validator.validate(factor.type)
def _validate_assays(self, assays: Tuple[models.AssayInfo], study_id: str):
# Check if any assays exists
if not assays:
tpl = "No assays declared in study '{}' of investigation '{}'"
msg = tpl.format(study_id, self._investigation.info.path)
warnings.warn(msg, CriticalIsaValidationWarning)
return
for assay in assays:
# Validate availability of minimal assay information
# (path, measurement type, technology type and technology platform)
meas_type = (
assay.measurement_type.name
if is_ontology_term_ref(assay.measurement_type)
else assay.measurement_type
)
tech_type = (
assay.technology_type.name
if is_ontology_term_ref(assay.technology_type)
else assay.technology_type
)
if not (assay.path and meas_type and tech_type):
tpl = (
"Assay with incomplete minimal information (path, measurement and "
"technology type):\nPath:\t{}\nMeasurement Type:\t{}\nTechnology Type:\t{"
"}\nTechnology Platform:\t{}"
)
msg = tpl.format(assay.path or "", meas_type, tech_type, assay.platform)
warnings.warn(msg, CriticalIsaValidationWarning)
if not assay.platform:
tpl = (
"Assay without platform:\nPath:\t{}"
"\nMeasurement Type:\t{}\nTechnology Type:\t{}\nTechnology Platform:\t{}"
)
msg = tpl.format(assay.path or "", meas_type, tech_type, assay.platform)
warnings.warn(msg, AdvisoryIsaValidationWarning)
# Assure distinct assays, i.e. unique paths
if assay.path:
if assay.path in self._assay_paths:
tpl = "Assay path used more than once: {}"
msg = tpl.format(assay.path or "")
warnings.warn(msg, CriticalIsaValidationWarning)
else:
self._assay_paths.add(assay.path)
# Validate format of specific fields in assays
if is_ontology_term_ref(assay.measurement_type):
self._ontology_validator.validate(assay.measurement_type)
if is_ontology_term_ref(assay.technology_type):
self._ontology_validator.validate(assay.technology_type)
def _validate_protocols(self, protocols: Dict[str, models.ProtocolInfo]):
# Validate format of specific fields in protocols
for protocol in protocols.values():
if is_ontology_term_ref(protocol.type):
self._ontology_validator.validate(protocol.type)
for parameter in protocol.parameters.values():
if is_ontology_term_ref(parameter):
self._ontology_validator.validate(parameter)
for component in protocol.components.values():
if is_ontology_term_ref(component.type):
self._ontology_validator.validate(component.type)
|
[
"warnings.warn",
"re.compile"
] |
[((948, 1011), 're.compile', 're.compile', (['"""^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$"""'], {}), "('^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$')\n", (958, 1011), False, 'import re\n'), ((1028, 1059), 're.compile', 're.compile', (['"""^\\\\+?[\\\\d /()-]+$"""'], {}), "('^\\\\+?[\\\\d /()-]+$')\n", (1038, 1059), False, 'import re\n'), ((1101, 1167), 're.compile', 're.compile', (['"""^(?:(?:DOI|doi):)?10[.][0-9]{4,}(?:[.][0-9]+)*/\\\\S+$"""'], {}), "('^(?:(?:DOI|doi):)?10[.][0-9]{4,}(?:[.][0-9]+)*/\\\\S+$')\n", (1111, 1167), False, 'import re\n'), ((1183, 1203), 're.compile', 're.compile', (['"""^\\\\d+$"""'], {}), "('^\\\\d+$')\n", (1193, 1203), False, 'import re\n'), ((1456, 1504), 'warnings.warn', 'warnings.warn', (['msg', 'AdvisoryIsaValidationWarning'], {}), '(msg, AdvisoryIsaValidationWarning)\n', (1469, 1504), False, 'import warnings\n'), ((1774, 1822), 'warnings.warn', 'warnings.warn', (['msg', 'AdvisoryIsaValidationWarning'], {}), '(msg, AdvisoryIsaValidationWarning)\n', (1787, 1822), False, 'import warnings\n'), ((2026, 2074), 'warnings.warn', 'warnings.warn', (['msg', 'AdvisoryIsaValidationWarning'], {}), '(msg, AdvisoryIsaValidationWarning)\n', (2039, 2074), False, 'import warnings\n'), ((2321, 2369), 'warnings.warn', 'warnings.warn', (['msg', 'AdvisoryIsaValidationWarning'], {}), '(msg, AdvisoryIsaValidationWarning)\n', (2334, 2369), False, 'import warnings\n'), ((4133, 4181), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (4146, 4181), False, 'import warnings\n'), ((8219, 8267), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (8232, 8267), False, 'import warnings\n'), ((3637, 3685), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (3650, 3685), False, 'import warnings\n'), ((4694, 4742), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (4707, 4742), False, 'import warnings\n'), ((4970, 5018), 'warnings.warn', 'warnings.warn', (['msg', 'ModerateIsaValidationWarning'], {}), '(msg, ModerateIsaValidationWarning)\n', (4983, 5018), False, 'import warnings\n'), ((5297, 5345), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (5310, 5345), False, 'import warnings\n'), ((9283, 9331), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (9296, 9331), False, 'import warnings\n'), ((9665, 9713), 'warnings.warn', 'warnings.warn', (['msg', 'AdvisoryIsaValidationWarning'], {}), '(msg, AdvisoryIsaValidationWarning)\n', (9678, 9713), False, 'import warnings\n'), ((5655, 5703), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (5668, 5703), False, 'import warnings\n'), ((6016, 6064), 'warnings.warn', 'warnings.warn', (['msg', 'ModerateIsaValidationWarning'], {}), '(msg, ModerateIsaValidationWarning)\n', (6029, 6064), False, 'import warnings\n'), ((9987, 10035), 'warnings.warn', 'warnings.warn', (['msg', 'CriticalIsaValidationWarning'], {}), '(msg, CriticalIsaValidationWarning)\n', (10000, 10035), False, 'import warnings\n')]
|
from math import pi
from unittest.mock import Mock, call
from pygfx import WorldObject
from pygfx.linalg import Euler, Vector3, Quaternion
def test_traverse():
root = WorldObject()
layer1_child1 = WorldObject()
root.add(layer1_child1)
layer1_child2 = WorldObject()
root.add(layer1_child2)
layer2_child1 = WorldObject()
layer1_child2.add(layer2_child1)
layer2_child2 = WorldObject()
layer1_child2.add(layer2_child2)
mock = Mock()
root.traverse(mock)
mock.assert_has_calls(
[
call(root),
call(layer1_child1),
call(layer1_child2),
call(layer2_child1),
call(layer2_child2),
]
)
assert len(mock.mock_calls) == 5
def test_remove():
root = WorldObject()
layer1_child1 = WorldObject()
root.add(layer1_child1)
layer1_child2 = WorldObject()
root.add(layer1_child2)
layer2_child1 = WorldObject()
layer1_child2.add(layer2_child1)
layer2_child2 = WorldObject()
layer1_child2.add(layer2_child2)
root.remove(layer1_child2)
# layer1_child2 removed
assert layer1_child2.parent is None
assert layer1_child2 not in root.children
# layer1_child1 not removed
assert layer1_child1.parent is root
assert layer1_child1 in root.children
def test_update_matrix():
root = WorldObject()
root.position.set(3, 6, 8)
root.scale.set(1, 1.2, 1)
root.rotation.set_from_euler(Euler(pi / 2, 0, 0))
root.update_matrix()
t, r, s = Vector3(), Quaternion(), Vector3()
root.matrix.decompose(t, r, s)
assert t == root.position
    # todo: do something like np.allclose
# assert r == root.rotation # close, but not quite the same
# assert s == root.scale
assert root.matrix_world_dirty
def test_update_matrix_world():
root = WorldObject()
root.position.set(-5, 8, 0)
root.rotation.set_from_euler(Euler(pi / 4, 0, 0))
root.update_matrix()
child1 = WorldObject()
child1.position.set(0, 0, 5)
root.add(child1)
child2 = WorldObject()
child2.rotation.set_from_euler(Euler(0, -pi / 4, 0))
child1.add(child2)
objs = [root, child1, child2]
assert all(obj.matrix_world_dirty for obj in objs)
# test both updating parents and children
child1.update_matrix_world(update_parents=True)
assert all(not obj.matrix_world_dirty for obj in objs)
p = Vector3(10, 10, 10)
p.apply_matrix4(child2.matrix)
p.apply_matrix4(child1.matrix)
p.apply_matrix4(root.matrix)
x = Vector3(10, 10, 10)
x.apply_matrix4(child2.matrix_world)
# if there is a difference it's a floating point error
assert Vector3().sub_vectors(p, x).length() < 0.00000000001
# reorganize such that child1 and 2 become siblings
child1.remove(child2)
root.add(child2)
assert not child1.matrix_world_dirty
# child2 should be flagged as dirty again now
assert child2.matrix_world_dirty
|
[
"pygfx.linalg.Vector3",
"unittest.mock.Mock",
"pygfx.linalg.Quaternion",
"pygfx.WorldObject",
"unittest.mock.call",
"pygfx.linalg.Euler"
] |
[((174, 187), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (185, 187), False, 'from pygfx import WorldObject\n'), ((209, 222), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (220, 222), False, 'from pygfx import WorldObject\n'), ((271, 284), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (282, 284), False, 'from pygfx import WorldObject\n'), ((334, 347), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (345, 347), False, 'from pygfx import WorldObject\n'), ((405, 418), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (416, 418), False, 'from pygfx import WorldObject\n'), ((468, 474), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (472, 474), False, 'from unittest.mock import Mock, call\n'), ((777, 790), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (788, 790), False, 'from pygfx import WorldObject\n'), ((812, 825), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (823, 825), False, 'from pygfx import WorldObject\n'), ((874, 887), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (885, 887), False, 'from pygfx import WorldObject\n'), ((937, 950), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (948, 950), False, 'from pygfx import WorldObject\n'), ((1008, 1021), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (1019, 1021), False, 'from pygfx import WorldObject\n'), ((1358, 1371), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (1369, 1371), False, 'from pygfx import WorldObject\n'), ((1843, 1856), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (1854, 1856), False, 'from pygfx import WorldObject\n'), ((1982, 1995), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (1993, 1995), False, 'from pygfx import WorldObject\n'), ((2064, 2077), 'pygfx.WorldObject', 'WorldObject', ([], {}), '()\n', (2075, 2077), False, 'from pygfx import WorldObject\n'), ((2415, 2434), 'pygfx.linalg.Vector3', 'Vector3', (['(10)', '(10)', '(10)'], {}), '(10, 10, 10)\n', (2422, 2434), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((2547, 2566), 'pygfx.linalg.Vector3', 'Vector3', (['(10)', '(10)', '(10)'], {}), '(10, 10, 10)\n', (2554, 2566), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((1466, 1485), 'pygfx.linalg.Euler', 'Euler', (['(pi / 2)', '(0)', '(0)'], {}), '(pi / 2, 0, 0)\n', (1471, 1485), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((1527, 1536), 'pygfx.linalg.Vector3', 'Vector3', ([], {}), '()\n', (1534, 1536), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((1538, 1550), 'pygfx.linalg.Quaternion', 'Quaternion', ([], {}), '()\n', (1548, 1550), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((1552, 1561), 'pygfx.linalg.Vector3', 'Vector3', ([], {}), '()\n', (1559, 1561), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((1922, 1941), 'pygfx.linalg.Euler', 'Euler', (['(pi / 4)', '(0)', '(0)'], {}), '(pi / 4, 0, 0)\n', (1927, 1941), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((2113, 2133), 'pygfx.linalg.Euler', 'Euler', (['(0)', '(-pi / 4)', '(0)'], {}), '(0, -pi / 4, 0)\n', (2118, 2133), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n'), ((548, 558), 'unittest.mock.call', 'call', (['root'], {}), '(root)\n', (552, 558), False, 'from unittest.mock import Mock, call\n'), ((572, 591), 'unittest.mock.call', 'call', (['layer1_child1'], {}), '(layer1_child1)\n', (576, 591), False, 'from unittest.mock import Mock, call\n'), ((605, 624), 'unittest.mock.call', 'call', (['layer1_child2'], {}), 
'(layer1_child2)\n', (609, 624), False, 'from unittest.mock import Mock, call\n'), ((638, 657), 'unittest.mock.call', 'call', (['layer2_child1'], {}), '(layer2_child1)\n', (642, 657), False, 'from unittest.mock import Mock, call\n'), ((671, 690), 'unittest.mock.call', 'call', (['layer2_child2'], {}), '(layer2_child2)\n', (675, 690), False, 'from unittest.mock import Mock, call\n'), ((2679, 2688), 'pygfx.linalg.Vector3', 'Vector3', ([], {}), '()\n', (2686, 2688), False, 'from pygfx.linalg import Euler, Vector3, Quaternion\n')]
|
import math  # imports the whole math module
a = float(input("Please enter a number:"))
wurzel = math.sqrt(a)
print("The square root of " + str(a) + " is " + str(wurzel))
hochFünf = math.pow(a, 5)
print(str(a) + " to the power of 5 is " + str(hochFünf))
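# Worked example: for the input 9, this prints a square root of 3.0 and
# 9.0 to the power of 5 as 59049.0.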
|
[
"math.pow",
"math.sqrt"
] |
[((110, 122), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (119, 122), False, 'import math\n'), ((196, 210), 'math.pow', 'math.pow', (['a', '(5)'], {}), '(a, 5)\n', (204, 210), False, 'import math\n')]
|
"""
This class fetches the earthquakes that occurred within the United States
within the past 24 hours and creates a report of the quakes that have
happened. The report is retrievable through 'get_report', and the number
of quakes that have happened is returned through 'get_count'.
"""
import requests
from datetime import datetime, timedelta
import csv
import time
import pandas as pd
from math import cos, pi, radians
class EarthQuake():
def __init__(self, sequence: int):
        self._today = datetime.today().date()  # - timedelta(days=1) necessary for aws EC2
self._yesterday = self._today - timedelta(days=1)
self._data_file = open(f'data_file{sequence}.csv', 'w')
self._data_file_path = f'data_file{sequence}.csv'
self._csv_writer = csv.writer(self._data_file)
self._quake_data = self.fetch_quake()
self._count_quake = self._quake_data['metadata']['count']
def fetch_quake(self):
"""Fetches earthquake data from USGS api."""
r = requests.get(f"https://earthquake.usgs.gov/fdsnws/event/1/query?"
f"format=geojson&starttime={self._today}&"
f"minlatitude=21&minlongitude=-165&maxlatitude=70&"
f"maxlongitude=-65&minmagnitude=2&maxdepth=70")
return r.json()
def fill_in_csv(self):
"""Fills in CSV of earthquake data"""
data = self.fetch_quake()
        count = 0
        for row in data['features']:
            # write the header row once, before the first feature's values
            if count == 0:
                headers = (list(row['properties'].keys()) +
                           list(row['geometry'].keys()))
                headers.append('Square Coordinates')
                self._csv_writer.writerow(headers)
                count += 1
            # write a row of values for every feature (including the first one)
            values = list(row['properties'].values()) + \
                list(row['geometry'].values())
            values.append(self.square_coordinate(
                row['geometry']['coordinates'][:2]))
            self._csv_writer.writerow(values)
self._data_file.close()
def square_coordinate(self, coordinate):
"""
coordinate: Latitidude & Longitude coordinate of Earthquake
Returns square coordinate points that are a 10 mile radius
from the initial coordinate point that is passed through.
"""
y, x = tuple(coordinate)
        search_radius = 10  # in miles
        earth_radius = 3958.8  # in miles
        # one degree of latitude spans roughly 2*pi*earth_radius/360 (~69) miles,
        # so the latitude half-width of the box is search_radius/(2*pi*R/360) degrees
        dY = 360 * search_radius / (2 * pi * earth_radius)
        # a degree of longitude shrinks by cos(latitude), so divide to keep ~10 miles
        dX = dY / cos(radians(y))
upper_x = x - dX
upper_y = y - dY
lower_x = x + dX
lower_y = y + dY
coordinate = [upper_y, lower_x, lower_y, upper_x]
return coordinate
@property
def get_count(self):
return self._count_quake
@property
def get_report(self):
return self._data_file_path
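# A minimal usage sketch (the sequence number and printing of the results are
# assumptions for illustration, not part of this module):
#
#     quake = EarthQuake(sequence=1)
#     quake.fill_in_csv()
#     print(quake.get_count, quake.get_report)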
|
[
"datetime.datetime.today",
"csv.writer",
"math.radians",
"datetime.timedelta",
"requests.get"
] |
[((799, 826), 'csv.writer', 'csv.writer', (['self._data_file'], {}), '(self._data_file)\n', (809, 826), False, 'import csv\n'), ((1032, 1238), 'requests.get', 'requests.get', (['f"""https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime={self._today}&minlatitude=21&minlongitude=-165&maxlatitude=70&maxlongitude=-65&minmagnitude=2&maxdepth=70"""'], {}), "(\n f'https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime={self._today}&minlatitude=21&minlongitude=-165&maxlatitude=70&maxlongitude=-65&minmagnitude=2&maxdepth=70'\n )\n", (1044, 1238), False, 'import requests\n'), ((632, 649), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (641, 649), False, 'from datetime import datetime, timedelta\n'), ((491, 507), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (505, 507), False, 'from datetime import datetime, timedelta\n'), ((2546, 2556), 'math.radians', 'radians', (['y'], {}), '(y)\n', (2553, 2556), False, 'from math import cos, radians\n')]
|
import pytest
from autofit import database as db
from autofit.mock import mock as m
@pytest.fixture(
name="gaussian_1"
)
def make_gaussian_1():
return db.Fit(
id="gaussian_1",
instance=m.Gaussian(
centre=1
),
info={"info": 1},
is_complete=True,
unique_tag="one"
)
@pytest.fixture(
name="gaussian_2"
)
def make_gaussian_2():
return db.Fit(
id="gaussian_2",
instance=m.Gaussian(
centre=2
),
info={"info": 2},
is_complete=False,
unique_tag="two"
)
@pytest.fixture(
autouse=True
)
def add_to_session(
gaussian_1,
gaussian_2,
session
):
session.add_all([
gaussian_1,
gaussian_2
])
session.commit()
|
[
"autofit.mock.mock.Gaussian",
"pytest.fixture"
] |
[((88, 121), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""gaussian_1"""'}), "(name='gaussian_1')\n", (102, 121), False, 'import pytest\n'), ((342, 375), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""gaussian_2"""'}), "(name='gaussian_2')\n", (356, 375), False, 'import pytest\n'), ((597, 625), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (611, 625), False, 'import pytest\n'), ((212, 232), 'autofit.mock.mock.Gaussian', 'm.Gaussian', ([], {'centre': '(1)'}), '(centre=1)\n', (222, 232), True, 'from autofit.mock import mock as m\n'), ((466, 486), 'autofit.mock.mock.Gaussian', 'm.Gaussian', ([], {'centre': '(2)'}), '(centre=2)\n', (476, 486), True, 'from autofit.mock import mock as m\n')]
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UserAccountManagementGranularInformation(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, can_manage_account_security_settings=None, can_manage_account_security_settings_metadata=None, can_manage_account_settings=None, can_manage_account_settings_metadata=None, can_manage_admins=None, can_manage_admins_metadata=None, can_manage_reporting=None, can_manage_reporting_metadata=None, can_manage_sharing=None, can_manage_sharing_metadata=None, can_manage_signing_groups=None, can_manage_signing_groups_metadata=None, can_manage_users=None, can_manage_users_metadata=None):
"""
UserAccountManagementGranularInformation - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'can_manage_account_security_settings': 'str',
'can_manage_account_security_settings_metadata': 'SettingsMetadata',
'can_manage_account_settings': 'str',
'can_manage_account_settings_metadata': 'SettingsMetadata',
'can_manage_admins': 'str',
'can_manage_admins_metadata': 'SettingsMetadata',
'can_manage_reporting': 'str',
'can_manage_reporting_metadata': 'SettingsMetadata',
'can_manage_sharing': 'str',
'can_manage_sharing_metadata': 'SettingsMetadata',
'can_manage_signing_groups': 'str',
'can_manage_signing_groups_metadata': 'SettingsMetadata',
'can_manage_users': 'str',
'can_manage_users_metadata': 'SettingsMetadata'
}
self.attribute_map = {
'can_manage_account_security_settings': 'canManageAccountSecuritySettings',
'can_manage_account_security_settings_metadata': 'canManageAccountSecuritySettingsMetadata',
'can_manage_account_settings': 'canManageAccountSettings',
'can_manage_account_settings_metadata': 'canManageAccountSettingsMetadata',
'can_manage_admins': 'canManageAdmins',
'can_manage_admins_metadata': 'canManageAdminsMetadata',
'can_manage_reporting': 'canManageReporting',
'can_manage_reporting_metadata': 'canManageReportingMetadata',
'can_manage_sharing': 'canManageSharing',
'can_manage_sharing_metadata': 'canManageSharingMetadata',
'can_manage_signing_groups': 'canManageSigningGroups',
'can_manage_signing_groups_metadata': 'canManageSigningGroupsMetadata',
'can_manage_users': 'canManageUsers',
'can_manage_users_metadata': 'canManageUsersMetadata'
}
self._can_manage_account_security_settings = can_manage_account_security_settings
self._can_manage_account_security_settings_metadata = can_manage_account_security_settings_metadata
self._can_manage_account_settings = can_manage_account_settings
self._can_manage_account_settings_metadata = can_manage_account_settings_metadata
self._can_manage_admins = can_manage_admins
self._can_manage_admins_metadata = can_manage_admins_metadata
self._can_manage_reporting = can_manage_reporting
self._can_manage_reporting_metadata = can_manage_reporting_metadata
self._can_manage_sharing = can_manage_sharing
self._can_manage_sharing_metadata = can_manage_sharing_metadata
self._can_manage_signing_groups = can_manage_signing_groups
self._can_manage_signing_groups_metadata = can_manage_signing_groups_metadata
self._can_manage_users = can_manage_users
self._can_manage_users_metadata = can_manage_users_metadata
@property
def can_manage_account_security_settings(self):
"""
Gets the can_manage_account_security_settings of this UserAccountManagementGranularInformation.
:return: The can_manage_account_security_settings of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_account_security_settings
@can_manage_account_security_settings.setter
def can_manage_account_security_settings(self, can_manage_account_security_settings):
"""
Sets the can_manage_account_security_settings of this UserAccountManagementGranularInformation.
:param can_manage_account_security_settings: The can_manage_account_security_settings of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_account_security_settings = can_manage_account_security_settings
@property
def can_manage_account_security_settings_metadata(self):
"""
Gets the can_manage_account_security_settings_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_account_security_settings_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_account_security_settings_metadata
@can_manage_account_security_settings_metadata.setter
def can_manage_account_security_settings_metadata(self, can_manage_account_security_settings_metadata):
"""
Sets the can_manage_account_security_settings_metadata of this UserAccountManagementGranularInformation.
:param can_manage_account_security_settings_metadata: The can_manage_account_security_settings_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_account_security_settings_metadata = can_manage_account_security_settings_metadata
@property
def can_manage_account_settings(self):
"""
Gets the can_manage_account_settings of this UserAccountManagementGranularInformation.
:return: The can_manage_account_settings of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_account_settings
@can_manage_account_settings.setter
def can_manage_account_settings(self, can_manage_account_settings):
"""
Sets the can_manage_account_settings of this UserAccountManagementGranularInformation.
:param can_manage_account_settings: The can_manage_account_settings of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_account_settings = can_manage_account_settings
@property
def can_manage_account_settings_metadata(self):
"""
Gets the can_manage_account_settings_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_account_settings_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_account_settings_metadata
@can_manage_account_settings_metadata.setter
def can_manage_account_settings_metadata(self, can_manage_account_settings_metadata):
"""
Sets the can_manage_account_settings_metadata of this UserAccountManagementGranularInformation.
:param can_manage_account_settings_metadata: The can_manage_account_settings_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_account_settings_metadata = can_manage_account_settings_metadata
@property
def can_manage_admins(self):
"""
Gets the can_manage_admins of this UserAccountManagementGranularInformation.
:return: The can_manage_admins of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_admins
@can_manage_admins.setter
def can_manage_admins(self, can_manage_admins):
"""
Sets the can_manage_admins of this UserAccountManagementGranularInformation.
:param can_manage_admins: The can_manage_admins of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_admins = can_manage_admins
@property
def can_manage_admins_metadata(self):
"""
Gets the can_manage_admins_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_admins_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_admins_metadata
@can_manage_admins_metadata.setter
def can_manage_admins_metadata(self, can_manage_admins_metadata):
"""
Sets the can_manage_admins_metadata of this UserAccountManagementGranularInformation.
:param can_manage_admins_metadata: The can_manage_admins_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_admins_metadata = can_manage_admins_metadata
@property
def can_manage_reporting(self):
"""
Gets the can_manage_reporting of this UserAccountManagementGranularInformation.
:return: The can_manage_reporting of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_reporting
@can_manage_reporting.setter
def can_manage_reporting(self, can_manage_reporting):
"""
Sets the can_manage_reporting of this UserAccountManagementGranularInformation.
:param can_manage_reporting: The can_manage_reporting of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_reporting = can_manage_reporting
@property
def can_manage_reporting_metadata(self):
"""
Gets the can_manage_reporting_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_reporting_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_reporting_metadata
@can_manage_reporting_metadata.setter
def can_manage_reporting_metadata(self, can_manage_reporting_metadata):
"""
Sets the can_manage_reporting_metadata of this UserAccountManagementGranularInformation.
:param can_manage_reporting_metadata: The can_manage_reporting_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_reporting_metadata = can_manage_reporting_metadata
@property
def can_manage_sharing(self):
"""
Gets the can_manage_sharing of this UserAccountManagementGranularInformation.
:return: The can_manage_sharing of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_sharing
@can_manage_sharing.setter
def can_manage_sharing(self, can_manage_sharing):
"""
Sets the can_manage_sharing of this UserAccountManagementGranularInformation.
:param can_manage_sharing: The can_manage_sharing of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_sharing = can_manage_sharing
@property
def can_manage_sharing_metadata(self):
"""
Gets the can_manage_sharing_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_sharing_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_sharing_metadata
@can_manage_sharing_metadata.setter
def can_manage_sharing_metadata(self, can_manage_sharing_metadata):
"""
Sets the can_manage_sharing_metadata of this UserAccountManagementGranularInformation.
:param can_manage_sharing_metadata: The can_manage_sharing_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_sharing_metadata = can_manage_sharing_metadata
@property
def can_manage_signing_groups(self):
"""
Gets the can_manage_signing_groups of this UserAccountManagementGranularInformation.
:return: The can_manage_signing_groups of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_signing_groups
@can_manage_signing_groups.setter
def can_manage_signing_groups(self, can_manage_signing_groups):
"""
Sets the can_manage_signing_groups of this UserAccountManagementGranularInformation.
:param can_manage_signing_groups: The can_manage_signing_groups of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_signing_groups = can_manage_signing_groups
@property
def can_manage_signing_groups_metadata(self):
"""
Gets the can_manage_signing_groups_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_signing_groups_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_signing_groups_metadata
@can_manage_signing_groups_metadata.setter
def can_manage_signing_groups_metadata(self, can_manage_signing_groups_metadata):
"""
Sets the can_manage_signing_groups_metadata of this UserAccountManagementGranularInformation.
:param can_manage_signing_groups_metadata: The can_manage_signing_groups_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_signing_groups_metadata = can_manage_signing_groups_metadata
@property
def can_manage_users(self):
"""
Gets the can_manage_users of this UserAccountManagementGranularInformation.
:return: The can_manage_users of this UserAccountManagementGranularInformation.
:rtype: str
"""
return self._can_manage_users
@can_manage_users.setter
def can_manage_users(self, can_manage_users):
"""
Sets the can_manage_users of this UserAccountManagementGranularInformation.
:param can_manage_users: The can_manage_users of this UserAccountManagementGranularInformation.
:type: str
"""
self._can_manage_users = can_manage_users
@property
def can_manage_users_metadata(self):
"""
Gets the can_manage_users_metadata of this UserAccountManagementGranularInformation.
:return: The can_manage_users_metadata of this UserAccountManagementGranularInformation.
:rtype: SettingsMetadata
"""
return self._can_manage_users_metadata
@can_manage_users_metadata.setter
def can_manage_users_metadata(self, can_manage_users_metadata):
"""
Sets the can_manage_users_metadata of this UserAccountManagementGranularInformation.
:param can_manage_users_metadata: The can_manage_users_metadata of this UserAccountManagementGranularInformation.
:type: SettingsMetadata
"""
self._can_manage_users_metadata = can_manage_users_metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"six.iteritems"
] |
[((15948, 15977), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (15957, 15977), False, 'from six import iteritems\n')]
|
from __future__ import unicode_literals
from model_utils.models import TimeStampedModel
from django.db import models
from hackupc.users.models import User
# Create your models here.
class Proposal(TimeStampedModel):
title = models.TextField(blank=False, max_length=100)
description = models.TextField(blank=False, max_length=2000)
image = models.ImageField(blank=False)
created_by = models.ForeignKey(User)
@property
def get_votes(self):
return self.proposalvote_set.all().count()
class ProposalVote(TimeStampedModel):
user = models.ForeignKey(User)
proposal = models.ForeignKey(Proposal)
class Meta:
unique_together = ("user", "proposal")
|
[
"django.db.models.ImageField",
"django.db.models.TextField",
"django.db.models.ForeignKey"
] |
[((231, 276), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(False)', 'max_length': '(100)'}), '(blank=False, max_length=100)\n', (247, 276), False, 'from django.db import models\n'), ((295, 341), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(False)', 'max_length': '(2000)'}), '(blank=False, max_length=2000)\n', (311, 341), False, 'from django.db import models\n'), ((354, 384), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(False)'}), '(blank=False)\n', (371, 384), False, 'from django.db import models\n'), ((402, 425), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (419, 425), False, 'from django.db import models\n'), ((568, 591), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (585, 591), False, 'from django.db import models\n'), ((607, 634), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Proposal'], {}), '(Proposal)\n', (624, 634), False, 'from django.db import models\n')]
|
# SPDX-License-Identifier: MIT
import pytest
from attr._compat import metadata_proxy
@pytest.fixture(name="mp")
def _mp():
return metadata_proxy({"x": 42, "y": "foo"})
class TestMetadataProxy:
"""
Ensure properties of metadata_proxy independently of hypothesis strategies.
"""
def test_repr(self, mp):
"""
repr makes sense and is consistent across Python versions.
"""
assert any(
[
"mappingproxy({'x': 42, 'y': 'foo'})" == repr(mp),
"mappingproxy({'y': 'foo', 'x': 42})" == repr(mp),
]
)
def test_immutable(self, mp):
"""
All mutating methods raise errors.
"""
with pytest.raises(TypeError, match="not support item assignment"):
mp["z"] = 23
with pytest.raises(TypeError, match="not support item deletion"):
del mp["x"]
with pytest.raises(AttributeError, match="no attribute 'update'"):
mp.update({})
with pytest.raises(AttributeError, match="no attribute 'clear'"):
mp.clear()
with pytest.raises(AttributeError, match="no attribute 'pop'"):
mp.pop("x")
with pytest.raises(AttributeError, match="no attribute 'popitem'"):
mp.popitem()
with pytest.raises(AttributeError, match="no attribute 'setdefault'"):
mp.setdefault("x")
|
[
"pytest.raises",
"pytest.fixture",
"attr._compat.metadata_proxy"
] |
[((90, 115), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""mp"""'}), "(name='mp')\n", (104, 115), False, 'import pytest\n'), ((138, 175), 'attr._compat.metadata_proxy', 'metadata_proxy', (["{'x': 42, 'y': 'foo'}"], {}), "({'x': 42, 'y': 'foo'})\n", (152, 175), False, 'from attr._compat import metadata_proxy\n'), ((727, 788), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""not support item assignment"""'}), "(TypeError, match='not support item assignment')\n", (740, 788), False, 'import pytest\n'), ((829, 888), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""not support item deletion"""'}), "(TypeError, match='not support item deletion')\n", (842, 888), False, 'import pytest\n'), ((928, 988), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""no attribute \'update\'"""'}), '(AttributeError, match="no attribute \'update\'")\n', (941, 988), False, 'import pytest\n'), ((1030, 1089), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""no attribute \'clear\'"""'}), '(AttributeError, match="no attribute \'clear\'")\n', (1043, 1089), False, 'import pytest\n'), ((1128, 1185), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""no attribute \'pop\'"""'}), '(AttributeError, match="no attribute \'pop\'")\n', (1141, 1185), False, 'import pytest\n'), ((1225, 1286), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""no attribute \'popitem\'"""'}), '(AttributeError, match="no attribute \'popitem\'")\n', (1238, 1286), False, 'import pytest\n'), ((1327, 1391), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""no attribute \'setdefault\'"""'}), '(AttributeError, match="no attribute \'setdefault\'")\n', (1340, 1391), False, 'import pytest\n')]
|
#!/usr/bin/python
import re
import sys
def remove_rtti(text):
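    # dynamic_cast<T *> requires RTTI; rewrite it as a plain C-style cast so the
    # generated wrapper also builds with RTTI disabled.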
return re.sub(r'dynamic_cast<(.* \*)>', r'(\1)', text)
def make_dalvik_compat(text):
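    # Swap SWIG's stock JNIEnvWrapper for a Dalvik/Android-friendly variant: it asks
    # GetEnv() whether the thread is already attached and only calls
    # DetachCurrentThread() when the status is JNI_EDETACHED, and it drops the
    # (void **) casts on AttachCurrentThread that Android's JNI headers reject.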
init_text = """/* Utility class for managing the JNI environment */
class JNIEnvWrapper {
const Director *director_;
JNIEnv *jenv_;
public:
JNIEnvWrapper(const Director *director) : director_(director), jenv_(0) {
#if defined(SWIG_JAVA_ATTACH_CURRENT_THREAD_AS_DAEMON)
// Attach a daemon thread to the JVM. Useful when the JVM should not wait for
// the thread to exit upon shutdown. Only for jdk-1.4 and later.
director_->swig_jvm_->AttachCurrentThreadAsDaemon((void **) &jenv_, NULL);
#else
director_->swig_jvm_->AttachCurrentThread((void **) &jenv_, NULL);
#endif
}
~JNIEnvWrapper() {
#if !defined(SWIG_JAVA_NO_DETACH_CURRENT_THREAD)
// Some JVMs, eg jdk-1.4.2 and lower on Solaris have a bug and crash with the DetachCurrentThread call.
// However, without this call, the JVM hangs on exit when the thread was not created by the JVM and creates a memory leak.
director_->swig_jvm_->DetachCurrentThread();
#endif
}
JNIEnv *getJNIEnv() const {
return jenv_;
}
};"""
final_text = """/* Utility class for managing the JNI environment */
class JNIEnvWrapper {
const Director *director_;
JNIEnv *jenv_;
int env_status;
JNIEnv *g_env;
public:
JNIEnvWrapper(const Director *director) : director_(director), jenv_(0) {
env_status = director_->swig_jvm_->GetEnv( (void **) &g_env, JNI_VERSION_1_6);
#if defined(SWIG_JAVA_ATTACH_CURRENT_THREAD_AS_DAEMON)
// Attach a daemon thread to the JVM. Useful when the JVM should not wait for
// the thread to exit upon shutdown. Only for jdk-1.4 and later.
director_->swig_jvm_->AttachCurrentThreadAsDaemon( &jenv_, NULL);
#else
director_->swig_jvm_->AttachCurrentThread( &jenv_, NULL);
#endif
}
~JNIEnvWrapper() {
#if !defined(SWIG_JAVA_NO_DETACH_CURRENT_THREAD)
// Some JVMs, eg jdk-1.4.2 and lower on Solaris have a bug and crash with the DetachCurrentThread call.
// However, without this call, the JVM hangs on exit when the thread was not created by the JVM and creates a memory leak.
if( env_status == JNI_EDETACHED ){
director_->swig_jvm_->DetachCurrentThread();
}
#endif
}
JNIEnv *getJNIEnv() const {
return jenv_;
}
};"""
return text.replace(init_text, final_text)
if __name__ == '__main__':
filename = sys.argv[1]
brut_code = open(filename).read()
code_wo_rtti = remove_rtti(brut_code)
code_dalvik_compat = make_dalvik_compat(code_wo_rtti)
print(code_dalvik_compat)
|
[
"re.sub"
] |
[((72, 119), 're.sub', 're.sub', (['"""dynamic_cast<(.* \\\\*)>"""', '"""(\\\\1)"""', 'text'], {}), "('dynamic_cast<(.* \\\\*)>', '(\\\\1)', text)\n", (78, 119), False, 'import re\n')]
|
from abc import ABC
from unittest.mock import MagicMock, call
from uuid import uuid4, UUID
import pytest
from erica.domain.repositories.base_repository_interface import BaseRepositoryInterface
from erica.infrastructure.sqlalchemy.repositories.base_repository import BaseRepository, EntityNotFoundError
from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema
class MockBaseRepository(
BaseRepository[MockDomainModel, MockSchema],
BaseRepositoryInterface,
ABC
):
def __init__(self, db_connection):
super().__init__(db_connection)
self.DatabaseEntity = MockSchema
self.DomainModel = MockDomainModel
class TestBaseRepositoryCreate:
def test_if_entity_of_type_domain_model_as_input_then_entity_with_correct_data_in_database(self,transactional_session_with_mock_schema):
repository = MockBaseRepository(db_connection=transactional_session_with_mock_schema)
repository.create(MockDomainModel(payload={'endboss': 'Melkor'}))
assert len(transactional_session_with_mock_schema.query(MockSchema).all()) == 1
assert isinstance(transactional_session_with_mock_schema.query(MockSchema).all()[0], MockSchema)
def test_if_entity_of_type_domain_model_as_input_then_entity_of_schema_type_is_in_database(self,transactional_session_with_mock_schema):
repository = MockBaseRepository(db_connection=transactional_session_with_mock_schema)
repository.create(MockDomainModel(payload={'endboss': 'Melkor'}))
assert isinstance(transactional_session_with_mock_schema.query(MockSchema).all()[0], MockSchema)
def test_if_entity_of_type_domain_model_as_input_then_return_schema_type(self, transactional_session_with_mock_schema):
repository = MockBaseRepository(db_connection=transactional_session_with_mock_schema)
returned_value = repository.create(MockDomainModel(payload={'endboss': 'Melkor'}))
assert isinstance(returned_value, MockDomainModel)
class TestBaseRepositoryGet:
def test_if_entity_of_type_domain_model_as_input_then_return_list_with_schema_repr_of_entities(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
list_of_schema_object = [MockSchema(**mock_object.dict()), MockSchema(**mock_object.dict()), MockSchema(**mock_object.dict())]
transactional_session_with_mock_schema.add(list_of_schema_object[0])
transactional_session_with_mock_schema.add(list_of_schema_object[1])
transactional_session_with_mock_schema.add(list_of_schema_object[2])
transactional_session_with_mock_schema.commit()
found_entities = MockBaseRepository(db_connection=transactional_session_with_mock_schema).get()
assert found_entities == list_of_schema_object
def test_if_table_is_empty_then_return_empty_list(self, transactional_session_with_mock_schema):
found_entities = MockBaseRepository(db_connection=transactional_session_with_mock_schema).get()
assert found_entities == []
class TestBaseRepositoryGetById:
def test_if_entity_in_database_then_return_domain_representation(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
transactional_session_with_mock_schema.add(schema_object)
transactional_session_with_mock_schema.commit()
found_entity = MockBaseRepository(db_connection=transactional_session_with_mock_schema).get_by_id(schema_object.id)
assert found_entity == mock_object
def test_if_entity_not_in_database_then_raise_exception(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
mock_object.request_id = uuid4()
schema_object = MockSchema(**mock_object.dict())
with pytest.raises(EntityNotFoundError):
MockBaseRepository(db_connection=transactional_session_with_mock_schema).get_by_id(schema_object.id)
class TestBaseRepositoryUpdate:
def test_if_entity_in_database_then_return_updated_domain_representation(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
transactional_session_with_mock_schema.add(schema_object)
transactional_session_with_mock_schema.commit()
updated_object = MockDomainModel(payload={'endboss': 'Sauron'})
updated_entity = MockBaseRepository(db_connection=transactional_session_with_mock_schema).update(schema_object.id, updated_object)
assert updated_entity == updated_object
def test_if_entity_in_database_then_update_in_database(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
transactional_session_with_mock_schema.add(schema_object)
transactional_session_with_mock_schema.commit()
updated_object = MockDomainModel(payload={'endboss': 'Sauron'})
MockBaseRepository(db_connection=transactional_session_with_mock_schema).update(schema_object.id, updated_object)
updated_entry_in_db = transactional_session_with_mock_schema.query(MockSchema).filter(MockSchema.id == schema_object.id).first()
assert updated_entry_in_db.id == schema_object.id
assert updated_entry_in_db.payload == {'endboss': 'Sauron'}
def test_if_entity_not_in_database_then_raise_error(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
updated_object = MockDomainModel(payload={'endboss': 'Sauron'})
with pytest.raises(EntityNotFoundError):
MockBaseRepository(db_connection=transactional_session_with_mock_schema).update(schema_object.id, updated_object)
@pytest.mark.freeze_uuids
def test_if_only_request_id_changed_then_only_call_update_with_changed_attributes(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
transactional_session_with_mock_schema.add(schema_object)
transactional_session_with_mock_schema.commit()
updated_object = MockDomainModel(request_id=uuid4(),
payload={'endboss': 'Melkor'})
# We need a mock object to be able to intercept the call to the update function
repo = MockBaseRepository(db_connection=transactional_session_with_mock_schema)
update_mock = MagicMock()
mocked_get_by_id = MagicMock(side_effect=lambda request_id: MagicMock(
first=MagicMock(return_value=MockBaseRepository(db_connection=transactional_session_with_mock_schema)._get_by_id(request_id).first()),
update=update_mock))
repo._get_by_id = mocked_get_by_id
repo.update(schema_object.id, updated_object)
assert update_mock.mock_calls == [call({'request_id': UUID('00000000-0000-0000-0000-000000000000')})]
class TestBaseRepositoryDelete:
def test_if_entity_in_database_then_delete_from_database(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
transactional_session_with_mock_schema.add(schema_object)
transactional_session_with_mock_schema.commit()
MockBaseRepository(db_connection=transactional_session_with_mock_schema).delete(schema_object.id)
assert len(transactional_session_with_mock_schema.query(MockSchema).all()) == 0
def test_if_entity_not_in_database_then_raise_error(self, transactional_session_with_mock_schema):
mock_object = MockDomainModel(payload={'endboss': 'Melkor'})
schema_object = MockSchema(**mock_object.dict())
with pytest.raises(EntityNotFoundError):
MockBaseRepository(db_connection=transactional_session_with_mock_schema).delete(schema_object.id)
|
[
"uuid.uuid4",
"unittest.mock.MagicMock",
"tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel",
"pytest.raises",
"uuid.UUID"
] |
[((2215, 2261), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (2230, 2261), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((3263, 3309), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (3278, 3309), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((3788, 3834), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (3803, 3834), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((3868, 3875), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3873, 3875), False, 'from uuid import uuid4, UUID\n'), ((4277, 4323), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (4292, 4323), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((4528, 4574), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Sauron'}"}), "(payload={'endboss': 'Sauron'})\n", (4543, 4574), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((4893, 4939), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (4908, 4939), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((5144, 5190), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Sauron'}"}), "(payload={'endboss': 'Sauron'})\n", (5159, 5190), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((5704, 5750), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (5719, 5750), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((5833, 5879), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Sauron'}"}), "(payload={'endboss': 'Sauron'})\n", (5848, 5879), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((6242, 6288), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (6257, 6288), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((6801, 6812), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6810, 6812), False, 'from unittest.mock import MagicMock, call\n'), ((7446, 7492), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (7461, 7492), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((7994, 8040), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (8009, 8040), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((985, 1031), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (1000, 1031), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((1490, 1536), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (1505, 1536), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((1907, 1953), 'tests.infrastructure.sqlalechemy.repositories.mock_repositories.MockDomainModel', 'MockDomainModel', ([], {'payload': "{'endboss': 'Melkor'}"}), "(payload={'endboss': 'Melkor'})\n", (1922, 1953), False, 'from tests.infrastructure.sqlalechemy.repositories.mock_repositories import MockDomainModel, MockSchema\n'), ((3947, 3981), 'pytest.raises', 'pytest.raises', (['EntityNotFoundError'], {}), '(EntityNotFoundError)\n', (3960, 3981), False, 'import pytest\n'), ((5894, 5928), 'pytest.raises', 'pytest.raises', (['EntityNotFoundError'], {}), '(EntityNotFoundError)\n', (5907, 5928), False, 'import pytest\n'), ((8112, 8146), 'pytest.raises', 'pytest.raises', (['EntityNotFoundError'], {}), '(EntityNotFoundError)\n', (8125, 8146), False, 'import pytest\n'), ((6521, 6528), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (6526, 6528), False, 'from uuid import uuid4, UUID\n'), ((7233, 7277), 'uuid.UUID', 'UUID', (['"""00000000-0000-0000-0000-000000000000"""'], {}), "('00000000-0000-0000-0000-000000000000')\n", (7237, 7277), False, 'from uuid import uuid4, UUID\n')]
|
# Generated by Django 2.0.7 on 2018-09-01 11:47
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0006_image_tags'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notifications', '0003_auto_20180901_2013'),
]
operations = [
migrations.RenameModel(
old_name='Notifiaction',
new_name='Notification',
),
migrations.RenameField(
model_name='notification',
old_name='notifiaction_type',
new_name='notification_type',
),
]
|
[
"django.db.migrations.swappable_dependency",
"django.db.migrations.RenameField",
"django.db.migrations.RenameModel"
] |
[((225, 282), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (256, 282), False, 'from django.db import migrations\n'), ((372, 444), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Notifiaction"""', 'new_name': '"""Notification"""'}), "(old_name='Notifiaction', new_name='Notification')\n", (394, 444), False, 'from django.db import migrations\n'), ((489, 603), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""notification"""', 'old_name': '"""notifiaction_type"""', 'new_name': '"""notification_type"""'}), "(model_name='notification', old_name=\n 'notifiaction_type', new_name='notification_type')\n", (511, 603), False, 'from django.db import migrations\n')]
|
from vint.ast.plugin.abstract_ast_plugin import AbstractASTPlugin
from vint.ast.plugin.scope_plugin.reference_reachability_tester import (
ReferenceReachabilityTester,
is_reference_identifier as _is_reference_identifier,
is_declarative_identifier as _is_declarative_identifier,
is_reachable_reference_identifier as _is_reachable_reference_identifier,
is_referenced_declarative_identifier as _is_referenced_declarative_identifier,
)
from vint.ast.plugin.scope_plugin.scope_detector import (
ScopeVisibility as _ScopeVisibility,
ExplicityOfScopeVisibility as _ExplicityOfScopeVisibility,
detect_possible_scope_visibility as _detect_possible_scope_visibility,
)
from vint.ast.plugin.scope_plugin.identifier_attribute import (
is_autoload_identifier as _is_autoload_identifier,
is_function_identifier as _is_function_identifier,
)
from vint.ast.plugin.scope_plugin.variable_name_normalizer import (
normalize_variable_name as _normalize_variable_name
)
# Expose to out of ScopePlugin
ScopeVisibility = _ScopeVisibility
ExplicityOfScopeVisibility = _ExplicityOfScopeVisibility
class ScopePlugin(AbstractASTPlugin):
def __init__(self):
super(ScopePlugin, self).__init__()
self._ref_tester = ReferenceReachabilityTester()
def process(self, ast):
processed_ast = self._ref_tester.process(ast)
return processed_ast
def _get_link_registry(self):
# NOTE: This is a hack for performance. We should build LinkRegistry
# by this method if ReferenceReachabilityTester hide the link_registry.
return self._ref_tester._scope_linker.link_registry
def is_unreachable_reference_identifier(self, node):
return _is_reference_identifier(node) \
and not _is_reachable_reference_identifier(node)
def is_unused_declarative_identifier(self, node):
return _is_declarative_identifier(node) \
and not _is_referenced_declarative_identifier(node)
def is_autoload_identifier(self, node):
return _is_autoload_identifier(node)
def is_function_identifier(self, node):
return _is_function_identifier(node)
def get_objective_scope_visibility(self, node):
scope_visibility_hint = self._ref_tester.get_objective_scope_visibility(node)
return scope_visibility_hint.scope_visibility
def get_explicity_of_scope_visibility(self, node):
scope_visibility_hint = self._ref_tester.get_objective_scope_visibility(node)
return scope_visibility_hint.explicity
def normalize_variable_name(self, node):
return _normalize_variable_name(node, self._ref_tester)
|
[
"vint.ast.plugin.scope_plugin.identifier_attribute.is_autoload_identifier",
"vint.ast.plugin.scope_plugin.identifier_attribute.is_function_identifier",
"vint.ast.plugin.scope_plugin.reference_reachability_tester.ReferenceReachabilityTester",
"vint.ast.plugin.scope_plugin.reference_reachability_tester.is_reachable_reference_identifier",
"vint.ast.plugin.scope_plugin.reference_reachability_tester.is_referenced_declarative_identifier",
"vint.ast.plugin.scope_plugin.reference_reachability_tester.is_declarative_identifier",
"vint.ast.plugin.scope_plugin.reference_reachability_tester.is_reference_identifier",
"vint.ast.plugin.scope_plugin.variable_name_normalizer.normalize_variable_name"
] |
[((1253, 1282), 'vint.ast.plugin.scope_plugin.reference_reachability_tester.ReferenceReachabilityTester', 'ReferenceReachabilityTester', ([], {}), '()\n', (1280, 1282), False, 'from vint.ast.plugin.scope_plugin.reference_reachability_tester import ReferenceReachabilityTester, is_reference_identifier as _is_reference_identifier, is_declarative_identifier as _is_declarative_identifier, is_reachable_reference_identifier as _is_reachable_reference_identifier, is_referenced_declarative_identifier as _is_referenced_declarative_identifier\n'), ((2048, 2077), 'vint.ast.plugin.scope_plugin.identifier_attribute.is_autoload_identifier', '_is_autoload_identifier', (['node'], {}), '(node)\n', (2071, 2077), True, 'from vint.ast.plugin.scope_plugin.identifier_attribute import is_autoload_identifier as _is_autoload_identifier, is_function_identifier as _is_function_identifier\n'), ((2139, 2168), 'vint.ast.plugin.scope_plugin.identifier_attribute.is_function_identifier', '_is_function_identifier', (['node'], {}), '(node)\n', (2162, 2168), True, 'from vint.ast.plugin.scope_plugin.identifier_attribute import is_autoload_identifier as _is_autoload_identifier, is_function_identifier as _is_function_identifier\n'), ((2615, 2663), 'vint.ast.plugin.scope_plugin.variable_name_normalizer.normalize_variable_name', '_normalize_variable_name', (['node', 'self._ref_tester'], {}), '(node, self._ref_tester)\n', (2639, 2663), True, 'from vint.ast.plugin.scope_plugin.variable_name_normalizer import normalize_variable_name as _normalize_variable_name\n'), ((1723, 1753), 'vint.ast.plugin.scope_plugin.reference_reachability_tester.is_reference_identifier', '_is_reference_identifier', (['node'], {}), '(node)\n', (1747, 1753), True, 'from vint.ast.plugin.scope_plugin.reference_reachability_tester import ReferenceReachabilityTester, is_reference_identifier as _is_reference_identifier, is_declarative_identifier as _is_declarative_identifier, is_reachable_reference_identifier as _is_reachable_reference_identifier, is_referenced_declarative_identifier as _is_referenced_declarative_identifier\n'), ((1888, 1920), 'vint.ast.plugin.scope_plugin.reference_reachability_tester.is_declarative_identifier', '_is_declarative_identifier', (['node'], {}), '(node)\n', (1914, 1920), True, 'from vint.ast.plugin.scope_plugin.reference_reachability_tester import ReferenceReachabilityTester, is_reference_identifier as _is_reference_identifier, is_declarative_identifier as _is_declarative_identifier, is_reachable_reference_identifier as _is_reachable_reference_identifier, is_referenced_declarative_identifier as _is_referenced_declarative_identifier\n'), ((1776, 1816), 'vint.ast.plugin.scope_plugin.reference_reachability_tester.is_reachable_reference_identifier', '_is_reachable_reference_identifier', (['node'], {}), '(node)\n', (1810, 1816), True, 'from vint.ast.plugin.scope_plugin.reference_reachability_tester import ReferenceReachabilityTester, is_reference_identifier as _is_reference_identifier, is_declarative_identifier as _is_declarative_identifier, is_reachable_reference_identifier as _is_reachable_reference_identifier, is_referenced_declarative_identifier as _is_referenced_declarative_identifier\n'), ((1943, 1986), 'vint.ast.plugin.scope_plugin.reference_reachability_tester.is_referenced_declarative_identifier', '_is_referenced_declarative_identifier', (['node'], {}), '(node)\n', (1980, 1986), True, 'from vint.ast.plugin.scope_plugin.reference_reachability_tester import ReferenceReachabilityTester, is_reference_identifier as _is_reference_identifier, is_declarative_identifier as _is_declarative_identifier, is_reachable_reference_identifier as _is_reachable_reference_identifier, is_referenced_declarative_identifier as _is_referenced_declarative_identifier\n')]
|
# gridftp.py
"""Module provides an interface to GridFTP command-line interface."""
from collections import namedtuple
from datetime import datetime
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
from typing import Any, List, Optional, Tuple, Union
File = namedtuple('File', ['directory', 'perms', 'subfiles', 'owner', 'group', 'size', 'date', 'name'])
logger = logging.getLogger('gridftp')
def _cmd(cmd: List[str], timeout: int = 1200) -> None:
completed_process = subprocess.run(cmd, timeout=timeout, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# if our command failed
if completed_process.returncode != 0:
logger.info(f"GridFTP._cmd Command failed: {completed_process.args}")
logger.info(f"returncode: {completed_process.returncode}")
logger.info(f"stdout: {str(completed_process.stdout)}")
logger.info(f"stderr: {str(completed_process.stderr)}")
def _cmd_output(cmd: List[str], timeout: int = 1200) -> Tuple[int, str]:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
output = p.communicate(timeout=timeout)[0].decode('utf-8')
return (p.returncode, output)
except subprocess.TimeoutExpired:
p.kill()
raise Exception('Request timed out')
def cksm(filename: str, type: str, buffersize: int = 16384, file: bool = True) -> Any:
"""Return checksum of file using algorithm specified."""
if type not in ('md5', 'sha1', 'sha256', 'sha512'):
raise Exception('cannot get checksum for type %r', type)
try:
digest = getattr(hashlib, type)()
except Exception:
raise Exception('cannot get checksum for type %r', type)
if file and os.path.exists(filename):
# checksum file contents
with open(filename, 'rb') as filed:
buffer = filed.read(buffersize)
while buffer:
digest.update(buffer)
buffer = filed.read(buffersize)
else:
# just checksum the contents of the first argument
digest.update(filename)
return digest.hexdigest()
def listify(lines: str, details: bool = False, dotfiles: bool = False) -> List[Union[File, str]]:
"""Turn ls output into a list of NamedTuples."""
out: List[Union[File, str]] = []
if details:
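        # Long-format listing: perms, link count, owner, group, size, month, day,
        # time-or-year and name, in the usual `ls -l` column order.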
months = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}
for x in lines.split('\n'):
if not x.strip():
continue
pieces = x.split()
name = pieces[-1]
if name.startswith('.') and not dotfiles:
continue
d = x[0] == 'd'
perms = pieces[0][1:]
year = datetime.now().year
month = months[pieces[5].lower()]
day = int(pieces[6])
if ':' in pieces[7]:
hour, minute = pieces[7].split(':')
dt = datetime(year, month, day, int(hour), int(minute))
else:
year = int(pieces[7])
dt = datetime(year, month, day)
out.append(File(d, perms, int(pieces[1]), pieces[2], pieces[3],
int(pieces[4]), dt, name))
else:
for x in lines.split('\n'):
if not x.strip():
continue
f = x.split()[-1]
if not f.startswith('.') or dotfiles:
out.append(f)
return out
class GridFTP(object):
"""
GridFTP interface to command line client.
Example:
GridFTP.get('gsiftp://data.icecube.wisc.edu/file',
filename='/path/to/file')
"""
_timeout = 1200 # 20 min default timeout
@classmethod
def supported_address(cls, address: str) -> bool:
"""Return False for address types that are not supported."""
if '://' not in address:
return False
addr_type = address.split(':')[0]
if addr_type not in ('gsiftp', 'ftp'):
return False
return True
@classmethod
def address_split(cls, address: str) -> Tuple[str, str]:
"""Split an address into server/path parts."""
pieces = address.split('://', 1)
if '/' in pieces[1]:
pieces2 = pieces[1].split('/', 1)
return (pieces[0]+'://'+pieces2[0], '/'+pieces2[1])
else:
return (address, '/')
@classmethod
def get(cls, address: str, filename: Optional[str] = None, request_timeout: Optional[int] = None) -> Optional[str]:
"""
Do a GridFTP get request.
Either data is returned directly or filename must be defined.
Args:
address (str): url to get from
filename (str): filename to write data to
            request_timeout (float): timeout in seconds
Returns:
str: data, if filename is not defined
Raises:
Exception for failure
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
tmpdir = None
if filename is None:
tmpdir = tempfile.mkdtemp(dir=os.getcwd())
dest = 'file:'+os.path.join(tmpdir, 'get_tmp_file')
else:
dest = 'file:'+filename
cmd = ['globus-url-copy', address, dest]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
try:
_cmd(cmd, timeout=timeout)
if filename is None:
with open(dest[5:]) as f:
return f.read()
finally:
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
return None
@classmethod
def put(cls, address: str, data: Optional[str] = None, filename: Optional[str] = None, request_timeout: Optional[int] = None) -> None:
"""
Do a GridFTP put request.
Either data or filename must be defined.
Args:
address (str): url to put to
data (str): the data to put
filename (str): filename for data to put
request_timeout (float): timeout in seconds
Raises:
Exception for failure
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
tmpdir = None
if data is not None:
tmpdir = tempfile.mkdtemp(dir=os.getcwd())
src = 'file:'+os.path.join(tmpdir, 'put_tmp_file')
with open(src[5:], 'w' if isinstance(data, str) else 'wb') as f:
f.write(data)
elif filename is not None:
src = 'file:'+filename
else:
raise Exception('Neither data or filename is defined')
cmd = ['globus-url-copy', '-cd', src, address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
try:
_cmd(cmd, timeout=timeout)
finally:
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
@classmethod
def list(cls, address: str, request_timeout: Optional[int] = None, details: bool = False, dotfiles: bool = False) -> List[Union[File, str]]:
"""
Do a GridFTP list request.
Args:
address (str): url to list
request_timeout (float): timeout in seconds
details (bool): result is a list of NamedTuples
dotfiles (bool): result includes '.', '..', and other '.' files
Returns:
list: a list of files
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-ls', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
ret = _cmd_output(cmd, timeout=timeout)
if ret[0]:
raise Exception('Error getting listing')
return listify(ret[1], details=details, dotfiles=dotfiles)
@classmethod
def mkdir(cls, address: str, request_timeout: Optional[int] = None, parents: bool = False) -> None:
"""
Make a directory on the ftp server.
Args:
address (str): url to directory
request_timeout (float): timeout in seconds
parents (bool): make parent directories as needed
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
if parents:
# recursively make directory
try:
cls.mkdir(os.path.basename(address),
request_timeout=request_timeout, parents=True)
except Exception:
pass
cmd = ['uberftp', '-retry', '5', '-mkdir', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
_cmd(cmd, timeout=timeout)
@classmethod
def rmdir(cls, address: str, request_timeout: Optional[int] = None) -> None:
"""
Remove a directory on the ftp server.
This fails if the directory is not empty. Use :py:func:`rmtree` for
recursive removal.
Args:
address (str): url to directory
request_timeout (float): timeout in seconds
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-rmdir', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
ret = _cmd_output(cmd, timeout=timeout)
if ret[0] and 'No match for' not in ret[1]:
raise Exception('Error removing dir')
@classmethod
def delete(cls, address: str, request_timeout: Optional[int] = None) -> None:
"""
Delete a file on the ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-rm', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
ret = _cmd_output(cmd, timeout=timeout)
if ret[0] and 'No match for' not in ret[1]:
raise Exception('Error removing dir')
@classmethod
def rmtree(cls, address: str, request_timeout: Optional[int] = None) -> None:
"""
Delete a file or directory on the ftp server.
This is recursive, like `rm -rf`.
Args:
address (str): url to file or directory
request_timeout (float): timeout in seconds
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-rm', '-r', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
ret = _cmd_output(cmd, timeout=timeout)
if ret[0] and 'No match for' not in ret[1]:
raise Exception('Error removing dir')
@classmethod
def move(cls, src: str, dest: str, request_timeout: Optional[int] = None) -> None:
"""
Move a file on the ftp server.
Args:
src (str): url to source file
dest (str): url to destination file
request_timeout (float): timeout in seconds
Raises:
Exception on error
"""
if not cls.supported_address(src):
raise Exception('address type not supported for src %s' % str(src))
if not cls.supported_address(dest):
raise Exception('address type not supported for dest %s' % str(dest))
cmd = ['uberftp', '-retry', '5', '-rename', src, cls.address_split(dest)[-1]]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
_cmd(cmd, timeout=timeout)
@classmethod
def exists(cls, address: str, request_timeout: Optional[int] = None) -> bool:
"""
Check if a file exists on the ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Returns:
bool: True, if the file exists on the ftp server, otherwise False
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-size', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
ret = _cmd_output(cmd, timeout=timeout)
return (not ret[0])
@classmethod
def chmod(cls, address: str, mode: str, request_timeout: Optional[int] = None) -> None:
"""
Chmod a file on the ftp server.
Args:
address (str): url to file
mode (str): mode of file
request_timeout (float): timeout in seconds
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-chmod', mode, address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
_cmd(cmd, timeout=timeout)
@classmethod
def size(cls, address: str, request_timeout: Optional[int] = None) -> int:
"""
Get the size of a file on the ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Returns:
int: size of file in bytes
Raises:
Exception on error
"""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
cmd = ['uberftp', '-retry', '5', '-size', address]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
ret = _cmd_output(cmd, timeout=timeout)
if ret[0]:
raise Exception('failed to get size')
return int(ret[1])
@classmethod
def _chksum(cls, type: str, address: str, request_timeout: Optional[int] = None) -> Any:
"""Chksum is faked by redownloading the file and checksumming that."""
if not cls.supported_address(address):
raise Exception('address type not supported for address %s' % str(address))
if type.endswith('sum'):
type = type[:-3]
tmpdir = tempfile.mkdtemp(dir=os.getcwd())
dest = 'file:'+os.path.join(tmpdir, 'dest')
cmd = ['globus-url-copy', address, dest]
if request_timeout is None:
timeout = cls._timeout
else:
timeout = request_timeout
try:
_cmd(cmd, timeout=timeout)
if not os.path.exists(dest[5:]):
raise Exception('failed to redownload')
return cksm(dest[5:], type)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
@classmethod
def md5sum(cls, address: str, request_timeout: Optional[int] = None) -> Any:
"""
Get the md5sum of a file on an ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Returns:
str: the md5sum
Raises:
Exception on error
"""
return cls._chksum('md5sum', address, request_timeout=request_timeout)
@classmethod
def sha1sum(cls, address: str, request_timeout: Optional[int] = None) -> Any:
"""
Get the sha1sum of a file on an ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Returns:
str: the sha1sum
Raises:
Exception on error
"""
return cls._chksum('sha1sum', address, request_timeout=request_timeout)
@classmethod
def sha256sum(cls, address: str, request_timeout: Optional[int] = None) -> Any:
"""
Get the sha256sum of a file on an ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Returns:
str: the sha256sum
Raises:
Exception on error
"""
return cls._chksum('sha256sum', address, request_timeout=request_timeout)
@classmethod
def sha512sum(cls, address: str, request_timeout: Optional[int] = None) -> Any:
"""
Get the sha512sum of a file on an ftp server.
Args:
address (str): url to file
request_timeout (float): timeout in seconds
Returns:
str: the sha512sum
Raises:
Exception on error
"""
return cls._chksum('sha512sum', address, request_timeout=request_timeout)
|
[
"subprocess.run",
"subprocess.Popen",
"os.path.basename",
"os.getcwd",
"os.path.exists",
"datetime.datetime.now",
"datetime.datetime",
"collections.namedtuple",
"shutil.rmtree",
"os.path.join",
"logging.getLogger"
] |
[((298, 398), 'collections.namedtuple', 'namedtuple', (['"""File"""', "['directory', 'perms', 'subfiles', 'owner', 'group', 'size', 'date', 'name']"], {}), "('File', ['directory', 'perms', 'subfiles', 'owner', 'group',\n 'size', 'date', 'name'])\n", (308, 398), False, 'from collections import namedtuple\n'), ((404, 432), 'logging.getLogger', 'logging.getLogger', (['"""gridftp"""'], {}), "('gridftp')\n", (421, 432), False, 'import logging\n'), ((513, 614), 'subprocess.run', 'subprocess.run', (['cmd'], {'timeout': 'timeout', 'check': '(False)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, timeout=timeout, check=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (527, 614), False, 'import subprocess\n'), ((1036, 1107), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (1052, 1107), False, 'import subprocess\n'), ((1748, 1772), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1762, 1772), False, 'import os\n'), ((15662, 15690), 'os.path.join', 'os.path.join', (['tmpdir', '"""dest"""'], {}), "(tmpdir, 'dest')\n", (15674, 15690), False, 'import os\n'), ((16088, 16129), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (16101, 16129), False, 'import shutil\n'), ((2814, 2828), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2826, 2828), False, 'from datetime import datetime\n'), ((3147, 3173), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (3155, 3173), False, 'from datetime import datetime\n'), ((5301, 5337), 'os.path.join', 'os.path.join', (['tmpdir', '"""get_tmp_file"""'], {}), "(tmpdir, 'get_tmp_file')\n", (5313, 5337), False, 'import os\n'), ((5782, 5823), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (5795, 5823), False, 'import shutil\n'), ((6633, 6669), 'os.path.join', 'os.path.join', (['tmpdir', '"""put_tmp_file"""'], {}), "(tmpdir, 'put_tmp_file')\n", (6645, 6669), False, 'import os\n'), ((7217, 7258), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (7230, 7258), False, 'import shutil\n'), ((15626, 15637), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15635, 15637), False, 'import os\n'), ((15937, 15961), 'os.path.exists', 'os.path.exists', (['dest[5:]'], {}), '(dest[5:])\n', (15951, 15961), False, 'import os\n'), ((5261, 5272), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5270, 5272), False, 'import os\n'), ((6594, 6605), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6603, 6605), False, 'import os\n'), ((8986, 9011), 'os.path.basename', 'os.path.basename', (['address'], {}), '(address)\n', (9002, 9011), False, 'import os\n')]
|
import collections
import inspect
import typing
import numpy as np
import pandas as pd
import torch
from river import base
__all__ = ["PyTorch2RiverBase", "PyTorch2RiverRegressor", "PyTorch2RiverClassifier"]
class PyTorch2RiverBase(base.Estimator):
"""An estimator that integrates neural Networks from PyTorch."""
def __init__(
self,
build_fn,
loss_fn: typing.Type[torch.nn.modules.loss._Loss],
optimizer_fn: typing.Type[torch.optim.Optimizer] = torch.optim.Adam,
learning_rate=1e-3,
seed=42,
**net_params,
):
self.build_fn = build_fn
self.loss_fn = loss_fn
self.loss = loss_fn()
self.optimizer_fn = optimizer_fn
self.learning_rate = learning_rate
self.net_params = net_params
self.seed = seed
torch.manual_seed(seed)
np.random.seed(seed)
self.net = None
@classmethod
def _unit_test_params(cls):
def build_torch_linear_regressor(n_features):
net = torch.nn.Sequential(
torch.nn.Linear(n_features, 1), torch.nn.Sigmoid()
)
return net
return {
"build_fn": build_torch_linear_regressor,
"loss_fn": torch.nn.MSELoss,
"optimizer_fn": torch.optim.SGD,
}
@classmethod
def _unit_test_skips(self):
"""Indicates which checks to skip during unit testing.
Most estimators pass the full test suite. However, in some cases, some estimators might not
be able to pass certain checks.
"""
return {
"check_pickling",
"check_shuffle_features_no_impact",
"check_emerging_features",
"check_disappearing_features",
"check_predict_proba_one",
"check_predict_proba_one_binary",
}
def _learn_one(self, x: torch.Tensor, y: torch.Tensor):
self.net.zero_grad()
y_pred = self.net(x)
loss = self.loss(y_pred, y)
loss.backward()
self.optimizer.step()
def learn_one(self, x: dict, y: base.typing.ClfTarget):
"""Update the model with a set of features `x` and a label `y`.
Parameters
----------
x
A dictionary of features.
y
A label.
Returns
-------
self
"""
if self.net is None:
self._init_net(n_features=len(list(x.values())))
x = torch.Tensor([list(x.values())])
y = torch.Tensor([[y]])
self._learn_one(x=x, y=y)
return self
def _filter_torch_params(self, fn, override=None):
"""Filters `torch_params` and returns those in `fn`'s arguments.
Parameters
----------
fn
arbitrary function
override
dictionary, values to override `torch_params`
Returns
-------
res
dictionary containing variables in both and fn's arguments
"""
override = override or {}
res = {}
for name, value in self.net_params.items():
args = list(inspect.signature(fn).parameters)
if name in args:
res.update({name: value})
res.update(override)
return res
def _init_net(self, n_features):
self.net = self.build_fn(
n_features=n_features, **self._filter_torch_params(self.build_fn)
)
# Only optimizers with learning rate as parameter are supported, needs to be fixed
self.optimizer = self.optimizer_fn(self.net.parameters(), self.learning_rate)
class PyTorch2RiverClassifier(PyTorch2RiverBase, base.Classifier):
"""A river classifier that integrates neural Networks from PyTorch.
Parameters
----------
build_fn
loss_fn
optimizer_fn
learning_rate
net_params
Examples
--------
>>> from river import compat
>>> from river import datasets
>>> from river import evaluate
>>> from river import metrics
>>> from river import preprocessing
>>> from torch import nn
>>> from torch import optim
>>> from torch import manual_seed
>>> _ = manual_seed(0)
>>> def build_torch_mlp_classifier(n_features):
... net = nn.Sequential(
... nn.Linear(n_features, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 1),
... nn.Sigmoid()
... )
... return net
...
>>> model = compat.PyTorch2RiverClassifier(
... build_fn= build_torch_mlp_classifier,
... loss_fn=nn.BCELoss,
... optimizer_fn=optim.Adam,
... learning_rate=1e-3
... )
>>> dataset = datasets.Phishing()
>>> metric = metrics.Accuracy()
>>> evaluate.progressive_val_score(dataset=dataset, model=model, metric=metric)
Accuracy: 74.38%
"""
def __init__(
self,
build_fn,
loss_fn: typing.Type[torch.nn.modules.loss._Loss],
optimizer_fn: typing.Type[torch.optim.Optimizer] = torch.optim.Adam,
learning_rate=1e-3,
**net_params,
):
self.classes = collections.Counter()
self.n_classes = 1
super().__init__(
build_fn=build_fn,
loss_fn=loss_fn,
optimizer_fn=optimizer_fn,
learning_rate=learning_rate,
**net_params,
)
def _update_classes(self):
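        # A previously unseen class label arrived: rebuild the last Linear layer with
        # one output per known class, keep the trained weights, seed the new row with
        # their mean, and recreate the optimizer over the new parameters.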
self.n_classes = len(self.classes)
layers = list(self.net.children())
i = -1
layer_to_convert = layers[i]
while not hasattr(layer_to_convert, "weight"):
layer_to_convert = layers[i]
i -= 1
removed = list(self.net.children())[: i + 1]
new_net = removed
new_layer = torch.nn.Linear(
in_features=layer_to_convert.in_features, out_features=self.n_classes
)
# copy the original weights back
with torch.no_grad():
new_layer.weight[:-1, :] = layer_to_convert.weight
new_layer.weight[-1:, :] = torch.mean(layer_to_convert.weight, 0)
new_net.append(new_layer)
if i + 1 < -1:
for layer in layers[i + 2 :]:
new_net.append(layer)
self.net = torch.nn.Sequential(*new_net)
self.optimizer = self.optimizer_fn(self.net.parameters(), self.learning_rate)
def learn_one(self, x: dict, y: base.typing.ClfTarget, **kwargs) -> base.Classifier:
self.classes.update([y])
# check if model is initialized
if self.net is None:
self._init_net(len(list(x.values())))
# check last layer and update if needed
if len(self.classes) != self.n_classes:
self._update_classes()
# training process
proba = {c: 0.0 for c in self.classes}
proba[y] = 1.0
x = list(x.values())
y = list(proba.values())
x = torch.Tensor([x])
y = torch.Tensor([y])
self._learn_one(x=x, y=y)
return self
def predict_proba_one(self, x: dict) -> typing.Dict[base.typing.ClfTarget, float]:
if self.net is None:
self._init_net(len(list(x.values())))
x = torch.Tensor(list(x.values()))
yp = self.net(x).detach().numpy()
proba = {c: 0.0 for c in self.classes}
for idx, val in enumerate(self.classes):
proba[val] = yp[idx]
return proba
def predict_proba_many(self, X: pd.DataFrame) -> pd.DataFrame:
if self.net is None:
self._init_net(len(X.columns))
x = torch.Tensor(list(X.to_numpy()))
yp = self.net(x).detach().numpy()
proba = {c: [0.0] * len(X) for c in self.classes}
for idx, val in enumerate(self.classes):
proba[val] = yp[idx]
return pd.DataFrame(proba)
class PyTorch2RiverRegressor(PyTorch2RiverBase, base.MiniBatchRegressor):
"""Compatibility layer from PyTorch to River for regression.
Parameters
----------
build_fn
loss_fn
optimizer_fn
learning_rate
net_params
Examples
--------
>>> from river import compat
>>> from river import datasets
>>> from river import evaluate
>>> from river import metrics
>>> from river import preprocessing
>>> from torch import nn
>>> from torch import optim
>>> _ = torch.manual_seed(0)
>>> dataset = datasets.TrumpApproval()
>>> def build_torch_mlp_regressor(n_features):
... net = nn.Sequential(
... nn.Linear(n_features, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 5),
... nn.Linear(5, 1)
... )
... return net
...
>>> model = compat.PyTorch2RiverRegressor(
... build_fn= build_torch_mlp_regressor,
... loss_fn=nn.MSELoss,
... optimizer_fn=optim.Adam,
... )
>>> metric = metrics.MAE()
>>> metric = evaluate.progressive_val_score(dataset=dataset, model=model, metric=metric)
>>> round(metric.get(), 2)
78.98
"""
def __init__(
self,
build_fn,
loss_fn: typing.Type[torch.nn.modules.loss._Loss],
optimizer_fn: typing.Type[torch.optim.Optimizer],
learning_rate=1e-3,
**net_params,
):
super().__init__(
build_fn=build_fn,
loss_fn=loss_fn,
optimizer_fn=optimizer_fn,
learning_rate=learning_rate,
**net_params,
)
def learn_many(self, X: pd.DataFrame, y: pd.Series, **kwargs):
if self.net is None:
self._init_net(n_features=len(X.columns))
x = torch.Tensor(X.to_numpy())
y = torch.Tensor([y])
self._learn_one(x=x, y=y)
return self
def predict_one(self, x):
if self.net is None:
self._init_net(len(x))
x = torch.Tensor(list(x.values()))
return self.net(x).item()
def predict_many(self, X: pd.DataFrame) -> pd.Series:
if self.net is None:
self._init_net(len(X.columns))
x = torch.Tensor(X.to_numpy())
return pd.Series(self.net(x).item())
|
[
"pandas.DataFrame",
"torch.mean",
"numpy.random.seed",
"torch.nn.Sequential",
"torch.manual_seed",
"torch.Tensor",
"inspect.signature",
"torch.nn.Linear",
"collections.Counter",
"torch.no_grad",
"torch.nn.Sigmoid"
] |
[((832, 855), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (849, 855), False, 'import torch\n'), ((864, 884), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (878, 884), True, 'import numpy as np\n'), ((2531, 2550), 'torch.Tensor', 'torch.Tensor', (['[[y]]'], {}), '([[y]])\n', (2543, 2550), False, 'import torch\n'), ((5213, 5234), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (5232, 5234), False, 'import collections\n'), ((5849, 5940), 'torch.nn.Linear', 'torch.nn.Linear', ([], {'in_features': 'layer_to_convert.in_features', 'out_features': 'self.n_classes'}), '(in_features=layer_to_convert.in_features, out_features=self\n .n_classes)\n', (5864, 5940), False, 'import torch\n'), ((6326, 6355), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*new_net'], {}), '(*new_net)\n', (6345, 6355), False, 'import torch\n'), ((6990, 7007), 'torch.Tensor', 'torch.Tensor', (['[x]'], {}), '([x])\n', (7002, 7007), False, 'import torch\n'), ((7020, 7037), 'torch.Tensor', 'torch.Tensor', (['[y]'], {}), '([y])\n', (7032, 7037), False, 'import torch\n'), ((7876, 7895), 'pandas.DataFrame', 'pd.DataFrame', (['proba'], {}), '(proba)\n', (7888, 7895), True, 'import pandas as pd\n'), ((9771, 9788), 'torch.Tensor', 'torch.Tensor', (['[y]'], {}), '([y])\n', (9783, 9788), False, 'import torch\n'), ((6012, 6027), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6025, 6027), False, 'import torch\n'), ((6131, 6169), 'torch.mean', 'torch.mean', (['layer_to_convert.weight', '(0)'], {}), '(layer_to_convert.weight, 0)\n', (6141, 6169), False, 'import torch\n'), ((1069, 1099), 'torch.nn.Linear', 'torch.nn.Linear', (['n_features', '(1)'], {}), '(n_features, 1)\n', (1084, 1099), False, 'import torch\n'), ((1101, 1119), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (1117, 1119), False, 'import torch\n'), ((3146, 3167), 'inspect.signature', 'inspect.signature', (['fn'], {}), '(fn)\n', (3163, 3167), False, 'import inspect\n')]
|
import ipaddress
import os
import shutil
from natlas import logging
utillogger = logging.get_logger("Utilities")
def validate_target(target, config):
try:
iptarget = ipaddress.ip_address(target)
if iptarget.is_private and not config.scan_local:
utillogger.error("We're not configured to scan local addresses!")
return False
    except ValueError:  # ip_address() raises a plain ValueError for invalid strings; AddressValueError is a subclass, so both are covered
utillogger.error("%s is not a valid IP Address" % target)
return False
return True
def create_data_dir(scan_id):
data_folder = f"data/natlas.{scan_id}"
os.makedirs(data_folder, exist_ok=True)
def get_data_dir(scan_id):
data_folder = f"data/natlas.{scan_id}"
return data_folder
def delete_files(scan_id):
data_folder = f"data/natlas.{scan_id}"
if os.path.isdir(data_folder):
shutil.rmtree(data_folder)
def save_files(scan_id):
failroot = "data/failures"
if not os.path.isdir(failroot):
os.mkdir(failroot)
if os.path.isdir(f"data/natlas.{scan_id}"):
src = f"data/natlas.{scan_id}"
dst = f"data/failures/"
shutil.move(src, dst)
def cleanup_files(scan_id, failed=False, saveFails=False):
utillogger.info("Cleaning up files for %s" % scan_id)
if saveFails and failed:
save_files(scan_id)
else:
delete_files(scan_id)
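# Hypothetical usage sketch (not part of the original module; the scan id and the
# minimal config object below are illustrative assumptions): a scan worker would
# validate the target, create the per-scan data directory, and clean it up afterwards.
if __name__ == "__main__":
    class _DummyConfig:
        scan_local = False
    if validate_target("8.8.8.8", _DummyConfig()):
        create_data_dir("example-scan")
        print(get_data_dir("example-scan"))
        cleanup_files("example-scan", failed=False, saveFails=False)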
|
[
"os.mkdir",
"os.makedirs",
"os.path.isdir",
"ipaddress.ip_address",
"natlas.logging.get_logger",
"shutil.move",
"shutil.rmtree"
] |
[((84, 115), 'natlas.logging.get_logger', 'logging.get_logger', (['"""Utilities"""'], {}), "('Utilities')\n", (102, 115), False, 'from natlas import logging\n'), ((538, 577), 'os.makedirs', 'os.makedirs', (['data_folder'], {'exist_ok': '(True)'}), '(data_folder, exist_ok=True)\n', (549, 577), False, 'import os\n'), ((740, 766), 'os.path.isdir', 'os.path.isdir', (['data_folder'], {}), '(data_folder)\n', (753, 766), False, 'import os\n'), ((910, 949), 'os.path.isdir', 'os.path.isdir', (['f"""data/natlas.{scan_id}"""'], {}), "(f'data/natlas.{scan_id}')\n", (923, 949), False, 'import os\n'), ((174, 202), 'ipaddress.ip_address', 'ipaddress.ip_address', (['target'], {}), '(target)\n', (194, 202), False, 'import ipaddress\n'), ((770, 796), 'shutil.rmtree', 'shutil.rmtree', (['data_folder'], {}), '(data_folder)\n', (783, 796), False, 'import shutil\n'), ((860, 883), 'os.path.isdir', 'os.path.isdir', (['failroot'], {}), '(failroot)\n', (873, 883), False, 'import os\n'), ((887, 905), 'os.mkdir', 'os.mkdir', (['failroot'], {}), '(failroot)\n', (895, 905), False, 'import os\n'), ((1012, 1033), 'shutil.move', 'shutil.move', (['src', 'dst'], {}), '(src, dst)\n', (1023, 1033), False, 'import shutil\n')]
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='gmreader',
version='0.1.6',
description='Let python read your google emails to you. Listen to your gmails instead of reading them',
author='<NAME>',
license='MIT',
keywords = "email gmail google mail read text to speech",
author_email='<EMAIL>',
url='http://github.com/jawerty/gmreader',
scripts=['gmreader.py'],
install_requires=['BeautifulSoup'],
entry_points = {
'console_scripts': [
'gmreader = gmreader:main'
],
}
)
|
[
"setuptools.setup"
] |
[((53, 503), 'setuptools.setup', 'setup', ([], {'name': '"""gmreader"""', 'version': '"""0.1.6"""', 'description': '"""Let python read your google emails to you. Listen to your gmails instead of reading them"""', 'author': '"""<NAME>"""', 'license': '"""MIT"""', 'keywords': '"""email gmail google mail read text to speech"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://github.com/jawerty/gmreader"""', 'scripts': "['gmreader.py']", 'install_requires': "['BeautifulSoup']", 'entry_points': "{'console_scripts': ['gmreader = gmreader:main']}"}), "(name='gmreader', version='0.1.6', description=\n 'Let python read your google emails to you. Listen to your gmails instead of reading them'\n , author='<NAME>', license='MIT', keywords=\n 'email gmail google mail read text to speech', author_email='<EMAIL>',\n url='http://github.com/jawerty/gmreader', scripts=['gmreader.py'],\n install_requires=['BeautifulSoup'], entry_points={'console_scripts': [\n 'gmreader = gmreader:main']})\n", (58, 503), False, 'from setuptools import setup\n')]
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""services enable command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.services import services_util
from googlecloudsdk.api_lib.services import serviceusage
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.services import common_flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
_OP_BASE_CMD = 'gcloud beta services operations '
_OP_WAIT_CMD = _OP_BASE_CMD + 'wait {0}'
_DETAILED_HELP = {
'DESCRIPTION':
"""\
This command enables a service for consumption for a project.
To see a list of available services for a project, run:
$ {parent_command} list --available
More information on listing services can be found at:
https://cloud.google.com/service-usage/docs/list-services and on
disabling a service at:
https://cloud.google.com/service-usage/docs/enable-disable
""",
'EXAMPLES':
"""\
To enable a service called `my-consumed-service` on the current
project, run:
$ {command} my-consumed-service
To run the same command asynchronously (non-blocking), run:
$ {command} my-consumed-service --async
To enable services called `service1`, `service2`, and `service3` on the
current project, run:
$ {command} service1 service2 service3
""",
}
class Enable(base.SilentCommand):
"""Enables a service for consumption for a project."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
common_flags.available_service_flag(suffix='to enable').AddToParser(parser)
base.ASYNC_FLAG.AddToParser(parser)
def Run(self, args):
"""Run 'services enable'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
Nothing.
"""
project = properties.VALUES.core.project.Get(required=True)
if len(args.service) == 1:
op = serviceusage.EnableApiCall(project, args.service[0])
else:
op = serviceusage.BatchEnableApiCall(project, args.service)
if op.done:
return
if args.async_:
cmd = _OP_WAIT_CMD.format(op.name)
log.status.Print('Asynchronous operation is in progress... '
'Use the following command to wait for its '
'completion:\n {0}'.format(cmd))
return
op = services_util.WaitOperation(op.name, serviceusage.GetOperation)
services_util.PrintOperation(op)
Enable.detailed_help = _DETAILED_HELP
|
[
"googlecloudsdk.core.properties.VALUES.core.project.Get",
"googlecloudsdk.calliope.base.ASYNC_FLAG.AddToParser",
"googlecloudsdk.api_lib.services.serviceusage.BatchEnableApiCall",
"googlecloudsdk.command_lib.services.common_flags.available_service_flag",
"googlecloudsdk.api_lib.services.services_util.PrintOperation",
"googlecloudsdk.api_lib.services.services_util.WaitOperation",
"googlecloudsdk.api_lib.services.serviceusage.EnableApiCall"
] |
[((2560, 2595), 'googlecloudsdk.calliope.base.ASYNC_FLAG.AddToParser', 'base.ASYNC_FLAG.AddToParser', (['parser'], {}), '(parser)\n', (2587, 2595), False, 'from googlecloudsdk.calliope import base\n'), ((2804, 2853), 'googlecloudsdk.core.properties.VALUES.core.project.Get', 'properties.VALUES.core.project.Get', ([], {'required': '(True)'}), '(required=True)\n', (2838, 2853), False, 'from googlecloudsdk.core import properties\n'), ((3328, 3391), 'googlecloudsdk.api_lib.services.services_util.WaitOperation', 'services_util.WaitOperation', (['op.name', 'serviceusage.GetOperation'], {}), '(op.name, serviceusage.GetOperation)\n', (3355, 3391), False, 'from googlecloudsdk.api_lib.services import services_util\n'), ((3396, 3428), 'googlecloudsdk.api_lib.services.services_util.PrintOperation', 'services_util.PrintOperation', (['op'], {}), '(op)\n', (3424, 3428), False, 'from googlecloudsdk.api_lib.services import services_util\n'), ((2896, 2948), 'googlecloudsdk.api_lib.services.serviceusage.EnableApiCall', 'serviceusage.EnableApiCall', (['project', 'args.service[0]'], {}), '(project, args.service[0])\n', (2922, 2948), False, 'from googlecloudsdk.api_lib.services import serviceusage\n'), ((2970, 3024), 'googlecloudsdk.api_lib.services.serviceusage.BatchEnableApiCall', 'serviceusage.BatchEnableApiCall', (['project', 'args.service'], {}), '(project, args.service)\n', (3001, 3024), False, 'from googlecloudsdk.api_lib.services import serviceusage\n'), ((2480, 2535), 'googlecloudsdk.command_lib.services.common_flags.available_service_flag', 'common_flags.available_service_flag', ([], {'suffix': '"""to enable"""'}), "(suffix='to enable')\n", (2515, 2535), False, 'from googlecloudsdk.command_lib.services import common_flags\n')]
|
import numpy as np
import math
import matplotlib.pyplot as plt
import pickle
from time import time
from numpy.linalg import matrix_rank
from numpy.linalg import pinv,inv
from numpy.linalg import eig as eig
from numpy.linalg import eigh,lstsq
from numpy.linalg import matrix_power
from scipy.linalg import expm,pinvh,solve
from tqdm.notebook import tqdm,trange
from support.omniglot_loaders import OmniglotNShot
from support.tools import *
from sklearn import decomposition
from scipy.spatial.distance import pdist,squareform
from sklearn.neighbors import NearestCentroid
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestNeighbors,KNeighborsClassifier
from meta_cntk import MetaCNTK
import argparse
from types import SimpleNamespace
from sklearn.decomposition import PCA,IncrementalPCA,KernelPCA
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from collections import deque
import typing
import pandas as pd
import matplotlib as mpl
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import higher
def load_dataset(n_task,random=True,seed=0,load_embeddings=False):
    # Load the preprocessed Omniglot dataset for 20-way 1-shot classification
# path = f'saved_models/tasks-{n_task}.p'
# if os.path.exists(path):
# print(f"Dataset exists at {path}")
# tasks = pickle.load(open(path,'rb'))
# else:
tasks = pickle.load(open(f'saved_models/tasks-200.p', 'rb'))
# Get the subset of the size we need
n_all_tasks = len(tasks['X_qry'])
assert n_all_tasks == 200
assert n_task <= n_all_tasks
if random:
np.random.seed(seed)
idxes = np.random.choice(n_all_tasks, size=n_task, replace=False)
else:
idxes = np.arange(n_task)
tasks['X_qry'] = tasks['X_qry'][idxes]
tasks['X_spt'] = tasks['X_spt'][idxes]
tasks['Y_qry'] = tasks['Y_qry'][idxes]
tasks['Y_spt'] = tasks['Y_spt'][idxes]
tasks['idx_Xs']= tasks['idx_Xs'][idxes]
tasks['idx_Xs_'] = tasks['idx_Xs_'][idxes]
tasks['n_task'] = n_task
# if load_embeddings:
# embeddings = load_label_embeddings(200,random_cnn=random_cnn_embedding)
# tasks['Y_qry_emb'] = embeddings['Y_qry_emb'][idxes]
# tasks['Y_spt_emb'] = embeddings['Y_spt_emb'][idxes]
# tasks['test_Y_qry_emb'] = embeddings['test_Y_qry_emb']
# tasks['test_Y_spt_emb'] = embeddings['test_Y_spt_emb']
tasks['load_embeddings'] = load_embeddings
return SimpleNamespace(**tasks)
def load_precomputed_base_kernels(dataset,kernel='CNTK'):
# path = f'saved_models/CNTK-{n_task}.npy'
# if os.path.exists(path):
# print(f"Precomputed CNTK exists at {path}")
# CNTK = np.load(path)
# else:
CNTK_all = np.load(f'saved_models/CNTK-200.npy')
all_idxes = np.concatenate([dataset.idx_Xs.flatten(),
dataset.idx_Xs_.flatten(),
dataset.idx_test_Xs.flatten(),
dataset.idx_test_Xs_.flatten()])
dataset.all_idxes = all_idxes
dataset.CNTK = CNTK_all[all_idxes][:, all_idxes]
def load_label_embeddings(n_task,random_cnn=False):
# postfix = f'-{emb_method}' if emb_method != '' else ''
if not random_cnn:
path = f'saved_models/tasks-{n_task}-embeddings.p'
else:
path = f'saved_models/tasks-{n_task}-embeddings-random_cnn.p'
embedding_dict = pickle.load(open(path, 'rb'))
return embedding_dict
def get_embeddings_from_PCA(dataset, n_components=784, PCA_method='regular'):
# 784 = 28*28, which is the number of pixels in each original image.
    # It is also the maximum n_components for PCA that we can choose
X_qry = dataset.X_qry if not dataset.load_embeddings else dataset.Y_qry_emb
X_spt = dataset.X_spt if not dataset.load_embeddings else dataset.Y_spt_emb
test_X_qry = dataset.test_X_qry if not dataset.load_embeddings else dataset.test_Y_qry_emb
test_X_spt = dataset.test_X_spt if not dataset.load_embeddings else dataset.test_Y_spt_emb
if PCA_method == 'regular':
pca = PCA(n_components=n_components, svd_solver='randomized')
else:
assert PCA_method in ['linear', 'poly', 'rbf', 'sigmoid', 'cosine']
pca = KernelPCA(n_components=n_components, kernel=PCA_method, fit_inverse_transform=True)
if not dataset.load_embeddings:
# Reshape images from vectors to their original size: 32*32
new_shape = (-1, 32, 32)
X_train = np.concatenate([dataset.X_qry.reshape(new_shape), dataset.X_spt.reshape(new_shape)], axis=0)
X_train = X_train[:, 2:-2, 2:-2] # remove paddings
pca.fit(X_train.reshape(X_train.shape[0], -1))
else:
emb_dim = X_qry.shape[-1]
X_train = np.concatenate([X_qry.reshape(-1,emb_dim),
X_spt.reshape(-1,emb_dim)], axis=0)
pca.fit(X_train)
# print('X_train',X_train.shape)
Xs = [X_qry, X_spt, test_X_qry, test_X_spt]
Y_qry_emb, Y_spt_emb, test_Y_qry_emb, test_Y_spt_emb = [], [], [], []
Ys = [Y_qry_emb, Y_spt_emb, test_Y_qry_emb, test_Y_spt_emb]
for x, y in zip(Xs, Ys):
# The following 3 lines are to remove the padding in original images (28*28 pixels),
# since we pad the original images to 32*32 for convenience of CNTK computing via CUDA
if not dataset.load_embeddings:
x = x.reshape(-1, 32, 32)
x = x[:, 2:-2, 2:-2]
x = x.reshape(x.shape[0], -1)
else:
x = x.reshape(-1,emb_dim)
result = pca.transform(x)
y.append(result)
dataset.Y_qry_emb, dataset.Y_spt_emb, dataset.test_Y_qry_emb, dataset.test_Y_spt_emb = Y_qry_emb[0], Y_spt_emb[0], test_Y_qry_emb[0], \
test_Y_spt_emb[0]
# return SimpleNamespace(Y_qry_emb=Y_qry_emb, Y_spt_emb=Y_spt_emb,
# test_Y_qry_emb=test_Y_qry_emb, test_Y_spt_emb=test_Y_spt_emb)
def preprocess_label_embeddings(dataset,pred_test_Y_qry=None,test_all = False):
# Find the center of embeddings in each class, then use this center as the label for this class
n_components = dataset.Y_qry_emb.shape[-1]
Y_qry_emb = dataset.Y_qry_emb.reshape(*dataset.Y_qry.shape, n_components)
Y_spt_emb = dataset.Y_spt_emb.reshape(*dataset.Y_spt.shape, n_components)
test_Y_qry_emb = dataset.test_Y_qry_emb.reshape(*dataset.test_Y_qry.shape, n_components)
test_Y_spt_emb = dataset.test_Y_spt_emb.reshape(*dataset.test_Y_spt.shape, n_components)
# Y_qry_emb,Y_spt_emb
clf = NearestCentroid()
Y_train = np.concatenate([dataset.Y_qry, dataset.Y_spt], axis=1)
Y_train_emb = np.concatenate([Y_qry_emb, Y_spt_emb], axis=1)
N_train = len(Y_train)
n_class = len(np.unique(Y_train[0]))
Y_centroids = []
for i in range(N_train):
clf.fit(Y_train_emb[i], Y_train[i])
for j in range(n_class):
Y_train_emb[i][Y_train[i] == j] = clf.centroids_[j]
centroids = clf.centroids_
Y_centroids.append(centroids)
Y_qry_emb = Y_train_emb[:, :dataset.Y_qry.shape[1], :]
Y_spt_emb = Y_train_emb[:, dataset.Y_qry.shape[1]:, :]
Y_centroids = np.array(Y_centroids)
# Y_qry_emb,Y_spt_emb
clf = NearestCentroid()
Y_test = np.concatenate([dataset.test_Y_qry, dataset.test_Y_spt], axis=1)
Y_test_emb = np.concatenate([test_Y_qry_emb, test_Y_spt_emb], axis=1)
if pred_test_Y_qry is not None:
pred_Y_test = np.concatenate([pred_test_Y_qry,dataset.test_Y_spt],axis=1)
N_test = len(Y_test)
n_class = len(np.unique(Y_test[0]))
test_Y_centroids = []
for i in range(N_test):
if pred_test_Y_qry is None:
Y_emb = test_Y_spt_emb[i]
Y = dataset.test_Y_spt[i]
clf.fit(Y_emb, Y)
else:
clf.fit(Y_test_emb[i], pred_Y_test[i])
for j in range(n_class):
nbrs = NearestNeighbors(n_neighbors=1)
embs = Y_test_emb[i][pred_Y_test[i] == j]
nbrs.fit(embs)
_,[[emb_idx]] = nbrs.kneighbors([clf.centroids_[j]])
clf.centroids_[j]=embs[emb_idx]
for j in range(n_class):
Y_test_emb[i][Y_test[i] == j] = clf.centroids_[j]
centroids = clf.centroids_
test_Y_centroids.append(centroids)
test_Y_qry_emb = Y_test_emb[:, :dataset.test_Y_qry.shape[1], :]
test_Y_spt_emb = Y_test_emb[:, dataset.test_Y_qry.shape[1]:, :]
test_Y_centroids = np.array(test_Y_centroids)
dataset.Y_qry_emb=Y_qry_emb
dataset.Y_spt_emb=Y_spt_emb
dataset.test_Y_qry_emb=test_Y_qry_emb
dataset.test_Y_spt_emb=test_Y_spt_emb
dataset.Y_centroids=Y_centroids
dataset.test_Y_centroids=test_Y_centroids
dataset.n_components=n_components
dataset.N_train = N_train
dataset.N_test = N_test
def pred_from_emb(embeddings, dataset, n_neighbors=1):
nbrs = NearestNeighbors(n_neighbors=n_neighbors)
assert len(embeddings) == len(dataset.test_Y_centroids)
preds = []
for i in range(dataset.N_test):
nbrs.fit(dataset.test_Y_centroids[i])
emb = embeddings[i]
_, pred = nbrs.kneighbors(emb)
pred = pred.flatten()
preds.append(pred)
preds = np.array(preds)
return preds
def build_MetaCNTK(dataset, ridge_coef=[1e-5, 1e-5], normalize_NTK=True, normalize_metaNTK=True):
model = MetaCNTK(d_max=20, fix=False, GAP=True,
inner_lr=np.inf, train_time=np.inf,
invMetaNTK=False,
kernel_ridge=True,
ridge_coef=ridge_coef,
normalize_NTK=normalize_NTK,
normalize_metaNTK=normalize_metaNTK)
model.fit(dataset.X_qry,dataset.Y_qry_emb,dataset.X_spt,dataset.Y_spt_emb)
model.load_test_tasks(X_query=dataset.test_X_qry,X_support=dataset.test_X_spt,Y_support=dataset.test_Y_spt_emb)
model.load_precompute_NTKs(dataset.CNTK)
return model
def test_MetaCNTK(dataset,model):
t0 = time()
pred_test_Y = model.predict()
print(f"Took {round(time() - t0, 2)}")
loss = np.mean( (pred_test_Y - dataset.test_Y_qry_emb)**2)
pred_test_Y = pred_from_emb(pred_test_Y, dataset)
pred_test_Y = pred_test_Y.reshape(*dataset.test_Y_qry.shape)
test_acc = np.mean(pred_test_Y == dataset.test_Y_qry)
return test_acc, pred_test_Y, loss
def augment_train_data(dataset,enlarge_ratio=10,n_way=5,n_shot=1,seed=0):
new_n_task =dataset.n_task*enlarge_ratio
X = np.concatenate([dataset.X_qry,dataset.X_spt],axis=1)
Y = np.concatenate([dataset.Y_qry,dataset.Y_spt],axis=1)
idx_X = np.concatenate([dataset.idx_Xs,dataset.idx_Xs_],axis=1)
dict_idx_x = {}
for i in range(idx_X.shape[0]):
for j in range(idx_X.shape[1]):
idx = idx_X[i][j]
x = X[i][j]
dict_idx_x[idx] = x
n_local_labels = len(np.unique(Y))
n_global_labels = 0
for i in range(Y.shape[0]):
Y[i] += n_global_labels
n_global_labels += n_local_labels
global_labels = np.unique(Y)
Y = Y.flatten()
idx_X = idx_X.flatten()
dict_label_idx = {}
dict_idx_label = {}
for label in global_labels:
idxes_for_label = idx_X[Y == label]
dict_label_idx[label] = idxes_for_label
for idx in idxes_for_label:
dict_idx_label[idx] = label
X_qry,X_spt,Y_spt,Y_qry,idx_X_qry,idx_X_spt = [],[],[],[],[],[]
np.random.seed(seed)
all_labels = np.concatenate([np.random.choice(global_labels, size=len(global_labels), replace=False) for _ in
range(enlarge_ratio)]).reshape(-1, n_way)
assert len(all_labels) == new_n_task
for i_task in range(new_n_task):
# labels = np.random.choice(global_labels,size = n_way,replace=False)
labels = all_labels[i_task]
idx_X_qry.append([]),idx_X_spt.append([])
# Y_qry.append([]),Y_spt.append([])
for label in labels:
# print(labels)
idx_spt,idx_qry = train_test_split(dict_label_idx[label],train_size = n_shot)
idx_X_qry[-1].append(idx_qry)
idx_X_spt[-1].append(idx_spt)
idx_X_qry = np.array(idx_X_qry).reshape(len(idx_X_qry),-1)
idx_X_spt = np.array(idx_X_spt).reshape(len(idx_X_spt),-1)
Y_qry_emb,Y_spt_emb,test_Y_qry_emb,test_Y_spt_emb = [],[],[],[]
for idx in idx_X_qry.flatten():
Y_qry.append(dict_idx_label[idx])
X_qry.append(dict_idx_x[idx])
Y_qry_emb.append(dataset.dict_idx_emb[idx])
for idx in idx_X_spt.flatten():
Y_spt.append(dict_idx_label[idx])
X_spt.append(dict_idx_x[idx])
Y_spt_emb.append(dataset.dict_idx_emb[idx])
x_shape = X_spt[0].shape
emb_shape = Y_spt_emb[0].shape
Y_qry,Y_spt = np.array(Y_qry),np.array(Y_spt)
Y_qry_emb,Y_spt_emb = np.array(Y_qry_emb), np.array(Y_spt_emb)
X_qry,X_spt = np.array(X_qry),np.array(X_spt)
Y_qry,Y_spt = Y_qry.reshape(idx_X_qry.shape),Y_spt.reshape(idx_X_spt.shape)
Y_qry_emb,Y_spt_emb = Y_qry_emb.reshape(idx_X_qry.shape+emb_shape), Y_spt_emb.reshape(idx_X_spt.shape+emb_shape)
X_qry,X_spt = X_qry.reshape(idx_X_qry.shape + x_shape),X_spt.reshape(idx_X_spt.shape+x_shape)
from copy import deepcopy
np.random.seed(seed)
for i in range(len(Y_qry)):
ys_qry = deepcopy(Y_qry[i])
ys_spt = deepcopy(Y_spt[i])
label_mapping = {}
labels = np.unique(ys_qry)
new_labels = np.arange(n_way)
np.random.shuffle(new_labels)
for label,new_label in zip(labels,new_labels):
Y_qry[i][ys_qry==label] = new_label
Y_spt[i][ys_spt==label] = new_label
dataset.idx_Xs = idx_X_qry
dataset.idx_Xs_ = idx_X_spt
dataset.X_qry = X_qry
dataset.X_spt = X_spt
dataset.Y_qry = Y_qry
dataset.Y_spt = Y_spt
dataset.Y_qry_emb = Y_qry_emb
dataset.Y_spt_emb = Y_spt_emb
def train_supervised(model, device, train_loader, optimizer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
def test_supervised(model, device, test_loader, verbose=False):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = correct / len(test_loader.dataset)
if verbose:
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss,test_acc
class NaiveDataset(torch.utils.data.Dataset):
def __init__(self, samples,labels):
'Initialization'
self.labels = torch.from_numpy(labels).long()
self.samples = torch.from_numpy(samples).float()
assert len(labels) == len(samples)
def __len__(self):
'Denotes the total number of samples'
return len(self.labels)
def __getitem__(self, index):
'Generates one sample of data'
X = self.samples[index]
y = self.labels[index]
return X, y
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
def build_CNN(n_way,device,n_channel=64,batch_norm = True,dropout=None):
if dropout == 0:
dropout = None
modules = [nn.Conv2d(1, n_channel, 3),
nn.BatchNorm2d(n_channel, momentum=1, affine=True) if batch_norm else None,
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(dropout) if dropout is not None else None,
nn.Conv2d(n_channel, n_channel, 3),
nn.BatchNorm2d(n_channel, momentum=1, affine=True) if batch_norm else None,
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(dropout) if dropout is not None else None,
nn.Conv2d(n_channel, n_channel, 3),
nn.BatchNorm2d(n_channel, momentum=1, affine=True) if batch_norm else None,
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(dropout) if dropout is not None else None,
Flatten(),
nn.Linear(n_channel, n_way)]
    # Drop the None placeholders. Deleting entries from the list while iterating
    # over enumerate() shifts the indices and can leave None behind, which would
    # make nn.Sequential(*modules) fail below.
    modules = [module for module in modules if module is not None]
net = nn.Sequential(*modules).to(device)
net.eval()
return net
def get_train_data(dataset, n_test_per_class=0):
X = np.concatenate([dataset.X_qry, dataset.X_spt], axis=1)
Y = np.concatenate([dataset.Y_qry, dataset.Y_spt], axis=1)
idx_X = np.concatenate([dataset.idx_Xs, dataset.idx_Xs_], axis=1)
dict_idx_x = {}
for i in range(idx_X.shape[0]):
for j in range(idx_X.shape[1]):
idx = idx_X[i][j]
x = X[i][j]
dict_idx_x[idx] = x
n_local_labels = len(np.unique(Y))
n_global_labels = 0
for i in range(Y.shape[0]):
Y[i] += n_global_labels
n_global_labels += n_local_labels
global_labels = np.unique(Y)
Y = Y.flatten()
idx_X = idx_X.flatten()
dict_label_idx = {}
dict_idx_label = {}
for label in global_labels:
idxes_for_label = idx_X[Y == label]
dict_label_idx[label] = idxes_for_label
for idx in idxes_for_label:
dict_idx_label[idx] = label
labels = []
samples = []
for label, idxes in dict_label_idx.items():
if n_test_per_class > 0:
idxes = idxes[:-n_test_per_class]
for idx in idxes:
labels.append(label)
samples.append(dict_idx_x[idx])
samples = np.array(samples)
labels = np.array(labels)
if samples.shape[-1] == 32: # remove useless padding
samples = samples[:, :, 2:-2, 2:-2]
train_set = {'samples': samples, 'labels': labels}
n_class = len(dict_label_idx.keys())
assert n_class == len(np.unique(train_set['labels']))
assert np.max(train_set['labels']) == n_class - 1
train_set['n_class'] = n_class
if n_test_per_class > 0:
labels = []
samples = []
for label, idxes in dict_label_idx.items():
idxes = idxes[-n_test_per_class:]
for idx in idxes:
labels.append(label)
samples.append(dict_idx_x[idx])
samples = np.array(samples)
labels = np.array(labels)
if samples.shape[-1] == 32: # remove useless padding
samples = samples[:, :, 2:-2, 2:-2]
test_set = {'samples': samples, 'labels': labels}
return train_set, test_set
else:
return train_set,None
def pretrain(net,train_set, test_set, device, batch_size=64, lr=1e-3, epochs=40, seed=0,weight_decay=0.):
if epochs == 0:
return net
kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
torch.manual_seed(seed)
np.random.seed(seed)
train_loader = torch.utils.data.DataLoader(
NaiveDataset(train_set['samples'], train_set['labels']),
batch_size=batch_size, shuffle=True, **kwargs)
if test_set is not None:
test_loader = torch.utils.data.DataLoader(
NaiveDataset(test_set['samples'], test_set['labels']),
batch_size=batch_size, shuffle=True, **kwargs)
optimizer = optim.Adam(net.parameters(), lr=lr,weight_decay=weight_decay)
test_accs = []
test_losses = []
for epoch in trange(epochs, leave=False, desc='Train Supervised'):
train_supervised(net, device, train_loader, optimizer)
if test_set is not None:
test_loss, test_acc = test_supervised(net, device, test_loader)
test_accs.append(test_acc)
test_losses.append(test_loss)
if test_set is not None:
return net,np.array(test_accs),np.array(test_losses)
else:
return net, None, None
def encode_labels(dataset,net,device):
feature_extractor = net[:-1]
Y_qry_emb,Y_spt_emb,test_Y_qry_emb,test_Y_spt_emb = [],[],[],[]
for x,y in [(dataset.X_qry,Y_qry_emb),(dataset.X_spt,Y_spt_emb),
(dataset.test_X_qry,test_Y_qry_emb),(dataset.test_X_spt,test_Y_spt_emb)]:
x = x.reshape(-1,1,32,32)
x = x[:,:,2:-2,2:-2]
x = torch.from_numpy(x).to(device)
x = x.reshape(( -1, 5 ,)+x.shape[1:]) # reshape into batches of size = 5 for memory efficiency
result = []
for batch_x in x:
result.append(feature_extractor(batch_x).detach().cpu().numpy())
result = np.concatenate(result,axis=0)
y.append(result)
Y_qry_emb,Y_spt_emb,test_Y_qry_emb,test_Y_spt_emb = Y_qry_emb[0],Y_spt_emb[0],test_Y_qry_emb[0],test_Y_spt_emb[0]
emb_dim = Y_qry_emb.shape[-1]
dict_idx_emb = {}
for embs, idxes in [(Y_qry_emb, dataset.idx_Xs), (Y_spt_emb, dataset.idx_Xs_),
(test_Y_qry_emb, dataset.idx_test_Xs),
(test_Y_spt_emb, dataset.idx_test_Xs_)]:
idxes = idxes.flatten()
for emb, idx in zip(embs, idxes):
dict_idx_emb[idx] = emb
dataset.Y_qry_emb = Y_qry_emb.reshape(dataset.n_task,-1,emb_dim)
dataset.Y_spt_emb = Y_spt_emb.reshape(dataset.n_task,-1,emb_dim)
dataset.test_Y_qry_emb = test_Y_qry_emb.reshape(dataset.test_Y_qry.shape+(emb_dim,))
dataset.test_Y_spt_emb = test_Y_spt_emb.reshape(dataset.test_Y_spt.shape+(emb_dim,))
dataset.dict_idx_emb = dict_idx_emb
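# Hypothetical end-to-end driver (a sketch, not part of the original file): it chains
# the helpers above in their intended order. The task count, epoch count, and device
# string are illustrative assumptions; the real experiment scripts may differ.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dataset = load_dataset(n_task=100, random=True, seed=0)
    load_precomputed_base_kernels(dataset)                     # pairwise CNTK between all images
    train_set, test_set = get_train_data(dataset, n_test_per_class=1)
    net = build_CNN(n_way=train_set['n_class'], device=device)
    net, _, _ = pretrain(net, train_set, test_set, device, epochs=10)
    encode_labels(dataset, net, device)                        # CNN features as label embeddings
    preprocess_label_embeddings(dataset)                       # snap embeddings to per-class centroids
    model = build_MetaCNTK(dataset)
    test_acc, pred_test_Y, loss = test_MetaCNTK(dataset, model)
    print(f"MetaCNTK test accuracy: {test_acc:.4f}")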
|
[
"numpy.load",
"numpy.random.seed",
"sklearn.model_selection.train_test_split",
"tqdm.notebook.trange",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"numpy.unique",
"numpy.max",
"sklearn.neighbors.NearestNeighbors",
"numpy.random.choice",
"torch.nn.Linear",
"sklearn.neighbors.NearestCentroid",
"meta_cntk.MetaCNTK",
"types.SimpleNamespace",
"numpy.random.shuffle",
"copy.deepcopy",
"torch.nn.Dropout2d",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.functional.cross_entropy",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.MaxPool2d",
"numpy.concatenate",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.Sequential",
"time.time",
"numpy.array",
"sklearn.decomposition.PCA",
"sklearn.decomposition.KernelPCA"
] |
[((2683, 2707), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**tasks)\n', (2698, 2707), False, 'from types import SimpleNamespace\n'), ((2958, 2995), 'numpy.load', 'np.load', (['f"""saved_models/CNTK-200.npy"""'], {}), "(f'saved_models/CNTK-200.npy')\n", (2965, 2995), True, 'import numpy as np\n'), ((6803, 6820), 'sklearn.neighbors.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (6818, 6820), False, 'from sklearn.neighbors import NearestCentroid\n'), ((6835, 6889), 'numpy.concatenate', 'np.concatenate', (['[dataset.Y_qry, dataset.Y_spt]'], {'axis': '(1)'}), '([dataset.Y_qry, dataset.Y_spt], axis=1)\n', (6849, 6889), True, 'import numpy as np\n'), ((6908, 6954), 'numpy.concatenate', 'np.concatenate', (['[Y_qry_emb, Y_spt_emb]'], {'axis': '(1)'}), '([Y_qry_emb, Y_spt_emb], axis=1)\n', (6922, 6954), True, 'import numpy as np\n'), ((7424, 7445), 'numpy.array', 'np.array', (['Y_centroids'], {}), '(Y_centroids)\n', (7432, 7445), True, 'import numpy as np\n'), ((7483, 7500), 'sklearn.neighbors.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (7498, 7500), False, 'from sklearn.neighbors import NearestCentroid\n'), ((7514, 7578), 'numpy.concatenate', 'np.concatenate', (['[dataset.test_Y_qry, dataset.test_Y_spt]'], {'axis': '(1)'}), '([dataset.test_Y_qry, dataset.test_Y_spt], axis=1)\n', (7528, 7578), True, 'import numpy as np\n'), ((7596, 7652), 'numpy.concatenate', 'np.concatenate', (['[test_Y_qry_emb, test_Y_spt_emb]'], {'axis': '(1)'}), '([test_Y_qry_emb, test_Y_spt_emb], axis=1)\n', (7610, 7652), True, 'import numpy as np\n'), ((8732, 8758), 'numpy.array', 'np.array', (['test_Y_centroids'], {}), '(test_Y_centroids)\n', (8740, 8758), True, 'import numpy as np\n'), ((9153, 9194), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neighbors'}), '(n_neighbors=n_neighbors)\n', (9169, 9194), False, 'from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\n'), ((9489, 9504), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (9497, 9504), True, 'import numpy as np\n'), ((9636, 9845), 'meta_cntk.MetaCNTK', 'MetaCNTK', ([], {'d_max': '(20)', 'fix': '(False)', 'GAP': '(True)', 'inner_lr': 'np.inf', 'train_time': 'np.inf', 'invMetaNTK': '(False)', 'kernel_ridge': '(True)', 'ridge_coef': 'ridge_coef', 'normalize_NTK': 'normalize_NTK', 'normalize_metaNTK': 'normalize_metaNTK'}), '(d_max=20, fix=False, GAP=True, inner_lr=np.inf, train_time=np.inf,\n invMetaNTK=False, kernel_ridge=True, ridge_coef=ridge_coef,\n normalize_NTK=normalize_NTK, normalize_metaNTK=normalize_metaNTK)\n', (9644, 9845), False, 'from meta_cntk import MetaCNTK\n'), ((10265, 10271), 'time.time', 'time', ([], {}), '()\n', (10269, 10271), False, 'from time import time\n'), ((10360, 10412), 'numpy.mean', 'np.mean', (['((pred_test_Y - dataset.test_Y_qry_emb) ** 2)'], {}), '((pred_test_Y - dataset.test_Y_qry_emb) ** 2)\n', (10367, 10412), True, 'import numpy as np\n'), ((10547, 10589), 'numpy.mean', 'np.mean', (['(pred_test_Y == dataset.test_Y_qry)'], {}), '(pred_test_Y == dataset.test_Y_qry)\n', (10554, 10589), True, 'import numpy as np\n'), ((10757, 10811), 'numpy.concatenate', 'np.concatenate', (['[dataset.X_qry, dataset.X_spt]'], {'axis': '(1)'}), '([dataset.X_qry, dataset.X_spt], axis=1)\n', (10771, 10811), True, 'import numpy as np\n'), ((10818, 10872), 'numpy.concatenate', 'np.concatenate', (['[dataset.Y_qry, dataset.Y_spt]'], {'axis': '(1)'}), '([dataset.Y_qry, dataset.Y_spt], axis=1)\n', (10832, 10872), True, 'import numpy as np\n'), ((10883, 10940), 
'numpy.concatenate', 'np.concatenate', (['[dataset.idx_Xs, dataset.idx_Xs_]'], {'axis': '(1)'}), '([dataset.idx_Xs, dataset.idx_Xs_], axis=1)\n', (10897, 10940), True, 'import numpy as np\n'), ((11313, 11325), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (11322, 11325), True, 'import numpy as np\n'), ((11697, 11717), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11711, 11717), True, 'import numpy as np\n'), ((13524, 13544), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13538, 13544), True, 'import numpy as np\n'), ((17265, 17319), 'numpy.concatenate', 'np.concatenate', (['[dataset.X_qry, dataset.X_spt]'], {'axis': '(1)'}), '([dataset.X_qry, dataset.X_spt], axis=1)\n', (17279, 17319), True, 'import numpy as np\n'), ((17328, 17382), 'numpy.concatenate', 'np.concatenate', (['[dataset.Y_qry, dataset.Y_spt]'], {'axis': '(1)'}), '([dataset.Y_qry, dataset.Y_spt], axis=1)\n', (17342, 17382), True, 'import numpy as np\n'), ((17395, 17452), 'numpy.concatenate', 'np.concatenate', (['[dataset.idx_Xs, dataset.idx_Xs_]'], {'axis': '(1)'}), '([dataset.idx_Xs, dataset.idx_Xs_], axis=1)\n', (17409, 17452), True, 'import numpy as np\n'), ((17827, 17839), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (17836, 17839), True, 'import numpy as np\n'), ((18416, 18433), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (18424, 18433), True, 'import numpy as np\n'), ((18447, 18463), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (18455, 18463), True, 'import numpy as np\n'), ((19646, 19669), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (19663, 19669), False, 'import torch\n'), ((19674, 19694), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (19688, 19694), True, 'import numpy as np\n'), ((20205, 20257), 'tqdm.notebook.trange', 'trange', (['epochs'], {'leave': '(False)', 'desc': '"""Train Supervised"""'}), "(epochs, leave=False, desc='Train Supervised')\n", (20211, 20257), False, 'from tqdm.notebook import tqdm, trange\n'), ((1830, 1850), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1844, 1850), True, 'import numpy as np\n'), ((1867, 1924), 'numpy.random.choice', 'np.random.choice', (['n_all_tasks'], {'size': 'n_task', 'replace': '(False)'}), '(n_all_tasks, size=n_task, replace=False)\n', (1883, 1924), True, 'import numpy as np\n'), ((1951, 1968), 'numpy.arange', 'np.arange', (['n_task'], {}), '(n_task)\n', (1960, 1968), True, 'import numpy as np\n'), ((4304, 4359), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components', 'svd_solver': '"""randomized"""'}), "(n_components=n_components, svd_solver='randomized')\n", (4307, 4359), False, 'from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA\n'), ((4460, 4547), 'sklearn.decomposition.KernelPCA', 'KernelPCA', ([], {'n_components': 'n_components', 'kernel': 'PCA_method', 'fit_inverse_transform': '(True)'}), '(n_components=n_components, kernel=PCA_method,\n fit_inverse_transform=True)\n', (4469, 4547), False, 'from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA\n'), ((7000, 7021), 'numpy.unique', 'np.unique', (['Y_train[0]'], {}), '(Y_train[0])\n', (7009, 7021), True, 'import numpy as np\n'), ((7712, 7773), 'numpy.concatenate', 'np.concatenate', (['[pred_test_Y_qry, dataset.test_Y_spt]'], {'axis': '(1)'}), '([pred_test_Y_qry, dataset.test_Y_spt], axis=1)\n', (7726, 7773), True, 'import numpy as np\n'), ((7816, 7836), 'numpy.unique', 'np.unique', (['Y_test[0]'], {}), '(Y_test[0])\n', (7825, 
7836), True, 'import numpy as np\n'), ((11148, 11160), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (11157, 11160), True, 'import numpy as np\n'), ((13043, 13058), 'numpy.array', 'np.array', (['Y_qry'], {}), '(Y_qry)\n', (13051, 13058), True, 'import numpy as np\n'), ((13059, 13074), 'numpy.array', 'np.array', (['Y_spt'], {}), '(Y_spt)\n', (13067, 13074), True, 'import numpy as np\n'), ((13101, 13120), 'numpy.array', 'np.array', (['Y_qry_emb'], {}), '(Y_qry_emb)\n', (13109, 13120), True, 'import numpy as np\n'), ((13122, 13141), 'numpy.array', 'np.array', (['Y_spt_emb'], {}), '(Y_spt_emb)\n', (13130, 13141), True, 'import numpy as np\n'), ((13160, 13175), 'numpy.array', 'np.array', (['X_qry'], {}), '(X_qry)\n', (13168, 13175), True, 'import numpy as np\n'), ((13176, 13191), 'numpy.array', 'np.array', (['X_spt'], {}), '(X_spt)\n', (13184, 13191), True, 'import numpy as np\n'), ((13594, 13612), 'copy.deepcopy', 'deepcopy', (['Y_qry[i]'], {}), '(Y_qry[i])\n', (13602, 13612), False, 'from copy import deepcopy\n'), ((13630, 13648), 'copy.deepcopy', 'deepcopy', (['Y_spt[i]'], {}), '(Y_spt[i])\n', (13638, 13648), False, 'from copy import deepcopy\n'), ((13693, 13710), 'numpy.unique', 'np.unique', (['ys_qry'], {}), '(ys_qry)\n', (13702, 13710), True, 'import numpy as np\n'), ((13733, 13749), 'numpy.arange', 'np.arange', (['n_way'], {}), '(n_way)\n', (13742, 13749), True, 'import numpy as np\n'), ((13759, 13788), 'numpy.random.shuffle', 'np.random.shuffle', (['new_labels'], {}), '(new_labels)\n', (13776, 13788), True, 'import numpy as np\n'), ((14456, 14487), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (14471, 14487), True, 'import torch.nn.functional as F\n'), ((14663, 14678), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14676, 14678), False, 'import torch\n'), ((16180, 16206), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'n_channel', '(3)'], {}), '(1, n_channel, 3)\n', (16189, 16206), False, 'from torch import nn\n'), ((16308, 16329), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16315, 16329), False, 'from torch import nn\n'), ((16343, 16361), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (16355, 16361), False, 'from torch import nn\n'), ((16443, 16477), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channel', 'n_channel', '(3)'], {}), '(n_channel, n_channel, 3)\n', (16452, 16477), False, 'from torch import nn\n'), ((16579, 16600), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16586, 16600), False, 'from torch import nn\n'), ((16614, 16632), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (16626, 16632), False, 'from torch import nn\n'), ((16714, 16748), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channel', 'n_channel', '(3)'], {}), '(n_channel, n_channel, 3)\n', (16723, 16748), False, 'from torch import nn\n'), ((16850, 16871), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16857, 16871), False, 'from torch import nn\n'), ((16885, 16903), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (16897, 16903), False, 'from torch import nn\n'), ((17008, 17035), 'torch.nn.Linear', 'nn.Linear', (['n_channel', 'n_way'], {}), '(n_channel, n_way)\n', (17017, 17035), False, 'from torch import nn\n'), ((17662, 17674), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (17671, 17674), True, 'import numpy as np\n'), ((18731, 18758), 'numpy.max', 'np.max', (["train_set['labels']"], {}), 
"(train_set['labels'])\n", (18737, 18758), True, 'import numpy as np\n'), ((19110, 19127), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (19118, 19127), True, 'import numpy as np\n'), ((19145, 19161), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (19153, 19161), True, 'import numpy as np\n'), ((19608, 19633), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19631, 19633), False, 'import torch\n'), ((21293, 21323), 'numpy.concatenate', 'np.concatenate', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (21307, 21323), True, 'import numpy as np\n'), ((12280, 12338), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dict_label_idx[label]'], {'train_size': 'n_shot'}), '(dict_label_idx[label], train_size=n_shot)\n', (12296, 12338), False, 'from sklearn.model_selection import train_test_split\n'), ((12441, 12460), 'numpy.array', 'np.array', (['idx_X_qry'], {}), '(idx_X_qry)\n', (12449, 12460), True, 'import numpy as np\n'), ((12504, 12523), 'numpy.array', 'np.array', (['idx_X_spt'], {}), '(idx_X_spt)\n', (12512, 12523), True, 'import numpy as np\n'), ((16220, 16270), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channel'], {'momentum': '(1)', 'affine': '(True)'}), '(n_channel, momentum=1, affine=True)\n', (16234, 16270), False, 'from torch import nn\n'), ((16375, 16396), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (16387, 16396), False, 'from torch import nn\n'), ((16491, 16541), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channel'], {'momentum': '(1)', 'affine': '(True)'}), '(n_channel, momentum=1, affine=True)\n', (16505, 16541), False, 'from torch import nn\n'), ((16646, 16667), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (16658, 16667), False, 'from torch import nn\n'), ((16762, 16812), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channel'], {'momentum': '(1)', 'affine': '(True)'}), '(n_channel, momentum=1, affine=True)\n', (16776, 16812), False, 'from torch import nn\n'), ((16917, 16938), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropout'], {}), '(dropout)\n', (16929, 16938), False, 'from torch import nn\n'), ((17141, 17164), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (17154, 17164), False, 'from torch import nn\n'), ((18688, 18718), 'numpy.unique', 'np.unique', (["train_set['labels']"], {}), "(train_set['labels'])\n", (18697, 18718), True, 'import numpy as np\n'), ((20560, 20579), 'numpy.array', 'np.array', (['test_accs'], {}), '(test_accs)\n', (20568, 20579), True, 'import numpy as np\n'), ((20580, 20601), 'numpy.array', 'np.array', (['test_losses'], {}), '(test_losses)\n', (20588, 20601), True, 'import numpy as np\n'), ((8161, 8192), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (8177, 8192), False, 'from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\n'), ((15555, 15579), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (15571, 15579), False, 'import torch\n'), ((15610, 15635), 'torch.from_numpy', 'torch.from_numpy', (['samples'], {}), '(samples)\n', (15626, 15635), False, 'import torch\n'), ((21019, 21038), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (21035, 21038), False, 'import torch\n'), ((14841, 14889), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (14856, 14889), True, 'import torch.nn.functional as 
F\n'), ((10330, 10336), 'time.time', 'time', ([], {}), '()\n', (10334, 10336), False, 'from time import time\n')]
|
from sampling import Sampler
import algos
import numpy as np
from simulation_utils import create_env, get_feedback, run_algo
import sys
def batch(task, method, N, M, b):
if N % b != 0:
        print('N must be divisible by b')
exit(0)
B = 20*b
simulation_object = create_env(task)
d = simulation_object.num_of_features
w_true = 2*np.random.rand(d)-1
w_true = w_true / np.linalg.norm(w_true)
print('If in automated mode: true w = {}'.format(w_true/np.linalg.norm(w_true)))
lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
upper_input_bound = [x[1] for x in simulation_object.feed_bounds]
w_sampler = Sampler(d)
psi_set = []
s_set = []
i = 0
while i < N:
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
w_samples = w_sampler.sample(M)
mean_w_samples = np.mean(w_samples,axis=0)
print('Samples so far: ' + str(i))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
inputA_set, inputB_set = run_algo(method, simulation_object, w_samples, b, B)
for j in range(b):
input_A = inputA_set[j]
input_B = inputB_set[j]
psi, s = get_feedback(simulation_object, input_B, input_A, w_true)
psi_set.append(psi)
s_set.append(s)
i += b
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
w_samples = w_sampler.sample(M)
mean_w_samples = np.mean(w_samples, axis=0)
print('Samples so far: ' + str(N))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
def nonbatch(task, method, N, M):
simulation_object = create_env(task)
d = simulation_object.num_of_features
w_true = 2*np.random.rand(d)-1
w_true = w_true / np.linalg.norm(w_true)
print('If in automated mode: true w = {}'.format(w_true/np.linalg.norm(w_true)))
lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
upper_input_bound = [x[1] for x in simulation_object.feed_bounds]
w_sampler = Sampler(d)
psi_set = []
s_set = []
for i in range(N):
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
w_samples = w_sampler.sample(M)
mean_w_samples = np.mean(w_samples,axis=0)
print('Samples so far: ' + str(i))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
input_A, input_B = run_algo(method, simulation_object, w_samples)
psi, s = get_feedback(simulation_object, input_A, input_B, w_true)
psi_set.append(psi)
s_set.append(s)
w_sampler.A = psi_set
w_sampler.y = np.array(s_set).reshape(-1,1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)  # recompute from the final samples; batch() does this, otherwise the prints below report a stale estimate
    print('Samples so far: ' + str(N))
print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
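# Hypothetical command-line entry point (a sketch, not part of the original file):
# the module imports `sys` but never uses it, which suggests it is meant to be driven
# from the command line. The argument order and example values are assumptions.
if __name__ == "__main__":
    task = sys.argv[1]                 # e.g. 'driver'
    method = sys.argv[2]               # query selection method understood by run_algo
    N = int(sys.argv[3])               # total number of preference queries
    M = int(sys.argv[4])               # number of posterior samples of w
    b = int(sys.argv[5]) if len(sys.argv) > 5 else 0
    if b > 0:
        batch(task, method, N, M, b)
    else:
        nonbatch(task, method, N, M)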
|
[
"simulation_utils.get_feedback",
"numpy.mean",
"numpy.linalg.norm",
"simulation_utils.create_env",
"simulation_utils.run_algo",
"numpy.array",
"numpy.random.rand",
"sampling.Sampler"
] |
[((286, 302), 'simulation_utils.create_env', 'create_env', (['task'], {}), '(task)\n', (296, 302), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((671, 681), 'sampling.Sampler', 'Sampler', (['d'], {}), '(d)\n', (678, 681), False, 'from sampling import Sampler\n'), ((1612, 1638), 'numpy.mean', 'np.mean', (['w_samples'], {'axis': '(0)'}), '(w_samples, axis=0)\n', (1619, 1638), True, 'import numpy as np\n'), ((1916, 1932), 'simulation_utils.create_env', 'create_env', (['task'], {}), '(task)\n', (1926, 1932), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((2301, 2311), 'sampling.Sampler', 'Sampler', (['d'], {}), '(d)\n', (2308, 2311), False, 'from sampling import Sampler\n'), ((404, 426), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (418, 426), True, 'import numpy as np\n'), ((888, 914), 'numpy.mean', 'np.mean', (['w_samples'], {'axis': '(0)'}), '(w_samples, axis=0)\n', (895, 914), True, 'import numpy as np\n'), ((1175, 1227), 'simulation_utils.run_algo', 'run_algo', (['method', 'simulation_object', 'w_samples', 'b', 'B'], {}), '(method, simulation_object, w_samples, b, B)\n', (1183, 1227), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((2034, 2056), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (2048, 2056), True, 'import numpy as np\n'), ((2514, 2540), 'numpy.mean', 'np.mean', (['w_samples'], {'axis': '(0)'}), '(w_samples, axis=0)\n', (2521, 2540), True, 'import numpy as np\n'), ((2795, 2841), 'simulation_utils.run_algo', 'run_algo', (['method', 'simulation_object', 'w_samples'], {}), '(method, simulation_object, w_samples)\n', (2803, 2841), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((2859, 2916), 'simulation_utils.get_feedback', 'get_feedback', (['simulation_object', 'input_A', 'input_B', 'w_true'], {}), '(simulation_object, input_A, input_B, w_true)\n', (2871, 2916), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((362, 379), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (376, 379), True, 'import numpy as np\n'), ((1348, 1405), 'simulation_utils.get_feedback', 'get_feedback', (['simulation_object', 'input_B', 'input_A', 'w_true'], {}), '(simulation_object, input_B, input_A, w_true)\n', (1360, 1405), False, 'from simulation_utils import create_env, get_feedback, run_algo\n'), ((1525, 1540), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (1533, 1540), True, 'import numpy as np\n'), ((1992, 2009), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (2006, 2009), True, 'import numpy as np\n'), ((3013, 3028), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (3021, 3028), True, 'import numpy as np\n'), ((487, 509), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (501, 509), True, 'import numpy as np\n'), ((793, 808), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (801, 808), True, 'import numpy as np\n'), ((1728, 1758), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1742, 1758), True, 'import numpy as np\n'), ((1822, 1852), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1836, 1852), True, 'import numpy as np\n'), ((2117, 2139), 'numpy.linalg.norm', 'np.linalg.norm', (['w_true'], {}), '(w_true)\n', (2131, 2139), True, 'import numpy as np\n'), ((2419, 2434), 'numpy.array', 'np.array', (['s_set'], {}), '(s_set)\n', (2427, 2434), True, 
'import numpy as np\n'), ((3168, 3198), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (3182, 3198), True, 'import numpy as np\n'), ((3262, 3292), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (3276, 3292), True, 'import numpy as np\n'), ((1011, 1041), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1025, 1041), True, 'import numpy as np\n'), ((1109, 1139), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (1123, 1139), True, 'import numpy as np\n'), ((2637, 2667), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (2651, 2667), True, 'import numpy as np\n'), ((2735, 2765), 'numpy.linalg.norm', 'np.linalg.norm', (['mean_w_samples'], {}), '(mean_w_samples)\n', (2749, 2765), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, get_link_to_form
from frappe.model.document import Document
class AssetCategory(Document):
def validate(self):
self.validate_finance_books()
self.validate_account_types()
self.validate_account_currency()
self.valide_cwip_account()
def validate_finance_books(self):
for d in self.finance_books:
for field in ("Total Number of Depreciations", "Frequency of Depreciation"):
if cint(d.get(frappe.scrub(field)))<1:
frappe.throw(_("Row {0}: {1} must be greater than 0").format(d.idx, field), frappe.MandatoryError)
def validate_account_currency(self):
account_types = [
'fixed_asset_account', 'accumulated_depreciation_account', 'depreciation_expense_account', 'capital_work_in_progress_account'
]
invalid_accounts = []
for d in self.accounts:
company_currency = frappe.get_value('Company', d.get('company_name'), 'default_currency')
for type_of_account in account_types:
if d.get(type_of_account):
account_currency = frappe.get_value("Account", d.get(type_of_account), "account_currency")
if account_currency != company_currency:
invalid_accounts.append(frappe._dict({ 'type': type_of_account, 'idx': d.idx, 'account': d.get(type_of_account) }))
for d in invalid_accounts:
			frappe.throw(_("Row #{}: Currency of {} - {} doesn't match company currency.")
.format(d.idx, frappe.bold(frappe.unscrub(d.type)), frappe.bold(d.account)),
title=_("Invalid Account"))
def validate_account_types(self):
account_type_map = {
'fixed_asset_account': { 'account_type': 'Fixed Asset' },
'accumulated_depreciation_account': { 'account_type': 'Accumulated Depreciation' },
'depreciation_expense_account': { 'root_type': 'Expense' },
'capital_work_in_progress_account': { 'account_type': 'Capital Work in Progress' }
}
for d in self.accounts:
for fieldname in account_type_map.keys():
if d.get(fieldname):
selected_account = d.get(fieldname)
				key_to_match = next(iter(account_type_map.get(fieldname))) # account_type or root_type
selected_key_type = frappe.db.get_value('Account', selected_account, key_to_match)
expected_key_type = account_type_map[fieldname][key_to_match]
if selected_key_type != expected_key_type:
frappe.throw(_("Row #{}: {} of {} should be {}. Please modify the account or select a different account.")
.format(d.idx, frappe.unscrub(key_to_match), frappe.bold(selected_account), frappe.bold(expected_key_type)),
title=_("Invalid Account"))
def valide_cwip_account(self):
if self.enable_cwip_accounting:
missing_cwip_accounts_for_company = []
for d in self.accounts:
if (not d.capital_work_in_progress_account and
not frappe.db.get_value("Company", d.company_name, "capital_work_in_progress_account")):
missing_cwip_accounts_for_company.append(get_link_to_form("Company", d.company_name))
if missing_cwip_accounts_for_company:
msg = _("""To enable Capital Work in Progress Accounting, """)
msg += _("""you must select Capital Work in Progress Account in accounts table""")
msg += "<br><br>"
msg += _("You can also set default CWIP account in Company {}").format(", ".join(missing_cwip_accounts_for_company))
frappe.throw(msg, title=_("Missing Account"))
@frappe.whitelist()
def get_asset_category_account(fieldname, item=None, asset=None, account=None, asset_category = None, company = None):
if item and frappe.db.get_value("Item", item, "is_fixed_asset"):
asset_category = frappe.db.get_value("Item", item, ["asset_category"])
elif not asset_category or not company:
if account:
if frappe.db.get_value("Account", account, "account_type") != "Fixed Asset":
account=None
if not account:
asset_details = frappe.db.get_value("Asset", asset, ["asset_category", "company"])
asset_category, company = asset_details or [None, None]
account = frappe.db.get_value("Asset Category Account",
filters={"parent": asset_category, "company_name": company}, fieldname=fieldname)
return account
|
[
"frappe.utils.get_link_to_form",
"frappe.whitelist",
"frappe.db.get_value",
"frappe.bold",
"frappe.scrub",
"frappe.unscrub",
"frappe._"
] |
[((3509, 3527), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (3525, 3527), False, 'import frappe\n'), ((4115, 4246), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Asset Category Account"""'], {'filters': "{'parent': asset_category, 'company_name': company}", 'fieldname': 'fieldname'}), "('Asset Category Account', filters={'parent':\n asset_category, 'company_name': company}, fieldname=fieldname)\n", (4134, 4246), False, 'import frappe\n'), ((3660, 3711), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item"""', 'item', '"""is_fixed_asset"""'], {}), "('Item', item, 'is_fixed_asset')\n", (3679, 3711), False, 'import frappe\n'), ((3732, 3785), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Item"""', 'item', "['asset_category']"], {}), "('Item', item, ['asset_category'])\n", (3751, 3785), False, 'import frappe\n'), ((3169, 3221), 'frappe._', '_', (['"""To enable Capital Work in Progress Accounting, """'], {}), "('To enable Capital Work in Progress Accounting, ')\n", (3170, 3221), False, 'from frappe import _\n'), ((3237, 3308), 'frappe._', '_', (['"""you must select Capital Work in Progress Account in accounts table"""'], {}), "('you must select Capital Work in Progress Account in accounts table')\n", (3238, 3308), False, 'from frappe import _\n'), ((3977, 4043), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Asset"""', 'asset', "['asset_category', 'company']"], {}), "('Asset', asset, ['asset_category', 'company'])\n", (3996, 4043), False, 'import frappe\n'), ((1628, 1650), 'frappe.bold', 'frappe.bold', (['d.account'], {}), '(d.account)\n', (1639, 1650), False, 'import frappe\n'), ((1663, 1683), 'frappe._', '_', (['"""Invalid Account"""'], {}), "('Invalid Account')\n", (1664, 1683), False, 'from frappe import _\n'), ((2300, 2362), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Account"""', 'selected_account', 'key_to_match'], {}), "('Account', selected_account, key_to_match)\n", (2319, 2362), False, 'import frappe\n'), ((3848, 3903), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Account"""', 'account', '"""account_type"""'], {}), "('Account', account, 'account_type')\n", (3867, 3903), False, 'import frappe\n'), ((1504, 1571), 'frappe._', '_', (['"""Row #{}: Currency of {} - {} doesn\'t matches company currency."""'], {}), '("Row #{}: Currency of {} - {} doesn\'t matches company currency.")\n', (1505, 1571), False, 'from frappe import _\n'), ((1603, 1625), 'frappe.unscrub', 'frappe.unscrub', (['d.type'], {}), '(d.type)\n', (1617, 1625), False, 'import frappe\n'), ((2941, 3027), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Company"""', 'd.company_name', '"""capital_work_in_progress_account"""'], {}), "('Company', d.company_name,\n 'capital_work_in_progress_account')\n", (2960, 3027), False, 'import frappe\n'), ((3072, 3115), 'frappe.utils.get_link_to_form', 'get_link_to_form', (['"""Company"""', 'd.company_name'], {}), "('Company', d.company_name)\n", (3088, 3115), False, 'from frappe.utils import cint, get_link_to_form\n'), ((3346, 3402), 'frappe._', '_', (['"""You can also set default CWIP account in Company {}"""'], {}), "('You can also set default CWIP account in Company {}')\n", (3347, 3402), False, 'from frappe import _\n'), ((3484, 3504), 'frappe._', '_', (['"""Missing Account"""'], {}), "('Missing Account')\n", (3485, 3504), False, 'from frappe import _\n'), ((656, 675), 'frappe.scrub', 'frappe.scrub', (['field'], {}), '(field)\n', (668, 675), False, 'import frappe\n'), ((699, 739), 'frappe._', '_', (['"""Row {0}: {1} must be greater 
than 0"""'], {}), "('Row {0}: {1} must be greater than 0')\n", (700, 739), False, 'from frappe import _\n'), ((2614, 2642), 'frappe.unscrub', 'frappe.unscrub', (['key_to_match'], {}), '(key_to_match)\n', (2628, 2642), False, 'import frappe\n'), ((2644, 2673), 'frappe.bold', 'frappe.bold', (['selected_account'], {}), '(selected_account)\n', (2655, 2673), False, 'import frappe\n'), ((2675, 2705), 'frappe.bold', 'frappe.bold', (['expected_key_type'], {}), '(expected_key_type)\n', (2686, 2705), False, 'import frappe\n'), ((2721, 2741), 'frappe._', '_', (['"""Invalid Account"""'], {}), "('Invalid Account')\n", (2722, 2741), False, 'from frappe import _\n'), ((2498, 2596), 'frappe._', '_', (['"""Row #{}: {} of {} should be {}. Please modify the account or select a different account."""'], {}), "('Row #{}: {} of {} should be {}. Please modify the account or select a different account.'\n )\n", (2499, 2596), False, 'from frappe import _\n')]
|
# -*- coding: utf-8 -*-
import json
import io, sys
import nltk
import xml.etree.cElementTree as ET
from xml.etree.ElementTree import Element, SubElement, dump, ElementTree
file_ko = 'test_ko.txt'
file_en = 'test_en.txt'
ko = open(file_ko).readlines()
en = open(file_en).readlines()
data_ko = []
for t in ko:
if t.replace("\n",'') == '':
continue
else:
data_ko.append(t.replace("\n",''))
data_en = []
for b in en:
if b.replace("\n",'') == '':
continue
else:
data_en.append(b.replace("\n",''))
print(data_ko[0])
print(data_en[0])
for a in range(1, len(data_ko)):
tu = Element("tu")
tuv_ko = SubElement(tu, "tuv")
tuv_ko.attrib["xml:lang"] = "ko"
    val_ko = data_ko[a]
seg_ko = SubElement(tuv_ko, "seg").text = val_ko
tuv_en = SubElement(tu, "tuv")
tuv_en.attrib["xml:lang"] = "en"
    val_en = data_en[a]
seg_en = SubElement(tuv_en, "seg").text = val_en
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
indent(tu)
dump(tu)
|
[
"xml.etree.ElementTree.dump",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.SubElement"
] |
[((624, 637), 'xml.etree.ElementTree.Element', 'Element', (['"""tu"""'], {}), "('tu')\n", (631, 637), False, 'from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n'), ((652, 673), 'xml.etree.ElementTree.SubElement', 'SubElement', (['tu', '"""tuv"""'], {}), "(tu, 'tuv')\n", (662, 673), False, 'from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n'), ((801, 822), 'xml.etree.ElementTree.SubElement', 'SubElement', (['tu', '"""tuv"""'], {}), "(tu, 'tuv')\n", (811, 822), False, 'from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n'), ((1482, 1490), 'xml.etree.ElementTree.dump', 'dump', (['tu'], {}), '(tu)\n', (1486, 1490), False, 'from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n'), ((748, 773), 'xml.etree.ElementTree.SubElement', 'SubElement', (['tuv_ko', '"""seg"""'], {}), "(tuv_ko, 'seg')\n", (758, 773), False, 'from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n'), ((897, 922), 'xml.etree.ElementTree.SubElement', 'SubElement', (['tuv_en', '"""seg"""'], {}), "(tuv_en, 'seg')\n", (907, 922), False, 'from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n')]
|
# Generated by Django 2.1 on 2018-08-16 20:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leagues', '0015_league_season'),
]
operations = [
migrations.AlterUniqueTogether(
name='season',
unique_together={('start_date', 'end_date')},
),
]
|
[
"django.db.migrations.AlterUniqueTogether"
] |
[((220, 316), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""season"""', 'unique_together': "{('start_date', 'end_date')}"}), "(name='season', unique_together={(\n 'start_date', 'end_date')})\n", (250, 316), False, 'from django.db import migrations\n')]
|
#!/usr/bin/env python
"""
Tests for the Mininet Walkthrough
TODO: missing xterm test
"""
import unittest
import pexpect
import os
import re
from mininet.util import quietRun
from distutils.version import StrictVersion
def tsharkVersion():
"Return tshark version"
versionStr = quietRun( 'tshark -v' )
versionMatch = re.findall( r'TShark[^\d]*(\d+.\d+.\d+)', versionStr )
return versionMatch[ 0 ]
# pylint doesn't understand pexpect.match, unfortunately!
# pylint:disable=maybe-no-member
class testWalkthrough( unittest.TestCase ):
"Test Mininet walkthrough"
prompt = 'mininet>'
# PART 1
def testHelp( self ):
"Check the usage message"
p = pexpect.spawn( 'mn -h' )
index = p.expect( [ 'Usage: mn', pexpect.EOF ] )
self.assertEqual( index, 0 )
def testWireshark( self ):
"Use tshark to test the of dissector"
# Satisfy pylint
assert self
if StrictVersion( tsharkVersion() ) < StrictVersion( '1.12.0' ):
tshark = pexpect.spawn( 'tshark -i lo -R of' )
else:
tshark = pexpect.spawn( 'tshark -i lo -Y openflow_v1' )
tshark.expect( [ 'Capturing on lo', "Capturing on 'Loopback'" ] )
mn = pexpect.spawn( 'mn --test pingall' )
mn.expect( '0% dropped' )
tshark.expect( [ '74 Hello', '74 of_hello', '74 Type: OFPT_HELLO' ] )
tshark.sendintr()
mn.expect( pexpect.EOF )
tshark.expect( pexpect.EOF )
def testBasic( self ):
"Test basic CLI commands (help, nodes, net, dump)"
p = pexpect.spawn( 'mn' )
p.expect( self.prompt )
# help command
p.sendline( 'help' )
index = p.expect( [ 'commands', self.prompt ] )
self.assertEqual( index, 0, 'No output for "help" command')
# nodes command
p.sendline( 'nodes' )
p.expect( r'([chs]\d ?){4}' )
nodes = p.match.group( 0 ).split()
self.assertEqual( len( nodes ), 4, 'No nodes in "nodes" command')
p.expect( self.prompt )
# net command
p.sendline( 'net' )
expected = [ x for x in nodes ]
while len( expected ) > 0:
index = p.expect( expected )
node = p.match.group( 0 )
expected.remove( node )
p.expect( '\n' )
self.assertEqual( len( expected ), 0, '"nodes" and "net" differ')
p.expect( self.prompt )
# dump command
p.sendline( 'dump' )
expected = [ r'<\w+ (%s)' % n for n in nodes ]
actual = []
for _ in nodes:
index = p.expect( expected )
node = p.match.group( 1 )
actual.append( node )
p.expect( '\n' )
self.assertEqual( actual.sort(), nodes.sort(),
'"nodes" and "dump" differ' )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
def testHostCommands( self ):
"Test ifconfig and ps on h1 and s1"
p = pexpect.spawn( 'mn' )
p.expect( self.prompt )
# Third pattern is a local interface beginning with 'eth' or 'en'
        interfaces = [ 'h1-eth0', 's1-eth1', r'[^-](eth|en)\w*\d', 'lo', self.prompt ]
# h1 ifconfig
p.sendline( 'h1 ifconfig -a' )
ifcount = 0
while True:
index = p.expect( interfaces )
if index == 0 or index == 3:
ifcount += 1
elif index == 1:
self.fail( 's1 interface displayed in "h1 ifconfig"' )
elif index == 2:
self.fail( 'eth0 displayed in "h1 ifconfig"' )
else:
break
self.assertEqual( ifcount, 2, 'Missing interfaces on h1')
# s1 ifconfig
p.sendline( 's1 ifconfig -a' )
ifcount = 0
while True:
index = p.expect( interfaces )
if index == 0:
self.fail( 'h1 interface displayed in "s1 ifconfig"' )
elif index == 1 or index == 2 or index == 3:
ifcount += 1
else:
break
self.assertTrue( ifcount >= 3, 'Missing interfaces on s1')
# h1 ps
p.sendline( "h1 ps -a | egrep -v 'ps|grep'" )
p.expect( self.prompt )
h1Output = p.before
# s1 ps
p.sendline( "s1 ps -a | egrep -v 'ps|grep'" )
p.expect( self.prompt )
s1Output = p.before
# strip command from ps output and compute diffs
h1Output = h1Output.split( '\n' )[ 1: ]
s1Output = s1Output.split( '\n' )[ 1: ]
diffs = set( h1Output ).difference( set( s1Output ) )
# allow up to two diffs to account for daemons, etc.
self.assertTrue( len( diffs ) <= 2,
'h1 and s1 "ps" output differ too much: %s' % diffs )
p.sendline( 'exit' )
p.wait()
def testConnectivity( self ):
"Test ping and pingall"
p = pexpect.spawn( 'mn' )
p.expect( self.prompt )
p.sendline( 'h1 ping -c 1 h2' )
p.expect( '1 packets transmitted, 1 received' )
p.expect( self.prompt )
p.sendline( 'pingall' )
p.expect( '0% dropped' )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
def testSimpleHTTP( self ):
"Start an HTTP server on h1 and wget from h2"
p = pexpect.spawn( 'mn' )
p.expect( self.prompt )
p.sendline( 'h1 python -m SimpleHTTPServer 80 &' )
p.expect( self.prompt )
p.sendline( ' h2 wget -O - h1' )
p.expect( '200 OK' )
p.expect( self.prompt )
p.sendline( 'h1 kill %python' )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
# PART 2
def testRegressionRun( self ):
"Test pingpair (0% drop) and iperf (bw > 0) regression tests"
# test pingpair
p = pexpect.spawn( 'mn --test pingpair' )
p.expect( '0% dropped' )
p.expect( pexpect.EOF )
# test iperf
p = pexpect.spawn( 'mn --test iperf' )
p.expect( r"Results: \['([\d\.]+) .bits/sec'," )
bw = float( p.match.group( 1 ) )
self.assertTrue( bw > 0 )
p.expect( pexpect.EOF )
def testTopoChange( self ):
"Test pingall on single,3 and linear,4 topos"
# testing single,3
p = pexpect.spawn( 'mn --test pingall --topo single,3' )
p.expect( r'(\d+)/(\d+) received')
received = int( p.match.group( 1 ) )
sent = int( p.match.group( 2 ) )
self.assertEqual( sent, 6, 'Wrong number of pings sent in single,3' )
self.assertEqual( sent, received, 'Dropped packets in single,3')
p.expect( pexpect.EOF )
# testing linear,4
p = pexpect.spawn( 'mn --test pingall --topo linear,4' )
p.expect( r'(\d+)/(\d+) received')
received = int( p.match.group( 1 ) )
sent = int( p.match.group( 2 ) )
self.assertEqual( sent, 12, 'Wrong number of pings sent in linear,4' )
self.assertEqual( sent, received, 'Dropped packets in linear,4')
p.expect( pexpect.EOF )
def testLinkChange( self ):
"Test TCLink bw and delay"
p = pexpect.spawn( 'mn --link tc,bw=10,delay=10ms' )
# test bw
p.expect( self.prompt )
p.sendline( 'iperf' )
p.expect( r"Results: \['([\d\.]+) Mbits/sec'," )
bw = float( p.match.group( 1 ) )
self.assertTrue( bw < 10.1, 'Bandwidth > 10 Mb/s')
self.assertTrue( bw > 9.0, 'Bandwidth < 9 Mb/s')
p.expect( self.prompt )
# test delay
p.sendline( 'h1 ping -c 4 h2' )
p.expect( r'rtt min/avg/max/mdev = '
r'([\d\.]+)/([\d\.]+)/([\d\.]+)/([\d\.]+) ms' )
delay = float( p.match.group( 2 ) )
self.assertTrue( delay > 40, 'Delay < 40ms' )
self.assertTrue( delay < 45, 'Delay > 40ms' )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
def testVerbosity( self ):
"Test debug and output verbosity"
# test output
p = pexpect.spawn( 'mn -v output' )
p.expect( self.prompt )
self.assertEqual( len( p.before ), 0, 'Too much output for "output"' )
p.sendline( 'exit' )
p.wait()
# test debug
p = pexpect.spawn( 'mn -v debug --test none' )
p.expect( pexpect.EOF )
lines = p.before.split( '\n' )
self.assertTrue( len( lines ) > 70, "Debug output is too short" )
def testCustomTopo( self ):
"Start Mininet using a custom topo, then run pingall"
# Satisfy pylint
assert self
custom = os.path.dirname( os.path.realpath( __file__ ) )
custom = os.path.join( custom, '../../custom/topo-2sw-2host.py' )
custom = os.path.normpath( custom )
p = pexpect.spawn(
'mn --custom %s --topo mytopo --test pingall' % custom )
p.expect( '0% dropped' )
p.expect( pexpect.EOF )
def testStaticMAC( self ):
"Verify that MACs are set to easy to read numbers"
p = pexpect.spawn( 'mn --mac' )
p.expect( self.prompt )
for i in range( 1, 3 ):
p.sendline( 'h%d ifconfig' % i )
p.expect( 'HWaddr 00:00:00:00:00:0%d' % i )
p.expect( self.prompt )
p.sendline( 'exit' )
p.expect( pexpect.EOF )
def testSwitches( self ):
"Run iperf test using user and ovsk switches"
switches = [ 'user', 'ovsk' ]
for sw in switches:
p = pexpect.spawn( 'mn --switch %s --test iperf' % sw )
p.expect( r"Results: \['([\d\.]+) .bits/sec'," )
bw = float( p.match.group( 1 ) )
self.assertTrue( bw > 0 )
p.expect( pexpect.EOF )
def testBenchmark( self ):
"Run benchmark and verify that it takes less than 2 seconds"
p = pexpect.spawn( 'mn --test none' )
p.expect( r'completed in ([\d\.]+) seconds' )
time = float( p.match.group( 1 ) )
self.assertTrue( time < 2, 'Benchmark takes more than 2 seconds' )
def testOwnNamespace( self ):
"Test running user switch in its own namespace"
p = pexpect.spawn( 'mn --innamespace --switch user' )
p.expect( self.prompt )
interfaces = [ 'h1-eth0', 's1-eth1', '[^-]eth0', 'lo', self.prompt ]
p.sendline( 's1 ifconfig -a' )
ifcount = 0
while True:
index = p.expect( interfaces )
if index == 1 or index == 3:
ifcount += 1
elif index == 0:
self.fail( 'h1 interface displayed in "s1 ifconfig"' )
elif index == 2:
self.fail( 'eth0 displayed in "s1 ifconfig"' )
else:
break
self.assertEqual( ifcount, 2, 'Missing interfaces on s1' )
        # verify that all hosts are reachable
p.sendline( 'pingall' )
p.expect( r'(\d+)% dropped' )
dropped = int( p.match.group( 1 ) )
self.assertEqual( dropped, 0, 'pingall failed')
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
# PART 3
def testPythonInterpreter( self ):
"Test py and px by checking IP for h1 and adding h3"
p = pexpect.spawn( 'mn' )
p.expect( self.prompt )
# test host IP
p.sendline( 'py h1.IP()' )
p.expect( '10.0.0.1' )
p.expect( self.prompt )
# test adding host
p.sendline( "px net.addHost('h3')" )
p.expect( self.prompt )
p.sendline( "px net.addLink(s1, h3)" )
p.expect( self.prompt )
p.sendline( 'net' )
p.expect( 'h3' )
p.expect( self.prompt )
p.sendline( 'py h3.MAC()' )
p.expect( '([a-f0-9]{2}:?){6}' )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
def testLink( self ):
"Test link CLI command using ping"
p = pexpect.spawn( 'mn' )
p.expect( self.prompt )
p.sendline( 'link s1 h1 down' )
p.expect( self.prompt )
p.sendline( 'h1 ping -c 1 h2' )
p.expect( 'unreachable' )
p.expect( self.prompt )
p.sendline( 'link s1 h1 up' )
p.expect( self.prompt )
p.sendline( 'h1 ping -c 1 h2' )
p.expect( '0% packet loss' )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
@unittest.skipUnless( os.path.exists( '/tmp/pox' ) or
'1 received' in quietRun( 'ping -c 1 github.com' ),
'Github is not reachable; cannot download Pox' )
def testRemoteController( self ):
"Test Mininet using Pox controller"
# Satisfy pylint
assert self
if not os.path.exists( '/tmp/pox' ):
p = pexpect.spawn(
'git clone https://github.com/noxrepo/pox.git /tmp/pox' )
p.expect( pexpect.EOF )
pox = pexpect.spawn( '/tmp/pox/pox.py forwarding.l2_learning' )
net = pexpect.spawn(
'mn --controller=remote,ip=127.0.0.1,port=6633 --test pingall' )
net.expect( '0% dropped' )
net.expect( pexpect.EOF )
pox.sendintr()
pox.wait()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pexpect.spawn",
"distutils.version.StrictVersion",
"os.path.realpath",
"os.path.exists",
"re.findall",
"os.path.normpath",
"mininet.util.quietRun",
"os.path.join"
] |
[((288, 309), 'mininet.util.quietRun', 'quietRun', (['"""tshark -v"""'], {}), "('tshark -v')\n", (296, 309), False, 'from mininet.util import quietRun\n'), ((331, 386), 're.findall', 're.findall', (['"""TShark[^\\\\d]*(\\\\d+.\\\\d+.\\\\d+)"""', 'versionStr'], {}), "('TShark[^\\\\d]*(\\\\d+.\\\\d+.\\\\d+)', versionStr)\n", (341, 386), False, 'import re\n'), ((13245, 13260), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13258, 13260), False, 'import unittest\n'), ((694, 716), 'pexpect.spawn', 'pexpect.spawn', (['"""mn -h"""'], {}), "('mn -h')\n", (707, 716), False, 'import pexpect\n'), ((1237, 1271), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --test pingall"""'], {}), "('mn --test pingall')\n", (1250, 1271), False, 'import pexpect\n'), ((1581, 1600), 'pexpect.spawn', 'pexpect.spawn', (['"""mn"""'], {}), "('mn')\n", (1594, 1600), False, 'import pexpect\n'), ((3000, 3019), 'pexpect.spawn', 'pexpect.spawn', (['"""mn"""'], {}), "('mn')\n", (3013, 3019), False, 'import pexpect\n'), ((4945, 4964), 'pexpect.spawn', 'pexpect.spawn', (['"""mn"""'], {}), "('mn')\n", (4958, 4964), False, 'import pexpect\n'), ((5369, 5388), 'pexpect.spawn', 'pexpect.spawn', (['"""mn"""'], {}), "('mn')\n", (5382, 5388), False, 'import pexpect\n'), ((5889, 5924), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --test pingpair"""'], {}), "('mn --test pingpair')\n", (5902, 5924), False, 'import pexpect\n'), ((6025, 6057), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --test iperf"""'], {}), "('mn --test iperf')\n", (6038, 6057), False, 'import pexpect\n'), ((6350, 6400), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --test pingall --topo single,3"""'], {}), "('mn --test pingall --topo single,3')\n", (6363, 6400), False, 'import pexpect\n'), ((6754, 6804), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --test pingall --topo linear,4"""'], {}), "('mn --test pingall --topo linear,4')\n", (6767, 6804), False, 'import pexpect\n'), ((7200, 7246), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --link tc,bw=10,delay=10ms"""'], {}), "('mn --link tc,bw=10,delay=10ms')\n", (7213, 7246), False, 'import pexpect\n'), ((8085, 8114), 'pexpect.spawn', 'pexpect.spawn', (['"""mn -v output"""'], {}), "('mn -v output')\n", (8098, 8114), False, 'import pexpect\n'), ((8307, 8347), 'pexpect.spawn', 'pexpect.spawn', (['"""mn -v debug --test none"""'], {}), "('mn -v debug --test none')\n", (8320, 8347), False, 'import pexpect\n'), ((8717, 8771), 'os.path.join', 'os.path.join', (['custom', '"""../../custom/topo-2sw-2host.py"""'], {}), "(custom, '../../custom/topo-2sw-2host.py')\n", (8729, 8771), False, 'import os\n'), ((8791, 8815), 'os.path.normpath', 'os.path.normpath', (['custom'], {}), '(custom)\n', (8807, 8815), False, 'import os\n'), ((8830, 8899), 'pexpect.spawn', 'pexpect.spawn', (["('mn --custom %s --topo mytopo --test pingall' % custom)"], {}), "('mn --custom %s --topo mytopo --test pingall' % custom)\n", (8843, 8899), False, 'import pexpect\n'), ((9082, 9107), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --mac"""'], {}), "('mn --mac')\n", (9095, 9107), False, 'import pexpect\n'), ((9884, 9915), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --test none"""'], {}), "('mn --test none')\n", (9897, 9915), False, 'import pexpect\n'), ((10193, 10240), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --innamespace --switch user"""'], {}), "('mn --innamespace --switch user')\n", (10206, 10240), False, 'import pexpect\n'), ((11261, 11280), 'pexpect.spawn', 'pexpect.spawn', (['"""mn"""'], {}), "('mn')\n", (11274, 11280), False, 'import pexpect\n'), ((11941, 11960), 
'pexpect.spawn', 'pexpect.spawn', (['"""mn"""'], {}), "('mn')\n", (11954, 11960), False, 'import pexpect\n'), ((12937, 12992), 'pexpect.spawn', 'pexpect.spawn', (['"""/tmp/pox/pox.py forwarding.l2_learning"""'], {}), "('/tmp/pox/pox.py forwarding.l2_learning')\n", (12950, 12992), False, 'import pexpect\n'), ((13009, 13086), 'pexpect.spawn', 'pexpect.spawn', (['"""mn --controller=remote,ip=127.0.0.1,port=6633 --test pingall"""'], {}), "('mn --controller=remote,ip=127.0.0.1,port=6633 --test pingall')\n", (13022, 13086), False, 'import pexpect\n'), ((982, 1005), 'distutils.version.StrictVersion', 'StrictVersion', (['"""1.12.0"""'], {}), "('1.12.0')\n", (995, 1005), False, 'from distutils.version import StrictVersion\n'), ((1030, 1065), 'pexpect.spawn', 'pexpect.spawn', (['"""tshark -i lo -R of"""'], {}), "('tshark -i lo -R of')\n", (1043, 1065), False, 'import pexpect\n'), ((1103, 1147), 'pexpect.spawn', 'pexpect.spawn', (['"""tshark -i lo -Y openflow_v1"""'], {}), "('tshark -i lo -Y openflow_v1')\n", (1116, 1147), False, 'import pexpect\n'), ((8669, 8695), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8685, 8695), False, 'import os\n'), ((9539, 9588), 'pexpect.spawn', 'pexpect.spawn', (["('mn --switch %s --test iperf' % sw)"], {}), "('mn --switch %s --test iperf' % sw)\n", (9552, 9588), False, 'import pexpect\n'), ((12752, 12778), 'os.path.exists', 'os.path.exists', (['"""/tmp/pox"""'], {}), "('/tmp/pox')\n", (12766, 12778), False, 'import os\n'), ((12798, 12868), 'pexpect.spawn', 'pexpect.spawn', (['"""git clone https://github.com/noxrepo/pox.git /tmp/pox"""'], {}), "('git clone https://github.com/noxrepo/pox.git /tmp/pox')\n", (12811, 12868), False, 'import pexpect\n'), ((12425, 12451), 'os.path.exists', 'os.path.exists', (['"""/tmp/pox"""'], {}), "('/tmp/pox')\n", (12439, 12451), False, 'import os\n'), ((12499, 12531), 'mininet.util.quietRun', 'quietRun', (['"""ping -c 1 github.com"""'], {}), "('ping -c 1 github.com')\n", (12507, 12531), False, 'from mininet.util import quietRun\n')]
|
"""
Advances in Financial Machine Learning, <NAME>
Chapter 2: Financial Data Structures
This module contains the functions to help users create structured financial data from raw unstructured data,
in the form of time, tick, volume, and dollar bars.
These bars are used throughout the text book (Advances in Financial Machine Learning, By <NAME>, 2018,
pg 25) to build the more interesting features for predicting financial time series data.
These financial data structures have better statistical properties when compared to those based on fixed time interval
sampling. A great paper to read more about this is titled: The Volume Clock: Insights into the high frequency paradigm,
Lopez de Prado, et al.
Many of the projects going forward will require Dollar and Volume bars.
"""
# Imports
from typing import Tuple
import numpy as np
import pandas as pd
from mlfinlab.data_structures.base_bars import BaseBars
class StandardBars(BaseBars):
"""
Contains all of the logic to construct the standard bars from chapter 2. This class shouldn't be used directly.
We have added functions to the package such as get_dollar_bars which will create an instance of this
class and then construct the standard bars, to return to the user.
This is because we wanted to simplify the logic as much as possible, for the end user.
"""
def __init__(self, file_path_or_df: Tuple[str, pd.DataFrame], metric: str, threshold: int = 50000,
batch_size: int = 20000000):
BaseBars.__init__(self, file_path_or_df, metric, batch_size)
# Threshold at which to sample
self.threshold = threshold
def _reset_cache(self):
"""
Implementation of abstract method _reset_cache for standard bars
"""
self.open_price = None
self.high_price, self.low_price = -np.inf, np.inf
self.cum_statistics = {'cum_ticks': 0, 'cum_dollar_value': 0, 'cum_volume': 0, 'cum_buy_volume': 0}
def _extract_bars(self, data: pd.DataFrame) -> list:
"""
For loop which compiles the various bars: dollar, volume, or tick.
        We investigated solving this in a vectorised manner but found that a for loop worked well.
:param data: Contains 3 columns - date_time, price, and volume.
"""
# Iterate over rows
list_bars = []
for row in data.values:
# Set variables
date_time = row[0]
self.tick_num += 1
price = np.float(row[1])
volume = row[2]
dollar_value = price * volume
signed_tick = self._apply_tick_rule(price)
if self.open_price is None:
self.open_price = price
# Update high low prices
self.high_price, self.low_price = self._update_high_low(price)
# Calculations
self.cum_statistics['cum_ticks'] += 1
self.cum_statistics['cum_dollar_value'] += dollar_value
self.cum_statistics['cum_volume'] += volume
if signed_tick == 1:
self.cum_statistics['cum_buy_volume'] += volume
# If threshold reached then take a sample
if self.cum_statistics[self.metric] >= self.threshold: # pylint: disable=eval-used
self._create_bars(date_time, price,
self.high_price, self.low_price, list_bars)
# Reset cache
self._reset_cache()
return list_bars
def get_dollar_bars(file_path_or_df: Tuple[str, pd.DataFrame], threshold: float = 70000000, batch_size: int = 20000000,
verbose: bool = True, to_csv: bool = False, output_path: str = None):
"""
Creates the dollar bars: date_time, open, high, low, close, volume, cum_buy_volume, cum_ticks, cum_dollar_value.
Following the paper "The Volume Clock: Insights into the high frequency paradigm" by <NAME>, et al,
    it is suggested that using 1/50 of the average daily dollar value would result in more desirable statistical
properties.
    :param file_path_or_df: (str or pd.DataFrame) Path to the csv file or Pandas Data Frame containing raw tick data in the format [date_time, price, volume]
:param threshold: (float) A cumulative value above this threshold triggers a sample to be taken.
:param batch_size: (int) The number of rows per batch. Less RAM = smaller batch size.
:param verbose: (bool) Print out batch numbers (True or False)
:param to_csv: (bool) Save bars to csv after every batch run (True or False)
:param output_path: (str) Path to csv file, if to_csv is True
:return: (pd.DataFrame) Dataframe of dollar bars
"""
bars = StandardBars(file_path_or_df=file_path_or_df, metric='cum_dollar_value', threshold=threshold,
batch_size=batch_size)
dollar_bars = bars.batch_run(verbose=verbose, to_csv=to_csv, output_path=output_path)
return dollar_bars
def get_volume_bars(file_path_or_df: Tuple[str, pd.DataFrame], threshold: float = 70000000, batch_size: int = 20000000,
verbose: bool = True, to_csv: bool = False, output_path: str = None):
"""
Creates the volume bars: date_time, open, high, low, close, volume, cum_buy_volume, cum_ticks, cum_dollar_value.
Following the paper "The Volume Clock: Insights into the high frequency paradigm" by <NAME>, et al,
    it is suggested that using 1/50 of the average daily volume would result in more desirable statistical properties.
    :param file_path_or_df: (str or pd.DataFrame) Path to the csv file or Pandas Data Frame containing raw tick data in the format [date_time, price, volume]
:param threshold: (float) A cumulative value above this threshold triggers a sample to be taken.
:param batch_size: (int) The number of rows per batch. Less RAM = smaller batch size.
:param verbose: (bool) Print out batch numbers (True or False)
:param to_csv: (bool) Save bars to csv after every batch run (True or False)
:param output_path: (str) Path to csv file, if to_csv is True
:return: (pd.DataFrame) Dataframe of volume bars
"""
bars = StandardBars(file_path_or_df=file_path_or_df, metric='cum_volume',
threshold=threshold, batch_size=batch_size)
volume_bars = bars.batch_run(verbose=verbose, to_csv=to_csv, output_path=output_path)
return volume_bars
def get_tick_bars(file_path_or_df: Tuple[str, pd.DataFrame], threshold: float = 70000000, batch_size: int = 20000000,
verbose: bool = True, to_csv: bool = False, output_path: str = None):
"""
Creates the tick bars: date_time, open, high, low, close, volume, cum_buy_volume, cum_ticks, cum_dollar_value.
    :param file_path_or_df: (str or pd.DataFrame) Path to the csv file or Pandas Data Frame containing raw tick data in the format [date_time, price, volume]
:param threshold: (float) A cumulative value above this threshold triggers a sample to be taken.
:param batch_size: (int) The number of rows per batch. Less RAM = smaller batch size.
:param verbose: (bool) Print out batch numbers (True or False)
:param to_csv: (bool) Save bars to csv after every batch run (True or False)
:param output_path: (str) Path to csv file, if to_csv is True
    :return: (pd.DataFrame) Dataframe of tick bars
"""
bars = StandardBars(file_path_or_df=file_path_or_df, metric='cum_ticks',
threshold=threshold, batch_size=batch_size)
tick_bars = bars.batch_run(verbose=verbose, to_csv=to_csv, output_path=output_path)
return tick_bars
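# A minimal usage sketch. The module docstring above suggests sampling dollar bars at
# roughly 1/50 of the average daily dollar value; the block below derives such a threshold
# from raw tick data and passes it to get_dollar_bars. The CSV name and the header-less
# [date_time, price, volume] column layout are assumptions for illustration only.
if __name__ == '__main__':
    ticks = pd.read_csv('tick_data.csv', names=['date_time', 'price', 'volume'])
    ticks['date_time'] = pd.to_datetime(ticks['date_time'])
    # Average daily traded dollar value over the sample
    daily_dollar_value = (ticks['price'] * ticks['volume']).groupby(ticks['date_time'].dt.date).sum()
    sampling_threshold = daily_dollar_value.mean() / 50
    dollar_bars = get_dollar_bars(ticks, threshold=sampling_threshold, verbose=False)
    print(dollar_bars.head())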
|
[
"numpy.float",
"mlfinlab.data_structures.base_bars.BaseBars.__init__"
] |
[((1508, 1568), 'mlfinlab.data_structures.base_bars.BaseBars.__init__', 'BaseBars.__init__', (['self', 'file_path_or_df', 'metric', 'batch_size'], {}), '(self, file_path_or_df, metric, batch_size)\n', (1525, 1568), False, 'from mlfinlab.data_structures.base_bars import BaseBars\n'), ((2513, 2529), 'numpy.float', 'np.float', (['row[1]'], {}), '(row[1])\n', (2521, 2529), True, 'import numpy as np\n')]
|
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import pytest
import logging
from bingo.util import log
@pytest.mark.parametrize("verbosity, expected_level",
[("debug", 10),
("detailed", log.DETAILED_INFO),
("standard", log.INFO),
("quiet", 30),
(31, 31),
(0.5, 25)])
@pytest.mark.filterwarnings("ignore:Unrecognized verbosity level provided. "
"Using standard verbosity.")
def test_configure_logging_verbosity(verbosity, expected_level, mocker):
mocker.patch('logging.Logger.setLevel')
log.configure_logging(verbosity)
logging.Logger.setLevel.assert_called_with(expected_level)
@pytest.mark.parametrize("module", [True, False])
@pytest.mark.parametrize("timestamp", [True, False])
def test_configure_logging_formatting(module, timestamp, mocker):
mocker.patch('logging.StreamHandler.setFormatter')
log.configure_logging(module=module, timestamp=timestamp)
positional_args, _ = logging.StreamHandler.setFormatter.call_args
formatter = positional_args[0]
assert ("module" in formatter._fmt) == module
assert ("asctime" in formatter._fmt) == timestamp
def test_configure_logging_makes_console_handler(mocker):
mocker.patch('logging.Logger.addHandler')
log.configure_logging()
positional_args, _ = logging.Logger.addHandler.call_args
assert isinstance(positional_args[0], logging.StreamHandler)
def test_console_handler_gets_two_filters(mocker):
mocker.patch('logging.StreamHandler.addFilter')
log.configure_logging()
calls = logging.StreamHandler.addFilter.call_args_list
filters = [positional_args[0] for positional_args, _ in calls]
assert isinstance(filters[0], log.StatsFilter)
assert isinstance(filters[1], log.MpiFilter)
def test_configure_logging_makes_stats_file_handler(mocker):
mocker.patch('logging.Logger.addHandler')
log.configure_logging(stats_file="test.log")
positional_args, _ = logging.Logger.addHandler.call_args
assert isinstance(positional_args[0], logging.FileHandler)
def test_stats_file_handler_gets_two_filters(mocker):
mocker.patch('logging.FileHandler.addFilter')
log.configure_logging(stats_file="test.log")
calls = logging.FileHandler.addFilter.call_args_list
filters = [positional_args[0] for positional_args, _ in calls]
assert isinstance(filters[0], log.StatsFilter)
assert isinstance(filters[1], log.MpiFilter)
@pytest.mark.parametrize("level, mpi_on, mpi_rank, expected_filter",
[(log.INFO, True, 0, True),
(log.INFO, True, 1, False),
(log.INFO, False, None, True),
(log.DETAILED_INFO, True, 0, True),
(log.DETAILED_INFO, True, 1, True),
(log.DETAILED_INFO, False, None, True)])
def test_mpi_filtering(level, mpi_on, mpi_rank, expected_filter, mocker):
log.USING_MPI = mpi_on
log.MPIRANK = mpi_rank
record = mocker.Mock()
record.levelno = level
record.msg = ""
mpi_filter = log.MpiFilter()
assert mpi_filter.filter(record) == expected_filter
@pytest.mark.parametrize("add_proc_num", [True, False])
@pytest.mark.parametrize("mpi_on", [True, False])
def test_mpi_filter_adds_proc_num(add_proc_num, mpi_on, mocker):
log.USING_MPI = mpi_on
log.MPIRANK = 0
record = mocker.Mock()
record.levelno = log.DETAILED_INFO
record.msg = ""
mpi_filter = log.MpiFilter(add_proc_num)
proc_num_expected = add_proc_num and mpi_on
_ = mpi_filter.filter(record)
assert ("0>" in record.msg) == proc_num_expected
@pytest.mark.parametrize("filter_out", [True, False])
@pytest.mark.parametrize("stats_extra", [True, False, None])
def test_stats_filter(filter_out, stats_extra, mocker):
record = mocker.Mock()
if stats_extra is not None:
record.stats = stats_extra
stats_filter = log.StatsFilter(filter_out)
if stats_extra is None:
expected_filter = filter_out
else:
expected_filter = filter_out != stats_extra
assert stats_filter.filter(record) == expected_filter
|
[
"bingo.util.log.MpiFilter",
"pytest.mark.filterwarnings",
"bingo.util.log.configure_logging",
"bingo.util.log.StatsFilter",
"logging.Logger.setLevel.assert_called_with",
"pytest.mark.parametrize"
] |
[((174, 345), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""verbosity, expected_level"""', "[('debug', 10), ('detailed', log.DETAILED_INFO), ('standard', log.INFO), (\n 'quiet', 30), (31, 31), (0.5, 25)]"], {}), "('verbosity, expected_level', [('debug', 10), (\n 'detailed', log.DETAILED_INFO), ('standard', log.INFO), ('quiet', 30),\n (31, 31), (0.5, 25)])\n", (197, 345), False, 'import pytest\n'), ((493, 599), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Unrecognized verbosity level provided. Using standard verbosity."""'], {}), "(\n 'ignore:Unrecognized verbosity level provided. Using standard verbosity.')\n", (519, 599), False, 'import pytest\n'), ((846, 894), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module"""', '[True, False]'], {}), "('module', [True, False])\n", (869, 894), False, 'import pytest\n'), ((896, 947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""timestamp"""', '[True, False]'], {}), "('timestamp', [True, False])\n", (919, 947), False, 'import pytest\n'), ((2623, 2904), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""level, mpi_on, mpi_rank, expected_filter"""', '[(log.INFO, True, 0, True), (log.INFO, True, 1, False), (log.INFO, False,\n None, True), (log.DETAILED_INFO, True, 0, True), (log.DETAILED_INFO, \n True, 1, True), (log.DETAILED_INFO, False, None, True)]'], {}), "('level, mpi_on, mpi_rank, expected_filter', [(log.\n INFO, True, 0, True), (log.INFO, True, 1, False), (log.INFO, False,\n None, True), (log.DETAILED_INFO, True, 0, True), (log.DETAILED_INFO, \n True, 1, True), (log.DETAILED_INFO, False, None, True)])\n", (2646, 2904), False, 'import pytest\n'), ((3341, 3395), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""add_proc_num"""', '[True, False]'], {}), "('add_proc_num', [True, False])\n", (3364, 3395), False, 'import pytest\n'), ((3397, 3445), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mpi_on"""', '[True, False]'], {}), "('mpi_on', [True, False])\n", (3420, 3445), False, 'import pytest\n'), ((3828, 3880), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filter_out"""', '[True, False]'], {}), "('filter_out', [True, False])\n", (3851, 3880), False, 'import pytest\n'), ((3882, 3941), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stats_extra"""', '[True, False, None]'], {}), "('stats_extra', [True, False, None])\n", (3905, 3941), False, 'import pytest\n'), ((747, 779), 'bingo.util.log.configure_logging', 'log.configure_logging', (['verbosity'], {}), '(verbosity)\n', (768, 779), False, 'from bingo.util import log\n'), ((784, 842), 'logging.Logger.setLevel.assert_called_with', 'logging.Logger.setLevel.assert_called_with', (['expected_level'], {}), '(expected_level)\n', (826, 842), False, 'import logging\n'), ((1073, 1130), 'bingo.util.log.configure_logging', 'log.configure_logging', ([], {'module': 'module', 'timestamp': 'timestamp'}), '(module=module, timestamp=timestamp)\n', (1094, 1130), False, 'from bingo.util import log\n'), ((1450, 1473), 'bingo.util.log.configure_logging', 'log.configure_logging', ([], {}), '()\n', (1471, 1473), False, 'from bingo.util import log\n'), ((1709, 1732), 'bingo.util.log.configure_logging', 'log.configure_logging', ([], {}), '()\n', (1730, 1732), False, 'from bingo.util import log\n'), ((2072, 2116), 'bingo.util.log.configure_logging', 'log.configure_logging', ([], {'stats_file': '"""test.log"""'}), "(stats_file='test.log')\n", (2093, 2116), False, 'from bingo.util import log\n'), ((2351, 
2395), 'bingo.util.log.configure_logging', 'log.configure_logging', ([], {'stats_file': '"""test.log"""'}), "(stats_file='test.log')\n", (2372, 2395), False, 'from bingo.util import log\n'), ((3266, 3281), 'bingo.util.log.MpiFilter', 'log.MpiFilter', ([], {}), '()\n', (3279, 3281), False, 'from bingo.util import log\n'), ((3662, 3689), 'bingo.util.log.MpiFilter', 'log.MpiFilter', (['add_proc_num'], {}), '(add_proc_num)\n', (3675, 3689), False, 'from bingo.util import log\n'), ((4124, 4151), 'bingo.util.log.StatsFilter', 'log.StatsFilter', (['filter_out'], {}), '(filter_out)\n', (4139, 4151), False, 'from bingo.util import log\n')]
|
# -*- coding:utf-8 -*-
import re
import json
import requests
"""
Target app: BiXin (比心陪练), a companion-gaming app
Target URL: the app's short-video share link
Crawling approach:
1. Get the video URL from the share feature inside the app and extract its timelineId
2. Send a POST request to https://h5.hibixin.com/capi/bixin/timeline/shareTimeline to obtain the JSON data
"""
class BiXin(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
def get_video(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
"Host": "h5.hibixin.com",
"Origin": "https://h5.hibixin.com",
"Content-Type": "application/json"
}
pattern = re.compile("dynamic_id=(\w+)", re.S)
dynamic_id = re.findall(pattern, str(self.url).strip())[0]
try:
            # a single video shared by the user
base_url = "https://h5.hibixin.com/capi/bixin/timeline/shareTimeline"
data = {
"timelineId": dynamic_id
}
response = self.session.post(url=base_url, headers=headers, data=json.dumps(data), timeout=10)
if response.status_code == 200:
doc = response.json()
title = doc["result"]["timelineInfo"]["content"]
cover = doc["result"]["timelineInfo"]["videoInfoDTO"]["videoFirstImg"]
video = doc["result"]["timelineInfo"]["videoInfoDTO"]["videoUrl"]
info = {
"title": title,
"cover": cover,
"video": video
}
return json.dumps(info, ensure_ascii=False)
else:
return json.dumps({"info": "暂无相关数据,请检查相关数据:"}, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
if __name__ == '__main__':
bi_xin = BiXin("https://h5.hibixin.com/bixin/web-share/index?refer_page=ExploreDynamicDetailPage"
"&refer_share_channel=qqFriends#/?dynamic_id=1011146143398583404")
print(bi_xin.get_video())
|
[
"requests.Session",
"json.dumps",
"re.compile"
] |
[((325, 343), 'requests.Session', 'requests.Session', ([], {}), '()\n', (341, 343), False, 'import requests\n'), ((697, 734), 're.compile', 're.compile', (['"""dynamic_id=(\\\\w+)"""', 're.S'], {}), "('dynamic_id=(\\\\w+)', re.S)\n", (707, 734), False, 'import re\n'), ((1592, 1628), 'json.dumps', 'json.dumps', (['info'], {'ensure_ascii': '(False)'}), '(info, ensure_ascii=False)\n', (1602, 1628), False, 'import json\n'), ((1670, 1729), 'json.dumps', 'json.dumps', (["{'info': '暂无相关数据,请检查相关数据:'}"], {'ensure_ascii': '(False)'}), "({'info': '暂无相关数据,请检查相关数据:'}, ensure_ascii=False)\n", (1680, 1729), False, 'import json\n'), ((1073, 1089), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1083, 1089), False, 'import json\n')]
|
from get_adelphi_info import AdelphiInfo
import json
create_json = AdelphiInfo()
filename = "tmp/adelphi_calendar.json"
calendar_info_dict = dict()
with open(filename) as f:
calendar_info_dict = json.load(f)
|
[
"json.load",
"get_adelphi_info.AdelphiInfo"
] |
[((67, 80), 'get_adelphi_info.AdelphiInfo', 'AdelphiInfo', ([], {}), '()\n', (78, 80), False, 'from get_adelphi_info import AdelphiInfo\n'), ((201, 213), 'json.load', 'json.load', (['f'], {}), '(f)\n', (210, 213), False, 'import json\n')]
|
# Generated by Django 2.0.4 on 2018-04-10 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0005_auto_20180410_1458'),
]
operations = [
migrations.AddField(
model_name='account',
name='last_modified',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='account',
name='origin',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='account',
name='slug',
field=models.SlugField(blank=True, max_length=150),
),
]
|
[
"django.db.models.DateTimeField",
"django.db.models.SlugField"
] |
[((342, 388), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (362, 388), False, 'from django.db import migrations, models\n'), ((509, 559), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (529, 559), False, 'from django.db import migrations, models\n'), ((678, 722), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'max_length': '(150)'}), '(blank=True, max_length=150)\n', (694, 722), False, 'from django.db import migrations, models\n')]
|