| code | apis | extract_api |
|---|---|---|
#
# Copyright 2017 Human Longevity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from disdat.pipe import PipeTask
import disdat.api as api
import pandas as pd
"""
DF Duplicate Example
Double the size of an input dataframe or dictionary by replicating its rows.
Note, this pipeline has no upstream dependencies.
This examples shows:
1.) A simple single upstream dependency
2.) How to return a dataframe in 'DataMaker' and how DFDup reads it.
Pre Execution:
$export PYTHONPATH=$DISDAT_HOME/disdat/examples/pipelines
$dsdt context examples; dsdt switch examples
Execution:
$python ./df_dup.py
or:
$dsdt apply - - df_dup.DFDup
"""
class DataMaker(PipeTask):
def pipe_run(self):
data = pd.DataFrame({'heart_rate': [60, 70, 100, 55], 'age': [30, 44, 18, 77]})
return data
class DFDup(PipeTask):
def pipe_requires(self):
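        # 'example_data' names the upstream result and becomes the keyword
        # argument of the same name passed to pipe_run below.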
self.add_dependency('example_data', DataMaker, {})
def pipe_run(self, example_data=None):
"""
Doubles data in a dataframe or dictionary and writes to the output
Args:
pipeline_input: The user's input
example_data: Data if the user doesn't give us anything
"""
pipeline_input = example_data
if isinstance(pipeline_input, dict):
pipeline_input.update({"{}_copy".format(k): v for k, v in pipeline_input.items()})
output = pipeline_input
elif isinstance(pipeline_input, pd.DataFrame):
output = pd.concat([pipeline_input, pipeline_input], axis=0)
else:
print ("Copy Task requires an input DataFrame or an input dictionary, not {}".format(type(pipeline_input)))
output = None
return output
if __name__ == "__main__":
api.apply('examples', 'DFDup', params={})
|
[
"pandas.DataFrame",
"disdat.api.apply",
"pandas.concat"
] |
[((2249, 2290), 'disdat.api.apply', 'api.apply', (['"""examples"""', '"""DFDup"""'], {'params': '{}'}), "('examples', 'DFDup', params={})\n", (2258, 2290), True, 'import disdat.api as api\n'), ((1210, 1282), 'pandas.DataFrame', 'pd.DataFrame', (["{'heart_rate': [60, 70, 100, 55], 'age': [30, 44, 18, 77]}"], {}), "({'heart_rate': [60, 70, 100, 55], 'age': [30, 44, 18, 77]})\n", (1222, 1282), True, 'import pandas as pd\n'), ((1981, 2032), 'pandas.concat', 'pd.concat', (['[pipeline_input, pipeline_input]'], {'axis': '(0)'}), '([pipeline_input, pipeline_input], axis=0)\n', (1990, 2032), True, 'import pandas as pd\n')]
|
import sys
#print(sys.path)
sys.path.append('/home/pi/.local/lib/python3.7/site-packages')
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('chatbot_model4.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))
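# words.pkl / classes.pkl presumably hold the vocabulary and intent labels saved at training time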
from nlip2 import name
def clean_up_sentence(sentence):
# tokenize the pattern - split words into array
sentence_words = nltk.word_tokenize(sentence)
    # lemmatize each word - reduce it to its base form
sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
# tokenize the pattern
sentence_words = clean_up_sentence(sentence)
# bag of words - matrix of N words, vocabulary matrix
bag = [0]*len(words)
for s in sentence_words:
for i,w in enumerate(words):
if w == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
print ("found in bag: %s" % w)
    return np.array(bag)
def predict_class(sentence, model):
    # predict intent probabilities for the sentence
    p = bow(sentence, words, show_details=False)
    res = model.predict(np.array([p]))[0]
    # track the highest-probability class: index l with probability o
    o = 0
    l = 0
    for k, prob in enumerate(res):
        print(k, prob)
        if prob > o:
            o = prob
            l = k
    print(o, l)
    return_list = [{"intent": classes[l], "probability": str(o)}]
    return return_list, o
def getResponse(ints, intents_json):
    tag = ints[0]['intent']
    list_of_intents = intents_json['intents']
    result = ""  # empty string if no intent tag matches (handled by the caller)
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result
def chatbot_response(text):
    ints, o = predict_class(text, model)
    i = 0
    for j in ints:
        if j['intent'] == "goodbye":
            i = 1
    res = getResponse(ints, intents)
    return res, i, o
#tezt="are you hungry now"
#k=clean_up_sentence(tezt)
#print(k)
#s=bow(tezt,k)
#print(s)
#p=predict_class(tezt, model)
#print(p)
while True:
    tezt = input("user:")
    k, s, o = chatbot_response(tezt)
    if k == "":
        # no response matched: treat the input as the user's name
        print("your name")
        k = name(tezt)
        k = "nice to meet you " + k
    if o < 0.68:
        # low confidence: hand the query off to the browser
        print("browser getting activated")
    print("bot:", k)
    if s == 1:
        break
|
[
"sys.path.append",
"keras.models.load_model",
"nltk.stem.WordNetLemmatizer",
"random.choice",
"numpy.array",
"nlip2.name",
"nltk.word_tokenize"
] |
[((30, 92), 'sys.path.append', 'sys.path.append', (['"""/home/pi/.local/lib/python3.7/site-packages"""'], {}), "('/home/pi/.local/lib/python3.7/site-packages')\n", (45, 92), False, 'import sys\n'), ((161, 180), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (178, 180), False, 'from nltk.stem import WordNetLemmatizer\n'), ((262, 293), 'keras.models.load_model', 'load_model', (['"""chatbot_model4.h5"""'], {}), "('chatbot_model4.h5')\n", (272, 293), False, 'from keras.models import load_model\n'), ((602, 630), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (620, 630), False, 'import nltk\n'), ((1388, 1401), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (1396, 1401), True, 'import numpy as np\n'), ((2951, 2961), 'nlip2.name', 'name', (['tezt'], {}), '(tezt)\n', (2955, 2961), False, 'from nlip2 import name\n'), ((1562, 1575), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (1570, 1575), True, 'import numpy as np\n'), ((2367, 2396), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (2380, 2396), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
from dao import db, Base
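# Association object linking 'listas' and 'itens', carrying the extra 'preco' column.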
class ItemLista(Base):
__tablename__ = 'itenslistas'
lista_id = db.Column(db.Integer, db.ForeignKey('listas.id'), primary_key=True)
item_id = db.Column(db.Integer, db.ForeignKey('itens.id'), primary_key=True)
preco = db.Column(db.String(100))
item = db.relationship("ItemModel", back_populates="listas", uselist=False)
lista = db.relationship("ListaModel", back_populates="itens", uselist=False)
def __init__(self, preco):
self.preco = preco
|
[
"dao.db.String",
"dao.db.relationship",
"dao.db.ForeignKey"
] |
[((321, 389), 'dao.db.relationship', 'db.relationship', (['"""ItemModel"""'], {'back_populates': '"""listas"""', 'uselist': '(False)'}), "('ItemModel', back_populates='listas', uselist=False)\n", (336, 389), False, 'from dao import db, Base\n'), ((402, 470), 'dao.db.relationship', 'db.relationship', (['"""ListaModel"""'], {'back_populates': '"""itens"""', 'uselist': '(False)'}), "('ListaModel', back_populates='itens', uselist=False)\n", (417, 470), False, 'from dao import db, Base\n'), ((145, 171), 'dao.db.ForeignKey', 'db.ForeignKey', (['"""listas.id"""'], {}), "('listas.id')\n", (158, 171), False, 'from dao import db, Base\n'), ((227, 252), 'dao.db.ForeignKey', 'db.ForeignKey', (['"""itens.id"""'], {}), "('itens.id')\n", (240, 252), False, 'from dao import db, Base\n'), ((294, 308), 'dao.db.String', 'db.String', (['(100)'], {}), '(100)\n', (303, 308), False, 'from dao import db, Base\n')]
|
from django.http import HttpResponse, HttpRequest, JsonResponse
from rest_framework.decorators import api_view
from rest_framework.request import Request
@api_view(["POST"])
def success_view(request: Request) -> HttpResponse:
return JsonResponse({"status": "success", "body": request.data.get("field")})
def server_error_view(request: HttpRequest) -> HttpResponse:
return HttpResponse("Internal server error.", status=500)
|
[
"rest_framework.decorators.api_view",
"django.http.HttpResponse"
] |
[((157, 175), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (165, 175), False, 'from rest_framework.decorators import api_view\n'), ((384, 434), 'django.http.HttpResponse', 'HttpResponse', (['"""Internal server error."""'], {'status': '(500)'}), "('Internal server error.', status=500)\n", (396, 434), False, 'from django.http import HttpResponse, HttpRequest, JsonResponse\n')]
|
from django import template
register = template.Library()
def get_parent_geo(geo_levels, geo):
"""
only return the parent geo for a particular geography
"""
compare_level = []
for level in geo["parents"]:
compare_level.append(level)
return compare_level[:2]
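# expose the function to templates as the 'parent_geo' filter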
register.filter("parent_geo", get_parent_geo)
|
[
"django.template.Library"
] |
[((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')]
|
import dill
import numpy as np
import tensorflow as tf
from collections import defaultdict
from sklearn.model_selection import train_test_split
with open('motion_capture_20181011-1931.dill', 'rb') as f:
x = dill.load(f)
vec = [l[4] for l in x]
# print(len(vec))
x = map(str, vec)
x = list(x)
#X_train, X_test = train_test_split(x, test_size=0.33, shuffle=False)
corpus = [x]
#restore model for testing
sess = tf.Session()
new_saver = tf.train.import_meta_graph('model.ckpt.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./'))
all_vars = tf.get_collection('vars')
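# after this loop w1 holds the last variable in the collection,
# which is used below as the embedding matrix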
for v in all_vars:
w1 = sess.run(v)
print(w1)
#generate data for testing
word_counts = defaultdict(int)
for row in corpus:
for word in row:
word_counts[word] += 1
v_count = len(word_counts.keys())
# GENERATE LOOKUP DICTIONARIES
words_list = sorted(list(word_counts.keys()), reverse=False)
word_index = dict((word, i) for i, word in enumerate(words_list))
index_word = dict((i, word) for i, word in enumerate(words_list))
def vec_sim(vec, top_n):
# CYCLE THROUGH VOCAB
word_sim = {}
output = []
for i in range(v_count):
v_w2 = w1[i]
theta_num = np.dot(vec, v_w2)
theta_den = np.linalg.norm(vec) * np.linalg.norm(v_w2)
theta = theta_num / theta_den
word = index_word[i]
word_sim[word] = theta
    # sort by similarity, highest first
    words_sorted = sorted(word_sim.items(), key=lambda item: item[1], reverse=True)
for word, sim in words_sorted[:top_n]:
print('vec_sim', word, sim)
output.append(word)
output.append(sim)
return output
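# quick smoke test with a dummy vector (assumes the embeddings are 2-dimensional)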
corpus = [(1,1)]
output = vec_sim(corpus,1)
print(output)
|
[
"tensorflow.train.import_meta_graph",
"tensorflow.get_collection",
"tensorflow.Session",
"dill.load",
"collections.defaultdict",
"tensorflow.train.latest_checkpoint",
"numpy.linalg.norm",
"numpy.dot"
] |
[((420, 432), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (430, 432), True, 'import tensorflow as tf\n'), ((445, 490), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""model.ckpt.meta"""'], {}), "('model.ckpt.meta')\n", (471, 490), True, 'import tensorflow as tf\n'), ((560, 585), 'tensorflow.get_collection', 'tf.get_collection', (['"""vars"""'], {}), "('vars')\n", (577, 585), True, 'import tensorflow as tf\n'), ((682, 698), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (693, 698), False, 'from collections import defaultdict\n'), ((214, 226), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (223, 226), False, 'import dill\n'), ((515, 547), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./"""'], {}), "('./')\n", (541, 547), True, 'import tensorflow as tf\n'), ((1187, 1204), 'numpy.dot', 'np.dot', (['vec', 'v_w2'], {}), '(vec, v_w2)\n', (1193, 1204), True, 'import numpy as np\n'), ((1225, 1244), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (1239, 1244), True, 'import numpy as np\n'), ((1247, 1267), 'numpy.linalg.norm', 'np.linalg.norm', (['v_w2'], {}), '(v_w2)\n', (1261, 1267), True, 'import numpy as np\n')]
|
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
# create ear and mouth
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","Speech")
mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Heather&txt=")
gender = 1
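# voice flag: 0 = male (Rod), 1 = female (Heather)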
# start listening for the words we are interested in
ear.startListening("hello | forward | back | go |turn left | turn right | male voice | female voice")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.getName(), "heard");
# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
def heard():
data = msg_ear_recognized.data[0]
if (data == "male voice"):
mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Rod&txt=")
global gender
gender = 0
mouth.speak("i am a man now")
elif (data == "female voice"):
mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Heather&txt=")
global gender
gender = 1
mouth.speak("i am a women now")
elif (data == "hello"):
if gender == 0 :
mouth.speak("Hello")
elif gender == 1 :
mouth.speak("Hello.")
elif (data == "forward"):
if gender == 0 :
mouth.speak("forward")
elif gender == 1 :
mouth.speak("forward.")
elif (data == "back"):
if gender == 0 :
mouth.speak("back")
elif gender == 1:
mouth.speak("back.")
elif (data == "go"):
if gender == 0 :
mouth.speak("go")
elif gender == 1 :
mouth.speak("go.")
elif (data == "turn left"):
if gender == 0 :
mouth.speak("turn left")
elif gender == 1 :
mouth.speak("turn left.")
elif (data == "turn right"):
if gender == 0 :
mouth.speak("turn right")
elif gender == 1 :
mouth.speak("turn right.")
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
ear.attach("mouth")
|
[
"org.myrobotlab.service.Runtime.createAndStart"
] |
[((188, 227), 'org.myrobotlab.service.Runtime.createAndStart', 'Runtime.createAndStart', (['"""ear"""', '"""Sphinx"""'], {}), "('ear', 'Sphinx')\n", (210, 227), False, 'from org.myrobotlab.service import Runtime\n'), ((235, 276), 'org.myrobotlab.service.Runtime.createAndStart', 'Runtime.createAndStart', (['"""mouth"""', '"""Speech"""'], {}), "('mouth', 'Speech')\n", (257, 276), False, 'from org.myrobotlab.service import Runtime\n')]
|
# encoding: utf-8
# !/usr/bin/python
from redis import Redis
from functools import wraps
from flask import session, g, make_response, Blueprint, jsonify, request, redirect
from flask_login import LoginManager, UserMixin, login_required
from Tools.Mysql_db import DB
from Function.Common import *
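# the star import is assumed to provide IPManager, redis_host/redis_port,
# web_prefix_url, company_ips and request_special_protocol used below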
from dms.utils.manager import Explorer
__author__ = 'zhouheng'
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
db = DB()
ip = IPManager()
# control = ControlManager()
# my_email = EmailManager(conf_dir)
redis = Redis(host=redis_host, port=redis_port)
# job_store = SQLAlchemyJobStore(url=db.url)
# dms_scheduler.add_jobstore(job_store)
class User(UserMixin):
user_name = ""
def get_id(self):
return self.user_name
login_manager = LoginManager()
# login_manager.session_protection = 'strong'
@login_manager.user_loader
def load_user(user_name):
user = User()
user.user_name = user_name
if "policies" not in session:
session["policies"] = dict()
user.policies = session["policies"]
if "role" not in session:
session["role"] = 0
user.role = session["role"]
return user
login_manager.login_view = "dms_view.index"
web_prefix = web_prefix_url
config_url_prefix = web_prefix + '/config'
api_url_prefix = web_prefix + "/dev/api"
status_url_prefix = web_prefix + "/dev/api/status"
test_url_prefix = web_prefix + "/dev/api/test"
bug_url_prefix = web_prefix + "/dev/problem"
right_url_prefix = web_prefix + "/dev/right"
param_url_prefix = web_prefix + "/dev/param"
dev_url_prefix = web_prefix + "/dev"
dms_url_prefix = web_prefix + ""
data_url_prefix = web_prefix + "/data"
log_url_prefix = web_prefix + "/log"
tools_url_prefix = web_prefix + "/tools"
release_url_prefix = web_prefix + "/dev/release"
dyups_url_prefix = web_prefix + "/dev/dyups"
github_url_prefix = web_prefix + "/github"
chat_url_prefix = web_prefix + "/chat"
others_url_prefix = web_prefix + "/others"
pay_url_prefix = web_prefix + "/wx/pay"
jingdu_url_prefix = web_prefix + "/jd"
editor_url_prefix = web_prefix + "/editor"
article_url_prefix = web_prefix + "/article"
message_url_prefix = web_prefix + "/message"
short_link_prefix = web_prefix + "/s"
dist_key_prefix = web_prefix + "/dist/key"
performance_prefix = web_prefix + "/performance"
data_dir = "/geneac/dmsdata"
editor_data_dir = data_dir + "/editor"
article_data_dir = data_dir + "/article"
# if os.path.isdir(article_data_dir) is False:
# os.mkdir(article_data_dir)
import os
# if os.path.isdir(data_dir) is False:
# os.mkdir(data_dir)
# if os.path.isdir(editor_data_dir) is False:
# os.mkdir(editor_data_dir)
def company_ip_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "request_IP" not in g:
return make_response(u"因为一些原因页面丢失了", 404)
if g.request_IP not in range(company_ips[0], company_ips[1]) and g.user_name != "zh_test":
return make_response(u"因为一些原因页面不知道去哪了", 404)
return f(*args, **kwargs)
return decorated_function
blues = {}
dms_job = []
explorer = Explorer.get_instance()
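# Blueprint factory: wires optional login, required-config and protocol checks
# plus a /ping/ route onto a new Blueprint and records it in `blues`.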
def create_blue(blue_name, url_prefix="/", auth_required=True, special_protocol=False, **kwargs):
required_resource = kwargs.pop('required_resource', None)
add_blue = Blueprint(blue_name, __name__)
if auth_required:
@add_blue.before_request
@login_required
def before_request():
if required_resource:
for rr in required_resource:
if rr in explorer.missing_config:
redirect_url = "/config?keys=%s" % \
",".join(explorer.missing_config[rr])
return redirect(redirect_url)
if special_protocol is True:
r_protocol = request.headers.get("X-Request-Protocol", "http")
if r_protocol not in request_special_protocol:
redirect_url = "%s://%s%s" % (request_special_protocol[0], request.host, request.full_path)
return redirect(redirect_url)
# g.role_value = control.role_value
@add_blue.route("/ping/", methods=["GET"])
def ping():
from time import sleep
sleep(5)
return jsonify({"status": True, "message": "ping %s success" % request.path})
if blue_name not in blues:
blues[blue_name] = [add_blue, url_prefix]
return add_blue
# @login_manager.unauthorized_callback
# def unauthorized_callback_func():
# if request.is_xhr:
# return make_response("登录状态已过期,需要重新登录", 302)
|
[
"redis.Redis",
"flask.Blueprint",
"flask.redirect",
"flask.request.headers.get",
"Tools.Mysql_db.DB",
"time.sleep",
"dms.utils.manager.Explorer.get_instance",
"flask.jsonify",
"functools.wraps",
"flask.make_response",
"flask_login.LoginManager"
] |
[((404, 408), 'Tools.Mysql_db.DB', 'DB', ([], {}), '()\n', (406, 408), False, 'from Tools.Mysql_db import DB\n'), ((500, 539), 'redis.Redis', 'Redis', ([], {'host': 'redis_host', 'port': 'redis_port'}), '(host=redis_host, port=redis_port)\n', (505, 539), False, 'from redis import Redis\n'), ((740, 754), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (752, 754), False, 'from flask_login import LoginManager, UserMixin, login_required\n'), ((3042, 3065), 'dms.utils.manager.Explorer.get_instance', 'Explorer.get_instance', ([], {}), '()\n', (3063, 3065), False, 'from dms.utils.manager import Explorer\n'), ((2642, 2650), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2647, 2650), False, 'from functools import wraps\n'), ((3243, 3273), 'flask.Blueprint', 'Blueprint', (['blue_name', '__name__'], {}), '(blue_name, __name__)\n', (3252, 3273), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n'), ((4205, 4213), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (4210, 4213), False, 'from time import sleep\n'), ((4229, 4299), 'flask.jsonify', 'jsonify', (["{'status': True, 'message': 'ping %s success' % request.path}"], {}), "({'status': True, 'message': 'ping %s success' % request.path})\n", (4236, 4299), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n'), ((2749, 2783), 'flask.make_response', 'make_response', (['u"""因为一些原因页面丢失了"""', '(404)'], {}), "(u'因为一些原因页面丢失了', 404)\n", (2762, 2783), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n'), ((2902, 2939), 'flask.make_response', 'make_response', (['u"""因为一些原因页面不知道去哪了"""', '(404)'], {}), "(u'因为一些原因页面不知道去哪了', 404)\n", (2915, 2939), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n'), ((3778, 3827), 'flask.request.headers.get', 'request.headers.get', (['"""X-Request-Protocol"""', '"""http"""'], {}), "('X-Request-Protocol', 'http')\n", (3797, 3827), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n'), ((4030, 4052), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (4038, 4052), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n'), ((3685, 3707), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (3693, 3707), False, 'from flask import session, g, make_response, Blueprint, jsonify, request, redirect\n')]
|
import pytest
import Levenshtein as lev
from ..comp import cmp_titles
from ..helpers import std
from ..config import *
#test linking by titles
@pytest.mark.parametrize(
"titles1, titles2, output",
[
(
[
"Resident Evil 2",
"Biohazard 2"
],
[
"Resident Evil 2",
"RE2"
], 1),
(
[
"Resident Evil 2",
"Biohazard 2"
],
[
"Resident Evil",
"RE"
], 1 - NUMBERING_WEIGHT),
(
[
"FIFA 2015"
],
[
"Fifa '16",
"Fifa football 2016"
], 1 - NUMBERING_WEIGHT),
(
[
"Resident Evil 2",
],
[
"Resident Evil II",
], 1),
],
)
def test_linking_by_titles(titles1, titles2, output):
assert cmp_titles(titles1, titles2) == output
|
[
"pytest.mark.parametrize"
] |
[((145, 492), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""titles1, titles2, output"""', '[([\'Resident Evil 2\', \'Biohazard 2\'], [\'Resident Evil 2\', \'RE2\'], 1), ([\n \'Resident Evil 2\', \'Biohazard 2\'], [\'Resident Evil\', \'RE\'], 1 -\n NUMBERING_WEIGHT), ([\'FIFA 2015\'], ["Fifa \'16", \'Fifa football 2016\'], \n 1 - NUMBERING_WEIGHT), ([\'Resident Evil 2\'], [\'Resident Evil II\'], 1)]'], {}), '(\'titles1, titles2, output\', [([\'Resident Evil 2\',\n \'Biohazard 2\'], [\'Resident Evil 2\', \'RE2\'], 1), ([\'Resident Evil 2\',\n \'Biohazard 2\'], [\'Resident Evil\', \'RE\'], 1 - NUMBERING_WEIGHT), ([\n \'FIFA 2015\'], ["Fifa \'16", \'Fifa football 2016\'], 1 - NUMBERING_WEIGHT),\n ([\'Resident Evil 2\'], [\'Resident Evil II\'], 1)])\n', (168, 492), False, 'import pytest\n')]
|
""" Holds the Data class """
import tensorflow as tf
import rnn
class Data:
""" Train holds functions responsible for producing training examples from Shakespearian text """
@staticmethod
def get_sequences():
"""
Returns batch sequences of the training text
:return: [sequences]
"""
seq_length = 100
text_as_int = rnn.Vectorize.get_text_as_int()
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
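        # chunks of seq_length + 1 characters: one extra so each chunk can be
        # split into an (input, target) pair by split_input_target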
return char_dataset.batch(seq_length + 1, drop_remainder=True)
@staticmethod
def split_input_target(chunk):
"""
Takes a chunk of a sequence and splits it according to a source and a target
:param chunk: string - chunk of sequence
:return: tuple - (input, target) e.g chunk = hello => (hell, ello)
"""
input_txt = chunk[:-1]
target_txt = chunk[1:]
return input_txt, target_txt
@staticmethod
def get_dataset():
"""
Returns the training dataset
:return: [(input, target)]
"""
sequences = Data.get_sequences()
return sequences.map(Data.split_input_target)
@staticmethod
def get_training_dataset():
""" Get training dataset """
batch_size = 64
buffer_size = 10000 # buffer size to shuffle the dataset
dataset = Data.get_dataset()
return dataset.shuffle(buffer_size).batch(batch_size, drop_remainder=True)
|
[
"rnn.Vectorize.get_text_as_int",
"tensorflow.data.Dataset.from_tensor_slices"
] |
[((379, 410), 'rnn.Vectorize.get_text_as_int', 'rnn.Vectorize.get_text_as_int', ([], {}), '()\n', (408, 410), False, 'import rnn\n'), ((434, 481), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['text_as_int'], {}), '(text_as_int)\n', (468, 481), True, 'import tensorflow as tf\n')]
|
import os
from shutil import copyfile
from shutil import move
from random import randint
source = "/home/vegas/CBIS-DDSM"
destination = "/home/vegas/CBIS-DDSM-COCO_format"
train = os.path.join(destination, 'train')
test = os.path.join(destination, 'test')
val = os.path.join(destination, 'validation')
os.mkdir(destination)
os.mkdir(train)
os.mkdir(test)
os.mkdir(val)
os.mkdir(os.path.join(train, 'annotations'))
os.mkdir(os.path.join(train, 'shapes'))
os.mkdir(os.path.join(test, 'annotations'))
os.mkdir(os.path.join(test, 'shapes'))
os.mkdir(os.path.join(val, 'annotations'))
os.mkdir(os.path.join(val, 'shapes'))
counter = 0
for root, _, files in os.walk(source):
if 'Train' in root:
counter += 1
for file in files:
if '.png' in file:
if 'mask' in file:
mask = os.path.join(root, file)
sep = mask.split(os.sep)
image_id = sep[len(sep) - 2]
copyfile(mask, os.path.join(train, 'annotations', image_id + '_mass.png'))
else:
image = os.path.join(root, file)
sep = image.split(os.sep)
image_id = sep[len(sep) - 2]
copyfile(image, os.path.join(train, 'shapes', image_id + '.png'))
elif 'Test' in root:
counter += 1
for file in files:
if '.png' in file:
if 'mask' in file:
mask = os.path.join(root, file)
sep = mask.split(os.sep)
image_id = sep[len(sep) - 2]
copyfile(mask, os.path.join(test, 'annotations', image_id + '_mass.png'))
else:
image = os.path.join(root, file)
sep = image.split(os.sep)
image_id = sep[len(sep) - 2]
copyfile(image, os.path.join(test, 'shapes', image_id + '.png'))
print('Processing {} of 1592'.format(counter))
validation = []
for root, _, files in os.walk(os.path.join(train, 'shapes')):
    for file in files:
        # move roughly 20% of the training images into the validation split
        if randint(0, 4) == 0:
            validation.append(file[:-4])
            move(os.path.join(root, file), os.path.join(val, 'shapes', file))
for root, _, files in os.walk(os.path.join(train, 'annotations')):
for file in files:
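        # strip the '_mass.png' suffix (9 characters) to recover the image id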
if file[:-9] in validation:
move(os.path.join(root, file), os.path.join(val, 'annotations', file))
|
[
"os.mkdir",
"os.walk",
"os.path.join",
"random.randint"
] |
[((188, 222), 'os.path.join', 'os.path.join', (['destination', '"""train"""'], {}), "(destination, 'train')\n", (200, 222), False, 'import os\n'), ((231, 264), 'os.path.join', 'os.path.join', (['destination', '"""test"""'], {}), "(destination, 'test')\n", (243, 264), False, 'import os\n'), ((272, 311), 'os.path.join', 'os.path.join', (['destination', '"""validation"""'], {}), "(destination, 'validation')\n", (284, 311), False, 'import os\n'), ((315, 336), 'os.mkdir', 'os.mkdir', (['destination'], {}), '(destination)\n', (323, 336), False, 'import os\n'), ((338, 353), 'os.mkdir', 'os.mkdir', (['train'], {}), '(train)\n', (346, 353), False, 'import os\n'), ((355, 369), 'os.mkdir', 'os.mkdir', (['test'], {}), '(test)\n', (363, 369), False, 'import os\n'), ((371, 384), 'os.mkdir', 'os.mkdir', (['val'], {}), '(val)\n', (379, 384), False, 'import os\n'), ((680, 695), 'os.walk', 'os.walk', (['source'], {}), '(source)\n', (687, 695), False, 'import os\n'), ((395, 429), 'os.path.join', 'os.path.join', (['train', '"""annotations"""'], {}), "(train, 'annotations')\n", (407, 429), False, 'import os\n'), ((441, 470), 'os.path.join', 'os.path.join', (['train', '"""shapes"""'], {}), "(train, 'shapes')\n", (453, 470), False, 'import os\n'), ((482, 515), 'os.path.join', 'os.path.join', (['test', '"""annotations"""'], {}), "(test, 'annotations')\n", (494, 515), False, 'import os\n'), ((527, 555), 'os.path.join', 'os.path.join', (['test', '"""shapes"""'], {}), "(test, 'shapes')\n", (539, 555), False, 'import os\n'), ((567, 599), 'os.path.join', 'os.path.join', (['val', '"""annotations"""'], {}), "(val, 'annotations')\n", (579, 599), False, 'import os\n'), ((611, 638), 'os.path.join', 'os.path.join', (['val', '"""shapes"""'], {}), "(val, 'shapes')\n", (623, 638), False, 'import os\n'), ((2098, 2127), 'os.path.join', 'os.path.join', (['train', '"""shapes"""'], {}), "(train, 'shapes')\n", (2110, 2127), False, 'import os\n'), ((2341, 2375), 'os.path.join', 'os.path.join', (['train', '"""annotations"""'], {}), "(train, 'annotations')\n", (2353, 2375), False, 'import os\n'), ((2166, 2179), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2173, 2179), False, 'from random import randint\n'), ((2247, 2271), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2259, 2271), False, 'import os\n'), ((2273, 2306), 'os.path.join', 'os.path.join', (['val', '"""shapes"""', 'file'], {}), "(val, 'shapes', file)\n", (2285, 2306), False, 'import os\n'), ((2457, 2481), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2469, 2481), False, 'import os\n'), ((2483, 2521), 'os.path.join', 'os.path.join', (['val', '"""annotations"""', 'file'], {}), "(val, 'annotations', file)\n", (2495, 2521), False, 'import os\n'), ((868, 892), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (880, 892), False, 'import os\n'), ((1137, 1161), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1149, 1161), False, 'import os\n'), ((1025, 1083), 'os.path.join', 'os.path.join', (['train', '"""annotations"""', "(image_id + '_mass.png')"], {}), "(train, 'annotations', image_id + '_mass.png')\n", (1037, 1083), False, 'import os\n'), ((1296, 1344), 'os.path.join', 'os.path.join', (['train', '"""shapes"""', "(image_id + '.png')"], {}), "(train, 'shapes', image_id + '.png')\n", (1308, 1344), False, 'import os\n'), ((1520, 1544), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1532, 1544), False, 'import os\n'), ((1788, 1812), 
'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1800, 1812), False, 'import os\n'), ((1677, 1734), 'os.path.join', 'os.path.join', (['test', '"""annotations"""', "(image_id + '_mass.png')"], {}), "(test, 'annotations', image_id + '_mass.png')\n", (1689, 1734), False, 'import os\n'), ((1947, 1994), 'os.path.join', 'os.path.join', (['test', '"""shapes"""', "(image_id + '.png')"], {}), "(test, 'shapes', image_id + '.png')\n", (1959, 1994), False, 'import os\n')]
|
import FWCore.ParameterSet.Config as cms
# the Emulator kBMTF DQM module
from DQM.L1TMonitor.L1TdeStage2BMTF_cfi import *
# compares the unpacked BMTF2 regional muon collection to the emulated BMTF2 regional muon collection (after the TriggerAlgoSelector decides which one is BMTF2)
# Plots for BMTF
l1tdeStage2BmtfSecond = l1tdeStage2Bmtf.clone()
l1tdeStage2BmtfSecond.regionalMuonCollection1 = cms.InputTag("bmtfDigis","BMTF2")
l1tdeStage2BmtfSecond.regionalMuonCollection2 = cms.InputTag("valBmtfAlgoSel", "BMTF2")
l1tdeStage2BmtfSecond.monitorDir = cms.untracked.string("L1TEMU/L1TdeStage2BMTF/L1TdeStage2BMTF-Secondary")
l1tdeStage2BmtfSecond.regionalMuonCollection1Title = cms.untracked.string("BMTF2 data")
l1tdeStage2BmtfSecond.regionalMuonCollection2Title = cms.untracked.string("BMTF2 emulator")
l1tdeStage2BmtfSecond.summaryTitle = cms.untracked.string("Summary of comparison between BMTF2 muons and BMTF2 emulator muons")
l1tdeStage2BmtfSecond.ignoreBin = cms.untracked.vint32(ignoreBinsDeStage2Bmtf)
l1tdeStage2BmtfSecond.verbose = cms.untracked.bool(False)
l1tdeStage2BmtfSecond.isBmtf = cms.untracked.bool(True)
# sequences
|
[
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.InputTag",
"FWCore.ParameterSet.Config.untracked.vint32",
"FWCore.ParameterSet.Config.untracked.string"
] |
[((393, 427), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""bmtfDigis"""', '"""BMTF2"""'], {}), "('bmtfDigis', 'BMTF2')\n", (405, 427), True, 'import FWCore.ParameterSet.Config as cms\n'), ((475, 514), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""valBmtfAlgoSel"""', '"""BMTF2"""'], {}), "('valBmtfAlgoSel', 'BMTF2')\n", (487, 514), True, 'import FWCore.ParameterSet.Config as cms\n'), ((550, 622), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""L1TEMU/L1TdeStage2BMTF/L1TdeStage2BMTF-Secondary"""'], {}), "('L1TEMU/L1TdeStage2BMTF/L1TdeStage2BMTF-Secondary')\n", (570, 622), True, 'import FWCore.ParameterSet.Config as cms\n'), ((676, 710), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""BMTF2 data"""'], {}), "('BMTF2 data')\n", (696, 710), True, 'import FWCore.ParameterSet.Config as cms\n'), ((764, 802), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""BMTF2 emulator"""'], {}), "('BMTF2 emulator')\n", (784, 802), True, 'import FWCore.ParameterSet.Config as cms\n'), ((840, 935), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""Summary of comparison between BMTF2 muons and BMTF2 emulator muons"""'], {}), "(\n 'Summary of comparison between BMTF2 muons and BMTF2 emulator muons')\n", (860, 935), True, 'import FWCore.ParameterSet.Config as cms\n'), ((965, 1009), 'FWCore.ParameterSet.Config.untracked.vint32', 'cms.untracked.vint32', (['ignoreBinsDeStage2Bmtf'], {}), '(ignoreBinsDeStage2Bmtf)\n', (985, 1009), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1042, 1067), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (1060, 1067), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1099, 1123), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1117, 1123), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
General gateway tests.
@since: 0.1.0
"""
import unittest
import sys
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway, amf0
class TestService(object):
def spam(self):
return 'spam'
def echo(self, x):
return x
class FaultTestCase(unittest.TestCase):
def test_create(self):
x = remoting.ErrorFault()
self.assertEqual(x.code, '')
self.assertEqual(x.details, '')
self.assertEqual(x.description, '')
x = remoting.ErrorFault(
code=404,
details='Not Found',
description='Spam eggs'
)
self.assertEqual(x.code, 404)
self.assertEqual(x.details, 'Not Found')
self.assertEqual(x.description, 'Spam eggs')
def test_build(self):
fault = None
try:
raise TypeError("Unknown type")
except TypeError:
fault = amf0.build_fault(*sys.exc_info())
self.assertTrue(isinstance(fault, remoting.ErrorFault))
self.assertEqual(fault.level, 'error')
self.assertEqual(fault.code, 'TypeError')
self.assertEqual(fault.details, None)
def test_build_traceback(self):
fault = None
try:
raise TypeError("Unknown type")
except TypeError:
fault = amf0.build_fault(include_traceback=True, *sys.exc_info())
self.assertTrue(isinstance(fault, remoting.ErrorFault))
self.assertEqual(fault.level, 'error')
self.assertEqual(fault.code, 'TypeError')
self.assertTrue("\\n" not in fault.details)
def test_encode(self):
encoder = pyamf.get_encoder(pyamf.AMF0)
decoder = pyamf.get_decoder(pyamf.AMF0)
decoder.stream = encoder.stream
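        # the decoder reads back whatever the encoder writes to the shared stream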
try:
raise TypeError("Unknown type")
except TypeError:
encoder.writeElement(amf0.build_fault(*sys.exc_info()))
buffer = encoder.stream
buffer.seek(0, 0)
fault = decoder.readElement()
old_fault = amf0.build_fault(*sys.exc_info())
self.assertEqual(fault.level, old_fault.level)
self.assertEqual(fault.type, old_fault.type)
self.assertEqual(fault.code, old_fault.code)
self.assertEqual(fault.details, old_fault.details)
self.assertEqual(fault.description, old_fault.description)
def test_explicit_code(self):
class X(Exception):
_amf_code = 'Server.UnknownResource'
try:
raise X()
except X:
fault = amf0.build_fault(*sys.exc_info())
self.assertEqual(fault.code, 'Server.UnknownResource')
class ServiceWrapperTestCase(unittest.TestCase):
def test_create(self):
x = gateway.ServiceWrapper('blah')
self.assertEqual(x.service, 'blah')
def test_create_preprocessor(self):
x = gateway.ServiceWrapper('blah', preprocessor=ord)
self.assertEqual(x.preprocessor, ord)
def test_cmp(self):
x = gateway.ServiceWrapper('blah')
y = gateway.ServiceWrapper('blah')
z = gateway.ServiceWrapper('bleh')
self.assertEqual(x, y)
self.assertNotEquals(y, z)
def test_call(self):
def add(x, y):
self.assertEqual(x, 1)
self.assertEqual(y, 2)
return x + y
x = gateway.ServiceWrapper(add)
self.assertTrue(callable(x))
self.assertEqual(x(None, [1, 2]), 3)
x = gateway.ServiceWrapper('blah')
self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])
x = gateway.ServiceWrapper(TestService)
self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])
self.assertEqual(x('spam', []), 'spam')
self.assertRaises(gateway.UnknownServiceMethodError, x, 'xyx', [])
self.assertRaises(gateway.InvalidServiceMethodError, x, '_private', [])
self.assertEqual(x('echo', [x]), x)
class ServiceRequestTestCase(unittest.TestCase):
def test_create(self):
sw = gateway.ServiceWrapper(TestService)
request = remoting.Envelope()
x = gateway.ServiceRequest(request, sw, None)
self.assertEqual(x.request, request)
self.assertEqual(x.service, sw)
self.assertEqual(x.method, None)
def test_call(self):
sw = gateway.ServiceWrapper(TestService)
request = remoting.Envelope()
x = gateway.ServiceRequest(request, sw, None)
self.assertRaises(gateway.UnknownServiceMethodError, x)
x = gateway.ServiceRequest(request, sw, 'spam')
self.assertEqual(x(), 'spam')
x = gateway.ServiceRequest(request, sw, 'echo')
self.assertEqual(x(x), x)
class ServiceCollectionTestCase(unittest.TestCase):
def test_contains(self):
x = gateway.ServiceCollection()
self.assertFalse(TestService in x)
self.assertFalse('spam.eggs' in x)
x['spam.eggs'] = gateway.ServiceWrapper(TestService)
self.assertTrue(TestService in x)
self.assertTrue('spam.eggs' in x)
class BaseGatewayTestCase(unittest.TestCase):
def test_create(self):
x = gateway.BaseGateway()
self.assertEqual(x.services, {})
x = gateway.BaseGateway({})
self.assertEqual(x.services, {})
x = gateway.BaseGateway({})
self.assertEqual(x.services, {})
x = gateway.BaseGateway({'x': TestService})
self.assertEqual(x.services, {'x': TestService})
x = gateway.BaseGateway({}, timezone_offset=-180)
self.assertEqual(x.timezone_offset, -180)
self.assertRaises(TypeError, gateway.BaseGateway, [])
self.assertRaises(TypeError, gateway.BaseGateway, foo='bar')
def test_add_service(self):
gw = gateway.BaseGateway()
self.assertEqual(gw.services, {})
gw.addService(TestService)
self.assertTrue(TestService in gw.services)
self.assertTrue('TestService' in gw.services)
del gw.services['TestService']
gw.addService(TestService, 'spam.eggs')
self.assertTrue(TestService in gw.services)
self.assertTrue('spam.eggs' in gw.services)
del gw.services['spam.eggs']
class SpamService(object):
def __str__(self):
return 'spam'
def __call__(*args, **kwargs):
pass
x = SpamService()
gw.addService(x)
self.assertTrue(x in gw.services)
self.assertTrue('spam' in gw.services)
del gw.services['spam']
self.assertEqual(gw.services, {})
self.assertRaises(TypeError, gw.addService, 1)
import new
temp = new.module('temp')
gw.addService(temp)
self.assertTrue(temp in gw.services)
self.assertTrue('temp' in gw.services)
del gw.services['temp']
self.assertEqual(gw.services, {})
def test_remove_service(self):
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue('test' in gw.services)
wrapper = gw.services['test']
gw.removeService('test')
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEqual(gw.services, {})
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue(TestService in gw.services)
wrapper = gw.services['test']
gw.removeService(TestService)
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEqual(gw.services, {})
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue(TestService in gw.services)
wrapper = gw.services['test']
gw.removeService(wrapper)
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEqual(gw.services, {})
x = TestService()
gw = gateway.BaseGateway({'test': x})
gw.removeService(x)
self.assertFalse('test' in gw.services)
self.assertEqual(gw.services, {})
self.assertRaises(NameError, gw.removeService, 'test')
self.assertRaises(NameError, gw.removeService, TestService)
self.assertRaises(NameError, gw.removeService, wrapper)
def test_service_request(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
message = remoting.Request('spam', [], envelope=envelope)
with self.assertRaises(gateway.UnknownServiceError):
gw.getServiceRequest(message, 'spam')
message = remoting.Request('test.spam', [], envelope=envelope)
sr = gw.getServiceRequest(message, 'test.spam')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEqual(sr.request, envelope)
self.assertEqual(sr.service, TestService)
self.assertEqual(sr.method, 'spam')
message = remoting.Request('test')
sr = gw.getServiceRequest(message, 'test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEqual(sr.request, None)
self.assertEqual(sr.service, TestService)
self.assertEqual(sr.method, None)
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
message = remoting.Request('test')
sr = gw.getServiceRequest(message, 'test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEqual(sr.request, None)
self.assertEqual(sr.service, TestService)
self.assertEqual(sr.method, None)
# try to access an unknown service
message = remoting.Request('spam')
with self.assertRaises(gateway.UnknownServiceError):
gw.getServiceRequest(message, 'spam')
# check x.x calls
message = remoting.Request('test.test')
sr = gw.getServiceRequest(message, 'test.test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEqual(sr.request, None)
self.assertEqual(sr.service, TestService)
self.assertEqual(sr.method, 'test')
def test_long_service_name(self):
gw = gateway.BaseGateway({'a.c.b.d': TestService})
envelope = remoting.Envelope()
message = remoting.Request('a.c.b.d', [], envelope=envelope)
sr = gw.getServiceRequest(message, 'a.c.b.d.spam')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEqual(sr.request, envelope)
self.assertEqual(sr.service, TestService)
self.assertEqual(sr.method, 'spam')
def test_get_response(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
self.assertRaises(NotImplementedError, gw.getResponse, envelope)
def test_process_request(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
def test_unknown_service(self):
        # Test a non-existent service call
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('nope', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertFalse(gw.debug)
self.assertTrue(isinstance(response, remoting.Message))
self.assertEqual(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEqual(response.body.code, 'Service.ResourceNotFound')
self.assertEqual(response.body.description, 'Unknown service nope')
self.assertEqual(response.body.details, None)
def test_debug_traceback(self):
        # Test a non-existent service call
gw = gateway.BaseGateway({'test': TestService}, debug=True)
envelope = remoting.Envelope()
        # Test a non-existent service call
request = remoting.Request('nope', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Message))
self.assertEqual(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEqual(response.body.code, 'Service.ResourceNotFound')
self.assertEqual(response.body.description, 'Unknown service nope')
self.assertNotEquals(response.body.details, None)
def test_malformed_credentials_header(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
request.headers['Credentials'] = {'spam': 'eggs'}
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEqual(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEqual(response.body.code, 'KeyError')
def test_authenticate(self):
gw = gateway.BaseGateway({'test': TestService})
sr = gateway.ServiceRequest(None, gw.services['test'], None)
self.assertTrue(gw.authenticateRequest(sr, None, None))
def auth(u, p):
if u == 'spam' and p == 'eggs':
return True
return False
gw = gateway.BaseGateway({'test': TestService}, authenticator=auth)
self.assertFalse(gw.authenticateRequest(sr, None, None))
self.assertTrue(gw.authenticateRequest(sr, 'spam', 'eggs'))
def test_null_target(self):
gw = gateway.BaseGateway({})
request = remoting.Request(None)
processor = gw.getProcessor(request)
from pyamf.remoting import amf3
self.assertTrue(isinstance(processor, amf3.RequestProcessor))
def test_empty_target(self):
gw = gateway.BaseGateway({})
request = remoting.Request('')
processor = gw.getProcessor(request)
from pyamf.remoting import amf3
self.assertTrue(isinstance(processor, amf3.RequestProcessor))
class QueryBrowserTestCase(unittest.TestCase):
def test_request(self):
gw = gateway.BaseGateway()
def echo(x):
return x
gw.addService(echo, 'echo', description='This is a test')
envelope = remoting.Envelope()
request = remoting.Request('echo')
envelope['/1'] = request
request.headers['DescribeService'] = None
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'This is a test')
class AuthenticatorTestCase(unittest.TestCase):
def setUp(self):
self.called = False
def tearDown(self):
if self.called is False:
self.fail("authenticator not called")
def _auth(self, username, password):
self.called = True
if username == 'fred' and password == '<PASSWORD>':
return True
return False
def test_gateway(self):
gw = gateway.BaseGateway(authenticator=self._auth)
def echo(x):
return x
gw.addService(echo, 'echo')
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
def test_service(self):
gw = gateway.BaseGateway()
def echo(x):
return x
gw.addService(echo, 'echo', authenticator=self._auth)
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
def test_class_decorator(self):
class TestService:
def echo(self, x):
return x
TestService.echo = gateway.authenticate(TestService.echo, self._auth)
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
def test_func_decorator(self):
def echo(x):
return x
echo = gateway.authenticate(echo, self._auth)
gw = gateway.BaseGateway({'echo': echo})
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
def test_expose_request_decorator(self):
def echo(x):
return x
def exposed_auth(request, username, password):
return self._auth(username, password)
exposed_auth = gateway.expose_request(exposed_auth)
echo = gateway.authenticate(echo, exposed_auth)
gw = gateway.BaseGateway({'echo': echo})
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
def test_expose_request_keyword(self):
def echo(x):
return x
def exposed_auth(request, username, password):
return self._auth(username, password)
echo = gateway.authenticate(echo, exposed_auth, expose_request=True)
gw = gateway.BaseGateway({'echo': echo})
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='<PASSWORD>')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
class ExposeRequestTestCase(unittest.TestCase):
def test_default(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertFalse(gw.mustExposeRequest(service_request))
def test_gateway(self):
gw = gateway.BaseGateway(expose_request=True)
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertTrue(gw.mustExposeRequest(service_request))
def test_service(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test', expose_request=True)
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertTrue(gw.mustExposeRequest(service_request))
def test_decorator(self):
def echo(x):
return x
gateway.expose_request(echo)
gw = gateway.BaseGateway()
gw.addService(echo, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertTrue(gw.mustExposeRequest(service_request))
class PreProcessingTestCase(unittest.TestCase):
def _preproc(self):
pass
def test_default(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertEqual(gw.getPreprocessor(service_request), None)
def test_global(self):
gw = gateway.BaseGateway(preprocessor=self._preproc)
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertEqual(gw.getPreprocessor(service_request), self._preproc)
def test_service(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test', preprocessor=self._preproc)
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertEqual(gw.getPreprocessor(service_request), self._preproc)
def test_decorator(self):
def echo(x):
return x
gateway.preprocess(echo, self._preproc)
gw = gateway.BaseGateway()
gw.addService(echo, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(
envelope,
gw.services['test'],
None
)
self.assertEqual(gw.getPreprocessor(service_request), self._preproc)
def test_call(self):
def preproc(sr, *args):
self.called = True
self.assertEqual(args, tuple())
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEqual(response.status, remoting.STATUS_OK)
self.assertEqual(response.body, 'spam')
self.assertTrue(self.called)
def test_fail(self):
def preproc(sr, *args):
raise IndexError
gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEqual(response.status, remoting.STATUS_ERROR)
|
[
"pyamf.remoting.Envelope",
"pyamf.get_encoder",
"pyamf.remoting.Request",
"pyamf.remoting.gateway.authenticate",
"new.module",
"pyamf.remoting.gateway.preprocess",
"pyamf.remoting.gateway.ServiceWrapper",
"pyamf.remoting.gateway.ServiceCollection",
"sys.exc_info",
"pyamf.remoting.gateway.expose_request",
"pyamf.remoting.gateway.BaseGateway",
"pyamf.remoting.gateway.ServiceRequest",
"pyamf.get_decoder",
"pyamf.remoting.ErrorFault"
] |
[((416, 437), 'pyamf.remoting.ErrorFault', 'remoting.ErrorFault', ([], {}), '()\n', (435, 437), False, 'from pyamf import remoting\n'), ((573, 648), 'pyamf.remoting.ErrorFault', 'remoting.ErrorFault', ([], {'code': '(404)', 'details': '"""Not Found"""', 'description': '"""Spam eggs"""'}), "(code=404, details='Not Found', description='Spam eggs')\n", (592, 648), False, 'from pyamf import remoting\n'), ((1710, 1739), 'pyamf.get_encoder', 'pyamf.get_encoder', (['pyamf.AMF0'], {}), '(pyamf.AMF0)\n', (1727, 1739), False, 'import pyamf\n'), ((1758, 1787), 'pyamf.get_decoder', 'pyamf.get_decoder', (['pyamf.AMF0'], {}), '(pyamf.AMF0)\n', (1775, 1787), False, 'import pyamf\n'), ((2794, 2824), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['"""blah"""'], {}), "('blah')\n", (2816, 2824), False, 'from pyamf.remoting import gateway, amf0\n'), ((2923, 2971), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['"""blah"""'], {'preprocessor': 'ord'}), "('blah', preprocessor=ord)\n", (2945, 2971), False, 'from pyamf.remoting import gateway, amf0\n'), ((3056, 3086), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['"""blah"""'], {}), "('blah')\n", (3078, 3086), False, 'from pyamf.remoting import gateway, amf0\n'), ((3099, 3129), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['"""blah"""'], {}), "('blah')\n", (3121, 3129), False, 'from pyamf.remoting import gateway, amf0\n'), ((3142, 3172), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['"""bleh"""'], {}), "('bleh')\n", (3164, 3172), False, 'from pyamf.remoting import gateway, amf0\n'), ((3398, 3425), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['add'], {}), '(add)\n', (3420, 3425), False, 'from pyamf.remoting import gateway, amf0\n'), ((3522, 3552), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['"""blah"""'], {}), "('blah')\n", (3544, 3552), False, 'from pyamf.remoting import gateway, amf0\n'), ((3641, 3676), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['TestService'], {}), '(TestService)\n', (3663, 3676), False, 'from pyamf.remoting import gateway, amf0\n'), ((4092, 4127), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['TestService'], {}), '(TestService)\n', (4114, 4127), False, 'from pyamf.remoting import gateway, amf0\n'), ((4146, 4165), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (4163, 4165), False, 'from pyamf import remoting\n'), ((4179, 4220), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['request', 'sw', 'None'], {}), '(request, sw, None)\n', (4201, 4220), False, 'from pyamf.remoting import gateway, amf0\n'), ((4387, 4422), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['TestService'], {}), '(TestService)\n', (4409, 4422), False, 'from pyamf.remoting import gateway, amf0\n'), ((4441, 4460), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (4458, 4460), False, 'from pyamf import remoting\n'), ((4474, 4515), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['request', 'sw', 'None'], {}), '(request, sw, None)\n', (4496, 4515), False, 'from pyamf.remoting import gateway, amf0\n'), ((4594, 4637), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['request', 'sw', '"""spam"""'], {}), "(request, sw, 'spam')\n", (4616, 4637), False, 'from pyamf.remoting import gateway, amf0\n'), ((4689, 4732), 'pyamf.remoting.gateway.ServiceRequest', 
'gateway.ServiceRequest', (['request', 'sw', '"""echo"""'], {}), "(request, sw, 'echo')\n", (4711, 4732), False, 'from pyamf.remoting import gateway, amf0\n'), ((4862, 4889), 'pyamf.remoting.gateway.ServiceCollection', 'gateway.ServiceCollection', ([], {}), '()\n', (4887, 4889), False, 'from pyamf.remoting import gateway, amf0\n'), ((5003, 5038), 'pyamf.remoting.gateway.ServiceWrapper', 'gateway.ServiceWrapper', (['TestService'], {}), '(TestService)\n', (5025, 5038), False, 'from pyamf.remoting import gateway, amf0\n'), ((5211, 5232), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (5230, 5232), False, 'from pyamf.remoting import gateway, amf0\n'), ((5287, 5310), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (['{}'], {}), '({})\n', (5306, 5310), False, 'from pyamf.remoting import gateway, amf0\n'), ((5365, 5388), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (['{}'], {}), '({})\n', (5384, 5388), False, 'from pyamf.remoting import gateway, amf0\n'), ((5443, 5482), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'x': TestService}"], {}), "({'x': TestService})\n", (5462, 5482), False, 'from pyamf.remoting import gateway, amf0\n'), ((5553, 5598), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (['{}'], {'timezone_offset': '(-180)'}), '({}, timezone_offset=-180)\n', (5572, 5598), False, 'from pyamf.remoting import gateway, amf0\n'), ((5827, 5848), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (5846, 5848), False, 'from pyamf.remoting import gateway, amf0\n'), ((6736, 6754), 'new.module', 'new.module', (['"""temp"""'], {}), "('temp')\n", (6746, 6754), False, 'import new\n'), ((7001, 7043), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (7020, 7043), False, 'from pyamf.remoting import gateway, amf0\n'), ((7370, 7412), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (7389, 7412), False, 'from pyamf.remoting import gateway, amf0\n'), ((7749, 7791), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (7768, 7791), False, 'from pyamf.remoting import gateway, amf0\n'), ((8150, 8182), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': x}"], {}), "({'test': x})\n", (8169, 8182), False, 'from pyamf.remoting import gateway, amf0\n'), ((8549, 8591), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (8568, 8591), False, 'from pyamf.remoting import gateway, amf0\n'), ((8611, 8630), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (8628, 8630), False, 'from pyamf import remoting\n'), ((8650, 8697), 'pyamf.remoting.Request', 'remoting.Request', (['"""spam"""', '[]'], {'envelope': 'envelope'}), "('spam', [], envelope=envelope)\n", (8666, 8697), False, 'from pyamf import remoting\n'), ((8828, 8880), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.spam"""', '[]'], {'envelope': 'envelope'}), "('test.spam', [], envelope=envelope)\n", (8844, 8880), False, 'from pyamf import remoting\n'), ((9162, 9186), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (9178, 9186), False, 'from pyamf import remoting\n'), ((9452, 9494), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': 
TestService})\n", (9471, 9494), False, 'from pyamf.remoting import gateway, amf0\n'), ((9514, 9533), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (9531, 9533), False, 'from pyamf import remoting\n'), ((9552, 9576), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (9568, 9576), False, 'from pyamf import remoting\n'), ((9891, 9915), 'pyamf.remoting.Request', 'remoting.Request', (['"""spam"""'], {}), "('spam')\n", (9907, 9915), False, 'from pyamf import remoting\n'), ((10072, 10101), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.test"""'], {}), "('test.test')\n", (10088, 10101), False, 'from pyamf import remoting\n'), ((10412, 10457), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'a.c.b.d': TestService}"], {}), "({'a.c.b.d': TestService})\n", (10431, 10457), False, 'from pyamf.remoting import gateway, amf0\n'), ((10477, 10496), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (10494, 10496), False, 'from pyamf import remoting\n'), ((10516, 10566), 'pyamf.remoting.Request', 'remoting.Request', (['"""a.c.b.d"""', '[]'], {'envelope': 'envelope'}), "('a.c.b.d', [], envelope=envelope)\n", (10532, 10566), False, 'from pyamf import remoting\n'), ((10879, 10921), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (10898, 10921), False, 'from pyamf.remoting import gateway, amf0\n'), ((10941, 10960), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (10958, 10960), False, 'from pyamf import remoting\n'), ((11085, 11127), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (11104, 11127), False, 'from pyamf.remoting import gateway, amf0\n'), ((11147, 11166), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (11164, 11166), False, 'from pyamf import remoting\n'), ((11186, 11234), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.spam"""'], {'envelope': 'envelope'}), "('test.spam', envelope=envelope)\n", (11202, 11234), False, 'from pyamf import remoting\n'), ((11588, 11630), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (11607, 11630), False, 'from pyamf.remoting import gateway, amf0\n'), ((11650, 11669), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (11667, 11669), False, 'from pyamf import remoting\n'), ((11689, 11732), 'pyamf.remoting.Request', 'remoting.Request', (['"""nope"""'], {'envelope': 'envelope'}), "('nope', envelope=envelope)\n", (11705, 11732), False, 'from pyamf import remoting\n'), ((12350, 12404), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {'debug': '(True)'}), "({'test': TestService}, debug=True)\n", (12369, 12404), False, 'from pyamf.remoting import gateway, amf0\n'), ((12424, 12443), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (12441, 12443), False, 'from pyamf import remoting\n'), ((12506, 12549), 'pyamf.remoting.Request', 'remoting.Request', (['"""nope"""'], {'envelope': 'envelope'}), "('nope', envelope=envelope)\n", (12522, 12549), False, 'from pyamf import remoting\n'), ((13106, 13148), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (13125, 13148), False, 'from pyamf.remoting import gateway, amf0\n'), ((13168, 13187), 'pyamf.remoting.Envelope', 'remoting.Envelope', 
([], {}), '()\n', (13185, 13187), False, 'from pyamf import remoting\n'), ((13207, 13255), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.spam"""'], {'envelope': 'envelope'}), "('test.spam', envelope=envelope)\n", (13223, 13255), False, 'from pyamf import remoting\n'), ((13706, 13748), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (13725, 13748), False, 'from pyamf.remoting import gateway, amf0\n'), ((13762, 13817), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['None', "gw.services['test']", 'None'], {}), "(None, gw.services['test'], None)\n", (13784, 13817), False, 'from pyamf.remoting import gateway, amf0\n'), ((14020, 14082), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {'authenticator': 'auth'}), "({'test': TestService}, authenticator=auth)\n", (14039, 14082), False, 'from pyamf.remoting import gateway, amf0\n'), ((14263, 14286), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (['{}'], {}), '({})\n', (14282, 14286), False, 'from pyamf.remoting import gateway, amf0\n'), ((14306, 14328), 'pyamf.remoting.Request', 'remoting.Request', (['None'], {}), '(None)\n', (14322, 14328), False, 'from pyamf import remoting\n'), ((14533, 14556), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (['{}'], {}), '({})\n', (14552, 14556), False, 'from pyamf.remoting import gateway, amf0\n'), ((14576, 14596), 'pyamf.remoting.Request', 'remoting.Request', (['""""""'], {}), "('')\n", (14592, 14596), False, 'from pyamf import remoting\n'), ((14844, 14865), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (14863, 14865), False, 'from pyamf.remoting import gateway, amf0\n'), ((14996, 15015), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (15013, 15015), False, 'from pyamf import remoting\n'), ((15034, 15058), 'pyamf.remoting.Request', 'remoting.Request', (['"""echo"""'], {}), "('echo')\n", (15050, 15058), False, 'from pyamf import remoting\n'), ((15773, 15818), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {'authenticator': 'self._auth'}), '(authenticator=self._auth)\n', (15792, 15818), False, 'from pyamf.remoting import gateway, amf0\n'), ((15919, 15938), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (15936, 15938), False, 'from pyamf import remoting\n'), ((15957, 15996), 'pyamf.remoting.Request', 'remoting.Request', (['"""echo"""'], {'body': "['spam']"}), "('echo', body=['spam'])\n", (15973, 15996), False, 'from pyamf import remoting\n'), ((16352, 16373), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (16371, 16373), False, 'from pyamf.remoting import gateway, amf0\n'), ((16500, 16519), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (16517, 16519), False, 'from pyamf import remoting\n'), ((16538, 16577), 'pyamf.remoting.Request', 'remoting.Request', (['"""echo"""'], {'body': "['spam']"}), "('echo', body=['spam'])\n", (16554, 16577), False, 'from pyamf import remoting\n'), ((17039, 17089), 'pyamf.remoting.gateway.authenticate', 'gateway.authenticate', (['TestService.echo', 'self._auth'], {}), '(TestService.echo, self._auth)\n', (17059, 17089), False, 'from pyamf.remoting import gateway, amf0\n'), ((17104, 17146), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {}), "({'test': TestService})\n", (17123, 17146), False, 'from pyamf.remoting import gateway, 
amf0\n'), ((17167, 17186), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (17184, 17186), False, 'from pyamf import remoting\n'), ((17205, 17249), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.echo"""'], {'body': "['spam']"}), "('test.echo', body=['spam'])\n", (17221, 17249), False, 'from pyamf import remoting\n'), ((17657, 17695), 'pyamf.remoting.gateway.authenticate', 'gateway.authenticate', (['echo', 'self._auth'], {}), '(echo, self._auth)\n', (17677, 17695), False, 'from pyamf.remoting import gateway, amf0\n'), ((17710, 17745), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'echo': echo}"], {}), "({'echo': echo})\n", (17729, 17745), False, 'from pyamf.remoting import gateway, amf0\n'), ((17766, 17785), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (17783, 17785), False, 'from pyamf import remoting\n'), ((17804, 17843), 'pyamf.remoting.Request', 'remoting.Request', (['"""echo"""'], {'body': "['spam']"}), "('echo', body=['spam'])\n", (17820, 17843), False, 'from pyamf import remoting\n'), ((18375, 18411), 'pyamf.remoting.gateway.expose_request', 'gateway.expose_request', (['exposed_auth'], {}), '(exposed_auth)\n', (18397, 18411), False, 'from pyamf.remoting import gateway, amf0\n'), ((18428, 18468), 'pyamf.remoting.gateway.authenticate', 'gateway.authenticate', (['echo', 'exposed_auth'], {}), '(echo, exposed_auth)\n', (18448, 18468), False, 'from pyamf.remoting import gateway, amf0\n'), ((18482, 18517), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'echo': echo}"], {}), "({'echo': echo})\n", (18501, 18517), False, 'from pyamf.remoting import gateway, amf0\n'), ((18538, 18557), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (18555, 18557), False, 'from pyamf import remoting\n'), ((18576, 18615), 'pyamf.remoting.Request', 'remoting.Request', (['"""echo"""'], {'body': "['spam']"}), "('echo', body=['spam'])\n", (18592, 18615), False, 'from pyamf import remoting\n'), ((19137, 19198), 'pyamf.remoting.gateway.authenticate', 'gateway.authenticate', (['echo', 'exposed_auth'], {'expose_request': '(True)'}), '(echo, exposed_auth, expose_request=True)\n', (19157, 19198), False, 'from pyamf.remoting import gateway, amf0\n'), ((19212, 19247), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'echo': echo}"], {}), "({'echo': echo})\n", (19231, 19247), False, 'from pyamf.remoting import gateway, amf0\n'), ((19268, 19287), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (19285, 19287), False, 'from pyamf import remoting\n'), ((19306, 19345), 'pyamf.remoting.Request', 'remoting.Request', (['"""echo"""'], {'body': "['spam']"}), "('echo', body=['spam'])\n", (19322, 19345), False, 'from pyamf import remoting\n'), ((19750, 19771), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (19769, 19771), False, 'from pyamf.remoting import gateway, amf0\n'), ((19836, 19855), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (19853, 19855), False, 'from pyamf import remoting\n'), ((19874, 19898), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (19890, 19898), False, 'from pyamf import remoting\n'), ((19959, 20018), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (19981, 20018), False, 'from pyamf.remoting import gateway, amf0\n'), ((20172, 20212), 'pyamf.remoting.gateway.BaseGateway', 
'gateway.BaseGateway', ([], {'expose_request': '(True)'}), '(expose_request=True)\n', (20191, 20212), False, 'from pyamf.remoting import gateway, amf0\n'), ((20277, 20296), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (20294, 20296), False, 'from pyamf import remoting\n'), ((20315, 20339), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (20331, 20339), False, 'from pyamf import remoting\n'), ((20400, 20459), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (20422, 20459), False, 'from pyamf.remoting import gateway, amf0\n'), ((20612, 20633), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (20631, 20633), False, 'from pyamf.remoting import gateway, amf0\n'), ((20719, 20738), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (20736, 20738), False, 'from pyamf import remoting\n'), ((20757, 20781), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (20773, 20781), False, 'from pyamf import remoting\n'), ((20842, 20901), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (20864, 20901), False, 'from pyamf.remoting import gateway, amf0\n'), ((21094, 21122), 'pyamf.remoting.gateway.expose_request', 'gateway.expose_request', (['echo'], {}), '(echo)\n', (21116, 21122), False, 'from pyamf.remoting import gateway, amf0\n'), ((21137, 21158), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (21156, 21158), False, 'from pyamf.remoting import gateway, amf0\n'), ((21216, 21235), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (21233, 21235), False, 'from pyamf import remoting\n'), ((21254, 21278), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (21270, 21278), False, 'from pyamf import remoting\n'), ((21339, 21398), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (21361, 21398), False, 'from pyamf.remoting import gateway, amf0\n'), ((21638, 21659), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (21657, 21659), False, 'from pyamf.remoting import gateway, amf0\n'), ((21724, 21743), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (21741, 21743), False, 'from pyamf import remoting\n'), ((21762, 21786), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (21778, 21786), False, 'from pyamf import remoting\n'), ((21847, 21906), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (21869, 21906), False, 'from pyamf.remoting import gateway, amf0\n'), ((22063, 22110), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {'preprocessor': 'self._preproc'}), '(preprocessor=self._preproc)\n', (22082, 22110), False, 'from pyamf.remoting import gateway, amf0\n'), ((22175, 22194), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (22192, 22194), False, 'from pyamf import remoting\n'), ((22213, 22237), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (22229, 22237), False, 'from pyamf import remoting\n'), ((22298, 22357), 
'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (22320, 22357), False, 'from pyamf.remoting import gateway, amf0\n'), ((22524, 22545), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (22543, 22545), False, 'from pyamf.remoting import gateway, amf0\n'), ((22638, 22657), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (22655, 22657), False, 'from pyamf import remoting\n'), ((22676, 22700), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (22692, 22700), False, 'from pyamf import remoting\n'), ((22761, 22820), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (22783, 22820), False, 'from pyamf.remoting import gateway, amf0\n'), ((23027, 23066), 'pyamf.remoting.gateway.preprocess', 'gateway.preprocess', (['echo', 'self._preproc'], {}), '(echo, self._preproc)\n', (23045, 23066), False, 'from pyamf.remoting import gateway, amf0\n'), ((23081, 23102), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', ([], {}), '()\n', (23100, 23102), False, 'from pyamf.remoting import gateway, amf0\n'), ((23160, 23179), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (23177, 23179), False, 'from pyamf import remoting\n'), ((23198, 23222), 'pyamf.remoting.Request', 'remoting.Request', (['"""test"""'], {}), "('test')\n", (23214, 23222), False, 'from pyamf import remoting\n'), ((23283, 23342), 'pyamf.remoting.gateway.ServiceRequest', 'gateway.ServiceRequest', (['envelope', "gw.services['test']", 'None'], {}), "(envelope, gw.services['test'], None)\n", (23305, 23342), False, 'from pyamf.remoting import gateway, amf0\n'), ((23683, 23747), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {'preprocessor': 'preproc'}), "({'test': TestService}, preprocessor=preproc)\n", (23702, 23747), False, 'from pyamf.remoting import gateway, amf0\n'), ((23767, 23786), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (23784, 23786), False, 'from pyamf import remoting\n'), ((23806, 23854), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.spam"""'], {'envelope': 'envelope'}), "('test.spam', envelope=envelope)\n", (23822, 23854), False, 'from pyamf import remoting\n'), ((24253, 24317), 'pyamf.remoting.gateway.BaseGateway', 'gateway.BaseGateway', (["{'test': TestService}"], {'preprocessor': 'preproc'}), "({'test': TestService}, preprocessor=preproc)\n", (24272, 24317), False, 'from pyamf.remoting import gateway, amf0\n'), ((24337, 24356), 'pyamf.remoting.Envelope', 'remoting.Envelope', ([], {}), '()\n', (24354, 24356), False, 'from pyamf import remoting\n'), ((24376, 24424), 'pyamf.remoting.Request', 'remoting.Request', (['"""test.spam"""'], {'envelope': 'envelope'}), "('test.spam', envelope=envelope)\n", (24392, 24424), False, 'from pyamf import remoting\n'), ((2116, 2130), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2128, 2130), False, 'import sys\n'), ((1006, 1020), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1018, 1020), False, 'import sys\n'), ((1434, 1448), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1446, 1448), False, 'import sys\n'), ((2624, 2638), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2636, 2638), False, 'import sys\n'), ((1963, 1977), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1975, 1977), False, 
'import sys\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
"""
Try to automatically ingest row data from a URL into a Rowpack file.
"""
from . import RowpackWriter, RowpackReader, intuit_rows, intuit_types, run_stats, IngestionError
from os.path import abspath
def get_cache():
from fs.opener import fsopendir
import tempfile
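    # Download cache: a pyfilesystem directory opened on the system temp dir.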
return fsopendir(tempfile.gettempdir())
def ingest(url, path=None, cache=None, encoding=None, filetype=None, urlfiletype=None,
cb=None, url_resolver=None):
"""
:param url:
:param path:
:param cache:
:param encoding:
:param filetype:
:param urlfiletype:
:return:
"""
from rowgenerators import SourceSpec
from tableintuit.exceptions import RowIntuitError
warnings = []
    # There are certainly better ways to do this, like chardet or UnicodeDammit,
    # but in several years I've never seen a data file that wasn't ascii, utf8 or latin1,
    # so I'm punting. Until there is a better solution, users should run a character-detection
    # program, then explicitly set the encoding parameter.
if encoding is None:
encodings = ('ascii', 'utf8', 'latin1')
else:
encodings = (encoding,)
if cache is None:
cache = get_cache()
in_path = path
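    # Try each candidate encoding in turn; the first full pass without a decode error
    # breaks out, and the for-else below raises if every encoding fails.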
for encoding in encodings:
d = dict(
url=url,
encoding=encoding,
filetype=filetype,
urlfiletype=urlfiletype
)
if url_resolver:
ss = url_resolver(SourceSpec(**d), cache)
else:
ss = SourceSpec(**d)
gen = ss.get_generator(cache)
if not in_path:
path = abspath(ss.file_name + '.rp')
else:
path = in_path
try:
with RowpackWriter(path) as w:
for row in gen:
w.write_row(row)
w.meta['encoding'] = encoding
w.meta['url'] = url
w.meta['filename'] = path
break
except UnicodeDecodeError:
warnings.append("WARNING: encoding failed, trying another")
if cb:
cb(warnings[-1])
continue
else:
raise IngestionError("ERROR: all encodings failed")
# Need to re-open b/c n_rows isn't set until the writer is closed
with RowpackReader(path) as r:
if cb:
cb("Wrote {} rows".format(r.n_rows))
try:
ri = intuit_rows(path)
if ri.start_line < 1:
warnings.append("WARNING: Row intuition could not find start line; skipping type intuition and stats"+
"Set row types manually with -H -e ")
if cb:
cb(warnings[-1])
else:
intuit_types(path)
run_stats(path)
except RowIntuitError as e:
raise
with RowpackWriter(path, 'r+b') as w:
w.meta['sourcespec'] = ss.dict
return path, encoding, warnings
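# Hypothetical usage sketch (the URL is illustrative, not from the original module):
#   path, encoding, warnings = ingest('http://example.com/data.csv')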
|
[
"os.path.abspath",
"tempfile.gettempdir",
"rowgenerators.SourceSpec"
] |
[((467, 488), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (486, 488), False, 'import tempfile\n'), ((1701, 1716), 'rowgenerators.SourceSpec', 'SourceSpec', ([], {}), '(**d)\n', (1711, 1716), False, 'from rowgenerators import SourceSpec\n'), ((1800, 1829), 'os.path.abspath', 'abspath', (["(ss.file_name + '.rp')"], {}), "(ss.file_name + '.rp')\n", (1807, 1829), False, 'from os.path import abspath\n'), ((1646, 1661), 'rowgenerators.SourceSpec', 'SourceSpec', ([], {}), '(**d)\n', (1656, 1661), False, 'from rowgenerators import SourceSpec\n')]
|
from core import GameConfig as Game
from core import Board
from config import TRAINING_CONFIG
from keras import Sequential, Model, Input
from keras.layers import InputLayer
from keras.layers.core import Activation, Dense, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.merge import Add
from keras.layers.normalization import BatchNormalization
from keras.optimizers import sgd
from keras.regularizers import l2
from keras import backend as K
import numpy as np
import os
def ConvBlock(
filter_size=256,
kernel_size=(3, 3),
activation=None,
input_shape=None
) -> list:
"""
Conv block with no activation func.
if activation is None,
then Activation layer will not be added
"""
return [
*([InputLayer(input_shape)] if input_shape else []),
Conv2D(
filters=filter_size,
kernel_size=kernel_size,
padding="same",
data_format="channels_first",
kernel_regularizer=l2()
),
BatchNormalization(epsilon=1e-5),
*([Activation(activation)] if activation else [])
]
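# Example (sketch): ConvBlock(32, input_shape=(6, 15, 15)) yields
# [InputLayer, Conv2D, BatchNormalization]; no Activation layer, since activation defaults to None.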
# def ResBlock(identity_input) -> list:
# """ Residual Conv block """
# return Sequential([
# Add()([
# identity_input,
# Sequential([
# ]),
# ]),
# Activation("relu")
# ])
class PolicyValueNetwork:
""" AlphaZero Residual-CNN """
def __init__(self, model_file=None):
# Build Network Architecture
input_shape = Board().encoded_states().shape # (6, 15, 15)
inputs = Input(input_shape)
shared_net = Sequential([
*ConvBlock(32, input_shape=input_shape),
*ConvBlock(64),
*ConvBlock(128)
], "shared_net")
policy_head = Sequential([
shared_net,
*ConvBlock(4, (1, 1), "relu"),
Flatten(),
Dense(Game["board_size"], kernel_regularizer=l2()),
Activation("softmax")
], "policy_head")
value_head = Sequential([
shared_net,
*ConvBlock(2, (1, 1), "relu"),
Flatten(),
Dense(64, activation="relu", kernel_regularizer=l2()),
Dense(1, kernel_regularizer=l2()),
Activation("tanh")
], "value_head")
self.model = Model(
inputs,
[value_head(inputs), policy_head(inputs)]
)
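        # Two heads on a shared trunk: the model outputs [value, policy] for one encoded board.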
if model_file is not None:
self.restore_model(model_file)
def compile(self, opt):
"""
Optimization and Loss definition
"""
self.model.compile(
            optimizer=opt if opt is not None else sgd(),
loss=["mse", "categorical_crossentropy"]
)
def eval_state(self, state):
"""
Evaluate a board state.
"""
vp = self.model.predict_on_batch(state.encoded_states()[np.newaxis, :])
# format to (float, np.array((255,1),dtype=float)) structure
return vp[0][0][0], vp[1][0]
def train_step(self, optimizer):
"""
        One network training step.
"""
opt = self.model.optimizer
K.set_value(opt.lr, optimizer["lr"])
K.set_value(opt.momentum, optimizer["momentum"])
# loss = self.model.train_on_batch(inputs, [winner, probs])
# return loss
def save_model(self, filename):
base_path = "{}/keras".format(TRAINING_CONFIG["model_path"])
if not os.path.exists(base_path):
os.mkdir(base_path)
self.model.save_weights("{}/{}.h5".format(base_path, filename))
def restore_model(self, filename):
base_path = "{}/keras".format(TRAINING_CONFIG["model_path"])
if os.path.exists("{}/{}.h5".format(base_path, filename)):
self.model.load_weights("{}/{}.h5".format(base_path, filename))
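# Hypothetical usage sketch (names taken from this module; the call itself is illustrative):
#   net = PolicyValueNetwork()
#   value, policy = net.eval_state(Board())  # scalar state value plus a move-probability vector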
|
[
"keras.Input",
"os.mkdir",
"keras.regularizers.l2",
"keras.backend.set_value",
"core.Board",
"keras.layers.core.Activation",
"os.path.exists",
"keras.optimizers.sgd",
"keras.layers.InputLayer",
"keras.layers.core.Flatten",
"keras.layers.normalization.BatchNormalization"
] |
[((1043, 1076), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(1e-05)'}), '(epsilon=1e-05)\n', (1061, 1076), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1616, 1634), 'keras.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (1621, 1634), False, 'from keras import Sequential, Model, Input\n'), ((3171, 3207), 'keras.backend.set_value', 'K.set_value', (['opt.lr', "optimizer['lr']"], {}), "(opt.lr, optimizer['lr'])\n", (3182, 3207), True, 'from keras import backend as K\n'), ((3216, 3264), 'keras.backend.set_value', 'K.set_value', (['opt.momentum', "optimizer['momentum']"], {}), "(opt.momentum, optimizer['momentum'])\n", (3227, 3264), True, 'from keras import backend as K\n'), ((3476, 3501), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (3490, 3501), False, 'import os\n'), ((3515, 3534), 'os.mkdir', 'os.mkdir', (['base_path'], {}), '(base_path)\n', (3523, 3534), False, 'import os\n'), ((1019, 1023), 'keras.regularizers.l2', 'l2', ([], {}), '()\n', (1021, 1023), False, 'from keras.regularizers import l2\n'), ((1919, 1928), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (1926, 1928), False, 'from keras.layers.core import Activation, Dense, Flatten\n'), ((2006, 2027), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2016, 2027), False, 'from keras.layers.core import Activation, Dense, Flatten\n'), ((2168, 2177), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (2175, 2177), False, 'from keras.layers.core import Activation, Dense, Flatten\n'), ((2305, 2323), 'keras.layers.core.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2315, 2323), False, 'from keras.layers.core import Activation, Dense, Flatten\n'), ((2685, 2690), 'keras.optimizers.sgd', 'sgd', ([], {}), '()\n', (2688, 2690), False, 'from keras.optimizers import sgd\n'), ((782, 805), 'keras.layers.InputLayer', 'InputLayer', (['input_shape'], {}), '(input_shape)\n', (792, 805), False, 'from keras.layers import InputLayer\n'), ((1088, 1110), 'keras.layers.core.Activation', 'Activation', (['activation'], {}), '(activation)\n', (1098, 1110), False, 'from keras.layers.core import Activation, Dense, Flatten\n'), ((1553, 1560), 'core.Board', 'Board', ([], {}), '()\n', (1558, 1560), False, 'from core import Board\n'), ((1987, 1991), 'keras.regularizers.l2', 'l2', ([], {}), '()\n', (1989, 1991), False, 'from keras.regularizers import l2\n'), ((2239, 2243), 'keras.regularizers.l2', 'l2', ([], {}), '()\n', (2241, 2243), False, 'from keras.regularizers import l2\n'), ((2286, 2290), 'keras.regularizers.l2', 'l2', ([], {}), '()\n', (2288, 2290), False, 'from keras.regularizers import l2\n')]
|
#! /usr/bin/python
def binary_search(lst, item):
""" Perform binary search on a sorted list.
Return the index of the element if it is in
the list, otherwise return -1.
"""
low = 0
high = len(lst) - 1
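    # Invariant: if item occurs in lst, its index always lies within [low, high].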
while low < high:
        middle = (high + low) // 2  # floor division keeps the index an int under Python 3
current = lst[middle]
if current == item:
return middle
elif current < item:
low = middle+1
elif current > item:
high = middle-1
if lst[low] == item:
return low
return -1
class unit_test:
"""
>>> binary_search(range(1000), 547)
547
>>> binary_search(range(1000), 999)
999
>>> binary_search(range(1000), 0)
0
>>> binary_search(range(1000), 1000)
-1
>>> binary_search(range(1000), -1)
-1
>>> binary_search([1,1,1,1,1,2,2,2], 2) > 4
True
>>> 5 > binary_search([1,1,1,1,1,2,2,2], 1) > -1
True
>>> binary_search([1,1,1,1,1,2,2,2], 3)
-1
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"doctest.testmod"
] |
[((1023, 1040), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1038, 1040), False, 'import doctest\n')]
|
from django.contrib import admin
from .models import *
class TrainAdmin(admin.ModelAdmin):
pass
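# NOTE: TrainAdmin is defined but not passed to either register() call below.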
admin.site.register(User)
admin.site.register(Tweet)
|
[
"django.contrib.admin.site.register"
] |
[((105, 130), 'django.contrib.admin.site.register', 'admin.site.register', (['User'], {}), '(User)\n', (124, 130), False, 'from django.contrib import admin\n'), ((131, 157), 'django.contrib.admin.site.register', 'admin.site.register', (['Tweet'], {}), '(Tweet)\n', (150, 157), False, 'from django.contrib import admin\n')]
|
# Generated from D:/AnacondaProjects/iust_start/grammars\expr3.g4 by ANTLR 4.8
# encoding: utf-8
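# NOTE: machine-generated parser; regenerate from the grammar (expr3.g4) rather than editing by hand.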
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\r")
buf.write("\64\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\3\2\3\2\3\2\3\2\3")
buf.write("\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3\31\n\3\f\3")
buf.write("\16\3\34\13\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4")
buf.write("\'\n\4\f\4\16\4*\13\4\3\5\3\5\3\5\3\5\3\5\3\5\5\5\62\n")
buf.write("\5\3\5\2\4\4\6\6\2\4\6\b\2\2\2\65\2\n\3\2\2\2\4\17\3\2")
buf.write("\2\2\6\35\3\2\2\2\b\61\3\2\2\2\n\13\7\n\2\2\13\f\7\t\2")
buf.write("\2\f\r\5\4\3\2\r\16\7\2\2\3\16\3\3\2\2\2\17\20\b\3\1\2")
buf.write("\20\21\5\6\4\2\21\32\3\2\2\2\22\23\f\5\2\2\23\24\7\5\2")
buf.write("\2\24\31\5\6\4\2\25\26\f\4\2\2\26\27\7\6\2\2\27\31\5\6")
buf.write("\4\2\30\22\3\2\2\2\30\25\3\2\2\2\31\34\3\2\2\2\32\30\3")
buf.write("\2\2\2\32\33\3\2\2\2\33\5\3\2\2\2\34\32\3\2\2\2\35\36")
buf.write("\b\4\1\2\36\37\5\b\5\2\37(\3\2\2\2 !\f\5\2\2!\"\7\7\2")
buf.write("\2\"\'\5\b\5\2#$\f\4\2\2$%\7\b\2\2%\'\5\b\5\2& \3\2\2")
buf.write("\2&#\3\2\2\2\'*\3\2\2\2(&\3\2\2\2()\3\2\2\2)\7\3\2\2\2")
buf.write("*(\3\2\2\2+\62\7\n\2\2,\62\7\13\2\2-.\7\3\2\2./\5\4\3")
buf.write("\2/\60\7\4\2\2\60\62\3\2\2\2\61+\3\2\2\2\61,\3\2\2\2\61")
buf.write("-\3\2\2\2\62\t\3\2\2\2\7\30\32&(\61")
return buf.getvalue()
class testParser ( Parser ):
grammarFileName = "expr3.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'('", "')'", "'+'", "'-'", "'*'", "'/'",
"'='", "<INVALID>", "<INVALID>", "<INVALID>", "'\n'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "Plus", "MINUS",
"MUL", "DIVIDE", "ASSIGN", "Id", "Number", "Whitespace",
"Newline" ]
RULE_start = 0
RULE_expr = 1
RULE_term = 2
RULE_fact = 3
ruleNames = [ "start", "expr", "term", "fact" ]
EOF = Token.EOF
T__0=1
T__1=2
Plus=3
MINUS=4
MUL=5
DIVIDE=6
ASSIGN=7
Id=8
Number=9
Whitespace=10
Newline=11
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class StartContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Id(self):
return self.getToken(testParser.Id, 0)
def ASSIGN(self):
return self.getToken(testParser.ASSIGN, 0)
def expr(self):
return self.getTypedRuleContext(testParser.ExprContext,0)
def EOF(self):
return self.getToken(testParser.EOF, 0)
def getRuleIndex(self):
return testParser.RULE_start
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStart" ):
listener.enterStart(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStart" ):
listener.exitStart(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStart" ):
return visitor.visitStart(self)
else:
return visitor.visitChildren(self)
def start(self):
localctx = testParser.StartContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_start)
try:
self.enterOuterAlt(localctx, 1)
self.state = 8
self.match(testParser.Id)
self.state = 9
self.match(testParser.ASSIGN)
self.state = 10
self.expr(0)
self.state = 11
self.match(testParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return testParser.RULE_expr
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class Rule_minusContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a testParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(testParser.ExprContext,0)
def MINUS(self):
return self.getToken(testParser.MINUS, 0)
def term(self):
return self.getTypedRuleContext(testParser.TermContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRule_minus" ):
listener.enterRule_minus(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRule_minus" ):
listener.exitRule_minus(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRule_minus" ):
return visitor.visitRule_minus(self)
else:
return visitor.visitChildren(self)
class Rule_plusContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a testParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self):
return self.getTypedRuleContext(testParser.ExprContext,0)
def Plus(self):
return self.getToken(testParser.Plus, 0)
def term(self):
return self.getTypedRuleContext(testParser.TermContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRule_plus" ):
listener.enterRule_plus(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRule_plus" ):
listener.exitRule_plus(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRule_plus" ):
return visitor.visitRule_plus(self)
else:
return visitor.visitChildren(self)
class Rule3Context(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a testParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def term(self):
return self.getTypedRuleContext(testParser.TermContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRule3" ):
listener.enterRule3(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRule3" ):
listener.exitRule3(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRule3" ):
return visitor.visitRule3(self)
else:
return visitor.visitChildren(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = testParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 2
self.enterRecursionRule(localctx, 2, self.RULE_expr, _p)
try:
self.enterOuterAlt(localctx, 1)
localctx = testParser.Rule3Context(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 14
self.term(0)
self._ctx.stop = self._input.LT(-1)
self.state = 24
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,1,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 22
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
if la_ == 1:
localctx = testParser.Rule_plusContext(self, testParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 16
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 17
self.match(testParser.Plus)
self.state = 18
self.term(0)
pass
elif la_ == 2:
localctx = testParser.Rule_minusContext(self, testParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 19
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 20
self.match(testParser.MINUS)
self.state = 21
self.term(0)
pass
self.state = 26
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,1,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class TermContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def fact(self):
return self.getTypedRuleContext(testParser.FactContext,0)
def term(self):
return self.getTypedRuleContext(testParser.TermContext,0)
def MUL(self):
return self.getToken(testParser.MUL, 0)
def DIVIDE(self):
return self.getToken(testParser.DIVIDE, 0)
def getRuleIndex(self):
return testParser.RULE_term
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerm" ):
listener.enterTerm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerm" ):
listener.exitTerm(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerm" ):
return visitor.visitTerm(self)
else:
return visitor.visitChildren(self)
def term(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = testParser.TermContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 4
self.enterRecursionRule(localctx, 4, self.RULE_term, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 28
self.fact()
self._ctx.stop = self._input.LT(-1)
self.state = 38
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 36
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
if la_ == 1:
localctx = testParser.TermContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_term)
self.state = 30
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 31
self.match(testParser.MUL)
self.state = 32
self.fact()
pass
elif la_ == 2:
localctx = testParser.TermContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_term)
self.state = 33
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 34
self.match(testParser.DIVIDE)
self.state = 35
self.fact()
pass
self.state = 40
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class FactContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Id(self):
return self.getToken(testParser.Id, 0)
def Number(self):
return self.getToken(testParser.Number, 0)
def expr(self):
return self.getTypedRuleContext(testParser.ExprContext,0)
def getRuleIndex(self):
return testParser.RULE_fact
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFact" ):
listener.enterFact(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFact" ):
listener.exitFact(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFact" ):
return visitor.visitFact(self)
else:
return visitor.visitChildren(self)
def fact(self):
localctx = testParser.FactContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_fact)
try:
self.state = 47
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [testParser.Id]:
self.enterOuterAlt(localctx, 1)
self.state = 41
self.match(testParser.Id)
pass
elif token in [testParser.Number]:
self.enterOuterAlt(localctx, 2)
self.state = 42
self.match(testParser.Number)
pass
elif token in [testParser.T__0]:
self.enterOuterAlt(localctx, 3)
self.state = 43
self.match(testParser.T__0)
self.state = 44
self.expr(0)
self.state = 45
self.match(testParser.T__1)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[1] = self.expr_sempred
self._predicates[2] = self.term_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 3)
if predIndex == 1:
return self.precpred(self._ctx, 2)
def term_sempred(self, localctx:TermContext, predIndex:int):
if predIndex == 2:
return self.precpred(self._ctx, 3)
if predIndex == 3:
return self.precpred(self._ctx, 2)
|
[
"io.StringIO",
"antlr4.error.Errors.FailedPredicateException"
] |
[((276, 286), 'io.StringIO', 'StringIO', ([], {}), '()\n', (284, 286), False, 'from io import StringIO\n'), ((9510, 9571), 'antlr4.error.Errors.FailedPredicateException', 'FailedPredicateException', (['self', '"""self.precpred(self._ctx, 3)"""'], {}), "(self, 'self.precpred(self._ctx, 3)')\n", (9534, 9571), False, 'from antlr4.error.Errors import FailedPredicateException\n'), ((13517, 13578), 'antlr4.error.Errors.FailedPredicateException', 'FailedPredicateException', (['self', '"""self.precpred(self._ctx, 3)"""'], {}), "(self, 'self.precpred(self._ctx, 3)')\n", (13541, 13578), False, 'from antlr4.error.Errors import FailedPredicateException\n'), ((10243, 10304), 'antlr4.error.Errors.FailedPredicateException', 'FailedPredicateException', (['self', '"""self.precpred(self._ctx, 2)"""'], {}), "(self, 'self.precpred(self._ctx, 2)')\n", (10267, 10304), False, 'from antlr4.error.Errors import FailedPredicateException\n'), ((14212, 14273), 'antlr4.error.Errors.FailedPredicateException', 'FailedPredicateException', (['self', '"""self.precpred(self._ctx, 2)"""'], {}), "(self, 'self.precpred(self._ctx, 2)')\n", (14236, 14273), False, 'from antlr4.error.Errors import FailedPredicateException\n')]
|
"""
Script to analyze distribution of squared Euclidean distance between gradients.
"""
from math import sqrt
import numpy as np
from scipy import stats
# Set constants.
k_vals = [35, 30, 36]
n_vals = [1, 18, 1]
total_n = sum(n_vals)
sigma = 0.01
start_t = 200
t = 250
num_trials = 100
alpha = 0.05
load = "vecs.np"
reject_probs = []
outlier_count = 0
for m in range(num_trials):
max_k = max(k_vals)
vecs = np.zeros((t, total_n, max_k, 2))
if load is None:
start = 0
for k, n in zip(k_vals, n_vals):
vecs[:, start : start + n, :k, :] = np.random.normal(
scale=sigma, size=(t, n, k, 2)
)
start = start + n
else:
with open(load, "rb") as f:
vecs = np.load(f)
count = 0
for current_t in range(start_t, t):
z = []
start = 0
for k, n in zip(k_vals, n_vals):
# Compute expected distribution of sample means.
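            # Each difference coordinate is N(0, 2*sigma^2), so ||diff||^2 ~ 2*sigma^2 * chi^2_k,
            # with mean 2*k*sigma^2 and standard deviation 2*sqrt(2k)*sigma^2, as computed below.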
length_mu = 2 * k * (sigma ** 2)
length_sigma = 2 * sqrt(2 * k) * (sigma ** 2)
# Compute sample means and z-scores.
diffs = (
vecs[: current_t + 1, start : start + n, :, 0]
- vecs[: current_t + 1, start : start + n, :, 1]
)
lengths = np.linalg.norm(diffs, ord=2, axis=2) ** 2
sample_mean = np.mean(lengths, axis=0)
current_z = (sample_mean - length_mu) / (length_sigma / sqrt(current_t + 1))
z.append(current_z)
start = start + n
z = np.concatenate(z)
# Check sizes.
assert z.shape == (total_n,)
"""
# Compute QQ plot correlation coefficient
baseline = np.random.normal(size=z_sample_size)
sorted_z = np.sort(z)
sorted_baseline = np.sort(baseline)
_, _, r, p, _ = stats.linregress(sorted_z, sorted_baseline)
print("Correlation coefficient: %f" % r)
print("p-value: %f" % p)
print("")
"""
# Compare z-score distribution against standard normal.
s, p = stats.kstest(z, "norm")
if p < alpha:
count += 1
reject_prob = count / (t - start_t)
reject_probs.append(reject_prob)
if count > 0:
outlier_count += 1
"""
for outlier in outliers:
print("Total outliers: %d/%d" % (outlier, (t - start_t)))
"""
avg_reject_prob = sum(reject_probs) / len(reject_probs)
print("reject_probs: %s" % str(reject_probs))
print("avg reject_prob: %f" % avg_reject_prob)
print("num rejects: %d/%d" % (outlier_count, num_trials))
|
[
"scipy.stats.kstest",
"numpy.load",
"math.sqrt",
"numpy.zeros",
"numpy.mean",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.concatenate"
] |
[((421, 453), 'numpy.zeros', 'np.zeros', (['(t, total_n, max_k, 2)'], {}), '((t, total_n, max_k, 2))\n', (429, 453), True, 'import numpy as np\n'), ((1557, 1574), 'numpy.concatenate', 'np.concatenate', (['z'], {}), '(z)\n', (1571, 1574), True, 'import numpy as np\n'), ((2089, 2112), 'scipy.stats.kstest', 'stats.kstest', (['z', '"""norm"""'], {}), "(z, 'norm')\n", (2101, 2112), False, 'from scipy import stats\n'), ((582, 630), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': '(t, n, k, 2)'}), '(scale=sigma, size=(t, n, k, 2))\n', (598, 630), True, 'import numpy as np\n'), ((756, 766), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (763, 766), True, 'import numpy as np\n'), ((1367, 1391), 'numpy.mean', 'np.mean', (['lengths'], {'axis': '(0)'}), '(lengths, axis=0)\n', (1374, 1391), True, 'import numpy as np\n'), ((1299, 1335), 'numpy.linalg.norm', 'np.linalg.norm', (['diffs'], {'ord': '(2)', 'axis': '(2)'}), '(diffs, ord=2, axis=2)\n', (1313, 1335), True, 'import numpy as np\n'), ((1036, 1047), 'math.sqrt', 'sqrt', (['(2 * k)'], {}), '(2 * k)\n', (1040, 1047), False, 'from math import sqrt\n'), ((1460, 1479), 'math.sqrt', 'sqrt', (['(current_t + 1)'], {}), '(current_t + 1)\n', (1464, 1479), False, 'from math import sqrt\n')]
|
import os
class Tomcat:
def get_details_for_each_tomcat(self,server_xml):
self.tcf = server_xml
self.th = os.path.dirname(os.path.dirname(server_xml))
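        # Tomcat home: two directory levels above conf/server.xml.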
return None
def display_details(self):
print(f'The tomcat config file is : {self.tcf} \nThe tomcat home is : {self.th}')
return None
def main():
tomcat7 = Tomcat()
tomcat9 = Tomcat()
tomcat7.get_details_for_each_tomcat("/home/Automation/tomcat7/conf/server.xml")
    # The above line is equivalent to calling through the class:
    # Tomcat.get_details_for_each_tomcat(tomcat7, "/home/Automation/tomcat7/conf/server.xml")
tomcat9.get_details_for_each_tomcat("/home/Automation/tomcat9/conf/server.xml")
    # The above line is equivalent to calling through the class:
    # Tomcat.get_details_for_each_tomcat(tomcat9, "/home/Automation/tomcat9/conf/server.xml")
print(tomcat9.tcf)
tomcat9.display_details()
    # The above line is equivalent to:
    # Tomcat.display_details(tomcat9)
tomcat7.display_details()
return None
if __name__ == "__main__":
main()
|
[
"os.path.dirname"
] |
[((128, 155), 'os.path.dirname', 'os.path.dirname', (['server_xml'], {}), '(server_xml)\n', (143, 155), False, 'import os\n')]
|
import unittest
import fibonacci
class TestFibonacci(unittest.TestCase):
def test_fib(self):
self.assertEqual(fibonacci.fib(1), 1)
self.assertEqual(fibonacci.fib(2), 1)
self.assertEqual(fibonacci.fib(3), 2)
self.assertEqual(fibonacci.fib(4), 3)
self.assertEqual(fibonacci.fib(5), 5)
self.assertEqual(fibonacci.fib(6), 8)
self.assertEqual(fibonacci.fib(7), 13)
self.assertEqual(fibonacci.fib(8), 21)
def test_fib_rec(self):
self.assertEqual(fibonacci.fib_rec(1), 1)
self.assertEqual(fibonacci.fib_rec(2), 1)
self.assertEqual(fibonacci.fib_rec(3), 2)
self.assertEqual(fibonacci.fib_rec(4), 3)
self.assertEqual(fibonacci.fib_rec(5), 5)
self.assertEqual(fibonacci.fib_rec(6), 8)
self.assertEqual(fibonacci.fib_rec(7), 13)
self.assertEqual(fibonacci.fib_rec(8), 21)
def test_fib_binet(self):
self.assertEqual(fibonacci.fib_binet(1), 1)
self.assertEqual(fibonacci.fib_binet(2), 1)
self.assertEqual(fibonacci.fib_binet(3), 2)
self.assertEqual(fibonacci.fib_binet(4), 3)
self.assertEqual(fibonacci.fib_binet(5), 5)
self.assertEqual(fibonacci.fib_binet(6), 8)
self.assertEqual(fibonacci.fib_binet(7), 13)
self.assertEqual(fibonacci.fib_binet(8), 21)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"fibonacci.fib_rec",
"fibonacci.fib_binet",
"fibonacci.fib"
] |
[((1381, 1396), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1394, 1396), False, 'import unittest\n'), ((124, 140), 'fibonacci.fib', 'fibonacci.fib', (['(1)'], {}), '(1)\n', (137, 140), False, 'import fibonacci\n'), ((170, 186), 'fibonacci.fib', 'fibonacci.fib', (['(2)'], {}), '(2)\n', (183, 186), False, 'import fibonacci\n'), ((216, 232), 'fibonacci.fib', 'fibonacci.fib', (['(3)'], {}), '(3)\n', (229, 232), False, 'import fibonacci\n'), ((262, 278), 'fibonacci.fib', 'fibonacci.fib', (['(4)'], {}), '(4)\n', (275, 278), False, 'import fibonacci\n'), ((308, 324), 'fibonacci.fib', 'fibonacci.fib', (['(5)'], {}), '(5)\n', (321, 324), False, 'import fibonacci\n'), ((354, 370), 'fibonacci.fib', 'fibonacci.fib', (['(6)'], {}), '(6)\n', (367, 370), False, 'import fibonacci\n'), ((400, 416), 'fibonacci.fib', 'fibonacci.fib', (['(7)'], {}), '(7)\n', (413, 416), False, 'import fibonacci\n'), ((447, 463), 'fibonacci.fib', 'fibonacci.fib', (['(8)'], {}), '(8)\n', (460, 463), False, 'import fibonacci\n'), ((523, 543), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(1)'], {}), '(1)\n', (540, 543), False, 'import fibonacci\n'), ((573, 593), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(2)'], {}), '(2)\n', (590, 593), False, 'import fibonacci\n'), ((623, 643), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(3)'], {}), '(3)\n', (640, 643), False, 'import fibonacci\n'), ((673, 693), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(4)'], {}), '(4)\n', (690, 693), False, 'import fibonacci\n'), ((723, 743), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(5)'], {}), '(5)\n', (740, 743), False, 'import fibonacci\n'), ((773, 793), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(6)'], {}), '(6)\n', (790, 793), False, 'import fibonacci\n'), ((823, 843), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(7)'], {}), '(7)\n', (840, 843), False, 'import fibonacci\n'), ((874, 894), 'fibonacci.fib_rec', 'fibonacci.fib_rec', (['(8)'], {}), '(8)\n', (891, 894), False, 'import fibonacci\n'), ((956, 978), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(1)'], {}), '(1)\n', (975, 978), False, 'import fibonacci\n'), ((1008, 1030), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(2)'], {}), '(2)\n', (1027, 1030), False, 'import fibonacci\n'), ((1060, 1082), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(3)'], {}), '(3)\n', (1079, 1082), False, 'import fibonacci\n'), ((1112, 1134), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(4)'], {}), '(4)\n', (1131, 1134), False, 'import fibonacci\n'), ((1164, 1186), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(5)'], {}), '(5)\n', (1183, 1186), False, 'import fibonacci\n'), ((1216, 1238), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(6)'], {}), '(6)\n', (1235, 1238), False, 'import fibonacci\n'), ((1268, 1290), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(7)'], {}), '(7)\n', (1287, 1290), False, 'import fibonacci\n'), ((1321, 1343), 'fibonacci.fib_binet', 'fibonacci.fib_binet', (['(8)'], {}), '(8)\n', (1340, 1343), False, 'import fibonacci\n')]
|
# Generated by Django 3.0.6 on 2020-05-25 13:25
from decimal import Decimal
import django.contrib.auth.models
import django.contrib.auth.validators
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0011_update_proxy_permissions"),
("contenttypes", "0002_remove_content_type_name"),
]
operations = [
migrations.CreateModel(
name="CoursistUser",
fields=[
("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(blank=True, null=True, verbose_name="last login"),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name="username",
),
),
(
"first_name",
models.CharField(blank=True, max_length=30, verbose_name="first name"),
),
(
"last_name",
models.CharField(blank=True, max_length=150, verbose_name="last name"),
),
(
"email",
models.EmailField(blank=True, max_length=254, verbose_name="email address"),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(default=django.utils.timezone.now, verbose_name="date joined"),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"verbose_name": "user",
"verbose_name_plural": "users",
"abstract": False,
},
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name="Course",
fields=[
("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
("course_number", models.IntegerField(unique=True)),
("name", models.CharField(max_length=100, unique=True)),
("credits", models.IntegerField(default=0)),
],
options={
"ordering": ["course_number"],
},
),
migrations.CreateModel(
name="StudyBlock",
fields=[
("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
("name", models.CharField(max_length=50)),
("min_credits", models.IntegerField()),
("courses", models.ManyToManyField(to="academic_helper.Course")),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="StudyPlan",
fields=[
("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
("name", models.CharField(max_length=50)),
("credits", models.IntegerField()),
("is_public", models.BooleanField(default=True)),
("blocks", models.ManyToManyField(to="academic_helper.StudyBlock")),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ExtendedRating",
fields=[
(
"id",
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
),
("count", models.PositiveIntegerField(default=0)),
("total", models.PositiveIntegerField(default=0)),
(
"average",
models.DecimalField(decimal_places=3, default=Decimal("0"), max_digits=6),
),
("object_id", models.PositiveIntegerField(blank=True, null=True)),
(
"content_type",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
),
),
],
options={
"abstract": False,
"unique_together": {("content_type", "object_id")},
},
),
migrations.CreateModel(
name="CompletedCourse",
fields=[
("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
("grade", models.IntegerField(blank=True, null=True)),
(
"block",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="academic_helper.StudyBlock"),
),
(
"course",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="academic_helper.Course"),
),
(
"user",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
],
options={
"unique_together": {("user", "course")},
},
),
]
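# Hedged sketch (not part of the generated migration): the ExtendedRating model
# above stores count/total plus a cached average with 3 decimal places; this is
# one plausible way such an average would be recomputed with Decimal.
from decimal import ROUND_HALF_UP

def _recompute_average(total, count):
    # Avoid division by zero while no ratings have been recorded yet.
    if count == 0:
        return Decimal("0")
    avg = Decimal(total) / Decimal(count)
    return avg.quantize(Decimal("0.001"), rounding=ROUND_HALF_UP)

assert _recompute_average(10, 3) == Decimal("3.333")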
|
[
"django.db.models.ManyToManyField",
"decimal.Decimal",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.BooleanField",
"django.db.models.EmailField",
"django.db.models.AutoField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((580, 647), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (596, 647), False, 'from django.db import migrations, models\n'), ((679, 736), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""password"""'}), "(max_length=128, verbose_name='password')\n", (695, 736), False, 'from django.db import migrations, models\n'), ((811, 881), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""last login"""'}), "(blank=True, null=True, verbose_name='last login')\n", (831, 881), False, 'from django.db import migrations, models\n'), ((976, 1147), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates that this user has all permissions without explicitly assigning them."""', 'verbose_name': '"""superuser status"""'}), "(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')\n", (995, 1147), False, 'from django.db import migrations, models\n'), ((1881, 1951), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'verbose_name': '"""first name"""'}), "(blank=True, max_length=30, verbose_name='first name')\n", (1897, 1951), False, 'from django.db import migrations, models\n'), ((2043, 2113), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(150)', 'verbose_name': '"""last name"""'}), "(blank=True, max_length=150, verbose_name='last name')\n", (2059, 2113), False, 'from django.db import migrations, models\n'), ((2201, 2276), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""email address"""'}), "(blank=True, max_length=254, verbose_name='email address')\n", (2218, 2276), False, 'from django.db import migrations, models\n'), ((2367, 2510), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates whether the user can log into this admin site."""', 'verbose_name': '"""staff status"""'}), "(default=False, help_text=\n 'Designates whether the user can log into this admin site.',\n verbose_name='staff status')\n", (2386, 2510), False, 'from django.db import migrations, models\n'), ((2688, 2869), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Designates whether this user should be treated as active. Unselect this instead of deleting accounts."""', 'verbose_name': '"""active"""'}), "(default=True, help_text=\n 'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n , verbose_name='active')\n", (2707, 2869), False, 'from django.db import migrations, models\n'), ((3048, 3136), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""date joined"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'date joined')\n", (3068, 3136), False, 'from django.db import migrations, models\n'), ((3220, 3471), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""The groups this user belongs to. 
A user will get all permissions granted to each of their groups."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Group"""', 'verbose_name': '"""groups"""'}), "(blank=True, help_text=\n 'The groups this user belongs to. A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to='auth.Group',\n verbose_name='groups')\n", (3242, 3471), False, 'from django.db import migrations, models\n'), ((3723, 3927), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""Specific permissions for this user."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Permission"""', 'verbose_name': '"""user permissions"""'}), "(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions')\n", (3745, 3927), False, 'from django.db import migrations, models\n'), ((4499, 4566), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (4515, 4566), False, 'from django.db import migrations, models\n'), ((4603, 4635), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'unique': '(True)'}), '(unique=True)\n', (4622, 4635), False, 'from django.db import migrations, models\n'), ((4663, 4708), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (4679, 4708), False, 'from django.db import migrations, models\n'), ((4739, 4769), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4758, 4769), False, 'from django.db import migrations, models\n'), ((4989, 5056), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (5005, 5056), False, 'from django.db import migrations, models\n'), ((5084, 5115), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (5100, 5115), False, 'from django.db import migrations, models\n'), ((5150, 5171), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5169, 5171), False, 'from django.db import migrations, models\n'), ((5202, 5253), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""academic_helper.Course"""'}), "(to='academic_helper.Course')\n", (5224, 5253), False, 'from django.db import migrations, models\n'), ((5460, 5527), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (5476, 5527), False, 'from django.db import migrations, models\n'), ((5555, 5586), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (5571, 5586), False, 'from django.db import migrations, models\n'), ((5617, 5638), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5636, 5638), False, 'from django.db import migrations, models\n'), ((5671, 5704), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (5690, 5704), False, 'from django.db import migrations, models\n'), ((5734, 
5789), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""academic_helper.StudyBlock"""'}), "(to='academic_helper.StudyBlock')\n", (5756, 5789), False, 'from django.db import migrations, models\n'), ((6042, 6135), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (6058, 6135), False, 'from django.db import migrations, models\n'), ((6178, 6216), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6205, 6216), False, 'from django.db import migrations, models\n'), ((6245, 6283), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6272, 6283), False, 'from django.db import migrations, models\n'), ((6479, 6529), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6506, 6529), False, 'from django.db import migrations, models\n'), ((6606, 6727), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""contenttypes.ContentType"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='contenttypes.ContentType')\n", (6623, 6727), False, 'from django.db import migrations, models\n'), ((7140, 7207), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(editable=False, primary_key=True, serialize=False)\n', (7156, 7207), False, 'from django.db import migrations, models\n'), ((7236, 7278), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7255, 7278), False, 'from django.db import migrations, models\n'), ((7348, 7448), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""academic_helper.StudyBlock"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'academic_helper.StudyBlock')\n", (7365, 7448), False, 'from django.db import migrations, models\n'), ((7532, 7628), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""academic_helper.Course"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'academic_helper.Course')\n", (7549, 7628), False, 'from django.db import migrations, models\n'), ((7710, 7806), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (7727, 7806), False, 'from django.db import migrations, models\n'), ((6401, 6413), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (6408, 6413), False, 'from decimal import Decimal\n')]
|
# tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 28, 2016
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import glob
import click
from tf_unet import unet
from tf_unet import util
from scripts.radio_util import DataProvider
def create_training_path(output_path):
idx = 0
path = os.path.join(output_path, "run_{:03d}".format(idx))
while os.path.exists(path):
idx += 1
path = os.path.join(output_path, "run_{:03d}".format(idx))
return path
@click.command()
@click.option('--data_root', default="./bleien_data")
@click.option('--output_path', default="./daint_unet_trained_rfi_bleien")
@click.option('--training_iters', default=32)
@click.option('--epochs', default=100)
@click.option('--restore', default=False)
@click.option('--layers', default=5)
@click.option('--features_root', default=64)
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
print("Using data from: %s"%data_root)
data_provider = DataProvider(600, glob.glob(data_root+"/*"))
net = unet.Unet(channels=data_provider.channels,
n_class=data_provider.n_class,
layers=layers,
features_root=features_root,
cost_kwargs=dict(regularizer=0.001),
)
path = output_path if restore else create_training_path(output_path)
trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
path = trainer.train(data_provider, path,
training_iters=training_iters,
epochs=epochs,
dropout=0.5,
display_step=2,
restore=restore)
x_test, y_test = data_provider(1)
prediction = net.predict(path, x_test)
print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
if __name__ == '__main__':
launch()
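# Hedged sketch (illustrative, not part of the original script): shows how
# create_training_path above returns the first run_XXX directory that does not
# exist yet, so repeated training runs never overwrite each other.
def _demo_create_training_path():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        first = create_training_path(tmp)    # -> <tmp>/run_000
        os.makedirs(first)
        second = create_training_path(tmp)   # -> <tmp>/run_001
        assert os.path.basename(second) == "run_001"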
|
[
"tf_unet.util.crop_to_shape",
"click.option",
"os.path.exists",
"click.command",
"glob.glob"
] |
[((1135, 1150), 'click.command', 'click.command', ([], {}), '()\n', (1148, 1150), False, 'import click\n'), ((1152, 1204), 'click.option', 'click.option', (['"""--data_root"""'], {'default': '"""./bleien_data"""'}), "('--data_root', default='./bleien_data')\n", (1164, 1204), False, 'import click\n'), ((1206, 1278), 'click.option', 'click.option', (['"""--output_path"""'], {'default': '"""./daint_unet_trained_rfi_bleien"""'}), "('--output_path', default='./daint_unet_trained_rfi_bleien')\n", (1218, 1278), False, 'import click\n'), ((1280, 1324), 'click.option', 'click.option', (['"""--training_iters"""'], {'default': '(32)'}), "('--training_iters', default=32)\n", (1292, 1324), False, 'import click\n'), ((1326, 1363), 'click.option', 'click.option', (['"""--epochs"""'], {'default': '(100)'}), "('--epochs', default=100)\n", (1338, 1363), False, 'import click\n'), ((1365, 1405), 'click.option', 'click.option', (['"""--restore"""'], {'default': '(False)'}), "('--restore', default=False)\n", (1377, 1405), False, 'import click\n'), ((1407, 1442), 'click.option', 'click.option', (['"""--layers"""'], {'default': '(5)'}), "('--layers', default=5)\n", (1419, 1442), False, 'import click\n'), ((1444, 1487), 'click.option', 'click.option', (['"""--features_root"""'], {'default': '(64)'}), "('--features_root', default=64)\n", (1456, 1487), False, 'import click\n'), ((1011, 1031), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1025, 1031), False, 'import os\n'), ((1661, 1688), 'glob.glob', 'glob.glob', (["(data_root + '/*')"], {}), "(data_root + '/*')\n", (1670, 1688), False, 'import glob\n'), ((2562, 2606), 'tf_unet.util.crop_to_shape', 'util.crop_to_shape', (['y_test', 'prediction.shape'], {}), '(y_test, prediction.shape)\n', (2580, 2606), False, 'from tf_unet import util\n')]
|
import pygame,sys
import random
import math
from pygame.locals import *
from pygame.sprite import Group
import gF
import Bullet
import DADcharacter
import Slave
import global_var
import Effect
import Item
import gameRule
class Menu():
def __init__(self):
super(Menu,self).__init__()
self.image=pygame.image.load('resource/title/menu.png').convert()
self.sign=global_var.get_value('menuSign')
self.shadow=global_var.get_value('menuShadow')
self.playerTitleImg=global_var.get_value('playerTitleImg')
self.kanjiLogo=global_var.get_value('kanjiLogo')
self.engLogo=global_var.get_value('engLogo')
self.lightLogo=global_var.get_value('lightLogo')
self.tachie=global_var.get_value('reimuLogo')
self.selectImg=global_var.get_value('menuSelectImg')
self.levelImg=global_var.get_value('levelImg')
self.font=pygame.font.SysFont('arial', 20)
self.selectNum=[0,0,0,0]
self.stairMax=[7,0,1,1]
self.menuStair=0 #0:main menu, 1 stage selection, 2 player selection, 3 practice menu
self.playerReset=False
self.lightStrength=0.0
self.logoPosAdj=[0,0]
self.lastFrame=0
self.testSpellNum=1
self.ifSpell=False
self.substract=False
self.plus=False
def update(self,screen,pressed_keys,pressed_keys_last,player):
self.lastFrame+=1
if self.lastFrame>360:
self.lastFrame=self.lastFrame%360
screen.blit(self.image,(0,0))
self.alterSelect(pressed_keys,pressed_keys_last)
self.drawSign(screen)
self.doSelection(pressed_keys,pressed_keys_last,player)
def alterSelect(self,pressed_keys,pressed_keys_last):
if self.menuStair!=2 and self.menuStair!=3:
if not (pressed_keys[K_UP] and pressed_keys_last[K_UP]):
if pressed_keys[K_UP]:
self.selectNum[self.menuStair]-=1
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
if not (pressed_keys[K_DOWN] and pressed_keys_last[K_DOWN]):
if pressed_keys[K_DOWN]:
self.selectNum[self.menuStair]+=1
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
elif self.menuStair==2:
if not (pressed_keys[K_LEFT] and pressed_keys_last[K_LEFT]):
if pressed_keys[K_LEFT]:
self.selectNum[self.menuStair]-=1
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
if not (pressed_keys[K_RIGHT] and pressed_keys_last[K_RIGHT]):
if pressed_keys[K_RIGHT]:
self.selectNum[self.menuStair]+=1
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
elif self.menuStair==3:
if not (pressed_keys[K_LEFT] and pressed_keys_last[K_LEFT]):
if pressed_keys[K_LEFT]:
self.testSpellNum-=1
self.substract=True
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
if not (pressed_keys[K_RIGHT] and pressed_keys_last[K_RIGHT]):
if pressed_keys[K_RIGHT]:
self.testSpellNum+=1
self.plus=True
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
if self.testSpellNum>10:
self.testSpellNum=1
elif self.testSpellNum<1:
self.testSpellNum=10
if not (pressed_keys[K_DOWN] and pressed_keys_last[K_DOWN]):
if pressed_keys[K_DOWN]:
self.ifSpell=False
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
if not (pressed_keys[K_UP] and pressed_keys_last[K_UP]):
if pressed_keys[K_UP]:
self.ifSpell=True
global_var.get_value('select_sound').stop()
global_var.get_value('select_sound').play()
if not self.ifSpell and self.testSpellNum==10:
if self.substract:
self.testSpellNum=9
elif self.plus:
self.testSpellNum=1
else:
self.ifSpell=True
self.substract=False
self.plus=False
if (pressed_keys[K_ESCAPE]!=pressed_keys_last[K_ESCAPE] and pressed_keys[K_ESCAPE]) or (pressed_keys[K_x]!=pressed_keys_last[K_x] and pressed_keys[K_x]):
if self.menuStair>0:
self.menuStair-=1
global_var.get_value('cancel_sound').play()
else:
if self.selectNum[0]!=7:
self.selectNum[0]=7
global_var.get_value('cancel_sound').play()
else:
global_var.get_value('cancel_sound').play()
sys.exit()
if self.selectNum[self.menuStair]>self.stairMax[self.menuStair]:
self.selectNum[self.menuStair]=0
elif self.selectNum[self.menuStair]<0:
self.selectNum[self.menuStair]=self.stairMax[self.menuStair]
def drawSign(self,screen):
if self.menuStair==0:
self.logoPosAdj=[math.sin(self.lastFrame*math.pi/180)*20,math.sin(self.lastFrame*0.5*math.pi/180)*5]
screen.blit(self.kanjiLogo,(100+self.logoPosAdj[0],30+self.logoPosAdj[1]))
self.lightStrength=0.5*math.sin(self.lastFrame*2*math.pi/180)+0.5
alpha=round(self.lightStrength*256)
self.lightLogo.set_alpha(alpha)
screen.blit(self.lightLogo,(100-5,164))
screen.blit(self.engLogo,(100,164))
screen.blit(self.tachie,(600,90))
for i in range(0,8):
if i!=self.selectNum[self.menuStair]:
screen.blit(self.shadow[i],(100,250+i*48))
else:
screen.blit(self.sign[i],(100,250+i*48))
elif self.menuStair==1:
screen.blit(self.selectImg[0],(40,10))
screen.blit(self.levelImg[0],(288,264))
elif self.menuStair==2:
if self.selectNum[0]==0 or self.selectNum[0]==2:
screen.blit(self.selectImg[1],(40,10))
for i in range(0,2):
self.playerTitleImg[i].set_alpha(256)
if self.selectNum[2]==0:
self.playerTitleImg[1].set_alpha(100)
elif self.selectNum[2]==1:
self.playerTitleImg[0].set_alpha(100)
for i in range(0,2):
screen.blit(self.playerTitleImg[i],(450*i,120))
elif self.menuStair==3:
if self.selectNum[0]==2:
if self.ifSpell:
pracText=self.font.render('Test: Start From Spell No.'+str(self.testSpellNum),True,(255,255,255))
else:
pracText=self.font.render('Test: Start From non-Spell No.'+str(self.testSpellNum),True,(255,255,255))
screen.blit(pracText,(200,300))
def doSelection(self,pressed_keys,pressed_keys_last,player):
if pressed_keys[K_z]!=pressed_keys_last[K_z] and pressed_keys[K_z]:
if self.menuStair==0:
if self.selectNum[self.menuStair]==0:
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.selectNum[self.menuStair]==2:
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.selectNum[self.menuStair]==7:
global_var.get_value('ok_sound').play()
sys.exit()
else:
global_var.get_value('invalid_sound').stop()
global_var.get_value('invalid_sound').play()
elif self.menuStair==1:
if self.selectNum[0]==0 or self.selectNum[0]==2:
if self.selectNum[self.menuStair]==0:
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.menuStair==2:
if self.selectNum[0]==0:
if self.selectNum[self.menuStair]==0:
global_var.set_value('playerNum',0)
elif self.selectNum[self.menuStair]==1:
global_var.set_value('playerNum',1)
global_var.get_value('ok_sound').play()
global_var.get_value('ok_sound').play()
global_var.set_value('ifTest',False)
pygame.mixer.music.stop()
                    pygame.mixer.music.load('resource/bgm/lightnessOnTheWay.mp3')  # load the background music file
#pygame.mixer.music.load('resource/bgm/上海アリス幻樂団 - 死体旅行~ Be of good cheer!.mp3')
                    pygame.mixer.music.set_volume(0.6)  # set the background music volume
pygame.mixer.music.play(loops=-1)
self.menuStair=0
global_var.set_value('menu',False)
self.playerReset=True
if self.selectNum[0]==2:
if self.selectNum[self.menuStair]==0:
global_var.set_value('playerNum',0)
elif self.selectNum[self.menuStair]==1:
global_var.set_value('playerNum',1)
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.menuStair==3:
if self.selectNum[0]==2:
global_var.get_value('ok_sound').play()
global_var.set_value('ifTest',True)
global_var.set_value('ifSpellTest',self.ifSpell)
global_var.set_value('spellNum',self.testSpellNum)
pygame.mixer.music.stop()
                    pygame.mixer.music.load('resource/bgm/lightnessOnTheWay.mp3')  # load the background music file
#pygame.mixer.music.load('resource/bgm/上海アリス幻樂団 - 死体旅行~ Be of good cheer!.mp3')
                    pygame.mixer.music.set_volume(0.6)  # set the background music volume
pygame.mixer.music.play(loops=-1)
self.menuStair=0
global_var.set_value('menu',False)
self.playerReset=True
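# Hedged sketch (not from the original game): the clamp logic at the end of
# alterSelect above is equivalent to modular wrap-around over the option count.
def _wrap_select(index, max_index):
    # Python's % already maps 8 -> 0 and -1 -> max_index when max_index == 7.
    return index % (max_index + 1)

assert _wrap_select(8, 7) == 0 and _wrap_select(-1, 7) == 7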
|
[
"pygame.font.SysFont",
"pygame.mixer.music.play",
"math.sin",
"global_var.get_value",
"pygame.mixer.music.set_volume",
"global_var.set_value",
"pygame.mixer.music.load",
"pygame.image.load",
"pygame.mixer.music.stop",
"sys.exit"
] |
[((389, 421), 'global_var.get_value', 'global_var.get_value', (['"""menuSign"""'], {}), "('menuSign')\n", (409, 421), False, 'import global_var\n'), ((442, 476), 'global_var.get_value', 'global_var.get_value', (['"""menuShadow"""'], {}), "('menuShadow')\n", (462, 476), False, 'import global_var\n'), ((505, 543), 'global_var.get_value', 'global_var.get_value', (['"""playerTitleImg"""'], {}), "('playerTitleImg')\n", (525, 543), False, 'import global_var\n'), ((567, 600), 'global_var.get_value', 'global_var.get_value', (['"""kanjiLogo"""'], {}), "('kanjiLogo')\n", (587, 600), False, 'import global_var\n'), ((622, 653), 'global_var.get_value', 'global_var.get_value', (['"""engLogo"""'], {}), "('engLogo')\n", (642, 653), False, 'import global_var\n'), ((677, 710), 'global_var.get_value', 'global_var.get_value', (['"""lightLogo"""'], {}), "('lightLogo')\n", (697, 710), False, 'import global_var\n'), ((731, 764), 'global_var.get_value', 'global_var.get_value', (['"""reimuLogo"""'], {}), "('reimuLogo')\n", (751, 764), False, 'import global_var\n'), ((788, 825), 'global_var.get_value', 'global_var.get_value', (['"""menuSelectImg"""'], {}), "('menuSelectImg')\n", (808, 825), False, 'import global_var\n'), ((848, 880), 'global_var.get_value', 'global_var.get_value', (['"""levelImg"""'], {}), "('levelImg')\n", (868, 880), False, 'import global_var\n'), ((899, 931), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""arial"""', '(20)'], {}), "('arial', 20)\n", (918, 931), False, 'import pygame, sys\n'), ((316, 360), 'pygame.image.load', 'pygame.image.load', (['"""resource/title/menu.png"""'], {}), "('resource/title/menu.png')\n", (333, 360), False, 'import pygame, sys\n'), ((5262, 5272), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5270, 5272), False, 'import pygame, sys\n'), ((5601, 5641), 'math.sin', 'math.sin', (['(self.lastFrame * math.pi / 180)'], {}), '(self.lastFrame * math.pi / 180)\n', (5609, 5641), False, 'import math\n'), ((5641, 5687), 'math.sin', 'math.sin', (['(self.lastFrame * 0.5 * math.pi / 180)'], {}), '(self.lastFrame * 0.5 * math.pi / 180)\n', (5649, 5687), False, 'import math\n'), ((5807, 5851), 'math.sin', 'math.sin', (['(self.lastFrame * 2 * math.pi / 180)'], {}), '(self.lastFrame * 2 * math.pi / 180)\n', (5815, 5851), False, 'import math\n'), ((4949, 4985), 'global_var.get_value', 'global_var.get_value', (['"""cancel_sound"""'], {}), "('cancel_sound')\n", (4969, 4985), False, 'import global_var\n'), ((1967, 2003), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (1987, 2003), False, 'import global_var\n'), ((2031, 2067), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2051, 2067), False, 'import global_var\n'), ((2263, 2299), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2283, 2299), False, 'import global_var\n'), ((2327, 2363), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2347, 2363), False, 'import global_var\n'), ((5112, 5148), 'global_var.get_value', 'global_var.get_value', (['"""cancel_sound"""'], {}), "('cancel_sound')\n", (5132, 5148), False, 'import global_var\n'), ((5198, 5234), 'global_var.get_value', 'global_var.get_value', (['"""cancel_sound"""'], {}), "('cancel_sound')\n", (5218, 5234), False, 'import global_var\n'), ((7674, 7706), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (7694, 7706), False, 'import 
global_var\n'), ((8042, 8052), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8050, 8052), False, 'import pygame, sys\n'), ((8926, 8963), 'global_var.set_value', 'global_var.set_value', (['"""ifTest"""', '(False)'], {}), "('ifTest', False)\n", (8946, 8963), False, 'import global_var\n'), ((8983, 9008), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (9006, 9008), False, 'import pygame, sys\n'), ((9029, 9090), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""resource/bgm/lightnessOnTheWay.mp3"""'], {}), "('resource/bgm/lightnessOnTheWay.mp3')\n", (9052, 9090), False, 'import pygame, sys\n'), ((9224, 9258), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['(0.6)'], {}), '(0.6)\n', (9253, 9258), False, 'import pygame, sys\n'), ((9307, 9340), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {'loops': '(-1)'}), '(loops=-1)\n', (9330, 9340), False, 'import pygame, sys\n'), ((9398, 9433), 'global_var.set_value', 'global_var.set_value', (['"""menu"""', '(False)'], {}), "('menu', False)\n", (9418, 9433), False, 'import global_var\n'), ((2591, 2627), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2611, 2627), False, 'import global_var\n'), ((2655, 2691), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2675, 2691), False, 'import global_var\n'), ((2890, 2926), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2910, 2926), False, 'import global_var\n'), ((2954, 2990), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (2974, 2990), False, 'import global_var\n'), ((7828, 7860), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (7848, 7860), False, 'import global_var\n'), ((8630, 8666), 'global_var.set_value', 'global_var.set_value', (['"""playerNum"""', '(0)'], {}), "('playerNum', 0)\n", (8650, 8666), False, 'import global_var\n'), ((9598, 9634), 'global_var.set_value', 'global_var.set_value', (['"""playerNum"""', '(0)'], {}), "('playerNum', 0)\n", (9618, 9634), False, 'import global_var\n'), ((10009, 10045), 'global_var.set_value', 'global_var.set_value', (['"""ifTest"""', '(True)'], {}), "('ifTest', True)\n", (10029, 10045), False, 'import global_var\n'), ((10065, 10114), 'global_var.set_value', 'global_var.set_value', (['"""ifSpellTest"""', 'self.ifSpell'], {}), "('ifSpellTest', self.ifSpell)\n", (10085, 10114), False, 'import global_var\n'), ((10134, 10185), 'global_var.set_value', 'global_var.set_value', (['"""spellNum"""', 'self.testSpellNum'], {}), "('spellNum', self.testSpellNum)\n", (10154, 10185), False, 'import global_var\n'), ((10205, 10230), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (10228, 10230), False, 'import pygame, sys\n'), ((10251, 10312), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""resource/bgm/lightnessOnTheWay.mp3"""'], {}), "('resource/bgm/lightnessOnTheWay.mp3')\n", (10274, 10312), False, 'import pygame, sys\n'), ((10446, 10480), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['(0.6)'], {}), '(0.6)\n', (10475, 10480), False, 'import pygame, sys\n'), ((10529, 10562), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {'loops': '(-1)'}), '(loops=-1)\n', (10552, 10562), False, 'import pygame, sys\n'), ((10620, 10655), 'global_var.set_value', 'global_var.set_value', (['"""menu"""', '(False)'], {}), 
"('menu', False)\n", (10640, 10655), False, 'import global_var\n'), ((3245, 3281), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (3265, 3281), False, 'import global_var\n'), ((3309, 3345), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (3329, 3345), False, 'import global_var\n'), ((3566, 3602), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (3586, 3602), False, 'import global_var\n'), ((3630, 3666), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (3650, 3666), False, 'import global_var\n'), ((3995, 4031), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (4015, 4031), False, 'import global_var\n'), ((4059, 4095), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (4079, 4095), False, 'import global_var\n'), ((4269, 4305), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (4289, 4305), False, 'import global_var\n'), ((4333, 4369), 'global_var.get_value', 'global_var.get_value', (['"""select_sound"""'], {}), "('select_sound')\n", (4353, 4369), False, 'import global_var\n'), ((7982, 8014), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (8002, 8014), False, 'import global_var\n'), ((8095, 8132), 'global_var.get_value', 'global_var.get_value', (['"""invalid_sound"""'], {}), "('invalid_sound')\n", (8115, 8132), False, 'import global_var\n'), ((8160, 8197), 'global_var.get_value', 'global_var.get_value', (['"""invalid_sound"""'], {}), "('invalid_sound')\n", (8180, 8197), False, 'import global_var\n'), ((8389, 8421), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (8409, 8421), False, 'import global_var\n'), ((8750, 8786), 'global_var.set_value', 'global_var.set_value', (['"""playerNum"""', '(1)'], {}), "('playerNum', 1)\n", (8770, 8786), False, 'import global_var\n'), ((8806, 8838), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (8826, 8838), False, 'import global_var\n'), ((8866, 8898), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (8886, 8898), False, 'import global_var\n'), ((9718, 9754), 'global_var.set_value', 'global_var.set_value', (['"""playerNum"""', '(1)'], {}), "('playerNum', 1)\n", (9738, 9754), False, 'import global_var\n'), ((9774, 9806), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (9794, 9806), False, 'import global_var\n'), ((9949, 9981), 'global_var.get_value', 'global_var.get_value', (['"""ok_sound"""'], {}), "('ok_sound')\n", (9969, 9981), False, 'import global_var\n')]
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions used to provision Fuchsia boot images."""
import common
import logging
import os
import subprocess
import tempfile
import time
import uuid
_SSH_CONFIG_TEMPLATE = """
Host *
CheckHostIP no
StrictHostKeyChecking no
ForwardAgent no
ForwardX11 no
UserKnownHostsFile {known_hosts}
User fuchsia
IdentitiesOnly yes
IdentityFile {identity}
ServerAliveInterval 2
ServerAliveCountMax 5
ControlMaster auto
ControlPersist 1m
ControlPath /tmp/ssh-%r@%h:%p
ConnectTimeout 5
"""
FVM_TYPE_QCOW = 'qcow'
FVM_TYPE_SPARSE = 'sparse'
# Specifies boot files intended for use by an emulator.
TARGET_TYPE_QEMU = 'qemu'
# Specifies boot files intended for use by anything (incl. physical devices).
TARGET_TYPE_GENERIC = 'generic'
def _GetPubKeyPath(output_dir):
"""Returns a path to the generated SSH public key."""
return os.path.join(output_dir, 'id_ed25519.pub')
def ProvisionSSH(output_dir):
"""Generates a keypair and config file for SSH."""
host_key_path = os.path.join(output_dir, 'ssh_key')
host_pubkey_path = host_key_path + '.pub'
id_key_path = os.path.join(output_dir, 'id_ed25519')
id_pubkey_path = _GetPubKeyPath(output_dir)
known_hosts_path = os.path.join(output_dir, 'known_hosts')
ssh_config_path = os.path.join(output_dir, 'ssh_config')
logging.debug('Generating SSH credentials.')
if not os.path.isfile(host_key_path):
    subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-h', '-f',
                           host_key_path, '-P', '', '-N', ''],
                          stdout=open(os.devnull, 'w'))
if not os.path.isfile(id_key_path):
    subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-f', id_key_path,
                           '-P', '', '-N', ''], stdout=open(os.devnull, 'w'))
with open(ssh_config_path, "w") as ssh_config:
ssh_config.write(
_SSH_CONFIG_TEMPLATE.format(identity=id_key_path,
known_hosts=known_hosts_path))
if os.path.exists(known_hosts_path):
os.remove(known_hosts_path)
def GetTargetFile(filename, target_arch, target_type):
"""Computes a path to |filename| in the Fuchsia boot image directory specific
to |target_type| and |target_arch|."""
assert target_type == TARGET_TYPE_QEMU or target_type == TARGET_TYPE_GENERIC
return os.path.join(common.IMAGES_ROOT, target_arch, target_type, filename)
def GetSSHConfigPath(output_dir):
return output_dir + '/ssh_config'
def GetBootImage(output_dir, target_arch, target_type):
""""Gets a path to the Zircon boot image, with the SSH client public key
added."""
ProvisionSSH(output_dir)
pubkey_path = _GetPubKeyPath(output_dir)
zbi_tool = common.GetHostToolPathFromPlatform('zbi')
image_source_path = GetTargetFile('zircon-a.zbi', target_arch, target_type)
image_dest_path = os.path.join(output_dir, 'gen', 'fuchsia-with-keys.zbi')
cmd = [ zbi_tool, '-o', image_dest_path, image_source_path,
'-e', 'data/ssh/authorized_keys=' + pubkey_path ]
subprocess.check_call(cmd)
return image_dest_path
def GetKernelArgs(output_dir):
return ['devmgr.epoch=%d' % time.time()]
def AssertBootImagesExist(arch, platform):
assert os.path.exists(GetTargetFile('zircon-a.zbi', arch, platform)), \
'This checkout is missing the files necessary for\n' \
'booting this configuration of Fuchsia.\n' \
'To check out the files, add this entry to the "custom_vars"\n' \
'section of your .gclient file:\n\n' \
' "checkout_fuchsia_boot_images": "%s.%s"\n\n' % \
(platform, arch)
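# Hedged sketch (hypothetical output directory): how the module-level SSH
# config template above is rendered by ProvisionSSH for a given output_dir.
def _demo_render_ssh_config():
  out = '/tmp/fuchsia-out'
  rendered = _SSH_CONFIG_TEMPLATE.format(
      identity=os.path.join(out, 'id_ed25519'),
      known_hosts=os.path.join(out, 'known_hosts'))
  assert 'IdentityFile /tmp/fuchsia-out/id_ed25519' in rendered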
|
[
"os.remove",
"logging.debug",
"os.path.exists",
"time.time",
"os.path.isfile",
"common.GetHostToolPathFromPlatform",
"os.path.join",
"subprocess.check_call"
] |
[((1016, 1058), 'os.path.join', 'os.path.join', (['output_dir', '"""id_ed25519.pub"""'], {}), "(output_dir, 'id_ed25519.pub')\n", (1028, 1058), False, 'import os\n'), ((1163, 1198), 'os.path.join', 'os.path.join', (['output_dir', '"""ssh_key"""'], {}), "(output_dir, 'ssh_key')\n", (1175, 1198), False, 'import os\n'), ((1259, 1297), 'os.path.join', 'os.path.join', (['output_dir', '"""id_ed25519"""'], {}), "(output_dir, 'id_ed25519')\n", (1271, 1297), False, 'import os\n'), ((1365, 1404), 'os.path.join', 'os.path.join', (['output_dir', '"""known_hosts"""'], {}), "(output_dir, 'known_hosts')\n", (1377, 1404), False, 'import os\n'), ((1425, 1463), 'os.path.join', 'os.path.join', (['output_dir', '"""ssh_config"""'], {}), "(output_dir, 'ssh_config')\n", (1437, 1463), False, 'import os\n'), ((1467, 1511), 'logging.debug', 'logging.debug', (['"""Generating SSH credentials."""'], {}), "('Generating SSH credentials.')\n", (1480, 1511), False, 'import logging\n'), ((2127, 2159), 'os.path.exists', 'os.path.exists', (['known_hosts_path'], {}), '(known_hosts_path)\n', (2141, 2159), False, 'import os\n'), ((2461, 2529), 'os.path.join', 'os.path.join', (['common.IMAGES_ROOT', 'target_arch', 'target_type', 'filename'], {}), '(common.IMAGES_ROOT, target_arch, target_type, filename)\n', (2473, 2529), False, 'import os\n'), ((2831, 2872), 'common.GetHostToolPathFromPlatform', 'common.GetHostToolPathFromPlatform', (['"""zbi"""'], {}), "('zbi')\n", (2865, 2872), False, 'import common\n'), ((2971, 3027), 'os.path.join', 'os.path.join', (['output_dir', '"""gen"""', '"""fuchsia-with-keys.zbi"""'], {}), "(output_dir, 'gen', 'fuchsia-with-keys.zbi')\n", (2983, 3027), False, 'import os\n'), ((3153, 3179), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {}), '(cmd)\n', (3174, 3179), False, 'import subprocess\n'), ((1521, 1550), 'os.path.isfile', 'os.path.isfile', (['host_key_path'], {}), '(host_key_path)\n', (1535, 1550), False, 'import os\n'), ((1745, 1772), 'os.path.isfile', 'os.path.isfile', (['id_key_path'], {}), '(id_key_path)\n', (1759, 1772), False, 'import os\n'), ((2165, 2192), 'os.remove', 'os.remove', (['known_hosts_path'], {}), '(known_hosts_path)\n', (2174, 2192), False, 'import os\n'), ((3269, 3280), 'time.time', 'time.time', ([], {}), '()\n', (3278, 3280), False, 'import time\n')]
|
##################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2019 #
##################################################
import torch, copy, random
import torch.utils.data as data
class SearchDataset(data.Dataset):
def __init__(self, name, data, train_split, valid_split, direct_index=False, check=True, true_length=None, merge_train_val=False):
self.datasetname = name
self.direct_index = direct_index
self.merge_train_val = merge_train_val
if isinstance(data, (list, tuple)): # new type of SearchDataset
assert len(data) == 2, 'invalid length: {:}'.format( len(data) )
print("V2 SearchDataset")
self.train_data = data[0]
self.valid_data = data[1]
self.train_split = train_split.copy()
self.valid_split = valid_split.copy()
self.mode_str = 'V2' # new mode
else:
print("V1 Search Dataset")
self.mode_str = 'V1' # old mode
self.data = data
self.train_split = train_split.copy()
self.valid_split = valid_split.copy()
if check:
if len(train_split) != len(valid_split) and len(train_split) < 48000 and not merge_train_val:
intersection = set(train_split).intersection(set(valid_split))
                assert len(intersection) == 0, 'the split train and validation sets should have no intersection'
else:
print(f"Skipping checking intersection because since len(train_split)={len(train_split)}, len(valid_split)={len(valid_split)}")
self.length = len(self.train_split) if true_length is None else true_length
def __repr__(self):
return ('{name}(name={datasetname}, train={tr_L}, valid={val_L}, version={ver})'.format(name=self.__class__.__name__, datasetname=self.datasetname, tr_L=len(self.train_split), val_L=len(self.valid_split), ver=self.mode_str))
def __len__(self):
return self.length
def __getitem__(self, index):
if self.direct_index:
assert index in self.train_split and index not in self.valid_split
train_index = index
else:
assert index >= 0 and index < self.length, 'invalid index = {:}'.format(index)
train_index = self.train_split[index]
valid_index = random.choice( self.valid_split )
if not self.merge_train_val:
assert valid_index not in self.train_split or (self.datasetname in ["cifar100", "ImageNet16-120"] and not self.merge_train_val)
if self.mode_str == 'V1':
train_image, train_label = self.data[train_index]
valid_image, valid_label = self.data[valid_index]
elif self.mode_str == 'V2':
train_image, train_label = self.train_data[train_index]
valid_image, valid_label = self.valid_data[valid_index]
else: raise ValueError('invalid mode : {:}'.format(self.mode_str))
return train_image, train_label, valid_image, valid_label
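# Hedged usage sketch (synthetic data, hypothetical splits): in V1 mode each
# __getitem__ pairs the indexed training sample with a randomly chosen
# validation sample.
def _demo_search_dataset():
    data = {i: (i, i % 2) for i in range(10)}   # (image, label) stand-ins
    ds = SearchDataset('demo', data, train_split=[0, 1, 2, 3],
                       valid_split=[4, 5, 6, 7])
    train_x, _, valid_x, _ = ds[0]
    assert train_x == 0 and valid_x in (4, 5, 6, 7)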
|
[
"random.choice"
] |
[((2211, 2242), 'random.choice', 'random.choice', (['self.valid_split'], {}), '(self.valid_split)\n', (2224, 2242), False, 'import torch, copy, random\n')]
|
from functools import reduce
from typing import List
from snakemake.io import glob_wildcards
import pandas as pd
import numpy as np
import os
N_JOBS, MAX_ITER, MAX_NR = 28, 100, 20
MODEL_NAMES = [
# "lda", "bayes",
# "log_reg",
"rf"
]
META_MODEL_NAMES = [
"stacking",
"voting_hard",
"voting_soft"
]
def get_csv_names(dataset):
wc = glob_wildcards(f"data/{dataset}/csv/all/{{csv_names}}")[0]
return [e for e in wc if "csv" in e]
def get_model():
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
return {
"lda": LinearDiscriminantAnalysis(),
"bayes": GaussianNB(),
"log_reg": LogisticRegression(max_iter=2000),
"rf": RandomForestClassifier(n_jobs=-1)
}
def get_meta_model():
from optimizer.ensemble import StackingClassifier
from optimizer.ensemble import VotingClassifier
return {
"stacking": StackingClassifier(estimators=None, n_jobs=-1),
"voting_hard": VotingClassifier(estimators=None, voting="hard", n_jobs=-1),
"voting_soft": VotingClassifier(estimators=None, voting="soft", n_jobs=-1)
}
def concat_datasets(paths_list):
encoded_datasets_tmp = [pd.read_csv(p, index_col=0) for p in paths_list]
df_dummy = get_all_present_indices_df(encoded_datasets_tmp)
df = pd.concat([df.loc[df_dummy.index, :].iloc[:, :-1].sort_index()
for df in encoded_datasets_tmp], axis=1)
return df.values, df_dummy.y.values
def get_all_present_indices_df(df_list: List[pd.DataFrame]):
# get indices present in all encoded datasets: {1,2,3,4,5}, {1,3,5}, {1,2,3,4} -> {1,3}
idcs_new = sorted(
set.intersection(*[set(df.index) for df in df_list]),
key=lambda n: int(n.split("_")[1])
)
df_dummy = pd.DataFrame(np.zeros((len(idcs_new), 1)), index=idcs_new)
df_dummy["y"] = df_list[0].loc[idcs_new, "y"]
return df_dummy
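# Hedged sketch (synthetic frames): get_all_present_indices_df keeps only the
# sample ids present in every encoded dataset, ordered by their numeric suffix.
def _demo_index_intersection():
    a = pd.DataFrame({"y": [0, 1, 2]}, index=["s_1", "s_2", "s_3"])
    b = pd.DataFrame({"y": [0, 2]}, index=["s_1", "s_3"])
    common = get_all_present_indices_df([a, b])
    assert list(common.index) == ["s_1", "s_3"]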
|
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.GaussianNB",
"optimizer.ensemble.StackingClassifier",
"pandas.read_csv",
"optimizer.ensemble.VotingClassifier",
"snakemake.io.glob_wildcards",
"sklearn.linear_model.LogisticRegression",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
] |
[((367, 422), 'snakemake.io.glob_wildcards', 'glob_wildcards', (['f"""data/{dataset}/csv/all/{{csv_names}}"""'], {}), "(f'data/{dataset}/csv/all/{{csv_names}}')\n", (381, 422), False, 'from snakemake.io import glob_wildcards\n'), ((746, 774), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (772, 774), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((793, 805), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (803, 805), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((826, 859), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(2000)'}), '(max_iter=2000)\n', (844, 859), False, 'from sklearn.linear_model import LogisticRegression\n'), ((875, 908), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (897, 908), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1078, 1124), 'optimizer.ensemble.StackingClassifier', 'StackingClassifier', ([], {'estimators': 'None', 'n_jobs': '(-1)'}), '(estimators=None, n_jobs=-1)\n', (1096, 1124), False, 'from optimizer.ensemble import StackingClassifier\n'), ((1149, 1208), 'optimizer.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'None', 'voting': '"""hard"""', 'n_jobs': '(-1)'}), "(estimators=None, voting='hard', n_jobs=-1)\n", (1165, 1208), False, 'from optimizer.ensemble import VotingClassifier\n'), ((1233, 1292), 'optimizer.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'None', 'voting': '"""soft"""', 'n_jobs': '(-1)'}), "(estimators=None, voting='soft', n_jobs=-1)\n", (1249, 1292), False, 'from optimizer.ensemble import VotingClassifier\n'), ((1362, 1389), 'pandas.read_csv', 'pd.read_csv', (['p'], {'index_col': '(0)'}), '(p, index_col=0)\n', (1373, 1389), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
from django import forms
from .models import restaurants
# For individual fields:
class RestaurantesForm(forms.Form):
nombre = forms.CharField(required=True, label='Name', max_length=80)
cocina = forms.CharField(required=True, label='Cuisine', widget=forms.TextInput(attrs={'placeholder': 'Granaina'}))
direccion = forms.CharField(required=True, label='Street')
barrio = forms.CharField(required=True, label='Borough', widget=forms.TextInput())
imagen = forms.ImageField(required=False, label='Photo')
'''
#for mongoengine
class RestaurantesForm(ModelForm):
class Meta:
model = restaurants
fields = ['name', 'cuisine', 'address.street', 'borough', 'image']
'''
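# Hedged usage sketch (hypothetical data; assumes Django settings are already
# configured): bound-form validation against the field definitions above.
def _demo_form_validation():
    form = RestaurantesForm(data={
        'nombre': 'Casa Paco', 'cocina': 'Granaina',
        'direccion': 'Calle Mayor 1', 'barrio': 'Centro'})
    assert form.is_valid()   # 'imagen' is optional, so files may be omitted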
|
[
"django.forms.TextInput",
"django.forms.CharField",
"django.forms.ImageField"
] |
[((163, 222), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)', 'label': '"""Name"""', 'max_length': '(80)'}), "(required=True, label='Name', max_length=80)\n", (178, 222), False, 'from django import forms\n'), ((358, 404), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)', 'label': '"""Street"""'}), "(required=True, label='Street')\n", (373, 404), False, 'from django import forms\n'), ((507, 554), 'django.forms.ImageField', 'forms.ImageField', ([], {'required': '(False)', 'label': '"""Photo"""'}), "(required=False, label='Photo')\n", (523, 554), False, 'from django import forms\n'), ((292, 342), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Granaina'}"}), "(attrs={'placeholder': 'Granaina'})\n", (307, 342), False, 'from django import forms\n'), ((474, 491), 'django.forms.TextInput', 'forms.TextInput', ([], {}), '()\n', (489, 491), False, 'from django import forms\n')]
|
import pytest
from process_reports import read_rpc, check_excel, check_extension, read_info
import os
@pytest.fixture
def good_fn():
return os.path.join('Sample_Reports', 'ALL_RPC-2-1-2018_Scrubbed.xlsx')
@pytest.fixture
def updated_fn():
return os.path.join('Sample_Reports', 'ALL_RPC-7-3_2018_Scrubbed.xlsx')
@pytest.mark.parametrize("fn,acceptable,case,expected", [
('cool.txt', ['txt', 'xls'], True, True),
('cool.ini', ['txt', 'xls'], True, False),
('C:\monster\cool.ini', ['txt', 'xls'], True, False),
('C:\monster\cool.txt', ['txt', 'xls'], True, True),
('C:\monster\cool.xls', ['txt', 'xls'], True, True),
('C:\monster\cool.XLS', ['txt', 'xls'], True, True),
('C:\monster\cool.XLS', ['txt', 'xls'], False, False),
('C:\monster\cool.xls', ['txt', 'XLS'], False, False),
('C:\monster\cool.xls', ['txt', 'XLS'], True, True),
('C:\monster\cool.ini', ['txt', 'XLS'], True, False),
('C:\monster\cool.ini', ['txt', 'XLS'], False, False),
])
def test_check_extensions(fn, acceptable, case, expected):
assert check_extension(fn, acceptable, case_insensitive=case) == expected
@pytest.mark.parametrize("fn,expected", [
('cool.xls', True),
('cool.xlsx', True),
('cool.XLSX', True),
('cool.xlsm', True),
('cool.xlsb', True),
('cool.csv', False),
('cool.ini', False),
])
def test_check_excel(fn, expected):
assert check_excel(fn) == expected
@pytest.mark.parametrize("f_type", ['csv', 'ini'])
def test_bad_f_types(good_fn, caplog, f_type):
with pytest.raises(NotImplementedError):
read_info(good_fn, f_type=f_type)
for record in caplog.records:
assert record.levelname == 'ERROR'
assert 'f_type' in caplog.text
assert 'supported' in caplog.text
assert f_type in caplog.text
def test_bad_f_type_file(caplog):
with pytest.raises(ValueError):
read_info('cool.csv')
for record in caplog.records:
assert record.levelname == 'ERROR'
def test_good_file(good_fn):
df = read_rpc(good_fn)
assert 'Created By Qcc' in df.columns.values
assert 'Acct Id Acc' in df.columns.values
assert 'Call Action Type Qcc' in df.columns.values
assert 'Call Result Type Qcc' in df.columns.values
def test_good_file2(updated_fn):
df = read_rpc(updated_fn)
assert 'Created By Qcc' in df.columns.values
assert 'Acct Id Acc' in df.columns.values
assert 'Call Action Type Qcc' in df.columns.values
assert 'Call Result Type Qcc' in df.columns.values
def test_kwargs(good_fn):
df = read_rpc(good_fn, usecols=2)
assert 'Call Result Type Qcc' not in df.columns.values
assert len(df.columns.values) == 3
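# Hedged reference sketch (not the library's implementation): one function that
# would satisfy every parametrized extension case above.
def _reference_check_extension(fn, acceptable, case_insensitive=True):
    ext = os.path.splitext(fn)[1].lstrip('.')
    if case_insensitive:
        return ext.lower() in {a.lower() for a in acceptable}
    return ext in acceptable

assert _reference_check_extension('C:\monster\cool.XLS', ['txt', 'xls'])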
|
[
"process_reports.read_rpc",
"process_reports.check_excel",
"pytest.raises",
"process_reports.read_info",
"process_reports.check_extension",
"pytest.mark.parametrize",
"os.path.join"
] |
[((326, 1019), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fn,acceptable,case,expected"""', "[('cool.txt', ['txt', 'xls'], True, True), ('cool.ini', ['txt', 'xls'], \n True, False), ('C:\\\\monster\\\\cool.ini', ['txt', 'xls'], True, False), (\n 'C:\\\\monster\\\\cool.txt', ['txt', 'xls'], True, True), (\n 'C:\\\\monster\\\\cool.xls', ['txt', 'xls'], True, True), (\n 'C:\\\\monster\\\\cool.XLS', ['txt', 'xls'], True, True), (\n 'C:\\\\monster\\\\cool.XLS', ['txt', 'xls'], False, False), (\n 'C:\\\\monster\\\\cool.xls', ['txt', 'XLS'], False, False), (\n 'C:\\\\monster\\\\cool.xls', ['txt', 'XLS'], True, True), (\n 'C:\\\\monster\\\\cool.ini', ['txt', 'XLS'], True, False), (\n 'C:\\\\monster\\\\cool.ini', ['txt', 'XLS'], False, False)]"], {}), "('fn,acceptable,case,expected', [('cool.txt', ['txt',\n 'xls'], True, True), ('cool.ini', ['txt', 'xls'], True, False), (\n 'C:\\\\monster\\\\cool.ini', ['txt', 'xls'], True, False), (\n 'C:\\\\monster\\\\cool.txt', ['txt', 'xls'], True, True), (\n 'C:\\\\monster\\\\cool.xls', ['txt', 'xls'], True, True), (\n 'C:\\\\monster\\\\cool.XLS', ['txt', 'xls'], True, True), (\n 'C:\\\\monster\\\\cool.XLS', ['txt', 'xls'], False, False), (\n 'C:\\\\monster\\\\cool.xls', ['txt', 'XLS'], False, False), (\n 'C:\\\\monster\\\\cool.xls', ['txt', 'XLS'], True, True), (\n 'C:\\\\monster\\\\cool.ini', ['txt', 'XLS'], True, False), (\n 'C:\\\\monster\\\\cool.ini', ['txt', 'XLS'], False, False)])\n", (349, 1019), False, 'import pytest\n'), ((1140, 1336), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fn,expected"""', "[('cool.xls', True), ('cool.xlsx', True), ('cool.XLSX', True), ('cool.xlsm',\n True), ('cool.xlsb', True), ('cool.csv', False), ('cool.ini', False)]"], {}), "('fn,expected', [('cool.xls', True), ('cool.xlsx', \n True), ('cool.XLSX', True), ('cool.xlsm', True), ('cool.xlsb', True), (\n 'cool.csv', False), ('cool.ini', False)])\n", (1163, 1336), False, 'import pytest\n'), ((1436, 1485), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""f_type"""', "['csv', 'ini']"], {}), "('f_type', ['csv', 'ini'])\n", (1459, 1485), False, 'import pytest\n'), ((146, 210), 'os.path.join', 'os.path.join', (['"""Sample_Reports"""', '"""ALL_RPC-2-1-2018_Scrubbed.xlsx"""'], {}), "('Sample_Reports', 'ALL_RPC-2-1-2018_Scrubbed.xlsx')\n", (158, 210), False, 'import os\n'), ((258, 322), 'os.path.join', 'os.path.join', (['"""Sample_Reports"""', '"""ALL_RPC-7-3_2018_Scrubbed.xlsx"""'], {}), "('Sample_Reports', 'ALL_RPC-7-3_2018_Scrubbed.xlsx')\n", (270, 322), False, 'import os\n'), ((2022, 2039), 'process_reports.read_rpc', 'read_rpc', (['good_fn'], {}), '(good_fn)\n', (2030, 2039), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n'), ((2289, 2309), 'process_reports.read_rpc', 'read_rpc', (['updated_fn'], {}), '(updated_fn)\n', (2297, 2309), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n'), ((2552, 2580), 'process_reports.read_rpc', 'read_rpc', (['good_fn'], {'usecols': '(2)'}), '(good_fn, usecols=2)\n', (2560, 2580), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n'), ((1070, 1124), 'process_reports.check_extension', 'check_extension', (['fn', 'acceptable'], {'case_insensitive': 'case'}), '(fn, acceptable, case_insensitive=case)\n', (1085, 1124), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n'), ((1405, 1420), 'process_reports.check_excel', 'check_excel', (['fn'], {}), '(fn)\n', (1416, 
1420), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n'), ((1542, 1576), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1555, 1576), False, 'import pytest\n'), ((1586, 1619), 'process_reports.read_info', 'read_info', (['good_fn'], {'f_type': 'f_type'}), '(good_fn, f_type=f_type)\n', (1595, 1619), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n'), ((1848, 1873), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1861, 1873), False, 'import pytest\n'), ((1883, 1904), 'process_reports.read_info', 'read_info', (['"""cool.csv"""'], {}), "('cool.csv')\n", (1892, 1904), False, 'from process_reports import read_rpc, check_excel, check_extension, read_info\n')]
|
import cherrypy
from mako.lookup import TemplateLookup
class Tool(cherrypy.Tool):
_lookups = {}
def __init__(self):
cherrypy.Tool.__init__(self, 'before_handler',
self.callable,
priority=40)
def callable(self,
filename=None,
directories=None,
module_directory=None,
collection_size=-1):
if filename is None or directories is None:
return
# Find the appropriate template lookup.
key = (tuple(directories), module_directory)
try:
lookup = self._lookups[key]
except KeyError:
lookup = TemplateLookup(directories=directories,
module_directory=module_directory,
collection_size=collection_size,
input_encoding='utf8')
self._lookups[key] = lookup
cherrypy.request.lookup = lookup
cherrypy.request.template = lookup.get_template(filename)
# Replace the current handler.
inner_handler = cherrypy.serving.request.handler
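        # Wrap the original handler: it now returns a template context dict,
        # which the wrapper renders through the cached Mako template.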
def wrapper(*args, **kwargs):
context = inner_handler(*args, **kwargs)
response = cherrypy.request.template.render(**context)
return response
cherrypy.serving.request.handler = wrapper
|
[
"cherrypy.request.template.render",
"cherrypy.Tool.__init__",
"mako.lookup.TemplateLookup"
] |
[((135, 209), 'cherrypy.Tool.__init__', 'cherrypy.Tool.__init__', (['self', '"""before_handler"""', 'self.callable'], {'priority': '(40)'}), "(self, 'before_handler', self.callable, priority=40)\n", (157, 209), False, 'import cherrypy\n'), ((1310, 1353), 'cherrypy.request.template.render', 'cherrypy.request.template.render', ([], {}), '(**context)\n', (1342, 1353), False, 'import cherrypy\n'), ((713, 847), 'mako.lookup.TemplateLookup', 'TemplateLookup', ([], {'directories': 'directories', 'module_directory': 'module_directory', 'collection_size': 'collection_size', 'input_encoding': '"""utf8"""'}), "(directories=directories, module_directory=module_directory,\n collection_size=collection_size, input_encoding='utf8')\n", (727, 847), False, 'from mako.lookup import TemplateLookup\n')]
|
import flask_restful
import re
from miRNASNP3 import app, api
from miRNASNP3.core import mongo
from flask_restful import Resource, fields, marshal_with, reqparse, marshal
from flask import send_file
mirna_exp_df = {
"ACC": fields.String,
"DLBC": fields.String,
"READ": fields.String,
"GBM": fields.String,
"LGG": fields.String,
"THCA": fields.String,
"STAD": fields.String,
"UCEC": fields.String,
"PCPG": fields.String,
"CESC": fields.String,
"UCS": fields.String,
"TGCT": fields.String,
"LIHC": fields.String,
"CHOL": fields.String,
"HNSC": fields.String,
"UVM": fields.String,
"SKCM": fields.String,
"COAD": fields.String,
"PAAD": fields.String,
"THYM": fields.String,
"LUSC": fields.String,
"MESO": fields.String,
"OV": fields.String,
"ESCA": fields.String,
"SARC": fields.String,
"KIRP": fields.String,
"BLCA": fields.String,
"PRAD": fields.String,
"LUAD": fields.String,
"BRCA": fields.String,
"KIRC": fields.String,
"KICH": fields.String,
}
mirna_expression = {
"exp_df": fields.Nested(mirna_exp_df),
"exp_mean": fields.String,
"mir_id": fields.String,
}
mirna_expression_list = {
"mirna_expression_list": fields.Nested(mirna_expression),
"mirna_expression_count": fields.Integer,
}
class MirExpression(Resource):
@marshal_with(mirna_expression_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mirna_id", type=str)
args = parser.parse_args()
mirna_id = args["mirna_id"]
condition = {}
if mirna_id:
condition["mir_id"] = mirna_id
mirna_expression_list = mongo.db.mirna_expression.find(condition)
mirna_expression_count = mongo.db.mirna_expression.find(condition).count()
else:
mirna_expression_list = {}
mirna_expression_count = 0
return {
"mirna_expression_list": list(mirna_expression_list),
"mirna_expression_count": mirna_expression_count,
}
api.add_resource(MirExpression, "/api/mirna_expression")
site_info = {
"align_1": fields.String,
"align_2": fields.String,
"align_3": fields.String,
"align_4": fields.String,
"align_5": fields.String,
"align6": fields.String,
"align7": fields.String,
"align8": fields.String,
"mm_start": fields.String,
"mm_end": fields.String,
"tgs_start": fields.String,
"tgs_end": fields.String,
"tgs_score": fields.String,
"dg_duplex": fields.String,
"dg_binding": fields.String,
"dg_open": fields.String,
"tgs_au": fields.String,
"prob_exac": fields.String(attribute="prob_exac"),
"chrome": fields.String,
}
snp_info = {
"distance": fields.String,
"chr": fields.String,
"position": fields.String,
"snp_id": fields.String,
"alt": fields.String,
"ref": fields.String,
"curalt": fields.String,
}
gene_exp_df = {
"ACC": fields.String,
"DLBC": fields.String,
"READ": fields.String,
"GBM": fields.String,
"LGG": fields.String,
"THCA": fields.String,
"STAD": fields.String,
"UCEC": fields.String,
"PCPG": fields.String,
"CESC": fields.String,
"UCS": fields.String,
"TGCT": fields.String,
"LIHC": fields.String,
"CHOL": fields.String,
"HNSC": fields.String,
"UVM": fields.String,
"SKCM": fields.String,
"COAD": fields.String,
"PAAD": fields.String,
"THYM": fields.String,
"LUSC": fields.String,
"MESO": fields.String,
"OV": fields.String,
"ESCA": fields.String,
"SARC": fields.String,
"KIRP": fields.String,
"BLCA": fields.String,
"PRAD": fields.String,
"LUAD": fields.String,
"BRCA": fields.String,
"KIRC": fields.String,
"KICH": fields.String,
}
gene_expression = {
"exp_df": fields.Nested(gene_exp_df),
"exp_mean": fields.String,
"symbol": fields.String,
}
utr_info = {
"acc": fields.List(fields.String),
"position": fields.String,
"enst_id": fields.String,
"gene_symbol": fields.String,
}
gainsite_info = {
"snp_id": fields.String,
"mir_seedstart": fields.String,
"strand": fields.String,
"mir_seedchr": fields.String,
"mir_seedend": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"snp_info": fields.Nested(snp_info),
"site_info": fields.Nested(site_info),
"utr_info": fields.Nested(utr_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
"cor_key": fields.String,
}
snp_seed_gain = {
"snp_seed_gain_list": fields.Nested(gainsite_info),
"snp_seed_gain_count": fields.Integer,
}
class SnpSeedGainFull(Resource):
@marshal_with(snp_seed_gain)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("mirna_id")
parser.add_argument("gene")
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
print(args["mirna_id"])
page = args["page"]
#per_page = 15
#record_skip = (int(page) - 1) * per_page
condition = {}
pipline = []
print(args["mirna_id"])
if args["snp_id"]:
condition["snp_id"] = args["snp_id"]
if args["mirna_id"]:
condition["mirna_id"] = args["mirna_id"]
if args["gene"]:
condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
match = {"$match": condition}
group_count = {"$group": {"_id": "null", "count": {"$sum": 1}}}
        pipline = [match, lookup_gene, lookup_mirna]
        print(pipline)
snp_seed4666_gain_count = mongo.db.seed_gain_4666_redundancy.find(condition).count()
snp_indel_gain_count = mongo.db.seed_gain_addindel_redundancy.find(condition).count()
snp_seed_gain_count = snp_seed4666_gain_count + snp_indel_gain_count
# snp_seed_gain_count=[]
snp_seed4666_gain_list = mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
indel_seed_gain_list = mongo.db.seed_gain_addindel_redundancy.aggregate(pipline)
snp_seed_gain_list = list(snp_seed4666_gain_list) + list(indel_seed_gain_list)
return {
"snp_seed_gain_list": list(snp_seed_gain_list),
"snp_seed_gain_count": snp_seed_gain_count,
}
api.add_resource(SnpSeedGainFull, "/api/snp_seed_gain_full")
class SnpSeedGain(Resource):
@marshal_with(snp_seed_gain)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("mirna_id")
parser.add_argument("gene")
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
print(args["mirna_id"])
page = args["page"]
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
pipline = []
print(args["mirna_id"])
if args["snp_id"]:
condition["snp_id"] = args["snp_id"]
if args["mirna_id"]:
condition["mirna_id"] = args["mirna_id"]
if args["gene"]:
condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
group_count = {"$group": {"_id": "null", "count": {"$sum": 1}}}
        pipline = [match, skip, limit, lookup_gene, lookup_mirna]
        print(pipline)
snp_seed4666_gain_count = mongo.db.seed_gain_4666_redundancy.find(
condition
).count()
snp_indel_gain_count = mongo.db.seed_gain_addindel_redundancy.find(
condition
).count()
snp_seed_gain_count = snp_seed4666_gain_count + snp_indel_gain_count
# snp_seed_gain_count=[]
snp_seed4666_gain_list = mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
indel_seed_gain_list = mongo.db.seed_gain_addindel_redundancy.aggregate(pipline)
# snp_seed4666_gain_count=mongo.db.seed_gain_4666.aggregate(pipline_count)
# indel_seed_gain_count=mongo.db.seed_gain_addindel.aggregate(pipline_count)
# snp_seed_gain_count=list(snp_seed4666_gain_count)+list(indel_seed_gain_count)
# for i in snp_seed4666_gain_count:
# snp_seed_gain_count.append(i)
# for i in indel_seed_gain_count:
# snp_seed_gain_count.append(i)
# print("snp_seed_gain_count")
# print(snp_seed_gain_count)
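        # Results are paged across two collections in sequence (4666 SNVs, then
        # indels). Three cases: the requested page lies entirely past the SNV
        # collection, straddles the boundary, or fits inside the SNV collection.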
if args["snp_id"]:
snp_seed_gain_list = list(snp_seed4666_gain_list) + list(
indel_seed_gain_list
)
elif record_skip > snp_seed4666_gain_count:
print("view end pages")
print(record_skip)
print(snp_seed4666_gain_count)
record_skip_indel = record_skip - snp_seed4666_gain_count
skip_indel = {"$skip": record_skip_indel}
pipline_indel = [match, skip_indel, limit, lookup_gene, lookup_mirna]
snp_seed_gain_list = mongo.db.seed_gain_addindel_redundancy.aggregate(
pipline_indel
)
elif (
snp_seed_gain_count - record_skip < 15
and snp_seed_gain_count - record_skip > 0
):
print("view across pages")
print(record_skip)
print(snp_seed4666_gain_count)
snp_seed4666_gain_list = mongo.db.seed_gain_4666_redundancy.aggregate(
pipline
)
limit_indel = snp_seed4666_gain_count - record_skip
limit_indel_pip = {"$limit": limit_indel}
pipline_indel = [match, limit_indel_pip, lookup_gene, lookup_mirna]
indel_seed_gain_list = mongo.db.seed_gain_addindel_redundancy.aggregate(
pipline_indel
)
snp_seed_gain_list = list(snp_seed4666_gain_list) + list(
indel_seed_gain_list
)
else:
snp_seed_gain_list = mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
# snp_seed_gain_list=mongo.db.indel_target_test.aggregate(pipline)
# snp_seed_gain_count=mongo.db.indel_target_test.find(condition).count()
return {
"snp_seed_gain_list": list(snp_seed_gain_list),
"snp_seed_gain_count": snp_seed_gain_count,
}
api.add_resource(SnpSeedGain, "/api/snp_seed_gain")
cor_df = {
"ACC": fields.String,
"BLCA": fields.String,
"BRCA": fields.String,
"CESC": fields.String,
"CHOL": fields.String,
"COAD": fields.String,
"DLBC": fields.String,
"ESCA": fields.String,
"GBM": fields.String,
"HNSC": fields.String,
"KICH": fields.String,
"KIRC": fields.String,
"KIRP": fields.String,
"LGG": fields.String,
"LIHC": fields.String,
"LUAD": fields.String,
"LUSC": fields.String,
"MESO": fields.String,
"OV": fields.String,
"PAAD": fields.String,
"PCPG": fields.String,
"PRAD": fields.String,
"READ": fields.String,
"SARC": fields.String,
"SKCM": fields.String,
"STAD": fields.String,
"TGCT": fields.String,
"THCA": fields.String,
"THYM": fields.String,
"UCEC": fields.String,
"UCS": fields.String,
"UVM": fields.String,
}
corelation_detail = {"cor_df": fields.Nested(cor_df), "mir_gene": fields.String}
losssite_info = {
"snp_id": fields.String,
"mir_seedstart": fields.String,
"strand": fields.String,
"mir_seedchr": fields.String,
"mir_seedend": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"cor_key": fields.String,
"expr_corelation": fields.String,
"experiment_valid": fields.Integer,
"snp_info": fields.Nested(snp_info),
"site_info": fields.Nested(site_info),
"utr_info": fields.Nested(utr_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
"corelation_detail": fields.Nested(corelation_detail),
}
snp_seed_loss_list = {
"snp_seed_loss_list": fields.Nested(losssite_info),
"snp_seed_loss_count": fields.Integer,
}
class SnpSeedLossFull(Resource):
@marshal_with(snp_seed_loss_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("mirna_id")
parser.add_argument("gene")
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
page = args["page"]
condition = {}
print(args["mirna_id"])
if args["snp_id"]:
condition["snp_id"] = args["snp_id"]
if args["mirna_id"]:
condition["mirna_id"] = args["mirna_id"]
if args["gene"]:
condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
lookup_corelation = {
"$lookup": {
"from": "corelation_cancer_detail",
"localField": "cor_key",
"foreignField": "mir_gene",
"as": "corelation_detail",
}
}
match = {"$match": condition}
pipline = [match, lookup_gene, lookup_mirna, lookup_corelation]
snp_seed4666_loss_count = mongo.db.seed_loss_4666_redundancy.find(
condition
).count()
snp_indel_loss_count = mongo.db.seed_loss_addindel_redundancy.find(
condition
).count()
snp_seed_loss_count = snp_seed4666_loss_count + snp_indel_loss_count
snp_seed4666_loss_list = mongo.db.seed_loss_4666_redundancy.aggregate(
pipline
)
indel_seed_loss_list = mongo.db.seed_loss_addindel_redundancy.aggregate(
pipline
)
snp_seed_loss_list = list(snp_seed4666_loss_list) + list(indel_seed_loss_list)
return {
"snp_seed_loss_list": list(snp_seed_loss_list),
"snp_seed_loss_count": snp_seed_loss_count,
}
api.add_resource(SnpSeedLossFull, "/api/snp_seed_loss_full")
class SnpSeedLoss(Resource):
@marshal_with(snp_seed_loss_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("mirna_id")
parser.add_argument("gene")
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
page = args["page"]
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
print(args["mirna_id"])
if args["snp_id"]:
condition["snp_id"] = args["snp_id"]
if args["mirna_id"]:
condition["mirna_id"] = args["mirna_id"]
if args["gene"]:
condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
lookup_corelation = {
"$lookup": {
"from": "corelation_cancer_detail",
"localField": "cor_key",
"foreignField": "mir_gene",
"as": "corelation_detail",
}
}
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [match, skip, limit, lookup_gene, lookup_mirna, lookup_corelation]
snp_seed4666_loss_count = mongo.db.seed_loss_4666_redundancy.find(
condition
).count()
snp_indel_loss_count = mongo.db.seed_loss_addindel_redundancy.find(
condition
).count()
snp_seed_loss_count = snp_seed4666_loss_count + snp_indel_loss_count
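        # Same cross-collection paging scheme as SnpSeedGain: the page is past
        # the SNV collection, straddles the boundary, or fits within it.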
if args["snp_id"]:
snp_seed4666_loss_list = mongo.db.seed_loss_4666_redundancy.aggregate(
pipline
)
indel_seed_loss_list = mongo.db.seed_loss_addindel_redundancy.aggregate(
pipline
)
snp_seed_loss_list = list(snp_seed4666_loss_list) + list(
indel_seed_loss_list
)
elif record_skip > snp_seed4666_loss_count:
record_skip_indel = record_skip - snp_seed4666_loss_count
skip_indel = {"$skip": record_skip_indel}
pipline_indel = [match, skip_indel, limit, lookup_gene, lookup_mirna]
snp_seed_loss_list = mongo.db.seed_loss_addindel_redundancy.aggregate(
pipline_indel
)
elif (
snp_seed4666_loss_count - record_skip < 15
and snp_seed4666_loss_count - record_skip > 0
):
snp_seed4666_loss_list = mongo.db.seed_loss_4666_redundancy.aggregate(
pipline
)
limit_indel = snp_seed4666_loss_count - record_skip
limit_indel_pip = {"$limit": limit_indel}
pipline_indel = [match, limit_indel_pip, lookup_gene, lookup_mirna]
indel_seed_loss_list = mongo.db.seed_loss_addindel_redundancy.aggregate(
pipline_indel
)
snp_seed_loss_list = list(snp_seed4666_loss_list) + list(
indel_seed_loss_list
)
else:
snp_seed_loss_list = mongo.db.seed_loss_4666_redundancy.aggregate(pipline)
return {
"snp_seed_loss_list": list(snp_seed_loss_list),
"snp_seed_loss_count": snp_seed_loss_count,
}
api.add_resource(SnpSeedLoss, "/api/snp_seed_loss")
mut_info = {
"distance": fields.String,
"chr": fields.String,
"position": fields.String,
"mut_id": fields.String,
"alt": fields.String,
"ref": fields.String,
"curalt": fields.String,
"distance_align": fields.String,
}
mut_gainsite_info = {
"mut_id": fields.String,
"mir_seedstart": fields.String,
"strand": fields.String,
"mir_seedchr": fields.String,
"mir_seedend": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"mut_info": fields.Nested(mut_info),
"site_info": fields.Nested(site_info),
"utr_info": fields.Nested(utr_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
}
mut_seed_gain_list = {
"mut_seed_gain_list": fields.Nested(mut_gainsite_info),
"mut_seed_gain_count": fields.Integer,
}
class MutSeedGain(Resource):
@marshal_with(mut_seed_gain_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mirna_id", type=str)
parser.add_argument("mut_id")
parser.add_argument("gene")
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
mirna_id = args["mirna_id"]
        page = args["page"]
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
if args["mirna_id"]:
condition["mirna_id"] = mirna_id
if args["mut_id"]:
condition["mut_id"] = args["mut_id"]
if args["gene"]:
condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
match = {"$match": condition}
print("mut_seed_gain")
print(condition)
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [match, skip, limit, lookup_gene, lookup_mirna]
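        # Query the SNV and indel mutation collections separately, then merge
        # the result lists and sum the counts.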
tysnv_mut_seed_gain_list = mongo.db.seed_cosmic_gain_redundancy.aggregate(
pipline
)
tysnv_mut_seed_gain_count = mongo.db.seed_cosmic_gain_redundancy.find(
condition
).count()
indel_mut_seed_gain_list = mongo.db.indel_seed_mutation_gain_redundancy.aggregate(
pipline
)
indel_mut_seed_gain_count = mongo.db.indel_seed_mutation_gain_redundancy.find(
condition
).count()
mut_seed_gain_list = list(tysnv_mut_seed_gain_list) + list(
indel_mut_seed_gain_list
)
mut_seed_gain_count = tysnv_mut_seed_gain_count + indel_mut_seed_gain_count
print(mut_seed_gain_count)
return {
"mut_seed_gain_list": list(mut_seed_gain_list),
"mut_seed_gain_count": mut_seed_gain_count,
}
api.add_resource(MutSeedGain, "/api/mut_seed_gain")
mut_losssite_info = {
"mut_id": fields.String,
"mir_seedstart": fields.String,
"strand": fields.String,
"mir_seedchr": fields.String,
"mir_seedend": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"cor_key": fields.String,
"expr_corelation": fields.String,
"experiment_valid": fields.Integer,
"mut_info": fields.Nested(mut_info),
"site_info": fields.Nested(site_info),
"utr_info": fields.Nested(utr_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
"corelation_detail": fields.Nested(corelation_detail),
}
mut_seed_loss_list = {
"mut_seed_loss_list": fields.Nested(mut_losssite_info),
"mut_seed_loss_count": fields.Integer,
}
class MutSeedLoss(Resource):
@marshal_with(mut_seed_loss_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mirna_id", type=str)
parser.add_argument("mut_id")
parser.add_argument("gene")
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
mirna_id = args["mirna_id"]
        page = args["page"]
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
if args["mirna_id"]:
condition["mirna_id"] = mirna_id
if args["mut_id"]:
condition["mut_id"] = args["mut_id"]
if args["gene"]:
condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
match = {"$match": condition}
print("mut_seed_loss")
print(condition)
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
lookup_corelation = {
"$lookup": {
"from": "corelation_cancer_detail",
"localField": "cor_key",
"foreignField": "mir_gene",
"as": "corelation_detail",
}
}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [match, skip, limit, lookup_mirna, lookup_gene, lookup_corelation]
tysnv_mut_seed_loss_list = mongo.db.seed_cosmic_loss_redundancy.aggregate(
pipline
)
tysnv_mut_seed_loss_count = mongo.db.seed_cosmic_loss_redundancy.find(
condition
).count()
indel_mut_seed_loss_list = mongo.db.indel_seed_mutation_loss_redundancy.aggregate(
pipline
)
indel_mut_seed_loss_count = mongo.db.indel_seed_mutation_loss_redundancy.find(
condition
).count()
mut_seed_loss_list = list(tysnv_mut_seed_loss_list) + list(
indel_mut_seed_loss_list
)
mut_seed_loss_count = tysnv_mut_seed_loss_count + indel_mut_seed_loss_count
print(mut_seed_loss_count)
return {
"mut_seed_loss_list": list(mut_seed_loss_list),
"mut_seed_loss_count": mut_seed_loss_count,
}
api.add_resource(MutSeedLoss, "/api/mut_seed_loss")
utr_site_info = {
"chrome": fields.String,
"mm_start": fields.String,
"mm_end": fields.String,
"tgs_start": fields.String,
"tgs_end": fields.String,
"dg_duplex": fields.String,
"dg_binding": fields.String,
"dg_open": fields.String,
"tgs_au": fields.String,
"tgs_score": fields.String,
"prob_exac": fields.String,
"align_1": fields.String,
"align_2": fields.String,
"align_3": fields.String,
"align_4": fields.String,
"align_5": fields.String,
"align6": fields.String,
"align7": fields.String,
"align8": fields.String,
"truncate_start": fields.String,
"truncate_end": fields.String,
"distance": fields.Integer,
"alt_start": fields.Integer,
"alt_end": fields.Integer,
"alt_color": fields.String,
"alt_display": fields.Integer,
}
snp_info_line = {
"distance": fields.String,
"distance_align": fields.String,
"chr": fields.String,
"position": fields.String,
"snp_id": fields.String,
"ref": fields.String,
"alt": fields.String,
"curalt": fields.String,
}
utr_info_line = {
"gene_symbol": fields.String,
"enst_id": fields.String,
"acc": fields.List(fields.String),
"chr": fields.String,
"end": fields.String,
"start": fields.String,
"strand": fields.String,
"position": fields.String,
}
experiment_valid = {
"pubmedid": fields.String,
"evidence": fields.String,
"source": fields.String,
"mirna": fields.String,
"experiment_valid_key": fields.String,
"gene": fields.String,
}
snv_utr_loss = {
"snv": fields.Integer,
"indel": fields.Integer,
"snp_id": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"experiment_valid": fields.Nested(experiment_valid),
"expr_corelation": fields.String,
"snp_info": fields.Nested(snp_info_line),
"utr_info": fields.Nested(utr_info_line),
"site_info": fields.Nested(utr_site_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
"corelation_detail": fields.Nested(corelation_detail),
}
utr_loss_list = {
"utr_loss_list": fields.Nested(snv_utr_loss),
"utr_loss_count": fields.Integer,
}
class SnvUtrLoss(Resource):
@marshal_with(utr_loss_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
snp_id = args["snp_id"]
page = args["page"]
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
if snp_id:
condition["snp_id"] = snp_id
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
lookup_corelation = {
"$lookup": {
"from": "corelation_cancer_detail",
"localField": "cor_key",
"foreignField": "mir_gene",
"as": "corelation_detail",
}
}
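        # Attach experimentally validated miRNA-gene pairs, keyed on cor_key.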
lookup_experiment_valid = {
"$lookup": {
"from": "gene_mirna_experiment_validation",
"localField": "cor_key",
"foreignField": "experiment_valid_key",
"as": "experiment_valid",
}
}
print(condition)
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [
match,
skip,
limit,
lookup_gene,
lookup_mirna,
lookup_corelation,
lookup_experiment_valid,
]
snv_utr_loss_list = mongo.db.snv_utr_loss_v2_redundancy.aggregate(pipline)
snv_utr_loss_count = mongo.db.snv_utr_loss_v2_redundancy.find(condition).count()
indel_utr_loss_list = mongo.db.indel_utr_loss_v2_redundancy.aggregate(pipline)
indel_utr_loss_count = mongo.db.indel_utr_loss_v2_redundancy.find(
condition
).count()
utr_loss_list = list(snv_utr_loss_list) + list(indel_utr_loss_list)
utr_loss_count = snv_utr_loss_count + indel_utr_loss_count
return {"utr_loss_list": list(utr_loss_list), "utr_loss_count": utr_loss_count}
api.add_resource(SnvUtrLoss, "/api/snv_utr_loss")
snv_utr_gain = {
"snp_id": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"snp_info": fields.Nested(snp_info_line),
"utr_info": fields.Nested(utr_info_line),
"site_info": fields.Nested(utr_site_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
}
utr_gain_list = {
"utr_gain_list": fields.Nested(snv_utr_gain),
"utr_gain_count": fields.Integer,
}
class SnvUtrGain(Resource):
@marshal_with(utr_gain_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("page", type=int, default=1)
args = parser.parse_args()
snp_id = args["snp_id"]
page = args["page"]
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
if snp_id:
condition["snp_id"] = snp_id
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
print(condition)
pipline = [match, skip, limit, lookup_gene, lookup_mirna]
snv_utr_gain_list = mongo.db.snv_utr_gain_v2_redundancy.aggregate(pipline)
snv_utr_gain_count = mongo.db.snv_utr_gain_v2_redundancy.find(condition).count()
indel_utr_gain_list = mongo.db.indel_utr_gain_v2_redundancy.aggregate(pipline)
indel_utr_gain_count = mongo.db.indel_utr_gain_v2_redundancy.find(
condition
).count()
utr_gain_list = list(snv_utr_gain_list) + list(indel_utr_gain_list)
utr_gain_count = snv_utr_gain_count + indel_utr_gain_count
return {"utr_gain_list": list(utr_gain_list), "utr_gain_count": utr_gain_count}
api.add_resource(SnvUtrGain, "/api/snv_utr_gain")
mut_gain_utr_site = {
"mut_id": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"mut_info": fields.Nested(mut_info),
"site_info": fields.Nested(utr_site_info),
"utr_info": fields.Nested(utr_info_line),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
}
mut_utr_gain = {
"mut_utr_gain_list": fields.Nested(mut_gain_utr_site),
"mut_utr_gain_count": fields.Integer,
}
class MutUtrGain(Resource):
@marshal_with(mut_utr_gain)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id")
parser.add_argument("page")
args = parser.parse_args()
page = 1
per_page = 15
record_skip = (page - 1) * per_page
condition = {}
if args["page"]:
record_skip = (int(args["page"]) - 1) * per_page
if args["mut_id"]:
condition["mut_id"] = args["mut_id"]
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [match, skip, limit, lookup_gene, lookup_mirna]
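        # COSMIC-style IDs (prefix COSN) route to the COSMIC collections;
        # everything else is looked up in the ClinVar collections.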
if args["mut_id"].lower().startswith("cosn"):
tynsv_mut_utr_gain_list = mongo.db.utr_cosmic_gain_redundancy.aggregate(
pipline
)
tysnv_mut_utr_gain_count = mongo.db.utr_cosmic_gain_redundancy.find(
condition
).count()
indel_mut_utr_gain_list = mongo.db.utr_cosmic_gain_indel_redundancy.aggregate(
pipline
)
indel_mut_utr_gain_count = mongo.db.utr_cosmic_gain_indel_redundancy.find(
condition
).count()
mut_utr_gain_list = list(tynsv_mut_utr_gain_list) + list(
indel_mut_utr_gain_list
)
mut_utr_gain_count = tysnv_mut_utr_gain_count + indel_mut_utr_gain_count
else:
tynsv_mut_utr_gain_list = mongo.db.utr_clinvar_gain_redundancy.aggregate(
pipline
)
tysnv_mut_utr_gain_count = mongo.db.utr_clinvar_gain_redundancy.find(
condition
).count()
indel_mut_utr_gain_list = mongo.db.utr_clinvar_gain_indel_redundancy.aggregate(
pipline
)
indel_mut_utr_gain_count = mongo.db.utr_clinvar_gain_indel_redundancy.find(
condition
).count()
mut_utr_gain_list = list(tynsv_mut_utr_gain_list) + list(
indel_mut_utr_gain_list
)
mut_utr_gain_count = tysnv_mut_utr_gain_count + indel_mut_utr_gain_count
return {
"mut_utr_gain_list": list(mut_utr_gain_list),
"mut_utr_gain_count": mut_utr_gain_count,
}
api.add_resource(MutUtrGain, "/api/mut_utr_gain")
mut_loss_utr_site = {
"mut_id": fields.String,
"mirna_id": fields.String,
"gene_symbol": fields.String,
"experiment_valid": fields.Integer,
"expr_corelation": fields.String,
"mut_info": fields.Nested(mut_info),
"utr_info": fields.Nested(utr_info_line),
"site_info": fields.Nested(utr_site_info),
"gene_expression": fields.Nested(gene_expression),
"mirna_expression": fields.Nested(mirna_expression),
"corelation_detail": fields.Nested(corelation_detail),
}
mut_utr_loss = {
"mut_utr_loss_list": fields.Nested(mut_loss_utr_site),
"mut_utr_loss_count": fields.Integer,
}
class MutUtrLoss(Resource):
@marshal_with(mut_utr_loss)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id")
parser.add_argument("page")
args = parser.parse_args()
page = 1
per_page = 15
record_skip = (page - 1) * per_page
condition = {}
if args["page"]:
record_skip = (int(args["page"]) - 1) * per_page
if args["mut_id"]:
condition["mut_id"] = args["mut_id"]
lookup_gene = {
"$lookup": {
"from": "gene_expression",
"localField": "gene_symbol",
"foreignField": "symbol",
"as": "gene_expression",
}
}
lookup_mirna = {
"$lookup": {
"from": "mirna_expression",
"localField": "mirna_id",
"foreignField": "mir_id",
"as": "mirna_expression",
}
}
lookup_corelation = {
"$lookup": {
"from": "corelation_cancer_detail",
"localField": "cor_key",
"foreignField": "mir_gene",
"as": "corelation_detail",
}
}
print(condition)
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [match, skip, limit, lookup_gene, lookup_mirna, lookup_corelation]
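        # COSMIC-style IDs (prefix COS) route to the COSMIC collections;
        # everything else is looked up in the ClinVar collections.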
if args["mut_id"].lower().startswith("cos"):
tysnv_mut_utr_loss_list = mongo.db.utr_cosmic_loss_redundancy.aggregate(
pipline
)
tysnv_mut_utr_loss_count = mongo.db.utr_cosmic_loss_redundancy.find(
condition
).count()
indel_mut_utr_loss_list = mongo.db.utr_cosmic_loss_indel_redundancy.aggregate(
pipline
)
indel_mut_utr_loss_count = mongo.db.utr_cosmic_loss_indel_redundancy.find(
condition
).count()
mut_utr_loss_list = list(tysnv_mut_utr_loss_list) + list(
indel_mut_utr_loss_list
)
mut_utr_loss_count = tysnv_mut_utr_loss_count + indel_mut_utr_loss_count
else:
tysnv_mut_utr_loss_list = mongo.db.utr_clinvar_loss_redundancy.aggregate(
pipline
)
tysnv_mut_utr_loss_count = mongo.db.utr_clinvar_loss_redundancy.find(
condition
).count()
indel_mut_utr_loss_list = mongo.db.utr_clinvar_loss_indel_redundancy.aggregate(
pipline
)
indel_mut_utr_loss_count = mongo.db.utr_clinvar_loss_indel_redundancy.find(
condition
).count()
mut_utr_loss_list = list(tysnv_mut_utr_loss_list) + list(
indel_mut_utr_loss_list
)
mut_utr_loss_count = tysnv_mut_utr_loss_count + indel_mut_utr_loss_count
return {
"mut_utr_loss_list": list(mut_utr_loss_list),
"mut_utr_loss_count": mut_utr_loss_count,
}
api.add_resource(MutUtrLoss, "/api/mut_utr_loss")
browse_info = {
"mir_id": fields.String,
"mir_acc": fields.String,
"mir_chr": fields.String,
"mir_start": fields.String,
"mir_end": fields.String,
"mir_strand": fields.String,
"location": fields.String,
"count_snp": fields.Integer,
"snp_info": fields.String,
"count_nutation": fields.Integer,
"mutation_info": fields.String,
}
browse_list = {"browse_list": fields.List(fields.Nested(browse_info))}
class BrowseMir(Resource):
@marshal_with(browse_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("chr", type=str)
parser.add_argument("page", type=int, default=1)
parser.add_argument("per_page", type=int, default=30)
args = parser.parse_args()
page = args["page"]
per_page = args["per_page"]
chrome = args["chr"]
record_skip = (page - 1) * per_page
condition = {}
browse_list = []
if chrome:
condition = {"mir_chr": chrome}
browse_list = mongo.db.browseY.find(condition).skip(record_skip).limit(per_page)
return {"browse_list": list(browse_list)}
api.add_resource(BrowseMir, "/api/browsemir")
mir_summary = {
"mir_id": fields.String,
"mir_acc": fields.String,
"mir_chr": fields.String,
"mir_start": fields.String,
"mir_end": fields.String,
"mir_strand": fields.String,
"matureSeq": fields.String,
"pre_id": fields.String,
"pre_acc": fields.String,
"pre_chr": fields.String,
"pre_start": fields.String,
"pre_end": fields.String,
"pre_strand": fields.String,
"harpin_seq": fields.String,
"snp_in_seed": fields.Integer,
"snp_in_mature": fields.Integer,
"snp_in_premir": fields.Integer,
"cosmic_in_seed": fields.Integer,
"cosmic_in_mature": fields.Integer,
"cosmic_in_premir": fields.Integer,
"clinvar_in_seed": fields.Integer,
"clinvar_in_mature": fields.Integer,
"clinvar_in_premir": fields.Integer,
"snp_gwas_in_seed": fields.Integer,
"snp_gwas_in_mature": fields.Integer,
"snp_gwas_in_premir": fields.Integer,
"drv_in_seed": fields.Integer,
"drv_in_mature": fields.Integer,
"drv_in_premir": fields.Integer,
}
mirna_summary_list = {
"mirna_summary_list": fields.Nested(mir_summary),
"mirna_summary_count": fields.Integer,
}
class MirSummary(Resource):
@marshal_with(mirna_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("page", type=int, default=1)
parser.add_argument("chrome", type=str)
parser.add_argument("mirna_id")
args = parser.parse_args()
page = args["page"]
chrome = args["chrome"]
mirna_id = args["mirna_id"]
per_page = 15
record_skip = (page - 1) * per_page
print(mirna_id)
condition = {}
if chrome != "All":
condition["mir_chr"] = chrome
if mirna_id:
condition["mir_id"] = {"$regex": mirna_id, "$options": "$i"}
# mirna_summary_list = mongo.db.mirna_summary_sort.find(condition).skip(record_skip).limit(per_page)
# mirna_summary_count=mongo.db.mirna_summary_sort.find(condition).count()
mirna_summary_list = (
mongo.db.seed_mature_pre_var_v1.find(condition)
.skip(record_skip)
.limit(per_page)
)
mirna_summary_count = mongo.db.seed_mature_pre_var_v1.find(condition).count()
return {
"mirna_summary_list": list(mirna_summary_list),
"mirna_summary_count": mirna_summary_count,
}
api.add_resource(MirSummary, "/api/mirna_summary")
class MirInfo(Resource):
@marshal_with(mirna_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
args = parser.parse_args()
search_ids = args["search_ids"]
condition = {}
print(search_ids)
if search_ids:
condition["mir_id"] = {
"$regex": "".join(["^", search_ids, "$"]),
"$options": "$i",
}
mirna_summary_list = mongo.db.seed_mature_pre_var_v1.find(condition)
mirna_summary_count = mongo.db.seed_mature_pre_var_v1.find(
condition
).count()
else:
mirna_summary_list = {}
mirna_summary_count = 0
return {
"mirna_summary_list": list(mirna_summary_list),
"mirna_summary_count": mirna_summary_count,
}
api.add_resource(MirInfo, "/api/mirinfo")
drug_name = {
"pubchem_sid": fields.String,
"drug_name": fields.String,
"fda_status": fields.String,
"nsc_id": fields.String,
"machanism_of_action": fields.String,
}
nci60_item = {
"miRNA": fields.String,
"NSC": fields.String,
"pubchem": fields.String,
"cor": fields.String,
"pv": fields.String,
"fdr": fields.String,
"drug_name": fields.Nested(drug_name),
}
drug_cor = {"nci60_list": fields.Nested(nci60_item), "nci60_count": fields.Integer}
class MirDrug(Resource):
@marshal_with(drug_cor)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mature_id", type=str)
args = parser.parse_args()
mature_id = args["mature_id"]
condition_ccle = {}
condition_nci60 = {}
pipeline = []
if mature_id:
condition_nci60["miRNA"] = mature_id
"""
condition_ccle['pv']={'$lt':'0.05'}
condition_ccle['fdr']={'$lt':'0.05'}
condition_nci60['miRNA']=mature_id
condition_nci60['pv']={'$lt':'0.05'}
"""
condition_nci60["fdr"] = {"$lt": "0.05"}
#
# ccle_list=mongo.db.ccle_drug_correlation.find(condition_ccle)
# ccle_count=mongo.db.ccle_drug_correlation.find(condition_ccle).count()
lookup_name = {
"$lookup": {
"from": "nscid_psid",
"localField": "NSC",
"foreignField": "nsc_id",
"as": "drug_name",
}
}
print(condition_nci60)
match = {"$match": condition_nci60}
pipeline = [match, lookup_name]
nci60_list = mongo.db.nci60_drug_correlation.aggregate(pipeline)
            nci60_count = mongo.db.nci60_drug_correlation.find(condition_nci60).count()
else:
nci60_list = []
nci60_count = 0
return {"nci60_list": list(nci60_list), "nci60_count": nci60_count}
api.add_resource(MirDrug, "/api/mirdrug")
mirna_key_list = {
"mirna_key_list": fields.Nested(mir_summary),
"premir_key_list": fields.Nested(mir_summary),
}
mirnago_item = {
"go_name": fields.String,
"go_id": fields.String,
"precursor_id": fields.String,
"reference": fields.String,
}
mirnago_list = {
"mirnago_list": fields.Nested(mirnago_item),
"mirnago_count": fields.Integer,
}
class MirnaGo(Resource):
@marshal_with(mirnago_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("precursor_id", type=str)
args = parser.parse_args()
precursor_id = args["precursor_id"]
condition = {}
if precursor_id:
condition["precursor_id"] = precursor_id
mirnago_list = mongo.db.mirnago.find(condition)
mirnago_count = mongo.db.mirnago.find(condition).count()
else:
mirnago_list = []
mirnago_count = 0
return {"mirnago_list": list(mirnago_list), "mirnago_count": mirnago_count}
api.add_resource(MirnaGo, "/api/mirnago")
class MirnaKey(Resource):
@marshal_with(mirna_key_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mirna_id", type=str)
args = parser.parse_args()
mirna_id = args["mirna_id"]
condition = {}
condition_pre = {}
if mirna_id:
condition["mir_id"] = {"$regex": mirna_id, "$options": "$i"}
condition_pre["pre_id"] = {"$regex": mirna_id, "$options": "$i"}
print(condition)
mirna_key_list = mongo.db.pri_mir_summary.find(condition)
premir_key_list = mongo.db.pri_mir_summary.find(condition_pre)
else:
mirna_key_list = {}
premir_key_list = {}
return {
"mirna_key_list": list(mirna_key_list),
"premir_key_list": list(premir_key_list),
}
api.add_resource(MirnaKey, "/api/mirna_key")
pri_id = {
"pre_id": fields.String,
"pre_chr": fields.String,
"pre_acc": fields.String,
"pre_start": fields.String,
"pre_end": fields.String,
"pre_strand": fields.String,
"snp_in_premir": fields.Integer,
"cosmic_in_premir": fields.Integer,
"clinvar_in_premir": fields.Integer,
}
mature_info = {
"mir_id": fields.List(fields.String),
"mir_acc": fields.List(fields.String),
}
pri_count = {"_id": fields.String, "count": fields.String}
primir_summary = {
"pre_id": fields.String,
"pre_chr": fields.String,
"pre_acc": fields.String,
"pre_start": fields.String,
"pre_end": fields.String,
"pre_strand": fields.String,
"snp_in_premir": fields.Integer,
"cosmic_in_premir": fields.Integer,
"clinvar_in_premir": fields.Integer,
"drv_in_premir": fields.Integer,
"mature_info": fields.Nested(mature_info),
}
primir_summary_list = {
"primir_summary_list": fields.Nested(primir_summary),
"primir_summary_count": fields.Integer,
}
class PrimirSummary(Resource):
@marshal_with(primir_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("page", type=int, default=1)
parser.add_argument("chrome", type=str)
parser.add_argument("pre_id")
args = parser.parse_args()
page = args["page"]
chrome = args["chrome"]
pre_id = args["pre_id"]
per_page = 15
record_skip = (page - 1) * per_page
print(page)
condition = {}
if chrome != "All":
condition["pre_chr"] = chrome
if pre_id:
condition["pre_id"] = {"$regex": pre_id, "$options": "$i"}
"""
group={'$group':{
'_id':{
'pre_id':'$pre_id',
'pre_acc':'$pre_acc',
'pre_chr':'$pre_chr',
'pre_start':'$pre_start',
'pre_end':'$pre_end',
'pre_strand':'$pre_strand',
'snp_in_premir':'$snp_in_premir',
'cosmic_in_premir':'$cosmic_in_premir',
'clinvar_in_premir':'$clinvar_in_premir',
},
'mature_info':{'$push':{
'mir_id':'$mir_id',
'mir_acc':'$mir_acc',
}},
}}
group_sum={'$group':{
'_id':'null',
'count':{'$sum':1}
}}
"""
print(condition)
premir_summary_list = (
mongo.db.premir_summary_v1.find(condition).skip(record_skip).limit(per_page)
)
premir_summary_count = mongo.db.premir_summary_v1.find(condition).count()
print("done serch")
# print(pip_sum)
# print(pipline)
return {
"primir_summary_list": list(premir_summary_list),
"primir_summary_count": premir_summary_count,
}
api.add_resource(PrimirSummary, "/api/primir_summary")
"""
premir_genome={
'start':fields.String,
'end':fields.String,
'stand':fields.String,
'chromosome':fields.String
}
mir_cluster5k={
'id':fields.String,
'confidence':fields.String,
'cluster5k_id':fields.String,
'accession':fields.String,
'genome':fields.List(fields.Nested(premir_genome)),
'rpm':fields.String
}
mir_cluster10k={
'id':fields.String,
'confidence':fields.String,
'cluster10k_id':fields.String,
'accession':fields.String,
'genome':fields.List(fields.Nested(premir_genome)),
'rpm':fields.String
}
"""
mut_item = {
"mut_id": fields.String,
"chr": fields.String,
"position": fields.String,
"ref": fields.String,
"alt": fields.String,
"structure_analys": fields.Integer,
}
premir_cluster = {
"pre_id": fields.String,
"cluster10k_id": fields.String,
"cluster5k_id": fields.String,
}
mirset_v9_item = {
"Function": fields.List(fields.String),
"precurser_id": fields.String,
"HMDD": fields.List(fields.String),
}
premir_context = {
"precursor_id": fields.String,
"host_gene": fields.String,
"region": fields.String,
}
premir_info = {
"pre_id": fields.String,
"cluster10k_id": fields.List(fields.List(fields.String)),
"cluster5k_id": fields.List(fields.List(fields.String)),
"sequence": fields.String,
"dotfold": fields.String,
"cosmic": fields.Nested(mut_item),
"clinvar": fields.Nested(mut_item),
"snv": fields.Nested(mut_item),
"mfe": fields.String,
"host_gene": fields.Nested(premir_context),
"mirinfo": fields.Nested(mir_summary),
"mature_position": fields.List(fields.List(fields.String)),
"mirset_v9": fields.Nested(mirset_v9_item),
}
premir_info_list = {"premir_info": fields.Nested(premir_info)}
class PremirInfo(Resource):
@marshal_with(premir_info_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
args = parser.parse_args()
search_ids = args["search_ids"]
condition = {}
print(search_ids)
if search_ids:
match = {"$match": {"pre_id": search_ids}}
lookup_mirinfo = {
"$lookup": {
"from": "pri_mir_summary",
"localField": "pre_id",
"foreignField": "pre_id",
"as": "mirinfo",
}
}
lookup_function = {
"$lookup": {
"from": "mirset_v9",
"localField": "pre_id",
"foreignField": "precurser_id",
"as": "mirset_v9",
}
}
lookup_context = {
"$lookup": {
"from": "premir_context",
"localField": "pre_id",
"foreignField": "precursor_id",
"as": "host_gene",
}
}
pipline = [match, lookup_mirinfo, lookup_function, lookup_context]
print(pipline)
# premir_info=mongo.db.premir_info.aggregate(pipline)
premir_info = mongo.db.premir_info_addindel_v1.aggregate(pipline)
else:
premir_info = {}
return {"premir_info": list(premir_info)}
api.add_resource(PremirInfo, "/api/premir_info")
pri_alt = {
"pre_id": fields.String,
"pre_start": fields.String,
"pre_end": fields.String,
"snp_id": fields.String,
"snp_chr": fields.String,
"snp_position": fields.String,
"ref": fields.String,
"snp_ref_freq": fields.String,
"alt": fields.String(attribute="snp_alt"),
"snp_alt_freq": fields.String,
"curalt": fields.String,
"pre_altseq": fields.String,
"dotfold": fields.String,
"mfe": fields.String,
"pre_strand": fields.String,
"pre_acc": fields.String,
"rela_loc": fields.String,
"insert": fields.Integer,
"delete": fields.Integer,
"alt_start": fields.String,
"alt_end": fields.String,
}
primir_alt_list = {
"primir_alt_list": fields.Nested(pri_alt),
"primir_alt_count": fields.Integer,
}
class PrimirAlt(Resource):
@marshal_with(primir_alt_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
parser.add_argument("pre_id", type=str)
args = parser.parse_args()
search_ids = args["search_ids"]
condition = {}
print(search_ids)
if search_ids:
condition["snp_id"] = search_ids
condition["pre_id"] = args["pre_id"]
# primir_alt_list=mongo.db.primary_altseq.find(condition)
# primir_alt_count=mongo.db.primary_altseq.find(condition).count()
primir_alt_list = mongo.db.primary_altseq_indel.find(condition)
primir_alt_count = mongo.db.primary_altseq_indel.find(condition).count()
else:
primir_alt_list = {}
            primir_alt_count = 0
return {
"primir_alt_list": list(primir_alt_list),
"primir_alt_count": primir_alt_count,
}
api.add_resource(PrimirAlt, "/api/primir_altseq")
primir_mut = {
"pre_id": fields.String,
"pre_start": fields.String,
"pre_end": fields.String,
"mut_id": fields.String,
"mut_chr": fields.String,
"mut_position": fields.String,
"ref": fields.String,
"curalt": fields.String,
"pre_altseq": fields.String,
"dotfold": fields.String,
"mfe": fields.String,
"pre_strand": fields.String,
"pre_acc": fields.String,
"rela_loc": fields.String,
"source": fields.String,
"insert": fields.Integer,
"delete": fields.Integer,
"alt_start": fields.String,
"alt_end": fields.String,
}
primir_mut_list = {
"primir_mut_list": fields.Nested(primir_mut),
"primir_mut_count": fields.Integer,
}
class PrimirMut(Resource):
@marshal_with(primir_mut_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id", type=str)
parser.add_argument("pre_id", type=str)
args = parser.parse_args()
mut_id = args["mut_id"]
pre_id = args["pre_id"]
condition = {}
if mut_id:
condition["mut_id"] = mut_id
condition["pre_id"] = pre_id
# primir_mut_list=mongo.db.primir_altseq_mut.find(condition)
# primir_mut_count=mongo.db.primir_altseq_mut.find(condition).count()
primir_mut_list = mongo.db.primir_altseq_mut_indel.find(condition)
primir_mut_count = mongo.db.primir_altseq_mut_indel.find(condition).count()
else:
primir_mut_count = 0
primir_mut_list = {}
return {
"primir_mut_list": list(primir_mut_list),
"primir_mut_count": primir_mut_count,
}
api.add_resource(PrimirMut, "/api/primir_altseq_mut")
snpinfo_line = {
"snp_id": fields.String,
"snp_chr": fields.String,
"snp_coordinate": fields.String,
"ref": fields.String,
"alt": fields.String,
"ref_freq": fields.String,
"alt_freq": fields.String,
"location": fields.String,
"identifier": fields.String,
"ldsnp": fields.Integer,
"mutation_rela": fields.Integer,
"gain_count": fields.String,
"loss_count": fields.String,
}
snpinfo = {"snpinfo": fields.Nested(snpinfo_line), "snpinfo_count": fields.Integer}
class SnpInfo(Resource):
@marshal_with(snpinfo)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("query_snp", type=str)
parser.add_argument("page")
args = parser.parse_args()
page = args["page"]
query_snp = args["query_snp"]
per_page = 15
record_skip = (int(page) - 1) * int(per_page)
condition = {}
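        # "summary" pages through the whole table; an rs ID fetches one record.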
if query_snp == "summary":
snpinfo = mongo.db.snp_summary.find().skip(record_skip).limit(per_page)
snpinfo_count = mongo.db.snp_summary.find().count()
elif query_snp.startswith("rs"):
condition = {"snp_id": query_snp}
snpinfo = mongo.db.snp_summary.find(condition)
snpinfo_count = mongo.db.snp_summary.find(condition).count()
else:
snpinfo = {}
snpinfo_count = 0
return {"snpinfo": list(snpinfo), "snpinfo_count": snpinfo_count}
api.add_resource(SnpInfo, "/api/snpinfo")
catalog_line = {
"snp_id": fields.String(attribute="SNPS"),
"risk_allele": fields.String(attribute="STRONGEST_SNP-RISK_ALLELE"),
"risk_allele_fre": fields.String(attribute="RISK_ALLELE_FREQUENCY"),
"disease": fields.String(attribute="DISEASE/TRAIT"),
"reported_gene": fields.String(attribute="REPORTED_GENE"),
"p_value": fields.String(attribute="P-VALUE"),
"or_beta": fields.String(attribute="OR_or_BETA"),
"ci95": fields.String(attribute="CI_95_TEXT"),
"pubmed_id": fields.String(attribute="PUBMEDID"),
"pubmed_link": fields.String(attribute="LINK"),
}
catalog_list = {
"catalog_list": fields.Nested(catalog_line),
"catalog_count": fields.Integer,
}
class GwasCatalog(Resource):
@marshal_with(catalog_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
args = parser.parse_args()
search_ids = args["search_ids"]
print(search_ids)
if search_ids:
condition = {"SNPS": search_ids}
catalog_list = mongo.db.gwas_catalog_alternative.find(condition)
catalog_count = mongo.db.gwas_catalog_alternative.find(condition).count()
else:
catalog_list = {}
catalog_count = 0
return {"catalog_list": list(catalog_list), "catalog_count": catalog_count}
api.add_resource(GwasCatalog, "/api/gwas_catalog")
tag_info = {
"population": fields.String,
"ld_start": fields.String,
"ld_end": fields.String,
}
relate_tag_info = {
"population": fields.String,
"relate_tag_chr": fields.String,
"relate_tag_ld_start": fields.String,
"relate_tag_ld_end": fields.String,
"d_prime": fields.String,
"r2": fields.String,
}
ld_info_id = {
"snp_id": fields.String,
"snp_chr": fields.String(attribute="chrome"),
"snp_position": fields.String(attribute="position"),
"is_tag": fields.String,
"is_ld": fields.String,
"location": fields.String,
"rela_tag": fields.String,
"relate_tag_pos": fields.String,
}
ld_info = {
"_id": fields.Nested(ld_info_id),
"tag_info": fields.Nested(tag_info),
"relate_tag_info": fields.Nested(relate_tag_info),
"catalog_info": fields.Nested(catalog_line),
}
ld_info_list = {"ld_list": fields.Nested(ld_info), "ld_item_lenth": fields.Integer}
class LDinfo(Resource):
@marshal_with(ld_info_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
args = parser.parse_args()
search_ids = args["search_ids"]
print(search_ids)
# condition = {}
match = {"$match": {"snp_id": search_ids}}
group = {
"$group": {
"_id": {
"snp_id": "$snp_id",
"chrome": "$chrome",
"position": "$position",
"is_tag": "$is_tag",
"is_ld": "$is_ld",
"location": "$location",
"rela_tag": "$rela_tag",
"relate_tag_pos": "$relate_tag_pos",
},
"tag_info": {
"$push": {
"population": "$population",
"ld_start": "$ld_start",
"ld_end": "$ld_end",
}
},
"relate_tag_info": {
"$push": {
"population": "$population",
"relate_tag_chr": "$relate_tag_chr",
"relate_tag_ld_start": "$relate_tag_ld_start",
"relate_tag_ld_end": "$relate_tag_ld_end",
"d_prime": "$d_prime",
"r2": "$r2",
}
},
}
}
lookup = {
"$lookup": {
"from": "gwas_catalog_alternative",
"localField": "_id.rela_tag",
"foreignField": "SNPS",
"as": "catalog_info",
}
}
pipline = [match, group, lookup]
print(pipline)
ld_list = mongo.db.ld_region.aggregate(pipline)
ld_item_lenth = mongo.db.ld_region.find({"snp_id": search_ids}).count()
return {"ld_list": list(ld_list), "ld_item_lenth": ld_item_lenth}
api.add_resource(LDinfo, "/api/ldinfo")
disease_pubmed_item = {"disease": fields.String, "pubmed_id": fields.String}
mutation_line = {
"analysis": fields.Integer,
"mut_chr": fields.String,
"mut_position": fields.String,
"mut_id": fields.String,
"ref": fields.String,
"alt": fields.String,
"rela_tag_snp": fields.String,
"location": fields.String,
"source": fields.String,
"gain_count": fields.String,
"loss_count": fields.String,
"mature_id": fields.String,
"gene": fields.String,
"identifier_lower": fields.String,
"pre_id": fields.String,
"energy_change": fields.String,
"expression_change": fields.String,
"snp_id": fields.String,
"disease_pubmed": fields.Nested(disease_pubmed_item),
}
count_group = {"_id": fields.String, "count": fields.Integer}
mutation_summary_list = {
"mutation_seed_list": fields.Nested(mutation_line),
"mutation_seed_count": fields.Nested(count_group),
"mutation_mature_list": fields.Nested(mutation_line),
"mutation_mature_count": fields.Nested(count_group),
"mutation_premir_list": fields.Nested(mutation_line),
"mutation_premir_count": fields.Nested(count_group),
"mutation_utr3_list": fields.Nested(mutation_line),
#'mutation_utr3_count':fields.Nested(count_group),
"mutation_utr3_count": fields.Integer,
"mutation_summary_list": fields.Nested(mutation_line),
"mutation_summary_count": fields.Nested(count_group),
}
"""
class MutationSummary(Resource):
@marshal_with(mutation_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('mut_id', type=str)
parser.add_argument('page')
#parser.add_argument('chrome')
#parser.add_argument('location')
parser.add_argument('resource')
#parser.add_argument('snp_rela')
#parser.add_argument('pubmed_id')
parser.add_argument('histology')
parser.add_argument('pathology')
parser.add_argument('gene')
args = parser.parse_args()
#print(args['chrome'])
page=1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
histology_dict={}
pathology_dict={}
match_histology={}
match_pathology={}
pipline=[]
if args['page']:
page=args['page']
record_skip = (int(page) - 1) * per_page
if args['gene']:
condition['identifier_lower']=args['gene'].lower()
#if args['chrome']!='All' and args['chrome']:
# condition['chrome']=args['chrome']
#if args['location'] != 'All'and args['location']:
# condition['location']=args['location']
if args['resource']!='All' and args['resource']:
condition['source']=args['resource'].lower()
if args['histology'] and args['histology'] != 'All':
histology_dict['disease']={'$regex':args['histology'],'$options':'$i'}
match_histology={'$match':histology_dict}
if args['pathology'] and args['pathology']!='All':
pathology_dict['disease']={'$regex':args['pathology'],'$options':'$i'}
match_pathology={'$match':pathology_dict}
if args['mut_id']:
mut_id=args['mut_id']
if mut_id.startswith('COS') or re.match('[0-9]*',mut_id):
condition['mut_id']=args['mut_id']
#if args['snp_rela']:
# condition['snp_rela']=args['snp_rela']
#if args['pubmed_id']:
# condition['pubmed_id']={'$exists':True}
match_condition={'$match':condition}
skip={'$skip':record_skip}
limit={'$limit':per_page}
count_group={'$group':{'_id':'null','count':{'$sum':1}}}
if condition:
pipline.append(match_condition)
if histology_dict:
pipline.append(match_histology)
if pathology_dict:
pipline.append(match_pathology)
pipline_count=pipline+[count_group]
pipline.append(skip)
pipline.append(limit)
print("condition:")
print(condition)
print("histology:")
print(histology_dict)
print("pathology:")
print(pathology_dict)
if condition or histology_dict or pathology_dict:
mutation_summary_list=mongo.db.mutation_summary_addtarget.aggregate(pipline)
else:
mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
mutation_summary_count=mongo.db.mutation_summary_addtarget.aggregate(pipline_count)
return{'mutation_summary_list':list(mutation_summary_list),'mutation_summary_count':list(mutation_summary_count)}
api.add_resource(MutationSummary,'/api/mutation_summary')
"""
gene_symbol = {"gene_symbol": fields.String, "gene_symbol_lower": fields.String}
gene_list = {
"gene_list": fields.Nested(gene_symbol),
"gene_query": fields.Nested(gene_symbol),
}
class GetGene(Resource):
@marshal_with(gene_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("gene", type=str)
args = parser.parse_args()
condition = {}
accurate_condition = {}
print(args["gene"])
if args["gene"]:
condition["gene_symbol"] = {
"$regex": args["gene"].lower(),
"$options": "$i",
}
accurate_condition["gene_symbol_lower"] = args["gene"].lower()
print(accurate_condition)
gene_list = mongo.db.snp_summary_genelist.find(condition).limit(10)
gene_query = mongo.db.snp_summary_genelist.find(accurate_condition)
else:
gene_list = {}
gene_query = {}
return {"gene_list": list(gene_list), "gene_query": list(gene_query)}
api.add_resource(GetGene, "/api/snp_summary_gene")
class MutGetGene(Resource):
@marshal_with(gene_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("gene", type=str)
args = parser.parse_args()
condition = {}
accurate_condition = {}
print(args["gene"])
if args["gene"]:
condition["gene_symbol"] = {
"$regex": args["gene"].lower(),
"$options": "$i",
}
accurate_condition["gene_symbol_lower"] = args["gene"].lower()
print(accurate_condition)
gene_list = mongo.db.mutation_summary_genelist.find(condition).limit(10)
gene_query = mongo.db.mutation_summary_genelist.find(accurate_condition)
else:
gene_list = {}
gene_query = {}
return {"gene_list": list(gene_list), "gene_query": list(gene_query)}
api.add_resource(MutGetGene, "/api/mutation_summary_gene")
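# Usage sketch for the two gene-autocomplete endpoints above (illustrative
# values, assumed local deployment): each returns up to 10 case-insensitive
# substring matches plus any exact lower-case match, e.g.
#   GET /api/mutation_summary_gene?gene=tp53
#   -> {"gene_list": [...<=10 regex matches...], "gene_query": [...exact match...]}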
phenotype_line = {"phenotype": fields.String}
phenotype_list = {"phenotype_list": fields.Nested(phenotype_line)}
class GetPhenotype(Resource):
@marshal_with(phenotype_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("phenotype", type=str)
args = parser.parse_args()
condition = {}
# accurate_condition={}
print(args["phenotype"])
if args["phenotype"]:
condition["phenotype"] = {"$regex": args["phenotype"], "$options": "$i"}
# accurate_condition['gene_symbol_lower']=args['gene'].lower()
# print(accurate_condition)
phenotype_list = mongo.db.phenotype_list.find(condition).limit(10)
# gene_query=mongo.db.mutation_summary_genelist.find(accurate_condition)
else:
phenotype_list = {}
# gene_query={}
return {"phenotype_list": list(phenotype_list)}
api.add_resource(GetPhenotype, "/api/mutation_summary_phenotype")
class MutationSummarySeed(Resource):
@marshal_with(mutation_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id", type=str)
parser.add_argument("page")
# parser.add_argument('chrome')
parser.add_argument("location")
parser.add_argument("resource")
# parser.add_argument('snp_rela')
# parser.add_argument('pubmed_id')
parser.add_argument("histology")
parser.add_argument("pathology")
parser.add_argument("gene")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
histology_dict = {}
pathology_dict = {}
match_histology = {}
match_pathology = {}
pipline = []
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
condition["identifier_lower"] = args["gene"].lower()
# if args['chrome']!='All' and args['chrome']:
# condition['chrome']=args['chrome']
# if args['location'] != 'All'and args['location']:
# condition['location']=args['location']
if args["resource"] != "All" and args["resource"]:
condition["source"] = args["resource"]
if args["histology"] and args["histology"] != "All":
histology_dict["disease_pubmed.disease"] = {
"$regex": args["histology"],
"$options": "$i",
}
match_histology = {"$match": histology_dict}
if args["pathology"] and args["pathology"] != "All":
pathology_dict["disease_pubmed.disease"] = {
"$regex": args["pathology"],
"$options": "$i",
}
match_pathology = {"$match": pathology_dict}
if args["mut_id"]:
# mut_id=args['mut_id']
# if mut_id.startswith('COS') or re.match('[0-9]*',mut_id):
condition["mut_id"] = args["mut_id"]
# if args['snp_rela']:
# condition['snp_rela']=args['snp_rela']
# if args['pubmed_id']:
# condition['pubmed_id']={'$exists':True}
match_condition = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
count_group = {"$group": {"_id": "null", "count": {"$sum": 1}}}
if condition:
pipline.append(match_condition)
if histology_dict:
pipline.append(match_histology)
if pathology_dict:
pipline.append(match_pathology)
pipline_count = pipline + [count_group]
pipline.append(skip)
pipline.append(limit)
print("search srv seed")
print(condition)
print(histology_dict)
print(pathology_dict)
# if condition or histology_dict or pathology_dict:
mutation_seed_list = mongo.db.drv_in_seed_v3_redundancy.aggregate(pipline)
# else:
# mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
mutation_seed_count = mongo.db.drv_in_seed_v3_redundancy.aggregate(
pipline_count
)
return {
"mutation_seed_list": list(mutation_seed_list),
"mutation_seed_count": list(mutation_seed_count),
}
api.add_resource(MutationSummarySeed, "/api/mutation_summary_seed")
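# For reference, the aggregation assembled above has this shape (values
# illustrative; per_page is 15):
#   pipline       = [{"$match": {...filters...}},
#                    {"$skip": 15 * (page - 1)}, {"$limit": 15}]
#   pipline_count = [{"$match": {...filters...}},
#                    {"$group": {"_id": "null", "count": {"$sum": 1}}}]
# i.e. the count pipeline shares the $match stages but swaps paging for $group.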
class MutationSummaryMature(Resource):
@marshal_with(mutation_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id", type=str)
parser.add_argument("page")
# parser.add_argument('chrome')
# parser.add_argument('location')
parser.add_argument("resource")
# parser.add_argument('snp_rela')
# parser.add_argument('pubmed_id')
parser.add_argument("histology")
parser.add_argument("pathology")
parser.add_argument("gene")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
histology_dict = {}
pathology_dict = {}
match_histology = {}
match_pathology = {}
pipline = []
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
condition["identifier_lower"] = args["gene"].lower()
# if args['chrome']!='All' and args['chrome']:
# condition['chrome']=args['chrome']
# if args['location'] != 'All'and args['location']:
# condition['location']=args['location']
if args["resource"] != "All" and args["resource"]:
condition["resource"] = args["resource"]
if args["histology"] and args["histology"] != "All":
histology_dict["pathology"] = {
"$regex": args["histology"],
"$options": "$i",
}
match_histology = {"$match": histology_dict}
if args["pathology"] and args["pathology"] != "All":
pathology_dict["disease"] = {"$regex": args["pathology"], "$options": "$i"}
match_pathology = {"$match": pathology_dict}
if args["mut_id"]:
# mut_id=args['mut_id']
# if mut_id.startswith('COS') or re.match('[0-9]*',mut_id):
condition["mut_id"] = args["mut_id"]
# if args['snp_rela']:
# condition['snp_rela']=args['snp_rela']
# if args['pubmed_id']:
# condition['pubmed_id']={'$exists':True}
condition["location"] = "Mature"
match_condition = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
count_group = {"$group": {"_id": "null", "count": {"$sum": 1}}}
if condition:
pipline.append(match_condition)
if histology_dict:
pipline.append(match_histology)
if pathology_dict:
pipline.append(match_pathology)
pipline_count = pipline + [count_group]
pipline.append(skip)
pipline.append(limit)
print(condition)
print(histology_dict)
print(pathology_dict)
# if condition or histology_dict or pathology_dict:
mutation_mature_tmp_list = mongo.db.drv_in_premir_v3_redundancy.aggregate(
pipline
)
# else:
# mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
mutation_mature_tmp_count = mongo.db.drv_in_premir_v3_redundancy.aggregate(
pipline_count
)
condition["location"] = "Seed"
match_condition = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
count_group = {"$group": {"_id": "null", "count": {"$sum": 1}}}
if condition:
pipline.append(match_condition)
if histology_dict:
pipline.append(match_histology)
if pathology_dict:
pipline.append(match_pathology)
pipline_count = pipline + [count_group]
pipline.append(skip)
pipline.append(limit)
mutation_seed_list = mongo.db.drv_in_premir_v2.aggregate(pipline)
mutation_seed_count = mongo.db.drv_in_premir_v2.aggregate(pipline_count)
mutation_mature_list = list(mutation_mature_tmp_list) + list(mutation_seed_list)
mutation_mature_count = list(mutation_mature_tmp_count) + list(
mutation_seed_count
)
return {
"mutation_mature_list": list(mutation_mature_list),
"mutation_mature_count": list(mutation_mature_count),
}
api.add_resource(MutationSummaryMature, "/api/mutation_summary_mature")
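# MutationSummaryMature unions two queries: variants with location == "Mature"
# (from drv_in_premir_v3_redundancy) and variants with location == "Seed"
# (from drv_in_premir_v2), presumably because seed positions lie within the
# mature miRNA; both the result lists and the count lists are concatenated
# before returning.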
class MutationSummaryPremir(Resource):
@marshal_with(mutation_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id", type=str)
parser.add_argument("page")
# parser.add_argument('chrome')
# parser.add_argument('location')
parser.add_argument("resource")
# parser.add_argument('snp_rela')
# parser.add_argument('pubmed_id')
parser.add_argument("histology")
parser.add_argument("pathology")
parser.add_argument("gene")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
histology_dict = {}
pathology_dict = {}
match_histology = {}
match_pathology = {}
# find_gene={}
pipline = []
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
# condition['identifier_lower']=args['gene'].lower()
condition["$or"] = [
{"identifier_lower": args["gene"].lower()},
{"pre_id": args["gene"].lower()},
]
# if args['chrome']!='All' and args['chrome']:
# condition['chrome']=args['chrome']
# if args['location'] != 'All'and args['location']:
# condition['location']=args['location']
if args["resource"] != "All" and args["resource"]:
condition["source"] = args["resource"]
if args["histology"] and args["histology"] != "All":
histology_dict["disease_pubmed.disease"] = {
"$regex": args["histology"],
"$options": "$i",
}
match_histology = {"$match": histology_dict}
if args["pathology"] and args["pathology"] != "All":
pathology_dict["disease_pubmed.disease"] = {
"$regex": args["pathology"],
"$options": "$i",
}
match_pathology = {"$match": pathology_dict}
if args["mut_id"]:
# mut_id=args['mut_id']
# if mut_id.startswith('COS') or re.match('[0-9]*',mut_id):
condition["mut_id"] = args["mut_id"]
# if args['snp_rela']:
# condition['snp_rela']=args['snp_rela']
# if args['pubmed_id']:
# condition['pubmed_id']={'$exists':True}
match_condition = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
count_group = {"$group": {"_id": "null", "count": {"$sum": 1}}}
if condition:
pipline.append(match_condition)
if histology_dict:
pipline.append(match_histology)
if pathology_dict:
pipline.append(match_pathology)
pipline_count = pipline + [count_group]
pipline.append(skip)
pipline.append(limit)
print(condition)
print(histology_dict)
print(pathology_dict)
# if condition or histology_dict or pathology_dict:
mutation_premir_list = mongo.db.drv_in_premir_v3_redundancy.aggregate(pipline)
# else:
# mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
mutation_premir_count = mongo.db.drv_in_premir_v3_redundancy.aggregate(
pipline_count
)
return {
"mutation_premir_list": list(mutation_premir_list),
"mutation_premir_count": list(mutation_premir_count),
}
api.add_resource(MutationSummaryPremir, "/api/mutation_summary_premir")
class MutationSummaryUtr3(Resource):
@marshal_with(mutation_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mut_id", type=str)
parser.add_argument("page")
# parser.add_argument('chrome')
# parser.add_argument('location')
parser.add_argument("resource")
# parser.add_argument('snp_rela')
# parser.add_argument('pubmed_id')
parser.add_argument("histology")
parser.add_argument("pathology")
parser.add_argument("gene")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
page_condition = {}
histology_dict = {}
pathology_dict = {}
match_histology = {}
match_pathology = {}
pipline = []
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
# page_condition['item_number']={"$gt":record_skip}
if args["gene"]:
condition["identifier_lower"] = args["gene"].lower()
# if args['chrome']!='All' and args['chrome']:
# condition['chrome']=args['chrome']
# if args['location'] != 'All'and args['location']:
# condition['location']=args['location']
if args["resource"] != "All" and args["resource"]:
condition["source"] = args["resource"]
if args["histology"] and args["histology"] != "All":
# histology_dict['disease_pubmed.disease']={'$regex':args['histology'],'$options':'$i'}
condition["disease_pubmed.disease"] = {
"$regex": args["histology"],
"$options": "$i",
}
match_histology = {"$match": histology_dict}
if args["pathology"] and args["pathology"] != "All":
# pathology_dict['disease_pubmed.disease']={'$regex':args['pathology'],'$options':'$i'}
condition["disease_pubmed.disease"] = {
"$regex": args["pathology"],
"$options": "$i",
}
# match_pathology={'$match':pathology_dict}
if args["mut_id"]:
# mut_id=args['mut_id']
# if mut_id.startswith('COS') or re.match('[0-9]*',mut_id):
condition["mut_id"] = args["mut_id"]
# if args['snp_rela']:
# condition['snp_rela']=args['snp_rela']
# if args['pubmed_id']:
# condition['pubmed_id']={'$exists':True}
"""
match_condition={'$match':condition}
#skip={'$skip':record_skip}
limit={'$limit':per_page}
skip={'$skip':record_skip}
count_group={'$group':{'_id':'null','count':{'$sum':1}}}
if condition:
pipline.append(match_condition)
if histology_dict:
pipline.append(match_histology)
if pathology_dict:
pipline.append(match_pathology)
pipline_count=pipline+[count_group]
#pipline.append(skip)
if args['gene'] or (args['resource']!='All' and args['resource']) or (args['pathology'] and args['pathology']!='All') or (args['histology'] and args['histology'] != 'All') or args['mut_id']:
pipline.append(skip)
else:
#pipline.append({'$match':page_condition})
pipline.append(skip)
pipline.append(limit)
print('get mutation summary UTR3')
print(condition)
print(histology_dict)
print(pathology_dict)
print(pipline)
#if condition or histology_dict or pathology_dict:
mutation_utr3_list=mongo.db.drv_in_utr_v3_redundancy.aggregate(pipline)
#print(list(mutation_utr3_list))
#else:
# mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
mutation_utr3_count=mongo.db.drv_in_utr_v3_redundancy.aggregate(pipline_count)
"""
mutation_utr3_list = (
mongo.db.drv_in_utr_v3_redundancy.find(condition)
.skip(record_skip)
.limit(per_page)
)
mutation_utr3_count = mongo.db.drv_in_utr_v3_redundancy.find(condition).count()
return {
"mutation_utr3_list": list(mutation_utr3_list),
"mutation_utr3_count": mutation_utr3_count,
}
api.add_resource(MutationSummaryUtr3, "/api/mutation_summary_utr3")
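# NOTE: the aggregation-based implementation of the UTR3 endpoint is retained
# above as a commented-out string; the live code uses a plain
# find().skip().limit() query plus find().count() instead.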
snp_line = {
"snp_id": fields.String,
"snp_chr": fields.String,
"snp_position": fields.String,
"ref": fields.String,
"alt": fields.String,
"curalt": fields.String,
"ref_freq": fields.String,
"alt_freq": fields.String,
"location": fields.String,
"gene": fields.String,
"mature_chr": fields.String,
"mature_start": fields.String,
"mature_end": fields.String,
"mature_strand": fields.String,
"mature_id": fields.String,
"is_ld": fields.String,
"gain_count": fields.String,
"loss_count": fields.String,
"pre_id": fields.String,
"energy_change": fields.String,
"expression_change": fields.String,
"analysis": fields.Integer,
"snp_energy": fields.String,
"wild_energy": fields.String,
}
"""
indel_line={
'chr':fields.String,
'position':fields.String,
'snp_id':fields.String,
'ref':fields.String,
'alt':fields.String,
'ref_freq':fields.String,
'alt_freq':fields.String,
'transcript_chr':fields.String,
'trnascript_start':fields.String,
'transcript_end':fields.String,
'transcript_strand':fields.String,
'enst_id':fields.String,
'ref_seq':fields.String,
'identifier':fields.String,
'location':fields.String,
'identifier_lower':fields.String,
'mir_chr':fields.String,
'mir_start':fields.String,
'mir_end':fields.String,
'mir_strand':fields.String
}
snp_summary_list={
'snp_seed_list':fields.Nested(snp_line),
'snp_seed_count':fields.Integer,
'snp_mature_list':fields.Nested(snp_line),
'snp_mature_count':fields.Integer,
'snp_premir_list':fields.Nested(snp_line),
'snp_premir_count':fields.Integer,
'snp_utr3_list':fields.Nested(snp_line),
'snp_utr3_count':fields.Integer,
'snp_summary_list':fields.Nested(snp_line),
'snp_summary_count':fields.Integer,
'indel_seed_list':fields.Nested(indel_line),
'indel_seed_count':fields.Integer,
'indel_premir_list':fields.Nested(indel_line),
'indel_premir_count':fields.Integer,
'indel_utr_list':fields.Nested(indel_line),
'indel_utr_count':fields.Integer
}
"""
snp_summary_list = {
"snp_seed_list": fields.Nested(snp_line),
"snp_seed_count": fields.Integer,
"snp_premir_list": fields.Nested(snp_line),
"snp_premir_count": fields.Integer,
"snp_utr3_list": fields.Nested(snp_line),
"snp_utr3_count": fields.Integer,
"snp_mature_list": fields.Nested(snp_line),
"snp_mature_count": fields.Integer,
"snp_summary_list": fields.Nested(snp_line),
"snp_summary_count": fields.Integer,
}
class SnpSummary(Resource):
@marshal_with(snp_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
# parser.add_argument('page')
# parser.add_argument('chrome')
# parser.add_argument('location')
parser.add_argument("identifier")
# parser.add_argument('gmaf')
# parser.add_argument('ldsnp')
# parser.add_argument('mutation_rela')
# parser.add_argument('gene')
# parser.add_argument('spe_snp_id')
args = parser.parse_args()
# print(args['chrome'])
# page=1
# per_page = 15
# record_skip = (int(page)-1)*per_page
condition = {}
pipline = []
# print(args['page'])
# print(record_skip)
print(args)
# if args['page']:
# page=args['page']
# record_skip = (int(page)-1)*per_page
# if args['gene']:
# condition['identifier_lower']=args['gene'].lower()
# if args['chrome'] != 'All' and args['chrome']:
# condition['snp_chr'] = args['chrome']
# if args['spe_snp_id']:
# condition['snp_id']=args['spe_snp_id']
if args["snp_id"]:
# condition['snp_id']={'$regex':args['snp_id'],'$options':'$i'}
condition["snp_id"] = args["snp_id"]
if args["identifier"]:
# condition['identifier']={'$regex':args['identifier'],'$options':'$i'}
condition["identifier_lower"] = args["identifier"].lower()
# if args['ldsnp']:
# condition['ldsnp']=args['ldsnp']
# if args['mutation_rela']:
# condition['mutation_rela']=args['mutation_rela']
# if args['gmaf'] !='All' and args['gmaf']:
# condition['alt_freq']={'$gt':args['gmaf'][1:]}
# if args['location']=="All":
# condition_utr3=condition
# condition_utr3['location']='UTR3'
# snp_utr3_list=mongo.db.snp_summary.find(condition_utr3).skip(record_skip).limit(per_page)
# snp_utr3_count=mongo.db.snp_summary.find(condition_utr3).count()
# condition_seed=condition
# condition_seed['location']='mirseed'
# snp_seed_list=mongo.db.snp_summary.find(condition_seed).skip(record_skip).limit(per_page)
# snp_seed_count=mongo.db.snp_summary.find(condition_seed).count()
# condition_mature=condition
# condition_mature['location']='mature'
# snp_mature_list=mongo.db.snp_summary.find(condition_mature).skip(record_skip).limit(per_page)
# snp_mature_count=mongo.db.snp_summary.find(condition_mature).count()
# condition_premir=condition
# condition_premir['location']='pre-miRNA'
# snp_premir_list=mongo.db.snp_summary.find(condition_premir).skip(record_skip).limit(per_page)
# snp_premir_count=mongo.db.snp_summary.find(condition_premir).count()
# elif args['location']=='mirseed':
# condition['location']='mirseed'
# snp_seed_list=mongo.db.snp_summary.find(condition).skip(record_skip).limit(per_page)
# snp_seed_count=mongo.db.snp_summary.find(condition).count()
# elif args['location']=='mature':
# condition['location']='mature'
# snp_mature_list=mongo.db.snp_summary.find(condition).skip(record_skip).limit(per_page)
# snp_mature_count=mongo.db.snp_summary.find(condition).count()
# elif args['location']=='pre-miRNA':
# condition['location']='pre-miRNA'
# snp_premir_list=mongo.db.snp_summary.find(condition).skip(record_skip).limit(per_page)
# snp_premir_count=mongo.db.snp_summary.find(condition).count()
# elif args['location']=='UTR3':
# condition['location']='UTR3'
# snp_utr3_list=mongo.db.snp_summary.find(condition).skip(record_skip).limit(per_page)
# snp_utr3_count=mongo.db.snp_summary.find(condition).count()
# print(condition)
# snp_summary_list=mongo.db.snp_summary.find(condition)
# snp_summary_count=mongo.db.snp_summary.find(condition).count()
snp_summary_seed = mongo.db.snp_in_seed_v2.find(condition)
snp_summary_premir = mongo.db.snp_in_premir_v2.find(condition)
snp_summary_utr3 = mongo.db.snp_in_utr_v2.find(condition)
snp_summary_seed_count = mongo.db.snp_in_seed_v2.find(condition).count()
snp_summary_premir_count = mongo.db.snp_in_premir_v2.find(condition).count()
snp_summary_utr3_count = mongo.db.snp_in_utr_v2.find(condition).count()
snp_summary_list = (
list(snp_summary_seed) + list(snp_summary_premir) + list(snp_summary_utr3)
)
snp_summary_count = (
snp_summary_seed_count + snp_summary_premir_count + snp_summary_utr3_count
)
return {
"snp_summary_list": list(snp_summary_list),
"snp_summary_count": snp_summary_count,
}
api.add_resource(SnpSummary, "/api/snp_summary")
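# SnpSummary fans a single query out over the three location collections
# (snp_in_seed_v2, snp_in_premir_v2, snp_in_utr_v2) and returns the
# concatenated documents together with the summed counts.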
class SnpSummarySeed(Resource):
@marshal_with(snp_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("page")
parser.add_argument("chrome")
parser.add_argument("location")
parser.add_argument("identifier")
parser.add_argument("gmaf")
parser.add_argument("ldsnp")
parser.add_argument("gene")
parser.add_argument("spe_snp_id")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
# condition['location']='mirseed'
pipline = []
snp_seed_list = {}
snp_mature_list = {}
snp_premir_list = {}
snp_utr3_list = {}
snp_seed_count = 0
snp_mature_count = 0
snp_premir_count = 0
snp_utr3_count = 0
print(args["page"])
print(record_skip)
print(args)
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
condition["identifier_lower"] = args["gene"].lower()
# if args['chrome'] != 'All' and args['chrome']:
# condition['snp_chr'] = args['chrome']
# if args['spe_snp_id']:
# condition['snp_id']=args['spe_snp_id']
if args["snp_id"]:
# condition['snp_id']={'$regex':args['snp_id'],'$options':'$i'}
condition["snp_id"] = args["snp_id"]
if args["identifier"]:
# condition['identifier']={'$regex':args['identifier'],'$options':'$i'}
condition["identifier_lower"] = args["identifier"].lower()
if args["ldsnp"]:
condition["is_ld"] = str(args["ldsnp"])
# if args['mutation_rela']:
# condition['mutation_rela']=args['mutation_rela']
if args["gmaf"] != "All" and args["gmaf"]:
condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
match = {"$match": condition}
skip = {"$skip": record_skip}
limit = {"$limit": per_page}
pipline = [match, skip, limit]
# snp_seed_list=mongo.db.snp_summary_mirseed.aggregate(pipline)
snp_seed_count = mongo.db.snp_in_seed_v2.find(condition).count()
snp_seed_list = (
mongo.db.snp_in_seed_v2.find(condition).skip(record_skip).limit(per_page)
)
return {"snp_seed_list": list(snp_seed_list), "snp_seed_count": snp_seed_count}
api.add_resource(SnpSummarySeed, "/api/snp_summary_seed")
class SnpSummaryMature(Resource):
@marshal_with(snp_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("page")
parser.add_argument("chrome")
parser.add_argument("location")
parser.add_argument("identifier")
parser.add_argument("gmaf")
parser.add_argument("ldsnp")
parser.add_argument("mutation_rela")
parser.add_argument("gene")
parser.add_argument("spe_snp_id")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
condition["location"] = "mature"
pipline = []
snp_seed_list = {}
snp_mature_list = {}
snp_premir_list = {}
snp_utr3_list = {}
snp_seed_count = 0
snp_mature_count = 0
snp_premir_count = 0
snp_utr3_count = 0
print(args["page"])
print(record_skip)
print(args)
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
condition["identifier_lower"] = args["gene"].lower()
if args["chrome"] != "All" and args["chrome"]:
condition["snp_chr"] = args["chrome"]
if args["spe_snp_id"]:
condition["snp_id"] = args["spe_snp_id"]
if args["snp_id"]:
# condition['snp_id']={'$regex':args['snp_id'],'$options':'$i'}
condition["snp_id"] = args["snp_id"]
if args["identifier"]:
# condition['identifier']={'$regex':args['identifier'],'$options':'$i'}
condition["identifier_lower"] = args["identifier"].lower()
if args["ldsnp"]:
condition["id_ld"] = args["ldsnp"]
if args["mutation_rela"]:
condition["mutation_rela"] = args["mutation_rela"]
if args["gmaf"] != "All" and args["gmaf"]:
condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
condition["location"] = "Seed"
snp_seed_count = mongo.db.snp_in_premir_v2.find(condition).count()
snp_seed_list = (
mongo.db.snp_in_premir_v2.find(condition).skip(record_skip).limit(per_page)
)
condition["location"] = "Mature"
snp_mature_tmp_list = (
mongo.db.snp_in_premir_v2.find(condition).skip(record_skip).limit(per_page)
)
snp_mature_tmp_count = mongo.db.snp_in_premir_v2.find(condition).count()
snp_mature_list = list(snp_seed_list) + list(snp_mature_tmp_list)
snp_mature_count = snp_seed_count + snp_mature_tmp_count
return {
"snp_mature_list": list(snp_mature_list),
"snp_mature_count": snp_mature_count,
}
api.add_resource(SnpSummaryMature, "/api/snp_summary_mature")
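# As with the mutation endpoints, the mature SNP list is built from the
# pre-miRNA collection by merging the location == "Seed" and
# location == "Mature" result sets and adding their counts.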
class SnpSummaryPremir(Resource):
@marshal_with(snp_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("page")
parser.add_argument("chrome")
parser.add_argument("location")
parser.add_argument("identifier")
parser.add_argument("gmaf")
parser.add_argument("ldsnp")
parser.add_argument("mutation_rela")
parser.add_argument("gene")
parser.add_argument("spe_snp_id")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
pipline = []
snp_seed_list = {}
snp_mature_list = {}
snp_premir_list = {}
snp_utr3_list = {}
snp_seed_count = 0
snp_mature_count = 0
snp_premir_count = 0
snp_utr3_count = 0
print(args)
print(condition)
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
condition["$or"] = [
{"identifier_lower": args["gene"].lower()},
{"pre_id": args["gene"].lower()},
]
# if args['chrome'] != 'All' and args['chrome']:
# condition['snp_chr'] = args['chrome']
if args["spe_snp_id"]:
condition["snp_id"] = args["spe_snp_id"]
if args["snp_id"]:
# condition['snp_id']={'$regex':args['snp_id'],'$options':'$i'}
condition["snp_id"] = args["snp_id"]
if args["identifier"]:
# condition['identifier']={'$regex':args['identifier'],'$options':'$i'}
condition["identifier_lower"] = args["identifier"].lower()
if args["ldsnp"]:
condition["is_ld"] = args["ldsnp"]
if args["gmaf"] != "All" and args["gmaf"]:
condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
print(condition)
snp_premir_list = (
mongo.db.snp_in_premir_v2.find(condition).skip(record_skip).limit(per_page)
)
snp_premir_count = mongo.db.snp_in_premir_v2.find(condition).count()
return {
"snp_premir_list": list(snp_premir_list),
"snp_premir_count": snp_premir_count,
}
api.add_resource(SnpSummaryPremir, "/api/snp_summary_premir")
class SnpSummaryUtr3(Resource):
@marshal_with(snp_summary_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("snp_id", type=str)
parser.add_argument("page")
parser.add_argument("chrome")
parser.add_argument("location")
parser.add_argument("identifier")
parser.add_argument("gmaf")
parser.add_argument("ldsnp")
parser.add_argument("gene")
parser.add_argument("spe_snp_id")
args = parser.parse_args()
# print(args['chrome'])
page = 1
per_page = 15
record_skip = (int(page) - 1) * per_page
condition = {}
condition_indel = {}
# condition['location']='UTR3'
pipline = []
snp_seed_list = {}
snp_mature_list = {}
snp_premir_list = {}
snp_utr3_list = {}
snp_seed_count = 0
snp_mature_count = 0
snp_premir_count = 0
snp_utr3_count = 0
print(args["page"])
print(record_skip)
print(args)
if args["page"]:
page = args["page"]
record_skip = (int(page) - 1) * per_page
if args["gene"]:
condition["identifier_lower"] = args["gene"].lower()
# if args['chrome'] != 'All' and args['chrome']:
# condition['snp_chr'] = args['chrome']
# if args['spe_snp_id']:
# condition['snp_id']=args['spe_snp_id']
if args["snp_id"]:
# condition['snp_id']={'$regex':args['snp_id'],'$options':'$i'}
condition["snp_id"] = args["snp_id"]
if args["identifier"]:
# condition['identifier']={'$regex':args['identifier'],'$options':'$i'}
condition["identifier_lower"] = args["identifier"].lower()
if args["ldsnp"]:
condition["is_ld"] = args["ldsnp"]
# if args['mutation_rela']:
# condition['mutation_rela']=args['mutation_rela']
if args["gmaf"] != "All" and args["gmaf"]:
condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
if (
args["gene"]
or args["snp_id"]
or args["identifier"]
or args["ldsnp"]
or (args["gmaf"] != "All" and args["gmaf"])
):
snp_utr3_list = (
mongo.db.snp_in_utr_v2.find(condition).skip(record_skip).limit(per_page)
)
snp_utr3_count = mongo.db.snp_in_utr_v2.find(condition).count()
elif int(page) <= 50000:
snp_utr3_list = (
mongo.db.snp_in_utr_v2.find(condition).skip(record_skip).limit(per_page)
)
snp_utr3_count = mongo.db.snp_in_utr_v2.find(condition).count()
else:
condition["item_number"] = {"$gt": str(record_skip)}
snp_utr3_list = mongo.db.snp_in_utr_v2.find(condition).limit(per_page)
snp_utr3_count = mongo.db.snp_in_utr_v2.find(condition).count()
# snp_utr3_list=mongo.db.snp_summary_utr3.aggregate(pipline)
print(condition)
return {"snp_utr3_list": list(snp_utr3_list), "snp_utr3_count": snp_utr3_count}
api.add_resource(SnpSummaryUtr3, "/api/snp_summary_utr3")
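# Deep-pagination note: for unfiltered browsing beyond page 50000 the UTR3
# endpoint switches from skip()-based paging to a range query on the
# 'item_number' field ({"$gt": record_skip}), which avoids the cost of large
# skips in MongoDB; 'item_number' is assumed here to be a stringified running
# index stored on each document.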
cosmic_line = {
"ID_NCV": fields.String,
"snp_rela": fields.String,
"Primary_histology": fields.String(attribute="Primary histology"),
"chrome": fields.String,
"Mutation_somatic_status": fields.String(attribute="Mutation somatic status"),
"Primary_site": fields.String(attribute="Primary site"),
"PUBMED_PMID": fields.String,
"SNP": fields.String,
"snp_id": fields.String,
"position": fields.String,
"alt": fields.String,
"ref": fields.String,
"location": fields.String,
}
cosmic_list = {"cosmic_list": fields.Nested(cosmic_line), "data_length": fields.Integer}
class CosmicInfo(Resource):
@marshal_with(cosmic_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
parser.add_argument("page")
args = parser.parse_args()
search_ids = args["search_ids"]
page = args["page"]
per_page = 30
print(page)
print(search_ids)
# skip_records = per_page * (page - 1)
record_skip = (int(page) - 1) * per_page
print(search_ids)
if search_ids == "summary":
cosmic_list = (
mongo.db.cosmic_summary.find().skip(record_skip).limit(per_page)
)
cosmic_count = mongo.db.cosmic_summary.find().count()
elif search_ids:
condition = {"snp_id": search_ids}
cosmic_list = mongo.db.cosmic_summary.find(condition)
cosmic_count = mongo.db.cosmic_summary.find(condition).count()
else:
cosmic_list = {}
cosmic_count = 0
return {"cosmic_list": list(cosmic_list), "data_length": cosmic_count}
api.add_resource(CosmicInfo, "/api/cosmicinfo")
clinvar_line = {
"chrome": fields.String,
"position": fields.String,
"clinvar_id": fields.String,
"disease": fields.String,
"snp_rela": fields.String,
"snp_id": fields.String,
"ref": fields.String,
"alt": fields.String,
"location": fields.String,
}
clinvar_list = {
"clinvar_list": fields.Nested(clinvar_line),
"data_length": fields.Integer,
}
class ClinvarInfo(Resource):
@marshal_with(clinvar_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("search_ids", type=str)
parser.add_argument("page")
args = parser.parse_args()
search_ids = args["search_ids"]
per_page = 15
page = args["page"]
skip_records = (int(page) - 1) * per_page
if search_ids == "summary":
clinvar_list = (
mongo.db.clinvar_summary.find().skip(skip_records).limit(per_page)
)
clinvar_count = mongo.db.clinvar_summary.find().count()
elif search_ids:
condition = {"snp_id": search_ids}
clinvar_list = mongo.db.clinvar_summary.find(condition)
clinvar_count = mongo.db.clinvar_summary.find(condition).count()
else:
clinvar_list = {}
clinvar_count = 0
return {"clinvar_list": list(clinvar_list), "data_length": clinvar_count}
api.add_resource(ClinvarInfo, "/api/clinvarinfo")
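# Both CosmicInfo and ClinvarInfo accept the sentinel search_ids == "summary"
# to page through the whole collection; any other non-empty value is treated
# as an exact snp_id lookup.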
csv_table = {
"op": fields.String(attribute="ONTOLOGY_pathway"),
"id": fields.String(attribute="ID"),
"description": fields.String(attribute="Description"),
"gene_ratio": fields.String(attribute="GeneRatio"),
"bg_ratio": fields.String(attribute="BgRatio"),
"pvalue": fields.String,
"padjust": fields.String,
"qvalue": fields.String,
"gene_id": fields.String(attribute="geneID"),
"gene_count": fields.String(attribute="Count"),
"csv_file": fields.String,
}
enrich_line = {
"mirna_id": fields.String,
"variation_id": fields.String,
"alt": fields.String,
"ref": fields.String,
"enrich_type": fields.String,
"effect": fields.String,
"csv_file": fields.String,
"dot_file": fields.String,
"csv_table": fields.Nested(csv_table),
"go_pathway_count": fields.String,
}
enrich_result_list = {
"enrich_result_list": fields.Nested(enrich_line),
"enrich_result_count": fields.Integer,
}
class EnrichResult(Resource):
@marshal_with(enrich_result_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mirna_id", type=str)
parser.add_argument("variate_id")
args = parser.parse_args()
condition = {}
search = 0
if args["mirna_id"]:
search = 1
condition["mirna_id"] = args["mirna_id"]
match = {"$match": {"mirna_id": args["mirna_id"]}}
if args["variate_id"]:
search = 1
condition["variation_id"] = args["variate_id"]
match["$match"]["variation_id"] = args["variate_id"]
lookup_csv = {
"$lookup": {
"from": "enrichment_csv_v2",
"localField": "csv_file",
"foreignField": "csv_file",
"as": "csv_table",
}
}
if search:
pipline = [match, lookup_csv]
enrich_result_list = mongo.db.enrichment_summary_v2.aggregate(pipline)
enrich_result_count = mongo.db.enrichment_summary_v2.find(condition).count()
else:
enrich_result_list = {}
enrich_result_count = 0
return {
"enrich_result_list": list(enrich_result_list),
"enrich_result_count": enrich_result_count,
}
api.add_resource(EnrichResult, "/api/enrich_result")
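# The enrichment endpoint joins each summary document to its GO/pathway table
# rows via $lookup; the assembled pipeline is simply (field names as in the
# code above):
#   [{"$match": {"mirna_id": ..., "variation_id": ...}},
#    {"$lookup": {"from": "enrichment_csv_v2", "localField": "csv_file",
#                 "foreignField": "csv_file", "as": "csv_table"}}]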
var_item = {
"var_id": fields.String,
"ref": fields.String,
"alt": fields.String,
"color": fields.String,
"count": fields.Integer,
}
snp_distribute = {
"base": fields.String,
"pos": fields.Integer,
"var_list": fields.Nested(var_item),
"mature_id": fields.String,
}
snp_distribute_list = {
"snp_distribute_list": fields.Nested(snp_distribute),
"snp_distribute_count": fields.Integer,
}
class SnpDistribute(Resource):
@marshal_with(snp_distribute_list)
def get(self):
parser = reqparse.RequestParser()
parser.add_argument("mirna_id", type=str)
args = parser.parse_args()
condition = {}
if args["mirna_id"]:
condition["mature_id"] = args["mirna_id"]
snp_distribute_list = mongo.db.variation_distribute_deduplicate.find(
condition
)
snp_distribute_count = mongo.db.variation_distribute_deduplicate.find(
condition
).count()
else:
snp_distribute_list = []
snp_distribute_count = 0
return {
"snp_distribute_list": list(snp_distribute_list),
"snp_distribute_count": snp_distribute_count,
}
api.add_resource(SnpDistribute, "/api/snp_distribute")
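# SnpDistribute returns, for one mature miRNA, a per-position record (base,
# position) plus the list of variants observed there (var_id/ref/alt/color/
# count), which the front end presumably renders as a sequence-level variant
# track.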
class BIGDIndexBS(Resource):
def get(self):
filepath_indexbs = "index.bs"
return send_file(filepath_indexbs, mimetype="text/plain")
api.add_resource(BIGDIndexBS, "/index.bs")
|
[
"miRNASNP3.core.mongo.db.utr_cosmic_gain_redundancy.aggregate",
"miRNASNP3.core.mongo.db.indel_seed_mutation_gain_redundancy.find",
"miRNASNP3.core.mongo.db.utr_clinvar_gain_indel_redundancy.aggregate",
"miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.aggregate",
"miRNASNP3.core.mongo.db.snp_in_seed_v2.find",
"miRNASNP3.core.mongo.db.pri_mir_summary.find",
"miRNASNP3.core.mongo.db.utr_clinvar_loss_indel_redundancy.find",
"miRNASNP3.core.mongo.db.indel_utr_gain_v2_redundancy.find",
"miRNASNP3.core.mongo.db.mirnago.find",
"miRNASNP3.core.mongo.db.seed_cosmic_gain_redundancy.find",
"miRNASNP3.core.mongo.db.drv_in_utr_v3_redundancy.find",
"miRNASNP3.core.mongo.db.cosmic_summary.find",
"miRNASNP3.core.mongo.db.nci60_drug_correlation.aggregate",
"miRNASNP3.core.mongo.db.ld_region.aggregate",
"miRNASNP3.core.mongo.db.snv_utr_loss_v2_redundancy.find",
"miRNASNP3.core.mongo.db.indel_seed_mutation_gain_redundancy.aggregate",
"miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.aggregate",
"flask_restful.marshal_with",
"miRNASNP3.core.mongo.db.utr_cosmic_loss_redundancy.aggregate",
"miRNASNP3.core.mongo.db.mutation_summary_genelist.find",
"miRNASNP3.api.add_resource",
"miRNASNP3.core.mongo.db.seed_cosmic_loss_redundancy.aggregate",
"miRNASNP3.core.mongo.db.drv_in_seed_v3_redundancy.aggregate",
"miRNASNP3.core.mongo.db.utr_clinvar_loss_indel_redundancy.aggregate",
"miRNASNP3.core.mongo.db.phenotype_list.find",
"miRNASNP3.core.mongo.db.snp_summary.find",
"miRNASNP3.core.mongo.db.utr_cosmic_loss_redundancy.find",
"miRNASNP3.core.mongo.db.ld_region.find",
"miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.aggregate",
"miRNASNP3.core.mongo.db.indel_utr_gain_v2_redundancy.aggregate",
"miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.aggregate",
"miRNASNP3.core.mongo.db.utr_cosmic_loss_indel_redundancy.aggregate",
"miRNASNP3.core.mongo.db.indel_utr_loss_v2_redundancy.aggregate",
"miRNASNP3.core.mongo.db.utr_clinvar_gain_redundancy.find",
"miRNASNP3.core.mongo.db.snv_utr_loss_v2_redundancy.aggregate",
"miRNASNP3.core.mongo.db.seed_mature_pre_var_v1.find",
"miRNASNP3.core.mongo.db.enrichment_summary_v2.find",
"miRNASNP3.core.mongo.db.utr_cosmic_gain_indel_redundancy.find",
"miRNASNP3.core.mongo.db.seed_cosmic_gain_redundancy.aggregate",
"flask_restful.fields.List",
"miRNASNP3.core.mongo.db.drv_in_premir_v2.aggregate",
"flask.send_file",
"miRNASNP3.core.mongo.db.snv_utr_gain_v2_redundancy.find",
"miRNASNP3.core.mongo.db.snp_summary_genelist.find",
"miRNASNP3.core.mongo.db.mirna_expression.find",
"miRNASNP3.core.mongo.db.premir_summary_v1.find",
"miRNASNP3.core.mongo.db.clinvar_summary.find",
"flask_restful.reqparse.RequestParser",
"miRNASNP3.core.mongo.db.variation_distribute_deduplicate.find",
"miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.find",
"miRNASNP3.core.mongo.db.primir_altseq_mut_indel.find",
"miRNASNP3.core.mongo.db.gwas_catalog_alternative.find",
"miRNASNP3.core.mongo.db.indel_utr_loss_v2_redundancy.find",
"miRNASNP3.core.mongo.db.premir_info_addindel_v1.aggregate",
"miRNASNP3.core.mongo.db.utr_cosmic_gain_redundancy.find",
"miRNASNP3.core.mongo.db.enrichment_summary_v2.aggregate",
"miRNASNP3.core.mongo.db.utr_cosmic_gain_indel_redundancy.aggregate",
"miRNASNP3.core.mongo.db.snp_in_premir_v2.find",
"miRNASNP3.core.mongo.db.utr_cosmic_loss_indel_redundancy.find",
"miRNASNP3.core.mongo.db.seed_cosmic_loss_redundancy.find",
"miRNASNP3.core.mongo.db.utr_clinvar_gain_indel_redundancy.find",
"miRNASNP3.core.mongo.db.browseY.find",
"flask_restful.fields.String",
"flask_restful.fields.Nested",
"miRNASNP3.core.mongo.db.snv_utr_gain_v2_redundancy.aggregate",
"miRNASNP3.core.mongo.db.snp_in_utr_v2.find",
"miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.find",
"miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.find",
"miRNASNP3.core.mongo.db.utr_clinvar_gain_redundancy.aggregate",
"miRNASNP3.core.mongo.db.indel_seed_mutation_loss_redundancy.find",
"miRNASNP3.core.mongo.db.utr_clinvar_loss_redundancy.aggregate",
"miRNASNP3.core.mongo.db.indel_seed_mutation_loss_redundancy.aggregate",
"miRNASNP3.core.mongo.db.drv_in_premir_v3_redundancy.aggregate",
"miRNASNP3.core.mongo.db.primary_altseq_indel.find",
"miRNASNP3.core.mongo.db.utr_clinvar_loss_redundancy.find",
"miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.find"
] |
[((2097, 2153), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MirExpression', '"""/api/mirna_expression"""'], {}), "(MirExpression, '/api/mirna_expression')\n", (2113, 2153), False, 'from miRNASNP3 import app, api\n'), ((6993, 7053), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSeedGainFull', '"""/api/snp_seed_gain_full"""'], {}), "(SnpSeedGainFull, '/api/snp_seed_gain_full')\n", (7009, 7053), False, 'from miRNASNP3 import app, api\n'), ((11519, 11570), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSeedGain', '"""/api/snp_seed_gain"""'], {}), "(SnpSeedGain, '/api/snp_seed_gain')\n", (11535, 11570), False, 'from miRNASNP3 import app, api\n'), ((15659, 15719), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSeedLossFull', '"""/api/snp_seed_loss_full"""'], {}), "(SnpSeedLossFull, '/api/snp_seed_loss_full')\n", (15675, 15719), False, 'from miRNASNP3 import app, api\n'), ((19468, 19519), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSeedLoss', '"""/api/snp_seed_loss"""'], {}), "(SnpSeedLoss, '/api/snp_seed_loss')\n", (19484, 19519), False, 'from miRNASNP3 import app, api\n'), ((22749, 22800), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutSeedGain', '"""/api/mut_seed_gain"""'], {}), "(MutSeedGain, '/api/mut_seed_gain')\n", (22765, 22800), False, 'from miRNASNP3 import app, api\n'), ((26227, 26278), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutSeedLoss', '"""/api/mut_seed_loss"""'], {}), "(MutSeedLoss, '/api/mut_seed_loss')\n", (26243, 26278), False, 'from miRNASNP3 import app, api\n'), ((30985, 31034), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnvUtrLoss', '"""/api/snv_utr_loss"""'], {}), "(SnvUtrLoss, '/api/snv_utr_loss')\n", (31001, 31034), False, 'from miRNASNP3 import app, api\n'), ((33288, 33337), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnvUtrGain', '"""/api/snv_utr_gain"""'], {}), "(SnvUtrGain, '/api/snv_utr_gain')\n", (33304, 33337), False, 'from miRNASNP3 import app, api\n'), ((36656, 36705), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutUtrGain', '"""/api/mut_utr_gain"""'], {}), "(MutUtrGain, '/api/mut_utr_gain')\n", (36672, 36705), False, 'from miRNASNP3 import app, api\n'), ((40461, 40510), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutUtrLoss', '"""/api/mut_utr_loss"""'], {}), "(MutUtrLoss, '/api/mut_utr_loss')\n", (40477, 40510), False, 'from miRNASNP3 import app, api\n'), ((41664, 41709), 'miRNASNP3.api.add_resource', 'api.add_resource', (['BrowseMir', '"""/api/browsemir"""'], {}), "(BrowseMir, '/api/browsemir')\n", (41680, 41709), False, 'from miRNASNP3 import app, api\n'), ((44131, 44181), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MirSummary', '"""/api/mirna_summary"""'], {}), "(MirSummary, '/api/mirna_summary')\n", (44147, 44181), False, 'from miRNASNP3 import app, api\n'), ((45082, 45123), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MirInfo', '"""/api/mirinfo"""'], {}), "(MirInfo, '/api/mirinfo')\n", (45098, 45123), False, 'from miRNASNP3 import app, api\n'), ((47098, 47139), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MirDrug', '"""/api/mirdrug"""'], {}), "(MirDrug, '/api/mirdrug')\n", (47114, 47139), False, 'from miRNASNP3 import app, api\n'), ((48156, 48197), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MirnaGo', '"""/api/mirnago"""'], {}), "(MirnaGo, '/api/mirnago')\n", (48172, 48197), False, 'from miRNASNP3 import app, api\n'), ((49052, 49096), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MirnaKey', 
'"""/api/mirna_key"""'], {}), "(MirnaKey, '/api/mirna_key')\n", (49068, 49096), False, 'from miRNASNP3 import app, api\n'), ((51974, 52028), 'miRNASNP3.api.add_resource', 'api.add_resource', (['PrimirSummary', '"""/api/primir_summary"""'], {}), "(PrimirSummary, '/api/primir_summary')\n", (51990, 52028), False, 'from miRNASNP3 import app, api\n'), ((55353, 55401), 'miRNASNP3.api.add_resource', 'api.add_resource', (['PremirInfo', '"""/api/premir_info"""'], {}), "(PremirInfo, '/api/premir_info')\n", (55369, 55401), False, 'from miRNASNP3 import app, api\n'), ((57179, 57228), 'miRNASNP3.api.add_resource', 'api.add_resource', (['PrimirAlt', '"""/api/primir_altseq"""'], {}), "(PrimirAlt, '/api/primir_altseq')\n", (57195, 57228), False, 'from miRNASNP3 import app, api\n'), ((58913, 58966), 'miRNASNP3.api.add_resource', 'api.add_resource', (['PrimirMut', '"""/api/primir_altseq_mut"""'], {}), "(PrimirMut, '/api/primir_altseq_mut')\n", (58929, 58966), False, 'from miRNASNP3 import app, api\n'), ((60428, 60469), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpInfo', '"""/api/snpinfo"""'], {}), "(SnpInfo, '/api/snpinfo')\n", (60444, 60469), False, 'from miRNASNP3 import app, api\n'), ((61839, 61889), 'miRNASNP3.api.add_resource', 'api.add_resource', (['GwasCatalog', '"""/api/gwas_catalog"""'], {}), "(GwasCatalog, '/api/gwas_catalog')\n", (61855, 61889), False, 'from miRNASNP3 import app, api\n'), ((64845, 64884), 'miRNASNP3.api.add_resource', 'api.add_resource', (['LDinfo', '"""/api/ldinfo"""'], {}), "(LDinfo, '/api/ldinfo')\n", (64861, 64884), False, 'from miRNASNP3 import app, api\n'), ((70700, 70750), 'miRNASNP3.api.add_resource', 'api.add_resource', (['GetGene', '"""/api/snp_summary_gene"""'], {}), "(GetGene, '/api/snp_summary_gene')\n", (70716, 70750), False, 'from miRNASNP3 import app, api\n'), ((71629, 71687), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutGetGene', '"""/api/mutation_summary_gene"""'], {}), "(MutGetGene, '/api/mutation_summary_gene')\n", (71645, 71687), False, 'from miRNASNP3 import app, api\n'), ((72629, 72694), 'miRNASNP3.api.add_resource', 'api.add_resource', (['GetPhenotype', '"""/api/mutation_summary_phenotype"""'], {}), "(GetPhenotype, '/api/mutation_summary_phenotype')\n", (72645, 72694), False, 'from miRNASNP3 import app, api\n'), ((76143, 76210), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutationSummarySeed', '"""/api/mutation_summary_seed"""'], {}), "(MutationSummarySeed, '/api/mutation_summary_seed')\n", (76159, 76210), False, 'from miRNASNP3 import app, api\n'), ((80554, 80625), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutationSummaryMature', '"""/api/mutation_summary_mature"""'], {}), "(MutationSummaryMature, '/api/mutation_summary_mature')\n", (80570, 80625), False, 'from miRNASNP3 import app, api\n'), ((84242, 84313), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutationSummaryPremir', '"""/api/mutation_summary_premir"""'], {}), "(MutationSummaryPremir, '/api/mutation_summary_premir')\n", (84258, 84313), False, 'from miRNASNP3 import app, api\n'), ((88707, 88774), 'miRNASNP3.api.add_resource', 'api.add_resource', (['MutationSummaryUtr3', '"""/api/mutation_summary_utr3"""'], {}), "(MutationSummaryUtr3, '/api/mutation_summary_utr3')\n", (88723, 88774), False, 'from miRNASNP3 import app, api\n'), ((96387, 96435), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSummary', '"""/api/snp_summary"""'], {}), "(SnpSummary, '/api/snp_summary')\n", (96403, 96435), False, 'from miRNASNP3 import app, api\n'), ((98993, 
99050), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSummarySeed', '"""/api/snp_summary_seed"""'], {}), "(SnpSummarySeed, '/api/snp_summary_seed')\n", (99009, 99050), False, 'from miRNASNP3 import app, api\n'), ((101895, 101956), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSummaryMature', '"""/api/snp_summary_mature"""'], {}), "(SnpSummaryMature, '/api/snp_summary_mature')\n", (101911, 101956), False, 'from miRNASNP3 import app, api\n'), ((104327, 104388), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSummaryPremir', '"""/api/snp_summary_premir"""'], {}), "(SnpSummaryPremir, '/api/snp_summary_premir')\n", (104343, 104388), False, 'from miRNASNP3 import app, api\n'), ((107533, 107590), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpSummaryUtr3', '"""/api/snp_summary_utr3"""'], {}), "(SnpSummaryUtr3, '/api/snp_summary_utr3')\n", (107549, 107590), False, 'from miRNASNP3 import app, api\n'), ((109300, 109347), 'miRNASNP3.api.add_resource', 'api.add_resource', (['CosmicInfo', '"""/api/cosmicinfo"""'], {}), "(CosmicInfo, '/api/cosmicinfo')\n", (109316, 109347), False, 'from miRNASNP3 import app, api\n'), ((110729, 110778), 'miRNASNP3.api.add_resource', 'api.add_resource', (['ClinvarInfo', '"""/api/clinvarinfo"""'], {}), "(ClinvarInfo, '/api/clinvarinfo')\n", (110745, 110778), False, 'from miRNASNP3 import app, api\n'), ((113095, 113147), 'miRNASNP3.api.add_resource', 'api.add_resource', (['EnrichResult', '"""/api/enrich_result"""'], {}), "(EnrichResult, '/api/enrich_result')\n", (113111, 113147), False, 'from miRNASNP3 import app, api\n'), ((114394, 114448), 'miRNASNP3.api.add_resource', 'api.add_resource', (['SnpDistribute', '"""/api/snp_distribute"""'], {}), "(SnpDistribute, '/api/snp_distribute')\n", (114410, 114448), False, 'from miRNASNP3 import app, api\n'), ((114605, 114647), 'miRNASNP3.api.add_resource', 'api.add_resource', (['BIGDIndexBS', '"""/index.bs"""'], {}), "(BIGDIndexBS, '/index.bs')\n", (114621, 114647), False, 'from miRNASNP3 import app, api\n'), ((1113, 1140), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_exp_df'], {}), '(mirna_exp_df)\n', (1126, 1140), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((1259, 1290), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (1272, 1290), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((1378, 1413), 'flask_restful.marshal_with', 'marshal_with', (['mirna_expression_list'], {}), '(mirna_expression_list)\n', (1390, 1413), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((2701, 2737), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""prob_exac"""'}), "(attribute='prob_exac')\n", (2714, 2737), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((3892, 3918), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_exp_df'], {}), '(gene_exp_df)\n', (3905, 3918), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4006, 4032), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (4017, 4032), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4392, 4415), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_info'], {}), '(snp_info)\n', (4405, 4415), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4434, 
4458), 'flask_restful.fields.Nested', 'fields.Nested', (['site_info'], {}), '(site_info)\n', (4447, 4458), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4476, 4499), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info'], {}), '(utr_info)\n', (4489, 4499), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4524, 4554), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (4537, 4554), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4580, 4611), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (4593, 4611), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4690, 4718), 'flask_restful.fields.Nested', 'fields.Nested', (['gainsite_info'], {}), '(gainsite_info)\n', (4703, 4718), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4804, 4831), 'flask_restful.marshal_with', 'marshal_with', (['snp_seed_gain'], {}), '(snp_seed_gain)\n', (4816, 4831), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((7089, 7116), 'flask_restful.marshal_with', 'marshal_with', (['snp_seed_gain'], {}), '(snp_seed_gain)\n', (7101, 7116), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((12474, 12495), 'flask_restful.fields.Nested', 'fields.Nested', (['cor_df'], {}), '(cor_df)\n', (12487, 12495), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((12894, 12917), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_info'], {}), '(snp_info)\n', (12907, 12917), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((12936, 12960), 'flask_restful.fields.Nested', 'fields.Nested', (['site_info'], {}), '(site_info)\n', (12949, 12960), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((12978, 13001), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info'], {}), '(utr_info)\n', (12991, 13001), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((13026, 13056), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (13039, 13056), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((13082, 13113), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (13095, 13113), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((13140, 13172), 'flask_restful.fields.Nested', 'fields.Nested', (['corelation_detail'], {}), '(corelation_detail)\n', (13153, 13172), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((13226, 13254), 'flask_restful.fields.Nested', 'fields.Nested', (['losssite_info'], {}), '(losssite_info)\n', (13239, 13254), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((13340, 13372), 'flask_restful.marshal_with', 'marshal_with', (['snp_seed_loss_list'], {}), '(snp_seed_loss_list)\n', (13352, 13372), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((15755, 15787), 'flask_restful.marshal_with', 'marshal_with', (['snp_seed_loss_list'], {}), '(snp_seed_loss_list)\n', (15767, 15787), False, 'from flask_restful 
import Resource, fields, marshal_with, reqparse, marshal\n'), ((20037, 20060), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_info'], {}), '(mut_info)\n', (20050, 20060), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20079, 20103), 'flask_restful.fields.Nested', 'fields.Nested', (['site_info'], {}), '(site_info)\n', (20092, 20103), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20121, 20144), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info'], {}), '(utr_info)\n', (20134, 20144), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20169, 20199), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (20182, 20199), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20225, 20256), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (20238, 20256), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20309, 20341), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_gainsite_info'], {}), '(mut_gainsite_info)\n', (20322, 20341), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20424, 20456), 'flask_restful.marshal_with', 'marshal_with', (['mut_seed_gain_list'], {}), '(mut_seed_gain_list)\n', (20436, 20456), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23176, 23199), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_info'], {}), '(mut_info)\n', (23189, 23199), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23218, 23242), 'flask_restful.fields.Nested', 'fields.Nested', (['site_info'], {}), '(site_info)\n', (23231, 23242), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23260, 23283), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info'], {}), '(utr_info)\n', (23273, 23283), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23308, 23338), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (23321, 23338), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23364, 23395), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (23377, 23395), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23422, 23454), 'flask_restful.fields.Nested', 'fields.Nested', (['corelation_detail'], {}), '(corelation_detail)\n', (23435, 23454), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23508, 23540), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_losssite_info'], {}), '(mut_losssite_info)\n', (23521, 23540), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((23623, 23655), 'flask_restful.marshal_with', 'marshal_with', (['mut_seed_loss_list'], {}), '(mut_seed_loss_list)\n', (23635, 23655), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((27460, 27486), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (27471, 27486), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28033, 28064), 
'flask_restful.fields.Nested', 'fields.Nested', (['experiment_valid'], {}), '(experiment_valid)\n', (28046, 28064), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28120, 28148), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_info_line'], {}), '(snp_info_line)\n', (28133, 28148), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28166, 28194), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info_line'], {}), '(utr_info_line)\n', (28179, 28194), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28213, 28241), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_site_info'], {}), '(utr_site_info)\n', (28226, 28241), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28266, 28296), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (28279, 28296), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28322, 28353), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (28335, 28353), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28380, 28412), 'flask_restful.fields.Nested', 'fields.Nested', (['corelation_detail'], {}), '(corelation_detail)\n', (28393, 28412), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28455, 28482), 'flask_restful.fields.Nested', 'fields.Nested', (['snv_utr_loss'], {}), '(snv_utr_loss)\n', (28468, 28482), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((28559, 28586), 'flask_restful.marshal_with', 'marshal_with', (['utr_loss_list'], {}), '(utr_loss_list)\n', (28571, 28586), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31163, 31191), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_info_line'], {}), '(snp_info_line)\n', (31176, 31191), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31209, 31237), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info_line'], {}), '(utr_info_line)\n', (31222, 31237), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31256, 31284), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_site_info'], {}), '(utr_site_info)\n', (31269, 31284), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31309, 31339), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (31322, 31339), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31365, 31396), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (31378, 31396), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31440, 31467), 'flask_restful.fields.Nested', 'fields.Nested', (['snv_utr_gain'], {}), '(snv_utr_gain)\n', (31453, 31467), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((31544, 31571), 'flask_restful.marshal_with', 'marshal_with', (['utr_gain_list'], {}), '(utr_gain_list)\n', (31556, 31571), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((33471, 33494), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_info'], {}), 
'(mut_info)\n', (33484, 33494), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((33513, 33541), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_site_info'], {}), '(utr_site_info)\n', (33526, 33541), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((33559, 33587), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info_line'], {}), '(utr_info_line)\n', (33572, 33587), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((33612, 33642), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (33625, 33642), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((33668, 33699), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (33681, 33699), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((33746, 33778), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_gain_utr_site'], {}), '(mut_gain_utr_site)\n', (33759, 33778), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((33859, 33885), 'flask_restful.marshal_with', 'marshal_with', (['mut_utr_gain'], {}), '(mut_utr_gain)\n', (33871, 33885), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((36917, 36940), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_info'], {}), '(mut_info)\n', (36930, 36940), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((36958, 36986), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_info_line'], {}), '(utr_info_line)\n', (36971, 36986), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((37005, 37033), 'flask_restful.fields.Nested', 'fields.Nested', (['utr_site_info'], {}), '(utr_site_info)\n', (37018, 37033), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((37058, 37088), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_expression'], {}), '(gene_expression)\n', (37071, 37088), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((37114, 37145), 'flask_restful.fields.Nested', 'fields.Nested', (['mirna_expression'], {}), '(mirna_expression)\n', (37127, 37145), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((37172, 37204), 'flask_restful.fields.Nested', 'fields.Nested', (['corelation_detail'], {}), '(corelation_detail)\n', (37185, 37204), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((37251, 37283), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_loss_utr_site'], {}), '(mut_loss_utr_site)\n', (37264, 37283), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((37364, 37390), 'flask_restful.marshal_with', 'marshal_with', (['mut_utr_loss'], {}), '(mut_utr_loss)\n', (37376, 37390), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((40989, 41014), 'flask_restful.marshal_with', 'marshal_with', (['browse_list'], {}), '(browse_list)\n', (41001, 41014), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((42793, 42819), 'flask_restful.fields.Nested', 'fields.Nested', (['mir_summary'], {}), '(mir_summary)\n', (42806, 42819), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((42901, 42933), 'flask_restful.marshal_with', 'marshal_with', (['mirna_summary_list'], {}), '(mirna_summary_list)\n', (42913, 42933), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((44214, 44246), 'flask_restful.marshal_with', 'marshal_with', (['mirna_summary_list'], {}), '(mirna_summary_list)\n', (44226, 44246), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((45505, 45529), 'flask_restful.fields.Nested', 'fields.Nested', (['drug_name'], {}), '(drug_name)\n', (45518, 45529), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((45560, 45585), 'flask_restful.fields.Nested', 'fields.Nested', (['nci60_item'], {}), '(nci60_item)\n', (45573, 45585), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((45650, 45672), 'flask_restful.marshal_with', 'marshal_with', (['drug_cor'], {}), '(drug_cor)\n', (45662, 45672), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((47182, 47208), 'flask_restful.fields.Nested', 'fields.Nested', (['mir_summary'], {}), '(mir_summary)\n', (47195, 47208), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((47233, 47259), 'flask_restful.fields.Nested', 'fields.Nested', (['mir_summary'], {}), '(mir_summary)\n', (47246, 47259), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((47445, 47472), 'flask_restful.fields.Nested', 'fields.Nested', (['mirnago_item'], {}), '(mirnago_item)\n', (47458, 47472), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((47545, 47571), 'flask_restful.marshal_with', 'marshal_with', (['mirnago_list'], {}), '(mirnago_list)\n', (47557, 47571), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((48231, 48259), 'flask_restful.marshal_with', 'marshal_with', (['mirna_key_list'], {}), '(mirna_key_list)\n', (48243, 48259), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((49444, 49470), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (49455, 49470), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((49487, 49513), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (49498, 49513), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((49954, 49980), 'flask_restful.fields.Nested', 'fields.Nested', (['mature_info'], {}), '(mature_info)\n', (49967, 49980), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((50035, 50064), 'flask_restful.fields.Nested', 'fields.Nested', (['primir_summary'], {}), '(primir_summary)\n', (50048, 50064), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((50150, 50183), 'flask_restful.marshal_with', 'marshal_with', (['primir_summary_list'], {}), '(primir_summary_list)\n', (50162, 50183), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((52958, 52984), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (52969, 52984), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53033, 53059), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (53044, 53059), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53424, 53447), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_item'], {}), '(mut_item)\n', (53437, 53447), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53464, 53487), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_item'], {}), '(mut_item)\n', (53477, 53487), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53500, 53523), 'flask_restful.fields.Nested', 'fields.Nested', (['mut_item'], {}), '(mut_item)\n', (53513, 53523), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53568, 53597), 'flask_restful.fields.Nested', 'fields.Nested', (['premir_context'], {}), '(premir_context)\n', (53581, 53597), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53614, 53640), 'flask_restful.fields.Nested', 'fields.Nested', (['mir_summary'], {}), '(mir_summary)\n', (53627, 53640), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53723, 53752), 'flask_restful.fields.Nested', 'fields.Nested', (['mirset_v9_item'], {}), '(mirset_v9_item)\n', (53736, 53752), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53791, 53817), 'flask_restful.fields.Nested', 'fields.Nested', (['premir_info'], {}), '(premir_info)\n', (53804, 53817), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53854, 53884), 'flask_restful.marshal_with', 'marshal_with', (['premir_info_list'], {}), '(premir_info_list)\n', (53866, 53884), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((55672, 55706), 'flask_restful.fields.String', ([], {'attribute': '"snp_alt"'}), "(attribute='snp_alt')\n", (55685, 55706), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((56123, 56145), 'flask_restful.fields.Nested', 'fields.Nested', (['pri_alt'], {}), '(pri_alt)\n', (56136, 56145), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((56223, 56252), 'flask_restful.marshal_with', 'marshal_with', (['primir_alt_list'], {}), '(primir_alt_list)\n', (56235, 56252), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((57865, 57890), 'flask_restful.fields.Nested', 'fields.Nested', (['primir_mut'], {}), '(primir_mut)\n', (57878, 57890), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((57968, 57997), 'flask_restful.marshal_with', 'marshal_with', (['primir_mut_list'], {}), '(primir_mut_list)\n', (57980, 57997), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((59417, 59444), 'flask_restful.fields.Nested', 'fields.Nested', (['snpinfo_line'], {}), '(snpinfo_line)\n', (59430, 59444), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((59511, 59532), 'flask_restful.marshal_with', 'marshal_with', (['snpinfo'], {}), '(snpinfo)\n', (59523, 59532), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60502, 60533), 'flask_restful.fields.String', ([], {'attribute': '"SNPS"'}), "(attribute='SNPS')\n", (60515, 60533), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60554, 60606), 'flask_restful.fields.String', ([], {'attribute': '"STRONGEST_SNP-RISK_ALLELE"'}), "(attribute='STRONGEST_SNP-RISK_ALLELE')\n", (60567, 60606), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60631, 60679), 'flask_restful.fields.String', ([], {'attribute': '"RISK_ALLELE_FREQUENCY"'}), "(attribute='RISK_ALLELE_FREQUENCY')\n", (60644, 60679), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60696, 60736), 'flask_restful.fields.String', ([], {'attribute': '"DISEASE/TRAIT"'}), "(attribute='DISEASE/TRAIT')\n", (60709, 60736), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60759, 60799), 'flask_restful.fields.String', ([], {'attribute': '"REPORTED_GENE"'}), "(attribute='REPORTED_GENE')\n", (60772, 60799), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60816, 60850), 'flask_restful.fields.String', ([], {'attribute': '"P-VALUE"'}), "(attribute='P-VALUE')\n", (60829, 60850), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60867, 60904), 'flask_restful.fields.String', ([], {'attribute': '"OR_or_BETA"'}), "(attribute='OR_or_BETA')\n", (60880, 60904), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60918, 60955), 'flask_restful.fields.String', ([], {'attribute': '"CI_95_TEXT"'}), "(attribute='CI_95_TEXT')\n", (60931, 60955), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((60974, 61009), 'flask_restful.fields.String', ([], {'attribute': '"PUBMEDID"'}), "(attribute='PUBMEDID')\n", (60987, 61009), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((61030, 61061), 'flask_restful.fields.String', ([], {'attribute': '"LINK"'}), "(attribute='LINK')\n", (61043, 61061), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((61103, 61130), 'flask_restful.fields.Nested', 'fields.Nested', (['catalog_line'], {}), '(catalog_line)\n', (61116, 61130), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((61207, 61233), 'flask_restful.marshal_with', 'marshal_with', (['catalog_list'], {}), '(catalog_list)\n', (61219, 61233), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62287, 62320), 'flask_restful.fields.String', ([], {'attribute': '"chrome"'}), "(attribute='chrome')\n", (62300, 62320), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62342, 62377), 'flask_restful.fields.String', ([], {'attribute': '"position"'}), "(attribute='position')\n", (62355, 62377), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62561, 62586), 'flask_restful.fields.Nested', 'fields.Nested', (['ld_info_id'], {}), '(ld_info_id)\n', (62574, 62586), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62604, 62627), 'flask_restful.fields.Nested', 'fields.Nested', (['tag_info'], {}), '(tag_info)\n', (62617, 62627), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62652, 62682), 'flask_restful.fields.Nested', 'fields.Nested', (['relate_tag_info'], {}), '(relate_tag_info)\n', (62665, 62682), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62704, 62731), 'flask_restful.fields.Nested', 'fields.Nested', (['catalog_line'], {}), '(catalog_line)\n', (62717, 62731), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62762, 62784), 'flask_restful.fields.Nested', 'fields.Nested', (['ld_info'], {}), '(ld_info)\n', (62775, 62784), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62850, 62876), 'flask_restful.marshal_with', 'marshal_with', (['ld_info_list'], {}), '(ld_info_list)\n', (62862, 62876), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((65575, 65609), 'flask_restful.fields.Nested', 'fields.Nested', (['disease_pubmed_item'], {}), '(disease_pubmed_item)\n', (65588, 65609), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((65729, 65757), 'flask_restful.fields.Nested', 'fields.Nested', (['mutation_line'], {}), '(mutation_line)\n', (65742, 65757), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((65786, 65812), 'flask_restful.fields.Nested', 'fields.Nested', (['count_group'], {}), '(count_group)\n', (65799, 65812), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((65842, 65870), 'flask_restful.fields.Nested', 'fields.Nested', (['mutation_line'], {}), '(mutation_line)\n', (65855, 65870), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((65901, 65927), 'flask_restful.fields.Nested', 'fields.Nested', (['count_group'], {}), '(count_group)\n', (65914, 65927), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((65957, 65985), 'flask_restful.fields.Nested', 'fields.Nested', (['mutation_line'], {}), '(mutation_line)\n', (65970, 65985), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((66016, 66042), 'flask_restful.fields.Nested', 'fields.Nested', (['count_group'], {}), '(count_group)\n', (66029, 66042), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((66070, 66098), 'flask_restful.fields.Nested', 'fields.Nested', (['mutation_line'], {}), '(mutation_line)\n', (66083, 66098), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((66227, 66255), 'flask_restful.fields.Nested', 'fields.Nested', (['mutation_line'], {}), '(mutation_line)\n', (66240, 66255), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((66287, 66313), 'flask_restful.fields.Nested', 'fields.Nested', (['count_group'], {}), '(count_group)\n', (66300, 66313), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((69759, 69785), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_symbol'], {}), '(gene_symbol)\n', (69772, 69785), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((69805, 69831), 'flask_restful.fields.Nested', 'fields.Nested', (['gene_symbol'], {}), '(gene_symbol)\n', (69818, 69831), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((69867, 69890), 'flask_restful.marshal_with', 'marshal_with', (['gene_list'], {}), '(gene_list)\n', (69879, 69890), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((70786, 70809), 'flask_restful.marshal_with', 'marshal_with', (['gene_list'], {}), '(gene_list)\n', (70798, 70809), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((71771, 71800), 'flask_restful.fields.Nested', 'fields.Nested', (['phenotype_line'], {}), '(phenotype_line)\n', (71784, 71800), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((71839, 71867), 'flask_restful.marshal_with', 'marshal_with', (['phenotype_list'], {}), '(phenotype_list)\n', (71851, 71867), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((72739, 72774), 'flask_restful.marshal_with', 'marshal_with', (['mutation_summary_list'], {}), '(mutation_summary_list)\n', (72751, 72774), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((76257, 76292), 'flask_restful.marshal_with', 'marshal_with', (['mutation_summary_list'], {}), '(mutation_summary_list)\n', (76269, 76292), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((80672, 80707), 'flask_restful.marshal_with', 'marshal_with', (['mutation_summary_list'], {}), '(mutation_summary_list)\n', (80684, 80707), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((84358, 84393), 'flask_restful.marshal_with', 'marshal_with', (['mutation_summary_list'], {}), '(mutation_summary_list)\n', (84370, 84393), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((90950, 90973), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_line'], {}), '(snp_line)\n', (90963, 90973), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((91036, 91059), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_line'], {}), '(snp_line)\n', (91049, 91059), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((91122, 91145), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_line'], {}), '(snp_line)\n', (91135, 91145), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((91208, 91231), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_line'], {}), '(snp_line)\n', (91221, 91231), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((91297, 91320), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_line'], {}), '(snp_line)\n', (91310, 91320), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((91400, 91430), 'flask_restful.marshal_with', 'marshal_with', (['snp_summary_list'], {}), '(snp_summary_list)\n', (91412, 91430), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((96475, 96505), 'flask_restful.marshal_with', 'marshal_with', (['snp_summary_list'], {}), '(snp_summary_list)\n', (96487, 96505), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((99092, 99122), 'flask_restful.marshal_with', 'marshal_with', (['snp_summary_list'], {}), '(snp_summary_list)\n', (99104, 99122), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((101998, 102028), 'flask_restful.marshal_with', 'marshal_with', (['snp_summary_list'], {}), '(snp_summary_list)\n', (102010, 102028), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), 
((104428, 104458), 'flask_restful.marshal_with', 'marshal_with', (['snp_summary_list'], {}), '(snp_summary_list)\n', (104440, 104458), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((107693, 107737), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""Primary histology"""'}), "(attribute='Primary histology')\n", (107706, 107737), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((107799, 107849), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""Mutation somatic status"""'}), "(attribute='Mutation somatic status')\n", (107812, 107849), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((107871, 107910), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""Primary site"""'}), "(attribute='Primary site')\n", (107884, 107910), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((108147, 108173), 'flask_restful.fields.Nested', 'fields.Nested', (['cosmic_line'], {}), '(cosmic_line)\n', (108160, 108173), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((108241, 108266), 'flask_restful.marshal_with', 'marshal_with', (['cosmic_list'], {}), '(cosmic_list)\n', (108253, 108266), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((109671, 109698), 'flask_restful.fields.Nested', 'fields.Nested', (['clinvar_line'], {}), '(clinvar_line)\n', (109684, 109698), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((109773, 109799), 'flask_restful.marshal_with', 'marshal_with', (['clinvar_list'], {}), '(clinvar_list)\n', (109785, 109799), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((110804, 110847), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""ONTOLOGY_pathway"""'}), "(attribute='ONTOLOGY_pathway')\n", (110817, 110847), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((110859, 110888), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""ID"""'}), "(attribute='ID')\n", (110872, 110888), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((110909, 110947), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""Description"""'}), "(attribute='Description')\n", (110922, 110947), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((110967, 111003), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""GeneRatio"""'}), "(attribute='GeneRatio')\n", (110980, 111003), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((111021, 111055), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""BgRatio"""'}), "(attribute='BgRatio')\n", (111034, 111055), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((111160, 111193), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""geneID"""'}), "(attribute='geneID')\n", (111173, 111193), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((111213, 111245), 'flask_restful.fields.String', 'fields.String', ([], {'attribute': '"""Count"""'}), "(attribute='Count')\n", (111226, 111245), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), 
((111557, 111581), 'flask_restful.fields.Nested', 'fields.Nested', (['csv_table'], {}), '(csv_table)\n', (111570, 111581), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((111674, 111700), 'flask_restful.fields.Nested', 'fields.Nested', (['enrich_line'], {}), '(enrich_line)\n', (111687, 111700), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((111784, 111816), 'flask_restful.marshal_with', 'marshal_with', (['enrich_result_list'], {}), '(enrich_result_list)\n', (111796, 111816), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((113392, 113415), 'flask_restful.fields.Nested', 'fields.Nested', (['var_item'], {}), '(var_item)\n', (113405, 113415), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((113503, 113532), 'flask_restful.fields.Nested', 'fields.Nested', (['snp_distribute'], {}), '(snp_distribute)\n', (113516, 113532), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((113618, 113651), 'flask_restful.marshal_with', 'marshal_with', (['snp_distribute_list'], {}), '(snp_distribute_list)\n', (113630, 113651), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((1450, 1474), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1472, 1474), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((4868, 4892), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (4890, 4892), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((6610, 6663), 'miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.aggregate', 'mongo.db.seed_gain_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (6654, 6663), False, 'from miRNASNP3.core import mongo\n'), ((6695, 6752), 'miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.aggregate', 'mongo.db.seed_gain_addindel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (6743, 6752), False, 'from miRNASNP3.core import mongo\n'), ((7153, 7177), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (7175, 7177), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((9012, 9065), 'miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.aggregate', 'mongo.db.seed_gain_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (9056, 9065), False, 'from miRNASNP3.core import mongo\n'), ((9097, 9154), 'miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.aggregate', 'mongo.db.seed_gain_addindel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (9145, 9154), False, 'from miRNASNP3.core import mongo\n'), ((13409, 13433), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (13431, 13433), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((15214, 15267), 'miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.aggregate', 'mongo.db.seed_loss_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (15258, 15267), False, 'from miRNASNP3.core import mongo\n'), ((15329, 15386), 'miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.aggregate', 'mongo.db.seed_loss_addindel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (15377, 15386), False, 'from miRNASNP3.core import mongo\n'), ((15824, 15848), 'flask_restful.reqparse.RequestParser', 
'reqparse.RequestParser', ([], {}), '()\n', (15846, 15848), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((20493, 20517), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (20515, 20517), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((21925, 21980), 'miRNASNP3.core.mongo.db.seed_cosmic_gain_redundancy.aggregate', 'mongo.db.seed_cosmic_gain_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (21971, 21980), False, 'from miRNASNP3.core import mongo\n'), ((22157, 22220), 'miRNASNP3.core.mongo.db.indel_seed_mutation_gain_redundancy.aggregate', 'mongo.db.indel_seed_mutation_gain_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (22211, 22220), False, 'from miRNASNP3.core import mongo\n'), ((23692, 23716), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (23714, 23716), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((25402, 25457), 'miRNASNP3.core.mongo.db.seed_cosmic_loss_redundancy.aggregate', 'mongo.db.seed_cosmic_loss_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (25448, 25457), False, 'from miRNASNP3.core import mongo\n'), ((25634, 25697), 'miRNASNP3.core.mongo.db.indel_seed_mutation_loss_redundancy.aggregate', 'mongo.db.indel_seed_mutation_loss_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (25688, 25697), False, 'from miRNASNP3.core import mongo\n'), ((28623, 28647), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (28645, 28647), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((30404, 30458), 'miRNASNP3.core.mongo.db.snv_utr_loss_v2_redundancy.aggregate', 'mongo.db.snv_utr_loss_v2_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (30449, 30458), False, 'from miRNASNP3.core import mongo\n'), ((30578, 30634), 'miRNASNP3.core.mongo.db.indel_utr_loss_v2_redundancy.aggregate', 'mongo.db.indel_utr_loss_v2_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (30625, 30634), False, 'from miRNASNP3.core import mongo\n'), ((31608, 31632), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (31630, 31632), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((32707, 32761), 'miRNASNP3.core.mongo.db.snv_utr_gain_v2_redundancy.aggregate', 'mongo.db.snv_utr_gain_v2_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (32752, 32761), False, 'from miRNASNP3.core import mongo\n'), ((32881, 32937), 'miRNASNP3.core.mongo.db.indel_utr_gain_v2_redundancy.aggregate', 'mongo.db.indel_utr_gain_v2_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (32928, 32937), False, 'from miRNASNP3.core import mongo\n'), ((33922, 33946), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (33944, 33946), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((37427, 37451), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (37449, 37451), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((40926, 40952), 'flask_restful.fields.Nested', 'fields.Nested', (['browse_info'], {}), '(browse_info)\n', (40939, 40952), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'), ((41051, 41075), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', 
(41073, 41075), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((42970, 42994), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (42992, 42994), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((44283, 44307), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (44305, 44307), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((45709, 45733), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (45731, 45733), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((47608, 47632), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (47630, 47632), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((48296, 48320), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (48318, 48320), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((50220, 50244), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (50242, 50244), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53259, 53285), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (53270, 53285), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53320, 53346), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (53331, 53346), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53677, 53703), 'flask_restful.fields.List', 'fields.List', (['fields.String'], {}), '(fields.String)\n', (53688, 53703), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((53921, 53945), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (53943, 53945), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((56289, 56313), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (56311, 56313), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((58034, 58058), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (58056, 58058), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((59569, 59593), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (59591, 59593), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((61270, 61294), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (61292, 61294), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((62913, 62937), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (62935, 62937), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((64651, 64688), 'miRNASNP3.core.mongo.db.ld_region.aggregate', 'mongo.db.ld_region.aggregate', (['pipline'], {}), '(pipline)\n', (64679, 64688), False, 'from miRNASNP3.core import mongo\n'),
((69927, 69951), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (69949, 69951), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((70846, 70870), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (70868, 70870), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((71904, 71928), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (71926, 71928), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((72811, 72835), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (72833, 72835), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((75688, 75741), 'miRNASNP3.core.mongo.db.drv_in_seed_v3_redundancy.aggregate', 'mongo.db.drv_in_seed_v3_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (75732, 75741), False, 'from miRNASNP3.core import mongo\n'),
((75909, 75968), 'miRNASNP3.core.mongo.db.drv_in_seed_v3_redundancy.aggregate', 'mongo.db.drv_in_seed_v3_redundancy.aggregate', (['pipline_count'], {}), '(pipline_count)\n', (75953, 75968), False, 'from miRNASNP3.core import mongo\n'),
((76329, 76353), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (76351, 76353), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((79150, 79205), 'miRNASNP3.core.mongo.db.drv_in_premir_v3_redundancy.aggregate', 'mongo.db.drv_in_premir_v3_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (79196, 79205), False, 'from miRNASNP3.core import mongo\n'),
((79401, 79462), 'miRNASNP3.core.mongo.db.drv_in_premir_v3_redundancy.aggregate', 'mongo.db.drv_in_premir_v3_redundancy.aggregate', (['pipline_count'], {}), '(pipline_count)\n', (79447, 79462), False, 'from miRNASNP3.core import mongo\n'),
((80065, 80109), 'miRNASNP3.core.mongo.db.drv_in_premir_v2.aggregate', 'mongo.db.drv_in_premir_v2.aggregate', (['pipline'], {}), '(pipline)\n', (80100, 80109), False, 'from miRNASNP3.core import mongo\n'),
((80140, 80190), 'miRNASNP3.core.mongo.db.drv_in_premir_v2.aggregate', 'mongo.db.drv_in_premir_v2.aggregate', (['pipline_count'], {}), '(pipline_count)\n', (80175, 80190), False, 'from miRNASNP3.core import mongo\n'),
((80744, 80768), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (80766, 80768), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((83773, 83828), 'miRNASNP3.core.mongo.db.drv_in_premir_v3_redundancy.aggregate', 'mongo.db.drv_in_premir_v3_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (83819, 83828), False, 'from miRNASNP3.core import mongo\n'),
((83998, 84059), 'miRNASNP3.core.mongo.db.drv_in_premir_v3_redundancy.aggregate', 'mongo.db.drv_in_premir_v3_redundancy.aggregate', (['pipline_count'], {}), '(pipline_count)\n', (84044, 84059), False, 'from miRNASNP3.core import mongo\n'),
((84430, 84454), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (84452, 84454), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((91467, 91491), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (91489, 91491), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((95573, 95612), 'miRNASNP3.core.mongo.db.snp_in_seed_v2.find', 'mongo.db.snp_in_seed_v2.find', (['condition'], {}), '(condition)\n', (95601, 95612), False, 'from miRNASNP3.core import mongo\n'),
((95642, 95683), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (95672, 95683), False, 'from miRNASNP3.core import mongo\n'),
((95711, 95749), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (95738, 95749), False, 'from miRNASNP3.core import mongo\n'),
((96542, 96566), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (96564, 96566), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((99159, 99183), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (99181, 99183), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((102065, 102089), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (102087, 102089), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((104495, 104519), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (104517, 104519), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((108303, 108327), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (108325, 108327), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((109836, 109860), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (109858, 109860), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((111853, 111877), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (111875, 111877), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((113688, 113712), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (113710, 113712), False, 'from flask_restful import Resource, fields, marshal_with, reqparse, marshal\n'),
((114552, 114602), 'flask.send_file', 'send_file', (['filepath_indexbs'], {'mimetype': '"text/plain"'}), "(filepath_indexbs, mimetype='text/plain')\n", (114561, 114602), False, 'from flask import send_file\n'),
((1719, 1760), 'miRNASNP3.core.mongo.db.mirna_expression.find', 'mongo.db.mirna_expression.find', (['condition'], {}), '(condition)\n', (1749, 1760), False, 'from miRNASNP3.core import mongo\n'),
((17801, 17854), 'miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.aggregate', 'mongo.db.seed_loss_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (17845, 17854), False, 'from miRNASNP3.core import mongo\n'),
((17920, 17977), 'miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.aggregate', 'mongo.db.seed_loss_addindel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (17968, 17977), False, 'from miRNASNP3.core import mongo\n'),
((35084, 35138), 'miRNASNP3.core.mongo.db.utr_cosmic_gain_redundancy.aggregate', 'mongo.db.utr_cosmic_gain_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (35129, 35138), False, 'from miRNASNP3.core import mongo\n'),
((35336, 35396), 'miRNASNP3.core.mongo.db.utr_cosmic_gain_indel_redundancy.aggregate', 'mongo.db.utr_cosmic_gain_indel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (35387, 35396), False, 'from miRNASNP3.core import mongo\n'),
((35823, 35878), 'miRNASNP3.core.mongo.db.utr_clinvar_gain_redundancy.aggregate', 'mongo.db.utr_clinvar_gain_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (35869, 35878), False, 'from miRNASNP3.core import mongo\n'),
((36077, 36138), 'miRNASNP3.core.mongo.db.utr_clinvar_gain_indel_redundancy.aggregate', 'mongo.db.utr_clinvar_gain_indel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (36129, 36138), False, 'from miRNASNP3.core import mongo\n'),
((38890, 38944), 'miRNASNP3.core.mongo.db.utr_cosmic_loss_redundancy.aggregate', 'mongo.db.utr_cosmic_loss_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (38935, 38944), False, 'from miRNASNP3.core import mongo\n'),
((39142, 39202), 'miRNASNP3.core.mongo.db.utr_cosmic_loss_indel_redundancy.aggregate', 'mongo.db.utr_cosmic_loss_indel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (39193, 39202), False, 'from miRNASNP3.core import mongo\n'),
((39629, 39684), 'miRNASNP3.core.mongo.db.utr_clinvar_loss_redundancy.aggregate', 'mongo.db.utr_clinvar_loss_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (39675, 39684), False, 'from miRNASNP3.core import mongo\n'),
((39883, 39944), 'miRNASNP3.core.mongo.db.utr_clinvar_loss_indel_redundancy.aggregate', 'mongo.db.utr_clinvar_loss_indel_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (39935, 39944), False, 'from miRNASNP3.core import mongo\n'),
((44683, 44730), 'miRNASNP3.core.mongo.db.seed_mature_pre_var_v1.find', 'mongo.db.seed_mature_pre_var_v1.find', (['condition'], {}), '(condition)\n', (44719, 44730), False, 'from miRNASNP3.core import mongo\n'),
((46870, 46921), 'miRNASNP3.core.mongo.db.nci60_drug_correlation.aggregate', 'mongo.db.nci60_drug_correlation.aggregate', (['pipeline'], {}), '(pipeline)\n', (46911, 46921), False, 'from miRNASNP3.core import mongo\n'),
((47894, 47926), 'miRNASNP3.core.mongo.db.mirnago.find', 'mongo.db.mirnago.find', (['condition'], {}), '(condition)\n', (47915, 47926), False, 'from miRNASNP3.core import mongo\n'),
((48721, 48761), 'miRNASNP3.core.mongo.db.pri_mir_summary.find', 'mongo.db.pri_mir_summary.find', (['condition'], {}), '(condition)\n', (48750, 48761), False, 'from miRNASNP3.core import mongo\n'),
((48792, 48836), 'miRNASNP3.core.mongo.db.pri_mir_summary.find', 'mongo.db.pri_mir_summary.find', (['condition_pre'], {}), '(condition_pre)\n', (48821, 48836), False, 'from miRNASNP3.core import mongo\n'),
((55206, 55257), 'miRNASNP3.core.mongo.db.premir_info_addindel_v1.aggregate', 'mongo.db.premir_info_addindel_v1.aggregate', (['pipline'], {}), '(pipline)\n', (55248, 55257), False, 'from miRNASNP3.core import mongo\n'),
((56834, 56879), 'miRNASNP3.core.mongo.db.primary_altseq_indel.find', 'mongo.db.primary_altseq_indel.find', (['condition'], {}), '(condition)\n', (56868, 56879), False, 'from miRNASNP3.core import mongo\n'),
((58563, 58611), 'miRNASNP3.core.mongo.db.primir_altseq_mut_indel.find', 'mongo.db.primir_altseq_mut_indel.find', (['condition'], {}), '(condition)\n', (58600, 58611), False, 'from miRNASNP3.core import mongo\n'),
((61543, 61592), 'miRNASNP3.core.mongo.db.gwas_catalog_alternative.find', 'mongo.db.gwas_catalog_alternative.find', (['condition'], {}), '(condition)\n', (61581, 61592), False, 'from miRNASNP3.core import mongo\n'),
((70496, 70550), 'miRNASNP3.core.mongo.db.snp_summary_genelist.find', 'mongo.db.snp_summary_genelist.find', (['accurate_condition'], {}), '(accurate_condition)\n', (70530, 70550), False, 'from miRNASNP3.core import mongo\n'),
((71420, 71479), 'miRNASNP3.core.mongo.db.mutation_summary_genelist.find', 'mongo.db.mutation_summary_genelist.find', (['accurate_condition'], {}), '(accurate_condition)\n', (71459, 71479), False, 'from miRNASNP3.core import mongo\n'),
((112725, 112774), 'miRNASNP3.core.mongo.db.enrichment_summary_v2.aggregate', 'mongo.db.enrichment_summary_v2.aggregate', (['pipline'], {}), '(pipline)\n', (112765, 112774), False, 'from miRNASNP3.core import mongo\n'),
((113938, 113995), 'miRNASNP3.core.mongo.db.variation_distribute_deduplicate.find', 'mongo.db.variation_distribute_deduplicate.find', (['condition'], {}), '(condition)\n', (113984, 113995), False, 'from miRNASNP3.core import mongo\n'),
((6314, 6364), 'miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.find', 'mongo.db.seed_gain_4666_redundancy.find', (['condition'], {}), '(condition)\n', (6353, 6364), False, 'from miRNASNP3.core import mongo\n'),
((6404, 6458), 'miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.find', 'mongo.db.seed_gain_addindel_redundancy.find', (['condition'], {}), '(condition)\n', (6447, 6458), False, 'from miRNASNP3.core import mongo\n'),
((8672, 8722), 'miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.find', 'mongo.db.seed_gain_4666_redundancy.find', (['condition'], {}), '(condition)\n', (8711, 8722), False, 'from miRNASNP3.core import mongo\n'),
((8784, 8838), 'miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.find', 'mongo.db.seed_gain_addindel_redundancy.find', (['condition'], {}), '(condition)\n', (8827, 8838), False, 'from miRNASNP3.core import mongo\n'),
((10209, 10272), 'miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.aggregate', 'mongo.db.seed_gain_addindel_redundancy.aggregate', (['pipline_indel'], {}), '(pipline_indel)\n', (10257, 10272), False, 'from miRNASNP3.core import mongo\n'),
((14898, 14948), 'miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.find', 'mongo.db.seed_loss_4666_redundancy.find', (['condition'], {}), '(condition)\n', (14937, 14948), False, 'from miRNASNP3.core import mongo\n'),
((15010, 15064), 'miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.find', 'mongo.db.seed_loss_addindel_redundancy.find', (['condition'], {}), '(condition)\n', (15053, 15064), False, 'from miRNASNP3.core import mongo\n'),
((17463, 17513), 'miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.find', 'mongo.db.seed_loss_4666_redundancy.find', (['condition'], {}), '(condition)\n', (17502, 17513), False, 'from miRNASNP3.core import mongo\n'),
((17575, 17629), 'miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.find', 'mongo.db.seed_loss_addindel_redundancy.find', (['condition'], {}), '(condition)\n', (17618, 17629), False, 'from miRNASNP3.core import mongo\n'),
((18420, 18483), 'miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.aggregate', 'mongo.db.seed_loss_addindel_redundancy.aggregate', (['pipline_indel'], {}), '(pipline_indel)\n', (18468, 18483), False, 'from miRNASNP3.core import mongo\n'),
((22039, 22091), 'miRNASNP3.core.mongo.db.seed_cosmic_gain_redundancy.find', 'mongo.db.seed_cosmic_gain_redundancy.find', (['condition'], {}), '(condition)\n', (22080, 22091), False, 'from miRNASNP3.core import mongo\n'),
((22279, 22339), 'miRNASNP3.core.mongo.db.indel_seed_mutation_gain_redundancy.find', 'mongo.db.indel_seed_mutation_gain_redundancy.find', (['condition'], {}), '(condition)\n', (22328, 22339), False, 'from miRNASNP3.core import mongo\n'),
((25516, 25568), 'miRNASNP3.core.mongo.db.seed_cosmic_loss_redundancy.find', 'mongo.db.seed_cosmic_loss_redundancy.find', (['condition'], {}), '(condition)\n', (25557, 25568), False, 'from miRNASNP3.core import mongo\n'),
((25756, 25816), 'miRNASNP3.core.mongo.db.indel_seed_mutation_loss_redundancy.find', 'mongo.db.indel_seed_mutation_loss_redundancy.find', (['condition'], {}), '(condition)\n', (25805, 25816), False, 'from miRNASNP3.core import mongo\n'),
((30488, 30539), 'miRNASNP3.core.mongo.db.snv_utr_loss_v2_redundancy.find', 'mongo.db.snv_utr_loss_v2_redundancy.find', (['condition'], {}), '(condition)\n', (30528, 30539), False, 'from miRNASNP3.core import mongo\n'),
((30666, 30719), 'miRNASNP3.core.mongo.db.indel_utr_loss_v2_redundancy.find', 'mongo.db.indel_utr_loss_v2_redundancy.find', (['condition'], {}), '(condition)\n', (30708, 30719), False, 'from miRNASNP3.core import mongo\n'),
((32791, 32842), 'miRNASNP3.core.mongo.db.snv_utr_gain_v2_redundancy.find', 'mongo.db.snv_utr_gain_v2_redundancy.find', (['condition'], {}), '(condition)\n', (32831, 32842), False, 'from miRNASNP3.core import mongo\n'),
((32969, 33022), 'miRNASNP3.core.mongo.db.indel_utr_gain_v2_redundancy.find', 'mongo.db.indel_utr_gain_v2_redundancy.find', (['condition'], {}), '(condition)\n', (33011, 33022), False, 'from miRNASNP3.core import mongo\n'),
((43930, 43977), 'miRNASNP3.core.mongo.db.seed_mature_pre_var_v1.find', 'mongo.db.seed_mature_pre_var_v1.find', (['condition'], {}), '(condition)\n', (43966, 43977), False, 'from miRNASNP3.core import mongo\n'),
((51696, 51738), 'miRNASNP3.core.mongo.db.premir_summary_v1.find', 'mongo.db.premir_summary_v1.find', (['condition'], {}), '(condition)\n', (51727, 51738), False, 'from miRNASNP3.core import mongo\n'),
((60173, 60209), 'miRNASNP3.core.mongo.db.snp_summary.find', 'mongo.db.snp_summary.find', (['condition'], {}), '(condition)\n', (60198, 60209), False, 'from miRNASNP3.core import mongo\n'),
((64713, 64760), 'miRNASNP3.core.mongo.db.ld_region.find', 'mongo.db.ld_region.find', (["{'snp_id': search_ids}"], {}), "({'snp_id': search_ids})\n", (64736, 64760), False, 'from miRNASNP3.core import mongo\n'),
((88504, 88553), 'miRNASNP3.core.mongo.db.drv_in_utr_v3_redundancy.find', 'mongo.db.drv_in_utr_v3_redundancy.find', (['condition'], {}), '(condition)\n', (88542, 88553), False, 'from miRNASNP3.core import mongo\n'),
((95783, 95822), 'miRNASNP3.core.mongo.db.snp_in_seed_v2.find', 'mongo.db.snp_in_seed_v2.find', (['condition'], {}), '(condition)\n', (95811, 95822), False, 'from miRNASNP3.core import mongo\n'),
((95866, 95907), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (95896, 95907), False, 'from miRNASNP3.core import mongo\n'),
((95949, 95987), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (95976, 95987), False, 'from miRNASNP3.core import mongo\n'),
((98732, 98771), 'miRNASNP3.core.mongo.db.snp_in_seed_v2.find', 'mongo.db.snp_in_seed_v2.find', (['condition'], {}), '(condition)\n', (98760, 98771), False, 'from miRNASNP3.core import mongo\n'),
((101197, 101238), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (101227, 101238), False, 'from miRNASNP3.core import mongo\n'),
((101573, 101614), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (101603, 101614), False, 'from miRNASNP3.core import mongo\n'),
((104143, 104184), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (104173, 104184), False, 'from miRNASNP3.core import mongo\n'),
((109032, 109071), 'miRNASNP3.core.mongo.db.cosmic_summary.find', 'mongo.db.cosmic_summary.find', (['condition'], {}), '(condition)\n', (109060, 109071), False, 'from miRNASNP3.core import mongo\n'),
((110453, 110493), 'miRNASNP3.core.mongo.db.clinvar_summary.find', 'mongo.db.clinvar_summary.find', (['condition'], {}), '(condition)\n', (110482, 110493), False, 'from miRNASNP3.core import mongo\n'),
((1798, 1839), 'miRNASNP3.core.mongo.db.mirna_expression.find', 'mongo.db.mirna_expression.find', (['condition'], {}), '(condition)\n', (1828, 1839), False, 'from miRNASNP3.core import mongo\n'),
((10584, 10637), 'miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.aggregate', 'mongo.db.seed_gain_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (10628, 10637), False, 'from miRNASNP3.core import mongo\n'),
((10901, 10964), 'miRNASNP3.core.mongo.db.seed_gain_addindel_redundancy.aggregate', 'mongo.db.seed_gain_addindel_redundancy.aggregate', (['pipline_indel'], {}), '(pipline_indel)\n', (10949, 10964), False, 'from miRNASNP3.core import mongo\n'),
((11163, 11216), 'miRNASNP3.core.mongo.db.seed_gain_4666_redundancy.aggregate', 'mongo.db.seed_gain_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (11207, 11216), False, 'from miRNASNP3.core import mongo\n'),
((18690, 18743), 'miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.aggregate', 'mongo.db.seed_loss_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (18734, 18743), False, 'from miRNASNP3.core import mongo\n'),
((19007, 19070), 'miRNASNP3.core.mongo.db.seed_loss_addindel_redundancy.aggregate', 'mongo.db.seed_loss_addindel_redundancy.aggregate', (['pipline_indel'], {}), '(pipline_indel)\n', (19055, 19070), False, 'from miRNASNP3.core import mongo\n'),
((19269, 19322), 'miRNASNP3.core.mongo.db.seed_loss_4666_redundancy.aggregate', 'mongo.db.seed_loss_4666_redundancy.aggregate', (['pipline'], {}), '(pipline)\n', (19313, 19322), False, 'from miRNASNP3.core import mongo\n'),
((35208, 35259), 'miRNASNP3.core.mongo.db.utr_cosmic_gain_redundancy.find', 'mongo.db.utr_cosmic_gain_redundancy.find', (['condition'], {}), '(condition)\n', (35248, 35259), False, 'from miRNASNP3.core import mongo\n'),
((35466, 35523), 'miRNASNP3.core.mongo.db.utr_cosmic_gain_indel_redundancy.find', 'mongo.db.utr_cosmic_gain_indel_redundancy.find', (['condition'], {}), '(condition)\n', (35512, 35523), False, 'from miRNASNP3.core import mongo\n'),
((35948, 36000), 'miRNASNP3.core.mongo.db.utr_clinvar_gain_redundancy.find', 'mongo.db.utr_clinvar_gain_redundancy.find', (['condition'], {}), '(condition)\n', (35989, 36000), False, 'from miRNASNP3.core import mongo\n'),
((36208, 36266), 'miRNASNP3.core.mongo.db.utr_clinvar_gain_indel_redundancy.find', 'mongo.db.utr_clinvar_gain_indel_redundancy.find', (['condition'], {}), '(condition)\n', (36255, 36266), False, 'from miRNASNP3.core import mongo\n'),
((39014, 39065), 'miRNASNP3.core.mongo.db.utr_cosmic_loss_redundancy.find', 'mongo.db.utr_cosmic_loss_redundancy.find', (['condition'], {}), '(condition)\n', (39054, 39065), False, 'from miRNASNP3.core import mongo\n'),
((39272, 39329), 'miRNASNP3.core.mongo.db.utr_cosmic_loss_indel_redundancy.find', 'mongo.db.utr_cosmic_loss_indel_redundancy.find', (['condition'], {}), '(condition)\n', (39318, 39329), False, 'from miRNASNP3.core import mongo\n'),
((39754, 39806), 'miRNASNP3.core.mongo.db.utr_clinvar_loss_redundancy.find', 'mongo.db.utr_clinvar_loss_redundancy.find', (['condition'], {}), '(condition)\n', (39795, 39806), False, 'from miRNASNP3.core import mongo\n'),
((40014, 40072), 'miRNASNP3.core.mongo.db.utr_clinvar_loss_indel_redundancy.find', 
'miRNASNP3.core.mongo.db.snp_summary.find', 'mongo.db.snp_summary.find', (['condition'], {}), '(condition)\n', (60263, 60274), False, 'from miRNASNP3.core import mongo\n'), ((88354, 88403), 'miRNASNP3.core.mongo.db.drv_in_utr_v3_redundancy.find', 'mongo.db.drv_in_utr_v3_redundancy.find', (['condition'], {}), '(condition)\n', (88392, 88403), False, 'from miRNASNP3.core import mongo\n'), ((98818, 98857), 'miRNASNP3.core.mongo.db.snp_in_seed_v2.find', 'mongo.db.snp_in_seed_v2.find', (['condition'], {}), '(condition)\n', (98846, 98857), False, 'from miRNASNP3.core import mongo\n'), ((101285, 101326), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (101315, 101326), False, 'from miRNASNP3.core import mongo\n'), ((101456, 101497), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (101486, 101497), False, 'from miRNASNP3.core import mongo\n'), ((104030, 104071), 'miRNASNP3.core.mongo.db.snp_in_premir_v2.find', 'mongo.db.snp_in_premir_v2.find', (['condition'], {}), '(condition)\n', (104060, 104071), False, 'from miRNASNP3.core import mongo\n'), ((107063, 107101), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (107090, 107101), False, 'from miRNASNP3.core import mongo\n'), ((107217, 107255), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (107244, 107255), False, 'from miRNASNP3.core import mongo\n'), ((107301, 107339), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (107328, 107339), False, 'from miRNASNP3.core import mongo\n'), ((109099, 109138), 'miRNASNP3.core.mongo.db.cosmic_summary.find', 'mongo.db.cosmic_summary.find', (['condition'], {}), '(condition)\n', (109127, 109138), False, 'from miRNASNP3.core import mongo\n'), ((110522, 110562), 'miRNASNP3.core.mongo.db.clinvar_summary.find', 'mongo.db.clinvar_summary.find', (['condition'], {}), '(condition)\n', (110551, 110562), False, 'from miRNASNP3.core import mongo\n'), ((59938, 59965), 'miRNASNP3.core.mongo.db.snp_summary.find', 'mongo.db.snp_summary.find', ([], {}), '()\n', (59963, 59965), False, 'from miRNASNP3.core import mongo\n'), ((106705, 106743), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (106732, 106743), False, 'from miRNASNP3.core import mongo\n'), ((108789, 108819), 'miRNASNP3.core.mongo.db.cosmic_summary.find', 'mongo.db.cosmic_summary.find', ([], {}), '()\n', (108817, 108819), False, 'from miRNASNP3.core import mongo\n'), ((110205, 110236), 'miRNASNP3.core.mongo.db.clinvar_summary.find', 'mongo.db.clinvar_summary.find', ([], {}), '()\n', (110234, 110236), False, 'from miRNASNP3.core import mongo\n'), ((106947, 106985), 'miRNASNP3.core.mongo.db.snp_in_utr_v2.find', 'mongo.db.snp_in_utr_v2.find', (['condition'], {}), '(condition)\n', (106974, 106985), False, 'from miRNASNP3.core import mongo\n')]
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import pickle
import numpy as np
import pandas as pd
import azureml.train.automl
from sklearn.externals import joblib
from azureml.core.model import Model
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
import xgboost as xgb
input_sample = pd.DataFrame(data=[{'winddirabs': 0.34244, 'winddirrel': 0.324235,'windspeedrel':1.3213}])
output_sample = np.array([0])
def init():
global model
    # This name is the model.id of the model we want to deploy; deserialize
    # the model file back into a sklearn model.
    model_path = Model.get_model_path(model_name='Model')
model = joblib.load(model_path)
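# The inference_schema decorators declare the expected input and output
# formats so the deployed web service can validate and document requests.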
@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
try:
result = model.predict(data)
return json.dumps({"result": result.tolist()})
except Exception as e:
result = str(e)
return json.dumps({"error": result})
|
[
"pandas.DataFrame",
"azureml.core.model.Model.get_model_path",
"inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType",
"json.dumps",
"inference_schema.parameter_types.pandas_parameter_type.PandasParameterType",
"numpy.array",
"sklearn.externals.joblib.load"
] |
[((633, 729), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[{'winddirabs': 0.34244, 'winddirrel': 0.324235, 'windspeedrel': 1.3213}]"}), "(data=[{'winddirabs': 0.34244, 'winddirrel': 0.324235,\n 'windspeedrel': 1.3213}])\n", (645, 729), True, 'import pandas as pd\n'), ((740, 753), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (748, 753), True, 'import numpy as np\n'), ((921, 961), 'azureml.core.model.Model.get_model_path', 'Model.get_model_path', ([], {'model_name': '"""Model"""'}), "(model_name='Model')\n", (941, 961), False, 'from azureml.core.model import Model\n'), ((976, 999), 'sklearn.externals.joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (987, 999), False, 'from sklearn.externals import joblib\n'), ((1024, 1057), 'inference_schema.parameter_types.pandas_parameter_type.PandasParameterType', 'PandasParameterType', (['input_sample'], {}), '(input_sample)\n', (1043, 1057), False, 'from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType\n'), ((1074, 1107), 'inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType', 'NumpyParameterType', (['output_sample'], {}), '(output_sample)\n', (1092, 1107), False, 'from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n'), ((1291, 1320), 'json.dumps', 'json.dumps', (["{'error': result}"], {}), "({'error': result})\n", (1301, 1320), False, 'import json\n')]
|
import pygame
import random
from pygame.locals import *
from pygame.event import wait
from deck import *
from game import *
from init import *
deck = Deck()
King = Game("Pit","Dotti","Lella","Rob")
giocata=0
position=[0,0,0,0]
carteGiocate=[[],[],[],[],[],[],[],[],[],[],[],[],[]]
timerScomparsa=0
timerGiocata=0
primaCarta = None
Turno = init(deck, King)
# Initialize pygame
pygame.init()
clock = pygame.time.Clock()
# Show the screen
screen = pygame.display.set_mode((800,600))
# Game settings
pygame.display.set_caption("King")
icon = pygame.image.load("img/icon.png")
pygame.display.set_icon(icon)
font = pygame.font.SysFont("monospace", 16)
# Function to display a player's hand on screen
def mostraMano(self,ypos,sel):
xpos=400-len(self.Mano)*50/2
for carta in range(len(self.Mano)):
thisy=ypos
if carta == sel :
thisy-=35
screen.blit(self.Mano[carta].img, (xpos,thisy))
xpos+=50
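# Open the trick: the human's selected card if they lead, otherwise a random card for the AI.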
def primaGiocata(Turno,giocata):
if King.Primo == 0 :
primaCarta=King.g1.Mano[position[0]]
Turno=(Turno+1)%4
return primaCarta, Turno
position[King.Primo]=random.randint(0,len(King.allg[King.Primo].Mano)-1)
primaCarta=King.allg[King.Primo].Mano[position[King.Primo]]
carteGiocate[giocata].append(primaCarta)
King.allg[Turno].Mano.pop(position[Turno])
Turno=(Turno+1)%4
return primaCarta, Turno, giocata
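# Play a random card for a computer player, re-rolling until the card follows suit.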
def altraGiocata(Turno, primaCarta, position):
position[Turno]=random.randint(0,len(King.allg[Turno].Mano)-1)
cartaGiocata=King.allg[Turno].Mano[position[Turno]]
while King.checkSuit(position, primaCarta, Turno):
position[Turno]=random.randint(0,len(King.allg[Turno].Mano)-1)
cartaGiocata=King.allg[Turno].Mano[position[Turno]]
carteGiocate[giocata].append(cartaGiocata)
King.allg[Turno].Mano.pop(position[Turno])
Turno=(Turno+1)%4
return Turno
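# Decide who won the trick: the highest card of the suit that was led.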
def checkVincitore(primaCarta, giocata, carteGiocate, Primo):
cartaVincente = primaCarta
newPrimo = Primo
for i in [1,2,3]:
if (cartaVincente.suit == carteGiocate[giocata][i].suit) & (cartaVincente.value < carteGiocate[giocata][i].value):
cartaVincente = carteGiocate[giocata][i]
newPrimo = (i + Primo) % 4
print("La mano è stata vinta da {} con la carta ".format(King.allg[newPrimo].Nome), end="")
cartaVincente.show()
return newPrimo
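# Draw the cards played so far in the current trick at the four table positions.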
def mostraGiocata(giocata):
cordcarte=[(375,310),(425,230),(375,150),(325,230)]
for i in range(len(carteGiocate[giocata])):
screen.blit(carteGiocate[giocata][i].img, cordcarte[(King.Primo+i)%4])
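# Draw the HUD: each player's name and score.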
def stampaUHD():
pos=[(350,540),(600,300),(350,40),(100,300)]
pos2=[(350,556),(600,316),(350,56),(100,316)]
for i in range(4):
label = font.render('{}'.format(King.allg[i].Nome), 1, (0,0,0), (160,160,160))
        label2 = font.render('Points: {}'.format(King.allg[i].Punti), 1, (0,0,0), (160,160,160))
screen.blit(label, pos[i])
screen.blit(label2, pos2[i])
# Game loop
running = True
while running:
screen.fill((0,255,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
        # Move between cards
if (event.type == pygame.KEYDOWN) & (len(carteGiocate[giocata]) < 4):
if (event.key == pygame.K_LEFT) :
if position[0] > 0:
position[0]-=1
else:
position[0]=len(King.g1.Mano)-1
elif (event.key == pygame.K_RIGHT) :
if (position[0]<len(King.g1.Mano)-1) :
position[0]+=1
else :
position[0]= 0
elif (event.key == pygame.K_RETURN):
if (Turno == 0):
if (King.Primo == 0):
primaCarta, Turno = primaGiocata(Turno, giocata)
carteGiocate[giocata].append(King.g1.Mano[position[0]])
King.g1.Mano.pop(position[0])
position[0]=0
else:
if King.checkSuit(position, primaCarta, 0):
print("Devi rispondere a seme")
continue
carteGiocate[giocata].append(King.g1.Mano[position[0]])
King.g1.Mano.pop(position[0])
position[0]=0
Turno += 1
else :
                    print("It's not your turn")
elif (event.key == pygame.K_ESCAPE):
pygame.quit()
quit()
            # Press a key to make the next player play
            # First player of the trick
elif (event.key == pygame.K_p and len(carteGiocate[giocata]) == 0):
if (Turno == 0 ):
                    print("It's your turn")
continue
primaCarta, Turno, giocata = primaGiocata(Turno, giocata)
            # Other players
elif (event.key == pygame.K_n):
if (Turno == 0):
                    print("It's your turn")
continue
if (primaCarta == None):
                    print("The trick leader must play first")
continue
Turno = altraGiocata(Turno, primaCarta, position)
            # Press S to print the played cards and trick counter
elif (event.key == pygame.K_s):
print(carteGiocate)
print(giocata)
            # Press T to print the selected card
elif (event.key == pygame.K_t):
King.allg[0].Mano[position[0]].show()
    # Game flow
if (Turno != 0):
if timerGiocata>10:
if (len(carteGiocate[giocata]) == 0):
primaCarta, Turno, giocata = primaGiocata(Turno, giocata)
elif (len(carteGiocate[giocata]) < 4):
Turno = altraGiocata(Turno, primaCarta, position)
timerGiocata = 0
timerGiocata += 1
    # End-of-trick check
if (len(carteGiocate[giocata])>3):
if timerScomparsa>16 :
King.Primo=checkVincitore(primaCarta,giocata, carteGiocate,King.Primo)
King.allg[King.Primo].Punti+=1
giocata+=1
Turno=King.Primo
timerScomparsa=0
primaCarta=None
King.contaSemi()
King.punteggio()
timerScomparsa+=1
if (giocata == 13):
Turno = init(deck, King)
giocata=0
position=[0,0,0,0]
carteGiocate=[[],[],[],[],[],[],[],[],[],[],[],[],[]]
timerScomparsa=0
primaCarta = None
mostraMano(King.g1,450,position[0])
stampaUHD()
# mostraMano(King.g2,50,position[0])
# mostraMano(King.g3,100,position[0])
# mostraMano(King.g4,150,position[0])
mostraGiocata(giocata)
pygame.display.update()
clock.tick(10)
|
[
"pygame.quit",
"pygame.display.set_icon",
"pygame.font.SysFont",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.update",
"pygame.image.load",
"pygame.display.set_caption",
"pygame.time.Clock"
] |
[((370, 383), 'pygame.init', 'pygame.init', ([], {}), '()\n', (381, 383), False, 'import pygame\n'), ((392, 411), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (409, 411), False, 'import pygame\n'), ((442, 477), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 600)'], {}), '((800, 600))\n', (465, 477), False, 'import pygame\n'), ((502, 536), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""King"""'], {}), "('King')\n", (528, 536), False, 'import pygame\n'), ((545, 579), 'pygame.image.load', 'pygame.image.load', (['"""img\\\\icon.png"""'], {}), "('img\\\\icon.png')\n", (562, 579), False, 'import pygame\n'), ((579, 608), 'pygame.display.set_icon', 'pygame.display.set_icon', (['icon'], {}), '(icon)\n', (602, 608), False, 'import pygame\n'), ((616, 652), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""monospace"""', '(16)'], {}), "('monospace', 16)\n", (635, 652), False, 'import pygame\n'), ((3098, 3116), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3114, 3116), False, 'import pygame\n'), ((6884, 6907), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6905, 6907), False, 'import pygame\n'), ((4587, 4600), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4598, 4600), False, 'import pygame\n')]
|
# Generated by Django 3.0.3 on 2020-02-07 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
('carts', '0002_cart_user'),
]
operations = [
migrations.AddField(
model_name='cart',
name='products',
field=models.ManyToManyField(through='carts.CartProducts', to='products.Product'),
),
]
|
[
"django.db.models.ManyToManyField"
] |
[((361, 436), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'through': '"""carts.CartProducts"""', 'to': '"""products.Product"""'}), "(through='carts.CartProducts', to='products.Product')\n", (383, 436), False, 'from django.db import migrations, models\n')]
|
from django.db import models
from localflavor.br.models import BRCPFField
class PersonType(models.Model):
id = models.AutoField(primary_key=True, editable=False)
name = models.CharField(max_length=32, blank=False, null=False)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class PersonMediaType(models.Model):
id = models.AutoField(primary_key=True, editable=False)
name = models.CharField(max_length=32, blank=False, null=False)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class Person(models.Model):
id = models.AutoField(primary_key=True, editable=False)
name = models.CharField(max_length=32, blank=False, null=False)
type = models.ForeignKey(PersonType, on_delete=models.CASCADE, related_name='type', blank=False, null=False)
cpf = BRCPFField()
phone = models.CharField(max_length=15, null=True, blank=True)
company = models.CharField(max_length=32, null=False, blank=False)
last_update = models.DateField(auto_now=True, null=False, blank=False)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class PersonMedia(models.Model):
id = models.AutoField(primary_key=True, editable=False)
person_id = models.ForeignKey(Person, on_delete=models.CASCADE, related_name='person', null=True, blank=True)
object_media = models.TextField(null=False, blank=False)
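# Audit table that records CPF changes made to Person rows.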
class PersonAudit(models.Model):
id = models.AutoField(primary_key=True, editable=False)
person_id = models.ForeignKey(Person,on_delete=models.CASCADE, null=False, blank=False, editable=False)
cpf_new = models.CharField(max_length=14, null=False, blank=False, editable=False)
cpf_old = models.CharField(max_length=14, null=True, blank=False, editable=False)
last_update = models.DateField(auto_now=True, null=False, blank=False, editable=False)
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateField",
"localflavor.br.models.BRCPFField"
] |
[((293, 343), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'editable': '(False)'}), '(primary_key=True, editable=False)\n', (309, 343), False, 'from django.db import models\n'), ((355, 411), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(False)', 'null': '(False)'}), '(max_length=32, blank=False, null=False)\n', (371, 411), False, 'from django.db import models\n'), ((558, 608), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'editable': '(False)'}), '(primary_key=True, editable=False)\n', (574, 608), False, 'from django.db import models\n'), ((620, 676), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(False)', 'null': '(False)'}), '(max_length=32, blank=False, null=False)\n', (636, 676), False, 'from django.db import models\n'), ((810, 860), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'editable': '(False)'}), '(primary_key=True, editable=False)\n', (826, 860), False, 'from django.db import models\n'), ((872, 928), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(False)', 'null': '(False)'}), '(max_length=32, blank=False, null=False)\n', (888, 928), False, 'from django.db import models\n'), ((940, 1045), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PersonType'], {'on_delete': 'models.CASCADE', 'related_name': '"""type"""', 'blank': '(False)', 'null': '(False)'}), "(PersonType, on_delete=models.CASCADE, related_name='type',\n blank=False, null=False)\n", (957, 1045), False, 'from django.db import models\n'), ((1052, 1064), 'localflavor.br.models.BRCPFField', 'BRCPFField', ([], {}), '()\n', (1062, 1064), False, 'from localflavor.br.models import BRCPFField\n'), ((1077, 1131), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'null': '(True)', 'blank': '(True)'}), '(max_length=15, null=True, blank=True)\n', (1093, 1131), False, 'from django.db import models\n'), ((1146, 1202), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'null': '(False)', 'blank': '(False)'}), '(max_length=32, null=False, blank=False)\n', (1162, 1202), False, 'from django.db import models\n'), ((1221, 1277), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)', 'null': '(False)', 'blank': '(False)'}), '(auto_now=True, null=False, blank=False)\n', (1237, 1277), False, 'from django.db import models\n'), ((1416, 1466), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'editable': '(False)'}), '(primary_key=True, editable=False)\n', (1432, 1466), False, 'from django.db import models\n'), ((1483, 1584), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Person'], {'on_delete': 'models.CASCADE', 'related_name': '"""person"""', 'null': '(True)', 'blank': '(True)'}), "(Person, on_delete=models.CASCADE, related_name='person',\n null=True, blank=True)\n", (1500, 1584), False, 'from django.db import models\n'), ((1601, 1642), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(False)', 'blank': '(False)'}), '(null=False, blank=False)\n', (1617, 1642), False, 'from django.db import models\n'), ((1687, 1737), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'editable': '(False)'}), '(primary_key=True, editable=False)\n', (1703, 1737), False, 'from django.db import models\n'), ((1754, 1850), 'django.db.models.ForeignKey', 'models.ForeignKey', 
(['Person'], {'on_delete': 'models.CASCADE', 'null': '(False)', 'blank': '(False)', 'editable': '(False)'}), '(Person, on_delete=models.CASCADE, null=False, blank=False,\n editable=False)\n', (1771, 1850), False, 'from django.db import models\n'), ((1861, 1933), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'null': '(False)', 'blank': '(False)', 'editable': '(False)'}), '(max_length=14, null=False, blank=False, editable=False)\n', (1877, 1933), False, 'from django.db import models\n'), ((1948, 2019), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'null': '(True)', 'blank': '(False)', 'editable': '(False)'}), '(max_length=14, null=True, blank=False, editable=False)\n', (1964, 2019), False, 'from django.db import models\n'), ((2038, 2110), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)', 'null': '(False)', 'blank': '(False)', 'editable': '(False)'}), '(auto_now=True, null=False, blank=False, editable=False)\n', (2054, 2110), False, 'from django.db import models\n')]
|
# Standard imports
import collections
import json
import select
import socket
import threading
import zmq
# Custom imports
import job
import message
import taskunit
import utils.logger
class Messenger:
'''A class representing a messenger that handles all communication.
'''
def __init__(self):
# identity <--> address maps.
self.identity_to_address = {}
self.address_to_identity = {}
# Both inbound_queue and outbound_queue contain tuples of
# (address, message) that are received or need to be sent out.
self.inbound_queue = collections.deque()
self.outbound_queue = collections.deque()
self.outbound_queue_sem = threading.Semaphore(value=0)
self.inbound_queue_sem = threading.Semaphore(value=0)
# This dict is used to keep track of MessageTracker objects which can
# be used to track message status.
self.trackers = {}
self.logger = utils.logger.Logger('MESSENGER')
return
def start(self):
'''Start the messenger.
'''
pass
def get_host_by_name(self, name):
'''Return the address for the hostname.
'''
return self.identity_to_address[name]
def register_destination(self, name, address):
'''
Store the hostname as key with address as value for this destination
so that the caller can later only supply destination as hostname
to communicate with the destination.
'''
self.identity_to_address[name] = address
self.address_to_identity[address] = name
return
def send(self, msg, address):
'''Send the msg to the address.
'''
self.outbound_queue.append((address, msg))
self.outbound_queue_sem.release()
return
def receive(self, deserialize=True):
'''Yield the next message from the inbound_queue.
:param deserialize: If True, the message payload is deserialized
and generated instead of the Message object itself.
'''
while self.inbound_queue_sem.acquire():
msg = self.inbound_queue.popleft()
if not deserialize:
yield msg
continue
msg_type = msg.msg_type
decoded_msg = msg.msg_payload.decode('UTF-8')
if msg_type == message.Message.MSG_STATUS:
yield int(decoded_msg)
elif msg_type == message.Message.MSG_TASKUNIT:
yield taskunit.TaskUnit.deserialize(decoded_msg)
elif msg_type == message.Message.MSG_TASKUNIT_RESULT:
yield taskunit.TaskUnit.deserialize(decoded_msg)
elif msg_type == message.Message.MSG_JOB:
yield job.Job.deserialize(decoded_msg)
def queue_for_sending(self, messages, address):
'''Add messages to the outbound queue for sending.
NOTE: This method takes a list of messages and not a single message.
'''
        for msg in messages:
            self.outbound_queue.append((address, msg))
self.outbound_queue_sem.release()
return
def delete_tracker(self, tracker):
'''
The tracker for msg_id is no longer needed. Delete it.
'''
msg_id = tracker.msg_id
del self.trackers[msg_id]
return
def sender(self):
'''Send messages out through the sender socket. Forever.
'''
pass
def receiver(self):
'''Receive messages on the receiver socket. Forever.
'''
pass
class UDPMessenger(Messenger):
'''A Messenger that uses UDP sockets for communication.
This messenger implements custom fragmentation, ack etc.
'''
# Constants
DEFAULT_IP = '0.0.0.0'
DEFAULT_PORT = 33310
def __init__(self, ip=DEFAULT_IP, port=DEFAULT_PORT):
super().__init__()
self.ip = ip
self.port = port
# Fragments map for inbound messages.
self.fragments_map = {}
return
def start(self):
'''Start the messenger.
'''
# Create the sockets.
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('0.0.0.0', self.port))
# Create and start the receiver and sender threads now.
receiver_thread = threading.Thread(target=self.receiver,
name='receiver_thread')
sender_thread = threading.Thread(target=self.sender,
name='sender_thread')
receiver_thread.start()
sender_thread.start()
return
def send_status(self, status, address, track=False):
'''
Send a status update to a remote node.
If track is True, then this method returns a MessageTracker object
which can be used to check the state of the message sending.
'''
# Trivially serializeable.
serialized_status = str(status)
msg_id, messages = message.Message.packed_fragments(
message.Message.MSG_STATUS,
serialized_status,
address)
tracker = message.MessageTracker(msg_id, isinuse=track)
self.trackers[msg_id] = tracker
self.queue_for_sending(messages, address)
if track:
return tracker
def send_ack(self, msg, address, track=False):
'''
Send an ack for msg to a remote node.
If track is True, then this method returns a MessageTracker object
which can be used to check the state of the message sending.
'''
msg_id = msg.msg_id
msg_id, messages = message.Message.packed_fragments(
message.Message.MSG_ACK,
msg_id,
address)
tracker = message.MessageTracker(msg_id, isinuse=track)
self.trackers[msg_id] = tracker
self.queue_for_sending(messages, address)
if track:
return tracker
def send_job(self, job, address, track=False):
'''
Send a job to a remote node.
If track is True, then this method returns a MessageTracker object
which can be used to check the state of the message sending.
'''
serialized_job = job.serialize(json_encode=True)
msg_id, messages = message.Message.packed_fragments(
message.Message.MSG_JOB,
serialized_job,
address)
tracker = message.MessageTracker(msg_id, isinuse=track)
self.trackers[msg_id] = tracker
self.queue_for_sending(messages, address)
if track:
return tracker
def send_taskunit(self, tu, address, track=False,
attrs=['id', 'job_id', 'data', 'retries', 'state',
'result']):
'''
Send a taskunit to a remote node.
If track is True, then this method returns a MessageTracker object
which can be used to check the state of the message sending.
'''
serialized_taskunit = tu.serialize(include_attrs=attrs,
json_encode=True)
msg_id, messages = message.Message.packed_fragments(
message.Message.MSG_TASKUNIT,
serialized_taskunit,
address)
tracker = message.MessageTracker(msg_id, isinuse=track)
self.trackers[msg_id] = tracker
self.queue_for_sending(messages, address)
if track:
return tracker
def send_taskunit_result(self, tu, address, track=False,
attrs=['id', 'job_id', 'state', 'result']):
'''
Send the result of running taskunit.
'''
serialized_result = tu.serialize(include_attrs=attrs, json_encode=True)
msg_id, messages = message.Message.packed_fragments(
message.Message.MSG_TASKUNIT_RESULT,
serialized_result,
address)
tracker = message.MessageTracker(msg_id, isinuse=track)
self.trackers[msg_id] = tracker
self.queue_for_sending(messages, address)
if track:
return tracker
def sender(self):
'''Send messages out through the sender socket. Forever.
'''
poller = select.epoll()
poller.register(self.socket.fileno(),
select.EPOLLOUT | select.EPOLLET) # Edge-triggered.
self.logger.log("Sender up!")
while True:
self.outbound_queue_sem.acquire()
address, msg = self.outbound_queue.popleft()
self.logger.log("Sending message to %s:%d" % address)
# While the msg is still not sent...
while msg is not None:
# Poll with timeout of 1.0 seconds.
poll_responses = poller.poll(1.0)
for _, event in poll_responses:
# If we can send...
if event & select.EPOLLOUT:
bytes_sent = self.socket.sendto(msg, address)
if bytes_sent == 0:
raise Exception("Couldn't send out the message.")
# If we have a tracker for this msg, then we need to
# mark it as sent if this is the last frag for the msg
# being sent out.
try:
msg_object = message.Message(packed_msg=msg)
if msg_object.is_last_frag():
tracker = self.trackers[msg_object.msg_id]
tracker.set_state(
message.MessageTracker.MSG_SENT)
except KeyError:
pass
msg = None
break
else:
self.logger.log("Unexpected event on sender socket.")
def handle_received_msg(self, msg, address):
'''Handle received message.
'''
fragments_map = self.fragments_map
msg = message.Message(packed_msg=msg)
try:
fragments_map[msg.msg_id]
except KeyError:
fragments_map[msg.msg_id] = []
if not msg.is_last_frag():
fragments_map[msg.msg_id].append(msg)
else:
msg_frag_id = msg.msg_meta1
total_frags = msg_frag_id + 1
current_frags = len(fragments_map[msg.msg_id])
fragments_map[msg.msg_id].extend(
[None] * (total_frags - current_frags))
fragments_map[msg.msg_id][-1] = msg
# If all the frags for this message have already been received.
if None not in fragments_map[msg.msg_id]:
if fragments_map[msg.msg_id][-1].is_last_frag():
msg = message.Message.glue_fragments(fragments_map[msg.msg_id])
# If it is an ack message, then we don't need to put it on the
# inbound_queue.
msg_id = msg.msg_id
# If this message is an ack, then update the tracker.
if msg.msg_type == message.Message.MSG_ACK:
MSG_ACKED = message.MessageTracker.MSG_ACKED
acked_msg_id = msg.msg_payload
tracker = self.trackers[acked_msg_id]
tracker.set_state(MSG_ACKED)
# If the tracker is not being used, delete it.
if not tracker.isinuse:
self.delete_tracker(tracker)
return
self.inbound_queue.append((address, msg))
self.inbound_queue_sem.release()
# Send an ack now that we have received the msg.
self.send_ack(msg, address)
del fragments_map[msg_id]
return
def receiver(self):
'''Receive messages on the receiver socket. Forever.
'''
poller = select.epoll()
poller.register(self.socket.fileno(),
select.EPOLLIN | select.EPOLLET) # Edge-triggered.
self.logger.log("Receiver up!")
while True:
poll_responses = poller.poll()
for fileno, event in poll_responses:
if not event & select.EPOLLIN:
self.logger.log(
"Unexpected event on receiver socket.")
continue
data, address = self.socket.recvfrom(message.Message.MSG_SIZE)
self.logger.log("Received message from %s:%d" % address)
self.handle_received_msg(data, address)
class ZMQMessenger(Messenger):
# Constants
DEFAULT_PORT = 33310
NUM_TRIES = 3
# Messenger types
TYPE_SERVER = 0 # Listener socket. Accepts connections.
TYPE_CLIENT = 1 # Client socket. Connects to server.
VALID_TYPES = [TYPE_SERVER, TYPE_CLIENT]
def __init__(self, type, ip=None, port=DEFAULT_PORT):
'''
:param type: The type of Messenger. Can be SERVER or CLIENT messenger.
:param ip: The ip of the interface the socket should use.
:param port: The port the socket should use.
'''
super().__init__()
self.type = type
self.ip = ip
self.port = port
self.context = zmq.Context()
return
def start(self):
        if self.ip:
            public_ip = self.ip
else:
public_ip = self.get_public_ip()
identity = 'tcp://%s:%d' % (public_ip, self.port)
bind_addr = 'tcp://*:%d' % self.port
self.socket = self.context.socket(zmq.ROUTER)
self.socket.setsockopt(zmq.IDENTITY, bytes(identity, 'UTF-8'))
if self.type == self.TYPE_SERVER:
self.socket.bind(bind_addr)
return
def connect(self, address):
'''Connect to address and PING NUM_TRIES times till PONG received.
Raises ConnectionError if failed to connect after NUM_TRIES tries. None
otherwise.
'''
self.socket.connect('tcp://%s:%d' % address)
for _ in range(self.NUM_TRIES):
self.ping(address)
try:
msg_address, msg = next(self.receive(block=False, timeout=0.2))
if msg_address == address and msg == 'PONG':
return
except:
pass
else:
raise ConnectionError("Failed to connect.")
def ping(self, address):
self.send(json.dumps('PING'), address)
return
def pong(self, address):
self.send(json.dumps('PONG'), address)
return
def receive(self, deserialize=False, block=True, timeout=0):
while True:
flags = 0 if block else zmq.NOBLOCK
if timeout > 0.0:
if self.socket.poll(timeout=timeout*1000) == 0:
raise TimeoutError()
address = self.socket.recv_string(flags=flags)
assert self.socket.recv() == b"" # Empty delimiter
msg = self.socket.recv_json()
# FIXME(mtahmed): This would probably fail for IPV6.
address = address.split(':')[1:]
address[0] = address[0][2:]
address[1] = int(address[1])
address = tuple(address)
# FIXME(mtahmed): The PING-PONG should be taken care of in Messenger.
if not deserialize:
yield (address, msg)
continue
# FIXME
msg_type = msg.msg_type
decoded_msg = msg.msg_payload.decode('UTF-8')
if msg_type == message.Message.MSG_STATUS:
yield (address, int(decoded_msg))
elif msg_type == message.Message.MSG_TASKUNIT:
yield (address, taskunit.TaskUnit.deserialize(decoded_msg))
elif msg_type == message.Message.MSG_TASKUNIT_RESULT:
yield (address, taskunit.TaskUnit.deserialize(decoded_msg))
elif msg_type == message.Message.MSG_JOB:
yield (address, job.Job.deserialize(decoded_msg))
def send(self, msg, address):
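        # ROUTER sockets expect the destination identity frame and an empty
        # delimiter frame before the payload itself.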
address = 'tcp://%s:%d' % address
self.socket.send_string(address, zmq.SNDMORE)
self.socket.send_string("", zmq.SNDMORE)
self.socket.send_string(msg)
return
def send_job(self, job, address):
'''Send a job to a remote node.
'''
serialized_job = job.serialize(json_encode=True)
self.send(serialized_job, address)
return
def send_taskunit(self, tu, address,
attrs=['id', 'job_id', 'data', 'retries', 'state',
'result']):
'''Send a taskunit to a remote node.
'''
serialized_taskunit = tu.serialize(include_attrs=attrs,
json_encode=True)
self.send(serialized_taskunit, address)
return
def send_taskunit_result(self, tu, address,
attrs=['id', 'job_id', 'state', 'result']):
'''Send the result of running taskunit.
'''
serialized_result = tu.serialize(include_attrs=attrs, json_encode=True)
self.send(serialized_result, address)
return
@staticmethod
def get_public_ip():
'''Get the ip address of the external interface.
This tries to connect to some public service to try to see what
interface the socket binds to and uses that interface's address.
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
google_addr = socket.gethostbyname('www.google.com')
s.connect((google_addr, 80))
addr = s.getsockname()[0]
s.close()
return addr
|
[
"job.Job.deserialize",
"threading.Thread",
"message.Message.glue_fragments",
"taskunit.TaskUnit.deserialize",
"zmq.Context",
"socket.socket",
"message.Message.packed_fragments",
"json.dumps",
"socket.gethostbyname",
"select.epoll",
"message.Message",
"job.serialize",
"threading.Semaphore",
"collections.deque",
"message.MessageTracker"
] |
[((590, 609), 'collections.deque', 'collections.deque', ([], {}), '()\n', (607, 609), False, 'import collections\n'), ((640, 659), 'collections.deque', 'collections.deque', ([], {}), '()\n', (657, 659), False, 'import collections\n'), ((694, 722), 'threading.Semaphore', 'threading.Semaphore', ([], {'value': '(0)'}), '(value=0)\n', (713, 722), False, 'import threading\n'), ((756, 784), 'threading.Semaphore', 'threading.Semaphore', ([], {'value': '(0)'}), '(value=0)\n', (775, 784), False, 'import threading\n'), ((4157, 4205), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (4170, 4205), False, 'import socket\n'), ((4420, 4482), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.receiver', 'name': '"""receiver_thread"""'}), "(target=self.receiver, name='receiver_thread')\n", (4436, 4482), False, 'import threading\n'), ((4550, 4608), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.sender', 'name': '"""sender_thread"""'}), "(target=self.sender, name='sender_thread')\n", (4566, 4608), False, 'import threading\n'), ((5104, 5196), 'message.Message.packed_fragments', 'message.Message.packed_fragments', (['message.Message.MSG_STATUS', 'serialized_status', 'address'], {}), '(message.Message.MSG_STATUS,\n serialized_status, address)\n', (5136, 5196), False, 'import message\n'), ((5248, 5293), 'message.MessageTracker', 'message.MessageTracker', (['msg_id'], {'isinuse': 'track'}), '(msg_id, isinuse=track)\n', (5270, 5293), False, 'import message\n'), ((5751, 5825), 'message.Message.packed_fragments', 'message.Message.packed_fragments', (['message.Message.MSG_ACK', 'msg_id', 'address'], {}), '(message.Message.MSG_ACK, msg_id, address)\n', (5783, 5825), False, 'import message\n'), ((5881, 5926), 'message.MessageTracker', 'message.MessageTracker', (['msg_id'], {'isinuse': 'track'}), '(msg_id, isinuse=track)\n', (5903, 5926), False, 'import message\n'), ((6345, 6376), 'job.serialize', 'job.serialize', ([], {'json_encode': '(True)'}), '(json_encode=True)\n', (6358, 6376), False, 'import job\n'), ((6404, 6490), 'message.Message.packed_fragments', 'message.Message.packed_fragments', (['message.Message.MSG_JOB', 'serialized_job', 'address'], {}), '(message.Message.MSG_JOB, serialized_job,\n address)\n', (6436, 6490), False, 'import message\n'), ((6542, 6587), 'message.MessageTracker', 'message.MessageTracker', (['msg_id'], {'isinuse': 'track'}), '(msg_id, isinuse=track)\n', (6564, 6587), False, 'import message\n'), ((7255, 7351), 'message.Message.packed_fragments', 'message.Message.packed_fragments', (['message.Message.MSG_TASKUNIT', 'serialized_taskunit', 'address'], {}), '(message.Message.MSG_TASKUNIT,\n serialized_taskunit, address)\n', (7287, 7351), False, 'import message\n'), ((7403, 7448), 'message.MessageTracker', 'message.MessageTracker', (['msg_id'], {'isinuse': 'track'}), '(msg_id, isinuse=track)\n', (7425, 7448), False, 'import message\n'), ((7895, 7996), 'message.Message.packed_fragments', 'message.Message.packed_fragments', (['message.Message.MSG_TASKUNIT_RESULT', 'serialized_result', 'address'], {}), '(message.Message.MSG_TASKUNIT_RESULT,\n serialized_result, address)\n', (7927, 7996), False, 'import message\n'), ((8048, 8093), 'message.MessageTracker', 'message.MessageTracker', (['msg_id'], {'isinuse': 'track'}), '(msg_id, isinuse=track)\n', (8070, 8093), False, 'import message\n'), ((8346, 8360), 'select.epoll', 'select.epoll', ([], {}), '()\n', (8358, 8360), False, 'import select\n'), ((10180, 
10211), 'message.Message', 'message.Message', ([], {'packed_msg': 'msg'}), '(packed_msg=msg)\n', (10195, 10211), False, 'import message\n'), ((12070, 12084), 'select.epoll', 'select.epoll', ([], {}), '()\n', (12082, 12084), False, 'import select\n'), ((13429, 13442), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (13440, 13442), False, 'import zmq\n'), ((16548, 16579), 'job.serialize', 'job.serialize', ([], {'json_encode': '(True)'}), '(json_encode=True)\n', (16561, 16579), False, 'import job\n'), ((17636, 17684), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (17649, 17684), False, 'import socket\n'), ((17707, 17745), 'socket.gethostbyname', 'socket.gethostbyname', (['"""www.google.com"""'], {}), "('www.google.com')\n", (17727, 17745), False, 'import socket\n'), ((14603, 14621), 'json.dumps', 'json.dumps', (['"""PING"""'], {}), "('PING')\n", (14613, 14621), False, 'import json\n'), ((14696, 14714), 'json.dumps', 'json.dumps', (['"""PONG"""'], {}), "('PONG')\n", (14706, 14714), False, 'import json\n'), ((10927, 10984), 'message.Message.glue_fragments', 'message.Message.glue_fragments', (['fragments_map[msg.msg_id]'], {}), '(fragments_map[msg.msg_id])\n', (10957, 10984), False, 'import message\n'), ((2506, 2548), 'taskunit.TaskUnit.deserialize', 'taskunit.TaskUnit.deserialize', (['decoded_msg'], {}), '(decoded_msg)\n', (2535, 2548), False, 'import taskunit\n'), ((2637, 2679), 'taskunit.TaskUnit.deserialize', 'taskunit.TaskUnit.deserialize', (['decoded_msg'], {}), '(decoded_msg)\n', (2666, 2679), False, 'import taskunit\n'), ((9495, 9526), 'message.Message', 'message.Message', ([], {'packed_msg': 'msg'}), '(packed_msg=msg)\n', (9510, 9526), False, 'import message\n'), ((15893, 15935), 'taskunit.TaskUnit.deserialize', 'taskunit.TaskUnit.deserialize', (['decoded_msg'], {}), '(decoded_msg)\n', (15922, 15935), False, 'import taskunit\n'), ((2756, 2788), 'job.Job.deserialize', 'job.Job.deserialize', (['decoded_msg'], {}), '(decoded_msg)\n', (2775, 2788), False, 'import job\n'), ((16035, 16077), 'taskunit.TaskUnit.deserialize', 'taskunit.TaskUnit.deserialize', (['decoded_msg'], {}), '(decoded_msg)\n', (16064, 16077), False, 'import taskunit\n'), ((16165, 16197), 'job.Job.deserialize', 'job.Job.deserialize', (['decoded_msg'], {}), '(decoded_msg)\n', (16184, 16197), False, 'import job\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fds/protobuf/stach/v2/table/TableData.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from fds.protobuf.stach.v2.table import ColumnData_pb2 as fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2
from fds.protobuf.stach.v2.table import MetadataCollection_pb2 as fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2
from fds.protobuf.stach.v2.table import RowDefinition_pb2 as fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='fds/protobuf/stach/v2/table/TableData.proto',
package='factset.protobuf.stach.v2.table',
syntax='proto3',
serialized_options=b'\n#com.factset.protobuf.stach.v2.tableB\016TableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\252\002\037FactSet.Protobuf.Stach.V2.Table',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n+fds/protobuf/stach/v2/table/TableData.proto\x12\x1f\x66\x61\x63tset.protobuf.stach.v2.table\x1a,fds/protobuf/stach/v2/table/ColumnData.proto\x1a\x34\x66\x64s/protobuf/stach/v2/table/MetadataCollection.proto\x1a/fds/protobuf/stach/v2/table/RowDefinition.proto\"\xb7\x02\n\tTableData\x12<\n\x04rows\x18\x01 \x03(\x0b\x32..factset.protobuf.stach.v2.table.RowDefinition\x12H\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x37.factset.protobuf.stach.v2.table.TableData.ColumnsEntry\x12\x45\n\x08metadata\x18\x03 \x01(\x0b\x32\x33.factset.protobuf.stach.v2.table.MetadataCollection\x1a[\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12:\n\x05value\x18\x02 \x01(\x0b\x32+.factset.protobuf.stach.v2.table.ColumnData:\x02\x38\x01\x42\x9b\x01\n#com.factset.protobuf.stach.v2.tableB\x0eTableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\xaa\x02\x1f\x46\x61\x63tSet.Protobuf.Stach.V2.Tableb\x06proto3'
,
dependencies=[fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2.DESCRIPTOR,fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2.DESCRIPTOR,fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2.DESCRIPTOR,])
_TABLEDATA_COLUMNSENTRY = _descriptor.Descriptor(
name='ColumnsEntry',
full_name='factset.protobuf.stach.v2.table.TableData.ColumnsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='factset.protobuf.stach.v2.table.TableData.ColumnsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='factset.protobuf.stach.v2.table.TableData.ColumnsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=450,
serialized_end=541,
)
_TABLEDATA = _descriptor.Descriptor(
name='TableData',
full_name='factset.protobuf.stach.v2.table.TableData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='rows', full_name='factset.protobuf.stach.v2.table.TableData.rows', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='columns', full_name='factset.protobuf.stach.v2.table.TableData.columns', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='factset.protobuf.stach.v2.table.TableData.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_TABLEDATA_COLUMNSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=230,
serialized_end=541,
)
_TABLEDATA_COLUMNSENTRY.fields_by_name['value'].message_type = fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2._COLUMNDATA
_TABLEDATA_COLUMNSENTRY.containing_type = _TABLEDATA
_TABLEDATA.fields_by_name['rows'].message_type = fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2._ROWDEFINITION
_TABLEDATA.fields_by_name['columns'].message_type = _TABLEDATA_COLUMNSENTRY
_TABLEDATA.fields_by_name['metadata'].message_type = fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2._METADATACOLLECTION
DESCRIPTOR.message_types_by_name['TableData'] = _TABLEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TableData = _reflection.GeneratedProtocolMessageType('TableData', (_message.Message,), {
'ColumnsEntry' : _reflection.GeneratedProtocolMessageType('ColumnsEntry', (_message.Message,), {
'DESCRIPTOR' : _TABLEDATA_COLUMNSENTRY,
'__module__' : 'fds.protobuf.stach.v2.table.TableData_pb2'
# @@protoc_insertion_point(class_scope:factset.protobuf.stach.v2.table.TableData.ColumnsEntry)
})
,
'DESCRIPTOR' : _TABLEDATA,
'__module__' : 'fds.protobuf.stach.v2.table.TableData_pb2'
# @@protoc_insertion_point(class_scope:factset.protobuf.stach.v2.table.TableData)
})
_sym_db.RegisterMessage(TableData)
_sym_db.RegisterMessage(TableData.ColumnsEntry)
DESCRIPTOR._options = None
_TABLEDATA_COLUMNSENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.descriptor.FileDescriptor"
] |
[((405, 431), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (429, 431), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((837, 2405), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""fds/protobuf/stach/v2/table/TableData.proto"""', 'package': '"""factset.protobuf.stach.v2.table"""', 'syntax': '"""proto3"""', 'serialized_options': "b'\\n#com.factset.protobuf.stach.v2.tableB\\x0eTableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\\xaa\\x02\\x1fFactSet.Protobuf.Stach.V2.Table'", 'create_key': '_descriptor._internal_create_key', 'serialized_pb': 'b\'\\n+fds/protobuf/stach/v2/table/TableData.proto\\x12\\x1ffactset.protobuf.stach.v2.table\\x1a,fds/protobuf/stach/v2/table/ColumnData.proto\\x1a4fds/protobuf/stach/v2/table/MetadataCollection.proto\\x1a/fds/protobuf/stach/v2/table/RowDefinition.proto"\\xb7\\x02\\n\\tTableData\\x12<\\n\\x04rows\\x18\\x01 \\x03(\\x0b2..factset.protobuf.stach.v2.table.RowDefinition\\x12H\\n\\x07columns\\x18\\x02 \\x03(\\x0b27.factset.protobuf.stach.v2.table.TableData.ColumnsEntry\\x12E\\n\\x08metadata\\x18\\x03 \\x01(\\x0b23.factset.protobuf.stach.v2.table.MetadataCollection\\x1a[\\n\\x0cColumnsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12:\\n\\x05value\\x18\\x02 \\x01(\\x0b2+.factset.protobuf.stach.v2.table.ColumnData:\\x028\\x01B\\x9b\\x01\\n#com.factset.protobuf.stach.v2.tableB\\x0eTableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\\xaa\\x02\\x1fFactSet.Protobuf.Stach.V2.Tableb\\x06proto3\'', 'dependencies': '[fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2.DESCRIPTOR,\n fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2\n .DESCRIPTOR,\n fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2.\n DESCRIPTOR]'}), '(name=\n \'fds/protobuf/stach/v2/table/TableData.proto\', package=\n \'factset.protobuf.stach.v2.table\', syntax=\'proto3\', serialized_options=\n b\'\\n#com.factset.protobuf.stach.v2.tableB\\x0eTableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\\xaa\\x02\\x1fFactSet.Protobuf.Stach.V2.Table\'\n , create_key=_descriptor._internal_create_key, serialized_pb=\n b\'\\n+fds/protobuf/stach/v2/table/TableData.proto\\x12\\x1ffactset.protobuf.stach.v2.table\\x1a,fds/protobuf/stach/v2/table/ColumnData.proto\\x1a4fds/protobuf/stach/v2/table/MetadataCollection.proto\\x1a/fds/protobuf/stach/v2/table/RowDefinition.proto"\\xb7\\x02\\n\\tTableData\\x12<\\n\\x04rows\\x18\\x01 \\x03(\\x0b2..factset.protobuf.stach.v2.table.RowDefinition\\x12H\\n\\x07columns\\x18\\x02 \\x03(\\x0b27.factset.protobuf.stach.v2.table.TableData.ColumnsEntry\\x12E\\n\\x08metadata\\x18\\x03 \\x01(\\x0b23.factset.protobuf.stach.v2.table.MetadataCollection\\x1a[\\n\\x0cColumnsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12:\\n\\x05value\\x18\\x02 \\x01(\\x0b2+.factset.protobuf.stach.v2.table.ColumnData:\\x028\\x01B\\x9b\\x01\\n#com.factset.protobuf.stach.v2.tableB\\x0eTableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\\xaa\\x02\\x1fFactSet.Protobuf.Stach.V2.Tableb\\x06proto3\'\n , dependencies=[\n fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2.\n DESCRIPTOR,\n fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2\n .DESCRIPTOR,\n fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2.\n DESCRIPTOR])\n', (863, 2405), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6324, 6510), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""ColumnsEntry"""', '(_message.Message,)', "{'DESCRIPTOR': _TABLEDATA_COLUMNSENTRY, '__module__':\n 'fds.protobuf.stach.v2.table.TableData_pb2'}"], {}), "('ColumnsEntry', (_message.Message,\n ), {'DESCRIPTOR': _TABLEDATA_COLUMNSENTRY, '__module__':\n 'fds.protobuf.stach.v2.table.TableData_pb2'})\n", (6364, 6510), True, 'from google.protobuf import reflection as _reflection\n'), ((3144, 3562), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""value"""', 'full_name': '"""factset.protobuf.stach.v2.table.TableData.ColumnsEntry.value"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='value', full_name=\n 'factset.protobuf.stach.v2.table.TableData.ColumnsEntry.value', index=1,\n number=2, type=11, cpp_type=10, label=1, has_default_value=False,\n default_value=None, message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, serialized_options=None,\n file=DESCRIPTOR, create_key=_descriptor._internal_create_key)\n", (3171, 3562), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4041, 4443), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""rows"""', 'full_name': '"""factset.protobuf.stach.v2.table.TableData.rows"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='rows', full_name=\n 'factset.protobuf.stach.v2.table.TableData.rows', index=0, number=1,\n type=11, cpp_type=10, label=3, has_default_value=False, default_value=[\n ], message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR, create_key=_descriptor._internal_create_key)\n", (4068, 4443), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4464, 4872), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""columns"""', 'full_name': '"""factset.protobuf.stach.v2.table.TableData.columns"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='columns', full_name=\n 'factset.protobuf.stach.v2.table.TableData.columns', index=1, number=2,\n type=11, cpp_type=10, label=3, has_default_value=False, default_value=[\n ], message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR, create_key=_descriptor._internal_create_key)\n", (4491, 4872), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4893, 5305), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""metadata"""', 'full_name': '"""factset.protobuf.stach.v2.table.TableData.metadata"""', 'index': '(2)', 'number': '(3)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='metadata', full_name=\n 'factset.protobuf.stach.v2.table.TableData.metadata', index=2, number=3,\n type=11, cpp_type=10, label=1, has_default_value=False, default_value=\n None, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR, create_key=_descriptor._internal_create_key)\n", (4920, 5305), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import math
def is_prime(n):
if n <= 1:
return False
elif n == 2:
return True
elif n % 2 == 0:
return False
for divisor in range(3, math.ceil(math.sqrt(n)) + 1, 2):
if n % divisor == 0:
return False
return True
def find_n_primes(n):
primes = [2]
test = 2
while len(primes) < n:
test += 1
for d in primes:
if test % d == 0:
break
elif d > math.sqrt(test):
primes.append(test)
break
return primes
def find_primes_less_than_n(n):
primes = [2]
test = 2
while test < n:
test += 1
for d in primes:
if test % d == 0:
break
elif d > math.sqrt(test):
primes.append(test)
break
return primes
def generate_primes():
yield 2
primes = [2]
test = 2
while True:
test += 1
for d in primes:
if test % d == 0:
break
elif d > math.sqrt(test):
primes.append(test)
yield test
break
def sieve_of_eratosthenes(n):
primes = []
not_primes = []
for test in range(2, n):
if test not in not_primes:
primes.append(test)
i = test
while i < n:
i += test
                if i not in not_primes:
                    not_primes.append(i)
return primes
def prime_factorization(n):
factors = []
divisor = 2
n_sqrt = math.sqrt(n)
while True:
if n % divisor == 0:
factors.append(divisor)
n = n / divisor
elif n == 1:
break
elif divisor > n_sqrt:
factors.append(int(n))
break
else:
divisor += 1
return factors
def relatively_prime(n):
relatives = set()
relatives.add(1)
for test in range(2, n):
if len(set(prime_factorization(n)).intersection(set(prime_factorization(test)))) == 0:
relatives.add(test)
return relatives
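# Illustrative usage of the helpers above (added sketch, not in the original module):
if __name__ == "__main__":
    print(find_n_primes(5))             # [2, 3, 5, 7, 11]
    print(find_primes_less_than_n(12))  # [2, 3, 5, 7, 11]
    print(sieve_of_eratosthenes(20))    # [2, 3, 5, 7, 11, 13, 17, 19]
    print(prime_factorization(84))      # [2, 2, 3, 7]
    print(sorted(relatively_prime(9)))  # [1, 2, 4, 5, 7, 8]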
|
[
"math.sqrt"
] |
[((1625, 1637), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (1634, 1637), False, 'import math\n'), ((184, 196), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (193, 196), False, 'import math\n'), ((474, 489), 'math.sqrt', 'math.sqrt', (['test'], {}), '(test)\n', (483, 489), False, 'import math\n'), ((767, 782), 'math.sqrt', 'math.sqrt', (['test'], {}), '(test)\n', (776, 782), False, 'import math\n'), ((1059, 1074), 'math.sqrt', 'math.sqrt', (['test'], {}), '(test)\n', (1068, 1074), False, 'import math\n')]
|
from tkinter import *
import tkinter as tk
import os
import inspect
import configparser
#Create a window with a title
window = tk.Tk()
window.geometry("650x670")
window.title("Manager")
#Gets the system path for the manager file
filePath = os.path.abspath(inspect.getfile(inspect.currentframe()))
#Grabs the path separator character just before "manager.py"
extension = filePath[-11]
#gets the system path for the plugin folder
pluginPath = filePath[0:filePath.index("manager.py")] + "plugins"
#Switches to the config file dir
configPath = filePath[0:filePath.index("manager.py")] + "settings" + extension + "PLUGINS.conf"
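#Saves the edited text box contents back to the PLUGINS.conf file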
def saveFile():
s = tt.get(1.0,END)
f = open(configPath, 'wt')
f.write(s)
f.close()
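#Returns the list of file names in the plugin folder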
def getPlugins(dirName):
listOfFile = os.listdir(dirName)
return listOfFile
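#Appends the plugins selected in the listbox to the text box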
def clicked():
selected = [listbox.get(pos) for pos in listbox.curselection()]
for file in selected:
tt.insert(END, file + "\n")
pluginList = getPlugins(pluginPath);
MainLabel = Label(window, text="Select the plugins you wish to load and add them to the config file")
MainLabel.grid(row=0, column=0)
saveBtn = Button(window, text="Save File", width=10, command=saveFile)
saveBtn.grid(row=6, column=0)
addBtn = Button(window, text="Add Plugin", width=10, command=clicked)
addBtn.grid(row=2, column=0)
label2 = Label(window, text="Editable PLUGINS.conf file:")
label2.grid(row=3,column=0)
label3 = Label(window, text="Don't forget to hit save!")
label3.grid(row=5,column=0)
tt = Text(window, width= 80)
tt.grid(row=4,column=0)
tt.insert(END, open(configPath).read())
listbox = Listbox(window, width=60)
listbox.grid(row=1, column=0)
for name in pluginList:
if(name[-2:] == "py"):
listbox.insert(END, name)
window.mainloop()
|
[
"os.listdir",
"tkinter.Tk",
"inspect.currentframe"
] |
[((128, 135), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (133, 135), True, 'import tkinter as tk\n'), ((696, 715), 'os.listdir', 'os.listdir', (['dirName'], {}), '(dirName)\n', (706, 715), False, 'import os\n'), ((275, 297), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (295, 297), False, 'import inspect\n')]
|
#!/usr/bin/python3
import typing
import pytest
import ecological
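# ecological.AutoConfig fills class attributes from environment variables
# named after the upper-cased attribute (optionally with a prefix), as the
# tests below exercise.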
def test_regular_types(monkeypatch):
monkeypatch.setenv("INTEGER", "42")
monkeypatch.setenv("BOOLEAN", "False")
monkeypatch.setenv("ANY_STR", "AnyStr Example")
monkeypatch.setenv("TEXT", "Text Example")
monkeypatch.setenv("DICT", "{'key': 'value'}")
monkeypatch.setenv("LIST", "[1, 2, 3]")
class Configuration(ecological.AutoConfig):
integer: int
boolean: bool
any_str: typing.AnyStr
default: str = "Default Value"
text: typing.Text
dict: typing.Dict[str, str]
list: typing.List[int]
assert Configuration.integer == 42
assert Configuration.boolean is False
assert Configuration.any_str == "AnyStr Example"
assert Configuration.default == "Default Value"
assert Configuration.text == "Text Example"
assert Configuration.dict == {'key': 'value'}
assert Configuration.list == [1, 2, 3]
def test_nested(monkeypatch):
monkeypatch.setenv("INTEGER", "42")
monkeypatch.setenv("NESTED_BOOLEAN", "False")
class Configuration(ecological.AutoConfig):
integer: int
class Nested(ecological.AutoConfig, prefix='nested'):
boolean: bool
assert Configuration.integer == 42
assert Configuration.Nested.boolean is False
def test_explicit_variable(monkeypatch):
monkeypatch.setenv("TEST_Integer", "42")
class Configuration(ecological.AutoConfig, prefix="this_is_going_to_be_ignored"):
var1a = ecological.Variable("TEST_Integer", transform=lambda v, wt: int(v))
var1b: str = ecological.Variable("TEST_Integer", transform=lambda v, wt: v * 2)
var2: bool = ecological.Variable("404", default=False)
assert Configuration.var1a == 42
assert Configuration.var1b == "4242"
assert Configuration.var2 is False
def test_prefix(monkeypatch):
monkeypatch.setenv("PREFIX_INTEGER", "42")
monkeypatch.setenv("PREFIX_BOOLEAN", "False")
monkeypatch.setenv("PREFIX_NOT_DEFAULT", "Not Default")
class Configuration(ecological.AutoConfig, prefix="prefix"):
integer: int
boolean: bool
default: str = "Default"
not_default: typing.AnyStr
assert Configuration.integer == 42
assert Configuration.boolean is False
assert Configuration.default == "Default"
assert Configuration.not_default == "Not Default"
def test_invalid_value_regular_type(monkeypatch):
monkeypatch.setenv("PARAM_REGULAR_TYPE", "not an integer")
with pytest.raises(ValueError):
class Configuration(ecological.AutoConfig):
param_regular_type: int
def test_invalid_value_parsed_type(monkeypatch):
monkeypatch.setenv("PARAM_PARSED_TYPE", "not a list")
with pytest.raises(ValueError):
class Configuration(ecological.AutoConfig):
param_parsed_type: list = ['param_1', 'param_2']
def test_no_default():
with pytest.raises(AttributeError):
class Configuration(ecological.AutoConfig):
no_default: int
bool_var: bool = False
|
[
"pytest.raises",
"ecological.Variable"
] |
[((1617, 1683), 'ecological.Variable', 'ecological.Variable', (['"""TEST_Integer"""'], {'transform': '(lambda v, wt: v * 2)'}), "('TEST_Integer', transform=lambda v, wt: v * 2)\n", (1636, 1683), False, 'import ecological\n'), ((1705, 1746), 'ecological.Variable', 'ecological.Variable', (['"""404"""'], {'default': '(False)'}), "('404', default=False)\n", (1724, 1746), False, 'import ecological\n'), ((2538, 2563), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2551, 2563), False, 'import pytest\n'), ((2772, 2797), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2785, 2797), False, 'import pytest\n'), ((2946, 2975), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2959, 2975), False, 'import pytest\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetOverrideResult',
'AwaitableGetOverrideResult',
'get_override',
'get_override_output',
]
@pulumi.output_type
class GetOverrideResult:
def __init__(__self__, api_proxy=None, name=None, sampling_config=None):
if api_proxy and not isinstance(api_proxy, str):
raise TypeError("Expected argument 'api_proxy' to be a str")
pulumi.set(__self__, "api_proxy", api_proxy)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sampling_config and not isinstance(sampling_config, dict):
raise TypeError("Expected argument 'sampling_config' to be a dict")
pulumi.set(__self__, "sampling_config", sampling_config)
@property
@pulumi.getter(name="apiProxy")
def api_proxy(self) -> str:
"""
ID of the API proxy that will have its trace configuration overridden.
"""
return pulumi.get(self, "api_proxy")
@property
@pulumi.getter
def name(self) -> str:
"""
ID of the trace configuration override specified as a system-generated UUID.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="samplingConfig")
def sampling_config(self) -> 'outputs.GoogleCloudApigeeV1TraceSamplingConfigResponse':
"""
Trace configuration to override.
"""
return pulumi.get(self, "sampling_config")
class AwaitableGetOverrideResult(GetOverrideResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOverrideResult(
api_proxy=self.api_proxy,
name=self.name,
sampling_config=self.sampling_config)
def get_override(environment_id: Optional[str] = None,
organization_id: Optional[str] = None,
override_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOverrideResult:
"""
Gets a trace configuration override.
"""
__args__ = dict()
__args__['environmentId'] = environment_id
__args__['organizationId'] = organization_id
__args__['overrideId'] = override_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:apigee/v1:getOverride', __args__, opts=opts, typ=GetOverrideResult).value
return AwaitableGetOverrideResult(
api_proxy=__ret__.api_proxy,
name=__ret__.name,
sampling_config=__ret__.sampling_config)
@_utilities.lift_output_func(get_override)
def get_override_output(environment_id: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
override_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOverrideResult]:
"""
Gets a trace configuration override.
"""
...
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] |
[((1159, 1189), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""apiProxy"""'}), "(name='apiProxy')\n", (1172, 1189), False, 'import pulumi\n'), ((1600, 1636), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""samplingConfig"""'}), "(name='samplingConfig')\n", (1613, 1636), False, 'import pulumi\n'), ((721, 765), 'pulumi.set', 'pulumi.set', (['__self__', '"""api_proxy"""', 'api_proxy'], {}), "(__self__, 'api_proxy', api_proxy)\n", (731, 765), False, 'import pulumi\n'), ((889, 923), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (899, 923), False, 'import pulumi\n'), ((1082, 1138), 'pulumi.set', 'pulumi.set', (['__self__', '"""sampling_config"""', 'sampling_config'], {}), "(__self__, 'sampling_config', sampling_config)\n", (1092, 1138), False, 'import pulumi\n'), ((1340, 1369), 'pulumi.get', 'pulumi.get', (['self', '"""api_proxy"""'], {}), "(self, 'api_proxy')\n", (1350, 1369), False, 'import pulumi\n'), ((1555, 1579), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (1565, 1579), False, 'import pulumi\n'), ((1808, 1843), 'pulumi.get', 'pulumi.get', (['self', '"""sampling_config"""'], {}), "(self, 'sampling_config')\n", (1818, 1843), False, 'import pulumi\n'), ((2667, 2689), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (2687, 2689), False, 'import pulumi\n'), ((2781, 2890), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""google-native:apigee/v1:getOverride"""', '__args__'], {'opts': 'opts', 'typ': 'GetOverrideResult'}), "('google-native:apigee/v1:getOverride', __args__, opts\n =opts, typ=GetOverrideResult)\n", (2802, 2890), False, 'import pulumi\n')]
|
#
# Script for transferring Dynatrace timeseries into AWS CloudWatch.
#
import requests, datetime, time, sched, subprocess, shlex
# Enter your own environment id and API key token here
YOUR_ENV_ID = 'ENTER_YOUR_ENV_ID_HERE';
YOUR_API_TOKEN = 'ENTER_YOUR_API_TOKEN_HERE';
# Configure a list of monitored components you would like to transfer timeseries for.
# Please mind that the component has to support the requested type of timeseries and
# that the timeseries also supports the requested aggregation type.
# Find details on metric types within our Dynatrace API help documentation here:
# https://help.dynatrace.com/api-documentation/v1/
CONFIG = [
{'timeseriesId':'com.dynatrace.builtin:appmethod.useractionsperminute', 'aggregation':'COUNT', 'entities':['APPLICATION_METHOD-13A2457ABF20CF35', 'APPLICATION_METHOD-322A1F8DD1984123']},
{'timeseriesId':'com.dynatrace.builtin:host.mem.used', 'aggregation':'AVG', 'entities':['HOST-F5D85B7DCDD8A93C']}
]
scheduler = sched.scheduler(time.time, time.sleep)
def export_metric(name):
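    # re-arm the scheduler: run this export again in 360 seconds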
scheduler.enter(360, 1, export_metric, ('first',))
for conf in CONFIG:
print('Pull timeseries ' + conf['timeseriesId']);
headers = {'Content-Type' : 'application/json', 'Authorization' : 'Api-Token ' + YOUR_API_TOKEN };
url = 'https://' + YOUR_ENV_ID + '.live.dynatrace.com/api/v1/timeseries/';
data = {
'relativeTime' : '5mins',
'timeseriesId' : conf['timeseriesId'],
'aggregationType' : conf['aggregation'],
'entities' : conf['entities']
};
r = requests.post(url, json=data, headers=headers);
if r.status_code == 200:
j = r.json();
for entity in conf['entities']:
for dp in j['result']['dataPoints'][entity]:
val = "";
print(datetime.datetime.utcfromtimestamp(int(dp[0]/1000)).isoformat());
if str(dp[1]) != 'None':
val = str(dp[1]);
cmd = 'aws cloudwatch put-metric-data --metric-name "' + j['result']['entities'][entity] + ' (' + conf['timeseriesId'] + ')" --namespace "Dynatrace" --value ' + val + ' --timestamp ' + datetime.datetime.utcfromtimestamp(int(dp[0]/1000)).isoformat();
subprocess.call(shlex.split(cmd));
elif r.status_code == 401:
print('Dynatrace authentication failed, please check your API token!');
elif r.status_code == 400:
print('Wrong timeseriesid, aggregation type or entity combination, please check Dynatrace API help for valid combinations!');
else:
        print('Error ' + str(r.status_code));
scheduler.enter(1, 1, export_metric, ('first',))
scheduler.run()
|
[
"sched.scheduler",
"requests.post",
"shlex.split"
] |
[((978, 1016), 'sched.scheduler', 'sched.scheduler', (['time.time', 'time.sleep'], {}), '(time.time, time.sleep)\n', (993, 1016), False, 'import requests, datetime, time, sched, subprocess, shlex\n'), ((1516, 1562), 'requests.post', 'requests.post', (['url'], {'json': 'data', 'headers': 'headers'}), '(url, json=data, headers=headers)\n', (1529, 1562), False, 'import requests, datetime, time, sched, subprocess, shlex\n'), ((2117, 2133), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (2128, 2133), False, 'import requests, datetime, time, sched, subprocess, shlex\n')]
|
from logging import log
import torch
import argparse
import sys
import os
import tqdm
from copy import deepcopy
import torchvision
from torchvision import transforms
from torch import nn
from fedlab.core.client.manager import PassiveClientManager
from fedlab.core.client.trainer import SGDClientTrainer
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.core.network import DistNetwork
from fedlab.utils import Logger, SerializationTool
from fedlab.utils.functional import load_dict
from fedlab.utils.dataset import SubsetSampler
from setting import get_model, get_dataloader
class SerialProxTrainer(SubsetSerialTrainer):
def __init__(self,
model,
dataset,
data_slices,
optimizer,
criterion,
logger=None,
cuda=False,
args=None) -> None:
super().__init__(model, dataset, data_slices, logger, cuda, args)
self.optimizer = optimizer
self.criterion = criterion
@property
def uplink_package(self):
return super().uplink_package
def _get_dataloader(self, client_id):
train_loader = torch.utils.data.DataLoader(
self.dataset,
sampler=SubsetSampler(indices=self.data_slices[client_id],
shuffle=True),
batch_size=self.args.batch_size)
return train_loader
def _train_alone(self, model_parameters, train_loader):
frz_model = deepcopy(self._model)
SerializationTool.deserialize_model(frz_model, model_parameters)
SerializationTool.deserialize_model(
self._model, model_parameters) # load parameters
self._LOGGER.info("Local train procedure is running")
for ep in range(self.args.epochs):
self._model.train()
for inputs, labels in train_loader:
if self.cuda:
inputs, labels = inputs.cuda(self.gpu), labels.cuda(
self.gpu)
outputs = self._model(inputs)
l1 = self.criterion(outputs, labels)
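                # FedProx proximal term: accumulate squared distance to the frozen global model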
l2 = 0.0
for w0, w in zip(frz_model.parameters(),
self._model.parameters()):
l2 += torch.sum(torch.pow(w - w0, 2))
loss = l1 + 0.5 * self.args.mu * l2
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self._LOGGER.info("Local train procedure is finished")
return self.model_parameters
# return model_parameters - self.model_parameters
class ProxTrainer(SGDClientTrainer):
"""Refer to GitHub implementation https://github.com/WwZzz/easyFL """
def __init__(self,
model,
data_loader,
epochs,
optimizer,
criterion,
cuda=True,
logger=Logger(),
args=None):
super().__init__(model,
data_loader,
epochs,
optimizer,
criterion,
cuda=cuda,
logger=logger)
self.delta_w = None
self.args = args
@property
def uplink_package(self):
return self.model_parameters
def local_process(self, payload) -> None:
model_parameters = payload[0]
frz_model = deepcopy(self._model)
SerializationTool.deserialize_model(frz_model, model_parameters)
SerializationTool.deserialize_model(
self._model, model_parameters) # load parameters
self._LOGGER.info("Local train procedure is running")
for ep in range(self.epochs):
self._model.train()
for inputs, labels in self._data_loader:
if self.cuda:
inputs, labels = inputs.cuda(self.gpu), labels.cuda(
self.gpu)
outputs = self._model(inputs)
l1 = self.criterion(outputs, labels)
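                # FedProx proximal term: accumulate squared distance to the frozen global model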
l2 = 0.0
for w0, w in zip(frz_model.parameters(),
self._model.parameters()):
l2 += torch.sum(torch.pow(w - w0, 2))
loss = l1 + 0.5 * self.args.mu * l2
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self._LOGGER.info("Local train procedure is finished")
#self.delta_w = model_parameters - self.model_parameters
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Distbelief training example")
parser.add_argument("--ip", type=str)
parser.add_argument("--port", type=str)
parser.add_argument("--world_size", type=int)
parser.add_argument("--rank", type=int)
parser.add_argument("--lr", type=float, default=0.1)
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--dataset", type=str, default="mnist")
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--mu", type=float, default=0.1)
parser.add_argument("--scale", type=bool, default=False)
parser.add_argument("--gpu", type=str, default="0,1,2,3")
parser.add_argument("--ethernet", type=str, default=None)
args = parser.parse_args()
if args.gpu != "-1":
args.cuda = True
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
else:
args.cuda = False
model = get_model(args)
network = DistNetwork(
address=(args.ip, args.port),
world_size=args.world_size,
rank=args.rank,
ethernet=args.ethernet,
)
LOGGER = Logger(log_name="client " + str(args.rank))
if not args.scale:
trainloader, _ = get_dataloader(args)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss()
trainer = ProxTrainer(model,
trainloader,
epochs=args.epochs,
optimizer=optimizer,
criterion=criterion,
cuda=args.cuda,
logger=LOGGER,
args=args)
else:
data_slices = load_dict("mnist_noniid_200_100.pkl")
#data_slices = load_dict("mnist_iid_100.pkl")
client_id_list = [
i for i in range((args.rank - 1) * 10, (args.rank - 1) * 10 + 10)
]
# get corresponding data partition indices
sub_data_indices = {
idx: data_slices[cid]
for idx, cid in enumerate(client_id_list)
}
root = '../datasets/mnist/'
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss()
trainer = SerialProxTrainer(model,
trainset,
data_slices=sub_data_indices,
optimizer=optimizer,
criterion=criterion,
cuda=args.cuda,
logger=LOGGER,
args=args)
manager_ = PassiveClientManager(trainer=trainer,
network=network,
logger=LOGGER)
manager_.run()
|
[
"fedlab.utils.Logger",
"copy.deepcopy",
"setting.get_dataloader",
"argparse.ArgumentParser",
"setting.get_model",
"torch.nn.CrossEntropyLoss",
"fedlab.utils.functional.load_dict",
"fedlab.utils.dataset.SubsetSampler",
"torch.pow",
"fedlab.core.client.manager.PassiveClientManager",
"fedlab.core.network.DistNetwork",
"fedlab.utils.SerializationTool.deserialize_model",
"torchvision.transforms.ToTensor"
] |
[((4697, 4763), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Distbelief training example"""'}), "(description='Distbelief training example')\n", (4720, 4763), False, 'import argparse\n'), ((5616, 5631), 'setting.get_model', 'get_model', (['args'], {}), '(args)\n', (5625, 5631), False, 'from setting import get_model, get_dataloader\n'), ((5647, 5761), 'fedlab.core.network.DistNetwork', 'DistNetwork', ([], {'address': '(args.ip, args.port)', 'world_size': 'args.world_size', 'rank': 'args.rank', 'ethernet': 'args.ethernet'}), '(address=(args.ip, args.port), world_size=args.world_size, rank=\n args.rank, ethernet=args.ethernet)\n', (5658, 5761), False, 'from fedlab.core.network import DistNetwork\n'), ((7651, 7720), 'fedlab.core.client.manager.PassiveClientManager', 'PassiveClientManager', ([], {'trainer': 'trainer', 'network': 'network', 'logger': 'LOGGER'}), '(trainer=trainer, network=network, logger=LOGGER)\n', (7671, 7720), False, 'from fedlab.core.client.manager import PassiveClientManager\n'), ((1531, 1552), 'copy.deepcopy', 'deepcopy', (['self._model'], {}), '(self._model)\n', (1539, 1552), False, 'from copy import deepcopy\n'), ((1561, 1625), 'fedlab.utils.SerializationTool.deserialize_model', 'SerializationTool.deserialize_model', (['frz_model', 'model_parameters'], {}), '(frz_model, model_parameters)\n', (1596, 1625), False, 'from fedlab.utils import Logger, SerializationTool\n'), ((1634, 1700), 'fedlab.utils.SerializationTool.deserialize_model', 'SerializationTool.deserialize_model', (['self._model', 'model_parameters'], {}), '(self._model, model_parameters)\n', (1669, 1700), False, 'from fedlab.utils import Logger, SerializationTool\n'), ((3004, 3012), 'fedlab.utils.Logger', 'Logger', ([], {}), '()\n', (3010, 3012), False, 'from fedlab.utils import Logger, SerializationTool\n'), ((3534, 3555), 'copy.deepcopy', 'deepcopy', (['self._model'], {}), '(self._model)\n', (3542, 3555), False, 'from copy import deepcopy\n'), ((3564, 3628), 'fedlab.utils.SerializationTool.deserialize_model', 'SerializationTool.deserialize_model', (['frz_model', 'model_parameters'], {}), '(frz_model, model_parameters)\n', (3599, 3628), False, 'from fedlab.utils import Logger, SerializationTool\n'), ((3637, 3703), 'fedlab.utils.SerializationTool.deserialize_model', 'SerializationTool.deserialize_model', (['self._model', 'model_parameters'], {}), '(self._model, model_parameters)\n', (3672, 3703), False, 'from fedlab.utils import Logger, SerializationTool\n'), ((5903, 5923), 'setting.get_dataloader', 'get_dataloader', (['args'], {}), '(args)\n', (5917, 5923), False, 'from setting import get_model, get_dataloader\n'), ((6012, 6033), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6031, 6033), False, 'from torch import nn\n'), ((6430, 6467), 'fedlab.utils.functional.load_dict', 'load_dict', (['"""mnist_noniid_200_100.pkl"""'], {}), "('mnist_noniid_200_100.pkl')\n", (6439, 6467), False, 'from fedlab.utils.functional import load_dict\n'), ((7194, 7215), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7213, 7215), False, 'from torch import nn\n'), ((1277, 1341), 'fedlab.utils.dataset.SubsetSampler', 'SubsetSampler', ([], {'indices': 'self.data_slices[client_id]', 'shuffle': '(True)'}), '(indices=self.data_slices[client_id], shuffle=True)\n', (1290, 1341), False, 'from fedlab.utils.dataset import SubsetSampler\n'), ((7083, 7104), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7102, 7104), False, 'from torchvision import transforms\n'), ((2334, 2354), 'torch.pow', 'torch.pow', (['(w - w0)', '(2)'], {}), '(w - w0, 2)\n', (2343, 2354), False, 'import torch\n'), ((4337, 4357), 'torch.pow', 'torch.pow', (['(w - w0)', '(2)'], {}), '(w - w0, 2)\n', (4346, 4357), False, 'import torch\n')]
|
import re
import numpy as np
#numerical operation
import matplotlib.pyplot as plt
#matplotlib provides plotting functions (graphs, etc.)
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
import array
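# Input files store one instance per line as "(v1, v2, ..., vk)";
# the parsers below read them character by character.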
def findminmax(dirname, filename):
print('findminmax')
mf = open(dirname+filename,'r')
#TotalInstances = list()
strfreq = ''
intfreq = 0
m = 0
tmpcount=0
minlist = list()
maxlist = list()
firstlineflag = True
numberofinsatnces = 0
numoffeattype = 0
while True:
ch = mf.read(1)
if ch == '':
break
if ch == '(':
AnInstance = list()
strfreq = ''
elif ch == ')':
AnInstance.append(int(strfreq))
numberofinsatnces += 1
numoffeattype = len(AnInstance)
if firstlineflag == True:
for i in range(numoffeattype):
minlist.append(9999)
maxlist.append(-9999)
firstlineflag = False
for i in range(numoffeattype):
if minlist[i]>AnInstance[i]:
minlist[i]=AnInstance[i]
if maxlist[i]<AnInstance[i]:
maxlist[i]=AnInstance[i]
tmpcount+=1
strfreq = ''
elif ch == ',':
AnInstance.append(int(strfreq))
strfreq = ''
elif ch == ' ':
continue
else:
strfreq += ch
mf.close()
fminmax = open(dirname+"Noofinstance_minmax_"+filename,'w')
fminmax.write(str(numberofinsatnces))
fminmax.write(' ')
fminmax.write(str(numoffeattype))
fminmax.write('\n')
for minv in minlist:
fminmax.write(str(minv))
fminmax.write(' ')
fminmax.write('\n')
for maxv in maxlist:
fminmax.write(str(maxv))
fminmax.write(' ')
fminmax.close()
def convertToNormVals(dirname, filename):
print('convertToNormVals')
mf = open(dirname+filename,'r')
fminmax = open(dirname+"Noofinstance_minmax_"+filename,'r')
lines = fminmax.readlines()
minStrlist = lines[1].split()
maxStrlist = lines[2].split()
fminmax.close()
strfreq = ''
minlist = list()
for minstr in minStrlist:
minlist.append(float(minstr))
maxlist = list()
for maxstr in maxStrlist:
maxlist.append(float(maxstr))
fnorm = open(dirname+"Norm_"+filename,'w')
while True:
ch = mf.read(1)
if ch == '':
break
if ch == '(':
AnInstance = list()
strfreq = ''
elif ch == ')':
AnInstance.append(float(strfreq))
strfreq = ''
for i in range(len(AnInstance)):
if minlist[i]>maxlist[i]:
exit()
if minlist[i] == 0 and maxlist[i] == 0:
AnInstance[i] = 0 #should be consided again later...
elif minlist[i] == maxlist[i]:
AnInstance[i] = 0 #should be consided again later...
else:
AnInstance[i] = float(float((AnInstance[i]-minlist[i]))/float((maxlist[i]-minlist[i])))
for i in range(len(AnInstance)):
fnorm.write(str(AnInstance[i]))
fnorm.write(' ')
fnorm.write('\n')
elif ch == ',':
AnInstance.append(float(strfreq))
strfreq = ''
elif ch == ' ':
continue
else:
strfreq += ch
mf.close()
fnorm.close()
def convertToNTemplate(dirname, filename):
print('convertToTemplate')
mf = open(dirname+filename,'r')
strfreq = ''
f = open(dirname+"NewTemp_"+filename,'w')
AllZero = True
noinstances = 0
nofeattype = 0
while True:
ch = mf.read(1)
if ch == '':
break
if ch == '(':
AnInstance = list()
AllZero = True
strfreq = ''
elif ch == ')':
if not float(strfreq) == 0.0:
AllZero = False
AnInstance.append(float(strfreq))
nofeattype = len(AnInstance)
if AllZero == False:
noinstances +=1
strfreq = ''
for i in range(len(AnInstance)):
f.write(str(AnInstance[i]))
f.write(' ')
f.write('\n')
elif ch == ',':
if not float(strfreq) == 0.0:
AllZero = False
AnInstance.append(float(strfreq))
strfreq = ''
elif ch == ' ':
continue
else:
strfreq += ch
mf.close()
f.close()
return noinstances, nofeattype
def readNormInstances(dirname, filename, numberofinsatnces, numoffeattype):
print('readNormInstances')
TotalInstances = np.empty(numberofinsatnces*numoffeattype,dtype='float64')
f = open(dirname+filename,'r')
index = 0
#for line in f:
while True:
line = f.readline()
if line == '':
break
s = line.split()
for ss in s:
TotalInstances[index] = float(ss)
index +=1
TotalInstances = np.reshape(TotalInstances, (numberofinsatnces,numoffeattype))
f.close()
return TotalInstances
def divideIntoTwoSets(TotalInstances, numoffeattypeA, numoffeattypeB):
TotalInstances = np.hsplit(TotalInstances, np.array([numoffeattypeA, numoffeattypeA+numoffeattypeB]))
return TotalInstances[0], TotalInstances[1]
def minikmeanGo(TotalInstances, dirname, filename, nocluster):
np.random.seed(5)
noOfCluster = nocluster
kmeans = MiniBatchKMeans(n_clusters=noOfCluster)
print(kmeans)
kmeans.fit(TotalInstances)
print('fitting done')
centroids = kmeans.cluster_centers_
resultF = open(dirname+filename,'w')
for centroid in centroids:
for v in centroid:
resultF.write(str(v)+' ')
resultF.write('\n')
resultF.close()
def KmeanGo(TotalInstances, dirname, filename, nocluster):
np.random.seed(5)
noOfCluster = nocluster
kmeans = KMeans(n_clusters=noOfCluster, n_jobs=5)
print(kmeans)
kmeans.fit(TotalInstances)
print('fitting done')
centroids = kmeans.cluster_centers_
resultF = open(dirname+filename,'w')
for centroid in centroids:
for v in centroid:
resultF.write(str(v)+' ')
resultF.write('\n')
resultF.close()
#findminmax('./40000TotalSets/','Funcs.txt')
#findminmax('./40000TotalSets/','Methods.txt')
#noinstances, nofeattype = convertToNTemplate('./40000TotalSets/','Funcs.txt')
#t = readNormInstances('./40000TotalSets/', 'NewTemp_Funcs.txt', noinstances, nofeattype)
#minikmeanGo(t, './40000TotalSets/', 'F13_FUNCTIONS_so.txt')
"""
noinstances, nofeattype = convertToNTemplate('./','Funcs.txt')
t = readNormInstances('./', 'NewTemp_Funcs.txt', noinstances, nofeattype)
ta, tb = divideIntoTwoSets(t, 1321, 555)
minikmeanGo(tb, './', 'F13_FUNCTIONS_so_SYS.txt', 200)
minikmeanGo(ta, './', 'F13_FUNCTIONS_so_OP.txt', 2500)
"""
noinstances, nofeattype = convertToNTemplate('./','Methods.txt')
t = readNormInstances('./', 'NewTemp_Methods.txt', noinstances, nofeattype)
ta, tb = divideIntoTwoSets(t, 217, 238)
KmeanGo(tb, './', 'F12_METHOD_smali_API.txt', 1000)
KmeanGo(ta, './', 'F12_METHOD_smali_OP.txt', 5000)
|
[
"sklearn.cluster.MiniBatchKMeans",
"numpy.random.seed",
"numpy.empty",
"sklearn.cluster.KMeans",
"numpy.array",
"numpy.reshape"
] |
[((4919, 4979), 'numpy.empty', 'np.empty', (['(numberofinsatnces * numoffeattype)'], {'dtype': '"""float64"""'}), "(numberofinsatnces * numoffeattype, dtype='float64')\n", (4927, 4979), True, 'import numpy as np\n'), ((5266, 5328), 'numpy.reshape', 'np.reshape', (['TotalInstances', '(numberofinsatnces, numoffeattype)'], {}), '(TotalInstances, (numberofinsatnces, numoffeattype))\n', (5276, 5328), True, 'import numpy as np\n'), ((5663, 5680), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (5677, 5680), True, 'import numpy as np\n'), ((5722, 5761), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'noOfCluster'}), '(n_clusters=noOfCluster)\n', (5737, 5761), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((6132, 6149), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (6146, 6149), True, 'import numpy as np\n'), ((6191, 6231), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'noOfCluster', 'n_jobs': '(5)'}), '(n_clusters=noOfCluster, n_jobs=5)\n', (6197, 6231), False, 'from sklearn.cluster import KMeans\n'), ((5488, 5547), 'numpy.array', 'np.array', (['[numoffeattypeA, numoffeattypeA + numoffeattypeB]'], {}), '([numoffeattypeA, numoffeattypeA + numoffeattypeB])\n', (5496, 5547), True, 'import numpy as np\n')]
|
# This file will be (temporarily) included in the Python sys.path
# when config.yml is loaded by the Tiled server.
import io
from PIL import Image
from tiled.structures.image_serializer_helpers import img_as_ubyte
def smiley_separated_variables(array, metadata):
return "\n".join("🙂".join(str(number) for number in row) for row in array)
def to_jpeg(array, metadata):
file = io.BytesIO()
# PIL detail: ensure array has compatible data type before handing to PIL.
prepared_array = img_as_ubyte(array)
image = Image.fromarray(prepared_array)
image.save(file, format="jpeg")
return file.getbuffer()
|
[
"PIL.Image.fromarray",
"io.BytesIO",
"tiled.structures.image_serializer_helpers.img_as_ubyte"
] |
[((389, 401), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (399, 401), False, 'import io\n'), ((502, 521), 'tiled.structures.image_serializer_helpers.img_as_ubyte', 'img_as_ubyte', (['array'], {}), '(array)\n', (514, 521), False, 'from tiled.structures.image_serializer_helpers import img_as_ubyte\n'), ((534, 565), 'PIL.Image.fromarray', 'Image.fromarray', (['prepared_array'], {}), '(prepared_array)\n', (549, 565), False, 'from PIL import Image\n')]
|
# Author: <NAME>
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import logsumexp
'''
z = Wx + µ + E
the equation above represents the latent variable model which
relates a d-dimensional observed data vector z to a corresponding
q-dimensional vector of latent variables x,
with q < d, for isotropic noise E ∼ N(0, σ2I)
z : observed data vector
x : latent variables
W : latent_to_observation matrix
µ : centres_of_clusters
E : var_of_latent
This code implements the generative model of a mixture of PPCA.
Given the number of clusters, data_dim (D) and latent_dim (L),
we generate the data for every cluster:
we sample a latent vector from a Gaussian prior, pass it through the
Wk matrix and add noise, where Wk maps from the L-dimensional subspace to the D-dimensional
visible space. Using the expectation-maximization algorithm we estimate the parameters
and then plot the PC vectors.
'''
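# Minimal illustrative sketch of the generative model above: draw n samples
# from a single mixture component, with x ~ N(0, I) and E ~ N(0, sigma2 I).
# sample_ppca_component is a hypothetical helper added here; it is not used
# by the EM code below.
def sample_ppca_component(W, mu, sigma2, n):
    data_dim, latent_dim = W.shape
    x = np.random.randn(n, latent_dim)               # latent draws
    noise = np.sqrt(sigma2) * np.random.randn(n, data_dim)  # isotropic noise
    return x @ W.T + mu + noise                      # z = Wx + mu + E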
def mixture_ppca_parameter_initialization(data, n_clusters, latent_dim,
n_iterations):
"""
The k-means algorithm is used to determine the centres. The
priors are computed from the proportion of examples belonging to each
cluster. The covariance matrices are calculated as the sample
covariance of the points associated with (i.e. closest to) the
corresponding centres. For a mixture of PPCA model, the PPCA
decomposition is calculated for the points closest to a given centre.
This initialisation can be used as the starting point for training
the model using the EM algorithm.
W : latent_to_observation matrix
µ/mu : centres_of_clusters
pi : proportion of data in each cluster
sigma2 : variance of latent
covars : covariance of the points associated with (i.e. closest to) the
corresponding centres
"""
n_datapts, data_dim = data.shape
# initialization of the centres of clusters
init_centers = np.random.randint(0, n_datapts, n_clusters)
# Randomly choose distinct initial centres for the clusters
while (len(np.unique(init_centers)) != n_clusters):
init_centers = np.random.randint(0, n_datapts, n_clusters)
mu = data[init_centers, :]
distance_square = np.zeros((n_datapts, n_clusters))
clusters = np.zeros(n_datapts, dtype=np.int32)
# Running iterations for K means algorithm to assign centres for clusters
for k in range(n_iterations):
# assign clusters
for c in range(n_clusters):
distance_square[:, c] = np.power(data - mu[c, :], 2).sum(1)
clusters = np.argmin(distance_square, axis=1)
# compute distortion
distmin = distance_square[range(n_datapts), clusters]
# compute new centers
for c in range(n_clusters):
mu[c, :] = data[clusters == c, :].mean(0)
# parameter initialization
pi = np.zeros(n_clusters) # Sum should be equal to 1
W = np.zeros((n_clusters, data_dim, latent_dim))
sigma2 = np.zeros(n_clusters)
for c in range(n_clusters):
W[c, :, :] = np.random.randn(data_dim, latent_dim)
pi[c] = (clusters == c).sum() / n_datapts
sigma2[c] = (distmin[clusters == c]).mean() / data_dim
covars = np.zeros(n_clusters)
for i in range(n_clusters):
covars[i] = (np.var(data[clusters == i, 0]) +
np.var(data[clusters == i, 1])) / 2
return pi, mu, W, sigma2, covars, clusters
def mixture_ppca_expectation_maximization(data, pi, mu, W, sigma2, niter):
'''
    we can find p(latent|data) under the assumption that the data are Gaussian
    z : observed data vector
    x : latent variables
W : latent_to_observation matrix
µ/mu : centres_of_clusters
d : data_dimension
q : latent_dimention
σ2/ sigma2 : variance of latent
π/pi : cluster proportion
    p(z|x) = (2πσ2)^−d/2 * exp(−1/(2σ2) * ||z − Wx − µ||²)
    p(z) = ∫p(z|x)p(x)dx
    Solving for p(z) and then using the result, we can find p(x|z),
    from which we obtain
    the log-likelihood function
    log_likelihood = −N/2 * (d ln(2π) + ln|Σ| + tr(Σ⁻¹S))
We can develop an iterative EM algorithm for
optimisation of all of the model parameters µ,W and σ2
If Rn,i = p(zn, i) is the posterior responsibility of
mixture i for generating data point zn,given by
Rn,i = (p(zn|i) * πi) / p(zn)
Using EM, the parameter estimates are as follows:
µi = Σ (Rn,i * zn) / Σ Rn,i
Si = 1/(πi*N) * ΣRn,i*(zn − µi)*(zn − µi)'
Using Si we can estimate W and σ2
For more information on EM algorithm for mixture of PPCA
visit Mixtures of Probabilistic Principal Component Analysers
by <NAME> and <NAME>:
page 5-10 of http://www.miketipping.com/papers/met-mppca.pdf
'''
n_datapts, data_dim = data.shape
n_clusters = len(sigma2)
_, latent_dim = W[0].shape
M = np.zeros((n_clusters, latent_dim, latent_dim))
Minv = np.zeros((n_clusters, latent_dim, latent_dim))
Cinv = np.zeros((n_clusters, data_dim, data_dim))
logR = np.zeros((n_datapts, n_clusters))
R = np.zeros((n_datapts, n_clusters))
M[:] = 0.
Minv[:] = 0.
Cinv[:] = 0.
log_likelihood = np.zeros(niter)
for i in range(niter):
print('.', end='')
for c in range(n_clusters):
# M
'''
M = σ2I + WT.W
'''
M[c, :, :] = sigma2[c] * np.eye(latent_dim) + np.dot(W[c, :, :].T, W[c, :, :])
Minv[c, :, :] = np.linalg.inv(M[c, :, :])
# Cinv
Cinv[c, :, :] = (np.eye(data_dim)
- np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
) / sigma2[c]
# R_ni
deviation_from_center = data - mu[c, :]
logR[:, c] = (np.log(pi[c])
+ 0.5 * np.log(
np.linalg.det(
np.eye(data_dim) - np.dot(np.dot(W[c, :, :],
Minv[c, :, :]), W[c, :, :].T)
)
)
- 0.5 * data_dim * np.log(sigma2[c])
- 0.5 * (deviation_from_center * np.dot(deviation_from_center,
Cinv[c, :, :].T)).sum(1)
)
'''
        Using the log-sum-exp trick; see Section 2.5.4 in "Probabilistic Machine Learning: An Introduction" by <NAME> for more information
logsumexp(logR - myMax, axis=1) can be replaced by logsumexp(logR, axis=1)
myMax + logsumexp((logR - myMax), axis=0) can be replaced by logsumexp(logR, axis=0)
myMax in the above equations refer to
myMax = logR.max(axis=0) & myMax = logR.max(axis=1).reshape((n_datapts, 1))
'''
log_likelihood[i] = (
(logsumexp(logR, axis=1)).sum(axis=0)
- n_datapts * data_dim * np.log(2 * math.pi) / 2.
)
logR = logR - np.reshape(logsumexp(logR, axis=1),
(n_datapts, 1))
logpi = logsumexp(logR, axis=0) - np.log(n_datapts)
logpi = logpi.T
pi = np.exp(logpi)
R = np.exp(logR)
for c in range(n_clusters):
mu[c, :] = (R[:, c].reshape((n_datapts, 1)) * data).sum(axis=0) / R[:, c].sum()
deviation_from_center = data - mu[c, :].reshape((1, data_dim))
'''
Si = 1/(πi*N) * ΣRn,i*(zn − µi)*(zn − µi)'
Si is used to estimate
'''
Si = ((1 / (pi[c] * n_datapts))
* np.dot((R[:, c].reshape((n_datapts, 1)) * deviation_from_center).T,
np.dot(deviation_from_center, W[c, :, :]))
)
Wnew = np.dot(Si, np.linalg.inv(sigma2[c] * np.eye(latent_dim)
+ np.dot(np.dot(Minv[c, :, :], W[c, :, :].T), Si)))
sigma2[c] = (1 / data_dim) * (
(R[:, c].reshape(n_datapts, 1) * np.power(deviation_from_center, 2)).sum()
/
(n_datapts * pi[c])
-
np.trace(np.dot(np.dot(Si, Minv[c, :, :]), Wnew.T))
)
W[c, :, :] = Wnew
return pi, mu, W, sigma2, log_likelihood
def generate_data():
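    # 500 points on an annulus: radius uniform in [1, 2), angle uniform in [0, 2*pi)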
n = 500
r = np.random.rand(1, n) + 1
theta = np.random.rand(1, n) * (2 * math.pi)
x1 = r * np.sin(theta)
x2 = r * np.cos(theta)
X = np.vstack((x1, x2))
return np.transpose(X)
def mixppcademo(data, n_clusters):
'''
W : latent to observation matrix
mu : centres_of_clusters
pi : proportions of data in each of the cluster
sigma2 : variance of latent
L : log likelihood after each iteration
covars : covariance of the points associated with (i.e. closest to) the
corresponding centres
'''
plt.plot(data[:, 0], data[:, 1], 'o', c='blue', mfc='none')
pi, mu, W, sigma2, covars, clusters = mixture_ppca_parameter_initialization(
data, n_clusters, latent_dim=1, n_iterations=10)
pi, mu, W, sigma2, L = mixture_ppca_expectation_maximization(data, pi, mu,
W, sigma2, 10)
for i in range(n_clusters):
v = W[i, :, :]
#Plotting the pc vectors using 2 standard deviations
start = mu[i].reshape((2, 1)) - (v * 2 * np.sqrt(sigma2[i]))
endpt = mu[i].reshape((2, 1)) + (v * 2 * np.sqrt(sigma2[i]))
linex = [start[0], endpt[0]]
liney = [start[1], endpt[1]]
plt.plot(linex, liney, linewidth=3, c='black')
theta = np.arange(0, 2 * math.pi, 0.02)
#Plotting the confidence interval ellipse using 2 standard deviations
x = 2 * np.sqrt(sigma2[i]) * np.cos(theta)
y = np.sqrt(covars[i]) * np.sin(theta)
rot_matrix = np.vstack((np.hstack((v[0], -v[1])), np.hstack((v[1], v[0]))))
ellipse = np.dot(rot_matrix, np.vstack((x, y)))
ellipse = np.transpose(ellipse)
ellipse = ellipse + np.dot(np.ones((len(theta), 1)), mu[i, :].reshape((1, 2)))
plt.plot(ellipse[:, 0], ellipse[:, 1], c='crimson')
def main():
np.random.seed(61)
data = generate_data()
plt.figure(0)
mixppcademo(data, n_clusters=1)
plt.savefig("mixppca_k-1.png", dpi=300)
np.random.seed(7)
data = generate_data()
plt.figure(1)
mixppcademo(data, n_clusters=10)
plt.savefig("mixppca_k-10.png", dpi=300)
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.random.seed",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.sin",
"numpy.exp",
"numpy.arange",
"scipy.special.logsumexp",
"numpy.unique",
"numpy.random.randn",
"numpy.power",
"numpy.transpose",
"numpy.var",
"matplotlib.pyplot.show",
"numpy.hstack",
"numpy.linalg.inv",
"numpy.cos",
"numpy.dot",
"numpy.vstack",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.random.rand",
"numpy.eye",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((1916, 1959), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_datapts', 'n_clusters'], {}), '(0, n_datapts, n_clusters)\n', (1933, 1959), True, 'import numpy as np\n'), ((2205, 2238), 'numpy.zeros', 'np.zeros', (['(n_datapts, n_clusters)'], {}), '((n_datapts, n_clusters))\n', (2213, 2238), True, 'import numpy as np\n'), ((2255, 2290), 'numpy.zeros', 'np.zeros', (['n_datapts'], {'dtype': 'np.int32'}), '(n_datapts, dtype=np.int32)\n', (2263, 2290), True, 'import numpy as np\n'), ((2857, 2877), 'numpy.zeros', 'np.zeros', (['n_clusters'], {}), '(n_clusters)\n', (2865, 2877), True, 'import numpy as np\n'), ((2915, 2959), 'numpy.zeros', 'np.zeros', (['(n_clusters, data_dim, latent_dim)'], {}), '((n_clusters, data_dim, latent_dim))\n', (2923, 2959), True, 'import numpy as np\n'), ((2974, 2994), 'numpy.zeros', 'np.zeros', (['n_clusters'], {}), '(n_clusters)\n', (2982, 2994), True, 'import numpy as np\n'), ((3217, 3237), 'numpy.zeros', 'np.zeros', (['n_clusters'], {}), '(n_clusters)\n', (3225, 3237), True, 'import numpy as np\n'), ((4962, 5008), 'numpy.zeros', 'np.zeros', (['(n_clusters, latent_dim, latent_dim)'], {}), '((n_clusters, latent_dim, latent_dim))\n', (4970, 5008), True, 'import numpy as np\n'), ((5021, 5067), 'numpy.zeros', 'np.zeros', (['(n_clusters, latent_dim, latent_dim)'], {}), '((n_clusters, latent_dim, latent_dim))\n', (5029, 5067), True, 'import numpy as np\n'), ((5080, 5122), 'numpy.zeros', 'np.zeros', (['(n_clusters, data_dim, data_dim)'], {}), '((n_clusters, data_dim, data_dim))\n', (5088, 5122), True, 'import numpy as np\n'), ((5135, 5168), 'numpy.zeros', 'np.zeros', (['(n_datapts, n_clusters)'], {}), '((n_datapts, n_clusters))\n', (5143, 5168), True, 'import numpy as np\n'), ((5178, 5211), 'numpy.zeros', 'np.zeros', (['(n_datapts, n_clusters)'], {}), '((n_datapts, n_clusters))\n', (5186, 5211), True, 'import numpy as np\n'), ((5285, 5300), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (5293, 5300), True, 'import numpy as np\n'), ((8738, 8757), 'numpy.vstack', 'np.vstack', (['(x1, x2)'], {}), '((x1, x2))\n', (8747, 8757), True, 'import numpy as np\n'), ((8770, 8785), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (8782, 8785), True, 'import numpy as np\n'), ((9149, 9208), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', 'data[:, 1]', '"""o"""'], {'c': '"""blue"""', 'mfc': '"""none"""'}), "(data[:, 0], data[:, 1], 'o', c='blue', mfc='none')\n", (9157, 9208), True, 'import matplotlib.pyplot as plt\n'), ((10486, 10504), 'numpy.random.seed', 'np.random.seed', (['(61)'], {}), '(61)\n', (10500, 10504), True, 'import numpy as np\n'), ((10538, 10551), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (10548, 10551), True, 'import matplotlib.pyplot as plt\n'), ((10594, 10633), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mixppca_k-1.png"""'], {'dpi': '(300)'}), "('mixppca_k-1.png', dpi=300)\n", (10605, 10633), True, 'import matplotlib.pyplot as plt\n'), ((10639, 10656), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (10653, 10656), True, 'import numpy as np\n'), ((10690, 10703), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (10700, 10703), True, 'import matplotlib.pyplot as plt\n'), ((10747, 10787), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mixppca_k-10.png"""'], {'dpi': '(300)'}), "('mixppca_k-10.png', dpi=300)\n", (10758, 10787), True, 'import matplotlib.pyplot as plt\n'), ((10793, 10803), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10801, 10803), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2149), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_datapts', 'n_clusters'], {}), '(0, n_datapts, n_clusters)\n', (2123, 2149), True, 'import numpy as np\n'), ((2562, 2596), 'numpy.argmin', 'np.argmin', (['distance_square'], {'axis': '(1)'}), '(distance_square, axis=1)\n', (2571, 2596), True, 'import numpy as np\n'), ((3050, 3087), 'numpy.random.randn', 'np.random.randn', (['data_dim', 'latent_dim'], {}), '(data_dim, latent_dim)\n', (3065, 3087), True, 'import numpy as np\n'), ((7378, 7391), 'numpy.exp', 'np.exp', (['logpi'], {}), '(logpi)\n', (7384, 7391), True, 'import numpy as np\n'), ((7405, 7417), 'numpy.exp', 'np.exp', (['logR'], {}), '(logR)\n', (7411, 7417), True, 'import numpy as np\n'), ((8598, 8618), 'numpy.random.rand', 'np.random.rand', (['(1)', 'n'], {}), '(1, n)\n', (8612, 8618), True, 'import numpy as np\n'), ((8636, 8656), 'numpy.random.rand', 'np.random.rand', (['(1)', 'n'], {}), '(1, n)\n', (8650, 8656), True, 'import numpy as np\n'), ((8687, 8700), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (8693, 8700), True, 'import numpy as np\n'), ((8715, 8728), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (8721, 8728), True, 'import numpy as np\n'), ((9856, 9902), 'matplotlib.pyplot.plot', 'plt.plot', (['linex', 'liney'], {'linewidth': '(3)', 'c': '"""black"""'}), "(linex, liney, linewidth=3, c='black')\n", (9864, 9902), True, 'import matplotlib.pyplot as plt\n'), ((9920, 9951), 'numpy.arange', 'np.arange', (['(0)', '(2 * math.pi)', '(0.02)'], {}), '(0, 2 * math.pi, 0.02)\n', (9929, 9951), True, 'import numpy as np\n'), ((10293, 10314), 'numpy.transpose', 'np.transpose', (['ellipse'], {}), '(ellipse)\n', (10305, 10314), True, 'import numpy as np\n'), ((10412, 10463), 'matplotlib.pyplot.plot', 'plt.plot', (['ellipse[:, 0]', 'ellipse[:, 1]'], {'c': '"""crimson"""'}), "(ellipse[:, 0], ellipse[:, 1], c='crimson')\n", (10420, 10463), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2064), 'numpy.unique', 'np.unique', (['init_centers'], {}), '(init_centers)\n', (2050, 2064), True, 'import numpy as np\n'), ((5599, 5624), 'numpy.linalg.inv', 'np.linalg.inv', (['M[c, :, :]'], {}), '(M[c, :, :])\n', (5612, 5624), True, 'import numpy as np\n'), ((7295, 7318), 'scipy.special.logsumexp', 'logsumexp', (['logR'], {'axis': '(0)'}), '(logR, axis=0)\n', (7304, 7318), False, 'from scipy.special import logsumexp\n'), ((7321, 7338), 'numpy.log', 'np.log', (['n_datapts'], {}), '(n_datapts)\n', (7327, 7338), True, 'import numpy as np\n'), ((10069, 10082), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10075, 10082), True, 'import numpy as np\n'), ((10096, 10114), 'numpy.sqrt', 'np.sqrt', (['covars[i]'], {}), '(covars[i])\n', (10103, 10114), True, 'import numpy as np\n'), ((10118, 10131), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10124, 10131), True, 'import numpy as np\n'), ((10255, 10272), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (10264, 10272), True, 'import numpy as np\n'), ((3293, 3323), 'numpy.var', 'np.var', (['data[clusters == i, 0]'], {}), '(data[clusters == i, 0])\n', (3299, 3323), True, 'import numpy as np\n'), ((3348, 3378), 'numpy.var', 'np.var', (['data[clusters == i, 1]'], {}), '(data[clusters == i, 1])\n', (3354, 3378), True, 'import numpy as np\n'), ((5535, 5567), 'numpy.dot', 'np.dot', (['W[c, :, :].T', 'W[c, :, :]'], {}), '(W[c, :, :].T, W[c, :, :])\n', (5541, 5567), True, 'import numpy as np\n'), ((7193, 7216), 'scipy.special.logsumexp', 'logsumexp', (['logR'], {'axis': '(1)'}), '(logR, axis=1)\n', (7202, 7216), False, 'from scipy.special import logsumexp\n'), ((9681, 9699), 'numpy.sqrt', 'np.sqrt', (['sigma2[i]'], {}), '(sigma2[i])\n', (9688, 9699), True, 'import numpy as np\n'), ((9751, 9769), 'numpy.sqrt', 'np.sqrt', (['sigma2[i]'], {}), '(sigma2[i])\n', (9758, 9769), True, 'import numpy as np\n'), ((10048, 10066), 'numpy.sqrt', 'np.sqrt', (['sigma2[i]'], {}), '(sigma2[i])\n', (10055, 10066), True, 'import numpy as np\n'), ((10165, 10189), 'numpy.hstack', 'np.hstack', (['(v[0], -v[1])'], {}), '((v[0], -v[1]))\n', (10174, 10189), True, 'import numpy as np\n'), ((10191, 10214), 'numpy.hstack', 'np.hstack', (['(v[1], v[0])'], {}), '((v[1], v[0]))\n', (10200, 10214), True, 'import numpy as np\n'), ((2506, 2534), 'numpy.power', 'np.power', (['(data - mu[c, :])', '(2)'], {}), '(data - mu[c, :], 2)\n', (2514, 2534), True, 'import numpy as np\n'), ((5514, 5532), 'numpy.eye', 'np.eye', (['latent_dim'], {}), '(latent_dim)\n', (5520, 5532), True, 'import numpy as np\n'), ((5677, 5693), 'numpy.eye', 'np.eye', (['data_dim'], {}), '(data_dim)\n', (5683, 5693), True, 'import numpy as np\n'), ((7042, 7065), 'scipy.special.logsumexp', 'logsumexp', (['logR'], {'axis': '(1)'}), '(logR, axis=1)\n', (7051, 7065), False, 'from scipy.special import logsumexp\n'), ((7121, 7140), 'numpy.log', 'np.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (7127, 7140), True, 'import numpy as np\n'), ((7913, 7954), 'numpy.dot', 'np.dot', (['deviation_from_center', 'W[c, :, :]'], {}), '(deviation_from_center, W[c, :, :])\n', (7919, 7954), True, 'import numpy as np\n'), ((5733, 5766), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (5739, 5766), True, 'import numpy as np\n'), ((5928, 5941), 'numpy.log', 'np.log', (['pi[c]'], {}), '(pi[c])\n', (5934, 5941), True, 'import numpy as np\n'), ((6287, 6304), 'numpy.log', 'np.log', (['sigma2[c]'], {}), '(sigma2[c])\n', (6293, 6304), True, 'import numpy as np\n'), ((8036, 8054), 'numpy.eye', 'np.eye', (['latent_dim'], {}), '(latent_dim)\n', (8042, 8054), True, 'import numpy as np\n'), ((8109, 8144), 'numpy.dot', 'np.dot', (['Minv[c, :, :]', 'W[c, :, :].T'], {}), '(Minv[c, :, :], W[c, :, :].T)\n', (8115, 8144), True, 'import numpy as np\n'), ((8418, 8443), 'numpy.dot', 'np.dot', (['Si', 'Minv[c, :, :]'], {}), '(Si, Minv[c, :, :])\n', (8424, 8443), True, 'import numpy as np\n'), ((6365, 6411), 'numpy.dot', 'np.dot', (['deviation_from_center', 'Cinv[c, :, :].T'], {}), '(deviation_from_center, Cinv[c, :, :].T)\n', (6371, 6411), True, 'import numpy as np\n'), ((8252, 8286), 'numpy.power', 'np.power', (['deviation_from_center', '(2)'], {}), '(deviation_from_center, 2)\n', (8260, 8286), True, 'import numpy as np\n'), ((6054, 6070), 'numpy.eye', 'np.eye', (['data_dim'], {}), '(data_dim)\n', (6060, 6070), True, 'import numpy as np\n'), ((6080, 6113), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (6086, 6113), True, 'import numpy as np\n')]
|
import streamlit as st
def app():
st.write("## Welcome to the Skink Search Tool app")
st.write("""
The app filters existing skink data by multiple criteria in order to help with the identification of skinks.
Latest data update: 10 Apr 2020. \n
Use the navigation bar to select the type of search you would like to perform.
""")
st.markdown("### Toes")
st.write("Use this option to search by missing toes only")
with st.beta_expander("More information"):
st.markdown("""
- This search filters by all possible combinations of **`missing toes`** and excludes other missing toes. \n
Example:
> `selected toes` = [LF1, LF2] \n
> Results: \n
> The search returns all skinks where [LF1], [LF2], [LF1, LF2] or [none] toes are missing.
""")
st.markdown("### Search")
st.write("Use this option to search by multiple criteria:")
st.markdown("""
- SVL (snout to vent length) (mm) \n
Existing skinks above 70mm are classified as adults and labelled with `projected_SVL`=100
""")
with st.beta_expander("More information"):
st.markdown("""
The search considers matches within 5 mm of the selected length. All skinks above 70 mm (`@adult`) are classified as adults.
In finding matches, it is assumed that skinks grow by **10** mm per year (`@delta`) and reach adult size at **70** mm (`@adult`).
Search is performed on a calculated variable, `projected_SVL`:
```python
projected_SVL= skink_SVL + delta*(current_year – skink_Year)
```
""")
st.markdown("""
- Paddock/traplist \n
Each paddock contains multiple traps, click below to view the full list of traps
""")
with st.beta_expander("See traps"):
st.markdown("""
| Paddock | Traps |
| ------ | ------ |
| pdk_R66 | ['R66', 'board', 'R67', 'M14', 'R68', 'R69', 'R70', 'M11', 'PR1'] |
| pdk_R71 | ['R71', 'PR2', 'R72', 'M9', 'P3', 'PR3', 'R73', 'M8', 'PR4', 'R74', 'M7', 'PR5', 'R75', 'PR6', 'R76', 'M5', 'PR7'] |
| pdk_R77 | ['R2', 'PR13', 'R3', 'PR14', 'R4', 'PR15', 'P16', 'PR16', 'R6', 'PR17'] |
| pdk_R02 | ['W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'W7', 'W8', 'W9', 'W10', 'W11', 'W12', 'W13'] |
| ... | ... |
""")
st.markdown("""
- Toes \n
Search by intact or missing toes.
""")
image = 'data/P1060519.jpg'
st.image(image, caption='El pretty skinko', use_column_width = True)
with st.sidebar.beta_expander("About"):
st.markdown(''' Copyright © 2021 <NAME>.
This app is open source. You can find it on [GitHub](https://github.com/eri3l/skinks) ''')
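# Hypothetical helper (added sketch, not part of the app) mirroring the
# projected_SVL formula quoted in the expander above, with delta = 10 mm/year
# and adult = 70 mm as stated there:
def projected_svl(skink_svl, skink_year, current_year, delta=10, adult=70):
    if skink_svl >= adult:
        return 100  # adults above 70 mm are labelled projected_SVL = 100
    return skink_svl + delta * (current_year - skink_year)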
|
[
"streamlit.markdown",
"streamlit.image",
"streamlit.sidebar.beta_expander",
"streamlit.write",
"streamlit.beta_expander"
] |
[((39, 90), 'streamlit.write', 'st.write', (['"""## Welcome to the Skink Search Tool app"""'], {}), "('## Welcome to the Skink Search Tool app')\n", (47, 90), True, 'import streamlit as st\n'), ((96, 398), 'streamlit.write', 'st.write', (['""" \n The app filters existing skink data by multiple criteria in order to help with the identification of skinks. \n Latest data update: 10 Apr 2020. \n\n Use the navigation bar to select the type of search you would like to perform.\n\t"""'], {}), '(\n """ \n The app filters existing skink data by multiple criteria in order to help with the identification of skinks. \n Latest data update: 10 Apr 2020. \n\n Use the navigation bar to select the type of search you would like to perform.\n\t"""\n )\n', (104, 398), True, 'import streamlit as st\n'), ((404, 427), 'streamlit.markdown', 'st.markdown', (['"""### Toes"""'], {}), "('### Toes')\n", (415, 427), True, 'import streamlit as st\n'), ((432, 490), 'streamlit.write', 'st.write', (['"""Use this option to search by missing toes only"""'], {}), "('Use this option to search by missing toes only')\n", (440, 490), True, 'import streamlit as st\n'), ((934, 959), 'streamlit.markdown', 'st.markdown', (['"""### Search"""'], {}), "('### Search')\n", (945, 959), True, 'import streamlit as st\n'), ((964, 1023), 'streamlit.write', 'st.write', (['"""Use this option to search by multiple criteria:"""'], {}), "('Use this option to search by multiple criteria:')\n", (972, 1023), True, 'import streamlit as st\n'), ((1028, 1229), 'streamlit.markdown', 'st.markdown', (['"""\n - SVL (snout to vent length) (mm) \n\n Existing skinks above 70mm are classified as adults and labelled with `projected_SVL`=100\n """'], {}), '(\n """\n - SVL (snout to vent length) (mm) \n\n Existing skinks above 70mm are classified as adults and labelled with `projected_SVL`=100\n """\n )\n', (1039, 1229), True, 'import streamlit as st\n'), ((1730, 1911), 'streamlit.markdown', 'st.markdown', (['"""\n - Paddock/traplist \n\n Each paddock contains multiple traps, click below to view the full list of traps\n """'], {}), '(\n """\n - Paddock/traplist \n\n Each paddock contains multiple traps, click below to view the full list of traps\n """\n )\n', (1741, 1911), True, 'import streamlit as st\n'), ((2618, 2740), 'streamlit.markdown', 'st.markdown', (['"""\n - Toes \n\n Search by intact or missing toes.\n """'], {}), '(\n """\n - Toes \n\n Search by intact or missing toes.\n """\n )\n', (2629, 2740), True, 'import streamlit as st\n'), ((2789, 2855), 'streamlit.image', 'st.image', (['image'], {'caption': '"""El pretty skinko"""', 'use_column_width': '(True)'}), "(image, caption='El pretty skinko', use_column_width=True)\n", (2797, 2855), True, 'import streamlit as st\n'), ((500, 536), 'streamlit.beta_expander', 'st.beta_expander', (['"""More information"""'], {}), "('More information')\n", (516, 536), True, 'import streamlit as st\n'), ((546, 926), 'streamlit.markdown', 'st.markdown', (['"""\n - This search filters by all possible combinations of **`missing toes`** and excludes other missing toes. \n\n Example:\n \n > `selected toes` = [LF1, LF2] \n\n > Results: \n\n > The search returns all skinks where [LF1], [LF2], [LF1, LF2] or [none] toes are missing. \n """'], {}), '(\n """\n - This search filters by all possible combinations of **`missing toes`** and excludes other missing toes. \n\n Example:\n \n > `selected toes` = [LF1, LF2] \n\n > Results: \n\n > The search returns all skinks where [LF1], [LF2], [LF1, LF2] or [none] toes are missing. \n """\n )\n', (557, 926), True, 'import streamlit as st\n'), ((1230, 1266), 'streamlit.beta_expander', 'st.beta_expander', (['"""More information"""'], {}), "('More information')\n", (1246, 1266), True, 'import streamlit as st\n'), ((1276, 1735), 'streamlit.markdown', 'st.markdown', (['"""\n The search considers matches within 5 mm of the selected length. All skinks above 70 mm (`@adult`) are classified as adults. \nIn finding matches, it is assumed that skinks grow by **10** mm per year (`@delta`) and reach adult size at **70** mm (`@adult`). \nSearch is performed on a calculated variable, `projected_SVL`:\n```python\nprojected_SVL= skink_SVL + delta*(current_year – skink_Year) \n```\n """'], {}), '(\n """\n The search considers matches within 5 mm of the selected length. All skinks above 70 mm (`@adult`) are classified as adults. \nIn finding matches, it is assumed that skinks grow by **10** mm per year (`@delta`) and reach adult size at **70** mm (`@adult`). \nSearch is performed on a calculated variable, `projected_SVL`:\n```python\nprojected_SVL= skink_SVL + delta*(current_year – skink_Year) \n```\n """\n )\n', (1287, 1735), True, 'import streamlit as st\n'), ((1912, 1941), 'streamlit.beta_expander', 'st.beta_expander', (['"""See traps"""'], {}), "('See traps')\n", (1928, 1941), True, 'import streamlit as st\n'), ((1955, 2623), 'streamlit.markdown', 'st.markdown', (['"""\n | Paddock | Traps |\n | ------ | ------ |\n | pdk_R66 | [\'R66\', \'board\', \'R67\', \'M14\', \'R68\', \'R69\', \'R70\', \'M11\', \'PR1\'] |\n | pdk_R71 | [\'R71\', \'PR2\', \'R72\', \'M9\', \'P3\', \'PR3\', \'R73\', \'M8\', \'PR4\', \'R74\', \'M7\', \'PR5\', \'R75\', \'PR6\', \'R76\', \'M5\', \'PR7\'] |\n | pdk_R77 | [\'R2\', \'PR13\', \'R3\', \'PR14\', \'R4\', \'PR15\', \'P16\', \'PR16\', \'R6\', \'PR17\'] |\n | pdk_R02 | [\'W1\', \'W2\', \'W3\', \'W4\', \'W5\', \'W6\', \'W7\', \'W8\', \'W9\', \'W10\', \'W11\', \'W12\', \'W13\'] |\n | ... | ... |\n """'], {}), '(\n """\n | Paddock | Traps |\n | ------ | ------ |\n | pdk_R66 | [\'R66\', \'board\', \'R67\', \'M14\', \'R68\', \'R69\', \'R70\', \'M11\', \'PR1\'] |\n | pdk_R71 | [\'R71\', \'PR2\', \'R72\', \'M9\', \'P3\', \'PR3\', \'R73\', \'M8\', \'PR4\', \'R74\', \'M7\', \'PR5\', \'R75\', \'PR6\', \'R76\', \'M5\', \'PR7\'] |\n | pdk_R77 | [\'R2\', \'PR13\', \'R3\', \'PR14\', \'R4\', \'PR15\', \'P16\', \'PR16\', \'R6\', \'PR17\'] |\n | pdk_R02 | [\'W1\', \'W2\', \'W3\', \'W4\', \'W5\', \'W6\', \'W7\', \'W8\', \'W9\', \'W10\', \'W11\', \'W12\', \'W13\'] |\n | ... | ... |\n """\n )\n', (1966, 2623), True, 'import streamlit as st\n'), ((2873, 2906), 'streamlit.sidebar.beta_expander', 'st.sidebar.beta_expander', (['"""About"""'], {}), "('About')\n", (2897, 2906), True, 'import streamlit as st\n'), ((2919, 3079), 'streamlit.markdown', 'st.markdown', (['""" Copyright © 2021 <NAME>. \n This app is open source. You can find it on [GitHub](https://github.com/eri3l/skinks) """'], {}), '(\n """ Copyright © 2021 <NAME>. \n This app is open source. You can find it on [GitHub](https://github.com/eri3l/skinks) """\n )\n', (2930, 3079), True, 'import streamlit as st\n')]
|
import json
import sys
import imageio
import matplotlib.pyplot as plt
import cv2
import random
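# Spot-check a COCO-style annotation file: for roughly 1% of the annotations,
# draw the bounding box on the source image and scatter the segmentation
# polygon vertices on top of it.
# Usage sketch (the script name is hypothetical):
#   python visualize_annotations.py path/to/annotations.json
# Images are looked up via '<json_path>/../images/<file_name>'.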
def search_images_by_id(_id):
for _ in valid['images']:
if _['id'] == _id:
return _
def search_categories_by_id(_id):
for _ in valid['categories']:
if _['id'] == _id:
return _
def plot_polygon(mask, polygons):
plt.imshow(mask)
for polygon in polygons:
plt.scatter(polygon[0::2], polygon[1::2], s=2)
plt.show()
with open(sys.argv[1], 'r') as f:
valid = json.load(f)
for ann in valid['annotations']:
if random.random() < 0.01:
ann_images = search_images_by_id(ann['image_id'])
ann_category = search_categories_by_id(ann['category_id'])
bbox = list(map(int, ann['bbox']))
rgb = imageio.imread(sys.argv[1] + '/../images/' + str(ann_images['file_name']))
rgb = cv2.rectangle(rgb, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 5)
plot_polygon(rgb, ann['segmentation'])
|
[
"json.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.scatter",
"random.random",
"cv2.rectangle"
] |
[((363, 379), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {}), '(mask)\n', (373, 379), True, 'import matplotlib.pyplot as plt\n'), ((469, 479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (477, 479), True, 'import matplotlib.pyplot as plt\n'), ((528, 540), 'json.load', 'json.load', (['f'], {}), '(f)\n', (537, 540), False, 'import json\n'), ((418, 464), 'matplotlib.pyplot.scatter', 'plt.scatter', (['polygon[0::2]', 'polygon[1::2]'], {'s': '(2)'}), '(polygon[0::2], polygon[1::2], s=2)\n', (429, 464), True, 'import matplotlib.pyplot as plt\n'), ((582, 597), 'random.random', 'random.random', ([], {}), '()\n', (595, 597), False, 'import random\n'), ((880, 979), 'cv2.rectangle', 'cv2.rectangle', (['rgb', '(bbox[0], bbox[1])', '(bbox[0] + bbox[2], bbox[1] + bbox[3])', '(255, 0, 0)', '(5)'], {}), '(rgb, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3\n ]), (255, 0, 0), 5)\n', (893, 979), False, 'import cv2\n')]
|
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = 'core'
def ready(self):
from mqtt.mqtt_file import client
client.loop_start()
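# Registration sketch (an assumption, not shown in this snippet): ready() only
# runs if the app is installed, e.g. in settings.py:
#   INSTALLED_APPS = [..., 'core.apps.CoreConfig', ...]
# (On Django >= 3.2 a single AppConfig in apps.py is auto-discovered, so a
# plain 'core' entry works too.) loop_start() runs the MQTT network loop on a
# background thread once the app registry is ready.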
|
[
"mqtt.mqtt_file.client.loop_start"
] |
[((155, 174), 'mqtt.mqtt_file.client.loop_start', 'client.loop_start', ([], {}), '()\n', (172, 174), False, 'from mqtt.mqtt_file import client\n')]
|
# -*- coding:utf-8 -*-
"""
Author:
<NAME>,<EMAIL>
Reference:
[1] <NAME>, <NAME>, <NAME>, et al. Product-based neural networks for user response prediction[C]//Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.(https://arxiv.org/pdf/1611.00144.pdf)
"""
import torch
import torch.nn as nn
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
from ..layers import DNN, concat_fun, InnerProductLayer, OutterProductLayer
class PNN(BaseModel):
"""Instantiates the Product-based Neural Network architecture.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param dnn_hidden_units: list, list of positive integers or an empty list, the layer number and units in each layer of the deep net
:param l2_reg_embedding: float . L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
    :param use_inner: bool, whether to use the inner product.
    :param use_outter: bool, whether to use the outer product.
    :param kernel_type: str, kernel type used in the outer product, can be ``'mat'`` , ``'vec'`` or ``'num'``
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:param device: str, ``"cpu"`` or ``"cuda:0"``
:return: A PyTorch model instance.
"""
def __init__(self, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', use_inner=True, use_outter=False,
kernel_type='mat', task='binary', device='cpu', ):
super(PNN, self).__init__([], dnn_feature_columns, l2_reg_linear=0, l2_reg_embedding=l2_reg_embedding,
init_std=init_std, seed=seed, task=task, device=device)
if kernel_type not in ['mat', 'vec', 'num']:
raise ValueError("kernel_type must be mat,vec or num")
self.use_inner = use_inner
self.use_outter = use_outter
self.kernel_type = kernel_type
self.task = task
product_out_dim = 0
num_inputs = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
num_pairs = int(num_inputs * (num_inputs - 1) / 2)
if self.use_inner:
product_out_dim += num_pairs
self.innerproduct = InnerProductLayer(device=device)
if self.use_outter:
product_out_dim += num_pairs
self.outterproduct = OutterProductLayer(
num_inputs, self.embedding_size, kernel_type=kernel_type, device=device)
self.dnn = DNN(product_out_dim + self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
init_std=init_std, device=device)
self.dnn_linear = nn.Linear(
dnn_hidden_units[-1], 1, bias=False).to(device)
self.add_regularization_weight(
filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2=l2_reg_dnn)
self.add_regularization_weight(self.dnn_linear.weight, l2=l2_reg_dnn)
self.to(device)
def forward(self, X):
sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns,
self.embedding_dict)
linear_signal = torch.flatten(
concat_fun(sparse_embedding_list), start_dim=1)
if self.use_inner:
inner_product = torch.flatten(
self.innerproduct(sparse_embedding_list), start_dim=1)
if self.use_outter:
outer_product = self.outterproduct(sparse_embedding_list)
if self.use_outter and self.use_inner:
product_layer = torch.cat(
[linear_signal, inner_product, outer_product], dim=1)
elif self.use_outter:
product_layer = torch.cat([linear_signal, outer_product], dim=1)
elif self.use_inner:
product_layer = torch.cat([linear_signal, inner_product], dim=1)
else:
product_layer = linear_signal
dnn_input = combined_dnn_input([product_layer], dense_value_list)
dnn_output = self.dnn(dnn_input)
dnn_logit = self.dnn_linear(dnn_output)
logit = dnn_logit
y_pred = self.out(logit)
return y_pred
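# Usage sketch (hypothetical feature columns; assumes DeepCTR-Torch-style
# SparseFeat from this package's `inputs` module):
#
#   from ..inputs import SparseFeat
#   feature_columns = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=4),
#                      SparseFeat('item_id', vocabulary_size=500, embedding_dim=4)]
#   model = PNN(feature_columns, dnn_hidden_units=(128, 128),
#               use_inner=True, task='binary', device='cpu')
#   # y_pred = model(X), where X is a (batch, n_fields) tensor of feature indices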
|
[
"torch.cat",
"torch.nn.Linear"
] |
[((4252, 4315), 'torch.cat', 'torch.cat', (['[linear_signal, inner_product, outer_product]'], {'dim': '(1)'}), '([linear_signal, inner_product, outer_product], dim=1)\n', (4261, 4315), False, 'import torch\n'), ((3267, 3313), 'torch.nn.Linear', 'nn.Linear', (['dnn_hidden_units[-1]', '(1)'], {'bias': '(False)'}), '(dnn_hidden_units[-1], 1, bias=False)\n', (3276, 3313), True, 'import torch.nn as nn\n'), ((4391, 4439), 'torch.cat', 'torch.cat', (['[linear_signal, outer_product]'], {'dim': '(1)'}), '([linear_signal, outer_product], dim=1)\n', (4400, 4439), False, 'import torch\n'), ((4497, 4545), 'torch.cat', 'torch.cat', (['[linear_signal, inner_product]'], {'dim': '(1)'}), '([linear_signal, inner_product], dim=1)\n', (4506, 4545), False, 'import torch\n')]
|
import os
import shutil
from utils.logger import Logger, LogLvl
_logger = Logger(LogLvl.LOG_ERROR)
# Creation
def is_directory_exists(dir_name):
directory_exists = os.path.exists(dir_name)
if not directory_exists:
_logger.info("Directory \"{}\" does not exist".format(dir_name))
return directory_exists
def create_directory(dir_name):
if not is_directory_exists(dir_name):
os.makedirs(dir_name)
_logger.info("Creating directory {}".format(dir_name))
return dir_name
def remove_directory(dir_name):
if is_directory_exists(dir_name):
shutil.rmtree(dir_name)
_logger.info("Removing directory {}".format(dir_name))
# Query
def list_all_dirs_in_folder(folder_name):
return os.listdir(folder_name)
# Comparators
def equal(fname1, fname2):
# Open file for reading in text mode (default mode)
f1 = open(fname1)
f2 = open(fname2)
files_equal = True
# Print confirmation
_logger.info("-----------------------------------")
_logger.info("Comparing files\n > " + fname1 + "\n < " + fname2)
_logger.info("-----------------------------------")
# Read the first line from the files
f1_line = f1.readline()
f2_line = f2.readline()
# Initialize counter for line number
line_no = 1
# Loop if either file1 or file2 has not reached EOF
while f1_line != '' or f2_line != '':
# Strip the leading whitespaces
f1_line = f1_line.rstrip()
f2_line = f2_line.rstrip()
# Compare the lines from both file
if f1_line != f2_line:
# If a line does not exist on file2 then mark the output with
# + sign
if f2_line == '' and f1_line != '':
print(">+", "Line-%d" % line_no, f1_line)
# otherwise output the line on file1 and mark it with > sign
elif f1_line != '':
print(">", "Line-%d" % line_no, f1_line)
# If a line does not exist on file1 then mark the output with
# + sign
if f1_line == '' and f2_line != '':
print("<+", "Line-%d" % line_no, f2_line)
# otherwise output the line on file2 and mark it with < sign
elif f2_line != '':
print("<", "Line-%d" % line_no, f2_line)
# Print a blank line
print()
files_equal = False
# Read the next line from the file
f1_line = f1.readline()
f2_line = f2.readline()
# Increment line counter
line_no += 1
# Close the files
f1.close()
f2.close()
return files_equal
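# Usage sketch (hypothetical paths):
#   create_directory("output/run1")
#   if equal("output/run1/expected.txt", "output/run1/actual.txt"):
#       remove_directory("output/run1")
#   print(list_all_dirs_in_folder("output"))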
|
[
"os.makedirs",
"os.path.exists",
"utils.logger.Logger",
"shutil.rmtree",
"os.listdir"
] |
[((75, 99), 'utils.logger.Logger', 'Logger', (['LogLvl.LOG_ERROR'], {}), '(LogLvl.LOG_ERROR)\n', (81, 99), False, 'from utils.logger import Logger, LogLvl\n'), ((172, 196), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (186, 196), False, 'import os\n'), ((743, 766), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (753, 766), False, 'import os\n'), ((407, 428), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (418, 428), False, 'import os\n'), ((592, 615), 'shutil.rmtree', 'shutil.rmtree', (['dir_name'], {}), '(dir_name)\n', (605, 615), False, 'import shutil\n')]
|
from things import Room, Item
def build_rooms():
print("Building world...", end="")
house_front_yard = Room("house_front_yard")
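# NOTE: the Room instance is created but not yet stored or returned, so
# build_rooms() currently returns None; presumably a world registry will
# hold the rooms (and Items) in a later revision.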
print(" done.")
return
def generate_items():
print("Generating items...", end="")
print(" done.")
return
|
[
"things.Room"
] |
[((113, 137), 'things.Room', 'Room', (['"""house_front_yard"""'], {}), "('house_front_yard')\n", (117, 137), False, 'from things import Room, Item\n')]
|
"""
sphinx-simulink.directives
~~~~~~~~~~~~~~~~~~~~~~~~~~
Embed Simulink diagrams in your documentation.
:copyright:
Copyright 2016 by <NAME> <<EMAIL>>.
:license:
MIT, see LICENSE for details.
"""
import hashlib
import os
import tempfile
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
from sphinx.util.osutil import ensuredir
from sphinxsimulink.diagram import nodes
def pathlist(argument):
paths = []
list = argument.split(';')
for path in list:
paths.append( directives.path(path) )
return paths
class SimulinkDiagramDirective(images.Figure):
required_arguments = 1
optional_arguments = 0
option_spec = dict(
images.Figure.option_spec, **{
'dir': directives.path,
'addpath': pathlist,
'preload': directives.path,
'subsystem': directives.unchanged,
}
)
# content used by images.Figure as caption
has_content = True
@staticmethod
def generate_uri(app, diagram_options, fileformat):
# give a unique folder name for the specific srcdir, housed under the
# system's temporary directory
outdir = os.path.join(
tempfile.gettempdir(),
'sphinxsimulink',
hashlib.sha1(
os.path.abspath( app.builder.srcdir ).encode('utf-8')
).hexdigest()
)
# FIXME: change filename hash to include contents of preload script,
# simulink system model, and other dependencies...
# use as mechanism to reuse cache, and delete on clean job
# make a unique filename for the Simulink model
hash = hashlib.sha1( repr( sorted( diagram_options.items() ) )
.encode('utf-8') ).hexdigest()
filename = "simulink-diagram-{}.{}".format( hash, fileformat )
# combine the directory and filename
uri = os.path.join(outdir, filename)
return uri
def run(self):
env = self.state.document.settings.env
app = env.app
# pop these keys out of self.options;
# place into diagram_options
diagram_options = dict(
(popped_key, self.options.pop(popped_key, None))
for popped_key in
('dir','addpath','preload','subsystem')
)
# generate image at this location; Sphinx will relocate later
uri = SimulinkDiagramDirective.generate_uri(
app, diagram_options, 'png'
)
# make an empty file, if needed, to avoid warning from Sphinx's image
# processing
ensuredir( os.path.dirname( uri ) )
open( uri, 'a' ).close()
# SimulinkDiagramDirective takes system from argument[0]
system = self.arguments[0]
# images.Figure expects uri in argument[0]
self.arguments[0] = uri
(figure_node,) = images.Figure.run(self)
# escalate system messages
if isinstance(figure_node, nodes.system_message):
return [figure_node]
diagram_node = nodes.diagram('', figure_node, **diagram_options)
diagram_node['uri'] = uri
diagram_node['system'] = system
return [diagram_node]
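# Usage sketch in reStructuredText (the directive name is an assumption and
# depends on how the extension's setup() registers SimulinkDiagramDirective;
# the options map to option_spec above):
#
#   .. simulink-diagram:: my_model
#      :dir: models
#      :addpath: libs;blocks
#      :preload: load_params.m
#      :subsystem: Controller
#
#      Optional caption text, handled like a figure caption via has_content.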
|
[
"sphinxsimulink.diagram.nodes.diagram",
"os.path.abspath",
"os.path.dirname",
"tempfile.gettempdir",
"docutils.parsers.rst.directives.images.Figure.run",
"docutils.parsers.rst.directives.path",
"os.path.join"
] |
[((1946, 1976), 'os.path.join', 'os.path.join', (['outdir', 'filename'], {}), '(outdir, filename)\n', (1958, 1976), False, 'import os\n'), ((2918, 2941), 'docutils.parsers.rst.directives.images.Figure.run', 'images.Figure.run', (['self'], {}), '(self)\n', (2935, 2941), False, 'from docutils.parsers.rst.directives import images\n'), ((3093, 3142), 'sphinxsimulink.diagram.nodes.diagram', 'nodes.diagram', (['""""""', 'figure_node'], {}), "('', figure_node, **diagram_options)\n", (3106, 3142), False, 'from sphinxsimulink.diagram import nodes\n'), ((566, 587), 'docutils.parsers.rst.directives.path', 'directives.path', (['path'], {}), '(path)\n', (581, 587), False, 'from docutils.parsers.rst import directives\n'), ((1255, 1276), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1274, 1276), False, 'import tempfile\n'), ((2649, 2669), 'os.path.dirname', 'os.path.dirname', (['uri'], {}), '(uri)\n', (2664, 2669), False, 'import os\n'), ((1350, 1385), 'os.path.abspath', 'os.path.abspath', (['app.builder.srcdir'], {}), '(app.builder.srcdir)\n', (1365, 1385), False, 'import os\n')]
|
from setuptools import setup, find_packages
exec(open('opensoar/version.py').read())
with open("README.rst", "r") as f:
long_description = f.read()
setup(
name='opensoar',
version=__version__, # imported above via the exec() call
license='MIT',
description='Open source python library for glider flight analysis',
url='https://github.com/glidergeek/opensoar',
packages=find_packages(exclude=['tests']),
long_description=long_description,
install_requires=[
'pygeodesy>=17.11.26',
'aerofiles>=0.4.1',
'beautifulsoup4>=4.6.0'
]
)
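# Usage sketch (standard setuptools workflow):
#   pip install .                      # install the package locally
#   python setup.py sdist bdist_wheel  # build distributions for upload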
|
[
"setuptools.find_packages"
] |
[((404, 436), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (417, 436), False, 'from setuptools import setup, find_packages\n')]
|
""" Tests for the Dfa class"""
import math
import random
import itertools
import pytest
from citoolkit.specifications.spec import AbstractSpec
from citoolkit.specifications.dfa import Dfa, State, DfaCycleError
###################################################################################################
# Basic Tests
###################################################################################################
def test_dfa_complete():
""" Creates a simple complete Dfa and ensures
this does not raise an error.
"""
# Create a DFA that only accepts strings that contain 3 "1"
# symbols in a row with no "2" inputs after them.
alphabet = {"0", "1", "2"}
states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}
accepting_states = {"3_Seen"}
start_state = "0_Seen"
# Initialize transitions map so that all transitions go
# to "0_Seen"
transitions = {}
for state in states:
for symbol in alphabet:
transitions[(state, symbol)] = "0_Seen"
# Complete transitions map.
transitions[("0_Seen", "1")] = "1_Seen"
transitions[("1_Seen", "1")] = "2_Seen"
transitions[("2_Seen", "1")] = "3_Seen"
transitions[("3_Seen", "0")] = "3_Seen"
transitions[("3_Seen", "1")] = "3_Seen"
# Create the DFA, which should not raise an exception.
Dfa(alphabet, states, accepting_states, start_state, transitions)
def test_dfa_not_complete():
""" Attempts to create a simple incomplete Dfa and ensures
that this raises a ValueError.
"""
# Create a DFA that only accepts strings that contain 3 "1"
# symbols in a row with no "2" inputs after them.
alphabet = {"0", "1", "2"}
states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}
accepting_states = {"3_Seen"}
start_state = "0_Seen"
transitions = {}
# Partially completes transitions map.
transitions[("0_Seen", "1")] = "1_Seen"
transitions[("1_Seen", "1")] = "2_Seen"
transitions[("2_Seen", "1")] = "3_Seen"
transitions[("3_Seen", "0")] = "3_Seen"
transitions[("3_Seen", "1")] = "3_Seen"
# Create the DFA and check select strings against Dfa
with pytest.raises(ValueError):
Dfa(alphabet, states, accepting_states, start_state, transitions)
def test_dfa_string_states():
""" Creates a simple Dfa and ensures that select
words are correctly accepted or rejected. Dfa is
constructed with string states.
"""
# Create a DFA that only accepts strings that contain 3 "1"
# symbols in a row with no "2" inputs after them.
alphabet = {"0", "1", "2"}
states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}
accepting_states = {"3_Seen"}
start_state = "0_Seen"
# Initialize transitions map so that all transitions go
# to "0_Seen"
transitions = {}
for state in states:
for symbol in alphabet:
transitions[(state, symbol)] = "0_Seen"
# Complete transitions map.
transitions[("0_Seen", "1")] = "1_Seen"
transitions[("1_Seen", "1")] = "2_Seen"
transitions[("2_Seen", "1")] = "3_Seen"
transitions[("3_Seen", "0")] = "3_Seen"
transitions[("3_Seen", "1")] = "3_Seen"
# Create the DFA and check select strings against Dfa
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
assert not dfa.accepts([])
assert not dfa.accepts(list("0"))
assert not dfa.accepts(list("1"))
assert not dfa.accepts(list("2"))
assert dfa.accepts(list("111"))
assert not dfa.accepts(list("1112"))
assert not dfa.accepts(list("000"))
assert not dfa.accepts(list("222"))
assert dfa.accepts(list("01110"))
assert not dfa.accepts(list("00000011000020011100020001"))
assert dfa.accepts(list("0000001100002001110002000111"))
def test_dfa_class_states():
""" Creates a simple Dfa and ensures that select
words are correctly accepted or rejected. Dfa is
constructed with State class states.
"""
# Create a DFA that only accepts strings that contain 3 "1"
# symbols in a row with no "2" inputs after them.
alphabet = {"0", "1", "2"}
states = {State("0_Seen"), State("1_Seen"), State("2_Seen"), State("3_Seen")}
accepting_states = {State("3_Seen")}
start_state = State("0_Seen")
# Initialize transitions map so that all transitions go
# to "0_Seen"
transitions = {}
for state in states:
for symbol in alphabet:
transitions[(state, symbol)] = State("0_Seen")
# Complete transitions map.
transitions[(State("0_Seen"), "1")] = State("1_Seen")
transitions[(State("1_Seen"), "1")] = State("2_Seen")
transitions[(State("2_Seen"), "1")] = State("3_Seen")
transitions[(State("3_Seen"), "0")] = State("3_Seen")
transitions[(State("3_Seen"), "1")] = State("3_Seen")
# Create the DFA and check select strings against Dfa
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
assert not dfa.accepts([])
assert not dfa.accepts(list("0"))
assert not dfa.accepts(list("1"))
assert not dfa.accepts(list("2"))
assert dfa.accepts(list("111"))
assert not dfa.accepts(list("1112"))
assert not dfa.accepts(list("000"))
assert not dfa.accepts(list("222"))
assert dfa.accepts(list("01110"))
assert not dfa.accepts(list("00000011000020011100020001"))
assert dfa.accepts(list("0000001100002001110002000111"))
def test_dfa_mixed_states():
""" Creates a simple Dfa and ensures that select
words are correctly accepted or rejected. Dfa is
constructed with a mix of string and State class
states.
"""
# Create a DFA that only accepts strings that contain 3 "1"
# symbols in a row with no "2" inputs after them.
alphabet = {"0", "1", "2"}
states = {State("0_Seen"), "1_Seen", "2_Seen", State("3_Seen")}
accepting_states = {"3_Seen"}
start_state = State("0_Seen")
# Initialize transitions map so that all transitions go
# to "0_Seen"
transitions = {}
for state in states:
for symbol in alphabet:
transitions[(state, symbol)] = "0_Seen"
# Complete transitions map.
transitions[(State("0_Seen"), "1")] = State("1_Seen")
transitions[("1_Seen", "1")] = State("2_Seen")
transitions[(State("2_Seen"), "1")] = "3_Seen"
transitions[(State("3_Seen"), "0")] = State("3_Seen")
transitions[("3_Seen", "1")] = "3_Seen"
# Create the DFA and check select strings against Dfa
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
assert not dfa.accepts([])
assert not dfa.accepts(list("0"))
assert not dfa.accepts(list("1"))
assert not dfa.accepts(list("2"))
assert dfa.accepts(list("111"))
assert not dfa.accepts(list("1112"))
assert not dfa.accepts(list("000"))
assert not dfa.accepts(list("222"))
assert dfa.accepts(list("01110"))
assert not dfa.accepts(list("00000011000020011100020001"))
assert dfa.accepts(list("0000001100002001110002000111"))
def test_dfa_topological_ordering():
""" Create an acyclic DFA and ensure that a correct
topologically sorted list of states is computed.
"""
# Create an acyclic DFA
alphabet = {"0", "1"}
states = {"A", "B", "C", "D", "E", "F", "Sink"}
accepting_states = {"F"}
start_state = "A"
transitions = {}
transitions[("A","0")] = "B"
transitions[("A","1")] = "C"
transitions[("B","0")] = "C"
transitions[("B","1")] = "C"
transitions[("C","0")] = "D"
transitions[("C","1")] = "E"
transitions[("D","0")] = "E"
transitions[("D","1")] = "F"
transitions[("E","0")] = "F"
transitions[("E","1")] = "F"
transitions[("F","0")] = "Sink"
transitions[("F","1")] = "Sink"
transitions[("Sink", "0")] = "Sink"
transitions[("Sink", "1")] = "Sink"
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
# Ensures that the one correct topological sort is generated.
assert dfa.states_topological() == ["A", "B", "C", "D", "E", "F"]
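# Note: "Sink" is absent from the expected ordering; states_topological()
# appears to omit states that cannot reach an accepting state (an inference
# from this expected value rather than from the Dfa API itself).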
def test_dfa_topological_ordering_cycle():
""" Create a simple DFA with a reachable and accepting cycle
and ensure that a DfaCycleError is raised.
"""
# Create a cyclic DFA
alphabet = {"0", "1"}
states = {"A", "B", "C", "D", "Sink"}
accepting_states = {"D"}
start_state = "A"
transitions = {}
transitions[("A","0")] = "B"
transitions[("A","1")] = "C"
transitions[("B","0")] = "D"
transitions[("B","1")] = "Sink"
transitions[("C","0")] = "B"
transitions[("C","1")] = "Sink"
transitions[("D","0")] = "C"
transitions[("D","1")] = "Sink"
transitions[("Sink", "0")] = "Sink"
transitions[("Sink", "1")] = "Sink"
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
# Ensures that a DfaCycleError is raised, as a cyclic DFA does not
# have a well-defined topological ordering.
with pytest.raises(DfaCycleError):
dfa.states_topological()
def test_dfa_language_size():
""" Creates a Dfa that accepts only words of length
7 and ensures that language_size returns the
correct result.
"""
dfa = Dfa.exact_length_dfa({"0","1"}, 7)
assert dfa.language_size() == 2**7
def test_dfa_language_size_abstract():
""" Creates an abstract specification that is
the union of two exact length Dfas and ensures
that language_size returns the correct result.
"""
dfa_1 = Dfa.exact_length_dfa({"0","1"}, 5)
dfa_2 = Dfa.exact_length_dfa({"0","1"}, 7)
abstract_dfa = dfa_1 | dfa_2
assert abstract_dfa.language_size() == (2**5 + 2**7)
def test_dfa_language_size_param():
""" Creates a Dfa that accepts only words of length
at most 7 and ensures that language_size with min_length
and max_length parameters returns the correct result.
"""
dfa = Dfa.max_length_dfa({"0","1"}, 7)
assert dfa.language_size(min_length=5, max_length=7) == 2**5 + 2**6 + 2**7
def test_dfa_sample():
""" Create a simple Dfa that when uniformly sampled
should generate the following words with relatively
uniform probabilities: [[], ["A"], ["A", "A"], ["B"]].
Then verify that the sampling is over the correct
words and reasonably accurate.
"""
# Create test Dfa
alphabet = {"A", "B"}
states = {"Start", "Top", "Bottom1", "Bottom2", "Sink"}
accepting_states = {"Start", "Top", "Bottom1", "Bottom2"}
start_state = "Start"
transitions = dict()
transitions[("Start", "A")] = "Bottom1"
transitions[("Start", "B")] = "Top"
transitions[("Top", "A")] = "Sink"
transitions[("Top", "B")] = "Sink"
transitions[("Bottom1", "A")] = "Bottom2"
transitions[("Bottom1", "B")] = "Sink"
transitions[("Bottom2", "A")] = "Sink"
transitions[("Bottom2", "B")] = "Sink"
transitions[("Sink", "A")] = "Sink"
transitions[("Sink", "B")] = "Sink"
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
# Sample 100,000 words and keep track of
# how many of each are sampled.
dfa_language = [tuple(), tuple("A"), ("A", "A"), tuple("B")]
sample_counts = dict()
for word in dfa_language:
sample_counts[word] = 0
for _ in range(100000):
# Sample a word from our Dfa's language
sampled_word = dfa.sample()
# Ensure we didn't sample a word not in our language
assert sampled_word in dfa_language
# Increment the count for the word we sampled
sample_counts[tuple(sampled_word)] += 1
# Assert the sampled ratios are relatively correct
for word in dfa_language:
word_prob = sample_counts[word]/100000
assert word_prob > .24
assert word_prob < .26
def test_dfa_sample_abstract():
""" Create a simple Dfa that when uniformly sampled
should generate the following words with relatively
uniform probabilities: [[], ["A"], ["A", "A"], ["B"]].
Then intersect it with a Dfa that accepts only words
of length 1. Then verify that the sampling is over the
correct words and reasonably accurate.
"""
# Create test Dfa
alphabet = {"A", "B"}
states = {"Start", "Top", "Bottom1", "Bottom2", "Sink"}
accepting_states = {"Start", "Top", "Bottom1", "Bottom2"}
start_state = "Start"
transitions = dict()
transitions[("Start", "A")] = "Bottom1"
transitions[("Start", "B")] = "Top"
transitions[("Top", "A")] = "Sink"
transitions[("Top", "B")] = "Sink"
transitions[("Bottom1", "A")] = "Bottom2"
transitions[("Bottom1", "B")] = "Sink"
transitions[("Bottom2", "A")] = "Sink"
transitions[("Bottom2", "B")] = "Sink"
transitions[("Sink", "A")] = "Sink"
transitions[("Sink", "B")] = "Sink"
main_dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
length_dfa = Dfa.exact_length_dfa(alphabet, 1)
dfa = main_dfa & length_dfa
# Sample 100,000 words and keep track of
# how many of each are sampled.
dfa_language = [tuple("A"), tuple("B")]
sample_counts = dict()
for word in dfa_language:
sample_counts[word] = 0
for _ in range(100000):
# Sample a word from our Dfa's language
sampled_word = dfa.sample()
# Ensure we didn't sample a word not in our language
assert sampled_word in dfa_language
# Increment the count for the word we sampled
sample_counts[tuple(sampled_word)] += 1
# Assert the sampled ratios are relatively correct
for word in dfa_language:
word_prob = sample_counts[word]/100000
assert word_prob > .49
assert word_prob < .51
def test_dfa_sample_param():
""" Create a simple Dfa that when uniformly sampled
over lengths one to three should generate ["A"], ["B"],
["A", "A"], and ["A", "A", "A"] with relatively uniform
probabilities. The Dfa's full language also includes the
empty word and ["A", "A", "A", "A"], but sampling with
min_length=1 and max_length=3 excludes those words.
"""
# Create test Dfa
alphabet = {"A", "B"}
states = {"Start", "Top", "Bottom1", "Bottom2", "Bottom3", "Bottom4", "Sink"}
accepting_states = {"Start", "Top", "Bottom1", "Bottom2", "Bottom3", "Bottom4"}
start_state = "Start"
transitions = dict()
transitions[("Start", "A")] = "Bottom1"
transitions[("Start", "B")] = "Top"
transitions[("Top", "A")] = "Sink"
transitions[("Top", "B")] = "Sink"
transitions[("Bottom1", "A")] = "Bottom2"
transitions[("Bottom1", "B")] = "Sink"
transitions[("Bottom2", "A")] = "Bottom3"
transitions[("Bottom2", "B")] = "Sink"
transitions[("Bottom3", "A")] = "Bottom4"
transitions[("Bottom3", "B")] = "Sink"
transitions[("Bottom4", "A")] = "Sink"
transitions[("Bottom4", "B")] = "Sink"
transitions[("Sink", "A")] = "Sink"
transitions[("Sink", "B")] = "Sink"
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
# Sample 100,000 words and keep track of
# how many of each are sampled.
dfa_language = [tuple("A"), tuple("B"), ("A", "A"), ("A", "A", "A")]
sample_counts = dict()
for word in dfa_language:
sample_counts[word] = 0
for _ in range(100000):
# Sample a word from our Dfa's language
sampled_word = dfa.sample(min_length=1, max_length=3)
# Ensure we didn't sample a word not in our language
assert sampled_word in dfa_language
# Increment the count for the word we sampled
sample_counts[tuple(sampled_word)] += 1
# Assert the sampled ratios are relatively correct
for word in dfa_language:
word_prob = sample_counts[word]/100000
assert word_prob > .24
assert word_prob < .26
def test_dfa_minimize_no_reduction():
""" Creates a simple Dfa that is already minimal,
minimizes it, and ensures that select words are
correctly accepted or rejected.
"""
# Create a DFA that only accepts strings that contain 3 "1"
# symbols in a row with no "2" inputs after them.
alphabet = {"0", "1", "2"}
states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}
accepting_states = {"3_Seen"}
start_state = "0_Seen"
# Initialize transitions map so that all transitions go
# to "0_Seen"
transitions = {}
for state in states:
for symbol in alphabet:
transitions[(state, symbol)] = "0_Seen"
# Complete transitions map.
transitions[("0_Seen", "1")] = "1_Seen"
transitions[("1_Seen", "1")] = "2_Seen"
transitions[("2_Seen", "1")] = "3_Seen"
transitions[("3_Seen", "0")] = "3_Seen"
transitions[("3_Seen", "1")] = "3_Seen"
# Create the DFA and minimizes it.
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
minimized_dfa = dfa.minimize()
# Assert the minimized Dfa's size is the same as the
# original, and check select strings against both the original and minimized Dfa
assert len(dfa.states) == len(minimized_dfa.states)
assert not dfa.accepts([]) and not minimized_dfa.accepts([])
assert not dfa.accepts(list("0")) and not minimized_dfa.accepts(list("0"))
assert not dfa.accepts(list("1")) and not minimized_dfa.accepts(list("1"))
assert not dfa.accepts(list("2")) and not minimized_dfa.accepts(list("2"))
assert dfa.accepts(list("111")) and minimized_dfa.accepts(list("111"))
assert not dfa.accepts(list("1112")) and not minimized_dfa.accepts(list("1112"))
assert not dfa.accepts(list("000")) and not minimized_dfa.accepts(list("000"))
assert not dfa.accepts(list("222")) and not minimized_dfa.accepts(list("222"))
assert dfa.accepts(list("01110")) and minimized_dfa.accepts(list("01110"))
assert not dfa.accepts(list("00000011000020011100020001")) and not minimized_dfa.accepts(list("00000011000020011100020001"))
assert dfa.accepts(list("0000001100002001110002000111")) and minimized_dfa.accepts(list("0000001100002001110002000111"))
def test_dfa_minimize_reduction():
""" Creates a Dfa that has many redundancies,
minimizes it, and ensures that select words are
correctly accepted or rejected.
"""
# Create a very redundant DFA that accepts if and only if the
# string contains a "1" symbol before any 0 symbols
alphabet = {"0", "1", "2"}
s_states = {"Start_A", "Start_B", "Start_C"}
a_states = {"Accept_A", "Accept_B", "Accept_C"}
r_states = {"Reject_A", "Reject_B", "Reject_C"}
dr_states = {"DeadReject_A", "DeadReject_B"}
da_states = {"DeadAccept_A", "DeadAccept_B"}
states = s_states | a_states | r_states | dr_states |da_states
accepting_states = a_states | da_states
start_state = "Start_A"
# Create transitions map
transitions = {}
# S state transitions
for s_state in s_states:
transitions[(s_state, "0")] = "Reject_A"
transitions[(s_state, "1")] = "Accept_A"
transitions[("Start_A", "2")] = "Start_B"
transitions[("Start_B", "2")] = "Start_C"
transitions[("Start_C", "2")] = "Start_C"
# A state transitions
for symbol in alphabet:
transitions[("Accept_A", symbol)] = "Accept_B"
for symbol in alphabet:
transitions[("Accept_B", symbol)] = "Accept_C"
for symbol in alphabet:
transitions[("Accept_C", symbol)] = "Accept_C"
# R state transitions
for symbol in alphabet:
transitions[("Reject_A", symbol)] = "Reject_B"
for symbol in alphabet:
transitions[("Reject_B", symbol)] = "Reject_C"
for symbol in alphabet:
transitions[("Reject_C", symbol)] = "Reject_C"
# Dead state transitions
transitions[("DeadReject_A", "0")] = "Accept_A"
transitions[("DeadReject_A", "1")] = "Reject_A"
transitions[("DeadReject_A", "2")] = "Start_A"
transitions[("DeadReject_B", "0")] = "DeadReject_B"
transitions[("DeadReject_B", "1")] = "DeadReject_B"
transitions[("DeadReject_B", "2")] = "DeadReject_B"
transitions[("DeadAccept_A", "0")] = "Accept_A"
transitions[("DeadAccept_A", "1")] = "Reject_A"
transitions[("DeadAccept_A", "2")] = "Start_A"
transitions[("DeadAccept_B", "0")] = "DeadAccept_B"
transitions[("DeadAccept_B", "1")] = "DeadAccept_B"
transitions[("DeadAccept_B", "2")] = "DeadAccept_B"
# Create the DFA and minimizes it.
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
minimized_dfa = dfa.minimize()
# Assert the minimized Dfa's size is appropriately reduced
# and check select strings against the two DFAs.
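# (Three states suffice: "undecided" while only 2s have been seen, an
# accepting sink once a "1" arrives first, and a rejecting sink once a
# "0" arrives first.)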
assert len(minimized_dfa.states) == 3
assert not minimized_dfa.accepts([]) and not dfa.accepts([])
assert not minimized_dfa.accepts(list("0")) and not dfa.accepts(list("0"))
assert minimized_dfa.accepts(list("1")) and dfa.accepts(list("1"))
assert not minimized_dfa.accepts(list("2")) and not dfa.accepts(list("2"))
assert not minimized_dfa.accepts(list("000")) and not dfa.accepts(list("000"))
assert minimized_dfa.accepts(list("111")) and dfa.accepts(list("111"))
assert not minimized_dfa.accepts(list("222")) and not dfa.accepts(list("222"))
assert not minimized_dfa.accepts(list("2220000110011000020100002000")) and not dfa.accepts(list("2220000110011000020100002000"))
assert minimized_dfa.accepts(list("222210000001100002001110002000111")) and dfa.accepts(list("222210000001100002001110002000111"))
def test_dfa_union():
""" Creates two DFAs, one which accepts iff
a string contains a "1" symbol and another which
accepts iff a string contains a "2" symbol. Then ensure
that their symbolic and explicit union have an equivalent
and correct language
"""
alphabet = {"0","1","2"}
# Create DFA that accepts once it encounters a "1"
states_1 = {"Reject", "Accept"}
accepting_states_1 = {"Accept"}
start_state_1 = "Reject"
transitions_1 = {}
transitions_1[("Reject", "0")] = "Reject"
transitions_1[("Reject", "1")] = "Accept"
transitions_1[("Reject", "2")] = "Reject"
transitions_1[("Accept", "0")] = "Accept"
transitions_1[("Accept", "1")] = "Accept"
transitions_1[("Accept", "2")] = "Accept"
dfa_1 = Dfa(alphabet, states_1, accepting_states_1, start_state_1, transitions_1)
# Create DFA that accepts once it encounters a "2"
states_2 = {"Reject", "Accept"}
accepting_states_2 = {"Accept"}
start_state_2 = "Reject"
transitions_2 = {}
transitions_2[("Reject", "0")] = "Reject"
transitions_2[("Reject", "1")] = "Reject"
transitions_2[("Reject", "2")] = "Accept"
transitions_2[("Accept", "0")] = "Accept"
transitions_2[("Accept", "1")] = "Accept"
transitions_2[("Accept", "2")] = "Accept"
dfa_2 = Dfa(alphabet, states_2, accepting_states_2, start_state_2, transitions_2)
# Create abstract spec for the union of dfa_1 and dfa_2. Then compute its explicit form.
abstract_union = dfa_1 | dfa_2
explicit_union = abstract_union.explicit()
assert isinstance(abstract_union, AbstractSpec)
assert isinstance(explicit_union, Dfa)
assert not abstract_union.accepts([]) and not explicit_union.accepts([])
assert not abstract_union.accepts(list("0")) and not explicit_union.accepts(list("0"))
assert abstract_union.accepts(list("1")) and explicit_union.accepts(list("1"))
assert abstract_union.accepts(list("2")) and explicit_union.accepts(list("2"))
assert not abstract_union.accepts(list("000")) and not explicit_union.accepts(list("000"))
assert abstract_union.accepts(list("111")) and explicit_union.accepts(list("111"))
assert abstract_union.accepts(list("222")) and explicit_union.accepts(list("222"))
assert abstract_union.accepts(list("010")) and explicit_union.accepts(list("010"))
assert abstract_union.accepts(list("020")) and explicit_union.accepts(list("020"))
assert abstract_union.accepts(list("12")) and explicit_union.accepts(list("12"))
def test_dfa_intersection():
""" Creates two DFAs, one which accepts iff
a string contains a "1" symbol and another which
accepts iff a string contains a "2" symbol. Then ensure
that their symbolic and explicit intersection
have an equivalent and correct language
"""
alphabet = {"0","1","2"}
# Create DFA that accepts once it encounters a "1"
states_1 = {"Reject", "Accept"}
accepting_states_1 = {"Accept"}
start_state_1 = "Reject"
transitions_1 = {}
transitions_1[("Reject", "0")] = "Reject"
transitions_1[("Reject", "1")] = "Accept"
transitions_1[("Reject", "2")] = "Reject"
transitions_1[("Accept", "0")] = "Accept"
transitions_1[("Accept", "1")] = "Accept"
transitions_1[("Accept", "2")] = "Accept"
dfa_1 = Dfa(alphabet, states_1, accepting_states_1, start_state_1, transitions_1)
# Create DFA that accepts once it encounters a "2"
states_2 = {"Reject", "Accept"}
accepting_states_2 = {"Accept"}
start_state_2 = "Reject"
transitions_2 = {}
transitions_2[("Reject", "0")] = "Reject"
transitions_2[("Reject", "1")] = "Reject"
transitions_2[("Reject", "2")] = "Accept"
transitions_2[("Accept", "0")] = "Accept"
transitions_2[("Accept", "1")] = "Accept"
transitions_2[("Accept", "2")] = "Accept"
dfa_2 = Dfa(alphabet, states_2, accepting_states_2, start_state_2, transitions_2)
# Create abstract spec for the intersection of dfa_1 and dfa_2. Then compute its explicit form.
abstract_intersection = dfa_1 & dfa_2
explicit_intersection = abstract_intersection.explicit()
assert isinstance(abstract_intersection, AbstractSpec)
assert isinstance(explicit_intersection, Dfa)
assert not abstract_intersection.accepts([]) and not explicit_intersection.accepts([])
assert not abstract_intersection.accepts(list("0")) and not explicit_intersection.accepts(list("0"))
assert not abstract_intersection.accepts(list("1")) and not explicit_intersection.accepts(list("1"))
assert not abstract_intersection.accepts(list("2")) and not explicit_intersection.accepts(list("2"))
assert not abstract_intersection.accepts(list("000")) and not explicit_intersection.accepts(list("000"))
assert not abstract_intersection.accepts(list("111")) and not explicit_intersection.accepts(list("111"))
assert not abstract_intersection.accepts(list("222")) and not explicit_intersection.accepts(list("222"))
assert not abstract_intersection.accepts(list("010")) and not explicit_intersection.accepts(list("010"))
assert not abstract_intersection.accepts(list("020")) and not explicit_intersection.accepts(list("020"))
assert abstract_intersection.accepts(list("12")) and explicit_intersection.accepts(list("12"))
assert abstract_intersection.accepts(list("012210")) and explicit_intersection.accepts(list("012210"))
def test_dfa_negation():
""" Creates a DFA which accepts iff a string contains a "1"
symbol. Then ensure that its symbolic and explicit negation
have an equivalent and correct language
"""
alphabet = {"0","1","2"}
# Create DFA that accepts once it encounters a "1"
states = {"Reject", "Accept"}
accepting_states = {"Accept"}
start_state = "Reject"
transitions = {}
transitions[("Reject", "0")] = "Reject"
transitions[("Reject", "1")] = "Accept"
transitions[("Reject", "2")] = "Reject"
transitions[("Accept", "0")] = "Accept"
transitions[("Accept", "1")] = "Accept"
transitions[("Accept", "2")] = "Accept"
dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
# Create abstract spec for the negation of dfa and compute its explicit form.
abstract_negation = ~dfa
explicit_negation = abstract_negation.explicit()
assert isinstance(abstract_negation, AbstractSpec)
assert isinstance(explicit_negation, Dfa)
assert abstract_negation.accepts([]) and explicit_negation.accepts([])
assert abstract_negation.accepts(list("0")) and explicit_negation.accepts(list("0"))
assert not abstract_negation.accepts(list("1")) and not explicit_negation.accepts(list("1"))
assert abstract_negation.accepts(list("2")) and explicit_negation.accepts(list("2"))
assert abstract_negation.accepts(list("000")) and explicit_negation.accepts(list("000"))
assert not abstract_negation.accepts(list("111")) and not explicit_negation.accepts(list("111"))
assert abstract_negation.accepts(list("222")) and explicit_negation.accepts(list("222"))
assert not abstract_negation.accepts(list("010")) and not explicit_negation.accepts(list("010"))
assert abstract_negation.accepts(list("020")) and explicit_negation.accepts(list("020"))
assert not abstract_negation.accepts(list("12")) and not explicit_negation.accepts(list("12"))
assert not abstract_negation.accepts(list("012210")) and not explicit_negation.accepts(list("012210"))
def test_dfa_difference():
""" Creates two DFAs, one which accepts iff
a string contains a "1" symbol and another which
accepts iff a string contains a "2" symbol. Then ensure
that their symbolic and explicit difference
have an equivalent and correct language
"""
alphabet = {"0","1","2"}
# Create DFA that accepts once it encounters a "1"
states_1 = {"Reject", "Accept"}
accepting_states_1 = {"Accept"}
start_state_1 = "Reject"
transitions_1 = {}
transitions_1[("Reject", "0")] = "Reject"
transitions_1[("Reject", "1")] = "Accept"
transitions_1[("Reject", "2")] = "Reject"
transitions_1[("Accept", "0")] = "Accept"
transitions_1[("Accept", "1")] = "Accept"
transitions_1[("Accept", "2")] = "Accept"
dfa_1 = Dfa(alphabet, states_1, accepting_states_1, start_state_1, transitions_1)
# Create DFA that accepts once it encounters a "2"
states_2 = {"Reject", "Accept"}
accepting_states_2 = {"Accept"}
start_state_2 = "Reject"
transitions_2 = {}
transitions_2[("Reject", "0")] = "Reject"
transitions_2[("Reject", "1")] = "Reject"
transitions_2[("Reject", "2")] = "Accept"
transitions_2[("Accept", "0")] = "Accept"
transitions_2[("Accept", "1")] = "Accept"
transitions_2[("Accept", "2")] = "Accept"
dfa_2 = Dfa(alphabet, states_2, accepting_states_2, start_state_2, transitions_2)
# Create abstract spec for the difference of dfa_1 and dfa_2. Then compute its explicit form.
abstract_difference = dfa_1 - dfa_2
explicit_difference = abstract_difference.explicit()
assert isinstance(abstract_difference, AbstractSpec)
assert isinstance(explicit_difference, Dfa)
assert not abstract_difference.accepts([]) and not explicit_difference.accepts([])
assert not abstract_difference.accepts(list("0")) and not explicit_difference.accepts(list("0"))
assert abstract_difference.accepts(list("1")) and explicit_difference.accepts(list("1"))
assert not abstract_difference.accepts(list("2")) and not explicit_difference.accepts(list("2"))
assert not abstract_difference.accepts(list("000")) and not explicit_difference.accepts(list("000"))
assert abstract_difference.accepts(list("111")) and explicit_difference.accepts(list("111"))
assert not abstract_difference.accepts(list("222")) and not explicit_difference.accepts(list("222"))
assert abstract_difference.accepts(list("010")) and explicit_difference.accepts(list("010"))
assert not abstract_difference.accepts(list("020")) and not explicit_difference.accepts(list("020"))
assert not abstract_difference.accepts(list("12")) and not explicit_difference.accepts(list("12"))
assert not abstract_difference.accepts(list("012210")) and not explicit_difference.accepts(list("012210"))
def test_dfa_exact_length_constructor():
""" Tests that the Dfa returned by the exact_length_dfa
constructor works as expected.
"""
dfa = Dfa.exact_length_dfa({"0","1"}, 7)
assert not dfa.accepts("")
assert not dfa.accepts("0")
assert not dfa.accepts("1")
assert not dfa.accepts("01")
assert not dfa.accepts("011")
assert not dfa.accepts("0110")
assert not dfa.accepts("01101")
assert not dfa.accepts("011010")
assert dfa.accepts("0110100")
assert not dfa.accepts("01101000")
assert not dfa.accepts("000000001111000000001100001000111100110110110")
def test_dfa_min_length_constructor():
""" Tests that the Dfa returned by the min_length_dfa
constructor works as expected.
"""
dfa = Dfa.min_length_dfa({"0", "1"}, 7)
assert not dfa.accepts("")
assert not dfa.accepts("0")
assert not dfa.accepts("1")
assert not dfa.accepts("01")
assert not dfa.accepts("011")
assert not dfa.accepts("0110")
assert not dfa.accepts("01101")
assert not dfa.accepts("011010")
assert dfa.accepts("0110100")
assert dfa.accepts("01101000")
assert dfa.accepts("000000001111000000001100001000111100110110110")
def test_dfa_max_length_constructor():
""" Tests that the Dfa returned by the max_length_dfa
constructor works as expected.
"""
dfa = Dfa.max_length_dfa({"0", "1"}, 7)
assert dfa.accepts("")
assert dfa.accepts("0")
assert dfa.accepts("1")
assert dfa.accepts("01")
assert dfa.accepts("011")
assert dfa.accepts("0110")
assert dfa.accepts("01101")
assert dfa.accepts("011010")
assert dfa.accepts("0110100")
assert not dfa.accepts("01101000")
assert not dfa.accepts("000000001111000000001100001000111100110110110")
###################################################################################################
# Randomized Tests
###################################################################################################
# Randomized tests default parameters
RANDOM_TEST_NUM_ITERS = 1000 # Default to 1000, but can be set lower when writing new tests.
RANDOM_DFA_MIN_STATES = 1
RANDOM_DFA_MAX_STATES = 10
RANDOM_DFA_MIN_SYMBOLS = 1
RANDOM_DFA_MAX_SYMBOLS = 3
@pytest.mark.slow
def test_dfa_minimize_random():
""" For RANDOM_TEST_NUM_ITERS iterations, generates a random DFA with
the number of states between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
and the number of symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS.
Then minimizes the dfa and ensures that the minimized version and
the original version agree (both accept or both reject) on all strings
of length less than or equal to the number of states.
"""
for _ in range(RANDOM_TEST_NUM_ITERS):
# Generate random Dfa and calculate its minimized form.
orig_dfa = generate_random_dfa(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
min_dfa = orig_dfa.minimize()
# Check that construction is valid
assert isinstance(orig_dfa, Dfa)
assert isinstance(min_dfa, Dfa)
assert len(min_dfa.states) <= len(orig_dfa.states)
# Iterate through every possible word that has length <= the number
# of states in the original DFAs to ensure that the specs are equivalent.
for word_length in range(len(orig_dfa.states)+1):
for word in itertools.product(orig_dfa.alphabet, repeat=word_length):
assert orig_dfa.accepts(word) == min_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_union_random():
""" For RANDOM_TEST_NUM_ITERS iterations, generates 2 random DFAs with
the number of states between the square root of RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
(which puts the product construction size between these bounds) and the number of
symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS. Then takes the
abstract and explicit union of the 2 DFAs and ensures that they are consistent
on all strings of length less than or equal to the number of states.
"""
for _ in range(RANDOM_TEST_NUM_ITERS):
min_states_sqrt = int(math.sqrt(RANDOM_DFA_MIN_STATES))
max_states_sqrt = int(math.sqrt(RANDOM_DFA_MAX_STATES))
# Generate random Dfa and calculate its minimized form.
dfa_1 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
dfa_2 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS, alphabet=dfa_1.alphabet)
abstract_dfa = dfa_1 | dfa_2
explicit_dfa = abstract_dfa.explicit()
# Check that construction is valid
assert isinstance(abstract_dfa, AbstractSpec)
assert isinstance(explicit_dfa, Dfa)
# Iterate through every possible word that has length <= the number
# of states in the new Dfa to ensure they are equivalent.
for word_length in range(len(explicit_dfa.states)+1):
for word in itertools.product(explicit_dfa.alphabet, repeat=word_length):
assert abstract_dfa.accepts(word) == explicit_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_intersection_random():
""" For RANDOM_TEST_NUM_ITERS iterations, generates 2 random DFAs with
the number of states between the square root of RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
(which puts the product construction size between these bounds) and the number of
symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS. Then takes the
abstract and explicit intersection of the 2 DFAs and ensures that they are consistent
on all strings of length less than or equal to the number of states.
"""
for _ in range(RANDOM_TEST_NUM_ITERS):
min_states_sqrt = int(math.sqrt(RANDOM_DFA_MIN_STATES))
max_states_sqrt = int(math.sqrt(RANDOM_DFA_MAX_STATES))
# Generate random Dfa and calculate its minimized form.
dfa_1 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
dfa_2 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS, alphabet=dfa_1.alphabet)
abstract_dfa = dfa_1 & dfa_2
explicit_dfa = abstract_dfa.explicit()
# Check that construction is valid
assert isinstance(abstract_dfa, AbstractSpec)
assert isinstance(explicit_dfa, Dfa)
# Iterate through every possible word that has length <= the number
# of states in the new Dfa to ensure they are equivalent.
for word_length in range(len(explicit_dfa.states)+1):
for word in itertools.product(explicit_dfa.alphabet, repeat=word_length):
assert abstract_dfa.accepts(word) == explicit_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_negation_random():
""" For RANDOM_TEST_NUM_ITERS iterations, generates a random DFA with
the number of states between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
and the number of symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS.
Then takes the abstract and explicit negation of that DFA and ensures they are
consistent on all strings of length less than or equal to the number of states.
"""
for _ in range(RANDOM_TEST_NUM_ITERS):
# Generate random Dfa and calculate its minimized form.
dfa = generate_random_dfa(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
abstract_dfa = ~dfa
explicit_dfa = abstract_dfa.explicit()
# Check that construction is valid
assert isinstance(abstract_dfa, AbstractSpec)
assert isinstance(explicit_dfa, Dfa)
# Iterate through every possible word that has length <= the number
# of states in the new DFA to ensure that the specs are equivalent.
for word_length in range(len(explicit_dfa.states)+1):
for word in itertools.product(explicit_dfa.alphabet, repeat=word_length):
assert abstract_dfa.accepts(word) == explicit_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_language_size_random():
""" For RANDOM_TEST_NUM_ITERS iterations, generates a random DFA with
the number of states between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
and the number of symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS.
Then intersects this with a Dfa that accepts all words with length less than max_length,
a random number between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES. Enumerates
all words in the alphabet of length at most max_length ensures the count is correct.
"""
for _ in range(RANDOM_TEST_NUM_ITERS):
max_length = random.randint(RANDOM_DFA_MIN_STATES,RANDOM_DFA_MAX_STATES)
base_dfa = generate_random_dfa(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
length_limit_dfa = Dfa.max_length_dfa(base_dfa.alphabet, max_length)
dfa = base_dfa & length_limit_dfa
explicit_dfa = dfa.explicit()
enumerated_count = 0
for word_length in range(max_length+1):
for word in itertools.product(base_dfa.alphabet, repeat=word_length):
if explicit_dfa.accepts(word):
enumerated_count += 1
assert explicit_dfa.language_size() == enumerated_count
###################################################################################################
# Helper Functions
###################################################################################################
def generate_random_dfa(min_states, max_states, min_symbols, max_symbols, alphabet = None):
""" Generates a random Dfa object.
:param min_states: The minimum number of states this Dfa can have.
:param max_states: The maximum number of states this Dfa can have.
:param min_symbols: The minimum number of symbols this Dfa can have.
:param max_symbols: The maximum number of symbols this Dfa can have.
:param alphabet: Optional fixed alphabet to use; if None, a random alphabet is generated.
"""
# Pick number of states and symbols
num_states = random.randint(min_states, max_states)
if alphabet is None:
num_symbols = random.randint(min_symbols, max_symbols)
alphabet = set(map(str, range(num_symbols)))
else:
num_symbols = len(alphabet)
states = set()
for state_num in range(1, num_states+1):
states.add("State_" + str(state_num))
# Picks a random number of accepting states
shuffled_state_list = sorted(list(states))
random.shuffle(shuffled_state_list)
accepting_states = set(shuffled_state_list[0:random.randint(0,num_states)])
# Picks a random start state
start_state = "State_" + str(random.randint(1, num_states))
# Randomly generates transitions
transitions = {}
for symbol in alphabet:
for state in states:
transitions[(state, symbol)] = "State_" + str(random.randint(1, num_states))
# Create and return Dfa
return Dfa(alphabet, states, accepting_states, start_state, transitions)
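# Usage sketch:
#   random_dfa = generate_random_dfa(2, 5, 1, 3)
#   assert isinstance(random_dfa, Dfa)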
|
[
"citoolkit.specifications.dfa.Dfa.exact_length_dfa",
"citoolkit.specifications.dfa.Dfa.min_length_dfa",
"citoolkit.specifications.dfa.Dfa",
"random.randint",
"math.sqrt",
"random.shuffle",
"pytest.raises",
"itertools.product",
"citoolkit.specifications.dfa.State",
"citoolkit.specifications.dfa.Dfa.max_length_dfa"
] |
[((1338, 1403), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (1341, 1403), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((3239, 3304), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (3242, 3304), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4250, 4265), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (4255, 4265), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4557, 4572), 'citoolkit.specifications.dfa.State', 'State', (['"""1_Seen"""'], {}), "('1_Seen')\n", (4562, 4572), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4615, 4630), 'citoolkit.specifications.dfa.State', 'State', (['"""2_Seen"""'], {}), "('2_Seen')\n", (4620, 4630), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4673, 4688), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4678, 4688), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4732, 4747), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4737, 4747), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4790, 4805), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4795, 4805), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4875, 4940), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (4878, 4940), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((5889, 5904), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (5894, 5904), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6189, 6204), 'citoolkit.specifications.dfa.State', 'State', (['"""1_Seen"""'], {}), "('1_Seen')\n", (6194, 6204), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6240, 6255), 'citoolkit.specifications.dfa.State', 'State', (['"""2_Seen"""'], {}), "('2_Seen')\n", (6245, 6255), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6350, 6365), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (6355, 6365), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6479, 6544), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (6482, 6544), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((7842, 7907), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (7845, 7907), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((8737, 8802), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 
'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (8740, 8802), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((9166, 9201), 'citoolkit.specifications.dfa.Dfa.exact_length_dfa', 'Dfa.exact_length_dfa', (["{'0', '1'}", '(7)'], {}), "({'0', '1'}, 7)\n", (9186, 9201), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((9453, 9488), 'citoolkit.specifications.dfa.Dfa.exact_length_dfa', 'Dfa.exact_length_dfa', (["{'0', '1'}", '(5)'], {}), "({'0', '1'}, 5)\n", (9473, 9488), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((9500, 9535), 'citoolkit.specifications.dfa.Dfa.exact_length_dfa', 'Dfa.exact_length_dfa', (["{'0', '1'}", '(7)'], {}), "({'0', '1'}, 7)\n", (9520, 9535), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((9807, 9840), 'citoolkit.specifications.dfa.Dfa.max_length_dfa', 'Dfa.max_length_dfa', (["{'0', '1'}", '(7)'], {}), "({'0', '1'}, 7)\n", (9825, 9840), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((10863, 10928), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (10866, 10928), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((12712, 12777), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (12715, 12777), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((12795, 12828), 'citoolkit.specifications.dfa.Dfa.exact_length_dfa', 'Dfa.exact_length_dfa', (['alphabet', '(1)'], {}), '(alphabet, 1)\n', (12815, 12828), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((14838, 14903), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (14841, 14903), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((16661, 16726), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (16664, 16726), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((19765, 19830), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (19768, 19830), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((21613, 21686), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states_1', 'accepting_states_1', 'start_state_1', 'transitions_1'], {}), '(alphabet, states_1, accepting_states_1, start_state_1, transitions_1)\n', (21616, 21686), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((22159, 22232), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states_2', 'accepting_states_2', 'start_state_2', 'transitions_2'], {}), '(alphabet, states_2, accepting_states_2, start_state_2, transitions_2)\n', (22162, 22232), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), 
((24167, 24240), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states_1', 'accepting_states_1', 'start_state_1', 'transitions_1'], {}), '(alphabet, states_1, accepting_states_1, start_state_1, transitions_1)\n', (24170, 24240), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((24713, 24786), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states_2', 'accepting_states_2', 'start_state_2', 'transitions_2'], {}), '(alphabet, states_2, accepting_states_2, start_state_2, transitions_2)\n', (24716, 24786), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((26949, 27014), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (26952, 27014), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((29115, 29188), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states_1', 'accepting_states_1', 'start_state_1', 'transitions_1'], {}), '(alphabet, states_1, accepting_states_1, start_state_1, transitions_1)\n', (29118, 29188), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((29661, 29734), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states_2', 'accepting_states_2', 'start_state_2', 'transitions_2'], {}), '(alphabet, states_2, accepting_states_2, start_state_2, transitions_2)\n', (29664, 29734), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((31302, 31337), 'citoolkit.specifications.dfa.Dfa.exact_length_dfa', 'Dfa.exact_length_dfa', (["{'0', '1'}", '(7)'], {}), "({'0', '1'}, 7)\n", (31322, 31337), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((31908, 31941), 'citoolkit.specifications.dfa.Dfa.min_length_dfa', 'Dfa.min_length_dfa', (["{'0', '1'}", '(7)'], {}), "({'0', '1'}, 7)\n", (31926, 31941), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((32505, 32538), 'citoolkit.specifications.dfa.Dfa.max_length_dfa', 'Dfa.max_length_dfa', (["{'0', '1'}", '(7)'], {}), "({'0', '1'}, 7)\n", (32523, 32538), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((41352, 41390), 'random.randint', 'random.randint', (['min_states', 'max_states'], {}), '(min_states, max_states)\n', (41366, 41390), False, 'import random\n'), ((41791, 41826), 'random.shuffle', 'random.shuffle', (['shuffled_state_list'], {}), '(shuffled_state_list)\n', (41805, 41826), False, 'import random\n'), ((42250, 42315), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (42253, 42315), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((2160, 2185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2173, 2185), False, 'import pytest\n'), ((2195, 2260), 'citoolkit.specifications.dfa.Dfa', 'Dfa', (['alphabet', 'states', 'accepting_states', 'start_state', 'transitions'], {}), '(alphabet, states, accepting_states, start_state, transitions)\n', (2198, 2260), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4123, 4138), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (4128, 4138), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4140, 
4155), 'citoolkit.specifications.dfa.State', 'State', (['"""1_Seen"""'], {}), "('1_Seen')\n", (4145, 4155), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4157, 4172), 'citoolkit.specifications.dfa.State', 'State', (['"""2_Seen"""'], {}), "('2_Seen')\n", (4162, 4172), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4174, 4189), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4179, 4189), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4215, 4230), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4220, 4230), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((5783, 5798), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (5788, 5798), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((5820, 5835), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (5825, 5835), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((8929, 8957), 'pytest.raises', 'pytest.raises', (['DfaCycleError'], {}), '(DfaCycleError)\n', (8942, 8957), False, 'import pytest\n'), ((39979, 40039), 'random.randint', 'random.randint', (['RANDOM_DFA_MIN_STATES', 'RANDOM_DFA_MAX_STATES'], {}), '(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES)\n', (39993, 40039), False, 'import random\n'), ((40200, 40249), 'citoolkit.specifications.dfa.Dfa.max_length_dfa', 'Dfa.max_length_dfa', (['base_dfa.alphabet', 'max_length'], {}), '(base_dfa.alphabet, max_length)\n', (40218, 40249), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((41439, 41479), 'random.randint', 'random.randint', (['min_symbols', 'max_symbols'], {}), '(min_symbols, max_symbols)\n', (41453, 41479), False, 'import random\n'), ((4466, 4481), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (4471, 4481), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4532, 4547), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (4537, 4547), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4590, 4605), 'citoolkit.specifications.dfa.State', 'State', (['"""1_Seen"""'], {}), "('1_Seen')\n", (4595, 4605), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4648, 4663), 'citoolkit.specifications.dfa.State', 'State', (['"""2_Seen"""'], {}), "('2_Seen')\n", (4653, 4663), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4707, 4722), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4712, 4722), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((4765, 4780), 'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (4770, 4780), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6164, 6179), 'citoolkit.specifications.dfa.State', 'State', (['"""0_Seen"""'], {}), "('0_Seen')\n", (6169, 6179), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6273, 6288), 'citoolkit.specifications.dfa.State', 'State', (['"""2_Seen"""'], {}), "('2_Seen')\n", (6278, 6288), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((6325, 6340), 
'citoolkit.specifications.dfa.State', 'State', (['"""3_Seen"""'], {}), "('3_Seen')\n", (6330, 6340), False, 'from citoolkit.specifications.dfa import Dfa, State, DfaCycleError\n'), ((34584, 34640), 'itertools.product', 'itertools.product', (['orig_dfa.alphabet'], {'repeat': 'word_length'}), '(orig_dfa.alphabet, repeat=word_length)\n', (34601, 34640), False, 'import itertools\n'), ((35344, 35376), 'math.sqrt', 'math.sqrt', (['RANDOM_DFA_MIN_STATES'], {}), '(RANDOM_DFA_MIN_STATES)\n', (35353, 35376), False, 'import math\n'), ((35408, 35440), 'math.sqrt', 'math.sqrt', (['RANDOM_DFA_MAX_STATES'], {}), '(RANDOM_DFA_MAX_STATES)\n', (35417, 35440), False, 'import math\n'), ((36225, 36285), 'itertools.product', 'itertools.product', (['explicit_dfa.alphabet'], {'repeat': 'word_length'}), '(explicit_dfa.alphabet, repeat=word_length)\n', (36242, 36285), False, 'import itertools\n'), ((37012, 37044), 'math.sqrt', 'math.sqrt', (['RANDOM_DFA_MIN_STATES'], {}), '(RANDOM_DFA_MIN_STATES)\n', (37021, 37044), False, 'import math\n'), ((37076, 37108), 'math.sqrt', 'math.sqrt', (['RANDOM_DFA_MAX_STATES'], {}), '(RANDOM_DFA_MAX_STATES)\n', (37085, 37108), False, 'import math\n'), ((37893, 37953), 'itertools.product', 'itertools.product', (['explicit_dfa.alphabet'], {'repeat': 'word_length'}), '(explicit_dfa.alphabet, repeat=word_length)\n', (37910, 37953), False, 'import itertools\n'), ((39195, 39255), 'itertools.product', 'itertools.product', (['explicit_dfa.alphabet'], {'repeat': 'word_length'}), '(explicit_dfa.alphabet, repeat=word_length)\n', (39212, 39255), False, 'import itertools\n'), ((40434, 40490), 'itertools.product', 'itertools.product', (['base_dfa.alphabet'], {'repeat': 'word_length'}), '(base_dfa.alphabet, repeat=word_length)\n', (40451, 40490), False, 'import itertools\n'), ((41974, 42003), 'random.randint', 'random.randint', (['(1)', 'num_states'], {}), '(1, num_states)\n', (41988, 42003), False, 'import random\n'), ((41876, 41905), 'random.randint', 'random.randint', (['(0)', 'num_states'], {}), '(0, num_states)\n', (41890, 41905), False, 'import random\n'), ((42179, 42208), 'random.randint', 'random.randint', (['(1)', 'num_states'], {}), '(1, num_states)\n', (42193, 42208), False, 'import random\n')]
|
#!/usr/bin/env python3
import os
import sys
import html5lib
from xml.etree import ElementTree as ET
import subprocess
from html import escape as H
"""
If you've found this, then you should help me report a bug in IDLE,
the official Python code editor. In IDLE 3.8.5 on Python 3.8.5 in
Xubuntu 20.04 LTS, if you open a blank file and edit it, you might
not be able to save it because IDLE couldn't tell at load time
whether it originally used UNIX newlines or CP/M newlines.
touch something.py && idle something.py
Trying to File > Save or File > Save As produces an exception on stderr:
Exception in Tkinter callback
Traceback (most recent call last):
[snip]
File "/usr/lib/python3.8/idlelib/iomenu.py", line 232, in writefile
text = self.fixnewlines()
File "/usr/lib/python3.8/idlelib/iomenu.py", line 252, in fixnewlines
text = text.replace("\n", self.eol_convention)
TypeError: replace() argument 2 must be str, not None
"""
stylesheet = """
/* Original stylesheet by Daid */
table { border-collapse: collapse }
td, th { border: #333 solid 1px; text-align: center; line-height: 1.5}
.PASS { background-color: #6e2 }
.FAIL { background-color: #e44 }
.UNKNOWN { background-color: #fd6 }
td { font-size:80% }
th { background:#eee }
th:first-child { text-align:right; padding-right:4px }
body { font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif }
/* additions by Pino */
td>img { vertical-align: text-bottom }
"""
htmlns = {
'html': 'http://www.w3.org/1999/xhtml',
}
ET.register_namespace('', "http://www.w3.org/1999/xhtml")
def eldump(el):
print(ET.tostring(el, encoding="unicode"))
def iterdump(rows):
print("\n".join(repr(row) for row in rows))
def destructive_iter(ls):
"""Destructive iterator over a mutable sequence.
Return each element of ls before setting it to None (and releasing it
to the garbage collector)."""
for i in range(len(ls)):
yield ls[i]
ls[i] = None
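# Quick sketch (added for illustration) of destructive_iter's contract: the
# source list keeps its length, but every consumed slot is set to None.
def _destructive_iter_demo():
    data = ["a", "b", "c"]
    consumed = list(destructive_iter(data))
    assert consumed == ["a", "b", "c"]
    assert data == [None, None, None]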
def xdg_open(filename):
if os.name == 'nt':
args = ["start", "", filename]
else:
args = ["xdg-open", filename]
subprocess.run(args)
def load_shootout(filename):
"""
Load an HTML file from Daid's Game Boy emulator shootout.
Return a 3-tuple (emunames, testnames, allresults) where
- emunames is [(name, num_tests_passed), ...]
- testnames is [testname, ...]
- allresults is {emuname: {testname: (True if passed, img[src]), ...}, ...}
"""
with open(filename, "r", encoding="utf-8") as infp:
doc = html5lib.parse(infp)
# Find the table in this document with the most rows, where
# a "row" is a tr child of a thead/tbody child of a table
tables = (
el.findall("./*/html:tr", htmlns)
for el in doc.findall(".//html:table", htmlns)
)
table = max(tables, key=len)
rowit = destructive_iter(table)
doc = tables = table = None # drop variables
emunames = [th.text.split("(", 1) for th in next(rowit)][1:]
emunames = [(l.rstrip(), int(r.split('/', 1)[0])) for l, r in emunames]
allresults = {n: {} for n, _ in emunames}
testnames = []
for row in rowit:
row = list(row)
# To reduce excess width of the name column on the sub-1080p
# displays in smaller laptops, the name in the table includes
# a zero-width space after each slash. It shifts the
# limiting factor to channel_3_wave_ram_locked_write.gb
testname = row[0].text.replace("\u200b", "")
testnames.append(testname)
for (emuname, _), result in zip(emunames, row[1:]):
tpass, img = result.text, result.find("./html:img", htmlns)
ispass = tpass.upper() == 'PASS'
imsrc = img.get("src") if img is not None else None
allresults[emuname][testname] = ispass, imsrc
return emunames, testnames, allresults
def input_emu(prompt, emunames):
xprompt = "\n".join(
"%4d: %s (%d)" % (i + 1, n, c) for i, (n, c) in enumerate(emunames)
)
xprompt = "\n".join((
prompt, xprompt, "Enter a number from 1 to %d: " % len(emunames)
))
while True:
num = input(xprompt).strip()
if num == '': return None
try:
num = int(num)
except ValueError:
print("%s: not a whole number" % num)
continue
if not 1 <= num <= len(emunames):
print("%s: not in range 1 to %d" % (num, len(emunames)))
continue
return num - 1
def shootoutkey(row, col1=None, coldiff=None):
"""Calculate key for sorting a shootout.
row - a tuple (testname, results) where results is [(passing, ...), ...]
and passing is a truthy or falsy value.
col1 and col2 - indices into results
Return a tuple (col12same, col1fail, failcount) where
- col1fail is 0 if results[col1] passes else 1
- col12same is 1 if passing for results[col1] and results[col2]
have same truthiness
"""
testname, results = row
fails = [0 if x[0] else 1 for x in results]
col1fail = 0 if col1 is None else fails[col1]
col2fail = 0 if coldiff is None else fails[coldiff]
return col2fail == col1fail, col1fail, sum(fails)
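# Toy illustration with hypothetical rows (not real shootout data): rows where
# columns 0 and 1 disagree sort first (False < True), then rows that column 0
# fails, then rows with more total failures.
def _shootoutkey_demo():
    rows = [
        ("both_pass", [(True, None), (True, None)]),
        ("only_col1", [(True, None), (False, None)]),
        ("only_col2", [(False, None), (True, None)]),
    ]
    rows.sort(key=lambda row: shootoutkey(row, col1=0, coldiff=1))
    return [name for name, _ in rows]  # ['only_col1', 'only_col2', 'both_pass']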
def format_row(row, emunames):
"""
row - a tuple (testname, [(passing, imgsrc), ...])
"""
testname, results = row
out = ["<tr>\n <th>", H(testname.replace("/", "/\u200b")), "</th>\n"]
for (emuname, _), (passing, imgsrc) in zip(emunames, results):
classname = "PASS" if passing else "FAIL"
out.append(' <td class="%s">%s<br><img src="%s" title="%s %s"></td>\n'
% (classname, classname, imgsrc, emuname, classname))
out.append("</tr>\n")
return "".join(out)
def main(argv=None):
mainshootout = load_shootout(".cache/Daid-shootout.html")
emunames, testnames, allresults = mainshootout
## mgba_extra = load_shootout(".cache/Daid-shootout-mgba.html")
## emunames = [x for x in emunames if x[0] != 'mGBA']
## emunames.extend(mgba_extra[0])
## allresults.update(mgba_extra[2])
## del mgba_extra
print("Sorting tests based on decreasing pass rate")
print("Optional: Choose emulators that one or two pass")
col1emu = input_emu("Choose an emulator for column 1", emunames)
col2emu = (input_emu("Choose an emulator for column 2", emunames)
if col1emu is not None
else None)
new_emunames = []
if col1emu is not None: new_emunames.append(emunames[col1emu])
if col2emu is not None: new_emunames.append(emunames[col2emu])
new_emunames.extend(x for i, x in enumerate(emunames)
if i != col1emu and i != col2emu)
emunames = None
rows = [
(testname, [allresults[e[0]][testname] for e in new_emunames])
for testname in testnames
]
col1ok = 0 if col1emu is not None else None
col2ok = 1 if col2emu is not None else None
rows.sort(key=lambda row: shootoutkey(row, col1ok, col2ok))
# rows is of the form
# [(testname, [(passing, image), ...]), ...]
# Now make our own table based on this
title, subtitle = "Game Boy emulator shootout", ""
if col2ok is not None:
# Calculate subtitle for pass/fail differences
emu1, emu2 = new_emunames[0][0], new_emunames[1][0]
title = "Shootout: %s vs. %s" % (emu1, emu2)
pass1not2 = pass2not1 = 0
for row in rows:
pass1, pass2 = row[1][0][0], row[1][1][0]
if pass1 and not pass2: pass1not2 += 1
if pass2 and not pass1: pass2not1 += 1
pass1not2_pl = "tests" if pass1not2 != 1 else "test"
pass2not1_pl = "tests" if pass2not1 != 1 else "test"
subtitle = ("%s passes %d %s that %s fails, and %s passes %d %s that %s fails."
% (emu1, pass1not2, pass1not2_pl, emu2,
emu2, pass2not1, pass2not1_pl, emu1))
elif col1ok is not None:
title = "Shootout: %s vs. other emulators" % (new_emunames[0][0])
tests_pl = "tests" if new_emunames[0][1] != 1 else "test"
subtitle = ("%s passes %d %s."
% (new_emunames[0][0], new_emunames[0][1], tests_pl))
values1 = sum(1 for v in allresults[new_emunames[0][0]].values() if v[0])
print(subtitle)
out = [
"""<!DOCTYPE HTML><html><head><meta charset="utf-8"><title>""",
H(title),
"""</title><style type="text/css">""",
stylesheet,
"""</style></head><body><h1>""",
H(title),
"</h1>\n<p>", H(subtitle), """
Based on test ROM results by Daid.
</p><table id="results"><thead>\n<tr><th>Name of test</th>"""
]
out.extend("<th>%s (%d)</th>" % row for row in new_emunames)
out.append("</tr>\n</thead><tbody>\n")
out.extend(format_row(row, new_emunames) for row in rows)
out.append("</tbody></table></body></html>")
outfilename = "sortshootout.html"
with open(outfilename, "w", encoding="utf-8") as outfp:
outfp.writelines(out)
xdg_open(outfilename)
if __name__=='__main__':
if 'idlelib' in sys.modules:
main(["./htmltotsv.py", ".cache/names1920s.html", "-"])
else:
main()
|
[
"subprocess.run",
"xml.etree.ElementTree.register_namespace",
"html5lib.parse",
"xml.etree.ElementTree.tostring",
"html.escape"
] |
[((1521, 1578), 'xml.etree.ElementTree.register_namespace', 'ET.register_namespace', (['""""""', '"""http://www.w3.org/1999/xhtml"""'], {}), "('', 'http://www.w3.org/1999/xhtml')\n", (1542, 1578), True, 'from xml.etree import ElementTree as ET\n'), ((2103, 2123), 'subprocess.run', 'subprocess.run', (['args'], {}), '(args)\n', (2117, 2123), False, 'import subprocess\n'), ((1606, 1641), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['el'], {'encoding': '"""unicode"""'}), "(el, encoding='unicode')\n", (1617, 1641), True, 'from xml.etree import ElementTree as ET\n'), ((2505, 2525), 'html5lib.parse', 'html5lib.parse', (['infp'], {}), '(infp)\n', (2519, 2525), False, 'import html5lib\n'), ((8304, 8312), 'html.escape', 'H', (['title'], {}), '(title)\n', (8305, 8312), True, 'from html import escape as H\n'), ((8430, 8438), 'html.escape', 'H', (['title'], {}), '(title)\n', (8431, 8438), True, 'from html import escape as H\n'), ((8462, 8473), 'html.escape', 'H', (['subtitle'], {}), '(subtitle)\n', (8463, 8473), True, 'from html import escape as H\n')]
|
from bs4 import BeautifulSoup
import requests
from urllib.parse import urlsplit, urlunsplit
from config import settings
from logo_finder_service import LogoFinderService
from phone_finder_service import PhoneFinderService
from time import sleep
from selenium import webdriver
#from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
class ScrappingService():
def __init__(self, website_url:str) -> None:
self.website_url = website_url
if self.website_url[-1] == '/':
self.website_url = self.website_url[:-1]
        split = urlsplit(self.website_url)
if split.scheme == "":
raise Exception(f"Error: {website_url} url without scheme.")
self.website_url = f"{split.scheme}://{split.netloc}"
def check_if_website_exists(self) -> bool:
'''Simple and fast requests get just to check if the domain returns something.'''
response = requests.get(self.website_url)
        if 200 <= response.status_code <= 299:
return True
else:
return False
def scrap(self) -> None:
'''Main scrapping orchestrator'''
self.scrap_using_simple_request()
if (self.logo == "NO LOGO FOUND") or (self.phones[0] == "NO PHONE FOUND"):
self.scrap_using_selenium()
def scrap_using_simple_request(self) -> None:
        '''Initial attempt to obtain the data. Faster than Selenium, but does not work on dynamically generated JS pages.'''
response = requests.get(self.website_url)
home_soup_obj = BeautifulSoup(response.content, 'html.parser')
        logo_search_obj = LogoFinderService(
            soup_obj=home_soup_obj,
            website_url=self.website_url
        )
        self.logo = logo_search_obj.find_logo()
contact_url = self.find_contact_url(home_soup_obj)
response = requests.get(contact_url)
contact_soup_obj = BeautifulSoup(response.content, 'html.parser')
        phone_search_obj = PhoneFinderService(
            soup_obj=contact_soup_obj,
            website_url=self.website_url
        )
        self.phones = phone_search_obj.find_phones()
def scrap_using_selenium(self) -> None:
        '''Slower than scrap_using_simple_request, but works on dynamically generated JS sites.'''
chrome_options = Options()
chrome_prefs = {}
chrome_options.experimental_options["prefs"] = chrome_prefs
chrome_prefs["profile.default_content_settings"] = {"images": 2}
chrome_options.add_argument(
f'user-agent={settings["ScrappingSettings"]["BrowserUserAgent"]}')
# chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--log-level=3")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
#driver = webdriver.Chrome(ChromeDriverManager().install(),options=chrome_options)
driver = webdriver.Chrome(options=chrome_options)
driver.get(self.website_url)
sleep(settings['SleepTimeToLoadJavascript'])
home_body = driver.find_element_by_tag_name("body")
home_body = home_body.get_attribute('innerHTML')
home_soup_obj = BeautifulSoup(home_body, 'html.parser')
        logo_search_obj = LogoFinderService(
            soup_obj=home_soup_obj,
            website_url=self.website_url
        )
        self.logo = logo_search_obj.find_logo()
contact_url = self.find_contact_url(soup_obj=home_soup_obj)
driver.get(contact_url)
sleep(settings['SleepTimeToLoadJavascript'])
contact_body = driver.find_element_by_tag_name("body")
contact_body = contact_body.get_attribute('innerHTML')
contact_soup_obj = BeautifulSoup(contact_body, 'html.parser')
        phone_search_obj = PhoneFinderService(
            soup_obj=contact_soup_obj,
            website_url=self.website_url
        )
        self.phones = phone_search_obj.find_phones()
driver.close()
driver.quit()
    def find_contact_url(self, soup_obj: BeautifulSoup) -> str:
        '''Given the homepage, find the URL of the website's contact page
        (falling back to the homepage if no contact link is found).'''
all_links = soup_obj.find_all('a', href=True)
contact_text = settings['ScrappingSettings']['ContactIdentifier']
contact_links = [
item for item in all_links if contact_text in item.text.lower()]
if len(contact_links) < 1:
return self.website_url
contact_link = contact_links[0]
contact_url = contact_link.attrs['href']
if contact_url[0] == '/':
contact_url = self.website_url+contact_url
return contact_url
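# Minimal offline sketch (illustrative; "example.com" and the HTML are made
# up, and it assumes settings['ScrappingSettings']['ContactIdentifier'] is
# something like "contact"): exercise find_contact_url without any network.
def _find_contact_url_demo():
    html = '<a href="/about">About</a><a href="/contact-us">Contact us</a>'
    soup = BeautifulSoup(html, 'html.parser')
    service = ScrappingService.__new__(ScrappingService)  # bypass __init__'s URL checks
    service.website_url = "https://example.com"
    return service.find_contact_url(soup)  # "https://example.com/contact-us"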
|
[
"phone_finder_service.PhoneFinderService",
"selenium.webdriver.chrome.options.Options",
"time.sleep",
"urllib.parse.urlsplit",
"requests.get",
"logo_finder_service.LogoFinderService",
"bs4.BeautifulSoup",
"selenium.webdriver.Chrome"
] |
[((613, 634), 'urllib.parse.urlsplit', 'urlsplit', (['website_url'], {}), '(website_url)\n', (621, 634), False, 'from urllib.parse import urlsplit, urlunsplit\n'), ((958, 988), 'requests.get', 'requests.get', (['self.website_url'], {}), '(self.website_url)\n', (970, 988), False, 'import requests\n'), ((1515, 1545), 'requests.get', 'requests.get', (['self.website_url'], {}), '(self.website_url)\n', (1527, 1545), False, 'import requests\n'), ((1570, 1616), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (1583, 1616), False, 'from bs4 import BeautifulSoup\n'), ((1642, 1713), 'logo_finder_service.LogoFinderService', 'LogoFinderService', ([], {'soup_obj': 'home_soup_obj', 'website_url': 'self.website_url'}), '(soup_obj=home_soup_obj, website_url=self.website_url)\n', (1659, 1713), False, 'from logo_finder_service import LogoFinderService\n'), ((1874, 1899), 'requests.get', 'requests.get', (['contact_url'], {}), '(contact_url)\n', (1886, 1899), False, 'import requests\n'), ((1927, 1973), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (1940, 1973), False, 'from bs4 import BeautifulSoup\n'), ((2000, 2075), 'phone_finder_service.PhoneFinderService', 'PhoneFinderService', ([], {'soup_obj': 'contact_soup_obj', 'website_url': 'self.website_url'}), '(soup_obj=contact_soup_obj, website_url=self.website_url)\n', (2018, 2075), False, 'from phone_finder_service import PhoneFinderService\n'), ((2317, 2326), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (2324, 2326), False, 'from selenium.webdriver.chrome.options import Options\n'), ((3052, 3092), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'chrome_options'}), '(options=chrome_options)\n', (3068, 3092), False, 'from selenium import webdriver\n'), ((3138, 3182), 'time.sleep', 'sleep', (["settings['SleepTimeToLoadJavascript']"], {}), "(settings['SleepTimeToLoadJavascript'])\n", (3143, 3182), False, 'from time import sleep\n'), ((3324, 3363), 'bs4.BeautifulSoup', 'BeautifulSoup', (['home_body', '"""html.parser"""'], {}), "(home_body, 'html.parser')\n", (3337, 3363), False, 'from bs4 import BeautifulSoup\n'), ((3389, 3460), 'logo_finder_service.LogoFinderService', 'LogoFinderService', ([], {'soup_obj': 'home_soup_obj', 'website_url': 'self.website_url'}), '(soup_obj=home_soup_obj, website_url=self.website_url)\n', (3406, 3460), False, 'from logo_finder_service import LogoFinderService\n'), ((3651, 3695), 'time.sleep', 'sleep', (["settings['SleepTimeToLoadJavascript']"], {}), "(settings['SleepTimeToLoadJavascript'])\n", (3656, 3695), False, 'from time import sleep\n'), ((3849, 3891), 'bs4.BeautifulSoup', 'BeautifulSoup', (['contact_body', '"""html.parser"""'], {}), "(contact_body, 'html.parser')\n", (3862, 3891), False, 'from bs4 import BeautifulSoup\n'), ((3918, 3993), 'phone_finder_service.PhoneFinderService', 'PhoneFinderService', ([], {'soup_obj': 'contact_soup_obj', 'website_url': 'self.website_url'}), '(soup_obj=contact_soup_obj, website_url=self.website_url)\n', (3936, 3993), False, 'from phone_finder_service import PhoneFinderService\n')]
|
# Generated by Django 4.0.1 on 2022-02-25 04:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('beatup', '0011_alter_customer_photo'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='beatup.customer'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((368, 467), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""beatup.customer"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='beatup.customer')\n", (385, 467), False, 'from django.db import migrations, models\n')]
|
import tweepy
import pandas as pd
config = pd.read_csv("./config.csv")
twitterAPIkey = config['twitterApiKey'][0]
twitterAPIS = config['twitterApiSecret'][0]
twitterAPIAT = config['twitterApiAccessToken'][0]
twitterAPIATS = config['twitterApiAccessTokenSecret'][0]
auth = tweepy.OAuthHandler(twitterAPIkey, twitterAPIS)
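# Sketch of the usual next steps in Tweepy's OAuth 1.0a flow (an assumption
# about intent -- the original file stops after creating the handler):
auth.set_access_token(twitterAPIAT, twitterAPIATS)
api = tweepy.API(auth)
# api.verify_credentials() would now confirm the tokens against Twitter.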
|
[
"pandas.read_csv",
"tweepy.OAuthHandler"
] |
[((43, 70), 'pandas.read_csv', 'pd.read_csv', (['"""./config.csv"""'], {}), "('./config.csv')\n", (54, 70), True, 'import pandas as pd\n'), ((273, 320), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['twitterAPIkey', 'twitterAPIS'], {}), '(twitterAPIkey, twitterAPIS)\n', (292, 320), False, 'import tweepy\n')]
|
"""
"""
import argparse
import os
import sys
import mlflow
import pandas as pd
import pytorch_lightning as pl
import yaml
from dotenv import load_dotenv
load_dotenv() # noqa
sys.path.append(f"{os.getenv('PROJECT_ROOT')}src/") # noqa
from image_predict.data_module.kiva_data_module import KivaDataModule
from image_predict.module import mlflow_module
from module.utils import set_seed
from pytorch_lightning import callbacks
from pytorch_lightning.loggers import MLFlowLogger
from sklearn.model_selection import KFold
from image_predict.models.swin_t_transfer_model import SwinTTransferModel
from image_predict.models.swin_t_finetune_model import SwinTFinetuneModel
class Trainer:
def __init__(
self,
train_path: str,
validation_dataset_save_dir: str,
model_dir_save_path: str,
model_class_name: str = "SwinTTransferModel",
seed: int = 0,
validation_num: int = 4,
model_params: dict = None,
pl_trainer_params: dict = None,
early_stopping_params: dict = None,
train_loader_params: dict = None,
val_loader_params: dict = None,
*args,
**kwargs,
):
"""
Args:
model_class_name:
train_path:
validation_dataset_save_dir:
model_dir_save_path:
seed:
validation_num:
pl_trainer_params:
early_stopping_params:
train_loader_params:
val_loader_params:
"""
self.model_class_name = model_class_name
self.data_df = pd.read_csv(train_path)
self.validation_dataset_save_dir = validation_dataset_save_dir
self.model_dir_save_path = model_dir_save_path
self.seed = seed
self.validation_num = validation_num
self.model_params = model_params
self.pl_trainer_params = pl_trainer_params
self.early_stopping_params = early_stopping_params
self.train_loader_params = train_loader_params
self.val_loader_params = val_loader_params
def __train(self, train, valid, fold_name):
model = eval(self.model_class_name)(model_params=self.model_params, fold_name=fold_name)
datamodule = KivaDataModule(
train,
valid,
train_loader_params=self.train_loader_params,
val_loader_params=self.val_loader_params,
)
early_stopping = callbacks.EarlyStopping(
monitor=f"val_{fold_name}_loss",
**self.early_stopping_params
)
lr_monitor = callbacks.LearningRateMonitor()
os.makedirs(self.model_dir_save_path, exist_ok=True)
loss_checkpoint = callbacks.ModelCheckpoint(
dirpath=self.model_dir_save_path,
filename=fold_name,
monitor=f"val_{fold_name}_loss",
save_top_k=1,
mode="min",
save_last=False,
)
mlf_logger = MLFlowLogger()
mlf_logger._run_id = mlflow.active_run().info.run_id
trainer = pl.Trainer(
logger=mlf_logger,
callbacks=[lr_monitor, loss_checkpoint, early_stopping],
**self.pl_trainer_params,
)
trainer.fit(model, datamodule=datamodule)
mlflow.log_metric(f"epoch_{fold_name}", trainer.current_epoch)
def run(self):
set_seed(self.seed)
kf = KFold(n_splits=self.validation_num, shuffle=True, random_state=self.seed)
for fold, (train_index, valid_index) in enumerate(kf.split(self.data_df["IMAGE_PATH"])):
train = self.data_df.loc[train_index]
valid = self.data_df.loc[valid_index]
os.makedirs(self.validation_dataset_save_dir, exist_ok=True)
train.to_csv(f"{self.validation_dataset_save_dir}train_fold_{fold}.csv", index=False)
valid.to_csv(f"{self.validation_dataset_save_dir}valid_fold_{fold}.csv", index=False)
self.__train(train=train, valid=valid, fold_name=f"fold_{fold}")
params = {
"validation_dataset_save_dir": self.validation_dataset_save_dir,
"model_dir_save_path": self.model_dir_save_path,
"seed": self.seed,
"validation_num": self.validation_num,
"model_params": self.model_params,
"pl_trainer_params": self.pl_trainer_params,
"early_stopping_params": self.early_stopping_params,
"train_loader_params": self.train_loader_params,
"val_loader_params": self.val_loader_params,
}
mlflow.log_params(params)
mlflow.log_artifact(self.validation_dataset_save_dir)
mlflow.log_artifact(self.model_dir_save_path)
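# Standalone sketch (illustrative only) of the k-fold split that Trainer.run()
# performs: every sample index lands in exactly one validation fold.
def _kfold_split_demo(n_samples=8, n_splits=4, seed=0):
    import numpy as np
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    folds = [valid_index for _, valid_index in kf.split(np.arange(n_samples))]
    assert sorted(np.concatenate(folds).tolist()) == list(range(n_samples))
    return folds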
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-c',
'--config',
type=str,
default='config/image_predict/trainer/trainer001.yaml',
help='config path')
args = parser.parse_args()
with open(args.config) as f:
config = yaml.safe_load(f)
mlflow_module.start_experiment(tracking_uri=os.getenv("TRACKING_URI"), **config["experiment_setting"])
mlflow.log_artifact(args.config)
trainer = Trainer(**config)
trainer.run()
mlflow.end_run()
if __name__ == '__main__':
main()
|
[
"pytorch_lightning.Trainer",
"argparse.ArgumentParser",
"pandas.read_csv",
"mlflow.log_artifact",
"yaml.safe_load",
"module.utils.set_seed",
"mlflow.active_run",
"pytorch_lightning.loggers.MLFlowLogger",
"mlflow.end_run",
"pytorch_lightning.callbacks.EarlyStopping",
"mlflow.log_metric",
"pytorch_lightning.callbacks.ModelCheckpoint",
"dotenv.load_dotenv",
"image_predict.data_module.kiva_data_module.KivaDataModule",
"pytorch_lightning.callbacks.LearningRateMonitor",
"os.getenv",
"os.makedirs",
"sklearn.model_selection.KFold",
"mlflow.log_params"
] |
[((156, 169), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (167, 169), False, 'from dotenv import load_dotenv\n'), ((4776, 4801), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4799, 4801), False, 'import argparse\n'), ((5181, 5213), 'mlflow.log_artifact', 'mlflow.log_artifact', (['args.config'], {}), '(args.config)\n', (5200, 5213), False, 'import mlflow\n'), ((5268, 5284), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (5282, 5284), False, 'import mlflow\n'), ((1644, 1667), 'pandas.read_csv', 'pd.read_csv', (['train_path'], {}), '(train_path)\n', (1655, 1667), True, 'import pandas as pd\n'), ((2288, 2408), 'image_predict.data_module.kiva_data_module.KivaDataModule', 'KivaDataModule', (['train', 'valid'], {'train_loader_params': 'self.train_loader_params', 'val_loader_params': 'self.val_loader_params'}), '(train, valid, train_loader_params=self.train_loader_params,\n val_loader_params=self.val_loader_params)\n', (2302, 2408), False, 'from image_predict.data_module.kiva_data_module import KivaDataModule\n'), ((2489, 2580), 'pytorch_lightning.callbacks.EarlyStopping', 'callbacks.EarlyStopping', ([], {'monitor': 'f"""val_{fold_name}_loss"""'}), "(monitor=f'val_{fold_name}_loss', **self.\n early_stopping_params)\n", (2512, 2580), False, 'from pytorch_lightning import callbacks\n'), ((2631, 2662), 'pytorch_lightning.callbacks.LearningRateMonitor', 'callbacks.LearningRateMonitor', ([], {}), '()\n', (2660, 2662), False, 'from pytorch_lightning import callbacks\n'), ((2671, 2723), 'os.makedirs', 'os.makedirs', (['self.model_dir_save_path'], {'exist_ok': '(True)'}), '(self.model_dir_save_path, exist_ok=True)\n', (2682, 2723), False, 'import os\n'), ((2750, 2914), 'pytorch_lightning.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', ([], {'dirpath': 'self.model_dir_save_path', 'filename': 'fold_name', 'monitor': 'f"""val_{fold_name}_loss"""', 'save_top_k': '(1)', 'mode': '"""min"""', 'save_last': '(False)'}), "(dirpath=self.model_dir_save_path, filename=\n fold_name, monitor=f'val_{fold_name}_loss', save_top_k=1, mode='min',\n save_last=False)\n", (2775, 2914), False, 'from pytorch_lightning import callbacks\n'), ((3010, 3024), 'pytorch_lightning.loggers.MLFlowLogger', 'MLFlowLogger', ([], {}), '()\n', (3022, 3024), False, 'from pytorch_lightning.loggers import MLFlowLogger\n'), ((3104, 3220), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'logger': 'mlf_logger', 'callbacks': '[lr_monitor, loss_checkpoint, early_stopping]'}), '(logger=mlf_logger, callbacks=[lr_monitor, loss_checkpoint,\n early_stopping], **self.pl_trainer_params)\n', (3114, 3220), True, 'import pytorch_lightning as pl\n'), ((3322, 3384), 'mlflow.log_metric', 'mlflow.log_metric', (['f"""epoch_{fold_name}"""', 'trainer.current_epoch'], {}), "(f'epoch_{fold_name}', trainer.current_epoch)\n", (3339, 3384), False, 'import mlflow\n'), ((3413, 3432), 'module.utils.set_seed', 'set_seed', (['self.seed'], {}), '(self.seed)\n', (3421, 3432), False, 'from module.utils import set_seed\n'), ((3446, 3519), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.validation_num', 'shuffle': '(True)', 'random_state': 'self.seed'}), '(n_splits=self.validation_num, shuffle=True, random_state=self.seed)\n', (3451, 3519), False, 'from sklearn.model_selection import KFold\n'), ((4607, 4632), 'mlflow.log_params', 'mlflow.log_params', (['params'], {}), '(params)\n', (4624, 4632), False, 'import mlflow\n'), ((4641, 4694), 'mlflow.log_artifact', 'mlflow.log_artifact', 
(['self.validation_dataset_save_dir'], {}), '(self.validation_dataset_save_dir)\n', (4660, 4694), False, 'import mlflow\n'), ((4703, 4748), 'mlflow.log_artifact', 'mlflow.log_artifact', (['self.model_dir_save_path'], {}), '(self.model_dir_save_path)\n', (4722, 4748), False, 'import mlflow\n'), ((5052, 5069), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5066, 5069), False, 'import yaml\n'), ((197, 222), 'os.getenv', 'os.getenv', (['"""PROJECT_ROOT"""'], {}), "('PROJECT_ROOT')\n", (206, 222), False, 'import os\n'), ((3729, 3789), 'os.makedirs', 'os.makedirs', (['self.validation_dataset_save_dir'], {'exist_ok': '(True)'}), '(self.validation_dataset_save_dir, exist_ok=True)\n', (3740, 3789), False, 'import os\n'), ((5118, 5143), 'os.getenv', 'os.getenv', (['"""TRACKING_URI"""'], {}), "('TRACKING_URI')\n", (5127, 5143), False, 'import os\n'), ((3054, 3073), 'mlflow.active_run', 'mlflow.active_run', ([], {}), '()\n', (3071, 3073), False, 'import mlflow\n')]
|
"""[summary]
"""
import os
import numpy as np
import tensorflow as tf
from src.utils import evaluation
from src.draw import draw
class GCLSemi:
"""[summary]
"""
def __init__(self, train_relevance_labels, train_features,
test_relevance_labels, test_features, test_query_ids, train_features_u):
"""[summary]
Args:
train_relevance_labels ([type]): [description]
train_features ([type]): [description]
test_relevance_labels ([type]): [description]
test_features ([type]): [description]
test_query_ids ([type]): [description]
train_features_u ([type]): [description]
"""
self.y_labeled2 = train_relevance_labels
self.x_labeled = train_features
self.x_unlabeled = train_features_u
self.y_unlabeled = np.zeros([self.x_unlabeled.shape[0], 1])
self.test_labels = test_relevance_labels
self.test_features = test_features
self.test_ids = test_query_ids
self.n_feature = 0
self.n_samples = 0
x = self.x_labeled
y = self.y_labeled2.reshape(-1, 1)
x_y = np.concatenate((x, y), axis=1)
np.random.seed(1)
np.random.shuffle(x_y)
self.x_labeled = x_y[:, :-1]
self.y_labeled2 = x_y[:, -1].reshape(-1,)
# ------ PARAM -----#
self.n_point = 40
self.seed = 37
self.is_change = False
self.learning_rate = 0.009
self.batch_a = 190 # 200 is for GLOBAL+ 500 is for number of party 4
self.batch_b = 200
self.lamb = 0.5
self.beta = 0.
self.r = 0.2
self.a = 0.0
self.af = 0.001
self.t1 = 0
self.t2 = 200
self.n_iter = 300
# end of param ##
# def fit(self, from_fed, to_fed, DATA_PATH, FEATURE_NUM=16):
def fit(self, from_fed, to_fed, _, feature_num=16):
"""[summary]
Args:
from_fed ([type]): [description]
to_fed ([type]): [description]
DATA_PATH ([type]): [description]
FEATURE_NUM (int, optional): [description]. Defaults to 16.
"""
fed_num = to_fed - from_fed
# initial
ws1 = np.load(os.path.join("/data/ltrdata", "w1%d.npy" % from_fed))
ws2 = np.load(os.path.join("/data/ltrdata", "w2%d.npy" % from_fed))
bs1 = np.load(os.path.join("/data/ltrdata", "b1%d.npy" % from_fed))
bs2 = np.load(os.path.join("/data/ltrdata", "b2%d.npy" % from_fed))
for i in range(from_fed + 1, to_fed):
ws1 += np.load(os.path.join("/data/ltrdata", "w1%d.npy" % i))
ws2 += np.load(os.path.join("/data/ltrdata", "w2%d.npy" % i))
bs1 += np.load(os.path.join("/data/ltrdata", "b1%d.npy" % i))
bs2 += np.load(os.path.join("/data/ltrdata", "b2%d.npy" % i))
ws1 /= fed_num
ws2 /= fed_num
bs1 /= fed_num
bs2 /= fed_num
ws = np.load(os.path.join("/data/ltrdata", "semi_ws%d.npy" % from_fed))
bs = np.load(os.path.join("/data/ltrdata", "semi_bs%d.npy" % from_fed))
for i in range(from_fed + 1, to_fed):
ws += np.load(os.path.join("/data/ltrdata", "semi_ws%d.npy" % i))
bs += np.load(os.path.join("/data/ltrdata", "semi_bs%d.npy" % i))
ws /= fed_num
bs /= fed_num
ws *= 0.1
bs *= 0.1
ws += 0.1 * np.random.randn(ws.shape[0], ws.shape[1])
bs += 0.1 * np.random.randn(bs.shape[0])
x = tf.placeholder(dtype='float', shape=[None, feature_num], name='x')
y = tf.placeholder(dtype='float', shape=[None], name='y')
w = tf.Variable(tf.constant(ws), name='w')
b = tf.Variable(tf.constant(bs), name='b')
pred = tf.transpose(tf.add(tf.matmul(x, w), b))
x_u = tf.placeholder(
dtype='float', shape=[
None, feature_num], name='xu')
pred_u = tf.add(tf.matmul(x_u, w), b)
pred_us = tf.nn.softmax(tf.add(tf.matmul(tf.add(tf.matmul(x_u, ws1), bs1), ws2), bs2))
        alpha = tf.placeholder("float")
pred_pl = tf.placeholder(dtype='float', shape=[None, 1], name='predspl')
cost = tf.add(self.lamb * tf.reduce_mean(tf.square(w)),
tf.add(tf.reduce_mean(tf.square(pred - y) / 2),
alpha * tf.reduce_mean(tf.square(pred_pl - pred_u)) / 2))
opt = tf.train.AdamOptimizer(self.learning_rate).minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
self.y_unlabeled = sess.run(pred_us, feed_dict={x_u: self.x_unlabeled})
y_l2 = []
for each in self.y_unlabeled:
if each[0] > each[1] and each[0] > each[2]:
y_l2.append(0)
elif each[1] > each[0] and each[1] > each[2]:
y_l2.append(1)
else:
y_l2.append(2)
self.y_unlabeled = np.array(y_l2)
auc_iters = []
map_iters = []
ndcg10_iters = []
ndcg_iters = []
err_iters = []
            for it in range(self.n_iter):
                # Ramp the unlabeled-loss weight "a" from self.a up to self.af between
                # t1 and t2; initialize it first so the feed_dict below never sees an
                # unbound local while it <= t1 on the first iterations.
                a = self.a
                if it > self.t1:
                    a = min((it - self.t1) / (self.t2 - self.t1) * self.af, self.af)
self.beta /= (1 + 0.5 * it)
loss_one_fed = []
x = self.x_labeled
y = self.y_labeled2.reshape(-1, 1)
left = it * self.batch_a
right = left + self.batch_a
if left >= right or right > len(x):
left = 0
right = left + self.batch_a
batch_x = x[left: right]
batch_y = y[left: right].reshape(-1,)
x_unlabeled = self.x_unlabeled
y_unlabeled = self.y_unlabeled
left = it * self.batch_b
right = left + self.batch_b
if left >= right or right > len(x_unlabeled):
left = 0
right = left + self.batch_b
batch_x_unlabeled = x_unlabeled[left: right]
batch_y_unlabeled = y_unlabeled[left: right].reshape(-1, 1)
if it % (self.n_iter // self.n_point) == 0:
pred_for_testo = sess.run(pred, feed_dict={x: self.test_features})[0]
print(min(pred_for_testo), max(pred_for_testo), np.mean(pred_for_testo))
avg_err, avg_ndcg, avg_full_ndcg, avg_map, avg_auc = \
evaluation(pred_for_testo, self.test_labels, self.test_ids, self.test_features)
err_iters.append(avg_err)
auc_iters.append(avg_auc)
map_iters.append(avg_map)
ndcg10_iters.append(avg_ndcg)
ndcg_iters.append(avg_full_ndcg)
_, loss = sess.run([opt, cost],
feed_dict={x: batch_x, y: batch_y, x_u: batch_x_unlabeled,
pred_pl: batch_y_unlabeled, alpha: a})
loss_one_fed.append(loss)
draw([i for i in range(len(ndcg10_iters))], [ndcg10_iters])
print("%f, %f, %f, %f;" % (err_iters[-1], ndcg10_iters[-1], ndcg_iters[-1], map_iters[-1]))
print("nfed_sol4=", ndcg10_iters, ";")
|
[
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.randn",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.matmul",
"numpy.mean",
"numpy.array",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"os.path.join",
"numpy.random.shuffle",
"src.utils.evaluation"
] |
[((855, 895), 'numpy.zeros', 'np.zeros', (['[self.x_unlabeled.shape[0], 1]'], {}), '([self.x_unlabeled.shape[0], 1])\n', (863, 895), True, 'import numpy as np\n'), ((1165, 1195), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (1179, 1195), True, 'import numpy as np\n'), ((1204, 1221), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1218, 1221), True, 'import numpy as np\n'), ((1230, 1252), 'numpy.random.shuffle', 'np.random.shuffle', (['x_y'], {}), '(x_y)\n', (1247, 1252), True, 'import numpy as np\n'), ((3531, 3597), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float"""', 'shape': '[None, feature_num]', 'name': '"""x"""'}), "(dtype='float', shape=[None, feature_num], name='x')\n", (3545, 3597), True, 'import tensorflow as tf\n'), ((3610, 3663), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float"""', 'shape': '[None]', 'name': '"""y"""'}), "(dtype='float', shape=[None], name='y')\n", (3624, 3663), True, 'import tensorflow as tf\n'), ((3836, 3903), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float"""', 'shape': '[None, feature_num]', 'name': '"""xu"""'}), "(dtype='float', shape=[None, feature_num], name='xu')\n", (3850, 3903), True, 'import tensorflow as tf\n'), ((4091, 4114), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (4105, 4114), True, 'import tensorflow as tf\n'), ((4134, 4196), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': '"""float"""', 'shape': '[None, 1]', 'name': '"""predspl"""'}), "(dtype='float', shape=[None, 1], name='predspl')\n", (4148, 4196), True, 'import tensorflow as tf\n'), ((4505, 4538), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4536, 4538), True, 'import tensorflow as tf\n'), ((2250, 2302), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('w1%d.npy' % from_fed)"], {}), "('/data/ltrdata', 'w1%d.npy' % from_fed)\n", (2262, 2302), False, 'import os\n'), ((2326, 2378), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('w2%d.npy' % from_fed)"], {}), "('/data/ltrdata', 'w2%d.npy' % from_fed)\n", (2338, 2378), False, 'import os\n'), ((2402, 2454), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('b1%d.npy' % from_fed)"], {}), "('/data/ltrdata', 'b1%d.npy' % from_fed)\n", (2414, 2454), False, 'import os\n'), ((2478, 2530), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('b2%d.npy' % from_fed)"], {}), "('/data/ltrdata', 'b2%d.npy' % from_fed)\n", (2490, 2530), False, 'import os\n'), ((2987, 3044), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('semi_ws%d.npy' % from_fed)"], {}), "('/data/ltrdata', 'semi_ws%d.npy' % from_fed)\n", (2999, 3044), False, 'import os\n'), ((3067, 3124), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('semi_bs%d.npy' % from_fed)"], {}), "('/data/ltrdata', 'semi_bs%d.npy' % from_fed)\n", (3079, 3124), False, 'import os\n'), ((3428, 3469), 'numpy.random.randn', 'np.random.randn', (['ws.shape[0]', 'ws.shape[1]'], {}), '(ws.shape[0], ws.shape[1])\n', (3443, 3469), True, 'import numpy as np\n'), ((3490, 3518), 'numpy.random.randn', 'np.random.randn', (['bs.shape[0]'], {}), '(bs.shape[0])\n', (3505, 3518), True, 'import numpy as np\n'), ((3688, 3703), 'tensorflow.constant', 'tf.constant', (['ws'], {}), '(ws)\n', (3699, 3703), True, 'import tensorflow as tf\n'), ((3739, 3754), 'tensorflow.constant', 'tf.constant', (['bs'], {}), '(bs)\n', (3750, 3754), True, 'import tensorflow as 
tf\n'), ((3958, 3975), 'tensorflow.matmul', 'tf.matmul', (['x_u', 'w'], {}), '(x_u, w)\n', (3967, 3975), True, 'import tensorflow as tf\n'), ((4552, 4564), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4562, 4564), True, 'import tensorflow as tf\n'), ((5029, 5043), 'numpy.array', 'np.array', (['y_l2'], {}), '(y_l2)\n', (5037, 5043), True, 'import numpy as np\n'), ((2605, 2650), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('w1%d.npy' % i)"], {}), "('/data/ltrdata', 'w1%d.npy' % i)\n", (2617, 2650), False, 'import os\n'), ((2679, 2724), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('w2%d.npy' % i)"], {}), "('/data/ltrdata', 'w2%d.npy' % i)\n", (2691, 2724), False, 'import os\n'), ((2753, 2798), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('b1%d.npy' % i)"], {}), "('/data/ltrdata', 'b1%d.npy' % i)\n", (2765, 2798), False, 'import os\n'), ((2827, 2872), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('b2%d.npy' % i)"], {}), "('/data/ltrdata', 'b2%d.npy' % i)\n", (2839, 2872), False, 'import os\n'), ((3198, 3248), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('semi_ws%d.npy' % i)"], {}), "('/data/ltrdata', 'semi_ws%d.npy' % i)\n", (3210, 3248), False, 'import os\n'), ((3276, 3326), 'os.path.join', 'os.path.join', (['"""/data/ltrdata"""', "('semi_bs%d.npy' % i)"], {}), "('/data/ltrdata', 'semi_bs%d.npy' % i)\n", (3288, 3326), False, 'import os\n'), ((3801, 3816), 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), '(x, w)\n', (3810, 3816), True, 'import tensorflow as tf\n'), ((4432, 4474), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (4454, 4474), True, 'import tensorflow as tf\n'), ((4246, 4258), 'tensorflow.square', 'tf.square', (['w'], {}), '(w)\n', (4255, 4258), True, 'import tensorflow as tf\n'), ((6593, 6672), 'src.utils.evaluation', 'evaluation', (['pred_for_testo', 'self.test_labels', 'self.test_ids', 'self.test_features'], {}), '(pred_for_testo, self.test_labels, self.test_ids, self.test_features)\n', (6603, 6672), False, 'from src.utils import evaluation\n'), ((4036, 4055), 'tensorflow.matmul', 'tf.matmul', (['x_u', 'ws1'], {}), '(x_u, ws1)\n', (4045, 4055), True, 'import tensorflow as tf\n'), ((4305, 4324), 'tensorflow.square', 'tf.square', (['(pred - y)'], {}), '(pred - y)\n', (4314, 4324), True, 'import tensorflow as tf\n'), ((6469, 6492), 'numpy.mean', 'np.mean', (['pred_for_testo'], {}), '(pred_for_testo)\n', (6476, 6492), True, 'import numpy as np\n'), ((4383, 4410), 'tensorflow.square', 'tf.square', (['(pred_pl - pred_u)'], {}), '(pred_pl - pred_u)\n', (4392, 4410), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
import curses
dogdance1=[[
' ▄','▄','▄', #3
'▄▄▄▄','▄','▄', #6
'▄'],[' ',' ', #9
' ', '▄','▄'],#12
[' ',' ' , '▄',#15
' ','▄',' ',#18
'▄▄', '▄▄ '],[ ' ',#21
' ','▄',' ',#24
' ','▄',' ',#27
'▄', ' ','▄▄▄▄▄▄',#30
'▄'],[' ',' ',#33
'▄▄▄▄',' ',' ',#36
'▀'],[' ',' ',#39
' '],[' ',' ', #42
'▄', '▀'],[' ',#45
' ',' ','▄▄ ▄▄▄▄ ▄▄ ', #48
' '],[' ', ' ',#51
' ','▄','▀▀',#54
'▄','▀',' ▀',#57
'▄', '▀',' ',#60
' ', ' ','▄',#63
'▀'],[' ','▀ ▀']]#66
dog1pallete=[
[1,3,1, #3
3,1,3,#6
1],[1,3,#9
2,3,1], #12
[4,2,3, #15
2,3,2, #18
3,1],[4, #21
2,2,2, #24
4,3,2, #27
2,2,3, #30
1],[4,2, #33
3,2,4, #36
1],[4,2, #39
4],[4,2, #42
2,1],[1, #45
4,2,2, #48
4],[1,4, #51
2,2,1, #54
2,1,1, #57
2,1,1, #60
4,2,2, #63
1],[1,1]] #66
dogdance2=[[
' ▄','▄','▄', #3
'▄▄▄▄','▄','▄',#6
'▄'],[' ',' ',#9
' ','▄','▄'],[ #12
' ',' ',' ', #15
'▄',' ','▄',#18
' ','▄▄','▄▄', #21
' ','▄','▄', #24
'▄'],[' ',' ', #27
'▄',' ',' ▄', #30
' ▄ ',' ','▄▄▄', #33
' ',' ',' '],[' ', #36
' ','▄▄▄▄',' ', #39
' ',' '],[' ',' ',#42
' '],[' ',' ▄', #45
'▀'],[' ',' ', #48
' ▄▄ ▄▄▄▄ ▄▄ ',' '],[' ', #51
'▀','▄','▀', #54
' ',' ',' ▄', #57
'▀ ',' ',' ▄', #60
'▀▀','▄','▀ '],[ #63
' ▀ ▀ ']]#64
dog2pallete=[[1,3,1, #3
3,1,3, #6
1],[1,3, #9
2,3,1],[ #12
4,2,2, #15
3,2,3, #18
2,3,1, #21
1,1,3, #24
1],[4,2, #27
2,2,3, #30
2,2,3, #33
2,3,1],[3, #36
2,3,2, #39
3,1],[3,2, #42
3],[3,2, #45
1],[1,3, #48
2,3],[1, #51
1,2,1, #54
1,3,2, #57
1,3,2, #60
1,2,1],[ #63
1]] #64
def draw_dog1(scr,posx,posy):
    width = posx
    for code,num in zip(dogdance1,dog1pallete):
        for st,pair in zip(code,num):
            scr.addstr(posy,width,st,curses.color_pair(pair))
            # Python 2: the art literals are UTF-8 byte strings, so decode
            # before len() to advance by display cells rather than bytes.
            width=width+(len(st.decode('utf-8')))
        posy=posy+1
        width=posx
def draw_dog2(scr,posx,posy):
    width = posx
    for code,num in zip(dogdance2,dog2pallete):
        for st,pair in zip(code,num):
            scr.addstr(posy,width,st,curses.color_pair(pair))
            width=width+(len(st.decode('utf-8')))
        posy=posy+1
        width=posx
#def main():
# i = 1
# for code,num in zip(dogdance2,dog2pallete):
# for st,pair in zip(code,num):
# #print st
# print len(st.decode('utf-8'))
# #i = i + 1
# #print "-------------------"
# #print len(code),len(num)
#
#if __name__ == "__main__":
# main()
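# A minimal runnable sketch of drawing both frames, based on the commented-out
# main() above. It assumes Python 2 (matching the .decode('utf-8') calls) and
# guesses the four colors, since the palettes only record pair numbers 1-4.
def _demo(stdscr):
    curses.curs_set(0)  # hide the cursor while the art is shown
    for n, fg in ((1, curses.COLOR_WHITE), (2, curses.COLOR_YELLOW),
                  (3, curses.COLOR_RED), (4, curses.COLOR_CYAN)):
        curses.init_pair(n, fg, curses.COLOR_BLACK)
    draw_dog1(stdscr, 2, 2)
    draw_dog2(stdscr, 35, 2)
    stdscr.refresh()
    stdscr.getch()  # wait for a keypress before tearing the screen down

if __name__ == "__main__":
    import locale
    locale.setlocale(locale.LC_ALL, '')  # so curses emits the UTF-8 art correctly
    curses.wrapper(_demo)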
|
[
"curses.color_pair"
] |
[((2141, 2164), 'curses.color_pair', 'curses.color_pair', (['pair'], {}), '(pair)\n', (2158, 2164), False, 'import curses\n'), ((2436, 2459), 'curses.color_pair', 'curses.color_pair', (['pair'], {}), '(pair)\n', (2453, 2459), False, 'import curses\n')]
|
from collections import OrderedDict
from providers import value, terminal
def result_format(database_result, fmt):
format_function = 'result_format_%s' % fmt
if format_function not in globals():
raise Exception('Unsupported format "%s"' % fmt)
return globals()[format_function](database_result)
def result_format_tabular(database_result):
if len(database_result) == 0:
return ''
column_width = OrderedDict()
text_output = []
for k in database_result[0].keys():
column_width[k] = len(k)+1
for line in database_result:
for (k, v) in line.items():
if len(str(v)) > column_width[k]:
column_width[k] = len(str(v))+1
output_line = '+'
for (k, v) in column_width.items():
output_line += '-' * v + '+'
text_output.append(output_line)
output_line = '|'
for (k, v) in column_width.items():
output_line += '{:>{width}}|'.format(k, width=column_width[k])
text_output.append(output_line)
output_line = '+'
for (k, v) in column_width.items():
output_line += '-' * v + '+'
text_output.append(output_line)
for line in database_result:
output_line = '|'
for k in column_width.keys():
output_line += '{:>{width}}|'.format(str(line[k]) if line[k] is not None else '', width=column_width[k])
text_output.append(output_line)
output_line = '+'
for (k, v) in column_width.items():
output_line += '-' * v + '+'
text_output.append(output_line)
return '\n'.join(text_output)
def result_format_vertical(database_result):
if len(database_result) == 0:
return ''
text_output = []
max_label_length = 0
for k in database_result[0].keys():
if len(k) > max_label_length:
max_label_length = len(k)
row_num = 1
for line in database_result:
line_header = '{:*^{width}}'.format(' %s. row ' % row_num, width=64)
text_output.append(line_header)
for (k, v) in line.items():
line_column = terminal.get_key_value_adjusted(k, v, max_label_length)
text_output.append(line_column)
row_num += 1
return '\n'.join(text_output)
def result_format_keyvalue(database_result):
if len(database_result) == 0:
return ''
text_output = []
for line in database_result:
kv_list = list(line.values())
text_output.append('%s=%s' % (kv_list[0], str(kv_list[1])))
return '\n'.join(text_output)
def convert_to_dict(database_result):
if len(database_result) == 0:
return {}
dict_output = {}
for line in database_result:
kv_list = list(line.values())
dict_output[kv_list[0]] = int(kv_list[1]) if value.represents_int(kv_list[1]) else kv_list[1]
return dict_output
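# Example (a sketch): any list of dicts with a consistent key order works,
# e.g. rows coming back from a DB-API cursor wrapped as dictionaries.
if __name__ == '__main__':
    rows = [
        {'id': 1, 'name': 'alpha'},
        {'id': 2, 'name': None},
    ]
    print(result_format(rows, 'tabular'))   # bordered, right-aligned table
    print(result_format(rows, 'keyvalue'))  # "1=alpha" style lines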
|
[
"collections.OrderedDict",
"providers.terminal.get_key_value_adjusted",
"providers.value.represents_int"
] |
[((441, 454), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (452, 454), False, 'from collections import OrderedDict\n'), ((2104, 2159), 'providers.terminal.get_key_value_adjusted', 'terminal.get_key_value_adjusted', (['k', 'v', 'max_label_length'], {}), '(k, v, max_label_length)\n', (2135, 2159), False, 'from providers import value, terminal\n'), ((2805, 2837), 'providers.value.represents_int', 'value.represents_int', (['kv_list[1]'], {}), '(kv_list[1])\n', (2825, 2837), False, 'from providers import value, terminal\n')]
|
"""
A CPython inspired RPython parser.
"""
from rpython.rlib.objectmodel import not_rpython
class Grammar(object):
"""
Base Grammar object.
Pass this to ParserGenerator.build_grammar to fill it with useful values for
the Parser.
"""
def __init__(self):
self.symbol_ids = {}
self.symbol_names = {}
self.symbol_to_label = {}
self.keyword_ids = {}
self.token_to_error_string = {}
self.dfas = []
self.labels = [0]
self.token_ids = {}
self.start = -1
def shared_copy(self):
new = self.__class__()
new.symbol_ids = self.symbol_ids
        new.symbol_names = self.symbol_names
new.keyword_ids = self.keyword_ids
new.token_to_error_string = self.token_to_error_string
new.dfas = self.dfas
new.labels = self.labels
new.token_ids = self.token_ids
return new
def classify(self, token):
"""Find the label for a token."""
if token.token_type == self.KEYWORD_TOKEN:
label_index = self.keyword_ids.get(token.value, -1)
if label_index != -1:
return label_index
label_index = self.token_ids.get(token.token_type, -1)
if label_index == -1:
raise ParseError("invalid token", token)
return label_index
def _freeze_(self):
# Remove some attributes not used in parsing.
try:
del self.symbol_to_label
del self.symbol_names
del self.symbol_ids
except AttributeError:
pass
return True
class DFA(object):
def __init__(self, grammar, symbol_id, states, first):
self.grammar = grammar
self.symbol_id = symbol_id
self.states = states
self.first = self._first_to_string(first)
    def could_match_token(self, label_index):
        pos = label_index >> 3
        bit = 1 << (label_index & 0b111)
        return bool(ord(self.first[pos]) & bit)
@staticmethod
@not_rpython
def _first_to_string(first):
l = sorted(first.keys())
b = bytearray(32)
for label_index in l:
pos = label_index >> 3
bit = 1 << (label_index & 0b111)
b[pos] |= bit
return str(b)
class Token(object):
def __init__(self, token_type, value, lineno, column, line):
self.token_type = token_type
self.value = value
self.lineno = lineno
# 0-based offset
self.column = column
self.line = line
def __repr__(self):
return "Token(%s, %s)" % (self.token_type, self.value)
def __eq__(self, other):
# for tests
return (
self.token_type == other.token_type and
self.value == other.value and
self.lineno == other.lineno and
self.column == other.column and
self.line == other.line
)
def __ne__(self, other):
return not self == other
class Node(object):
__slots__ = ("grammar", "type")
def __init__(self, grammar, type):
assert grammar is None or isinstance(grammar, Grammar)
assert isinstance(type, int)
self.grammar = grammar
self.type = type
def __eq__(self, other):
raise NotImplementedError("abstract base class")
def __ne__(self, other):
return not self == other
def get_value(self):
return None
def get_child(self, i):
raise NotImplementedError("abstract base class")
def num_children(self):
return 0
def append_child(self, child):
raise NotImplementedError("abstract base class")
def get_lineno(self):
raise NotImplementedError("abstract base class")
def get_column(self):
raise NotImplementedError("abstract base class")
def get_line(self):
raise NotImplementedError("abstract base class")
def view(self):
from dotviewer import graphclient
import pytest
r = ["digraph G {"]
self._dot(r)
r.append("}")
p = pytest.ensuretemp("pyparser").join("temp.dot")
p.write("\n".join(r))
graphclient.display_dot_file(str(p))
def _dot(self, result):
raise NotImplementedError("abstract base class")
class Terminal(Node):
__slots__ = ("value", "lineno", "column", "line")
def __init__(self, grammar, type, value, lineno, column, line=None):
Node.__init__(self, grammar, type)
self.value = value
self.lineno = lineno
self.column = column
self.line = line
@staticmethod
def fromtoken(grammar, token):
return Terminal(
grammar,
token.token_type, token.value, token.lineno, token.column,
token.line)
def __repr__(self):
return "Terminal(type=%s, value=%r)" % (self.type, self.value)
def __eq__(self, other):
# For tests.
return (type(self) == type(other) and
self.type == other.type and
self.value == other.value)
def get_value(self):
return self.value
def get_lineno(self):
return self.lineno
def get_column(self):
return self.column
def get_line(self):
return self.line
def _dot(self, result):
result.append('%s [label="%r", shape=box];' % (id(self), self.value))
class AbstractNonterminal(Node):
__slots__ = ()
def get_lineno(self):
return self.get_child(0).get_lineno()
def get_column(self):
return self.get_child(0).get_column()
def get_line(self):
return self.get_child(0).get_line()
def __eq__(self, other):
# For tests.
# grumble, annoying
if not isinstance(other, AbstractNonterminal):
return False
if self.type != other.type:
return False
if self.num_children() != other.num_children():
return False
for i in range(self.num_children()):
if self.get_child(i) != other.get_child(i):
return False
return True
    def _dot(self, result):
        # Declare this node once, then emit one edge per child.
        result.append('%s [label=%s, shape=box]' % (id(self), self.grammar.symbol_names[self.type]))
        for i in range(self.num_children()):
            child = self.get_child(i)
            result.append('%s -> %s [label="%s"]' % (id(self), id(child), i))
            child._dot(result)
class Nonterminal(AbstractNonterminal):
__slots__ = ("_children", )
def __init__(self, grammar, type, children=None):
Node.__init__(self, grammar, type)
if children is None:
children = []
self._children = children
def __repr__(self):
return "Nonterminal(type=%s, children=%r)" % (
self.grammar.symbol_names[self.type]
if self.grammar is not None else self.type,
self._children)
def get_child(self, i):
assert self._children is not None
return self._children[i]
def num_children(self):
return len(self._children)
def append_child(self, child):
self._children.append(child)
class Nonterminal1(AbstractNonterminal):
__slots__ = ("_child", )
def __init__(self, grammar, type, child):
Node.__init__(self, grammar, type)
self._child = child
def __repr__(self):
return "Nonterminal(type=%s, children=[%r])" % (
self.grammar.symbol_names[self.type]
if self.grammar is not None else self.type,
self._child)
def get_child(self, i):
assert i == 0 or i == -1
return self._child
def num_children(self):
return 1
def append_child(self, child):
assert 0, "should be unreachable"
class ParseError(Exception):
def __init__(self, msg, token, expected=-1, expected_str=None):
self.msg = msg
self.token = token
self.expected = expected
self.expected_str = expected_str
def __str__(self):
return "ParserError(%s)" % (self.token, )
class StackEntry(object):
def __init__(self, next, dfa, state):
self.next = next
self.dfa = dfa
self.state = state
self.node = None
def push(self, dfa, state):
return StackEntry(self, dfa, state)
def pop(self):
return self.next
def node_append_child(self, child):
node = self.node
if node is None:
self.node = Nonterminal1(self.dfa.grammar, self.dfa.symbol_id, child)
elif isinstance(node, Nonterminal1):
newnode = self.node = Nonterminal(
self.dfa.grammar,
self.dfa.symbol_id, [node._child, child])
else:
self.node.append_child(child)
def view(self):
from dotviewer import graphclient
import pytest
r = ["digraph G {"]
self._dot(r)
r.append("}")
p = pytest.ensuretemp("pyparser").join("temp.dot")
p.write("\n".join(r))
graphclient.display_dot_file(str(p))
def _dot(self, result):
result.append('%s [label=%s, shape=box, color=white]' % (id(self), self.dfa.grammar.symbol_names[self.dfa.symbol_id]))
if self.next:
result.append('%s -> %s [label="next"]' % (id(self), id(self.next)))
self.next._dot(result)
if self.node:
result.append('%s -> %s [label="node"]' % (id(self), id(self.node)))
self.node._dot(result)
class Parser(object):
def __init__(self, grammar):
self.grammar = grammar
self.root = None
def prepare(self, start=-1):
"""Setup the parser for parsing.
Takes the starting symbol as an argument.
"""
if start == -1:
start = self.grammar.start
self.root = None
self.stack = StackEntry(None, self.grammar.dfas[start - 256], 0)
def add_token(self, token):
label_index = self.grammar.classify(token)
sym_id = 0 # for the annotator
while True:
dfa = self.stack.dfa
state_index = self.stack.state
states = dfa.states
arcs, is_accepting = states[state_index]
for i, next_state in arcs:
sym_id = self.grammar.labels[i]
if label_index == i:
# We matched a non-terminal.
self.shift(next_state, token)
state = states[next_state]
# While the only possible action is to accept, pop nodes off
# the stack.
while state[1] and not state[0]:
self.pop()
if self.stack is None:
# Parsing is done.
return True
dfa = self.stack.dfa
state_index = self.stack.state
state = dfa.states[state_index]
return False
elif sym_id >= 256:
sub_node_dfa = self.grammar.dfas[sym_id - 256]
# Check if this token can start a child node.
if sub_node_dfa.could_match_token(label_index):
self.push(sub_node_dfa, next_state, sym_id)
break
else:
# We failed to find any arcs to another state, so unless this
# state is accepting, it's invalid input.
if is_accepting:
self.pop()
if self.stack is None:
raise ParseError("too much input", token)
else:
# If only one possible input would satisfy, attach it to the
# error.
if len(arcs) == 1:
expected = sym_id
expected_str = self.grammar.token_to_error_string.get(
arcs[0][0], None)
else:
expected = -1
expected_str = None
raise ParseError("bad input", token, expected, expected_str)
def shift(self, next_state, token):
"""Shift a non-terminal and prepare for the next state."""
new_node = Terminal.fromtoken(self.grammar, token)
self.stack.node_append_child(new_node)
self.stack.state = next_state
def push(self, next_dfa, next_state, node_type):
"""Push a terminal and adjust the current state."""
self.stack.state = next_state
self.stack = self.stack.push(next_dfa, 0)
def pop(self):
"""Pop an entry off the stack and make its node a child of the last."""
top = self.stack
self.stack = top.pop()
node = top.node
assert node is not None
if self.stack:
self.stack.node_append_child(node)
else:
self.root = node
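# Usage sketch (commented, since building a Grammar requires the separate
# ParserGenerator machinery; `grammar`, `start_symbol`, and the token stream
# are assumed to come from there):
#
#     parser = Parser(grammar)
#     parser.prepare(start_symbol)
#     for tok in tokens:               # tokens: an iterable of Token objects
#         if parser.add_token(tok):
#             break                    # True once the start symbol is reduced
#     tree = parser.root               # the finished parse tree (a Node)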
|
[
"pytest.ensuretemp"
] |
[((4103, 4132), 'pytest.ensuretemp', 'pytest.ensuretemp', (['"""pyparser"""'], {}), "('pyparser')\n", (4120, 4132), False, 'import pytest\n'), ((8970, 8999), 'pytest.ensuretemp', 'pytest.ensuretemp', (['"""pyparser"""'], {}), "('pyparser')\n", (8987, 8999), False, 'import pytest\n')]
|
import json
import pandas as pd
import requests
from datetime import datetime
from io import StringIO
from furl import furl
from tqdm import tqdm
from time import sleep
class Appodeal:
DEFAULT_ENDPOINT = "https://api-services.appodeal.com/api/v2/stats_api?/"
TASK_ENDPOINT = "https://api-services.appodeal.com/api/v2/check_status?/"
OUTPUT_ENDPOINT = "https://api-services.appodeal.com/api/v2/output_result?/"
DETALISATION = [
"date",
"country",
"banner_type",
"segment",
"placement",
"network",
"app",
]
def __init__(self, api_token, user_id):
self.api_key = api_token
self.user_id = user_id
def __build_args(self, date_from, date_to, kwargs):
args = {
"api_key": self.api_key,
"user_id": self.user_id,
"date_from": date_from,
"date_to": date_to,
}
if "country[]" in kwargs:
args["country[]"] = kwargs.get("country[]")
if "network[]" in kwargs:
args["network[]"] = kwargs.get("network[]")
if "app[]" in kwargs:
args["app[]"] = kwargs.get("app[]")
if "detalisation[]" in kwargs:
args["detalisation[]"] = kwargs.get("detalisation[]")
return args
def __build_task_args(self, task_id):
args = {"api_key": self.api_key, "user_id": self.user_id, "task_id": task_id}
return args
    def __to_df(self, resp):
        # Use the module-level pandas import instead of re-importing it
        # locally under a confusing alias.
        if resp.status_code != requests.codes.ok:
            raise Exception(resp.text)
        return pd.read_csv(StringIO(resp.text))
def report(
self,
date_from,
date_to,
as_df=True,
country=None,
network=None,
app=None,
detalisation=None,
report_waiting_time=3600,
**kwargs
):
f = furl(self.DEFAULT_ENDPOINT)
if detalisation is None:
kwargs["detalisation[]"] = self.DETALISATION
else:
kwargs["detalisation[]"] = detalisation
if country is not None:
kwargs["country[]"] = country
else:
pass
if network is not None:
kwargs["network[]"] = network
else:
pass
if app is not None:
kwargs["app[]"] = app
else:
pass
f.args = self.__build_args(date_from, date_to, kwargs)
request_get_task = requests.get(f.url)
task_id = str(json.loads(request_get_task.text)["task_id"])
print('TaskId {} obtained!'.format(task_id))
f_task = furl(self.TASK_ENDPOINT)
f_task.args = self.__build_task_args(task_id)
print('Waiting for report... 5 second checks started!')
starttime = datetime.now()
diff = []
diff = diff + [int((datetime.now() - starttime).seconds)]
with tqdm(total=report_waiting_time) as pbar:
while diff[-1] < report_waiting_time:
if json.loads(requests.get(f_task.url).text)["task_status"] == "0":
if diff[-1]>120:
sleep(10)
diff = diff + [int((datetime.now() - starttime).seconds)]
diff_sub = diff[-1]-diff[-2]
pbar.update(diff_sub)
else:
sleep(5)
diff = diff + [int((datetime.now() - starttime).seconds)]
diff_sub = diff[-1]-diff[-2]
pbar.update(diff_sub)
elif json.loads(requests.get(f_task.url).text)["task_status"] == "1":
print("Report is ready!")
break
                elif diff[-1] > report_waiting_time:  # diff is a list of elapsed seconds
                    print('Waiting time expired. Increase period!')
f_report = furl(self.OUTPUT_ENDPOINT)
f_report.args = self.__build_task_args(task_id)
request_get_data = requests.get(f_report.url)
report_data = json.loads(request_get_data.text)
print('ReportData collected!')
if 'data' not in report_data:
report_data = requests.get(report_data['url']).json()
else:
report_data = report_data["data"]
if as_df:
return pd.json_normalize(report_data)
else:
return report_data
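# Example (a sketch; the credentials, dates, and filters are placeholders):
#
#     client = Appodeal(api_token='YOUR_API_KEY', user_id='YOUR_USER_ID')
#     df = client.report(
#         date_from='2020-01-01',
#         date_to='2020-01-07',
#         detalisation=['date', 'country', 'app'],
#         as_df=True,
#     )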
|
[
"io.StringIO",
"tqdm.tqdm",
"json.loads",
"pandas.json_normalize",
"furl.furl",
"time.sleep",
"requests.get",
"datetime.datetime.now"
] |
[((1903, 1930), 'furl.furl', 'furl', (['self.DEFAULT_ENDPOINT'], {}), '(self.DEFAULT_ENDPOINT)\n', (1907, 1930), False, 'from furl import furl\n'), ((2485, 2504), 'requests.get', 'requests.get', (['f.url'], {}), '(f.url)\n', (2497, 2504), False, 'import requests\n'), ((2643, 2667), 'furl.furl', 'furl', (['self.TASK_ENDPOINT'], {}), '(self.TASK_ENDPOINT)\n', (2647, 2667), False, 'from furl import furl\n'), ((2806, 2820), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2818, 2820), False, 'from datetime import datetime\n'), ((3903, 3929), 'furl.furl', 'furl', (['self.OUTPUT_ENDPOINT'], {}), '(self.OUTPUT_ENDPOINT)\n', (3907, 3929), False, 'from furl import furl\n'), ((4013, 4039), 'requests.get', 'requests.get', (['f_report.url'], {}), '(f_report.url)\n', (4025, 4039), False, 'import requests\n'), ((4062, 4095), 'json.loads', 'json.loads', (['request_get_data.text'], {}), '(request_get_data.text)\n', (4072, 4095), False, 'import json\n'), ((1636, 1655), 'io.StringIO', 'StringIO', (['resp.text'], {}), '(resp.text)\n', (1644, 1655), False, 'from io import StringIO\n'), ((2928, 2959), 'tqdm.tqdm', 'tqdm', ([], {'total': 'report_waiting_time'}), '(total=report_waiting_time)\n', (2932, 2959), False, 'from tqdm import tqdm\n'), ((4355, 4385), 'pandas.json_normalize', 'pd.json_normalize', (['report_data'], {}), '(report_data)\n', (4372, 4385), True, 'import pandas as pd\n'), ((2527, 2560), 'json.loads', 'json.loads', (['request_get_task.text'], {}), '(request_get_task.text)\n', (2537, 2560), False, 'import json\n'), ((4209, 4241), 'requests.get', 'requests.get', (["report_data['url']"], {}), "(report_data['url'])\n", (4221, 4241), False, 'import requests\n'), ((3166, 3175), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (3171, 3175), False, 'from time import sleep\n'), ((3409, 3417), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (3414, 3417), False, 'from time import sleep\n'), ((2867, 2881), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2879, 2881), False, 'from datetime import datetime\n'), ((3050, 3074), 'requests.get', 'requests.get', (['f_task.url'], {}), '(f_task.url)\n', (3062, 3074), False, 'import requests\n'), ((3633, 3657), 'requests.get', 'requests.get', (['f_task.url'], {}), '(f_task.url)\n', (3645, 3657), False, 'import requests\n'), ((3220, 3234), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3232, 3234), False, 'from datetime import datetime\n'), ((3462, 3476), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3474, 3476), False, 'from datetime import datetime\n')]
|
from web.template import CompiledTemplate, ForLoop, TemplateResult
# coding: utf-8
def base (page):
__lineoffset__ = -4
loop = ForLoop()
self = TemplateResult(); extend_ = self.extend
extend_([u'\n'])
extend_([u'<html>\n'])
extend_([u'<head>\n'])
extend_([u' <meta name="viewport" content="width=device-width, initial-scale=1">\n'])
extend_([u' <title>MapGetter</title>\n'])
extend_([u' <link rel="shortcut icon" type="image/x-icon" href="/static/favicon.ico" />\n'])
extend_([u' <link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">\n'])
extend_([u' <link rel="stylesheet" type="text/css" href="/static/Styles/styles.css" />\n'])
extend_([u' <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>\n'])
extend_([u' <script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>\n'])
extend_([u' <script src="https://maps.googleapis.com/maps/api/js?v=3.exp&key=<KEY>"></script>\n'])
extend_([u' <script src="/static/Scripts/mapgetter.js" type="text/javascript"></script>\n'])
extend_([u'</head>\n'])
extend_([u'\n'])
extend_([u'<body>\n'])
extend_([u' <!-- Navigation Bar -->\n'])
extend_([u' <nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">\n'])
extend_([u' <div class="container">\n'])
extend_([u' <!-- Brand and toggle get grouped for better mobile display -->\n'])
extend_([u' <div class="navbar-header">\n'])
extend_([u' <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1">\n'])
extend_([u' <span class="sr-only">Toggle navigation</span>\n'])
extend_([u' <span class="icon-bar"></span>\n'])
extend_([u' <span class="icon-bar"></span>\n'])
extend_([u' <span class="icon-bar"></span>\n'])
extend_([u' </button>\n'])
extend_([u' <a class="navbar-brand" href="http://blog.mpiannucci.com/"><NAME></a>\n'])
extend_([u' </div>\n'])
extend_([u' <!-- Collect the nav links, forms, and other content for toggling -->\n'])
extend_([u' <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">\n'])
extend_([u' <ul class="nav navbar-nav">\n'])
extend_([u' <li>\n'])
extend_([u' <a href="http://blog.mpiannucci.com/blog">Blog</a>\n'])
extend_([u' </li>\n'])
extend_([u' <li>\n'])
extend_([u' <a href="http://blog.mpiannucci.com/apps">Projects</a>\n'])
extend_([u' </li>\n'])
extend_([u' <li>\n'])
extend_([u' <a href="http://blog.mpiannucci.com/bio">About</a>\n'])
extend_([u' </li>\n'])
extend_([u' </ul>\n'])
extend_([u' </div>\n'])
extend_([u' <!-- /.navbar-collapse -->\n'])
extend_([u' </div>\n'])
extend_([u' <!-- /.container -->\n'])
extend_([u' </nav>\n'])
extend_([u' <header class="jumbotron map_jumbotron" id="mainheader">\n'])
extend_([u' <div class="container">\n'])
extend_([u' <h1>MapGetter</h1>\n'])
extend_([u' <p>Get static images of a central area with coordinates in meters</p>\n'])
extend_([u' <em>Images courtesy of Google Maps</em>\n'])
extend_([u' </div>\n'])
extend_([u' </header>\n'])
extend_([u' <div class="row">\n'])
extend_([u' <div class="col-sm-12 text-center" id="mapImage">\n'])
extend_([u' <div class="container">\n'])
extend_([u' ', escape_(page, False), u'\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u' <div class="row">\n'])
extend_([u' <div class="col-sm-12 text-center" id="mainfooter">\n'])
extend_([u' <div class="container">\n'])
extend_([u' <p>Copyright 2014, <NAME></p>\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u' </div>\n'])
extend_([u'</div>\n'])
extend_([u'\n'])
extend_([u'</body>\n'])
extend_([u'</html>\n'])
return self
base = CompiledTemplate(base, 'templates/base.html')
join_ = base._join; escape_ = base._escape
# coding: utf-8
def index():
__lineoffset__ = -5
loop = ForLoop()
self = TemplateResult(); extend_ = self.extend
extend_([u'<div id="mapforms" class="table-responsive">\n'])
extend_([u' <form name="mapform">\n'])
extend_([u' <table class="table">\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="coordCheck">By Coordinates</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="checkbox" id="coordcheck" onclick="handleCheck(this)"></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="addressbox">Address</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="textbox" id="addressbox"></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="citybox">City</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="textbox" id="citybox"></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' </th>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="statebox">State</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="textbox" id="statebox"></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="textbox">Latitude</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="textbox" id="latbox" disabled></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="lonbox">Longitude</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="textbox" id="lonbox" disabled></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="zoomdrop" id="zoomlabel">Zoom</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <select id="zoomdrop">Zoom\n'])
extend_([u' <option value="5">5</option>\n'])
extend_([u' <option value="6">6</option>\n'])
extend_([u' <option value="7">7</option>\n'])
extend_([u' <option value="8">8</option>\n'])
extend_([u' <option value="9">9</option>\n'])
extend_([u' <option value="10">10</option>\n'])
extend_([u' <option value="11">11</option>\n'])
extend_([u' <option value="12">12</option>\n'])
extend_([u' <option value="13">13</option>\n'])
extend_([u' <option value="14">14</option>\n'])
extend_([u' <option value="15">15</option>\n'])
extend_([u' <option value="16">16</option>\n'])
extend_([u' <option value="17">17</option>\n'])
extend_([u' <option value="18">18</option>\n'])
extend_([u' <option value="19">19</option>\n'])
extend_([u' <option value="20">20</option>\n'])
extend_([u' </select>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th>\n'])
extend_([u' <label for="textbox">Width and Height (meters)</label>\n'])
extend_([u' </th>\n'])
extend_([u' <td>\n'])
extend_([u' <input type="textbox" id="resultbox" disabled onclick="onSideLengthClick()" readonly="readonly"></input>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' <tr>\n'])
extend_([u' <th></th>\n'])
extend_([u' <td>\n'])
extend_([u' <button type="button" class="btn btn-default btn-lg" id="formButton" onclick="handleGetMap()" name="Get My Map">Get My Map</button>\n'])
extend_([u' </td>\n'])
extend_([u' </tr>\n'])
extend_([u' </table>\n'])
extend_([u' </form>\n'])
extend_([u'</div>\n'])
extend_([u'<div class="row">\n'])
extend_([u' <div id="mapimage" class="col-lg-12">\n'])
extend_([u' <img src="" id="mapresult" />\n'])
extend_([u' </div>\n'])
extend_([u'</div>\n'])
return self
index = CompiledTemplate(index, 'templates/index.html')
join_ = index._join; escape_ = index._escape
|
[
"web.template.CompiledTemplate",
"web.template.TemplateResult",
"web.template.ForLoop"
] |
[((4577, 4622), 'web.template.CompiledTemplate', 'CompiledTemplate', (['base', '"""templates/base.html"""'], {}), "(base, 'templates/base.html')\n", (4593, 4622), False, 'from web.template import CompiledTemplate, ForLoop, TemplateResult\n'), ((10303, 10350), 'web.template.CompiledTemplate', 'CompiledTemplate', (['index', '"""templates/index.html"""'], {}), "(index, 'templates/index.html')\n", (10319, 10350), False, 'from web.template import CompiledTemplate, ForLoop, TemplateResult\n'), ((137, 146), 'web.template.ForLoop', 'ForLoop', ([], {}), '()\n', (144, 146), False, 'from web.template import CompiledTemplate, ForLoop, TemplateResult\n'), ((158, 174), 'web.template.TemplateResult', 'TemplateResult', ([], {}), '()\n', (172, 174), False, 'from web.template import CompiledTemplate, ForLoop, TemplateResult\n'), ((4731, 4740), 'web.template.ForLoop', 'ForLoop', ([], {}), '()\n', (4738, 4740), False, 'from web.template import CompiledTemplate, ForLoop, TemplateResult\n'), ((4752, 4768), 'web.template.TemplateResult', 'TemplateResult', ([], {}), '()\n', (4766, 4768), False, 'from web.template import CompiledTemplate, ForLoop, TemplateResult\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import logging, sys, operator
from matplotlib.colors import Normalize
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
from gseapy.parser import unique
class _MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
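# Example (a sketch): pin a diverging colormap's midpoint color to zero even
# when the data range is asymmetric, e.g. values in [-1, 5]:
#
#     norm = _MidpointNormalize(vmin=-1.0, vmax=5.0, midpoint=0.0)
#     ax.pcolormesh(data, cmap='seismic', norm=norm)   # `data`/`ax` assumed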
def zscore(data2d, axis=0):
"""Standardize the mean and variance of the data axis Parameters.
:param data2d: DataFrame to normalize.
:param axis: int, Which axis to normalize across. If 0, normalize across rows,
if 1, normalize across columns. If None, don't change data
:Returns: Normalized DataFrame. Normalized data with a mean of 0 and variance of 1
across the specified axis.
"""
if axis is None:
# normalized to mean and std using entire matrix
# z_scored = (data2d - data2d.values.mean()) / data2d.values.std(ddof=1)
return data2d
assert axis in [0,1]
# if axis == 1:
# z_scored = data2d
# else:
# z_scored = data2d.T
# z_scored = (z_scored - z_scored.mean()) / z_scored.std(ddof=1)
# if axis == 1:
# return z_scored
# else:
# return z_scored.T
z_scored = data2d.apply(lambda x: (x-x.mean())/x.std(ddof=1),
axis=operator.xor(1, axis))
return z_scored
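# Example (a sketch): with axis=0 every *row* of the frame is rescaled to
# mean 0 and (ddof=1) standard deviation 1; axis=1 does the same per column:
#
#     >>> import pandas as pd
#     >>> d = pd.DataFrame({'s1': [1.0, 2.0], 's2': [3.0, 6.0]})
#     >>> zscore(d, axis=0)   # each row's mean becomes 0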
def colorbar(mappable):
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2%", pad=0.05)
return fig.colorbar(mappable, cax=cax)
def heatmap(df, z_score=None, title='', figsize=(5,5), cmap='RdBu_r',
xticklabels=True, yticklabels=True, ofname=None, **kwargs):
"""Visualize the dataframe.
:param df: DataFrame from expression table.
:param z_score: z_score axis{0, 1}. If None, don't normalize data.
:param title: gene set name.
    :param xticklabels: bool, whether to show the x-axis tick labels.
    :param yticklabels: bool, whether to show the y-axis tick labels.
:param figsize: heatmap figsize.
:param cmap: matplotlib colormap.
:param ofname: output file name. If None, don't save figure
"""
df = zscore(df, axis=z_score)
df = df.iloc[::-1]
# Get the positions and used label for the ticks
ny, nx = df.shape
xticks = np.arange(0, nx, 1) + .5
yticks = np.arange(0, ny, 1) + .5
# If working on commandline, don't show figure
if hasattr(sys, 'ps1') and (ofname is None):
fig = plt.figure(figsize=figsize)
else:
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
vmin = np.percentile(df.min(), 2)
vmax = np.percentile(df.max(), 98)
matrix = ax.pcolormesh(df.values, cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_ylim([0,len(df)])
ax.set(xticks=xticks, yticks=yticks)
ax.set_xticklabels(df.columns.values if xticklabels else '', fontsize=14, rotation=90)
ax.set_yticklabels(df.index.values if yticklabels else '', fontsize=14)
ax.set_title("%s\nHeatmap of the Analyzed Geneset"%title, fontsize=20)
ax.tick_params(axis='both', which='both', bottom=False, top=False,
right=False, left=False)
# cax=fig.add_axes([0.93,0.25,0.05,0.20])
# cbar = fig.colorbar(matrix, cax=cax)
cbar = colorbar(matrix)
cbar.ax.tick_params(axis='both', which='both', bottom=False, top=False,
right=False, left=False)
for side in ["top", "right", "left", "bottom"]:
ax.spines[side].set_visible(False)
cbar.ax.spines[side].set_visible(False)
# cbar.ax.set_title('',loc='left')
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
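# Example (a sketch; `expr` is assumed to be a genes-by-samples DataFrame):
#
#     heatmap(expr, z_score=0, title='MY_GENESET',
#             figsize=(5, 5), ofname='my_geneset.heatmap.pdf')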
def gseaplot(rank_metric, term, hits_indices, nes, pval, fdr, RES,
pheno_pos='', pheno_neg='', figsize=(6,5.5),
cmap='seismic', ofname=None, **kwargs):
"""This is the main function for reproducing the gsea plot.
:param rank_metric: pd.Series for rankings, rank_metric.values.
:param term: gene_set name
:param hits_indices: hits indices of rank_metric.index presented in gene set S.
:param nes: Normalized enrichment scores.
:param pval: nominal p-value.
:param fdr: false discovery rate.
:param RES: running enrichment scores.
:param pheno_pos: phenotype label, positive correlated.
:param pheno_neg: phenotype label, negative correlated.
:param figsize: matplotlib figsize.
:param ofname: output file name. If None, don't save figure
"""
# plt.style.use('classic')
# center color map at midpoint = 0
norm = _MidpointNormalize(midpoint=0)
#dataFrame of ranked matrix scores
x = np.arange(len(rank_metric))
rankings = rank_metric.values
# figsize = (6,6)
phenoP_label = pheno_pos + ' (Positively Correlated)'
phenoN_label = pheno_neg + ' (Negatively Correlated)'
zero_score_ind = np.abs(rankings).argmin()
z_score_label = 'Zero score at ' + str(zero_score_ind)
nes_label = 'NES: '+ "{:.3f}".format(float(nes))
pval_label = 'Pval: '+ "{:.3f}".format(float(pval))
fdr_label = 'FDR: '+ "{:.3f}".format(float(fdr))
im_matrix = np.tile(rankings, (2,1))
# output truetype
plt.rcParams.update({'pdf.fonttype':42,'ps.fonttype':42})
# in most case, we will have many plots, so do not display plots
    # It's also useful to run this script on the command line.
# GSEA Plots
gs = plt.GridSpec(16,1)
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show figure
fig = plt.figure(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
# Ranked Metric Scores Plot
ax1 = fig.add_subplot(gs[11:])
module = 'tmp' if ofname is None else ofname.split(".")[-2]
if module == 'ssgsea':
nes_label = 'ES: '+ "{:.3f}".format(float(nes))
pval_label='Pval: '
fdr_label='FDR: '
ax1.fill_between(x, y1=np.log(rankings), y2=0, color='#C9D3DB')
ax1.set_ylabel("log ranked metric", fontsize=14)
else:
ax1.fill_between(x, y1=rankings, y2=0, color='#C9D3DB')
ax1.set_ylabel("Ranked list metric", fontsize=14)
ax1.text(.05, .9, phenoP_label, color='red',
horizontalalignment='left', verticalalignment='top',
transform=ax1.transAxes)
ax1.text(.95, .05, phenoN_label, color='Blue',
horizontalalignment='right', verticalalignment='bottom',
transform=ax1.transAxes)
# the x coords of this transformation are data, and the y coord are axes
trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)
if module != 'ssgsea':
ax1.vlines(zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey')
ax1.text(zero_score_ind, 0.5, z_score_label,
horizontalalignment='center',
verticalalignment='center',
transform=trans1)
ax1.set_xlabel("Rank in Ordered Dataset", fontsize=14)
ax1.spines['top'].set_visible(False)
ax1.tick_params(axis='both', which='both', top=False, right=False, left=False)
ax1.locator_params(axis='y', nbins=5)
ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc) ))
# use round method to control float number
# ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : round(tick_loc, 1) ))
# gene hits
ax2 = fig.add_subplot(gs[8:10], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
ax2.vlines(hits_indices, 0, 1,linewidth=.5,transform=trans2)
ax2.spines['bottom'].set_visible(False)
ax2.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
# colormap
ax3 = fig.add_subplot(gs[10], sharex=ax1)
ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=cmap, interpolation='none') # cm.coolwarm
ax3.spines['bottom'].set_visible(False)
ax3.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False,labelleft=False)
# Enrichment score plot
ax4 = fig.add_subplot(gs[:8], sharex=ax1)
ax4.plot(x, RES, linewidth=4, color ='#88C544')
ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)
ax4.text(.1, .2, pval_label, transform=ax4.transAxes)
ax4.text(.1, .3, nes_label, transform=ax4.transAxes)
# the y coords of this transformation are data, and the x coord are axes
trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
ax4.set_xlim(min(x), max(x))
ax4.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False)
ax4.locator_params(axis='y', nbins=5)
# FuncFormatter need two argument, I don't know why. this lambda function used to format yaxis tick labels.
ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )
# fig adjustment
fig.suptitle(term, fontsize=16, fontweight='bold')
fig.subplots_adjust(hspace=0)
# fig.tight_layout()
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
def isfloat(x):
try:
float(x)
except:
return False
else:
return True
def dotplot(df, column='Adjusted P-value', title='', cutoff=0.05, top_term=10,
sizes=None, norm=None, legend=True, figsize=(6, 5.5),
cmap='RdBu_r', ofname=None, **kwargs):
"""Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title
:param cutoff: p-adjust cut-off.
:param top_term: number of enriched terms to show.
:param sizes: tuple, (min, max) scatter size. Not functional for now
:param norm: maplotlib.colors.Normalize object.
:param legend: bool, whether to show legend.
:param figsize: tuple, figure size.
:param cmap: matplotlib colormap
:param ofname: output file name. If None, don't save figure
"""
colname = column
# sorting the dataframe for better visualization
if colname in ['Adjusted P-value', 'P-value']:
# check if any values in `df[colname]` can't be coerced to floats
can_be_coerced = df[colname].map(isfloat)
if np.sum(~can_be_coerced) > 0:
raise ValueError('some value in %s could not be typecast to `float`'%colname)
else:
df.loc[:, colname] = df[colname].map(float)
df = df[df[colname] <= cutoff]
if len(df) < 1:
msg = "Warning: No enrich terms when cutoff = %s"%cutoff
return msg
df = df.assign(logAP=lambda x: - x[colname].apply(np.log10))
colname='logAP'
df = df.sort_values(by=colname).iloc[-top_term:,:]
#
temp = df['Overlap'].str.split("/", expand=True).astype(int)
df = df.assign(Hits=temp.iloc[:,0], Background=temp.iloc[:,1])
df = df.assign(Hits_ratio=lambda x:x.Hits / x.Background)
# x axis values
x = df.loc[:, colname].values
combined_score = df['Combined Score'].round().astype('int')
# y axis index and values
y = [i for i in range(0,len(df))]
ylabels = df['Term'].values
# Normalise to [0,1]
# b = (df['Count'] - df['Count'].min())/ np.ptp(df['Count'])
# area = 100 * b
# control the size of scatter and legend marker
levels = numbers = np.sort(df.Hits.unique())
if norm is None:
norm = Normalize()
elif isinstance(norm, tuple):
norm = Normalize(*norm)
elif not isinstance(norm, Normalize):
err = ("``size_norm`` must be None, tuple, "
"or Normalize object.")
raise ValueError(err)
min_width, max_width = np.r_[20, 100] * plt.rcParams["lines.linewidth"]
norm.clip = True
if not norm.scaled():
norm(np.asarray(numbers))
size_limits = norm.vmin, norm.vmax
scl = norm(numbers)
widths = np.asarray(min_width + scl * (max_width - min_width))
if scl.mask.any():
widths[scl.mask] = 0
sizes = dict(zip(levels, widths))
df['sizes'] = df.Hits.map(sizes)
area = df['sizes'].values
    # create scatter plot
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show figure
fig, ax = plt.subplots(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
vmin = np.percentile(combined_score.min(), 2)
vmax = np.percentile(combined_score.max(), 98)
sc = ax.scatter(x=x, y=y, s=area, edgecolors='face', c=combined_score,
cmap=cmap, vmin=vmin, vmax=vmax)
if column in ['Adjusted P-value', 'P-value']:
xlabel = "-log$_{10}$(%s)"%column
else:
xlabel = column
ax.set_xlabel(xlabel, fontsize=14, fontweight='bold')
ax.yaxis.set_major_locator(plt.FixedLocator(y))
ax.yaxis.set_major_formatter(plt.FixedFormatter(ylabels))
ax.set_yticklabels(ylabels, fontsize=16)
# ax.set_ylim([-1, len(df)])
ax.grid()
# colorbar
cax=fig.add_axes([0.95,0.20,0.03,0.22])
cbar = fig.colorbar(sc, cax=cax,)
cbar.ax.tick_params(right=True)
cbar.ax.set_title('Combined\nScore',loc='left', fontsize=12)
# for terms less than 3
if len(df) >= 3:
# find the index of the closest value to the median
idx = [area.argmax(), np.abs(area - area.mean()).argmin(), area.argmin()]
idx = unique(idx)
else:
idx = df.index.values
label = df.iloc[idx, df.columns.get_loc('Hits')]
if legend:
handles, _ = ax.get_legend_handles_labels()
legend_markers = []
for ix in idx:
legend_markers.append(ax.scatter([],[], s=area[ix], c='b'))
# artist = ax.scatter([], [], s=size_levels,)
ax.legend(legend_markers, label, title='Hits')
ax.set_title(title, fontsize=20, fontweight='bold')
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
return ax
def barplot(df, column='Adjusted P-value', title="", cutoff=0.05, top_term=10,
figsize=(6.5,6), color='salmon', ofname=None, **kwargs):
"""Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title.
    :param cutoff: cut-off of the column you've chosen.
:param top_term: number of top enriched terms to show.
:param figsize: tuple, matplotlib figsize.
:param color: color for bars.
:param ofname: output file name. If None, don't save figure
"""
colname = column
if colname in ['Adjusted P-value', 'P-value']:
df = df[df[colname] <= cutoff]
if len(df) < 1:
msg = "Warning: No enrich terms using library %s when cutoff = %s"%(title, cutoff)
return msg
df = df.assign(logAP = lambda x: - x[colname].apply(np.log10))
colname = 'logAP'
dd = df.sort_values(by=colname).iloc[-top_term:,:]
# dd = d.head(top_term).sort_values('logAP')
# create bar plot
if hasattr(sys, 'ps1') and (ofname is None):
        # working inside python console, show figure
fig = plt.figure(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
bar = dd.plot.barh(x='Term', y=colname, color=color,
alpha=0.75, fontsize=16, ax=ax)
if column in ['Adjusted P-value', 'P-value']:
xlabel = "-log$_{10}$(%s)"%column
else:
xlabel = column
bar.set_xlabel(xlabel, fontsize=16, fontweight='bold')
bar.set_ylabel("")
bar.set_title(title, fontsize=24, fontweight='bold')
bar.xaxis.set_major_locator(MaxNLocator(integer=True))
bar.legend_.remove()
adjust_spines(ax, spines=['left','bottom'])
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
return ax
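# Example (a sketch; `res` is assumed to be an Enrichr result DataFrame with
# 'Term' and 'Adjusted P-value' columns):
#
#     ax = barplot(res, column='Adjusted P-value', cutoff=0.05,
#                  top_term=10, title='GO_Biological_Process',
#                  ofname='enrich.barplot.pdf')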
def adjust_spines(ax, spines):
"""function for removing spines and ticks.
:param ax: axes object
:param spines: a list of spines names to keep. e.g [left, right, top, bottom]
if spines = []. remove all spines and ticks.
"""
for loc, spine in ax.spines.items():
if loc in spines:
# spine.set_position(('outward', 10)) # outward by 10 points
# spine.set_smart_bounds(True)
continue
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
|
[
"numpy.abs",
"numpy.sum",
"matplotlib.pyplot.FixedFormatter",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.tile",
"numpy.interp",
"matplotlib.colors.Normalize",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"matplotlib.ticker.MaxNLocator",
"matplotlib.figure.Figure",
"matplotlib.pyplot.rcParams.update",
"matplotlib.transforms.blended_transform_factory",
"operator.xor",
"matplotlib.pyplot.subplots",
"gseapy.parser.unique",
"numpy.asarray",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.colors.Normalize.__init__",
"numpy.log",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.FixedLocator"
] |
[((2042, 2065), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2061, 2065), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5790, 5815), 'numpy.tile', 'np.tile', (['rankings', '(2, 1)'], {}), '(rankings, (2, 1))\n', (5797, 5815), True, 'import numpy as np\n'), ((5842, 5902), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'pdf.fonttype': 42, 'ps.fonttype': 42}"], {}), "({'pdf.fonttype': 42, 'ps.fonttype': 42})\n", (5861, 5902), True, 'import matplotlib.pyplot as plt\n'), ((6056, 6075), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(16)', '(1)'], {}), '(16, 1)\n', (6068, 6075), True, 'import matplotlib.pyplot as plt\n'), ((7290, 7356), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax1.transData', 'ax1.transAxes'], {}), '(ax1.transData, ax1.transAxes)\n', (7326, 7356), True, 'import matplotlib.transforms as transforms\n'), ((8310, 8376), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax2.transData', 'ax2.transAxes'], {}), '(ax2.transData, ax2.transAxes)\n', (8346, 8376), True, 'import matplotlib.transforms as transforms\n'), ((9386, 9452), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax4.transAxes', 'ax4.transData'], {}), '(ax4.transAxes, ax4.transData)\n', (9422, 9452), True, 'import matplotlib.transforms as transforms\n'), ((13172, 13225), 'numpy.asarray', 'np.asarray', (['(min_width + scl * (max_width - min_width))'], {}), '(min_width + scl * (max_width - min_width))\n', (13182, 13225), True, 'import numpy as np\n'), ((587, 629), 'matplotlib.colors.Normalize.__init__', 'Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (605, 629), False, 'from matplotlib.colors import Normalize\n'), ((2834, 2853), 'numpy.arange', 'np.arange', (['(0)', 'nx', '(1)'], {}), '(0, nx, 1)\n', (2843, 2853), True, 'import numpy as np\n'), ((2872, 2891), 'numpy.arange', 'np.arange', (['(0)', 'ny', '(1)'], {}), '(0, ny, 1)\n', (2881, 2891), True, 'import numpy as np\n'), ((3013, 3040), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3023, 3040), True, 'import matplotlib.pyplot as plt\n'), ((3065, 3088), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3071, 3088), False, 'from matplotlib.figure import Figure\n'), ((3106, 3123), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (3118, 3123), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((6191, 6218), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6201, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6298, 6321), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6304, 6321), False, 'from matplotlib.figure import Figure\n'), ((6339, 6356), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (6351, 6356), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((12696, 12707), 'matplotlib.colors.Normalize', 'Normalize', ([], {}), '()\n', (12705, 12707), False, 'from matplotlib.colors import Normalize\n'), ((13529, 13558), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13541, 13558), True, 'import matplotlib.pyplot as plt\n'), ((13638, 13661), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13644, 13661), False, 'from matplotlib.figure import Figure\n'), ((13679, 13696), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (13691, 13696), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((14178, 14197), 'matplotlib.pyplot.FixedLocator', 'plt.FixedLocator', (['y'], {}), '(y)\n', (14194, 14197), True, 'import matplotlib.pyplot as plt\n'), ((14232, 14259), 'matplotlib.pyplot.FixedFormatter', 'plt.FixedFormatter', (['ylabels'], {}), '(ylabels)\n', (14250, 14259), True, 'import matplotlib.pyplot as plt\n'), ((14762, 14773), 'gseapy.parser.unique', 'unique', (['idx'], {}), '(idx)\n', (14768, 14773), False, 'from gseapy.parser import unique\n'), ((16637, 16664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (16647, 16664), True, 'import matplotlib.pyplot as plt\n'), ((16744, 16767), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (16750, 16767), False, 'from matplotlib.figure import Figure\n'), ((16785, 16802), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (16797, 16802), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((17249, 17274), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (17260, 17274), False, 'from matplotlib.ticker import MaxNLocator\n'), ((876, 898), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (885, 898), True, 'import numpy as np\n'), ((1917, 1938), 'operator.xor', 'operator.xor', (['(1)', 'axis'], {}), '(1, axis)\n', (1929, 1938), False, 'import logging, sys, operator\n'), ((5527, 5543), 'numpy.abs', 'np.abs', (['rankings'], {}), '(rankings)\n', (5533, 5543), True, 'import numpy as np\n'), ((11530, 11553), 'numpy.sum', 'np.sum', (['(~can_be_coerced)'], {}), '(~can_be_coerced)\n', (11536, 11553), True, 'import numpy as np\n'), ((12757, 12773), 'matplotlib.colors.Normalize', 'Normalize', (['*norm'], {}), '(*norm)\n', (12766, 12773), False, 'from matplotlib.colors import Normalize\n'), ((13075, 13094), 'numpy.asarray', 'np.asarray', (['numbers'], {}), '(numbers)\n', (13085, 13094), True, 'import numpy as np\n'), ((6658, 6674), 'numpy.log', 'np.log', (['rankings'], {}), '(rankings)\n', (6664, 6674), True, 'import numpy as np\n')]
|
#This is a direct port of x_keckhelio.pro from XIDL
from __future__ import division, print_function
from math import pi
from numpy import cos, sin
import numpy as np
def x_keckhelio(ra, dec, epoch=2000.0, jd=None, tai=None,
longitude=None, latitude=None, altitude=None, obs='keck'):
"""
`ra` and `dec` in degrees
Returns `vcorr`: "Velocity correction term, in km/s, to add to measured
radial velocity to convert it to the heliocentric frame."
but the sign seems to be backwards of what that says:
helio_shift = -1. * x_keckhelio(RA, DEC, 2000.0)
uses barvel and ct2lst functions from idlastro, also ported below
#NOTE: this seems to have some jitter about the IDL version at the .1 km/s level
"""
if longitude is not None and latitude is not None and altitude is not None:
print('using long/lat/alt instead of named observatory')
elif obs == 'keck':
longitude = 360. - 155.47220
latitude = 19.82656886
altitude = 4000. #meters
else:
print('Using observatory', obs)
if obs == 'vlt':
longitude = 360. - 70.40322
latitude = -24.6258
altitude = 2635. #meters
elif obs == 'mmt':
longitude = 360. - 110.88456
latitude = 31.688778
altitude = 2600. #meters
elif obs == 'lick':
longitude = 360. - 121.637222
latitude = 37.343056
altitude = 1283. #meters
else:
raise ValueError('unrecognized observatory' + obs)
if jd is None and tai is not None:
jd = 2400000.5 + tai / (24. * 3600.)
elif tai is None and jd is not None:
pass
else:
raise ValueError('Must specify either JD or TAI')
DRADEG = 180.0 / pi
# ----------
    # Compute barycentric velocity (Accurate only to 1m/s)
dvelh, dvelb = baryvel(jd, epoch)
#Project velocity toward star
vbarycen = dvelb[0]*cos(dec/DRADEG)*cos(ra/DRADEG) + \
dvelb[1]*cos(dec/DRADEG)*sin(ra/DRADEG) + dvelb[2]*sin(dec/DRADEG)
#----------
#Compute rotational velocity of observer on the Earth
#LAT is the latitude in radians.
latrad = latitude / DRADEG
#Reduction of geodetic latitude to geocentric latitude (radians).
#DLAT is in arcseconds.
dlat = -(11. * 60. + 32.743000) * sin(2. * latrad) + \
1.163300 * sin(4. * latrad) -0.002600 * sin(6. * latrad)
latrad = latrad + (dlat / 3600.) / DRADEG
#R is the radius vector from the Earth's center to the observer (meters).
#VC is the corresponding circular velocity
#(meters/sidereal day converted to km / sec).
#(sidereal day = 23.934469591229 hours (1986))
r = 6378160.0 * (0.998327073 + 0.00167643800 * cos(2. * latrad) - \
0.00000351 * cos(4. * latrad) + 0.000000008 * cos(6. * latrad)) \
+ altitude
vc = 2. * pi * (r / 1000.) / (23.934469591229 * 3600.)
#Compute the hour angle, HA, in degrees
LST = 15. * ct2lst(longitude, 'junk', jd) # convert from hours to degrees
HA = LST - ra
#Project the velocity onto the line of sight to the star.
vrotate = vc * cos(latrad) * cos(dec/DRADEG) * sin(HA/DRADEG)
return (-vbarycen + vrotate)
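# Example (a sketch): correction for an object at RA=150 deg, Dec=+20 deg
# observed from Keck at JD 2451545.0. Per the note above, the shift to add
# to a measured velocity is the negative of the return value:
#
#     vcorr = x_keckhelio(150.0, 20.0, epoch=2000.0, jd=2451545.0)
#     helio_shift = -1. * vcorr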
def ct2lst(lng, tz, jd, day=None, mon=None, year=None):
"""
# NAME:
# CT2LST
# PURPOSE:
# To convert from Local Civil Time to Local Mean Sidereal Time.
#
# CALLING SEQUENCE:
# CT2LST, Lst, Lng, Tz, Time, [Day, Mon, Year] #NOT SUPPORTED IN PYTHON PORT!
# or
# CT2LST, Lst, Lng, dummy, JD
#
# INPUTS:
# Lng - The longitude in degrees (east of Greenwich) of the place for
# which the local sidereal time is desired, scalar. The Greenwich
# mean sidereal time (GMST) can be found by setting Lng = 0.
# Tz - The time zone of the site in hours, positive East of the Greenwich
# meridian (ahead of GMT). Use this parameter to easily account
# for Daylight Savings time (e.g. -4=EDT, -5 = EST/CDT), scalar
# This parameter is not needed (and ignored) if Julian date is
# supplied. ***Note that the sign of TZ was changed in July 2008
# to match the standard definition.***
# Time or JD - If more than four parameters are specified, then this is
# the time of day of the specified date in decimal hours. If
# exactly four parameters are specified, then this is the
# Julian date of time in question, scalar or vector
#
# OPTIONAL INPUTS:
# Day - The day of the month (1-31),integer scalar or vector
# Mon - The month, in numerical format (1-12), integer scalar or vector
# Year - The 4 digit year (e.g. 2008), integer scalar or vector
#
# OUTPUTS:
# Lst The Local Sidereal Time for the date/time specified in hours.
#
# RESTRICTIONS:
# If specified, the date should be in numerical form. The year should
# appear as yyyy.
#
# PROCEDURE:
# The Julian date of the day and time is question is used to determine
# the number of days to have passed since 0 Jan 2000. This is used
# in conjunction with the GST of that date to extrapolate to the current
# GST# this is then used to get the LST. See Astronomical Algorithms
# by <NAME>, p. 84 (Eq. 11-4) for the constants used.
#
# EXAMPLE:
# Find the Greenwich mean sidereal time (GMST) on 2008 Jul 30 at 15:53 pm
# in Baltimore, Maryland (longitude=-76.72 degrees). The timezone is
# EDT or tz=-4
#
# IDL> CT2LST, lst, -76.72, -4,ten(15,53), 30, 07, 2008
#
# ==> lst = 11.356505 hours (= 11h 21m 23.418s)
#
# The Web site http://tycho.usno.navy.mil/sidereal.html contains more
# info on sidereal time, as well as an interactive calculator.
# PROCEDURES USED:
# jdcnv - Convert from year, month, day, hour to julian date
#
# MODIFICATION HISTORY:
# Adapted from the FORTRAN program GETSD by <NAME>, STX,
# 27 October 1988.
# Use IAU 1984 constants <NAME>, HSTX, April 1995, results
# differ by about 0.1 seconds
# Longitudes measured *east* of Greenwich <NAME> December 1998
# Time zone now measure positive East of Greenwich <NAME> July 2008
# Remove debugging print statement <NAME> April 2009
"""
# IF N_params() gt 4 THEN BEGIN
# time = tme - tz
# jdcnv, year, mon, day, time, jd
# ENDIF ELSE jd = double(tme)
#
# Useful constants, see Meeus, p.84
#
c = [280.46061837, 360.98564736629, 0.000387933, 38710000.0]
jd2000 = 2451545.0
t0 = jd - jd2000
t = t0 / 36525
#
# Compute GST in seconds.
#
theta = c[0] + (c[1] * t0) + t ** 2 * (c[2] - t / c[3])
#
# Compute LST in hours.
#
lst = np.array((theta + lng) / 15.0)
neg = lst < 0
if np.sum(neg) > 0:
if neg.shape == tuple():
lst = 24. + idl_like_mod(lst, 24.)
else:
lst[neg] = 24. + idl_like_mod(lst[neg], 24.)
return idl_like_mod(lst, 24.)
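# Usage sketch (hedged): this port only supports the JD calling convention,
# in which the tz argument is ignored. For example, the Greenwich mean
# sidereal time at the J2000.0 epoch (longitude 0 => GMST, in hours):
#
#   gmst = ct2lst(0.0, None, 2451545.0)   # ~18.697 hours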
def baryvel(dje, deq):
#+
# NAME:
# BARYVEL
# PURPOSE:
# Calculates heliocentric and barycentric velocity components of Earth.
#
# EXPLANATION:
# BARYVEL takes into account the Earth-Moon motion, and is useful for
# radial velocity work to an accuracy of ~1 m/s.
#
# CALLING SEQUENCE:
# BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
#
# INPUTS:
# DJE - (scalar) Julian ephemeris date.
# DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq=0
# then deq is assumed to be equal to dje.
# OUTPUTS:
# DVELH: (vector(3)) heliocentric velocity component. in km/s
# DVELB: (vector(3)) barycentric velocity component. in km/s
#
# The 3-vectors DVELH and DVELB are given in a right-handed coordinate
# system with the +X axis toward the Vernal Equinox, and +Z axis
# toward the celestial pole.
#
# OPTIONAL KEYWORD SET:
# JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
# to compute the Earth velocity using the full JPL ephemeris.
# The JPL ephemeris FITS file JPLEPH.405 must exist in either the
# current directory, or in the directory specified by the
# environment variable ASTRO_DATA. Alternatively, the JPL keyword
# can be set to the full path and name of the ephemeris file.
# A copy of the JPL ephemeris FITS file is available in
# http://idlastro.gsfc.nasa.gov/ftp/data/
# PROCEDURES CALLED:
# Function PREMAT() -- computes precession matrix
# JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
# NOTES:
# Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
#       Stumpff claimed an accuracy of 42 cm/s for the velocity.    A
# comparison with the JPL FORTRAN planetary ephemeris program PLEPH
# found agreement to within about 65 cm/s between 1986 and 1994
#
# If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
#       given in the ICRS system; otherwise in the FK4 system.
# EXAMPLE:
# Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
# using both the original Stumpf algorithm and the JPL ephemeris
#
# IDL> jdcnv, 1994, 2, 15, 0, jd #==> JD = 2449398.5
# IDL> baryvel, jd, 2000, vh, vb #Original algorithm
# ==> vh = [-17.07243, -22.81121, -9.889315] #Heliocentric km/s
# ==> vb = [-17.08083, -22.80471, -9.886582] #Barycentric km/s
# IDL> baryvel, jd, 2000, vh, vb, /jpl #JPL ephemeris
# ==> vh = [-17.07236, -22.81126, -9.889419] #Heliocentric km/s
# ==> vb = [-17.08083, -22.80484, -9.886409] #Barycentric km/s
#
# IDL> ra = ten(19,50,46.77)*15/!RADEG #RA in radians
# IDL> dec = ten(08,52,3.5)/!RADEG #Dec in radians
# IDL> v = vb[0]*cos(dec)*cos(ra) + $ #Project velocity toward star
# vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
#
# REVISION HISTORY:
# <NAME>, U.C. Berkeley Translated BARVEL.FOR to IDL.
# <NAME>, Cleaned up program sent by <NAME> (SfSU) June 1994
# Converted to IDL V5.0 <NAME> September 1997
# Added /JPL keyword <NAME> July 2001
# Documentation update W. Landsman Dec 2005
#-
#Define constants
    dc2pi = 2. * pi
cc2pi = dc2pi
dc1 = 1.0
dcto = 2415020.0
dcjul = 36525.0 #days in Julian year
dcbes = 0.313
dctrop = 365.24219572 #days in tropical year (...572 insig)
dc1900 = 1900.0
AU = 1.4959787e8
#Constants dcfel(i,k) of fast changing elements.
dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6 \
,6.2565836e00, 6.2830194572674e02, -2.6180e-6 \
,4.7199666e00, 8.3997091449254e03, -1.9780e-5 \
,1.9636505e-1, 8.4334662911720e03, -5.6044e-5 \
,4.1547339e00, 5.2993466764997e01, 5.8845e-6 \
,4.6524223e00, 2.1354275911213e01, 5.6797e-6 \
,4.2620486e00, 7.5025342197656e00, 5.5317e-6 \
,1.4740694e00, 3.8377331909193e00, 5.6093e-6 ]
dcfel = np.array(dcfel).reshape(8,3)
#constants dceps and ccsel(i,k) of slowly changing elements.
dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8 ]
ccsel = [1.675104E-2, -4.179579E-5, -1.260516E-7 \
,2.220221E-1, 2.809917E-2, 1.852532E-5 \
,1.589963E00, 3.418075E-2, 1.430200E-5 \
,2.994089E00, 2.590824E-2, 4.155840E-6 \
,8.155457E-1, 2.486352E-2, 6.836840E-6 \
,1.735614E00, 1.763719E-2, 6.370440E-6 \
,1.968564E00, 1.524020E-2, -2.517152E-6 \
,1.282417E00, 8.703393E-3, 2.289292E-5 \
,2.280820E00, 1.918010E-2, 4.484520E-6 \
,4.833473E-2, 1.641773E-4, -4.654200E-7 \
,5.589232E-2, -3.455092E-4, -7.388560E-7 \
,4.634443E-2, -2.658234E-5, 7.757000E-8 \
,8.997041E-3, 6.329728E-6, -1.939256E-9 \
,2.284178E-2, -9.941590E-5, 6.787400E-8 \
,4.350267E-2, -6.839749E-5, -2.714956E-7 \
,1.348204E-2, 1.091504E-5, 6.903760E-7 \
,3.106570E-2, -1.665665E-4, -1.590188E-7 ]
ccsel = np.array(ccsel).reshape(17,3)
#Constants of the arguments of the short-period perturbations.
dcargs = [5.0974222, -7.8604195454652e2 \
,3.9584962, -5.7533848094674e2 \
,1.6338070, -1.1506769618935e3 \
,2.5487111, -3.9302097727326e2 \
,4.9255514, -5.8849265665348e2 \
,1.3363463, -5.5076098609303e2 \
,1.6072053, -5.2237501616674e2 \
,1.3629480, -1.1790629318198e3 \
,5.5657014, -1.0977134971135e3 \
,5.0708205, -1.5774000881978e2 \
,3.9318944, 5.2963464780000e1 \
,4.8989497, 3.9809289073258e1 \
,1.3097446, 7.7540959633708e1 \
,3.5147141, 7.9618578146517e1 \
,3.5413158, -5.4868336758022e2 ]
dcargs = np.array(dcargs).reshape(15,2)
#Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = \
[-2.279594E-5, 1.407414E-5, 8.273188E-6, 1.340565E-5, -2.490817E-7 \
,-3.494537E-5, 2.860401E-7, 1.289448E-7, 1.627237E-5, -1.823138E-7 \
, 6.593466E-7, 1.322572E-5, 9.258695E-6, -4.674248E-7, -3.646275E-7 \
, 1.140767E-5, -2.049792E-5, -4.747930E-6, -2.638763E-6, -1.245408E-7 \
, 9.516893E-6, -2.748894E-6, -1.319381E-6, -4.549908E-6, -1.864821E-7 \
, 7.310990E-6, -1.924710E-6, -8.772849E-7, -3.334143E-6, -1.745256E-7 \
,-2.603449E-6, 7.359472E-6, 3.168357E-6, 1.119056E-6, -1.655307E-7 \
,-3.228859E-6, 1.308997E-7, 1.013137E-7, 2.403899E-6, -3.736225E-7 \
, 3.442177E-7, 2.671323E-6, 1.832858E-6, -2.394688E-7, -3.478444E-7 \
, 8.702406E-6, -8.421214E-6, -1.372341E-6, -1.455234E-6, -4.998479E-8 \
,-1.488378E-6, -1.251789E-5, 5.226868E-7, -2.049301E-7, 0.E0 \
,-8.043059E-6, -2.991300E-6, 1.473654E-7, -3.154542E-7, 0.E0 \
, 3.699128E-6, -3.316126E-6, 2.901257E-7, 3.407826E-7, 0.E0 \
, 2.550120E-6, -1.241123E-6, 9.901116E-8, 2.210482E-7, 0.E0 \
,-6.351059E-7, 2.341650E-6, 1.061492E-6, 2.878231E-7, 0.E0 ]
ccamps = np.array(ccamps).reshape(15,5)
#Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020E-8
ccsec = [1.289600E-6, 5.550147E-1, 2.076942E00 \
,3.102810E-5, 4.035027E00, 3.525565E-1 \
,9.124190E-6, 9.990265E-1, 2.622706E00 \
,9.793240E-7, 5.508259E00, 1.559103E01 ]
ccsec = np.array(ccsec).reshape(4,3)
#Sidereal rates.
dcsld = 1.990987e-7 #sidereal rate in longitude
ccsgd = 1.990969E-7 #sidereal rate in mean anomaly
#Constants used in the calculation of the lunar contribution.
cckm = 3.122140E-5
ccmld = 2.661699E-6
ccfdi = 2.399485E-7
#Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = [5.1679830, 8.3286911095275e3 \
,5.4913150, -7.2140632838100e3 \
,5.9598530, 1.5542754389685e4 ]
dcargm = np.array(dcargm).reshape(3,2)
#Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = [ 1.097594E-1, 2.896773E-7, 5.450474E-2, 1.438491E-7 \
,-2.223581E-2, 5.083103E-8, 1.002548E-2, -2.291823E-8 \
, 1.148966E-2, 5.658888E-8, 8.249439E-3, 4.063015E-8 ]
ccampm = np.array(ccampm).reshape(3,4)
#ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
ccpamv = [8.326827E-11, 1.843484E-11, 1.988712E-12, 1.881276E-12]
dc1mme = 0.99999696
#Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1., dt, dt*dt])
#Values of all elements for the instant(aneous?) dje.
temp = idl_like_mod(idl_like_pound(tvec,dcfel), dc2pi)
#PROBLEM: the mod here is where the 100 m/s error slips in
dml = temp[:,0]
forbel = temp[:,1:8]
g = forbel[:,0] #old fortran equivalence
deps = idl_like_mod(np.sum(tvec*dceps), dc2pi)
sorbel = idl_like_mod(idl_like_pound(tvec, ccsel), dc2pi)
e = sorbel[:, 0] #old fortran equivalence
#Secular perturbations in longitude.
dummy=cos(2.0)
    sn = sin(idl_like_mod(idl_like_pound(tvec.ravel()[0:2], ccsec[:, 1:3]), cc2pi))
#Periodic perturbations of the emb (earth-moon barycenter).
pertl = np.sum(ccsec[:,0] * sn) + dt*ccsec3*sn.ravel()[2]
pertld = 0.0
pertr = 0.0
pertrd = 0.0
    for k in range(15):  # dcargs/ccamps have 15 rows; the IDL original loops k = 0, 14
a = idl_like_mod((dcargs[k,0]+dt*dcargs[k,1]), dc2pi)
cosa = cos(a)
sina = sin(a)
pertl = pertl + ccamps[k,0]*cosa + ccamps[k,1]*sina
pertr = pertr + ccamps[k,2]*cosa + ccamps[k,3]*sina
if k < 11:
pertld = pertld + (ccamps[k,1]*cosa-ccamps[k,0]*sina)*ccamps[k,4]
pertrd = pertrd + (ccamps[k,3]*cosa-ccamps[k,2]*sina)*ccamps[k,4]
#Elliptic part of the motion of the emb.
    phi = (e*e/4.) * (((8./e) - e)*sin(g) + 5.*sin(2*g) + (13./3.)*e*sin(3*g))
f = g + phi
sinf = sin(f)
cosf = cos(f)
dpsi = (dc1 - e*e) / (dc1 + e*cosf)
phid = 2*e*ccsgd*((1 + 1.5*e*e)*cosf + e*(1.25 - 0.5*sinf*sinf))
psid = ccsgd*e*sinf * (dc1 - e*e)**-0.5
#Perturbed heliocentric motion of the emb.
d1pdro = dc1+pertr
drd = d1pdro * (psid + dpsi*pertrd)
drld = d1pdro*dpsi * (dcsld+phid+pertld)
dtl = idl_like_mod((dml + phi + pertl), dc2pi)
dsinls = sin(dtl)
dcosls = cos(dtl)
dxhd = drd*dcosls - drld*dsinls
dyhd = drd*dsinls + drld*dcosls
#Influence of eccentricity, evection and variation on the geocentric
# motion of the moon.
pertl = 0.0
pertld = 0.0
pertp = 0.0
pertpd = 0.0
    for k in range(3):  # dcargm/ccampm have 3 rows; the IDL original loops k = 0, 2
a = idl_like_mod((dcargm[k,0] + dt*dcargm[k,1]), dc2pi)
sina = sin(a)
cosa = cos(a)
pertl = pertl + ccampm[k,0]*sina
pertld = pertld + ccampm[k,1]*cosa
pertp = pertp + ccampm[k,2]*cosa
pertpd = pertpd - ccampm[k,3]*sina
#Heliocentric motion of the earth.
tl = forbel.ravel()[1] + pertl
sinlm = sin(tl)
coslm = cos(tl)
sigma = cckm / (1.0 + pertp)
a = sigma*(ccmld + pertld)
b = sigma*pertpd
dxhd = dxhd + a*sinlm + b*coslm
dyhd = dyhd - a*coslm + b*sinlm
    dzhd = -sigma*ccfdi*cos(forbel.ravel()[2])
#Barycentric motion of the earth.
dxbd = dxhd*dc1mme
dybd = dyhd*dc1mme
dzbd = dzhd*dc1mme
    for k in range(4):  # ccpamv has 4 planet terms; the IDL original loops k = 0, 3
plon = forbel.ravel()[k+3]
pomg = sorbel.ravel()[k+1]
pecc = sorbel.ravel()[k+9]
tl = idl_like_mod((plon + 2.0*pecc*sin(plon-pomg)), cc2pi)
dxbd = dxbd + ccpamv[k]*(sin(tl) + pecc*sin(pomg))
dybd = dybd - ccpamv[k]*(cos(tl) + pecc*cos(pomg))
dzbd = dzbd - ccpamv[k]*sorbel.ravel()[k+13]*cos(plon - sorbel.ravel()[k+5])
#Transition to mean equator of date.
dcosep = cos(deps)
dsinep = sin(deps)
dyahd = dcosep*dyhd - dsinep*dzhd
dzahd = dsinep*dyhd + dcosep*dzhd
dyabd = dcosep*dybd - dsinep*dzbd
dzabd = dsinep*dybd + dcosep*dzbd
#Epoch of mean equinox (deq) of zero implies that we should use
# Julian ephemeris date (dje) as epoch of mean equinox.
    if deq == 0:
        # wrap in an ndarray: multiplying a plain list by a float raises TypeError
        dvelh = AU * np.array([dxhd, dyahd, dzahd])
        dvelb = AU * np.array([dxbd, dyabd, dzabd])
        return dvelh, dvelb
#General precession from epoch dje to deq.
deqdat = (dje-dcto-dcbes) / dctrop + dc1900
    prema = premat(deqdat, deq, FK4=True)
    dvelh = AU * idl_like_pound(prema, [dxhd, dyahd, dzahd])
    dvelb = AU * idl_like_pound(prema, [dxbd, dyabd, dzabd])
return dvelh, dvelb
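# Usage sketch (hedged), mirroring the docstring example for 1994 Feb 15
# (JD 2449398.5) with epoch 2000:
#
#   vh, vb = baryvel(2449398.5, 2000)
#   # per the original IDL documentation, vh ~ [-17.07, -22.81, -9.89] km/s
#   # (heliocentric) and vb ~ [-17.08, -22.80, -9.89] km/s (barycentric)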
def premat(equinox1, equinox2, FK4=False):
"""
#+
# NAME:
# PREMAT
# PURPOSE:
# Return the precession matrix needed to go from EQUINOX1 to EQUINOX2.
#  EXPLANATION:
# This matrix is used by the procedures PRECESS and BARYVEL to precess
# astronomical coordinates
#
# CALLING SEQUENCE:
# matrix = PREMAT( equinox1, equinox2, [ /FK4 ] )
#
# INPUTS:
# EQUINOX1 - Original equinox of coordinates, numeric scalar.
# EQUINOX2 - Equinox of precessed coordinates.
#
# OUTPUT:
# matrix - double precision 3 x 3 precession matrix, used to precess
# equatorial rectangular coordinates
#
# OPTIONAL INPUT KEYWORDS:
# /FK4 - If this keyword is set, the FK4 (B1950.0) system precession
# angles are used to compute the precession matrix. The
# default is to use FK5 (J2000.0) precession angles
#
# EXAMPLES:
# Return the precession matrix from 1950.0 to 1975.0 in the FK4 system
#
# IDL> matrix = PREMAT( 1950.0, 1975.0, /FK4)
#
# PROCEDURE:
# FK4 constants from "Computational Spherical Astronomy" by Taff (1983),
# p. 24. (FK4). FK5 constants from "Astronomical Almanac Explanatory
# Supplement 1992, page 104 Table 3.211.1.
#
# REVISION HISTORY
# Written, <NAME>, HSTX Corporation, June 1994
# Converted to IDL V5.0 <NAME> September 1997
#-
"""
deg_to_rad = pi/180.0
sec_to_rad = deg_to_rad/3600.
T = 0.001 * ( equinox2 - equinox1)
if not FK4: # FK5
ST = 0.001*( equinox1 - 2000.)
# Compute 3 rotation angles
A = sec_to_rad * T * (23062.181 + ST*(139.656 +0.0139*ST) \
+ T*(30.188 - 0.344*ST+17.998*T))
B = sec_to_rad * T * T * (79.280 + 0.410*ST + 0.205*T) + A
C = sec_to_rad * T * (20043.109 - ST*(85.33 + 0.217*ST) \
+ T*(-42.665 - 0.217*ST -41.833*T))
else:
ST = 0.001*( equinox1 - 1900.)
# Compute 3 rotation angles
A = sec_to_rad * T * (23042.53 + ST*(139.75 +0.06*ST) \
+ T*(30.23 - 0.27*ST+18.0*T))
B = sec_to_rad * T * T * (79.27 + 0.66*ST + 0.32*T) + A
C = sec_to_rad * T * (20046.85 - ST*(85.33 + 0.37*ST) \
+ T*(-42.67 - 0.37*ST -41.8*T))
sina = sin(A)
sinb = sin(B)
sinc = sin(C)
cosa = cos(A)
cosb = cos(B)
cosc = cos(C)
r = np.empty([3, 3])
r[:,0] = [ cosa*cosb*cosc-sina*sinb, sina*cosb+cosa*sinb*cosc, cosa*sinc]
r[:,1] = [-cosa*sinb-sina*cosb*cosc, cosa*cosb-sina*sinb*cosc, -sina*sinc]
r[:,2] = [-cosb*sinc, -sinb*sinc, cosc]
return r
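# Usage sketch (hedged), following the docstring's IDL example: the FK4
# precession matrix from B1950.0 to B1975.0 is
#
#   m = premat(1950.0, 1975.0, FK4=True)   # 3 x 3 rotation matrix
#
# Rectangular coordinates are then precessed as m # xyz in IDL terms,
# i.e. idl_like_pound(m, xyz) with the helper defined below.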
def idl_like_pound(a, b):
a = np.array(a, copy=False)
b = np.array(b, copy=False)
if len(a.shape) == 2 and len(b.shape) == 1:
return np.dot(a.T, b)
if len(a.shape) == 1 and len(b.shape) == 2:
res = np.dot(a, b.T)
return res.reshape(1, res.size)
else:
return np.dot(a, b)
def idl_like_mod(a, b):
a = np.array(a, copy=False)
b = np.array(b, copy=False)
res = np.abs(a) % b
if a.shape == tuple():
if a<0:
return -res
else:
return res
else:
res[a<0] *= -1
return res
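# Behavior sketch: like IDL's MOD, the result takes the sign of the
# dividend, unlike Python's % operator. For example:
#
#   idl_like_mod(-7.0, 3.0)   # -> -1.0   (whereas -7.0 % 3.0 == 2.0)
#   idl_like_mod(7.0, 3.0)    # ->  1.0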
|
[
"numpy.sum",
"numpy.abs",
"numpy.empty",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot"
] |
[((7216, 7246), 'numpy.array', 'np.array', (['((theta + lng) / 15.0)'], {}), '((theta + lng) / 15.0)\n', (7224, 7246), True, 'import numpy as np\n'), ((16178, 16206), 'numpy.array', 'np.array', (['[1.0, dt, dt * dt]'], {}), '([1.0, dt, dt * dt])\n', (16186, 16206), True, 'import numpy as np\n'), ((16735, 16743), 'numpy.cos', 'cos', (['(2.0)'], {}), '(2.0)\n', (16738, 16743), False, 'from numpy import cos, sin\n'), ((17572, 17578), 'numpy.sin', 'sin', (['f'], {}), '(f)\n', (17575, 17578), False, 'from numpy import cos, sin\n'), ((17590, 17596), 'numpy.cos', 'cos', (['f'], {}), '(f)\n', (17593, 17596), False, 'from numpy import cos, sin\n'), ((17970, 17978), 'numpy.sin', 'sin', (['dtl'], {}), '(dtl)\n', (17973, 17978), False, 'from numpy import cos, sin\n'), ((17992, 18000), 'numpy.cos', 'cos', (['dtl'], {}), '(dtl)\n', (17995, 18000), False, 'from numpy import cos, sin\n'), ((18625, 18632), 'numpy.sin', 'sin', (['tl'], {}), '(tl)\n', (18628, 18632), False, 'from numpy import cos, sin\n'), ((18645, 18652), 'numpy.cos', 'cos', (['tl'], {}), '(tl)\n', (18648, 18652), False, 'from numpy import cos, sin\n'), ((19417, 19426), 'numpy.cos', 'cos', (['deps'], {}), '(deps)\n', (19420, 19426), False, 'from numpy import cos, sin\n'), ((19440, 19449), 'numpy.sin', 'sin', (['deps'], {}), '(deps)\n', (19443, 19449), False, 'from numpy import cos, sin\n'), ((22571, 22577), 'numpy.sin', 'sin', (['A'], {}), '(A)\n', (22574, 22577), False, 'from numpy import cos, sin\n'), ((22589, 22595), 'numpy.sin', 'sin', (['B'], {}), '(B)\n', (22592, 22595), False, 'from numpy import cos, sin\n'), ((22607, 22613), 'numpy.sin', 'sin', (['C'], {}), '(C)\n', (22610, 22613), False, 'from numpy import cos, sin\n'), ((22625, 22631), 'numpy.cos', 'cos', (['A'], {}), '(A)\n', (22628, 22631), False, 'from numpy import cos, sin\n'), ((22643, 22649), 'numpy.cos', 'cos', (['B'], {}), '(B)\n', (22646, 22649), False, 'from numpy import cos, sin\n'), ((22661, 22667), 'numpy.cos', 'cos', (['C'], {}), '(C)\n', (22664, 22667), False, 'from numpy import cos, sin\n'), ((22677, 22693), 'numpy.empty', 'np.empty', (['[3, 3]'], {}), '([3, 3])\n', (22685, 22693), True, 'import numpy as np\n'), ((22945, 22968), 'numpy.array', 'np.array', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (22953, 22968), True, 'import numpy as np\n'), ((22977, 23000), 'numpy.array', 'np.array', (['b'], {'copy': '(False)'}), '(b, copy=False)\n', (22985, 23000), True, 'import numpy as np\n'), ((23268, 23291), 'numpy.array', 'np.array', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (23276, 23291), True, 'import numpy as np\n'), ((23300, 23323), 'numpy.array', 'np.array', (['b'], {'copy': '(False)'}), '(b, copy=False)\n', (23308, 23323), True, 'import numpy as np\n'), ((3247, 3263), 'numpy.sin', 'sin', (['(HA / DRADEG)'], {}), '(HA / DRADEG)\n', (3250, 3263), False, 'from numpy import cos, sin\n'), ((7272, 7283), 'numpy.sum', 'np.sum', (['neg'], {}), '(neg)\n', (7278, 7283), True, 'import numpy as np\n'), ((16524, 16544), 'numpy.sum', 'np.sum', (['(tvec * dceps)'], {}), '(tvec * dceps)\n', (16530, 16544), True, 'import numpy as np\n'), ((16905, 16929), 'numpy.sum', 'np.sum', (['(ccsec[:, 0] * sn)'], {}), '(ccsec[:, 0] * sn)\n', (16911, 16929), True, 'import numpy as np\n'), ((17107, 17113), 'numpy.cos', 'cos', (['a'], {}), '(a)\n', (17110, 17113), False, 'from numpy import cos, sin\n'), ((17129, 17135), 'numpy.sin', 'sin', (['a'], {}), '(a)\n', (17132, 17135), False, 'from numpy import cos, sin\n'), ((18341, 18347), 'numpy.sin', 'sin', (['a'], {}), '(a)\n', 
(18344, 18347), False, 'from numpy import cos, sin\n'), ((18363, 18369), 'numpy.cos', 'cos', (['a'], {}), '(a)\n', (18366, 18369), False, 'from numpy import cos, sin\n'), ((23065, 23079), 'numpy.dot', 'np.dot', (['a.T', 'b'], {}), '(a.T, b)\n', (23071, 23079), True, 'import numpy as np\n'), ((23142, 23156), 'numpy.dot', 'np.dot', (['a', 'b.T'], {}), '(a, b.T)\n', (23148, 23156), True, 'import numpy as np\n'), ((23222, 23234), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (23228, 23234), True, 'import numpy as np\n'), ((23334, 23343), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (23340, 23343), True, 'import numpy as np\n'), ((2105, 2122), 'numpy.sin', 'sin', (['(dec / DRADEG)'], {}), '(dec / DRADEG)\n', (2108, 2122), False, 'from numpy import cos, sin\n'), ((2476, 2493), 'numpy.sin', 'sin', (['(6.0 * latrad)'], {}), '(6.0 * latrad)\n', (2479, 2493), False, 'from numpy import cos, sin\n'), ((3229, 3246), 'numpy.cos', 'cos', (['(dec / DRADEG)'], {}), '(dec / DRADEG)\n', (3232, 3246), False, 'from numpy import cos, sin\n'), ((11620, 11635), 'numpy.array', 'np.array', (['dcfel'], {}), '(dcfel)\n', (11628, 11635), True, 'import numpy as np\n'), ((12685, 12700), 'numpy.array', 'np.array', (['ccsel'], {}), '(ccsel)\n', (12693, 12700), True, 'import numpy as np\n'), ((13458, 13474), 'numpy.array', 'np.array', (['dcargs'], {}), '(dcargs)\n', (13466, 13474), True, 'import numpy as np\n'), ((14686, 14702), 'numpy.array', 'np.array', (['ccamps'], {}), '(ccamps)\n', (14694, 14702), True, 'import numpy as np\n'), ((15041, 15056), 'numpy.array', 'np.array', (['ccsec'], {}), '(ccsec)\n', (15049, 15056), True, 'import numpy as np\n'), ((15619, 15635), 'numpy.array', 'np.array', (['dcargm'], {}), '(dcargm)\n', (15627, 15635), True, 'import numpy as np\n'), ((15928, 15944), 'numpy.array', 'np.array', (['ccampm'], {}), '(ccampm)\n', (15936, 15944), True, 'import numpy as np\n'), ((2020, 2036), 'numpy.cos', 'cos', (['(ra / DRADEG)'], {}), '(ra / DRADEG)\n', (2023, 2036), False, 'from numpy import cos, sin\n'), ((2079, 2095), 'numpy.sin', 'sin', (['(ra / DRADEG)'], {}), '(ra / DRADEG)\n', (2082, 2095), False, 'from numpy import cos, sin\n'), ((2403, 2420), 'numpy.sin', 'sin', (['(2.0 * latrad)'], {}), '(2.0 * latrad)\n', (2406, 2420), False, 'from numpy import cos, sin\n'), ((2447, 2464), 'numpy.sin', 'sin', (['(4.0 * latrad)'], {}), '(4.0 * latrad)\n', (2450, 2464), False, 'from numpy import cos, sin\n'), ((3215, 3226), 'numpy.cos', 'cos', (['latrad'], {}), '(latrad)\n', (3218, 3226), False, 'from numpy import cos, sin\n'), ((17535, 17545), 'numpy.sin', 'sin', (['(3 * g)'], {}), '(3 * g)\n', (17538, 17545), False, 'from numpy import cos, sin\n'), ((2004, 2021), 'numpy.cos', 'cos', (['(dec / DRADEG)'], {}), '(dec / DRADEG)\n', (2007, 2021), False, 'from numpy import cos, sin\n'), ((2063, 2080), 'numpy.cos', 'cos', (['(dec / DRADEG)'], {}), '(dec / DRADEG)\n', (2066, 2080), False, 'from numpy import cos, sin\n'), ((2893, 2910), 'numpy.cos', 'cos', (['(6.0 * latrad)'], {}), '(6.0 * latrad)\n', (2896, 2910), False, 'from numpy import cos, sin\n'), ((17506, 17512), 'numpy.sin', 'sin', (['g'], {}), '(g)\n', (17509, 17512), False, 'from numpy import cos, sin\n'), ((17516, 17526), 'numpy.sin', 'sin', (['(2 * g)'], {}), '(2 * g)\n', (17519, 17526), False, 'from numpy import cos, sin\n'), ((19135, 19151), 'numpy.sin', 'sin', (['(plon - pomg)'], {}), '(plon - pomg)\n', (19138, 19151), False, 'from numpy import cos, sin\n'), ((19192, 19199), 'numpy.sin', 'sin', (['tl'], {}), '(tl)\n', (19195, 19199), False, 'from 
numpy import cos, sin\n'), ((19251, 19258), 'numpy.cos', 'cos', (['tl'], {}), '(tl)\n', (19254, 19258), False, 'from numpy import cos, sin\n'), ((2860, 2877), 'numpy.cos', 'cos', (['(4.0 * latrad)'], {}), '(4.0 * latrad)\n', (2863, 2877), False, 'from numpy import cos, sin\n'), ((19207, 19216), 'numpy.sin', 'sin', (['pomg'], {}), '(pomg)\n', (19210, 19216), False, 'from numpy import cos, sin\n'), ((19266, 19275), 'numpy.cos', 'cos', (['pomg'], {}), '(pomg)\n', (19269, 19275), False, 'from numpy import cos, sin\n'), ((2819, 2836), 'numpy.cos', 'cos', (['(2.0 * latrad)'], {}), '(2.0 * latrad)\n', (2822, 2836), False, 'from numpy import cos, sin\n')]
|
import loader
from migration_tool.adapters.mssql import MSSQLAdapter
from migration_tool.adapters.mysql import MySQLAdapter
from migration_tool.adapters.postgres import PostgresAdapter
from migration_tool.adapters.oracle import OracleAdapter
from migration_tool.sql2json import SQLtoJSON
if __name__ == '__main__':
databases = [
# PostgresAdapter({
# 'host': 'localhost',
# 'database': 'owf',
# 'user': 'owf',
# 'password': 'password',
# }),
# MySQLAdapter({
# 'host': 'localhost',
# 'database': 'owf',
# 'user': 'root',
# 'password': 'password',
# # 'unix_socket': "/tmp/mysql.sock",
# }),
# OracleAdapter({
# 'host': 'localhost',
# 'database': 'ORCLCDB',
# 'user': 'system',
# 'password': '<PASSWORD>',
# 'port': '1521',
# 'client_path': 'C:\instantclient_19_5', # needed for windows.
# }),
MSSQLAdapter({
'host': 'localhost',
'database': 'owf',
'user': 'sa',
'password': '<PASSWORD>',
})
]
for adapter in databases:
SQLtoJSON(adapter) \
.with_tables() \
.with_schema() \
.to_json()
|
[
"migration_tool.sql2json.SQLtoJSON",
"migration_tool.adapters.mssql.MSSQLAdapter"
] |
[((1041, 1139), 'migration_tool.adapters.mssql.MSSQLAdapter', 'MSSQLAdapter', (["{'host': 'localhost', 'database': 'owf', 'user': 'sa', 'password': '<PASSWORD>'\n }"], {}), "({'host': 'localhost', 'database': 'owf', 'user': 'sa',\n 'password': '<PASSWORD>'})\n", (1053, 1139), False, 'from migration_tool.adapters.mssql import MSSQLAdapter\n'), ((1241, 1259), 'migration_tool.sql2json.SQLtoJSON', 'SQLtoJSON', (['adapter'], {}), '(adapter)\n', (1250, 1259), False, 'from migration_tool.sql2json import SQLtoJSON\n')]
|
import argparse
import importlib.util
import os
import sys
import chainer
import numpy as np
import six
from PIL import Image
from ..params import ProcessParams
from ..simple import BaseProcessor
PROJECT_DIR = os.path.dirname(__file__)
waifu2x_path = os.path.join(PROJECT_DIR, "waifu2x-chainer")
def import_waifu2x_module(name):
spec = importlib.util.spec_from_file_location(
name,
os.path.join(waifu2x_path, 'lib', ''.join((name, '.py')))
)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
return foo
iproc = import_waifu2x_module("iproc")
reconstruct = import_waifu2x_module("reconstruct")
srcnn = import_waifu2x_module("srcnn")
utils = import_waifu2x_module("utils")
default_model = "UpResNet10"
def debug_print(debug=False, *args, **kwargs):
if debug:
six.print_(file=sys.stderr, *args, **kwargs)
def load_models(cfg: ProcessParams, args: argparse.Namespace):
ch = 3 if cfg.input_pix_fmt.lower() == 'rgb' else 1
if cfg.model:
if os.path.isdir(cfg.model):
model_dir = cfg.model
else:
model_dir = os.path.join(waifu2x_path, f'models/{cfg.model.lower()}')
else:
cfg.model = default_model
model_dir = os.path.join(waifu2x_path, f'models/{default_model.lower()}')
models = {}
flag = False
if args.method == 'noise_scale':
model_name = 'anime_style_noise{}_scale_{}.npz'.format(
cfg.denoise_level, cfg.input_pix_fmt.lower())
model_path = os.path.join(model_dir, model_name)
if os.path.exists(model_path):
models['noise_scale'] = srcnn.archs[cfg.model](ch)
chainer.serializers.load_npz(model_path, models['noise_scale'])
alpha_model_name = 'anime_style_scale_{}.npz'.format(cfg.input_pix_fmt.lower())
alpha_model_path = os.path.join(model_dir, alpha_model_name)
models['alpha'] = srcnn.archs[cfg.model](ch)
chainer.serializers.load_npz(alpha_model_path, models['alpha'])
else:
flag = True
if args.method == 'scale' or flag:
model_name = 'anime_style_scale_{}.npz'.format(cfg.input_pix_fmt.lower())
model_path = os.path.join(model_dir, model_name)
models['scale'] = srcnn.archs[cfg.model](ch)
chainer.serializers.load_npz(model_path, models['scale'])
if args.method == 'noise' or flag:
model_name = 'anime_style_noise{}_{}.npz'.format(
cfg.denoise_level, cfg.input_pix_fmt.lower())
model_path = os.path.join(model_dir, model_name)
if not os.path.exists(model_path):
model_name = 'anime_style_noise{}_scale_{}.npz'.format(
cfg.denoise_level, cfg.input_pix_fmt.lower())
model_path = os.path.join(model_dir, model_name)
        models['noise'] = srcnn.archs[cfg.model](ch)
chainer.serializers.load_npz(model_path, models['noise'])
if cfg.device_id >= 0:
chainer.backends.cuda.check_cuda_available()
chainer.backends.cuda.get_device(cfg.device_id).use()
for _, model in models.items():
model.to_gpu()
return models
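# Note (derived from the format strings above): with e.g. cfg.model='UpResNet10',
# input_pix_fmt='rgb' and denoise_level=1, load_models() looks for weight files
# such as models/upresnet10/anime_style_scale_rgb.npz and
# models/upresnet10/anime_style_noise1_scale_rgb.npz inside waifu2x-chainer.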
def split_alpha(src, model, debug=False):
alpha = None
if src.mode in ('L', 'RGB', 'P') and isinstance(
src.info.get('transparency'), bytes
):
src = src.convert('RGBA')
rgb = src.convert('RGB')
if src.mode in ('LA', 'RGBA'):
debug_print(debug, 'Splitting alpha channel...', end=' ', flush=True)
alpha = src.split()[-1]
rgb = iproc.alpha_make_border(rgb, alpha, model)
        debug_print(debug, 'OK')
return rgb, alpha
def denoise_image(cfg: ProcessParams, args: argparse.Namespace, src, model):
dst, alpha = split_alpha(src, model, cfg.debug)
debug_print(cfg.debug, 'Level {} denoising...'.format(cfg.denoise_level),
end=' ', flush=True)
if cfg.tta_mode:
dst = reconstruct.image_tta(
dst, model, args.tta_level, cfg.tilesize,
args.batch_size)
else:
dst = reconstruct.image(dst, model, cfg.tilesize, args.batch_size)
if model.inner_scale != 1:
dst = dst.resize((src.size[0], src.size[1]), Image.LANCZOS)
debug_print(cfg.debug, 'OK')
if alpha is not None:
dst.putalpha(alpha)
return dst
def upscale_image(cfg: ProcessParams, args: argparse.Namespace, src, scale_model, alpha_model=None):
dst, alpha = split_alpha(src, scale_model, cfg.debug)
log_scale = np.log2(cfg.scale)
for i in range(int(np.ceil(log_scale))):
        debug_print(cfg.debug, '2.0x upscaling...', end=' ', flush=True)
model = alpha_model
if i == 0 or alpha_model is None:
model = scale_model
        if model.inner_scale == 1:
            dst = iproc.nn_scaling(dst, 2)  # Nearest neighbor 2x scaling
            if alpha is not None:
                alpha = iproc.nn_scaling(alpha, 2)  # Nearest neighbor 2x scaling
if cfg.tta_mode:
dst = reconstruct.image_tta(dst, model, args.tta_level, cfg.tilesize, args.batch_size)
else:
dst = reconstruct.image(dst, model, cfg.tilesize, args.batch_size)
            if alpha is not None:
                # use the dedicated alpha model when given, else the scale model
                alpha_net = alpha_model if alpha_model is not None else scale_model
                alpha = reconstruct.image(
                    alpha, alpha_net, cfg.tilesize, args.batch_size)
debug_print(cfg.debug, 'OK')
dst_w = int(np.round(src.size[0] * cfg.scale))
dst_h = int(np.round(src.size[1] * cfg.scale))
if np.round(log_scale % 1.0, 6) != 0 or log_scale <= 0:
debug_print(cfg.debug, 'Resizing...', end=' ', flush=True)
dst = dst.resize((dst_w, dst_h), Image.LANCZOS)
debug_print(cfg.debug, 'OK')
if alpha is not None:
if alpha.size[0] != dst_w or alpha.size[1] != dst_h:
alpha = alpha.resize((dst_w, dst_h), Image.LANCZOS)
dst.putalpha(alpha)
return dst
def get_parser():
p = argparse.ArgumentParser()
p.add_argument('--tta_level', '-T', type=int, default=8,
choices=[2, 4, 8])
p.add_argument('--method', '-m', default='scale',
choices=['noise', 'scale', 'noise_scale'])
p.add_argument('--batch_size', '-b', type=int, default=16)
return p
class Processor(BaseProcessor):
def __init__(self, params: ProcessParams):
p = get_parser()
self.args = p.parse_args(params.additional_args)
if params.model and params.model in srcnn.table:
params.model = srcnn.table[params.model]
self.models = load_models(params, self.args)
self.params = params
if params.tilesize < 32:
params.tilesize = 128
    def process(self, im: Image.Image) -> Image.Image:
if 'noise_scale' in self.models:
return upscale_image(self.params, self.args, im, self.models['noise_scale'], self.models['alpha'])
if 'noise' in self.models:
return denoise_image(self.params, self.args, im, self.models['noise'])
if 'scale' in self.models:
return upscale_image(self.params, self.args, im, self.models['scale'])
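# Minimal usage sketch (hedged: ProcessParams is defined elsewhere in this
# project, so the keyword construction below is illustrative only):
#
#   params = ProcessParams(model='UpResNet10', scale=2, denoise_level=1,
#                          input_pix_fmt='rgb', tilesize=128, device_id=-1,
#                          tta_mode=False, debug=False, additional_args=[])
#   proc = Processor(params)
#   proc.process(Image.open('input.png')).save('output.png')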
|
[
"argparse.ArgumentParser",
"chainer.serializers.load_npz",
"numpy.ceil",
"os.path.isdir",
"numpy.log2",
"os.path.dirname",
"chainer.backends.cuda.get_device",
"os.path.exists",
"chainer.backends.cuda.check_cuda_available",
"numpy.round",
"os.path.join",
"six.print_"
] |
[((213, 238), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (228, 238), False, 'import os\n'), ((254, 298), 'os.path.join', 'os.path.join', (['PROJECT_DIR', '"""waifu2x-chainer"""'], {}), "(PROJECT_DIR, 'waifu2x-chainer')\n", (266, 298), False, 'import os\n'), ((4536, 4554), 'numpy.log2', 'np.log2', (['cfg.scale'], {}), '(cfg.scale)\n', (4543, 4554), True, 'import numpy as np\n'), ((6020, 6045), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6043, 6045), False, 'import argparse\n'), ((837, 881), 'six.print_', 'six.print_', (['*args'], {'file': 'sys.stderr'}), '(*args, file=sys.stderr, **kwargs)\n', (847, 881), False, 'import six\n'), ((1032, 1056), 'os.path.isdir', 'os.path.isdir', (['cfg.model'], {}), '(cfg.model)\n', (1045, 1056), False, 'import os\n'), ((1528, 1563), 'os.path.join', 'os.path.join', (['model_dir', 'model_name'], {}), '(model_dir, model_name)\n', (1540, 1563), False, 'import os\n'), ((1575, 1601), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (1589, 1601), False, 'import os\n'), ((2220, 2255), 'os.path.join', 'os.path.join', (['model_dir', 'model_name'], {}), '(model_dir, model_name)\n', (2232, 2255), False, 'import os\n'), ((2317, 2374), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['model_path', "models['scale']"], {}), "(model_path, models['scale'])\n", (2345, 2374), False, 'import chainer\n'), ((2551, 2586), 'os.path.join', 'os.path.join', (['model_dir', 'model_name'], {}), '(model_dir, model_name)\n', (2563, 2586), False, 'import os\n'), ((2898, 2955), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['model_path', "models['noise']"], {}), "(model_path, models['noise'])\n", (2926, 2955), False, 'import chainer\n'), ((2992, 3036), 'chainer.backends.cuda.check_cuda_available', 'chainer.backends.cuda.check_cuda_available', ([], {}), '()\n', (3034, 3036), False, 'import chainer\n'), ((5492, 5525), 'numpy.round', 'np.round', (['(src.size[0] * cfg.scale)'], {}), '(src.size[0] * cfg.scale)\n', (5500, 5525), True, 'import numpy as np\n'), ((5543, 5576), 'numpy.round', 'np.round', (['(src.size[1] * cfg.scale)'], {}), '(src.size[1] * cfg.scale)\n', (5551, 5576), True, 'import numpy as np\n'), ((1678, 1741), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['model_path', "models['noise_scale']"], {}), "(model_path, models['noise_scale'])\n", (1706, 1741), False, 'import chainer\n'), ((1865, 1906), 'os.path.join', 'os.path.join', (['model_dir', 'alpha_model_name'], {}), '(model_dir, alpha_model_name)\n', (1877, 1906), False, 'import os\n'), ((1976, 2039), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['alpha_model_path', "models['alpha']"], {}), "(alpha_model_path, models['alpha'])\n", (2004, 2039), False, 'import chainer\n'), ((2602, 2628), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (2616, 2628), False, 'import os\n'), ((2785, 2820), 'os.path.join', 'os.path.join', (['model_dir', 'model_name'], {}), '(model_dir, model_name)\n', (2797, 2820), False, 'import os\n'), ((4578, 4596), 'numpy.ceil', 'np.ceil', (['log_scale'], {}), '(log_scale)\n', (4585, 4596), True, 'import numpy as np\n'), ((5585, 5613), 'numpy.round', 'np.round', (['(log_scale % 1.0)', '(6)'], {}), '(log_scale % 1.0, 6)\n', (5593, 5613), True, 'import numpy as np\n'), ((3045, 3092), 'chainer.backends.cuda.get_device', 'chainer.backends.cuda.get_device', (['cfg.device_id'], {}), '(cfg.device_id)\n', (3077, 3092), False, 'import 
chainer\n')]
|
import argparse
from datetime import datetime
import gc
import os
import joblib
import numpy as np
from poutyne.framework import Model
from poutyne.framework.callbacks import *
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from load_dataset import AudioDatasetFine, label_hierarchy
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#train_dir = r"D:\datasets\dcase5_processed\spec_vgg\train"
#test_dir = r"D:\datasets\dcase5_processed\spec_vgg\validate"
train_dir = r"/dcase/spec_vgg/train"
test_dir = r"/dcase/spec_vgg/validate"
MODEL_BASE = r'/dcase/output/models'
TENSORBOARD_BASE = r'/dcase/output/tensorboard'
os.makedirs(MODEL_BASE, exist_ok=True)
os.makedirs(TENSORBOARD_BASE, exist_ok=True)
index_to_files_dict_train = joblib.load('/dcase/spec_vgg/label_to_files_train.zip')
index_to_files_dict_test = joblib.load('/dcase/spec_vgg/label_to_files_test.zip')
NUM_COARSE_LABELS = 8
BATCH_SIZE = 64
MAX_EPOCHS = 100
USE_EXAMPLE_WEIGHTS = True
if USE_EXAMPLE_WEIGHTS:
weights_fine = joblib.load('weights_fine_train.pkl')
class ConvBlock(nn.Module):
"""This creates a convolutional layer with optional maxpool, batchnorm, and dropout"""
def __init__(self,
in_channels,
out_channels,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
batchnorm=True,
maxpool=True,
maxpool_size=(2, 2),
dropout=None):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding) # , bias=False ?
# print('kernel', kernel_size, stride, padding, maxpool)
if maxpool:
self.mp = nn.MaxPool2d(maxpool_size, stride=maxpool_size)
else:
self.mp = None
if batchnorm:
self.bn = nn.BatchNorm2d(out_channels)
else:
self.bn = None
if dropout:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
# self.init_weights()
def forward(self, nn_input):
x = nn_input
if self.bn:
x = F.relu(self.bn(self.conv(x)))
else:
x = F.relu(self.conv(x))
if self.mp:
x = self.mp(x)
if self.dropout:
x = self.dropout(x)
return x
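# Shape sketch (hedged): with the defaults (3x3 conv, stride 1, padding 1,
# batchnorm, 2x2 maxpool, no dropout) a ConvBlock halves each spatial dim:
#
#   blk = ConvBlock(in_channels=1, out_channels=8)
#   y = blk(torch.zeros(4, 1, 64, 64))   # y.shape == (4, 8, 32, 32)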
class VGG_alt(nn.Module):
"""Based on AudioSet paper, with some maxpool size modifications"""
def __init__(self, num_classes):
super(VGG_alt, self).__init__()
self.NUM_CLASSES = num_classes
DROPOUT = .5
self.emb_size = 49152
# spectrogram convolutions
self.conv_block_1 = ConvBlock(in_channels=1,
out_channels=8,
kernel_size=(1, 1),
stride=(1, 1),
padding=(0, 0),
batchnorm=True,
maxpool=False,
maxpool_size=(2, 16),
dropout=DROPOUT)
self.conv_block_2 = ConvBlock(in_channels=8,
out_channels=16,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
batchnorm=True,
maxpool=False,
maxpool_size=(2, 2),
dropout=DROPOUT)
self.conv_block_3 = ConvBlock(in_channels=16,
out_channels=32,
kernel_size=(16, 128),
stride=(4, 16),
padding=(8, 16),
batchnorm=True,
maxpool=True,
maxpool_size=(4, 4),
dropout=DROPOUT)
self.conv_block_4 = ConvBlock(in_channels=32,
out_channels=64,
kernel_size=(5, 5),
stride=(2, 2),
padding=(1, 1),
batchnorm=True,
maxpool=False,
maxpool_size=(2, 2),
dropout=DROPOUT)
self.conv_block_5 = ConvBlock(in_channels=64,
out_channels=128,
kernel_size=(5, 5),
stride=(2, 2),
padding=(1, 1),
batchnorm=True,
maxpool=False,
maxpool_size=None,
dropout=DROPOUT)
self.conv_block_6 = ConvBlock(in_channels=128,
out_channels=256,
kernel_size=(3, 3),
stride=(2, 2),
padding=(1, 1),
batchnorm=True,
maxpool=False,
maxpool_size=(2, 4),
dropout=DROPOUT)
# self.conv_block_7 = ConvBlock(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1),
# padding=(1, 1), batchnorm=True, maxpool=False, maxpool_size=(2, 4), dropout=DROPOUT)
# openl3 embedding convolutions
# self.emb_conv_1 = ConvBlock(in_channels=1, out_channels=4, kernel_size=(5, 5), stride=(2, 2),
# padding=(1, 1), batchnorm=True, maxpool=True, maxpool_size=(4, 4), dropout=DROPOUT)
# self.emb_conv_2 = ConvBlock(in_channels=4, out_channels=8, kernel_size=(5, 5), stride=(2, 2),
# padding=(1, 1), batchnorm=True, maxpool=True, maxpool_size=(2, 2),
# dropout=DROPOUT)
# self.emb_conv_3 = ConvBlock(in_channels=16, out_channels=32, kernel_size=(5, 5), stride=(2, 2),
# padding=(1, 1), batchnorm=True, maxpool=True, maxpool_size=(2, 2),
# dropout=DROPOUT)
# self.emb_conv_4 = ConvBlock(in_channels=32, out_channels=64, kernel_size=(5, 5), stride=(2, 2),
# padding=(1, 1), batchnorm=True, maxpool=True, maxpool_size=(2, 2),
# dropout=DROPOUT)
# fc layers
# self.fc_emb1 = nn.Linear(self.emb_size, 2**10, bias=True)
# self.fc_emb2 = nn.Linear(2**10, 2**8, bias=True)
self.fc1 = nn.Bilinear(256, 1280, 512, bias=True)
self.fc1_bn = nn.BatchNorm1d(512)
self.fc2 = nn.Linear(512, 256, bias=True)
self.fc2_bn = nn.BatchNorm1d(256)
# self.fc3 = nn.Linear(2**7, 2**6, bias=True)
# self.fc4 = nn.Linear(2**8, 2**6, bias=True)
self.fc_final = nn.Linear(256, self.NUM_CLASSES, bias=True)
self.dropout = nn.Dropout(.2)
# self.init_weights()
# def init_weights(self):
# init_layer(self.fc)
def forward(self, nn_input):
'''
Input: (batch_size, times_steps, freq_bins)'''
# x, emb, vgg = nn_input
x, vgg = nn_input
'''(batch_size, 1, times_steps, freq_bins)'''
# spectrogram convolutions
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = self.conv_block_3(x)
x = self.conv_block_4(x)
x = self.conv_block_5(x)
x = self.conv_block_6(x)
# x = self.conv_block_7(x)
# openl3 convolutions
# emb = self.emb_conv_1(emb)
# emb = self.emb_conv_2(emb)
# emb = self.emb_conv_3(emb)
# emb = self.emb_conv_4(emb)
# reshape for fc layers
x = x.view(x.size(0), -1)
# emb = emb.view(emb.size(0), -1)
vgg = vgg.view(vgg.size(0), -1)
# print(x.shape, emb.shape)
# print(x.shape)
# emb = self.fc_emb1(emb)
# emb = self.fc_emb2(emb)
# takes spectrogram and openl3 conv outputs
x = self.fc1(x, vgg)
x = self.fc1_bn(x)
x = F.relu(x)
x = self.dropout(x)
# x = self.dropout(x)
x = self.fc2(x)
x = self.fc2_bn(x)
x = F.relu(x)
x = self.dropout(x)
# x = self.dropout(x)
# x = F.relu(self.fc3(x))
# x = self.dropout(x)
# x = F.relu(self.fc4(x))
# x = self.dropout(x)
# x = F.relu(self.fc5(x))
# x = self.dropout(x)
# x = F.relu(self.fc6(x))
# x = self.dropout(x)
x = self.fc_final(x)
# output = torch.sigmoid(x)
output = x
return output
def get_label_range(coarse_index):
label_start, label_end = label_hierarchy[coarse_index + 1]
NUM_CLASSES = len(range(label_start, label_end))
return label_start, label_end, NUM_CLASSES
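# Illustration (hedged; the real ranges come from label_hierarchy in
# load_dataset): if label_hierarchy[1] were (0, 4), then get_label_range(0)
# would return (0, 4, 4), i.e. coarse category 0 spans fine labels 0..3.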
def train_model(coarse_index, DATE):
label_start, label_end, NUM_CLASSES = get_label_range(coarse_index)
print('number of classes:', NUM_CLASSES)
if NUM_CLASSES < 2:
print('Skipping this coarse category.')
return
TRAIN = AudioDatasetFine(train_dir, coarse_index,
index_to_files_dict_train)
TEST = AudioDatasetFine(test_dir, coarse_index, index_to_files_dict_test)
TRAIN_LOADER = DataLoader(dataset=TRAIN,
batch_size=BATCH_SIZE,
shuffle=True)
TEST_LOADER = DataLoader(dataset=TEST, batch_size=BATCH_SIZE, shuffle=True)
# train_sampler = torch.utils.data.sampler.WeightedRandomSampler(TRAIN_WEIGHTS, 2351)
# test_sampler = torch.utils.data.sampler.WeightedRandomSampler(TEST_WEIGHTS, 443)
# model = NeuralNetwork().to(device)
# model = VGG_11().to(device)
model_tmp = VGG_alt(NUM_CLASSES).to(device)
# model = OpenL3().to(device)
## if training from checkpoint; ensure checkpoint matches model class architecture
# checkpoint = torch.load("models/20190531_151918_best_epoch_19_val_loss=0.1182.ckpt")
# model.load_state_dict(checkpoint)
# Loss and optimizer
# criterion = nn.BCELoss() # must be this for multi-label predictions
# criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(np.array(TRAIN_WEIGHTS).astype(np.float32)**.2).to(device))
if USE_EXAMPLE_WEIGHTS:
weights = weights_fine[coarse_index]
print(f'Using sample weights: {weights}')
criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(np.array(weights).astype(np.float32)).to(device))
#criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(np.array(weights_fine[coarse_index]).astype(np.float32)**.2).to(device))
else:
print('Not using sample weights.')
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model_tmp.parameters(), lr=.001)
# to Poutyne
model = Model(model_tmp, optimizer, criterion, metrics=['bin_acc'])
# Callbacks
tb_writer = SummaryWriter(os.path.join(TENSORBOARD_BASE, f'{DATE}_coarse={coarse_index}'))
callbacks = [
# Save the latest weights to be able to continue the optimization at the end for more epochs.
ModelCheckpoint(
os.path.join(MODEL_BASE, f'{DATE}_coarse={coarse_index}_last_epoch.ckpt'),
temporary_filename=os.path.join(MODEL_BASE, 'last_epoch.ckpt.tmp')),
# Save the weights in a new file when the current model is better than all previous models.
ModelCheckpoint(
os.path.join(MODEL_BASE, '%s_coarse=%d_best_epoch_{epoch}_val_loss={val_loss:.4f}.ckpt'
% (DATE, coarse_index)),
monitor='val_loss',
mode='min',
save_best_only=True,
restore_best=False, #True
verbose=True,
temporary_filename=os.path.join(MODEL_BASE, 'best_epoch.ckpt.tmp')),
# Save the losses and accuracies for each epoch in a TSV.
CSVLogger(os.path.join(MODEL_BASE, f'{DATE}_coarse={coarse_index}_log.tsv'),
separator='\t'),
ReduceLROnPlateau(patience=5, verbose=True, factor=0.1),
EarlyStopping(patience=10, verbose=True),
TerminateOnNaN(),
# policies.sgdr_phases(6, 6, lr=(1.0, 0.1), cycle_mult = 2) # doesn't work as callback
]
save_file_path = os.path.join(MODEL_BASE, '%s_coarse=%d_weights.{epoch:02d}-{val_loss:.4f}.txt' % (
DATE, coarse_index))
save_best_model = PeriodicSaveCallback(save_file_path,
temporary_filename=os.path.join(MODEL_BASE, 'tmp_file.txt'),
atomic_write=False,
save_best_only=True,
verbose=True)
# Train the model
model.fit_generator(TRAIN_LOADER,
TEST_LOADER,
epochs=MAX_EPOCHS,
callbacks=callbacks)
del optimizer
del model
del model_tmp
del TEST_LOADER
del TRAIN_LOADER
del TEST
del TRAIN
def print_gpu_ram():
print(f'GPU memory allocated: {torch.cuda.memory_allocated()}')
print(f'GPU memory cached: {torch.cuda.memory_cached()}')
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data')
# and torch.is_tensor(obj.data)):
# print(type(obj), obj.size())
# del obj
# except:
# pass
def main(coarse_category_idx, DATE):
global BATCH_SIZE
print(
f'\n*****************\nTraining model for coarse category {coarse_category_idx}\n*******\n'
)
print_gpu_ram()
# Hack to avoid batch-size 1 in final batch, which causes crash in batch-norm.
# TODO: Should fix in Poutyne training loop to skip final batch when this happens.
if coarse_category_idx == 3:
BATCH_SIZE = 63
print('Training')
train_model(coarse_category_idx, DATE)
print('Done training.')
print_gpu_ram()
print('Clearing GPU ram')
torch.cuda.empty_cache()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Train model for a single coarse category.')
parser.add_argument('index', type=int, help='coarse category index')
parser.add_argument('date', type=str, help='date string')
args = parser.parse_args()
main(args.index, args.date)
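# Invocation sketch (hedged; substitute the actual script filename):
#   python train_fine.py 3 20190601_120000
# trains the fine-grained model for coarse category 3 and tags checkpoints
# and TensorBoard logs with the given date string.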
|
[
"torch.nn.Dropout",
"torch.nn.BCEWithLogitsLoss",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.cuda.memory_allocated",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"poutyne.framework.Model",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.cuda.empty_cache",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu",
"joblib.load",
"torch.cuda.memory_cached",
"load_dataset.AudioDatasetFine",
"torch.nn.Bilinear"
] |
[((828, 883), 'joblib.load', 'joblib.load', (['"""/dcase/spec_vgg/label_to_files_train.zip"""'], {}), "('/dcase/spec_vgg/label_to_files_train.zip')\n", (839, 883), False, 'import joblib\n'), ((911, 965), 'joblib.load', 'joblib.load', (['"""/dcase/spec_vgg/label_to_files_test.zip"""'], {}), "('/dcase/spec_vgg/label_to_files_test.zip')\n", (922, 965), False, 'import joblib\n'), ((1094, 1131), 'joblib.load', 'joblib.load', (['"""weights_fine_train.pkl"""'], {}), "('weights_fine_train.pkl')\n", (1105, 1131), False, 'import joblib\n'), ((9959, 10027), 'load_dataset.AudioDatasetFine', 'AudioDatasetFine', (['train_dir', 'coarse_index', 'index_to_files_dict_train'], {}), '(train_dir, coarse_index, index_to_files_dict_train)\n', (9975, 10027), False, 'from load_dataset import AudioDatasetFine, label_hierarchy\n'), ((10068, 10134), 'load_dataset.AudioDatasetFine', 'AudioDatasetFine', (['test_dir', 'coarse_index', 'index_to_files_dict_test'], {}), '(test_dir, coarse_index, index_to_files_dict_test)\n', (10084, 10134), False, 'from load_dataset import AudioDatasetFine, label_hierarchy\n'), ((10155, 10217), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'TRAIN', 'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(dataset=TRAIN, batch_size=BATCH_SIZE, shuffle=True)\n', (10165, 10217), False, 'from torch.utils.data import DataLoader\n'), ((10296, 10357), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'TEST', 'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(dataset=TEST, batch_size=BATCH_SIZE, shuffle=True)\n', (10306, 10357), False, 'from torch.utils.data import DataLoader\n'), ((11585, 11607), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (11605, 11607), True, 'import torch.nn as nn\n'), ((11705, 11764), 'poutyne.framework.Model', 'Model', (['model_tmp', 'optimizer', 'criterion'], {'metrics': "['bin_acc']"}), "(model_tmp, optimizer, criterion, metrics=['bin_acc'])\n", (11710, 11764), False, 'from poutyne.framework import Model\n'), ((14937, 14961), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (14959, 14961), False, 'import torch\n'), ((15005, 15090), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model for a single coarse category."""'}), "(description='Train model for a single coarse category.'\n )\n", (15028, 15090), False, 'import argparse\n'), ((391, 416), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (414, 416), False, 'import torch\n'), ((1637, 1760), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (1646, 1760), True, 'import torch.nn as nn\n'), ((7396, 7434), 'torch.nn.Bilinear', 'nn.Bilinear', (['(256)', '(1280)', '(512)'], {'bias': '(True)'}), '(256, 1280, 512, bias=True)\n', (7407, 7434), True, 'import torch.nn as nn\n'), ((7457, 7476), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (7471, 7476), True, 'import torch.nn as nn\n'), ((7496, 7526), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {'bias': '(True)'}), '(512, 256, bias=True)\n', (7505, 7526), True, 'import torch.nn as nn\n'), ((7549, 7568), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (7563, 7568), True, 'import torch.nn as nn\n'), ((7701, 7744), 'torch.nn.Linear', 'nn.Linear', (['(256)', 
'self.NUM_CLASSES'], {'bias': '(True)'}), '(256, self.NUM_CLASSES, bias=True)\n', (7710, 7744), True, 'import torch.nn as nn\n'), ((7769, 7784), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (7779, 7784), True, 'import torch.nn as nn\n'), ((8938, 8947), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (8944, 8947), True, 'import torch.nn.functional as F\n'), ((9069, 9078), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (9075, 9078), True, 'import torch.nn.functional as F\n'), ((2001, 2048), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['maxpool_size'], {'stride': 'maxpool_size'}), '(maxpool_size, stride=maxpool_size)\n', (2013, 2048), True, 'import torch.nn as nn\n'), ((2135, 2163), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (2149, 2163), True, 'import torch.nn as nn\n'), ((2253, 2272), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2263, 2272), True, 'import torch.nn as nn\n'), ((13967, 13996), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (13994, 13996), False, 'import torch\n'), ((14032, 14058), 'torch.cuda.memory_cached', 'torch.cuda.memory_cached', ([], {}), '()\n', (14056, 14058), False, 'import torch\n')]
|
from __future__ import print_function, division
import os
import torch
import numpy as np
import pandas as pd
import math
import re
import pdb
import pickle
from scipy import stats
from torch.utils.data import Dataset
import h5py
from libs.utils.utils import generate_split, nth
def save_splits(split_datasets, column_keys, filename, boolean_style=False):
splits = [split_datasets[i].slide_data['slide_id'] for i in range(len(split_datasets))]
if not boolean_style:
df = pd.concat(splits, ignore_index=True, axis=1)
df.columns = column_keys
else:
        df = pd.concat(splits, ignore_index=True, axis=0)
index = df.values.tolist()
one_hot = np.eye(len(split_datasets)).astype(bool)
bool_array = np.repeat(one_hot, [len(dset) for dset in split_datasets], axis=0)
        df = pd.DataFrame(bool_array, index=index, columns=['train', 'val', 'test'])
df.to_csv(filename)
print()
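# Usage sketch: given the three Generic_Split objects produced by
# return_splits() below, write a single boolean membership table:
#
#   save_splits([train, val, test], ['train', 'val', 'test'],
#               'splits_0_bool.csv', boolean_style=True)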
class Generic_WSI_Classification_Dataset(Dataset):
def __init__(self,
csv_path = 'dataset_csv/ccrcc_clean.csv',
shuffle = False,
seed = 7,
print_info = True,
label_dict = {},
ignore=[],
patient_strat=False,
label_col = None,
patient_voting = 'max',
):
"""
Args:
csv_file (string): Path to the csv file with annotations.
shuffle (boolean): Whether to shuffle
seed (int): random seed for shuffling the data
print_info (boolean): Whether to print a summary of the dataset
label_dict (dict): Dictionary with key, value pairs for converting str labels to int
ignore (list): List containing class labels to ignore
"""
self.label_dict = label_dict
self.custom_test_ids = None
self.num_classes=len(self.label_dict)
self.seed = seed
self.print_info = print_info
self.patient_strat = patient_strat
self.train_ids, self.val_ids, self.test_ids = (None, None, None)
self.data_dir = None
if not label_col:
label_col = 'label'
self.label_col = label_col
slide_data = pd.read_csv(csv_path)
slide_data = self.df_prep(slide_data, self.label_dict, ignore, self.label_col)
###shuffle data
if shuffle:
np.random.seed(seed)
np.random.shuffle(slide_data)
self.slide_data = slide_data
patients = np.unique(np.array(slide_data['case_id'])) # get unique patients
patient_labels = []
for p in patients:
locations = slide_data[slide_data['case_id'] == p].index.tolist()
assert len(locations) > 0
label = slide_data['label'][locations].values
if patient_voting == 'max':
label = label.max() # get patient label (MIL convention)
elif patient_voting == 'maj':
label = stats.mode(label)[0]
            else:
                raise NotImplementedError('patient_voting must be "max" or "maj"')
patient_labels.append(label)
self.patient_data = {'case_id':patients, 'label':np.array(patient_labels)}
# self.patient_data_prep()
self.cls_ids_prep()
if print_info:
self.summarize()
def cls_ids_prep(self):
self.patient_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.patient_cls_ids[i] = np.where(self.patient_data['label'] == i)[0]
self.slide_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.slide_cls_ids[i] = np.where(self.slide_data['label'] == i)[0]
def patient_data_prep(self):
patients = np.unique(np.array(self.slide_data['case_id'])) # get unique patients
patient_labels = []
for p in patients:
locations = self.slide_data[self.slide_data['case_id'] == p].index.tolist()
assert len(locations) > 0
label = self.slide_data['label'][locations[0]] # get patient label
patient_labels.append(label)
self.patient_data = {'case_id':patients, 'label':np.array(patient_labels)}
@staticmethod
def df_prep(data, label_dict, ignore, label_col):
# convert from MIL label
data = data[['study_code', 'target', 'slide']]
data.rename(columns={'study_code': 'case_id', 'target':'label', 'slide':'slide_id'}, inplace=True)
if label_col != 'label':
data['label'] = data[label_col].copy()
mask = data['label'].isin(ignore)
data = data[~mask]
data.reset_index(drop=True, inplace=True)
for i in data.index:
key = data.loc[i, 'label']
data.at[i, 'label'] = label_dict[key]
return data
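    # Input sketch: df_prep expects the raw CSV columns
    # (study_code, target, slide), renames them to (case_id, label, slide_id),
    # drops rows whose label is listed in `ignore`, and maps string labels to
    # ints via label_dict, e.g. label_dict={'negative': 0, 'positive': 1}
    # (illustrative keys only).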
def __len__(self):
if self.patient_strat:
return len(self.patient_data['case_id'])
else:
return len(self.slide_data)
def summarize(self):
print("label column: {}".format(self.label_col))
print("label dictionary: {}".format(self.label_dict))
print("number of classes: {}".format(self.num_classes))
print("slide-level counts: ", '\n', self.slide_data['label'].value_counts(sort = False))
for i in range(self.num_classes):
print('Patient-LVL; Number of samples registered in class %d: %d' % (i, self.patient_cls_ids[i].shape[0]))
print('Slide-LVL; Number of samples registered in class %d: %d' % (i, self.slide_cls_ids[i].shape[0]))
def create_splits(self, k = 3, val_num = (25, 25), test_num = (40, 40), label_frac = 1.0, custom_test_ids = None):
settings = {
'n_splits' : k,
'val_num' : val_num,
'test_num': test_num,
'label_frac': label_frac,
'seed': self.seed,
'custom_test_ids': self.custom_test_ids
}
if self.patient_strat:
settings.update({'cls_ids' : self.patient_cls_ids, 'samples': len(self.patient_data['case_id'])})
else:
settings.update({'cls_ids' : self.slide_cls_ids, 'samples': len(self.slide_data)})
self.split_gen = generate_split(**settings)
def set_splits(self,start_from=None):
if start_from:
ids = nth(self.split_gen, start_from)
else:
ids = next(self.split_gen)
if self.patient_strat:
slide_ids = [[] for i in range(len(ids))]
for split in range(len(ids)):
for idx in ids[split]:
case_id = self.patient_data['case_id'][idx]
slide_indices = self.slide_data[self.slide_data['case_id'] == case_id].index.tolist()
slide_ids[split].extend(slide_indices)
self.train_ids, self.val_ids, self.test_ids = slide_ids[0], slide_ids[1], slide_ids[2]
else:
self.train_ids, self.val_ids, self.test_ids = ids
def get_split_from_df(self, all_splits, split_key='train'):
split = all_splits[split_key]
split = split.dropna().reset_index(drop=True)
if len(split) > 0:
mask = self.slide_data['slide_id'].isin(split.tolist())
df_slice = self.slide_data[mask].dropna().reset_index(drop=True)
split = Generic_Split(df_slice, data_dir=self.data_dir, num_classes=self.num_classes)
else:
split = None
return split
def get_merged_split_from_df(self, all_splits, split_keys=['train']):
merged_split = []
for split_key in split_keys:
split = all_splits[split_key]
split = split.dropna().reset_index(drop=True).tolist()
merged_split.extend(split)
if len(split) > 0:
mask = self.slide_data['slide_id'].isin(merged_split)
df_slice = self.slide_data[mask].dropna().reset_index(drop=True)
split = Generic_Split(df_slice, data_dir=self.data_dir, num_classes=self.num_classes)
else:
split = None
return split
def return_splits(self, from_id=True, csv_path=None):
if from_id:
if len(self.train_ids) > 0:
train_data = self.slide_data.loc[self.train_ids].reset_index(drop=True)
train_split = Generic_Split(train_data, data_dir=self.data_dir, num_classes=self.num_classes)
else:
train_split = None
if len(self.val_ids) > 0:
val_data = self.slide_data.loc[self.val_ids].reset_index(drop=True)
val_split = Generic_Split(val_data, data_dir=self.data_dir, num_classes=self.num_classes)
else:
val_split = None
if len(self.test_ids) > 0:
test_data = self.slide_data.loc[self.test_ids].reset_index(drop=True)
test_split = Generic_Split(test_data, data_dir=self.data_dir, num_classes=self.num_classes)
else:
test_split = None
else:
assert csv_path
all_splits = pd.read_csv(csv_path)
train_split = self.get_split_from_df(all_splits, 'train')
val_split = self.get_split_from_df(all_splits, 'val')
test_split = self.get_split_from_df(all_splits, 'test')
return train_split, val_split, test_split
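
    # Added note (not in the original file): when `from_id=False`, the CSV at
    # `csv_path` is expected to carry 'train', 'val' and 'test' columns of
    # slide_ids, e.g.
    #
    #     train,val,test
    #     slide_001,slide_010,slide_020
    #     slide_002,slide_011,
    #
    # shorter columns leave blanks, which the `dropna()` in
    # `get_split_from_df` discards.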
    def get_list(self, ids):
        return self.slide_data['slide_id'][ids]

    def getlabel(self, ids):
        return self.slide_data['label'][ids]

    def __getitem__(self, idx):
        return None

    def test_split_gen(self, return_descriptor=False):
        if return_descriptor:
            index = [list(self.label_dict.keys())[list(self.label_dict.values()).index(i)]
                     for i in range(self.num_classes)]
            columns = ['train', 'val', 'test']
            df = pd.DataFrame(np.full((len(index), len(columns)), 0, dtype=np.int32),
                              index=index, columns=columns)

        count = len(self.train_ids)
        print('\nnumber of training samples: {}'.format(count))
        labels = self.getlabel(self.train_ids)
        unique, counts = np.unique(labels, return_counts=True)
        for u in range(len(unique)):
            print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
            if return_descriptor:
                df.loc[index[u], 'train'] = counts[u]

        count = len(self.val_ids)
        print('\nnumber of val samples: {}'.format(count))
        labels = self.getlabel(self.val_ids)
        unique, counts = np.unique(labels, return_counts=True)
        for u in range(len(unique)):
            print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
            if return_descriptor:
                df.loc[index[u], 'val'] = counts[u]

        count = len(self.test_ids)
        print('\nnumber of test samples: {}'.format(count))
        labels = self.getlabel(self.test_ids)
        unique, counts = np.unique(labels, return_counts=True)
        for u in range(len(unique)):
            print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
            if return_descriptor:
                df.loc[index[u], 'test'] = counts[u]

        assert len(np.intersect1d(self.train_ids, self.test_ids)) == 0
        assert len(np.intersect1d(self.train_ids, self.val_ids)) == 0
        assert len(np.intersect1d(self.val_ids, self.test_ids)) == 0

        if return_descriptor:
            return df

    def save_split(self, filename):
        train_split = self.get_list(self.train_ids)
        val_split = self.get_list(self.val_ids)
        test_split = self.get_list(self.test_ids)
        df_tr = pd.DataFrame({'train': train_split})
        df_v = pd.DataFrame({'val': val_split})
        df_t = pd.DataFrame({'test': test_split})
        df = pd.concat([df_tr, df_v, df_t], axis=1)
        df.to_csv(filename, index=False)


class Generic_MIL_Dataset(Generic_WSI_Classification_Dataset):
    def __init__(self,
                 data_dir,
                 **kwargs):
        super(Generic_MIL_Dataset, self).__init__(**kwargs)
        self.data_dir = data_dir
        self.use_h5 = False

    def load_from_h5(self, toggle):
        self.use_h5 = toggle

    def __getitem__(self, idx):
        slide_id = self.slide_data['slide_id'][idx]
        label = self.slide_data['label'][idx]

        if not self.use_h5:
            if self.data_dir:
                full_path = os.path.join(self.data_dir, '{}.pt'.format(slide_id))
                features = torch.load(full_path)
                return features, label
            else:
                return slide_id, label
        else:
            full_path = os.path.join(self.data_dir, '{}.h5'.format(slide_id))
            with h5py.File(full_path, 'r') as hdf5_file:
                features = hdf5_file['features'][:]
                coords = hdf5_file['coords'][:]
            features = torch.from_numpy(features)
            return features, label, coords
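
# Added note (not in the original file): in `__getitem__` above, each
# '<slide_id>.h5' is expected to expose a 'features' dataset of per-patch
# embeddings plus a parallel 'coords' dataset of patch coordinates, while
# '<slide_id>.pt' stores only the feature tensor.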
class Generic_Split(Generic_MIL_Dataset):
    def __init__(self, slide_data, data_dir=None, num_classes=2):
        self.use_h5 = False
        self.slide_data = slide_data
        self.data_dir = data_dir
        self.num_classes = num_classes
        self.slide_cls_ids = [[] for i in range(self.num_classes)]
        for i in range(self.num_classes):
            self.slide_cls_ids[i] = np.where(self.slide_data['label'] == i)[0]

    def __len__(self):
        return len(self.slide_data)
|
[
"pandas.DataFrame",
"torch.from_numpy",
"h5py.File",
"numpy.random.seed",
"numpy.random.shuffle",
"libs.utils.utils.generate_split",
"pandas.read_csv",
"scipy.stats.mode",
"torch.load",
"numpy.where",
"numpy.array",
"numpy.intersect1d",
"pandas.concat",
"numpy.unique",
"libs.utils.utils.nth"
] |
[((480, 524), 'pandas.concat', 'pd.concat', (['splits'], {'ignore_index': '(True)', 'axis': '(1)'}), '(splits, ignore_index=True, axis=1)\n', (489, 524), True, 'import pandas as pd\n'), ((566, 610), 'pandas.concat', 'pd.concat', (['splits'], {'ignore_index': '(True)', 'axis': '(0)'}), '(splits, ignore_index=True, axis=0)\n', (575, 610), True, 'import pandas as pd\n'), ((784, 855), 'pandas.DataFrame', 'pd.DataFrame', (['bool_array'], {'index': 'index', 'columns': "['train', 'val', 'test']"}), "(bool_array, index=index, columns=['train', 'val', 'test'])\n", (796, 855), True, 'import pandas as pd\n'), ((1916, 1937), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (1927, 1937), True, 'import pandas as pd\n'), ((5353, 5379), 'libs.utils.utils.generate_split', 'generate_split', ([], {}), '(**settings)\n', (5367, 5379), False, 'from libs.utils.utils import generate_split, nth\n'), ((8686, 8723), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (8695, 8723), True, 'import numpy as np\n'), ((9037, 9074), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (9046, 9074), True, 'import numpy as np\n'), ((9387, 9424), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (9396, 9424), True, 'import numpy as np\n'), ((10002, 10038), 'pandas.DataFrame', 'pd.DataFrame', (["{'train': train_split}"], {}), "({'train': train_split})\n", (10014, 10038), True, 'import pandas as pd\n'), ((10048, 10080), 'pandas.DataFrame', 'pd.DataFrame', (["{'val': val_split}"], {}), "({'val': val_split})\n", (10060, 10080), True, 'import pandas as pd\n'), ((10090, 10124), 'pandas.DataFrame', 'pd.DataFrame', (["{'test': test_split}"], {}), "({'test': test_split})\n", (10102, 10124), True, 'import pandas as pd\n'), ((10132, 10170), 'pandas.concat', 'pd.concat', (['[df_tr, df_v, df_t]'], {'axis': '(1)'}), '([df_tr, df_v, df_t], axis=1)\n', (10141, 10170), True, 'import pandas as pd\n'), ((2055, 2075), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2069, 2075), True, 'import numpy as np\n'), ((2079, 2108), 'numpy.random.shuffle', 'np.random.shuffle', (['slide_data'], {}), '(slide_data)\n', (2096, 2108), True, 'import numpy as np\n'), ((2165, 2196), 'numpy.array', 'np.array', (["slide_data['case_id']"], {}), "(slide_data['case_id'])\n", (2173, 2196), True, 'import numpy as np\n'), ((2676, 2700), 'numpy.array', 'np.array', (['patient_labels'], {}), '(patient_labels)\n', (2684, 2700), True, 'import numpy as np\n'), ((3215, 3251), 'numpy.array', 'np.array', (["self.slide_data['case_id']"], {}), "(self.slide_data['case_id'])\n", (3223, 3251), True, 'import numpy as np\n'), ((3585, 3609), 'numpy.array', 'np.array', (['patient_labels'], {}), '(patient_labels)\n', (3593, 3609), True, 'import numpy as np\n'), ((5446, 5477), 'libs.utils.utils.nth', 'nth', (['self.split_gen', 'start_from'], {}), '(self.split_gen, start_from)\n', (5449, 5477), False, 'from libs.utils.utils import generate_split, nth\n'), ((7763, 7784), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (7774, 7784), True, 'import pandas as pd\n'), ((11030, 11056), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (11046, 11056), False, 'import torch\n'), ((2948, 2989), 'numpy.where', 'np.where', (["(self.patient_data['label'] == i)"], {}), "(self.patient_data['label'] == i)\n", (2956, 2989), True, 'import numpy as np\n'), ((3118, 
3157), 'numpy.where', 'np.where', (["(self.slide_data['label'] == i)"], {}), "(self.slide_data['label'] == i)\n", (3126, 3157), True, 'import numpy as np\n'), ((9609, 9654), 'numpy.intersect1d', 'np.intersect1d', (['self.train_ids', 'self.test_ids'], {}), '(self.train_ids, self.test_ids)\n', (9623, 9654), True, 'import numpy as np\n'), ((9674, 9718), 'numpy.intersect1d', 'np.intersect1d', (['self.train_ids', 'self.val_ids'], {}), '(self.train_ids, self.val_ids)\n', (9688, 9718), True, 'import numpy as np\n'), ((9738, 9781), 'numpy.intersect1d', 'np.intersect1d', (['self.val_ids', 'self.test_ids'], {}), '(self.val_ids, self.test_ids)\n', (9752, 9781), True, 'import numpy as np\n'), ((10726, 10747), 'torch.load', 'torch.load', (['full_path'], {}), '(full_path)\n', (10736, 10747), False, 'import torch\n'), ((10900, 10925), 'h5py.File', 'h5py.File', (['full_path', '"""r"""'], {}), "(full_path, 'r')\n", (10909, 10925), False, 'import h5py\n'), ((11435, 11474), 'numpy.where', 'np.where', (["(self.slide_data['label'] == i)"], {}), "(self.slide_data['label'] == i)\n", (11443, 11474), True, 'import numpy as np\n'), ((2550, 2567), 'scipy.stats.mode', 'stats.mode', (['label'], {}), '(label)\n', (2560, 2567), False, 'from scipy import stats\n')]
|
import math

import gin
import torch
from torch import nn


@gin.configurable
class RegularizationLoss(nn.Module):
    def __init__(self,
                 latent_dims,
                 scale_by_batch=True,
                 use_bayes_factor_vae0_loss=False,
                 use_tc_loss=False):
        super(RegularizationLoss, self).__init__()
        self.scale_by_batch = scale_by_batch
        self.use_bayes_factor_vae0_loss = use_bayes_factor_vae0_loss
        self.use_tc_loss = use_tc_loss
        if use_bayes_factor_vae0_loss:
            self.log_precision = nn.Parameter(torch.zeros(1, latent_dims))

    def add_kld_loss(self, losses, mu, logvar):
        """Standard KLD with a standard Gaussian as prior.

        Computes `-0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)`.

        See Appendix B of the VAE paper:
        Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        https://arxiv.org/abs/1312.6114
        """
        x = 1 + logvar - mu.pow(2) - logvar.exp()
        KLD = -0.5 * torch.sum(x)
        losses['KLD'] = KLD / mu.shape[-1]

    def add_bayes_factor_vae0_loss(self, losses, mu, logvar):
        """KLD with a Gaussian with flexible variances as prior.

        The precision (reciprocal of the variance) of the prior is learned
        from data, and the KLD becomes
        `-0.5 * sum(1 + log(sigma^2) + log(alpha) - mu^2 * alpha - sigma^2 * alpha)`
        where alpha is the learned precision parameter. The formula is
        self-derived and thus may contain errors.

        See model BF-VAE-0 from Kim et al. Bayes-Factor-VAE, 2019,
        https://arxiv.org/abs/1909.02820
        """
        x = (1 + logvar + self.log_precision
             - mu.pow(2) * self.log_precision.exp()
             - logvar.exp() * self.log_precision.exp())
        KLD = -0.5 * torch.sum(x)
        losses['KLD'] = KLD / mu.shape[-1]
        # Compute a penalty term pushing the prior variance towards one.
        alpha_penalty = torch.sum((1 / self.log_precision.exp() - 1).pow(2))
        losses['alpha_penalty'] = alpha_penalty / mu.shape[-1]

    def add_tc_loss(self, losses, z, mu, logvar):
        """Total correlation loss.

        Computes `KL[q(z) || prod_i q(z_i)]`.

        Adapted from
        https://github.com/YannDubs/disentangling-vae/blob/master/disvae/models/losses.py
        under MIT License.

        See Chen et al. Isolating Sources of Disentanglement in VAEs, 2018,
        https://arxiv.org/abs/1802.04942
        """
        mat_log_qz = _matrix_log_density_gaussian(z, mu, logvar)
        log_qz = torch.logsumexp(mat_log_qz.sum(2), dim=1, keepdim=False)
        log_prod_qzi = torch.logsumexp(mat_log_qz, dim=1, keepdim=False).sum(1)
        tc_loss = torch.sum(log_qz - log_prod_qzi)
        losses['TC'] = tc_loss / mu.shape[1]

    def forward(self, z, mu, logvar):
        losses = {}
        if self.use_bayes_factor_vae0_loss:
            self.add_bayes_factor_vae0_loss(losses, mu, logvar)
        else:
            self.add_kld_loss(losses, mu, logvar)
        if self.use_tc_loss:
            self.add_tc_loss(losses, z, mu, logvar)
        if self.scale_by_batch:
            for name, loss in losses.items():
                losses[name] = loss / mu.shape[0]
        return losses


def _matrix_log_density_gaussian(x, mu, logvar):
    """Calculates the log density of a Gaussian for all combinations of batch
    pairs of `x` and `mu`, i.e. returns a tensor of shape
    `(batch_size, batch_size, dim)` instead of the usual `(batch_size, dim)`.

    Adapted from
    https://github.com/YannDubs/disentangling-vae/blob/master/disvae/models/losses.py
    under MIT License.

    Parameters
    ----------
    x: torch.Tensor
        Value at which to compute the density. Shape: (batch_size, dim).
    mu: torch.Tensor
        Mean. Shape: (batch_size, dim).
    logvar: torch.Tensor
        Log variance. Shape: (batch_size, dim).
    """
    batch_size, dim = x.shape
    x = x.view(batch_size, 1, dim)
    mu = mu.view(1, batch_size, dim)
    logvar = logvar.view(1, batch_size, dim)
    return _log_density_gaussian(x, mu, logvar)


def _log_density_gaussian(x, mu, logvar):
    """Calculates the log density of a Gaussian.

    Adapted from
    https://github.com/YannDubs/disentangling-vae/blob/master/disvae/models/losses.py
    under MIT License.

    Parameters
    ----------
    x: torch.Tensor or np.ndarray or float
        Value at which to compute the density.
    mu: torch.Tensor or np.ndarray or float
        Mean.
    logvar: torch.Tensor or np.ndarray or float
        Log variance.
    """
    normalization = -0.5 * (math.log(2 * math.pi) + logvar)
    inv_var = torch.exp(-logvar)
    log_density = normalization - 0.5 * ((x - mu) ** 2 * inv_var)
    return log_density
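

# Added sanity check (not part of the original module): the closed-form KLD in
# `add_kld_loss` should agree with torch.distributions for a diagonal Gaussian
# posterior q = N(mu, sigma^2) against a standard-normal prior.
if __name__ == '__main__':
    from torch.distributions import Normal, kl_divergence

    mu, logvar = torch.randn(4, 3), torch.randn(4, 3)
    closed_form = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    reference = kl_divergence(Normal(mu, (0.5 * logvar).exp()),
                              Normal(0.0, 1.0)).sum()
    assert torch.allclose(closed_form, reference, atol=1e-5)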
|
[
"torch.logsumexp",
"torch.exp",
"torch.zeros",
"math.log",
"torch.sum"
] |
[((4890, 4908), 'torch.exp', 'torch.exp', (['(-logvar)'], {}), '(-logvar)\n', (4899, 4908), False, 'import torch\n'), ((2880, 2912), 'torch.sum', 'torch.sum', (['(log_qz - log_prod_qzi)'], {}), '(log_qz - log_prod_qzi)\n', (2889, 2912), False, 'import torch\n'), ((1050, 1062), 'torch.sum', 'torch.sum', (['x'], {}), '(x)\n', (1059, 1062), False, 'import torch\n'), ((1930, 1942), 'torch.sum', 'torch.sum', (['x'], {}), '(x)\n', (1939, 1942), False, 'import torch\n'), ((4843, 4864), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (4851, 4864), False, 'import math\n'), ((604, 631), 'torch.zeros', 'torch.zeros', (['(1)', 'latent_dims'], {}), '(1, latent_dims)\n', (615, 631), False, 'import torch\n'), ((2802, 2851), 'torch.logsumexp', 'torch.logsumexp', (['mat_log_qz'], {'dim': '(1)', 'keepdim': '(False)'}), '(mat_log_qz, dim=1, keepdim=False)\n', (2817, 2851), False, 'import torch\n')]
|
from catsup.models import Post
from catsup.utils import to_unicode, ObjectDict
from catsup.reader.utils import split_content, parse_yaml_meta


def html_reader(path):
    meta, content = split_content(path)
    if not meta:
        meta = ObjectDict()
    else:
        meta = parse_yaml_meta(meta, path)
    return Post(
        path=path,
        meta=meta,
        content=to_unicode(content)
    )
|
[
"catsup.reader.utils.split_content",
"catsup.utils.ObjectDict",
"catsup.utils.to_unicode",
"catsup.reader.utils.parse_yaml_meta"
] |
[((187, 206), 'catsup.reader.utils.split_content', 'split_content', (['path'], {}), '(path)\n', (200, 206), False, 'from catsup.reader.utils import split_content, parse_yaml_meta\n'), ((239, 251), 'catsup.utils.ObjectDict', 'ObjectDict', ([], {}), '()\n', (249, 251), False, 'from catsup.utils import to_unicode, ObjectDict\n'), ((277, 304), 'catsup.reader.utils.parse_yaml_meta', 'parse_yaml_meta', (['meta', 'path'], {}), '(meta, path)\n', (292, 304), False, 'from catsup.reader.utils import split_content, parse_yaml_meta\n'), ((376, 395), 'catsup.utils.to_unicode', 'to_unicode', (['content'], {}), '(content)\n', (386, 395), False, 'from catsup.utils import to_unicode, ObjectDict\n')]
|
import json
import os

os.chdir(r'C:\Users\xtrem\Desktop\electric\Electric Packages\packages')

packages = [f.replace('.json', '') for f in os.listdir(r'C:\Users\xtrem\Desktop\electric\Electric Packages\packages')]
print(packages)

data = {
    'packages': packages,
}

with open(r'C:\Users\xtrem\Desktop\electric\Electric Packages\package-list.json', 'w+') as f:
    f.write(json.dumps(data, indent=4))

os.system('powershell.exe deploy "Update Package List"')
|
[
"os.listdir",
"os.system",
"os.chdir",
"json.dumps"
] |
[((23, 99), 'os.chdir', 'os.chdir', (['"""C:\\\\Users\\\\xtrem\\\\Desktop\\\\electric\\\\Electric Packages\\\\packages"""'], {}), "('C:\\\\Users\\\\xtrem\\\\Desktop\\\\electric\\\\Electric Packages\\\\packages')\n", (31, 99), False, 'import os\n'), ((404, 460), 'os.system', 'os.system', (['"""powershell.exe deploy "Update Package List\\""""'], {}), '(\'powershell.exe deploy "Update Package List"\')\n', (413, 460), False, 'import os\n'), ((140, 218), 'os.listdir', 'os.listdir', (['"""C:\\\\Users\\\\xtrem\\\\Desktop\\\\electric\\\\Electric Packages\\\\packages"""'], {}), "('C:\\\\Users\\\\xtrem\\\\Desktop\\\\electric\\\\Electric Packages\\\\packages')\n", (150, 218), False, 'import os\n'), ((376, 402), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (386, 402), False, 'import json\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from PIL import Image
from mtcnn.mtcnn import MTCNN

train_dir = 'data/train'
valid_dir = 'data/val'

face_detector = MTCNN()

# for i in os.listdir(train_dir):
#     print(i)
# my_img = 'data/train/madonna/httpiamediaimdbcomimagesMMVBMTANDQNTAxNDVeQTJeQWpwZBbWUMDIMjQOTYVUXCRALjpg.jpg'
# img = img.convert("RGB")


def extract_faces(filename):
    img_path = filename
    img = Image.open(img_path)
    img = img.convert("RGB")
    pixels = np.asarray(img)
    # NOTE: assumes at least one face is detected; `results[0]` raises an
    # IndexError on images where MTCNN finds no face.
    results = face_detector.detect_faces(pixels)
    x1, y1, width, height = results[0]['box']
    x1 = abs(x1)
    y1 = abs(y1)
    x2, y2 = x1 + width, y1 + height
    face = pixels[y1:y2, x1:x2]
    image = Image.fromarray(face)
    resized_img = image.resize((160, 160))
    final_pix = np.asarray(resized_img)
    return final_pix


def load_faces(directory):
    faces = []
    for filename in os.listdir(directory):
        path = os.path.join(directory, filename)
        face = extract_faces(path)
        faces.append(face)
    return faces


def load_dataset(directory):
    X, y = [], []
    for subdir in os.listdir(directory):
        path = directory + '/' + subdir + '/'
        if not os.path.isdir(path):
            continue
        faces = load_faces(path)
        labels = [subdir for _ in range(len(faces))]
        # summarize progress
        print('>loaded %d examples for class: %s' % (len(faces), subdir))
        # store
        X.extend(faces)
        y.extend(labels)
    return np.asarray(X), np.asarray(y)


# load_dataset(train_dir))
trainX, trainy = load_dataset(train_dir)
print(trainX.shape, trainy.shape)
# load test dataset
testX, testy = load_dataset(valid_dir)
print(testX.shape, testy.shape)
# save arrays to one file in compressed format
# np.savez_compressed('face_test.npz', trainX, trainy, testX, testy)

# plt.imshow(ans)
# plt.show()
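
# Added example (sketch): visualize the first extracted training face to
# sanity-check the 160x160 crops produced above.
# plt.imshow(trainX[0])
# plt.show()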
|
[
"os.path.isdir",
"numpy.asarray",
"mtcnn.mtcnn.MTCNN",
"PIL.Image.open",
"PIL.Image.fromarray",
"os.path.join",
"os.listdir"
] |
[((194, 201), 'mtcnn.mtcnn.MTCNN', 'MTCNN', ([], {}), '()\n', (199, 201), False, 'from mtcnn.mtcnn import MTCNN\n'), ((447, 467), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (457, 467), False, 'from PIL import Image\n'), ((504, 519), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (514, 519), True, 'import numpy as np\n'), ((704, 725), 'PIL.Image.fromarray', 'Image.fromarray', (['face'], {}), '(face)\n', (719, 725), False, 'from PIL import Image\n'), ((778, 801), 'numpy.asarray', 'np.asarray', (['resized_img'], {}), '(resized_img)\n', (788, 801), True, 'import numpy as np\n'), ((882, 903), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (892, 903), False, 'import os\n'), ((1072, 1093), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1082, 1093), False, 'import os\n'), ((914, 947), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (926, 947), False, 'import os\n'), ((1399, 1412), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (1409, 1412), True, 'import numpy as np\n'), ((1414, 1427), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1424, 1427), True, 'import numpy as np\n'), ((1145, 1164), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1158, 1164), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import threading
from functools import reduce  # used by ip_string_to_num; a builtin only on Python 2

g_ip_check = re.compile(r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$')


def check_ip_valid4(ip):
    """Check that an IPv4 address is well formed."""
    ret = g_ip_check.match(ip)
    if ret is not None:
        # each item range: [0, 255]
        for item in ret.groups():
            if int(item) > 255:
                return 0
        return 1
    else:
        return 0


def check_ip_valid6(ip):
    """Validates IPv6 addresses.

    Copied from http://stackoverflow.com/a/319293/2755602
    """
    pattern = re.compile(r"""
        ^
        \s*                         # Leading whitespace
        (?!.*::.*::)                # Only a single wildcard allowed
        (?:(?!:)|:(?=:))            # Colon iff it would be part of a wildcard
        (?:                         # Repeat 6 times:
            [0-9a-f]{0,4}           #   A group of at most four hexadecimal digits
            (?:(?<=::)|(?<!::):)    #   Colon unless preceded by wildcard
        ){6}                        #
        (?:                         # Either
            [0-9a-f]{0,4}           #   Another group
            (?:(?<=::)|(?<!::):)    #   Colon unless preceded by wildcard
            [0-9a-f]{0,4}           #   Last group
            (?: (?<=::)             #   Colon iff preceded by exactly one colon
             |  (?<!:)              #
             |  (?<=:) (?<!::) :    #
            )                       # OR
         |                          #   A v4 address with NO leading zeros
            (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
            (?: \.
                (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
            ){3}
        )
        \s*                         # Trailing whitespace
        $
    """, re.VERBOSE | re.IGNORECASE | re.DOTALL)
    return pattern.match(ip) is not None


def check_ip_valid(ip):
    if ':' in ip:
        return check_ip_valid6(ip)
    else:
        return check_ip_valid4(ip)


# Case-insensitive so that the usual lowercase domain labels match too.
domain_allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)


def check_domain_valid(hostname):
    if len(hostname) > 255:
        return False
    if hostname.endswith("."):
        hostname = hostname[:-1]
    return all(domain_allowed.match(x) for x in hostname.split("."))


def str2hex(data):
    return ":".join("{:02x}".format(ord(c)) for c in data)


def get_ip_maskc(ip_str):
    head = ".".join(ip_str.split(".")[:-1])
    return head + ".0"


def split_ip(strline):
    """Split an address expression into its start IP and end IP."""
    begin = ""
    end = ""
    if "-" in strline:
        num_regions = strline.split(".")
        if len(num_regions) == 4:
            # form: xxx.xxx.xxx-xxx.xxx-xxx
            begin = ''
            end = ''
            for region in num_regions:
                if '-' in region:
                    s, e = region.split('-')
                    begin += '.' + s
                    end += '.' + e
                else:
                    begin += '.' + region
                    end += '.' + region
            begin = begin[1:]
            end = end[1:]
        else:
            # form: xxx.xxx.xxx.xxx-xxx.xxx.xxx.xxx
            begin, end = strline.split("-")
            if 1 <= len(end) <= 3:
                prefix = begin[0:begin.rfind(".")]
                end = prefix + "." + end
    elif strline.endswith("."):
        # form: xxx.xxx.xxx.
        begin = strline + "0"
        end = strline + "255"
    elif "/" in strline:
        # form: xxx.xxx.xxx.xxx/xx
        (ip, bits) = strline.split("/")
        if check_ip_valid4(ip) and (0 <= int(bits) <= 32):
            orgip = ip_string_to_num(ip)
            end_bits = (1 << (32 - int(bits))) - 1
            begin_bits = 0xFFFFFFFF ^ end_bits
            begin = ip_num_to_string(orgip & begin_bits)
            end = ip_num_to_string(orgip | end_bits)
    else:
        # form: xxx.xxx.xxx.xxx
        begin = strline
        end = strline
    return begin, end


def generate_random_lowercase(n):
    min_lc = ord(b'a')
    len_lc = 26
    ba = bytearray(os.urandom(n))
    for i, b in enumerate(ba):
        ba[i] = min_lc + b % len_lc  # convert 0..255 to 97..122
    # sys.stdout.buffer.write(ba)
    return ba


class SimpleCondition(object):
    def __init__(self):
        self.lock = threading.Condition()

    def notify(self):
        self.lock.acquire()
        self.lock.notify()
        self.lock.release()

    def wait(self):
        self.lock.acquire()
        self.lock.wait()
        self.lock.release()


def split_domain(host):
    hl = host.split(".")
    return hl[0], ".".join(hl[1:])


def ip_string_to_num(s):
    """Convert dotted IPv4 address to integer."""
    return reduce(lambda a, b: a << 8 | b, map(int, s.split(".")))


def ip_num_to_string(ip):
    """Convert 32-bit integer to dotted IPv4 address."""
    return ".".join(map(lambda n: str(ip >> n & 0xFF), [24, 16, 8, 0]))


private_ipv4_range = [
    ("10.0.0.0", "10.255.255.255"),
    ("127.0.0.0", "127.255.255.255"),
    ("169.254.0.0", "169.254.255.255"),
    ("172.16.0.0", "172.31.255.255"),
    ("192.168.0.0", "192.168.255.255")
]

private_ipv6_range = [
    ("::1", "::1"),
    ("fc00::", "fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
]

private_ipv4_range_bin = []
for b, e in private_ipv4_range:
    bb = ip_string_to_num(b)
    ee = ip_string_to_num(e)
    private_ipv4_range_bin.append((bb, ee))


def is_private_ip(ip):
    try:
        if "." in ip:
            ip_bin = ip_string_to_num(ip)
            for b, e in private_ipv4_range_bin:
                if b <= ip_bin <= e:
                    return True
            return False
        else:
            if ip == "::1":
                return True
            fi = ip.find(":")
            if fi != 4:
                return False
            be = ip[0:2]
            if be in ["fc", "fd"]:
                return True
            else:
                return False
    except Exception as e:
        print("is_private_ip(%s), except:%r" % (ip, e))
        return False


if __name__ == '__main__':
    print(is_private_ip("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"))
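    # Added examples (sketch): exercise split_ip on the notations it supports.
    print(split_ip("192.168.0.0/24"))  # -> ('192.168.0.0', '192.168.0.255')
    print(split_ip("10.0.0.1-5"))      # -> ('10.0.0.1', '10.0.0.5')
    print(split_ip("172.16.3."))       # -> ('172.16.3.0', '172.16.3.255')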
|
[
"threading.Condition",
"os.urandom",
"re.compile"
] |
[((98, 163), 're.compile', 're.compile', (['"""^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})$"""'], {}), "('^(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})\\\\.(\\\\d{1,3})$')\n", (108, 163), False, 'import re\n'), ((2005, 2046), 're.compile', 're.compile', (['"""(?!-)[A-Z\\\\d-]{1,63}(?<!-)$"""'], {}), "('(?!-)[A-Z\\\\d-]{1,63}(?<!-)$')\n", (2015, 2046), False, 'import re\n'), ((579, 1840), 're.compile', 're.compile', (['"""\n ^\n \\\\s* # Leading whitespace\n (?!.*::.*::) # Only a single whildcard allowed\n (?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard\n (?: # Repeat 6 times:\n [0-9a-f]{0,4} # A group of at most four hexadecimal digits\n (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard\n ){6} #\n (?: # Either\n [0-9a-f]{0,4} # Another group\n (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard\n [0-9a-f]{0,4} # Last group\n (?: (?<=::) # Colon iff preceeded by exacly one colon\n | (?<!:) #\n | (?<=:) (?<!::) : #\n ) # OR\n | # A v4 address with NO leading zeros\n (?:25[0-4]|2[0-4]\\\\d|1\\\\d\\\\d|[1-9]?\\\\d)\n (?: \\\\.\n (?:25[0-4]|2[0-4]\\\\d|1\\\\d\\\\d|[1-9]?\\\\d)\n ){3}\n )\n \\\\s* # Trailing whitespace\n $\n """', '(re.VERBOSE | re.IGNORECASE | re.DOTALL)'], {}), '(\n """\n ^\n \\\\s* # Leading whitespace\n (?!.*::.*::) # Only a single whildcard allowed\n (?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard\n (?: # Repeat 6 times:\n [0-9a-f]{0,4} # A group of at most four hexadecimal digits\n (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard\n ){6} #\n (?: # Either\n [0-9a-f]{0,4} # Another group\n (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard\n [0-9a-f]{0,4} # Last group\n (?: (?<=::) # Colon iff preceeded by exacly one colon\n | (?<!:) #\n | (?<=:) (?<!::) : #\n ) # OR\n | # A v4 address with NO leading zeros\n (?:25[0-4]|2[0-4]\\\\d|1\\\\d\\\\d|[1-9]?\\\\d)\n (?: \\\\.\n (?:25[0-4]|2[0-4]\\\\d|1\\\\d\\\\d|[1-9]?\\\\d)\n ){3}\n )\n \\\\s* # Trailing whitespace\n $\n """\n , re.VERBOSE | re.IGNORECASE | re.DOTALL)\n', (589, 1840), False, 'import re\n'), ((4002, 4015), 'os.urandom', 'os.urandom', (['n'], {}), '(n)\n', (4012, 4015), False, 'import os\n'), ((4236, 4257), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (4255, 4257), False, 'import threading\n')]
|
from django.db import migrations
from django.db.migrations import RunPython


def add_fuel_classes(apps, schema_editor):
    """
    Creates the fuel classes: Gasoline and Diesel
    """
    db_alias = schema_editor.connection.alias

    fuel_class = apps.get_model('api', 'FuelClass')
    fuel_class.objects.using(db_alias).bulk_create([
        fuel_class(
            fuel_class="Diesel",
            display_order=1,
            effective_date='2017-01-01'
        ),
        fuel_class(
            fuel_class="Gasoline",
            display_order=2,
            effective_date='2017-01-01'
        )
    ])


def remove_fuel_classes(apps, schema_editor):
    """
    Removes the fuel classes so the migration can be reversed
    """
    db_alias = schema_editor.connection.alias

    fuel_class = apps.get_model('api', 'FuelClass')
    fuel_class.objects.using(db_alias).all().delete()


class Migration(migrations.Migration):
    """
    Attaches the functions for the migrations
    """
    dependencies = [
        ('api', '0105_add_credit_calculation_permissions'),
    ]

    operations = [
        RunPython(
            add_fuel_classes,
            remove_fuel_classes
        )
    ]
|
[
"django.db.migrations.RunPython"
] |
[((1108, 1156), 'django.db.migrations.RunPython', 'RunPython', (['add_fuel_classes', 'remove_fuel_classes'], {}), '(add_fuel_classes, remove_fuel_classes)\n', (1117, 1156), False, 'from django.db.migrations import RunPython\n')]
|
from django.db import models
from django.contrib.auth.models import User


class PostLike(models.Model):
    post = models.ForeignKey("Post", on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)


class Post(models.Model):
    title = models.CharField(max_length=100)
    author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    content = models.CharField(max_length=200)
    likes = models.IntegerField(default=0)
    created = models.DateTimeField(auto_now_add=True)
    liked_by = models.ManyToManyField(User, related_name='like_user', blank=True, through=PostLike)

    def __str__(self):
        return self.title
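
# Added usage sketch (object names are illustrative): because `liked_by` goes
# through the explicit intermediate model, a like can be recorded via PostLike
# so the timestamp is populated:
#
#     PostLike.objects.create(post=some_post, user=some_user)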
|
[
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((116, 167), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Post"""'], {'on_delete': 'models.CASCADE'}), "('Post', on_delete=models.CASCADE)\n", (133, 167), False, 'from django.db import models\n'), ((179, 228), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (196, 228), False, 'from django.db import models\n'), ((245, 284), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (265, 284), False, 'from django.db import models\n'), ((325, 357), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (341, 357), False, 'from django.db import models\n'), ((371, 431), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(User, on_delete=models.CASCADE, null=True)\n', (388, 431), False, 'from django.db import models\n'), ((446, 478), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (462, 478), False, 'from django.db import models\n'), ((491, 521), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (510, 521), False, 'from django.db import models\n'), ((536, 575), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (556, 575), False, 'from django.db import models\n'), ((591, 680), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'related_name': '"""like_user"""', 'blank': '(True)', 'through': 'PostLike'}), "(User, related_name='like_user', blank=True, through=\n PostLike)\n", (613, 680), False, 'from django.db import models\n')]
|
from django.contrib import admin

from apps.inventories.models import Place


@admin.register(Place)
class PlaceAdmin(admin.ModelAdmin):
    list_display = ('pk', 'name', 'all_members')
    ordering = ('pk',)

    def all_members(self, obj):
        return '\n'.join([str(member) for member in obj.members.all().distinct()])
|
[
"django.contrib.admin.register"
] |
[((79, 100), 'django.contrib.admin.register', 'admin.register', (['Place'], {}), '(Place)\n', (93, 100), False, 'from django.contrib import admin\n')]
|
import logging
import boto3
import re
import pandas as pd
import concurrent.futures

from itertools import repeat
from typing import Dict, List, Union
from datetime import datetime
from dateutil.parser import parse

__author__ = "mikethoun"
__copyright__ = "mikethoun"
__license__ = "apache license 2.0"


class LogQuery:
    """
    This object fetches, aggregates, and returns log files from S3.

    :param Dict[str, str] log_paths: Dict mapping a server name to its log file path in S3.
    :param str s3_bucket: Name of the S3 bucket.
    :param str timestamp_format: Format of timestamps in the log files ('%-H' is a glibc extension, not portable to Windows).
    :param str log_format_regex: Regex used to split log messages into fields.
    :param List[str] fields: List of names for the log file fields.
    """

    def __init__(self, log_paths: Dict[str, str], s3_bucket: str, timestamp_format: str = '%m/%d/%Y %-H:%M:%S.%f',
                 log_format_regex: str = r'\[(.*?)\]|((?<=] ).*$)', fields=None) -> None:
        self.s3_bucket = s3_bucket
        self.log_paths = log_paths
        self.timestamp_format = timestamp_format
        self.log_format_regex = log_format_regex
        self.fields = ['timestamp', 'severity', 'message', 'server'] if fields is None else fields

    @staticmethod
    def __create_severity_filter(severity: int) -> str:
        """Creates a severity filter.

        This function returns a dynamic SQL string that can be used to filter for messages of a minimum severity
        level in S3 Select queries.

        :param int severity: Logging level constant for the minimum severity to include, e.g. logging.WARN.
        :return: Returns a dynamic SQL WHERE clause condition.
        :rtype: str
        """
        severity_filter = ""
        for k, v in logging._nameToLevel.items():
            severity_filter += f" _1 LIKE '%[{k.lower()}]%' OR " if v >= severity else ""
        if severity_filter:
            return "AND (" + severity_filter[:-3] + ")"
        else:
            return severity_filter

    def __execute_s3select(self, server: str, start_time: str, severity_filter: str, entries: int) -> pd.DataFrame:
        """Execute an S3 Select query.

        This function executes an S3 Select query and returns the results as a Pandas DataFrame.

        :param str server: Name of the server.
        :param str start_time: Minimum log timestamp to fetch.
        :param str severity_filter: SQL condition produced by __create_severity_filter.
        :param int entries: Number of log entries to fetch.
        :return: Returns a dataframe containing the selected log messages for the server.
        :rtype: pd.DataFrame
        """
        s3 = boto3.session.Session().client('s3')
        try:
            r = s3.select_object_content(
                Bucket=self.s3_bucket,
                Key=self.log_paths[server],
                ExpressionType='SQL',
                Expression=f"select _1 from s3object WHERE _1 >= '[{start_time}]' {severity_filter} LIMIT {entries}",
                InputSerialization={'CSV': {"FileHeaderInfo": "NONE"}},
                OutputSerialization={'CSV': {}},
            )
        except s3.exceptions.NoSuchKey:
            return pd.DataFrame()

        data = []
        for event in r['Payload']:
            if 'Records' in event:
                records = event['Records']['Payload'].decode('utf-8').splitlines()
                for x in records:
                    data.append([''.join(t) for t in re.findall(self.log_format_regex, x)] + [server])

        df = pd.DataFrame(data, columns=self.fields)
        df.set_index('timestamp', inplace=True)
        return df

    def query(self, keys: List[str], start: str = None, entries: int = 100, min_severity: int = logging.ERROR,
              output: str = 'string') -> Union[str, pd.DataFrame]:
        """Download and aggregate log files from S3.

        This function downloads and aggregates log files from S3 using S3 Select and multi-threading.

        :param List[str] keys: List of server names.
        :param str start: Minimum log timestamp to fetch.
        :param int entries: Number of log entries to fetch.
        :param int min_severity: Minimum log severity to fetch.
        :param str output: Determines the return type. Accepts 'string' or 'dataframe'.
        :return: Returns the fetched log messages.
        :rtype: str or pd.DataFrame
        """
        start_time = datetime.strftime(parse(start), self.timestamp_format)[:-2]
        severity = self.__create_severity_filter(severity=min_severity)
        with concurrent.futures.ThreadPoolExecutor() as executor:
            results = executor.map(self.__execute_s3select, keys, repeat(start_time), repeat(severity), repeat(entries))
        log_df = pd.concat(results)
        log_df.sort_index(inplace=True)

        if output == 'dataframe':
            return log_df
        else:
            return "No Log Messages Found." if log_df.empty else log_df.to_string()
|
[
"pandas.DataFrame",
"dateutil.parser.parse",
"logging._nameToLevel.items",
"re.findall",
"boto3.session.Session",
"pandas.concat",
"itertools.repeat"
] |
[((1769, 1797), 'logging._nameToLevel.items', 'logging._nameToLevel.items', ([], {}), '()\n', (1795, 1797), False, 'import logging\n'), ((3508, 3547), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'self.fields'}), '(data, columns=self.fields)\n', (3520, 3547), True, 'import pandas as pd\n'), ((4758, 4776), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (4767, 4776), True, 'import pandas as pd\n'), ((2644, 2667), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (2665, 2667), False, 'import boto3\n'), ((3170, 3184), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3182, 3184), True, 'import pandas as pd\n'), ((4438, 4450), 'dateutil.parser.parse', 'parse', (['start'], {}), '(start)\n', (4443, 4450), False, 'from dateutil.parser import parse\n'), ((4685, 4703), 'itertools.repeat', 'repeat', (['start_time'], {}), '(start_time)\n', (4691, 4703), False, 'from itertools import repeat\n'), ((4705, 4721), 'itertools.repeat', 'repeat', (['severity'], {}), '(severity)\n', (4711, 4721), False, 'from itertools import repeat\n'), ((4723, 4738), 'itertools.repeat', 'repeat', (['entries'], {}), '(entries)\n', (4729, 4738), False, 'from itertools import repeat\n'), ((3444, 3480), 're.findall', 're.findall', (['self.log_format_regex', 'x'], {}), '(self.log_format_regex, x)\n', (3454, 3480), False, 'import re\n')]
|
import time
import os
import glob
import gc

import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import pytorch_lightning as pl
import pytorch_lightning.loggers as pl_loggers
import pytorch_lightning.callbacks as pl_callbacks
from torch.utils.data import DataLoader

from config_modified import args
from models.video_net import VideoNet
from data.lrs2_dataset import LRS2Pretrain, LRS2Main
from data.utils import collate_fn
from utils.metrics import compute_cer, compute_wer
from utils.decoders import ctc_greedy_decode, ctc_search_decode


class VideoNetDataModule(pl.LightningDataModule):
    def __init__(self, data_cfg):
        # TODO: Change cfg to regular argument names
        super().__init__()
        self.data_cfg = data_cfg
        self.videoParams = {"videoFPS": self.data_cfg["VIDEO_FPS"]}
        self.gpuAvailable = torch.cuda.is_available()
        self.data_cls = LRS2Pretrain if self.data_cfg["PRETRAIN"] else LRS2Main
        if self.data_cfg["PRETRAIN"]:
            self.trainData = LRS2Pretrain("pretrain",
                                         self.data_cfg["DATA_DIRECTORY"],
                                         self.data_cfg["PRETRAIN_NUM_WORDS"],
                                         self.data_cfg["CHAR_TO_INDEX"],
                                         self.data_cfg["STEP_SIZE"],
                                         self.videoParams)
            self.valData = LRS2Pretrain("preval",
                                       self.data_cfg["DATA_DIRECTORY"],
                                       self.data_cfg["PRETRAIN_NUM_WORDS"],
                                       self.data_cfg["CHAR_TO_INDEX"],
                                       self.data_cfg["STEP_SIZE"],
                                       self.videoParams)
        else:
            self.trainData = LRS2Main("train",
                                      self.data_cfg["DATA_DIRECTORY"],
                                      self.data_cfg["MAIN_REQ_INPUT_LENGTH"],
                                      self.data_cfg["CHAR_TO_INDEX"],
                                      self.data_cfg["STEP_SIZE"],
                                      self.videoParams)
            self.valData = LRS2Main("val",
                                    self.data_cfg["DATA_DIRECTORY"],
                                    self.data_cfg["MAIN_REQ_INPUT_LENGTH"],
                                    self.data_cfg["CHAR_TO_INDEX"],
                                    self.data_cfg["STEP_SIZE"],
                                    self.videoParams)

    def train_dataloader(self) -> DataLoader:
        kwargs = {"num_workers": self.data_cfg["NUM_WORKERS"], "pin_memory": True} if self.gpuAvailable else {}
        trainLoader = DataLoader(self.trainData,
                                 batch_size=self.data_cfg["BATCH_SIZE"],
                                 collate_fn=collate_fn,
                                 shuffle=True,
                                 **kwargs)
        return trainLoader

    def val_dataloader(self) -> DataLoader:
        kwargs = {"num_workers": self.data_cfg["NUM_WORKERS"], "pin_memory": True} if self.gpuAvailable else {}
        valLoader = DataLoader(self.valData,
                               batch_size=self.data_cfg["BATCH_SIZE"],
                               collate_fn=collate_fn,
                               shuffle=True,
                               **kwargs)
        return valLoader


class VideoNetPL(pl.LightningModule):
    def __init__(self, net_class, net_cfg, train_cfg):
        super().__init__()
        self.net_cfg = net_cfg
        self.train_cfg = train_cfg
        self.loss_fn = nn.CTCLoss(blank=0, zero_infinity=False)
        self.model = net_class(**net_cfg)

    def forward(self, inputBatch):
        outputBatch = self.model(inputBatch)
        return outputBatch

    def training_step(self, batch, batch_idx):
        trainParams = {"spaceIx": args["CHAR_TO_INDEX"][" "],
                       "eosIx": args["CHAR_TO_INDEX"]["<EOS>"]}
        inputBatch, targetBatch, inputLenBatch, targetLenBatch = batch
        inputBatch, targetBatch = inputBatch.float(), targetBatch.int()
        inputLenBatch, targetLenBatch = inputLenBatch.int(), targetLenBatch.int()

        outputBatch = self.model(inputBatch)
        with torch.backends.cudnn.flags(enabled=False):
            loss = self.loss_fn(outputBatch, targetBatch, inputLenBatch, targetLenBatch)
        trainingLoss = loss
        predictionBatch, predictionLenBatch = ctc_greedy_decode(outputBatch.detach(),
                                                               inputLenBatch,
                                                               trainParams["eosIx"])
        trainingCER = compute_cer(predictionBatch,
                                  targetBatch,
                                  predictionLenBatch,
                                  targetLenBatch)
        trainingWER = compute_wer(predictionBatch,
                                  targetBatch,
                                  predictionLenBatch,
                                  targetLenBatch,
                                  trainParams["spaceIx"])

        self.log('train_loss', trainingLoss, prog_bar=True)
        self.log('train_wer', trainingWER, prog_bar=True)
        self.log('train_cer', trainingCER, prog_bar=True)
        return trainingLoss

    def validation_step(self, batch, batch_idx):
        evalParams = {"decodeScheme": "greedy",
                      "spaceIx": args["CHAR_TO_INDEX"][" "],
                      "eosIx": args["CHAR_TO_INDEX"]["<EOS>"]}
        inputBatch, targetBatch, inputLenBatch, targetLenBatch = batch
        inputBatch, targetBatch = inputBatch.float(), targetBatch.int()
        inputLenBatch, targetLenBatch = inputLenBatch.int(), targetLenBatch.int()

        outputBatch = self.model(inputBatch)
        with torch.backends.cudnn.flags(enabled=False):
            loss = self.loss_fn(outputBatch, targetBatch, inputLenBatch, targetLenBatch)
        evalLoss = loss
        if evalParams["decodeScheme"] == "greedy":
            predictionBatch, predictionLenBatch = ctc_greedy_decode(outputBatch,
                                                                   inputLenBatch,
                                                                   evalParams["eosIx"])
        elif evalParams["decodeScheme"] == "search":
            predictionBatch, predictionLenBatch = ctc_search_decode(outputBatch,
                                                                   inputLenBatch,
                                                                   evalParams["beamSearchParams"],
                                                                   evalParams["spaceIx"],
                                                                   evalParams["eosIx"],
                                                                   evalParams["lm"])
        else:
            print("Invalid Decode Scheme")
            exit()

        evalCER = compute_cer(predictionBatch,
                              targetBatch,
                              predictionLenBatch,
                              targetLenBatch)
        evalWER = compute_wer(predictionBatch,
                              targetBatch,
                              predictionLenBatch,
                              targetLenBatch,
                              evalParams["spaceIx"])

        self.log('val_loss', evalLoss, prog_bar=True)
        self.log('val_wer', evalWER, prog_bar=True)
        self.log('val_cer', evalCER, prog_bar=True)
        return evalLoss

    def configure_optimizers(self):
        optimizer = optim.Adam(self.model.parameters(),
                               lr=self.train_cfg["INIT_LR"],
                               betas=(self.train_cfg["MOMENTUM1"], self.train_cfg["MOMENTUM2"]))
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         mode="min",
                                                         factor=self.train_cfg["LR_SCHEDULER_FACTOR"],
                                                         patience=self.train_cfg["LR_SCHEDULER_WAIT"],
                                                         threshold=self.train_cfg["LR_SCHEDULER_THRESH"],
                                                         threshold_mode="abs",
                                                         min_lr=self.train_cfg["FINAL_LR"],
                                                         verbose=True)
        return {
            "optimizer": optimizer,
            "lr_scheduler": scheduler,
            "monitor": "val_wer"
        }


def train_step(args, timestr='', best_ckpt=None):
    data_cfg = {
        "VIDEO_FPS": args["VIDEO_FPS"],
        "DATA_DIRECTORY": args["DATA_DIRECTORY"],
        "PRETRAIN_NUM_WORDS": args["PRETRAIN_NUM_WORDS"],
        "CHAR_TO_INDEX": args["CHAR_TO_INDEX"],
        "STEP_SIZE": args["STEP_SIZE"],
        "NUM_WORKERS": args["NUM_WORKERS"],
        "BATCH_SIZE": args["BATCH_SIZE"],
        "PRETRAIN": args["PRETRAIN"]
    }
    train_cfg = {
        "INIT_LR": args["INIT_LR"],
        "MOMENTUM1": args["MOMENTUM1"],
        "MOMENTUM2": args["MOMENTUM2"],
        "LR_SCHEDULER_FACTOR": args["LR_SCHEDULER_FACTOR"],
        "LR_SCHEDULER_WAIT": args["LR_SCHEDULER_WAIT"],
        "LR_SCHEDULER_THRESH": args["LR_SCHEDULER_THRESH"],
        "FINAL_LR": args["FINAL_LR"],
    }
    net_cfg = {
        "dModel": args["TX_NUM_FEATURES"],
        "nHeads": args["TX_ATTENTION_HEADS"],
        "numLayers": args["TX_NUM_LAYERS"],
        "peMaxLen": args["PE_MAX_LENGTH"],
        "fcHiddenSize": args["TX_FEEDFORWARD_DIM"],
        "dropout": args["TX_DROPOUT"],
        "numClasses": args["NUM_CLASSES"]
    }

    logger = pl_loggers.NeptuneLogger(
        project_name='benso/deep-avsr',
        experiment_name='video_only_curriculum',
        params=args,
        tags={'start_date': timestr}
    )
    model_checkpoint = pl_callbacks.ModelCheckpoint(
        filename=args["NUM_WORDS"] + '/{epoch:02d}-{val_wer:.2f}',
        save_weights_only=True,
        save_top_k=3,
        monitor='val_wer',
        period=1
    )
    trainer = pl.Trainer(
        logger=logger,
        checkpoint_callback=model_checkpoint,
        gpus=2,
        auto_select_gpus=False,
        max_epochs=args["NUM_STEPS"],
        accelerator=args["ACCELERATOR"],
        resume_from_checkpoint=best_ckpt
    )
    data = VideoNetDataModule(data_cfg=data_cfg)
    network = VideoNetPL(net_class=VideoNet, net_cfg=net_cfg, train_cfg=train_cfg)
    trainer.fit(model=network, datamodule=data)
    return model_checkpoint.best_model_path


def curriculum(args):
    PRETRAIN_NUM_WORDS = [1, 2, 3, 5, 7, 9, 11, 13, 17, 21, 29, 37, 0]
    PRETRAIN_CONFIG = {
        1: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 1, 'BATCH_SIZE': 32},
        2: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 2, 'BATCH_SIZE': 32},
        3: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 3, 'BATCH_SIZE': 32},
        5: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 5, 'BATCH_SIZE': 32},
        7: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 7, 'BATCH_SIZE': 32},
        9: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 9, 'BATCH_SIZE': 32},
        11: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 11, 'BATCH_SIZE': 32},
        13: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 13, 'BATCH_SIZE': 32},
        17: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 17, 'BATCH_SIZE': 32},
        21: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 21, 'BATCH_SIZE': 32},
        29: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 29, 'BATCH_SIZE': 32},
        37: {'PRETRAIN': True, 'PRETRAIN_NUM_WORDS': 37, 'BATCH_SIZE': 32},
        0: {'PRETRAIN': False, 'PRETRAIN_NUM_WORDS': 0, 'BATCH_SIZE': 32},
    }

    # Create parent directory for the checkpoints of this curriculum run
    timestr = time.strftime("%Y%m%d-%H%M%S")

    # Start curriculum learning loop
    best_ckpt = None
    for n, num_words in enumerate(PRETRAIN_NUM_WORDS):
        train_over = False
        while not train_over:
            cfg = args.copy()
            cfg.update(PRETRAIN_CONFIG[num_words])
            try:
                best_ckpt = train_step(args=cfg, timestr=timestr, best_ckpt=best_ckpt)
                train_over = True
            except RuntimeError as e:
                # On a runtime failure (typically CUDA out-of-memory), halve
                # the batch size for this curriculum stage and retry.
                print(f"Runtime Error... Trying Again: \n{e}")
                PRETRAIN_CONFIG[num_words]['BATCH_SIZE'] //= 2
                torch.cuda.empty_cache()
                gc.collect()


if __name__ == '__main__':
    np.random.seed(args["SEED"])
    torch.manual_seed(args["SEED"])
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    curriculum(args)
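
# Note (added): the Trainer/ModelCheckpoint keyword arguments used above
# (checkpoint_callback=..., resume_from_checkpoint=..., period=...) follow the
# pytorch-lightning 1.x API; later releases renamed or removed them, so pin the
# library version accordingly.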
|
[
"pytorch_lightning.Trainer",
"numpy.random.seed",
"utils.decoders.ctc_search_decode",
"time.strftime",
"gc.collect",
"torch.utils.data.DataLoader",
"data.lrs2_dataset.LRS2Pretrain",
"utils.metrics.compute_wer",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"pytorch_lightning.loggers.NeptuneLogger",
"pytorch_lightning.callbacks.ModelCheckpoint",
"torch.manual_seed",
"utils.decoders.ctc_greedy_decode",
"torch.cuda.is_available",
"torch.nn.CTCLoss",
"utils.metrics.compute_cer",
"data.lrs2_dataset.LRS2Main",
"config_modified.args.copy",
"torch.backends.cudnn.flags",
"torch.cuda.empty_cache"
] |
[((9879, 10025), 'pytorch_lightning.loggers.NeptuneLogger', 'pl_loggers.NeptuneLogger', ([], {'project_name': '"""benso/deep-avsr"""', 'experiment_name': 'f"""video_only_curriculum"""', 'params': 'args', 'tags': "{'start_date': timestr}"}), "(project_name='benso/deep-avsr', experiment_name=\n f'video_only_curriculum', params=args, tags={'start_date': timestr})\n", (9903, 10025), True, 'import pytorch_lightning.loggers as pl_loggers\n'), ((10083, 10245), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl_callbacks.ModelCheckpoint', ([], {'filename': "(args['NUM_WORDS'] + '/{epoch:02d}-{val_wer:.2f}')", 'save_weights_only': '(True)', 'save_top_k': '(3)', 'monitor': '"""val_wer"""', 'period': '(1)'}), "(filename=args['NUM_WORDS'] +\n '/{epoch:02d}-{val_wer:.2f}', save_weights_only=True, save_top_k=3,\n monitor='val_wer', period=1)\n", (10111, 10245), True, 'import pytorch_lightning.callbacks as pl_callbacks\n'), ((10299, 10500), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'logger': 'logger', 'checkpoint_callback': 'model_checkpoint', 'gpus': '(2)', 'auto_select_gpus': '(False)', 'max_epochs': "args['NUM_STEPS']", 'accelerator': "args['ACCELERATOR']", 'resume_from_checkpoint': 'best_ckpt'}), "(logger=logger, checkpoint_callback=model_checkpoint, gpus=2,\n auto_select_gpus=False, max_epochs=args['NUM_STEPS'], accelerator=args[\n 'ACCELERATOR'], resume_from_checkpoint=best_ckpt)\n", (10309, 10500), True, 'import pytorch_lightning as pl\n'), ((11969, 11999), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (11982, 11999), False, 'import time\n'), ((12651, 12679), 'numpy.random.seed', 'np.random.seed', (["args['SEED']"], {}), "(args['SEED'])\n", (12665, 12679), True, 'import numpy as np\n'), ((12684, 12715), 'torch.manual_seed', 'torch.manual_seed', (["args['SEED']"], {}), "(args['SEED'])\n", (12701, 12715), False, 'import torch\n'), ((865, 890), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (888, 890), False, 'import torch\n'), ((2777, 2894), 'torch.utils.data.DataLoader', 'DataLoader', (['self.trainData'], {'batch_size': "self.data_cfg['BATCH_SIZE']", 'collate_fn': 'collate_fn', 'shuffle': '(True)'}), "(self.trainData, batch_size=self.data_cfg['BATCH_SIZE'],\n collate_fn=collate_fn, shuffle=True, **kwargs)\n", (2787, 2894), False, 'from torch.utils.data import DataLoader\n'), ((3227, 3343), 'torch.utils.data.DataLoader', 'DataLoader', (['self.valData'], {'batch_size': "self.data_cfg['BATCH_SIZE']", 'collate_fn': 'collate_fn', 'shuffle': '(True)'}), "(self.valData, batch_size=self.data_cfg['BATCH_SIZE'], collate_fn\n =collate_fn, shuffle=True, **kwargs)\n", (3237, 3343), False, 'from torch.utils.data import DataLoader\n'), ((3699, 3739), 'torch.nn.CTCLoss', 'nn.CTCLoss', ([], {'blank': '(0)', 'zero_infinity': '(False)'}), '(blank=0, zero_infinity=False)\n', (3709, 3739), True, 'import torch.nn as nn\n'), ((4784, 4861), 'utils.metrics.compute_cer', 'compute_cer', (['predictionBatch', 'targetBatch', 'predictionLenBatch', 'targetLenBatch'], {}), '(predictionBatch, targetBatch, predictionLenBatch, targetLenBatch)\n', (4795, 4861), False, 'from utils.metrics import compute_cer, compute_wer\n'), ((4986, 5091), 'utils.metrics.compute_wer', 'compute_wer', (['predictionBatch', 'targetBatch', 'predictionLenBatch', 'targetLenBatch', "trainParams['spaceIx']"], {}), "(predictionBatch, targetBatch, predictionLenBatch,\n targetLenBatch, trainParams['spaceIx'])\n", (4997, 5091), False, 'from utils.metrics import compute_cer, compute_wer\n'), 
((7079, 7156), 'utils.metrics.compute_cer', 'compute_cer', (['predictionBatch', 'targetBatch', 'predictionLenBatch', 'targetLenBatch'], {}), '(predictionBatch, targetBatch, predictionLenBatch, targetLenBatch)\n', (7090, 7156), False, 'from utils.metrics import compute_cer, compute_wer\n'), ((7265, 7369), 'utils.metrics.compute_wer', 'compute_wer', (['predictionBatch', 'targetBatch', 'predictionLenBatch', 'targetLenBatch', "evalParams['spaceIx']"], {}), "(predictionBatch, targetBatch, predictionLenBatch,\n targetLenBatch, evalParams['spaceIx'])\n", (7276, 7369), False, 'from utils.metrics import compute_cer, compute_wer\n'), ((7940, 8225), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': "self.train_cfg['LR_SCHEDULER_FACTOR']", 'patience': "self.train_cfg['LR_SCHEDULER_WAIT']", 'threshold': "self.train_cfg['LR_SCHEDULER_THRESH']", 'threshold_mode': '"""abs"""', 'min_lr': "self.train_cfg['FINAL_LR']", 'verbose': '(True)'}), "(optimizer, mode='min', factor=self.\n train_cfg['LR_SCHEDULER_FACTOR'], patience=self.train_cfg[\n 'LR_SCHEDULER_WAIT'], threshold=self.train_cfg['LR_SCHEDULER_THRESH'],\n threshold_mode='abs', min_lr=self.train_cfg['FINAL_LR'], verbose=True)\n", (7976, 8225), True, 'import torch.optim as optim\n'), ((1039, 1221), 'data.lrs2_dataset.LRS2Pretrain', 'LRS2Pretrain', (['"""pretrain"""', "self.data_cfg['DATA_DIRECTORY']", "self.data_cfg['PRETRAIN_NUM_WORDS']", "self.data_cfg['CHAR_TO_INDEX']", "self.data_cfg['STEP_SIZE']", 'self.videoParams'], {}), "('pretrain', self.data_cfg['DATA_DIRECTORY'], self.data_cfg[\n 'PRETRAIN_NUM_WORDS'], self.data_cfg['CHAR_TO_INDEX'], self.data_cfg[\n 'STEP_SIZE'], self.videoParams)\n", (1051, 1221), False, 'from data.lrs2_dataset import LRS2Pretrain, LRS2Main\n'), ((1449, 1629), 'data.lrs2_dataset.LRS2Pretrain', 'LRS2Pretrain', (['"""preval"""', "self.data_cfg['DATA_DIRECTORY']", "self.data_cfg['PRETRAIN_NUM_WORDS']", "self.data_cfg['CHAR_TO_INDEX']", "self.data_cfg['STEP_SIZE']", 'self.videoParams'], {}), "('preval', self.data_cfg['DATA_DIRECTORY'], self.data_cfg[\n 'PRETRAIN_NUM_WORDS'], self.data_cfg['CHAR_TO_INDEX'], self.data_cfg[\n 'STEP_SIZE'], self.videoParams)\n", (1461, 1629), False, 'from data.lrs2_dataset import LRS2Pretrain, LRS2Main\n'), ((1863, 2041), 'data.lrs2_dataset.LRS2Main', 'LRS2Main', (['"""train"""', "self.data_cfg['DATA_DIRECTORY']", "self.data_cfg['MAIN_REQ_INPUT_LENGTH']", "self.data_cfg['CHAR_TO_INDEX']", "self.data_cfg['STEP_SIZE']", 'self.videoParams'], {}), "('train', self.data_cfg['DATA_DIRECTORY'], self.data_cfg[\n 'MAIN_REQ_INPUT_LENGTH'], self.data_cfg['CHAR_TO_INDEX'], self.data_cfg\n ['STEP_SIZE'], self.videoParams)\n", (1871, 2041), False, 'from data.lrs2_dataset import LRS2Pretrain, LRS2Main\n'), ((2249, 2425), 'data.lrs2_dataset.LRS2Main', 'LRS2Main', (['"""val"""', "self.data_cfg['DATA_DIRECTORY']", "self.data_cfg['MAIN_REQ_INPUT_LENGTH']", "self.data_cfg['CHAR_TO_INDEX']", "self.data_cfg['STEP_SIZE']", 'self.videoParams'], {}), "('val', self.data_cfg['DATA_DIRECTORY'], self.data_cfg[\n 'MAIN_REQ_INPUT_LENGTH'], self.data_cfg['CHAR_TO_INDEX'], self.data_cfg\n ['STEP_SIZE'], self.videoParams)\n", (2257, 2425), False, 'from data.lrs2_dataset import LRS2Pretrain, LRS2Main\n'), ((4350, 4391), 'torch.backends.cudnn.flags', 'torch.backends.cudnn.flags', ([], {'enabled': '(False)'}), '(enabled=False)\n', (4376, 4391), False, 'import torch\n'), ((5940, 5981), 'torch.backends.cudnn.flags', 'torch.backends.cudnn.flags', ([], 
{'enabled': '(False)'}), '(enabled=False)\n', (5966, 5981), False, 'import torch\n'), ((6198, 6264), 'utils.decoders.ctc_greedy_decode', 'ctc_greedy_decode', (['outputBatch', 'inputLenBatch', "evalParams['eosIx']"], {}), "(outputBatch, inputLenBatch, evalParams['eosIx'])\n", (6215, 6264), False, 'from utils.decoders import ctc_greedy_decode, ctc_search_decode\n'), ((12190, 12201), 'config_modified.args.copy', 'args.copy', ([], {}), '()\n', (12199, 12201), False, 'from config_modified import args\n'), ((12568, 12592), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (12590, 12592), False, 'import torch\n'), ((12605, 12617), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12615, 12617), False, 'import gc\n'), ((6504, 6648), 'utils.decoders.ctc_search_decode', 'ctc_search_decode', (['outputBatch', 'inputLenBatch', "evalParams['beamSearchParams']", "evalParams['spaceIx']", "evalParams['eosIx']", "evalParams['lm']"], {}), "(outputBatch, inputLenBatch, evalParams['beamSearchParams'\n ], evalParams['spaceIx'], evalParams['eosIx'], evalParams['lm'])\n", (6521, 6648), False, 'from utils.decoders import ctc_greedy_decode, ctc_search_decode\n')]
|
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from src.data.baseline_transformers import TransformsWrapper
from tqdm import tqdm

# Ignore warnings
import warnings
import torchvision as tv
import pickle


class FinalRNNMCSDataset(Dataset):
    def __init__(
            self,
            test_df: str,
            test_df_track_order_df,
            test_descriptors_df,
            root_dir,
            transform=None
    ):
        """For each test track, collect the indices of its images; the
        dataset then yields one descriptor sequence per track, working
        with indices only.
        """
        self.test_df = pd.read_csv(test_df)
        self.test_df_track_order_df = pd.read_csv(test_df_track_order_df)
        self.test_descriptors_npy = np.load(test_descriptors_df)
        self.samples = list()
        # one sample per track; generating this takes about 10 minutes every run
        print('Generating dataset for evaluation')
        for track_id in tqdm(self.test_df_track_order_df.track_id.values, total=len(self.test_df_track_order_df)):
            track_image_idxs = self.test_df[self.test_df.track_id == track_id].index.values
            self.samples.append(track_image_idxs)
        self.root_dir = root_dir
        self.transform = transform
        print(f"Triplets count for final eval is {len(self.samples)}")
        # Triplets count for train was 57570 when only one negative sample was
        # used; now it is 1151400 (20 times more).
        # with open('train_samples.pkl', 'wb') as outf:
        #     pickle.dump(self.samples, outf)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        pos_images_idxs = self.samples[idx]
        # todo: maybe add some scaling on all given descriptors
        pos_seq = self.test_descriptors_npy[pos_images_idxs]
        pos_seq = [torch.from_numpy(pos_img) for pos_img in pos_seq]
        pos_seq = torch.stack(pos_seq, dim=0, out=None)
        sample = {'img_seq': pos_seq}
        return sample
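
# Added usage sketch (file names are illustrative): wrap the dataset for
# batched evaluation; batch_size=1 because tracks yield variable-length
# descriptor sequences and no padding collate_fn is defined here.
# eval_ds = FinalRNNMCSDataset('test_df.csv', 'test_track_order.csv',
#                              'test_descriptors.npy', root_dir='data/')
# eval_loader = DataLoader(eval_ds, batch_size=1)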
class RNNMCSDataset(Dataset):
def __init__(
self,
train_df: str,
train_df_descriptors,
train_gt_df,
train_gt_descriptors,
train_df_track_order_df,
root_dir,
is_val=False,
transform=None
):
""" Plan
1. for each track presented form 1 triplet with each of gt images vs one random negative track
and lets work with indices only
"""
self.train_df = pd.read_csv(train_df)
self.train_df_descriptors = np.load(train_df_descriptors)
self.train_gt_df = pd.read_csv(train_gt_df)
self.train_gt_descriptors = np.load(train_gt_descriptors)
self.train_df_track_order_df = pd.read_csv(train_df_track_order_df)
self.train_df_track_order_df = pd.merge(self.train_df_track_order_df,
self.train_df[['person_id', 'is_val']].drop_duplicates(),
on='person_id',
how='left') # [is_val == False]
self.train_df_track_order_df = self.train_df_track_order_df[self.train_df_track_order_df.is_val == is_val]
self.samples = list()
        # one triplet per (anchor gt image, positive track, negative track)
# this takes 10 minutes every run
if is_val:
n_neg_samples = 1
print(f"Generating samples for {'dev' if is_val else 'train'}")
for id, (track_id, person_id) in tqdm(self.train_df_track_order_df[['track_id', 'person_id']].iterrows(), total=len(self.train_df_track_order_df)):
not_this_person_order_df = self.train_df_track_order_df[self.train_df_track_order_df.person_id != person_id]
track_image_idxs = self.train_df[self.train_df.track_id == track_id].index.values
track_anchors_df = self.train_gt_df[self.train_gt_df.person_id == person_id]
for anchor_idx in track_anchors_df.index.values:
for not_this_person_sampled_track_id in tqdm(not_this_person_order_df.sample(n_neg_samples).track_id.values):
not_this_person_sampled_track_image_idxs = self.train_df[
self.train_df.track_id == not_this_person_sampled_track_id].index.values
self.samples.append((anchor_idx, track_image_idxs, not_this_person_sampled_track_image_idxs))
# if id > 10:
# break
else:
with open('train_samples.pkl', 'rb') as inf:
self.samples = pickle.loads(inf.read())
self.root_dir = root_dir
self.transform = transform
print(f"Triplets count for {'dev' if is_val else 'train'} is {len(self.samples)}")
# Was Triplets count for train was 57570 when only one negative sample was used
# now it s 1151400 (20 times more)
# with open('train_samples.pkl', 'wb') as outf:
# pickle.dump(self.samples, outf)
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
gt_image_idx, pos_images_idxs, neg_images_idxs = self.samples[idx]
# todo: maybe add some scaling on all given descriptors
gt_descriptor = self.train_gt_descriptors[gt_image_idx]
pos_seq = self.train_df_descriptors[pos_images_idxs]
neg_seq = self.train_df_descriptors[neg_images_idxs]
gt_descriptor = torch.from_numpy(gt_descriptor)
pos_seq = [torch.from_numpy(pos_img) for pos_img in pos_seq]
neg_seq = [torch.from_numpy(neg_img) for neg_img in neg_seq]
pos_seq = torch.stack(pos_seq, dim=0, out=None)
neg_seq = torch.stack(neg_seq, dim=0, out=None)
sample = {'gt_image': gt_descriptor,
'pos_seq': pos_seq,
'neg_seq': neg_seq}
return sample
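# A minimal consumption sketch (illustrative; 'model_gt', 'model_seq' and the
# DataLoader settings are assumptions, not part of this file). Sequence
# lengths vary per track, so use batch_size=1 or a padding collate_fn:
#   loader = DataLoader(RNNMCSDataset(...), batch_size=1, shuffle=True)
#   criterion = torch.nn.TripletMarginLoss(margin=1.0)
#   for batch in loader:
#       anchor = model_gt(batch['gt_image'])  # (1, D) anchor embedding
#       pos = model_seq(batch['pos_seq'])     # sequence -> (1, D) embedding
#       neg = model_seq(batch['neg_seq'])     # sequence -> (1, D) embedding
#       loss = criterion(anchor, pos, neg)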
class FakeRNNMCSDataset(Dataset):
def __init__(
self,
train_df: str,
train_df_descriptors,
train_gt_df,
train_gt_descriptors,
train_df_track_order_df,
root_dir,
is_val=False,
transform=None
):
seq_len = 5
self.samples = [[np.random.randn(512).astype(np.float32),
[np.random.randn(512).astype(np.float32) for i in range(seq_len)],
[np.random.randn(512).astype(np.float32) for i in range(seq_len)]]
for _ in range(100)]
# self.transform = transform
# self.tw = TransformsWrapper(transform)
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
track_image, pos_seq, neg_seq = self.samples[idx]
# track_image = self.transform(track_image)
track_image = torch.from_numpy(track_image)
pos_seq = [torch.from_numpy(pos_img) for pos_img in pos_seq]
neg_seq = [torch.from_numpy(neg_img) for neg_img in neg_seq]
pos_seq = torch.stack(pos_seq, dim=0, out=None)
neg_seq = torch.stack(neg_seq, dim=0, out=None)
sample = {'gt_image': track_image, 'pos_seq': pos_seq, 'neg_seq': neg_seq}
return sample
# we are going to do train dataset and test dataset separately
def check_data_iteration(iterate_data=False):
is_val = False
# U may use MCSDataset for the training
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
preprocessing = tv.transforms.Compose([
tv.transforms.ToPILImage(),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=MEAN, std=STD),
])
dataset = RNNMCSDataset(
train_df="../../data/raw/train_df.csv",
train_df_descriptors="../../data/raw/train_df_descriptors.npy",
train_gt_df="../../data/raw/train_gt_df.csv",
train_gt_descriptors="../../data/raw/train_gt_descriptors.npy",
train_df_track_order_df="../../data/raw/train_df_track_order_df.csv",
root_dir='../../data/raw/data',
is_val=False,
transform=None
)
print(f"Total triples in {'test' if is_val else 'train'} dataset is {len(dataset)}")
if iterate_data:
for i in range(len(dataset)):
sample = dataset[i]
# print(sample['track_image'])
print(i, sample['gt_image'].size(), sample['pos_seq'].size(), sample['neg_seq'].size())
# if i == 3:
# break
if __name__ == '__main__':
# example usage
# python -i read_dataset.py check_data_iteration
check_data_iteration(iterate_data=True)
|
[
"numpy.load",
"torch.stack",
"numpy.random.randn",
"pandas.read_csv",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Normalize",
"torch.from_numpy"
] |
[((851, 871), 'pandas.read_csv', 'pd.read_csv', (['test_df'], {}), '(test_df)\n', (862, 871), True, 'import pandas as pd\n'), ((910, 945), 'pandas.read_csv', 'pd.read_csv', (['test_df_track_order_df'], {}), '(test_df_track_order_df)\n', (921, 945), True, 'import pandas as pd\n'), ((982, 1010), 'numpy.load', 'np.load', (['test_descriptors_df'], {}), '(test_descriptors_df)\n', (989, 1010), True, 'import numpy as np\n'), ((2156, 2193), 'torch.stack', 'torch.stack', (['pos_seq'], {'dim': '(0)', 'out': 'None'}), '(pos_seq, dim=0, out=None)\n', (2167, 2193), False, 'import torch\n'), ((2767, 2788), 'pandas.read_csv', 'pd.read_csv', (['train_df'], {}), '(train_df)\n', (2778, 2788), True, 'import pandas as pd\n'), ((2825, 2854), 'numpy.load', 'np.load', (['train_df_descriptors'], {}), '(train_df_descriptors)\n', (2832, 2854), True, 'import numpy as np\n'), ((2882, 2906), 'pandas.read_csv', 'pd.read_csv', (['train_gt_df'], {}), '(train_gt_df)\n', (2893, 2906), True, 'import pandas as pd\n'), ((2943, 2972), 'numpy.load', 'np.load', (['train_gt_descriptors'], {}), '(train_gt_descriptors)\n', (2950, 2972), True, 'import numpy as np\n'), ((3012, 3048), 'pandas.read_csv', 'pd.read_csv', (['train_df_track_order_df'], {}), '(train_df_track_order_df)\n', (3023, 3048), True, 'import pandas as pd\n'), ((5727, 5758), 'torch.from_numpy', 'torch.from_numpy', (['gt_descriptor'], {}), '(gt_descriptor)\n', (5743, 5758), False, 'import torch\n'), ((5917, 5954), 'torch.stack', 'torch.stack', (['pos_seq'], {'dim': '(0)', 'out': 'None'}), '(pos_seq, dim=0, out=None)\n', (5928, 5954), False, 'import torch\n'), ((5973, 6010), 'torch.stack', 'torch.stack', (['neg_seq'], {'dim': '(0)', 'out': 'None'}), '(neg_seq, dim=0, out=None)\n', (5984, 6010), False, 'import torch\n'), ((7092, 7121), 'torch.from_numpy', 'torch.from_numpy', (['track_image'], {}), '(track_image)\n', (7108, 7121), False, 'import torch\n'), ((7279, 7316), 'torch.stack', 'torch.stack', (['pos_seq'], {'dim': '(0)', 'out': 'None'}), '(pos_seq, dim=0, out=None)\n', (7290, 7316), False, 'import torch\n'), ((7335, 7372), 'torch.stack', 'torch.stack', (['neg_seq'], {'dim': '(0)', 'out': 'None'}), '(neg_seq, dim=0, out=None)\n', (7346, 7372), False, 'import torch\n'), ((2087, 2112), 'torch.from_numpy', 'torch.from_numpy', (['pos_img'], {}), '(pos_img)\n', (2103, 2112), False, 'import torch\n'), ((5779, 5804), 'torch.from_numpy', 'torch.from_numpy', (['pos_img'], {}), '(pos_img)\n', (5795, 5804), False, 'import torch\n'), ((5848, 5873), 'torch.from_numpy', 'torch.from_numpy', (['neg_img'], {}), '(neg_img)\n', (5864, 5873), False, 'import torch\n'), ((7141, 7166), 'torch.from_numpy', 'torch.from_numpy', (['pos_img'], {}), '(pos_img)\n', (7157, 7166), False, 'import torch\n'), ((7210, 7235), 'torch.from_numpy', 'torch.from_numpy', (['neg_img'], {}), '(neg_img)\n', (7226, 7235), False, 'import torch\n'), ((7772, 7798), 'torchvision.transforms.ToPILImage', 'tv.transforms.ToPILImage', ([], {}), '()\n', (7796, 7798), True, 'import torchvision as tv\n'), ((7808, 7832), 'torchvision.transforms.ToTensor', 'tv.transforms.ToTensor', ([], {}), '()\n', (7830, 7832), True, 'import torchvision as tv\n'), ((7842, 7885), 'torchvision.transforms.Normalize', 'tv.transforms.Normalize', ([], {'mean': 'MEAN', 'std': 'STD'}), '(mean=MEAN, std=STD)\n', (7865, 7885), True, 'import torchvision as tv\n'), ((6513, 6533), 'numpy.random.randn', 'np.random.randn', (['(512)'], {}), '(512)\n', (6528, 6533), True, 'import numpy as np\n'), ((6580, 6600), 'numpy.random.randn', 'np.random.randn', 
(['(512)'], {}), '(512)\n', (6595, 6600), True, 'import numpy as np\n'), ((6672, 6692), 'numpy.random.randn', 'np.random.randn', (['(512)'], {}), '(512)\n', (6687, 6692), True, 'import numpy as np\n')]
|
"""
Name: <NAME>
Class: K63K2
MSSV: 18020116
You should understand the code you write.
"""
import numpy as np
import cv2
import argparse
from matplotlib import pyplot as plt
def q_0(input_file, output_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
cv2.imshow('Test img', img)
cv2.waitKey(5000)
cv2.imwrite(output_file, img)
def q_1(input_file, output_file):
"""
Convert the image to gray channel of the input image.
"""
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
cv2.imshow('Color', img)
R, G, B = img[:, :, 2], img[:, :, 1], img[:, :, 0]
    # Convert image to gray channel
gray = 0.299 * R + 0.587 * G + 0.114 * B
img_gray = gray.astype(np.uint8)
cv2.imwrite(output_file, img_gray)
cv2.imshow('Gray', img_gray)
cv2.waitKey(0)
# Normalized histogram
def normalizedHistogram(img):
(height, width) = img.shape[:2]
    # Bin counts can exceed 255, so a uint8 array (np.zeros((256,), np.uint8))
    # silently overflows; a plain Python list of ints (or dtype=int) is safe.
h = [0] * 256
for i in range(height):
for j in range(width):
h[img[i, j]] += 1
return np.array(h) / (height * width)
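# Illustrative alternative (not part of the original assignment): the same
# normalized histogram can be computed without explicit loops. np.bincount
# counts every intensity value of a uint8 image in a single pass.
def normalizedHistogramVectorized(img):
    counts = np.bincount(img.ravel(), minlength=256)
    return counts / img.size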
# Computes the cumulative sum (i.e. the CDF) of a normalized histogram
def cumulativeSum(normalized_hist):
    cumulative_sum = np.zeros_like(normalized_hist, np.float64)
    hist_length = len(normalized_hist)
    for i in range(hist_length):
        cumulative_sum[i] = sum(normalized_hist[:i+1])
    return cumulative_sum
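# Note: the loop above recomputes each prefix sum from scratch (O(n^2) overall);
# np.cumsum(normalized_hist) yields the same array in a single pass.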
def q_2(input_file, output_file):
"""
Performs a histogram equalization on the input image.
"""
img = cv2.imread(input_file, cv2.IMREAD_GRAYSCALE)
(height, width) = img.shape[:2]
# Analysing original image and original histogram
# original_hist = cv2.calcHist([img], [0], None, [256], [0, 256]) # Mask: None, value from 0 - 255
# plt.figure()
# plt.axis("off")
# plt.imshow(img, cmap='gray')
# plt.figure()
# plt.title('Histogram')
# plt.xlabel('Bins')
# plt.ylabel('Number of pixel')
# plt.plot(original_hist)
# plt.xlim([0, 256])
# plt.show()
# Histogram equalization
    norm_hist = normalizedHistogram(img)
    cumulative_sum = cumulativeSum(norm_hist)
new_hist = np.array(np.rint(255 * cumulative_sum))
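    # The map above sends each gray level g to round(255 * CDF(g)), which
    # spreads frequently occurring intensities over the full [0, 255] range.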
# Convert image
img_eq = np.zeros_like(img)
for i in range(height):
for j in range(width):
img_eq[i, j] = new_hist[img[i, j]]
# Check
hist_test = cv2.calcHist([img_eq], [0], None, [256], [0, 256]) # Mask: None, value from 0 - 255
plt.figure()
plt.axis("off")
plt.imshow(img_eq, cmap='gray')
plt.figure()
plt.title('Histogram')
plt.xlabel('Bins')
plt.ylabel('Number of pixel')
plt.plot(hist_test)
plt.xlim([0, 256])
plt.show()
cv2.imwrite(output_file, img_eq)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", "-i", type=str, help="Path to input image")
parser.add_argument("--output_file", "-o", type=str, help="Path to output image")
parser.add_argument("--question", "-q", type=int, default=0, help="Question number")
args = parser.parse_args()
q_number = args.question
if q_number == 1:
q_1(input_file=args.input_file, output_file=args.output_file)
elif q_number == 2:
q_2(input_file=args.input_file, output_file=args.output_file)
else:
q_0(input_file=args.input_file, output_file=args.output_file)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"cv2.waitKey",
"cv2.imwrite",
"cv2.calcHist",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"cv2.imread",
"matplotlib.pyplot.figure",
"numpy.rint",
"numpy.array",
"matplotlib.pyplot.ylabel",
"cv2.imshow",
"matplotlib.pyplot.xlabel"
] |
[((224, 264), 'cv2.imread', 'cv2.imread', (['input_file', 'cv2.IMREAD_COLOR'], {}), '(input_file, cv2.IMREAD_COLOR)\n', (234, 264), False, 'import cv2\n'), ((269, 296), 'cv2.imshow', 'cv2.imshow', (['"""Test img"""', 'img'], {}), "('Test img', img)\n", (279, 296), False, 'import cv2\n'), ((301, 318), 'cv2.waitKey', 'cv2.waitKey', (['(5000)'], {}), '(5000)\n', (312, 318), False, 'import cv2\n'), ((324, 353), 'cv2.imwrite', 'cv2.imwrite', (['output_file', 'img'], {}), '(output_file, img)\n', (335, 353), False, 'import cv2\n'), ((474, 514), 'cv2.imread', 'cv2.imread', (['input_file', 'cv2.IMREAD_COLOR'], {}), '(input_file, cv2.IMREAD_COLOR)\n', (484, 514), False, 'import cv2\n'), ((519, 543), 'cv2.imshow', 'cv2.imshow', (['"""Color"""', 'img'], {}), "('Color', img)\n", (529, 543), False, 'import cv2\n'), ((728, 762), 'cv2.imwrite', 'cv2.imwrite', (['output_file', 'img_gray'], {}), '(output_file, img_gray)\n', (739, 762), False, 'import cv2\n'), ((767, 795), 'cv2.imshow', 'cv2.imshow', (['"""Gray"""', 'img_gray'], {}), "('Gray', img_gray)\n", (777, 795), False, 'import cv2\n'), ((800, 814), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (811, 814), False, 'import cv2\n'), ((1327, 1369), 'numpy.zeros_like', 'np.zeros_like', (['normalized_hist', 'np.float64'], {}), '(normalized_hist, np.float64)\n', (1340, 1369), True, 'import numpy as np\n'), ((1644, 1688), 'cv2.imread', 'cv2.imread', (['input_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(input_file, cv2.IMREAD_GRAYSCALE)\n', (1654, 1688), False, 'import cv2\n'), ((2372, 2390), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (2385, 2390), True, 'import numpy as np\n'), ((2551, 2601), 'cv2.calcHist', 'cv2.calcHist', (['[img_eq]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([img_eq], [0], None, [256], [0, 256])\n', (2563, 2601), False, 'import cv2\n'), ((2647, 2659), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2657, 2659), True, 'from matplotlib import pyplot as plt\n'), ((2664, 2679), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2672, 2679), True, 'from matplotlib import pyplot as plt\n'), ((2684, 2715), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_eq'], {'cmap': '"""gray"""'}), "(img_eq, cmap='gray')\n", (2694, 2715), True, 'from matplotlib import pyplot as plt\n'), ((2720, 2732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2730, 2732), True, 'from matplotlib import pyplot as plt\n'), ((2737, 2759), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram"""'], {}), "('Histogram')\n", (2746, 2759), True, 'from matplotlib import pyplot as plt\n'), ((2764, 2782), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (2774, 2782), True, 'from matplotlib import pyplot as plt\n'), ((2787, 2816), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of pixel"""'], {}), "('Number of pixel')\n", (2797, 2816), True, 'from matplotlib import pyplot as plt\n'), ((2821, 2840), 'matplotlib.pyplot.plot', 'plt.plot', (['hist_test'], {}), '(hist_test)\n', (2829, 2840), True, 'from matplotlib import pyplot as plt\n'), ((2845, 2863), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 256]'], {}), '([0, 256])\n', (2853, 2863), True, 'from matplotlib import pyplot as plt\n'), ((2868, 2878), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2876, 2878), True, 'from matplotlib import pyplot as plt\n'), ((2884, 2916), 'cv2.imwrite', 'cv2.imwrite', (['output_file', 'img_eq'], {}), '(output_file, img_eq)\n', (2895, 2916), False, 'import cv2\n'), ((2959, 2984), 
'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2982, 2984), False, 'import argparse\n'), ((1177, 1188), 'numpy.array', 'np.array', (['h'], {}), '(h)\n', (1185, 1188), True, 'import numpy as np\n'), ((2302, 2331), 'numpy.rint', 'np.rint', (['(255 * cumulative_sum)'], {}), '(255 * cumulative_sum)\n', (2309, 2331), True, 'import numpy as np\n')]
|
"""
echopype data model inherited from based class Process for EK80 data.
"""
import os
import datetime as dt
import numpy as np
import xarray as xr
from scipy import signal
from ..utils import uwa
from .processbase import ProcessBase
class ProcessEK80(ProcessBase):
"""Class for manipulating EK80 echo data already converted to netCDF.
"""
def __init__(self, file_path=""):
ProcessBase.__init__(self, file_path)
self._acidity = None
self._salinity = None
self._temperature = None
self._pressure = None
self._ch_ids = None
self._tau_effective = None
self.ytx = []
self.backscatter_compressed = []
self._sound_speed = self.calc_sound_speed()
self._salinity = self.get_salinity()
self._temperature = self.get_temperature()
self._pressure = self.get_pressure()
self._sample_thickness = self.calc_sample_thickness()
self._seawater_absorption = self.calc_seawater_absorption()
self._range = self.calc_range()
@property
def ch_ids(self):
if self._ch_ids is None:
with self._open_dataset(self.file_path, group="Beam") as ds_beam:
self._ch_ids = ds_beam.channel_id.data
return self._ch_ids
@property
def tau_effective(self):
return self._tau_effective
def get_salinity(self):
if self._salinity is None:
with self._open_dataset(self.file_path, group="Environment") as ds_env:
return ds_env.salinity
def get_temperature(self, path=''):
path = path if path else self.file_path
if self._temperature is None:
with self._open_dataset(path, group="Environment") as ds_env:
return ds_env.temperature
def get_pressure(self):
if self._pressure is None:
with self._open_dataset(self.file_path, group="Environment") as ds_env:
return ds_env.depth
def calc_sound_speed(self, src='file'):
"""gets sound speed [m/s] using parameters stored in the .nc file.
Will use a custom path if one is provided
"""
if src == 'file':
with self._open_dataset(self.file_path, group="Environment") as ds_env:
return ds_env.sound_speed_indicative
elif src == 'user':
ss = uwa.calc_sound_speed(salinity=self.salinity,
temperature=self.temperature,
pressure=self.pressure)
return ss * np.ones(self.sound_speed.size)
        else:
            raise ValueError('Not sure how to update sound speed!')
def calc_seawater_absorption(self, src='user', path=''):
"""Returns the seawater absorption
Parameters
----------
src : str
'file' will return the seawater absoption recorded in the .nc file
'user' will calculate the seawater absorption. Default (Francois and Garrison, 1982).
Returns
-------
Seawater absorption value
"""
if src == 'user':
path = path if path else self.file_path
with self._open_dataset(path, group='Beam') as ds_beam:
try:
f0 = ds_beam.frequency_start
f1 = ds_beam.frequency_end
f = (f0 + f1) / 2
except AttributeError:
f = ds_beam.frequency
sea_abs = uwa.calc_seawater_absorption(f,
salinity=self.salinity,
temperature=self.temperature,
pressure=self.pressure,
formula_source='FG')
        else:
            raise ValueError('Not sure how to update seawater absorption!')
return sea_abs
def calc_sample_thickness(self, path=''):
"""gets sample thickness using parameters stored in the .nc file.
Will use a custom path if one is provided
"""
path = path if path else self.file_path
with self._open_dataset(path, group="Beam") as ds_beam:
sth = self.sound_speed * ds_beam.sample_interval / 2 # sample thickness
return sth
def calc_range(self, range_bins=None, path=''):
"""Calculates range [m] using parameters stored in the .nc file.
Will use a custom path if one is provided
"""
st = self.calc_sample_thickness(path) if path else self.sample_thickness
path = path if path else self.file_path
with self._open_dataset(path, group="Beam") as ds_beam:
if range_bins:
range_bin = np.arange(range_bins)
range_bin = xr.DataArray(range_bin, coords=[('range_bin', range_bin)])
else:
range_bin = ds_beam.range_bin
range_meter = range_bin * st - \
ds_beam.transmit_duration_nominal * self.sound_speed / 2 # DataArray [frequency x range_bin]
range_meter = range_meter.where(range_meter > 0, other=0).transpose()
return range_meter
def calc_transmit_signal(self):
"""Generate transmit signal as replica for pulse compression.
"""
def chirp_linear(t, f0, f1, tau):
beta = (f1 - f0) * (tau ** -1)
return np.cos(2 * np.pi * (beta / 2 * (t ** 2) + f0 * t))
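        # The phase above integrates an instantaneous frequency
        # f(t) = f0 + beta * t, i.e. a linear sweep from f0 to f1 over the
        # pulse duration tau.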
# Retrieve filter coefficients
with self._open_dataset(self.file_path, group="Vendor") as ds_fil, \
self._open_dataset(self.file_path, group="Beam") as ds_beam:
# Get various parameters
Ztrd = 75 # Transducer quadrant nominal impedance [Ohms] (Supplied by Simrad)
delta = 1 / 1.5e6 # Hard-coded EK80 sample interval
tau = ds_beam.transmit_duration_nominal.data
txpower = ds_beam.transmit_power.data
f0 = ds_beam.frequency_start.data
f1 = ds_beam.frequency_end.data
slope = ds_beam.slope[:, 0].data # Use slope of first ping
amp = np.sqrt((txpower / 4) * (2 * Ztrd))
# Create transmit signal
ytx = []
for ch in range(ds_beam.frequency.size):
t = np.arange(0, tau[ch], delta)
nt = len(t)
nwtx = (int(2 * np.floor(slope[ch] * nt)))
wtx_tmp = np.hanning(nwtx)
nwtxh = (int(np.round(nwtx / 2)))
wtx = np.concatenate([wtx_tmp[0:nwtxh], np.ones((nt - nwtx)), wtx_tmp[nwtxh:]])
y_tmp = amp[ch] * chirp_linear(t, f0[ch], f1[ch], tau[ch]) * wtx
# The transmit signal must have a max amplitude of 1
y = (y_tmp / np.max(np.abs(y_tmp)))
# filter and decimation
wbt_fil = ds_fil[self.ch_ids[ch] + "_WBT_filter"].data
pc_fil = ds_fil[self.ch_ids[ch] + "_PC_filter"].data
# if saved as netCDF4, convert compound complex datatype to complex64
if wbt_fil.ndim == 1:
wbt_fil = np.array([complex(n[0], n[1]) for n in wbt_fil], dtype='complex64')
pc_fil = np.array([complex(n[0], n[1]) for n in pc_fil], dtype='complex64')
# Apply WBT filter and downsample
ytx_tmp = np.convolve(y, wbt_fil)
ytx_tmp = ytx_tmp[0::ds_fil.attrs[self.ch_ids[ch] + "_WBT_decimation"]]
# Apply PC filter and downsample
ytx_tmp = np.convolve(ytx_tmp, pc_fil)
ytx_tmp = ytx_tmp[0::ds_fil.attrs[self.ch_ids[ch] + "_PC_decimation"]]
ytx.append(ytx_tmp)
del nwtx, wtx_tmp, nwtxh, wtx, y_tmp, y, ytx_tmp
# TODO: rename ytx into something like 'transmit_signal' and
# also package the sampling interval together with the signal
self.ytx = ytx
def pulse_compression(self):
"""Pulse compression using transmit signal as replica.
"""
with self._open_dataset(self.file_path, group="Beam") as ds_beam:
sample_interval = ds_beam.sample_interval
backscatter = ds_beam.backscatter_r + ds_beam.backscatter_i * 1j # Construct complex backscatter
backscatter_compressed = []
tau_constants = []
# Loop over channels
for ch in range(ds_beam.frequency.size):
# tmp_x = np.fft.fft(backscatter[i].dropna('range_bin'))
# tmp_y = np.fft.fft(np.flipud(np.conj(ytx[i])))
# remove quadrants that are nans across all samples
tmp_b = backscatter[ch].dropna('range_bin', how='all')
# remove samples that are nans across all quadrants
tmp_b = tmp_b.dropna('quadrant', how='all')
# tmp_b = tmp_b[:, 0, :] # 1 ping
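                # Matched filtering: convolving with the time-reversed complex
                # conjugate of the transmit replica is equivalent to
                # cross-correlating with the replica itself.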
tmp_y = np.flipud(np.conj(self.ytx[ch]))
# Convolve tx signal with backscatter. atol=1e-7 between fft and direct convolution
                compressed = xr.apply_ufunc(lambda m: np.apply_along_axis(
                                                lambda v: signal.convolve(v, tmp_y), axis=2, arr=m),
tmp_b,
input_core_dims=[['range_bin']],
output_core_dims=[['range_bin']],
exclude_dims={'range_bin'}) / np.linalg.norm(self.ytx[ch]) ** 2
# Average across quadrants
backscatter_compressed.append(compressed)
# Effective pulse length
ptxa = np.square(np.abs(signal.convolve(self.ytx[ch], tmp_y, method='direct') /
np.linalg.norm(self.ytx[ch]) ** 2))
tau_constants.append(np.sum(ptxa) / (np.max(ptxa)))
self._tau_effective = np.array(tau_constants) * sample_interval
# Pad nans so that each channel has the same range_bin length
largest_range_bin = max([bc.shape[2] for bc in backscatter_compressed])
for i, ds in enumerate(backscatter_compressed):
pad_width = largest_range_bin - ds.shape[2]
backscatter_compressed[i] = xr.apply_ufunc(lambda x: np.pad(x, ((0,0), (0,0), (0,pad_width)),
constant_values=np.nan),
ds,
input_core_dims=[['range_bin']],
output_core_dims=[['range_bin']],
exclude_dims={'range_bin'})
self.backscatter_compressed = xr.concat(backscatter_compressed, dim='frequency')
def calibrate(self, mode='Sv', save=False, save_path=None, save_postfix=None):
"""Perform echo-integration to get volume backscattering strength (Sv)
or target strength (TS) from EK80 power data.
Parameters
-----------
mode : str
'Sv' for volume backscattering strength calibration (default)
'TS' for target strength calibration
save : bool, optional
whether to save calibrated output
default to ``False``
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv.nc default
save_postfix : str
Filename postfix, default to '_Sv' or '_TS'
"""
ds_beam = self._open_dataset(self.file_path, group="Beam")
# Check for cw data file
split = os.path.splitext(self.file_path)
cw_path = split[0] + '_cw' + split[1]
if save_postfix is None:
save_postfix = '_' + mode
if os.path.exists(cw_path):
self.calibrate_cw(mode, cw_path, save, save_path, save_postfix)
elif 'backscatter_i' not in ds_beam:
self.calibrate_cw(mode, self.file_path, save, save_path, save_postfix)
# Calibrate bb data
if 'backscatter_i' in ds_beam:
Ztrd = 75 # Transducer quadrant nominal impedance [Ohms] (Supplied by Simrad)
Rwbtrx = 1000 # Wideband transceiver impedance [Ohms] (Supplied by Simrad)
self.calc_transmit_signal() # Get transmit signal
self.pulse_compression() # Perform pulse compression
c = self.sound_speed
f_nominal = ds_beam.frequency
f_center = (ds_beam.frequency_start.data + ds_beam.frequency_end.data) / 2
psifc = ds_beam.equivalent_beam_angle + 20 * np.log10(f_nominal / f_center)
la2 = (c / f_center) ** 2
Sv = []
TS = []
            # Take the absolute value of the complex backscatter averaged across quadrants
prx = np.abs(np.mean(self.backscatter_compressed, axis=1))
prx = prx * prx / 2 * (np.abs(Rwbtrx + Ztrd) / Rwbtrx) ** 2 / np.abs(Ztrd)
# TODO Gfc should be gain interpolated at the center frequency
# Only 1 gain value is given provided per channel
Gfc = ds_beam.gain_correction
ranges = self.calc_range(range_bins=prx.shape[2])
ranges = ranges.where(ranges >= 1, other=1)
if mode == 'Sv':
Sv = (
10 * np.log10(prx) + 20 * np.log10(ranges) +
2 * self.seawater_absorption * ranges -
10 * np.log10(ds_beam.transmit_power * la2 * c / (32 * np.pi * np.pi)) -
2 * Gfc - 10 * np.log10(self.tau_effective) - psifc
)
if mode == 'TS':
TS = (
10 * np.log10(prx) + 40 * np.log10(ranges) +
2 * self.seawater_absorption * ranges -
10 * np.log10(ds_beam.transmit_power * la2 / (16 * np.pi * np.pi)) -
2 * Gfc
)
ds_beam.close() # Close opened dataset
# Save Sv calibrated data
if mode == 'Sv':
Sv.name = 'Sv'
Sv = Sv.to_dataset()
Sv['range'] = (('frequency', 'range_bin'), ranges)
self.Sv = Sv
if save:
self.Sv_path = self.validate_path(save_path, save_postfix)
print('%s saving calibrated Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
self._save_dataset(Sv, self.Sv_path, mode="w")
# Save TS calibrated data
elif mode == 'TS':
TS.name = 'TS'
TS = TS.to_dataset()
TS['range'] = (('frequency', 'range_bin'), ranges)
self.TS = TS
if save:
self.TS_path = self.validate_path(save_path, save_postfix)
print('%s saving calibrated TS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.TS_path))
self._save_dataset(self.TS, self.TS_path, mode="w")
def calibrate_TS(self, save=False, save_path=None, save_postfix=None):
self.calibrate(mode='TS', save=save, save_path=save_path, save_postfix=save_postfix)
def calibrate_cw(self, mode='Sv', file_path='', save=False, save_path=None, save_postfix=None):
"""Perform echo-integration to get volume backscattering strength (Sv) from EK80 power data.
Parameters
-----------
mode : str
'Sv' for volume backscattering strength (default)
'TS' for target strength
file_path : str
Path to CW data
save : bool, optional
whether to save calibrated Sv output
default to ``False``
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv.nc default
save_postfix : str
Filename postfix
"""
# Open data set for and Beam groups
if file_path and os.path.exists(file_path):
ds_beam = self._open_dataset(file_path, group="Beam")
else:
file_path = self.file_path
ds_beam = self._open_dataset(self.file_path, group="Beam")
# Derived params
wavelength = self.sound_speed / ds_beam.frequency # wavelength
# Retrieved params
backscatter_r = ds_beam['backscatter_r'].load()
range_meter = self.calc_range(path=file_path)
sea_abs = self.calc_seawater_absorption(path=file_path)
if mode == 'Sv':
# Calc gain
CSv = 10 * np.log10((ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 *
wavelength ** 2 * self.sound_speed * ds_beam.transmit_duration_nominal *
10 ** (ds_beam.equivalent_beam_angle / 10)) /
(32 * np.pi ** 2))
# Get TVG and absorption
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * sea_abs * range_meter
# Calibration and echo integration
Sv = backscatter_r + TVG + ABS - CSv - 2 * ds_beam.sa_correction
Sv.name = 'Sv'
Sv = Sv.to_dataset()
# Attach calculated range into data set
Sv['range'] = (('frequency', 'range_bin'), range_meter)
# Save calibrated data into the calling instance and
            # to a separate .nc file in the same directory as the data file
self.Sv = Sv
if save:
if save_postfix is None:
save_postfix = '_' + mode
self.Sv_path = self.validate_path(save_path, save_postfix, file_path)
print('%s saving calibrated Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
self._save_dataset(Sv, self.Sv_path, mode="w")
elif mode == 'TS':
CSp = 10 * np.log10((ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 *
wavelength ** 2) / (16 * np.pi ** 2))
TVG = np.real(40 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Calibration and echo integration
TS = backscatter_r + TVG + ABS - CSp
TS.name = 'TS'
TS = TS.to_dataset()
# Attach calculated range into data set
TS['range'] = (('frequency', 'range_bin'), range_meter)
# Save calibrated data into the calling instance and
            # to a separate .nc file in the same directory as the data file
self.TS = TS
if save:
self.TS_path = self.validate_path(save_path, save_postfix)
print('%s saving calibrated TS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.TS_path))
self._save_dataset(TS, self.TS_path, mode="w")
# Close opened resources
ds_beam.close()
|
[
"numpy.abs",
"numpy.sum",
"numpy.floor",
"numpy.ones",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"numpy.convolve",
"numpy.round",
"numpy.pad",
"os.path.exists",
"numpy.max",
"numpy.hanning",
"numpy.log10",
"datetime.datetime.now",
"numpy.conj",
"xarray.concat",
"numpy.cos",
"scipy.signal.convolve",
"numpy.array",
"os.path.splitext",
"xarray.DataArray",
"numpy.sqrt"
] |
[((11802, 11834), 'os.path.splitext', 'os.path.splitext', (['self.file_path'], {}), '(self.file_path)\n', (11818, 11834), False, 'import os\n'), ((11963, 11986), 'os.path.exists', 'os.path.exists', (['cw_path'], {}), '(cw_path)\n', (11977, 11986), False, 'import os\n'), ((5434, 5482), 'numpy.cos', 'np.cos', (['(2 * np.pi * (beta / 2 * t ** 2 + f0 * t))'], {}), '(2 * np.pi * (beta / 2 * t ** 2 + f0 * t))\n', (5440, 5482), True, 'import numpy as np\n'), ((6157, 6190), 'numpy.sqrt', 'np.sqrt', (['(txpower / 4 * (2 * Ztrd))'], {}), '(txpower / 4 * (2 * Ztrd))\n', (6164, 6190), True, 'import numpy as np\n'), ((10925, 10975), 'xarray.concat', 'xr.concat', (['backscatter_compressed'], {'dim': '"""frequency"""'}), "(backscatter_compressed, dim='frequency')\n", (10934, 10975), True, 'import xarray as xr\n'), ((16174, 16199), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (16188, 16199), False, 'import os\n'), ((4770, 4791), 'numpy.arange', 'np.arange', (['range_bins'], {}), '(range_bins)\n', (4779, 4791), True, 'import numpy as np\n'), ((4820, 4878), 'xarray.DataArray', 'xr.DataArray', (['range_bin'], {'coords': "[('range_bin', range_bin)]"}), "(range_bin, coords=[('range_bin', range_bin)])\n", (4832, 4878), True, 'import xarray as xr\n'), ((6325, 6353), 'numpy.arange', 'np.arange', (['(0)', 'tau[ch]', 'delta'], {}), '(0, tau[ch], delta)\n', (6334, 6353), True, 'import numpy as np\n'), ((6467, 6483), 'numpy.hanning', 'np.hanning', (['nwtx'], {}), '(nwtx)\n', (6477, 6483), True, 'import numpy as np\n'), ((7408, 7431), 'numpy.convolve', 'np.convolve', (['y', 'wbt_fil'], {}), '(y, wbt_fil)\n', (7419, 7431), True, 'import numpy as np\n'), ((7596, 7624), 'numpy.convolve', 'np.convolve', (['ytx_tmp', 'pc_fil'], {}), '(ytx_tmp, pc_fil)\n', (7607, 7624), True, 'import numpy as np\n'), ((10017, 10040), 'numpy.array', 'np.array', (['tau_constants'], {}), '(tau_constants)\n', (10025, 10040), True, 'import numpy as np\n'), ((13022, 13066), 'numpy.mean', 'np.mean', (['self.backscatter_compressed'], {'axis': '(1)'}), '(self.backscatter_compressed, axis=1)\n', (13029, 13066), True, 'import numpy as np\n'), ((13142, 13154), 'numpy.abs', 'np.abs', (['Ztrd'], {}), '(Ztrd)\n', (13148, 13154), True, 'import numpy as np\n'), ((16764, 16993), 'numpy.log10', 'np.log10', (['(ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 * \n wavelength ** 2 * self.sound_speed * ds_beam.transmit_duration_nominal *\n 10 ** (ds_beam.equivalent_beam_angle / 10) / (32 * np.pi ** 2))'], {}), '(ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) **\n 2 * wavelength ** 2 * self.sound_speed * ds_beam.\n transmit_duration_nominal * 10 ** (ds_beam.equivalent_beam_angle / 10) /\n (32 * np.pi ** 2))\n', (16772, 16993), True, 'import numpy as np\n'), ((2560, 2590), 'numpy.ones', 'np.ones', (['self.sound_speed.size'], {}), '(self.sound_speed.size)\n', (2567, 2590), True, 'import numpy as np\n'), ((6513, 6531), 'numpy.round', 'np.round', (['(nwtx / 2)'], {}), '(nwtx / 2)\n', (6521, 6531), True, 'import numpy as np\n'), ((8990, 9011), 'numpy.conj', 'np.conj', (['self.ytx[ch]'], {}), '(self.ytx[ch])\n', (8997, 9011), True, 'import numpy as np\n'), ((12796, 12826), 'numpy.log10', 'np.log10', (['(f_nominal / f_center)'], {}), '(f_nominal / f_center)\n', (12804, 12826), True, 'import numpy as np\n'), ((18151, 18271), 'numpy.log10', 'np.log10', (['(ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 * \n wavelength ** 2 / (16 * np.pi ** 2))'], {}), '(ds_beam.transmit_power * (10 ** 
(ds_beam.gain_correction / 10)) **\n 2 * wavelength ** 2 / (16 * np.pi ** 2))\n', (18159, 18271), True, 'import numpy as np\n'), ((6414, 6438), 'numpy.floor', 'np.floor', (['(slope[ch] * nt)'], {}), '(slope[ch] * nt)\n', (6422, 6438), True, 'import numpy as np\n'), ((6590, 6608), 'numpy.ones', 'np.ones', (['(nt - nwtx)'], {}), '(nt - nwtx)\n', (6597, 6608), True, 'import numpy as np\n'), ((6816, 6829), 'numpy.abs', 'np.abs', (['y_tmp'], {}), '(y_tmp)\n', (6822, 6829), True, 'import numpy as np\n'), ((9566, 9594), 'numpy.linalg.norm', 'np.linalg.norm', (['self.ytx[ch]'], {}), '(self.ytx[ch])\n', (9580, 9594), True, 'import numpy as np\n'), ((9952, 9964), 'numpy.sum', 'np.sum', (['ptxa'], {}), '(ptxa)\n', (9958, 9964), True, 'import numpy as np\n'), ((9968, 9980), 'numpy.max', 'np.max', (['ptxa'], {}), '(ptxa)\n', (9974, 9980), True, 'import numpy as np\n'), ((10406, 10473), 'numpy.pad', 'np.pad', (['x', '((0, 0), (0, 0), (0, pad_width))'], {'constant_values': 'np.nan'}), '(x, ((0, 0), (0, 0), (0, pad_width)), constant_values=np.nan)\n', (10412, 10473), True, 'import numpy as np\n'), ((9783, 9836), 'scipy.signal.convolve', 'signal.convolve', (['self.ytx[ch]', 'tmp_y'], {'method': '"""direct"""'}), "(self.ytx[ch], tmp_y, method='direct')\n", (9798, 9836), False, 'from scipy import signal\n'), ((13103, 13124), 'numpy.abs', 'np.abs', (['(Rwbtrx + Ztrd)'], {}), '(Rwbtrx + Ztrd)\n', (13109, 13124), True, 'import numpy as np\n'), ((13765, 13793), 'numpy.log10', 'np.log10', (['self.tau_effective'], {}), '(self.tau_effective)\n', (13773, 13793), True, 'import numpy as np\n'), ((14028, 14089), 'numpy.log10', 'np.log10', (['(ds_beam.transmit_power * la2 / (16 * np.pi * np.pi))'], {}), '(ds_beam.transmit_power * la2 / (16 * np.pi * np.pi))\n', (14036, 14089), True, 'import numpy as np\n'), ((9879, 9907), 'numpy.linalg.norm', 'np.linalg.norm', (['self.ytx[ch]'], {}), '(self.ytx[ch])\n', (9893, 9907), True, 'import numpy as np\n'), ((9243, 9268), 'scipy.signal.convolve', 'signal.convolve', (['m', 'tmp_y'], {}), '(m, tmp_y)\n', (9258, 9268), False, 'from scipy import signal\n'), ((13660, 13725), 'numpy.log10', 'np.log10', (['(ds_beam.transmit_power * la2 * c / (32 * np.pi * np.pi))'], {}), '(ds_beam.transmit_power * la2 * c / (32 * np.pi * np.pi))\n', (13668, 13725), True, 'import numpy as np\n'), ((13899, 13912), 'numpy.log10', 'np.log10', (['prx'], {}), '(prx)\n', (13907, 13912), True, 'import numpy as np\n'), ((13920, 13936), 'numpy.log10', 'np.log10', (['ranges'], {}), '(ranges)\n', (13928, 13936), True, 'import numpy as np\n'), ((17983, 18000), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (17998, 18000), True, 'import datetime as dt\n'), ((14592, 14609), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (14607, 14609), True, 'import datetime as dt\n'), ((19059, 19076), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (19074, 19076), True, 'import datetime as dt\n'), ((13531, 13544), 'numpy.log10', 'np.log10', (['prx'], {}), '(prx)\n', (13539, 13544), True, 'import numpy as np\n'), ((13552, 13568), 'numpy.log10', 'np.log10', (['ranges'], {}), '(ranges)\n', (13560, 13568), True, 'import numpy as np\n'), ((15113, 15130), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (15128, 15130), True, 'import datetime as dt\n')]
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as f:
description = f.read()
setup(
name='bikeshed',
version='0.1.0',
packages=find_packages(),
license=u'BSD 3-Clause License',
long_description=description,
include_package_data=True,
install_requires=[
'bcrypt>=1.0.2',
'docutils>=0.11',
'elasticsearch>=1.0.0',
'httplib2>=0.9',
'Jinja2>=2.7.2',
'lxml>=3.3.4',
'patchit>=1.1',
'python-dateutil>=2.2',
'redis>=2.9.1',
'Sphinx>=1.2.2',
'itsdangerous>=0.24',
'Werkzeug==0.9.4',
'gunicorn==18.0',
],
)
|
[
"setuptools.find_packages"
] |
[((190, 205), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (203, 205), False, 'from setuptools import setup, find_packages\n')]
|
from model.contact import Contact
from random import randrange
import re
def test_contact_info_on_main_page(app):
if app.contact.count() == 0:
app.contact.add_contact(
Contact(firstname="Ivan", middlename="Sergeevich", lastname="Petrov", nickname="Butthead", title="test",
company="Gazprom", address="Moscow", home="+74950000000", mobile="+79190000000",
work="+74951000000", fax="+74952000000", email="<EMAIL>", email2="<EMAIL>",
email3="<EMAIL>", homepage="www.petrov.su", bday="2", bmonth="April", byear="1973",
aday="6", amonth="May", ayear="1999", address2="Moscow", phone2="1", notes="Test"))
old_contact_list = app.contact.get_contact_list()
index = randrange(len(old_contact_list))
contact_from_home_page = app.contact.get_contact_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home, contact.mobile, contact.work, contact.phone2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3])))
|
[
"re.sub",
"model.contact.Contact"
] |
[((1447, 1470), 're.sub', 're.sub', (['"""[() -]"""', '""""""', 's'], {}), "('[() -]', '', s)\n", (1453, 1470), False, 'import re\n'), ((194, 649), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""Ivan"""', 'middlename': '"""Sergeevich"""', 'lastname': '"""Petrov"""', 'nickname': '"""Butthead"""', 'title': '"""test"""', 'company': '"""Gazprom"""', 'address': '"""Moscow"""', 'home': '"""+74950000000"""', 'mobile': '"""+79190000000"""', 'work': '"""+74951000000"""', 'fax': '"""+74952000000"""', 'email': '"""<EMAIL>"""', 'email2': '"""<EMAIL>"""', 'email3': '"""<EMAIL>"""', 'homepage': '"""www.petrov.su"""', 'bday': '"""2"""', 'bmonth': '"""April"""', 'byear': '"""1973"""', 'aday': '"""6"""', 'amonth': '"""May"""', 'ayear': '"""1999"""', 'address2': '"""Moscow"""', 'phone2': '"""1"""', 'notes': '"""Test"""'}), "(firstname='Ivan', middlename='Sergeevich', lastname='Petrov',\n nickname='Butthead', title='test', company='Gazprom', address='Moscow',\n home='+74950000000', mobile='+79190000000', work='+74951000000', fax=\n '+74952000000', email='<EMAIL>', email2='<EMAIL>', email3='<EMAIL>',\n homepage='www.petrov.su', bday='2', bmonth='April', byear='1973', aday=\n '6', amonth='May', ayear='1999', address2='Moscow', phone2='1', notes=\n 'Test')\n", (201, 649), False, 'from model.contact import Contact\n')]
|
"""
Run this script with -h for help.
For a given dataset, it produces, for each method, all the data needed to compare the methods on that dataset.
The strategies being compared are defined after line 88.
"""
from concurrent.futures import wait, ALL_COMPLETED
from concurrent.futures.process import ProcessPoolExecutor
import os
from typing import Callable, Dict, List, Optional, Tuple
import pandas as pd
import numpy as np
from pseas.instance_selection.instance_selection import InstanceSelection
from tqdm import tqdm
from pseas.test_env import ResetChoice, TestEnv
from pseas.strategy import Strategy
from pseas.standard_strategy import StandardStrategy
from pseas.discrimination.subset_baseline import SubsetBaseline
from pseas.discrimination.wilcoxon import Wilcoxon
from pseas.instance_selection.random_baseline import RandomBaseline
from pseas.instance_selection.discrimination_based import DiscriminationBased
from pseas.instance_selection.variance_based import VarianceBased
from pseas.instance_selection.information_based import InformationBased
from pseas.instance_selection.udd import UDD
# =============================================================================
# Argument parsing.
# =============================================================================
import argparse
argument_parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Produce run data.")
argument_default_values: Dict = {
"output_suffix": '',
"save_every": 5,
"max_workers": None,
"scenario_path": './rundata/kissat_ibm',
"nb_configurations": 10,
"ratio_instances": .1,
"nb_seeds": 10
}
argument_parser.add_argument('-o', '--output-suffix',
type=str,
action='store',
default=argument_default_values['output_suffix'],
help="CSV data filename suffix (default: '[scenario]_[nb configurations]_[ratio instance]')"
)
argument_parser.add_argument('--save-every',
type=int,
action='store',
default=argument_default_values['save_every'],
help="Save data every X time. (default: 5)"
)
argument_parser.add_argument('--max-workers',
type=int,
action='store',
default=argument_default_values['max_workers'],
help="Max number of processes. (default: None)"
)
argument_parser.add_argument('--scenario-path',
type=str,
action='store',
default=argument_default_values['scenario_path'],
help=" (default: './rundata/kissat_ibm')"
)
argument_parser.add_argument('--nb-configurations',
type=int,
action='store',
default=argument_default_values['nb_configurations'],
help=" (default: 10)"
)
argument_parser.add_argument('--nb-seeds',
type=int,
action='store',
default=argument_default_values['nb_seeds'],
help=" (default: 10)"
)
argument_parser.add_argument('--ratio-instances',
type=float,
action='store',
default=argument_default_values['ratio_instances'],
help=" (default: 1)"
)
argument_parser.add_argument('--disc',
action='store_true',
help=" (default: False) instaed of GridSearch for UDD do it for discrimination"
)
parsed_parameters = argument_parser.parse_args()
nb_seeds: int = parsed_parameters.nb_seeds
save_every: int = parsed_parameters.save_every
max_workers: int = parsed_parameters.max_workers
scenario_path: str = parsed_parameters.scenario_path
nb_configurations: int = parsed_parameters.nb_configurations
ratio_instances: float = parsed_parameters.ratio_instances
disc_instead_udd: bool = parsed_parameters.disc
name: str = "discrimination" if disc_instead_udd else "udd"
output_suffix: str = scenario_path.strip('/').split('/')[-1]+'_'+str(nb_configurations)+'_'+str(ratio_instances)+"_"+name
# =============================================================================
# Finished parsing
# =============================================================================
# =============================================================================
# Start Strategy Definition
# =============================================================================
discriminators = [
lambda: Wilcoxon(confidence=101),
]
selectors: List[Callable[[], InstanceSelection]] = []
if not disc_instead_udd:
parameters_1 = np.linspace(.2, 2, num=10).tolist()
parameters_2 = np.linspace(.2, 2, num=10).tolist()
selectors = [UDD(p1, p2) for p1 in parameters_1 for p2 in parameters_2]
else:
parameters = np.linspace(1.01, 2, num=10).tolist()
selectors = [DiscriminationBased(p) for p in parameters]
strategy_makers = [
lambda i, d: StandardStrategy(i, d),
]
# =============================================================================
# End Strategy Definition
# =============================================================================
# Check if file already exists
original_df_general: Optional[pd.DataFrame] = None
original_df_detailed: Optional[pd.DataFrame] = None
if os.path.exists(f"./runs_{output_suffix}.csv"):
original_df_general = pd.read_csv(f"./runs_{output_suffix}.csv")
original_df_general = original_df_general.drop("Unnamed: 0", axis=1)
original_df_detailed = pd.read_csv(
f"./detailed_runs_{output_suffix}.csv")
original_df_detailed = original_df_detailed.drop(
"Unnamed: 0", axis=1)
print("Found existing data, continuing acquisition from save.")
df_general = {
"y_true": [],
"y_pred": [],
"time": [],
"perf_eval": [],
"perf_cmp": [],
"instances": [],
"strategy": [],
"a_new": [],
"a_old": [],
"seed": []
}
df_detailed = {
"strategy": [],
"confidence": [],
"time": [],
"instances": [],
"prediction": [],
"a_new": [],
"a_old": [],
"seed": []
}
def callback(future):
pbar.update(1)
strat_name, runs, dico = future.result()
# Fill detailed dataframe
stats = dico["stats"]
for k, v in stats.items():
for el in v:
df_detailed[k].append(el)
# Save detailed dataframe
if pbar.n % save_every == 0:
df_tmp = pd.DataFrame(df_detailed)
if original_df_detailed is not None:
df_tmp = original_df_detailed.append(df_tmp)
df_tmp.to_csv(f"./detailed_runs_{output_suffix}.csv")
# real data
real = dico["real"]
challengers: List[int] = real["challenger"]
seed = stats["seed"][-1]
# Fill general dataframe
for challenger, incumbent, perf_chall, perf_inc, y_true, _, _, _ in runs:
df_general["y_true"].append(y_true)
df_general["perf_eval"].append(perf_chall)
df_general["perf_cmp"].append(perf_inc)
df_general["strategy"].append(strat_name)
df_general["a_new"].append(challenger)
df_general["a_old"].append(incumbent)
index = challengers.index(challenger)
df_general["time"].append(real["time"][index])
df_general["instances"].append(real["instances"][index])
df_general["y_pred"].append(real["prediction"][index])
df_general["seed"].append(seed)
# Save general dataframe
if pbar.n % save_every == 0:
df_tmp = pd.DataFrame(df_general)
if original_df_general is not None:
df_tmp = original_df_general.append(df_tmp)
df_tmp.to_csv(f"./runs_{output_suffix}.csv")
def evaluate(scenario_path: str, strategy: Strategy, seed: int,
verbose: bool = False, **kwargs) -> Tuple[str, List[Tuple[int, int, float, float, bool, bool, float, int]], Dict]:
env: TestEnv = TestEnv(scenario_path, verbose, seed=seed)
# Select instances
ninstances = round(ratio_instances * env.ninstances)
    selected_instances = env.rng.choice(list(range(env.ninstances)), size=ninstances, replace=False)
for instance in range(env.ninstances):
if instance not in selected_instances:
env.set_enabled(-1, instance, False)
env.set_instance_count_for_eval(instance, False)
# Subset of configurations
    known_configurations = env.rng.choice(list(range(env.nconfigurations)), size=nb_configurations, replace=False)
challenger_list: List[int] = []
for config in range(env.nconfigurations):
if config not in known_configurations:
env.set_enabled(config, -1, False)
challenger_list.append(config)
# Get incumbent that is the fastest
env.fit_model()
incumbent: int = env.reset(ResetChoice.RESET_BEST)[1]["incumbent_configuration"]
env._history.clear()
stats = {
"time": [],
"confidence": [],
"prediction": [],
"strategy": [],
"a_new": [],
"a_old": [],
"instances": [],
"seed": []
}
real = {
"prediction": [],
"time": [],
"challenger": [],
"instances": [],
}
to_ratio = lambda x: int(np.floor(x * 100))
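    # Maps the fraction of the challenger's maximum total time spent so far
    # to an integer percentage, used to index the per-percent stats below.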
label: str = strategy.name()
for challenger in challenger_list:
state, information, information_has_changed = env.reset((incumbent, challenger))
if information_has_changed:
strategy.ready(**information)
strategy.reset()
strategy.feed(state)
last_time_ratio: float = 0
instances : int = 0
finished: bool = False
while instances < env.ninstances:
state = env.step(strategy.choose_instance())
strategy.feed(state)
instances += 1
# Update if time changed enough
time_ratio: float = env.current_time / env.current_challenger_max_total_time
if to_ratio(last_time_ratio) < to_ratio(time_ratio):
for i in range(to_ratio(last_time_ratio), to_ratio(time_ratio)):
# Update predictions
stats["time"].append(i)
stats["prediction"].append(
strategy.is_better() == env.is_challenger_better)
stats["strategy"].append(label)
stats["a_new"].append(challenger)
stats["a_old"].append(incumbent)
stats["instances"].append(instances)
stats["seed"].append(seed)
# Update confidence
try:
stats["confidence"].append(
strategy.get_current_choice_confidence() * 100)
except AttributeError:
stats["confidence"].append(100)
last_time_ratio = time_ratio
if not finished and strategy.get_current_choice_confidence() >= .95 and not strategy.is_better():
if isinstance(strategy._discrimination, Wilcoxon) and env.current_instances < 5:
continue
finished = True
real["challenger"].append(challenger)
real["prediction"].append(strategy.is_better())
real["time"].append(env.current_time / env.current_challenger_max_total_time)
real["instances"].append(env.current_instances)
env.choose(strategy.is_better())
# Fill in the rest
for i in range(to_ratio(last_time_ratio), 101):
# Update predictions
stats["time"].append(i)
stats["strategy"].append(label)
stats["a_new"].append(challenger)
stats["a_old"].append(incumbent)
stats["instances"].append(instances)
stats["prediction"].append(
strategy.is_better() == env.is_challenger_better)
stats["seed"].append(seed)
# Update confidence
try:
stats["confidence"].append(
strategy.get_current_choice_confidence() * 100)
except AttributeError:
stats["confidence"].append(100)
if not finished:
finished = True
real["challenger"].append(challenger)
real["prediction"].append(strategy.is_better())
real["time"].append(env.current_time / env.current_challenger_max_total_time)
real["instances"].append(env.current_instances)
kwargs["stats"] = stats
kwargs["real"] = real
kwargs["a_old"] = incumbent
return strategy.name(), list(env.runs()), kwargs
def run(scenario_path, max_workers):
print()
env = TestEnv(scenario_path)
n_algos = env.nconfigurations
# Generate strategies
total: int = 0
strategies: List[Tuple[Strategy, Dict]] = []
for discriminator in discriminators:
for selection in selectors:
for strategy_make in strategy_makers:
strat = strategy_make(selection, discriminator())
seeds_done = []
total += nb_seeds
if original_df_general is not None:
tmp = original_df_general[original_df_general["strategy"] == strat.name(
)]
seeds_done = np.unique(
tmp["seed"].values).tolist()
total -= len(seeds_done)
strategies.append([strat, seeds_done])
global pbar
pbar = tqdm(total=total)
futures = []
executor = ProcessPoolExecutor(max_workers)
for strategy, seeds_done in strategies:
for seed in range(nb_seeds):
if seed in seeds_done:
continue
future = executor.submit(evaluate, scenario_path, strategy.clone(), seed)
future.add_done_callback(callback)
futures.append(future)
wait(futures, return_when=ALL_COMPLETED)
pbar.close()
run(scenario_path, max_workers)
# Last save
df_tmp = pd.DataFrame(df_detailed)
if original_df_detailed is not None:
df_tmp = original_df_detailed.append(df_tmp)
df_tmp.to_csv(f"./detailed_runs_{output_suffix}.csv")
df_tmp = pd.DataFrame(df_general)
if original_df_general is not None:
df_tmp = original_df_general.append(df_tmp)
df_tmp.to_csv(f"./runs_{output_suffix}.csv")
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"argparse.ArgumentParser",
"pseas.discrimination.wilcoxon.Wilcoxon",
"pseas.instance_selection.udd.UDD",
"pandas.read_csv",
"numpy.floor",
"pseas.standard_strategy.StandardStrategy",
"os.path.exists",
"pseas.test_env.TestEnv",
"concurrent.futures.process.ProcessPoolExecutor",
"pseas.instance_selection.discrimination_based.DiscriminationBased",
"numpy.linspace",
"concurrent.futures.wait",
"numpy.unique"
] |
[((1356, 1412), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Produce run data."""'}), "(description='Produce run data.')\n", (1379, 1412), False, 'import argparse\n'), ((5860, 5905), 'os.path.exists', 'os.path.exists', (['f"""./runs_{output_suffix}.csv"""'], {}), "(f'./runs_{output_suffix}.csv')\n", (5874, 5905), False, 'import os\n'), ((14656, 14681), 'pandas.DataFrame', 'pd.DataFrame', (['df_detailed'], {}), '(df_detailed)\n', (14668, 14681), True, 'import pandas as pd\n'), ((14831, 14855), 'pandas.DataFrame', 'pd.DataFrame', (['df_general'], {}), '(df_general)\n', (14843, 14855), True, 'import pandas as pd\n'), ((5933, 5975), 'pandas.read_csv', 'pd.read_csv', (['f"""./runs_{output_suffix}.csv"""'], {}), "(f'./runs_{output_suffix}.csv')\n", (5944, 5975), True, 'import pandas as pd\n'), ((6077, 6128), 'pandas.read_csv', 'pd.read_csv', (['f"""./detailed_runs_{output_suffix}.csv"""'], {}), "(f'./detailed_runs_{output_suffix}.csv')\n", (6088, 6128), True, 'import pandas as pd\n'), ((8424, 8466), 'pseas.test_env.TestEnv', 'TestEnv', (['scenario_path', 'verbose'], {'seed': 'seed'}), '(scenario_path, verbose, seed=seed)\n', (8431, 8466), False, 'from pseas.test_env import ResetChoice, TestEnv\n'), ((13338, 13360), 'pseas.test_env.TestEnv', 'TestEnv', (['scenario_path'], {}), '(scenario_path)\n', (13345, 13360), False, 'from pseas.test_env import ResetChoice, TestEnv\n'), ((14147, 14164), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total'}), '(total=total)\n', (14151, 14164), False, 'from tqdm import tqdm\n'), ((14197, 14229), 'concurrent.futures.process.ProcessPoolExecutor', 'ProcessPoolExecutor', (['max_workers'], {}), '(max_workers)\n', (14216, 14229), False, 'from concurrent.futures.process import ProcessPoolExecutor\n'), ((14543, 14583), 'concurrent.futures.wait', 'wait', (['futures'], {'return_when': 'ALL_COMPLETED'}), '(futures, return_when=ALL_COMPLETED)\n', (14547, 14583), False, 'from concurrent.futures import wait, ALL_COMPLETED\n'), ((5058, 5082), 'pseas.discrimination.wilcoxon.Wilcoxon', 'Wilcoxon', ([], {'confidence': '(101)'}), '(confidence=101)\n', (5066, 5082), False, 'from pseas.discrimination.wilcoxon import Wilcoxon\n'), ((5292, 5303), 'pseas.instance_selection.udd.UDD', 'UDD', (['p1', 'p2'], {}), '(p1, p2)\n', (5295, 5303), False, 'from pseas.instance_selection.udd import UDD\n'), ((5429, 5451), 'pseas.instance_selection.discrimination_based.DiscriminationBased', 'DiscriminationBased', (['p'], {}), '(p)\n', (5448, 5451), False, 'from pseas.instance_selection.discrimination_based import DiscriminationBased\n'), ((5510, 5532), 'pseas.standard_strategy.StandardStrategy', 'StandardStrategy', (['i', 'd'], {}), '(i, d)\n', (5526, 5532), False, 'from pseas.standard_strategy import StandardStrategy\n'), ((6978, 7003), 'pandas.DataFrame', 'pd.DataFrame', (['df_detailed'], {}), '(df_detailed)\n', (6990, 7003), True, 'import pandas as pd\n'), ((8029, 8053), 'pandas.DataFrame', 'pd.DataFrame', (['df_general'], {}), '(df_general)\n', (8041, 8053), True, 'import pandas as pd\n'), ((5184, 5211), 'numpy.linspace', 'np.linspace', (['(0.2)', '(2)'], {'num': '(10)'}), '(0.2, 2, num=10)\n', (5195, 5211), True, 'import numpy as np\n'), ((5239, 5266), 'numpy.linspace', 'np.linspace', (['(0.2)', '(2)'], {'num': '(10)'}), '(0.2, 2, num=10)\n', (5250, 5266), True, 'import numpy as np\n'), ((5374, 5402), 'numpy.linspace', 'np.linspace', (['(1.01)', '(2)'], {'num': '(10)'}), '(1.01, 2, num=10)\n', (5385, 5402), True, 'import numpy as np\n'), ((9737, 9754), 'numpy.floor', 
'np.floor', (['(x * 100)'], {}), '(x * 100)\n', (9745, 9754), True, 'import numpy as np\n'), ((13954, 13983), 'numpy.unique', 'np.unique', (["tmp['seed'].values"], {}), "(tmp['seed'].values)\n", (13963, 13983), True, 'import numpy as np\n')]
|
import json
import plotly
import pandas as pd
import re
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
import plotly.graph_objs as go
app = Flask(__name__)
def tokenize(text):
""" Tokenize string
Args:
text(string)
Returns:
tokens(list): tokens in list of strings format
"""
text = text.lower()
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
    # '@' mentions: even though '@' adds some information to the message,
    # it does not add value when building the classification model
text = re.sub(r'@[A-Za-z0-9_]+', '', text)
# Dealing with URL links
    url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
                 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
text = re.sub(url_regex, 'urlplaceholder', text)
    # Many URLs are written as follows: http bit.ly. Apply a regex for these
    # cases
    utl_regex_2 = r'http [a-zA-Z]+\.[a-zA-Z]+'
text = re.sub(utl_regex_2, 'urlplaceholder', text)
# Other formats: http : //t.co/ihW64e8Z
    utl_regex_3 = r'http \: //[a-zA-Z]\.(co|com|pt|ly)/[A-Za-z0-9_]+'
text = re.sub(utl_regex_3, 'urlplaceholder', text)
    # Hashtags can provide useful information; remove only the ``#`` symbol
text = re.sub('#', ' ', text)
# Contractions
text = re.sub(r"what's", 'what is ', text)
text = re.sub(r"can't", 'cannot', text)
text = re.sub(r"\'s", ' ', text)
text = re.sub(r"\'ve", ' have ', text)
text = re.sub(r"n't", ' not ', text)
text = re.sub(r"im", 'i am ', text)
text = re.sub(r"i'm", 'i am ', text)
text = re.sub(r"\'re", ' are ', text)
text = re.sub(r"\'d", ' would ', text)
text = re.sub(r"\'ll", ' will ', text)
# Operations and special words
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub('foof', 'food', text)
    text = re.sub(r'\bmsg\b', 'message', text)
    text = re.sub(' u ', ' you ', text)
    # Punctuation removal
text = re.sub(r'[^a-zA-Z0-9]', ' ', text)
tokens = word_tokenize(text)
tokens = [lemmatizer.lemmatize(w) for w in tokens]
tokens = [tok for tok in tokens if tok not in stop_words]
return tokens
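# A rough worked example (the exact output depends on the NLTK stopword list
# and lemmatizer, so treat it as illustrative only):
#   tokenize("Help! We're stranded, see http://t.co/abc #flood")
#   -> ['help', 'stranded', 'see', 'urlplaceholder', 'flood']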
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('messages', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
# create visuals
graphs = []
figure = go.Figure()
figure.add_trace(
go.Bar(
x=genre_names,
y=genre_counts
)
)
figure.update_layout(
go.Layout(
title="Distribution of Message Genres",
title_x=0.5,
yaxis_title="Count",
xaxis_title=f"Genre",
plot_bgcolor="rgba(0,0,0,0)"
)
)
graphs.append(dict(data=figure.data, layout=figure.layout))
    # Plot output column distributions
output_columns = [
'related', 'request', 'offer', 'aid_related',
'medical_help', 'medical_products', 'search_and_rescue', 'security',
'military', 'child_alone', 'water', 'food', 'shelter', 'clothing',
'money', 'missing_people', 'refugees', 'death', 'other_aid',
'infrastructure_related', 'transport', 'buildings', 'electricity',
'tools', 'hospitals', 'shops', 'aid_centers', 'other_infrastructure',
'weather_related', 'floods', 'storm', 'fire', 'earthquake', 'cold',
'other_weather', 'direct_report']
for i, col in enumerate(output_columns):
counts = df.groupby(col).count()['id']
total_rows = df.shape[0]
names = col.replace('_', ' ').title()
figure = go.Figure()
figure.add_trace(
go.Bar(
x=counts.index,
y=counts,
text=round(counts[counts.index]/total_rows*100, 2)
.apply(lambda x: str(x) + '%'),
textposition='outside',
cliponaxis=False
)
)
figure.update_layout(
go.Layout(
title=f'{names}',
title_x=0.5,
yaxis_title="Count",
xaxis_title=f"{names}",
plot_bgcolor="rgba(0,0,0,0)"
)
)
figure.update_traces(
marker_color='rgb(158,202,225)',
marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
graphs.append(dict(data=figure.data, layout=figure.layout))
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/classify')
def classify():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
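# A hedged usage sketch (assumes the app is running locally on port 3001, as
# configured in main() above):
#   import requests
#   r = requests.get("http://localhost:3001/classify",
#                    params={"query": "We need water and food"})
#   print(r.status_code)  # 200 once the model and templates are in place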
|
[
"nltk.stem.WordNetLemmatizer",
"flask.request.args.get",
"flask.Flask",
"plotly.graph_objs.Layout",
"json.dumps",
"pandas.read_sql_table",
"nltk.corpus.stopwords.words",
"sklearn.externals.joblib.load",
"sqlalchemy.create_engine",
"plotly.graph_objs.Figure",
"flask.render_template",
"re.sub",
"nltk.tokenize.word_tokenize",
"plotly.graph_objs.Bar"
] |
[((394, 409), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (399, 409), False, 'from flask import Flask\n'), ((2677, 2731), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///../data/DisasterResponse.db"""'], {}), "('sqlite:///../data/DisasterResponse.db')\n", (2690, 2731), False, 'from sqlalchemy import create_engine\n'), ((2737, 2774), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""messages"""', 'engine'], {}), "('messages', engine)\n", (2754, 2774), True, 'import pandas as pd\n'), ((2797, 2836), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../models/classifier.pkl"""'], {}), "('../models/classifier.pkl')\n", (2808, 2836), False, 'from sklearn.externals import joblib\n'), ((605, 631), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (620, 631), False, 'from nltk.corpus import stopwords\n'), ((649, 668), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (666, 668), False, 'from nltk.stem import WordNetLemmatizer\n'), ((821, 855), 're.sub', 're.sub', (['"""@[A-Za-z0-9_]+"""', '""""""', 'text'], {}), "('@[A-Za-z0-9_]+', '', text)\n", (827, 855), False, 'import re\n'), ((1017, 1058), 're.sub', 're.sub', (['url_regex', '"""urlplaceholder"""', 'text'], {}), "(url_regex, 'urlplaceholder', text)\n", (1023, 1058), False, 'import re\n'), ((1205, 1248), 're.sub', 're.sub', (['utl_regex_2', '"""urlplaceholder"""', 'text'], {}), "(utl_regex_2, 'urlplaceholder', text)\n", (1211, 1248), False, 'import re\n'), ((1373, 1416), 're.sub', 're.sub', (['utl_regex_3', '"""urlplaceholder"""', 'text'], {}), "(utl_regex_3, 'urlplaceholder', text)\n", (1379, 1416), False, 'import re\n'), ((1497, 1519), 're.sub', 're.sub', (['"""#"""', '""" """', 'text'], {}), "('#', ' ', text)\n", (1503, 1519), False, 'import re\n'), ((1551, 1585), 're.sub', 're.sub', (['"""what\'s"""', '"""what is """', 'text'], {}), '("what\'s", \'what is \', text)\n', (1557, 1585), False, 'import re\n'), ((1598, 1629), 're.sub', 're.sub', (['"""can\'t"""', '"""cannot"""', 'text'], {}), '("can\'t", \'cannot\', text)\n', (1604, 1629), False, 'import re\n'), ((1642, 1667), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" """', 'text'], {}), '("\\\\\'s", \' \', text)\n', (1648, 1667), False, 'import re\n'), ((1679, 1710), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" have """', 'text'], {}), '("\\\\\'ve", \' have \', text)\n', (1685, 1710), False, 'import re\n'), ((1722, 1750), 're.sub', 're.sub', (['"""n\'t"""', '""" not """', 'text'], {}), '("n\'t", \' not \', text)\n', (1728, 1750), False, 'import re\n'), ((1763, 1790), 're.sub', 're.sub', (['"""im"""', '"""i am """', 'text'], {}), "('im', 'i am ', text)\n", (1769, 1790), False, 'import re\n'), ((1803, 1831), 're.sub', 're.sub', (['"""i\'m"""', '"""i am """', 'text'], {}), '("i\'m", \'i am \', text)\n', (1809, 1831), False, 'import re\n'), ((1844, 1874), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" are """', 'text'], {}), '("\\\\\'re", \' are \', text)\n', (1850, 1874), False, 'import re\n'), ((1886, 1917), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" would """', 'text'], {}), '("\\\\\'d", \' would \', text)\n', (1892, 1917), False, 'import re\n'), ((1929, 1960), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" will """', 'text'], {}), '("\\\\\'ll", \' will \', text)\n', (1935, 1960), False, 'import re\n'), ((2008, 2030), 're.sub', 're.sub', (['""","""', '""" """', 'text'], {}), "(',', ' ', text)\n", (2014, 2030), False, 'import re\n'), ((2043, 2067), 're.sub', 're.sub', (['"""\\\\."""', '""" """', 'text'], 
{}), "('\\\\.', ' ', text)\n", (2049, 2067), False, 'import re\n'), ((2079, 2103), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'text'], {}), "('!', ' ! ', text)\n", (2085, 2103), False, 'import re\n'), ((2116, 2140), 're.sub', 're.sub', (['"""\\\\/"""', '""" """', 'text'], {}), "('\\\\/', ' ', text)\n", (2122, 2140), False, 'import re\n'), ((2152, 2178), 're.sub', 're.sub', (['"""\\\\^"""', '""" ^ """', 'text'], {}), "('\\\\^', ' ^ ', text)\n", (2158, 2178), False, 'import re\n'), ((2190, 2216), 're.sub', 're.sub', (['"""\\\\+"""', '""" + """', 'text'], {}), "('\\\\+', ' + ', text)\n", (2196, 2216), False, 'import re\n'), ((2228, 2254), 're.sub', 're.sub', (['"""\\\\-"""', '""" - """', 'text'], {}), "('\\\\-', ' - ', text)\n", (2234, 2254), False, 'import re\n'), ((2266, 2292), 're.sub', 're.sub', (['"""\\\\="""', '""" = """', 'text'], {}), "('\\\\=', ' = ', text)\n", (2272, 2292), False, 'import re\n'), ((2304, 2332), 're.sub', 're.sub', (['"""foof"""', '"""food"""', 'text'], {}), "('foof', 'food', text)\n", (2310, 2332), False, 'import re\n'), ((2344, 2374), 're.sub', 're.sub', (['"""msg"""', '"""message"""', 'text'], {}), "('msg', 'message', text)\n", (2350, 2374), False, 'import re\n'), ((2386, 2412), 're.sub', 're.sub', (['""" u """', '"""you"""', 'text'], {}), "(' u ', 'you', text)\n", (2392, 2412), False, 'import re\n'), ((2451, 2484), 're.sub', 're.sub', (['"""[^a-zA-Z0-9]"""', '""" """', 'text'], {}), "('[^a-zA-Z0-9]', ' ', text)\n", (2457, 2484), False, 'import re\n'), ((2499, 2518), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2512, 2518), False, 'from nltk.tokenize import word_tokenize\n'), ((3157, 3168), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (3166, 3168), True, 'import plotly.graph_objs as go\n'), ((5820, 5874), 'json.dumps', 'json.dumps', (['graphs'], {'cls': 'plotly.utils.PlotlyJSONEncoder'}), '(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n', (5830, 5874), False, 'import json\n'), ((5928, 5988), 'flask.render_template', 'render_template', (['"""master.html"""'], {'ids': 'ids', 'graphJSON': 'graphJSON'}), "('master.html', ids=ids, graphJSON=graphJSON)\n", (5943, 5988), False, 'from flask import render_template, request, jsonify\n'), ((6136, 6165), 'flask.request.args.get', 'request.args.get', (['"""query"""', '""""""'], {}), "('query', '')\n", (6152, 6165), False, 'from flask import render_template, request, jsonify\n'), ((6420, 6510), 'flask.render_template', 'render_template', (['"""go.html"""'], {'query': 'query', 'classification_result': 'classification_results'}), "('go.html', query=query, classification_result=\n classification_results)\n", (6435, 6510), False, 'from flask import render_template, request, jsonify\n'), ((3200, 3237), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'x': 'genre_names', 'y': 'genre_counts'}), '(x=genre_names, y=genre_counts)\n', (3206, 3237), True, 'import plotly.graph_objs as go\n'), ((3312, 3452), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': '"""Distribution of Message Genres"""', 'title_x': '(0.5)', 'yaxis_title': '"""Count"""', 'xaxis_title': 'f"""Genre"""', 'plot_bgcolor': '"""rgba(0,0,0,0)"""'}), "(title='Distribution of Message Genres', title_x=0.5, yaxis_title=\n 'Count', xaxis_title=f'Genre', plot_bgcolor='rgba(0,0,0,0)')\n", (3321, 3452), True, 'import plotly.graph_objs as go\n'), ((4875, 4886), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (4884, 4886), True, 'import plotly.graph_objs as go\n'), ((5247, 5367), 'plotly.graph_objs.Layout', 'go.Layout', ([], 
{'title': 'f"""{names}"""', 'title_x': '(0.5)', 'yaxis_title': '"""Count"""', 'xaxis_title': 'f"""{names}"""', 'plot_bgcolor': '"""rgba(0,0,0,0)"""'}), "(title=f'{names}', title_x=0.5, yaxis_title='Count', xaxis_title=\n f'{names}', plot_bgcolor='rgba(0,0,0,0)')\n", (5256, 5367), True, 'import plotly.graph_objs as go\n')]
|
#!/usr/bin/env python
#
# <NAME>
#
# This is a test of a Lindenmayer grammar that generates
# rather realistic-looking plant shrubbery. You can go to
# http://en.wikipedia.org/wiki/L-system for more information.
# This script requires the python-pygame dependency.
#
# Licensed under the MIT License.
import pygame, sys, math, os, random
from pygame.locals import *
os.environ['SDL_VIDEO_CENTERED'] ="1" ## Makes the window centered on-screen
class Main: ## Wrapper for the main method
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((800,600), SWSURFACE)
self.screen.fill((179,229,254)) ## Fill with a sort of sky-bluish color
pygame.display.set_caption("L-System Test")
self.clock = pygame.time.Clock()
self.positions = [] ## Stack of origin points from which to draw a branch
self.angles = [] ## Stack of angles at which to draw each branch
self.widths = [] ## Stack of widths of each branch
self.color_scales = [] ## Stack of color values for each branch
self.string = "X" ## Initial "seed" for the iterative process
self.angle = 180 ## Initial angle at which to draw the tree
self.cur_pos = [1000,1000] ## Offset the origin point offscreen
self.length = 4 ## Length of each segment
self.width = 8 ## Starting width to draw each branch
self.color_scale = 1.0 ## Color value is initialized to 100%
self.skip = False ## This permits the user to skip drawing
def _quit(self): ## Safe exiting method
pygame.quit()
sys.exit()
def iterate(self): ## Handles one iteration of L-system recursion
temp_string = "" ## Create a new string
for ch in self.string: ## Read through the current string
if ch == "X": ## Grammar rule: (X -> F-[[X]+X]+F[+FX]-X)
temp_string += "F-[[X]+X]+F[+FX]-X"
elif ch == "F": ## Grammar rule: (F -> FF)
temp_string += "FF"
else: ## Write in any constants
temp_string += ch
self.string = temp_string ## Update our string
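    ## One worked expansion for reference: a single call to iterate() turns the
    ## seed "X" into "F-[[X]+X]+F[+FX]-X"; the next call rewrites each F as
    ## "FF" and expands each X by the X-rule again.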
def read(self): ## Interprets and renders the recursed string
length = len(self.string) ## Length and count measure how much progress we've made in parsing this string
count = 0
for ch in self.string:
if ch == "F": ## Draw a line straight forward in the current angle, from the current origin
new_pos_x = self.cur_pos[0] + math.cos(self.angle) * self.length
new_pos_y = self.cur_pos[1] + math.sin(self.angle) * self.length
pygame.draw.line(self.screen, (int(78*self.color_scale),
int(117*self.color_scale),
int(28*self.color_scale)),
self.cur_pos, [int(new_pos_x),int(new_pos_y)], self.width)
self.cur_pos = [int(new_pos_x),int(new_pos_y)]
self.angle += 0.02 * random.randint(-1,1)
elif ch == "+" or ch == "-": ## Randomly choose between rotating the angle 170 degrees left or right
self.angle += 170 * random.choice((-1,1))
elif ch == "[": ## Push our current state onto the stack and enter a sub-branch
self.positions.append(self.cur_pos)
self.angles.append(self.angle)
self.widths.append(self.width)
self.color_scales.append(self.color_scale)
self.width = max(1, self.width-1) ## Branches get smaller further up
self.color_scale = max(0.01, self.color_scale-0.1) ## Branches get darker further up
elif ch == "]": ## Pop the previous state off the stack and exit a sub-branch
self.cur_pos = self.positions.pop(-1)
self.angle = self.angles.pop(-1)
self.width = self.widths.pop(-1)
self.color_scale = self.color_scales.pop(-1)
count += 1
pygame.draw.rect(self.screen, (0,0,0), (10,550,780,20)) ## This displays how much progress we've made
pygame.draw.rect(self.screen, (255,255,255), (10,550,int(780*count/float(length)),20))
if not self.skip: ## If we're not skipping, update the tree picture each frame
pygame.display.flip()
for e in pygame.event.get(): ## Makes it so the program doesn't hang while drawing
if e.type == pygame.QUIT:
self._quit()
elif e.type == pygame.KEYDOWN: ## You can exit, too
if e.key == pygame.K_ESCAPE:
self._quit()
elif e.key == pygame.K_RETURN: ## Press ENTER to fast-forward
self.skip = True
def run(self):
for n in range(7): ## Seven levels of recursion, we could do more but it'll take a lot longer
self.iterate()
self.read() ## Parse what we've generated
while True:
self.clock.tick(30) ## 30 FPS
pygame.display.flip()
for e in pygame.event.get(): ## Allow the user to bask in the glory of their randomly generated tree
if e.type == pygame.QUIT:
self._quit()
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
self._quit()
if __name__ == "__main__":
main = Main()
main.run()
|
[
"pygame.quit",
"random.randint",
"pygame.draw.rect",
"pygame.display.set_mode",
"pygame.event.get",
"random.choice",
"pygame.init",
"pygame.display.flip",
"math.sin",
"math.cos",
"pygame.display.set_caption",
"pygame.time.Clock",
"sys.exit"
] |
[((521, 534), 'pygame.init', 'pygame.init', ([], {}), '()\n', (532, 534), False, 'import pygame, sys, math, os, random\n'), ((558, 604), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 600)', 'SWSURFACE'], {}), '((800, 600), SWSURFACE)\n', (581, 604), False, 'import pygame, sys, math, os, random\n'), ((692, 735), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""L-System Test"""'], {}), "('L-System Test')\n", (718, 735), False, 'import pygame, sys, math, os, random\n'), ((758, 777), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (775, 777), False, 'import pygame, sys, math, os, random\n'), ((1662, 1675), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1673, 1675), False, 'import pygame, sys, math, os, random\n'), ((1684, 1694), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1692, 1694), False, 'import pygame, sys, math, os, random\n'), ((4254, 4314), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', '(0, 0, 0)', '(10, 550, 780, 20)'], {}), '(self.screen, (0, 0, 0), (10, 550, 780, 20))\n', (4270, 4314), False, 'import pygame, sys, math, os, random\n'), ((5381, 5402), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (5400, 5402), False, 'import pygame, sys, math, os, random\n'), ((5437, 5455), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5453, 5455), False, 'import pygame, sys, math, os, random\n'), ((4563, 4584), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4582, 4584), False, 'import pygame, sys, math, os, random\n'), ((4611, 4629), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4627, 4629), False, 'import pygame, sys, math, os, random\n'), ((3223, 3244), 'random.randint', 'random.randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3237, 3244), False, 'import pygame, sys, math, os, random\n'), ((2680, 2700), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (2688, 2700), False, 'import pygame, sys, math, os, random\n'), ((2761, 2781), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (2769, 2781), False, 'import pygame, sys, math, os, random\n'), ((3393, 3415), 'random.choice', 'random.choice', (['(-1, 1)'], {}), '((-1, 1))\n', (3406, 3415), False, 'import pygame, sys, math, os, random\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 16 23:08:55 2021
@author: maurol
"""
import os
from typing import Dict
import graphviz
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# TRUE False
f = """
digraph Tree {
node [shape=box, style="rounded", color="black", fontname=helvetica] ;
edge [fontname=helvetica] ;
0 [label="Number of leadership experiences < 1.5"] ;
1 [label="Essay grade < 5.25"] ;
0 -> 1 [labeldistance=2.5, labelangle=45, headlabel="True"] ;
3 [label="GPA < 3.46"] ;
1 -> 3 [headlabel="True"] ;
7 [label="Average rating:
3.4"] ;
3 -> 7 [headlabel="True "] ;
8 [label="Average rating:
4.31"] ;
3 -> 8 [headlabel="False "] ;
4 [label="Grade is not College/University Grad Student"] ;
1 -> 4 [headlabel="False "] ;
13 [label="Average rating:
5.07"] ;
4 -> 13 [headlabel="True "];
14 [label="Average rating:
6.56"] ;
4 -> 14 [headlabel="False "];
2 [label="Essay grade < 4.75"] ;
0 -> 2 [labeldistance=2.5, labelangle=-45, headlabel="False"] ;
5 [label="Number of extracurricular activities < 3.5"] ;
2 -> 5 [headlabel="True "] ;
11 [label="Average rating:
4.66"] ;
5 -> 11 [headlabel="True "] ;
12 [label="Average rating:
5.68"] ;
5 -> 12 [headlabel="False "] ;
6 [label="GPA < 3.65"] ;
2 -> 6 [headlabel="False"] ;
9 [label="Average rating:
5.58"] ;
6 -> 9 [headlabel="True "] ;
10 [label="Average rating:
6.87"] ;
6 -> 10 [headlabel=" False"] ;
}
"""
def run():
plot_name = 'surrogate_sample_True_sparse_False.png'
path_plot = r"C:\Users\maurol\OneDrive\Dokumente\Python_Scripts\algorithmic-explanations\reports\all\plot"
name, extension = os.path.splitext(plot_name)
graphviz.Source(
f,
filename=os.path.join(path_plot, name),
format=extension.replace('.', ''),
).view()
    with open(
        os.path.join(path_plot, "{}.dot".format(plot_name)), "w"
    ) as file:
        file.write(f)
if __name__ == "__main__":
    run()
|
[
"os.path.join",
"os.path.splitext"
] |
[((1667, 1694), 'os.path.splitext', 'os.path.splitext', (['plot_name'], {}), '(plot_name)\n', (1683, 1694), False, 'import os\n'), ((1749, 1778), 'os.path.join', 'os.path.join', (['path_plot', 'name'], {}), '(path_plot, name)\n', (1761, 1778), False, 'import os\n')]
|
import FWCore.ParameterSet.Config as cms
#
# produce ttSemiLep event hypotheses
#
## geom hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGeom_cff import *
## wMassDeltaTopMass hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypWMassDeltaTopMass_cff import *
## wMassMaxSumPt hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypWMassMaxSumPt_cff import *
## maxSumPtWMass hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypMaxSumPtWMass_cff import *
## genMatch hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypGenMatch_cff import *
## mvaDisc hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypMVADisc_cff import *
## kinFit hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypKinFit_cff import *
## hitFit hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypHitFit_cff import *
## make all considered event hypotheses
makeTtSemiLepHypothesesTask = cms.Task(
makeHypothesis_geomTask,
makeHypothesis_wMassDeltaTopMassTask,
makeHypothesis_wMassMaxSumPtTask,
makeHypothesis_maxSumPtWMassTask,
makeHypothesis_genMatchTask,
makeHypothesis_mvaDiscTask,
makeHypothesis_kinFitTask,
makeHypothesis_hitFitTask
)
makeTtSemiLepHypotheses = cms.Sequence(makeTtSemiLepHypothesesTask)
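## a minimal usage sketch (the cms.Process object ``process`` is assumed and
## not part of this fragment):
##   process.ttSemiLepHypothesesPath = cms.Path(makeTtSemiLepHypotheses)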
|
[
"FWCore.ParameterSet.Config.Sequence",
"FWCore.ParameterSet.Config.Task"
] |
[((958, 1220), 'FWCore.ParameterSet.Config.Task', 'cms.Task', (['makeHypothesis_geomTask', 'makeHypothesis_wMassDeltaTopMassTask', 'makeHypothesis_wMassMaxSumPtTask', 'makeHypothesis_maxSumPtWMassTask', 'makeHypothesis_genMatchTask', 'makeHypothesis_mvaDiscTask', 'makeHypothesis_kinFitTask', 'makeHypothesis_hitFitTask'], {}), '(makeHypothesis_geomTask, makeHypothesis_wMassDeltaTopMassTask,\n makeHypothesis_wMassMaxSumPtTask, makeHypothesis_maxSumPtWMassTask,\n makeHypothesis_genMatchTask, makeHypothesis_mvaDiscTask,\n makeHypothesis_kinFitTask, makeHypothesis_hitFitTask)\n', (966, 1220), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1270, 1311), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['makeTtSemiLepHypothesesTask'], {}), '(makeTtSemiLepHypothesesTask)\n', (1282, 1311), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A Jax version of Sinkhorn's algorithm."""
from typing import Any, Dict, Optional, NamedTuple, Union
import jax
import jax.numpy as jnp
from ott.core import fixed_point_loop
from ott.core import problems
from ott.core import sinkhorn
from ott.geometry import epsilon_scheduler
from ott.geometry import geometry
class GWOutput(NamedTuple):
"""Holds the output of the Gromov-Wasserstein solver.
Attributes:
costs: Holds the sequence of regularized GW costs seen through the outer
loop of the solver.
linear_convergence: Holds the sequence of bool convergence flags of the
inner Sinkhorn iterations.
convergence: Bool convergence flag for the outer GW iterations.
errors: Holds sequence of vectors of errors of the Sinkhorn algorithm
at each iteration.
linear_state: State used to solve and store solutions to the local
linearization of GW.
geom: The geometry underlying the local linearization.
transport: The transport matrix.
reg_gw_cost: Regularized optimal transport cost of the linearization.
"""
costs: Optional[jnp.ndarray] = None
linear_convergence: Optional[jnp.ndarray] = None
convergence: bool = False
errors: Optional[jnp.ndarray] = None
linear_state: Any = None
geom: geometry.Geometry = None
def set(self, **kwargs) -> 'GWOutput':
"""Returns a copy of self, possibly with overwrites."""
return self._replace(**kwargs)
@property
def transport(self):
return self.linear_state.matrix
@property
def reg_gw_cost(self):
return self.linear_state.reg_ot_cost
class GWState(NamedTuple):
"""Holds the state of the Gromov-Wasserstein solver.
Attributes:
costs: Holds the sequence of regularized GW costs seen through the outer
loop of the solver.
linear_convergence: Holds the sequence of bool convergence flags of the
inner Sinkhorn iterations.
errors: Holds sequence of vectors of errors of the Sinkhorn algorithm
at each iteration.
linear_state: State used to solve and store solutions to the local
linearization of GW.
linear_pb: Local linearization of the quadratic GW problem.
"""
costs: Optional[jnp.ndarray] = None
linear_convergence: Optional[jnp.ndarray] = None
errors: Optional[jnp.ndarray] = None
linear_state: Any = None
linear_pb: Optional[problems.LinearProblem] = None
def set(self, **kwargs) -> 'GWState':
"""Returns a copy of self, possibly with overwrites."""
return self._replace(**kwargs)
def update(self, iteration: int, linear_sol, linear_pb, store_errors: bool):
costs = self.costs.at[iteration].set(linear_sol.reg_ot_cost)
errors = None
if store_errors and self.errors is not None:
errors = self.errors.at[iteration, :].set(linear_sol.errors)
linear_convergence = self.linear_convergence.at[iteration].set(
linear_sol.converged)
return self.set(linear_state=linear_sol,
linear_pb=linear_pb,
costs=costs,
linear_convergence=linear_convergence,
errors=errors)
@jax.tree_util.register_pytree_node_class
class GromovWasserstein:
"""A Gromov Wasserstein solver."""
def __init__(self,
epsilon: Union[epsilon_scheduler.Epsilon, float] = 1.0,
min_iterations: int = 5,
max_iterations: int = 50,
threshold: float = 1e-3,
jit: bool = True,
store_sinkhorn_errors: bool = False,
linear_ot_solver: sinkhorn.Sinkhorn = sinkhorn.Sinkhorn(),
**kwargs):
self.epsilon = epsilon
self.min_iterations = min_iterations
self.max_iterations = max_iterations
self.threshold = threshold
self.jit = jit
self.store_sinkhorn_errors = store_sinkhorn_errors
self.linear_ot_solver = linear_ot_solver
self._kwargs = kwargs
def tree_flatten(self):
return ([self.epsilon, self.linear_ot_solver, self.threshold],
dict(
min_iterations=self.min_iterations,
max_iterations=self.max_iterations,
jit=self.jit,
store_sinkhorn_errors=self.store_sinkhorn_errors,
**self._kwargs))
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(epsilon=children[0],
linear_ot_solver=children[1],
threshold=children[2],
**aux_data)
def not_converged(self, state, iteration):
costs, i, tol = state.costs, iteration, self.threshold
return jnp.logical_or(
i <= 2,
jnp.logical_and(
jnp.isfinite(costs[i - 1]),
jnp.logical_not(jnp.isclose(costs[i - 2], costs[i - 1], rtol=tol))))
def __call__(self, prob: problems.QuadraticProblem) -> GWOutput:
if not prob.is_balanced:
raise ValueError('Unbalanced Gromov-Wasserstein is not supported yet.')
gromov_fn = jax.jit(iterations) if self.jit else iterations
out = gromov_fn(self, prob)
# TODO(lpapaxanthos): remove stop_gradient when using backprop
linearization = prob.update_linearization(
jax.lax.stop_gradient(out.linear_state),
self.epsilon)
linear_state = out.linear_state.set_cost(linearization, True, True)
iteration = jnp.sum(out.costs != 0)
convergence = jnp.logical_not(self.not_converged(out, iteration))
return out.set(linear_state=linear_state,
convergence=convergence)
def init_state(self, prob: problems.QuadraticProblem) -> GWState:
"""Initializes the state of the Gromov-Wasserstein iterations."""
linearization = prob.init_linearization(self.epsilon)
linear_state = self.linear_ot_solver(linearization)
num_iter = self.max_iterations
if self.store_sinkhorn_errors:
errors = -jnp.ones((num_iter, self.linear_ot_solver.outer_iterations))
else:
errors = None
return GWState(jnp.zeros((num_iter,)), jnp.zeros((num_iter,)),
errors, linear_state, linearization)
def output_from_state(self, state):
"""Create an output from a loop state.
Arguments:
state: A GWState.
Returns:
A GWOutput.
"""
geom = state.linear_pb.geom
return GWOutput(costs=state.costs,
linear_convergence=state.linear_convergence,
errors=state.errors,
linear_state=state.linear_state,
geom=geom)
def iterations(solver: GromovWasserstein,
prob: problems.QuadraticProblem) -> GWOutput:
"""A jittable Gromov-Wasserstein outer loop."""
def cond_fn(iteration, constants, state):
solver = constants
return solver.not_converged(state, iteration)
def body_fn(iteration, constants, state, compute_error):
del compute_error # Always assumed True for outer loop of GW.
solver = constants
linear_pb = prob.update_linearization(
state.linear_state,
solver.epsilon)
out = solver.linear_ot_solver(linear_pb)
return state.update(
iteration, out, linear_pb, solver.store_sinkhorn_errors)
state = fixed_point_loop.fixpoint_iter(
cond_fn=cond_fn,
body_fn=body_fn,
min_iterations=solver.min_iterations,
max_iterations=solver.max_iterations,
inner_iterations=1,
constants=solver,
state=solver.init_state(prob))
return solver.output_from_state(state)
def make(
epsilon: Union[epsilon_scheduler.Epsilon, float] = 1.,
max_iterations: int = 50,
jit: bool = False,
warm_start: bool = True,
store_sinkhorn_errors: bool = False,
sinkhorn_kwargs: Optional[Dict[str, Any]] = None,
threshold: float = 1e-2,
min_iterations: int = 1,
**kwargs) -> GromovWasserstein:
"""Creates a GromovWasserstein solver.
Args:
    epsilon: a regularization parameter or an epsilon_scheduler.Epsilon object.
max_iterations: int32, the maximum number of outer iterations for
Gromov Wasserstein.
jit: bool, if True, jits the function.
warm_start: deprecated.
store_sinkhorn_errors: whether or not to return all the errors of the inner
Sinkhorn iterations.
sinkhorn_kwargs: Optionally a dictionary containing the keywords arguments
for calls to the sinkhorn function.
threshold: threshold (progress between two iterate costs) used to stop GW.
min_iterations: see fixed_point_loop.
**kwargs: additional kwargs for epsilon.
Returns:
A GromovWasserstein solver.
"""
del warm_start
sinkhorn_kwargs = {} if sinkhorn_kwargs is None else sinkhorn_kwargs
sink = sinkhorn.make(**sinkhorn_kwargs)
return GromovWasserstein(
epsilon, max_iterations=max_iterations,
jit=jit, linear_ot_solver=sink, threshold=threshold,
store_sinkhorn_errors=store_sinkhorn_errors,
min_iterations=min_iterations, **kwargs)
def gromov_wasserstein(
geom_x: geometry.Geometry,
geom_y: geometry.Geometry,
a: Optional[jnp.ndarray] = None,
b: Optional[jnp.ndarray] = None,
loss: str = 'sqeucl',
**kwargs) -> GWOutput:
"""Fits Gromov Wasserstein.
Args:
geom_x: a Geometry object for the first view.
geom_y: a second Geometry object for the second view.
a: jnp.ndarray<float>[num_a,] or jnp.ndarray<float>[batch,num_a] weights.
b: jnp.ndarray<float>[num_b,] or jnp.ndarray<float>[batch,num_b] weights.
loss: str 'sqeucl' or 'kl' to define the GW loss.
**kwargs: keyword arguments to make.
Returns:
    A GWOutput named tuple.
"""
losses = {'sqeucl': problems.make_square_loss, 'kl': problems.make_kl_loss}
  if loss not in losses:
    raise ValueError(f"Unknown loss '{loss}'; expected 'sqeucl' or 'kl'.")
  loss_fn = losses[loss]
prob = problems.QuadraticProblem(geom_x, geom_y, a=a, b=b, loss=loss_fn())
solver = make(**kwargs)
return solver(prob)
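# A minimal usage sketch (not part of the module; `pointcloud.PointCloud`
# from ott.geometry is assumed as the concrete Geometry implementation):
#   import jax.numpy as jnp
#   from ott.geometry import pointcloud
#   x, y = jnp.ones((30, 3)), jnp.ones((40, 5))
#   out = gromov_wasserstein(pointcloud.PointCloud(x), pointcloud.PointCloud(y),
#                            epsilon=1e-2, max_iterations=20)
#   coupling = out.transport  # (30, 40) coupling matrix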
|
[
"jax.numpy.sum",
"jax.numpy.isfinite",
"jax.jit",
"ott.core.sinkhorn.make",
"jax.numpy.zeros",
"jax.numpy.isclose",
"ott.core.sinkhorn.Sinkhorn",
"jax.numpy.ones",
"jax.lax.stop_gradient"
] |
[((9174, 9206), 'ott.core.sinkhorn.make', 'sinkhorn.make', ([], {}), '(**sinkhorn_kwargs)\n', (9187, 9206), False, 'from ott.core import sinkhorn\n'), ((4152, 4171), 'ott.core.sinkhorn.Sinkhorn', 'sinkhorn.Sinkhorn', ([], {}), '()\n', (4169, 4171), False, 'from ott.core import sinkhorn\n'), ((5877, 5900), 'jax.numpy.sum', 'jnp.sum', (['(out.costs != 0)'], {}), '(out.costs != 0)\n', (5884, 5900), True, 'import jax.numpy as jnp\n'), ((5524, 5543), 'jax.jit', 'jax.jit', (['iterations'], {}), '(iterations)\n', (5531, 5543), False, 'import jax\n'), ((5726, 5765), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['out.linear_state'], {}), '(out.linear_state)\n', (5747, 5765), False, 'import jax\n'), ((6510, 6532), 'jax.numpy.zeros', 'jnp.zeros', (['(num_iter,)'], {}), '((num_iter,))\n', (6519, 6532), True, 'import jax.numpy as jnp\n'), ((6534, 6556), 'jax.numpy.zeros', 'jnp.zeros', (['(num_iter,)'], {}), '((num_iter,))\n', (6543, 6556), True, 'import jax.numpy as jnp\n'), ((5223, 5249), 'jax.numpy.isfinite', 'jnp.isfinite', (['costs[i - 1]'], {}), '(costs[i - 1])\n', (5235, 5249), True, 'import jax.numpy as jnp\n'), ((6400, 6460), 'jax.numpy.ones', 'jnp.ones', (['(num_iter, self.linear_ot_solver.outer_iterations)'], {}), '((num_iter, self.linear_ot_solver.outer_iterations))\n', (6408, 6460), True, 'import jax.numpy as jnp\n'), ((5279, 5328), 'jax.numpy.isclose', 'jnp.isclose', (['costs[i - 2]', 'costs[i - 1]'], {'rtol': 'tol'}), '(costs[i - 2], costs[i - 1], rtol=tol)\n', (5290, 5328), True, 'import jax.numpy as jnp\n')]
|
from django.test import TestCase
from django.urls import reverse
from interactions.models import Interaction
from interactions.tests.factories import InteractionFactory
class InteractionModelTestCase(TestCase):
"""Testing the interaction model class."""
def test_create_interaction(self):
interaction_from_factory = InteractionFactory()
interaction_from_db = Interaction.objects.first()
self.assertEqual(Interaction.objects.count(), 1)
self.assertEqual(interaction_from_factory.project, interaction_from_db.project)
self.assertEqual(interaction_from_factory.channel, interaction_from_db.channel)
self.assertEqual(interaction_from_factory.manager, interaction_from_db.manager)
self.assertEqual(interaction_from_factory.description, interaction_from_db.description)
self.assertEqual(interaction_from_factory.evaluation, interaction_from_db.evaluation)
self.assertEqual(
reverse('interaction:detail', kwargs={'pk': interaction_from_factory.pk}),
interaction_from_db.get_absolute_url()
)
self.assertEqual(
f"Взаимодействие с компанией {interaction_from_factory.project.company.name} #{interaction_from_factory.pk}",
            str(interaction_from_db)
)
|
[
"django.urls.reverse",
"interactions.tests.factories.InteractionFactory",
"interactions.models.Interaction.objects.count",
"interactions.models.Interaction.objects.first"
] |
[((335, 355), 'interactions.tests.factories.InteractionFactory', 'InteractionFactory', ([], {}), '()\n', (353, 355), False, 'from interactions.tests.factories import InteractionFactory\n'), ((386, 413), 'interactions.models.Interaction.objects.first', 'Interaction.objects.first', ([], {}), '()\n', (411, 413), False, 'from interactions.models import Interaction\n'), ((440, 467), 'interactions.models.Interaction.objects.count', 'Interaction.objects.count', ([], {}), '()\n', (465, 467), False, 'from interactions.models import Interaction\n'), ((964, 1037), 'django.urls.reverse', 'reverse', (['"""interaction:detail"""'], {'kwargs': "{'pk': interaction_from_factory.pk}"}), "('interaction:detail', kwargs={'pk': interaction_from_factory.pk})\n", (971, 1037), False, 'from django.urls import reverse\n')]
|
from torchtext.data import Field, TabularDataset, Iterator
from torchtext.vocab import Vectors
import torch
from .base import allennlp_tokenize, basic_tokenize, uniform_unk_init, space_tokenize, \
bert_tokenize, gpt2_tokenize
_REGISTRY = {}
class RegisteredDataset(TabularDataset):
def __init_subclass__(cls, name):
_REGISTRY[name.lower()] = cls
def list_field_mappings(field_tgt, vocab):
    """Return (source index, target index) pairs for words shared by both vocabs."""
    mapping = []
    for word in vocab.stoi:
        if word not in field_tgt.vocab.stoi:
            continue
        mapping.append((vocab.stoi[word], field_tgt.vocab.stoi[word]))
    return mapping
def replace_embeds(embeds_tgt, embeds_src, field_mappings):
    """Copy embedding rows from the source layer into the target layer row by row."""
    for idx_src, idx_tgt in field_mappings:
        embeds_tgt.weight.data[idx_tgt] = embeds_src.weight.data[idx_src]
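# A hedged usage sketch (the nn.Embedding attributes and `source_vocab` are
# assumed, not defined in this module):
#   mapping = list_field_mappings(SST2Dataset.TEXT_FIELD, source_vocab)
#   replace_embeds(target_model.embedding, source_model.embedding, mapping)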
class SST2Dataset(RegisteredDataset, name="sst2"):
N_CLASSES = 2
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
return len(ex.sentence)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
fields = [("label", cls.LABEL_FIELD), ("sentence", cls.TEXT_FIELD), ("logits_0", cls.LOGITS_0),
("logits_1", cls.LOGITS_1)]
train_ds, dev_ds, test_ds = super(SST2Dataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
del test_ds.fields["logits_0"]
del test_ds.fields["logits_1"]
del test_ds.fields["label"]
return train_ds, dev_ds, test_ds
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class CoLADataset(RegisteredDataset, name="cola"):
N_CLASSES = 2
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
return len(ex.sentence)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
fields = [("label", cls.LABEL_FIELD), ("sentence", cls.TEXT_FIELD), ("logits_0", cls.LOGITS_0),
("logits_1", cls.LOGITS_1)]
return super(CoLADataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
sort_within_batch=False, device=device, sort=False)
class STSDataset(RegisteredDataset, name="sts"):
N_CLASSES = 1
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
SCORE = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
return len(ex.sentence1)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
fields = [("score", cls.SCORE), ("sentence1", cls.TEXT_FIELD), ("sentence2", cls.TEXT_FIELD)]
return super(STSDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
sort_within_batch=False, device=device, sort=False)
class MRPCDataset(RegisteredDataset, name="mrpc"):
N_CLASSES = 2
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
        return len(ex.sentence1)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
fields = [("label", cls.LABEL_FIELD), ("sentence1", cls.TEXT_FIELD), ("sentence2", cls.TEXT_FIELD),
("logits_0", cls.LOGITS_0), ("logits_1", cls.LOGITS_1)]
return super(MRPCDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
sort_within_batch=False, device=device, sort=False)
class QQBDataset(RegisteredDataset, name="qqb"):
N_CLASSES = 2
LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
return len(ex.question1)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
fields = [("is_duplicate", cls.LABEL_FIELD), ("question1", cls.TEXT_FIELD), ("question2", cls.TEXT_FIELD),
("logits_0", cls.LOGITS_0), ("logits_1", cls.LOGITS_1)]
return super(QQBDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
sort_within_batch=False, device=device, sort=False)
class QNLIDataset(RegisteredDataset, name="qnli"):
N_CLASSES = 2
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
LOGITS = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
        return len(ex.question)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
fields = [("index", cls.LABEL_FIELD), ("question", cls.TEXT_FIELD), ("sentence", cls.TEXT_FIELD), ("label", cls.LABEL_FIELD),
("logits_0", cls.LOGITS), ("logits_1", cls.LOGITS)]
return super(QNLIDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
sort_within_batch=False, device=device, sort=False)
class MNLIDataset_MisMatch(RegisteredDataset, name="mnli_mismatch"):
N_CLASSES = 3
TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize)
LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
LOGITS_2 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
@staticmethod
def sort_key(ex):
return len(ex.sentence1)
@classmethod
def splits(cls, folder_path, train="train.tsv", dev="dev_mismatched.tsv", test="test_mismatched.tsv"):
fields = [("gold_label", cls.LABEL_FIELD), ("sentence1", cls.TEXT_FIELD), ("sentence2", cls.TEXT_FIELD),
("logits_0", cls.LOGITS_0), ("logits_1", cls.LOGITS_1), ("logits_2", cls.LOGITS_2)]
return super(MNLIDataset_MisMatch, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
fields=fields, skip_header=True)
@classmethod
def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev_mismatched.tsv", test="test_mismatched.tsv"):
if vectors is None:
vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
train, val, test = cls.splits(path, train=train, dev=dev, test=test)
cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
sort_within_batch=False, device=device, sort=False)
def find_dataset(name):
return _REGISTRY[name]
def list_datasets():
return list(_REGISTRY.keys())
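# A hedged usage sketch of the registry (dataset path and vector file names
# are assumed):
#   ds_cls = find_dataset("sst2")
#   train_it, dev_it, test_it = ds_cls.iters(
#       "data/SST-2", "glove.840B.300d.txt", ".vector_cache", batch_size=32)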
|
[
"torchtext.data.Iterator.splits",
"torchtext.vocab.Vectors",
"torchtext.data.Field"
] |
[((883, 953), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize', 'include_lengths': '(True)'}), '(batch_first=True, tokenize=basic_tokenize, include_lengths=True)\n', (888, 953), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((972, 1030), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)'}), '(sequential=False, use_vocab=False, batch_first=True)\n', (977, 1030), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((1046, 1123), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (1051, 1123), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((1139, 1216), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (1144, 1216), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((2733, 2803), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize', 'include_lengths': '(True)'}), '(batch_first=True, tokenize=basic_tokenize, include_lengths=True)\n', (2738, 2803), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((2822, 2880), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)'}), '(sequential=False, use_vocab=False, batch_first=True)\n', (2827, 2880), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((2896, 2973), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (2901, 2973), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((2989, 3066), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (2994, 3066), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((4263, 4333), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize', 'include_lengths': '(True)'}), '(batch_first=True, tokenize=basic_tokenize, include_lengths=True)\n', (4268, 4333), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((4346, 4423), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (4351, 4423), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((5580, 5650), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize', 'include_lengths': '(True)'}), '(batch_first=True, tokenize=basic_tokenize, include_lengths=True)\n', (5585, 5650), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((5669, 5727), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)'}), '(sequential=False, use_vocab=False, batch_first=True)\n', (5674, 5727), False, 'from torchtext.data 
import Field, TabularDataset, Iterator\n'), ((5743, 5820), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (5748, 5820), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((5836, 5913), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (5841, 5913), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((7145, 7203), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)'}), '(sequential=False, use_vocab=False, batch_first=True)\n', (7150, 7203), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((7221, 7291), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize', 'include_lengths': '(True)'}), '(batch_first=True, tokenize=basic_tokenize, include_lengths=True)\n', (7226, 7291), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((7307, 7384), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (7312, 7384), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((7400, 7477), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (7405, 7477), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((8715, 8785), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize', 'include_lengths': '(True)'}), '(batch_first=True, tokenize=basic_tokenize, include_lengths=True)\n', (8720, 8785), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((8804, 8862), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)'}), '(sequential=False, use_vocab=False, batch_first=True)\n', (8809, 8862), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((8876, 8953), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (8881, 8953), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((10225, 10273), 'torchtext.data.Field', 'Field', ([], {'batch_first': '(True)', 'tokenize': 'basic_tokenize'}), '(batch_first=True, tokenize=basic_tokenize)\n', (10230, 10273), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((10292, 10350), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)'}), '(sequential=False, use_vocab=False, batch_first=True)\n', (10297, 10350), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((10366, 10443), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (10371, 
10443), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((10459, 10536), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (10464, 10536), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((10552, 10629), 'torchtext.data.Field', 'Field', ([], {'sequential': '(False)', 'use_vocab': '(False)', 'batch_first': '(True)', 'dtype': 'torch.float'}), '(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\n', (10557, 10629), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((2494, 2634), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': 'sort_within_batch', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n sort_within_batch=sort_within_batch, device=device, sort=False)\n', (2509, 2634), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((4039, 4167), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': '(False)', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n sort_within_batch=False, device=device, sort=False)\n', (4054, 4167), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((5354, 5482), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': '(False)', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n sort_within_batch=False, device=device, sort=False)\n', (5369, 5482), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((6920, 7048), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': '(False)', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n sort_within_batch=False, device=device, sort=False)\n', (6935, 7048), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((8489, 8617), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': '(False)', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n sort_within_batch=False, device=device, sort=False)\n', (8504, 8617), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((9981, 10109), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': '(False)', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n sort_within_batch=False, device=device, sort=False)\n', (9996, 10109), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((11722, 11850), 'torchtext.data.Iterator.splits', 'Iterator.splits', (['(train, val, test)'], {'batch_size': 'batch_size', 'repeat': '(False)', 'sort_within_batch': '(False)', 'device': 'device', 'sort': '(False)'}), '((train, val, test), batch_size=batch_size, repeat=False,\n 
sort_within_batch=False, device=device, sort=False)\n', (11737, 11850), False, 'from torchtext.data import Field, TabularDataset, Iterator\n'), ((2135, 2201), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (2142, 2201), False, 'from torchtext.vocab import Vectors\n'), ((3809, 3875), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (3816, 3875), False, 'from torchtext.vocab import Vectors\n'), ((5124, 5190), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (5131, 5190), False, 'from torchtext.vocab import Vectors\n'), ((6690, 6756), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (6697, 6756), False, 'from torchtext.vocab import Vectors\n'), ((8259, 8325), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (8266, 8325), False, 'from torchtext.vocab import Vectors\n'), ((9751, 9817), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (9758, 9817), False, 'from torchtext.vocab import Vectors\n'), ((11492, 11558), 'torchtext.vocab.Vectors', 'Vectors', ([], {'name': 'vectors_name', 'cache': 'vectors_cache', 'unk_init': 'unk_init'}), '(name=vectors_name, cache=vectors_cache, unk_init=unk_init)\n', (11499, 11558), False, 'from torchtext.vocab import Vectors\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 20:41:43 2020
@author: djamal
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import sys
sys.path.append('C:/Users/DJAMAL/Documents/GitHub/Jamal_NREL2020')
#External Module
import MainBearing_Analytical_Model
import rwtparameters
from datetime import datetime
# Define turbine and drivetrain characteristics
Parameters = rwtparameters.RWTParameters()
FF_timestep, g, m_gr, m_s, m_rh, rho, L_gr, L_g, L_s, L_r, L_h, C1, e1, X1, Y1, C2, e2, X2 = Parameters.RWT_5MW()
#Assign Model Parameters for Analytical Model
MainBearingCalc = MainBearing_Analytical_Model.MainBearing_Analytical_Model(
FF_timestep = FF_timestep,
m_s = m_s,
m_gr = m_gr,
m_rh = m_rh,
g = g,
L_gr = L_gr,
L_g = L_g,
L_s = L_s,
L_r = L_r,
L_h = L_h,
rho = rho,
)
#Define load channel inputs
file = "/Users/DJAMAL/Documents/GitHub/Jamal_NREL2020/Example/5MWFastData.outb"
data, ChanName, info = MainBearingCalc.load_binary_output(file)
rot_speed = data[:,7] # rotor speed (rpm)
torque = data[:,5] * 1E3 # in N-m
RotThrust = data[:,6] * 1E3 # in N
m_y = data[:,8] * 1E3 # in N-m
m_z = data[:,9] * 1E3 # in N-m
f_y = data[:,10] * 1E3 # in N
f_z = data[:,11] * 1E3 # in N
startTime = datetime.now()
f_r1, f_r2, f_a1, f_total1 = MainBearingCalc.MB_forces(rho,torque, RotThrust, m_y, m_z, f_y, f_z, rot_speed, X1, Y1, X2)
MainBearingCalc.plot_loads(f_r1, f_a1, f_total1, f_r2, "Radial Force on MB1", "Axial Force on MB1", "Resultant Force on MB1", "Radial Force on MB2", "Time (s)", "Load (N)")
L101, L10_total_MB1 = MainBearingCalc.L10_Calc(rot_speed, f_total1, C1, e1)
L102, L10_total_MB2 = MainBearingCalc.L10_Calc(rot_speed, f_r2, C2, e2)
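# Hedged note: L10_Calc presumably follows the standard (ISO 281) basic rating
# life, L10 = (C/P)^e million revolutions, converted to hours with the rotor
# speed n in rpm:  L10h = 1e6 / (60 * n) * (C/P)^e.  Here C1/C2 are the dynamic
# load ratings, e1/e2 the life exponents, and the bearing loads play the role of
# the equivalent load P -- this is a reading of the call, not its specification.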
print('MB1 L10 Calculated:', L10_total_MB1, "hours or", L10_total_MB1/24/365, "years")
print('MB2 L10 Calculated:', L10_total_MB2, "hours or", L10_total_MB2/24/365, "years")
print('Run Time: ', datetime.now() - startTime)
|
[
"sys.path.append",
"rwtparameters.RWTParameters",
"datetime.datetime.now",
"MainBearing_Analytical_Model.MainBearing_Analytical_Model"
] |
[((179, 245), 'sys.path.append', 'sys.path.append', (['"""C:/Users/DJAMAL/Documents/GitHub/Jamal_NREL2020"""'], {}), "('C:/Users/DJAMAL/Documents/GitHub/Jamal_NREL2020')\n", (194, 245), False, 'import sys\n'), ((413, 442), 'rwtparameters.RWTParameters', 'rwtparameters.RWTParameters', ([], {}), '()\n', (440, 442), False, 'import rwtparameters\n'), ((623, 806), 'MainBearing_Analytical_Model.MainBearing_Analytical_Model', 'MainBearing_Analytical_Model.MainBearing_Analytical_Model', ([], {'FF_timestep': 'FF_timestep', 'm_s': 'm_s', 'm_gr': 'm_gr', 'm_rh': 'm_rh', 'g': 'g', 'L_gr': 'L_gr', 'L_g': 'L_g', 'L_s': 'L_s', 'L_r': 'L_r', 'L_h': 'L_h', 'rho': 'rho'}), '(FF_timestep=\n FF_timestep, m_s=m_s, m_gr=m_gr, m_rh=m_rh, g=g, L_gr=L_gr, L_g=L_g,\n L_s=L_s, L_r=L_r, L_h=L_h, rho=rho)\n', (680, 806), False, 'import MainBearing_Analytical_Model\n'), ((1319, 1333), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1331, 1333), False, 'from datetime import datetime\n'), ((1982, 1996), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1994, 1996), False, 'from datetime import datetime\n')]
|
"""Belinsky observability blueprint."""
import os
from flask import Blueprint
from healthcheck import HealthCheck
from healthcheck.security import safe_dict
from prometheus_client import CollectorRegistry, generate_latest, multiprocess
from ..database import get_all
from ..models import User
# Create healthcheck function
def check_database() -> tuple[bool, str]:
"""Check database is available."""
get_all(User)
return True, "Belinsky database is ok"
# Create observability function
def metrics_prometheus() -> tuple[bytes, int]:
"""Generate prometheus metrics response."""
registry = CollectorRegistry()
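    # Collect metrics from all worker processes; prometheus_client's multiprocess
    # mode requires the PROMETHEUS_MULTIPROC_DIR environment variable to be set.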
multiprocess.MultiProcessCollector(registry)
data = generate_latest(registry)
return data, 200
# Create environment function
def environment() -> tuple[dict, int]:
"""Generate application environment response."""
return safe_dict(os.environ, ["key", "token", "pass", "credentials"]), 200
def create_blueprint_observability() -> Blueprint:
"""Create observability blueprint."""
# Create blueprint
observability_bp = Blueprint("observability", __name__)
# Register healthcheck route
health = HealthCheck()
health.add_check(check_database)
observability_bp.add_url_rule("/healthcheck", "healthcheck", view_func=health.run)
# Register environment route
observability_bp.add_url_rule("/environment", "environment", view_func=environment)
# Register prometheus route
observability_bp.add_url_rule(
"/metrics/prometheus", "prometheus", view_func=metrics_prometheus
)
return observability_bp
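
# Usage sketch (assumed standard Flask wiring; "app" is hypothetical):
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(create_blueprint_observability())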
|
[
"prometheus_client.generate_latest",
"prometheus_client.CollectorRegistry",
"flask.Blueprint",
"healthcheck.HealthCheck",
"healthcheck.security.safe_dict",
"prometheus_client.multiprocess.MultiProcessCollector"
] |
[((613, 632), 'prometheus_client.CollectorRegistry', 'CollectorRegistry', ([], {}), '()\n', (630, 632), False, 'from prometheus_client import CollectorRegistry, generate_latest, multiprocess\n'), ((637, 681), 'prometheus_client.multiprocess.MultiProcessCollector', 'multiprocess.MultiProcessCollector', (['registry'], {}), '(registry)\n', (671, 681), False, 'from prometheus_client import CollectorRegistry, generate_latest, multiprocess\n'), ((693, 718), 'prometheus_client.generate_latest', 'generate_latest', (['registry'], {}), '(registry)\n', (708, 718), False, 'from prometheus_client import CollectorRegistry, generate_latest, multiprocess\n'), ((1084, 1120), 'flask.Blueprint', 'Blueprint', (['"""observability"""', '__name__'], {}), "('observability', __name__)\n", (1093, 1120), False, 'from flask import Blueprint\n'), ((1168, 1181), 'healthcheck.HealthCheck', 'HealthCheck', ([], {}), '()\n', (1179, 1181), False, 'from healthcheck import HealthCheck\n'), ((875, 937), 'healthcheck.security.safe_dict', 'safe_dict', (['os.environ', "['key', 'token', 'pass', 'credentials']"], {}), "(os.environ, ['key', 'token', 'pass', 'credentials'])\n", (884, 937), False, 'from healthcheck.security import safe_dict\n')]
|
import sys
import math
import warnings
import logging
class tcam:
""" a basic tcam class
"""
def __init__(self,entryWidth, priWidth=8, addrWidth=int(math.log2(sys.maxsize)), valueWidth=32, size=sys.maxsize):
"""
        entryWidth : width in bits of an entry
        priWidth : width of the priority in bits
        addrWidth : width of addresses in bits
        valueWidth : width of the associated value in bits
        size : max number of entries
"""
if math.log2(size) > addrWidth :
warnings.warn("addr width can't represents the size of table")
self.MaxEntries=size
self.PriorityWidth=priWidth
self.AddrWidth=addrWidth
self.ValueWidth=valueWidth
self.EntryWidth=entryWidth
self.Content=[]
def insert(self,key, mask, pri, val, addr=None):
"""insert information
key : key to look
mask : mask of the entry
pri : priority
val : result
addr : position inside the TCAM : optional
"""
line=(key,mask,pri,val,addr)
if len(self.Content) >= self.MaxEntries:
raise MemoryError("memory full content {} not inserted".format(line))
        if key > 2**self.EntryWidth-1 or key < 0:
            raise ValueError("inserted key {} out of range".format(key))
        if mask > 2**self.EntryWidth-1 or mask < 0:
            raise ValueError("inserted mask {} out of range".format(mask))
        if pri > 2**self.PriorityWidth-1 or pri < 0:
            raise ValueError("inserted priority {} out of range".format(pri))
        if val > 2**self.ValueWidth-1 or val < 0:
            raise ValueError("inserted value {} out of range".format(val))
self.Content.append(line)
logging.info("content {} inserted".format(line))
def search(self,val):
"""
        return the value associated with val at the highest priority;
        if two entries match with the same priority, the first one found wins
TODO: better algorithm for search
"""
res=(0,-1)
for (key,mask,pri,resO,_) in self.Content:
if (key & ~mask) == (val & ~mask) and res[1]<pri:
res = (resO, pri)
if res==(0,-1):
return None
else:
return res[0]
def deleteAddr(self, addr):
"""
delete the entry at addr
"""
        i=0
        notFound=True
        for (_,_,_,_,elem) in self.Content:
            if addr==elem:
                del self.Content[i]
                notFound=False
                break
            i=i+1
        if notFound:
            raise ValueError("Address {} is not present".format(addr))
def deleteKM(self, key, mask):
"""
        delete the entry corresponding to (key, mask)
"""
        i=0
        notFound=True
        for (keyC,maskC,_,_,_) in self.Content:
            if mask==maskC and key==keyC:
                # stop here: deleting while continuing to iterate would skip entries
                del self.Content[i]
                notFound=False
                break
            i=i+1
        if notFound:
            raise ValueError("pair (key, mask): ({}, {}) not found".format(key,mask))
def __str__(self):
ret=[]
        find_all = lambda c, s: [x for x in range(len(c)) if c[x] == s]
printFormat='{{0:0{0}b}}'.format(self.EntryWidth)
ret.append("number of entries {}".format(len(self.Content)))
for (key,mask,pri,res,_) in self.Content:
l=list(printFormat.format(key))
for i in find_all(printFormat.format(mask),'1'):
l[i]='*'
ret.append("key : {}".format("".join(l)))
ret.append("priority : {0}, result : {1}".format(pri,res))
return "\n".join(ret)
def __len__(self):
"""return number of entries
"""
return len(self.Content)
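
# Usage sketch (hypothetical values; mask bits set to 1 are wildcards):
#   t = tcam(entryWidth=8)
#   t.insert(key=0b10100000, mask=0b00001111, pri=1, val=42)
#   t.search(0b10100110)  # low nibble is masked out -> returns 42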
|
[
"warnings.warn",
"math.log2"
] |
[((167, 189), 'math.log2', 'math.log2', (['sys.maxsize'], {}), '(sys.maxsize)\n', (176, 189), False, 'import math\n'), ((555, 570), 'math.log2', 'math.log2', (['size'], {}), '(size)\n', (564, 570), False, 'import math\n'), ((597, 659), 'warnings.warn', 'warnings.warn', (['"""addr width can\'t represents the size of table"""'], {}), '("addr width can\'t represents the size of table")\n', (610, 659), False, 'import warnings\n')]
|
# coding: utf-8
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Unit tests for feature_engineering.py
"""
# Standard packages
import pandas as pd
import pytest
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import f_regression
# Own modules
from atom.feature_engineering import (
FeatureExtractor,
FeatureGenerator,
FeatureSelector,
)
from atom.utils import to_df
from .utils import X_bin, y_bin, X_class, y_class, X_reg, y_reg, X10_str, X10_dt
# Test FeatureExtractor ============================================ >>
def test_invalid_encoding_type():
"""Assert that an error is raised when encoding_type is invalid."""
with pytest.raises(ValueError, match=r".*the encoding_type parameter.*"):
FeatureExtractor(encoding_type="invalid").transform(X10_dt)
def test_invalid_features():
"""Assert that an error is raised when features are invalid."""
with pytest.raises(ValueError, match=r".*an attribute of pd.Series.dt.*"):
FeatureExtractor(features="invalid").transform(X10_dt)
def test_wrongly_converted_columns_are_ignored():
"""Assert that columns converted unsuccessfully are skipped."""
extractor = FeatureExtractor()
X = extractor.transform(X10_str)
assert "Feature 3" in X.columns
def test_datetime_features_are_used():
"""Assert that datetime64 features are used as is."""
X = to_df(X10_dt.copy())
X["Feature 3"] = pd.to_datetime(X["Feature 3"])
extractor = FeatureExtractor(features="day")
X = extractor.transform(X)
assert "Feature 3_day" in X.columns
assert "Feature 3" not in X.columns
def test_wrongly_converted_features_are_ignored():
"""Assert that wrongly converted features are ignored."""
extractor = FeatureExtractor(features=["tz", "is_leap_year", "day"])
X = extractor.transform(X10_dt)
assert "Feature 2_tz" not in X.columns # Not pd.Series.dt
def test_ordinal_features():
"""Assert that ordinal features are created."""
extractor = FeatureExtractor(features="day")
X = extractor.transform(X10_dt)
assert "Feature 3_day" in X.columns
assert "Feature 3" not in X.columns
def test_order_features():
"""Assert that the new features are in the order provided."""
extractor = FeatureExtractor()
X = extractor.transform(X10_dt)
assert X.columns.get_loc("Feature 3_day") == 2
assert X.columns.get_loc("Feature 3_month") == 3
assert X.columns.get_loc("Feature 3_year") == 4
@pytest.mark.parametrize("fxs", [
("microsecond", "%f"),
("second", "%S"),
("hour", "%H"),
("weekday", "%d/%m/%Y"),
("day", "%d/%m/%Y"),
("dayofyear", "%d/%m/%Y"),
("month", "%d/%m/%Y"),
("quarter", "%d/%m/%Y"),
])
def test_all_cyclic_features(fxs):
"""Assert that all cyclic columns create two features."""
extractor = FeatureExtractor(features=fxs[0], fmt=fxs[1], encoding_type="cyclic")
X = extractor.transform(X10_dt)
assert any(X.columns.str.contains(f"{fxs[0]}_cos"))
    assert X.shape[1] == 4 + 1  # two new columns and the original is dropped
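# Background (hedged): cyclic encoding conventionally maps a periodic feature x
# with period P to the pair (sin(2*pi*x/P), cos(2*pi*x/P)), which is why each
# cyclic column above yields two new features and the original column is dropped.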
def test_features_are_not_dropped():
"""Assert that features are kept when drop_columns=False."""
extractor = FeatureExtractor(drop_columns=False)
X = extractor.transform(X10_dt)
assert "Feature 3" in X.columns
# Test FeatureGenerator ============================================ >>
def test_n_features_parameter_negative():
"""Assert that an error is raised when n_features is negative."""
generator = FeatureGenerator(n_features=-2)
with pytest.raises(ValueError, match=r".*should be >0.*"):
generator.fit(X_bin, y_bin)
def test_population_parameter():
"""Assert that an error is raised when population is invalid."""
generator = FeatureGenerator(strategy="gfg", population=30)
pytest.raises(ValueError, generator.fit, X_reg, y_reg)
def test_generations_parameter():
"""Assert that an error is raised when generations is invalid."""
generator = FeatureGenerator(strategy="gfg", generations=0)
pytest.raises(ValueError, generator.fit, X_bin, y_bin)
def test_n_features_parameter_not_one_percent():
"""Assert that the n_features parameter is within 1% of population."""
generator = FeatureGenerator(strategy="gfg", n_features=23, population=200)
with pytest.raises(ValueError, match=r".*should be <1%.*"):
generator.fit(X_bin, y_bin)
def test_strategy_parameter():
"""Assert that the strategy parameter is either "DFS", "GFG" or "genetic"."""
generator = FeatureGenerator(strategy="invalid")
with pytest.raises(ValueError, match=r".*should be either 'dfs'.*"):
generator.fit(X_bin, y_bin)
def test_operators_parameter():
"""Assert that all operators are valid."""
generator = FeatureGenerator("GFG", n_features=None, operators=("div", "invalid"))
with pytest.raises(ValueError, match=r".*value in the operators.*"):
generator.fit(X_bin, y_bin)
def test_n_features_above_maximum():
"""Assert that n_features becomes maximum if more than maximum for "DFS"."""
generator = FeatureGenerator(n_features=1000, operators="log", random_state=1)
X = generator.fit_transform(X_bin, y_bin)
    assert X.shape[1] == 60  # 30 original + 30 log features
def test_genetic_non_improving_features():
"""Assert that the code doesn't fail if there are no new improving features."""
generator = FeatureGenerator(
strategy="gfg",
generations=5,
population=300,
operators="sqrt",
random_state=1,
)
_ = generator.fit_transform(X_reg, y_reg)
assert generator.genetic_features is None
def test_attribute_genetic_features():
"""Assert that the genetic_features attribute is created."""
generator = FeatureGenerator(
strategy="gfg",
generations=3,
population=200,
random_state=1,
)
_ = generator.fit_transform(X_bin, y_bin)
assert not generator.genetic_features.empty
def test_genetic_maximum_features():
"""Assert that the features are 1% of the population for n_features=None."""
generator = FeatureGenerator(
strategy="gfg",
n_features=None,
generations=4,
population=400,
random_state=1,
)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] == X_bin.shape[1] + 4
def test_updated_dataset():
"""Assert that the feature set contains the new features."""
generator = FeatureGenerator(
strategy="gfg",
n_features=1,
generations=4,
population=1000,
random_state=1,
)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] == X_bin.shape[1] + 1
generator = FeatureGenerator(strategy="dfs", n_features=None, random_state=1)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] > X_bin.shape[1]
# Test FeatureSelector ============================================= >>
def test_unknown_strategy_parameter():
"""Assert that an error is raised when strategy is unknown."""
selector = FeatureSelector(strategy="invalid")
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_solver_parameter_empty_univariate():
"""Assert that an error is raised when solver is None for univariate."""
selector = FeatureSelector(strategy="univariate")
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_raise_unknown_solver_univariate():
"""Assert that an error is raised when the solver is unknown."""
selector = FeatureSelector(strategy="univariate", solver="invalid")
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_solver_auto_PCA():
"""Assert that the solver is set to "auto" when None."""
selector = FeatureSelector(strategy="PCA", solver=None)
selector.fit(X_bin, y_bin)
assert selector._solver == "auto"
def test_solver_parameter_empty_SFM():
"""Assert that an error is raised when solver is None for SFM strategy."""
selector = FeatureSelector(strategy="SFM", solver=None)
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_goal_attribute():
"""Assert that the goal is deduced from the model's name."""
# For classification tasks
selector = FeatureSelector(strategy="SFM", solver="LGB_class")
selector.fit(X_bin, y_bin)
assert selector.goal == "class"
# For regression tasks
selector = FeatureSelector(strategy="SFM", solver="LGB_reg")
selector.fit(X_reg, y_reg)
assert selector.goal == "reg"
def test_solver_parameter_invalid_value():
"""Assert that an error is raised when solver is unknown."""
selector = FeatureSelector(strategy="RFE", solver="invalid")
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_n_features_parameter():
"""Assert that an error is raised when n_features is invalid."""
selector = FeatureSelector(strategy="SFM", solver="XGB_reg", n_features=0)
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_max_frac_repeated_parameter():
"""Assert that an error is raised when max_frac_repeated is invalid."""
selector = FeatureSelector(strategy=None, max_frac_repeated=1.1)
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_max_correlation_parameter():
"""Assert that an error is raised when max_correlation is invalid."""
selector = FeatureSelector(strategy=None, max_correlation=-0.2)
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_error_y_is_None():
"""Assert that an error is raised when y is None for some strategies."""
selector = FeatureSelector(strategy="univariate", solver=f_regression, n_features=9)
pytest.raises(ValueError, selector.fit, X_reg)
def test_remove_low_variance():
"""Assert that the remove_low_variance function works as intended."""
X = X_bin.copy()
X["invalid"] = 3 # Add column with minimum variance
selector = FeatureSelector(max_frac_repeated=1.0)
X = selector.fit_transform(X)
assert X.shape[1] == X_bin.shape[1]
def test_collinear_attribute():
"""Assert that the collinear attribute is created."""
selector = FeatureSelector(max_correlation=0.6)
assert hasattr(selector, "collinear")
def test_remove_collinear():
"""Assert that the remove_collinear function works as intended."""
selector = FeatureSelector(max_correlation=0.9)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 20 # Originally 30
def test_univariate_strategy_custom_solver():
"""Assert that the univariate strategy works for a custom solver."""
selector = FeatureSelector("univariate", solver=f_regression, n_features=9)
X = selector.fit_transform(X_reg, y_reg)
assert X.shape[1] == 9
assert set(selector.feature_importance) == set(X.columns)
def test_PCA_strategy():
"""Assert that the PCA strategy works as intended."""
selector = FeatureSelector(strategy="PCA", n_features=0.7)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 21
def test_PCA_components():
"""Assert that the PCA strategy creates components instead of features."""
selector = FeatureSelector(strategy="PCA")
X = selector.fit_transform(X_bin)
assert "Component 1" in X.columns
def test_SFM_prefit_invalid_estimator():
"""Assert that an error is raised for an invalid estimator in SFM."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1).fit(X_class, y_class),
n_features=8,
random_state=1,
)
with pytest.raises(ValueError, match=r".*different columns than X.*"):
selector.fit(X_bin, y_bin)
def test_SFM_strategy_not_threshold():
"""Assert that if threshold is not specified, SFM selects n_features features."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1),
n_features=16,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 16
def test_SFM_invalid_solver():
"""Assert that an error is raised when solver is invalid."""
selector = FeatureSelector(strategy="SFM", solver="invalid", n_features=5)
with pytest.raises(ValueError, match=r".*Unknown model.*"):
selector.fit_transform(X_bin, y_bin)
def test_SFM_strategy_fitted_solver():
"""Assert that the SFM strategy works when the solver is already fitted."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1).fit(X_bin, y_bin),
n_features=7,
random_state=1,
)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 7
assert set(selector.feature_importance) == set(X.columns)
def test_SFM_strategy_not_fitted_solver():
"""Assert that the SFM strategy works when the solver is not fitted."""
selector = FeatureSelector(
strategy="SFM", solver=ExtraTreesClassifier(random_state=1), n_features=5
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 5
assert set(selector.feature_importance) == set(X.columns)
def test_RFE_strategy():
"""Assert that the RFE strategy works as intended."""
selector = FeatureSelector(
strategy="RFE",
solver=ExtraTreesClassifier(random_state=1),
n_features=13,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 13
assert set(selector.feature_importance) == set(X.columns)
def test_RFECV_strategy_before_pipeline_classification():
"""Assert that the RFECV strategy works before a fitted pipeline."""
selector = FeatureSelector(
strategy="RFECV",
solver="RF_class",
n_features=None,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 17
assert set(selector.feature_importance) == set(X.columns)
def test_RFECV_strategy_before_pipeline_regression():
"""Assert that the RFECV strategy works before a fitted pipeline."""
selector = FeatureSelector("RFECV", solver="RF_reg", n_features=16, random_state=1)
X = selector.fit_transform(X_reg, y_reg)
assert X.shape[1] == 10
assert set(selector.feature_importance) == set(X.columns)
def test_SFS_strategy():
"""Assert that the SFS strategy works."""
selector = FeatureSelector("SFS", solver="RF_reg", n_features=6, cv=3, random_state=1)
X = selector.fit_transform(X_reg, y_reg)
assert X.shape[1] == 6
def test_kwargs_parameter_threshold():
"""Assert that the kwargs parameter works as intended (add threshold)."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1),
n_features=21,
threshold="mean",
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 10
def test_kwargs_parameter_tol():
"""Assert that the kwargs parameter works as intended (add tol)."""
selector = FeatureSelector(
strategy="PCA", solver="arpack", tol=0.001, n_features=12, random_state=1
)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 12
def test_kwargs_parameter_scoring():
"""Assert that the kwargs parameter works as intended (add scoring acronym)."""
selector = FeatureSelector(
strategy="RFECV",
solver="rf_class",
scoring="auc",
n_features=12,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 14
|
[
"atom.feature_engineering.FeatureExtractor",
"atom.feature_engineering.FeatureSelector",
"sklearn.ensemble.ExtraTreesClassifier",
"pytest.raises",
"atom.feature_engineering.FeatureGenerator",
"pandas.to_datetime",
"pytest.mark.parametrize"
] |
[((2520, 2740), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fxs"""', "[('microsecond', '%f'), ('second', '%S'), ('hour', '%H'), ('weekday',\n '%d/%m/%Y'), ('day', '%d/%m/%Y'), ('dayofyear', '%d/%m/%Y'), ('month',\n '%d/%m/%Y'), ('quarter', '%d/%m/%Y')]"], {}), "('fxs', [('microsecond', '%f'), ('second', '%S'), (\n 'hour', '%H'), ('weekday', '%d/%m/%Y'), ('day', '%d/%m/%Y'), (\n 'dayofyear', '%d/%m/%Y'), ('month', '%d/%m/%Y'), ('quarter', '%d/%m/%Y')])\n", (2543, 2740), False, 'import pytest\n'), ((1227, 1245), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (1243, 1245), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((1468, 1498), 'pandas.to_datetime', 'pd.to_datetime', (["X['Feature 3']"], {}), "(X['Feature 3'])\n", (1482, 1498), True, 'import pandas as pd\n'), ((1516, 1548), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'features': '"""day"""'}), "(features='day')\n", (1532, 1548), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((1791, 1847), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'features': "['tz', 'is_leap_year', 'day']"}), "(features=['tz', 'is_leap_year', 'day'])\n", (1807, 1847), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((2046, 2078), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'features': '"""day"""'}), "(features='day')\n", (2062, 2078), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((2306, 2324), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (2322, 2324), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((2879, 2948), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'features': 'fxs[0]', 'fmt': 'fxs[1]', 'encoding_type': '"""cyclic"""'}), "(features=fxs[0], fmt=fxs[1], encoding_type='cyclic')\n", (2895, 2948), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((3219, 3255), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'drop_columns': '(False)'}), '(drop_columns=False)\n', (3235, 3255), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((3531, 3562), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'n_features': '(-2)'}), '(n_features=-2)\n', (3547, 3562), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((3782, 3829), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'population': '(30)'}), "(strategy='gfg', population=30)\n", (3798, 3829), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((3834, 3888), 'pytest.raises', 'pytest.raises', (['ValueError', 'generator.fit', 'X_reg', 'y_reg'], {}), '(ValueError, generator.fit, X_reg, y_reg)\n', (3847, 3888), False, 'import pytest\n'), ((4011, 4058), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'generations': '(0)'}), "(strategy='gfg', generations=0)\n", (4027, 4058), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((4063, 4117), 'pytest.raises', 'pytest.raises', 
(['ValueError', 'generator.fit', 'X_bin', 'y_bin'], {}), '(ValueError, generator.fit, X_bin, y_bin)\n', (4076, 4117), False, 'import pytest\n'), ((4260, 4323), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'n_features': '(23)', 'population': '(200)'}), "(strategy='gfg', n_features=23, population=200)\n", (4276, 4323), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((4555, 4591), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""invalid"""'}), "(strategy='invalid')\n", (4571, 4591), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((4798, 4868), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', (['"""GFG"""'], {'n_features': 'None', 'operators': "('div', 'invalid')"}), "('GFG', n_features=None, operators=('div', 'invalid'))\n", (4814, 4868), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((5114, 5180), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'n_features': '(1000)', 'operators': '"""log"""', 'random_state': '(1)'}), "(n_features=1000, operators='log', random_state=1)\n", (5130, 5180), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((5418, 5520), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'generations': '(5)', 'population': '(300)', 'operators': '"""sqrt"""', 'random_state': '(1)'}), "(strategy='gfg', generations=5, population=300, operators=\n 'sqrt', random_state=1)\n", (5434, 5520), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((5777, 5856), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'generations': '(3)', 'population': '(200)', 'random_state': '(1)'}), "(strategy='gfg', generations=3, population=200, random_state=1)\n", (5793, 5856), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((6126, 6227), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'n_features': 'None', 'generations': '(4)', 'population': '(400)', 'random_state': '(1)'}), "(strategy='gfg', n_features=None, generations=4, population\n =400, random_state=1)\n", (6142, 6227), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((6471, 6570), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""gfg"""', 'n_features': '(1)', 'generations': '(4)', 'population': '(1000)', 'random_state': '(1)'}), "(strategy='gfg', n_features=1, generations=4, population=\n 1000, random_state=1)\n", (6487, 6570), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((6720, 6785), 'atom.feature_engineering.FeatureGenerator', 'FeatureGenerator', ([], {'strategy': '"""dfs"""', 'n_features': 'None', 'random_state': '(1)'}), "(strategy='dfs', n_features=None, random_state=1)\n", (6736, 6785), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((7067, 7102), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""invalid"""'}), "(strategy='invalid')\n", (7082, 7102), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, 
FeatureSelector\n'), ((7107, 7160), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (7120, 7160), False, 'import pytest\n'), ((7301, 7339), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""univariate"""'}), "(strategy='univariate')\n", (7316, 7339), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((7344, 7397), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (7357, 7397), False, 'import pytest\n'), ((7528, 7584), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""univariate"""', 'solver': '"""invalid"""'}), "(strategy='univariate', solver='invalid')\n", (7543, 7584), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((7589, 7642), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (7602, 7642), False, 'import pytest\n'), ((7749, 7793), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""PCA"""', 'solver': 'None'}), "(strategy='PCA', solver=None)\n", (7764, 7793), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((7998, 8042), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""SFM"""', 'solver': 'None'}), "(strategy='SFM', solver=None)\n", (8013, 8042), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((8047, 8100), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (8060, 8100), False, 'import pytest\n'), ((8241, 8292), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""SFM"""', 'solver': '"""LGB_class"""'}), "(strategy='SFM', solver='LGB_class')\n", (8256, 8292), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((8403, 8452), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""SFM"""', 'solver': '"""LGB_reg"""'}), "(strategy='SFM', solver='LGB_reg')\n", (8418, 8452), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((8643, 8692), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""RFE"""', 'solver': '"""invalid"""'}), "(strategy='RFE', solver='invalid')\n", (8658, 8692), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((8697, 8750), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (8710, 8750), False, 'import pytest\n'), ((8870, 8933), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""SFM"""', 'solver': '"""XGB_reg"""', 'n_features': '(0)'}), "(strategy='SFM', solver='XGB_reg', n_features=0)\n", (8885, 8933), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((8938, 8991), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (8951, 8991), False, 'import pytest\n'), ((9125, 9178), 
'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': 'None', 'max_frac_repeated': '(1.1)'}), '(strategy=None, max_frac_repeated=1.1)\n', (9140, 9178), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((9183, 9236), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (9196, 9236), False, 'import pytest\n'), ((9366, 9418), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': 'None', 'max_correlation': '(-0.2)'}), '(strategy=None, max_correlation=-0.2)\n', (9381, 9418), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((9423, 9476), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg', 'y_reg'], {}), '(ValueError, selector.fit, X_reg, y_reg)\n', (9436, 9476), False, 'import pytest\n'), ((9599, 9672), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""univariate"""', 'solver': 'f_regression', 'n_features': '(9)'}), "(strategy='univariate', solver=f_regression, n_features=9)\n", (9614, 9672), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((9677, 9723), 'pytest.raises', 'pytest.raises', (['ValueError', 'selector.fit', 'X_reg'], {}), '(ValueError, selector.fit, X_reg)\n', (9690, 9723), False, 'import pytest\n'), ((9925, 9963), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'max_frac_repeated': '(1.0)'}), '(max_frac_repeated=1.0)\n', (9940, 9963), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((10145, 10181), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'max_correlation': '(0.6)'}), '(max_correlation=0.6)\n', (10160, 10181), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((10341, 10377), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'max_correlation': '(0.9)'}), '(max_correlation=0.9)\n', (10356, 10377), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((10597, 10661), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', (['"""univariate"""'], {'solver': 'f_regression', 'n_features': '(9)'}), "('univariate', solver=f_regression, n_features=9)\n", (10612, 10661), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((10896, 10943), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""PCA"""', 'n_features': '(0.7)'}), "(strategy='PCA', n_features=0.7)\n", (10911, 10943), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((11133, 11164), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""PCA"""'}), "(strategy='PCA')\n", (11148, 11164), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((12126, 12189), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""SFM"""', 'solver': '"""invalid"""', 'n_features': '(5)'}), "(strategy='SFM', solver='invalid', n_features=5)\n", (12141, 12189), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((13631, 13720), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], 
{'strategy': '"""RFECV"""', 'solver': '"""RF_class"""', 'n_features': 'None', 'random_state': '(1)'}), "(strategy='RFECV', solver='RF_class', n_features=None,\n random_state=1)\n", (13646, 13720), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((14035, 14107), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', (['"""RFECV"""'], {'solver': '"""RF_reg"""', 'n_features': '(16)', 'random_state': '(1)'}), "('RFECV', solver='RF_reg', n_features=16, random_state=1)\n", (14050, 14107), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((14331, 14406), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', (['"""SFS"""'], {'solver': '"""RF_reg"""', 'n_features': '(6)', 'cv': '(3)', 'random_state': '(1)'}), "('SFS', solver='RF_reg', n_features=6, cv=3, random_state=1)\n", (14346, 14406), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((14981, 15075), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""PCA"""', 'solver': '"""arpack"""', 'tol': '(0.001)', 'n_features': '(12)', 'random_state': '(1)'}), "(strategy='PCA', solver='arpack', tol=0.001, n_features=12,\n random_state=1)\n", (14996, 15075), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((15290, 15392), 'atom.feature_engineering.FeatureSelector', 'FeatureSelector', ([], {'strategy': '"""RFECV"""', 'solver': '"""rf_class"""', 'scoring': '"""auc"""', 'n_features': '(12)', 'random_state': '(1)'}), "(strategy='RFECV', solver='rf_class', scoring='auc',\n n_features=12, random_state=1)\n", (15305, 15392), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((713, 779), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*the encoding_type parameter.*"""'}), "(ValueError, match='.*the encoding_type parameter.*')\n", (726, 779), False, 'import pytest\n'), ((958, 1025), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*an attribute of pd.Series.dt.*"""'}), "(ValueError, match='.*an attribute of pd.Series.dt.*')\n", (971, 1025), False, 'import pytest\n'), ((3572, 3623), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*should be >0.*"""'}), "(ValueError, match='.*should be >0.*')\n", (3585, 3623), False, 'import pytest\n'), ((4333, 4385), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*should be <1%.*"""'}), "(ValueError, match='.*should be <1%.*')\n", (4346, 4385), False, 'import pytest\n'), ((4601, 4662), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*should be either \'dfs\'.*"""'}), '(ValueError, match=".*should be either \'dfs\'.*")\n', (4614, 4662), False, 'import pytest\n'), ((4878, 4939), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*value in the operators.*"""'}), "(ValueError, match='.*value in the operators.*')\n", (4891, 4939), False, 'import pytest\n'), ((11550, 11613), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*different columns than X.*"""'}), "(ValueError, match='.*different columns than X.*')\n", (11563, 11613), False, 'import pytest\n'), ((12199, 12251), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*Unknown model.*"""'}), "(ValueError, match='.*Unknown model.*')\n", (12212, 12251), False, 'import pytest\n'), ((11849, 11885), 'sklearn.ensemble.ExtraTreesClassifier', 
'ExtraTreesClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (11869, 11885), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((12910, 12946), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (12930, 12946), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((13257, 13293), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (13277, 13293), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((14669, 14705), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (14689, 14705), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((790, 831), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'encoding_type': '"""invalid"""'}), "(encoding_type='invalid')\n", (806, 831), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((1036, 1072), 'atom.feature_engineering.FeatureExtractor', 'FeatureExtractor', ([], {'features': '"""invalid"""'}), "(features='invalid')\n", (1052, 1072), False, 'from atom.feature_engineering import FeatureExtractor, FeatureGenerator, FeatureSelector\n'), ((11429, 11465), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (11449, 11465), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((12491, 12527), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (12511, 12527), False, 'from sklearn.ensemble import ExtraTreesClassifier\n')]
|
from Bio import Entrez, SeqIO
import argparse
def gb_to_fasta(db_name, id_name, out_fasta):
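    # NCBI requires a contact e-mail for Entrez (E-utilities) requests.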
Entrez.email = "<EMAIL>"
handle = Entrez.efetch(db=db_name, id=id_name, rettype="gb", retmode='text')
genome = SeqIO.read(handle, 'genbank')
#print(genome.features)
with open(out_fasta, "w") as ofasta:
for feature in genome.features:
gene_name = ">{}_{}\n".format(feature.type, feature.location)
seq = feature.extract(genome.seq)
seq = "{}\n".format(str(seq))
if feature.type != "source":
ofasta.write(gene_name)
ofasta.write(seq)
def main():
parser = argparse.ArgumentParser(description="Downloads a gb file from NCBI and converts it to fasta format")
parser.add_argument("db_name", help="NCBI Database to download from")
parser.add_argument("id_name", help="Species ID to download from")
parser.add_argument("out_fasta", help="Name of the output fasta file")
args = parser.parse_args()
gb_to_fasta(args.db_name, args.id_name, args.out_fasta)
if __name__ == "__main__":
main()
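
# Example invocation (script and file names are illustrative):
#   python gb_to_fasta.py nucleotide NC_000913.3 features.fasta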
|
[
"Bio.Entrez.efetch",
"Bio.SeqIO.read",
"argparse.ArgumentParser"
] |
[((135, 202), 'Bio.Entrez.efetch', 'Entrez.efetch', ([], {'db': 'db_name', 'id': 'id_name', 'rettype': '"""gb"""', 'retmode': '"""text"""'}), "(db=db_name, id=id_name, rettype='gb', retmode='text')\n", (148, 202), False, 'from Bio import Entrez, SeqIO\n'), ((217, 246), 'Bio.SeqIO.read', 'SeqIO.read', (['handle', '"""genbank"""'], {}), "(handle, 'genbank')\n", (227, 246), False, 'from Bio import Entrez, SeqIO\n'), ((659, 764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Downloads a gb file from NCBI and converts it to fasta format"""'}), "(description=\n 'Downloads a gb file from NCBI and converts it to fasta format')\n", (682, 764), False, 'import argparse\n')]
|