code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from rest_framework import serializers
class StageSerializer(serializers.Serializer):
    """Serializes a pipeline/workflow stage, exposing only its name."""
    # Free-form stage name; no max_length or validation constraint is declared here.
    name = serializers.CharField()
|
[
"rest_framework.serializers.CharField"
] |
[((99, 122), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (120, 122), False, 'from rest_framework import serializers\n')]
|
from telebot import types, TeleBot
from dynaconf import settings as _settings
import pyowm
# Telegram bot authenticated with the token taken from dynaconf settings.
bot = TeleBot(_settings.SECRET_KEY)
# Inline keyboard with two buttons: request the weather, or decline.
keyboard = types.InlineKeyboardMarkup()
key_yes = types.InlineKeyboardButton(text='Что у нас по погоде?', callback_data='weather')
keyboard.add(key_yes)
key_no = types.InlineKeyboardButton(text='Пока не надо', callback_data='bye')
keyboard.add(key_no)
@bot.message_handler(commands=["weather"])
def weather_handler(message):
    """Ask the user which city to report on, then hand the reply to weather()."""
    prompt = bot.send_message(message.chat.id, "В каком городе Вам показать погоду?")
    bot.register_next_step_handler(prompt, weather)
def weather(message):
    """Look up current weather for the city named in *message* and reply to the chat."""
    chat_id = message.chat.id
    city = message.text.lower()
    # assumes the legacy pyowm 2.x API (weather_at_place/get_weather) -- TODO confirm version
    owm = pyowm.OWM(_settings.API_KEY, language="ru")
    observation = owm.weather_at_place(city)
    current = observation.get_weather()
    temperature = current.get_temperature("celsius")["temp"]
    wind = current.get_wind()["speed"]
    hum = current.get_humidity()
    desc = current.get_detailed_status()
    reply = (
        f"Сейчас в городе {city} {desc}, температура - {temperature}"
        f"°C, влажность - {hum}%, скорость ветра - {wind}м/с."
    )
    bot.send_message(chat_id, reply)
@bot.message_handler(commands=["start", "go"])
def start_message(message):
    """Greet the user by first name and present the inline keyboard."""
    greeting = (
        f"Приветствую вас, {message.from_user.first_name}!\n"
        f"Я бот, которй сообщит вам погоду в нужном для вас городе.\n"
        f"Для этого просто нажмите соответствующую кнопку."
    )
    bot.send_message(message.chat.id, greeting, reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    """Route inline-button presses to the matching hint message."""
    if call.data == "weather":
        text = "Для того, чтобы узнать погоду введите /weather"
    else:
        text = "Чтобы воспользоваться мной еще раз, то просто нажмите /start"
    bot.send_message(call.message.chat.id, text)
if __name__ == "__main__":
    # Long-poll Telegram for updates; none_stop keeps polling through API errors.
    bot.polling(none_stop=True)
|
[
"telebot.types.InlineKeyboardButton",
"pyowm.OWM",
"telebot.TeleBot",
"telebot.types.InlineKeyboardMarkup"
] |
[((99, 128), 'telebot.TeleBot', 'TeleBot', (['_settings.SECRET_KEY'], {}), '(_settings.SECRET_KEY)\n', (106, 128), False, 'from telebot import types, TeleBot\n'), ((141, 169), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (167, 169), False, 'from telebot import types, TeleBot\n'), ((180, 265), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Что у нас по погоде?"""', 'callback_data': '"""weather"""'}), "(text='Что у нас по погоде?', callback_data='weather'\n )\n", (206, 265), False, 'from telebot import types, TeleBot\n'), ((292, 360), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Пока не надо"""', 'callback_data': '"""bye"""'}), "(text='Пока не надо', callback_data='bye')\n", (318, 360), False, 'from telebot import types, TeleBot\n'), ((709, 752), 'pyowm.OWM', 'pyowm.OWM', (['_settings.API_KEY'], {'language': '"""ru"""'}), "(_settings.API_KEY, language='ru')\n", (718, 752), False, 'import pyowm\n')]
|
from time import sleep, time
from .api import Api
class Snapshots(Api):
    """Client for the project snapshot API endpoints."""

    # Seconds to sleep between transfer-status polls.
    delay = 10

    def get_snapshots(self, project):
        """Return the snapshot listing for *project*."""
        return self.do_request('/naut/project/{project}/snapshots'.format(project=project))

    def get_snapshot(self, project, id):
        """Return a single snapshot record by *id*."""
        # BUG FIX: format() was called with positional args against named
        # placeholders, which raises at runtime; use keyword arguments.
        return self.do_request('/naut/project/{project}/snapshots/{id}'.format(project=project, id=id))

    def get_snapshot_transfer(self, project, id):
        """Return the transfer-status record for snapshot transfer *id*."""
        # BUG FIX: same positional/named format() mismatch as get_snapshot().
        return self.do_request('/naut/project/{project}/snapshots/transfer/{id}'.format(project=project, id=id))

    def delete_snapshot(self, project, id):
        """Delete one snapshot; the API responds with an empty body."""
        # BUG FIX: same positional/named format() mismatch as get_snapshot().
        return self.do_request('/naut/project/{project}/snapshots/{id}'.format(project=project, id=id), None, 'DELETE', empty_response=True)

    def delete_all_snapshots(self, project):
        """Delete every snapshot currently listed for *project*."""
        snapshots = self.get_snapshots(project)
        for snapshot in snapshots['data']:
            self.delete_snapshot(project, snapshot['id'])

    def create_snapshot(self, project, type, env):
        """Request a new snapshot of environment *env* in mode *type*."""
        data = {
            "environment": env,
            "mode": type
        }
        # BUG FIX: named placeholder was filled positionally; use keyword arg.
        return self.do_request('/naut/project/{project}/snapshots'.format(project=project), data, 'POST')

    def check_transfer_complete(self, project, transfer_id):
        """Poll until the transfer reports 'Finished'; return its snapshot relationship."""
        start_time = time()
        complete = False
        while not complete:
            transfer = self.get_snapshot_transfer(project, transfer_id)
            if transfer['data']['attributes']['status'] == 'Finished':
                complete = True
            else:
                # BUG FIX: positional args against named placeholders.
                print("Waiting for {project} snapshot to complete... elapsed {seconds} seconds".format(
                    project=project, seconds=time() - start_time))
                sleep(self.delay)
        return transfer['data']['relationships']['snapshot']

    def download_snapshot(self, project, snapshot_id):
        """Download a snapshot as <project>-<source id>-<mode>-snapshot.sspak."""
        snapshot = self.get_snapshot(project, snapshot_id)
        download_link = snapshot['data']['links']['download_link']
        self.download_request(download_link, "{project}-{id}-{mode}-snapshot.sspak".format(project=project, id=snapshot['data']['relationships']['source']['data'][0]['id'], mode=snapshot['data']['attributes']['mode']))

    def easy_snapshot(self, project, type, env, filename='snapshot.spak'):
        """Create a snapshot, wait for completion, and download it as *filename*."""
        transfer = self.create_snapshot(project, type, env)
        snapshot_info = self.check_transfer_complete(project, transfer['data']['id'])
        snapshot = self.get_snapshot(project, snapshot_info['data']['id'])
        download_link = snapshot['data']['links']['download_link']
        # BUG FIX: download_file() was undefined; download_request() is the
        # helper download_snapshot() uses.  NOTE(review): the default filename
        # keeps the original '.spak' spelling (elsewhere '.sspak') -- confirm.
        self.download_request(download_link, filename)
|
[
"time.sleep",
"time.time"
] |
[((1195, 1201), 'time.time', 'time', ([], {}), '()\n', (1199, 1201), False, 'from time import sleep, time\n'), ((1603, 1620), 'time.sleep', 'sleep', (['self.delay'], {}), '(self.delay)\n', (1608, 1620), False, 'from time import sleep, time\n'), ((1565, 1571), 'time.time', 'time', ([], {}), '()\n', (1569, 1571), False, 'from time import sleep, time\n')]
|
from Genome.NN.Layer import Layer
import numpy as np
import pickle
class Brain:
    """A feed-forward network of Layer objects evolved with GA-style genes."""

    def __init__(self, brain_structure):
        """
        :param brain_structure: sequence of layer sizes, e.g. [16, 32, 2];
            the last entry is the output layer size.
        """
        self.brain_structure = brain_structure
        self.layers = []
        # Monotonically increasing node id, unique across every layer.
        self.id = 0
        # First layer added here
        ids = []
        genes = []
        for i in range(brain_structure[0]):
            ids.append(self.id)
            self.id += 1
            # Three random gene vectors per node, each sized to the next layer.
            genes.append([np.random.rand(brain_structure[1]), np.random.rand(brain_structure[1]), np.random.rand(brain_structure[1])])
        layer = Layer(ids)
        layer.set_genes(genes)
        self.layers.append(layer)
        # Remaining layers; the final index becomes the (gene-less) output layer.
        for i in range(1, len(brain_structure)):
            if i == (len(brain_structure) - 1):
                self.add_last_layer(brain_structure[-1])
            else:
                self.add_random_layer(brain_structure[i], brain_structure[i + 1])

    def add_random_layer(self, node_count, next_node_count):
        """Append a hidden layer of *node_count* nodes with random genes sized for the next layer."""
        ids = []
        genes = []
        for i in range(node_count):
            ids.append(self.id)
            self.id += 1
            genes.append([np.random.rand(next_node_count), np.random.rand(next_node_count), np.random.rand(next_node_count)])
        layer = Layer(ids)
        layer.set_genes(genes)
        # Wire the previous layer forward into the new one.
        self.layers[-1].add_layer_connections(layer)
        self.layers.append(layer)

    def add_last_layer(self, node_count):
        """Append the output layer; it carries node ids but no genes."""
        ids = []
        for i in range(node_count):
            ids.append(self.id)
            self.id += 1
        layer = Layer(ids)
        self.layers[-1].add_layer_connections(layer)
        self.layers.append(layer)

    def set_data(self, data):
        """Load *data* as the input of the first layer."""
        self.layers[0].set_layer_input(data)

    def feed_forward(self):
        """Propagate the input through every layer (normalizing all but the first)."""
        for l in range(len(self.layers)):
            if l != 0:
                self.layers[l].normalize()
            self.layers[l].feed_forward()

    def make_bebe(self, partner, mutation_rate):
        """Produce a child brain by crossing each layer with *partner*'s layer."""
        bebe = Brain(self.brain_structure)
        for i in range(len(self.layers)):
            bebe.layers[i] = self.layers[i].make_bebe(partner.layers[i], bebe.layers[i], mutation_rate)
        return bebe

    def get_answer(self):
        """Return the output layer's current values."""
        return self.layers[-1].get_layer_input()

    def save_model(self, file):
        """Pickle this brain to *file*."""
        with open(file, 'wb') as config_dictionary_file:
            pickle.dump(self, config_dictionary_file)

    @staticmethod
    def load_model(file):
        """Unpickle and return a brain from *file*.

        NOTE(review): pickle.load must only be used on trusted files.
        """
        with open(file, 'rb') as config_dictionary_file:
            brain = pickle.load(config_dictionary_file)
            return brain

    def print_genes(self):
        """Dump every layer's genes to stdout for debugging."""
        print("The genes od the brain")
        for layer in self.layers:
            print(layer.get_genes())
#
# brain = Brain(16, 32)
# brain.add_random_layer(32, 32)
# brain.add_random_layer(32, 48)
# brain.add_last_layer(2)
# brain.save_model("../Models/first_baby")
# brain = Brain.load_model("../Models/first_baby")
# print(len(brain.layers))
# brain.print_genes()
#
# brain.set_data(list(range(0, 16)))
# brain.feed_forward()
# print(brain.get_answer())
|
[
"numpy.random.rand",
"pickle.dump",
"pickle.load",
"Genome.NN.Layer.Layer"
] |
[((537, 547), 'Genome.NN.Layer.Layer', 'Layer', (['ids'], {}), '(ids)\n', (542, 547), False, 'from Genome.NN.Layer import Layer\n'), ((1202, 1212), 'Genome.NN.Layer.Layer', 'Layer', (['ids'], {}), '(ids)\n', (1207, 1212), False, 'from Genome.NN.Layer import Layer\n'), ((1500, 1510), 'Genome.NN.Layer.Layer', 'Layer', (['ids'], {}), '(ids)\n', (1505, 1510), False, 'from Genome.NN.Layer import Layer\n'), ((2290, 2331), 'pickle.dump', 'pickle.dump', (['self', 'config_dictionary_file'], {}), '(self, config_dictionary_file)\n', (2301, 2331), False, 'import pickle\n'), ((2454, 2489), 'pickle.load', 'pickle.load', (['config_dictionary_file'], {}), '(config_dictionary_file)\n', (2465, 2489), False, 'import pickle\n'), ((412, 446), 'numpy.random.rand', 'np.random.rand', (['brain_structure[1]'], {}), '(brain_structure[1])\n', (426, 446), True, 'import numpy as np\n'), ((448, 482), 'numpy.random.rand', 'np.random.rand', (['brain_structure[1]'], {}), '(brain_structure[1])\n', (462, 482), True, 'import numpy as np\n'), ((484, 518), 'numpy.random.rand', 'np.random.rand', (['brain_structure[1]'], {}), '(brain_structure[1])\n', (498, 518), True, 'import numpy as np\n'), ((1086, 1117), 'numpy.random.rand', 'np.random.rand', (['next_node_count'], {}), '(next_node_count)\n', (1100, 1117), True, 'import numpy as np\n'), ((1119, 1150), 'numpy.random.rand', 'np.random.rand', (['next_node_count'], {}), '(next_node_count)\n', (1133, 1150), True, 'import numpy as np\n'), ((1152, 1183), 'numpy.random.rand', 'np.random.rand', (['next_node_count'], {}), '(next_node_count)\n', (1166, 1183), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
class task_gen(object):
    """Waf task generator: holds source/target attributes and creates Task
    instances when post() runs the methods bound to its features."""
    # Class-level tables shared by all generators; instances get their own copies.
    mappings={}
    prec=Utils.defaultdict(list)
    def __init__(self,*k,**kw):
        self.source=''
        self.target=''
        self.meths=[]
        self.prec=Utils.defaultdict(list)
        self.mappings={}
        self.features=[]
        self.tasks=[]
        if not'bld'in kw:
            # Standalone generator (no build context), mainly for tests.
            self.env=ConfigSet.ConfigSet()
            self.idx=0
            self.path=None
        else:
            self.bld=kw['bld']
            self.env=self.bld.env.derive()
            self.path=self.bld.path
            # idx gives each generator in a directory a unique serial number.
            try:
                self.idx=self.bld.idx[id(self.path)]=self.bld.idx.get(id(self.path),0)+1
            except AttributeError:
                self.bld.idx={}
                self.idx=self.bld.idx[id(self.path)]=1
        for key,val in kw.items():
            setattr(self,key,val)
    def __str__(self):
        return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
    def __repr__(self):
        lst=[]
        for x in self.__dict__.keys():
            if x not in['env','bld','compiled_tasks','tasks']:
                lst.append("%s=%s"%(x,repr(getattr(self,x))))
        return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
    def get_name(self):
        # Lazily derive the name from the target when not set explicitly.
        try:
            return self._name
        except AttributeError:
            if isinstance(self.target,list):
                lst=[str(x)for x in self.target]
                name=self._name=','.join(lst)
            else:
                name=self._name=str(self.target)
            return name
    def set_name(self,name):
        self._name=name
    name=property(get_name,set_name)
    def to_list(self,val):
        """Split a space-separated string into a list; pass lists through."""
        if isinstance(val,str):return val.split()
        else:return val
    def post(self):
        """Run the feature methods in dependency order, creating tasks.

        Returns False when already posted, True otherwise.
        """
        if getattr(self,'posted',None):
            return False
        self.posted=True
        keys=set(self.meths)
        self.features=Utils.to_list(self.features)
        # Collect the methods bound to each declared feature (plus '*').
        for x in self.features+['*']:
            st=feats[x]
            if not st:
                if not x in Task.classes:
                    Logs.warn('feature %r does not exist - bind at least one method to it'%x)
            keys.update(list(st))
        # Restrict the precedence table to the methods actually selected.
        prec={}
        prec_tbl=self.prec or task_gen.prec
        for x in prec_tbl:
            if x in keys:
                prec[x]=prec_tbl[x]
        # Topological sort of the selected methods by the precedence constraints.
        tmp=[]
        for a in keys:
            for x in prec.values():
                if a in x:break
            else:
                tmp.append(a)
        tmp.sort()
        out=[]
        while tmp:
            e=tmp.pop()
            if e in keys:out.append(e)
            try:
                nlst=prec[e]
            except KeyError:
                pass
            else:
                del prec[e]
                for x in nlst:
                    for y in prec:
                        if x in prec[y]:
                            break
                    else:
                        tmp.append(x)
        if prec:
            raise Errors.WafError('Cycle detected in the method execution %r'%prec)
        out.reverse()
        self.meths=out
        Logs.debug('task_gen: posting %s %d'%(self,id(self)))
        for x in out:
            try:
                v=getattr(self,x)
            except AttributeError:
                raise Errors.WafError('%r is not a valid task generator method'%x)
            Logs.debug('task_gen: -> %s (%d)'%(x,id(self)))
            v()
        Logs.debug('task_gen: posted %s'%self.name)
        return True
    def get_hook(self,node):
        """Return the processing function mapped to *node*'s file extension."""
        name=node.name
        for k in self.mappings:
            if name.endswith(k):
                return self.mappings[k]
        for k in task_gen.mappings:
            if name.endswith(k):
                return task_gen.mappings[k]
        raise Errors.WafError("File %r has no mapping in %r (did you forget to load a waf tool?)"%(node,task_gen.mappings.keys()))
    def create_task(self,name,src=None,tgt=None):
        """Instantiate the task class *name* with this generator's env and register it."""
        task=Task.classes[name](env=self.env.derive(),generator=self)
        if src:
            task.set_inputs(src)
        if tgt:
            task.set_outputs(tgt)
        self.tasks.append(task)
        return task
    def clone(self,env):
        """Duplicate this generator under another env; path/features are shared, the rest copied."""
        newobj=self.bld()
        for x in self.__dict__:
            if x in['env','bld']:
                continue
            elif x in['path','features']:
                setattr(newobj,x,getattr(self,x))
            else:
                setattr(newobj,x,copy.copy(getattr(self,x)))
        newobj.posted=False
        if isinstance(env,str):
            newobj.env=self.bld.all_envs[env].derive()
        else:
            newobj.env=env.derive()
        return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
    """Create a task class from *rule* and map the input extensions to a
    generated per-file handler (x_file) on task_gen."""
    ext_in=Utils.to_list(ext_in)
    ext_out=Utils.to_list(ext_out)
    if not name:
        name=rule
    cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
    def x_file(self,node):
        # Decide output extensions dynamically when a decider is given.
        ext=decider and decider(self,node)or cls.ext_out
        if ext_in:
            _ext_in=ext_in[0]
        tsk=self.create_task(name,node)
        cnt=0
        keys=list(self.mappings.keys())+list(self.__class__.mappings.keys())
        for x in ext:
            k=node.change_ext(x,ext_in=_ext_in)
            tsk.outputs.append(k)
            if reentrant!=None:
                # Feed the first *reentrant* outputs back as sources.
                if cnt<int(reentrant):
                    self.source.append(k)
            else:
                # No explicit reentrancy: re-add outputs that another mapping handles.
                for y in keys:
                    if k.name.endswith(y):
                        self.source.append(k)
                        break
            cnt+=1
        if install_path:
            self.bld.install_files(install_path,tsk.outputs)
        return tsk
    for x in cls.ext_in:
        task_gen.mappings[x]=x_file
    return x_file
def taskgen_method(func):
    """Decorator: register *func* as a method on task_gen and return it unchanged."""
    name = func.__name__
    setattr(task_gen, name, func)
    return func
def feature(*k):
    """Decorator factory: bind the decorated function to each feature name in *k*."""
    def deco(func):
        method_name = func.__name__
        setattr(task_gen, method_name, func)
        for feature_name in k:
            feats[feature_name].update([method_name])
        return func
    return deco
def before_method(*k):
    """Decorator factory: make the decorated method run before the methods named in *k*."""
    def deco(func):
        fname = func.__name__
        setattr(task_gen, fname, func)
        for other in k:
            ordering = task_gen.prec[other]
            if fname not in ordering:
                ordering.append(fname)
        return func
    return deco
before = before_method
def after_method(*k):
    """Decorator factory: make the decorated method run after the methods named in *k*."""
    def deco(func):
        setattr(task_gen, func.__name__, func)
        for other in k:
            ordering = task_gen.prec[func.__name__]
            if other not in ordering:
                ordering.append(other)
        return func
    return deco
after = after_method
def extension(*k):
    """Decorator factory: map each file extension in *k* to the decorated hook."""
    def deco(func):
        setattr(task_gen, func.__name__, func)
        for ext in k:
            task_gen.mappings[ext] = func
        return func
    return deco
@taskgen_method
def to_nodes(self, lst, path=None):
    """Convert *lst* (file names or nodes) into a list of nodes, resolving
    names against *path* (default: self.path). Raises WafError for missing files."""
    result = []
    base = path or self.path
    if isinstance(lst, self.path.__class__):
        lst = [lst]
    for item in Utils.to_list(lst):
        node = base.find_resource(item) if isinstance(item, str) else item
        if not node:
            raise Errors.WafError("source not found: %r in %r" % (item, self))
        result.append(node)
    return result
@feature('*')
def process_source(self):
    """Resolve self.source to nodes, then invoke each node's mapped hook."""
    self.source = self.to_nodes(getattr(self, 'source', []))
    for node in self.source:
        hook = self.get_hook(node)
        hook(self, node)
@feature('*')
@before_method('process_source')
def process_rule(self):
    """Turn a bld(rule=..., target=...) declaration into a one-off task class
    and a task instance; task classes are cached per (name, rule)."""
    if not getattr(self,'rule',None):
        return
    name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
    try:
        cache=self.bld.cache_rule_attr
    except AttributeError:
        cache=self.bld.cache_rule_attr={}
    cls=None
    if getattr(self,'cache_rule','True'):
        try:
            cls=cache[(name,self.rule)]
        except KeyError:
            pass
    if not cls:
        cls=Task.task_factory(name,self.rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
        if getattr(self,'scan',None):
            cls.scan=self.scan
        elif getattr(self,'deps',None):
            # Build a scanner from the declared 'deps' file names.
            def scan(self):
                nodes=[]
                for x in self.generator.to_list(getattr(self.generator,'deps',None)):
                    node=self.generator.path.find_resource(x)
                    if not node:
                        self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
                    nodes.append(node)
                return[nodes,[]]
            cls.scan=scan
        if getattr(self,'update_outputs',None):
            Task.update_outputs(cls)
        if getattr(self,'always',None):
            Task.always_run(cls)
        for x in['after','before','ext_in','ext_out']:
            setattr(cls,x,getattr(self,x,[]))
        if getattr(self,'cache_rule','True'):
            cache[(name,self.rule)]=cls
    tsk=self.create_task(name)
    if getattr(self,'target',None):
        if isinstance(self.target,str):
            self.target=self.target.split()
        if not isinstance(self.target,list):
            self.target=[self.target]
        for x in self.target:
            if isinstance(x,str):
                tsk.outputs.append(self.path.find_or_declare(x))
            else:
                x.parent.mkdir()
                tsk.outputs.append(x)
        if getattr(self,'install_path',None):
            self.bld.install_files(self.install_path,tsk.outputs)
    if getattr(self,'source',None):
        tsk.inputs=self.to_nodes(self.source)
        # Prevent process_source from handling the same files again.
        self.source=[]
    if getattr(self,'cwd',None):
        tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
    """Chain generators declared with the 'seq' feature: the tasks of this
    generator run after all tasks of the previously posted one."""
    # Re-queue self so this method runs last among the generator's methods.
    if self.meths and self.meths[-1]!='sequence_order':
        self.meths.append('sequence_order')
        return
    if getattr(self,'seq_start',None):
        return
    if getattr(self.bld,'prev',None):
        self.bld.prev.post()
        for x in self.bld.prev.tasks:
            for y in self.tasks:
                y.set_run_after(x)
    self.bld.prev=self
re_m4=re.compile('@(\w+)@',re.M)
class subst_pc(Task.Task):
    """Task substituting @VAR@ markers in the input file (or plain copy when
    the generator sets is_copy)."""
    def run(self):
        # Plain copy mode: no substitution, optional chmod.
        if getattr(self.generator,'is_copy',None):
            self.outputs[0].write(self.inputs[0].read('rb'),'wb')
            if getattr(self.generator,'chmod',None):
                os.chmod(self.outputs[0].abspath(),self.generator.chmod)
            return
        code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
        # Custom substitution callback takes precedence over @VAR@ handling.
        if getattr(self.generator,'subst_fun',None):
            code=self.generator.subst_fun(self,code)
            if code:
                self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
            return
        # Escape literal % so the later %-formatting pass is safe.
        code=code.replace('%','%%')
        lst=[]
        def repl(match):
            # Rewrite @VAR@ to %(VAR)s and remember the variable name.
            g=match.group
            if g(1):
                lst.append(g(1))
                return"%%(%s)s"%g(1)
            return''
        code=re_m4.sub(repl,code)
        try:
            d=self.generator.dct
        except AttributeError:
            # No explicit dict: resolve each variable from the generator or env.
            d={}
            for x in lst:
                tmp=getattr(self.generator,x,'')or self.env.get_flat(x)or self.env.get_flat(x.upper())
                d[x]=str(tmp)
        code=code%d
        self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
        self.generator.bld.raw_deps[self.uid()]=self.dep_vars=lst
        # Drop the cached signature so sig_vars() recomputes with the new deps.
        try:delattr(self,'cache_sig')
        except AttributeError:pass
        if getattr(self.generator,'chmod',None):
            os.chmod(self.outputs[0].abspath(),self.generator.chmod)
    def sig_vars(self):
        """Fold the substituted variable values into the task signature."""
        bld=self.generator.bld
        env=self.env
        upd=self.m.update
        if getattr(self.generator,'subst_fun',None):
            upd(Utils.h_fun(self.generator.subst_fun))
        vars=self.generator.bld.raw_deps.get(self.uid(),[])
        act_sig=bld.hash_env_vars(env,vars)
        upd(act_sig)
        lst=[getattr(self.generator,x,'')for x in vars]
        upd(Utils.h_list(lst))
        return self.m.digest()
@extension('.pc.in')
def add_pcfile(self, node):
    """Create a subst_pc task for a .pc.in file and install the resulting .pc."""
    out = node.change_ext('.pc', '.pc.in')
    tsk = self.create_task('subst_pc', node, out)
    dest = getattr(self, 'install_path', '${LIBDIR}/pkgconfig/')
    self.bld.install_files(dest, tsk.outputs)
class subst(subst_pc):
    """Task type used by the 'subst' feature; identical behavior to subst_pc."""
    pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
    """Pair each source with its target and create a 'subst' task per pair."""
    src=Utils.to_list(getattr(self,'source',[]))
    if isinstance(src,Node.Node):
        src=[src]
    tgt=Utils.to_list(getattr(self,'target',[]))
    if isinstance(tgt,Node.Node):
        tgt=[tgt]
    if len(src)!=len(tgt):
        raise Errors.WafError('invalid number of source/target for %r'%self)
    for x,y in zip(src,tgt):
        if not x or not y:
            raise Errors.WafError('null source or target for %r'%self)
        a,b=None,None
        if isinstance(x,str)and isinstance(y,str)and x==y:
            # In-place substitution: source name equals target name.
            a=self.path.find_node(x)
            b=self.path.get_bld().make_node(y)
            if not os.path.isfile(b.abspath()):
                b.sig=None
                b.parent.mkdir()
        else:
            if isinstance(x,str):
                a=self.path.find_resource(x)
            elif isinstance(x,Node.Node):
                a=x
            if isinstance(y,str):
                b=self.path.find_or_declare(y)
            elif isinstance(y,Node.Node):
                b=y
        if not a:
            raise Errors.WafError('cound not find %r for %r'%(x,self))
        has_constraints=False
        tsk=self.create_task('subst',a,b)
        for k in('after','before','ext_in','ext_out'):
            val=getattr(self,k,None)
            if val:
                has_constraints=True
                setattr(tsk,k,val)
        # Generated headers must exist before any C/C++ compilation starts.
        if not has_constraints and b.name.endswith('.h'):
            tsk.before=[k for k in('c','cxx')if k in Task.classes]
        inst_to=getattr(self,'install_path',None)
        if inst_to:
            self.bld.install_files(inst_to,b,chmod=getattr(self,'chmod',Utils.O644))
    # Prevent process_source from handling the same files again.
    self.source=[]
|
[
"waflib.ConfigSet.ConfigSet",
"waflib.Task.always_run",
"waflib.Errors.WafError",
"waflib.Logs.debug",
"waflib.Task.task_factory",
"waflib.Utils.h_list",
"waflib.Utils.to_list",
"waflib.Utils.defaultdict",
"waflib.Logs.warn",
"waflib.Task.update_outputs",
"waflib.Utils.h_fun",
"re.compile"
] |
[((226, 248), 'waflib.Utils.defaultdict', 'Utils.defaultdict', (['set'], {}), '(set)\n', (243, 248), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((8419, 8447), 're.compile', 're.compile', (['"""@(\\\\w+)@"""', 're.M'], {}), "('@(\\\\w+)@', re.M)\n", (8429, 8447), False, 'import copy, re, os\n'), ((292, 315), 'waflib.Utils.defaultdict', 'Utils.defaultdict', (['list'], {}), '(list)\n', (309, 315), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((3974, 3995), 'waflib.Utils.to_list', 'Utils.to_list', (['ext_in'], {}), '(ext_in)\n', (3987, 3995), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((4005, 4027), 'waflib.Utils.to_list', 'Utils.to_list', (['ext_out'], {}), '(ext_out)\n', (4018, 4027), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((4059, 4189), 'waflib.Task.task_factory', 'Task.task_factory', (['name', 'rule'], {'color': 'color', 'ext_in': 'ext_in', 'ext_out': 'ext_out', 'before': 'before', 'after': 'after', 'scan': 'scan', 'shell': 'shell'}), '(name, rule, color=color, ext_in=ext_in, ext_out=ext_out,\n before=before, after=after, scan=scan, shell=shell)\n', (4076, 4189), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((5859, 5877), 'waflib.Utils.to_list', 'Utils.to_list', (['lst'], {}), '(lst)\n', (5872, 5877), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((407, 430), 'waflib.Utils.defaultdict', 'Utils.defaultdict', (['list'], {}), '(list)\n', (424, 430), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((1740, 1768), 'waflib.Utils.to_list', 'Utils.to_list', (['self.features'], {}), '(self.features)\n', (1753, 1768), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((2817, 2862), 'waflib.Logs.debug', 'Logs.debug', (["('task_gen: posted %s' % self.name)"], {}), "('task_gen: posted %s' % self.name)\n", (2827, 2862), False, 'from waflib 
import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((10597, 10661), 'waflib.Errors.WafError', 'Errors.WafError', (["('invalid number of source/target for %r' % self)"], {}), "('invalid number of source/target for %r' % self)\n", (10612, 10661), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((517, 538), 'waflib.ConfigSet.ConfigSet', 'ConfigSet.ConfigSet', ([], {}), '()\n', (536, 538), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((2459, 2526), 'waflib.Errors.WafError', 'Errors.WafError', (["('Cycle detected in the method execution %r' % prec)"], {}), "('Cycle detected in the method execution %r' % prec)\n", (2474, 2526), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((5961, 6018), 'waflib.Errors.WafError', 'Errors.WafError', (["('source not found: %r in %r' % (x, self))"], {}), "('source not found: %r in %r' % (x, self))\n", (5976, 6018), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((7237, 7261), 'waflib.Task.update_outputs', 'Task.update_outputs', (['cls'], {}), '(cls)\n', (7256, 7261), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((7299, 7319), 'waflib.Task.always_run', 'Task.always_run', (['cls'], {}), '(cls)\n', (7314, 7319), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((10015, 10032), 'waflib.Utils.h_list', 'Utils.h_list', (['lst'], {}), '(lst)\n', (10027, 10032), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((10716, 10770), 'waflib.Errors.WafError', 'Errors.WafError', (["('null source or target for %r' % self)"], {}), "('null source or target for %r' % self)\n", (10731, 10770), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((11208, 11263), 'waflib.Errors.WafError', 'Errors.WafError', (["('cound not find %r for %r' % (x, self))"], {}), "('cound not find %r for %r' % (x, self))\n", (11223, 11263), False, 'from waflib import 
Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((9813, 9850), 'waflib.Utils.h_fun', 'Utils.h_fun', (['self.generator.subst_fun'], {}), '(self.generator.subst_fun)\n', (9824, 9850), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((1865, 1940), 'waflib.Logs.warn', 'Logs.warn', (["('feature %r does not exist - bind at least one method to it' % x)"], {}), "('feature %r does not exist - bind at least one method to it' % x)\n", (1874, 1940), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n'), ((2696, 2758), 'waflib.Errors.WafError', 'Errors.WafError', (["('%r is not a valid task generator method' % x)"], {}), "('%r is not a valid task generator method' % x)\n", (2711, 2758), False, 'from waflib import Task, Utils, Logs, Errors, ConfigSet, Node\n')]
|
# !/usr/bin/python3
# coding: utf-8
# Copyright 2015-2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import cv2
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from wand.image import Image as WandImage
from scipy.ndimage import interpolation as inter
from receipt_parser_core import Receipt
from receipt_parser_core.config import read_config
# Folder layout, resolved relative to the current working directory.
BASE_PATH = os.getcwd()
INPUT_FOLDER = os.path.join(BASE_PATH, "data/img")    # source images
TMP_FOLDER = os.path.join(BASE_PATH, "data/tmp")      # intermediate processed images
OUTPUT_FOLDER = os.path.join(BASE_PATH, "data/txt")   # OCR text output
# ANSI escape codes used to colorize console logging.
ORANGE = '\033[33m'
RESET = '\033[0m'
def prepare_folders():
    """
    :return: void
        Creates the input/tmp/output data folders if they are missing
    """
    for folder in (INPUT_FOLDER, TMP_FOLDER, OUTPUT_FOLDER):
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(folder, exist_ok=True)
def find_images(folder):
    """
    :param folder: str
        Path to folder to search
    :return: generator of str
        Yields names of files in *folder* that PIL can open as images
    """
    for file in os.listdir(folder):
        full_path = os.path.join(folder, file)
        if not os.path.isfile(full_path):
            continue
        try:
            # Probe with PIL; close immediately so the lazy file handle
            # is not leaked (the old code never closed it).
            with Image.open(full_path):
                pass
        except Exception:
            # BUG FIX: was a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit; unreadable files are skipped.
            continue
        yield file
def rotate_image(input_file, output_file, angle=90):
    """
    :param input_file: str
        Path to image to rotate
    :param output_file: str
        Path to output image
    :param angle: float
        Angle to rotate
    :return: void
        Rotates image and saves result
    """
    with WandImage(filename=input_file) as img:
        width, height = img.size
        # Portrait images are assumed to be upright already; skip rotation.
        if width < height:
            angle = 0
        print(ORANGE + '\t~: ' + RESET + 'Rotate image by: ' + str(angle) + "°" + RESET)
        with img.clone() as rotated:
            rotated.rotate(angle)
            rotated.save(filename=output_file)
def deskew_image(image, delta=1, limit=5):
    """Straighten *image* by brute-forcing rotation angles in [-limit, limit]
    (step *delta*) and keeping the one maximizing row-projection contrast."""
    def determine_score(arr, angle):
        # Score peaks when text rows align horizontally: sum of squared
        # differences of adjacent row sums of the rotated binary image.
        data = inter.rotate(arr, angle, reshape=False, order=0)
        histogram = np.sum(data, axis=1)
        score = np.sum((histogram[1:] - histogram[:-1]) ** 2)
        return histogram, score
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    scores = []
    angles = np.arange(-limit, limit + delta, delta)
    for angle in angles:
        histogram, score = determine_score(thresh, angle)
        scores.append(score)
    best_angle = angles[scores.index(max(scores))]
    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, best_angle, 1.0)
    print(ORANGE + '\t~: ' + RESET + 'Deskew image by: ' + str(best_angle) + ' angle' + RESET)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)
    return rotated
def run_tesseract(input_file, output_file, language="deu"):
    """
    :param input_file: str
        Path to image to OCR
    :param output_file: str
        Path to output file
    :param language: str
        Tesseract language pack to use (default "deu")
    :return: void
        Runs tesseract on image and saves result
    """
    print(ORANGE + '\t~: ' + RESET + 'Parse image using pytesseract' + RESET)
    print(ORANGE + '\t~: ' + RESET + 'Parse image at: ' + input_file + RESET)
    print(ORANGE + '\t~: ' + RESET + 'Write result to: ' + output_file + RESET)
    with io.BytesIO() as transfer:
        # Re-encode via wand so pytesseract receives a format PIL can read.
        with WandImage(filename=input_file) as img:
            img.save(transfer)
        # NOTE(review): PIL rewinds the buffer itself before reading.
        with Image.open(transfer) as img:
            image_data = pytesseract.image_to_string(img, lang=language, timeout=60, config="--psm 6")
    # BUG FIX: the output handle was opened without a context manager and
    # leaked if write() raised; 'with' guarantees it is closed.
    with open(output_file, "w", encoding='utf-8') as out:
        out.write(image_data)
def rescale_image(img):
    """Upscale *img* by 20% on both axes using cubic interpolation."""
    print(ORANGE + '\t~: ' + RESET + 'Rescale image' + RESET)
    return cv2.resize(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)
def grayscale_image(img):
    """Convert a BGR image to single-channel grayscale."""
    print(ORANGE + '\t~: ' + RESET + 'Grayscale image' + RESET)
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def remove_noise(img):
    """Denoise and binarize a grayscale receipt image via blur + thresholding."""
    # 1x1 kernel: this dilate+erode pair is effectively a no-op cleanup.
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.dilate(img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=1)
    print(ORANGE + '\t~: ' + RESET + 'Applying gaussianBlur and medianBlur' + RESET)
    img = cv2.threshold(cv2.GaussianBlur(img, (5, 5), 0), 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    img = cv2.threshold(cv2.bilateralFilter(img, 5, 75, 75), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    # Final adaptive threshold to cope with uneven illumination.
    img = cv2.adaptiveThreshold(cv2.bilateralFilter(img, 9, 75, 75), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 31, 2)
    return img
def remove_shadows(img):
    """Flatten illumination per channel by subtracting a blurred background estimate."""
    rgb_planes = cv2.split(img)
    result_planes = []
    result_norm_planes = []
    for plane in rgb_planes:
        # Background estimate: dilate then median-blur the channel.
        dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
        bg_img = cv2.medianBlur(dilated_img, 21)
        diff_img = 255 - cv2.absdiff(plane, bg_img)
        norm_img = cv2.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
        result_planes.append(diff_img)
        # NOTE(review): the normalized planes are collected but never used;
        # only the raw difference planes form the returned image.
        result_norm_planes.append(norm_img)
    result = cv2.merge(result_planes)
    return result
def detect_orientation(image):
    """Log the min-area-rect rotation angle of the nonzero pixels.

    NOTE(review): the angle is only printed -- *image* is returned unchanged.
    """
    coords = np.column_stack(np.where(image > 0))
    angle = cv2.minAreaRect(coords)[-1]
    print(ORANGE + '\t~: ' + RESET + 'Get rotation angle:' + str(angle) + RESET)
    return image
def enhance_image(img, tmp_path, high_contrast=True, gaussian_blur=True, rotate=True):
    """Run the preprocessing pipeline: rescale, optional rotate (via the tmp
    file on disk) + deskew, shadow removal, then optional grayscale and
    noise removal.

    NOTE(review): callers pass a 'grayscale' flag into *high_contrast* --
    the name is misleading but the behavior (grayscale conversion) matches.
    """
    img = rescale_image(img)
    if rotate:
        # rotate_image works on files, so round-trip through tmp_path.
        cv2.imwrite(tmp_path, img)
        rotate_image(tmp_path, tmp_path)
        img = cv2.imread(tmp_path)
    img = deskew_image(img)
    img = remove_shadows(img)
    if high_contrast:
        img = grayscale_image(img)
    if gaussian_blur:
        img = remove_noise(img)
    return img
def process_receipt(config, filename, rotate=True, grayscale=True, gaussian_blur=True):
    """OCR a single receipt image and return it as a Receipt.

    Reads INPUT_FOLDER/<filename>, enhances it, stores the intermediate
    image in TMP_FOLDER, runs tesseract into OUTPUT_FOLDER, and wraps the
    resulting text lines in a Receipt. Returns an empty Receipt when the
    input image cannot be read.
    """
    input_path = INPUT_FOLDER + "/" + filename
    output_path = OUTPUT_FOLDER + "/" + filename.split(".")[0] + ".txt"
    print(ORANGE + '~: ' + RESET + 'Process image: ' + ORANGE + input_path + RESET)
    prepare_folders()
    # cv2.imread does not raise on a missing/unreadable file; it returns
    # None, so test the result instead of catching FileNotFoundError
    # (which was a dead except branch in the original).
    img = cv2.imread(input_path)
    if img is None:
        return Receipt(config=config, raw="")
    tmp_path = os.path.join(
        TMP_FOLDER, filename
    )
    # NOTE(review): the `rotate` parameter is not forwarded to
    # enhance_image (same as the original behaviour) — confirm intended.
    img = enhance_image(img, tmp_path, grayscale, gaussian_blur)
    print(ORANGE + '~: ' + RESET + 'Temporary store image at: ' + ORANGE + tmp_path + RESET)
    cv2.imwrite(tmp_path, img)
    run_tesseract(tmp_path, output_path, config.language)
    print(ORANGE + '~: ' + RESET + 'Store parsed text at: ' + ORANGE + output_path + RESET)
    # Use a context manager so the handle is closed deterministically
    # (the original leaked the open file object).
    with open(output_path, 'r') as parsed:
        raw = parsed.readlines()
    return Receipt(config=config, raw=raw)
def main():
    """Batch-process every image found in INPUT_FOLDER through the pipeline."""
    prepare_folders()
    config = read_config(config=os.getcwd() + "/config.yml")
    images = list(find_images(INPUT_FOLDER))
    print(ORANGE + '~: ' + RESET + 'Found: ' + ORANGE + str(len(images)),
          RESET + ' images in: ' + ORANGE + INPUT_FOLDER + RESET)
    total = len(images)
    for index, image in enumerate(images, start=1):
        input_path = os.path.join(INPUT_FOLDER, image)
        tmp_path = os.path.join(TMP_FOLDER, image)
        out_path = os.path.join(OUTPUT_FOLDER, image + ".txt")
        # Blank separator line between consecutive images.
        if index != 1:
            print()
        print(ORANGE + '~: ' + RESET + 'Process image (' + ORANGE + str(index) + '/' + str(
            total) + RESET + ') : ' + input_path + RESET)
        img = cv2.imread(input_path)
        img = enhance_image(img, tmp_path)
        cv2.imwrite(tmp_path, img)
        run_tesseract(tmp_path, out_path, config.language)
# Run the batch pipeline when executed as a script.
if __name__ == '__main__':
    main()
|
[
"cv2.GaussianBlur",
"numpy.sum",
"cv2.medianBlur",
"numpy.ones",
"cv2.bilateralFilter",
"cv2.warpAffine",
"os.path.isfile",
"numpy.arange",
"cv2.minAreaRect",
"cv2.normalize",
"cv2.erode",
"cv2.absdiff",
"wand.image.Image",
"cv2.getRotationMatrix2D",
"os.path.join",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"cv2.split",
"cv2.resize",
"io.BytesIO",
"receipt_parser_core.config.read_config",
"cv2.merge",
"os.listdir",
"pytesseract.pytesseract.image_to_string",
"receipt_parser_core.Receipt",
"os.makedirs",
"os.getcwd",
"cv2.threshold",
"scipy.ndimage.interpolation.rotate",
"PIL.Image.open",
"cv2.imread",
"numpy.where"
] |
[((906, 917), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (915, 917), False, 'import os\n'), ((933, 968), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/img"""'], {}), "(BASE_PATH, 'data/img')\n", (945, 968), False, 'import os\n'), ((982, 1017), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/tmp"""'], {}), "(BASE_PATH, 'data/tmp')\n", (994, 1017), False, 'import os\n'), ((1034, 1069), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/txt"""'], {}), "(BASE_PATH, 'data/txt')\n", (1046, 1069), False, 'import os\n'), ((1528, 1546), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1538, 1546), False, 'import os\n'), ((2709, 2748), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2721, 2748), False, 'import cv2\n'), ((2865, 2904), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + delta)', 'delta'], {}), '(-limit, limit + delta, delta)\n', (2874, 2904), True, 'import numpy as np\n'), ((3137, 3185), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'best_angle', '(1.0)'], {}), '(center, best_angle, 1.0)\n', (3160, 3185), False, 'import cv2\n'), ((3297, 3390), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {'flags': 'cv2.INTER_CUBIC', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.\n BORDER_REPLICATE)\n', (3311, 3390), False, 'import cv2\n'), ((4396, 4464), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': '(1.2)', 'fy': '(1.2)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\n', (4406, 4464), False, 'import cv2\n'), ((4582, 4619), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4594, 4619), False, 'import cv2\n'), ((4673, 4698), 'numpy.ones', 'np.ones', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (4680, 4698), True, 'import numpy as np\n'), ((4709, 4746), 'cv2.dilate', 'cv2.dilate', 
(['img', 'kernel'], {'iterations': '(1)'}), '(img, kernel, iterations=1)\n', (4719, 4746), False, 'import cv2\n'), ((4757, 4793), 'cv2.erode', 'cv2.erode', (['img', 'kernel'], {'iterations': '(1)'}), '(img, kernel, iterations=1)\n', (4766, 4793), False, 'import cv2\n'), ((5322, 5336), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (5331, 5336), False, 'import cv2\n'), ((5795, 5819), 'cv2.merge', 'cv2.merge', (['result_planes'], {}), '(result_planes)\n', (5804, 5819), False, 'import cv2\n'), ((6951, 6985), 'os.path.join', 'os.path.join', (['TMP_FOLDER', 'filename'], {}), '(TMP_FOLDER, filename)\n', (6963, 6985), False, 'import os\n'), ((7163, 7189), 'cv2.imwrite', 'cv2.imwrite', (['tmp_path', 'img'], {}), '(tmp_path, img)\n', (7174, 7189), False, 'import cv2\n'), ((7398, 7429), 'receipt_parser_core.Receipt', 'Receipt', ([], {'config': 'config', 'raw': 'raw'}), '(config=config, raw=raw)\n', (7405, 7429), False, 'from receipt_parser_core import Receipt\n'), ((7482, 7493), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7491, 7493), False, 'import os\n'), ((7507, 7551), 'receipt_parser_core.config.read_config', 'read_config', ([], {'config': "(dir_path + '/config.yml')"}), "(config=dir_path + '/config.yml')\n", (7518, 7551), False, 'from receipt_parser_core.config import read_config\n'), ((1568, 1594), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (1580, 1594), False, 'import os\n'), ((1606, 1631), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (1620, 1631), False, 'import os\n'), ((2088, 2118), 'wand.image.Image', 'WandImage', ([], {'filename': 'input_file'}), '(filename=input_file)\n', (2097, 2118), True, 'from wand.image import Image as WandImage\n'), ((2513, 2561), 'scipy.ndimage.interpolation.rotate', 'inter.rotate', (['arr', 'angle'], {'reshape': '(False)', 'order': '(0)'}), '(arr, angle, reshape=False, order=0)\n', (2525, 2561), True, 'from scipy.ndimage import interpolation as inter\n'), ((2582, 
2602), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (2588, 2602), True, 'import numpy as np\n'), ((2619, 2664), 'numpy.sum', 'np.sum', (['((histogram[1:] - histogram[:-1]) ** 2)'], {}), '((histogram[1:] - histogram[:-1]) ** 2)\n', (2625, 2664), True, 'import numpy as np\n'), ((2762, 2830), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (2775, 2830), False, 'import cv2\n'), ((3925, 3937), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3935, 3937), False, 'import io\n'), ((5130, 5165), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(9)', '(75)', '(75)'], {}), '(img, 9, 75, 75)\n', (5149, 5165), False, 'import cv2\n'), ((5501, 5532), 'cv2.medianBlur', 'cv2.medianBlur', (['dilated_img', '(21)'], {}), '(dilated_img, 21)\n', (5515, 5532), False, 'import cv2\n'), ((5604, 5702), 'cv2.normalize', 'cv2.normalize', (['diff_img', 'None'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8UC1'}), '(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_8UC1)\n', (5617, 5702), False, 'import cv2\n'), ((5901, 5920), 'numpy.where', 'np.where', (['(image > 0)'], {}), '(image > 0)\n', (5909, 5920), True, 'import numpy as np\n'), ((5934, 5957), 'cv2.minAreaRect', 'cv2.minAreaRect', (['coords'], {}), '(coords)\n', (5949, 5957), False, 'import cv2\n'), ((6204, 6230), 'cv2.imwrite', 'cv2.imwrite', (['tmp_path', 'img'], {}), '(tmp_path, img)\n', (6215, 6230), False, 'import cv2\n'), ((6286, 6306), 'cv2.imread', 'cv2.imread', (['tmp_path'], {}), '(tmp_path)\n', (6296, 6306), False, 'import cv2\n'), ((6836, 6858), 'cv2.imread', 'cv2.imread', (['input_path'], {}), '(input_path)\n', (6846, 6858), False, 'import cv2\n'), ((7795, 7828), 'os.path.join', 'os.path.join', (['INPUT_FOLDER', 'image'], {}), '(INPUT_FOLDER, image)\n', (7807, 7828), False, 'import os\n'), ((7882, 7913), 
'os.path.join', 'os.path.join', (['TMP_FOLDER', 'image'], {}), '(TMP_FOLDER, image)\n', (7894, 7913), False, 'import os\n'), ((7968, 8011), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', "(image + '.txt')"], {}), "(OUTPUT_FOLDER, image + '.txt')\n", (7980, 8011), False, 'import os\n'), ((8241, 8263), 'cv2.imread', 'cv2.imread', (['input_path'], {}), '(input_path)\n', (8251, 8263), False, 'import cv2\n'), ((8315, 8341), 'cv2.imwrite', 'cv2.imwrite', (['tmp_path', 'img'], {}), '(tmp_path, img)\n', (8326, 8341), False, 'import cv2\n'), ((1293, 1315), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1307, 1315), False, 'import os\n'), ((1329, 1348), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1340, 1348), False, 'import os\n'), ((3964, 3994), 'wand.image.Image', 'WandImage', ([], {'filename': 'input_file'}), '(filename=input_file)\n', (3973, 3994), True, 'from wand.image import Image as WandImage\n'), ((4048, 4068), 'PIL.Image.open', 'Image.open', (['transfer'], {}), '(transfer)\n', (4058, 4068), False, 'from PIL import Image\n'), ((4102, 4179), 'pytesseract.pytesseract.image_to_string', 'pytesseract.image_to_string', (['img'], {'lang': 'language', 'timeout': '(60)', 'config': '"""--psm 6"""'}), "(img, lang=language, timeout=60, config='--psm 6')\n", (4129, 4179), False, 'from pytesseract import pytesseract\n'), ((4905, 4937), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (4921, 4937), False, 'import cv2\n'), ((5013, 5048), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(5)', '(75)', '(75)'], {}), '(img, 5, 75, 75)\n', (5032, 5048), False, 'import cv2\n'), ((5458, 5483), 'numpy.ones', 'np.ones', (['(7, 7)', 'np.uint8'], {}), '((7, 7), np.uint8)\n', (5465, 5483), True, 'import numpy as np\n'), ((5558, 5584), 'cv2.absdiff', 'cv2.absdiff', (['plane', 'bg_img'], {}), '(plane, bg_img)\n', (5569, 5584), False, 'import cv2\n'), ((6904, 6934), 'receipt_parser_core.Receipt', 
'Receipt', ([], {'config': 'config', 'raw': '""""""'}), "(config=config, raw='')\n", (6911, 6934), False, 'from receipt_parser_core import Receipt\n'), ((1670, 1691), 'PIL.Image.open', 'Image.open', (['full_path'], {}), '(full_path)\n', (1680, 1691), False, 'from PIL import Image\n')]
|
import unittest
from unittest.mock import MagicMock
from ocrd_browser.view import ViewImages
from ocrd_browser.ui import MainWindow
from tests import TestCase
class ViewImagesTestCase(TestCase):
    """Construction smoke test for the ViewImages view."""

    def setUp(self):
        # A real MainWindow is heavyweight; a spec'd mock suffices here.
        main_window = MagicMock(spec=MainWindow)
        self.vx = ViewImages('unique', main_window)

    def test_can_construct(self):
        self.assertIsNotNone(self.vx)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"unittest.mock.MagicMock"
] |
[((392, 407), 'unittest.main', 'unittest.main', ([], {}), '()\n', (405, 407), False, 'import unittest\n'), ((258, 284), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'MainWindow'}), '(spec=MainWindow)\n', (267, 284), False, 'from unittest.mock import MagicMock\n')]
|
from __future__ import absolute_import, division, print_function
import types
import time
from threading import Thread
from manhattan.record import Record, PageRecord, GoalRecord
from manhattan.log.timerotating import TimeRotatingLog
from .base import BaseTest, work_path
def set_fake_name(log, index):
    """Monkey-patch *log* so log_name_for() yields '<path>.<index>'.

    The timestamp argument is accepted but ignored, which pins the log
    to a single fixed file name for deterministic tests.
    """
    def fixed_name(self, timestamp):
        return '{}.{}'.format(self.path, index)
    log.log_name_for = types.MethodType(fixed_name, log)
def make_thread_consumer(log_r, process_from=None):
    """Start a background thread that consumes records from *log_r*.

    Returns (consumed, consumer, last_pointer_container) where ``consumed``
    accumulates each parsed Record as it is read, ``consumer`` is the
    already-started Thread, and ``last_pointer_container`` is a one-element
    list holding the most recent log pointer (a mutable cell so the closure
    can update it in place).
    """
    consumed = []
    last_pointer_container = [None]
    # Shorten the reader's polling delay so the tests run quickly.
    log_r.sleep_delay = 0.001
    def consume(l):
        for rec, ptr in l.process(stay_alive=True, process_from=process_from):
            consumed.append(Record.from_list(rec))
            last_pointer_container[0] = ptr
    consumer = Thread(target=consume, args=(log_r,))
    consumer.start()
    return consumed, consumer, last_pointer_container
class TimeRotatingLogTest(BaseTest):
    """Integration tests for TimeRotatingLog write/read behaviour.

    The stay-alive tests spin up a background consumer thread and use short
    sleeps to let it catch up; each one stops the reader via killed.set()
    in a finally block so a failing assertion cannot leak a live thread.
    """

    def test_basic(self):
        """A single written record round-trips through the log."""
        path = work_path('trl-basic')
        log_w = TimeRotatingLog(path)
        log_w.write(PageRecord(url='/foo').to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 1)
        rec = Record.from_list(records[0][0])
        self.assertEqual(rec.url, '/foo')

    def test_multiple_logs(self):
        """Records split across rotated log files are all read, in order."""
        path = work_path('trl-multi')
        log_w = TimeRotatingLog(path)
        # Force distinct on-disk file names to simulate rotation.
        set_fake_name(log_w, '001')
        log_w.write(PageRecord(url='/foo').to_list())
        set_fake_name(log_w, '004')
        log_w.write(PageRecord(url='/bar').to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 2)
        self.assertEqual(Record.from_list(records[0][0]).url, '/foo')
        self.assertEqual(Record.from_list(records[1][0]).url, '/bar')

    def test_stay_alive_single(self):
        """A stay-alive reader picks up records appended to a single file."""
        path = work_path('trl-stayalive')
        log_r = TimeRotatingLog(path)
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        try:
            self.assertEqual(len(consumed), 0)
            log_w = TimeRotatingLog(path)
            log_w.write(PageRecord(url='/baz').to_list())
            # Give the reader thread several polling cycles to catch up.
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/baz')
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 2)
            self.assertEqual(consumed[1].url, '/herp')
        finally:
            log_r.killed.set()

    def test_stay_alive_multiple(self):
        """A stay-alive reader follows records across rotated file names."""
        path = work_path('trl-stayalive-multi')
        log_r = TimeRotatingLog(path)
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        try:
            self.assertEqual(len(consumed), 0)
            log_w = TimeRotatingLog(path)
            set_fake_name(log_w, '357')
            log_w.write(PageRecord(url='/baz').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/baz')
            # Rotate to a new file name and keep writing.
            set_fake_name(log_w, '358')
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 2)
            self.assertEqual(consumed[1].url, '/herp')
        finally:
            log_r.killed.set()

    def test_stay_alive_nofiles(self):
        """A stay-alive reader over a path with no files starts cleanly."""
        log_r = TimeRotatingLog(work_path('trl-stayalive-none'))
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        log_r.killed.set()

    def test_unicode_names(self):
        """Non-ASCII goal names survive the write/read round trip."""
        path = work_path('trl-unicode')
        log_w = TimeRotatingLog(path)
        goal_name = u'Goo\xf6aa\xe1llll!!!'
        rec = GoalRecord(name=goal_name,
                         value='',
                         value_type='',
                         value_format='')
        log_w.write(rec.to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 1)
        rec = Record.from_list(records[0][0])
        self.assertEqual(rec.name, goal_name)

    def test_resume(self):
        """A new reader resumes from a saved pointer, skipping old records."""
        path = work_path('trl-resume')
        log_w = TimeRotatingLog(path)
        # Create a thread consumer
        log_r1 = TimeRotatingLog(path)
        consumed, consumer, ptr_container = make_thread_consumer(log_r1)
        try:
            # Write one record
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r1.sleep_delay * 10)
            # Check that one record was read.
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/herp')
        finally:
            # Kill the thread
            log_r1.killed.set()
        # Wait for it to die.
        time.sleep(log_r1.sleep_delay * 10)
        last_pointer = ptr_container[0]
        self.assertIsNotNone(last_pointer)
        try:
            # Write one record
            log_w.write(PageRecord(url='/derp').to_list())
            time.sleep(log_r1.sleep_delay * 10)
            # Create a new thread consumer
            log_r2 = TimeRotatingLog(path)
            consumed, consumer, _ = \
                make_thread_consumer(log_r2, process_from=last_pointer)
            time.sleep(log_r2.sleep_delay * 10)
            # Check that the second record was read.
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/derp')
        finally:
            log_r2.killed.set()
|
[
"threading.Thread",
"manhattan.log.timerotating.TimeRotatingLog",
"manhattan.record.GoalRecord",
"types.MethodType",
"manhattan.record.Record.from_list",
"time.sleep",
"manhattan.record.PageRecord"
] |
[((411, 443), 'types.MethodType', 'types.MethodType', (['fake_name', 'log'], {}), '(fake_name, log)\n', (427, 443), False, 'import types\n'), ((793, 830), 'threading.Thread', 'Thread', ([], {'target': 'consume', 'args': '(log_r,)'}), '(target=consume, args=(log_r,))\n', (799, 830), False, 'from threading import Thread\n'), ((1026, 1047), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (1041, 1047), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((1119, 1140), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (1134, 1140), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((1253, 1284), 'manhattan.record.Record.from_list', 'Record.from_list', (['records[0][0]'], {}), '(records[0][0])\n', (1269, 1284), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((1416, 1437), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (1431, 1437), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((1637, 1658), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (1652, 1658), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((1994, 2015), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (2009, 2015), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((2785, 2806), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (2800, 2806), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((3868, 3889), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (3883, 3889), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((3948, 4016), 'manhattan.record.GoalRecord', 'GoalRecord', ([], {'name': 'goal_name', 'value': '""""""', 'value_type': '""""""', 
'value_format': '""""""'}), "(name=goal_name, value='', value_type='', value_format='')\n", (3958, 4016), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((4144, 4165), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (4159, 4165), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((4278, 4309), 'manhattan.record.Record.from_list', 'Record.from_list', (['records[0][0]'], {}), '(records[0][0])\n', (4294, 4309), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((4439, 4460), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (4454, 4460), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((4514, 4535), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (4529, 4535), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((2192, 2213), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (2207, 2213), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((2285, 2319), 'time.sleep', 'time.sleep', (['(log_r.sleep_delay * 10)'], {}), '(log_r.sleep_delay * 10)\n', (2295, 2319), False, 'import time\n'), ((2494, 2528), 'time.sleep', 'time.sleep', (['(log_r.sleep_delay * 10)'], {}), '(log_r.sleep_delay * 10)\n', (2504, 2528), False, 'import time\n'), ((2983, 3004), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (2998, 3004), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((3116, 3150), 'time.sleep', 'time.sleep', (['(log_r.sleep_delay * 10)'], {}), '(log_r.sleep_delay * 10)\n', (3126, 3150), False, 'import time\n'), ((3365, 3399), 'time.sleep', 'time.sleep', (['(log_r.sleep_delay * 10)'], {}), '(log_r.sleep_delay * 10)\n', (3375, 3399), False, 'import time\n'), ((4726, 4761), 'time.sleep', 'time.sleep', (['(log_r1.sleep_delay * 
10)'], {}), '(log_r1.sleep_delay * 10)\n', (4736, 4761), False, 'import time\n'), ((5035, 5070), 'time.sleep', 'time.sleep', (['(log_r1.sleep_delay * 10)'], {}), '(log_r1.sleep_delay * 10)\n', (5045, 5070), False, 'import time\n'), ((5271, 5306), 'time.sleep', 'time.sleep', (['(log_r1.sleep_delay * 10)'], {}), '(log_r1.sleep_delay * 10)\n', (5281, 5306), False, 'import time\n'), ((5371, 5392), 'manhattan.log.timerotating.TimeRotatingLog', 'TimeRotatingLog', (['path'], {}), '(path)\n', (5386, 5392), False, 'from manhattan.log.timerotating import TimeRotatingLog\n'), ((5515, 5550), 'time.sleep', 'time.sleep', (['(log_r2.sleep_delay * 10)'], {}), '(log_r2.sleep_delay * 10)\n', (5525, 5550), False, 'import time\n'), ((710, 731), 'manhattan.record.Record.from_list', 'Record.from_list', (['rec'], {}), '(rec)\n', (726, 731), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((1782, 1813), 'manhattan.record.Record.from_list', 'Record.from_list', (['records[0][0]'], {}), '(records[0][0])\n', (1798, 1813), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((1852, 1883), 'manhattan.record.Record.from_list', 'Record.from_list', (['records[1][0]'], {}), '(records[1][0])\n', (1868, 1883), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((1068, 1090), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/foo"""'}), "(url='/foo')\n", (1078, 1090), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((1495, 1517), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/foo"""'}), "(url='/foo')\n", (1505, 1517), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((1586, 1608), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/bar"""'}), "(url='/bar')\n", (1596, 1608), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((2239, 2261), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/baz"""'}), 
"(url='/baz')\n", (2249, 2261), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((2447, 2470), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/herp"""'}), "(url='/herp')\n", (2457, 2470), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((3070, 3092), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/baz"""'}), "(url='/baz')\n", (3080, 3092), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((3318, 3341), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/herp"""'}), "(url='/herp')\n", (3328, 3341), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((4679, 4702), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/herp"""'}), "(url='/herp')\n", (4689, 4702), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n'), ((5224, 5247), 'manhattan.record.PageRecord', 'PageRecord', ([], {'url': '"""/derp"""'}), "(url='/derp')\n", (5234, 5247), False, 'from manhattan.record import Record, PageRecord, GoalRecord\n')]
|
import argparse
import d3rlpy
def main():
    """Train discrete CQL on a fraction of an Atari dataset and score it online."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', type=str, default='breakout')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--gpu', type=int)
    args = parser.parse_args()
    # Seed d3rlpy (and below, the env) for reproducibility.
    d3rlpy.seed(args.seed)
    # Load 1% of the game's transitions; asterix uses dataset index 1 —
    # presumably to work around a bad index-0 dataset, TODO confirm.
    dataset, env = d3rlpy.datasets.get_atari_transitions(
        args.game,
        fraction=0.01,
        index=1 if args.game == "asterix" else 0,
    )
    env.seed(args.seed)
    cql = d3rlpy.algos.DiscreteCQL(
        learning_rate=5e-5,
        optim_factory=d3rlpy.models.optimizers.AdamFactory(eps=1e-2 / 32),
        batch_size=32,
        alpha=4.0,
        # Quantile-regression Q-function with 200 quantiles.
        q_func_factory=d3rlpy.models.q_functions.QRQFunctionFactory(
            n_quantiles=200),
        scaler="pixel",
        n_frames=4,
        target_update_interval=2000,
        # Standard Atari reward clipping to [-1, 1].
        reward_scaler=d3rlpy.preprocessing.ClipRewardScaler(-1.0, 1.0),
        use_gpu=args.gpu)
    # Near-greedy online evaluation (epsilon = 0.001).
    env_scorer = d3rlpy.metrics.evaluate_on_environment(env, epsilon=0.001)
    cql.fit(dataset,
            eval_episodes=[None],
            n_steps=50000000 // 4,
            n_steps_per_epoch=125000,
            scorers={
                'environment': env_scorer,
            },
            experiment_name=f"DiscreteCQL_{args.game}_{args.seed}")
# Run training when executed as a script.
if __name__ == '__main__':
    main()
|
[
"d3rlpy.preprocessing.ClipRewardScaler",
"argparse.ArgumentParser",
"d3rlpy.datasets.get_atari_transitions",
"d3rlpy.models.optimizers.AdamFactory",
"d3rlpy.metrics.evaluate_on_environment",
"d3rlpy.seed",
"d3rlpy.models.q_functions.QRQFunctionFactory"
] |
[((57, 82), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (80, 82), False, 'import argparse\n'), ((281, 303), 'd3rlpy.seed', 'd3rlpy.seed', (['args.seed'], {}), '(args.seed)\n', (292, 303), False, 'import d3rlpy\n'), ((324, 434), 'd3rlpy.datasets.get_atari_transitions', 'd3rlpy.datasets.get_atari_transitions', (['args.game'], {'fraction': '(0.01)', 'index': "(1 if args.game == 'asterix' else 0)"}), "(args.game, fraction=0.01, index=1 if \n args.game == 'asterix' else 0)\n", (361, 434), False, 'import d3rlpy\n'), ((964, 1022), 'd3rlpy.metrics.evaluate_on_environment', 'd3rlpy.metrics.evaluate_on_environment', (['env'], {'epsilon': '(0.001)'}), '(env, epsilon=0.001)\n', (1002, 1022), False, 'import d3rlpy\n'), ((573, 624), 'd3rlpy.models.optimizers.AdamFactory', 'd3rlpy.models.optimizers.AdamFactory', ([], {'eps': '(0.01 / 32)'}), '(eps=0.01 / 32)\n', (609, 624), False, 'import d3rlpy\n'), ((691, 752), 'd3rlpy.models.q_functions.QRQFunctionFactory', 'd3rlpy.models.q_functions.QRQFunctionFactory', ([], {'n_quantiles': '(200)'}), '(n_quantiles=200)\n', (735, 752), False, 'import d3rlpy\n'), ((870, 918), 'd3rlpy.preprocessing.ClipRewardScaler', 'd3rlpy.preprocessing.ClipRewardScaler', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (907, 918), False, 'import d3rlpy\n')]
|
"""
trajectory/utils/prereqs.py
Author: <NAME>
Define a collection of useful utility functions for analyzing course and
departmental prerequisite structures.
"""
def get_prereq_graph(course_id, format=None):
    """
    Generate a graph of prerequisites within a course. If format is not
    requested, simply return a NetworkX graph object.
    course_id: the ID of the requested course
    format: what format to return in (optional)
        node: json formatted as node-link style
        adjacency: json formatted as adjacency style
        tree: json formatted as tree style
    Returns None when the course does not exist; raises RuntimeError for
    an unknown format.
    """
    from trajectory.models import Department, Course
    from trajectory.models.meta import session
    from trajectory.utils.common import row2dict
    from networkx.readwrite import json_graph
    import networkx as nx
    import json
    if format not in [None, "node", "adjacency", "tree"]:
        raise RuntimeError("Unknown requested data format %s" % format)
    # Initialize a new NetworkX graph.
    G = nx.DiGraph()
    # Attempt to look up the requested course.
    course = session.query(Course).get(course_id)
    if course is None:
        return None
    # Recursively add course ids in a subtree to the graph.
    def add_tree(G, tree, parent=None):
        cid = tree[0] # unpack information
        prereqs = tree[1] # unpack information
        course = session.query(Course).get(cid)
        # Insert all known data, including department abbreviation.
        node_data = row2dict(course)
        node_data['dept'] = course.department.abbreviation
        # Identify the primary course in the graph (the requested).
        if str(cid) == str(course_id):
            node_data['prime'] = True
        else:
            node_data['prime'] = False
        # If the course has already been added, generate a unique ID for it
        # based on its parent, and add it anyway. But don't recurse into
        # its list of prereqs.
        seen = False
        if cid in G.nodes():
            cid = str(parent) + "-" + str(cid)
            seen = True
        # Add course and an edge from its parent, if relevant.
        # NOTE(review): the positional attribute-dict form of add_node (and
        # G.node[...] below) is networkx 1.x API, removed in 2.x — confirm
        # the pinned networkx version.
        G.add_node(cid, node_data)
        if parent is not None:
            G.add_edge(parent, cid)
        # Recurse through the prerequisite tree and add in subtrees.
        if not seen:
            for prereq in prereqs:
                add_tree(G, prereq, cid)
    # Navigate the prerequisite tree and add the course ids as nodes, and
    # prerequisite relationships as unweighted edges.
    prereq_tree = get_prereq_tree(course_id)
    add_tree(G, prereq_tree)
    # NOTE(review): G was assigned nx.DiGraph() above and never rebound, so
    # this guard can never trigger; kept for fidelity.
    if G is None:
        return G
    # Calculate and apply a basic layout.
    pos = nx.spring_layout(G)
    for node in G.nodes():
        G.node[node]["viz"] = {
            'position': {
                'x': pos[node][0],
                'y': pos[node][1]
            }
        }
    # Apply any requested data output formatting.
    if format == "node":
        return json.dumps(json_graph.node_link_data(G))
    elif format == "adjacency":
        return json.dumps(json_graph.adjacency_data(G))
    elif format == "tree":
        return json.dumps(json_graph.tree_data(G, int(course_id)))
    else:
        return G
def get_prereq_tree(course_id, parents=frozenset()):
    """
    Recursively identify the prerequisite chain of a course. This tree is
    rooted at the requested parent course and is structured as a tuple of
    tuples.
    Ex:
        (a [
            (b, [ ])        prereq of a
            (c, [           prereq of a
                (d, [])     prereq of c
                (e, [])     prereq of c
            ])
        ])

    course_id: the ID of the course rooting the tree
    parents: ids already on the current recursion path (cycle guard)
    Returns (course.id, [subtrees...]), or None for an unknown course or
    when course_id has already been visited on this path.
    """
    from trajectory.models import Course
    from trajectory.models.meta import session
    # Attempt to identify the parent course.
    course = session.query(Course).get(course_id)
    if course is None:
        return None
    # Recursive depth base case: do not expand a course already on this
    # path (cycle guard).
    # NOTE: the default is now an immutable frozenset rather than the
    # original mutable `set()` default (shared-mutable-default pitfall);
    # behaviour is unchanged since the set was never mutated in place.
    if course_id in parents:
        return None
    parents = parents | {course_id}
    # Base case: a course with no prerequisites is a leaf.
    if len(course.prerequisites) == 0:
        return (course.id, [])
    # Recursive call: collect a subtree per prerequisite.
    builder = []
    for prerequisite in course.prerequisites:
        sub_prereqs = get_prereq_tree(prerequisite.id, parents)
        if sub_prereqs is not None:
            builder.append(sub_prereqs)
    # Add recursively determined list.
    return (course.id, builder)
def get_prereq_set(course_id):
    """
    Return the flat set of prerequisite course ids for the requested
    course — no nesting, no repeats, and not including the requested
    course itself. Returns an empty set for an unknown course.
    """
    tree = get_prereq_tree(course_id)
    if tree is None:
        return set()
    # Iteratively flatten the nested (id, [subtrees...]) structure using an
    # explicit stack instead of a recursive generator.
    flat = set()
    pending = [tree]
    while pending:
        node = pending.pop()
        if isinstance(node, (list, tuple)):
            pending.extend(node)
        else:
            flat.add(node)
    # Exclude the requested course from its own prerequisite set.
    return flat - {course_id}
|
[
"networkx.readwrite.json_graph.adjacency_data",
"trajectory.models.meta.session.query",
"networkx.readwrite.json_graph.node_link_data",
"networkx.spring_layout",
"trajectory.utils.common.row2dict",
"networkx.DiGraph"
] |
[((1041, 1053), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1051, 1053), True, 'import networkx as nx\n'), ((2728, 2747), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (2744, 2747), True, 'import networkx as nx\n'), ((1526, 1542), 'trajectory.utils.common.row2dict', 'row2dict', (['course'], {}), '(course)\n', (1534, 1542), False, 'from trajectory.utils.common import row2dict\n'), ((1115, 1136), 'trajectory.models.meta.session.query', 'session.query', (['Course'], {}), '(Course)\n', (1128, 1136), False, 'from trajectory.models.meta import session\n'), ((3028, 3056), 'networkx.readwrite.json_graph.node_link_data', 'json_graph.node_link_data', (['G'], {}), '(G)\n', (3053, 3056), False, 'from networkx.readwrite import json_graph\n'), ((3791, 3812), 'trajectory.models.meta.session.query', 'session.query', (['Course'], {}), '(Course)\n', (3804, 3812), False, 'from trajectory.models.meta import session\n'), ((1406, 1427), 'trajectory.models.meta.session.query', 'session.query', (['Course'], {}), '(Course)\n', (1419, 1427), False, 'from trajectory.models.meta import session\n'), ((3116, 3144), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['G'], {}), '(G)\n', (3141, 3144), False, 'from networkx.readwrite import json_graph\n')]
|
import re
import keras.backend as keras_backend
from keras.layers import DepthwiseConv2D
import numpy as np
from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property
from blusky.wavelets.i_wavelet_2d import IWavelet2D
class ApplyFatherWavlet2D(HasStrictTraits):
"""
Provides a "convolution" method that will apply a father wavelet to
the endpoints of a cascade. Be sure to first apply layers to remove
any of the padding.
Assuming the input to the cascade is a power of 2 in shape, the result
will be a set of scattering coefficients at all orders of the transform
sampled regularly throughout the image. You can imagine that every
set of coefficients will be computed at the center of a tile, the shape
of which is determined by the "J" parameter. The degree to which these
tiles over lap is controlled by the "overlap_log_2". For interpretation,
consider values of "J" to give a tile of shape (2**(J+2), 2**(J+2)),
over which the texture of the image can be considered stationary.
The tiles can overlap by a factor of "M", however if you use the
default decimation, you must ensure that you have oversampled enough
to properly represent the stride at all scales of the transform.
With default decimation, oversamples=1, overlap_log_2 can be upto
J - 1. For each unit of overlap, you need to pay the cost of an
additional unit of oversampling.
"""
#: (J) This is the "J" scale parameter of the father wavelet used in the
# transform.
J = Int(2)
#: (M) This is defines the overlap of the tiles, so overlap_log_2 = 0
# would be no overlap, overlap_log_2 = 1 would be 50% overlap,
# overlap_log_2 = 2 would be 75% etc.
overlap_log_2 = Int(0)
#: Size of the image input to the Cascade_2d. This needs to be padded to a
# power of "2" to ensure that the coefficients are consistent.
img_size = Tuple
#: The sample rate of the input data
sample_rate = Float
#: Wavelet to use in convolution
wavelet = Instance(IWavelet2D)
#: Equivalent tile size derived from the log scale J
# J = round(log2(min(tile_size))) - 2
_tile_size = Property(Int, depends_on="J")
def _get__tile_size(self):
size = 2 ** (self.J + 2)
if size > self.img_size[0] or size > self.img_size[1]:
mn = min(self.img_size)
msg = "For image {} by {}, max J is {}".format(
self.img_size[0], self.img_size[1], np.log2(mn) - 2
)
raise RuntimeError(msg)
return (2 ** (self.J + 2), 2 ** (self.J + 2))
def _convolve(self, input_layer, trainable=False):
"""
The concept here is to first derive the applied decimation
from the shape of the input layer, then pad the layer and
apply the a convolution with the father wavelet. The padding
and strideof the convolution is designed to return set of coefficients
for a collections of regular (optionally overlapping) tiles.
This will be the case provided the size of the original input to the
transform are a power of 2.
Parameters
----------
input_layer - Keras Layer
A layer to apply the father wavelet to. The applied wavelet
is derived from the shape of the layer and knowlege of the
input image shape.
trainable - Bool (optional)
Toggle setting the convolution to be trainable. Either way it
is initialized with a gabor wavelet.
Returns
-------
conv - Keras Layer
A Keras layer applying the convolution to the input
"""
# create a convenient name
name = re.sub("[_/].*", "", input_layer.name)
name += "phi"
_, nh, nw, _ = input_layer.shape
nh = nh
nw = nw
# amount of decimation to here.
factor_1 = self.img_size[0] // nh
factor_2 = self.img_size[1] // nw
# how much to decimate the wavelet to required bandwidth
wavelet_stride = min(factor_1, factor_2)
# need to guarantee this, ideally crop the wavelet to a
# power of "2"
wav = self.wavelet.kernel(
0.0, shape=(2 ** (self.J + 2) - 1, 2 ** (self.J + 2) - 1)
)
#
wav = wav[::wavelet_stride, ::wavelet_stride]
# needs to be real
if np.iscomplexobj(wav):
wav = wav.real
# define a little helper to intialize the weights.
def init_weights(shape, **kwargs):
dtype = np.float32
weights = np.zeros(shape, dtype=dtype)
for ichan in range(shape[2]):
weights[:, :, ichan, 0] = wav.astype(dtype)
return keras_backend.variable(value=weights, dtype=dtype)
# use the father wavelet scale here instead of the default:
conv_stride = (
max(
2 ** (-self.overlap_log_2) * self._tile_size[0] // factor_1, 1
),
max(
2 ** (-self.overlap_log_2) * self._tile_size[1] // factor_2, 1
),
)
conv_stride = (int(conv_stride[0]), int(conv_stride[0]))
conv = DepthwiseConv2D(
name=name,
kernel_size=wav.shape,
depth_multiplier=1,
data_format="channels_last",
padding="valid",
strides=conv_stride,
trainable=trainable,
depthwise_initializer=init_weights,
)
return conv(input_layer)
def convolve(self, end_points):
"""
Apply father wavelet convolution.
Parameters
----------
end_points - List(Keras Layers)
Typically this would be the multiple end-points of the 2-D Cascade.
Returns
-------
scattering_transform - List(Keras Layers)
The father wavelet applied to each end-point. The stride and
padding of the convolution produces a consistent set of
coefficients at each scale, provided the shape of the original
image is a power of 2. For example, img.shape = (128, 256).
"""
scattering_transform = [self._convolve(i) for i in end_points]
return scattering_transform
|
[
"traits.api.Instance",
"numpy.iscomplexobj",
"traits.api.Property",
"keras.layers.DepthwiseConv2D",
"keras.backend.variable",
"traits.api.Int",
"numpy.log2",
"numpy.zeros",
"re.sub"
] |
[((1557, 1563), 'traits.api.Int', 'Int', (['(2)'], {}), '(2)\n', (1560, 1563), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((1770, 1776), 'traits.api.Int', 'Int', (['(0)'], {}), '(0)\n', (1773, 1776), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((2063, 2083), 'traits.api.Instance', 'Instance', (['IWavelet2D'], {}), '(IWavelet2D)\n', (2071, 2083), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((2202, 2231), 'traits.api.Property', 'Property', (['Int'], {'depends_on': '"""J"""'}), "(Int, depends_on='J')\n", (2210, 2231), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((3753, 3791), 're.sub', 're.sub', (['"""[_/].*"""', '""""""', 'input_layer.name'], {}), "('[_/].*', '', input_layer.name)\n", (3759, 3791), False, 'import re\n'), ((4435, 4455), 'numpy.iscomplexobj', 'np.iscomplexobj', (['wav'], {}), '(wav)\n', (4450, 4455), True, 'import numpy as np\n'), ((5258, 5459), 'keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'name': 'name', 'kernel_size': 'wav.shape', 'depth_multiplier': '(1)', 'data_format': '"""channels_last"""', 'padding': '"""valid"""', 'strides': 'conv_stride', 'trainable': 'trainable', 'depthwise_initializer': 'init_weights'}), "(name=name, kernel_size=wav.shape, depth_multiplier=1,\n data_format='channels_last', padding='valid', strides=conv_stride,\n trainable=trainable, depthwise_initializer=init_weights)\n", (5273, 5459), False, 'from keras.layers import DepthwiseConv2D\n'), ((4641, 4669), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4649, 4669), True, 'import numpy as np\n'), ((4793, 4843), 'keras.backend.variable', 'keras_backend.variable', ([], {'value': 'weights', 'dtype': 'dtype'}), '(value=weights, dtype=dtype)\n', (4815, 4843), True, 'import keras.backend as keras_backend\n'), ((2508, 2519), 'numpy.log2', 'np.log2', 
(['mn'], {}), '(mn)\n', (2515, 2519), True, 'import numpy as np\n')]
|
import networkx as nx
from matplotlib.pyplot import draw, show, clf
from mason import mason
def add_node(g, node):
if node == "":
return "Add name to the node"
g.add_node(node)
return "Node added successfully"
def remove_node(g, node):
if g.has_node(node):
g.remove_node(node)
return "Node removed successfully"
else:
return "Node doesn't exist in graph"
def add_edge(g, from_node, to_node, weight):
if len(weight) == 0:
weight = '1'
if g.has_node(from_node) and g.has_node(to_node):
if weight.isdigit():
g.add_weighted_edges_from([(from_node, to_node, int(weight))])
return "Edge added successfully\nDefault weight is 1"
else:
"The weight must be positive integer"
else:
return "One of the nodes is not in the graph"
def remove_edge(g, from_node, to_node, weight):
if g.has_node(from_node) and g.has_node(to_node):
if len(g.get_edge_data(from_node, to_node)) == 0:
return "No edge exists"
elif len(g.get_edge_data(from_node, to_node)) == 1:
g.remove_edge_clicked(from_node, to_node)
return "Edge removed successfully (Weight is neglected because it's the only edge between the nodes)"
else:
if len(weight) == 0:
return "There are multiple edges, specify the weight"
try:
to_remove = [(u, v, k) for u, v, k in g.edges(data=True) if k['weight'] == int(weight)]
g.remove_edges_from(to_remove)
except:
return "An exception occurred"
return "Edge removed successfully"
else:
return "One of the nodes is not in the graph"
def refresh(g):
clf()
pos = nx.spring_layout(g)
nx.draw(g, pos, with_labels=True, connectionstyle='arc3, rad=0.1')
labels = {}
for u, v, data in g.edges(data=True):
labels[(u, v)] = data['weight']
nx.draw_networkx_edge_labels(g, pos, edge_labels=labels, label_pos=0.3)
draw()
show()
def solve(g, source, sink):
nodes = list(g.nodes)
if len(nodes) == 0:
return "The graph is empty"
if len(source) == 0:
source = nodes[0]
if len(sink) == 0:
sink = nodes[len(nodes) - 1]
if g.has_node(source) and g.has_node(sink):
return mason(g, source, sink)
else:
return "One of the nodes is not in the graph"
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.draw",
"networkx.spring_layout",
"networkx.draw",
"networkx.draw_networkx_edge_labels",
"mason.mason"
] |
[((1823, 1828), 'matplotlib.pyplot.clf', 'clf', ([], {}), '()\n', (1826, 1828), False, 'from matplotlib.pyplot import draw, show, clf\n'), ((1840, 1859), 'networkx.spring_layout', 'nx.spring_layout', (['g'], {}), '(g)\n', (1856, 1859), True, 'import networkx as nx\n'), ((1865, 1931), 'networkx.draw', 'nx.draw', (['g', 'pos'], {'with_labels': '(True)', 'connectionstyle': '"""arc3, rad=0.1"""'}), "(g, pos, with_labels=True, connectionstyle='arc3, rad=0.1')\n", (1872, 1931), True, 'import networkx as nx\n'), ((2038, 2109), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['g', 'pos'], {'edge_labels': 'labels', 'label_pos': '(0.3)'}), '(g, pos, edge_labels=labels, label_pos=0.3)\n', (2066, 2109), True, 'import networkx as nx\n'), ((2115, 2121), 'matplotlib.pyplot.draw', 'draw', ([], {}), '()\n', (2119, 2121), False, 'from matplotlib.pyplot import draw, show, clf\n'), ((2127, 2133), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (2131, 2133), False, 'from matplotlib.pyplot import draw, show, clf\n'), ((2438, 2460), 'mason.mason', 'mason', (['g', 'source', 'sink'], {}), '(g, source, sink)\n', (2443, 2460), False, 'from mason import mason\n')]
|
# Generated by Django 2.2.24 on 2022-04-20 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ptart', '0010_project_archived'),
]
operations = [
migrations.AddField(
model_name='label',
name='deprecated',
field=models.BooleanField(default=False),
),
]
|
[
"django.db.models.BooleanField"
] |
[((334, 368), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (353, 368), False, 'from django.db import migrations, models\n')]
|
import time
from .base import FunctionalTest
from catalog.models import Book
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
bookCatalogLink = '/catalog/books/'
bookDetailsLink = '/catalog/book/'
class TestBookPage(FunctionalTest):
submit_selector = 'input[type=submit]'
def setUp(self):
return super().setUp()
def tearDown(self):
return super().tearDown()
def test_book_page_empty(self):
self.browser.get(self.live_server_url + bookCatalogLink)
self.assertEqual(self.browser.title, 'Local Library')
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertEqual(header_text, 'Book List')
list_text = self.browser.find_element_by_tag_name('p').text
self.assertEqual(list_text, 'There are no books in the library.')
def test_book_page_filled(self):
self.setUpBooks()
self.browser.get(self.live_server_url + bookCatalogLink)
time.sleep(1)
book_list = self.browser.find_element_by_id('book-list')
rows = book_list.find_elements_by_tag_name('li')
self.assertIn('Book Title (<NAME>)', [row.text for row in rows])
def test_book_page_create(self):
self.login(self.admin)
self.setUpBooks()
self.browser.get(self.live_server_url + '/book/create/')
time.sleep(10)
title = self.browser.find_element_by_css_selector('input[name=title]')
author_box = Select(self.browser.find_element_by_name('author'))
summary = self.browser.find_element_by_css_selector('textarea[name=summary]')
isbn = self.browser.find_element_by_css_selector('input[name=isbn]')
genre_box = Select(self.browser.find_element_by_name('genre'))
language = Select(self.browser.find_element_by_name('language'))
submit = self.browser.find_element_by_css_selector(self.submit_selector)
title.send_keys('Book Title 2')
author_box.select_by_visible_text('<NAME>')
summary.send_keys('Summary of Book 2')
isbn.send_keys('1234567890123')
genre_box.select_by_visible_text('Fantasy')
language.select_by_visible_text('English')
submit.send_keys(Keys.ENTER)
time.sleep(1)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertEqual(header_text, 'Title: Book Title 2')
def test_book_page_delete(self):
self.setUpBooks()
book = Book.objects.all()[0]
self.login(self.admin)
self.browser.get(self.live_server_url + bookDetailsLink + str(book.id))
delete_button = self.browser.find_element_by_link_text('Delete')
delete_button.click()
submit = self.browser.find_element_by_css_selector(self.submit_selector)
submit.send_keys(Keys.ENTER)
time.sleep(1)
self.browser.get(self.live_server_url + bookCatalogLink)
list_text = self.browser.find_element_by_tag_name('p').text
self.assertEqual(list_text, 'There are no books in the library.')
def test_book_page_update(self):
self.setUpBooks()
book = Book.objects.all()[0]
self.login(self.admin)
self.browser.get(self.live_server_url + bookDetailsLink + str(book.id))
delete_button = self.browser.find_element_by_link_text('Update')
delete_button.click()
title = self.browser.find_element_by_css_selector('input[name=title]')
title.clear()
title.send_keys('Laskar')
submit = self.browser.find_element_by_css_selector(self.submit_selector)
submit.send_keys(Keys.ENTER)
time.sleep(1)
self.browser.get(self.live_server_url + bookCatalogLink)
book_list = self.browser.find_element_by_id('book-list')
rows = book_list.find_elements_by_tag_name('li')
self.assertIn('Laskar (<NAME>)', [row.text for row in rows])
|
[
"catalog.models.Book.objects.all",
"time.sleep"
] |
[((1004, 1017), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1014, 1017), False, 'import time\n'), ((1382, 1396), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1392, 1396), False, 'import time\n'), ((2266, 2279), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2276, 2279), False, 'import time\n'), ((2857, 2870), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2867, 2870), False, 'import time\n'), ((3671, 3684), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3681, 3684), False, 'import time\n'), ((2492, 2510), 'catalog.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (2508, 2510), False, 'from catalog.models import Book\n'), ((3167, 3185), 'catalog.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (3183, 3185), False, 'from catalog.models import Book\n')]
|
from model_boundd import MIPwithBounds
import networkx as nx
def get_vars_and_coefficients(elements, start=3):
"""Use a list which comes from line.split() to create lists of float coefficients and SCIP variables."""
return [var for var in elements[start + 1::2]], [float(coeff) for coeff in elements[start::2]]
def check_sol(filepath, value_dict, eps=1e-8, print_values=False):
"""Check solution given by input variables for feasibility.
Args:
filepath: str, path to .rlv file with AssertOut for output constraints
value_dict: dict, mapping input variables names (str) to values of the solution
eps: float, tolerance for checking
Returns:
true, if solution is valid, false otherwise
"""
graph = nx.DiGraph()
relu_nodes = set()
max_pool_nodes = set()
linear_nodes = set()
relu_in_nodes = set()
mip = MIPwithBounds(filepath, 1e-7)
model, vars = mip.read_file_into_graph()
# vars is a dict of the input nodes
output_cons = []
input_cons = []
input_bounds = {}
with open(filepath, "r") as f:
for line in f:
if line.startswith("#"):
continue
elements = line.split()
if elements[0] == "Input":
input_bounds[elements[1]] = {"lb": None, "ub": None}
graph.add_node(elements[1], node_type="input")
if elements[0] == "ReLU":
bias = float(elements[2])
variables, coeffs = get_vars_and_coefficients(elements)
relu_nodes.add(elements[1])
graph.add_node(elements[1] + "_in", bias=bias)
graph.add_edge(elements[1] + "_in", elements[1])
relu_in_nodes.add(elements[1] + "_in")
for v, w in zip(variables, coeffs):
graph.add_edge(v, elements[1] + "_in", weight=w)
if elements[0] == "Linear":
linear_nodes.add(elements[1])
bias = float(elements[2])
variables, coeffs = get_vars_and_coefficients(elements)
graph.add_node(elements[1], bias=bias)
for v, w in zip(variables, coeffs):
graph.add_edge(v, elements[1], weight=w)
if elements[0] == "MaxPool":
max_pool_nodes.add(elements[1])
graph.add_node(elements[1], node_type="max_pool")
graph.add_edges_from(((v, elements[1]) for v in elements[2:]), weight=1)
if elements[0] == "AssertOut":
output_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))
if elements[0] == "Assert":
input_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))
"""if len(elements) == 5 and elements[-1] in input_bounds:
if elements[1] == "<=":
new_lb = float(elements[2]) / float(elements[3])
if input_bounds[elements[-1]]["lb"] is None or input_bounds[elements[-1]]["lb"] < new_lb:
input_bounds[elements[-1]]["lb"] = new_lb
elif elements[1] == ">=":
new_ub = float(elements[2]) / float(elements[3])
if input_bounds[elements[-1]]["ub"] is None or input_bounds[elements[-1]]["ub"] > new_ub:
input_bounds[elements[-1]]["ub"] = new_ub"""
val = True
for lhs, direction, (variables, coeffs) in input_cons:
if direction == "<=":
if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:
val = False
print(lhs, direction, variables, coeffs)
break
elif direction == ">=":
if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:
val = False
print(lhs, direction, variables, coeffs)
break
else:
raise NotImplementedError
if not val: # input constraints do not hold
print("input constraints not fulfilled")
return False
else:
if print_values:
print("input constraints hold")
nodes_sorted = list(nx.topological_sort(graph))
relu_phases = {x: -1 for x in relu_nodes}
relu_phases_all = {x: 0 for x in relu_nodes}
for node in nodes_sorted:
if node in vars:
continue # skip the input nodes
new_value = 0
if node in linear_nodes or node in relu_in_nodes:
for n in graph.predecessors(node):
new_value += graph.edges[n, node]["weight"] * value_dict[n]
new_value += graph.node[node]["bias"]
elif node in max_pool_nodes:
new_value = max(value_dict[n] for n in graph.predecessors(node))
elif node in relu_nodes:
pred = list(graph.predecessors(node))
assert len(pred) == 1
if value_dict[pred[0]] > 0: # apply ReLU here
new_value = value_dict[pred[0]]
relu_phases[node] = 1
else:
relu_phases[node] = 0
value_dict[node] = new_value
for relu, phase in relu_phases.items():
assert phase >= 0
relu_phases_all[relu] += phase
if print_values:
for s in value_dict.items():
print(s)
val = True
# check the ouput constraints
#print(output_cons)
for lhs, direction, (variables, coeffs) in output_cons:
if direction == "<=":
if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:
val = False
break
elif direction == ">=":
if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:
val = False
break
else:
raise NotImplementedError
return val
if __name__ == "__main__":
directory = "../benchmarks/collisionDetection/"
directory2 = "../../benchmarks/scip/ACAS/"
directory3 = "../benchmarks/twinladder/"
directory5_out = "../benchmarks/mnist/"
filepath = directory2 + "property2/5_3.rlv"
#filepath = directory2 + "property5/property.rlv"
#filepath = directory2 + "property_3.rlv"
file = "../logs/neurify_11_10_0_adv"
with open(file, "r") as f:
list_of_pixels = [float(x) for x in f.readline()[:-1].split()]
#value_dict = {"in_" + str(i): x*255 for i, x in enumerate(list_of_pixels)}
value_dict = {'in_0': 55947.69100, 'in_1': 0.198666, 'in_2': -3.051407, 'in_3': 1145.0000, 'in_4': 50.768384}
if check_sol(filepath, value_dict=value_dict, eps=1e-2, print_values=True):
print("valid solution found -> SAT")
else:
print("the solution is not valid")
|
[
"model_boundd.MIPwithBounds",
"networkx.DiGraph",
"networkx.topological_sort"
] |
[((762, 774), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (772, 774), True, 'import networkx as nx\n'), ((886, 916), 'model_boundd.MIPwithBounds', 'MIPwithBounds', (['filepath', '(1e-07)'], {}), '(filepath, 1e-07)\n', (899, 916), False, 'from model_boundd import MIPwithBounds\n'), ((4297, 4323), 'networkx.topological_sort', 'nx.topological_sort', (['graph'], {}), '(graph)\n', (4316, 4323), True, 'import networkx as nx\n')]
|
#! python3
import random
numberOfStreaks = 0
for experimentNumber in range(10000):
# Code that creates a list of 100 'heads' or 'tails' values.
randomList = []
for listEntry in range(100):
if random.randint(0,1) == 1:
randomList.append('H')
else:
randomList.append('T')
# Code that checks if there is a streak of 6 heads or tails in a row.
counterH, counterT = 0, 0
for flip in range(100):
if randomList[flip] == 'H':
counterH += 1
counterT = 0
else:
counterT += 1
counterH = 0
if counterH == 6 or counterT == 6:
numberOfStreaks +=1
counterH, counterT = 0, 0
print('Chance of streak: %s%%' % (numberOfStreaks/100))
|
[
"random.randint"
] |
[((213, 233), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (227, 233), False, 'import random\n')]
|
from django.shortcuts import render, redirect
from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
"""
SSO Views
"""
@login_required
def sso_callback(request):
code = request.GET.get('code', None)
eve_client = EveClient.get_instance()
# verify token
esi_security = EveClient.get_esi_security()
esi_token = esi_security.auth(code)
esi_character = esi_security.verify()
# create new token
new_token = EveToken.objects.get_or_create(
access_token=esi_token['access_token'],
refresh_token=esi_token['refresh_token'],
expires_in=esi_token['expires_in'],
user=request.user
)[0]
# set scopes M2M
scopes = EveScope.objects.filter(name__in=esi_character['scp'])
if scopes.count() != len(esi_character['scp']):
logger.error(
f"Whoa there. Somehow we added a scope we don't know about. Pass this to Krypted Developers: \n ${esi_character['scp']}")
new_token.scopes.set(scopes)
# find or create character
if EveCharacter.objects.filter(external_id=esi_character['sub'].split(":")[-1]).exists():
character = EveCharacter.objects.get(
external_id=esi_character['sub'].split(":")[-1])
if character.token:
old_token = character.token
old_token.delete()
character.token = new_token
character.save()
else:
character = EveCharacter.objects.create(
external_id=esi_character['sub'].split(":")[-1],
name=esi_character['name'],
token=new_token,
)
# if no primary user, set
if not PrimaryEveCharacterAssociation.objects.filter(user=request.user).exists():
PrimaryEveCharacterAssociation.objects.create(
user=request.user,
character=character
)
return redirect('/')
@login_required
def add_sso_token(request):
try:
sso_url = EveClient.get_instance().get_sso_url()
return redirect(sso_url)
except Exception:
logger.exception("Failed to get SSO url from EveClient")
messages.warning(
request, "Eve Settings are not configured correctly. Contact your administrator.")
return redirect('/')
@login_required
def update_sso_token(request, token_id):
eve_token = EveToken.objects.get(pk=token_id)
return redirect(EveClient.get_instance().get_sso_url(
EveScope.convert_to_list(eve_token.requested_scopes.all())
))
@login_required
def remove_sso_token(request, pk):
eve_token = EveToken.objects.get(pk=pk)
if request.user == eve_token.user:
try:
if PrimaryEveCharacterAssociation.objects.filter(character=eve_token.evecharacter).exists():
PrimaryEveCharacterAssociation.objects.filter(
character=eve_token.evecharacter).delete()
except Exception:
logger.exception(
"Encountered error when deleting token character associations")
eve_token.delete()
else:
messages.error(request, "You cannot delete someone elses token.")
messages.success(
request, "Successfully deleted EVE Online token and character data")
return redirect("/")
|
[
"django.contrib.messages.success",
"django_eveonline_connector.models.PrimaryEveCharacterAssociation.objects.create",
"django.shortcuts.redirect",
"django_eveonline_connector.models.EveToken.objects.get_or_create",
"django.contrib.messages.error",
"django_eveonline_connector.models.EveToken.objects.get",
"django_eveonline_connector.models.EveScope.objects.filter",
"django_eveonline_connector.models.EveClient.get_instance",
"django_eveonline_connector.models.EveClient.get_esi_security",
"django_eveonline_connector.models.PrimaryEveCharacterAssociation.objects.filter",
"logging.getLogger",
"django.contrib.messages.warning"
] |
[((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((507, 531), 'django_eveonline_connector.models.EveClient.get_instance', 'EveClient.get_instance', ([], {}), '()\n', (529, 531), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((571, 599), 'django_eveonline_connector.models.EveClient.get_esi_security', 'EveClient.get_esi_security', ([], {}), '()\n', (597, 599), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((966, 1020), 'django_eveonline_connector.models.EveScope.objects.filter', 'EveScope.objects.filter', ([], {'name__in': "esi_character['scp']"}), "(name__in=esi_character['scp'])\n", (989, 1020), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((2111, 2124), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (2119, 2124), False, 'from django.shortcuts import render, redirect\n'), ((2582, 2615), 'django_eveonline_connector.models.EveToken.objects.get', 'EveToken.objects.get', ([], {'pk': 'token_id'}), '(pk=token_id)\n', (2602, 2615), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((2817, 2844), 'django_eveonline_connector.models.EveToken.objects.get', 'EveToken.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (2837, 2844), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((3381, 3470), 'django.contrib.messages.success', 'messages.success', (['request', '"""Successfully deleted EVE Online token and character data"""'], {}), "(request,\n 'Successfully deleted 
EVE Online token and character data')\n", (3397, 3470), False, 'from django.contrib import messages\n'), ((3488, 3501), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (3496, 3501), False, 'from django.shortcuts import render, redirect\n'), ((722, 898), 'django_eveonline_connector.models.EveToken.objects.get_or_create', 'EveToken.objects.get_or_create', ([], {'access_token': "esi_token['access_token']", 'refresh_token': "esi_token['refresh_token']", 'expires_in': "esi_token['expires_in']", 'user': 'request.user'}), "(access_token=esi_token['access_token'],\n refresh_token=esi_token['refresh_token'], expires_in=esi_token[\n 'expires_in'], user=request.user)\n", (752, 898), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((1979, 2069), 'django_eveonline_connector.models.PrimaryEveCharacterAssociation.objects.create', 'PrimaryEveCharacterAssociation.objects.create', ([], {'user': 'request.user', 'character': 'character'}), '(user=request.user, character=\n character)\n', (2024, 2069), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((2252, 2269), 'django.shortcuts.redirect', 'redirect', (['sso_url'], {}), '(sso_url)\n', (2260, 2269), False, 'from django.shortcuts import render, redirect\n'), ((3310, 3375), 'django.contrib.messages.error', 'messages.error', (['request', '"""You cannot delete someone elses token."""'], {}), "(request, 'You cannot delete someone elses token.')\n", (3324, 3375), False, 'from django.contrib import messages\n'), ((2365, 2468), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Eve Settings are not configured correctly. Contact your administrator."""'], {}), "(request,\n 'Eve Settings are not configured correctly. 
Contact your administrator.')\n", (2381, 2468), False, 'from django.contrib import messages\n'), ((2493, 2506), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (2501, 2506), False, 'from django.shortcuts import render, redirect\n'), ((1896, 1960), 'django_eveonline_connector.models.PrimaryEveCharacterAssociation.objects.filter', 'PrimaryEveCharacterAssociation.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (1941, 1960), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((2198, 2222), 'django_eveonline_connector.models.EveClient.get_instance', 'EveClient.get_instance', ([], {}), '()\n', (2220, 2222), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((2636, 2660), 'django_eveonline_connector.models.EveClient.get_instance', 'EveClient.get_instance', ([], {}), '()\n', (2658, 2660), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((2913, 2992), 'django_eveonline_connector.models.PrimaryEveCharacterAssociation.objects.filter', 'PrimaryEveCharacterAssociation.objects.filter', ([], {'character': 'eve_token.evecharacter'}), '(character=eve_token.evecharacter)\n', (2958, 2992), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n'), ((3019, 3098), 'django_eveonline_connector.models.PrimaryEveCharacterAssociation.objects.filter', 'PrimaryEveCharacterAssociation.objects.filter', ([], {'character': 'eve_token.evecharacter'}), '(character=eve_token.evecharacter)\n', (3064, 3098), False, 'from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation\n')]
|
from project.server.common.escape import cleanify
class TestReplacements:
    """Unit tests for cleanify's German umlaut/eszett transliteration."""
    def test_ae(self):
        # Ä/ä -> Ae/ae; the empty string must pass through unchanged.
        assert cleanify("") == ""
        assert cleanify("Äpfel") == "Aepfel"
        assert cleanify("äpfel") == "aepfel"
        assert cleanify("Äpfel Äpfel äpfel") == "Aepfel Aepfel aepfel"
    def test_oe(self):
        # Ö/ö -> Oe/oe, case preserved per occurrence.
        assert cleanify("Ömel") == "Oemel"
        assert cleanify("ömel") == "oemel"
        assert cleanify("Ömel ömel Ömel") == "Oemel oemel Oemel"
    def test_ue(self):
        # Ü/ü -> Ue/ue.
        assert cleanify("Ümel") == "Uemel"
        assert cleanify("ümel") == "uemel"
        assert cleanify("Ümel ümel Ümel") == "Uemel uemel Uemel"
    def test_ss(self):
        # ß -> ss.
        assert cleanify("Scheiße") == "Scheisse"
|
[
"project.server.common.escape.cleanify"
] |
[((121, 133), 'project.server.common.escape.cleanify', 'cleanify', (['""""""'], {}), "('')\n", (129, 133), False, 'from project.server.common.escape import cleanify\n'), ((156, 173), 'project.server.common.escape.cleanify', 'cleanify', (['"""Äpfel"""'], {}), "('Äpfel')\n", (164, 173), False, 'from project.server.common.escape import cleanify\n'), ((202, 219), 'project.server.common.escape.cleanify', 'cleanify', (['"""äpfel"""'], {}), "('äpfel')\n", (210, 219), False, 'from project.server.common.escape import cleanify\n'), ((248, 277), 'project.server.common.escape.cleanify', 'cleanify', (['"""Äpfel Äpfel äpfel"""'], {}), "('Äpfel Äpfel äpfel')\n", (256, 277), False, 'from project.server.common.escape import cleanify\n'), ((346, 362), 'project.server.common.escape.cleanify', 'cleanify', (['"""Ömel"""'], {}), "('Ömel')\n", (354, 362), False, 'from project.server.common.escape import cleanify\n'), ((390, 406), 'project.server.common.escape.cleanify', 'cleanify', (['"""ömel"""'], {}), "('ömel')\n", (398, 406), False, 'from project.server.common.escape import cleanify\n'), ((434, 460), 'project.server.common.escape.cleanify', 'cleanify', (['"""Ömel ömel Ömel"""'], {}), "('Ömel ömel Ömel')\n", (442, 460), False, 'from project.server.common.escape import cleanify\n'), ((526, 542), 'project.server.common.escape.cleanify', 'cleanify', (['"""Ümel"""'], {}), "('Ümel')\n", (534, 542), False, 'from project.server.common.escape import cleanify\n'), ((570, 586), 'project.server.common.escape.cleanify', 'cleanify', (['"""ümel"""'], {}), "('ümel')\n", (578, 586), False, 'from project.server.common.escape import cleanify\n'), ((614, 640), 'project.server.common.escape.cleanify', 'cleanify', (['"""Ümel ümel Ümel"""'], {}), "('Ümel ümel Ümel')\n", (622, 640), False, 'from project.server.common.escape import cleanify\n'), ((706, 725), 'project.server.common.escape.cleanify', 'cleanify', (['"""Scheiße"""'], {}), "('Scheiße')\n", (714, 725), False, 'from project.server.common.escape 
import cleanify\n')]
|
#!/usr/local/bin/python3.5 -u
"""Read one number from stdin and classify it as negative/zero/small/large."""
import sys

sys.stdout.write("Enter a number: ")
try:
    a = float(sys.stdin.readline())
except ValueError:
    # Non-numeric input (or EOF, which yields '') previously crashed with a
    # raw traceback; exit with a readable message instead.
    sys.exit("Error: not a number")
if a < 0:
    print("negative")
elif a == 0:
    print("zero")
elif a < 10:
    print("small")
else:
    print("large")
|
[
"sys.stdout.write",
"sys.stdin.readline"
] |
[((41, 77), 'sys.stdout.write', 'sys.stdout.write', (['"""Enter a number: """'], {}), "('Enter a number: ')\n", (57, 77), False, 'import sys\n'), ((88, 108), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (106, 108), False, 'import sys\n')]
|
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Jinja folklore wrappers and handling of inline copy usage.
"""
from .Importing import importFromInlineCopy
environments = {}
def unlikely_if(value):
    """Map a truthy *value* to the C branch hint "unlikely", else "".

    Used by templates to optionally annotate a branch as unlikely.
    """
    return "unlikely" if value else ""
def unlikely_or_likely_from(value):
    """Map a truth value to the C branch hint "unlikely" or "likely"."""
    return "unlikely" if value else "likely"
def getEnvironment(module_name):
    """Return the Jinja2 environment for *module_name*, creating and caching it.

    The environment loads templates from the module's "templates" package
    directory and exposes the shared helper globals.
    """
    if module_name in environments:
        return environments[module_name]

    # Import dependencies, sadly we get to manage this ourselves.
    importFromInlineCopy("markupsafe", must_exist=True)
    jinja2 = importFromInlineCopy("jinja2", must_exist=True)

    import jinja2

    environment = jinja2.Environment(
        loader=jinja2.PackageLoader(module_name, "templates"),
        # extensions=["jinja2.ext.do"],
        trim_blocks=True,
        lstrip_blocks=True,
    )

    # For shared global functions.
    environment.globals.update(
        {
            "unlikely_if": unlikely_if,
            "unlikely_or_likely_from": unlikely_or_likely_from,
        }
    )

    # Fail loudly on undefined template variables rather than rendering "".
    environment.undefined = jinja2.StrictUndefined

    environments[module_name] = environment
    return environment
def getTemplate(module_name, template_name):
    """Load *template_name* from the cached Jinja2 environment of *module_name*."""
    environment = getEnvironment(module_name)
    return environment.get_template(template_name)
|
[
"jinja2.PackageLoader"
] |
[((1450, 1496), 'jinja2.PackageLoader', 'jinja2.PackageLoader', (['module_name', '"""templates"""'], {}), "(module_name, 'templates')\n", (1470, 1496), False, 'import jinja2\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.discrete import BinaryPSO
@pytest.mark.parametrize(
    "options",
    [
        {"c2": 0.7, "w": 0.5, "k": 2, "p": 2},
        {"c1": 0.5, "w": 0.5, "k": 2, "p": 2},
        {"c1": 0.5, "c2": 0.7, "k": 2, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2},
    ],
)
def test_keyword_exception(options):
    """Tests if exceptions are thrown when keywords are missing"""
    # Each parametrized dict omits exactly one of the keys c1/c2/w/k/p,
    # so constructing the optimizer must raise KeyError.
    with pytest.raises(KeyError):
        BinaryPSO(5, 2, options)
@pytest.mark.parametrize(
    "options",
    [
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": -1, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 6, "p": 2},
        {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 5},
    ],
)
def test_invalid_k_or_p_values(options):
    """Tests if exception is thrown when passing
    an invalid value for k or p"""
    # NOTE(review): k=-1 and k=6 look out of range for a 5-particle swarm,
    # and p=5 presumably is not a supported norm -- confirm against
    # BinaryPSO's option validation.
    with pytest.raises(ValueError):
        BinaryPSO(5, 2, options)
@pytest.mark.parametrize("velocity_clamp", [[1, 3], np.array([1, 3])])
def test_vclamp_type_exception(velocity_clamp, options):
    """Tests if exception is raised when velocity_clamp type is not a
    tuple"""
    # Both a list and an ndarray are length-2 sequences but not tuples.
    with pytest.raises(TypeError):
        BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(1, 1, 1), (2, 3, 1)])
def test_vclamp_shape_exception(velocity_clamp, options):
    """Tests if exception is raised when velocity_clamp's size is not equal
    to 2"""
    # 3-tuples instead of the expected (min, max) pair.
    with pytest.raises(IndexError):
        BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(3, 2), (10, 8)])
def test_vclamp_maxmin_exception(velocity_clamp, options):
    """Tests if the max velocity_clamp is less than min velocity_clamp and
    vice-versa"""
    # Both pairs have min > max and must be rejected.
    with pytest.raises(ValueError):
        BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
def test_reset_default_values(binary_reset):
    """Tests if best cost and best pos are set properly when the reset()
    method is called"""
    # After reset: best cost is +inf and best_pos holds no elements.
    assert binary_reset.swarm.best_cost == np.inf
    assert set(binary_reset.swarm.best_pos) == set(np.array([]))
@pytest.mark.parametrize(
    "history, expected_shape",
    [
        ("cost_history", (1000,)),
        ("mean_pbest_history", (1000,)),
        ("mean_neighbor_history", (1000,)),
        ("pos_history", (1000, 10, 2)),
        ("velocity_history", (1000, 10, 2)),
    ],
)
def test_training_history_shape(binary_history, history, expected_shape):
    """Test if training histories are of expected shape"""
    # binary_history presumably ran 1000 iterations with 10 particles in 2
    # dimensions -- TODO confirm against the fixture definition in conftest.
    pso = vars(binary_history)
    assert np.array(pso[history]).shape == expected_shape
|
[
"pytest.mark.parametrize",
"pytest.raises",
"numpy.array",
"pyswarms.discrete.BinaryPSO"
] |
[((163, 414), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options"""', "[{'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}, {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': \n 2}, {'c1': 0.5, 'c2': 0.7, 'k': 2, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w':\n 0.5, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2}]"], {}), "('options', [{'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2},\n {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'k': 2,\n 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'p': 2}, {'c1': 0.5, 'c2': \n 0.7, 'w': 0.5, 'k': 2}])\n", (186, 414), False, 'import pytest\n'), ((634, 831), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options"""', "[{'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': -1, 'p': 2}, {'c1': 0.5, 'c2': 0.7,\n 'w': 0.5, 'k': 6, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}\n ]"], {}), "('options', [{'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': -\n 1, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 6, 'p': 2}, {'c1': \n 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}])\n", (657, 831), False, 'import pytest\n'), ((1381, 1446), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""velocity_clamp"""', '[(1, 1, 1), (2, 3, 1)]'], {}), "('velocity_clamp', [(1, 1, 1), (2, 3, 1)])\n", (1404, 1446), False, 'import pytest\n'), ((1704, 1764), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""velocity_clamp"""', '[(3, 2), (10, 8)]'], {}), "('velocity_clamp', [(3, 2), (10, 8)])\n", (1727, 1764), False, 'import pytest\n'), ((2287, 2513), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""history, expected_shape"""', "[('cost_history', (1000,)), ('mean_pbest_history', (1000,)), (\n 'mean_neighbor_history', (1000,)), ('pos_history', (1000, 10, 2)), (\n 'velocity_history', (1000, 10, 2))]"], {}), "('history, expected_shape', [('cost_history', (1000,\n )), ('mean_pbest_history', (1000,)), ('mean_neighbor_history', (1000,)),\n ('pos_history', (1000, 10, 2)), ('velocity_history', (1000, 10, 2))])\n", (2310, 2513), False, 'import pytest\n'), ((573, 
596), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (586, 596), False, 'import pytest\n'), ((606, 630), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)', 'options'], {}), '(5, 2, options)\n', (615, 630), False, 'from pyswarms.discrete import BinaryPSO\n'), ((998, 1023), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1011, 1023), False, 'import pytest\n'), ((1033, 1057), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)', 'options'], {}), '(5, 2, options)\n', (1042, 1057), False, 'from pyswarms.discrete import BinaryPSO\n'), ((1280, 1304), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1293, 1304), False, 'import pytest\n'), ((1314, 1377), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)'], {'velocity_clamp': 'velocity_clamp', 'options': 'options'}), '(5, 2, velocity_clamp=velocity_clamp, options=options)\n', (1323, 1377), False, 'from pyswarms.discrete import BinaryPSO\n'), ((1112, 1128), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1120, 1128), True, 'import numpy as np\n'), ((1602, 1627), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (1615, 1627), False, 'import pytest\n'), ((1637, 1700), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)'], {'velocity_clamp': 'velocity_clamp', 'options': 'options'}), '(5, 2, velocity_clamp=velocity_clamp, options=options)\n', (1646, 1700), False, 'from pyswarms.discrete import BinaryPSO\n'), ((1926, 1951), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1939, 1951), False, 'import pytest\n'), ((1961, 2024), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)'], {'velocity_clamp': 'velocity_clamp', 'options': 'options'}), '(5, 2, velocity_clamp=velocity_clamp, options=options)\n', (1970, 2024), False, 'from pyswarms.discrete import BinaryPSO\n'), ((2270, 2282), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2278, 2282), True, 'import numpy 
as np\n'), ((2738, 2760), 'numpy.array', 'np.array', (['pso[history]'], {}), '(pso[history])\n', (2746, 2760), True, 'import numpy as np\n')]
|
"""pytest-faker plugin."""
import pytest
from faker import Factory
@pytest.fixture(scope='session')
def faker_locale():
    """Faker locale.

    None by default which means faker's default locale.
    """
    # Override this fixture in a conftest/test module to select another locale.
    return None
@pytest.fixture(scope='session')
def faker(faker_locale):
    """Faker factory object."""
    # Session-scoped: one shared Faker instance per test run, created for the
    # locale provided by the faker_locale fixture.
    return Factory.create(faker_locale)
|
[
"pytest.fixture",
"faker.Factory.create"
] |
[((71, 102), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (85, 102), False, 'import pytest\n'), ((228, 259), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (242, 259), False, 'import pytest\n'), ((328, 356), 'faker.Factory.create', 'Factory.create', (['faker_locale'], {}), '(faker_locale)\n', (342, 356), False, 'from faker import Factory\n')]
|
# Copyright 2017 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Variant utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from deepvariant.core.genomics import struct_pb2
from deepvariant.core.genomics import variants_pb2
from deepvariant.core import ranges
# The alternate allele string for reference (no alt).
NO_ALT_ALLELE = '.'
# The alternate allele string for the gVCF "any" alternate allele.
GVCF_ALT_ALLELE = '<*>'
def set_variantcall_gq(variant_call, gq):
  """Sets the 'GQ' info field of variant_call to gq, replacing any old value.

  Args:
    variant_call: A VariantCall proto; modified in place.
    gq: Numeric genotype quality to store as the single 'GQ' value.
  """
  # Delete first so the extend below leaves exactly one value.
  if 'GQ' in variant_call.info:
    del variant_call.info['GQ']
  variant_call.info['GQ'].values.extend([struct_pb2.Value(number_value=gq)])
def decode_variants(encoded_iter):
  """Lazily parses each element of encoded_iter into a genomics.Variant.

  Args:
    encoded_iter: An iterable producing binary-encoded
      third_party.nucleus.protos.Variant strings.

  Yields:
    A parsed third_party.nucleus.protos.Variant for each element of
    encoded_iter, in order.
  """
  for serialized in encoded_iter:
    yield variants_pb2.Variant.FromString(serialized)
def variant_position(variant):
  """Returns a new 1-bp Range at the start position of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A new Range on variant's reference_name covering only the single
    basepair at variant.start, i.e. [start, start + 1).
  """
  start = variant.start
  return ranges.make_range(variant.reference_name, start, start + 1)
def variant_range(variant):
  """Returns a new Range spanning exactly the extent of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A new Range with variant's reference_name, start, and end.
  """
  ref_name = variant.reference_name
  return ranges.make_range(ref_name, variant.start, variant.end)
def variant_range_tuple(variant):
  """Returns (reference_name, start, end) for the variant.

  A common use is sorting variants by chromosomal location:
  `sorted(variants, key=variant_range_tuple)`.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A three-tuple of the variant's reference_name, start, and end.
  """
  return tuple(
      getattr(variant, field) for field in ('reference_name', 'start', 'end'))
@enum.unique
class GenotypeType(enum.Enum):
  """An enumeration of the types of genotypes.

  Each member's value is a (full_name, example_gt, class_id) tuple:
    full_name: human-readable description of the genotype type.
    example_gt: an example diploid genotype (list of allele indices).
    class_id: small integer id for the type (-1 for no_call).
  """
  hom_ref = ('homozygous reference', [0, 0], 0)
  het = ('heterozygous', [0, 1], 1)
  hom_var = ('homozygous non-reference', [1, 1], 2)
  no_call = ('no call', [-1, -1], -1)
  def __init__(self, full_name, example_gt, class_id):
    # Unpack the member's value tuple onto named attributes.
    self.full_name = full_name
    self.example_gt = example_gt
    self.class_id = class_id
@enum.unique
class VariantType(enum.Enum):
  """An enumeration of the types of variants."""
  # A variant.proto where there is no alt allele (a reference record).
  ref = 0
  # A non-reference variant.proto where all ref and alt alleles
  # are single basepairs (a SNP).
  snp = 1
  # A non-reference variant.proto where at least one of ref or alt alleles
  # are longer than 1 bp (an indel).
  indel = 2
def format_filters(variant):
  """Gets a human-readable string of the filters applied to variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    The variant's filter values joined by commas, or '.' if the filter
    field is unset.
  """
  if not variant.filter:
    return '.'
  return ','.join(variant.filter)
def format_alleles(variant):
  """Gets a string representation of the variant's alleles.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A string of the form ref_bases/alt1,alt2,...
  """
  alts = ','.join(variant.alternate_bases)
  return '{}/{}'.format(variant.reference_bases, alts)
def format_position(variant):
  """Gets a string representation of the variant's position.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    'chrom:pos' where pos is variant.start + 1 (start is zero-based).
  """
  return '%s:%s' % (variant.reference_name, variant.start + 1)
def is_snp(variant):
  """Is variant a SNP?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if variant is a true variant call whose ref and alt alleles are
    all exactly 1 bp in length.
  """
  if is_ref(variant):
    return False
  if len(variant.reference_bases) != 1 or not variant.alternate_bases:
    return False
  return all(len(alt) == 1 for alt in variant.alternate_bases)
def is_indel(variant):
  """Is variant an indel?

  An indel event is simply one where the size of at least one of the alleles
  is > 1.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if the alleles in variant indicate an insertion/deletion event
    occurs at this site.
  """
  if is_ref(variant):
    return False
  if len(variant.reference_bases) > 1:
    return True
  return any(len(alt) > 1 for alt in variant.alternate_bases)
def is_biallelic(variant):
  """Returns True if variant has exactly one alternate allele."""
  alt_count = len(variant.alternate_bases)
  return alt_count == 1
def is_multiallelic(variant):
  """Does variant have multiple alt alleles?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if variant has more than one alt allele.
  """
  return len(variant.alternate_bases) >= 2
def is_ref(variant):
  """Returns true if variant is a reference (non-mutation) record.

  Variant protos can encode sites that aren't actually mutations in the
  sample. For example, ref='A', alt='.' indicates no mutation is present
  (alt is the missing value).

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A boolean.
  """
  alts = list(variant.alternate_bases)
  return alts in ([], ['.'])
def variant_type(variant):
  """Gets the VariantType of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    VariantType indicating the type of this variant.
  """
  if is_ref(variant):
    return VariantType.ref
  return VariantType.snp if is_snp(variant) else VariantType.indel
def is_transition(allele1, allele2):
  """Is the pair of single bp alleles a transition?

  Args:
    allele1: A string of the first allele, must be 1 bp in length.
    allele2: A string of the second allele, must be 1 bp in length.

  Returns:
    True if allele1/allele2 are a transition SNP (purine<->purine or
    pyrimidine<->pyrimidine).

  Raises:
    ValueError: if allele1 and allele2 are equal or aren't 1 bp in length.
  """
  if allele1 == allele2:
    raise ValueError('Alleles must be unique:', allele1, allele2)
  if len(allele1) != 1:
    raise ValueError('Alleles must be 1 bp in length.', allele1)
  if len(allele2) != 1:
    raise ValueError('Alleles must be 1 bp in length.', allele2)
  pair = {allele1, allele2}
  return pair == {'A', 'G'} or pair == {'C', 'T'}
def is_insertion(ref, alt):
  """Is alt an insertion w.r.t. ref?

  Args:
    ref: A string of the reference allele.
    alt: A string of the alternative allele.

  Returns:
    True if alt is an insertion w.r.t. ref.
  """
  return len(alt) > len(ref)
def is_deletion(ref, alt):
  """Is alt a deletion w.r.t. ref?

  Args:
    ref: A string of the reference allele.
    alt: A string of the alternative allele.

  Returns:
    True if alt is a deletion w.r.t. ref.
  """
  return len(alt) < len(ref)
def has_insertion(variant):
  """Does variant have an insertion?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if the alleles in variant indicate an insertion event occurs at
    this site.
  """
  if not is_indel(variant):
    return False
  ref = variant.reference_bases
  return any(is_insertion(ref, alt) for alt in variant.alternate_bases)
def has_deletion(variant):
  """Does variant have a deletion?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if the alleles in variant indicate a deletion event occurs at
    this site.
  """
  if not is_indel(variant):
    return False
  ref = variant.reference_bases
  return any(is_deletion(ref, alt) for alt in variant.alternate_bases)
@enum.unique
class AlleleMismatchType(enum.Enum):
  """An enumeration of the types of allele mismatches we detect."""
  # Duplicate alleles within a single variant's alt allele list.
  duplicate_eval_alleles = 1
  duplicate_true_alleles = 2
  # Truth has an allele that doesn't match any allele in eval.
  unmatched_true_alleles = 3
  # Eval has an allele that doesn't match any allele in truth.
  unmatched_eval_alleles = 4
def allele_mismatches(evalv, truev):
  """Determines the set of allele mismatch discordances between evalv and truev.

  Compares the alleles present in evalv and truev to determine if there are
  any disagreements between the set of called alleles in the two Variant
  protos. The type of differences basically boil down to:

  -- Are there duplicate alt alleles?
  -- Can we find a matching allele in the truev for each allele in evalv, and
     vice versa?

  Two alleles A and B match when they would produce the same sequence of bases
  in ref and alt haplotypes starting at the same position. So CA=>TA is the
  same as C=>T (position is the same, replacing A by A is a noop) but AC=>AT
  isn't the same as C=>T because the former event changes bases 1 bp further
  along in the reference genome than the C=>T allele.

  Args:
    evalv: A third_party.nucleus.protos.Variant.
    truev: A third_party.nucleus.protos.Variant.

  Returns:
    A set of AlleleMismatchType values.
  """
  unmatched_eval_alleles = []
  # Using set() removes duplicate alleles in truth and eval variants.
  allele_matches = {alt: [] for alt in set(truev.alternate_bases)}
  for eval_alt in set(evalv.alternate_bases):
    # Loop over each possible alt allele, adding eval_alt to each matching alt
    # allele.
    found_match = False
    for true_alt in allele_matches:
      if (simplify_alleles(evalv.reference_bases, eval_alt) == simplify_alleles(
          truev.reference_bases, true_alt)):
        # We are a match to true_alt, so record that fact in allele_matches.
        allele_matches[true_alt].append(eval_alt)
        found_match = True
    if not found_match:
      # We never found a match for eval_alt.
      unmatched_eval_alleles.append(eval_alt)

  # At this point we've checked every alt against every eval allele, and are
  # ready to summarize the differences using our AlleleMismatchType enum.
  types = set()
  if len(set(evalv.alternate_bases)) != len(evalv.alternate_bases):
    types.add(AlleleMismatchType.duplicate_eval_alleles)
  if len(set(truev.alternate_bases)) != len(truev.alternate_bases):
    types.add(AlleleMismatchType.duplicate_true_alleles)
  if unmatched_eval_alleles:
    types.add(AlleleMismatchType.unmatched_eval_alleles)
  # Bug fix: dict.itervalues() is Python 2 only and raises AttributeError on
  # Python 3; dict.values() behaves identically here on both versions.
  if any(len(match) != 1 for match in allele_matches.values()):
    types.add(AlleleMismatchType.unmatched_true_alleles)
  return types
def simplify_alleles(*alleles):
  """Simplifies alleles by stripping off common postfix bases.

  For example, simplify("AC", "GC") would produce the tuple "A", "G" as the
  "C" base is a common postfix of both alleles. But simplify("AC", "GT")
  would produce "AC", "GT" as there is no common postfix.

  Note this function will never simplify any allele down to the empty string.
  So if alleles = ['CACA', 'CA'], the longest common postfix is 'CA' but we
  will not produce ['CA', ''] as this is an invalid Variant allele encoding.
  Instead we produce ['CAC', 'C'].

  Args:
    *alleles: A tuple of bases, each as a string, to simplify.

  Returns:
    A tuple, one for each allele in alleles in order, with any common postfix
    bases stripped off.
  """
  # Never trim an allele to the empty string: we may remove at most one base
  # fewer than the shortest allele's length.
  max_trim = min(len(allele) for allele in alleles) - 1
  postfix_len = 0
  while postfix_len < max_trim:
    position = -(postfix_len + 1)
    if len({allele[position] for allele in alleles}) != 1:
      break
    postfix_len += 1
  if not postfix_len:
    # Fast path for the case where there's no shared postfix.
    return alleles
  return tuple(allele[:-postfix_len] for allele in alleles)
def is_filtered(variant):
  """Returns True if variant has a non-PASS filter field, or False otherwise."""
  passing = {'PASS', '.'}
  return bool(variant.filter) and any(f not in passing for f in variant.filter)
def is_variant_call(variant,
                    require_non_ref_genotype=True,
                    no_calls_are_variant=False):
  """Is variant a non-reference call?

  A Variant proto doesn't always imply that there's a variant present in the
  genome. The call may not have alternate bases, may be filtered, may have a
  hom-ref genotype, etc. This function looks for all of those configurations
  and returns true iff the variant is asserting that a mutation is present
  in the sample.

  Note that this code allows a variant without a calls field to be variant,
  but one with a genotype call must have a non-reference genotype to be
  considered variant (if require_non_ref_genotype is True, the default). If
  False, a variant that passes all of the site-level requirements for being
  a variant_call will return a True value, regardless of the genotypes, which
  means that we'll consider a site with a sample with a hom-ref or no-call
  site a variant call.

  Args:
    variant: third_party.nucleus.protos.Variant.
    require_non_ref_genotype: Should we require a site with a genotype call
      to have a non-reference (het, hom-var) genotype for the site to be
      considered a variant call?
    no_calls_are_variant: If a site has genotypes, should we consider no_call
      genotypes as being variant or not?

  Returns:
    True if variant is really a mutation call.

  Raises:
    ValueError: If variant has more than one call (i.e., is multi-sample).
  """
  # No alt alleles or a failing filter means no mutation is asserted.
  if not variant.alternate_bases or is_filtered(variant):
    return False
  if not variant.calls or not require_non_ref_genotype:
    return True
  # All tests after this point should only look at genotype-based fields, as
  # we may have returned above due to require_non_ref_genotype.
  if len(variant.calls) > 1:
    raise ValueError('Unsupported: multiple genotypes found at', variant)
  genotype = variant.calls[0].genotype
  if any(allele_index > 0 for allele_index in genotype):
    return True
  if no_calls_are_variant:
    return all(allele_index == -1 for allele_index in genotype)
  return False
def has_genotypes(variant):
  """Does variant have genotype calls?

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    True if variant has one or more genotype calls, as a plain bool rather
    than the underlying calls container.
  """
  return bool(variant.calls)
def genotype_type(variant):
  """Gets the GenotypeType for variant.

  If variant doesn't have genotypes, returns no_call. Otherwise returns one
  of no_call, hom_ref, het, or hom_var depending on the status of the
  genotypes in the call field of variant.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A GenotypeType.

  Raises:
    ValueError: If variant has more than one call (i.e., is multi-sample).
  """
  if not has_genotypes(variant):
    return GenotypeType.no_call
  if len(variant.calls) > 1:
    raise ValueError('Unsupported: multiple genotypes found at', variant)
  distinct_alleles = set(variant.calls[0].genotype)
  if distinct_alleles == {-1}:
    return GenotypeType.no_call
  if distinct_alleles == {0}:
    return GenotypeType.hom_ref
  if len(distinct_alleles) > 1:
    return GenotypeType.het
  return GenotypeType.hom_var
def genotype_as_alleles(variant):
  """Gets genotype of the sample in variant as a list of actual alleles.

  Returns the alleles specified by the genotype indices of variant.calls[0].
  For example, if variant.reference_bases = 'A' and variant.alternative_bases
  = ['C'] and the genotypes are [0, 1], this function will return ['A', 'C'].

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    A list of allele (string) from variant, one for each genotype in
    variant.calls[0], in order.

  Raises:
    ValueError: If variant doesn't have genotypes.
    ValueError: If variant has more than one call (i.e., is multi-sample).
  """
  if not has_genotypes(variant):
    raise ValueError('Not genotypes present in', variant)
  if len(variant.calls) > 1:
    raise ValueError('Unsupported: multiple genotypes found at', variant)
  # Genotype indices encode -1 for no-call, 0 for the reference allele, and
  # i > 0 for the i-th alt allele. Prepending '.' to the allele table lets us
  # look up each genotype directly via index + 1.
  lookup = ['.', variant.reference_bases] + list(variant.alternate_bases)
  return [lookup[index + 1] for index in variant.calls[0].genotype]
def genotype_quality(variant, default=None):
  """Gets the genotype quality (GQ) value of the genotype call in variant.

  If variant doesn't have genotypes, returns default; otherwise tries to
  retrieve the GQ field of the call, returning its value if present and
  default if absent.

  Args:
    variant: third_party.nucleus.protos.Variant.
    default: The value to return if variant has no genotypes or no GQ.

  Returns:
    The GQ value (may be a string or whatever value default is).
  """
  if not has_genotypes(variant):
    return default
  call_info = variant.calls[0].info
  if 'GQ' not in call_info:
    return default
  return call_info['GQ'].values[0].number_value
def is_gvcf(variant):
  """Returns true if variant encodes a standard gVCF reference block.

  In practice this means variant has a single alternate allele equal to the
  canonical gVCF allele, the GVCF_ALT_ALLELE constant exported here.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Returns:
    Boolean. True if variant is a gVCF record, False otherwise.
  """
  return list(variant.alternate_bases) == [GVCF_ALT_ALLELE]
def _genotype_order_in_likelihoods(num_alts, ploidy=2):
"""Yields tuples of `ploidy` ints for the given number of alt alleles.
https://samtools.github.io/hts-specs/VCFv4.1.pdf
"If A is the allele in REF and B,C,... are the alleles as ordered in ALT,
the ordering of genotypes for the likelihoods is given by:
F(j/k) = (k*(k+1)/2)+j. In other words, for biallelic sites the ordering is:
AA,AB,BB; for triallelic sites the ordering is: AA,AB,BB,AC,BC,CC, etc."
The biallelic sites in our case are 0/0, 0/1, 1/1.
The triallelic sites are 0/0, 0/1, 1/1, 0/2, 1/2, 2/2.
This wiki page has more information that generalizes to different ploidy.
http://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes
Args:
num_alts: int. The number of alternate alleles at the site.
ploidy: int. The ploidy for which to return genotypes.
Yields:
Tuples of `ploidy` ints representing allele indices in the order they appear
in the corresponding genotype likelihood array.
"""
if ploidy == 1:
for i in range(num_alts + 1):
yield (i,)
elif ploidy == 2:
for j in range(num_alts + 1):
for i in range(j + 1):
yield (i, j)
else:
raise NotImplementedError('Only haploid and diploid supported.')
def genotype_ordering_in_likelihoods(variant):
  """Yields (i, j, allele_i, allele_j) in genotype-likelihood order.

  Follows the VCF 4.1 specification ordering for diploid genotypes:
  F(j/k) = (k*(k+1)/2)+j, so biallelic sites are ordered AA,AB,BB and
  triallelic sites AA,AB,BB,AC,BC,CC.
  See https://samtools.github.io/hts-specs/VCFv4.1.pdf and
  http://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes
  Currently only the diploid case is implemented.

  Args:
    variant: third_party.nucleus.protos.Variant.

  Yields:
    Tuples (i, j, allele_i, allele_j) of allele indices and their allele
    strings, in genotype likelihood order.
  """
  # Index 0 is the reference allele; alternates follow in variant order.
  alleles = [variant.reference_bases] + list(variant.alternate_bases)
  num_alts = len(variant.alternate_bases)
  for first, second in _genotype_order_in_likelihoods(num_alts, ploidy=2):
    yield first, second, alleles[first], alleles[second]
def genotype_likelihood_index(allele_indices):
  """Returns the genotype likelihood index for the given allele indices.

  Args:
    allele_indices: list(int). The allele indices of one genotype, e.g.
      diploid homozygous reference is represented as [0, 0].

  Returns:
    The index into the associated genotype likelihood array corresponding
    to the likelihood of this list of alleles.

  Raises:
    NotImplementedError: The allele_indices are neither haploid nor diploid.
  """
  ploidy = len(allele_indices)
  if ploidy == 1:
    # Haploid: the likelihood index is the allele index itself.
    return allele_indices[0]
  if ploidy == 2:
    # Diploid: VCF spec ordering gives F(j/k) = k*(k+1)/2 + j for j <= k.
    low, high = sorted(allele_indices)
    return high * (high + 1) // 2 + low
  raise NotImplementedError(
      'Genotype likelihood index only supports haploid and diploid: {}'.
      format(allele_indices))
def allele_indices_for_genotype_likelihood_index(gl_index, ploidy=2):
  """Returns the allele indices encoded by the given GL array index.

  This is the inverse function to `genotype_likelihood_index`.

  Args:
    gl_index: int. The index within a genotype likelihood array for which
      to determine the associated alleles.
    ploidy: int. The ploidy of the result.

  Returns:
    For ploidy 2, a tuple of two ints holding the allele indices at this GL
    index. For ploidy 1 the bare int allele index is returned (not a
    1-tuple).

  Raises:
    NotImplementedError: `ploidy` is neither 1 nor 2.
  """
  if ploidy == 1:
    # Haploid: the GL index is the allele index itself.
    return gl_index
  if ploidy == 2:
    # Find the smallest number of alternates whose homozygous-alt genotype
    # reaches gl_index, then enumerate genotypes in GL order up to there.
    num_alts = 1
    while genotype_likelihood_index([num_alts, num_alts]) < gl_index:
      num_alts += 1
    ordered_genotypes = list(
        _genotype_order_in_likelihoods(num_alts, ploidy=ploidy))
    return ordered_genotypes[gl_index]
  raise NotImplementedError(
      'Allele calculations only supported for haploid and diploid.')
def genotype_likelihood(variantcall, allele_indices):
  """Returns the genotype likelihood for the given allele indices.

  Args:
    variantcall: third_party.nucleus.protos.VariantCall. The VariantCall
      from which to read the genotype likelihood of `allele_indices`.
    allele_indices: list(int). The allele indices of one genotype, e.g.
      diploid heterozygous alternate can be represented as [0, 1].

  Returns:
    The float value of the genotype likelihood of this set of alleles.
  """
  gl_index = genotype_likelihood_index(allele_indices)
  return variantcall.genotype_likelihood[gl_index]
def allele_indices_with_num_alts(variant, num_alts, ploidy=2):
  """Returns allele-index configurations with `num_alts` alternate alleles.

  Args:
    variant: third_party.nucleus.protos.Variant. The variant of interest;
      its alternate alleles define the candidate allele indices.
    num_alts: int in [0, `ploidy`]. The number of non-reference alleles in
      each returned configuration.
    ploidy: int. The ploidy for which to return configurations.

  Returns:
    A list of tuples. Each tuple is of length `ploidy` and represents the
    allele indices of a genotype containing `num_alts` non-reference
    alleles.

  Raises:
    ValueError: The domain of `num_alts` is invalid.
    NotImplementedError: `ploidy` is not diploid.
  """
  if ploidy != 2:
    raise NotImplementedError(
        'allele_indices_with_num_alts only supports diploid.')
  if not 0 <= num_alts <= ploidy:
    raise ValueError(
        'Invalid number of alternate alleles requested: {} for ploidy {}'.
        format(num_alts, ploidy))
  n_alts = len(variant.alternate_bases)
  if num_alts == 0:
    # Homozygous reference is the only zero-alt genotype.
    return [(0, 0)]
  if num_alts == 1:
    # Reference paired with each candidate alternate.
    return [(0, alt) for alt in range(1, n_alts + 1)]
  # Every unordered pair of (possibly equal) alternate alleles.
  return [(first, second)
          for first in range(1, n_alts + 1)
          for second in range(first, n_alts + 1)]
|
[
"deepvariant.core.genomics.struct_pb2.Value",
"deepvariant.core.genomics.variants_pb2.Variant.FromString",
"deepvariant.core.ranges.make_range"
] |
[((2951, 3026), 'deepvariant.core.ranges.make_range', 'ranges.make_range', (['variant.reference_name', 'variant.start', '(variant.start + 1)'], {}), '(variant.reference_name, variant.start, variant.start + 1)\n', (2968, 3026), False, 'from deepvariant.core import ranges\n'), ((3285, 3354), 'deepvariant.core.ranges.make_range', 'ranges.make_range', (['variant.reference_name', 'variant.start', 'variant.end'], {}), '(variant.reference_name, variant.start, variant.end)\n', (3302, 3354), False, 'from deepvariant.core import ranges\n'), ((2123, 2156), 'deepvariant.core.genomics.struct_pb2.Value', 'struct_pb2.Value', ([], {'number_value': 'gq'}), '(number_value=gq)\n', (2139, 2156), False, 'from deepvariant.core.genomics import struct_pb2\n'), ((2529, 2569), 'deepvariant.core.genomics.variants_pb2.Variant.FromString', 'variants_pb2.Variant.FromString', (['encoded'], {}), '(encoded)\n', (2560, 2569), False, 'from deepvariant.core.genomics import variants_pb2\n')]
|
import MulensModel as mm
import Functions as mc
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
from scipy.stats import truncnorm, loguniform, uniform
#plt.style.use('ggplot')
print(plt.style.available)
#print(plt.rcParams["font.family"].available)
#print(matplotlib.get_cachedir())
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#rc('font',**{'family':'serif','serif':['Times New Roman']})
#rc('text', usetex=True)
#plt.rcParams["font.family"] = "serif"
#print(plt.rcParams.keys())
#plt.rcParams['font.size'] = 12
# Prior probability distributions for the microlensing model parameters:
# separation s, mass ratio q, source-trajectory angle alpha, impact
# parameter u0, peak time t0, Einstein crossing time tE and source size rho.
s_pi = mc.logUniDist(0.2, 5)
q_pi = mc.logUniDist(10e-6, 1)  # NOTE(review): 10e-6 == 1e-5 -- confirm 1e-6 was not intended.
alpha_pi = mc.uniDist(0, 360)
u0_pi = mc.uniDist(0, 2)
t0_pi = mc.uniDist(0, 72)
tE_pi = mc.truncatedLogNormDist(1, 100, 10**1.15, 10**0.45)
rho_pi = mc.logUniDist(10**-4, 10**-2)

# Distribution whose PDF is plotted below.
distr = tE_pi

# Evaluate the PDF on a uniform grid and accumulate sum(pdf(x) * x) over
# the grid points.
y=[]
x=np.linspace(1, 100, 1000)
mu=0
for i in x:
    mu+=np.exp(distr.log_PDF(i))*i
    y.append(np.exp(distr.log_PDF(i)))
# NOTE(review): this divides by the number of grid points rather than
# multiplying by the bin width, so it is only proportional to the
# distribution mean -- confirm the intended normalisation.
print(mu/len(x))
#print(y)

# Global matplotlib styling for the PDF figure.
plt.rcParams["font.family"] = "serif"
plt.rcParams['font.size'] = 12
plt.style.use('seaborn-bright')
plt.rcParams["legend.edgecolor"] = '0'
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.title_fontsize"] = 10
plt.rcParams["legend.fontsize"] = 9
plt.rcParams["grid.linestyle"] = 'dashed'
plt.rcParams["grid.alpha"] = 0.25

# Plot the probability density and save the figure to disk.
plt.plot(x, y, label='Probability\nDensity')
plt.xlabel(r'Parameter [$\chi$]')
plt.ylabel(r'Probability Density [$\rho$]')
plt.title('Probability Density Function')
plt.legend(title='Entries')#, framealpha=1.0, edgecolor='0.0') #
#plt.axis('scaled')
plt.tight_layout()
plt.grid()
plt.savefig('Plots/pdf-test.png')
def centre_offsets_pointilism(supset_model, subset_model, symbols, name = '', dpi = 100):
    """Corner-style scatter plot of sampled-state offsets from the centre.

    For every lower-triangle parameter pair, overlays the superset model's
    sample offsets (spring colormap) on top of the subset model's sample
    offsets (winter colormap), and saves the figure to
    'results/<name>-centreed-pointilism.png'.

    Args:
        supset_model: model exposing `sampled.states_array(scaled=True)`,
            `centre.scaled` and `sampled.n` (superset parameter space).
        subset_model: as above for the subset space; its dimensionality
            attribute `D` sets the size of the plot grid.
        symbols: axis labels, one per subset-model dimension.
        name: stem used for the output file name.
        dpi: resolution of the saved figure.

    NOTE(review): `corner` and `style` are neither imported nor defined in
    this file; as written this function raises NameError unless they are
    supplied elsewhere -- confirm the intended module.
    """
    # Offsets of each sampled state from the model centre (scaled units).
    supset_offsets = (supset_model.sampled.states_array(scaled = True) - supset_model.centre.scaled[:, np.newaxis])
    subset_offsets = (subset_model.sampled.states_array(scaled = True) - subset_model.centre.scaled[:, np.newaxis])

    n_dim = subset_model.D

    style()

    # construct shape with corner
    figure = corner.corner(subset_offsets.T)

    # font/visibility
    plt.rcParams['font.size'] = 8
    plt.rcParams['axes.titlesize'] = 14
    plt.rcParams['axes.labelsize'] = 14

    # extract the axes
    axes = np.array(figure.axes).reshape((n_dim, n_dim))

    # Loop over the diagonal to remove from plot
    for i in range(n_dim):
        ax = axes[i, i]
        ax.cla()
        ax.patch.set_alpha(0.0)
        ax.axis('off')
        ax.axes.get_xaxis().set_ticklabels([])
        ax.axes.get_yaxis().set_ticklabels([])

    # loop over lower triangle
    for yi in range(n_dim):
        for xi in range(yi):
            ax = axes[yi, xi]
            ax.cla()
            # overlay points, coloured by sample order within each chain
            ax.scatter(subset_offsets[xi, :], subset_offsets[yi, :], c = np.linspace(0.0, 1.0, subset_model.sampled.n), cmap = 'winter', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
            ax.scatter(supset_offsets[xi, :], supset_offsets[yi, :], c = np.linspace(0.0, 1.0, supset_model.sampled.n), cmap = 'spring', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)

            if yi == n_dim - 1: # last row
                ax.set_xlabel(symbols[xi])
                ax.tick_params(axis = 'x', labelrotation = 45)
            else:
                ax.axes.get_xaxis().set_ticklabels([])

            if xi == 0: # first column
                ax.set_ylabel(symbols[yi])
                ax.tick_params(axis = 'y', labelrotation = 45)
            else:
                ax.axes.get_yaxis().set_ticklabels([])

    figure.savefig('results/' + name + '-centreed-pointilism.png', bbox_inches = "tight", dpi = dpi, transparent=True)
    figure.clf()
    return
|
[
"matplotlib.pyplot.title",
"Functions.truncatedLogNormDist",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"Functions.uniDist",
"matplotlib.pyplot.style.use",
"numpy.array",
"numpy.linspace",
"Functions.logUniDist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((613, 634), 'Functions.logUniDist', 'mc.logUniDist', (['(0.2)', '(5)'], {}), '(0.2, 5)\n', (626, 634), True, 'import Functions as mc\n'), ((642, 665), 'Functions.logUniDist', 'mc.logUniDist', (['(1e-05)', '(1)'], {}), '(1e-05, 1)\n', (655, 665), True, 'import Functions as mc\n'), ((677, 695), 'Functions.uniDist', 'mc.uniDist', (['(0)', '(360)'], {}), '(0, 360)\n', (687, 695), True, 'import Functions as mc\n'), ((704, 720), 'Functions.uniDist', 'mc.uniDist', (['(0)', '(2)'], {}), '(0, 2)\n', (714, 720), True, 'import Functions as mc\n'), ((729, 746), 'Functions.uniDist', 'mc.uniDist', (['(0)', '(72)'], {}), '(0, 72)\n', (739, 746), True, 'import Functions as mc\n'), ((755, 810), 'Functions.truncatedLogNormDist', 'mc.truncatedLogNormDist', (['(1)', '(100)', '(10 ** 1.15)', '(10 ** 0.45)'], {}), '(1, 100, 10 ** 1.15, 10 ** 0.45)\n', (778, 810), True, 'import Functions as mc\n'), ((817, 850), 'Functions.logUniDist', 'mc.logUniDist', (['(10 ** -4)', '(10 ** -2)'], {}), '(10 ** -4, 10 ** -2)\n', (830, 850), True, 'import Functions as mc\n'), ((870, 895), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', '(1000)'], {}), '(1, 100, 1000)\n', (881, 895), True, 'import numpy as np\n'), ((1085, 1116), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (1098, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1403), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""Probability\nDensity"""'}), '(x, y, label="""Probability\nDensity""")\n', (1364, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Parameter [$\\\\chi$]"""'], {}), "('Parameter [$\\\\chi$]')\n", (1411, 1434), True, 'import matplotlib.pyplot as plt\n'), ((1435, 1478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability Density [$\\\\rho$]"""'], {}), "('Probability Density [$\\\\rho$]')\n", (1445, 1478), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1520), 
'matplotlib.pyplot.title', 'plt.title', (['"""Probability Density Function"""'], {}), "('Probability Density Function')\n", (1488, 1520), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1548), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Entries"""'}), "(title='Entries')\n", (1531, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1626), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1624, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1637), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1635, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1671), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Plots/pdf-test.png"""'], {}), "('Plots/pdf-test.png')\n", (1649, 1671), True, 'import matplotlib.pyplot as plt\n'), ((2291, 2312), 'numpy.array', 'np.array', (['figure.axes'], {}), '(figure.axes)\n', (2299, 2312), True, 'import numpy as np\n'), ((2869, 2914), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'subset_model.sampled.n'], {}), '(0.0, 1.0, subset_model.sampled.n)\n', (2880, 2914), True, 'import numpy as np\n'), ((3059, 3104), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'supset_model.sampled.n'], {}), '(0.0, 1.0, supset_model.sampled.n)\n', (3070, 3104), True, 'import numpy as np\n')]
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from ase import Atoms
from ase.constraints import dict2constraint
import copy
import importlib
import numpy as np
from pyiron_atomistics.atomistics.job.interactive import GenericInteractive
from pyiron_atomistics.atomistics.structure.atoms import pyiron_to_ase, Atoms as PAtoms
try:
from ase.cell import Cell
except ImportError:
Cell = None
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Sep 1, 2018"
def ase_structure_todict(structure):
    """Serialize an ASE Atoms object (plus optional calculator) to a dict.

    Collects symbols, positions, cell, periodicity and - when present -
    tags, masses, momenta, initial magnetic moments and initial charges.
    If a calculator is attached, its ``todict()`` output is stored together
    with the dotted class path and label so it can be re-instantiated later.

    Args:
        structure (ase.Atoms): structure to serialize.

    Returns:
        dict: keyword arguments suitable for `ase_structure_fromdict`.
    """
    atoms_dict = {
        "symbols": structure.get_chemical_symbols(),
        "positions": structure.get_positions(),
        "pbc": structure.get_pbc(),
        "celldisp": structure.get_celldisp(),
        "constraint": [constraint.todict() for constraint in structure.constraints],
        "info": copy.deepcopy(structure.info),
    }
    # Newer ASE versions wrap the cell in a Cell object with its own todict().
    cell = structure.get_cell()
    atoms_dict["cell"] = cell.todict() if Cell is not None else cell
    # Optional per-atom arrays are stored only when actually present.
    for array_name, getter in (
        ("tags", structure.get_tags),
        ("masses", structure.get_masses),
        ("momenta", structure.get_momenta),
    ):
        if structure.has(array_name):
            atoms_dict[array_name] = getter()
    if structure.has("initial_magmoms"):
        atoms_dict["magmoms"] = structure.get_initial_magnetic_moments()
    if structure.has("initial_charges"):
        atoms_dict["charges"] = structure.get_initial_charges()
    if structure.calc is not None:
        calculator = structure.calc
        calculator_dict = calculator.todict()
        # Extract the dotted import path from repr "<class 'module.Name'>".
        calculator_dict["calculator_class"] = (
            str(calculator.__class__).replace("'", " ").split()[1]
        )
        calculator_dict["label"] = calculator.label
        atoms_dict["calculator"] = calculator_dict
    return atoms_dict
def ase_calculator_fromdict(class_path, class_dict):
    """Instantiate a calculator from its dotted class path and kwargs dict.

    Args:
        class_path (str): dotted import path, e.g. "ase.calculators.emt.EMT".
        class_dict (dict): keyword arguments passed to the class constructor.

    Returns:
        The instantiated calculator object.
    """
    module_name, _, class_name = class_path.rpartition(".")
    calculator_class = getattr(importlib.import_module(module_name), class_name)
    return calculator_class(**class_dict)
def ase_structure_fromdict(atoms_dict):
    """Rebuild an ASE Atoms object from a dict made by `ase_structure_todict`.

    Reconstructs constraints via `dict2constraint`, the cell via
    `ase.cell.Cell` when available, and re-instantiates and reads any
    attached calculator.

    Args:
        atoms_dict (dict): serialized structure.

    Returns:
        ase.Atoms: the reconstructed structure.
    """
    def cell_fromdict(celldict):
        # "pbc" is stored on the Atoms object itself, not on the cell.
        celldict.pop("pbc", None)
        # Older ASE versions have no Cell class; keep the raw dict then.
        return Cell(**celldict) if Cell is not None else celldict

    kwargs = copy.deepcopy(atoms_dict)
    if "calculator" in kwargs:
        calculator_dict = kwargs["calculator"]
        # The class path is metadata, not a constructor argument.
        calculator_class = calculator_dict.pop("calculator_class")
        kwargs["calculator"] = ase_calculator_fromdict(
            calculator_class, calculator_dict
        )
    if "constraint" in kwargs:
        kwargs["constraint"] = [
            dict2constraint(const_dict) for const_dict in kwargs["constraint"]
        ]
    kwargs["cell"] = cell_fromdict(celldict=kwargs["cell"])
    atoms = Atoms(**kwargs)
    if atoms.calc is not None:
        # Restore calculator state from the files behind its label.
        atoms.calc.read(atoms.calc.label)
    return atoms
class AseJob(GenericInteractive):
    """Generic pyiron job that drives an ASE calculator interactively.

    Subclasses must implement `set_calculator` to attach a concrete ASE
    calculator to the structure; output is collected through the
    interactive caching machinery of `GenericInteractive`.
    """

    def __init__(self, project, job_name):
        super(AseJob, self).__init__(project, job_name)
        self.__name__ = "AseJob"
        self.__version__ = (
            None  # Reset the version number to the executable is set automatically
        )

    @property
    def structure(self):
        # Delegate to the parent property so there is one storage location.
        return GenericInteractive.structure.fget(self)

    @structure.setter
    def structure(self, structure):
        # Accept pyiron Atoms as well by converting them to ASE Atoms first.
        if isinstance(structure, PAtoms):
            structure = pyiron_to_ase(structure)
        GenericInteractive.structure.fset(self, structure)

    def to_hdf(self, hdf=None, group_name=None):
        """Store the job and its serialized structure in HDF5.

        Args:
            hdf: HDF5 group to write to (defaults to the job's own group).
            group_name: name of the subgroup within `hdf`.
        """
        super(AseJob, self).to_hdf(hdf=hdf, group_name=group_name)
        with self.project_hdf5.open("input") as hdf_input:
            hdf_input["structure"] = ase_structure_todict(self._structure)

    def from_hdf(self, hdf=None, group_name=None):
        """Restore the job and its structure from HDF5.

        Args:
            hdf: HDF5 group to read from (defaults to the job's own group).
            group_name: name of the subgroup within `hdf`.
        """
        super(AseJob, self).from_hdf(hdf=hdf, group_name=group_name)
        with self.project_hdf5.open("input") as hdf_input:
            self.structure = ase_structure_fromdict(hdf_input["structure"])

    def run_static(self):
        # A "static" run is implemented as a one-shot interactive session;
        # the previous run mode is restored afterwards.
        pre_run_mode = self.server.run_mode
        self.server.run_mode.interactive = True
        self.run_if_interactive()
        self.interactive_close()
        self.server.run_mode = pre_run_mode

    def run_if_interactive(self):
        # Lazily attach the calculator before the first interactive step.
        if self.structure.calc is None:
            self.set_calculator()
        super(AseJob, self).run_if_interactive()
        self.interactive_collect()

    def set_calculator(self):
        """Attach an ASE calculator to the structure (must be overridden)."""
        raise NotImplementedError(
            "The _set_calculator function is not implemented for this code."
        )

    def interactive_structure_setter(self, structure):
        # Trigger a fresh calculation for the new structure.
        self.structure.calc.calculate(structure)

    def interactive_positions_setter(self, positions):
        self.structure.positions = positions

    def interactive_initialize_interface(self):
        # Mark the job as running and point the calculator output at the
        # job's working directory.
        self.status.running = True
        self._structure.calc.set_label(self.working_directory + "/")
        self._interactive_library = True

    def interactive_close(self):
        # Copy cached interactive output into the generic output group.
        if self.interactive_is_activated():
            super(AseJob, self).interactive_close()
            with self.project_hdf5.open("output") as h5:
                if "interactive" in h5.list_groups():
                    for key in h5["interactive"].list_nodes():
                        h5["generic/" + key] = h5["interactive/" + key]

    def interactive_forces_getter(self):
        return self.structure.get_forces()

    def interactive_pressures_getter(self):
        # ASE returns the stress tensor; pressure is its negative.
        return -self.structure.get_stress(voigt=False)

    def interactive_energy_pot_getter(self):
        return self.structure.get_potential_energy()

    def interactive_energy_tot_getter(self):
        # No kinetic contribution here, so total equals potential energy.
        return self.structure.get_potential_energy()

    def interactive_indices_getter(self):
        # Map each chemical symbol to its index in the sorted unique species
        # list of the current structure.
        element_lst = sorted(list(set(self.structure.get_chemical_symbols())))
        return np.array(
            [element_lst.index(el) for el in self.structure.get_chemical_symbols()]
        )

    def interactive_positions_getter(self):
        return self.structure.positions.copy()

    def interactive_steps_getter(self):
        # All cache entries have the same length; read the first one.
        return len(self.interactive_cache[list(self.interactive_cache.keys())[0]])

    def interactive_time_getter(self):
        return self.interactive_steps_getter()

    def interactive_volume_getter(self):
        return self.structure.get_volume()

    def interactive_cells_getter(self):
        return self.structure.cell.copy()

    def write_input(self):
        # No input files required; everything happens through ASE in memory.
        pass

    def collect_output(self):
        # Output is collected interactively; nothing to parse from disk.
        pass

    def run_if_scheduler(self):
        self._create_working_directory()
        super(AseJob, self).run_if_scheduler()

    def interactive_index_organizer(self):
        # Rebuild the species-index mapping and push new indices to the
        # interactive interface when the current structure's species indices
        # differ from the previous structure's.
        index_merge_lst = self._interactive_species_lst.tolist() + list(
            np.unique(self._structure_current.get_chemical_symbols())
        )
        el_lst = sorted(set(index_merge_lst), key=index_merge_lst.index)
        current_structure_index = [
            el_lst.index(el) for el in self._structure_current.get_chemical_symbols()
        ]
        previous_structure_index = [
            el_lst.index(el) for el in self._structure_previous.get_chemical_symbols()
        ]
        if not np.array_equal(
            np.array(current_structure_index),
            np.array(previous_structure_index),
        ):
            self._logger.debug("Generic library: indices changed!")
            self.interactive_indices_setter(self._structure_current.indices)

    def _get_structure(self, frame=-1, wrap_atoms=True):
        """Return the structure snapshot at output step `frame`.

        Args:
            frame (int): output step index (negative counts from the end).
            wrap_atoms (bool): if True use wrapped positions; otherwise use
                unwrapped positions when available, or reconstruct them from
                total displacements.

        Returns:
            Atoms or None: the requested snapshot, or None if no output is
            available for that frame.
        """
        if (
            self.server.run_mode.interactive
            or self.server.run_mode.interactive_non_modal
        ):
            # Warning: We only copy symbols, positions and cell information - no tags.
            if self.output.indices is not None and len(self.output.indices) != 0:
                indices = self.output.indices[frame]
            else:
                return None
            if len(self._interactive_species_lst) == 0:
                el_lst = list(np.unique(self._structure_current.get_chemical_symbols()))
            else:
                el_lst = self._interactive_species_lst.tolist()
            if indices is not None:
                if wrap_atoms:
                    positions = self.output.positions[frame]
                else:
                    if len(self.output.unwrapped_positions) > max([frame, 0]):
                        positions = self.output.unwrapped_positions[frame]
                    else:
                        positions = (
                            self.output.positions[frame]
                            + self.output.total_displacements[frame]
                        )
                atoms = Atoms(
                    symbols=np.array([el_lst[el] for el in indices]),
                    positions=positions,
                    cell=self.output.cells[frame],
                    pbc=self.structure.pbc,
                )
                # Update indices to match the indices in the cache.
                atoms.indices = indices
                return atoms
            else:
                return None
        else:
            if (
                self.get("output/generic/cells") is not None
                and len(self.get("output/generic/cells")) != 0
            ):
                return super()._get_structure(frame=frame, wrap_atoms=wrap_atoms)
            else:
                return None
class AseAdapter(object):
    """Adapter that lets ASE dynamics drive a pyiron interactive job.

    Implements the subset of the `ase.Atoms` interface (positions, momenta,
    masses, forces, ...) that ASE integrators rely on, while delegating
    force evaluations to the wrapped pyiron job.
    """

    def __init__(self, ham, fast_mode=False):
        """
        Args:
            ham: interactive pyiron job used to evaluate forces/energies.
            fast_mode (bool): if True, bypass the job-side cache and collect
                positions/energies in this adapter instead.
        """
        self._ham = ham
        self._fast_mode = fast_mode
        if self._ham.server.run_mode.interactive and fast_mode:
            # In fast mode this adapter keeps its own cache and clears the
            # job-side cache to avoid double bookkeeping.
            self.interactive_cache = {
                "velocities": [],
                "energy_kin": [],
                "momenta": [],
                "positions": [],
                "energy_tot": [],
                "energy_pot": [],
            }
            self._ham.run()
            self._ham.interactive_cache = {}
        elif self._ham.server.run_mode.interactive:
            self.interactive_cache = {"velocities": [], "energy_kin": [], "momenta": []}
        self.constraints = []
        try:
            self.arrays = {
                "positions": self._ham.structure.positions.copy(),
                "numbers": self._ham.structure.numbers,
            }
        except AttributeError:
            # Fall back for structures without a `numbers` attribute that
            # provide get_atomic_numbers() instead.
            self.arrays = {
                "positions": self._ham.structure.positions.copy(),
                "numbers": self._ham.structure.get_atomic_numbers(),
            }

    @property
    def communicator(self):
        # No parallel communicator is used by this adapter.
        return None

    def get_masses(self):
        """Return the atomic masses as a numpy array."""
        return np.array(self._ham.structure.get_masses())

    def get_positions(self):
        """Return the current Cartesian positions."""
        return self.arrays["positions"]

    def set_positions(self, positions):
        """Overwrite the current Cartesian positions."""
        self.arrays["positions"] = positions

    def get_forces(self, md=True):
        """Evaluate and return forces for the current positions.

        Args:
            md (bool): kept for ASE interface compatibility; unused here.
        """
        if self._fast_mode:
            self._ham.interactive_positions_setter(self.arrays["positions"])
            self.interactive_cache["positions"].append(self.arrays["positions"])
            self._ham.interactive_execute()
            self.interactive_cache["energy_pot"].append(
                self._ham.interactive_energy_pot_getter()
            )
            return np.array(self._ham.interactive_forces_getter())
        else:
            self._ham.structure.positions = self.arrays["positions"]
            if self._ham.server.run_mode.interactive:
                self._ham.run()
            else:
                self._ham.run(delete_existing_job=True)
            return self._ham.output.forces[-1]

    def interactive_close(self):
        """Flush cached trajectory/energy data into the job and close it."""
        self._ham.interactive_store_in_cache(
            "velocities", self.interactive_cache["velocities"]
        )
        self._ham.interactive_store_in_cache(
            "energy_kin", self.interactive_cache["energy_kin"]
        )
        if self._fast_mode:
            self._ham.interactive_store_in_cache(
                "positions", self.interactive_cache["positions"]
            )
            # NOTE(review): energy_pot is stored with stride 2 ([::2]) --
            # presumably because get_forces() runs twice per MD step;
            # confirm against the integrator in use.
            self._ham.interactive_store_in_cache(
                "energy_pot", self.interactive_cache["energy_pot"][::2]
            )
            self._ham.interactive_store_in_cache(
                "energy_tot",
                (
                    np.array(self.interactive_cache["energy_pot"][::2])
                    + np.array(self.interactive_cache["energy_kin"])
                ).tolist(),
            )
        else:
            self._ham.interactive_store_in_cache(
                "energy_tot",
                (
                    np.array(self._ham.output.energy_pot)[::2]
                    + np.array(self.interactive_cache["energy_kin"])
                ).tolist(),
            )
        self._ham.interactive_close()

    def get_number_of_atoms(self):
        """Return the number of atoms in the wrapped structure."""
        return self._ham.structure.get_number_of_atoms()

    # ASE functions
    def get_kinetic_energy(self):
        """Get the kinetic energy."""
        momenta = self.arrays.get("momenta")
        if momenta is None:
            return 0.0
        return 0.5 * np.vdot(momenta, self.get_velocities())

    def set_momenta(self, momenta, apply_constraint=True):
        """Set momenta."""
        if apply_constraint and len(self.constraints) > 0 and momenta is not None:
            momenta = np.array(momenta)  # modify a copy
            for constraint in self.constraints:
                if hasattr(constraint, "adjust_momenta"):
                    constraint.adjust_momenta(self, momenta)
        self.set_array("momenta", momenta, float, (3,))
        # Record per-step velocities and kinetic energy for later storage.
        self.interactive_cache["velocities"].append(self.get_velocities())
        self.interactive_cache["energy_kin"].append(self.get_kinetic_energy())

    def set_velocities(self, velocities):
        """Set the momenta by specifying the velocities."""
        self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)

    def get_momenta(self):
        """Get array of momenta."""
        if "momenta" in self.arrays:
            return self.arrays["momenta"].copy()
        else:
            return np.zeros((len(self), 3))

    def set_array(self, name, a, dtype=None, shape=None):
        """Update array.

        If *shape* is not *None*, the shape of *a* will be checked.
        If *a* is *None*, then the array is deleted."""
        b = self.arrays.get(name)
        if b is None:
            if a is not None:
                self.new_array(name, a, dtype, shape)
        else:
            if a is None:
                del self.arrays[name]
            else:
                a = np.asarray(a)
                if a.shape != b.shape:
                    raise ValueError(
                        "Array has wrong shape %s != %s." % (a.shape, b.shape)
                    )
                # In-place assignment keeps existing references valid.
                b[:] = a

    def get_angular_momentum(self):
        """Get total angular momentum with respect to the center of mass."""
        com = self.get_center_of_mass()
        positions = self.get_positions()
        positions -= com  # translate center of mass to origin
        return np.cross(positions, self.get_momenta()).sum(0)

    def new_array(self, name, a, dtype=None, shape=None):
        """Add new array.

        If *shape* is not *None*, the shape of *a* will be checked."""
        if dtype is not None:
            a = np.array(a, dtype, order="C")
            if len(a) == 0 and shape is not None:
                a.shape = (-1,) + shape
        else:
            if not a.flags["C_CONTIGUOUS"]:
                a = np.ascontiguousarray(a)
            else:
                a = a.copy()
        if name in self.arrays:
            raise RuntimeError
        # All per-atom arrays must have the same leading length; comparing
        # against one existing array is sufficient.
        for b in self.arrays.values():
            if len(a) != len(b):
                raise ValueError("Array has wrong length: %d != %d." % (len(a), len(b)))
            break
        if shape is not None and a.shape[1:] != shape:
            raise ValueError(
                "Array has wrong shape %s != %s." % (a.shape, (a.shape[0:1] + shape))
            )
        self.arrays[name] = a

    def has(self, name):
        """Check for existence of array.

        name must be one of: 'tags', 'momenta', 'masses', 'initial_magmoms',
        'initial_charges'."""
        # XXX extend has to calculator properties
        return name in self.arrays

    def get_center_of_mass(self, scaled=False):
        """Get the center of mass.

        If scaled=True the center of mass in scaled coordinates
        is returned."""
        m = self.get_masses()
        com = np.dot(m, self.arrays["positions"]) / m.sum()
        if scaled:
            if self._fast_mode:
                return np.linalg.solve(self._ham.structure.cells[-1].T, com)
            else:
                return np.linalg.solve(self._ham.output.cells[-1].T, com)
        else:
            return com

    def get_velocities(self):
        """Get array of velocities."""
        momenta = self.arrays.get("momenta")
        if momenta is None:
            return None
        m = self.get_masses()
        # m = self.arrays.get('masses')
        # if m is None:
        #     m = atomic_masses[self.arrays['numbers']]
        return momenta / m.reshape(-1, 1)

    def __len__(self):
        return len(self._ham.structure)
|
[
"ase.constraints.dict2constraint",
"copy.deepcopy",
"ase.cell.Cell",
"numpy.asarray",
"pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fset",
"numpy.array",
"pyiron_atomistics.atomistics.structure.atoms.pyiron_to_ase",
"pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fget",
"numpy.linalg.solve",
"numpy.dot",
"numpy.ascontiguousarray",
"ase.Atoms"
] |
[((2640, 2665), 'copy.deepcopy', 'copy.deepcopy', (['atoms_dict'], {}), '(atoms_dict)\n', (2653, 2665), False, 'import copy\n'), ((3278, 3302), 'ase.Atoms', 'Atoms', ([], {}), '(**atoms_dict_copy)\n', (3283, 3302), False, 'from ase import Atoms\n'), ((1159, 1188), 'copy.deepcopy', 'copy.deepcopy', (['structure.info'], {}), '(structure.info)\n', (1172, 1188), False, 'import copy\n'), ((3739, 3778), 'pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fget', 'GenericInteractive.structure.fget', (['self'], {}), '(self)\n', (3772, 3778), False, 'from pyiron_atomistics.atomistics.job.interactive import GenericInteractive\n'), ((3937, 3987), 'pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fset', 'GenericInteractive.structure.fset', (['self', 'structure'], {}), '(self, structure)\n', (3970, 3987), False, 'from pyiron_atomistics.atomistics.job.interactive import GenericInteractive\n'), ((2558, 2574), 'ase.cell.Cell', 'Cell', ([], {}), '(**celldict)\n', (2562, 2574), False, 'from ase.cell import Cell\n'), ((3102, 3129), 'ase.constraints.dict2constraint', 'dict2constraint', (['const_dict'], {}), '(const_dict)\n', (3117, 3129), False, 'from ase.constraints import dict2constraint\n'), ((3904, 3928), 'pyiron_atomistics.atomistics.structure.atoms.pyiron_to_ase', 'pyiron_to_ase', (['structure'], {}), '(structure)\n', (3917, 3928), False, 'from pyiron_atomistics.atomistics.structure.atoms import pyiron_to_ase, Atoms as PAtoms\n'), ((13747, 13764), 'numpy.array', 'np.array', (['momenta'], {}), '(momenta)\n', (13755, 13764), True, 'import numpy as np\n'), ((15749, 15778), 'numpy.array', 'np.array', (['a', 'dtype'], {'order': '"""C"""'}), "(a, dtype, order='C')\n", (15757, 15778), True, 'import numpy as np\n'), ((16956, 16991), 'numpy.dot', 'np.dot', (['m', "self.arrays['positions']"], {}), "(m, self.arrays['positions'])\n", (16962, 16991), True, 'import numpy as np\n'), ((7727, 7760), 'numpy.array', 'np.array', 
(['current_structure_index'], {}), '(current_structure_index)\n', (7735, 7760), True, 'import numpy as np\n'), ((7774, 7808), 'numpy.array', 'np.array', (['previous_structure_index'], {}), '(previous_structure_index)\n', (7782, 7808), True, 'import numpy as np\n'), ((15008, 15021), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (15018, 15021), True, 'import numpy as np\n'), ((15947, 15970), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['a'], {}), '(a)\n', (15967, 15970), True, 'import numpy as np\n'), ((17076, 17129), 'numpy.linalg.solve', 'np.linalg.solve', (['self._ham.structure.cells[-1].T', 'com'], {}), '(self._ham.structure.cells[-1].T, com)\n', (17091, 17129), True, 'import numpy as np\n'), ((17171, 17221), 'numpy.linalg.solve', 'np.linalg.solve', (['self._ham.output.cells[-1].T', 'com'], {}), '(self._ham.output.cells[-1].T, com)\n', (17186, 17221), True, 'import numpy as np\n'), ((9225, 9265), 'numpy.array', 'np.array', (['[el_lst[el] for el in indices]'], {}), '([el_lst[el] for el in indices])\n', (9233, 9265), True, 'import numpy as np\n'), ((12725, 12776), 'numpy.array', 'np.array', (["self.interactive_cache['energy_pot'][::2]"], {}), "(self.interactive_cache['energy_pot'][::2])\n", (12733, 12776), True, 'import numpy as np\n'), ((12799, 12845), 'numpy.array', 'np.array', (["self.interactive_cache['energy_kin']"], {}), "(self.interactive_cache['energy_kin'])\n", (12807, 12845), True, 'import numpy as np\n'), ((13085, 13131), 'numpy.array', 'np.array', (["self.interactive_cache['energy_kin']"], {}), "(self.interactive_cache['energy_kin'])\n", (13093, 13131), True, 'import numpy as np\n'), ((13020, 13057), 'numpy.array', 'np.array', (['self._ham.output.energy_pot'], {}), '(self._ham.output.energy_pot)\n', (13028, 13057), True, 'import numpy as np\n')]
|
from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, backref
from mps_database.models import Base
class ThresholdFault(Base):
    """
    ThresholdFault class (threshold_faults table)

    Describe an analog fault, which is generated by an AnalogDevice.
    The AnalogDevice provides a compressed analog value from the device,
    the compressed value is expressed a reduced number of bits (e.g. 12).
    The value read from the device is compared to the threshold stored
    here. The conversion from the threshold to analog value is done
    via the threshold_values_map and threshold_values tables.

    Properties:
      name: short fault description
      greater_than: if true, if the AnalogDevice value is larger than the
        compressed_threshold then a ThresholdFault is generated
        if false, if the AnalogDevice value is smaller than the
        compressed threshold then a ThresholdFault is generated

    References:
      analog_device_id: defines the type of analog device related to this
        fault
      threshold_value_id: defines which threshold value is used when calculating
        if a fault happened

    Relationships:
      threshold_fault_state: through the ThresholdFaultStates this
        ThresholdFault is linked to an AllowedClass (allowed beam class)
    """
    __tablename__ = 'threshold_faults'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Short human-readable fault description.
    name = Column(String, nullable=False)
    # Analog device whose readback generates this fault.
    analog_device_id = Column(Integer, ForeignKey('analog_devices.id'), nullable=False)
    # If greater_than is true, a value larger than the threshold will generate a fault.
    # If greater_than is false, a value smaller than the threshold will generate a fault.
    greater_than = Column(Boolean, nullable=False)
    # One-to-one link to the fault state mapping this fault to allowed beam classes.
    threshold_fault_state = relationship("ThresholdFaultState", uselist=False, backref="threshold_fault")
    # Threshold value consulted when evaluating whether the fault fired.
    threshold_value_id = Column(Integer, ForeignKey('threshold_values.id'), nullable=False)

    @property
    def less_than(self):
        # Convenience inverse of greater_than.
        return not self.greater_than
|
[
"sqlalchemy.orm.relationship",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] |
[((1436, 1469), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1442, 1469), False, 'from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey\n'), ((1479, 1509), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (1485, 1509), False, 'from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey\n'), ((1785, 1816), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (1791, 1816), False, 'from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey\n'), ((1843, 1920), 'sqlalchemy.orm.relationship', 'relationship', (['"""ThresholdFaultState"""'], {'uselist': '(False)', 'backref': '"""threshold_fault"""'}), "('ThresholdFaultState', uselist=False, backref='threshold_fault')\n", (1855, 1920), False, 'from sqlalchemy.orm import relationship, backref\n'), ((1547, 1578), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""analog_devices.id"""'], {}), "('analog_devices.id')\n", (1557, 1578), False, 'from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey\n'), ((1960, 1993), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""threshold_values.id"""'], {}), "('threshold_values.id')\n", (1970, 1993), False, 'from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey\n')]
|
import numpy as np
from logistic_regression import logistic_kernel_regression, compute_label
from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig
from kernel_creation import convert_acid_kernel, convert_acid_quad, convert_mismatch_lev, convert_lect_trig, get_mismatch_dict
from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode
from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian
from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted
from SVM import SVM, svm_compute_label
list_letters = ["A", "C", "G", "T"]
list_trig = [a + b + c for a in list_letters for b in list_letters for c in list_letters]
list_quad = [a + b + c + d for a in list_letters for b in list_letters for c in list_letters for d in list_letters]
list_quint = [a + b + c + d + e for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters]
list_six = [a + b + c + d + e + f for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters for f in list_letters]
dico_acid = {'Alanine': [ 'GCU', 'GCC', 'GCA', 'GCG'], 'Arginine': ['CGU', 'CGC', 'CGA', 'CGG' , 'AGA', 'AGG'],
'Asparagine': ['AAU', 'AAC'], 'Acide aspartique': ['GAU', 'GAC'],
'Cysteine': ['UGU', 'UGC'], 'Glutamine': ['CAA', 'CAG'], 'Acide glutamique':['GAA', 'GAG'],
'Glycine':['GGU', 'GGC', 'GGA', 'GGG'], 'Histidine': ['CAU', 'CAC'], 'Isoleucine': ['AUU', 'AUC', 'AUA'],
'Leucine': ['UUA', 'UUG' , 'CUU', 'CUC', 'CUA', 'CUG'], 'Lysine': ['AAA', 'AAG'],
'Methionine': ['AUG'], 'Phenylalanine':['UUU', 'UUC'], 'Proline' :['CCU', 'CCC', 'CCA', 'CCG'],
'Pyrrolysine': ['UAG'], 'Selenocysteine':['UGA'], 'Serine':['UCU', 'UCC', 'UCA', 'UCG' , 'AGU', 'AGC'],
'Threonine':['ACU', 'ACC', 'ACA', 'ACG'], 'Tryptophane':['UGG'], 'Tyrosine':['UAU', 'UAC'],
'Valine':['GUU', 'GUC', 'GUA', 'GUG'], 'Initiation': ['AUG'], 'Terminaison': ['UAG', 'UAA', 'UGA']}
def is_pos_def(x):
return np.all(np.linalg.eigvals(x) > 0)
## Parameters
lamb_log = 0.0000001
lamb_svm = 0.00001
sigma = 0.8
add_param = 10.**(-10)
list_seq_id = list_six
mis_lev = False
if mis_lev:
dict_mismatch = get_mismatch_dict(list_seq_id)
mis_dic = False
size_seq = 6
nb_mis = 0
beg = 0
if mis_dic:
dict_corres = get_correspondances(list_seq_id, nb_mis, list_letters)
list_mis_corres = dict_corres.keys()
print(list_mis_corres)
mis_dic_full = False
if mis_dic_full:
dict_corres = get_full_corres(list_seq_id, nb_mis, list_letters)
list_mis_corres = dict_corres.keys()
##
list_labels_log = []
list_labels_svm = []
for name in [ "0", "1","2"]:
print ("beginning loading of the data")
# Training data
sequences = read_csv_file_data("data/Xtr"+ name+ ".csv")
#list_converted = convert_spectral_kernel_trig(sequences, list_seq_id)
#list_converted = convert_spectral_kernel_quad(sequences, list_quad)
list_converted = convert_spectral_kernel_quint(sequences, list_quint)
#list_converted = convert_spectral_kernel_quint(sequences, list_quint)
#list_converted = convert_acid_kernel(sequences, dico_acid)
#list_converted = convert_acid_quad(sequences, dico_acid, list_quad
#list_converted = convert_mismatch_lev(sequences, list_seq_id, dict_mismatch, size_seq, nb_mis)
#list_converted = convert_lect_trig(sequences, list_seq_id, beg)
#list_converted = convert_lect_acid(sequences, dico_acid, beg)
#list_converted = convert_mismatch_dico(sequences, dict_corres,list_mis_corres, list_seq_id)
#list_converted = convert_encode(sequences, list_letters)
training = np.asarray(list_converted, dtype = float)
# to avoid huge values and to save time for the logistic regression :
sm = np.sum(training, axis= 1)
training = training/sm[0]
mean = np.mean(training, axis= 0)
training = training - mean
#vst = np.std(training, axis= 0)
#training = training / vst
#save_data_converted("spectral_kernel/Xtr"+ name+ ".csv", training)
# label training data
label = read_csv_file_label("data/Ytr"+ name+ ".csv")
label= np.asarray(label).reshape((len(label), ))
# select what will be the test for training
size_test = int(training.shape[0]/10)
test_train = training[0:size_test]
label_test_train = label[0:size_test]
print( label_test_train.shape)
size_total = training.shape[0]
training = training[size_test:size_total]
label_train = label[size_test:size_total]
print (label_train.shape)
# Test data
sequences_test = read_csv_file_data("data/Xte"+ name+ ".csv")
#list_converted_test = convert_spectral_kernel_trig(sequences_test, list_seq_id)
#list_converted_test = convert_spectral_kernel_quad(sequences_test, list_quad)
list_converted_test = convert_spectral_kernel_quint(sequences_test, list_quint)
#list_converted_test = convert_acid_kernel(sequences_test, dico_acid)
#list_converted_test = convert_acid_quad(sequences_test, dico_acid, list_quad)
#list_converted_test = convert_mismatch_lev(sequences_test, list_seq_id, dict_mismatch, size_seq, nb_mis)
#list_converted_test = convert_lect_trig(sequences_test, list_seq_id, beg )
#list_converted_test = convert_lect_acid(sequences_test, dico_acid, beg)
#list_converted_test = convert_mismatch_dico(sequences_test, dict_corres,list_mis_corres, list_seq_id)
#list_converted_test = convert_encode(sequences, list_letters)
testing = np.asarray(list_converted_test, dtype = float)
# to avoid huge values and to save time for the logistic regression :
testing = testing/sm[0]
testing = testing - mean
#testing = testing/ vst
# param for each dataset:
"""if name=="0":
lamb_svm = 0.000008
add_param = 10. ** (-10)
if name=="1":
lamb_svm = 0.00001
add_param = 10.**(-10)
if name == "2":
lamb_svm = 0.000005
add_param=10.**(-9)"""
if name=="2":
add_param = 10**(-9)
print ("data loaded")
# Computing the kernel
print ("beginning computing K")
K = compute_K_matrix(training)
add = add_param*np.identity(K.shape[0])
K_add = K + add # to make it positive definite
#K = compute_K_gaussian(training, sigma)
#K_add = K
print(K)
print("K shape", K.shape)
print(is_pos_def(K_add))
K_test_train = compute_test_matrix(training, test_train)
print (K_test_train.shape)
print ("K computed")
"""#Training : kernel logistic regression
alpha = logistic_kernel_regression(K, label_train, lamb_log, 15, K_test_train, label_test_train)
# Testing : kernel logistic regression
Ktest = compute_test_matrix(training, testing)
labels_test = compute_label(Ktest, alpha)
list_labels_log = list_labels_log + labels_test"""
# Training : SVM
alpha = SVM(K_add, label_train, lamb_svm, K_test_train, label_test_train)
print(alpha)
# Testing : kernel logistic regression
Ktest = compute_test_matrix(training, testing)
labels_test = svm_compute_label(Ktest, alpha)
list_labels_svm = list_labels_svm + labels_test
save_label(0, list_labels_svm,"results/SVM-quint-centered-mixed.csv" )
|
[
"read_fn.save_label",
"kernel_creation.get_mismatch_dict",
"kernel_creation.compute_test_matrix",
"numpy.sum",
"numpy.linalg.eigvals",
"SVM.SVM",
"numpy.asarray",
"numpy.identity",
"read_fn.read_csv_file_label",
"kernel_creation.compute_K_matrix",
"SVM.svm_compute_label",
"numpy.mean",
"kernel_creation.convert_spectral_kernel_quint",
"kernel_creation.get_full_corres",
"kernel_creation.get_correspondances",
"read_fn.read_csv_file_data"
] |
[((7342, 7412), 'read_fn.save_label', 'save_label', (['(0)', 'list_labels_svm', '"""results/SVM-quint-centered-mixed.csv"""'], {}), "(0, list_labels_svm, 'results/SVM-quint-centered-mixed.csv')\n", (7352, 7412), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((2416, 2446), 'kernel_creation.get_mismatch_dict', 'get_mismatch_dict', (['list_seq_id'], {}), '(list_seq_id)\n', (2433, 2446), False, 'from kernel_creation import convert_acid_kernel, convert_acid_quad, convert_mismatch_lev, convert_lect_trig, get_mismatch_dict\n'), ((2527, 2581), 'kernel_creation.get_correspondances', 'get_correspondances', (['list_seq_id', 'nb_mis', 'list_letters'], {}), '(list_seq_id, nb_mis, list_letters)\n', (2546, 2581), False, 'from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode\n'), ((2706, 2756), 'kernel_creation.get_full_corres', 'get_full_corres', (['list_seq_id', 'nb_mis', 'list_letters'], {}), '(list_seq_id, nb_mis, list_letters)\n', (2721, 2756), False, 'from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode\n'), ((2953, 2999), 'read_fn.read_csv_file_data', 'read_csv_file_data', (["('data/Xtr' + name + '.csv')"], {}), "('data/Xtr' + name + '.csv')\n", (2971, 2999), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((3167, 3219), 'kernel_creation.convert_spectral_kernel_quint', 'convert_spectral_kernel_quint', (['sequences', 'list_quint'], {}), '(sequences, list_quint)\n', (3196, 3219), False, 'from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig\n'), ((3844, 3883), 'numpy.asarray', 'np.asarray', (['list_converted'], {'dtype': 'float'}), '(list_converted, dtype=float)\n', (3854, 3883), True, 'import numpy as np\n'), ((3970, 3994), 'numpy.sum', 'np.sum', (['training'], {'axis': '(1)'}), '(training, 
axis=1)\n', (3976, 3994), True, 'import numpy as np\n'), ((4038, 4063), 'numpy.mean', 'np.mean', (['training'], {'axis': '(0)'}), '(training, axis=0)\n', (4045, 4063), True, 'import numpy as np\n'), ((4277, 4324), 'read_fn.read_csv_file_label', 'read_csv_file_label', (["('data/Ytr' + name + '.csv')"], {}), "('data/Ytr' + name + '.csv')\n", (4296, 4324), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((4778, 4824), 'read_fn.read_csv_file_data', 'read_csv_file_data', (["('data/Xte' + name + '.csv')"], {}), "('data/Xte' + name + '.csv')\n", (4796, 4824), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((5017, 5074), 'kernel_creation.convert_spectral_kernel_quint', 'convert_spectral_kernel_quint', (['sequences_test', 'list_quint'], {}), '(sequences_test, list_quint)\n', (5046, 5074), False, 'from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig\n'), ((5687, 5731), 'numpy.asarray', 'np.asarray', (['list_converted_test'], {'dtype': 'float'}), '(list_converted_test, dtype=float)\n', (5697, 5731), True, 'import numpy as np\n'), ((6311, 6337), 'kernel_creation.compute_K_matrix', 'compute_K_matrix', (['training'], {}), '(training)\n', (6327, 6337), False, 'from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian\n'), ((6584, 6625), 'kernel_creation.compute_test_matrix', 'compute_test_matrix', (['training', 'test_train'], {}), '(training, test_train)\n', (6603, 6625), False, 'from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian\n'), ((7061, 7126), 'SVM.SVM', 'SVM', (['K_add', 'label_train', 'lamb_svm', 'K_test_train', 'label_test_train'], {}), '(K_add, label_train, lamb_svm, K_test_train, label_test_train)\n', (7064, 7126), False, 'from SVM import SVM, svm_compute_label\n'), ((7199, 7237), 
'kernel_creation.compute_test_matrix', 'compute_test_matrix', (['training', 'testing'], {}), '(training, testing)\n', (7218, 7237), False, 'from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian\n'), ((7256, 7287), 'SVM.svm_compute_label', 'svm_compute_label', (['Ktest', 'alpha'], {}), '(Ktest, alpha)\n', (7273, 7287), False, 'from SVM import SVM, svm_compute_label\n'), ((6358, 6381), 'numpy.identity', 'np.identity', (['K.shape[0]'], {}), '(K.shape[0])\n', (6369, 6381), True, 'import numpy as np\n'), ((2229, 2249), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['x'], {}), '(x)\n', (2246, 2249), True, 'import numpy as np\n'), ((4334, 4351), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (4344, 4351), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import unittest
import networkx
from challenge.algorithm import dijkstras_shortest_path
class TestAlgorithmModule(unittest.TestCase):
def test_empty_graph(self):
"""Nodes are memebers of graph.nodes"""
graph = networkx.Graph()
with self.assertRaises(ValueError):
dijkstras_shortest_path(graph, 'A', 'C')
def test_disjoint_graph(self):
graph = networkx.Graph()
graph.add_nodes_from(['A', 'B'])
path = dijkstras_shortest_path(graph, 'A', 'B')
self.assertListEqual(path, [])
def test_path_to_itself(self):
"""A"""
graph = networkx.Graph()
graph.add_edges_from([('A', 'B'), ('B', 'C')])
path = dijkstras_shortest_path(graph, 'A', 'A')
self.assertListEqual(path, [])
def test_simple_shortest_path(self):
"""A - B - C """
graph = networkx.Graph()
graph.add_edges_from([('A', 'B'), ('B', 'C')])
path = dijkstras_shortest_path(graph, 'A', 'C')
self.assertListEqual(path, ['A', 'B', 'C'])
def test_shortcut_path(self):
"""
A - B - C - D - E - F
\\ /
--- G ---
"""
graph = networkx.Graph()
graph.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E'), ('E', 'F')])
graph.add_edges_from([('B', 'G'), ('G', 'E')])
path = dijkstras_shortest_path(graph, 'A', 'F')
self.assertListEqual(path, ['A', 'B', 'G', 'E', 'F'])
def test_cyclic_graph_path(self):
"""
A - B - C - D - E
| |
- G -
"""
graph = networkx.Graph()
graph.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E')])
graph.add_edges_from([('C', 'G'), ('G', 'B')])
path = dijkstras_shortest_path(graph, 'A', 'E')
self.assertListEqual(path, ['A', 'B', 'C', 'D', 'E'])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"networkx.Graph",
"challenge.algorithm.dijkstras_shortest_path"
] |
[((1965, 1980), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1978, 1980), False, 'import unittest\n'), ((258, 274), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (272, 274), False, 'import networkx\n'), ((425, 441), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (439, 441), False, 'import networkx\n'), ((498, 538), 'challenge.algorithm.dijkstras_shortest_path', 'dijkstras_shortest_path', (['graph', '"""A"""', '"""B"""'], {}), "(graph, 'A', 'B')\n", (521, 538), False, 'from challenge.algorithm import dijkstras_shortest_path\n'), ((646, 662), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (660, 662), False, 'import networkx\n'), ((733, 773), 'challenge.algorithm.dijkstras_shortest_path', 'dijkstras_shortest_path', (['graph', '"""A"""', '"""A"""'], {}), "(graph, 'A', 'A')\n", (756, 773), False, 'from challenge.algorithm import dijkstras_shortest_path\n'), ((896, 912), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (910, 912), False, 'import networkx\n'), ((983, 1023), 'challenge.algorithm.dijkstras_shortest_path', 'dijkstras_shortest_path', (['graph', '"""A"""', '"""C"""'], {}), "(graph, 'A', 'C')\n", (1006, 1023), False, 'from challenge.algorithm import dijkstras_shortest_path\n'), ((1236, 1252), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (1250, 1252), False, 'import networkx\n'), ((1414, 1454), 'challenge.algorithm.dijkstras_shortest_path', 'dijkstras_shortest_path', (['graph', '"""A"""', '"""F"""'], {}), "(graph, 'A', 'F')\n", (1437, 1454), False, 'from challenge.algorithm import dijkstras_shortest_path\n'), ((1664, 1680), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (1678, 1680), False, 'import networkx\n'), ((1830, 1870), 'challenge.algorithm.dijkstras_shortest_path', 'dijkstras_shortest_path', (['graph', '"""A"""', '"""E"""'], {}), "(graph, 'A', 'E')\n", (1853, 1870), False, 'from challenge.algorithm import dijkstras_shortest_path\n'), ((332, 372), 'challenge.algorithm.dijkstras_shortest_path', 
'dijkstras_shortest_path', (['graph', '"""A"""', '"""C"""'], {}), "(graph, 'A', 'C')\n", (355, 372), False, 'from challenge.algorithm import dijkstras_shortest_path\n')]
|
import os
import re
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import Any, Optional, Tuple
import click
import click._bashcomplete
from .models import ParamMeta
from .params import Option
from .utils import get_params_from_function
try:
import shellingham
except ImportError: # pragma: nocover
shellingham = None
_click_patched = False
def get_completion_inspect_parameters() -> Tuple[ParamMeta, ParamMeta]:
completion_init()
test_disable_detection = os.getenv("_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION")
if shellingham and not test_disable_detection:
parameters = get_params_from_function(_install_completion_placeholder_function)
else:
parameters = get_params_from_function(
_install_completion_no_auto_placeholder_function
)
install_param, show_param = parameters.values()
return install_param, show_param
def install_callback(ctx: click.Context, param: click.Parameter, value: Any) -> Any:
if not value or ctx.resilient_parsing:
return value # pragma no cover
if isinstance(value, str):
shell, path = install(shell=value)
else:
shell, path = install()
click.secho(f"{shell} completion installed in {path}", fg="green")
click.echo("Completion will take effect once you restart the terminal")
sys.exit(0)
def show_callback(ctx: click.Context, param: click.Parameter, value: Any) -> Any:
if not value or ctx.resilient_parsing:
return value # pragma no cover
prog_name = ctx.find_root().info_name
assert prog_name
complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
if isinstance(value, str):
shell = value
elif shellingham:
shell, _ = shellingham.detect_shell()
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
click.echo(script_content)
sys.exit(0)
class Shells(str, Enum):
bash = "bash"
zsh = "zsh"
fish = "fish"
powershell = "powershell"
pwsh = "pwsh"
# Create a fake command function to extract the completion parameters
def _install_completion_placeholder_function(
install_completion: bool = Option(
None,
"--install-completion",
is_flag=True,
callback=install_callback,
expose_value=False,
help="Install completion for the current shell.",
),
show_completion: bool = Option(
None,
"--show-completion",
is_flag=True,
callback=show_callback,
expose_value=False,
help="Show completion for the current shell, to copy it or customize the installation.",
),
) -> Any:
pass # pragma no cover
def _install_completion_no_auto_placeholder_function(
install_completion: Shells = Option(
None,
callback=install_callback,
expose_value=False,
help="Install completion for the specified shell.",
),
show_completion: Shells = Option(
None,
callback=show_callback,
expose_value=False,
help="Show completion for the specified shell, to copy it or customize the installation.",
),
) -> Any:
pass # pragma no cover
COMPLETION_SCRIPT_BASH = """
%(complete_func)s() {
local IFS=$'\n'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
%(autocomplete_var)s=complete_bash $1 ) )
return 0
}
complete -o default -F %(complete_func)s %(prog_name)s
"""
COMPLETION_SCRIPT_ZSH = """
#compdef %(prog_name)s
%(complete_func)s() {
eval $(env _TYPER_COMPLETE_ARGS="${words[1,$CURRENT]}" %(autocomplete_var)s=complete_zsh %(prog_name)s)
}
compdef %(complete_func)s %(prog_name)s
"""
COMPLETION_SCRIPT_FISH = 'complete --command %(prog_name)s --no-files --arguments "(env %(autocomplete_var)s=complete_fish _TYPER_COMPLETE_FISH_ACTION=get-args _TYPER_COMPLETE_ARGS=(commandline -cp) %(prog_name)s)" --condition "env %(autocomplete_var)s=complete_fish _TYPER_COMPLETE_FISH_ACTION=is-args _TYPER_COMPLETE_ARGS=(commandline -cp) %(prog_name)s"'
COMPLETION_SCRIPT_POWER_SHELL = """
Import-Module PSReadLine
Set-PSReadLineKeyHandler -Chord Tab -Function MenuComplete
$scriptblock = {
param($wordToComplete, $commandAst, $cursorPosition)
$Env:%(autocomplete_var)s = "complete_powershell"
$Env:_TYPER_COMPLETE_ARGS = $commandAst.ToString()
$Env:_TYPER_COMPLETE_WORD_TO_COMPLETE = $wordToComplete
%(prog_name)s | ForEach-Object {
$commandArray = $_ -Split ":::"
$command = $commandArray[0]
$helpString = $commandArray[1]
[System.Management.Automation.CompletionResult]::new(
$command, $command, 'ParameterValue', $helpString)
}
$Env:%(autocomplete_var)s = ""
$Env:_TYPER_COMPLETE_ARGS = ""
$Env:_TYPER_COMPLETE_WORD_TO_COMPLETE = ""
}
Register-ArgumentCompleter -Native -CommandName %(prog_name)s -ScriptBlock $scriptblock
"""
def install(
shell: Optional[str] = None,
prog_name: Optional[str] = None,
complete_var: Optional[str] = None,
) -> Tuple[str, Path]:
prog_name = prog_name or click.get_current_context().find_root().info_name
assert prog_name
if complete_var is None:
complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
if shell is None and shellingham is not None:
shell, _ = shellingham.detect_shell()
if shell == "bash":
installed_path = install_bash(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
return shell, installed_path
elif shell == "zsh":
installed_path = install_zsh(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
return shell, installed_path
elif shell == "fish":
installed_path = install_fish(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
return shell, installed_path
elif shell in {"powershell", "pwsh"}:
installed_path = install_powershell(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
return shell, installed_path
else:
click.echo(f"Shell {shell} is not supported.")
raise click.exceptions.Exit(1)
def install_bash(*, prog_name: str, complete_var: str, shell: str) -> Path:
# Ref: https://github.com/scop/bash-completion#faq
# It seems bash-completion is the official completion system for bash:
# Ref: https://www.gnu.org/software/bash/manual/html_node/A-Programmable-Completion-Example.html
# But installing in the locations from the docs doesn't seem to have effect
completion_path = Path.home() / f".bash_completions/{prog_name}.sh"
rc_path = Path.home() / ".bashrc"
rc_path.parent.mkdir(parents=True, exist_ok=True)
rc_content = ""
if rc_path.is_file():
rc_content = rc_path.read_text()
completion_init_lines = [f"source {completion_path}"]
for line in completion_init_lines:
if line not in rc_content: # pragma: nocover
rc_content += f"\n{line}"
rc_content += "\n"
rc_path.write_text(rc_content)
# Install completion
completion_path.parent.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
completion_path.write_text(script_content)
return completion_path
def install_zsh(*, prog_name: str, complete_var: str, shell: str) -> Path:
# Setup Zsh and load ~/.zfunc
zshrc_path = Path.home() / ".zshrc"
zshrc_path.parent.mkdir(parents=True, exist_ok=True)
zshrc_content = ""
if zshrc_path.is_file():
zshrc_content = zshrc_path.read_text()
completion_init_lines = [
"autoload -Uz compinit",
"compinit",
"zstyle ':completion:*' menu select",
"fpath+=~/.zfunc",
]
for line in completion_init_lines:
if line not in zshrc_content: # pragma: nocover
zshrc_content += f"\n{line}"
zshrc_content += "\n"
zshrc_path.write_text(zshrc_content)
# Install completion under ~/.zfunc/
path_obj = Path.home() / f".zfunc/_{prog_name}"
path_obj.parent.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
path_obj.write_text(script_content)
return path_obj
def install_fish(*, prog_name: str, complete_var: str, shell: str) -> Path:
path_obj = Path.home() / f".config/fish/completions/{prog_name}.fish"
parent_dir: Path = path_obj.parent
parent_dir.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
path_obj.write_text(f"{script_content}\n")
return path_obj
def install_powershell(*, prog_name: str, complete_var: str, shell: str) -> Path:
subprocess.run(
[
shell,
"-Command",
"Set-ExecutionPolicy",
"Unrestricted",
"-Scope",
"CurrentUser",
]
)
result = subprocess.run(
[shell, "-NoProfile", "-Command", "echo", "$profile"],
check=True,
stdout=subprocess.PIPE,
)
if result.returncode != 0: # pragma: nocover
click.echo("Couldn't get PowerShell user profile", err=True)
raise click.exceptions.Exit(result.returncode)
path_str = ""
if isinstance(result.stdout, str): # pragma: nocover
path_str = result.stdout
if isinstance(result.stdout, bytes):
try:
# PowerShell would be predominant in Windows
path_str = result.stdout.decode("windows-1252")
except UnicodeDecodeError: # pragma: nocover
try:
path_str = result.stdout.decode("utf8")
except UnicodeDecodeError:
click.echo("Couldn't decode the path automatically", err=True)
raise click.exceptions.Exit(1)
path_obj = Path(path_str.strip())
parent_dir: Path = path_obj.parent
parent_dir.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
with path_obj.open(mode="a") as f:
f.write(f"{script_content}\n")
return path_obj
def do_bash_complete(cli: click.Command, prog_name: str) -> bool:
cwords = click.parser.split_arg_string(os.getenv("COMP_WORDS", ""))
cword = int(os.getenv("COMP_CWORD", 0))
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
for item in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
click.echo(item[0])
return True
def do_zsh_complete(cli: click.Command, prog_name: str) -> bool:
completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
cwords = click.parser.split_arg_string(completion_args)
args = cwords[1:]
if args and not completion_args.endswith(" "):
incomplete = args[-1]
args = args[:-1]
else:
incomplete = ""
def escape(s: str) -> str:
return (
s.replace('"', '""')
.replace("'", "''")
.replace("$", "\\$")
.replace("`", "\\`")
)
res = []
for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
if help:
res.append(f'"{escape(item)}":"{escape(help)}"')
else:
res.append(f'"{escape(item)}"')
if res:
args_str = "\n".join(res)
click.echo(f"_arguments '*: :(({args_str}))'")
else:
click.echo("_files")
return True
def do_fish_complete(cli: click.Command, prog_name: str) -> bool:
completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
complete_action = os.getenv("_TYPER_COMPLETE_FISH_ACTION", "")
cwords = click.parser.split_arg_string(completion_args)
args = cwords[1:]
if args and not completion_args.endswith(" "):
incomplete = args[-1]
args = args[:-1]
else:
incomplete = ""
show_args = []
for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
if help:
formatted_help = re.sub(r"\s", " ", help)
show_args.append(f"{item}\t{formatted_help}")
else:
show_args.append(item)
if complete_action == "get-args":
if show_args:
for arg in show_args:
click.echo(arg)
elif complete_action == "is-args":
if show_args:
# Activate complete args (no files)
sys.exit(0)
else:
# Deactivate complete args (allow files)
sys.exit(1)
return True
def do_powershell_complete(cli: click.Command, prog_name: str) -> bool:
completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
incomplete = os.getenv("_TYPER_COMPLETE_WORD_TO_COMPLETE", "")
cwords = click.parser.split_arg_string(completion_args)
args = cwords[1:]
for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
click.echo(f"{item}:::{help or ' '}")
return True
def do_shell_complete(*, cli: click.Command, prog_name: str, shell: str) -> bool:
if shell == "bash":
return do_bash_complete(cli, prog_name)
elif shell == "zsh":
return do_zsh_complete(cli, prog_name)
elif shell == "fish":
return do_fish_complete(cli, prog_name)
elif shell in {"powershell", "pwsh"}:
return do_powershell_complete(cli, prog_name)
return False
_completion_scripts = {
"bash": COMPLETION_SCRIPT_BASH,
"zsh": COMPLETION_SCRIPT_ZSH,
"fish": COMPLETION_SCRIPT_FISH,
"powershell": COMPLETION_SCRIPT_POWER_SHELL,
"pwsh": COMPLETION_SCRIPT_POWER_SHELL,
}
def get_completion_script(*, prog_name: str, complete_var: str, shell: str) -> str:
cf_name = click._bashcomplete._invalid_ident_char_re.sub(
"", prog_name.replace("-", "_")
)
script = _completion_scripts.get(shell)
if script is None:
click.echo(f"Shell {shell} not supported.", err=True)
sys.exit(1)
return (
script
% dict(
complete_func="_{}_completion".format(cf_name),
prog_name=prog_name,
autocomplete_var=complete_var,
)
).strip()
def handle_shell_complete(
cli: click.Command, prog_name: str, complete_var: str, complete_instr: str
) -> bool:
if "_" not in complete_instr:
click.echo("Invalid completion instruction.", err=True)
sys.exit(1)
command, shell = complete_instr.split("_", 1)
if command == "source":
click.echo(
get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
)
return True
elif command == "complete":
return do_shell_complete(cli=cli, prog_name=prog_name, shell=shell)
return False
def completion_init() -> None:
global _click_patched
if not _click_patched:
testing = os.getenv("_TYPER_COMPLETE_TESTING")
def testing_handle_shell_complete(
cli: click.Command, prog_name: str, complete_var: str, complete_instr: str
) -> bool:
result = handle_shell_complete(cli, prog_name, complete_var, complete_instr)
if result:
# Avoid fast_exit(1) in Click so Coverage can finish
sys.exit(1)
return result
if testing:
click._bashcomplete.bashcomplete = testing_handle_shell_complete
else:
click._bashcomplete.bashcomplete = handle_shell_complete
_click_patched = True
|
[
"subprocess.run",
"click.parser.split_arg_string",
"click.exceptions.Exit",
"pathlib.Path.home",
"click.get_current_context",
"click.echo",
"click._bashcomplete.get_choices",
"shellingham.detect_shell",
"click.secho",
"re.sub",
"os.getenv",
"sys.exit"
] |
[((520, 577), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION"""'], {}), "('_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION')\n", (529, 577), False, 'import os\n'), ((1224, 1290), 'click.secho', 'click.secho', (['f"""{shell} completion installed in {path}"""'], {'fg': '"""green"""'}), "(f'{shell} completion installed in {path}', fg='green')\n", (1235, 1290), False, 'import click\n'), ((1295, 1366), 'click.echo', 'click.echo', (['"""Completion will take effect once you restart the terminal"""'], {}), "('Completion will take effect once you restart the terminal')\n", (1305, 1366), False, 'import click\n'), ((1371, 1382), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1379, 1382), False, 'import sys\n'), ((1934, 1960), 'click.echo', 'click.echo', (['script_content'], {}), '(script_content)\n', (1944, 1960), False, 'import click\n'), ((1965, 1976), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1973, 1976), False, 'import sys\n'), ((9004, 9107), 'subprocess.run', 'subprocess.run', (["[shell, '-Command', 'Set-ExecutionPolicy', 'Unrestricted', '-Scope',\n 'CurrentUser']"], {}), "([shell, '-Command', 'Set-ExecutionPolicy', 'Unrestricted',\n '-Scope', 'CurrentUser'])\n", (9018, 9107), False, 'import subprocess\n'), ((9214, 9324), 'subprocess.run', 'subprocess.run', (["[shell, '-NoProfile', '-Command', 'echo', '$profile']"], {'check': '(True)', 'stdout': 'subprocess.PIPE'}), "([shell, '-NoProfile', '-Command', 'echo', '$profile'], check\n =True, stdout=subprocess.PIPE)\n", (9228, 9324), False, 'import subprocess\n'), ((10759, 10824), 'click._bashcomplete.get_choices', 'click._bashcomplete.get_choices', (['cli', 'prog_name', 'args', 'incomplete'], {}), '(cli, prog_name, args, incomplete)\n', (10790, 10824), False, 'import click\n'), ((10959, 10996), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_ARGS"""', '""""""'], {}), "('_TYPER_COMPLETE_ARGS', '')\n", (10968, 10996), False, 'import os\n'), ((11010, 11056), 'click.parser.split_arg_string', 
'click.parser.split_arg_string', (['completion_args'], {}), '(completion_args)\n', (11039, 11056), False, 'import click\n'), ((11445, 11510), 'click._bashcomplete.get_choices', 'click._bashcomplete.get_choices', (['cli', 'prog_name', 'args', 'incomplete'], {}), '(cli, prog_name, args, incomplete)\n', (11476, 11510), False, 'import click\n'), ((11894, 11931), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_ARGS"""', '""""""'], {}), "('_TYPER_COMPLETE_ARGS', '')\n", (11903, 11931), False, 'import os\n'), ((11954, 11998), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_FISH_ACTION"""', '""""""'], {}), "('_TYPER_COMPLETE_FISH_ACTION', '')\n", (11963, 11998), False, 'import os\n'), ((12012, 12058), 'click.parser.split_arg_string', 'click.parser.split_arg_string', (['completion_args'], {}), '(completion_args)\n', (12041, 12058), False, 'import click\n'), ((12262, 12327), 'click._bashcomplete.get_choices', 'click._bashcomplete.get_choices', (['cli', 'prog_name', 'args', 'incomplete'], {}), '(cli, prog_name, args, incomplete)\n', (12293, 12327), False, 'import click\n'), ((12969, 13006), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_ARGS"""', '""""""'], {}), "('_TYPER_COMPLETE_ARGS', '')\n", (12978, 13006), False, 'import os\n'), ((13024, 13073), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_WORD_TO_COMPLETE"""', '""""""'], {}), "('_TYPER_COMPLETE_WORD_TO_COMPLETE', '')\n", (13033, 13073), False, 'import os\n'), ((13087, 13133), 'click.parser.split_arg_string', 'click.parser.split_arg_string', (['completion_args'], {}), '(completion_args)\n', (13116, 13133), False, 'import click\n'), ((13178, 13243), 'click._bashcomplete.get_choices', 'click._bashcomplete.get_choices', (['cli', 'prog_name', 'args', 'incomplete'], {}), '(cli, prog_name, args, incomplete)\n', (13209, 13243), False, 'import click\n'), ((5440, 5466), 'shellingham.detect_shell', 'shellingham.detect_shell', ([], {}), '()\n', (5464, 5466), False, 'import shellingham\n'), ((6736, 6747), 'pathlib.Path.home', 
'Path.home', ([], {}), '()\n', (6745, 6747), False, 'from pathlib import Path\n'), ((6800, 6811), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (6809, 6811), False, 'from pathlib import Path\n'), ((7619, 7630), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (7628, 7630), False, 'from pathlib import Path\n'), ((8220, 8231), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (8229, 8231), False, 'from pathlib import Path\n'), ((8583, 8594), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (8592, 8594), False, 'from pathlib import Path\n'), ((9409, 9469), 'click.echo', 'click.echo', (['"""Couldn\'t get PowerShell user profile"""'], {'err': '(True)'}), '("Couldn\'t get PowerShell user profile", err=True)\n', (9419, 9469), False, 'import click\n'), ((9484, 9524), 'click.exceptions.Exit', 'click.exceptions.Exit', (['result.returncode'], {}), '(result.returncode)\n', (9505, 9524), False, 'import click\n'), ((10551, 10578), 'os.getenv', 'os.getenv', (['"""COMP_WORDS"""', '""""""'], {}), "('COMP_WORDS', '')\n", (10560, 10578), False, 'import os\n'), ((10596, 10622), 'os.getenv', 'os.getenv', (['"""COMP_CWORD"""', '(0)'], {}), "('COMP_CWORD', 0)\n", (10605, 10622), False, 'import os\n'), ((10834, 10853), 'click.echo', 'click.echo', (['item[0]'], {}), '(item[0])\n', (10844, 10853), False, 'import click\n'), ((11702, 11748), 'click.echo', 'click.echo', (['f"""_arguments \'*: :(({args_str}))\'"""'], {}), '(f"_arguments \'*: :(({args_str}))\'")\n', (11712, 11748), False, 'import click\n'), ((11767, 11787), 'click.echo', 'click.echo', (['"""_files"""'], {}), "('_files')\n", (11777, 11787), False, 'import click\n'), ((13253, 13290), 'click.echo', 'click.echo', (['f"""{item}:::{help or \' \'}"""'], {}), '(f"{item}:::{help or \' \'}")\n', (13263, 13290), False, 'import click\n'), ((14218, 14271), 'click.echo', 'click.echo', (['f"""Shell {shell} not supported."""'], {'err': '(True)'}), "(f'Shell {shell} not supported.', err=True)\n", (14228, 14271), False, 'import 
click\n'), ((14280, 14291), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14288, 14291), False, 'import sys\n'), ((14657, 14712), 'click.echo', 'click.echo', (['"""Invalid completion instruction."""'], {'err': '(True)'}), "('Invalid completion instruction.', err=True)\n", (14667, 14712), False, 'import click\n'), ((14721, 14732), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14729, 14732), False, 'import sys\n'), ((15215, 15251), 'os.getenv', 'os.getenv', (['"""_TYPER_COMPLETE_TESTING"""'], {}), "('_TYPER_COMPLETE_TESTING')\n", (15224, 15251), False, 'import os\n'), ((1785, 1811), 'shellingham.detect_shell', 'shellingham.detect_shell', ([], {}), '()\n', (1809, 1811), False, 'import shellingham\n'), ((12375, 12399), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'help'], {}), "('\\\\s', ' ', help)\n", (12381, 12399), False, 'import re\n'), ((12617, 12632), 'click.echo', 'click.echo', (['arg'], {}), '(arg)\n', (12627, 12632), False, 'import click\n'), ((12754, 12765), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (12762, 12765), False, 'import sys\n'), ((12845, 12856), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12853, 12856), False, 'import sys\n'), ((15599, 15610), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15607, 15610), False, 'import sys\n'), ((5189, 5216), 'click.get_current_context', 'click.get_current_context', ([], {}), '()\n', (5214, 5216), False, 'import click\n'), ((6239, 6285), 'click.echo', 'click.echo', (['f"""Shell {shell} is not supported."""'], {}), "(f'Shell {shell} is not supported.')\n", (6249, 6285), False, 'import click\n'), ((6300, 6324), 'click.exceptions.Exit', 'click.exceptions.Exit', (['(1)'], {}), '(1)\n', (6321, 6324), False, 'import click\n'), ((9987, 10049), 'click.echo', 'click.echo', (['"""Couldn\'t decode the path automatically"""'], {'err': '(True)'}), '("Couldn\'t decode the path automatically", err=True)\n', (9997, 10049), False, 'import click\n'), ((10072, 10096), 'click.exceptions.Exit', 
'click.exceptions.Exit', (['(1)'], {}), '(1)\n', (10093, 10096), False, 'import click\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-15 13:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional, private 'submission_notes' text field to both the
    EventProposal and SpeakerProposal models.
    """

    dependencies = [("program", "0041_auto_20170711_2248")]

    operations = [
        migrations.AddField(
            model_name="eventproposal",
            name="submission_notes",
            field=models.TextField(
                blank=True,
                help_text="Private notes for the event. Only visible to the submitting user and the BornHack organisers.",
            ),
        ),
        # NOTE(review): this help_text also says "for the event" although the
        # field lives on SpeakerProposal -- presumably a copy/paste in the
        # original model definition. It must stay byte-identical here to match
        # the historical migration state; fix it in a later migration if needed.
        migrations.AddField(
            model_name="speakerproposal",
            name="submission_notes",
            field=models.TextField(
                blank=True,
                help_text="Private notes for the event. Only visible to the submitting user and the BornHack organisers.",
            ),
        ),
    ]
|
[
"django.db.models.TextField"
] |
[((401, 546), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Private notes for the event. Only visible to the submitting user and the BornHack organisers."""'}), "(blank=True, help_text=\n 'Private notes for the event. Only visible to the submitting user and the BornHack organisers.'\n )\n", (417, 546), False, 'from django.db import migrations, models\n'), ((722, 867), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Private notes for the event. Only visible to the submitting user and the BornHack organisers."""'}), "(blank=True, help_text=\n 'Private notes for the event. Only visible to the submitting user and the BornHack organisers.'\n )\n", (738, 867), False, 'from django.db import migrations, models\n')]
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Utilities related to process/script execution.
"""
from rez.vendor.six import six
from rez.utils.yaml import dump_yaml
from rez.vendor.enum import Enum
from contextlib import contextmanager
import subprocess
import sys
import stat
import os
import io
@contextmanager
def add_sys_paths(paths):
    """Temporarily extend ``sys.path`` with *paths*.

    The previous value of ``sys.path`` is restored when the context exits,
    even if the body raises.
    """
    saved = list(sys.path)
    sys.path.extend(paths)
    try:
        yield
    finally:
        sys.path = saved
# subprocess.Popen only supports use as a context manager on py3, so on py2
# wrap it with the minimal __enter__/__exit__ protocol. The Popen class below
# derives from whichever base applies.
if six.PY2:
    class _PopenBase(subprocess.Popen):
        def __enter__(self):
            return self

        def __exit__(self, exc_type, value, traceback):
            # reap the child process before leaving the context
            self.wait()
else:  # py3
    _PopenBase = subprocess.Popen
class Popen(_PopenBase):
    """subprocess.Popen wrapper.

    Allows for Popen to be used as a context in both py2 and py3, and works
    around stdin/encoding issues seen when rez runs inside embedded
    interpreters (maya, katana).
    """
    def __init__(self, args, **kwargs):
        """
        Args:
            args: Command to run, as per `subprocess.Popen`.
            **kwargs: Forwarded to `subprocess.Popen` after the adjustments
                described in the comments below.
        """
        # Avoids python bug described here: https://bugs.python.org/issue3905.
        # This can arise when apps (maya) install a non-standard stdin handler.
        #
        # In newer version of maya and katana, the sys.stdin object can also
        # become replaced by an object with no 'fileno' attribute, this is also
        # taken into account.
        #
        if "stdin" not in kwargs:
            try:
                file_no = sys.stdin.fileno()

            # https://github.com/nerdvegas/rez/pull/966
            except (AttributeError, io.UnsupportedOperation):
                file_no = None

            # fall back to the interpreter's original stdin if available
            if file_no is None and sys.__stdin__ is not None:
                file_no = sys.__stdin__.fileno()

            # a non-standard fd means stdin was replaced; use a pipe instead
            if file_no not in (0, 1, 2):
                kwargs["stdin"] = subprocess.PIPE

        # Add support for the new py3 "text" arg, which is equivalent to
        # "universal_newlines".
        # https://docs.python.org/3/library/subprocess.html#frequently-used-arguments
        #
        text = kwargs.pop("text", None)
        universal_newlines = kwargs.pop("universal_newlines", None)

        if text or universal_newlines:
            kwargs["universal_newlines"] = True

            # fixes py3/cmd.exe UnicodeDecodeError() with some characters.
            # UnicodeDecodeError: 'charmap' codec can't decode byte
            # 0x8d in position 1023172: character maps to <undefined>
            #
            # NOTE: currently no solution for `python3+<3.6`
            #
            if sys.version_info[:2] >= (3, 6) and "encoding" not in kwargs:
                kwargs["encoding"] = "utf-8"

        super(Popen, self).__init__(args, **kwargs)
class ExecutableScriptMode(Enum):
    """
    Which scripts to create with util.create_executable_script.

    NOTE: create_executable_script defaults to
    rezconfig.create_executable_script_mode, so these member names/values are
    effectively part of the config surface -- keep them stable.
    """
    # Start with 1 to not collide with None checks

    # Requested script only. Usually extension-less.
    single = 1

    # Create .py script that will allow launching scripts on
    # windows without extension, but may require extension on
    # other systems.
    py = 2

    # Will create py script on windows and requested on
    # other platforms
    platform_specific = 3

    # Creates the requested script and an .py script so that scripts
    # can be launched without extension from windows and other
    # systems.
    both = 4
# TODO: Maybe also allow distlib.ScriptMaker instead of the .py + PATHEXT.
def create_executable_script(filepath, body, program=None, py_script_mode=None):
    """
    Create an executable script. In case a py_script_mode has been set to create
    a .py script the shell is expected to have the PATHEXT environment
    variable to include ".PY" in order to properly launch the command without
    the .py extension.

    Args:
        filepath (str): File to create.
        body (str or callable): Contents of the script. If a callable, its code
            is used as the script body.
        program (str): Name of program to launch the script. Default is 'python'
        py_script_mode(ExecutableScriptMode): What kind of script to create.
            Defaults to rezconfig.create_executable_script_mode.
    Returns:
        List of filepaths of created scripts. This may differ from the supplied
        filepath depending on the py_script_mode
    """
    from rez.config import config
    from rez.utils.platform_ import platform_
    program = program or "python"
    py_script_mode = py_script_mode or config.create_executable_script_mode

    # Forwarding scripts on windows get a self-executing batch header instead
    # of a shebang; see https://github.com/nerdvegas/rez/pull/968
    is_forwarding_script_on_windows = (
        program == "_rez_fwd"
        and platform_.name == "windows"
        and filepath.lower().endswith(".cmd")
    )

    # a callable body is serialised to its source code
    if callable(body):
        from rez.utils.sourcecode import SourceCode
        code = SourceCode(func=body)
        body = code.source

    if not body.endswith('\n'):
        body += '\n'

    # Windows does not support shebang, but it will run with
    # default python, or in case of later python versions 'py' that should
    # try to use sensible python interpreters depending on the shebang line.
    # Compare PEP-397.
    # In order for execution to work in windows we need to create a .py
    # file and set the PATHEXT to include .py (as done by the shell plugins)
    # So depending on the py_script_mode we might need to create more then
    # one script
    script_filepaths = [filepath]
    if program == "python":
        script_filepaths = _get_python_script_files(filepath, py_script_mode,
                                                   platform_.name)

    for current_filepath in script_filepaths:
        with open(current_filepath, 'w') as f:
            # TODO: make cross platform
            if is_forwarding_script_on_windows:
                # following lines of batch script will be stripped
                # before yaml.load
                f.write("@echo off\n")
                f.write("%s.exe %%~dpnx0 %%*\n" % program)
                f.write("goto :eof\n")  # skip YAML body
                f.write(":: YAML\n")  # comment for human
            else:
                f.write("#!/usr/bin/env %s\n" % program)

            f.write(body)

        # TODO: Although Windows supports os.chmod you can only set the readonly
        # flag. Setting the file readonly breaks the unit tests that expect to
        # clean up the files once the test has run. Temporarily we don't bother
        # setting the permissions, but this will need to change.
        if os.name == "posix":
            # owner rwx, group/other rx
            os.chmod(
                current_filepath,
                stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR
                | stat.S_IXGRP | stat.S_IXOTH
            )

    return script_filepaths
def _get_python_script_files(filepath, py_script_mode, platform):
    """
    Evaluates the py_script_mode for the requested filepath on the given
    platform.

    Args:
        filepath: requested filepath
        py_script_mode (ExecutableScriptMode):
        platform (str): Platform to evaluate the script files for
    Returns:
        list of str: filepaths of scripts to create based on inputs
    """
    base_filepath, ext = os.path.splitext(filepath)
    is_py = (ext == ".py")
    on_windows = (platform == "windows")

    filepaths = []

    # Should the script be created under its requested name?
    # platform_specific keeps the requested name off-windows always, and on
    # windows only when it already carries a .py extension.
    wants_requested = (
        py_script_mode in (ExecutableScriptMode.single, ExecutableScriptMode.both)
        or (py_script_mode == ExecutableScriptMode.py and is_py)
        or (py_script_mode == ExecutableScriptMode.platform_specific
            and (not on_windows or is_py))
    )
    if wants_requested:
        filepaths.append(filepath)

    # Should a '.py' sibling be created as well (or instead)?
    wants_py_sibling = (
        not is_py
        and (py_script_mode in (ExecutableScriptMode.both, ExecutableScriptMode.py)
             or (py_script_mode == ExecutableScriptMode.platform_specific
                 and on_windows))
    )
    if wants_py_sibling:
        filepaths.append(base_filepath + ".py")

    return filepaths
def create_forwarding_script(filepath, module, func_name, *nargs, **kwargs):
    """Create a 'forwarding' script.

    A forwarding script is one that executes some arbitrary Rez function. This
    is used internally by Rez to dynamically create a script that uses Rez,
    even though the parent environment may not be configured to do so.
    """
    from rez.utils.platform_ import platform_

    # on windows the script must carry a .cmd extension to be executable
    if platform_.name == "windows":
        ext = os.path.splitext(filepath)[-1]
        if ext.lower() != ".cmd":
            filepath += ".cmd"

    doc = {"module": module, "func_name": func_name}
    if nargs:
        doc["nargs"] = nargs
    if kwargs:
        doc["kwargs"] = kwargs

    # the YAML document becomes the script body, executed by _rez_fwd
    create_executable_script(filepath, dump_yaml(doc), "_rez_fwd")
|
[
"os.chmod",
"sys.__stdin__.fileno",
"sys.path.extend",
"os.path.splitext",
"sys.stdin.fileno",
"rez.utils.yaml.dump_yaml",
"rez.utils.sourcecode.SourceCode"
] |
[((481, 503), 'sys.path.extend', 'sys.path.extend', (['paths'], {}), '(paths)\n', (496, 503), False, 'import sys\n'), ((7219, 7245), 'os.path.splitext', 'os.path.splitext', (['filepath'], {}), '(filepath)\n', (7235, 7245), False, 'import os\n'), ((8797, 8811), 'rez.utils.yaml.dump_yaml', 'dump_yaml', (['doc'], {}), '(doc)\n', (8806, 8811), False, 'from rez.utils.yaml import dump_yaml\n'), ((4799, 4820), 'rez.utils.sourcecode.SourceCode', 'SourceCode', ([], {'func': 'body'}), '(func=body)\n', (4809, 4820), False, 'from rez.utils.sourcecode import SourceCode\n'), ((6540, 6659), 'os.chmod', 'os.chmod', (['current_filepath', '(stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR | stat.S_IXGRP |\n stat.S_IXOTH)'], {}), '(current_filepath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |\n stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n', (6548, 6659), False, 'import os\n'), ((1431, 1449), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (1447, 1449), False, 'import sys\n'), ((1689, 1711), 'sys.__stdin__.fileno', 'sys.__stdin__.fileno', ([], {}), '()\n', (1709, 1711), False, 'import sys\n'), ((8549, 8575), 'os.path.splitext', 'os.path.splitext', (['filepath'], {}), '(filepath)\n', (8565, 8575), False, 'import os\n')]
|
import pickle
from . import ClientConstants as CC
from . import ClientNetworkingContexts
from . import ClientNetworkingDomain
from . import HydrusData
from . import HydrusSerialisable
from . import HydrusGlobals as HG
import requests
import threading
# Optional SOCKS proxy support: 'socks' comes from the third-party PySocks
# package and may not be installed. SOCKS_PROXY_OK records availability so
# callers can fall back gracefully.
try:
    import socket
    import socks

    SOCKS_PROXY_OK = True
except ImportError:
    # was a bare 'except:', which would also have swallowed KeyboardInterrupt
    # and SystemExit; only a failed import is expected here
    SOCKS_PROXY_OK = False
class NetworkSessionManager( HydrusSerialisable.SerialisableBase ):
    """Thread-safe owner of one requests.Session per network context.
    
    Sessions (and hence their cookies) are cached per network context,
    persisted through the hydrus serialisation system, and have their session
    cookies cleared once SESSION_TIMEOUT seconds have passed. All public
    methods take the internal lock.
    """
    
    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER
    SERIALISABLE_NAME = 'Session Manager'
    SERIALISABLE_VERSION = 1
    
    # seconds before a context's session cookies are cleared again
    SESSION_TIMEOUT = 60 * 60
    
    def __init__( self ):
        
        HydrusSerialisable.SerialisableBase.__init__( self )
        
        # set externally after construction -- TODO confirm against caller
        self.engine = None
        
        self._dirty = False
        
        self._lock = threading.Lock()
        
        self._network_contexts_to_sessions = {}
        
        self._network_contexts_to_session_timeouts = {}
        
        self._proxies_dict = {}
        
        self._Reinitialise()
        
        # re-read proxy options whenever client options change
        HG.client_controller.sub( self, 'Reinitialise', 'notify_new_options' )
        
    
    def _CleanSessionCookies( self, network_context, session ):
        
        # Clears expired cookies on every call, and session cookies once per
        # SESSION_TIMEOUT window. Caller must hold self._lock.
        
        if network_context not in self._network_contexts_to_session_timeouts:
            
            self._network_contexts_to_session_timeouts[ network_context ] = 0
            
        
        if HydrusData.TimeHasPassed( self._network_contexts_to_session_timeouts[ network_context ] ):
            
            session.cookies.clear_session_cookies()
            
            self._network_contexts_to_session_timeouts[ network_context ] = HydrusData.GetNow() + self.SESSION_TIMEOUT
            
        
        session.cookies.clear_expired_cookies()
        
    
    def _GenerateSession( self, network_context ):
        
        session = requests.Session()
        
        if network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
            
            # presumably because hydrus services use self-signed certificates
            # -- confirm against the server code
            session.verify = False
            
        
        return session
        
    
    def _GetSerialisableInfo( self ):
        
        # sessions are stored as hex-encoded pickles alongside their context
        serialisable_network_contexts_to_sessions = [ ( network_context.GetSerialisableTuple(), pickle.dumps( session ).hex() ) for ( network_context, session ) in list(self._network_contexts_to_sessions.items()) ]
        
        return serialisable_network_contexts_to_sessions
        
    
    def _GetSessionNetworkContext( self, network_context ):
        
        # collapse domain contexts down to their second-level domain so that
        # subdomains share one session
        
        # just in case one of these slips through somehow
        if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN:
            
            second_level_domain = ClientNetworkingDomain.ConvertDomainIntoSecondLevelDomain( network_context.context_data )
            
            network_context = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, second_level_domain )
            
        
        return network_context
        
    
    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        
        serialisable_network_contexts_to_sessions = serialisable_info
        
        for ( serialisable_network_context, pickled_session_hex ) in serialisable_network_contexts_to_sessions:
            
            network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )
            
            try:
                
                # SECURITY NOTE(review): pickle.loads on stored data -- safe
                # only as long as the db is trusted; do not load foreign dbs
                session = pickle.loads( bytes.fromhex( pickled_session_hex ) )
                
            except:
                
                # new version of requests uses a diff format, wew
                
                continue
                
            
            session.cookies.clear_session_cookies()
            
            self._network_contexts_to_sessions[ network_context ] = session
            
        
    
    def _Reinitialise( self ):
        
        # rebuild the proxies dict from the current client options
        
        self._proxies_dict = {}
        
        http_proxy = HG.client_controller.new_options.GetNoneableString( 'http_proxy' )
        https_proxy = HG.client_controller.new_options.GetNoneableString( 'https_proxy' )
        
        if http_proxy is not None:
            
            self._proxies_dict[ 'http' ] = http_proxy
            
        
        if https_proxy is not None:
            
            self._proxies_dict[ 'https' ] = https_proxy
            
        
    
    def _SetDirty( self ):
        
        self._dirty = True
        
    
    def ClearSession( self, network_context ):
        
        with self._lock:
            
            network_context = self._GetSessionNetworkContext( network_context )
            
            if network_context in self._network_contexts_to_sessions:
                
                del self._network_contexts_to_sessions[ network_context ]
                
                self._SetDirty()
                
            
        
    
    def GetNetworkContexts( self ):
        
        with self._lock:
            
            return list(self._network_contexts_to_sessions.keys())
            
        
    
    def GetSession( self, network_context ):
        
        # Fetches (creating if needed) the session for the given context,
        # refreshing its proxies and clearing stale cookies on the way.
        
        with self._lock:
            
            network_context = self._GetSessionNetworkContext( network_context )
            
            if network_context not in self._network_contexts_to_sessions:
                
                self._network_contexts_to_sessions[ network_context ] = self._GenerateSession( network_context )
                
            
            session = self._network_contexts_to_sessions[ network_context ]
            
            if session.proxies != self._proxies_dict:
                
                session.proxies = dict( self._proxies_dict )
                
            
            #
            
            self._CleanSessionCookies( network_context, session )
            
            #
            
            # tumblr can't into ssl for some reason, and the data subdomain they use has weird cert properties, looking like amazon S3
            # perhaps it is inward-facing somehow? whatever the case, let's just say fuck it for tumblr
            
            if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN and network_context.context_data == 'tumblr.com':
                
                session.verify = False
                
            
            #
            
            self._SetDirty()
            
            return session
            
        
    
    def GetSessionForDomain( self, domain ):
        
        network_context = ClientNetworkingContexts.NetworkContext( context_type = CC.NETWORK_CONTEXT_DOMAIN, context_data = domain )
        
        return self.GetSession( network_context )
        
    
    def IsDirty( self ):
        
        with self._lock:
            
            return self._dirty
            
        
    
    def Reinitialise( self ):
        
        with self._lock:
            
            self._Reinitialise()
            
        
    
    def SetClean( self ):
        
        with self._lock:
            
            self._dirty = False
            
        
    
# register with the serialisation system so stored managers deserialise back
# into this class
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER ] = NetworkSessionManager
|
[
"threading.Lock",
"requests.Session",
"pickle.dumps"
] |
[((847, 863), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (861, 863), False, 'import threading\n'), ((1884, 1902), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1900, 1902), False, 'import requests\n'), ((2232, 2253), 'pickle.dumps', 'pickle.dumps', (['session'], {}), '(session)\n', (2244, 2253), False, 'import pickle\n')]
|
# -*- coding: UTF-8 -*-
import unittest
import uuid
import pytz
from geopy import Point
from geopy.compat import u
from geopy.exc import GeocoderAuthenticationFailure, GeocoderQueryError
from geopy.geocoders import GeoNames
from test.geocoders.util import GeocoderTestBase, env
class GeoNamesTestCaseUnitTest(GeocoderTestBase):
    """Offline unit tests for the GeoNames geocoder (no network calls)."""

    def test_user_agent_custom(self):
        # a custom user_agent must end up in the outgoing request headers
        client = GeoNames(
            username='DUMMYUSER_NORBERT',
            user_agent='my_user_agent/1.0'
        )
        self.assertEqual('my_user_agent/1.0', client.headers['User-Agent'])
@unittest.skipUnless(
    bool(env.get('GEONAMES_USERNAME')),
    "No GEONAMES_USERNAME env variable set"
)
class GeoNamesTestCase(GeocoderTestBase):
    """Live integration tests against the GeoNames web service.

    Requires a GEONAMES_USERNAME environment variable; skipped otherwise.
    """

    # coordinate tolerance used by the GeocoderTestBase comparison helpers
    delta = 0.04

    @classmethod
    def setUpClass(cls):
        cls.geocoder = GeoNames(username=env['GEONAMES_USERNAME'])

    def reverse_timezone_run(self, payload, expected):
        """Call reverse_timezone with payload and assert the pytz result."""
        timezone = self._make_request(self.geocoder.reverse_timezone, **payload)
        if expected is None:
            self.assertIsNone(timezone)
        else:
            self.assertEqual(timezone.pytz_timezone, expected)
        return timezone

    def test_unicode_name(self):
        self.geocode_run(
            {"query": "Mount Everest, Nepal"},
            {"latitude": 27.987, "longitude": 86.925},
            skiptest_on_failure=True,  # sometimes the result is empty
        )

    def test_query_urlencoding(self):
        # non-ASCII query must survive URL encoding round-trip
        location = self.geocode_run(
            {"query": u("Ry\u016b\u014d")},
            {"latitude": 35.65, "longitude": 138.5},
            skiptest_on_failure=True,  # sometimes the result is empty
        )
        self.assertIn(u("Ry\u016b\u014d"), location.address)

    def test_reverse(self):
        location = self.reverse_run(
            {
                "query": "40.75376406311989, -73.98489005863667",
                "exactly_one": True,
            },
            {
                "latitude": 40.75376406311989,
                "longitude": -73.98489005863667,
            },
        )
        self.assertIn("Times Square", location.address)

    def test_geocode_empty_response(self):
        self.geocode_run(
            {"query": "sdlahaslkhdkasldhkjsahdlkash"},
            {},
            expect_failure=True,
        )

    def test_reverse_nearby_place_name_raises_for_feature_code(self):
        # feature_code is only valid together with find_nearby_type=findNearby
        with self.assertRaises(ValueError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "feature_code": "ADM1",
                },
                {},
            )
        with self.assertRaises(ValueError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "feature_code": "ADM1",
                    "find_nearby_type": "findNearbyPlaceName",
                },
                {},
            )

    def test_reverse_nearby_place_name_lang(self):
        location = self.reverse_run(
            {
                "query": "52.50, 13.41",
                "exactly_one": True,
                "lang": 'ru',
            },
            {},
        )
        # NOTE(review): '<NAME>' looks like an anonymisation placeholder for
        # a localised (Russian) place name -- confirm against upstream geopy.
        self.assertIn(u'<NAME>', location.address)

    def test_reverse_find_nearby_raises_for_lang(self):
        # lang is not supported by the findNearby endpoint
        with self.assertRaises(ValueError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "find_nearby_type": 'findNearby',
                    "lang": 'en',
                },
                {},
            )

    def test_reverse_find_nearby(self):
        location = self.reverse_run(
            {
                "query": "40.75376406311989, -73.98489005863667",
                "exactly_one": True,
                "find_nearby_type": 'findNearby',
            },
            {
                "latitude": 40.75376406311989,
                "longitude": -73.98489005863667,
            },
        )
        self.assertIn("New York, United States", location.address)

    def test_reverse_find_nearby_feature_code(self):
        self.reverse_run(
            {
                "query": "40.75376406311989, -73.98489005863667",
                "exactly_one": True,
                "find_nearby_type": 'findNearby',
                "feature_code": "ADM1",
            },
            {
                "latitude": 40.16706,
                "longitude": -74.49987,
            },
        )

    def test_reverse_raises_for_unknown_find_nearby_type(self):
        with self.assertRaises(GeocoderQueryError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "find_nearby_type": "findSomethingNonExisting",
                },
                {},
            )

    def test_reverse_timezone(self):
        new_york_point = Point(40.75376406311989, -73.98489005863667)
        america_new_york = pytz.timezone("America/New_York")
        timezone = self.reverse_timezone_run(
            {"query": new_york_point},
            america_new_york,
        )
        self.assertEqual(timezone.raw['countryCode'], 'US')

    def test_reverse_timezone_unknown(self):
        self.reverse_timezone_run(
            # Geonames doesn't return `timezoneId` for Antarctica,
            # but it provides GMT offset which can be used
            # to create a FixedOffset pytz timezone.
            {"query": "89.0, 1.0"},
            pytz.UTC,
        )
        self.reverse_timezone_run(
            {"query": "89.0, 80.0"},
            pytz.FixedOffset(5 * 60),
        )

    def test_country_str(self):
        self.geocode_run(
            {"query": "kazan", "country": "TR"},
            {"latitude": 40.2317, "longitude": 32.6839},
        )

    def test_country_list(self):
        self.geocode_run(
            {"query": "kazan", "country": ["CN", "TR", "JP"]},
            {"latitude": 40.2317, "longitude": 32.6839},
        )

    def test_country_bias(self):
        self.geocode_run(
            {"query": "kazan", "country_bias": "TR"},
            {"latitude": 40.2317, "longitude": 32.6839},
        )
class GeoNamesInvalidAccountTestCase(GeocoderTestBase):
    """Tests run against a deliberately non-existent GeoNames account."""

    @classmethod
    def setUpClass(cls):
        # the uuid suffix guarantees the username cannot exist server-side
        cls.geocoder = GeoNames(username="geopy-not-existing-%s" % uuid.uuid4())

    def reverse_timezone_run(self, payload, expected):
        """Call reverse_timezone with payload and assert the pytz result."""
        tz = self._make_request(self.geocoder.reverse_timezone, **payload)
        if expected is None:
            self.assertIsNone(tz)
        else:
            self.assertEqual(expected, tz.pytz_timezone)
        return tz

    def test_geocode(self):
        # any geocode attempt must be rejected as an auth failure
        with self.assertRaises(GeocoderAuthenticationFailure):
            self.geocode_run(
                {"query": "moscow"},
                {},
                expect_failure=True,
            )

    def test_reverse_timezone(self):
        # timezone lookups must likewise fail authentication
        with self.assertRaises(GeocoderAuthenticationFailure):
            self.reverse_timezone_run(
                {"query": "40.6997716, -73.9753359"},
                None,
            )
[
"pytz.FixedOffset",
"geopy.compat.u",
"uuid.uuid4",
"geopy.geocoders.GeoNames",
"pytz.timezone",
"test.geocoders.util.env.get",
"geopy.Point"
] |
[((390, 460), 'geopy.geocoders.GeoNames', 'GeoNames', ([], {'username': '"""DUMMYUSER_NORBERT"""', 'user_agent': '"""my_user_agent/1.0"""'}), "(username='DUMMYUSER_NORBERT', user_agent='my_user_agent/1.0')\n", (398, 460), False, 'from geopy.geocoders import GeoNames\n'), ((809, 852), 'geopy.geocoders.GeoNames', 'GeoNames', ([], {'username': "env['GEONAMES_USERNAME']"}), "(username=env['GEONAMES_USERNAME'])\n", (817, 852), False, 'from geopy.geocoders import GeoNames\n'), ((5071, 5115), 'geopy.Point', 'Point', (['(40.75376406311989)', '(-73.98489005863667)'], {}), '(40.75376406311989, -73.98489005863667)\n', (5076, 5115), False, 'from geopy import Point\n'), ((5143, 5176), 'pytz.timezone', 'pytz.timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (5156, 5176), False, 'import pytz\n'), ((606, 634), 'test.geocoders.util.env.get', 'env.get', (['"""GEONAMES_USERNAME"""'], {}), "('GEONAMES_USERNAME')\n", (613, 634), False, 'from test.geocoders.util import GeocoderTestBase, env\n'), ((1679, 1688), 'geopy.compat.u', 'u', (['"""Ryūō"""'], {}), "('Ryūō')\n", (1680, 1688), False, 'from geopy.compat import u\n'), ((5775, 5799), 'pytz.FixedOffset', 'pytz.FixedOffset', (['(5 * 60)'], {}), '(5 * 60)\n', (5791, 5799), False, 'import pytz\n'), ((1501, 1510), 'geopy.compat.u', 'u', (['"""Ryūō"""'], {}), "('Ryūō')\n", (1502, 1510), False, 'from geopy.compat import u\n'), ((6525, 6537), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6535, 6537), False, 'import uuid\n')]
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from cwr.grammar.field import basic, special, table, filename
"""
CWR fields grammar adapters.
These classes allow the factories to create rules in an homogeneous way,
by setting a basic interface which will wrap around field rules, giving
a basic common method through which rules can be created.
This interface is the FieldAdapter, having only the get_field method, which
will receive a series of parameters, all of them optional, and generate a
field rule from them. The concrete rule will depend on the implementation.
Additionally, it offers the wrap_as_optional method, which allows setting a
field as optional. It is meant to be used with a field created by the adapter,
so it can be overriden for specific fields.
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
class FieldAdapter(object, metaclass=ABCMeta):
    """
    Interface for adapting field rules creation to the parser factory
    requirements.

    This is meant to receive always the same, or similar, groups of values,
    and then generate a specific field rule from them.
    """

    def __init__(self) -> None:
        pass

    @abstractmethod
    def get_field(self, name=None, columns=None, values=None):
        """
        Generates the rules for the field, applying the received parameters.

        :param name: the name of the field
        :param columns: number of columns
        :param values: allowed values for the field
        :return: the rule for the field
        """
        raise NotImplementedError("The get_field method is not implemented")

    def is_numeric(self) -> bool:
        """
        Indicates whether the generated field holds numeric content.

        Subclasses for numeric fields (e.g. DateAdapter) override this to
        return True.

        :return: False unless overridden
        """
        return False
class AlphanumAdapter(FieldAdapter):
    """
    Builds the rule for an Alphanumeric (A) field of a fixed width.

    The plain (non-extended) variant accepts ASCII characters only, and
    rejects ASCII lowercase. Heading and trailing whitespace is stripped
    from the value.
    """

    def __init__(self):
        super(AlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # extended=False restricts the field to the ASCII character set
        return basic.alphanum(columns, name, extended=False)
class ExtendedAlphanumAdapter(FieldAdapter):
    """
    Builds the rule for an extended Alphanumeric (A) field of a fixed width.

    The extended variant additionally accepts non-ASCII characters, while
    the ban on ASCII lowercase is kept. Heading and trailing whitespace is
    stripped from the value.
    """

    def __init__(self):
        super(ExtendedAlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        rule = basic.alphanum(columns, name, extended=True)
        return rule
class EndAlphanumAdapter(FieldAdapter):
    """
    Builds the rule for an extended Alphanumeric (A) field placed at the end
    of the record.

    Accepts non-ASCII characters but no ASCII lowercase; heading and trailing
    whitespace is stripped from the value.
    """

    def __init__(self):
        super(EndAlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # isLast marks this as the final field of the record line
        return basic.alphanum(columns, name, extended=True, isLast=True)
class NumericAdapter(FieldAdapter):
"""
Creates the grammar for a Numeric (N) field, accepting only the specified
number of characters.
This version only allows integers.
"""
def __init__(self):
super(NumericAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.numeric(columns, name)
class BooleanAdapter(FieldAdapter):
"""
Creates the grammar for a Boolean (B) field, accepting only 'Y' or 'N'
"""
def __init__(self):
super(BooleanAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.boolean(name)
class FlagAdapter(FieldAdapter):
"""
Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.
"""
def __init__(self):
super(FlagAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.flag(name)
class DateAdapter(FieldAdapter):
"""
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
"""
def __init__(self):
super(DateAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.date(name)
def is_numeric(self):
return True
class TimeAdapter(FieldAdapter):
"""
Creates the grammar for a Time (D) field, accepting only numbers in a
certain pattern.
"""
def __init__(self):
super(TimeAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.time(name)
class DateTimeAdapter(FieldAdapter):
"""
Creates the grammar for a date and time field, which is a combination of
the Date (D) and Time or Duration field (T)
.
"""
def __init__(self):
super(DateTimeAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.date_time(name)
class BlankAdapter(FieldAdapter):
"""
Creates the grammar for a blank field.
These are for constant empty strings which should be ignored, as they are
used just as fillers.
"""
def __init__(self):
super(BlankAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.blank(columns, name)
class LookupAdapter(FieldAdapter):
"""
Creates the grammar for a Lookup (L) field, accepting only values from a
list.
"""
def __init__(self):
super(LookupAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.lookup(values, name)
class ISWCAdapter(FieldAdapter):
"""
ISWC field.
"""
def __init__(self):
super(ISWCAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.iswc(name)
class IPIBaseNumberAdapter(FieldAdapter):
"""
IPI Base Number field.
"""
def __init__(self):
super(IPIBaseNumberAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.ipi_base_number(name)
class IPINameNumberAdapter(FieldAdapter):
"""
IPI Name Number field.
"""
def __init__(self):
super(IPINameNumberAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.ipi_name_number(name, )
class PercentageAdapter(FieldAdapter):
"""
Creates the grammar for a Numeric (N) field storing a percentage and
accepting only the specified number of
characters.
"""
def __init__(self):
super(PercentageAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
if values is not None and len(values) > 0:
maximum = int(values[0])
else:
maximum = 100
return special.percentage(columns=columns, maximum=maximum, name=name)
class EAN13Adapter(FieldAdapter):
"""
Creates the grammar for an EAN 13 code.
"""
def __init__(self):
super(EAN13Adapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.ean_13(name=name)
class ISRCAdapter(FieldAdapter):
"""
Creates the grammar for an ISRC code.
"""
def __init__(self):
super(ISRCAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.isrc(name=name)
class VISANAdapter(FieldAdapter):
"""
Creates the grammar for a V-ISAN code.
"""
def __init__(self):
super(VISANAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.visan(name=name)
class AudioVisualKeydapter(FieldAdapter):
"""
Creates the grammar for an Audio Visual Key code.
"""
def __init__(self):
super(AudioVisualKeydapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
field = special.audio_visual_key(name=name)
return field
class CharSetAdapter(FieldAdapter):
"""
Character set code field.
"""
def __init__(self):
super(CharSetAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return table.char_code(columns=columns, name=name)
class VariableAlphanumAdapter(FieldAdapter):
"""
Creates the grammar for an alphanumeric code where the size ranges between
two values.
"""
def __init__(self):
super(VariableAlphanumAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
if values is not None and len(values) > 0:
min_size = int(values[0])
else:
min_size = columns
return filename.alphanum_variable(min_size=min_size, max_size=columns,
name=name)
class NumericFloatAdapter(FieldAdapter):
"""
Creates the grammar for a Numeric (N) field, accepting only the specified
number of characters.
"""
def __init__(self):
super(NumericFloatAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
if values is not None and len(values) > 0:
nums_int = int(values[0])
else:
nums_int = columns
return basic.numeric_float(columns=columns, nums_int=nums_int,
name=name)
class YearAdapter(FieldAdapter):
"""
Creates the grammar for a year field, accepting only the specified number
of integers.
"""
def __init__(self):
super(YearAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return filename.year(columns=columns, name=name)
class FilenameVersionAdapter(FieldAdapter):
"""
Creates the grammar for a filename version field, accepting only specific
delimiters.
"""
def __init__(self):
super(FilenameVersionAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return filename.filename_version(values=values, name=name)
class LookupIntAdapter(FieldAdapter):
"""
Creates the grammar for an integer lookup field, accepting only specific
values, and transforming them to an integer.
"""
def __init__(self):
super(LookupIntAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.lookup_int(values=values, name=name)
|
[
"cwr.grammar.field.basic.flag",
"cwr.grammar.field.special.isrc",
"cwr.grammar.field.special.percentage",
"cwr.grammar.field.special.visan",
"cwr.grammar.field.table.char_code",
"cwr.grammar.field.basic.numeric_float",
"cwr.grammar.field.special.lookup_int",
"cwr.grammar.field.special.ipi_base_number",
"cwr.grammar.field.filename.year",
"cwr.grammar.field.basic.time",
"cwr.grammar.field.special.audio_visual_key",
"cwr.grammar.field.special.ipi_name_number",
"cwr.grammar.field.special.ean_13",
"cwr.grammar.field.basic.date",
"cwr.grammar.field.basic.boolean",
"cwr.grammar.field.basic.blank",
"cwr.grammar.field.special.date_time",
"cwr.grammar.field.basic.alphanum",
"cwr.grammar.field.filename.alphanum_variable",
"cwr.grammar.field.basic.lookup",
"cwr.grammar.field.special.iswc",
"cwr.grammar.field.basic.numeric",
"cwr.grammar.field.filename.filename_version"
] |
[((2373, 2418), 'cwr.grammar.field.basic.alphanum', 'basic.alphanum', (['columns', 'name'], {'extended': '(False)'}), '(columns, name, extended=False)\n', (2387, 2418), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((3148, 3192), 'cwr.grammar.field.basic.alphanum', 'basic.alphanum', (['columns', 'name'], {'extended': '(True)'}), '(columns, name, extended=True)\n', (3162, 3192), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((3889, 3946), 'cwr.grammar.field.basic.alphanum', 'basic.alphanum', (['columns', 'name'], {'extended': '(True)', 'isLast': '(True)'}), '(columns, name, extended=True, isLast=True)\n', (3903, 3946), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((4315, 4343), 'cwr.grammar.field.basic.numeric', 'basic.numeric', (['columns', 'name'], {}), '(columns, name)\n', (4328, 4343), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((4624, 4643), 'cwr.grammar.field.basic.boolean', 'basic.boolean', (['name'], {}), '(name)\n', (4637, 4643), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((4921, 4937), 'cwr.grammar.field.basic.flag', 'basic.flag', (['name'], {}), '(name)\n', (4931, 4937), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((5232, 5248), 'cwr.grammar.field.basic.date', 'basic.date', (['name'], {}), '(name)\n', (5242, 5248), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((5590, 5606), 'cwr.grammar.field.basic.time', 'basic.time', (['name'], {}), '(name)\n', (5600, 5606), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((5945, 5968), 'cwr.grammar.field.special.date_time', 'special.date_time', (['name'], {}), '(name)\n', (5962, 5968), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((6318, 6344), 'cwr.grammar.field.basic.blank', 'basic.blank', (['columns', 'name'], {}), '(columns, name)\n', (6329, 6344), 
False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((6635, 6661), 'cwr.grammar.field.basic.lookup', 'basic.lookup', (['values', 'name'], {}), '(values, name)\n', (6647, 6661), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((6877, 6895), 'cwr.grammar.field.special.iswc', 'special.iswc', (['name'], {}), '(name)\n', (6889, 6895), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((7140, 7169), 'cwr.grammar.field.special.ipi_base_number', 'special.ipi_base_number', (['name'], {}), '(name)\n', (7163, 7169), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((7414, 7443), 'cwr.grammar.field.special.ipi_name_number', 'special.ipi_name_number', (['name'], {}), '(name)\n', (7437, 7443), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((7918, 7981), 'cwr.grammar.field.special.percentage', 'special.percentage', ([], {'columns': 'columns', 'maximum': 'maximum', 'name': 'name'}), '(columns=columns, maximum=maximum, name=name)\n', (7936, 7981), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((8227, 8252), 'cwr.grammar.field.special.ean_13', 'special.ean_13', ([], {'name': 'name'}), '(name=name)\n', (8241, 8252), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((8494, 8517), 'cwr.grammar.field.special.isrc', 'special.isrc', ([], {'name': 'name'}), '(name=name)\n', (8506, 8517), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((8762, 8786), 'cwr.grammar.field.special.visan', 'special.visan', ([], {'name': 'name'}), '(name=name)\n', (8775, 8786), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((9059, 9094), 'cwr.grammar.field.special.audio_visual_key', 'special.audio_visual_key', ([], {'name': 'name'}), '(name=name)\n', (9083, 9094), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((9351, 9394), 
'cwr.grammar.field.table.char_code', 'table.char_code', ([], {'columns': 'columns', 'name': 'name'}), '(columns=columns, name=name)\n', (9366, 9394), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((9848, 9922), 'cwr.grammar.field.filename.alphanum_variable', 'filename.alphanum_variable', ([], {'min_size': 'min_size', 'max_size': 'columns', 'name': 'name'}), '(min_size=min_size, max_size=columns, name=name)\n', (9874, 9922), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((10419, 10485), 'cwr.grammar.field.basic.numeric_float', 'basic.numeric_float', ([], {'columns': 'columns', 'nums_int': 'nums_int', 'name': 'name'}), '(columns=columns, nums_int=nums_int, name=name)\n', (10438, 10485), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((10815, 10856), 'cwr.grammar.field.filename.year', 'filename.year', ([], {'columns': 'columns', 'name': 'name'}), '(columns=columns, name=name)\n', (10828, 10856), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((11172, 11223), 'cwr.grammar.field.filename.filename_version', 'filename.filename_version', ([], {'values': 'values', 'name': 'name'}), '(values=values, name=name)\n', (11197, 11223), False, 'from cwr.grammar.field import basic, special, table, filename\n'), ((11559, 11603), 'cwr.grammar.field.special.lookup_int', 'special.lookup_int', ([], {'values': 'values', 'name': 'name'}), '(values=values, name=name)\n', (11577, 11603), False, 'from cwr.grammar.field import basic, special, table, filename\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.swahili import swahili
def test_swahili():
"""Test module swahili.py by downloading
swahili.csv and testing shape of
extracted data has 480 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = swahili(test_path)
try:
assert x_train.shape == (480, 4)
except:
shutil.rmtree(test_path)
raise()
|
[
"shutil.rmtree",
"tempfile.mkdtemp",
"observations.r.swahili.swahili"
] |
[((361, 379), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (377, 379), False, 'import tempfile\n'), ((402, 420), 'observations.r.swahili.swahili', 'swahili', (['test_path'], {}), '(test_path)\n', (409, 420), False, 'from observations.r.swahili import swahili\n'), ((479, 503), 'shutil.rmtree', 'shutil.rmtree', (['test_path'], {}), '(test_path)\n', (492, 503), False, 'import shutil\n')]
|
import json
import boto3
dynamo = boto3.resource("dynamodb")
table = dynamo.Table("Attendance_Count")
def lambda_handler(event, context):
# TODO implement
res = table.get_item(Key = {"RollNo" : event['RollNo']})
print(res['Item']['Name'])
Count = res['Item']['Count']
Count= Count+1
inp = {"RollNo" : event['RollNo'], "Count" : Count, "Name" : res['Item']['Name']}
table.put_item(Item = inp)
return "Successful"
|
[
"boto3.resource"
] |
[((38, 64), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (52, 64), False, 'import boto3\n')]
|
# Python
import datetime
import logging
# Django
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.utils.timezone import now
# CyBorgBackup
from cyborgbackup.main.models import Job, Repository
class Command(BaseCommand):
'''
Management command to cleanup old jobs.
'''
help = 'Remove old jobs from the database.'
def add_arguments(self, parser):
parser.add_argument('--dry-run', dest='dry_run', action='store_true',
default=False, help='Dry run mode (show items that would '
'be removed)')
parser.add_argument('--jobs', dest='only_jobs', action='store_true',
default=True,
help='Remove jobs')
def cleanup_jobs(self):
# Sanity check: Is there already a running job on the System?
jobs = Job.objects.filter(status="running")
if jobs.exists():
print('A job is already running, exiting.')
return
repos = Repository.objects.filter(enabled=True)
repoArchives = []
if repos.exists():
for repo in repos:
lines = self.launch_command(["borg", "list", "::"], repo, repo.repository_key, repo.path, **kwargs)
for line in lines:
archive_name = line.split(' ')[0] #
for type in ('rootfs', 'vm', 'mysql', 'postgresql', 'config', 'piped', 'mail', 'folders'):
if '{}-'.format(type) in archive_name:
repoArchives.append(archive_name)
entries = Job.objects.filter(job_type='job')
if entries.exists():
for entry in entries:
if entry.archive_name != '' and entry.archive_name not in repoArchives:
action_text = 'would delete' if self.dry_run else 'deleting'
self.logger.info('%s %s', action_text, entry.archive_name)
if not self.dry_run:
entry.delete()
return 0, 0
@transaction.atomic
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1))
self.init_logging()
self.dry_run = bool(options.get('dry_run', False))
model_names = ('jobs',)
models_to_cleanup = set()
for m in model_names:
if options.get('only_%s' % m, False):
models_to_cleanup.add(m)
if not models_to_cleanup:
models_to_cleanup.update(model_names)
for m in model_names:
if m in models_to_cleanup:
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
if self.dry_run:
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '),
deleted, skipped)
else:
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
[
"cyborgbackup.main.models.Job.objects.filter",
"cyborgbackup.main.models.Repository.objects.filter"
] |
[((926, 962), 'cyborgbackup.main.models.Job.objects.filter', 'Job.objects.filter', ([], {'status': '"""running"""'}), "(status='running')\n", (944, 962), False, 'from cyborgbackup.main.models import Job, Repository\n'), ((1081, 1120), 'cyborgbackup.main.models.Repository.objects.filter', 'Repository.objects.filter', ([], {'enabled': '(True)'}), '(enabled=True)\n', (1106, 1120), False, 'from cyborgbackup.main.models import Job, Repository\n'), ((1677, 1711), 'cyborgbackup.main.models.Job.objects.filter', 'Job.objects.filter', ([], {'job_type': '"""job"""'}), "(job_type='job')\n", (1695, 1711), False, 'from cyborgbackup.main.models import Job, Repository\n')]
|
import _thread
import time
import hashlib
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from django.template.defaultfilters import slugify
from SocialNetwork_API.services.base import BaseService
from SocialNetwork_API.models import *
from SocialNetwork_API.const import ResourceType
class UserService(BaseService):
@classmethod
def get_all_users(cls):
try:
users = User.objects.all()
if len(users) > 0:
return users
return None
except Exception as exception:
cls.log_exception(exception)
return None
@classmethod
def get_user_friend(cls, user_id, friend_id):
try:
user_friend = Friend.objects.filter(user_id=user_id, friend_user_id=friend_id)
if len(user_friend) > 0:
return user_friend[0]
return None
except Exception as exception:
cls.log_exception(exception)
return None
@classmethod
def get_single_user(cls, user_id):
try:
return User.objects.get(pk=user_id)
except:
return None
@classmethod
def authenticate(cls, email, username, password):
try:
if email:
user = User.objects.filter(email=email)[0]
if username:
user = User.objects.filter(username=username)[0]
if user and user.check_password(password):
return user
else:
return None
except Exception as exception:
return None
@classmethod
def save(cls, user_data, instance=None):
try:
password = user_data.pop('password', None)
user = instance if instance else User()
is_new = instance is None
# Set property values
if 'username' in user_data and user.username != user_data['username']:
user.slug = slugify(user_data['username'])
for key in user_data:
setattr(user, key, user_data[key])
# Set password
if is_new:
user.set_password(password)
else:
if password:
user.set_password(password)
with transaction.atomic():
user.save()
return cls.get_user(user.id)
except Exception as exception:
raise exception
@classmethod
def user_friend(cls, user, friend):
try:
user_friend = Friend()
user_friend.user_id = user.id
user_friend.friend_user_id = friend.id
with transaction.atomic():
user_friend.save()
# # Save follow_user to arangodb
# if settings.SAVE_TO_ARANGODB:
# ArangoUserService.follow_band(band.userband.__dict__, activity.__dict__)
return True
except Exception as exception:
raise exception
@classmethod
def get_email(cls, email):
try:
user_email = UserEmail.objects.get(email=email)
if user_email:
return user_email
except Exception as e:
cls.log_exception(e)
return None
return None
@classmethod
def gen_token(cls, user_id):
text = str(user_id) + Utils.id_generator(10) + str(int(time.time()))
hash_object = hashlib.md5(text.encode('utf-8'))
return hash_object.hexdigest()
@classmethod
def get_by_email(cls, email):
try:
user = User.objects.get(email=email)
return cls.get_user(user.pk)
except User.DoesNotExist:
return None
@classmethod
def get_users(cls, *args, **kwargs):
limit = kwargs.get('limit', 20)
offset = kwargs.get('offset', 0)
search = kwargs.get('search', None)
end = offset + limit
filter = kwargs.get('filter', {})
order_by = kwargs.get('order', '-id')
includes = kwargs.get('includes', [])
users = []
if search:
term = Q(username__icontains=search)
user_ids = User.objects.values_list('id', flat=True) \
.order_by(order_by).filter(**filter).filter(term)[offset:end]
count = User.objects.values_list('id', flat=True) \
.order_by(order_by).filter(**filter).filter(term).count()
else:
user_ids = User.objects.values_list('id', flat=True).order_by(order_by).filter(**filter)[offset:end]
count = User.objects.values_list('id', flat=True).order_by(order_by).filter(**filter).count()
for id in user_ids:
users.append(cls.get_user(id, includes=includes))
return {
'result': users,
'count': count
}
@classmethod
def get_user(cls, user_id):
try:
user = User.objects.get(pk=user_id)
except Exception as e:
cls.log_exception(e)
return None
return user
|
[
"django.template.defaultfilters.slugify",
"django.db.transaction.atomic",
"django.db.models.Q",
"time.time"
] |
[((4231, 4260), 'django.db.models.Q', 'Q', ([], {'username__icontains': 'search'}), '(username__icontains=search)\n', (4232, 4260), False, 'from django.db.models import Q\n'), ((2057, 2087), 'django.template.defaultfilters.slugify', 'slugify', (["user_data['username']"], {}), "(user_data['username'])\n", (2064, 2087), False, 'from django.template.defaultfilters import slugify\n'), ((2382, 2402), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2400, 2402), False, 'from django.db import transaction\n'), ((2758, 2778), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2776, 2778), False, 'from django.db import transaction\n'), ((3505, 3516), 'time.time', 'time.time', ([], {}), '()\n', (3514, 3516), False, 'import time\n')]
|
import base64
from io import BytesIO
from os.path import dirname
from typing import Tuple
from collections import deque
from PIL import Image, ImageFont, ImageDraw, ImageOps
font_size = 70
line_gap = 20
body_pos = (205, 340)
subtitle_pos = (790, 320)
body_color = (0, 0, 0, 255)
subtitle_color = (129, 212, 250, 255)
line_rotate = -9.8
max_line_width = 680
max_content_height = 450
print(dirname(__file__) + "/res/font.ttc")
font = ImageFont.truetype(dirname(__file__) + "/res/font.ttf", font_size)
def image_to_byte_array(image: Image):
imgByteArr = io.BytesIO()
image.save(imgByteArr, format=image.format)
imgByteArr = imgByteArr.getvalue()
return imgByteArr
def im_2_b64(pic: Image.Image) -> str:
buf = BytesIO()
pic.save(buf, format="PNG")
base64_str = base64.b64encode(buf.getbuffer()).decode()
return "base64://" + base64_str
def draw_subtitle(im, text: str):
width, height = font.getsize(text)
image2 = Image.new("RGBA", (width, height))
draw2 = ImageDraw.Draw(image2)
draw2.text((0, 0), text=text, font=font, fill=subtitle_color)
image2 = image2.rotate(line_rotate, expand=1)
px, py = subtitle_pos
sx, sy = image2.size
im.paste(image2, (px, py, px + sx, py + sy), image2)
def generate_image(text: str):
origin_im = Image.open(dirname(__file__) + "/res/img.png")
text = text[:900]
length = len(text)
width, height = font.getsize(text)
current_width = 0
lines = []
line = ""
q = deque(text)
while q:
word = q.popleft()
width, _ = font.getsize(word)
current_width += width
if current_width >= max_line_width:
q.appendleft(word)
lines.append(line)
current_width = 0
line = ""
else:
line += word
lines.append(line)
image2 = Image.new("RGBA", (max_line_width, max_content_height))
draw2 = ImageDraw.Draw(image2)
for i, line in enumerate(lines):
y = i * (height + line_gap)
if y > max_content_height:
break
draw2.text((0, y), text=line, font=font, fill=body_color)
image2 = image2.rotate(line_rotate, expand=1)
px, py = body_pos
sx, sy = image2.size
origin_im.paste(image2, (px, py, px + sx, py + sy), image2)
draw_subtitle(origin_im, f"{length}/900")
return im_2_b64(origin_im)
|
[
"io.BytesIO",
"PIL.Image.new",
"os.path.dirname",
"PIL.ImageDraw.Draw",
"collections.deque"
] |
[((731, 740), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (738, 740), False, 'from io import BytesIO\n'), ((958, 992), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(width, height)'], {}), "('RGBA', (width, height))\n", (967, 992), False, 'from PIL import Image, ImageFont, ImageDraw, ImageOps\n'), ((1005, 1027), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image2'], {}), '(image2)\n', (1019, 1027), False, 'from PIL import Image, ImageFont, ImageDraw, ImageOps\n'), ((1492, 1503), 'collections.deque', 'deque', (['text'], {}), '(text)\n', (1497, 1503), False, 'from collections import deque\n'), ((1847, 1902), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(max_line_width, max_content_height)'], {}), "('RGBA', (max_line_width, max_content_height))\n", (1856, 1902), False, 'from PIL import Image, ImageFont, ImageDraw, ImageOps\n'), ((1915, 1937), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image2'], {}), '(image2)\n', (1929, 1937), False, 'from PIL import Image, ImageFont, ImageDraw, ImageOps\n'), ((389, 406), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (396, 406), False, 'from os.path import dirname\n'), ((452, 469), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (459, 469), False, 'from os.path import dirname\n'), ((1313, 1330), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1320, 1330), False, 'from os.path import dirname\n')]
|
import os
from collections import OrderedDict
import pytest
from ruamel.yaml import YAML
from cli.src.helpers.build_io import (ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE,
ANSIBLE_OUTPUT_DIR,
ANSIBLE_VAULT_OUTPUT_DIR,
MANIFEST_FILE_NAME, SP_FILE_NAME,
TERRAFORM_OUTPUT_DIR,
get_ansible_config_file_path,
get_ansible_config_file_path_for_build,
get_ansible_path,
get_ansible_path_for_build,
get_ansible_vault_path, get_build_path,
get_inventory_path,
get_inventory_path_for_build,
get_manifest_path, get_output_path,
get_terraform_path, load_manifest,
save_ansible_config_file, save_inventory,
save_manifest, save_sp)
from cli.src.helpers.objdict_helpers import dict_to_objdict
from cli.src.helpers.yaml_helpers import safe_load, safe_load_all
from tests.unit.helpers.constants import (CLUSTER_NAME_LOAD, CLUSTER_NAME_SAVE,
NON_EXISTING_CLUSTER, OUTPUT_PATH,
TEST_CLUSTER_MODEL, TEST_DOCS,
TEST_INVENTORY)
TEST_SP = {'appId': 'xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx',
'displayName': 'test-rg',
'name': 'http://test-rg',
'password': '<PASSWORD>',
'tenant': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'}
ANSIBLE_CONFIG_FILE_SETTINGS = [('defaults', {
'interpreter_python': 'auto_legacy_silent',
'allow_world_readable_tmpfiles': 'true'
})]
def test_get_output_path():
output_path = os.path.join(OUTPUT_PATH)
result_path = os.path.normpath(get_output_path())
assert os.path.exists(output_path)
assert result_path == output_path
def test_get_build_path():
build_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE)
result_path = get_build_path(CLUSTER_NAME_SAVE)
assert os.path.exists(build_path)
assert result_path == build_path
def test_get_inventory_path():
assert get_inventory_path(CLUSTER_NAME_SAVE) == os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)
def test_get_manifest_path():
assert get_manifest_path(CLUSTER_NAME_SAVE) == os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)
def test_get_terraform_path():
terraform_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, TERRAFORM_OUTPUT_DIR)
result_path = get_terraform_path(CLUSTER_NAME_SAVE)
assert os.path.exists(terraform_path)
assert result_path == terraform_path
def test_get_ansible_path():
assert get_ansible_path(CLUSTER_NAME_SAVE) == os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR)
def test_get_ansible_vault_path():
ansible_vault_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_VAULT_OUTPUT_DIR)
result_path = get_ansible_vault_path(CLUSTER_NAME_SAVE)
assert os.path.exists(ansible_vault_path)
assert result_path == ansible_vault_path
def test_get_ansible_config_file_path():
assert get_ansible_config_file_path(CLUSTER_NAME_SAVE) == os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR, ANSIBLE_CFG_FILE)
def test_get_inventory_path_for_build():
assert get_inventory_path_for_build(os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE)) == os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)
def test_get_ansible_path_for_build():
ansible_path_for_build_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR)
result_path = get_ansible_path_for_build(os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE))
assert os.path.exists(ansible_path_for_build_path)
assert result_path == ansible_path_for_build_path
def test_get_ansible_config_file_path_for_build():
assert get_ansible_config_file_path_for_build(os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE)) == os.path.join(
OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR, ANSIBLE_CFG_FILE)
def test_save_manifest():
save_manifest(TEST_DOCS, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)
manifest_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)
manifest_stream = open(manifest_path, 'r')
manifest_file_content = safe_load_all(manifest_stream)
assert TEST_DOCS == manifest_file_content
def test_load_manifest():
build_path = get_build_path(CLUSTER_NAME_LOAD)
docs = load_manifest(build_path)
assert docs == TEST_DOCS
def test_load_not_existing_manifest_docs():
build_path = get_build_path(NON_EXISTING_CLUSTER)
with pytest.raises(Exception):
load_manifest(build_path)
def test_save_sp():
save_sp(TEST_SP, CLUSTER_NAME_SAVE)
sp_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, TERRAFORM_OUTPUT_DIR, SP_FILE_NAME)
sp_stream = open(sp_path, 'r')
sp_file_content = safe_load(sp_stream)
assert TEST_SP == sp_file_content
def test_save_inventory():
cluster_model = dict_to_objdict(TEST_CLUSTER_MODEL)
save_inventory(TEST_INVENTORY, cluster_model)
f = open(os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE), mode='r')
inventory_content = f.read()
assert 'test-1 ansible_host=10.0.0.1' in inventory_content
assert 'test-2 ansible_host=10.0.0.2' in inventory_content
assert 'test-3 ansible_host=10.0.0.3' in inventory_content
assert 'test-4 ansible_host=10.0.0.4' in inventory_content
assert 'ansible_user=operations' in inventory_content
assert 'ansible_ssh_private_key_file=id_rsa' in inventory_content
def test_save_ansible_config_file():
config_file_settings = OrderedDict(ANSIBLE_CONFIG_FILE_SETTINGS)
ansible_config_file_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR, ANSIBLE_CFG_FILE)
save_ansible_config_file(config_file_settings, ansible_config_file_path)
f = open(ansible_config_file_path, mode='r')
ansible_config_file_content = f.read()
assert 'interpreter_python = auto_legacy_silent' in ansible_config_file_content
assert 'allow_world_readable_tmpfiles = true' in ansible_config_file_content
|
[
"cli.src.helpers.build_io.get_ansible_vault_path",
"cli.src.helpers.objdict_helpers.dict_to_objdict",
"cli.src.helpers.build_io.save_manifest",
"cli.src.helpers.build_io.get_ansible_path",
"cli.src.helpers.build_io.save_inventory",
"cli.src.helpers.build_io.get_inventory_path",
"os.path.join",
"cli.src.helpers.build_io.get_build_path",
"os.path.exists",
"cli.src.helpers.build_io.get_terraform_path",
"cli.src.helpers.build_io.save_ansible_config_file",
"pytest.raises",
"cli.src.helpers.build_io.get_manifest_path",
"cli.src.helpers.build_io.get_ansible_config_file_path",
"cli.src.helpers.build_io.get_output_path",
"cli.src.helpers.yaml_helpers.safe_load",
"cli.src.helpers.build_io.load_manifest",
"cli.src.helpers.build_io.save_sp",
"cli.src.helpers.yaml_helpers.safe_load_all",
"collections.OrderedDict"
] |
[((2113, 2138), 'os.path.join', 'os.path.join', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (2125, 2138), False, 'import os\n'), ((2205, 2232), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (2219, 2232), False, 'import os\n'), ((2317, 2361), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE)\n', (2329, 2361), False, 'import os\n'), ((2380, 2413), 'cli.src.helpers.build_io.get_build_path', 'get_build_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (2394, 2413), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((2425, 2451), 'os.path.exists', 'os.path.exists', (['build_path'], {}), '(build_path)\n', (2439, 2451), False, 'import os\n'), ((2863, 2929), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'TERRAFORM_OUTPUT_DIR'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, TERRAFORM_OUTPUT_DIR)\n', (2875, 2929), False, 'import os\n'), ((2948, 2985), 'cli.src.helpers.build_io.get_terraform_path', 'get_terraform_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (2966, 2985), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, 
get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((2997, 3027), 'os.path.exists', 'os.path.exists', (['terraform_path'], {}), '(terraform_path)\n', (3011, 3027), False, 'import os\n'), ((3286, 3356), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_VAULT_OUTPUT_DIR'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_VAULT_OUTPUT_DIR)\n', (3298, 3356), False, 'import os\n'), ((3375, 3416), 'cli.src.helpers.build_io.get_ansible_vault_path', 'get_ansible_vault_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (3397, 3416), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((3428, 3462), 'os.path.exists', 'os.path.exists', (['ansible_vault_path'], {}), '(ansible_vault_path)\n', (3442, 3462), False, 'import os\n'), ((3999, 4063), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_OUTPUT_DIR'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR)\n', (4011, 4063), False, 'import os\n'), ((4166, 4209), 'os.path.exists', 'os.path.exists', (['ansible_path_for_build_path'], {}), '(ansible_path_for_build_path)\n', (4180, 4209), False, 'import os\n'), ((4550, 4613), 'cli.src.helpers.build_io.save_manifest', 'save_manifest', (['TEST_DOCS', 'CLUSTER_NAME_SAVE', 'MANIFEST_FILE_NAME'], {}), '(TEST_DOCS, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)\n', (4563, 4613), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, 
ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((4634, 4698), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'MANIFEST_FILE_NAME'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)\n', (4646, 4698), False, 'import os\n'), ((4774, 4804), 'cli.src.helpers.yaml_helpers.safe_load_all', 'safe_load_all', (['manifest_stream'], {}), '(manifest_stream)\n', (4787, 4804), False, 'from cli.src.helpers.yaml_helpers import safe_load, safe_load_all\n'), ((4896, 4929), 'cli.src.helpers.build_io.get_build_path', 'get_build_path', (['CLUSTER_NAME_LOAD'], {}), '(CLUSTER_NAME_LOAD)\n', (4910, 4929), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((4941, 4966), 'cli.src.helpers.build_io.load_manifest', 'load_manifest', (['build_path'], {}), '(build_path)\n', (4954, 4966), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, 
get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((5059, 5095), 'cli.src.helpers.build_io.get_build_path', 'get_build_path', (['NON_EXISTING_CLUSTER'], {}), '(NON_EXISTING_CLUSTER)\n', (5073, 5095), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((5191, 5226), 'cli.src.helpers.build_io.save_sp', 'save_sp', (['TEST_SP', 'CLUSTER_NAME_SAVE'], {}), '(TEST_SP, CLUSTER_NAME_SAVE)\n', (5198, 5226), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((5241, 5326), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'TERRAFORM_OUTPUT_DIR', 'SP_FILE_NAME'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, TERRAFORM_OUTPUT_DIR, SP_FILE_NAME\n )\n', (5253, 5326), False, 'import os\n'), ((5379, 5399), 'cli.src.helpers.yaml_helpers.safe_load', 'safe_load', (['sp_stream'], {}), '(sp_stream)\n', (5388, 5399), 
False, 'from cli.src.helpers.yaml_helpers import safe_load, safe_load_all\n'), ((5487, 5522), 'cli.src.helpers.objdict_helpers.dict_to_objdict', 'dict_to_objdict', (['TEST_CLUSTER_MODEL'], {}), '(TEST_CLUSTER_MODEL)\n', (5502, 5522), False, 'from cli.src.helpers.objdict_helpers import dict_to_objdict\n'), ((5527, 5572), 'cli.src.helpers.build_io.save_inventory', 'save_inventory', (['TEST_INVENTORY', 'cluster_model'], {}), '(TEST_INVENTORY, cluster_model)\n', (5541, 5572), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((6145, 6186), 'collections.OrderedDict', 'OrderedDict', (['ANSIBLE_CONFIG_FILE_SETTINGS'], {}), '(ANSIBLE_CONFIG_FILE_SETTINGS)\n', (6156, 6186), False, 'from collections import OrderedDict\n'), ((6218, 6304), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_OUTPUT_DIR', 'ANSIBLE_CFG_FILE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR,\n ANSIBLE_CFG_FILE)\n', (6230, 6304), False, 'import os\n'), ((6305, 6377), 'cli.src.helpers.build_io.save_ansible_config_file', 'save_ansible_config_file', (['config_file_settings', 'ansible_config_file_path'], {}), '(config_file_settings, ansible_config_file_path)\n', (6329, 6377), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, 
get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((2175, 2192), 'cli.src.helpers.build_io.get_output_path', 'get_output_path', ([], {}), '()\n', (2190, 2192), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((2533, 2570), 'cli.src.helpers.build_io.get_inventory_path', 'get_inventory_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (2551, 2570), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((2574, 2642), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_INVENTORY_FILE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)\n', (2586, 2642), False, 'import os\n'), ((2695, 2731), 'cli.src.helpers.build_io.get_manifest_path', 'get_manifest_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (2712, 2731), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, 
ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((2735, 2799), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'MANIFEST_FILE_NAME'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)\n', (2747, 2799), False, 'import os\n'), ((3111, 3146), 'cli.src.helpers.build_io.get_ansible_path', 'get_ansible_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (3127, 3146), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((3150, 3214), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_OUTPUT_DIR'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR)\n', (3162, 3214), False, 'import os\n'), ((3562, 3609), 'cli.src.helpers.build_io.get_ansible_config_file_path', 'get_ansible_config_file_path', (['CLUSTER_NAME_SAVE'], {}), '(CLUSTER_NAME_SAVE)\n', (3590, 3609), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, 
get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((3613, 3699), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_OUTPUT_DIR', 'ANSIBLE_CFG_FILE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR,\n ANSIBLE_CFG_FILE)\n', (3625, 3699), False, 'import os\n'), ((3846, 3914), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_INVENTORY_FILE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)\n', (3858, 3914), False, 'import os\n'), ((4109, 4153), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE)\n', (4121, 4153), False, 'import os\n'), ((4425, 4511), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_OUTPUT_DIR', 'ANSIBLE_CFG_FILE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR,\n ANSIBLE_CFG_FILE)\n', (4437, 4511), False, 'import os\n'), ((5105, 5129), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5118, 5129), False, 'import pytest\n'), ((5139, 5164), 'cli.src.helpers.build_io.load_manifest', 'load_manifest', (['build_path'], {}), '(build_path)\n', (5152, 5164), False, 'from cli.src.helpers.build_io import ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE, ANSIBLE_OUTPUT_DIR, ANSIBLE_VAULT_OUTPUT_DIR, MANIFEST_FILE_NAME, SP_FILE_NAME, TERRAFORM_OUTPUT_DIR, get_ansible_config_file_path, get_ansible_config_file_path_for_build, get_ansible_path, get_ansible_path_for_build, get_ansible_vault_path, get_build_path, get_inventory_path, get_inventory_path_for_build, get_manifest_path, get_output_path, get_terraform_path, load_manifest, save_ansible_config_file, save_inventory, save_manifest, save_sp\n'), ((5586, 
5654), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE', 'ANSIBLE_INVENTORY_FILE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)\n', (5598, 5654), False, 'import os\n'), ((3788, 3832), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE)\n', (3800, 3832), False, 'import os\n'), ((4367, 4411), 'os.path.join', 'os.path.join', (['OUTPUT_PATH', 'CLUSTER_NAME_SAVE'], {}), '(OUTPUT_PATH, CLUSTER_NAME_SAVE)\n', (4379, 4411), False, 'import os\n')]
|
# # Developing a classification Audit trend
#
# In first iteration, this will only work on datasets that already have two or more binary classification variables included.
#
# We will need additional metadata: role options of being predictions or ground truths.
#
import pytest
import numpy as np
import pandas as pd
import wiggum as wg
# First, we will need a dataset that we can work with
# In[2]:
def test_classification_trends():
    """Smoke-test every binary classification trend on the admissions dataset.

    Instantiates each trend type, checks computability, then runs the
    one-level subgroup trend extraction and Simpson's-paradox row detection.
    """
    dataset = 'data/multi_decision_admisions/'
    labeled_df = wg.LabeledDataFrame(dataset)

    acc_trend = wg.Binary_Accuracy_Trend()
    tpr_trend = wg.Binary_TPR_Trend()
    ppv_trend = wg.Binary_PPV_Trend()
    tnr_trend = wg.Binary_TNR_Trend()
    fdr_trend = wg.Binary_FDR_Trend()
    fnr_trend = wg.Binary_FNR_Trend()
    err_trend = wg.Binary_Error_Trend()
    f1_trend = wg.Binary_F1_Trend()
    trend_list = [acc_trend, tpr_trend, ppv_trend, tnr_trend, fdr_trend,
                  f1_trend, fnr_trend, err_trend]

    # The original built a throwaway list from a comprehension; a plain loop
    # makes the side-effect-only intent explicit.
    for trend in trend_list:
        trend.is_computable(labeled_df)

    labeled_df.get_subgroup_trends_1lev(trend_list)
    labeled_df.get_SP_rows(thresh=.2)
|
[
"wiggum.Binary_TPR_Trend",
"wiggum.Binary_Error_Trend",
"wiggum.Binary_F1_Trend",
"wiggum.LabeledDataFrame",
"wiggum.Binary_Accuracy_Trend",
"wiggum.Binary_FDR_Trend",
"wiggum.Binary_PPV_Trend",
"wiggum.Binary_FNR_Trend",
"wiggum.Binary_TNR_Trend"
] |
[((505, 533), 'wiggum.LabeledDataFrame', 'wg.LabeledDataFrame', (['dataset'], {}), '(dataset)\n', (524, 533), True, 'import wiggum as wg\n'), ((551, 577), 'wiggum.Binary_Accuracy_Trend', 'wg.Binary_Accuracy_Trend', ([], {}), '()\n', (575, 577), True, 'import wiggum as wg\n'), ((594, 615), 'wiggum.Binary_TPR_Trend', 'wg.Binary_TPR_Trend', ([], {}), '()\n', (613, 615), True, 'import wiggum as wg\n'), ((632, 653), 'wiggum.Binary_PPV_Trend', 'wg.Binary_PPV_Trend', ([], {}), '()\n', (651, 653), True, 'import wiggum as wg\n'), ((670, 691), 'wiggum.Binary_TNR_Trend', 'wg.Binary_TNR_Trend', ([], {}), '()\n', (689, 691), True, 'import wiggum as wg\n'), ((708, 729), 'wiggum.Binary_FDR_Trend', 'wg.Binary_FDR_Trend', ([], {}), '()\n', (727, 729), True, 'import wiggum as wg\n'), ((746, 767), 'wiggum.Binary_FNR_Trend', 'wg.Binary_FNR_Trend', ([], {}), '()\n', (765, 767), True, 'import wiggum as wg\n'), ((784, 807), 'wiggum.Binary_Error_Trend', 'wg.Binary_Error_Trend', ([], {}), '()\n', (805, 807), True, 'import wiggum as wg\n'), ((823, 843), 'wiggum.Binary_F1_Trend', 'wg.Binary_F1_Trend', ([], {}), '()\n', (841, 843), True, 'import wiggum as wg\n')]
|
# -*- coding: utf-8 -*-
from enum import Enum
import operator as op
from typing import TypeVar, Callable
from dewloosh.core.tools import getasany
from .function import Function
__all__ = ['Equality', 'InEquality']
class Relations(Enum):
    """The supported relational operators, keyed by their symbol string."""
    eq = '='
    gt = '>'
    ge = '>='
    lt = '<'
    le = '<='

    def to_op(self):
        """Return the matching comparison function from the `operator` module."""
        return _rel_to_op[self]


# Lookup table backing Relations.to_op; the member names deliberately
# mirror the function names in the standard `operator` module.
_rel_to_op = {member: getattr(op, member.name) for member in Relations}


# A relation may be specified as a symbol string, a Relations member,
# or an arbitrary binary callable.
RelationType = TypeVar('RelationType', str, Relations, Callable)
class Relation(Function):
    """A function paired with a relational operator, evaluated against zero.

    The operator is taken from the ``op`` (or ``operator``) keyword and may
    be a symbol string (e.g. ``'>='``), a :class:`Relations` member, or an
    arbitrary binary callable.  When omitted it defaults to equality.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.op = None
        self.opfunc = None
        op = getasany(['op', 'operator'], None, **kwargs)
        if op:
            if isinstance(op, str):
                self.op = Relations(op)
            elif isinstance(op, Relations):
                self.op = op
            elif isinstance(op, Callable):
                # Custom callable: used directly, no Relations member applies.
                self.opfunc = op
                self.op = None
        else:
            self.op = Relations.eq
        # BUGFIX: the original guarded this with `if op and ...`, so the
        # default-equality path (no `op` kwarg) left `opfunc` as None and the
        # first call crashed with "'NoneType' object is not callable".
        if isinstance(self.op, Relations):
            self.opfunc = self.op.to_op()
        self.slack = 0

    @property
    def operator(self):
        """The `Relations` member in use, or None for a custom callable."""
        return self.op

    def to_eq(self):
        """Convert to an equivalent equality; implemented by subclasses."""
        raise NotImplementedError

    def relate(self, *args, **kwargs):
        """Evaluate ``f0(*args, **kwargs) <op> 0``."""
        return self.opfunc(self.f0(*args, **kwargs), 0)

    def __call__(self, *args, **kwargs):
        # Calling the relation is identical to relating it.
        return self.relate(*args, **kwargs)
class Equality(Relation):
    """A relation constrained to the '=' operator: f(x) = 0."""

    def __init__(self, *args, **kwargs):
        # Whatever the caller passed, an Equality always compares with '='.
        kwargs.update(op=Relations.eq)
        super().__init__(*args, **kwargs)

    def to_eq(self):
        # An equality is already its own equality form.
        return self
class InEquality(Relation):
    """A relation of the form f(x) <op> 0 with a non-equality operator."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def to_eq(self):
        """Conversion to an equality is not defined for inequalities."""
        # BUGFIX: the original used a bare `raise`, which outside an except
        # block fails with "RuntimeError: No active exception to re-raise".
        # Raise an explicit NotImplementedError, matching the base class.
        raise NotImplementedError
if __name__ == '__main__':
    # Ad-hoc demo: exercise both the symbol-string and the custom-callable
    # operator variants on the expression 'x + y' at the origin.
    gt = InEquality('x + y', op='>')
    print(gt([0.0, 0.0]))
    ge = InEquality('x + y', op='>=')
    print(ge([0.0, 0.0]))
    # Operators can also be supplied as arbitrary binary callables.
    le = InEquality('x + y', op=lambda x, y: x <= y)
    print(le([0.0, 0.0]))
    lt = InEquality('x + y', op=lambda x, y: x < y)
    print(lt([0.0, 0.0]))
|
[
"typing.TypeVar",
"dewloosh.core.tools.getasany"
] |
[((523, 572), 'typing.TypeVar', 'TypeVar', (['"""RelationType"""', 'str', 'Relations', 'Callable'], {}), "('RelationType', str, Relations, Callable)\n", (530, 572), False, 'from typing import TypeVar, Callable\n'), ((748, 792), 'dewloosh.core.tools.getasany', 'getasany', (["['op', 'operator']", 'None'], {}), "(['op', 'operator'], None, **kwargs)\n", (756, 792), False, 'from dewloosh.core.tools import getasany\n')]
|
"""
python -m tests.database
"""
try:
import _thread
except ImportError:
import thread as _thread
from bigorm.database import BigQueryDatabaseContext as DatabaseContext
from tests import UNIT_TEST_PROJECT
def _open_context():
    """Enter a fresh database context and request a session while inside it."""
    context = DatabaseContext(project=UNIT_TEST_PROJECT)
    with context:
        DatabaseContext.get_session()
def test_multithread():
    """Opening and closing a context on the main thread must not raise."""
    context = DatabaseContext(project=UNIT_TEST_PROJECT)
    with context:
        pass
# Also exercise _open_context from a second thread at module import time.
# NOTE(review): the thread is never joined, so the process may exit before
# it completes -- confirm this fire-and-forget behaviour is intended.
thread_id = _thread.start_new_thread(_open_context, ())


if __name__ == '__main__':
    test_multithread()
|
[
"bigorm.database.BigQueryDatabaseContext",
"thread.start_new_thread",
"bigorm.database.BigQueryDatabaseContext.get_session"
] |
[((437, 480), 'thread.start_new_thread', '_thread.start_new_thread', (['_open_context', '()'], {}), '(_open_context, ())\n', (461, 480), True, 'import thread as _thread\n'), ((247, 289), 'bigorm.database.BigQueryDatabaseContext', 'DatabaseContext', ([], {'project': 'UNIT_TEST_PROJECT'}), '(project=UNIT_TEST_PROJECT)\n', (262, 289), True, 'from bigorm.database import BigQueryDatabaseContext as DatabaseContext\n'), ((299, 328), 'bigorm.database.BigQueryDatabaseContext.get_session', 'DatabaseContext.get_session', ([], {}), '()\n', (326, 328), True, 'from bigorm.database import BigQueryDatabaseContext as DatabaseContext\n'), ((364, 406), 'bigorm.database.BigQueryDatabaseContext', 'DatabaseContext', ([], {'project': 'UNIT_TEST_PROJECT'}), '(project=UNIT_TEST_PROJECT)\n', (379, 406), True, 'from bigorm.database import BigQueryDatabaseContext as DatabaseContext\n')]
|
from django.db import models
class Region(models.Model):
    """A geographic region, identified by its name (used as the primary key)."""

    region_name = models.CharField(max_length=30, primary_key=True)

    def __str__(self):
        # Display the region by its name.
        return self.region_name
class Country(models.Model):
    """A country within a Region; the name is used as the primary key."""

    country_name = models.CharField(max_length=30, primary_key=True)
    # Deleting a Region cascades to its countries.
    region = models.ForeignKey(Region, on_delete=models.CASCADE)

    def __str__(self):
        # Display the country by its name.
        return self.country_name
class Location(models.Model):
    """A postal address, linked to its Country."""

    # NOTE(review): keeps the historical 'adress' spelling -- renaming the
    # field would require a schema migration, so it is left as-is.
    street_adress = models.TextField()
    postal_code = models.IntegerField()
    city = models.CharField(max_length=30)
    state_province = models.CharField(max_length=30, blank=True)
    # Deleting a Country cascades to its locations.
    country = models.ForeignKey(Country, on_delete=models.CASCADE)

    def __str__(self):
        # Render as a single space-separated address line.
        parts = (self.street_adress, self.postal_code, self.city,
                 self.state_province, self.country)
        return '%s %d %s %s %s' % parts
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey"
] |
[((74, 123), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'primary_key': '(True)'}), '(max_length=30, primary_key=True)\n', (90, 123), False, 'from django.db import models\n'), ((217, 266), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'primary_key': '(True)'}), '(max_length=30, primary_key=True)\n', (233, 266), False, 'from django.db import models\n'), ((277, 328), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Region'], {'on_delete': 'models.CASCADE'}), '(Region, on_delete=models.CASCADE)\n', (294, 328), False, 'from django.db import models\n'), ((426, 444), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (442, 444), False, 'from django.db import models\n'), ((460, 481), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (479, 481), False, 'from django.db import models\n'), ((490, 521), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (506, 521), False, 'from django.db import models\n'), ((540, 583), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(True)'}), '(max_length=30, blank=True)\n', (556, 583), False, 'from django.db import models\n'), ((595, 647), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Country'], {'on_delete': 'models.CASCADE'}), '(Country, on_delete=models.CASCADE)\n', (612, 647), False, 'from django.db import models\n')]
|
from address_extractor import datafile
def load_street_types():
    """Read the bundled street-type list into a lowercase lookup set."""
    return {entry.strip().lower() for entry in datafile.read_street_types()}
# Loaded once at import time so each membership test below is O(1).
STREET_TYPES = load_street_types()


def is_valid(token):
    """Return True if `token` is a recognised street type (case-insensitive)."""
    return token.lower() in STREET_TYPES
|
[
"address_extractor.datafile.read_street_types"
] |
[((118, 146), 'address_extractor.datafile.read_street_types', 'datafile.read_street_types', ([], {}), '()\n', (144, 146), False, 'from address_extractor import datafile\n')]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
QasmSimulator Integration Tests
"""
import unittest
from test.terra import common
from test.terra.backends.qasm_simulator.qasm_method import QasmMethodTests
from test.terra.backends.qasm_simulator.qasm_measure import QasmMeasureTests
from test.terra.backends.qasm_simulator.qasm_reset import QasmResetTests
from test.terra.backends.qasm_simulator.qasm_conditional import QasmConditionalTests
from test.terra.backends.qasm_simulator.qasm_cliffords import QasmCliffordTests
from test.terra.backends.qasm_simulator.qasm_algorithms import QasmAlgorithmTests
from test.terra.backends.qasm_simulator.qasm_extra import QasmExtraTests
class TestQasmStabilizerSimulator(common.QiskitAerTestCase,
                                  QasmMethodTests,
                                  QasmMeasureTests,
                                  QasmResetTests,
                                  QasmConditionalTests,
                                  QasmCliffordTests,
                                  QasmAlgorithmTests,
                                  QasmExtraTests):
    """QasmSimulator stabilizer method tests."""

    # Run every mixed-in test case against the stabilizer simulation method.
    BACKEND_OPTS = {"method": "stabilizer"}
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
|
[
"unittest.main"
] |
[((1672, 1687), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1685, 1687), False, 'import unittest\n')]
|
#######################################
# --- Day 5: Hydrothermal Venture --- #
#######################################
from collections import defaultdict
import AOCUtils
def get_overlap_count(vents, part_two=False):
    """Count grid points covered by at least two vent lines.

    Each vent is a pair of (x, y) endpoints describing a horizontal,
    vertical or 45-degree diagonal segment.  Diagonal segments are only
    considered when `part_two` is True.

    BUGFIX/generalisation: the original hard-coded positive steps and
    relied on the caller pre-sorting the endpoints; a "descending"
    segment made the `while pos != end` walk diverge (infinite loop).
    The step is now derived from the sign of the deltas, so endpoints
    may appear in either order.
    """
    covered = defaultdict(int)
    for start, end in vents:
        dx = end[0] - start[0]
        dy = end[1] - start[1]
        # Diagonals only count in part two.
        if dx != 0 and dy != 0 and not part_two:
            continue
        steps = max(abs(dx), abs(dy))
        step_x = (dx > 0) - (dx < 0)  # sign of dx
        step_y = (dy > 0) - (dy < 0)  # sign of dy
        for i in range(steps + 1):
            covered[(start[0] + i * step_x, start[1] + i * step_y)] += 1
    return sum(count > 1 for count in covered.values())
#######################################
# Puzzle input: one "x1,y1 -> x2,y2" segment per line.
raw_vents = AOCUtils.load_input(5)

vents = []
for raw_vent in raw_vents:
    raw_start, raw_end = raw_vent.split(' -> ')
    start = tuple(map(int, raw_start.split(',')))
    end = tuple(map(int, raw_end.split(',')))
    # Normalise endpoint order (start <= end lexicographically);
    # get_overlap_count relies on this to pick positive step directions.
    vent = tuple(sorted([start, end]))
    vents.append(vent)

print(f'Part 1: {get_overlap_count(vents)}')
print(f'Part 2: {get_overlap_count(vents, part_two=True)}')

AOCUtils.print_time_taken()
|
[
"collections.defaultdict",
"AOCUtils.print_time_taken",
"AOCUtils.load_input"
] |
[((842, 864), 'AOCUtils.load_input', 'AOCUtils.load_input', (['(5)'], {}), '(5)\n', (861, 864), False, 'import AOCUtils\n'), ((1221, 1248), 'AOCUtils.print_time_taken', 'AOCUtils.print_time_taken', ([], {}), '()\n', (1246, 1248), False, 'import AOCUtils\n'), ((231, 247), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (242, 247), False, 'from collections import defaultdict\n')]
|
# -*- coding: utf-8 -*-
from django.views import View
from django.shortcuts import render
from apps.accounts.models.choices import ActionCategory
from apps.accounts.models.pending_action import PendingAction
from apps.accounts.services.auth_service import AuthService
class ConfirmEmailView(View):
    """Renders the email confirmation page."""

    def get(self, request, token, **kwargs):
        """Confirm the pending email action for `token` and render the page."""
        context = {}
        try:
            # Look up the outstanding confirmation matching this token.
            action = PendingAction.objects.get(
                token=token, category=ActionCategory.CONFIRM_EMAIL)
            context['user'] = action.user
            context['next'] = action.extra.get('next')
            AuthService.confirm_email(action)
        except PendingAction.DoesNotExist:
            # Unknown or already-consumed token: render with no user.
            context['user'] = None
        return render(request, 'transactions/confirm_email.html', context)
|
[
"django.shortcuts.render",
"apps.accounts.models.pending_action.PendingAction.objects.get",
"apps.accounts.services.auth_service.AuthService.confirm_email"
] |
[((906, 965), 'django.shortcuts.render', 'render', (['request', '"""transactions/confirm_email.html"""', 'context'], {}), "(request, 'transactions/confirm_email.html', context)\n", (912, 965), False, 'from django.shortcuts import render\n'), ((519, 596), 'apps.accounts.models.pending_action.PendingAction.objects.get', 'PendingAction.objects.get', ([], {'token': 'token', 'category': 'ActionCategory.CONFIRM_EMAIL'}), '(token=token, category=ActionCategory.CONFIRM_EMAIL)\n', (544, 596), False, 'from apps.accounts.models.pending_action import PendingAction\n'), ((769, 810), 'apps.accounts.services.auth_service.AuthService.confirm_email', 'AuthService.confirm_email', (['pending_action'], {}), '(pending_action)\n', (794, 810), False, 'from apps.accounts.services.auth_service import AuthService\n')]
|
"""
Notebook
"""
import datetime
# Module-wide counter used to hand out sequential note ids.
last_id = 0


class Note:
    """A short memo with optional tags and an auto-assigned sequential id."""

    def __init__(self, memo, tags=''):
        self.memo = memo
        self.tags = tags
        self.creation_date = datetime.date.today()
        # Bump the module-level counter to produce a unique id.
        global last_id
        last_id += 1
        self.id = last_id

    def match(self, filter):
        """Return True if `filter` occurs in the memo or tags (case-sensitive)."""
        # NOTE: `filter` shadows the builtin; kept for API compatibility.
        return any(filter in text for text in (self.memo, self.tags))
# n1 = Note("hello")
# print(n1.id)
# print(n1.match('Hello'))
class Notebook:
    """A collection of notes that can be created, modified and searched."""

    def __init__(self):
        self.notes = []

    def new_note(self, memo, tags=''):
        """Create a new Note and add it to the notebook."""
        self.notes.append(Note(memo, tags))

    def _find_note(self, note_id):
        """Return the note with the given id, or None.

        Ids are compared as strings so callers may pass either an int
        or its string form.
        """
        for note in self.notes:
            if str(note.id) == str(note_id):
                return note
        return None

    def modify_memo(self, note_id, memo):
        """Change the memo of the note with `note_id`; return True on success."""
        note = self._find_note(note_id)
        if note:
            note.memo = memo
            return True
        return False

    def modify_tags(self, note_id, tags):
        """Change the tags of the note with `note_id`; return True on success.

        FIX (consistency): previously compared ids with a raw `==` (so a
        string id that worked for modify_memo silently failed here) and
        reported no success/failure; now routed through _find_note,
        mirroring modify_memo.
        """
        note = self._find_note(note_id)
        if note:
            note.tags = tags
            return True
        return False

    def search(self, filter):
        """Return all notes whose memo or tags match `filter`."""
        return [note for note in self.notes if note.match(filter)]
|
[
"datetime.date.today"
] |
[((177, 198), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (196, 198), False, 'import datetime\n')]
|
#
# @@@ START COPYRIGHT @@@
#
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
#
# @@@ END COPYRIGHT @@@
#
#
import os
from xml.dom import minidom
dcsconfig_dir = os.environ.get('DCS_CONF_DIR')
if not dcsconfig_dir:
name = os.environ.get('DCS_INSTALL_DIR')
dcsconfig_dir=name+"/conf"
doc = minidom.parse(dcsconfig_dir+"/dcs-site.xml")
props = doc.getElementsByTagName("property")
for prop in props:
pname = prop.getElementsByTagName("name")[0]
if (pname.firstChild.data == "dcs.master.port"):
pvalue = prop.getElementsByTagName("value")[0]
dcsPort=pvalue.firstChild.data
print("%s" % (dcsPort))
if (pname.firstChild.data == "dcs.master.floating.ip.external.ip.address"):
pvalue = prop.getElementsByTagName("value")[0]
float_ipaddress=pvalue.firstChild.data
print("%s" % (float_ipaddress))
if (pname.firstChild.data == "dcs.master.floating.ip.external.interface"):
pvalue = prop.getElementsByTagName("value")[0]
float_interface=pvalue.firstChild.data
print("%s" % (float_interface))
|
[
"os.environ.get",
"xml.dom.minidom.parse"
] |
[((886, 916), 'os.environ.get', 'os.environ.get', (['"""DCS_CONF_DIR"""'], {}), "('DCS_CONF_DIR')\n", (900, 916), False, 'import os\n'), ((1022, 1068), 'xml.dom.minidom.parse', 'minidom.parse', (["(dcsconfig_dir + '/dcs-site.xml')"], {}), "(dcsconfig_dir + '/dcs-site.xml')\n", (1035, 1068), False, 'from xml.dom import minidom\n'), ((949, 982), 'os.environ.get', 'os.environ.get', (['"""DCS_INSTALL_DIR"""'], {}), "('DCS_INSTALL_DIR')\n", (963, 982), False, 'import os\n')]
|
"""
How to reference supporting evidence for some object in the database.
See: "Metadata in PyOpenWorm" for discussion on semantics of what giving
evidence for an object means.
"""
from __future__ import absolute_import
from __future__ import print_function
import PyOpenWorm as P
from PyOpenWorm.evidence import Evidence
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.document import Document
from PyOpenWorm.data import Data
from PyOpenWorm.context import Context
# Create dummy database configuration.
d = Data()
# Connect to database with dummy configuration
conn = P.connect(conf=d)
ctx = Context(ident='http://example.org/data', conf=conn.conf)
evctx = Context(ident='http://example.org/meta', conf=conn.conf)
# Create a new Neuron object to work with
n = ctx(Neuron)(name='AVAL')
# Create a new Evidence object with `doi` and `pmid` fields populated.
# See `PyOpenWorm/evidence.py` for other available fields.
d = evctx(Document)(key='Anonymous2011', doi='125.41.3/ploscompbiol', pmid='12345678')
e = evctx(Evidence)(key='Anonymous2011', reference=d)
# Evidence object asserts something about the enclosed dataObject.
# Here we add a receptor to the Neuron we made earlier, and "assert it".
# As the discussion (see top) reads, this might be asserting the existence of
# receptor UNC-8 on neuron AVAL.
n.receptor('UNC-8')
e.supports(ctx.rdf_object)
# Save the Neuron and Evidence objects to the database.
ctx.save_context()
evctx.save_context()
# What does my evidence object contain?
for e_i in evctx.stored(Evidence)().load():
print(e_i.reference(), e_i.supports())
# Disconnect from the database.
P.disconnect(conn)
|
[
"PyOpenWorm.disconnect",
"PyOpenWorm.context.Context",
"PyOpenWorm.data.Data",
"PyOpenWorm.connect"
] |
[((518, 524), 'PyOpenWorm.data.Data', 'Data', ([], {}), '()\n', (522, 524), False, 'from PyOpenWorm.data import Data\n'), ((580, 597), 'PyOpenWorm.connect', 'P.connect', ([], {'conf': 'd'}), '(conf=d)\n', (589, 597), True, 'import PyOpenWorm as P\n'), ((605, 661), 'PyOpenWorm.context.Context', 'Context', ([], {'ident': '"""http://example.org/data"""', 'conf': 'conn.conf'}), "(ident='http://example.org/data', conf=conn.conf)\n", (612, 661), False, 'from PyOpenWorm.context import Context\n'), ((670, 726), 'PyOpenWorm.context.Context', 'Context', ([], {'ident': '"""http://example.org/meta"""', 'conf': 'conn.conf'}), "(ident='http://example.org/meta', conf=conn.conf)\n", (677, 726), False, 'from PyOpenWorm.context import Context\n'), ((1629, 1647), 'PyOpenWorm.disconnect', 'P.disconnect', (['conn'], {}), '(conn)\n', (1641, 1647), True, 'import PyOpenWorm as P\n')]
|
from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class LoginForm(FlaskForm):
username = TextField("Login", validators=[DataRequired(), Length(min=6, max=20)])
password = PasswordField("Password", validators=[DataRequired(), Length(min=6, max=48)])
class RegisterForm(FlaskForm):
username = TextField("Login", validators=[DataRequired(), Length(min=6, max=20)])
email = TextField("Email", validators=[DataRequired(), Email()])
password = PasswordField(
"Password", validators=[DataRequired(), Length(min=6, max=48)]
)
confirm = PasswordField(
"Repeat Password", validators=[DataRequired(), EqualTo("password")]
)
fullname = TextField("Full Name", validators=[DataRequired()])
|
[
"wtforms.validators.Email",
"wtforms.validators.DataRequired",
"wtforms.validators.Length",
"wtforms.validators.EqualTo"
] |
[((227, 241), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (239, 241), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((243, 264), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)', 'max': '(20)'}), '(min=6, max=20)\n', (249, 264), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((321, 335), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (333, 335), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((337, 358), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)', 'max': '(48)'}), '(min=6, max=48)\n', (343, 358), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((444, 458), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (456, 458), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((460, 481), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)', 'max': '(20)'}), '(min=6, max=20)\n', (466, 481), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((528, 542), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (540, 542), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((544, 551), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (549, 551), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((618, 632), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (630, 632), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((634, 655), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)', 'max': '(48)'}), '(min=6, max=48)\n', (640, 655), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((734, 748), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (746, 748), False, 'from wtforms.validators import DataRequired, 
Length, Email, EqualTo\n'), ((750, 769), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (757, 769), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((829, 843), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (841, 843), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n')]
|
from setuptools import setup
setup(name='rcssl',
version='1.5',
description='Install Let\'s Encrypt SSL on RunCloud servers the easy way.',
author="Rehmat",
author_email="<EMAIL>",
url="https://github.com/rehmatworks/runcloud-letsencrypt",
license="MIT",
entry_points={
'console_scripts': [
'rcssl = rcssl.rcssl:main'
],
},
packages=[
'rcssl'
],
install_requires=[
'python-nginx'
]
)
|
[
"setuptools.setup"
] |
[((30, 396), 'setuptools.setup', 'setup', ([], {'name': '"""rcssl"""', 'version': '"""1.5"""', 'description': '"""Install Let\'s Encrypt SSL on RunCloud servers the easy way."""', 'author': '"""Rehmat"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/rehmatworks/runcloud-letsencrypt"""', 'license': '"""MIT"""', 'entry_points': "{'console_scripts': ['rcssl = rcssl.rcssl:main']}", 'packages': "['rcssl']", 'install_requires': "['python-nginx']"}), '(name=\'rcssl\', version=\'1.5\', description=\n "Install Let\'s Encrypt SSL on RunCloud servers the easy way.", author=\n \'Rehmat\', author_email=\'<EMAIL>\', url=\n \'https://github.com/rehmatworks/runcloud-letsencrypt\', license=\'MIT\',\n entry_points={\'console_scripts\': [\'rcssl = rcssl.rcssl:main\']},\n packages=[\'rcssl\'], install_requires=[\'python-nginx\'])\n', (35, 396), False, 'from setuptools import setup\n')]
|
import re
import json
import sys
import os
args = sys.argv
if (len(args) < 2):
sys.exit(1)
path = args[1]
if(path[-1:] == "/"):
path = path[:-1]
result_filedata_list = []
count = 0
while True:
# Decectory exist check
dirpath = path + '/command/' + str(count)
if os.path.isdir(dirpath):
count +=1
else:
break
filepath = dirpath + '/stdout.txt'
if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
with open(filepath) as file_object:
reader = json.load(file_object)
if isinstance(reader, list):
rows = reader
else:
rows = []
rows.append(reader)
for row in rows:
if count % 2 == 1:
encrypt_table = {}
for param_key, param_value in row.items():
encrypt_table[param_key] = param_value
else:
filedata_table = {}
for param_key, param_value in row.items():
if param_key == 'Path':
index = param_value.find('::')
if index != -1:
param_value = param_value[(index + 2):].strip()
if param_value in encrypt_table:
filedata_table['Encrypt'] = encrypt_table[param_value]
filedata_table['Name'] = param_value
elif param_key == 'AccessToString':
filedata_table[param_key] = param_value.split('\n')
else:
filedata_table[param_key] = param_value
if len(filedata_table) > 0:
filedata_table['Action'] = 'file'
result_filedata_list.append(filedata_table)
result = {}
target_parameter_root_key = 'VAR_WIN_FileProtectionSetting'
result[target_parameter_root_key] = result_filedata_list
print(json.dumps(result))
|
[
"json.load",
"os.path.isdir",
"os.path.getsize",
"json.dumps",
"os.path.isfile",
"sys.exit"
] |
[((84, 95), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (92, 95), False, 'import sys\n'), ((286, 308), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (299, 308), False, 'import os\n'), ((2044, 2062), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2054, 2062), False, 'import json\n'), ((399, 423), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (413, 423), False, 'import os\n'), ((428, 453), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (443, 453), False, 'import os\n'), ((524, 546), 'json.load', 'json.load', (['file_object'], {}), '(file_object)\n', (533, 546), False, 'import json\n')]
|
#!/usr/bin/python
# coding=utf-8
import sys
import csv
import datetime
#run with python timezone.py 8.30 pdt eest
# time timezone timezone-of-conversion
def main():
# check if correct ammount of arguments doesnt work otherwise
if len(sys.argv) != 4:
print('Incorrect amount of arguments.')
sys.exit(1)
target_time = -99
# get the time given in UTC
with open('timezones.csv', newline='') as csvfile:
timezones = csv.reader(csvfile, delimiter=',')
utc_time = get_utc_time(timezones)
target_time = utc_time
#get the time difference of the target to UTC
with open('timezones.csv', newline='') as csvfile:
timezones = csv.reader(csvfile, delimiter=',')
split_time = get_target_time_dif(timezones)
#split utc time zone to hours and minutes in case of time zones with minutes
time_dif_h = float(split_time[0])
time_dif_m = 0
#check if there was a split before trying to get minutes
if len(split_time) == 2:
time_dif_m = float(split_time[1])
#apply timezone time difference
target_time = target_time + datetime.timedelta(hours=time_dif_h, minutes=time_dif_m)
to_print = sys.argv[1] + ' ' + sys.argv[2].upper() + ' is ' + target_time.strftime('%H.%M') + ' ' + sys.argv[3].upper()
print(to_print)
def get_utc_time(timezones):
for row in timezones:
#check for timezone argument against csv data
if row[0].lower() == sys.argv[2].lower():
utc_timezone = row[2]
utc_time_dif = utc_timezone[3:]
entered_time = datetime.datetime.strptime(sys.argv[1], '%H.%M')
#split utc time zone to hours and minutes in case of time zones with minutes
split_time = utc_time_dif.split('.')
time_dif_h = 0 - float(split_time[0])
time_dif_m = 0
#check if there was a split before trying to get minutes
if len(split_time) == 2:
time_dif_m = float(split_time[1])
#apply timezone time difference
utc_time = entered_time + datetime.timedelta(hours=time_dif_h, minutes=time_dif_m)
return utc_time
#if it get's here timezone code was wrong
print('First timezone not found')
sys.exit(1)
def get_target_time_dif(timezones):
#check for timezone argument against csv data
for row in timezones:
if row[0].lower() == sys.argv[3].lower():
utc_timezone = row[2]
utc_time_dif = utc_timezone[3:]
split_time = utc_time_dif.split('.')
return split_time
#if it get's here timezone code was wrong
print('Second timezone not found')
sys.exit(1)
if __name__ == "__main__":
main()
|
[
"datetime.datetime.strptime",
"csv.reader",
"datetime.timedelta",
"sys.exit"
] |
[((2318, 2329), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2326, 2329), False, 'import sys\n'), ((2739, 2750), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2747, 2750), False, 'import sys\n'), ((324, 335), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (332, 335), False, 'import sys\n'), ((467, 501), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (477, 501), False, 'import csv\n'), ((702, 736), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (712, 736), False, 'import csv\n'), ((1160, 1216), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'time_dif_h', 'minutes': 'time_dif_m'}), '(hours=time_dif_h, minutes=time_dif_m)\n', (1178, 1216), False, 'import datetime\n'), ((1634, 1682), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['sys.argv[1]', '"""%H.%M"""'], {}), "(sys.argv[1], '%H.%M')\n", (1660, 1682), False, 'import datetime\n'), ((2145, 2201), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'time_dif_h', 'minutes': 'time_dif_m'}), '(hours=time_dif_h, minutes=time_dif_m)\n', (2163, 2201), False, 'import datetime\n')]
|
# Script wh helps to plot Figures 3A and 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Include all GENES, those containing Indels and SNVS (that's why I repeat this step of loading "alleles" dataframe) This prevents badly groupping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
mask = (dff['N_alleles'] != 0) & (dff['Phenotype_G6PD'] != 'G6PD_Normal')
dff_valid = dff[mask]
dff['N_phenotypes'] = 0
dff['Phenotype'] = dff['Phenotype'].apply(lambda x: ','.join([i for i in x.split(',') if i != 'G6PD_Normal']))
dff.loc[mask, 'N_phenotypes'] = dff_valid['Phenotype'].apply(lambda x: len(x.split(',')))
gf = dff.groupby('N_phenotypes')['sample'].count()
GF = {'Nr. phenotypes': list(gf.index), 'Count':100*(gf.values / gf.sum()), 'Group':['(N=5001)']*len(gf)}
GF = pd.DataFrame(GF)
tf = GF.iloc[0:4]
d = {'Nr. phenotypes':'[4,7]', 'Count':sum(GF['Count'].iloc[4:]), 'Group':'(N=5001)'}
tf = tf.append(d, ignore_index=True)
bottom = 0
f, ax1 = plt.subplots(figsize=(2,4))
f.set_size_inches(2.7, 4.0)
for i,j, in zip(list(tf['Count'].values), list(tf['Nr. phenotypes'])):
ax1.bar('N=5001',i,label=j, bottom = bottom, edgecolor = 'black')
bottom = bottom + i
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1], loc='center left',bbox_to_anchor=(1.0, 0.5), title='Nr.alleles', fontsize=14,title_fontsize=14) # title = TITLE,
plt.ylabel('%',fontsize=14)
plt.yticks(np.arange(0, 100,10 ))
plt.subplots_adjust(left=0.23, bottom=0.1, right=0.5, top=0.95, wspace=0.14, hspace=0.24)
plt.savefig('/path/to/Figures/Figure_3A_nrphenotypes.png',format = 'png', dpi = 500)
plt.show()
####################################### FIGURE 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Include all GENES, those containing Indels and SNVS (that's why I repeat this step of loading "alleles" dataframe) This prevents badly groupping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
#dff = dff.loc[dff['from'] == 'ESPAÑA']
mask = (dff['N_alleles'] != 0) & (dff['Phenotype_G6PD'] != 'G6PD_Normal')
dff_valid = dff[mask]
dff['N_phenotypes'] = 0
dff['Phenotype'] = dff['Phenotype'].apply(lambda x: ','.join([i for i in x.split(',') if i != 'G6PD_Normal']))
dff.loc[mask, 'N_phenotypes'] = dff_valid['Phenotype'].apply(lambda x: len(x.split(',')))
GENES.sort()
pct_phenot = list()
for gene in GENES:
pct_phenot.append(100*(dff.groupby('Phenotype_' + gene)['sample'].count().values.sum() / len(dff)))
f, ax1 = plt.subplots(figsize=(6,3.5))
plt.grid(axis='x')
plt.barh(GENES, [100]*len(GENES), align='center', height=.35, color='tab:grey',label='Actionable phenotype')
plt.barh(GENES, pct_phenot, align='center', height=.35, color='tab:red',label='Actionable phenotype',edgecolor = 'k')
plt.xlim([0,100])
plt.xlabel('% population with pharmacogenetic phenotype (n=5001)', fontsize=12)
plt.subplots_adjust(left=0.130, bottom=0.140, right=0.945, top=0.97, wspace=0.14, hspace=0.24)
#plt.savefig('/path/to/Figures/Fig3B.png',format = 'png', dpi = 500)
plt.savefig('Fig3B.png',format = 'png', dpi = 500)
plt.show()
'''### Figure 2A
cols = ['N_alleles','SNV_N_alleles','INDELS_N_alleles']
gf = df.groupby(cols[0])['sample'].count().reset_index()
gf = gf.rename(columns={'sample':cols[0] + '_all'})
dgf = dict(zip(list(df.groupby(cols[1])['sample'].count().index), list(df.groupby(cols[1])['sample'].count().values)))
plt.subplots_adjust(left=0.10, bottom=0.08, right=0.85, top=0.90, wspace=0.14, hspace=0.24)
plt.xticks(rotation=0)
plt.ylim(0,100)
plt.xlabel('')
plt.show()
plt.xticks(rotation=90)
plt.ylim(0,100)
plt.ylabel('%')
plt.show()'''
|
[
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.barh",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((309, 363), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/Alleles_20201228.csv"""'], {'sep': '"""\t"""'}), "('/path/to/Alleles_20201228.csv', sep='\\t')\n", (320, 363), True, 'import pandas as pd\n'), ((629, 686), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/phenotypes_20210107.csv"""'], {'sep': '"""\t"""'}), "('/path/to/phenotypes_20210107.csv', sep='\\t')\n", (640, 686), True, 'import pandas as pd\n'), ((1170, 1186), 'pandas.DataFrame', 'pd.DataFrame', (['GF'], {}), '(GF)\n', (1182, 1186), True, 'import pandas as pd\n'), ((1350, 1378), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2, 4)'}), '(figsize=(2, 4))\n', (1362, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1802), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""%"""'], {'fontsize': '(14)'}), "('%', fontsize=14)\n", (1784, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1929), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.23)', 'bottom': '(0.1)', 'right': '(0.5)', 'top': '(0.95)', 'wspace': '(0.14)', 'hspace': '(0.24)'}), '(left=0.23, bottom=0.1, right=0.5, top=0.95, wspace=0.14,\n hspace=0.24)\n', (1855, 1929), True, 'import matplotlib.pyplot as plt\n'), ((1926, 2011), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/path/to/Figures/Figure_3A_nrphenotypes.png"""'], {'format': '"""png"""', 'dpi': '(500)'}), "('/path/to/Figures/Figure_3A_nrphenotypes.png', format='png',\n dpi=500)\n", (1937, 2011), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2338, 2392), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/Alleles_20201228.csv"""'], {'sep': '"""\t"""'}), "('/path/to/Alleles_20201228.csv', sep='\\t')\n", (2349, 2392), True, 'import pandas as pd\n'), ((2657, 2714), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/phenotypes_20210107.csv"""'], {'sep': '"""\t"""'}), "('/path/to/phenotypes_20210107.csv', 
sep='\\t')\n", (2668, 2714), True, 'import pandas as pd\n'), ((3242, 3272), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 3.5)'}), '(figsize=(6, 3.5))\n', (3254, 3272), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3290), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (3280, 3290), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3522), 'matplotlib.pyplot.barh', 'plt.barh', (['GENES', 'pct_phenot'], {'align': '"""center"""', 'height': '(0.35)', 'color': '"""tab:red"""', 'label': '"""Actionable phenotype"""', 'edgecolor': '"""k"""'}), "(GENES, pct_phenot, align='center', height=0.35, color='tab:red',\n label='Actionable phenotype', edgecolor='k')\n", (3408, 3522), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3536), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 100]'], {}), '([0, 100])\n', (3526, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3615), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""% population with pharmacogenetic phenotype (n=5001)"""'], {'fontsize': '(12)'}), "('% population with pharmacogenetic phenotype (n=5001)', fontsize=12)\n", (3546, 3615), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3713), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.13)', 'bottom': '(0.14)', 'right': '(0.945)', 'top': '(0.97)', 'wspace': '(0.14)', 'hspace': '(0.24)'}), '(left=0.13, bottom=0.14, right=0.945, top=0.97, wspace=\n 0.14, hspace=0.24)\n', (3635, 3713), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3827), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Fig3B.png"""'], {'format': '"""png"""', 'dpi': '(500)'}), "('Fig3B.png', format='png', dpi=500)\n", (3791, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3832, 3842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3840, 3842), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1834), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (1822, 
1834), True, 'import numpy as np\n')]
|
from plot_common import output_graph
# results of N = 5, 10, 15, 20
Ns = [5, 10, 15, 20]
large_dataset_precisions = {
'SKBPR-BC': (Ns, [0.0366, 0.0297, 0.0263, 0.0244], [0.0004, 0.0003, 0.0002, 0.0002], [0.0002, 0.0003, 0.0003, 0.0003]),
'SKBPR-BC-SEQ': (Ns, [0.0363, 0.0293, 0.0260, 0.0241], [0.0001, 0.0002, 0.0003, 0.0002], [0.0003, 0.0002, 0.0003, 0.0002]),
}
large_dataset_recalls = {
'SKBPR-BC': (Ns, [0.0844, 0.1139, 0.1367, 0.1559], [0.0011, 0.0010, 0.0010, 0.0006], [0.0011, 0.0011, 0.0006, 0.0004]),
'SKBPR-BC-SEQ': (Ns, [0.0855, 0.1154, 0.1384, 0.1578], [0.0005, 0.0006, 0.0007, 0.0006], [0.0007, 0.0006, 0.0009, 0.0010]),
}
if __name__ == '__main__':
precision_axis = [0, 25, 0.020, 0.040]
recall_axis = [0, 25, 0.06, 0.20]
# draw precision and recall on on graph
# omit results of small datasets
mixed_datasets = [
('Precision', 'Large', precision_axis, large_dataset_precisions),
('Recall', 'Large', recall_axis, large_dataset_recalls),
]
output_graph(mixed_datasets, 'output/sequence_precision_recall.png')
|
[
"plot_common.output_graph"
] |
[((1017, 1085), 'plot_common.output_graph', 'output_graph', (['mixed_datasets', '"""output/sequence_precision_recall.png"""'], {}), "(mixed_datasets, 'output/sequence_precision_recall.png')\n", (1029, 1085), False, 'from plot_common import output_graph\n')]
|
# OpenNero will execute ModMain when this mod is loaded
from Maze.client import ClientMain
def ModMain(mode = ""):
ClientMain(mode)
def StartMe():
from Maze.module import getMod
getMod().set_speedup(1.0) # full speed ahead
getMod().start_sarsa() # start an algorithm for headless mode
|
[
"Maze.module.getMod",
"Maze.client.ClientMain"
] |
[((124, 140), 'Maze.client.ClientMain', 'ClientMain', (['mode'], {}), '(mode)\n', (134, 140), False, 'from Maze.client import ClientMain\n'), ((200, 208), 'Maze.module.getMod', 'getMod', ([], {}), '()\n', (206, 208), False, 'from Maze.module import getMod\n'), ((250, 258), 'Maze.module.getMod', 'getMod', ([], {}), '()\n', (256, 258), False, 'from Maze.module import getMod\n')]
|
# Source Generated with Decompyle++
# File: bank_selection_component.pyc (Python 2.5)
from __future__ import absolute_import
from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import control_list, forward_control, ButtonControl
from item_lister_component import ItemListerComponent, ItemProvider
from bank_definitions import MAIN_KEY
class BankProvider(ItemProvider, SlotManager):
def __init__(self, bank_registry = None, banking_info = None, *a, **k):
if not bank_registry is not None:
raise AssertionError
if not banking_info is not None:
raise AssertionError
super(BankProvider, self).__init__(*a, **a)
self._bank_registry = bank_registry
self._banking_info = banking_info
self._device = None
self._on_device_bank_changed.subject = bank_registry
def set_device(self, device):
if self._device != device:
self._device = device
self.notify_items()
self.notify_selected_item()
def device(self):
return self._device
device = property(device)
def items(self):
nesting_level = 0
bank_names = self.internal_bank_names(self._banking_info.device_bank_names(self._device))
continue
return [ (NamedTuple(name = b), nesting_level) for b in bank_names ]
items = property(items)
def selected_item(self):
selected = None
if liveobj_valid(self._device) and len(self.items) > 0:
bank_index = self._bank_registry.get_device_bank(self._device)
selected = self.items[bank_index][0]
return selected
selected_item = property(selected_item)
def select_item(self, item):
nesting_level = 0
bank_index = self.items.index((item, nesting_level))
self._bank_registry.set_device_bank(self._device, bank_index)
def _on_device_bank_changed(self, device, _):
if device == self._device:
self.notify_selected_item()
_on_device_bank_changed = listens('device_bank')(_on_device_bank_changed)
def internal_bank_names(self, original_bank_names):
num_banks = len(original_bank_names)
if num_banks > 0:
pass
1
return [
MAIN_KEY]
class EditModeOptionsComponent(Component):
option_buttons = control_list(ButtonControl, color = 'ItemNavigation.ItemSelected', control_count = 8)
def __init__(self, back_callback = nop, device_options_provider = None, *a, **k):
super(EditModeOptionsComponent, self).__init__(*a, **a)
self._device = None
self._device_options_provider = device_options_provider
self._back = back_callback
self._EditModeOptionsComponent__on_device_changed.subject = device_options_provider
self._EditModeOptionsComponent__on_options_changed.subject = device_options_provider
self._update_button_feedback()
def _option_for_button(self, button):
options = self.options
if len(options) > button.index - 1:
pass
1
def option_buttons(self, button):
if button.index == 0:
self._back()
else:
option = self._option_for_button(button)
if option:
try:
option.trigger()
except RuntimeError:
pass
option_buttons = option_buttons.pressed(option_buttons)
def _set_device(self, device):
self._device = device
self._EditModeOptionsComponent__on_device_name_changed.subject = device
self.notify_device()
def device(self):
if liveobj_valid(self._device):
pass
1
return ''
device = listenable_property(device)
def options(self):
if self._device_options_provider:
pass
1
return []
options = listenable_property(options)
def __on_device_changed(self):
self._update_device()
_EditModeOptionsComponent__on_device_changed = listens('device')(__on_device_changed)
def __on_device_name_changed(self):
self.notify_device()
_EditModeOptionsComponent__on_device_name_changed = listens('name')(__on_device_name_changed)
def __on_options_changed(self):
self._EditModeOptionsComponent__on_active_options_changed.replace_subjects(self.options)
self._update_button_feedback()
self.notify_options()
_EditModeOptionsComponent__on_options_changed = listens('options')(__on_options_changed)
def __on_active_options_changed(self, _):
self._update_button_feedback()
_EditModeOptionsComponent__on_active_options_changed = listens_group('active')(__on_active_options_changed)
def _update_button_feedback(self):
for button in self.option_buttons:
if button.index > 0:
option = self._option_for_button(button)
if option:
pass
has_active_option = option.active
if has_active_option:
pass
1
button.color = 'ItemNotSelected' + 'NoItem'
continue
'ItemNavigation.'
def _update_device(self):
self._set_device(self._device_options_provider.device())
def update(self):
super(EditModeOptionsComponent, self).update()
if self.is_enabled():
self._update_device()
class BankSelectionComponent(ItemListerComponent):
__events__ = ('back',)
select_buttons = forward_control(ItemListerComponent.select_buttons)
def __init__(self, bank_registry = None, banking_info = None, device_options_provider = None, *a, **k):
self._bank_provider = BankProvider(bank_registry = bank_registry, banking_info = banking_info)
super(BankSelectionComponent, self).__init__(item_provider = self._bank_provider, *a, **a)
self._options = self.register_component(EditModeOptionsComponent(back_callback = self.notify_back, device_options_provider = device_options_provider))
self.register_disconnectable(self._bank_provider)
def select_buttons(self, button):
self._bank_provider.select_item(self.items[button.index].item)
select_buttons = select_buttons.checked(select_buttons)
def set_option_buttons(self, buttons):
self._options.option_buttons.set_control_element(buttons)
def set_device(self, item):
if item != self._bank_provider.device:
pass
1
device = None
self._bank_provider.set_device(device)
def options(self):
return self._options
options = property(options)
|
[
"ableton.v2.base.NamedTuple",
"ableton.v2.control_surface.control.control_list",
"ableton.v2.base.liveobj_valid",
"ableton.v2.base.listenable_property",
"ableton.v2.base.listens_group",
"ableton.v2.control_surface.control.forward_control",
"ableton.v2.base.listens"
] |
[((2548, 2633), 'ableton.v2.control_surface.control.control_list', 'control_list', (['ButtonControl'], {'color': '"""ItemNavigation.ItemSelected"""', 'control_count': '(8)'}), "(ButtonControl, color='ItemNavigation.ItemSelected',\n control_count=8)\n", (2560, 2633), False, 'from ableton.v2.control_surface.control import control_list, forward_control, ButtonControl\n'), ((4001, 4028), 'ableton.v2.base.listenable_property', 'listenable_property', (['device'], {}), '(device)\n', (4020, 4028), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((4159, 4187), 'ableton.v2.base.listenable_property', 'listenable_property', (['options'], {}), '(options)\n', (4178, 4187), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((5869, 5920), 'ableton.v2.control_surface.control.forward_control', 'forward_control', (['ItemListerComponent.select_buttons'], {}), '(ItemListerComponent.select_buttons)\n', (5884, 5920), False, 'from ableton.v2.control_surface.control import control_list, forward_control, ButtonControl\n'), ((2235, 2257), 'ableton.v2.base.listens', 'listens', (['"""device_bank"""'], {}), "('device_bank')\n", (2242, 2257), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((3913, 3940), 'ableton.v2.base.liveobj_valid', 'liveobj_valid', (['self._device'], {}), '(self._device)\n', (3926, 3940), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((4310, 4327), 'ableton.v2.base.listens', 'listens', (['"""device"""'], {}), "('device')\n", (4317, 4327), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((4480, 4495), 'ableton.v2.base.listens', 'listens', (['"""name"""'], {}), 
"('name')\n", (4487, 4495), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((4782, 4800), 'ableton.v2.base.listens', 'listens', (['"""options"""'], {}), "('options')\n", (4789, 4800), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((4973, 4996), 'ableton.v2.base.listens_group', 'listens_group', (['"""active"""'], {}), "('active')\n", (4986, 4996), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((1614, 1641), 'ableton.v2.base.liveobj_valid', 'liveobj_valid', (['self._device'], {}), '(self._device)\n', (1627, 1641), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n'), ((1457, 1475), 'ableton.v2.base.NamedTuple', 'NamedTuple', ([], {'name': 'b'}), '(name=b)\n', (1467, 1475), False, 'from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop\n')]
|
import torch
from torch.nn import Module, Linear, Softmax, CosineSimilarity, Embedding
class KeyValueMemoryNet(Module):
"""Defines PyTorch model for Key-Value Memory Network.
Key-Value Memory Networks (KV-MemNN) are described here: https://arxiv.org/pdf/1606.03126.pdf
Goal is to read correct response from memory, given query. Memory slots are
defined as pairs (k, v) where k is query and v is correct response. This
implementation of KV-MemNN uses separate encodings for input query and
possible candidates. Instead of using cross-entropy loss, we use cosine
embedding loss where we measure cosine distance between read responses and
candidate responses. We use only one 'hop' because more hops don't provide
any improvements.
This implementation supports batch training.
"""
def __init__(self, vocab_size, embedding_dim):
"""Initializes model layers.
Args:
vocab_size (int): Number of tokens in corpus. This is used to init embeddings.
embedding_dim (int): Dimension of embedding vector.
"""
super().__init__()
self._embedding_dim = embedding_dim
self.encoder_in = Encoder(vocab_size, embedding_dim)
self.encoder_out = Encoder(vocab_size, embedding_dim)
self.linear = Linear(embedding_dim, embedding_dim, bias=False)
self.similarity = CosineSimilarity(dim=2)
self.softmax = Softmax(dim=2)
def forward(self, query, response, memory_keys, memory_values, candidates):
"""Performs forward step.
Args:
query (torch.Tensor): Tensor with shape of (NxM) where N is batch size,
and M is length of padded query.
response (torch.Tensor): Tensor with same shape as query denoting correct responses.
memory_keys (torch.Tensor): Relevant memory keys for given query batch. Shape
of tensor is (NxMxD) where N is batch size, M is number of relevant memories
per query and D is length of memories.
memory_values (torch.Tensor): Relevant memory values for given query batch
with same shape as memory_keys.
candidates (torch.Tensor): Possible responses for query batch with shape
(NxMxD) where N is batch size, M is number of candidates per query and
D is length of candidates.
"""
view_shape = (len(query), 1, self._embedding_dim)
query_embedding = self.encoder_in(query).view(*view_shape)
memory_keys_embedding = self.encoder_in(memory_keys, mean_axis=2)
memory_values_embedding = self.encoder_in(memory_values, mean_axis=2)
similarity = self.similarity(query_embedding, memory_keys_embedding).unsqueeze(1)
softmax = self.softmax(similarity)
value_reading = torch.matmul(softmax, memory_values_embedding)
result = self.linear(value_reading)
candidates_embedding = self.encoder_out(candidates, mean_axis=2)
train_time = response is not None
if train_time:
response_embedding = self.encoder_out(response).view(*view_shape)
# First candidate response is correct one.
# This makes computing loss easier
candidates_embedding[:, 0, :] = response_embedding[:, 0, :]
x_encoded = torch.cat([result] * candidates.shape[1], dim=1)
y_encoded = candidates_embedding
return x_encoded, y_encoded
class Encoder(Module):
"""Embeds queries, memories or responses into vectors."""
def __init__(self, num_embeddings, embedding_dim):
"""Initializes embedding layer.
Args:
num_embeddings (int): Number of possible embeddings.
embedding_dim (int): Dimension of embedding vector.
"""
super().__init__()
self.embedding = Embedding(num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
max_norm=5,
padding_idx=1)
def forward(self, tokens, mean_axis=1):
return self.embedding(tokens).mean(mean_axis)
|
[
"torch.nn.Embedding",
"torch.cat",
"torch.nn.CosineSimilarity",
"torch.nn.Softmax",
"torch.nn.Linear",
"torch.matmul"
] |
[((1318, 1366), 'torch.nn.Linear', 'Linear', (['embedding_dim', 'embedding_dim'], {'bias': '(False)'}), '(embedding_dim, embedding_dim, bias=False)\n', (1324, 1366), False, 'from torch.nn import Module, Linear, Softmax, CosineSimilarity, Embedding\n'), ((1393, 1416), 'torch.nn.CosineSimilarity', 'CosineSimilarity', ([], {'dim': '(2)'}), '(dim=2)\n', (1409, 1416), False, 'from torch.nn import Module, Linear, Softmax, CosineSimilarity, Embedding\n'), ((1440, 1454), 'torch.nn.Softmax', 'Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (1447, 1454), False, 'from torch.nn import Module, Linear, Softmax, CosineSimilarity, Embedding\n'), ((2850, 2896), 'torch.matmul', 'torch.matmul', (['softmax', 'memory_values_embedding'], {}), '(softmax, memory_values_embedding)\n', (2862, 2896), False, 'import torch\n'), ((3354, 3402), 'torch.cat', 'torch.cat', (['([result] * candidates.shape[1])'], {'dim': '(1)'}), '([result] * candidates.shape[1], dim=1)\n', (3363, 3402), False, 'import torch\n'), ((3872, 3972), 'torch.nn.Embedding', 'Embedding', ([], {'num_embeddings': 'num_embeddings', 'embedding_dim': 'embedding_dim', 'max_norm': '(5)', 'padding_idx': '(1)'}), '(num_embeddings=num_embeddings, embedding_dim=embedding_dim,\n max_norm=5, padding_idx=1)\n', (3881, 3972), False, 'from torch.nn import Module, Linear, Softmax, CosineSimilarity, Embedding\n')]
|
import os
# Function to rename multiple files
def main():
i = 252
path="/home/tristan/Bilder/Temp2/"
for filename in os.listdir(path):
print(filename)
if "JPG" in filename:
print(filename)
my_dest =str(i) + ".jpg"
my_source =path + filename
my_dest =path + my_dest
os.rename(my_source, my_dest)
i += 1
# Driver Code
if __name__ == '__main__':
# Calling main() function
main()
|
[
"os.rename",
"os.listdir"
] |
[((131, 147), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (141, 147), False, 'import os\n'), ((338, 367), 'os.rename', 'os.rename', (['my_source', 'my_dest'], {}), '(my_source, my_dest)\n', (347, 367), False, 'import os\n')]
|
import platform, typing
from typing import Optional
from klgists.common.exceptions import ExternalCommandFailed
from klgists.files.wrap_cmd_call import wrap_cmd_call
from sauronlib import logger
from sauronlib.audio_info import AudioInfo
class CouldNotConfigureOsAudioError(IOError):
def description(self):
return "Could not set a needed audio device or gain through operating system calls." + "The platform appears to be {}.".format(
platform.system())
class GlobalAudio:
"""A global lock for audio input and output.
Calling start() turns it on and calling stop() turns it off.
Doing so may change the intput and output device if necessary.
"""
def start(self) -> None:
"""
Override this to do things like change the output source and volume.
"""
pass
def stop(self) -> None:
"""
Override this to do things like reset the output source and volume to their original values.
"""
pass
def __init__(self) -> None:
self.is_on = False # type: bool
def __enter__(self):
self.start()
self.is_on = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.is_on = False
def play(self, info: AudioInfo, blocking: bool = False):
assert self.is_on, "Cannot play sound because the audio service is off"
if info.intensity > 0:
aud = info.wave_obj.play()
if blocking:
aud.wait_done()
class SmartGlobalAudio(GlobalAudio):
"""
A GlobalAudio that switches devices and volumes by detecting the OS.
Looks for a parameterless function _start_{platform.system().lower()} to start, and _stop_{platform.system().lower()} to stop.
For example, for Mac OS 10.12, this will be _start_darwin() and _stop_darwin()
"""
def __init__(
self,
input_device: Optional[typing.Tuple[str, str]], output_device: Optional[typing.Tuple[str, str]],
input_gain: Optional[typing.Tuple[int, int]], output_gain: Optional[typing.Tuple[int, int]],
timeout_secs: float
):
super(SmartGlobalAudio, self).__init__()
self.input_device = input_device
self.output_device = output_device
self.input_gain = input_gain
self.output_gain = output_gain
self.timeout_secs = timeout_secs
def __repr__(self):
return "SmartGlobalAudio:{}(input={}@{},output={}@{})" \
.format(self.is_on, self.input_device, self.input_gain, self.output_device, self.output_gain)
def __str__(self):
return repr(self)
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback) -> None:
self.stop()
def start(self) -> None:
if self.is_on:
logger.debug("Audio handler is already on. Ignoring.")
return
self.__wrap('start')
logger.debug("Starting audio handler.")
self.is_on = True
logger.info("Started audio handler.")
def stop(self) -> None:
if not self.is_on:
logger.debug("Audio handler is already off. Ignoring.")
logger.debug("Stopping audio handler.")
self.__wrap('stop')
self.is_on = False
logger.info("Stopped audio handler.")
def __wrap(self, prefix: str):
try:
getattr(self, '_' + prefix + '_' + platform.system().lower())()
except ExternalCommandFailed as e:
raise CouldNotConfigureOsAudioError() from e
except AttributeError as e:
raise CouldNotConfigureOsAudioError("OS {} not recognized".format(platform.system())) from e
class DefaultSmartGlobalAudio(SmartGlobalAudio):
"""
A default implementation of SmartGlobalAudio.
WARNING: Won't break on Windows, but only changes sources in Mac OS.
"""
def __repr__(self):
return "{}:{}(input={}@{},output={}@{})" \
.format(self.__class__.__name__, self.is_on, self.input_device, self.input_gain, self.output_device,
self.output_gain)
def __str__(self):
return repr(self)
def _start_mac(self) -> None:
self.__darwin_switch(0)
def _stop_mac(self) -> None:
self.__darwin_switch(1)
def _start_windows(self) -> None:
self.__windows_switch(0)
def _stop_windows(self) -> None:
self.__windows_switch(1)
def __windows_switch(self, i: int):
def percent_to_real(percent: int) -> int:
audio_max = 65535 # This is true of Windows in general, so not necessary to put in config
# audio min is 0
return round(audio_max * percent / 100)
if self.output_device is not None:
logger.debug("Setting audio output device to %s" % self.output_device[i])
wrap_cmd_call(
['nircmd', 'setdefaultsounddevice', '%s' % self.output_device[i]],
timeout_secs=self.timeout_secs
)
if self.input_device is not None:
logger.debug("Setting audio input device to %s" % self.input_device[i])
wrap_cmd_call(
['nircmd', 'setdefaultsounddevice', '%s' % self.input_device[i], '2'],
timeout_secs=self.timeout_secs
)
if self.output_gain is not None:
logger.debug("Setting system volume to configured default %s" % self.output_gain[i])
wrap_cmd_call(
['nircmd', 'setsysvolume', '%s' % percent_to_real(self.output_gain[i]), self.output_device[i]],
timeout_secs=self.timeout_secs
)
if self.input_gain is not None:
logger.debug("Setting input gain to configured default %s" % self.input_gain[i])
wrap_cmd_call(
['nircmd', 'setsysvolume', '%s' % percent_to_real(self.input_gain[i]), self.input_device[i]],
timeout_secs=self.timeout_secs
)
def __darwin_switch(self, i: int):
if self.output_device is not None:
logger.debug("Setting audio output device to %s" % self.output_device[i])
wrap_cmd_call(['SwitchAudioSource', '-s', '%s' % self.output_device[i]])
if self.input_device is not None:
logger.debug("Setting audio input device to %s" % self.input_device[i])
wrap_cmd_call(['SwitchAudioSource', '-t input', '-s', '%s' % self.input_device[i]])
if self.output_gain is not None:
logger.debug("Setting system volume to configured default %s" % self.output_gain[i])
wrap_cmd_call(['osascript', '-e', 'set volume output volume %s' % self.output_gain[i]])
if self.input_gain is not None:
logger.debug("Setting input gain to configured default %s" % self.input_gain[i])
wrap_cmd_call(['osascript', '-e', 'set volume input volume %s' % self.input_gain[i]])
logger.debug("Done configuring audio")
__all__ = ['GlobalAudio', 'SmartGlobalAudio', 'DefaultSmartGlobalAudio']
|
[
"platform.system",
"klgists.files.wrap_cmd_call.wrap_cmd_call",
"sauronlib.logger.info",
"sauronlib.logger.debug"
] |
[((2629, 2668), 'sauronlib.logger.debug', 'logger.debug', (['"""Starting audio handler."""'], {}), "('Starting audio handler.')\n", (2641, 2668), False, 'from sauronlib import logger\n'), ((2691, 2728), 'sauronlib.logger.info', 'logger.info', (['"""Started audio handler."""'], {}), "('Started audio handler.')\n", (2702, 2728), False, 'from sauronlib import logger\n'), ((2837, 2876), 'sauronlib.logger.debug', 'logger.debug', (['"""Stopping audio handler."""'], {}), "('Stopping audio handler.')\n", (2849, 2876), False, 'from sauronlib import logger\n'), ((2922, 2959), 'sauronlib.logger.info', 'logger.info', (['"""Stopped audio handler."""'], {}), "('Stopped audio handler.')\n", (2933, 2959), False, 'from sauronlib import logger\n'), ((6061, 6099), 'sauronlib.logger.debug', 'logger.debug', (['"""Done configuring audio"""'], {}), "('Done configuring audio')\n", (6073, 6099), False, 'from sauronlib import logger\n'), ((2539, 2593), 'sauronlib.logger.debug', 'logger.debug', (['"""Audio handler is already on. Ignoring."""'], {}), "('Audio handler is already on. Ignoring.')\n", (2551, 2593), False, 'from sauronlib import logger\n'), ((2779, 2834), 'sauronlib.logger.debug', 'logger.debug', (['"""Audio handler is already off. Ignoring."""'], {}), "('Audio handler is already off. 
Ignoring.')\n", (2791, 2834), False, 'from sauronlib import logger\n'), ((4211, 4284), 'sauronlib.logger.debug', 'logger.debug', (["('Setting audio output device to %s' % self.output_device[i])"], {}), "('Setting audio output device to %s' % self.output_device[i])\n", (4223, 4284), False, 'from sauronlib import logger\n'), ((4288, 4405), 'klgists.files.wrap_cmd_call.wrap_cmd_call', 'wrap_cmd_call', (["['nircmd', 'setdefaultsounddevice', '%s' % self.output_device[i]]"], {'timeout_secs': 'self.timeout_secs'}), "(['nircmd', 'setdefaultsounddevice', '%s' % self.output_device\n [i]], timeout_secs=self.timeout_secs)\n", (4301, 4405), False, 'from klgists.files.wrap_cmd_call import wrap_cmd_call\n'), ((4453, 4524), 'sauronlib.logger.debug', 'logger.debug', (["('Setting audio input device to %s' % self.input_device[i])"], {}), "('Setting audio input device to %s' % self.input_device[i])\n", (4465, 4524), False, 'from sauronlib import logger\n'), ((4528, 4649), 'klgists.files.wrap_cmd_call.wrap_cmd_call', 'wrap_cmd_call', (["['nircmd', 'setdefaultsounddevice', '%s' % self.input_device[i], '2']"], {'timeout_secs': 'self.timeout_secs'}), "(['nircmd', 'setdefaultsounddevice', '%s' % self.input_device[\n i], '2'], timeout_secs=self.timeout_secs)\n", (4541, 4649), False, 'from klgists.files.wrap_cmd_call import wrap_cmd_call\n'), ((4696, 4785), 'sauronlib.logger.debug', 'logger.debug', (["('Setting system volume to configured default %s' % self.output_gain[i])"], {}), "('Setting system volume to configured default %s' % self.\n output_gain[i])\n", (4708, 4785), False, 'from sauronlib import logger\n'), ((4976, 5061), 'sauronlib.logger.debug', 'logger.debug', (["('Setting input gain to configured default %s' % self.input_gain[i])"], {}), "('Setting input gain to configured default %s' % self.input_gain[i]\n )\n", (4988, 5061), False, 'from sauronlib import logger\n'), ((5290, 5363), 'sauronlib.logger.debug', 'logger.debug', (["('Setting audio output device to %s' % 
self.output_device[i])"], {}), "('Setting audio output device to %s' % self.output_device[i])\n", (5302, 5363), False, 'from sauronlib import logger\n'), ((5367, 5439), 'klgists.files.wrap_cmd_call.wrap_cmd_call', 'wrap_cmd_call', (["['SwitchAudioSource', '-s', '%s' % self.output_device[i]]"], {}), "(['SwitchAudioSource', '-s', '%s' % self.output_device[i]])\n", (5380, 5439), False, 'from klgists.files.wrap_cmd_call import wrap_cmd_call\n'), ((5479, 5550), 'sauronlib.logger.debug', 'logger.debug', (["('Setting audio input device to %s' % self.input_device[i])"], {}), "('Setting audio input device to %s' % self.input_device[i])\n", (5491, 5550), False, 'from sauronlib import logger\n'), ((5554, 5642), 'klgists.files.wrap_cmd_call.wrap_cmd_call', 'wrap_cmd_call', (["['SwitchAudioSource', '-t input', '-s', '%s' % self.input_device[i]]"], {}), "(['SwitchAudioSource', '-t input', '-s', '%s' % self.\n input_device[i]])\n", (5567, 5642), False, 'from klgists.files.wrap_cmd_call import wrap_cmd_call\n'), ((5676, 5765), 'sauronlib.logger.debug', 'logger.debug', (["('Setting system volume to configured default %s' % self.output_gain[i])"], {}), "('Setting system volume to configured default %s' % self.\n output_gain[i])\n", (5688, 5765), False, 'from sauronlib import logger\n'), ((5764, 5856), 'klgists.files.wrap_cmd_call.wrap_cmd_call', 'wrap_cmd_call', (["['osascript', '-e', 'set volume output volume %s' % self.output_gain[i]]"], {}), "(['osascript', '-e', 'set volume output volume %s' % self.\n output_gain[i]])\n", (5777, 5856), False, 'from klgists.files.wrap_cmd_call import wrap_cmd_call\n'), ((5889, 5974), 'sauronlib.logger.debug', 'logger.debug', (["('Setting input gain to configured default %s' % self.input_gain[i])"], {}), "('Setting input gain to configured default %s' % self.input_gain[i]\n )\n", (5901, 5974), False, 'from sauronlib import logger\n'), ((5973, 6063), 'klgists.files.wrap_cmd_call.wrap_cmd_call', 'wrap_cmd_call', (["['osascript', '-e', 'set volume 
input volume %s' % self.input_gain[i]]"], {}), "(['osascript', '-e', 'set volume input volume %s' % self.\n input_gain[i]])\n", (5986, 6063), False, 'from klgists.files.wrap_cmd_call import wrap_cmd_call\n'), ((445, 462), 'platform.system', 'platform.system', ([], {}), '()\n', (460, 462), False, 'import platform, typing\n'), ((3251, 3268), 'platform.system', 'platform.system', ([], {}), '()\n', (3266, 3268), False, 'import platform, typing\n'), ((3038, 3055), 'platform.system', 'platform.system', ([], {}), '()\n', (3053, 3055), False, 'import platform, typing\n')]
|
from collections import UserDict
from functools import wraps
import logging
def var_method(func):
"""This decorator marks the given function as a scheduler variable. The
function must take no arguments (other than self)."""
# pylint: disable=W0212
# The scheduler plugin class will search for these.
func._is_var_method = True
func._is_deferable = False
# Wrap the function function so it keeps it's base attributes.
@wraps(func)
def _func(self):
# This is primarily to enforce the fact that these can't take arguments
return str(func(self))
return _func
class VarDict(UserDict):
"""A dictionary for defining dynamic variables in Pavilion.
Usage:
To add a variable, create a method and decorate it with
either '@var_method' or '@dfr_var_method()'. The method name will be the
variable name, and the method will be called to resolve the variable
value. Methods that start with '_' are ignored.
"""
def __init__(self, name):
"""Initialize the scheduler var dictionary.
:param str name: The name of this var dict.
"""
super().__init__(self)
self._name = name
self._keys = self._find_vars()
self.logger = logging.getLogger('{}_vars'.format(name))
@classmethod
def _find_vars(cls):
"""Find all the scheduler variables and add them as variables."""
keys = set()
for key in cls.__dict__.keys():
# Ignore anything that starts with an underscore
if key.startswith('_'):
continue
obj = getattr(cls, key)
if callable(obj) and getattr(obj, '_is_var_method', False):
keys.add(key)
return keys
def __getitem__(self, key):
"""As per the dict class."""
if key not in self._keys:
raise KeyError("Invalid {} variable '{}'"
.format(self._name, key))
if key not in self.data:
self.data[key] = getattr(self, key)()
return self.data[key]
def keys(self):
"""As per the dict class."""
return (k for k in self._keys)
def get(self, key, default=None):
"""As per the dict class."""
if key not in self._keys:
return default
return self[key]
def values(self):
"""As per the dict class."""
return (self[k] for k in self.keys())
def items(self):
"""As per the dict class."""
return ((k, self[k]) for k in self.keys())
def info(self, key):
"""Get an info dictionary about the given key."""
if key not in self._keys:
raise KeyError("Key '{}' does not exist in vardict '{}'"
.format(key, self._name))
func = getattr(self, key)
# Get rid of newlines
help_text = func.__doc__.replace('\n', ' ')
# Get rid of extra whitespace
help_text = ' '.join(help_text.split())
return {
'name': key,
'deferred': func._is_deferable, # pylint: disable=W0212
'help': help_text,
}
|
[
"functools.wraps"
] |
[((454, 465), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (459, 465), False, 'from functools import wraps\n')]
|
import numpy as np
import os
from astropy.io import fits
from astropy.stats import sigma_clip, sigma_clipped_stats
from specklepy.logging import logger
from specklepy.reduction.subwindow import SubWindow
from specklepy.utils.time import default_time_stamp
class MasterDark(object):
extensions = {'variance': 'VAR', 'mask': 'MASK'}
def __init__(self, file_list, file_name='MasterDark.fits', file_path=None, out_dir=None, setup=None,
sub_window=None, new=True):
self.files = file_list
self.file_name = self.insert_setup_to_file_name(file_name=file_name, setup=setup)
self.file_path = file_path if file_path is not None else ''
self.out_dir = out_dir if out_dir is not None else ''
# Store sub-window
if isinstance(sub_window, str):
self.sub_window = sub_window
else:
self.sub_window = np.unique(sub_window)[0]
# Initialize maps
self.image = None
self.var = None
self.mask = None
@classmethod
def from_file(cls, file_path):
# Create object from path information
out_dir, file_name = os.path.split(file_path)
obj = cls(file_list=None, file_name=file_name, out_dir=out_dir, setup=None)
# Load data from file
obj.image = fits.getdata(obj.path)
try:
obj.var = fits.getdata(obj.path, obj.extensions.get('variance'))
except KeyError:
logger.debug(f"Loading MasterDark from file {obj.path!r} without {obj.extensions.get('variance')!r} "
f"extension")
try:
obj.mask = fits.getdata(obj.path, obj.extensions.get('mask')).astype(bool)
except KeyError:
logger.debug(f"Loading MasterDark from file {obj.path!r} without {obj.extensions.get('mask')!r} "
f"extension")
obj.sub_window = fits.getheader(obj.path)["HIERARCH SPECKLEPY REDUCTION SUBWIN"]
return obj
@property
def path(self):
return os.path.join(self.out_dir, self.file_name)
@staticmethod
def insert_setup_to_file_name(file_name, setup=None):
if setup is None:
return file_name
else:
base, ext = os.path.splitext(file_name)
return f"{base}_{setup}{ext}"
def combine(self, max_number_frames=None, rejection_threshold=10):
logger.info("Combining master dark frame...")
# if max_number_frames is not None:
# logger.debug(f"Using only the first {max_number_frames} frames of each cube")
means = []
vars = []
number_frames = []
# Iterate through files
for file in self.files:
logger.info(f"Reading DARK frames from file {file!r}...")
path = os.path.join(self.file_path, file)
with fits.open(path) as hdu_list:
data = hdu_list[0].data.squeeze()
if data.ndim == 2:
means.append(data)
vars.append(np.zeros(data.shape))
# self.combine_mask(np.zeros(data.shape, dtype=bool))
number_frames.append(1)
elif data.ndim == 3:
logger.info("Computing statistics of data cube...")
clipped_mean, _, clipped_std = sigma_clipped_stats(data=data, sigma=rejection_threshold, axis=0)
# mean = np.mean(data, axis=0)
# std = np.std(data, axis=0)
#
# # Identify outliers based on sigma-clipping
# mean_mask = sigma_clip(mean, sigma=rejection_threshold, masked=True).mask
# std_mask = sigma_clip(std, sigma=rejection_threshold, masked=True).mask
# mask = np.logical_or(mean_mask, std_mask)
# mask_indexes = np.array(np.where(mask)).transpose()
#
# # Re-compute the identified pixels
# logger.info(f"Re-measuring {len(mask_indexes)} outliers...")
# for mask_index in mask_indexes:
# # Extract t-series for the masked pixel
# arr = data[:, mask_index[0], mask_index[1]]
#
# # Compute sigma-clipped statistics for this pixel
# arr_mean, _, arr_std = sigma_clipped_stats(arr, sigma=rejection_threshold)
# mean[mask_index[0], mask_index[1]] = arr_mean
# std[mask_index[0], mask_index[1]] = arr_std
#
# mean = sigma_clip(mean, sigma=rejection_threshold, masked=True)
# std = sigma_clip(std, sigma=rejection_threshold, masked=True)
# Store results into lists
means.append(clipped_mean)
vars.append(np.square(clipped_std))
# self.combine_mask(np.logical_or(mean.mask, std.mask))
number_frames.append(data.shape[0])
else:
raise ValueError(f"Shape of data {data.shape} is not understood. Data must be either 2 or "
f"3-dimensional!")
# Cast list of arrays into 3-dim arrays
means = np.array(means)
vars = np.array(vars)
# Combine variances
if (vars == 0).all(): # catch case, where all frames have no variance
self.var = np.var(means, axis=0)
else:
self.var = np.average(vars, axis=0, weights=number_frames)
# Build mask based on variances
bpm = self.var == 0 # Bad pixel mask
if bpm.all(): # Catch case, where all frames have no variance
bpm = np.zeros(bpm.shape, dtype=bool)
gpm = ~bpm # Good pixel mask
# Build weights based on variance, and combine images
weights = np.multiply(np.reciprocal(self.var, where=gpm), np.expand_dims(number_frames, (1, 2)))
self.image = np.average(means, axis=0, weights=weights)
# Combine mask
self.mask = bpm
# def combine_var(self, new_var):
# if self.var is None:
# self.var = new_var
# else:
# self.var = np.add(self.var, new_var)
#
# def combine_mask(self, new_mask):
# if self.mask is None:
# self.mask = new_mask
# else:
# self.mask = np.logical_or(self.mask, new_mask)
def write(self, overwrite=True):
# Build primary HDU
header = fits.Header()
for index, file in enumerate(self.files):
header.set(f"HIERARCH SPECKLEPY SOURCE FILE{index:04} NAME", os.path.basename(file))
header.set("HIERARCH SPECKLEPY REDUCTION SUBWIN", self.sub_window)
primary = fits.PrimaryHDU(data=self.image, header=header)
# Build HDU list
hdu_list = fits.HDUList([primary])
# Build variance HDU
if self.var is not None:
var_hdu = fits.ImageHDU(data=self.var, name=self.extensions.get('variance'))
hdu_list.append(var_hdu)
# Build mask HDU
if self.mask is not None:
mask_hdu = fits.ImageHDU(data=self.mask.astype(np.int16), name=self.extensions.get('mask'))
hdu_list.append(mask_hdu)
# Write HDU list to file
logger.info(f"Writing master dark frame to file {self.path!r}")
hdu_list.writeto(self.path, overwrite=overwrite)
def subtract(self, file_path, extension=None, sub_window=None, sub_window_order='xy'):
"""Subtract the master dark from a file containing image data.
The master dark is subtracted from the image or each frame in a data cube. Then uncertainties are propagated.
Arguments:
file_path (str):
Path to the file, containing image data.
extension (str, optional):
Classifier for the image data extension.
sub_window (str, optional):
Sub-window string to initialize sub-windows from.
sub_window_order (str, optional):
Order of axis in the sub-window strings.
"""
logger.info(f"Subtracting master dark {self.file_name!r} from file at {file_path!r}")
# Construct sub-window
sub_window = SubWindow.from_str(sub_window, full=self.sub_window, order=sub_window_order)
# Construct good pixel mask
if self.mask is None:
gpm = np.ones(sub_window(self.image).shape, dtype=bool)
else:
gpm = sub_window(~self.mask)
# Load image data
data = fits.getdata(file_path, extension)
# Subtract
if data.ndim == 2:
data = np.subtract(data, sub_window(self.image), where=gpm)
elif data.ndim == 3:
for f, frame in enumerate(data):
data[f] = np.subtract(frame, sub_window(self.image), where=gpm)
# Propagate variances
try:
var = fits.getdata(file_path, self.extensions.get('variance'))
has_var_hdu = True
var = np.add(var, sub_window(self.var), where=gpm)
except KeyError:
has_var_hdu = False
var = sub_window(self.var)
# Propagate mask
try:
mask = fits.getdata(file_path, self.extensions.get('mask')).astype(bool)
has_mask_hdu = True
mask = np.logical_or(mask, sub_window(self.mask))
except KeyError:
has_mask_hdu = False
mask = sub_window(self.mask)
# Store data to cube
with fits.open(file_path, mode='update') as hdu_list:
# Update header
hdu_list[0].header.set('HIERARCH SPECKLEPY REDUCTION DARKCORR', default_time_stamp())
# Image data
hdu_list[0].data = data
# Variance data
if has_var_hdu:
hdu_list[self.extensions.get('variance')].data = var
else:
var_hdu = fits.ImageHDU(data=var, name=self.extensions.get('variance'))
hdu_list.append(var_hdu)
# Mask data
if has_mask_hdu:
hdu_list[self.extensions.get('mask')].data = mask.astype(np.int16)
else:
mask_hdu = fits.ImageHDU(data=mask.astype(np.int16), name=self.extensions.get('mask'))
hdu_list.append(mask_hdu)
# Write HDU list to file
logger.info(f"Updating dark subtraction in file {file_path!r}")
hdu_list.flush()
|
[
"astropy.stats.sigma_clipped_stats",
"astropy.io.fits.PrimaryHDU",
"numpy.reciprocal",
"astropy.io.fits.Header",
"astropy.io.fits.HDUList",
"os.path.join",
"numpy.unique",
"specklepy.reduction.subwindow.SubWindow.from_str",
"astropy.io.fits.getdata",
"numpy.var",
"specklepy.utils.time.default_time_stamp",
"numpy.average",
"os.path.basename",
"specklepy.logging.logger.info",
"numpy.square",
"astropy.io.fits.open",
"numpy.zeros",
"numpy.expand_dims",
"astropy.io.fits.getheader",
"numpy.array",
"os.path.splitext",
"os.path.split"
] |
[((1150, 1174), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1163, 1174), False, 'import os\n'), ((1310, 1332), 'astropy.io.fits.getdata', 'fits.getdata', (['obj.path'], {}), '(obj.path)\n', (1322, 1332), False, 'from astropy.io import fits\n'), ((2034, 2076), 'os.path.join', 'os.path.join', (['self.out_dir', 'self.file_name'], {}), '(self.out_dir, self.file_name)\n', (2046, 2076), False, 'import os\n'), ((2397, 2442), 'specklepy.logging.logger.info', 'logger.info', (['"""Combining master dark frame..."""'], {}), "('Combining master dark frame...')\n", (2408, 2442), False, 'from specklepy.logging import logger\n'), ((5342, 5357), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (5350, 5357), True, 'import numpy as np\n'), ((5373, 5387), 'numpy.array', 'np.array', (['vars'], {}), '(vars)\n', (5381, 5387), True, 'import numpy as np\n'), ((6061, 6103), 'numpy.average', 'np.average', (['means'], {'axis': '(0)', 'weights': 'weights'}), '(means, axis=0, weights=weights)\n', (6071, 6103), True, 'import numpy as np\n'), ((6596, 6609), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (6607, 6609), False, 'from astropy.io import fits\n'), ((6850, 6897), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'self.image', 'header': 'header'}), '(data=self.image, header=header)\n', (6865, 6897), False, 'from astropy.io import fits\n'), ((6943, 6966), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[primary]'], {}), '([primary])\n', (6955, 6966), False, 'from astropy.io import fits\n'), ((7400, 7463), 'specklepy.logging.logger.info', 'logger.info', (['f"""Writing master dark frame to file {self.path!r}"""'], {}), "(f'Writing master dark frame to file {self.path!r}')\n", (7411, 7463), False, 'from specklepy.logging import logger\n'), ((8235, 8325), 'specklepy.logging.logger.info', 'logger.info', (['f"""Subtracting master dark {self.file_name!r} from file at {file_path!r}"""'], {}), "(\n f'Subtracting master dark 
{self.file_name!r} from file at {file_path!r}')\n", (8246, 8325), False, 'from specklepy.logging import logger\n'), ((8374, 8450), 'specklepy.reduction.subwindow.SubWindow.from_str', 'SubWindow.from_str', (['sub_window'], {'full': 'self.sub_window', 'order': 'sub_window_order'}), '(sub_window, full=self.sub_window, order=sub_window_order)\n', (8392, 8450), False, 'from specklepy.reduction.subwindow import SubWindow\n'), ((8683, 8717), 'astropy.io.fits.getdata', 'fits.getdata', (['file_path', 'extension'], {}), '(file_path, extension)\n', (8695, 8717), False, 'from astropy.io import fits\n'), ((1900, 1924), 'astropy.io.fits.getheader', 'fits.getheader', (['obj.path'], {}), '(obj.path)\n', (1914, 1924), False, 'from astropy.io import fits\n'), ((2247, 2274), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2263, 2274), False, 'import os\n'), ((2720, 2777), 'specklepy.logging.logger.info', 'logger.info', (['f"""Reading DARK frames from file {file!r}..."""'], {}), "(f'Reading DARK frames from file {file!r}...')\n", (2731, 2777), False, 'from specklepy.logging import logger\n'), ((2797, 2831), 'os.path.join', 'os.path.join', (['self.file_path', 'file'], {}), '(self.file_path, file)\n', (2809, 2831), False, 'import os\n'), ((5519, 5540), 'numpy.var', 'np.var', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (5525, 5540), True, 'import numpy as np\n'), ((5578, 5625), 'numpy.average', 'np.average', (['vars'], {'axis': '(0)', 'weights': 'number_frames'}), '(vars, axis=0, weights=number_frames)\n', (5588, 5625), True, 'import numpy as np\n'), ((5802, 5833), 'numpy.zeros', 'np.zeros', (['bpm.shape'], {'dtype': 'bool'}), '(bpm.shape, dtype=bool)\n', (5810, 5833), True, 'import numpy as np\n'), ((5965, 5999), 'numpy.reciprocal', 'np.reciprocal', (['self.var'], {'where': 'gpm'}), '(self.var, where=gpm)\n', (5978, 5999), True, 'import numpy as np\n'), ((6001, 6038), 'numpy.expand_dims', 'np.expand_dims', (['number_frames', '(1, 2)'], {}), 
'(number_frames, (1, 2))\n', (6015, 6038), True, 'import numpy as np\n'), ((9660, 9695), 'astropy.io.fits.open', 'fits.open', (['file_path'], {'mode': '"""update"""'}), "(file_path, mode='update')\n", (9669, 9695), False, 'from astropy.io import fits\n'), ((10520, 10583), 'specklepy.logging.logger.info', 'logger.info', (['f"""Updating dark subtraction in file {file_path!r}"""'], {}), "(f'Updating dark subtraction in file {file_path!r}')\n", (10531, 10583), False, 'from specklepy.logging import logger\n'), ((895, 916), 'numpy.unique', 'np.unique', (['sub_window'], {}), '(sub_window)\n', (904, 916), True, 'import numpy as np\n'), ((2849, 2864), 'astropy.io.fits.open', 'fits.open', (['path'], {}), '(path)\n', (2858, 2864), False, 'from astropy.io import fits\n'), ((6733, 6755), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (6749, 6755), False, 'import os\n'), ((9813, 9833), 'specklepy.utils.time.default_time_stamp', 'default_time_stamp', ([], {}), '()\n', (9831, 9833), False, 'from specklepy.utils.time import default_time_stamp\n'), ((3035, 3055), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (3043, 3055), True, 'import numpy as np\n'), ((3233, 3284), 'specklepy.logging.logger.info', 'logger.info', (['"""Computing statistics of data cube..."""'], {}), "('Computing statistics of data cube...')\n", (3244, 3284), False, 'from specklepy.logging import logger\n'), ((3336, 3401), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', ([], {'data': 'data', 'sigma': 'rejection_threshold', 'axis': '(0)'}), '(data=data, sigma=rejection_threshold, axis=0)\n', (3355, 3401), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats\n'), ((4930, 4952), 'numpy.square', 'np.square', (['clipped_std'], {}), '(clipped_std)\n', (4939, 4952), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#python
import os
import shutil
import random
#appion
from appionlib import appionScript
from appionlib import apStack
from appionlib import apDisplay
from appionlib import apStackFormat
class convertStackScript(appionScript.AppionScript):
#=====================
def setupParserOptions(self):
self.formatoptions = ("eman", "spider", "frealign", "xmipp")
self.parser.set_usage("Usage: %prog --stackid=ID --format=PROGRAM_NAME [options]")
self.parser.add_option("-s", "--stackid", dest="stackid", type="int",
help="Stack database id", metavar="ID")
self.parser.add_option("--format", dest="format",
default="spider", type="choice", choices=self.formatoptions,
help="Format to be converted to, options: "+str(self.formatoptions))
#=====================
def checkConflicts(self):
if self.params['stackid'] is None:
apDisplay.printError("stackid was not defined")
if self.params['runname'] is None:
apDisplay.printError("new runname was not defined")
#=====================
def setRunDir(self):
stackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False)
path = stackdata['path']['path']
uppath = os.path.dirname(os.path.abspath(path))
self.params['rundir'] = os.path.join(uppath, self.params['runname'])
#=====================
def start(self):
#new stack path
stackdata = apStack.getOnlyStackData(self.params['stackid'])
apStackFormat.linkFormattedStack(stackdata, self.params['format'],'test')
#apStackFormat.replaceFormattedStack(stackdata, self.params['format'], self.params['rundir'],'normlist.doc')
#=====================
if __name__ == "__main__":
subStack = convertStackScript()
subStack.start()
subStack.close()
|
[
"os.path.abspath",
"appionlib.apStackFormat.linkFormattedStack",
"appionlib.apStack.getOnlyStackData",
"appionlib.apDisplay.printError",
"os.path.join"
] |
[((1064, 1123), 'appionlib.apStack.getOnlyStackData', 'apStack.getOnlyStackData', (["self.params['stackid']"], {'msg': '(False)'}), "(self.params['stackid'], msg=False)\n", (1088, 1123), False, 'from appionlib import apStack\n'), ((1235, 1279), 'os.path.join', 'os.path.join', (['uppath', "self.params['runname']"], {}), "(uppath, self.params['runname'])\n", (1247, 1279), False, 'import os\n'), ((1355, 1403), 'appionlib.apStack.getOnlyStackData', 'apStack.getOnlyStackData', (["self.params['stackid']"], {}), "(self.params['stackid'])\n", (1379, 1403), False, 'from appionlib import apStack\n'), ((1406, 1480), 'appionlib.apStackFormat.linkFormattedStack', 'apStackFormat.linkFormattedStack', (['stackdata', "self.params['format']", '"""test"""'], {}), "(stackdata, self.params['format'], 'test')\n", (1438, 1480), False, 'from appionlib import apStackFormat\n'), ((863, 910), 'appionlib.apDisplay.printError', 'apDisplay.printError', (['"""stackid was not defined"""'], {}), "('stackid was not defined')\n", (883, 910), False, 'from appionlib import apDisplay\n'), ((951, 1002), 'appionlib.apDisplay.printError', 'apDisplay.printError', (['"""new runname was not defined"""'], {}), "('new runname was not defined')\n", (971, 1002), False, 'from appionlib import apDisplay\n'), ((1186, 1207), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (1201, 1207), False, 'import os\n')]
|
#!/usr/bin/env python3
# coding: utf-8
# Contains common methods frequently used across....
# The example reference at the below matplotlib is helpful in choosing an
# appropriate colormap for the output plot
# https://matplotlib.org/examples/color/colormaps_reference.html
# import the necessary packages
import numpy as np
import matplotlib.pyplot as plt
def create_meshgrid(x, y, margin=1, step=0.02):
"""Create a numoy rectangular meshgrid out of an array of
x values and an array of y values
@ref https://stackoverflow.com/questions/36013063
/what-is-the-purpose-of-meshgrid-in-python-numpy
:x: array-like point x
:y: array-like point y
:margin: (int) boundary
:step: (float) stepping the values, default = 0.02
Examples
--------
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 1, 2, 3, 4])
xx,yy=np.meshgrid(x,y)
plt.plot(xx,yy, marker='.', color='k',linestyle='none')
"""
x_min, x_max = x.min() - margin, x.max() + margin
y_min, y_max = y.min() - margin, y.max() + margin
# define the mesh grid, with xx and yy holding the grid of
# points where the function will be evaluated
xx, yy = np.meshgrid(
np.arange(x_min, x_max, step), np.arange(y_min, y_max, step))
return xx, yy
def draw_decision_boundary(x,
y,
classifier,
margin=1,
step=0.02,
alpha=0.8,
cmap=plt.cm.coolwarm):
"""Draw decision boundary separating the collections
Parameters
----------
x: {array-like}, shape = [n_samples, n_features]
y: array-like, shape = [n_samples]
margin: margin for the min and max
step_size: float This would be the buffer for clarity
This is spacing between values. For any output out, this is the distance
between two adjacent values, out[i+1] - out[i]
alpha: float
color alpha value
cmap: color map
"""
# set-up the marker generator and color map for plotting
markers = ('s', 'o', 'x', '^', 'v')
# for data, first set-up a grid for plotting.
X0, X1 = x[:, 0], x[:, 1]
xx, yy = create_meshgrid(X0, X1, margin, step)
mesh = np.array([xx.ravel(), yy.ravel()])
print("np.array: {}", format(mesh))
# compute the classifiers output
Z = classifier.predict(mesh.T)
Z = Z.reshape(xx.shape)
# now plot the contour
plt.contourf(xx, yy, Z, alpha=alpha, cmap=cmap)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
for idx, cl in enumerate(np.unique(y)):
print("cl: ", cl)
plt.scatter(
x=x[y == cl, 0],
y=x[y == cl, 1],
alpha=alpha,
marker=markers[idx],
label=cl,
edgecolor='yellow')
def plot_classifier(X,
y,
classifier,
margin=1.0,
step_size=0.01,
alpha=0.8,
test_idx=None,
cmap=plt.cm.Paired):
"""Draw the datapoints and boundaries
Parameters
----------
x: {array-like}, shape = [n_samples, n_features]
y: array-like, shape = [n_samples]
margin: margin for the min and max
step_size: float
This is spacing between values. For any output out, this is the distance
between two adjacent values, out[i+1] - out[i]
alpha: float
blending value to decide transparency - 0 (transparent) and 1 (opaque)
test_idx: list
cmap: object
color map for the output colors of objects
"""
# set-up the marker generator for plotting
markers = ('s', 'o', 'x', '*', 'v')
# setup and define a range for plotting the data
X0, X1 = X[:, 0], X[:, 1]
xx, yy = create_meshgrid(X0, X1, margin=margin, step=step_size)
# compute the output of the classifier
mesh = np.c_[xx.ravel(), yy.ravel()]
mesh_output = classifier.predict(mesh)
# reshape the array
mesh_output = mesh_output.reshape(xx.shape)
# draw and fill contour lines
plt.contourf(xx, yy, mesh_output, alpha=0.4, cmap=cmap)
# now overlay the training coordinates over the plot
# set boundaries
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks((np.arange(int(min(X[:, 0]) - 1), int(max(X[:, 0]) + 1), 1.0)))
plt.yticks((np.arange(int(min(X[:, 1]) - 1), int(max(X[:, 1]) + 1), 1.0)))
# use a separate marker for each training label
for (i, cl) in enumerate(np.unique(y)):
plt.scatter(
x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=alpha,
marker=markers[i],
label=cl,
edgecolors='purple')
# plotting and highlighting the test samples
if test_idx:
# x_test, y_test = X[test_idx, :], y[test_idx]
x_test = X[test_idx, :]
plt.scatter(
x_test[:, 0],
x_test[:, 1],
c='',
edgecolors='purple',
alpha=alpha,
linewidths=1,
marker='o',
s=100,
label='Test Data')
|
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.contourf",
"numpy.arange",
"numpy.unique"
] |
[((2481, 2528), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'alpha': 'alpha', 'cmap': 'cmap'}), '(xx, yy, Z, alpha=alpha, cmap=cmap)\n', (2493, 2528), True, 'import matplotlib.pyplot as plt\n'), ((4126, 4181), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'mesh_output'], {'alpha': '(0.4)', 'cmap': 'cmap'}), '(xx, yy, mesh_output, alpha=0.4, cmap=cmap)\n', (4138, 4181), True, 'import matplotlib.pyplot as plt\n'), ((1214, 1243), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'step'], {}), '(x_min, x_max, step)\n', (1223, 1243), True, 'import numpy as np\n'), ((1245, 1274), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'step'], {}), '(y_min, y_max, step)\n', (1254, 1274), True, 'import numpy as np\n'), ((2625, 2637), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2634, 2637), True, 'import numpy as np\n'), ((2674, 2788), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'x[y == cl, 0]', 'y': 'x[y == cl, 1]', 'alpha': 'alpha', 'marker': 'markers[idx]', 'label': 'cl', 'edgecolor': '"""yellow"""'}), "(x=x[y == cl, 0], y=x[y == cl, 1], alpha=alpha, marker=markers[\n idx], label=cl, edgecolor='yellow')\n", (2685, 2788), True, 'import matplotlib.pyplot as plt\n'), ((4567, 4579), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4576, 4579), True, 'import numpy as np\n'), ((4590, 4703), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'X[y == cl, 0]', 'y': 'X[y == cl, 1]', 'alpha': 'alpha', 'marker': 'markers[i]', 'label': 'cl', 'edgecolors': '"""purple"""'}), "(x=X[y == cl, 0], y=X[y == cl, 1], alpha=alpha, marker=markers[i\n ], label=cl, edgecolors='purple')\n", (4601, 4703), True, 'import matplotlib.pyplot as plt\n'), ((4934, 5070), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test[:, 0]', 'x_test[:, 1]'], {'c': '""""""', 'edgecolors': '"""purple"""', 'alpha': 'alpha', 'linewidths': '(1)', 'marker': '"""o"""', 's': '(100)', 'label': '"""Test Data"""'}), "(x_test[:, 0], x_test[:, 1], c='', edgecolors='purple', 
alpha=\n alpha, linewidths=1, marker='o', s=100, label='Test Data')\n", (4945, 5070), True, 'import matplotlib.pyplot as plt\n')]
|
import ext
import re
CODE = "assembly.txt"
OUTPUT = "assembled.bin"
def parse(value):
if re.match("^0[a-zA-Z]", value):
if value[1] == "b":
parsed = int(value, 2)
elif value[1] == "x":
parsed = int(value, 16)
else:
parsed = int(value)
return parsed
with open(CODE, "r") as f:
lines = f.read().lower().replace("\t", " ").split("\n")
lookup = {}
counter = 0
section = None
sequence = []
for line in lines:
line = line.split(";")[0]
if line:
if line[0] == ".":
section = line[1:]
elif section == "data":
line = line.replace(" ", "")
token, value = line.split("=")
lookup[token] = parse(value)
elif section == "code":
if line[0] == ":":
line = line.replace(" ", "")
lookup[line[1:]] = counter
else:
tokens = [token for token in line.split(" ") if token]
arguments = [parse(token) if token[0].isdigit() else token for token in tokens[1:]]
sequence.append((counter, tokens[0], arguments))
counter += ext.inst_size(tokens[0])
raw = b""
for macro in sequence:
num = ext.render_inst(macro[1], macro[2], macro[0], lookup)
for operation in num:
binary = b"".join([value.to_bytes(1, "little") for value in ((operation[0] << 4) | operation[1], operation[2])])
raw += binary
with open(OUTPUT, "wb") as f:
f.write(raw)
print("Program size: %i operations (%i bytes)" % (len(raw) // 2, len(raw)))
print("Lookup table:")
for key, value in lookup.items():
print(" %s: %i" % (key, value))
|
[
"ext.inst_size",
"ext.render_inst",
"re.match"
] |
[((95, 124), 're.match', 're.match', (['"""^0[a-zA-Z]"""', 'value'], {}), "('^0[a-zA-Z]', value)\n", (103, 124), False, 'import re\n'), ((1231, 1284), 'ext.render_inst', 'ext.render_inst', (['macro[1]', 'macro[2]', 'macro[0]', 'lookup'], {}), '(macro[1], macro[2], macro[0], lookup)\n', (1246, 1284), False, 'import ext\n'), ((1162, 1186), 'ext.inst_size', 'ext.inst_size', (['tokens[0]'], {}), '(tokens[0])\n', (1175, 1186), False, 'import ext\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 <NAME>
# The source code contained in this file is licensed under the MIT license.
# SPDX-License-Identifier: MIT
import logging
from pythonic_testcase import assert_raises, PythonicTestCase
from ..testutils import (assert_did_log_message, assert_no_log_messages,
build_collecting_logger)
class TestutilsTest(PythonicTestCase):
def test_can_assert_logged_messages(self):
log, lc = build_collecting_logger()
log.info('foo')
log.debug('bar')
assert_did_log_message(lc, 'foo')
assert_did_log_message(lc, 'foo', level=logging.INFO)
with assert_raises(AssertionError):
assert_did_log_message(lc, 'foo', level=logging.DEBUG)
assert_no_log_messages(lc, min_level=logging.WARN)
|
[
"pythonic_testcase.assert_raises"
] |
[((640, 669), 'pythonic_testcase.assert_raises', 'assert_raises', (['AssertionError'], {}), '(AssertionError)\n', (653, 669), False, 'from pythonic_testcase import assert_raises, PythonicTestCase\n')]
|
import json
import string
from io import IOBase
from random import choice
from string import ascii_letters
from typing import Optional
import pytest
import requests
from werkzeug import Response
from infobip_channels.core.models import CamelCaseModel, MultipartMixin
def get_random_string(length: int) -> str:
return "".join(choice(ascii_letters) for _ in range(length))
def get_random_numbers(length: int) -> str:
return "".join(choice(string.digits) for _ in range(length))
class HttpTestClient:
def __init__(self, url, headers):
self.url = url
self.headers = headers
def post(self, endpoint, body, headers=None):
headers = headers or self.headers
return requests.post(url=f"{self.url}" + endpoint, json=body, headers=headers)
def get(self, endpoint, headers=None):
headers = headers or self.headers
return requests.get(url=f"{self.url}" + endpoint, headers=headers)
class Address(CamelCaseModel):
street: str
city: str
zip_code: int
class UserInfo(CamelCaseModel, MultipartMixin):
name: Optional[str] = None
last_name: str
address: Address
profile_image: IOBase
class Config(CamelCaseModel.Config):
arbitrary_types_allowed = True
@pytest.fixture
def http_test_client():
def _get_http_test_client(url, headers):
return HttpTestClient(url, headers)
return _get_http_test_client
def get_response_object(status_code, content):
return Response(json.dumps(content), status_code)
def get_response_error_invalid_content():
return {
"error": {"field_one": "error_one", "field_two": "error_two"},
}
def get_expected_post_headers(content_type="application/json"):
return {
"Authorization": "App secret",
"Content-Type": content_type,
"Accept": "application/json",
}
def get_expected_put_headers(content_type="application/json"):
return {
"Authorization": "App secret",
"Content-Type": content_type,
"Accept": "application/json",
}
def get_expected_get_headers():
return {
"Authorization": "App secret",
"Accept": "application/json",
}
def get_expected_delete_headers():
return {
"Authorization": "App secret",
"Accept": "application/json",
}
|
[
"requests.post",
"random.choice",
"requests.get",
"json.dumps"
] |
[((714, 785), 'requests.post', 'requests.post', ([], {'url': "(f'{self.url}' + endpoint)", 'json': 'body', 'headers': 'headers'}), "(url=f'{self.url}' + endpoint, json=body, headers=headers)\n", (727, 785), False, 'import requests\n'), ((887, 946), 'requests.get', 'requests.get', ([], {'url': "(f'{self.url}' + endpoint)", 'headers': 'headers'}), "(url=f'{self.url}' + endpoint, headers=headers)\n", (899, 946), False, 'import requests\n'), ((1490, 1509), 'json.dumps', 'json.dumps', (['content'], {}), '(content)\n', (1500, 1509), False, 'import json\n'), ((333, 354), 'random.choice', 'choice', (['ascii_letters'], {}), '(ascii_letters)\n', (339, 354), False, 'from random import choice\n'), ((444, 465), 'random.choice', 'choice', (['string.digits'], {}), '(string.digits)\n', (450, 465), False, 'from random import choice\n')]
|
import numpy as np
import seaborn as sns
def p_x_given_y(y, mus, sigmas):
mu = mus[0] + sigmas[1, 0] / sigmas[0, 0] * (y - mus[1])
sigma = sigmas[0, 0] - sigmas[1, 0] / sigmas[1, 1] * sigmas[1, 0]
return np.random.normal(mu, sigma)
def p_y_given_x(x, mus, sigmas):
mu = mus[1] + sigmas[0, 1] / sigmas[1, 1] * (x - mus[0])
sigma = sigmas[1, 1] - sigmas[0, 1] / sigmas[0, 0] * sigmas[0, 1]
return np.random.normal(mu, sigma)
def gibbs_sampling(mus, sigmas, iter=10000):
samples = np.zeros((iter, 2))
y = np.random.rand() * 10
for i in range(iter):
x = p_x_given_y(y, mus, sigmas)
y = p_y_given_x(x, mus, sigmas)
samples[i, :] = [x, y]
return samples
if __name__ == "__main__":
mus = np.array([5, 5])
sigmas = np.array([[1, 0.9], [0.9, 1]])
samples = gibbs_sampling(mus, sigmas)
sns.jointplot(samples[:, 0], samples[:, 1])
|
[
"numpy.zeros",
"numpy.array",
"numpy.random.normal",
"seaborn.jointplot",
"numpy.random.rand"
] |
[((218, 245), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (234, 245), True, 'import numpy as np\n'), ((423, 450), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (439, 450), True, 'import numpy as np\n'), ((512, 531), 'numpy.zeros', 'np.zeros', (['(iter, 2)'], {}), '((iter, 2))\n', (520, 531), True, 'import numpy as np\n'), ((759, 775), 'numpy.array', 'np.array', (['[5, 5]'], {}), '([5, 5])\n', (767, 775), True, 'import numpy as np\n'), ((789, 819), 'numpy.array', 'np.array', (['[[1, 0.9], [0.9, 1]]'], {}), '([[1, 0.9], [0.9, 1]])\n', (797, 819), True, 'import numpy as np\n'), ((867, 910), 'seaborn.jointplot', 'sns.jointplot', (['samples[:, 0]', 'samples[:, 1]'], {}), '(samples[:, 0], samples[:, 1])\n', (880, 910), True, 'import seaborn as sns\n'), ((540, 556), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (554, 556), True, 'import numpy as np\n')]
|
from pathlib import Path
ROOT = Path(__file__).parent
TEMPLATES_DIR = ROOT / "templates"
OUT = ROOT / "out"
UBUNTU_VERSIONS = {"18.04": "bionic", "20.04": "focal"}
PYTHON_VERSIONS = {"3.6.15", "3.7.12", "3.8.12", "3.9.7", "3.10.0rc2"}
VERSIONS = ((u_ver, p_ver) for u_ver in UBUNTU_VERSIONS for p_ver in PYTHON_VERSIONS)
PIP_VERSION = "21.2.4"
|
[
"pathlib.Path"
] |
[((33, 47), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (37, 47), False, 'from pathlib import Path\n')]
|
# coding: utf-8
# Creates:
# * cachito_fe_vel_comp.pdf
# In[1]:
import os
import numpy as np
import yaml
from astropy.io import ascii as asc
from astropy.time import Time
import astropy.units as u
import astropy.constants as c
from astropy.modeling import models, fitting
from matplotlib import pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
from utilities_az import supernova
# In[2]:
plt.style.use(['seaborn-paper', 'az-paper-onecol'])
# In[3]:
TEST_FILE_DIR = '../../data/line_info/testing/'
FIG_DIR = './'
DATA_DIR = '../../data/line_info'
# In[4]:
HA = 6563.0
SiII = 6355.0
FeII = 5169.0
IR_dates = Time(['2015-09-05','2015-10-05', '2015-10-10'])
# In[5]:
sn15oz = supernova.LightCurve2('asassn-15oz')
texpl = Time(sn15oz.jdexpl, format='jd')
# In[6]:
new_fit_cachito = asc.read(os.path.join(TEST_FILE_DIR, 'cachito.tab'))
# In[7]:
def calc_velocity(obs_wl, rest_wl):
velocity = c.c*(obs_wl/rest_wl - 1)
return velocity
# In[8]:
phase_cachito = (Time(new_fit_cachito['date'])-texpl).value
velocity_cachito = -1*calc_velocity(new_fit_cachito['vel0'], HA).to(u.km/u.s).value
# In[9]:
#tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_multi.tab'))
#tbdata_feII.remove_columns(['vel1', 'vel_err_left_1', 'vel_err_right_1', 'vel_pew_1', 'vel_pew_err1'])
tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_5169.tab'))
tbdata_feII.rename_column('vel0', 'velocity')
tbdata_feII.rename_column('vel_err_left_0', 'vel_err_left')
tbdata_feII.rename_column('vel_err_right_0', 'vel_err_right')
tbdata_feII.rename_column('vel_pew_0', 'pew')
tbdata_feII.rename_column('vel_pew_err0', 'pew_err')
# In[10]:
phase_feII = (Time(tbdata_feII['date'])-texpl).value
velocity_feII = -1*calc_velocity(tbdata_feII['velocity'], FeII).to(u.km/u.s)
# In[15]:
fig = plt.figure()
fig.subplotpars.update(left=.17, bottom=0.23)
ax_Fe = fig.add_subplot(1,1,1)
ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], SiII).to(u.km/u.s)/1000, '^', label='Cachito (as SiII 6533)')
ax_Fe.plot(phase_feII, velocity_feII/1000, 'o', label='FeII (5169)')
ax_Fe.set_xticks(np.arange(0, 90, 10))
ax_Fe.legend()
ax_Fe.set_ylim(5, 11)
ax_Fe.set_xlim(0, 40)
ax_Fe.set_xlabel('Phase (day)')
ax_Fe.set_ylabel('Velocity (1000 km/s)')
plt.savefig(os.path.join(FIG_DIR, 'cachito_fe_vel_comp.pdf'))
|
[
"astropy.time.Time",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"utilities_az.supernova.LightCurve2",
"os.path.join"
] |
[((426, 477), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['seaborn-paper', 'az-paper-onecol']"], {}), "(['seaborn-paper', 'az-paper-onecol'])\n", (439, 477), True, 'from matplotlib import pyplot as plt\n'), ((660, 708), 'astropy.time.Time', 'Time', (["['2015-09-05', '2015-10-05', '2015-10-10']"], {}), "(['2015-09-05', '2015-10-05', '2015-10-10'])\n", (664, 708), False, 'from astropy.time import Time\n'), ((734, 770), 'utilities_az.supernova.LightCurve2', 'supernova.LightCurve2', (['"""asassn-15oz"""'], {}), "('asassn-15oz')\n", (755, 770), False, 'from utilities_az import supernova\n'), ((779, 811), 'astropy.time.Time', 'Time', (['sn15oz.jdexpl'], {'format': '"""jd"""'}), "(sn15oz.jdexpl, format='jd')\n", (783, 811), False, 'from astropy.time import Time\n'), ((1864, 1876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1874, 1876), True, 'from matplotlib import pyplot as plt\n'), ((856, 898), 'os.path.join', 'os.path.join', (['TEST_FILE_DIR', '"""cachito.tab"""'], {}), "(TEST_FILE_DIR, 'cachito.tab')\n", (868, 898), False, 'import os\n'), ((1384, 1423), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""FeII_5169.tab"""'], {}), "(DATA_DIR, 'FeII_5169.tab')\n", (1396, 1423), False, 'import os\n'), ((2203, 2223), 'numpy.arange', 'np.arange', (['(0)', '(90)', '(10)'], {}), '(0, 90, 10)\n', (2212, 2223), True, 'import numpy as np\n'), ((2369, 2417), 'os.path.join', 'os.path.join', (['FIG_DIR', '"""cachito_fe_vel_comp.pdf"""'], {}), "(FIG_DIR, 'cachito_fe_vel_comp.pdf')\n", (2381, 2417), False, 'import os\n'), ((1047, 1076), 'astropy.time.Time', 'Time', (["new_fit_cachito['date']"], {}), "(new_fit_cachito['date'])\n", (1051, 1076), False, 'from astropy.time import Time\n'), ((1724, 1749), 'astropy.time.Time', 'Time', (["tbdata_feII['date']"], {}), "(tbdata_feII['date'])\n", (1728, 1749), False, 'from astropy.time import Time\n'), ((1967, 1996), 'astropy.time.Time', 'Time', (["new_fit_cachito['date']"], {}), "(new_fit_cachito['date'])\n", (1971, 
1996), False, 'from astropy.time import Time\n')]
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Programmable Proxy Server in a single Python file.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
"""
import logging
import multiprocessing
import selectors
import socket
import threading
# import time
from multiprocessing import connection
from multiprocessing.reduction import send_handle, recv_handle
from typing import List, Optional, Type, Tuple
from .threadless import ThreadlessWork, Threadless
from .event import EventQueue, EventDispatcher, eventNames
from ..common.flags import Flags
logger = logging.getLogger(__name__)
class AcceptorPool:
"""AcceptorPool.
Pre-spawns worker processes to utilize all cores available on the system. Server socket connection is
dispatched over a pipe to workers. Each worker accepts incoming client request and spawns a
separate thread to handle the client request.
"""
def __init__(self, flags: Flags, work_klass: Type[ThreadlessWork]) -> None:
self.flags = flags
self.running: bool = False
self.socket: Optional[socket.socket] = None
self.acceptors: List[Acceptor] = []
self.work_queues: List[connection.Connection] = []
self.work_klass = work_klass
self.event_queue: Optional[EventQueue] = None
self.event_dispatcher: Optional[EventDispatcher] = None
self.event_dispatcher_thread: Optional[threading.Thread] = None
self.event_dispatcher_shutdown: Optional[threading.Event] = None
if self.flags.enable_events:
self.event_queue = EventQueue()
def listen(self) -> None:
self.socket = socket.socket(self.flags.family, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((str(self.flags.hostname), self.flags.port))
self.socket.listen(self.flags.backlog)
self.socket.setblocking(False)
logger.info(
'Listening on %s:%d' %
(self.flags.hostname, self.flags.port))
def start_workers(self) -> None:
"""Start worker processes."""
for acceptor_id in range(self.flags.num_workers):
work_queue = multiprocessing.Pipe()
acceptor = Acceptor(
idd=acceptor_id,
work_queue=work_queue[1],
flags=self.flags,
work_klass=self.work_klass,
event_queue=self.event_queue
)
acceptor.start()
logger.debug('Started acceptor process %d', acceptor.pid)
self.acceptors.append(acceptor)
self.work_queues.append(work_queue[0])
logger.info('Started %d workers' % self.flags.num_workers)
def start_event_dispatcher(self) -> None:
self.event_dispatcher_shutdown = threading.Event()
assert self.event_dispatcher_shutdown
assert self.event_queue
self.event_dispatcher = EventDispatcher(
shutdown=self.event_dispatcher_shutdown,
event_queue=self.event_queue
)
self.event_dispatcher_thread = threading.Thread(
target=self.event_dispatcher.run
)
self.event_dispatcher_thread.start()
logger.debug('Thread ID: %d', self.event_dispatcher_thread.ident)
def shutdown(self) -> None:
logger.info('Shutting down %d workers' % self.flags.num_workers)
if self.flags.enable_events:
assert self.event_dispatcher_shutdown
assert self.event_dispatcher_thread
self.event_dispatcher_shutdown.set()
self.event_dispatcher_thread.join()
logger.debug(
'Shutdown of global event dispatcher thread %d successful',
self.event_dispatcher_thread.ident)
for acceptor in self.acceptors:
acceptor.join()
logger.debug('Acceptors shutdown')
def setup(self) -> None:
"""Listen on port, setup workers and pass server socket to workers."""
self.running = True
self.listen()
if self.flags.enable_events:
self.start_event_dispatcher()
self.start_workers()
# Send server socket to all acceptor processes.
assert self.socket is not None
for index in range(self.flags.num_workers):
send_handle(
self.work_queues[index],
self.socket.fileno(),
self.acceptors[index].pid
)
self.work_queues[index].close()
self.socket.close()
class Acceptor(multiprocessing.Process):
"""Socket client acceptor.
Accepts client connection over received server socket handle and
starts a new work thread.
"""
lock = multiprocessing.Lock()
def __init__(
self,
idd: int,
work_queue: connection.Connection,
flags: Flags,
work_klass: Type[ThreadlessWork],
event_queue: Optional[EventQueue] = None) -> None:
super().__init__()
self.idd = idd
self.work_queue: connection.Connection = work_queue
self.flags = flags
self.work_klass = work_klass
self.event_queue = event_queue
self.running = False
self.selector: Optional[selectors.DefaultSelector] = None
self.sock: Optional[socket.socket] = None
self.threadless_process: Optional[multiprocessing.Process] = None
self.threadless_client_queue: Optional[connection.Connection] = None
def start_threadless_process(self) -> None:
pipe = multiprocessing.Pipe()
self.threadless_client_queue = pipe[0]
self.threadless_process = Threadless(
client_queue=pipe[1],
flags=self.flags,
work_klass=self.work_klass,
event_queue=self.event_queue
)
self.threadless_process.start()
logger.debug('Started process %d', self.threadless_process.pid)
def shutdown_threadless_process(self) -> None:
assert self.threadless_process and self.threadless_client_queue
logger.debug('Stopped process %d', self.threadless_process.pid)
self.threadless_process.join()
self.threadless_client_queue.close()
def start_work(self, conn: socket.socket, addr: Tuple[str, int]) -> None:
if self.flags.threadless and \
self.threadless_client_queue and \
self.threadless_process:
self.threadless_client_queue.send(addr)
send_handle(
self.threadless_client_queue,
conn.fileno(),
self.threadless_process.pid
)
conn.close()
else:
work = self.work_klass(
fileno=conn.fileno(),
addr=addr,
flags=self.flags,
event_queue=self.event_queue
)
work_thread = threading.Thread(target=work.run)
work.publish_event(
event_name=eventNames.WORK_STARTED,
event_payload={'fileno': conn.fileno(), 'addr': addr},
publisher_id=self.__class__.__name__
)
work_thread.start()
def run_once(self) -> None:
assert self.selector and self.sock
with self.lock:
events = self.selector.select(timeout=1)
if len(events) == 0:
return
conn, addr = self.sock.accept()
# now = time.time()
# fileno: int = conn.fileno()
self.start_work(conn, addr)
# logger.info('Work started for fd %d in %f seconds', fileno, time.time() - now)
def run(self) -> None:
self.running = True
self.selector = selectors.DefaultSelector()
fileno = recv_handle(self.work_queue)
self.work_queue.close()
self.sock = socket.fromfd(
fileno,
family=self.flags.family,
type=socket.SOCK_STREAM
)
try:
self.selector.register(self.sock, selectors.EVENT_READ)
if self.flags.threadless:
self.start_threadless_process()
while self.running:
self.run_once()
except KeyboardInterrupt:
pass
finally:
self.selector.unregister(self.sock)
if self.flags.threadless:
self.shutdown_threadless_process()
self.sock.close()
self.running = False
|
[
"threading.Thread",
"multiprocessing.Lock",
"socket.socket",
"socket.fromfd",
"selectors.DefaultSelector",
"threading.Event",
"multiprocessing.Pipe",
"multiprocessing.reduction.recv_handle",
"logging.getLogger"
] |
[((650, 677), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (667, 677), False, 'import logging\n'), ((4808, 4830), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (4828, 4830), False, 'import multiprocessing\n'), ((1717, 1769), 'socket.socket', 'socket.socket', (['self.flags.family', 'socket.SOCK_STREAM'], {}), '(self.flags.family, socket.SOCK_STREAM)\n', (1730, 1769), False, 'import socket\n'), ((2884, 2901), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2899, 2901), False, 'import threading\n'), ((3172, 3222), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.event_dispatcher.run'}), '(target=self.event_dispatcher.run)\n', (3188, 3222), False, 'import threading\n'), ((5646, 5668), 'multiprocessing.Pipe', 'multiprocessing.Pipe', ([], {}), '()\n', (5666, 5668), False, 'import multiprocessing\n'), ((7802, 7829), 'selectors.DefaultSelector', 'selectors.DefaultSelector', ([], {}), '()\n', (7827, 7829), False, 'import selectors\n'), ((7847, 7875), 'multiprocessing.reduction.recv_handle', 'recv_handle', (['self.work_queue'], {}), '(self.work_queue)\n', (7858, 7875), False, 'from multiprocessing.reduction import send_handle, recv_handle\n'), ((7928, 8000), 'socket.fromfd', 'socket.fromfd', (['fileno'], {'family': 'self.flags.family', 'type': 'socket.SOCK_STREAM'}), '(fileno, family=self.flags.family, type=socket.SOCK_STREAM)\n', (7941, 8000), False, 'import socket\n'), ((2267, 2289), 'multiprocessing.Pipe', 'multiprocessing.Pipe', ([], {}), '()\n', (2287, 2289), False, 'import multiprocessing\n'), ((6990, 7023), 'threading.Thread', 'threading.Thread', ([], {'target': 'work.run'}), '(target=work.run)\n', (7006, 7023), False, 'import threading\n')]
|
import gym
import itertools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import baselines.common.tf_util as U
from baselines.common.tf_util import load_state, save_state
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.schedules import LinearSchedule
def model(inpt, num_actions, scope, reuse=False):
    """Feed-forward Q-network: two 128-unit tanh layers and a linear head
    producing one Q-value per action."""
    with tf.variable_scope(scope, reuse=reuse):
        hidden = inpt
        # Two identical hidden layers (same graph as two explicit calls).
        for _ in range(2):
            hidden = layers.fully_connected(hidden, num_outputs=128, activation_fn=tf.nn.tanh)
        return layers.fully_connected(hidden, num_outputs=num_actions, activation_fn=None)
if __name__ == '__main__':
    with U.make_session(8) as sess:
        # Create the environment
        env = gym.make("LunarLander-v2")
        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )
        # Create the schedule for exploration starting from 1 (every action is random)
        # down to 0.02 (98% of actions are selected according to values predicted by
        # the model). Fix: the original passed initial_p=0, final_p=0, which disabled
        # exploration entirely and contradicted this comment.
        exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)
        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()
        # Fix: the Saver must be created *after* the graph variables exist;
        # building it before any variables (as the original did at the top of
        # this block) raises "ValueError: No variables to save".
        saver = tf.train.Saver()
        saver.restore(sess, "./models/custom_model.ckpt")
        obs = env.reset()
        # Fix: `t` drives the exploration schedule but was never defined in the
        # original `while True` loop; use a step counter.
        for t in itertools.count():
            env.render()
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)
            # Fix: advance the observation; the original acted on the initial
            # observation forever.
            obs = new_obs
            if done:
                break
|
[
"gym.make",
"tensorflow.train.Saver",
"tensorflow.contrib.layers.fully_connected",
"baselines.common.tf_util.make_session",
"baselines.common.tf_util.initialize",
"tensorflow.variable_scope",
"baselines.deepq.utils.ObservationInput",
"baselines.common.schedules.LinearSchedule",
"tensorflow.train.AdamOptimizer"
] |
[((868, 884), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (882, 884), True, 'import tensorflow as tf\n'), ((494, 531), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (511, 531), True, 'import tensorflow as tf\n'), ((566, 636), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': '(128)', 'activation_fn': 'tf.nn.tanh'}), '(out, num_outputs=128, activation_fn=tf.nn.tanh)\n', (588, 636), True, 'import tensorflow.contrib.layers as layers\n'), ((651, 721), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': '(128)', 'activation_fn': 'tf.nn.tanh'}), '(out, num_outputs=128, activation_fn=tf.nn.tanh)\n', (673, 721), True, 'import tensorflow.contrib.layers as layers\n'), ((736, 808), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_actions', 'activation_fn': 'None'}), '(out, num_outputs=num_actions, activation_fn=None)\n', (758, 808), True, 'import tensorflow.contrib.layers as layers\n'), ((895, 912), 'baselines.common.tf_util.make_session', 'U.make_session', (['(8)'], {}), '(8)\n', (909, 912), True, 'import baselines.common.tf_util as U\n'), ((969, 995), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (977, 995), False, 'import gym\n'), ((1572, 1636), 'baselines.common.schedules.LinearSchedule', 'LinearSchedule', ([], {'schedule_timesteps': '(10000)', 'initial_p': '(0)', 'final_p': '(0)'}), '(schedule_timesteps=10000, initial_p=0, final_p=0)\n', (1586, 1636), False, 'from baselines.common.schedules import LinearSchedule\n'), ((1719, 1733), 'baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (1731, 1733), True, 'import baselines.common.tf_util as U\n'), ((1303, 1347), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0005)'}), '(learning_rate=0.0005)\n', (1325, 1347), 
True, 'import tensorflow as tf\n'), ((1159, 1209), 'baselines.deepq.utils.ObservationInput', 'ObservationInput', (['env.observation_space'], {'name': 'name'}), '(env.observation_space, name=name)\n', (1175, 1209), False, 'from baselines.deepq.utils import ObservationInput\n')]
|
######################################################################################
# ----------Copyright 2021 Division of Medical and Environmental Computing,----------#
# ----------Technical University of Darmstadt, Darmstadt, Germany--------------------#
######################################################################################
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
# Main driver for running self supervised learning pretext tasks
def main():
    """Parse CLI arguments and report the self-supervision dataset folder
    paths for one nnUNet task.

    Command line arguments:
        -t   integer task id; must match a 'TaskXXX_' folder in the raw data dir.
        -ss  which self-supervision pretext task to train
             (context_restoration | jigsaw_puzzle | byol).
    """
    import argparse
    # Fix: "self- supervisio output" -> "self-supervision output" in the
    # user-visible description.
    parser = argparse.ArgumentParser(description="We extend nnUNet to offer self-supervision tasks. This step is to"
                                                 " split the dataset into two - self-supervision input and "
                                                 "self-supervision output folder.")
    parser.add_argument("-t", type=int, help="Task id. The task name you wish to run self-supervision task for. "
                                             "It must have a matching folder 'TaskXXX_' in the raw "
                                             "data folder", required=True)
    # Fix: "pretext asks" -> "pretext tasks" in the help text.
    parser.add_argument("-ss", help="Run self-supervision pretext tasks. Specify which self-supervision task you "
                                    "wish to train. Current supported tasks: context_restoration| jigsaw_puzzle | byol")
    args = parser.parse_args()

    # nnUNet convention: raw data lives under
    # $nnUNet_raw_data_base/nnUNet_raw_data/TaskXXX_<name>
    base = join(os.environ['nnUNet_raw_data_base'], 'nnUNet_raw_data')
    task_name = convert_id_to_task_name(args.t)
    target_base = join(base, task_name)
    pretext = str(args.ss)
    # NOTE(review): the reported folders are hard-coded to the BYOL naming
    # ("ssInputBYOL"/"ssOutputBYOL") even when another pretext task is chosen —
    # confirm whether they should be derived from `pretext` instead.
    print(f'Hey there: here\'s pretext task {pretext} for {task_name}. '
          f'Path to get ss datasets are {join(target_base, "ssInput" + "BYOL")} and {join(target_base, "ssOutput" + "BYOL")}')


if __name__ == "__main__":
    main()
|
[
"nnunet.utilities.task_name_id_conversion.convert_id_to_task_name",
"argparse.ArgumentParser"
] |
[((605, 807), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""We extend nnUNet to offer self-supervision tasks. This step is to split the dataset into two - self-supervision input and self- supervisio output folder."""'}), "(description=\n 'We extend nnUNet to offer self-supervision tasks. This step is to split the dataset into two - self-supervision input and self- supervisio output folder.'\n )\n", (628, 807), False, 'import argparse\n'), ((1540, 1571), 'nnunet.utilities.task_name_id_conversion.convert_id_to_task_name', 'convert_id_to_task_name', (['args.t'], {}), '(args.t)\n', (1563, 1571), False, 'from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name\n')]
|
import json
from collections import OrderedDict
from django import forms
from django.dispatch import receiver
from django.template.loader import get_template
from django.utils.translation import gettext_lazy as _
from pretix.base.forms import SecretKeySettingsField
from pretix.base.signals import (
logentry_display, register_global_settings, register_payment_providers,
requiredaction_display,
)
@receiver(register_payment_providers, dispatch_uid="payment_paypal")
def register_payment_provider(sender, **kwargs):
    # Signal handler: return the Paypal provider class so pretix registers it.
    # The import is local — presumably to avoid import cycles at app load;
    # confirm before hoisting it to module level.
    from .payment import Paypal
    return Paypal
@receiver(signal=logentry_display, dispatch_uid="paypal_logentry_display")
def pretixcontrol_logentry_display(sender, logentry, **kwargs):
    """Return a human-readable description for PayPal webhook log entries.

    Non-paypal entries are ignored; known event types get a translated
    message, unknown ones are echoed verbatim.
    """
    if logentry.action_type != 'pretix.plugins.paypal.event':
        return

    event_type = json.loads(logentry.data).get('event_type')
    plains = {
        'PAYMENT.SALE.COMPLETED': _('Payment completed.'),
        'PAYMENT.SALE.DENIED': _('Payment denied.'),
        'PAYMENT.SALE.REFUNDED': _('Payment refunded.'),
        'PAYMENT.SALE.REVERSED': _('Payment reversed.'),
        'PAYMENT.SALE.PENDING': _('Payment pending.'),
    }
    # Fall back to the raw event type when there is no translation for it.
    text = plains.get(event_type, event_type)
    if text:
        return _('PayPal reported an event: {}').format(text)
@receiver(signal=requiredaction_display, dispatch_uid="paypal_requiredaction_display")
def pretixcontrol_action_display(sender, action, request, **kwargs):
    """Render the HTML fragment describing a pending PayPal required action.

    Fix: the original left `template` unbound for any paypal action type
    other than the three known ones, raising UnboundLocalError at
    `template.render(...)`; unknown types now return None instead.
    """
    if not action.action_type.startswith('pretix.plugins.paypal'):
        return
    data = json.loads(action.data)

    # Map each handled action type to its template.
    templates = {
        'pretix.plugins.paypal.refund': 'pretixplugins/paypal/action_refund.html',
        'pretix.plugins.paypal.overpaid': 'pretixplugins/paypal/action_overpaid.html',
        'pretix.plugins.paypal.double': 'pretixplugins/paypal/action_double.html',
    }
    template_name = templates.get(action.action_type)
    if template_name is None:
        return
    template = get_template(template_name)
    ctx = {'data': data, 'event': sender, 'action': action}
    return template.render(ctx, request)
@receiver(register_global_settings, dispatch_uid='paypal_global_settings')
def register_global_settings(sender, **kwargs):
    # Contributes the PayPal Connect credential/endpoint fields to pretix's
    # global settings form.
    # NOTE(review): this function name shadows the `register_global_settings`
    # signal imported above. It works because the decorator argument is
    # evaluated before the rebinding, but a distinct name would be clearer.
    return OrderedDict([
        ('payment_paypal_connect_client_id', forms.CharField(
            label=_('PayPal Connect: Client ID'),
            required=False,
        )),
        ('payment_paypal_connect_secret_key', SecretKeySettingsField(
            label=_('PayPal Connect: Secret key'),
            required=False,
        )),
        ('payment_paypal_connect_endpoint', forms.ChoiceField(
            label=_('PayPal Connect Endpoint'),
            initial='live',
            choices=(
                ('live', 'Live'),
                ('sandbox', 'Sandbox'),
            ),
        )),
    ])
|
[
"django.template.loader.get_template",
"django.dispatch.receiver",
"json.loads",
"django.utils.translation.gettext_lazy"
] |
[((411, 478), 'django.dispatch.receiver', 'receiver', (['register_payment_providers'], {'dispatch_uid': '"""payment_paypal"""'}), "(register_payment_providers, dispatch_uid='payment_paypal')\n", (419, 478), False, 'from django.dispatch import receiver\n'), ((581, 654), 'django.dispatch.receiver', 'receiver', ([], {'signal': 'logentry_display', 'dispatch_uid': '"""paypal_logentry_display"""'}), "(signal=logentry_display, dispatch_uid='paypal_logentry_display')\n", (589, 654), False, 'from django.dispatch import receiver\n'), ((1371, 1461), 'django.dispatch.receiver', 'receiver', ([], {'signal': 'requiredaction_display', 'dispatch_uid': '"""paypal_requiredaction_display"""'}), "(signal=requiredaction_display, dispatch_uid=\n 'paypal_requiredaction_display')\n", (1379, 1461), False, 'from django.dispatch import receiver\n'), ((2166, 2239), 'django.dispatch.receiver', 'receiver', (['register_global_settings'], {'dispatch_uid': '"""paypal_global_settings"""'}), "(register_global_settings, dispatch_uid='paypal_global_settings')\n", (2174, 2239), False, 'from django.dispatch import receiver\n'), ((808, 833), 'json.loads', 'json.loads', (['logentry.data'], {}), '(logentry.data)\n', (818, 833), False, 'import json\n'), ((1620, 1643), 'json.loads', 'json.loads', (['action.data'], {}), '(action.data)\n', (1630, 1643), False, 'import json\n'), ((939, 962), 'django.utils.translation.gettext_lazy', '_', (['"""Payment completed."""'], {}), "('Payment completed.')\n", (940, 962), True, 'from django.utils.translation import gettext_lazy as _\n'), ((995, 1015), 'django.utils.translation.gettext_lazy', '_', (['"""Payment denied."""'], {}), "('Payment denied.')\n", (996, 1015), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1050, 1072), 'django.utils.translation.gettext_lazy', '_', (['"""Payment refunded."""'], {}), "('Payment refunded.')\n", (1051, 1072), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1107, 1129), 
'django.utils.translation.gettext_lazy', '_', (['"""Payment reversed."""'], {}), "('Payment reversed.')\n", (1108, 1129), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1163, 1184), 'django.utils.translation.gettext_lazy', '_', (['"""Payment pending."""'], {}), "('Payment pending.')\n", (1164, 1184), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1725, 1780), 'django.template.loader.get_template', 'get_template', (['"""pretixplugins/paypal/action_refund.html"""'], {}), "('pretixplugins/paypal/action_refund.html')\n", (1737, 1780), False, 'from django.template.loader import get_template\n'), ((1865, 1922), 'django.template.loader.get_template', 'get_template', (['"""pretixplugins/paypal/action_overpaid.html"""'], {}), "('pretixplugins/paypal/action_overpaid.html')\n", (1877, 1922), False, 'from django.template.loader import get_template\n'), ((1321, 1354), 'django.utils.translation.gettext_lazy', '_', (['"""PayPal reported an event: {}"""'], {}), "('PayPal reported an event: {}')\n", (1322, 1354), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2005, 2060), 'django.template.loader.get_template', 'get_template', (['"""pretixplugins/paypal/action_double.html"""'], {}), "('pretixplugins/paypal/action_double.html')\n", (2017, 2060), False, 'from django.template.loader import get_template\n'), ((2393, 2423), 'django.utils.translation.gettext_lazy', '_', (['"""PayPal Connect: Client ID"""'], {}), "('PayPal Connect: Client ID')\n", (2394, 2423), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2553, 2584), 'django.utils.translation.gettext_lazy', '_', (['"""PayPal Connect: Secret key"""'], {}), "('PayPal Connect: Secret key')\n", (2554, 2584), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2707, 2735), 'django.utils.translation.gettext_lazy', '_', (['"""PayPal Connect Endpoint"""'], {}), "('PayPal Connect Endpoint')\n", (2708, 2735), True, 'from 
django.utils.translation import gettext_lazy as _\n')]
|
from functools import total_ordering
from MinPQ import MinPQ
"""
Write a program to solve the 8-puzzle problem
(and its natural generalizations) using the A* search algorithm
"""
class Board():
    """An n x n sliding-puzzle board stored as a flat, row-major tile list."""
    def __init__(self, array):
        """constructor takes an
        n x n list of lists containing
        the n ** 2 integers between 0 and n ** 2 - 1,
        where 0 represents the blank square
        """
        # Row-major flattening; `dimension` is the side length n.
        # NOTE: Board([]) is used internally (neighbors/twin) as an empty shell
        # whose `flat`/`dimension` are patched in afterwards.
        self.flat = [tile for row in array for tile in row]
        self.dimension = len(array)
    def __repr__(self):
        # First line shows the dimension, then one line per board row.
        output = "\n" + str(self.dimension)
        for n in range(len(self.flat)):
            if n % (self.dimension) == 0:
                output += "\n" + str(self.flat[n])
            else:
                output += " " + str(self.flat[n])
        return output
    def hamming_distance(self):
        """Number of tiles out of place
        """
        # Checks slot `pos` (1-based) against its goal tile value; the last
        # slot (the blank's goal position) is never examined.
        # NOTE(review): the blank (0) is counted as misplaced whenever it is
        # not in the last slot; the classic Hamming heuristic excludes the
        # blank — confirm which convention is intended here.
        distance = 0
        for pos in range(1, len(self.flat)):
            if pos != self.flat[pos - 1]:
                distance += 1
        return distance
    def manhattan_distance(self):
        """Sum of manhattan distances
        between self and goal
        """
        distance = 0
        for pos in range(1, len(self.flat) + 1):
            tile = self.flat[pos - 1]
            # (row, col) of the slot currently holding `tile` ...
            actual = ((pos - 1) // self.dimension,
                      (pos - 1) % self.dimension)
            # ... and of the slot where `tile` belongs in the goal board.
            goal = ((tile - 1) // self.dimension,
                    (tile - 1) % self.dimension)
            if tile != 0:
                # The blank contributes nothing to the heuristic.
                distance += abs(goal[0] - actual[0]) + abs(goal[1] - actual[1])
        return distance
    def neighbors(self):
        # Yield one Board per legal slide of the blank (2-4 neighbors).
        for board in self._neighbor_boards():
            neighbor = Board([])
            neighbor.dimension = self.dimension
            neighbor.flat = board
            yield neighbor
    def _neighbor_boards(self):
        # Locate the blank, both as a flat index and as (row, col).
        for k in range(0, len(self.flat)):
            if self.flat[k] == 0:
                pos_0 = k
                coords = ((k // self.dimension), (k % self.dimension))
                break
        neighbor_boards = []
        # horizontal neighbors
        if coords[1] == 0:
            # blank on the left edge: only the right tile can slide in
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 + 1))
        elif coords[1] == self.dimension - 1:
            # blank on the right edge: only the left tile can slide in
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 - 1))
        else:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 - 1))
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 + 1))
        # vertical neighbors
        if coords[0] == 0:
            # blank on the top row: only the tile below can slide in
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 + self.dimension))
        elif coords[0] == self.dimension - 1:
            # blank on the bottom row: only the tile above can slide in
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 - self.dimension))
        else:
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 + self.dimension))
            neighbor_boards.append(self._exch(self.flat.copy(),
                                   pos_0, pos_0 - self.dimension))
        return neighbor_boards
    @staticmethod
    def _exch(flat, a, b):
        # Swap the tiles at flat indices a and b (mutates and returns `flat`).
        swap = flat[a]
        flat[a] = flat[b]
        flat[b] = swap
        return flat
    def is_goal(self):
        """Is this the goal board?
        """
        # Goal layout: tiles 1..n^2-1 in order with the blank (0) last;
        # the final slot is implied and never checked.
        for k in range(len(self.flat) - 1):
            if k + 1 != self.flat[k]:
                return False
        return True
    def __eq__(self, other):
        # Boards are equal iff their tile layouts match.
        # NOTE(review): defining __eq__ without __hash__ makes Board unhashable.
        return (self.flat == other.flat)
    def twin(self):
        """a board that is obtained
        by exchanging any pair of tiles
        """
        # Swaps the first two non-blank tiles. Exactly one of {board, twin}
        # is solvable, which Solver exploits for its unsolvability test.
        for i in range(len(self.flat)):
            if self.flat[i] != 0:
                for j in range(i + 1, len(self.flat)):
                    if self.flat[j] != 0:
                        t = self._exch(self.flat.copy(), i, j)
                        break
                break
        tw = Board([])
        tw.flat = t
        tw.dimension = self.dimension
        return tw
@total_ordering
class SearchNode():
    """A* search node: a board plus the move count used to reach it.

    `priority` is f(n) = g(n) + h(n): moves so far plus the Manhattan
    heuristic. Ordering (filled in by @total_ordering) compares priorities
    only, which is what MinPQ needs.
    """

    def __init__(self, board: Board, moves: int, prev):
        self.board = board
        self.moves = moves
        self.prev = prev
        # Cache the priority once; the board never changes afterwards.
        self.priority = self.board.manhattan_distance() + moves

    def __eq__(self, other):
        return self.priority == other.priority

    def __lt__(self, other):
        return self.priority < other.priority
class Solver():
    """A* solver for the n-puzzle.

    Runs A* on the initial board and on its twin (one pair of tiles swapped)
    in lockstep: exactly one of the two is solvable, so reaching the goal on
    the twin proves the original board is unsolvable.
    """

    def __init__(self, initial: Board):
        self.init_node = SearchNode(initial, 0, None)
        self.queue = MinPQ()
        self.queue.insert(self.init_node)
        self.twin_init = SearchNode(initial.twin(), 0, None)
        self.twin_queue = MinPQ()
        self.twin_queue.insert(self.twin_init)

    def is_solvable(self):
        """True iff the initial board can reach the goal."""
        return self.solution()[0] != -1

    def n_moves(self):
        """Minimum number of moves to solve, or -1 if unsolvable."""
        return self.solution()[0]

    @staticmethod
    def seek_soln(queue):
        """Perform one A* expansion step on `queue`.

        Yields the goal node when the dequeued node is the goal; otherwise
        enqueues its neighbors (skipping the grandparent board) and yields
        False.
        """
        while True:
            min_node = queue.del_min()
            if min_node.board.is_goal():
                yield min_node
            for neighbor in min_node.board.neighbors():
                # Critical optimization: never re-enqueue the board we came from.
                if min_node.prev is None or neighbor != min_node.prev.board:
                    queue.insert(SearchNode(
                        neighbor, min_node.moves + 1, min_node))
            yield False

    def solution(self):
        """Return (moves, [Boards from initial to goal]) or (-1, None)."""
        min_node = False
        impossible = False
        while min_node is False and impossible is False:
            min_node = next(self.seek_soln(self.queue))
            impossible = next(self.seek_soln(self.twin_queue))
        if not impossible:
            moves = min_node.moves
            solns = []
            while min_node.prev is not None:
                solns.append(min_node.board)
                min_node = min_node.prev
            # Fix: append the initial *board*; the original appended the
            # SearchNode itself, mixing a node into a list of Boards.
            solns.append(min_node.board)
            solns.reverse()
            return (moves, solns)
        else:
            return (-1, None)
# Demo: a solvable 3x3 instance.
sample = Board([
    [5, 2, 6],
    [3, 7, 1],
    [8, 4, 0],
])
# Swapping one pair of tiles (7 and 8) relative to the goal layout yields
# an unsolvable board — the twin-search in Solver detects this.
impossible_sample = Board([
    [1, 2, 3],
    [4, 5, 6],
    [8, 7, 0],
])
s = Solver(sample)
i = Solver(impossible_sample)
# Prints (moves, [boards on the path]) for the solvable case and
# (-1, None) for the unsolvable one.
print(s.solution())
print(i.solution())
|
[
"MinPQ.MinPQ"
] |
[((4847, 4854), 'MinPQ.MinPQ', 'MinPQ', ([], {}), '()\n', (4852, 4854), False, 'from MinPQ import MinPQ\n'), ((4984, 4991), 'MinPQ.MinPQ', 'MinPQ', ([], {}), '()\n', (4989, 4991), False, 'from MinPQ import MinPQ\n')]
|
import os
# Directory containing this module — presumably the base path where migration
# scripts live; confirm against the code that consumes it.
MIGRATION_BASE_DIR = os.path.dirname(__file__)
# Schema/config versions this migration code accepts as a starting point.
accept_versions = {'1.9.0', '1.10.0', '2.0.0'}
|
[
"os.path.dirname"
] |
[((32, 57), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (47, 57), False, 'import os\n')]
|
import numpy as np
from sklearn.metrics import roc_auc_score,jaccard_score
import cv2
from torch import nn
import torch.nn.functional as F
import math
from functools import wraps
import warnings
import weakref
from torch.optim.optimizer import Optimizer
class WeightedBCE(nn.Module):
    """Binary cross-entropy with separate positive/negative class weighting.

    Each class's summed per-pixel loss is normalized by that class's pixel
    count, then scaled: weights[0] applies to positives (truth > 0.5) and
    weights[1] to negatives (truth < 0.5).
    """

    def __init__(self, weights=(0.4, 0.6)):
        """weights: (positive-class weight, negative-class weight).

        Fix: the default is now an immutable tuple instead of a shared
        mutable list (mutable-default-argument anti-pattern); callers that
        pass lists are unaffected since the value is only indexed.
        """
        super(WeightedBCE, self).__init__()
        self.weights = weights

    def forward(self, logit_pixel, truth_pixel):
        """Return the scalar weighted BCE loss.

        `logit_pixel` must hold probabilities in [0, 1] (binary_cross_entropy
        expects probabilities, not raw logits, despite the parameter name).
        """
        logit = logit_pixel.view(-1)
        truth = truth_pixel.view(-1)
        assert(logit.shape==truth.shape)

        loss = F.binary_cross_entropy(logit, truth, reduction='none')
        pos = (truth > 0.5).float()
        neg = (truth < 0.5).float()
        # Per-class normalization; the epsilon guards against an empty class.
        pos_weight = pos.sum().item() + 1e-12
        neg_weight = neg.sum().item() + 1e-12
        loss = (self.weights[0] * pos * loss / pos_weight
                + self.weights[1] * neg * loss / neg_weight).sum()
        return loss
class WeightedDiceLoss(nn.Module):
    """Soft Dice loss with per-pixel weighting by target class.

    Pixels where the target is 1 are weighted by weights[1] and pixels
    where it is 0 by weights[0] before the Dice overlap is computed; the
    result is 1 - Dice averaged over the batch.
    """

    def __init__(self, weights=(0.5, 0.5)):
        """weights: (weight where target == 0, weight where target == 1).

        Fix: the default is now an immutable tuple instead of a shared
        mutable list; a stale comment claiming W_pos=0.8, W_neg=0.2 (which
        contradicted the actual default) was removed.
        """
        super(WeightedDiceLoss, self).__init__()
        self.weights = weights

    def forward(self, logit, truth, smooth=1e-5):
        """Mean (1 - weighted soft Dice) over the batch."""
        batch_size = len(logit)
        # Fix: flatten each sample once; the original called .view twice
        # (p/t duplicated logit/truth) for no effect.
        p = logit.view(batch_size, -1)
        t = truth.view(batch_size, -1)
        assert(p.shape==t.shape)
        # Per-pixel weight: weights[1] where truth==1, weights[0] where truth==0.
        w = truth.detach()
        w = w * (self.weights[1] - self.weights[0]) + self.weights[0]
        p = w * (p)
        t = w * (t)
        intersection = (p * t).sum(-1)
        union = (p * p).sum(-1) + (t * t).sum(-1)
        # `smooth` avoids 0/0 when both masks are empty.
        dice = 1 - (2 * intersection + smooth) / (union + smooth)
        loss = dice.mean()
        return loss
class WeightedDiceBCE(nn.Module):
    """Combined loss: weighted soft Dice plus weighted BCE, each scaled by
    its own coefficient."""

    def __init__(self, dice_weight=1, BCE_weight=1):
        super(WeightedDiceBCE, self).__init__()
        self.BCE_loss = WeightedBCE(weights=[0.5, 0.5])
        self.dice_loss = WeightedDiceLoss(weights=[0.5, 0.5])
        self.BCE_weight = BCE_weight
        self.dice_weight = dice_weight

    def _show_dice(self, inputs, targets):
        """Hard Dice coefficient after thresholding.

        NOTE: binarizes `inputs` (at 0.5) and `targets` (at > 0) in place.
        """
        inputs[inputs >= 0.5] = 1
        inputs[inputs < 0.5] = 0
        targets[targets > 0] = 1
        targets[targets <= 0] = 0
        return 1.0 - self.dice_loss(inputs, targets)

    def forward(self, inputs, targets):
        """Weighted sum of the soft Dice loss and the BCE loss."""
        dice_term = self.dice_weight * self.dice_loss(inputs, targets)
        bce_term = self.BCE_weight * self.BCE_loss(inputs, targets)
        return dice_term + bce_term
def auc_on_batch(masks, pred):
    '''Computes the mean Area Under ROC Curve over a batch during training.

    Fix: iterate the batch dimension pred.shape[0], as iou_on_batch and
    dice_on_batch do; the original iterated pred.shape[1] (the channel
    dimension), so only the first sample's AUC was ever averaged.
    '''
    aucs = []
    for i in range(pred.shape[0]):
        prediction = pred[i][0].cpu().detach().numpy()
        mask = masks[i].cpu().detach().numpy()
        # roc_auc_score expects flat label/score vectors.
        aucs.append(roc_auc_score(mask.reshape(-1), prediction.reshape(-1)))
    return np.mean(aucs)
def iou_on_batch(masks, pred):
    '''Computes the mean Jaccard (IoU) score over a batch during training.'''
    ious = []
    for idx in range(pred.shape[0]):
        pred_np = pred[idx][0].cpu().detach().numpy()
        mask_np = masks[idx].cpu().detach().numpy()
        # Binarize: predictions thresholded at 0.5, masks at > 0.
        pred_np[pred_np >= 0.5] = 1
        pred_np[pred_np < 0.5] = 0
        mask_np[mask_np > 0] = 1
        mask_np[mask_np <= 0] = 0
        ious.append(jaccard_score(mask_np.reshape(-1), pred_np.reshape(-1)))
    return np.mean(ious)
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two arrays (smoothed to avoid 0/0)."""
    eps = 1e-5
    t = y_true.flatten()
    p = y_pred.flatten()
    overlap = np.sum(t * p)
    total = np.sum(t) + np.sum(p)
    return (2. * overlap + eps) / (total + eps)
def dice_on_batch(masks, pred):
    '''Computes the mean hard Dice coefficient over a batch during training.'''
    scores = []
    for idx in range(pred.shape[0]):
        pred_np = pred[idx][0].cpu().detach().numpy()
        mask_np = masks[idx].cpu().detach().numpy()
        # Binarize before scoring: predictions at 0.5, masks at > 0.
        pred_np[pred_np >= 0.5] = 1
        pred_np[pred_np < 0.5] = 0
        mask_np[mask_np > 0] = 1
        mask_np[mask_np <= 0] = 0
        scores.append(dice_coef(mask_np, pred_np))
    return np.mean(scores)
def save_on_batch(images1, masks, pred, names, vis_path):
    '''Writes binarized prediction and ground-truth masks to disk as JPEGs.'''
    for idx in range(pred.shape[0]):
        pred_img = pred[idx][0].cpu().detach().numpy()
        mask_img = masks[idx].cpu().detach().numpy()
        # Scale to 8-bit black/white images.
        pred_img[pred_img >= 0.5] = 255
        pred_img[pred_img < 0.5] = 0
        mask_img[mask_img > 0] = 255
        mask_img[mask_img <= 0] = 0
        stem = names[idx][:-4]
        cv2.imwrite(vis_path + stem + "_pred.jpg", pred_img)
        cv2.imwrite(vis_path + stem + "_gt.jpg", mask_img)
class _LRScheduler(object):
    """Base learning-rate scheduler (vendored from PyTorch's
    torch.optim.lr_scheduler)."""
    def __init__(self, optimizer, last_epoch=-1):
        # Attach optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        # Initialize epoch and base learning rates
        if last_epoch == -1:
            # Fresh run: remember each group's starting lr as 'initial_lr'.
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            # Resuming: every param group must already carry 'initial_lr'.
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.last_epoch = last_epoch
        # Following https://github.com/pytorch/pytorch/issues/20124
        # We would like to ensure that `lr_scheduler.step()` is called after
        # `optimizer.step()`
        def with_counter(method):
            # Wrap optimizer.step so calls are counted; step() below uses the
            # counter to warn about a wrong scheduler/optimizer call order.
            if getattr(method, '_with_counter', False):
                # `optimizer.step()` has already been replaced, return.
                return method
            # Keep a weak reference to the optimizer instance to prevent
            # cyclic references.
            instance_ref = weakref.ref(method.__self__)
            # Get the unbound method for the same purpose.
            func = method.__func__
            cls = instance_ref().__class__
            del method
            @wraps(func)
            def wrapper(*args, **kwargs):
                instance = instance_ref()
                instance._step_count += 1
                wrapped = func.__get__(instance, cls)
                return wrapped(*args, **kwargs)
            # Note that the returned function here is no longer a bound method,
            # so attributes like `__func__` and `__self__` no longer exist.
            wrapper._with_counter = True
            return wrapper
        self.optimizer.step = with_counter(self.optimizer.step)
        self.optimizer._step_count = 0
        self._step_count = 0
        # Initial step: advances last_epoch to its starting value and applies
        # the schedule's first lr to every param group.
        self.step()
    def state_dict(self):
        """Returns the state of the scheduler as a :class:`dict`.
        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
    def load_state_dict(self, state_dict):
        """Loads the schedulers state.
        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)
    def get_last_lr(self):
        """ Return last computed learning rate by current scheduler.
        """
        return self._last_lr
    def get_lr(self):
        # Compute learning rate using chainable form of the scheduler
        raise NotImplementedError
    def step(self, epoch=None):
        """Advance the schedule by one epoch (or jump to `epoch`) and write
        the new learning rates into the optimizer's param groups."""
        # Raise a warning if old pattern is detected
        # https://github.com/pytorch/pytorch/issues/20124
        if self._step_count == 1:
            if not hasattr(self.optimizer.step, "_with_counter"):
                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                              "initialization. Please, make sure to call `optimizer.step()` before "
                              "`lr_scheduler.step()`. See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
            # Just check if there were two first lr_scheduler.step() calls before optimizer.step()
            elif self.optimizer._step_count < 1:
                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
                              "will result in PyTorch skipping the first value of the learning rate schedule. "
                              "See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
        self._step_count += 1
        # Context manager flagging that get_lr() is being called from inside
        # step(), so subclasses can suppress their "called directly" warning.
        class _enable_get_lr_call:
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
                return self
        with _enable_get_lr_call(self):
            if epoch is None:
                self.last_epoch += 1
                values = self.get_lr()
            else:
                self.last_epoch = epoch
                # Prefer a closed-form computation when the subclass offers one.
                if hasattr(self, "_get_closed_form_lr"):
                    values = self._get_closed_form_lr()
                else:
                    values = self.get_lr()
        for param_group, lr in zip(self.optimizer.param_groups, values):
            param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
class CosineAnnealingWarmRestarts(_LRScheduler):
    r"""Set the learning rate of each parameter group using a cosine annealing
    schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
    is the number of epochs since the last restart and :math:`T_{i}` is the number
    of epochs between two warm restarts in SGDR:
    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
        \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right)
    When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
    When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`.
    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_0 (int): Number of iterations for the first restart.
        T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
        eta_min (float, optional): Minimum learning rate. Default: 0.
        last_epoch (int, optional): The index of last epoch. Default: -1.
    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """
    def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1):
        # Validate the first restart period and the period multiplier.
        if T_0 <= 0 or not isinstance(T_0, int):
            raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
        if T_mult < 1 or not isinstance(T_mult, int):
            raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
        self.T_0 = T_0
        self.T_i = T_0
        self.T_mult = T_mult
        self.eta_min = eta_min
        super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
        self.T_cur = self.last_epoch
    def get_lr(self):
        # Warn if called directly rather than via step() (see base class).
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", DeprecationWarning)
        # Cosine annealing within the current restart cycle.
        return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
                for base_lr in self.base_lrs]
    def step(self, epoch=None):
        """Step could be called after every batch update
        Example:
            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
            >>> iters = len(dataloader)
            >>> for epoch in range(20):
            >>>     for i, sample in enumerate(dataloader):
            >>>         inputs, labels = sample['inputs'], sample['labels']
            >>>         scheduler.step(epoch + i / iters)
            >>>         optimizer.zero_grad()
            >>>         outputs = net(inputs)
            >>>         loss = criterion(outputs, labels)
            >>>         loss.backward()
            >>>         optimizer.step()
        This function can be called in an interleaved way.
        Example:
            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
            >>> for epoch in range(20):
            >>>     scheduler.step()
            >>> scheduler.step(26)
            >>> scheduler.step() # scheduler.step(27), instead of scheduler(20)
        """
        if epoch is None and self.last_epoch < 0:
            epoch = 0
        if epoch is None:
            # Implicit stepping: advance within the cycle, restarting when the
            # cycle length T_i is reached (and growing it by T_mult).
            epoch = self.last_epoch + 1
            self.T_cur = self.T_cur + 1
            if self.T_cur >= self.T_i:
                self.T_cur = self.T_cur - self.T_i
                self.T_i = self.T_i * self.T_mult
        else:
            if epoch < 0:
                raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
            if epoch >= self.T_0:
                if self.T_mult == 1:
                    # Equal-length cycles: position is simply epoch mod T_0.
                    self.T_cur = epoch % self.T_0
                else:
                    # Geometric cycles: solve the geometric-series sum for the
                    # index n of the cycle containing `epoch`.
                    n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
                    self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
                    self.T_i = self.T_0 * self.T_mult ** (n)
            else:
                # Still inside the first cycle.
                self.T_i = self.T_0
                self.T_cur = epoch
        self.last_epoch = math.floor(epoch)
        # See base class: flags that get_lr() is invoked from within step().
        class _enable_get_lr_call:
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
                return self
        with _enable_get_lr_call(self):
            for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
                param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
|
[
"torch.nn.functional.binary_cross_entropy",
"numpy.sum",
"cv2.imwrite",
"math.floor",
"numpy.mean",
"math.cos",
"functools.wraps",
"warnings.warn",
"math.log",
"weakref.ref"
] |
[((3473, 3486), 'numpy.mean', 'np.mean', (['aucs'], {}), '(aucs)\n', (3480, 3486), True, 'import numpy as np\n'), ((4124, 4137), 'numpy.mean', 'np.mean', (['ious'], {}), '(ious)\n', (4131, 4137), True, 'import numpy as np\n'), ((4271, 4298), 'numpy.sum', 'np.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (4277, 4298), True, 'import numpy as np\n'), ((5000, 5014), 'numpy.mean', 'np.mean', (['dices'], {}), '(dices)\n', (5007, 5014), True, 'import numpy as np\n'), ((628, 682), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['logit', 'truth'], {'reduction': '"""none"""'}), "(logit, truth, reduction='none')\n", (650, 682), True, 'import torch.nn.functional as F\n'), ((5442, 5503), 'cv2.imwrite', 'cv2.imwrite', (["(vis_path + names[i][:-4] + '_pred.jpg')", 'pred_tmp'], {}), "(vis_path + names[i][:-4] + '_pred.jpg', pred_tmp)\n", (5453, 5503), False, 'import cv2\n'), ((5509, 5568), 'cv2.imwrite', 'cv2.imwrite', (["(vis_path + names[i][:-4] + '_gt.jpg')", 'mask_tmp'], {}), "(vis_path + names[i][:-4] + '_gt.jpg', mask_tmp)\n", (5520, 5568), False, 'import cv2\n'), ((15174, 15191), 'math.floor', 'math.floor', (['epoch'], {}), '(epoch)\n', (15184, 15191), False, 'import math\n'), ((6976, 7004), 'weakref.ref', 'weakref.ref', (['method.__self__'], {}), '(method.__self__)\n', (6987, 7004), False, 'import weakref\n'), ((7179, 7190), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (7184, 7190), False, 'from functools import wraps\n'), ((12852, 12983), 'warnings.warn', 'warnings.warn', (['"""To get the last learning rate computed by the scheduler, please use `get_last_lr()`."""', 'DeprecationWarning'], {}), "(\n 'To get the last learning rate computed by the scheduler, please use `get_last_lr()`.'\n , DeprecationWarning)\n", (12865, 12983), False, 'import warnings\n'), ((4342, 4358), 'numpy.sum', 'np.sum', (['y_true_f'], {}), '(y_true_f)\n', (4348, 4358), True, 'import numpy as np\n'), ((4361, 4377), 'numpy.sum', 'np.sum', 
(['y_pred_f'], {}), '(y_pred_f)\n', (4367, 4377), True, 'import numpy as np\n'), ((8904, 9205), 'warnings.warn', 'warnings.warn', (['"""Seems like `optimizer.step()` has been overridden after learning rate scheduler initialization. Please, make sure to call `optimizer.step()` before `lr_scheduler.step()`. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"""', 'UserWarning'], {}), "(\n 'Seems like `optimizer.step()` has been overridden after learning rate scheduler initialization. Please, make sure to call `optimizer.step()` before `lr_scheduler.step()`. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate'\n , UserWarning)\n", (8917, 9205), False, 'import warnings\n'), ((9460, 9876), 'warnings.warn', 'warnings.warn', (['"""Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"""', 'UserWarning'], {}), "(\n 'Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. 
See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate'\n , UserWarning)\n", (9473, 9876), False, 'import warnings\n'), ((14836, 14899), 'math.log', 'math.log', (['(epoch / self.T_0 * (self.T_mult - 1) + 1)', 'self.T_mult'], {}), '(epoch / self.T_0 * (self.T_mult - 1) + 1, self.T_mult)\n', (14844, 14899), False, 'import math\n'), ((13067, 13108), 'math.cos', 'math.cos', (['(math.pi * self.T_cur / self.T_i)'], {}), '(math.pi * self.T_cur / self.T_i)\n', (13075, 13108), False, 'import math\n')]
|
import unittest
import numpy as np
from scipy.optimize import root
from scipy.interpolate import interp1d
from scipy.stats import entropy, poisson
import warnings
from epipack.numeric_epi_models import (
DynamicBirthRate,
ConstantBirthRate,
DynamicLinearRate,
ConstantLinearRate,
DynamicQuadraticRate,
ConstantQuadraticRate,
EpiModel,
SISModel,
SIModel,
SIRModel,
SEIRModel,
SIRSModel,
)
from epipack.integrators import time_leap_ivp, time_leap_newton
from epipack.stochastic_epi_models import StochasticEpiModel
class EpiTest(unittest.TestCase):
    """Unit tests for epipack's numeric epidemic models.

    Covers: compartment bookkeeping, construction of linear/quadratic
    rate functions and event updates, deterministic integration of the
    canned SIS/SI/SIR/SEIR/SIRS models, birth/death and fusion processes,
    warning behavior, temporal Gillespie sampling, and the stand-alone
    integrator helpers.
    """
    def test_compartments(self):
        """Compartment ids follow insertion order and map back to labels."""
        epi = EpiModel(list("SEIR"))
        assert(all([ i == epi.get_compartment_id(C) for i, C in enumerate("SEIR") ]))
        assert(epi.get_compartment_id("E") == 1)
        assert(epi.get_compartment(1) == "E")
    def test_linear_rates(self):
        """Transition processes produce the expected constant and dynamic linear rates/events."""
        epi = EpiModel(list("SEIR"))
        epi.add_transition_processes([
            ("E", 1.0, "I"),
            ("I", 1.0, "R"),
        ])
        linear_rates = [ ConstantLinearRate(1.0,1), ConstantLinearRate(1.0,2) ]
        linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
        for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(linear_events, epi.linear_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
        # same check with time-dependent (callable) rates
        epi = EpiModel(list("SEIR"))
        _r0 = lambda t, y: 2+np.cos(t)
        _r1 = lambda t, y: 2+np.sin(t)
        epi.add_transition_processes([
            ("E", _r0, "I"),
            ("I", _r1, "R"),
        ])
        linear_rates = [ DynamicLinearRate(_r0,1), DynamicLinearRate(_r1,2) ]
        linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
        for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(linear_events, epi.linear_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_adding_linear_rates(self):
        """set_processes followed by add_transition_processes accumulates rates."""
        epi = EpiModel(list("SEIR"))
        epi.set_processes([
            ("E", 1.0, "I"),
        ])
        epi.add_transition_processes([
            ("I", 1.0, "R"),
        ])
        linear_rates = [ ConstantLinearRate(1.0,1), ConstantLinearRate(1.0,2) ]
        linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
        for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(linear_events, epi.linear_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_quadratic_processes(self):
        """A transmission process yields the expected quadratic rate and event update."""
        epi = EpiModel(list("SEIAR"))
        quadratic_rates = [ ConstantQuadraticRate(1.0,2,0)]
        quadratic_events = [ np.array([-1,+1,0,0,0.])]
        epi.add_transmission_processes([
            ("S", "I", 1.0, "I", "E"),
        ])
        for r0, r1 in zip(quadratic_rates, epi.quadratic_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(quadratic_events, epi.quadratic_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_adding_quadratic_processes(self):
        """set_processes followed by add_transmission_processes accumulates quadratic rates."""
        epi = EpiModel(list("SEIAR"))
        quadratic_rates = [ ConstantQuadraticRate(1.0,2,0), ConstantQuadraticRate(1.0,3,0) ]
        quadratic_events = [ np.array([-1,+1,0,0,0.]), np.array([-1,+1,0,0,0.]) ]
        epi.set_processes([
            ("S", "I", 1.0, "I", "E"),
        ])
        epi.add_transmission_processes([
            ("S", "A", 1.0, "A", "E"),
        ])
        for r0, r1 in zip(quadratic_rates, epi.quadratic_rate_functions):
            assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
        for e0, e1 in zip(quadratic_events, epi.quadratic_event_updates):
            assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
    def test_SIS_with_simulation_restart_and_euler(self):
        """SIS model reaches the analytic endemic state S* = N/2 (for R0 = 2) with both integrators."""
        N = 100
        epi = SISModel(infection_rate=2,recovery_rate=1,initial_population_size=N)
        epi.set_initial_conditions({'S':0.99*N,'I':0.01*N})
        tt = np.linspace(0,100,2)
        result = epi.integrate(tt,['S'])
        assert(np.isclose(result['S'][-1],N/2))
        tt = np.linspace(0,100,1000)
        result = epi.integrate_and_return_by_index(tt,['S'],integrator='euler')
        assert(np.isclose(result[0,-1],N/2))
    def test_repeated_simulation(self):
        """Chained short integrations with adopt_final_state reach the same fixed point."""
        N = 100
        epi = SISModel(infection_rate=2,recovery_rate=1,initial_population_size=N)
        epi.set_initial_conditions({'S':0.99*N,'I':0.01*N})
        tt = np.linspace(0,100,100)
        old_t = tt[0]
        for it, t in enumerate(tt[1:]):
            result = epi.integrate_and_return_by_index([old_t,t],integrator='euler',adopt_final_state=True)
            old_t = t
        assert(np.isclose(result[0,-1],N/2))
    def test_birth_death(self):
        """SIR with balanced birth/death relaxes to the analytic endemic equilibrium."""
        epi = EpiModel(list("SIR"))
        R0 = 2
        rho = 1
        mu = 0.2
        eta = R0 * rho
        # setting processes with explicit birth/death terms warns about
        # non-zero column sums; the warning is expected here
        with self.assertWarns(UserWarning):
            epi.set_processes([
                ("S", "I", eta, "I", "I"),
                ("I", rho, "R"),
                (None, mu, "S"),
                ("S", mu, None),
                ("R", mu, None),
                ("I", mu, None),
            ])
        epi.set_initial_conditions({'S': 0.8, 'I':0.2 })
        t = [0,1000]
        res = epi.integrate(t)
        assert(np.isclose(res['S'][-1],(mu+rho)/eta))
        assert(np.isclose(res['I'][-1],mu/eta*(eta-mu-rho)/(mu+rho)))
    def test_dynamic_birth(self):
        """A time-dependent birth rate 2t integrates to A(t) = t^2 + A(0)."""
        A = "A"
        epi = EpiModel([A])
        epi.set_initial_conditions({A:1})
        with self.assertWarns(UserWarning):
            epi.set_processes([
                (None, lambda t, y: 2*t, A),
            ])
        res = epi.integrate([0,5])
        assert(np.isclose(res[A][-1],5**2+1))
    def test_correcting_for_declining_pop_size(self):
        """Smoke test: fusion dynamics integrate with and without population-size correction."""
        A, B = list("AB")
        epi = EpiModel([A, B],10,correct_for_dynamical_population_size=True)
        epi.add_transition_processes([
            #(None, 0.1, A),
        ])
        epi.add_fusion_processes([
            (A, B, 1, B),
        ])
        epi.set_initial_conditions({B:4, A:6})
        tt = np.linspace(0,30)
        result = epi.integrate(tt)
        #from matplotlib import pyplot as pl
        #pl.plot(tt, result[A], label=A)
        #pl.plot(tt, result[B], label=B)
        epi.correct_for_dynamical_population_size = False
        result = epi.integrate(tt)
        #pl.plot(tt, result[A], label=A)
        #pl.plot(tt, result[B], label=B)
        #pl.legend()
        #pl.show()
    def test_fusion_and_adding_rates(self):
        """Column-sum warnings fire exactly when added events do not conserve population."""
        A, B, C = list("ABC")
        epi = EpiModel(list("ABC"))
        # this should not raise a warning that rates do not sum to zero
        # as it will be actively suppressed
        epi.add_fusion_processes([
            (A, B, 1, C),
        ])
        with self.assertWarns(UserWarning):
            # this should raise a warning that rates do not sum to zero
            epi.add_quadratic_events([
                ((A, B), 1, [(C, -1),(A, +1)]),
            ])
        # now rates should sum to zero
        epi.add_quadratic_events([
            ((A, B), 1, [(B, +1)]),
        ])
        with self.assertWarns(UserWarning):
            # this should raise a warning that rates do not sum to zero
            epi.add_linear_events([
                ((A,), 1, [(B,-1)])
            ])
    def test_initial_condition_warnings(self):
        """Warn on initial conditions that don't sum to the population size or are set twice."""
        A, B, C = list("ABC")
        epi = EpiModel(list("ABC"))
        with self.assertWarns(UserWarning):
            # this should raise a warning that rates do not sum to zero
            epi.set_initial_conditions({A:0.1,B:0.2})
        with self.assertWarns(UserWarning):
            # this should raise a warning that initial conditions were set twice
            epi.set_initial_conditions([(A,0.1),(A,0.2)])
    def test_custom_models(self):
        """Canned SI/SIR/SEIR/SIS/SIRS models match their analytic long-time limits."""
        S, I, R = list("SIR")
        eta = 1
        epi = SIModel(eta)
        epi.set_initial_conditions({"S":0.99, "I":0.01})
        epi.integrate([0,1000],adopt_final_state=True)
        assert(np.isclose(epi.y0[0],0))
        eta = 2
        rho = 1
        epi = SIRModel(eta,rho)
        S0 = 0.99
        epi.set_initial_conditions({S:S0, I:1-S0})
        R0 = eta/rho
        # implicit equation for the final epidemic size R(inf)
        Rinf = lambda x: 1-x-S0*np.exp(-x*R0)
        res = epi.integrate([0,100])
        SIR_theory = root(Rinf,0.5).x[0]
        assert(np.isclose(res[R][-1],SIR_theory))
        omega = 1
        # SEIR shares the SIR final size (latency does not change R(inf))
        epi = SEIRModel(eta,rho,omega)
        epi.set_initial_conditions({S:S0, I:1-S0})
        res = epi.integrate([0,100])
        assert(np.isclose(res[R][-1],SIR_theory))
        #======================
        epi = SISModel(eta, rho, initial_population_size=100)
        epi.set_initial_conditions({S: 99, I:1 })
        tt = np.linspace(0,1000,2)
        result = epi.integrate(tt)
        assert(np.isclose(result[S][-1],50))
        epi = SIRSModel(eta, rho, omega)
        epi.set_initial_conditions({S: 0.99, I:0.01 })
        tt = np.linspace(0,1000,2)
        result = epi.integrate(tt)
        assert(np.isclose(result[R][-1],(1-rho/eta)/(1+omega/rho)))
    def test_inference_of_temporal_dependence(self,plot=False):
        """Functional (interpolated) rates are detected but implicit time dependence warns."""
        data = np.array([
            (1.0, 2.00),
            (10000.0, 2.00),
            (10001.0, -2.00),
        ])
        times, rates = data[:,0], data[:,1]
        f = interp1d(times, rates, kind='linear')
        def infection_rate(t,y):
            return f(t)
        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S,I], N)
        # first, initialize the time to t0 = 1, so
        # column sum tests do not fail
        model.set_initial_conditions({S:99,I:1},initial_time=1)
        # Here, the function will fail to evaluate time dependence
        # but will warn the user that there were errors in time
        # evaluation.
        self.assertWarns(UserWarning,model.set_processes,
                [
                    (S, I, infection_rate, I, I),
                    (I, infection_rate, S),
                ],
            )
        assert(not model.rates_have_explicit_time_dependence)
        assert(model.rates_have_functional_dependence)
        # this should warn the user that rates are functionally dependent
        # but that no temporal dependence could be inferred, so in case
        # they know that there's a time dependence, they have to state
        # that explicitly
        self.assertWarns(UserWarning,model.simulate,tmax=2)
        model.set_initial_conditions({S:99,I:1},initial_time=1)
        # here, the time dependence is given explicitly and so
        # the warning will not be shown
        model.simulate(tmax=2,rates_have_explicit_time_dependence=True)
    def test_temporal_gillespie(self,plot=False):
        """Sampled waiting times of the temporal Gillespie algorithm match the analytic pdf."""
        scl = 40
        def R0(t,y=None):
            return 4+np.cos(t*scl)
        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S,I], N)
        model.set_processes([
            (S, I, R0, I, I),
            (I, rec, S),
        ])
        I0 = 1
        S0 = N - I0
        model.set_initial_conditions({
            S: S0,
            I: I0,
        })
        taus = []
        N_sample = 10000
        for sample in range(N_sample):
            tau, _ = model.get_time_leap_and_proposed_compartment_changes(0)
            taus.append(tau)
        # integrated infection rate and resulting waiting-time pdf
        I = lambda t: (4*t + 1/scl*np.sin(t*scl))
        I2 = lambda t: I(t)*S0*I0/N+I0*rec*t
        pdf = lambda t: (R0(t)*S0*I0/N + I0*rec) * np.exp(-I2(t))
        measured, bins = np.histogram(taus,bins=100,density=True)
        theory = [ np.exp(-I2(bins[i-1]))-np.exp(-I2(bins[i])) for i in range(1,len(bins)) if measured[i-1] > 0]
        experi = [ measured[i-1] for i in range(1,len(bins)) if measured[i-1] > 0]
        # make sure the kullback-leibler divergence is below some threshold
        if plot: # pragma: no cover
            import matplotlib.pyplot as pl
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.yscale('log')
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.show()
        assert(entropy(theory, experi) < 0.01)
    def test_temporal_gillespie_repeated_simulation(self,plot=False):
        """First-event times from repeated chunked simulations match the analytic pdf."""
        scl = 40
        def R0(t,y=None):
            return 4+np.cos(t*scl)
        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S,I], N)
        model.set_processes([
            (S, I, R0, I, I),
            (I, rec, S),
        ])
        I0 = 1
        S0 = N - I0
        model.set_initial_conditions({
            S: S0,
            I: I0,
        })
        taus = []
        N_sample = 10000
        if plot:
            from tqdm import tqdm
        else:
            tqdm = lambda x: x
        tt = np.linspace(0,1,100)
        for sample in tqdm(range(N_sample)):
            tau = None
            model.set_initial_conditions({
                S: S0,
                I: I0,
            })
            # advance in small chunks until the first event changes I
            for _t in tt[1:]:
                time, result = model.simulate(_t,adopt_final_state=True)
                #print(time, result['I'])
                if result['I'][-1] != I0:
                    tau = time[1]
                    break
            #print()
            if tau is not None:
                taus.append(tau)
        I = lambda t: (4*t + 1/scl*np.sin(t*scl))
        I2 = lambda t: I(t)*S0*I0/N+I0*rec*t
        pdf = lambda t: (R0(t)*S0*I0/N + I0*rec) * np.exp(-I2(t))
        measured, bins = np.histogram(taus,bins=100,density=True)
        theory = [ np.exp(-I2(bins[i-1]))-np.exp(-I2(bins[i])) for i in range(1,len(bins)) if measured[i-1] > 0]
        experi = [ measured[i-1] for i in range(1,len(bins)) if measured[i-1] > 0]
        # make sure the kullback-leibler divergence is below some threshold
        if plot:
            import matplotlib.pyplot as pl
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.yscale('log')
            pl.figure()
            pl.hist(taus,bins=100,density=True)
            tt = np.linspace(0,1,100)
            pl.plot(tt, pdf(tt))
            pl.show()
        assert(entropy(theory, experi) < 0.01)
    def test_stochastic_well_mixed(self):
        """Well-mixed stochastic SEIR agrees with the deterministic integration within 5%."""
        S, E, I, R = list("SEIR")
        N = 75000
        tmax = 100
        model = EpiModel([S,E,I,R],N)
        model.set_processes([
            ( S, I, 2, E, I ),
            ( I, 1, R),
            ( E, 1, I),
        ])
        model.set_initial_conditions({S: N-100, I: 100})
        tt = np.linspace(0,tmax,10000)
        result_int = model.integrate(tt)
        t, result_sim = model.simulate(tmax,sampling_dt=1,return_compartments=[S, R])
        # same dynamics, network-free stochastic implementation
        model = StochasticEpiModel([S,E,I,R],N)
        model.set_link_transmission_processes([
            ( I, S, 2, I, E ),
        ])
        model.set_node_transition_processes([
            ( I, 1, R),
            ( E, 1, I),
        ])
        model.set_random_initial_conditions({S: N-100, I: 100})
        t, result_sim2 = model.simulate(tmax,sampling_dt=1,return_compartments=[S, R])
        for c, res in result_sim2.items():
            #print(c, np.abs(1-res[-1]/result_int[c][-1]))
            #print(c, np.abs(1-res[-1]/result_sim[c][-1]))
            assert(np.abs(1-res[-1]/result_int[c][-1]) < 0.05)
            assert(np.abs(1-res[-1]/result_sim[c][-1]) < 0.05)
    def test_stochastic_fission(self):
        """A + B -> C fusion consumes all pairs: 5+5 particles end as 5 C."""
        A, B, C = list("ABC")
        N = 10
        epi = EpiModel([A,B,C],N,correct_for_dynamical_population_size=True)
        epi.add_fusion_processes([
            (A, B, 1.0, C),
        ])
        epi.set_initial_conditions({ A: 5, B: 5})
        t, res = epi.simulate(1e9)
        assert(res[C][-1] == 5)
    def test_birth_stochastics(self):
        """Stationary occupation of a birth-chain matches a Poisson distribution."""
        A, B, C = list("ABC")
        epi = EpiModel([A,B,C],10,correct_for_dynamical_population_size=True)
        epi.set_initial_conditions({A:5, B:5})
        epi.set_processes([
            (None, 1, A),
            (A, 1, B),
            (B, 1, None),
        ],allow_nonzero_column_sums=True)
        _, res = epi.simulate(200,sampling_dt=0.05)
        # discard the transient (t <= 10) before comparing to the
        # stationary Poisson distribution
        vals = np.concatenate([res[A][_>10], res[B][_>10]])
        rv = poisson(vals.mean())
        measured, bins = np.histogram(vals,bins=np.arange(10)-0.5,density=True)
        theory = [ rv.pmf(i) for i in range(0,len(bins)-1) if measured[i] > 0]
        experi = [ measured[i] for i in range(0,len(bins)-1) if measured[i] > 0]
        # make sure the kullback-leibler divergence is below some threshold
        #for a, b in zip(theory, experi):
        #    print(a,b)
        assert(entropy(theory, experi) < 1e-2)
        assert(np.median(res[A]) == 1)
    def test_sampling_callback(self):
        """sampling_callback requires sampling_dt and sees every sampled state."""
        epi = SIModel(infection_rate=5.0,initial_population_size=100)
        epi.set_initial_conditions({"S":90,"I":10})
        self.assertRaises(ValueError,epi.simulate,1,sampling_callback=lambda x: x)
        i = 0
        samples = []
        def sampled():
            samples.append(epi.y0[0])
        t, res = epi.simulate(10,sampling_dt=0.1,sampling_callback=sampled)
        assert(all([a==b for a, b in zip(res['S'], samples)]))
    def test_integral_solvers(self):
        """Newton and IVP time-leap solvers agree with the reference value 30.76."""
        def get_event_rates(t, y):
            return y * (0.05 + 0.03 * np.array([ np.cos(t), np.sin(t), np.cos(t)**2, np.sin(t)**2 ]))
        rand = 0.834053
        t0 = 1.0
        y0 = np.array([0.1,0.2,0.3,0.4])
        t_nwt = time_leap_newton(t0, y0, get_event_rates, rand)
        t_ivp = time_leap_ivp(t0, y0, get_event_rates, rand)
        expected = 30.76
        numeric = np.array([t_nwt, t_ivp])
        assert(np.all( np.abs(numeric-expected)/numeric < 1e-3) )
    def test_integrate_until(self):
        """integrate_until stops exactly when S crosses the threshold N/2."""
        N = 100
        epi = SIModel(infection_rate=5.0,initial_population_size=N)
        epi.set_initial_conditions({"S":90,"I":10})
        thresh = 0.5
        iS = epi.get_compartment_id("S")
        stop_condition = lambda t, y: thresh*N - y[iS]
        t, res = epi.integrate_until(0,stop_condition,return_compartments=['S'])
        assert(np.isclose(thresh*N,res['S'][-1]))
if __name__ == "__main__":
    # Ad-hoc driver for running individual tests directly (bypasses the
    # unittest runner); uncomment a line below to run that test.
    # NOTE: the previously present `import sys` was unused and has been removed.
    T = EpiTest()
    T.test_fusion_and_adding_rates()
    T.test_inference_of_temporal_dependence()
    #T.test_integrate_until()
    #T.test_integral_solvers()
    #T.test_temporal_gillespie_repeated_simulation()
    #T.test_sampling_callback()
    #T.test_birth_stochastics()
    #T.test_stochastic_fission()
    #T.test_correcting_for_declining_pop_size()
    #T.test_dynamic_birth()
    #T.test_stochastic_well_mixed()
    #T.test_temporal_gillespie()
    #T.test_compartments()
    #T.test_linear_rates()
    #T.test_adding_linear_rates()
    #T.test_quadratic_processes()
    #T.test_adding_quadratic_processes()
    #T.test_SIS_with_simulation_restart_and_euler()
    #T.test_repeated_simulation()
    #T.test_custom_models()
    #T.test_birth_death()
    #T.test_initial_condition_warnings()
|
[
"matplotlib.pyplot.yscale",
"numpy.abs",
"numpy.isclose",
"numpy.histogram",
"matplotlib.pyplot.figure",
"epipack.numeric_epi_models.ConstantLinearRate",
"numpy.sin",
"numpy.exp",
"numpy.arange",
"scipy.interpolate.interp1d",
"epipack.numeric_epi_models.ConstantQuadraticRate",
"numpy.linspace",
"epipack.numeric_epi_models.DynamicLinearRate",
"epipack.numeric_epi_models.SISModel",
"epipack.numeric_epi_models.SIRSModel",
"epipack.stochastic_epi_models.StochasticEpiModel",
"matplotlib.pyplot.show",
"numpy.median",
"epipack.integrators.time_leap_newton",
"numpy.cos",
"scipy.optimize.root",
"numpy.concatenate",
"epipack.numeric_epi_models.EpiModel",
"epipack.numeric_epi_models.SIModel",
"matplotlib.pyplot.hist",
"scipy.stats.entropy",
"epipack.numeric_epi_models.SEIRModel",
"numpy.array",
"epipack.integrators.time_leap_ivp",
"epipack.numeric_epi_models.SIRModel"
] |
[((4359, 4429), 'epipack.numeric_epi_models.SISModel', 'SISModel', ([], {'infection_rate': '(2)', 'recovery_rate': '(1)', 'initial_population_size': 'N'}), '(infection_rate=2, recovery_rate=1, initial_population_size=N)\n', (4367, 4429), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((4501, 4523), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(2)'], {}), '(0, 100, 2)\n', (4512, 4523), True, 'import numpy as np\n'), ((4578, 4612), 'numpy.isclose', 'np.isclose', (["result['S'][-1]", '(N / 2)'], {}), "(result['S'][-1], N / 2)\n", (4588, 4612), True, 'import numpy as np\n'), ((4625, 4650), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (4636, 4650), True, 'import numpy as np\n'), ((4744, 4776), 'numpy.isclose', 'np.isclose', (['result[0, -1]', '(N / 2)'], {}), '(result[0, -1], N / 2)\n', (4754, 4776), True, 'import numpy as np\n'), ((4846, 4916), 'epipack.numeric_epi_models.SISModel', 'SISModel', ([], {'infection_rate': '(2)', 'recovery_rate': '(1)', 'initial_population_size': 'N'}), '(infection_rate=2, recovery_rate=1, initial_population_size=N)\n', (4854, 4916), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((4988, 5012), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (4999, 5012), True, 'import numpy as np\n'), ((5220, 5252), 'numpy.isclose', 'np.isclose', (['result[0, -1]', '(N / 2)'], {}), '(result[0, -1], N / 2)\n', (5230, 5252), True, 'import numpy as np\n'), ((5845, 5887), 'numpy.isclose', 'np.isclose', (["res['S'][-1]", '((mu + rho) / eta)'], {}), "(res['S'][-1], (mu + rho) / eta)\n", (5855, 5887), True, 
'import numpy as np\n'), ((5899, 5965), 'numpy.isclose', 'np.isclose', (["res['I'][-1]", '(mu / eta * (eta - mu - rho) / (mu + rho))'], {}), "(res['I'][-1], mu / eta * (eta - mu - rho) / (mu + rho))\n", (5909, 5965), True, 'import numpy as np\n'), ((6020, 6033), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A]'], {}), '([A])\n', (6028, 6033), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((6270, 6304), 'numpy.isclose', 'np.isclose', (['res[A][-1]', '(5 ** 2 + 1)'], {}), '(res[A][-1], 5 ** 2 + 1)\n', (6280, 6304), True, 'import numpy as np\n'), ((6397, 6461), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A, B]', '(10)'], {'correct_for_dynamical_population_size': '(True)'}), '([A, B], 10, correct_for_dynamical_population_size=True)\n', (6405, 6461), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((6687, 6705), 'numpy.linspace', 'np.linspace', (['(0)', '(30)'], {}), '(0, 30)\n', (6698, 6705), True, 'import numpy as np\n'), ((8545, 8557), 'epipack.numeric_epi_models.SIModel', 'SIModel', (['eta'], {}), '(eta)\n', (8552, 8557), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((8685, 8709), 'numpy.isclose', 'np.isclose', (['epi.y0[0]', '(0)'], {}), '(epi.y0[0], 0)\n', (8695, 8709), True, 'import numpy as np\n'), ((8758, 8776), 'epipack.numeric_epi_models.SIRModel', 'SIRModel', (['eta', 'rho'], {}), '(eta, rho)\n', (8766, 8776), False, 'from epipack.numeric_epi_models import DynamicBirthRate, 
ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9006, 9040), 'numpy.isclose', 'np.isclose', (['res[R][-1]', 'SIR_theory'], {}), '(res[R][-1], SIR_theory)\n', (9016, 9040), True, 'import numpy as np\n'), ((9075, 9101), 'epipack.numeric_epi_models.SEIRModel', 'SEIRModel', (['eta', 'rho', 'omega'], {}), '(eta, rho, omega)\n', (9084, 9101), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9203, 9237), 'numpy.isclose', 'np.isclose', (['res[R][-1]', 'SIR_theory'], {}), '(res[R][-1], SIR_theory)\n', (9213, 9237), True, 'import numpy as np\n'), ((9286, 9333), 'epipack.numeric_epi_models.SISModel', 'SISModel', (['eta', 'rho'], {'initial_population_size': '(100)'}), '(eta, rho, initial_population_size=100)\n', (9294, 9333), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9399, 9422), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(2)'], {}), '(0, 1000, 2)\n', (9410, 9422), True, 'import numpy as np\n'), ((9471, 9500), 'numpy.isclose', 'np.isclose', (['result[S][-1]', '(50)'], {}), '(result[S][-1], 50)\n', (9481, 9500), True, 'import numpy as np\n'), ((9516, 9542), 'epipack.numeric_epi_models.SIRSModel', 'SIRSModel', (['eta', 'rho', 'omega'], {}), '(eta, rho, omega)\n', (9525, 9542), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9613, 9636), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', 
'(2)'], {}), '(0, 1000, 2)\n', (9624, 9636), True, 'import numpy as np\n'), ((9685, 9747), 'numpy.isclose', 'np.isclose', (['result[R][-1]', '((1 - rho / eta) / (1 + omega / rho))'], {}), '(result[R][-1], (1 - rho / eta) / (1 + omega / rho))\n', (9695, 9747), True, 'import numpy as np\n'), ((9819, 9874), 'numpy.array', 'np.array', (['[(1.0, 2.0), (10000.0, 2.0), (10001.0, -2.0)]'], {}), '([(1.0, 2.0), (10000.0, 2.0), (10001.0, -2.0)])\n', (9827, 9874), True, 'import numpy as np\n'), ((9982, 10019), 'scipy.interpolate.interp1d', 'interp1d', (['times', 'rates'], {'kind': '"""linear"""'}), "(times, rates, kind='linear')\n", (9990, 10019), False, 'from scipy.interpolate import interp1d\n'), ((10153, 10172), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, I]', 'N'], {}), '([S, I], N)\n', (10161, 10172), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((11611, 11630), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, I]', 'N'], {}), '([S, I], N)\n', (11619, 11630), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((12249, 12291), 'numpy.histogram', 'np.histogram', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (12261, 12291), True, 'import numpy as np\n'), ((13250, 13269), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, I]', 'N'], {}), '([S, I], N)\n', (13258, 13269), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((13665, 13687), 'numpy.linspace', 
'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (13676, 13687), True, 'import numpy as np\n'), ((14391, 14433), 'numpy.histogram', 'np.histogram', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (14403, 14433), True, 'import numpy as np\n'), ((15280, 15305), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, E, I, R]', 'N'], {}), '([S, E, I, R], N)\n', (15288, 15305), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((15509, 15536), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax', '(10000)'], {}), '(0, tmax, 10000)\n', (15520, 15536), True, 'import numpy as np\n'), ((15680, 15715), 'epipack.stochastic_epi_models.StochasticEpiModel', 'StochasticEpiModel', (['[S, E, I, R]', 'N'], {}), '([S, E, I, R], N)\n', (15698, 15715), False, 'from epipack.stochastic_epi_models import StochasticEpiModel\n'), ((16469, 16535), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A, B, C]', 'N'], {'correct_for_dynamical_population_size': '(True)'}), '([A, B, C], N, correct_for_dynamical_population_size=True)\n', (16477, 16535), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((16818, 16885), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A, B, C]', '(10)'], {'correct_for_dynamical_population_size': '(True)'}), '([A, B, C], 10, correct_for_dynamical_population_size=True)\n', (16826, 16885), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((17160, 17208), 
'numpy.concatenate', 'np.concatenate', (['[res[A][_ > 10], res[B][_ > 10]]'], {}), '([res[A][_ > 10], res[B][_ > 10]])\n', (17174, 17208), True, 'import numpy as np\n'), ((17760, 17816), 'epipack.numeric_epi_models.SIModel', 'SIModel', ([], {'infection_rate': '(5.0)', 'initial_population_size': '(100)'}), '(infection_rate=5.0, initial_population_size=100)\n', (17767, 17816), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((18421, 18451), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4]'], {}), '([0.1, 0.2, 0.3, 0.4])\n', (18429, 18451), True, 'import numpy as np\n'), ((18465, 18512), 'epipack.integrators.time_leap_newton', 'time_leap_newton', (['t0', 'y0', 'get_event_rates', 'rand'], {}), '(t0, y0, get_event_rates, rand)\n', (18481, 18512), False, 'from epipack.integrators import time_leap_ivp, time_leap_newton\n'), ((18529, 18573), 'epipack.integrators.time_leap_ivp', 'time_leap_ivp', (['t0', 'y0', 'get_event_rates', 'rand'], {}), '(t0, y0, get_event_rates, rand)\n', (18542, 18573), False, 'from epipack.integrators import time_leap_ivp, time_leap_newton\n'), ((18617, 18641), 'numpy.array', 'np.array', (['[t_nwt, t_ivp]'], {}), '([t_nwt, t_ivp])\n', (18625, 18641), True, 'import numpy as np\n'), ((18776, 18830), 'epipack.numeric_epi_models.SIModel', 'SIModel', ([], {'infection_rate': '(5.0)', 'initial_population_size': 'N'}), '(infection_rate=5.0, initial_population_size=N)\n', (18783, 18830), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((19098, 19134), 'numpy.isclose', 'np.isclose', (['(thresh * N)', "res['S'][-1]"], {}), "(thresh * N, res['S'][-1])\n", (19108, 19134), True, 'import numpy as 
np\n'), ((1173, 1199), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(1)'], {}), '(1.0, 1)\n', (1191, 1199), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1200, 1226), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(2)'], {}), '(1.0, 2)\n', (1218, 1226), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1254, 1278), 'numpy.array', 'np.array', (['[0, -1, +1, 0]'], {}), '([0, -1, +1, 0])\n', (1262, 1278), True, 'import numpy as np\n'), ((1277, 1303), 'numpy.array', 'np.array', (['[0, 0, -1, +1.0]'], {}), '([0, 0, -1, +1.0])\n', (1285, 1303), True, 'import numpy as np\n'), ((1845, 1870), 'epipack.numeric_epi_models.DynamicLinearRate', 'DynamicLinearRate', (['_r0', '(1)'], {}), '(_r0, 1)\n', (1862, 1870), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1871, 1896), 'epipack.numeric_epi_models.DynamicLinearRate', 'DynamicLinearRate', (['_r1', '(2)'], {}), '(_r1, 2)\n', (1888, 1896), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1924, 1948), 'numpy.array', 'np.array', (['[0, -1, +1, 0]'], {}), '([0, -1, +1, 0])\n', (1932, 1948), True, 'import numpy as np\n'), ((1947, 1973), 'numpy.array', 'np.array', (['[0, 0, -1, +1.0]'], {}), '([0, 0, -1, +1.0])\n', (1955, 
1973), True, 'import numpy as np\n'), ((2531, 2557), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(1)'], {}), '(1.0, 1)\n', (2549, 2557), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((2558, 2584), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(2)'], {}), '(1.0, 2)\n', (2576, 2584), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((2612, 2636), 'numpy.array', 'np.array', (['[0, -1, +1, 0]'], {}), '([0, -1, +1, 0])\n', (2620, 2636), True, 'import numpy as np\n'), ((2635, 2661), 'numpy.array', 'np.array', (['[0, 0, -1, +1.0]'], {}), '([0, 0, -1, +1.0])\n', (2643, 2661), True, 'import numpy as np\n'), ((3048, 3080), 'epipack.numeric_epi_models.ConstantQuadraticRate', 'ConstantQuadraticRate', (['(1.0)', '(2)', '(0)'], {}), '(1.0, 2, 0)\n', (3069, 3080), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((3109, 3138), 'numpy.array', 'np.array', (['[-1, +1, 0, 0, 0.0]'], {}), '([-1, +1, 0, 0, 0.0])\n', (3117, 3138), True, 'import numpy as np\n'), ((3643, 3675), 'epipack.numeric_epi_models.ConstantQuadraticRate', 'ConstantQuadraticRate', (['(1.0)', '(2)', '(0)'], {}), '(1.0, 2, 0)\n', (3664, 3675), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((3675, 3707), 
'epipack.numeric_epi_models.ConstantQuadraticRate', 'ConstantQuadraticRate', (['(1.0)', '(3)', '(0)'], {}), '(1.0, 3, 0)\n', (3696, 3707), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((3737, 3766), 'numpy.array', 'np.array', (['[-1, +1, 0, 0, 0.0]'], {}), '([-1, +1, 0, 0, 0.0])\n', (3745, 3766), True, 'import numpy as np\n'), ((3763, 3792), 'numpy.array', 'np.array', (['[-1, +1, 0, 0, 0.0]'], {}), '([-1, +1, 0, 0, 0.0])\n', (3771, 3792), True, 'import numpy as np\n'), ((12653, 12664), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (12662, 12664), True, 'import matplotlib.pyplot as pl\n'), ((12677, 12714), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (12684, 12714), True, 'import matplotlib.pyplot as pl\n'), ((12730, 12752), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (12741, 12752), True, 'import numpy as np\n'), ((12796, 12812), 'matplotlib.pyplot.yscale', 'pl.yscale', (['"""log"""'], {}), "('log')\n", (12805, 12812), True, 'import matplotlib.pyplot as pl\n'), ((12825, 12836), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (12834, 12836), True, 'import matplotlib.pyplot as pl\n'), ((12849, 12886), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (12856, 12886), True, 'import matplotlib.pyplot as pl\n'), ((12902, 12924), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (12913, 12924), True, 'import numpy as np\n'), ((12968, 12977), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (12975, 12977), True, 'import matplotlib.pyplot as pl\n'), ((12993, 13016), 'scipy.stats.entropy', 'entropy', (['theory', 'experi'], {}), '(theory, 
experi)\n', (13000, 13016), False, 'from scipy.stats import entropy, poisson\n'), ((14776, 14787), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (14785, 14787), True, 'import matplotlib.pyplot as pl\n'), ((14800, 14837), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (14807, 14837), True, 'import matplotlib.pyplot as pl\n'), ((14853, 14875), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (14864, 14875), True, 'import numpy as np\n'), ((14919, 14935), 'matplotlib.pyplot.yscale', 'pl.yscale', (['"""log"""'], {}), "('log')\n", (14928, 14935), True, 'import matplotlib.pyplot as pl\n'), ((14948, 14959), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (14957, 14959), True, 'import matplotlib.pyplot as pl\n'), ((14972, 15009), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (14979, 15009), True, 'import matplotlib.pyplot as pl\n'), ((15025, 15047), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (15036, 15047), True, 'import numpy as np\n'), ((15091, 15100), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (15098, 15100), True, 'import matplotlib.pyplot as pl\n'), ((15116, 15139), 'scipy.stats.entropy', 'entropy', (['theory', 'experi'], {}), '(theory, experi)\n', (15123, 15139), False, 'from scipy.stats import entropy, poisson\n'), ((17636, 17659), 'scipy.stats.entropy', 'entropy', (['theory', 'experi'], {}), '(theory, experi)\n', (17643, 17659), False, 'from scipy.stats import entropy, poisson\n'), ((17683, 17700), 'numpy.median', 'np.median', (['res[A]'], {}), '(res[A])\n', (17692, 17700), True, 'import numpy as np\n'), ((1649, 1658), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1655, 1658), True, 'import numpy as np\n'), ((1688, 1697), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1694, 1697), True, 'import numpy as 
np\n'), ((8971, 8986), 'scipy.optimize.root', 'root', (['Rinf', '(0.5)'], {}), '(Rinf, 0.5)\n', (8975, 8986), False, 'from scipy.optimize import root\n'), ((11522, 11537), 'numpy.cos', 'np.cos', (['(t * scl)'], {}), '(t * scl)\n', (11528, 11537), True, 'import numpy as np\n'), ((13161, 13176), 'numpy.cos', 'np.cos', (['(t * scl)'], {}), '(t * scl)\n', (13167, 13176), True, 'import numpy as np\n'), ((16260, 16299), 'numpy.abs', 'np.abs', (['(1 - res[-1] / result_int[c][-1])'], {}), '(1 - res[-1] / result_int[c][-1])\n', (16266, 16299), True, 'import numpy as np\n'), ((16323, 16362), 'numpy.abs', 'np.abs', (['(1 - res[-1] / result_sim[c][-1])'], {}), '(1 - res[-1] / result_sim[c][-1])\n', (16329, 16362), True, 'import numpy as np\n'), ((8898, 8913), 'numpy.exp', 'np.exp', (['(-x * R0)'], {}), '(-x * R0)\n', (8904, 8913), True, 'import numpy as np\n'), ((12098, 12113), 'numpy.sin', 'np.sin', (['(t * scl)'], {}), '(t * scl)\n', (12104, 12113), True, 'import numpy as np\n'), ((14240, 14255), 'numpy.sin', 'np.sin', (['(t * scl)'], {}), '(t * scl)\n', (14246, 14255), True, 'import numpy as np\n'), ((17287, 17300), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (17296, 17300), True, 'import numpy as np\n'), ((18665, 18691), 'numpy.abs', 'np.abs', (['(numeric - expected)'], {}), '(numeric - expected)\n', (18671, 18691), True, 'import numpy as np\n'), ((18313, 18322), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (18319, 18322), True, 'import numpy as np\n'), ((18324, 18333), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (18330, 18333), True, 'import numpy as np\n'), ((18335, 18344), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (18341, 18344), True, 'import numpy as np\n'), ((18349, 18358), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (18355, 18358), True, 'import numpy as np\n')]
|
from cyber_sdk.client.lcd import LCDClient
if __name__ == "__main__":
client = LCDClient(url="https://lcd.space-pussy-1.cybernode.ai/", chain_id="space-pussy-1")
client.tx.tx_info(
"D22FC6EB287D9F099DD8EBADAAC5D9A0F6AA9D6B87F4A35A3FACEF4182706A16"
)
|
[
"cyber_sdk.client.lcd.LCDClient"
] |
[((84, 171), 'cyber_sdk.client.lcd.LCDClient', 'LCDClient', ([], {'url': '"""https://lcd.space-pussy-1.cybernode.ai/"""', 'chain_id': '"""space-pussy-1"""'}), "(url='https://lcd.space-pussy-1.cybernode.ai/', chain_id=\n 'space-pussy-1')\n", (93, 171), False, 'from cyber_sdk.client.lcd import LCDClient\n')]
|
# Generated by Django 3.1.6 on 2021-02-08 21:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0090_rename_repository_project"),
]
operations = [
migrations.AddField(
model_name="user",
name="onboarded_at",
field=models.DateTimeField(
blank=True,
help_text="Date of the last time the user completed the interactive onboarding",
null=True,
),
),
]
|
[
"django.db.models.DateTimeField"
] |
[((341, 474), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'help_text': '"""Date of the last time the user completed the interactive onboarding"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Date of the last time the user completed the interactive onboarding',\n null=True)\n", (361, 474), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
from torch.optim.optimizer import Optimizer
import numpy as np
from scipy.optimize import minimize
__all__ = ('CurveSGD',)
class CurveSGD(Optimizer):
r"""Implements Self-Tuning Stochastic Optimization with
Curvature-Aware Gradient Filtering algorithm (https://arxiv.org/pdf/2011.04803.pdf).
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
Example:
>>> import curvesgd as curve
>>> optimizer = curve.CurveSGD(model.parameters(), lr=0.1)
>>>
>>> for _ in range(iterations):
>>> def closure():
>>> optimizer.zero_grad()
>>> f = func(x)
>>> f.backward(retain_graph=True, create_graph=True)
>>> return f
>>> optimizer.step(closure)
"""
def __init__(
self,
params,
lr: float = 1e-3,
beta_r=0.999,
beta_sigma=0.999,
beta_alpha=0.999,
):
if lr <= 0.0:
raise ValueError('Invalid learning rate: {}'.format(lr))
defaults = dict(
lr=lr,
beta_r=beta_r,
beta_sigma=beta_sigma,
beta_alpha=beta_alpha,
)
super(CurveSGD, self).__init__(params, defaults)
def get_hessian_prod(self, params, grads, delta):
"""Get an estimate of Hessian product.
This is done by computing the Hessian vector product with the stored delta
vector at the current gradient point, to estimate Hessian trace by
computing the gradient of <gradsH, s>.
Args:
params: iterable of parameters to optimize or dicts defining
parameter groups
grads: gradient of parameters
delta: vector to be multiplied against the Hessian (right multiplied)
Returns:
hessian_prod: Product of hessian and delta argument
"""
# Check backward was called with create_graph set to True
if grads.grad_fn is None:
msg = (
'Gradient tensor {:} does not have grad_fn. When '
'calling loss.backward(), make sure the option '
'create_graph is set to True.'
)
raise RuntimeError(msg.format(i))
# this is for distributed setting with single node and multi-gpus,
# for multi nodes setting, we have not support it yet.
hvs = torch.autograd.grad(
grads, params, grad_outputs=delta, only_inputs=True, retain_graph=True
)
return hvs[0]
def _get_prob_improve_num_den(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
"""Helper function for probability improvement/gradient calculation. See
prob_improve for full description of its use
Args:
alpha: value of step size
delta_t: Gradient change
m_t: Kalman filtered gradient mean
B_delta: Hessian-vector product
s_t: Kalman filtered function mean
P_t: Kalman filtered gradient covariance
Q_t: Covariance of Hessian-vector product
Returns:
prob_gradient: Numerator and denominator of probability function evaluation
"""
alpha = alpha[0]
numerator = -alpha * delta_t.matmul(m_t) + alpha ** 2 / 2 * delta_t.t().matmul(B_delta)
denominator = 2 * s_t + alpha ** 2 * delta_t.t().matmul(P_t).matmul(delta_t) \
+ alpha ** 4 / 4 * delta_t.t().matmul(Q_t).matmul(delta_t)
numerator = numerator.detach().numpy()
denominator = np.sqrt(denominator.detach().numpy())[0]
return numerator, denominator
def prob_improve(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
"""Get an estimate of improvement probability assuming alpha step size.
This is done as a subroutine procedure to determine the optimal
step size within after running filtering on the function and gradient values.
Intended to be used in conjunction with an optimization procedure (i.e scipy.optimize)
assuming all parameters fixed except alpha.
Args:
alpha: value of step size
delta_t: Gradient change
m_t: Kalman filtered gradient mean
B_delta: Hessian-vector product
s_t: Kalman filtered function mean
P_t: Kalman filtered gradient covariance
Q_t: Covariance of Hessian-vector product
Returns:
prob: Improvement probability function evaluation
"""
numerator, denominator = self._get_prob_improve_num_den(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
return numerator / denominator
def prob_improve_num_grad(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
"""Get an estimate of improvement numerical gradient. See prob_improve for docs
Args:
alpha: value of step size
delta_t: Gradient change
m_t: Kalman filtered gradient mean
B_delta: Hessian-vector product
s_t: Kalman filtered function mean
P_t: Kalman filtered gradient covariance
Q_t: Covariance of Hessian-vector product
Returns:
prob_gradient: Numerical gradient of improvement probability function
"""
eps = 1e-4
f_plus = self.prob_improve(alpha + eps, delta_t, m_t, B_delta, s_t, P_t, Q_t)
f_minus = self.prob_improve(alpha - eps, delta_t, m_t, B_delta, s_t, P_t, Q_t)
return (f_plus - f_minus) / (2 * eps)
def prob_improve_grad(self, alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t):
"""Get an estimate of improvement probability gradient. See prob_improve for docs
Args:
alpha: value of step size
delta_t: Gradient change
m_t: Kalman filtered gradient mean
B_delta: Hessian-vector product
s_t: Kalman filtered function mean
P_t: Kalman filtered gradient covariance
Q_t: Covariance of Hessian-vector product
Returns:
prob_gradient: Gradient of improvement probability function
"""
numerator, denominator = self._get_prob_improve_num_den(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
alpha = alpha[0]
numerator_grad = -delta_t.t().matmul(m_t) + alpha * delta_t.t().matmul(B_delta)
denominator_grad = 1 / (2 * denominator) * (2 * alpha * delta_t.t().matmul(P_t).matmul(delta_t) \
+ alpha ** 3 * delta_t.t().matmul(Q_t).matmul(delta_t))
numerator_grad = numerator_grad.detach().numpy()
denominator_grad = denominator_grad.detach().numpy()
return (denominator * numerator_grad - numerator * denominator_grad) / denominator ** 2
def mean_var_ewa(self, ema, emvar, x, beta):
r"""Computes exponential moving average/variance of tensor with update weight beta.
Args:
ema: Current exponential moving average.
emvar: Current exponential moving variance.
x: New datapoint (should have same untis as ema).
beta: Averaging weight for update step.
Returns:
(ema, emvar): Tuple of weighted average and variance
"""
alpha = 1 - beta
delta = x - ema
ema_new = ema.add(delta.mul(alpha))
emvar_new = emvar.add(delta.mul(delta).mul(alpha)).mul(1 - alpha)
return ema_new, emvar_new
def step(self, closure = None):
r"""Performs a single optimization step.
Args:
closure: A closure that reevaluates the model and returns the loss.
Returns:
loss: Loss (before taking optimizer step)
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
beta_r = group['beta_r']
beta_sigma = group['beta_sigma']
beta_alpha = group['beta_alpha']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data.flatten()
if d_p.is_sparse:
msg = (
'CurveSGD does not support sparse gradients, '
'please consider SparseAdam instead'
)
raise RuntimeError(msg)
state = self.state[p]
# State initialization
if len(state) == 0:
state['t'] = 0
state['delta_t'] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of function values
state['func_exp_avg'] = loss.clone()
state['func_exp_var'] = torch.zeros((1))
# Exponential moving average of gradient values
state['grad_exp_avg'] = d_p.clone()
state['grad_exp_var'] = torch.zeros_like(
p.flatten(), memory_format=torch.preserve_format
)
# Exponential moving average of Hessian values
state['hess_exp_avg'] = self.get_hessian_prod(p, p.grad, state['delta_t']).flatten().clone()
state['hess_exp_var'] = torch.zeros_like(
p.flatten(), memory_format=torch.preserve_format
)
# Kalman Filter states
state['m_t'] = torch.zeros_like(
p.flatten(), memory_format=torch.preserve_format
)
state['P_t'] = torch.eye(d_p.size()[0]).mul(1e4)
state['u_t'] = 0
state['s_t'] = 1e4
func_exp_avg = state['func_exp_avg']
func_exp_var = state['func_exp_var']
grad_exp_avg = state['grad_exp_avg']
grad_exp_var = state['grad_exp_var']
hess_exp_avg = state['hess_exp_avg']
hess_exp_var = state['hess_exp_var']
delta_t = state['delta_t']
B_delta = self.get_hessian_prod(p, p.grad, delta_t).flatten()
delta_t = delta_t.flatten()
if state['t'] != 0:
beta_delta = 1 - 1 / state['t'] # non-smoothed running average/variance
func_exp_avg, func_exp_var = self.mean_var_ewa(func_exp_avg, func_exp_var, loss, beta_r)
grad_exp_avg, grad_exp_var = self.mean_var_ewa(grad_exp_avg, grad_exp_var, d_p, beta_sigma)
hess_exp_avg, hess_exp_var = self.mean_var_ewa(hess_exp_avg, hess_exp_var, B_delta, beta_delta)
eps = 10e-1
sigma_t = max(eps, torch.mean(grad_exp_var))
q_t = max(eps, torch.mean(hess_exp_var))
# Match notation from paper for convenience
y_t = func_exp_avg
r_t = func_exp_var
g_t = grad_exp_avg
Sigma_t = torch.eye(d_p.size()[0]).mul(sigma_t)
b_t = hess_exp_avg
Q_t = torch.eye(d_p.size()[0]).mul(q_t)
# Kalman Filter update for f
u_t = state['u_t']
s_t = state['s_t']
m_t = state['m_t']
P_t = state['P_t']
# steps for Kalman filter
# compute u_t_minus
u_t_minus = u_t + m_t.t().matmul(delta_t) + 1 / 2 * delta_t.t().matmul(B_delta)
c_t = s_t + delta_t.t().matmul(P_t).matmul(delta_t) + 1 / 4 * delta_t.t().matmul(Q_t).matmul(delta_t) + r_t
lambda_t = max((y_t - u_t_minus) ** 2 - c_t, 0)
s_t_minus = lambda_t + c_t - r_t
mix_t = s_t_minus / (s_t_minus + r_t)
u_t = (1 - mix_t) * u_t_minus + mix_t * y_t
s_t = (1 - mix_t) ** 2 * s_t_minus + mix_t ** 2 * r_t
# Kalman Filter update for grad f
m_t_minus = m_t + B_delta
P_t_minus = P_t + Q_t
K_t = P_t_minus.matmul((P_t_minus + Sigma_t).inverse())
m_t = (torch.eye(d_p.size()[0]) - K_t).matmul(m_t_minus) + K_t.matmul(g_t)
P_t = (torch.eye(d_p.size()[0]) - K_t).matmul(P_t_minus).matmul((torch.eye(d_p.size()[0]) - K_t).t()) \
+ K_t.matmul(Sigma_t).matmul(K_t.t())
prob_improve_closure = lambda alpha : self.prob_improve(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
prob_improve_grad_closure = lambda alpha : self.prob_improve_num_grad(alpha, delta_t, m_t, B_delta, s_t, P_t, Q_t)
if state['t'] == 0:
lr = group['lr']
else:
lr = min(.0015, minimize(prob_improve_closure, group['lr'], jac=prob_improve_grad_closure, method='BFGS').x[0])
delta_t = m_t.mul(lr).reshape(p.data.shape)
state['t'] += 1
state['u_t'] = u_t
state['s_t'] = s_t
state['m_t'] = m_t
state['P_t'] = P_t
state['func_exp_avg'] = func_exp_avg
state['func_exp_var'] = func_exp_var
state['grad_exp_avg'] = grad_exp_avg
state['grad_exp_var'] = grad_exp_var
state['hess_exp_avg'] = hess_exp_avg
state['hess_exp_var'] = hess_exp_var
state['delta_t'] = delta_t
# Use filtered gradient estimate for update step
p.data.sub_(delta_t)
return loss
|
[
"torch.mean",
"scipy.optimize.minimize",
"torch.zeros_like",
"torch.autograd.grad",
"torch.zeros"
] |
[((2653, 2748), 'torch.autograd.grad', 'torch.autograd.grad', (['grads', 'params'], {'grad_outputs': 'delta', 'only_inputs': '(True)', 'retain_graph': '(True)'}), '(grads, params, grad_outputs=delta, only_inputs=True,\n retain_graph=True)\n', (2672, 2748), False, 'import torch\n'), ((8827, 8883), 'torch.zeros_like', 'torch.zeros_like', (['p'], {'memory_format': 'torch.preserve_format'}), '(p, memory_format=torch.preserve_format)\n', (8843, 8883), False, 'import torch\n'), ((9100, 9114), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (9111, 9114), False, 'import torch\n'), ((11106, 11130), 'torch.mean', 'torch.mean', (['grad_exp_var'], {}), '(grad_exp_var)\n', (11116, 11130), False, 'import torch\n'), ((11163, 11187), 'torch.mean', 'torch.mean', (['hess_exp_var'], {}), '(hess_exp_var)\n', (11173, 11187), False, 'import torch\n'), ((13168, 13261), 'scipy.optimize.minimize', 'minimize', (['prob_improve_closure', "group['lr']"], {'jac': 'prob_improve_grad_closure', 'method': '"""BFGS"""'}), "(prob_improve_closure, group['lr'], jac=prob_improve_grad_closure,\n method='BFGS')\n", (13176, 13261), False, 'from scipy.optimize import minimize\n')]
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from proboscis.asserts import assert_raises
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import test
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util import create_nova_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
from troveclient.compat import exceptions
from troveclient.v1.flavors import Flavor
GROUP = "dbaas.api.flavors"
GROUP_DS = "dbaas.api.datastores"
FAKE_MODE = test_config.values['fake_mode']
servers_flavors = None
dbaas_flavors = None
user = None
def assert_attributes_equal(name, os_flavor, dbaas_flavor):
"""Given an attribute name and two objects,
ensures the attribute is equal.
"""
assert_true(hasattr(os_flavor, name),
"open stack flavor did not have attribute %s" % name)
assert_true(hasattr(dbaas_flavor, name),
"dbaas flavor did not have attribute %s" % name)
expected = getattr(os_flavor, name)
actual = getattr(dbaas_flavor, name)
assert_equal(expected, actual,
'DBaas flavor differs from Open Stack on attribute ' + name)
def assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor):
assert_attributes_equal('name', os_flavor, dbaas_flavor)
assert_attributes_equal('ram', os_flavor, dbaas_flavor)
def assert_link_list_is_equal(flavor):
assert_true(hasattr(flavor, 'links'))
assert_true(flavor.links)
if flavor.id:
flavor_id = str(flavor.id)
else:
flavor_id = flavor.str_id
for link in flavor.links:
href = link['href']
if "self" in link['rel']:
expected_href = os.path.join(test_config.dbaas_url, "flavors",
str(flavor.id))
url = test_config.dbaas_url.replace('http:', 'https:', 1)
msg = ("REL HREF %s doesn't start with %s" %
(href, test_config.dbaas_url))
assert_true(href.startswith(url), msg)
url = os.path.join("flavors", flavor_id)
msg = "REL HREF %s doesn't end in '%s'" % (href, url)
assert_true(href.endswith(url), msg)
elif "bookmark" in link['rel']:
base_url = test_config.version_url.replace('http:', 'https:', 1)
expected_href = os.path.join(base_url, "flavors", flavor_id)
msg = 'bookmark "href" must be %s, not %s' % (expected_href, href)
assert_equal(href, expected_href, msg)
else:
assert_false(True, "Unexpected rel - %s" % link['rel'])
@test(groups=[tests.DBAAS_API, GROUP, GROUP_DS, tests.PRE_INSTANCES],
depends_on_groups=["services.initialize"])
class Flavors(object):
@before_class
def setUp(self):
rd_user = test_config.users.find_user(
Requirements(is_admin=False, services=["trove"]))
self.rd_client = create_dbaas_client(rd_user)
if test_config.nova_client is not None:
nova_user = test_config.users.find_user(
Requirements(services=["nova"]))
self.nova_client = create_nova_client(nova_user)
def get_expected_flavors(self):
# If we have access to the client, great! Let's use that as the flavors
# returned by Trove should be identical.
if test_config.nova_client is not None:
return self.nova_client.flavors.list()
# If we don't have access to the client the flavors need to be spelled
# out in the config file.
flavors = [Flavor(Flavors, flavor_dict, loaded=True)
for flavor_dict in test_config.flavors]
return flavors
@test
def confirm_flavors_lists_nearly_identical(self):
os_flavors = self.get_expected_flavors()
dbaas_flavors = self.rd_client.flavors.list()
print("Open Stack Flavors:")
print(os_flavors)
print("DBaaS Flavors:")
print(dbaas_flavors)
# Length of both flavors list should be identical.
assert_equal(len(os_flavors), len(dbaas_flavors))
for os_flavor in os_flavors:
found_index = None
for index, dbaas_flavor in enumerate(dbaas_flavors):
if os_flavor.name == dbaas_flavor.name:
msg = ("Flavor ID '%s' appears in elements #%s and #%d." %
(dbaas_flavor.id, str(found_index), index))
assert_true(found_index is None, msg)
assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor)
found_index = index
msg = "Some flavors from OS list were missing in DBAAS list."
assert_false(found_index is None, msg)
for flavor in dbaas_flavors:
assert_link_list_is_equal(flavor)
@test
def test_flavor_list_attrs(self):
allowed_attrs = ['id', 'name', 'ram', 'vcpus', 'disk', 'links',
'ephemeral', 'local_storage', 'str_id']
flavors = self.rd_client.flavors.list()
attrcheck = AttrCheck()
for flavor in flavors:
flavor_dict = flavor._info
attrcheck.contains_allowed_attrs(
flavor_dict, allowed_attrs,
msg="Flavors list")
attrcheck.links(flavor_dict['links'])
@test
def test_flavor_get_attrs(self):
allowed_attrs = ['id', 'name', 'ram', 'vcpus', 'disk', 'links',
'ephemeral', 'local_storage', 'str_id']
flavor = self.rd_client.flavors.get(1)
attrcheck = AttrCheck()
flavor_dict = flavor._info
attrcheck.contains_allowed_attrs(
flavor_dict, allowed_attrs,
msg="Flavor Get 1")
attrcheck.links(flavor_dict['links'])
@test
def test_flavor_not_found(self):
assert_raises(exceptions.NotFound,
self.rd_client.flavors.get, "foo")
@test
def test_flavor_list_datastore_version_associated_flavors(self):
datastore = self.rd_client.datastores.get(
test_config.dbaas_datastore)
dbaas_flavors = (self.rd_client.flavors.
list_datastore_version_associated_flavors(
datastore=test_config.dbaas_datastore,
version_id=datastore.default_version))
os_flavors = self.get_expected_flavors()
assert_equal(len(dbaas_flavors), len(os_flavors))
# verify flavor lists are identical
for os_flavor in os_flavors:
found_index = None
for index, dbaas_flavor in enumerate(dbaas_flavors):
if os_flavor.name == dbaas_flavor.name:
msg = ("Flavor ID '%s' appears in elements #%s and #%d." %
(dbaas_flavor.id, str(found_index), index))
assert_true(found_index is None, msg)
assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor)
found_index = index
msg = "Some flavors from OS list were missing in DBAAS list."
assert_false(found_index is None, msg)
for flavor in dbaas_flavors:
assert_link_list_is_equal(flavor)
@test(runs_after=[Flavors],
groups=[tests.DBAAS_API, GROUP, GROUP_DS],
depends_on_groups=["services.initialize"],
enabled=FAKE_MODE)
class DatastoreFlavorAssociation(object):
@before_class
def setUp(self):
rd_user = test_config.users.find_user(
Requirements(is_admin=False, services=["trove"]))
self.rd_client = create_dbaas_client(rd_user)
self.datastore = self.rd_client.datastores.get(
test_config.dbaas_datastore)
self.name1 = "test_instance1"
self.name2 = "test_instance2"
self.volume = {'size': 2}
self.instance_id = None
self.nics = None
shared_network = CONFIG.get('shared_network', None)
if shared_network:
self.nics = [{'net-id': shared_network}]
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
def test_create_instance_with_valid_flavor_association(self):
# all the nova flavors are associated with the default datastore
result = self.rd_client.instances.create(
name=self.name1, flavor_id='1', volume=self.volume,
datastore=self.datastore.id,
nics=self.nics)
self.instance_id = result.id
assert_equal(200, self.rd_client.last_http_code)
def result_is_active():
instance = self.rd_client.instances.get(self.instance_id)
if instance.status == "ACTIVE":
return True
else:
# If its not ACTIVE, anything but BUILD must be
# an error.
assert_equal("BUILD", instance.status)
return False
poll_until(result_is_active)
self.rd_client.instances.delete(self.instance_id)
@test(runs_after=[test_create_instance_with_valid_flavor_association])
def test_create_instance_with_invalid_flavor_association(self):
dbaas_flavors = (self.rd_client.flavors.
list_datastore_version_associated_flavors(
datastore=test_config.dbaas_datastore,
version_id=self.datastore.default_version))
self.flavor_not_associated = None
os_flavors = Flavors().get_expected_flavors()
for os_flavor in os_flavors:
if os_flavor not in dbaas_flavors:
self.flavor_not_associated = os_flavor.id
break
if self.flavor_not_associated is not None:
assert_raises(exceptions.BadRequest,
self.rd_client.instances.create, self.name2,
flavor_not_associated, self.volume,
datastore=self.datastore.id,
nics=self.nics)
|
[
"troveclient.v1.flavors.Flavor",
"trove.tests.util.create_dbaas_client",
"proboscis.asserts.assert_raises",
"nose.tools.assert_true",
"proboscis.test",
"trove.common.utils.poll_until",
"trove.tests.util.test_config.version_url.replace",
"nose.tools.assert_equal",
"trove.tests.util.test_config.dbaas_url.replace",
"trove.tests.util.check.AttrCheck",
"trove.tests.config.CONFIG.get",
"proboscis.decorators.time_out",
"nose.tools.assert_false",
"trove.tests.util.users.Requirements",
"os.path.join",
"trove.tests.util.create_nova_client"
] |
[((3544, 3659), 'proboscis.test', 'test', ([], {'groups': '[tests.DBAAS_API, GROUP, GROUP_DS, tests.PRE_INSTANCES]', 'depends_on_groups': "['services.initialize']"}), "(groups=[tests.DBAAS_API, GROUP, GROUP_DS, tests.PRE_INSTANCES],\n depends_on_groups=['services.initialize'])\n", (3548, 3659), False, 'from proboscis import test\n'), ((8176, 8311), 'proboscis.test', 'test', ([], {'runs_after': '[Flavors]', 'groups': '[tests.DBAAS_API, GROUP, GROUP_DS]', 'depends_on_groups': "['services.initialize']", 'enabled': 'FAKE_MODE'}), "(runs_after=[Flavors], groups=[tests.DBAAS_API, GROUP, GROUP_DS],\n depends_on_groups=['services.initialize'], enabled=FAKE_MODE)\n", (8180, 8311), False, 'from proboscis import test\n'), ((2010, 2106), 'nose.tools.assert_equal', 'assert_equal', (['expected', 'actual', "('DBaas flavor differs from Open Stack on attribute ' + name)"], {}), "(expected, actual, \n 'DBaas flavor differs from Open Stack on attribute ' + name)\n", (2022, 2106), False, 'from nose.tools import assert_equal\n'), ((2393, 2418), 'nose.tools.assert_true', 'assert_true', (['flavor.links'], {}), '(flavor.links)\n', (2404, 2418), False, 'from nose.tools import assert_true\n'), ((8991, 9024), 'proboscis.decorators.time_out', 'time_out', (['TIMEOUT_INSTANCE_CREATE'], {}), '(TIMEOUT_INSTANCE_CREATE)\n', (8999, 9024), False, 'from proboscis.decorators import time_out\n'), ((9912, 9981), 'proboscis.test', 'test', ([], {'runs_after': '[test_create_instance_with_valid_flavor_association]'}), '(runs_after=[test_create_instance_with_valid_flavor_association])\n', (9916, 9981), False, 'from proboscis import test\n'), ((3858, 3886), 'trove.tests.util.create_dbaas_client', 'create_dbaas_client', (['rd_user'], {}), '(rd_user)\n', (3877, 3886), False, 'from trove.tests.util import create_dbaas_client\n'), ((6008, 6019), 'trove.tests.util.check.AttrCheck', 'AttrCheck', ([], {}), '()\n', (6017, 6019), False, 'from trove.tests.util.check import AttrCheck\n'), ((6518, 6529), 
'trove.tests.util.check.AttrCheck', 'AttrCheck', ([], {}), '()\n', (6527, 6529), False, 'from trove.tests.util.check import AttrCheck\n'), ((6781, 6850), 'proboscis.asserts.assert_raises', 'assert_raises', (['exceptions.NotFound', 'self.rd_client.flavors.get', '"""foo"""'], {}), "(exceptions.NotFound, self.rd_client.flavors.get, 'foo')\n", (6794, 6850), False, 'from proboscis.asserts import assert_raises\n'), ((8541, 8569), 'trove.tests.util.create_dbaas_client', 'create_dbaas_client', (['rd_user'], {}), '(rd_user)\n', (8560, 8569), False, 'from trove.tests.util import create_dbaas_client\n'), ((8860, 8894), 'trove.tests.config.CONFIG.get', 'CONFIG.get', (['"""shared_network"""', 'None'], {}), "('shared_network', None)\n", (8870, 8894), False, 'from trove.tests.config import CONFIG\n'), ((9392, 9440), 'nose.tools.assert_equal', 'assert_equal', (['(200)', 'self.rd_client.last_http_code'], {}), '(200, self.rd_client.last_http_code)\n', (9404, 9440), False, 'from nose.tools import assert_equal\n'), ((9819, 9847), 'trove.common.utils.poll_until', 'poll_until', (['result_is_active'], {}), '(result_is_active)\n', (9829, 9847), False, 'from trove.common.utils import poll_until\n'), ((2761, 2812), 'trove.tests.util.test_config.dbaas_url.replace', 'test_config.dbaas_url.replace', (['"""http:"""', '"""https:"""', '(1)'], {}), "('http:', 'https:', 1)\n", (2790, 2812), False, 'from trove.tests.util import test_config\n'), ((2989, 3023), 'os.path.join', 'os.path.join', (['"""flavors"""', 'flavor_id'], {}), "('flavors', flavor_id)\n", (3001, 3023), False, 'import os\n'), ((3783, 3831), 'trove.tests.util.users.Requirements', 'Requirements', ([], {'is_admin': '(False)', 'services': "['trove']"}), "(is_admin=False, services=['trove'])\n", (3795, 3831), False, 'from trove.tests.util.users import Requirements\n'), ((4069, 4098), 'trove.tests.util.create_nova_client', 'create_nova_client', (['nova_user'], {}), '(nova_user)\n', (4087, 4098), False, 'from trove.tests.util import 
create_nova_client\n'), ((4496, 4537), 'troveclient.v1.flavors.Flavor', 'Flavor', (['Flavors', 'flavor_dict'], {'loaded': '(True)'}), '(Flavors, flavor_dict, loaded=True)\n', (4502, 4537), False, 'from troveclient.v1.flavors import Flavor\n'), ((5632, 5670), 'nose.tools.assert_false', 'assert_false', (['(found_index is None)', 'msg'], {}), '(found_index is None, msg)\n', (5644, 5670), False, 'from nose.tools import assert_false\n'), ((8051, 8089), 'nose.tools.assert_false', 'assert_false', (['(found_index is None)', 'msg'], {}), '(found_index is None, msg)\n', (8063, 8089), False, 'from nose.tools import assert_false\n'), ((8466, 8514), 'trove.tests.util.users.Requirements', 'Requirements', ([], {'is_admin': '(False)', 'services': "['trove']"}), "(is_admin=False, services=['trove'])\n", (8478, 8514), False, 'from trove.tests.util.users import Requirements\n'), ((10631, 10802), 'proboscis.asserts.assert_raises', 'assert_raises', (['exceptions.BadRequest', 'self.rd_client.instances.create', 'self.name2', 'flavor_not_associated', 'self.volume'], {'datastore': 'self.datastore.id', 'nics': 'self.nics'}), '(exceptions.BadRequest, self.rd_client.instances.create, self.\n name2, flavor_not_associated, self.volume, datastore=self.datastore.id,\n nics=self.nics)\n', (10644, 10802), False, 'from proboscis.asserts import assert_raises\n'), ((3202, 3255), 'trove.tests.util.test_config.version_url.replace', 'test_config.version_url.replace', (['"""http:"""', '"""https:"""', '(1)'], {}), "('http:', 'https:', 1)\n", (3233, 3255), False, 'from trove.tests.util import test_config\n'), ((3284, 3328), 'os.path.join', 'os.path.join', (['base_url', '"""flavors"""', 'flavor_id'], {}), "(base_url, 'flavors', flavor_id)\n", (3296, 3328), False, 'import os\n'), ((3420, 3458), 'nose.tools.assert_equal', 'assert_equal', (['href', 'expected_href', 'msg'], {}), '(href, expected_href, msg)\n', (3432, 3458), False, 'from nose.tools import assert_equal\n'), ((3485, 3540), 
'nose.tools.assert_false', 'assert_false', (['(True)', "('Unexpected rel - %s' % link['rel'])"], {}), "(True, 'Unexpected rel - %s' % link['rel'])\n", (3497, 3540), False, 'from nose.tools import assert_false\n'), ((4005, 4036), 'trove.tests.util.users.Requirements', 'Requirements', ([], {'services': "['nova']"}), "(services=['nova'])\n", (4017, 4036), False, 'from trove.tests.util.users import Requirements\n'), ((9742, 9780), 'nose.tools.assert_equal', 'assert_equal', (['"""BUILD"""', 'instance.status'], {}), "('BUILD', instance.status)\n", (9754, 9780), False, 'from nose.tools import assert_equal\n'), ((5389, 5426), 'nose.tools.assert_true', 'assert_true', (['(found_index is None)', 'msg'], {}), '(found_index is None, msg)\n', (5400, 5426), False, 'from nose.tools import assert_true\n'), ((7808, 7845), 'nose.tools.assert_true', 'assert_true', (['(found_index is None)', 'msg'], {}), '(found_index is None, msg)\n', (7819, 7845), False, 'from nose.tools import assert_true\n')]
|
import random
import ctypes
import sys
import wgpu.backends.rs # noqa
import numpy as np
from pytest import skip
from testutils import run_tests, get_default_device
from testutils import can_use_wgpu_lib, is_ci
from renderutils import render_to_texture, render_to_screen # noqa
# Abort collection of this whole module when the native wgpu lib is missing,
# and on Windows CI where these tests are known to fail on the dx12 backend.
if not can_use_wgpu_lib:
    skip("Skipping tests that need the wgpu lib", allow_module_level=True)
elif is_ci and sys.platform == "win32":
    skip("These tests fail on dx12 for some reason", allow_module_level=True)
# %% 1D
def test_compute_tex_1d_rgba8uint():
    """1D rgba8uint texture: shader does r+=x, g+=1, b*=2, a unchanged."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_1d<u32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_1d<rgba8uint,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i: i32 = i32(index.x);
            let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
            let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, vec4<u32>(color2));
        }
    """
    # Generate data
    nx, ny, nz, nc = 64, 1, 1, 4
    data1 = (ctypes.c_uint8 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_1d_rgba16sint():
    """1D rgba16sint texture: shader does r+=x, g+=1, b*=2, a unchanged."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_1d<i32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_1d<rgba16sint,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i: i32 = i32(index.x);
            let color1 : vec4<i32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 128, 1, 1, 4
    data1 = (ctypes.c_int16 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba16sint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_1d_r32sint():
    """1D single-channel r32sint texture: shader does r+=x (nc == 1)."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_1d<i32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_1d<r32sint, write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i: i32 = i32(index.x);
            let color1 : vec4<i32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 256, 1, 1, 1
    data1 = (ctypes.c_int32 * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32sint,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_1d_r32float():
    """1D single-channel r32float texture: shader does r+=x (nc == 1)."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_1d<f32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_1d<r32float,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i: i32 = i32(index.x);
            let color1 : vec4<f32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<f32>(color1.x + f32(i), color1.y + 1.0, color1.z * 2.0, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 256, 1, 1, 1
    data1 = (ctypes.c_float * nc * nx)()
    for x in range(nx):
        for c in range(nc):
            data1[x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d1,
        (nx, ny, nz, nc),
        data1,
    )
# %% 2D
def test_compute_tex_2d_rgba8uint():
    """2D rgba8uint texture: shader does r+=x, g+=1, b*=2, a unchanged."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_2d<u32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_2d<rgba8uint,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec2<i32>(index.xy);
            let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
            let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, vec4<u32>(color2));
        }
    """
    # Generate data
    nx, ny, nz, nc = 64, 8, 1, 4
    data1 = (ctypes.c_uint8 * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_2d_rgba16sint():
    """2D rgba16sint texture: shader does r+=x, g+=1, b*=2, a unchanged."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_2d<i32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_2d<rgba16sint, write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec2<i32>(index.xy);
            let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 128, 8, 1, 4
    data1 = (ctypes.c_int16 * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba16sint,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_2d_r32sint():
    """2D single-channel r32sint texture: shader does r+=x (nc == 1)."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_2d<i32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_2d<r32sint, write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec2<i32>(index.xy);
            let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 256, 8, 1, 1
    data1 = (ctypes.c_int32 * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32sint,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_2d_r32float():
    """2D single-channel r32float texture: shader does r+=x (nc == 1)."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1:texture_2d<f32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_2d<r32float, write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec2<i32>(index.xy);
            let color1: vec4<f32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<f32>(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 256, 8, 1, 1
    data1 = (ctypes.c_float * nc * nx * ny)()
    for y in range(ny):
        for x in range(nx):
            for c in range(nc):
                data1[y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d2,
        (nx, ny, nz, nc),
        data1,
    )
# %% 3D
def test_compute_tex_3d_rgba8uint():
    """3D rgba8uint texture: shader does r+=x, g+=1, b*=2, a unchanged."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_3d<u32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_3d<rgba8uint,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec3<i32>(index);
            let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
            let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, vec4<u32>(color2));
        }
    """
    # Generate data
    nx, ny, nz, nc = 64, 8, 6, 4
    data1 = (ctypes.c_uint8 * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba8uint,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_3d_rgba16sint():
    """3D rgba16sint texture: shader does r+=x, g+=1, b*=2, a unchanged."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_3d<i32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_3d<rgba16sint,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec3<i32>(index);
            let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 128, 8, 6, 4
    data1 = (ctypes.c_int16 * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.rgba16sint,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_3d_r32sint():
    """3D single-channel r32sint texture: shader does r+=x (nc == 1)."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_3d<i32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_3d<r32sint,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec3<i32>(index);
            let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 256, 8, 6, 1
    data1 = (ctypes.c_int32 * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32sint,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
def test_compute_tex_3d_r32float():
    """3D single-channel r32float texture: shader does r+=x (nc == 1)."""
    compute_shader = """
        [[group(0), binding(0)]]
        var r_tex1: texture_3d<f32>;
        [[group(0), binding(1)]]
        var r_tex2: texture_storage_3d<r32float,write>;
        [[stage(compute), workgroup_size(1)]]
        fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
            let i = vec3<i32>(index);
            let color1: vec4<f32> = textureLoad(r_tex1, i, 0);
            let color2 = vec4<f32>(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a);
            textureStore(r_tex2, i, color2);
        }
    """
    # Generate data
    nx, ny, nz, nc = 64, 8, 6, 1
    data1 = (ctypes.c_float * nc * nx * ny * nz)()
    for z in range(nz):
        for y in range(ny):
            for x in range(nx):
                for c in range(nc):
                    data1[z][y][x][c] = random.randint(0, 20)
    # Compute and validate
    _compute_texture(
        compute_shader,
        wgpu.TextureFormat.r32float,
        wgpu.TextureDimension.d3,
        (nx, ny, nz, nc),
        data1,
    )
# %%
def _compute_texture(compute_shader, texture_format, texture_dim, texture_size, data1):
    """
    Apply a computation on a texture and validate the result. The shader should:

    * Add the x-coordinate to the red channel.
    * Add 1 to the green channel.
    * Multiply the blue channel by 2.
    * The alpha channel must remain equal.

    ``texture_size`` is ``(nx, ny, nz, nc)`` and ``data1`` is a nested ctypes
    array matching that size; the same ctypes type is used to read the result
    back, so input and output can be compared channel-by-channel.
    """
    nx, ny, nz, nc = texture_size
    nbytes = ctypes.sizeof(data1)
    bpp = nbytes // (nx * ny * nz)  # bytes per pixel
    device = get_default_device()
    cshader = device.create_shader_module(code=compute_shader)
    # Create textures and views: texture1 is the sampled input (upload target),
    # texture2 is the storage output (readback source).
    texture1 = device.create_texture(
        size=(nx, ny, nz),
        dimension=texture_dim,
        format=texture_format,
        usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST,
    )
    texture2 = device.create_texture(
        size=(nx, ny, nz),
        dimension=texture_dim,
        format=texture_format,
        usage=wgpu.TextureUsage.STORAGE_BINDING | wgpu.TextureUsage.COPY_SRC,
    )
    texture_view1 = texture1.create_view()
    texture_view2 = texture2.create_view()
    # Create buffer that we need to upload the data
    # (reused afterwards to read the result back, hence COPY_SRC | COPY_DST).
    buffer_usage = wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST
    buffer = device.create_buffer_with_data(data=data1, usage=buffer_usage)
    assert buffer.usage == buffer_usage
    # Derive the bind-group sample type from the format name.
    texture_sample_type = "float"
    if "uint" in texture_format:
        texture_sample_type = "uint"
    elif "sint" in texture_format:
        texture_sample_type = "sint"
    # Define bindings
    # One can see here why we need 2 textures: one is readonly, one writeonly
    bindings = [
        {"binding": 0, "resource": texture_view1},
        {"binding": 1, "resource": texture_view2},
    ]
    binding_layouts = [
        {
            "binding": 0,
            "visibility": wgpu.ShaderStage.COMPUTE,
            "texture": {
                "sample_type": texture_sample_type,
                "view_dimension": texture_dim,
            },
        },
        {
            "binding": 1,
            "visibility": wgpu.ShaderStage.COMPUTE,
            "storage_texture": {
                "access": wgpu.StorageTextureAccess.write_only,
                "format": texture_format,
                "view_dimension": texture_dim,
            },
        },
    ]
    bind_group_layout = device.create_bind_group_layout(entries=binding_layouts)
    pipeline_layout = device.create_pipeline_layout(
        bind_group_layouts=[bind_group_layout]
    )
    bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings)
    # Create a pipeline and run it
    compute_pipeline = device.create_compute_pipeline(
        layout=pipeline_layout,
        compute={"module": cshader, "entry_point": "main"},
    )
    assert compute_pipeline.get_bind_group_layout(0) is bind_group_layout
    command_encoder = device.create_command_encoder()
    # Upload: staging buffer -> input texture.
    command_encoder.copy_buffer_to_texture(
        {
            "buffer": buffer,
            "offset": 0,
            "bytes_per_row": bpp * nx,
            "rows_per_image": ny,
        },
        {"texture": texture1, "mip_level": 0, "origin": (0, 0, 0)},
        (nx, ny, nz),
    )
    # One workgroup per texel (workgroup_size is 1 in the shaders).
    compute_pass = command_encoder.begin_compute_pass()
    compute_pass.push_debug_group("foo")
    compute_pass.insert_debug_marker("setting pipeline")
    compute_pass.set_pipeline(compute_pipeline)
    compute_pass.insert_debug_marker("setting bind group")
    compute_pass.set_bind_group(
        0, bind_group, [], 0, 999999
    )  # last 2 elements not used
    compute_pass.insert_debug_marker("dispatch!")
    compute_pass.dispatch(nx, ny, nz)
    compute_pass.pop_debug_group()
    compute_pass.end_pass()
    # Download: output texture -> the same staging buffer.
    command_encoder.copy_texture_to_buffer(
        {"texture": texture2, "mip_level": 0, "origin": (0, 0, 0)},
        {
            "buffer": buffer,
            "offset": 0,
            "bytes_per_row": bpp * nx,
            "rows_per_image": ny,
        },
        (nx, ny, nz),
    )
    device.queue.submit([command_encoder.finish()])
    # Read the current data of the output buffer
    data2 = data1.__class__.from_buffer(device.queue.read_buffer(buffer))
    # Numpy arrays are easier to work with
    a1 = np.ctypeslib.as_array(data1).reshape(nz, ny, nx, nc)
    a2 = np.ctypeslib.as_array(data2).reshape(nz, ny, nx, nc)
    # Validate! (channels beyond nc simply don't exist for 1-channel formats)
    for x in range(nx):
        assert np.all(a2[:, :, x, 0] == a1[:, :, x, 0] + x)
    if nc >= 2:
        assert np.all(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)
    if nc >= 3:
        assert np.all(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)
    if nc >= 4:
        assert np.all(a2[:, :, :, 3] == a1[:, :, :, 3])
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    run_tests(globals())
|
[
"random.randint",
"ctypes.sizeof",
"testutils.get_default_device",
"pytest.skip",
"numpy.ctypeslib.as_array",
"numpy.all"
] |
[((313, 383), 'pytest.skip', 'skip', (['"""Skipping tests that need the wgpu lib"""'], {'allow_module_level': '(True)'}), "('Skipping tests that need the wgpu lib', allow_module_level=True)\n", (317, 383), False, 'from pytest import skip\n'), ((13362, 13382), 'ctypes.sizeof', 'ctypes.sizeof', (['data1'], {}), '(data1)\n', (13375, 13382), False, 'import ctypes\n'), ((13451, 13471), 'testutils.get_default_device', 'get_default_device', ([], {}), '()\n', (13469, 13471), False, 'from testutils import run_tests, get_default_device\n'), ((428, 501), 'pytest.skip', 'skip', (['"""These tests fail on dx12 for some reason"""'], {'allow_module_level': '(True)'}), "('These tests fail on dx12 for some reason', allow_module_level=True)\n", (432, 501), False, 'from pytest import skip\n'), ((17373, 17417), 'numpy.all', 'np.all', (['(a2[:, :, x, 0] == a1[:, :, x, 0] + x)'], {}), '(a2[:, :, x, 0] == a1[:, :, x, 0] + x)\n', (17379, 17417), True, 'import numpy as np\n'), ((17449, 17493), 'numpy.all', 'np.all', (['(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)'], {}), '(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)\n', (17455, 17493), True, 'import numpy as np\n'), ((17525, 17569), 'numpy.all', 'np.all', (['(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)'], {}), '(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)\n', (17531, 17569), True, 'import numpy as np\n'), ((17601, 17641), 'numpy.all', 'np.all', (['(a2[:, :, :, 3] == a1[:, :, :, 3])'], {}), '(a2[:, :, :, 3] == a1[:, :, :, 3])\n', (17607, 17641), True, 'import numpy as np\n'), ((1291, 1312), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (1305, 1312), False, 'import random\n'), ((2279, 2300), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (2293, 2300), False, 'import random\n'), ((3263, 3284), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (3277, 3284), False, 'import random\n'), ((4254, 4275), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (4268, 4275), False, 
'import random\n'), ((17202, 17230), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['data1'], {}), '(data1)\n', (17223, 17230), True, 'import numpy as np\n'), ((17264, 17292), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['data2'], {}), '(data2)\n', (17285, 17292), True, 'import numpy as np\n'), ((5306, 5327), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (5320, 5327), False, 'import random\n'), ((6342, 6363), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (6356, 6363), False, 'import random\n'), ((7372, 7393), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (7386, 7393), False, 'import random\n'), ((8410, 8431), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (8424, 8431), False, 'import random\n'), ((9507, 9528), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (9521, 9528), False, 'import random\n'), ((10587, 10608), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (10601, 10608), False, 'import random\n'), ((11662, 11683), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (11676, 11683), False, 'import random\n'), ((12744, 12765), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (12758, 12765), False, 'import random\n')]
|
import requests
# import re
from bs4 import BeautifulSoup
import time
courl = "https://e-hentai.org/?page={}"
def getContent(url):
    """Fetch *url* over HTTP and return its document parsed with lxml."""
    response = requests.get(url)
    return BeautifulSoup(response.text, 'lxml')
def getImage(soup):
    """Extract gallery records from one listing page and append them to the
    module-level log file ``f``.

    For each row, the ``it5`` cell holds the anchor with the gallery name and
    link, and the ``it2`` cell holds the thumbnail ``<img>``.  Rows missing
    either part are skipped.  Returns None.
    """
    src = soup.find_all('div', {'class': 'it5'})
    name = soup.find_all('div', {'class': 'it2'})
    for (i, j) in zip(src, name):
        # Use short-circuit `or` (the original used bitwise `|`, which always
        # evaluates both operands) and skip incomplete rows.
        if i is None or j.find('img') is None:
            continue
        f.write("name: "+i.find('a').get_text())  # get name
        f.write("title_pic link: "+j.find('img').get('src'))  # get title_pic link
        f.write("manga link: "+i.find('a').get('href'))  # get manga link
        f.write("\n")
    return None
# Number of listing pages to crawl.
page_num = 14333
# Global log file; getImage() appends one record per gallery to it.
f = open('hentai_log', 'a')
for i in range(1, page_num + 1):
    soup = getContent(courl.format(i))
    print(soup)
    getImage(soup)
    print("fetching content of page{}".format(i))
    # Throttle: one request per second.
    time.sleep(1)
# print("total number:{}".format(page_num))
f.close()
|
[
"bs4.BeautifulSoup",
"requests.get",
"time.sleep"
] |
[((142, 159), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (154, 159), False, 'import requests\n'), ((171, 202), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (184, 202), False, 'from bs4 import BeautifulSoup\n'), ((956, 969), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (966, 969), False, 'import time\n')]
|
# Note: model title and parameter table are inserted automatically
r"""
This model provides the scattering intensity, $I(q)$, for a lyotropic lamellar
phase where a random distribution in solution are assumed. The SLD of the head
region is taken to be different from the SLD of the tail region.
Definition
----------
The scattering intensity $I(q)$ is
.. math::
I(q) = 2\pi\frac{\text{scale}}{2(\delta_H + \delta_T)} P(q) \frac{1}{q^2}
The form factor $P(q)$ is
.. math::
P(q) = \frac{4}{q^2}
\left\lbrace
\Delta \rho_H
            \left[\sin(q(\delta_H + \delta_T)) - \sin(q\delta_T)\right]
+ \Delta\rho_T\sin(q\delta_T)
\right\rbrace^2
where $\delta_T$ is *length_tail*, $\delta_H$ is *length_head*,
$\Delta\rho_H$ is the head contrast (*sld_head* $-$ *sld_solvent*),
and $\Delta\rho_T$ is tail contrast (*sld* $-$ *sld_solvent*).
The total thickness of the lamellar sheet is
$\delta_H + \delta_T + \delta_T + \delta_H$. Note that in a non aqueous solvent
the chemical "head" group may be the "Tail region" and vice-versa.
The 2D scattering intensity is calculated in the same way as 1D, where
the $q$ vector is defined as
.. math:: q = \sqrt{q_x^2 + q_y^2}
References
----------
#. <NAME>, <NAME>, and <NAME>, *J. Phys. II France*, 3, (1993) 487-502
#. <NAME>, <NAME>, <NAME>, <NAME>,
*J. Phys. Chem. B*, 105, (2001) 11081-11088
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:** <NAME> and <NAME> **Date** April 17, 2014
"""
import numpy as np
from numpy import inf
name = "lamellar_hg"
title = "Random lamellar phase with Head and Tail Groups"
description = """\
[Random lamellar phase with Head and Tail Groups]
I(q)= 2*pi*P(q)/(2(H+T)*q^(2)), where
P(q)= see manual
layer thickness =(H+T+T+H) = 2(Head+Tail)
sld = Tail scattering length density
sld_head = Head scattering length density
sld_solvent = solvent scattering length density
background = incoherent background
scale = scale factor
"""
category = "shape:lamellae"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["length_tail", "Ang", 15, [0, inf], "volume", "Tail thickness ( total = H+T+T+H)"],
["length_head", "Ang", 10, [0, inf], "volume", "Head thickness"],
["sld", "1e-6/Ang^2", 0.4, [-inf,inf], "sld", "Tail scattering length density"],
["sld_head", "1e-6/Ang^2", 3.0, [-inf,inf], "sld", "Head scattering length density"],
["sld_solvent", "1e-6/Ang^2", 6, [-inf,inf], "sld", "Solvent scattering length density"]]
# pylint: enable=bad-whitespace, line-too-long
# No volume normalization despite having a volume parameter
# This should perhaps be volume normalized?
form_volume = """
return 1.0;
"""
Iq = """
const double qsq = q*q;
const double drh = sld_head - sld_solvent;
const double drt = sld - sld_solvent; //correction 13FEB06 by L.Porcar
const double qT = q*length_tail;
double Pq, inten;
Pq = drh*(sin(q*(length_head+length_tail))-sin(qT)) + drt*sin(qT);
Pq *= Pq;
Pq *= 4.0/(qsq);
inten = 2.0e-4*M_PI*Pq/qsq;
// normalize by the bilayer thickness
inten /= 2.0*(length_head+length_tail);
return inten;
"""
def random():
"""Return a random parameter set for the model."""
thickness = 10**np.random.uniform(1, 4)
length_head = thickness * np.random.uniform(0, 1)
length_tail = thickness - length_head
pars = dict(
length_head=length_head,
length_tail=length_tail,
)
return pars
# Regression test: I(q=0.001) for the default parameter set.
tests = [
    [{'scale': 1.0, 'background': 0.0, 'length_tail': 15.0, 'length_head': 10.0,
      'sld': 0.4, 'sld_head': 3.0, 'sld_solvent': 6.0},
     [0.001], [653143.9209]],
]
# ADDED by: RKH ON: 18Mar2016 converted from sasview previously, now renaming everything & sorting the docs
|
[
"numpy.random.uniform"
] |
[((3537, 3560), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(4)'], {}), '(1, 4)\n', (3554, 3560), True, 'import numpy as np\n'), ((3591, 3614), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3608, 3614), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from distutils.version import LooseVersion
from django.contrib.sites.shortcuts import get_current_site
from django.utils import timezone
from django.utils.dates import MONTHS
from django.utils.translation import ugettext_lazy as _, get_language_from_request
from cms import __version__ as cms_version
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .utils import (
build_calendar, is_valid_namespace_for_language,
get_valid_languages,
)
from .models import (
UpcomingPluginItem, Event, EventListPlugin, EventCalendarPlugin
)
from .forms import (
UpcomingPluginForm, EventListPluginForm, EventCalendarPluginForm,
)
# django CMS >= 3.3 supports per-plugin cache expiration (see AdjustableCacheMixin).
CMS_GTE_330 = LooseVersion(cms_version) >= LooseVersion('3.3.0')
# Shown to admins when a plugin points at an Events config with no apphook.
NO_APPHOOK_ERROR_MESSAGE = _(
    'There is an error in plugin configuration: selected Events '
    'config is not available. Please switch to edit mode and '
    'change plugin app_config settings to use valid config. '
    'Also note that aldryn-events should be used at least once '
    'as an apphook for that config.')
class NameSpaceCheckMixin(object):
    """Plugin mixin that resolves the app_config namespace and request
    language before rendering, and flags a configuration error when the
    namespace is not hooked up as an apphook for any valid language."""
    def get_namespace(self, instance):
        # Namespace of the app_config this plugin instance points at, or ''.
        if instance.app_config_id and instance.app_config.namespace:
            return instance.app_config.namespace
        return ''
    def get_language(self, request):
        # Language resolved from the request URL path (check_path=True).
        return get_language_from_request(request, check_path=True)
    def render(self, context, instance, placeholder):
        # translated filter the events, language set current language
        namespace = self.get_namespace(instance)
        language = self.get_language(context['request'])
        # Stored on self so subclass render() implementations can reuse it.
        self.valid_languages = get_valid_languages(namespace, language)
        # check if we can reverse list view for configured namespace
        # if no prepare a message to admin users.
        valid = False
        for lang_code in self.valid_languages:
            if is_valid_namespace_for_language(namespace, lang_code):
                valid = True
                break
        if not valid:
            # add message, should be properly handled in template
            context['plugin_configuration_error'] = NO_APPHOOK_ERROR_MESSAGE
        return super(NameSpaceCheckMixin, self).render(
            context, instance, placeholder)
class AdjustableCacheMixin(object):
    """
    For django CMS < 3.3.0 installations, we have no choice but to disable the
    cache where there is time-sensitive information. However, in later CMS
    versions, we can configure it with `get_cache_expiration()`.
    """
    if not CMS_GTE_330:
        cache = False
    def get_cache_expiration(self, request, instance, placeholder):
        # Honour the plugin's configured cache_duration; defaults to 0.
        return getattr(instance, 'cache_duration', 0)
    def get_fieldsets(self, request, obj=None):
        """
        Removes the cache_duration field from the displayed form if we're not
        using django CMS v3.3.0 or later.
        """
        # NOTE(review): super() is called with obj=None regardless of the
        # `obj` argument passed in — presumably intentional, but confirm.
        fieldsets = super(AdjustableCacheMixin, self).get_fieldsets(
            request, obj=None)
        if CMS_GTE_330:
            return fieldsets
        field = 'cache_duration'
        for fieldset in fieldsets:
            new_fieldset = [
                item for item in fieldset[1]['fields'] if item != field]
            fieldset[1]['fields'] = tuple(new_fieldset)
        return fieldsets
class UpcomingPlugin(NameSpaceCheckMixin, AdjustableCacheMixin,
                     CMSPluginBase):
    # Template is selected per instance in get_render_template().
    render_template = False
    name = _('Upcoming or Past Events')
    module = _('Events')
    model = UpcomingPluginItem
    form = UpcomingPluginForm
    def render(self, context, instance, placeholder):
        """Add the plugin instance and its upcoming (or past) events for the
        configured namespace/language to the template context."""
        context = super(UpcomingPlugin, self).render(context, instance,
                                                     placeholder)
        if context.get('plugin_configuration_error') is not None:
            return context
        context['instance'] = instance
        language = self.get_language(context['request'])
        namespace = self.get_namespace(instance)
        if instance.language not in self.valid_languages:
            events = Event.objects.none()
        else:
            events = Event.objects.namespace(namespace).language(language)
            events = events.translated(*self.valid_languages)
            if instance.past_events:
                events = events.past(count=instance.latest_entries)
            else:
                events = events.upcoming(count=instance.latest_entries)
        context['events'] = events
        return context
    def get_render_template(self, context, instance, placeholder):
        # Style-specific template directory chosen by the plugin's style field.
        name = '%s/upcoming.html' % instance.style
        return 'aldryn_events/plugins/upcoming/%s' % name
class EventListCMSPlugin(NameSpaceCheckMixin, CMSPluginBase):
    # Template is selected per instance in get_render_template().
    render_template = False
    module = _('Events')
    name = _('List')
    model = EventListPlugin
    form = EventListPluginForm
    def render(self, context, instance, placeholder):
        """Add the plugin instance and its manually selected events (filtered
        to the configured namespace/language) to the template context."""
        context = super(EventListCMSPlugin, self).render(context, instance,
                                                         placeholder)
        if context.get('plugin_configuration_error') is not None:
            return context
        language = self.get_language(context['request'])
        namespace = self.get_namespace(instance)
        context['instance'] = instance
        if instance.language not in self.valid_languages:
            events = Event.objects.none()
        else:
            events = instance.events.namespace(namespace).language(language)
            events = events.translated(*self.valid_languages)
        context['events'] = events
        return context
    def get_render_template(self, context, instance, placeholder):
        # Style-specific list template.
        return 'aldryn_events/plugins/list/%s/list.html' % instance.style
class CalendarPlugin(NameSpaceCheckMixin, AdjustableCacheMixin,
                     CMSPluginBase):
    render_template = 'aldryn_events/plugins/calendar.html'
    name = _('Calendar')
    module = _('Events')
    model = EventCalendarPlugin
    form = EventCalendarPluginForm
    def render(self, context, instance, placeholder):
        """Build the month-calendar context: days grid, current date,
        previous/next month anchors and the "<Month> <year>" label."""
        context = super(CalendarPlugin, self).render(context, instance,
                                                     placeholder)
        if context.get('plugin_configuration_error') is not None:
            return context
        namespace = self.get_namespace(instance)
        language = self.get_language(context['request'])
        site_id = getattr(get_current_site(context['request']), 'id', None)
        year = context.get('event_year')
        month = context.get('event_month')
        if not all([year, month]):
            # Default to the current month when the URL carries no year/month.
            year = str(timezone.now().date().year)
            month = str(timezone.now().date().month)
        current_date = datetime.date(int(year), int(month), 1)
        context['event_year'] = year
        context['event_month'] = month
        context['days'] = build_calendar(
            year, month, language, namespace, site_id)
        context['current_date'] = current_date
        # Day 1 minus one day is the last day of the previous month; day 1
        # plus 35 days always lands inside the following month (<= 31-day months).
        context['last_month'] = current_date + datetime.timedelta(days=-1)
        context['next_month'] = current_date + datetime.timedelta(days=35)
        context['calendar_label'] = '%s %s' % (MONTHS.get(int(month)), year)
        context['calendar_namespace'] = namespace
        return context
# Make the plugins available in the CMS plugin picker.
plugin_pool.register_plugin(CalendarPlugin)
plugin_pool.register_plugin(EventListCMSPlugin)
plugin_pool.register_plugin(UpcomingPlugin)
|
[
"distutils.version.LooseVersion",
"django.utils.timezone.now",
"cms.plugin_pool.plugin_pool.register_plugin",
"datetime.timedelta",
"django.utils.translation.get_language_from_request",
"django.contrib.sites.shortcuts.get_current_site",
"django.utils.translation.ugettext_lazy"
] |
[((850, 1118), 'django.utils.translation.ugettext_lazy', '_', (['"""There is an error in plugin configuration: selected Events config is not available. Please switch to edit mode and change plugin app_config settings to use valid config. Also note that aldryn-events should be used at least once as an apphook for that config."""'], {}), "('There is an error in plugin configuration: selected Events config is not available. Please switch to edit mode and change plugin app_config settings to use valid config. Also note that aldryn-events should be used at least once as an apphook for that config.'\n )\n", (851, 1118), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((7400, 7443), 'cms.plugin_pool.plugin_pool.register_plugin', 'plugin_pool.register_plugin', (['CalendarPlugin'], {}), '(CalendarPlugin)\n', (7427, 7443), False, 'from cms.plugin_pool import plugin_pool\n'), ((7444, 7491), 'cms.plugin_pool.plugin_pool.register_plugin', 'plugin_pool.register_plugin', (['EventListCMSPlugin'], {}), '(EventListCMSPlugin)\n', (7471, 7491), False, 'from cms.plugin_pool import plugin_pool\n'), ((7492, 7535), 'cms.plugin_pool.plugin_pool.register_plugin', 'plugin_pool.register_plugin', (['UpcomingPlugin'], {}), '(UpcomingPlugin)\n', (7519, 7535), False, 'from cms.plugin_pool import plugin_pool\n'), ((771, 796), 'distutils.version.LooseVersion', 'LooseVersion', (['cms_version'], {}), '(cms_version)\n', (783, 796), False, 'from distutils.version import LooseVersion\n'), ((800, 821), 'distutils.version.LooseVersion', 'LooseVersion', (['"""3.3.0"""'], {}), "('3.3.0')\n", (812, 821), False, 'from distutils.version import LooseVersion\n'), ((3524, 3552), 'django.utils.translation.ugettext_lazy', '_', (['"""Upcoming or Past Events"""'], {}), "('Upcoming or Past Events')\n", (3525, 3552), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((3566, 3577), 'django.utils.translation.ugettext_lazy', '_', 
(['"""Events"""'], {}), "('Events')\n", (3567, 3577), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((4857, 4868), 'django.utils.translation.ugettext_lazy', '_', (['"""Events"""'], {}), "('Events')\n", (4858, 4868), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((4880, 4889), 'django.utils.translation.ugettext_lazy', '_', (['"""List"""'], {}), "('List')\n", (4881, 4889), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((6015, 6028), 'django.utils.translation.ugettext_lazy', '_', (['"""Calendar"""'], {}), "('Calendar')\n", (6016, 6028), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((6042, 6053), 'django.utils.translation.ugettext_lazy', '_', (['"""Events"""'], {}), "('Events')\n", (6043, 6053), True, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((1413, 1464), 'django.utils.translation.get_language_from_request', 'get_language_from_request', (['request'], {'check_path': '(True)'}), '(request, check_path=True)\n', (1438, 1464), False, 'from django.utils.translation import ugettext_lazy as _, get_language_from_request\n'), ((6539, 6575), 'django.contrib.sites.shortcuts.get_current_site', 'get_current_site', (["context['request']"], {}), "(context['request'])\n", (6555, 6575), False, 'from django.contrib.sites.shortcuts import get_current_site\n'), ((7145, 7172), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (7163, 7172), False, 'import datetime\n'), ((7220, 7247), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(35)'}), '(days=35)\n', (7238, 7247), False, 'import datetime\n'), ((6732, 6746), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6744, 6746), False, 'from django.utils import timezone\n'), ((6784, 6798), 'django.utils.timezone.now', 'timezone.now', ([], {}), 
'()\n', (6796, 6798), False, 'from django.utils import timezone\n')]
|
import os
import numpy as np
import pandas as pd
import tensorflow as tf
def load_data(data_path, label_col, feature_cols=None):
    """Load a CSV file and split it into features and labels.

    Args:
        data_path: Path to the CSV file (relative paths are resolved).
        label_col: Name of the column holding the labels.
        feature_cols: Optional list of feature column names. When None,
            every column except ``label_col`` is used as a feature.

    Returns:
        Tuple ``(features, labels)`` of pandas objects.
    """
    if feature_cols:
        data = pd.read_csv(os.path.abspath(data_path), usecols=feature_cols + [label_col], low_memory=False)
        features = data[feature_cols]
    else:
        data = pd.read_csv(os.path.abspath(data_path), usecols=None, low_memory=False)
        # Fix: use the keyword form — the positional `axis` argument of
        # DataFrame.drop is deprecated and reads ambiguously.
        features = data.drop(columns=label_col)
    labels = data[label_col]
    return features, labels
def create_dev_sess_regressor(sessions, is_dev_labels):
    """Build a regressor scoring how device-like a session is.

    TODO: Implement this! Currently a placeholder that ignores the
    training data and returns sess / 10.
    """
    return lambda sess: sess / 10
def classify_sess(dev_sess_regressor, threshold, sess):
    """Binary-classify one session: 1 when its score exceeds ``threshold``, else 0."""
    score = dev_sess_regressor(sess)
    if score > threshold:
        return 1
    return 0
def create_dev_sess_classifier(dev_sess_regressor, threshold):
    """Bind a regressor and a threshold into a 0/1 per-session classifier."""
    def _classifier(sess):
        # Inlined threshold test (same contract as classify_sess).
        return 1 if dev_sess_regressor(sess) > threshold else 0
    return _classifier
def find_opt_threshold(dev_sess_regressor, sessions, is_dev_labels):
    """Return the decision threshold for device classification.

    TODO: Implement this! Should return the optimal threshold for the
    given regressor; currently a fixed placeholder.
    """
    return 0.5
def classify_seq(dev_sess_classifier, seq):
    """Majority vote: 1 when more than half the sessions are classified positive."""
    positive_votes = sum(dev_sess_classifier(s) for s in seq)
    return 1 if positive_votes > len(seq) / 2 else 0
def create_dev_seq_classifier(dev_sess_classifier):
    """Build a sequence-level classifier: positive when most sessions look like the device."""
    def _seq_classifier(seq):
        # Inlined majority vote (same contract as classify_seq).
        return 1 if sum(map(dev_sess_classifier, seq)) > len(seq) / 2 else 0
    return _seq_classifier
def classify_dev(dev_sess_classifier, opt_seq_len, sessions):
    """Return 1 if any window of ``opt_seq_len`` consecutive sessions votes positive.

    Slides a fixed-length window over ``sessions`` and reports a match as
    soon as one window passes the majority vote in classify_seq().
    """
    # Fix: +1 so the final window (ending at the last session) is also
    # examined; the original range stopped one window short.
    for start in range(len(sessions) - opt_seq_len + 1):
        if classify_seq(dev_sess_classifier, sessions[start:start + opt_seq_len]):
            return 1
    return 0
def create_dev_classifier(dev_sess_classifier, opt_seq_len):
    """Bind a session classifier and window length into a device-level classifier."""
    return lambda sessions: classify_dev(dev_sess_classifier, opt_seq_len, sessions)
def find_opt_seq_len(this_dev, dev_sess_classifier, dev_sess_dict):
    """Find the minimal sequence length that classifies every device's sessions correctly.

    Args (assumed from usage in create_iot_classifier — TODO confirm):
        this_dev: name of the device this classifier was trained for.
        dev_sess_classifier: per-session 0/1 classifier for ``this_dev``.
        dev_sess_dict: mapping of device name -> that device's sessions.
    """
    # Finds minimal seq length s.t accuracy=1 on all sessions
    opt_seq_len = 1
    # Find minimal sequence length s.t FPR=1 for all other devs
    for dev, dev_sess in dev_sess_dict.items():
        start = 1
        seq_len = 1
        while start + seq_len <= len(dev_sess):
            is_dev = dev == this_dev
            is_dev_pred = classify_seq(dev_sess_classifier, dev_sess[start:start + seq_len])
            if is_dev == is_dev_pred:
                # Correct verdict: slide the window forward by one.
                start += 1
            else:
                # Wrong verdict: restart the scan with a longer window.
                # seq_len grows by 2 so it stays odd and the majority vote
                # in classify_seq can never tie.
                start = 1
                seq_len += 2
        opt_seq_len = max(seq_len, opt_seq_len)
    # Return minimal seq length s.t accuracy=1
    return opt_seq_len
# def find_opt_seq_len(dev_sess_classifier, dev_sessions, other_devs_sessions):
# # Finds minimal seq length s.t accuracy=1 on all sessions
# opt_seq_len = 1
# # Find minimal sequence length s.t TPR=1
# start = 0
# seq_len = 1
# while start + seq_len <= len(dev_sessions):
# if classify_seq(dev_sess_classifier, dev_sessions[start:start + seq_len]):
# start += 1
# else:
# start = 1
# seq_len += 2
# opt_seq_len = max(seq_len, opt_seq_len)
# # Find minimal sequence length s.t FPR=1 for all other devs
# for other_dev_sessions in other_devs_sessions:
# start = 1
# seq_len = 1
# while start+seq_len <= len(other_dev_sessions):
# if classify_seq(dev_sess_classifier, other_dev_sessions[start:start + seq_len]):
# start = 1
# seq_len += 2
# else:
# start += 1
# opt_seq_len = max(seq_len, opt_seq_len)
# # Return minimal seq length s.t accuracy=1
# return opt_seq_len
def classify_multi_dev(dev_cls_dict, dev_sessions):
    """Return the first device whose classifier accepts the sessions, or None if none match."""
    matches = (dev for dev, classifier in dev_cls_dict.items() if classifier(dev_sessions))
    return next(matches, None)
def create_multi_dev_classifier(dev_cls_dict):
    """Bundle per-device classifiers into one classifier returning a device name or None."""
    def _multi_dev_classifier(dev_sessions):
        # Inlined dispatch (same contract as classify_multi_dev): first
        # accepting classifier wins, in dict insertion order.
        for dev, dev_classifier in dev_cls_dict.items():
            if dev_classifier(dev_sessions):
                return dev
        return None
    return _multi_dev_classifier
def is_eq(a):
    """Return an indicator function mapping b -> 1 when a == b, else 0."""
    def _indicator(b):
        return 1 if a == b else 0
    return _indicator
def create_iot_classifier(train, validation):
    """Train a multi-device classifier from labelled session DataFrames.

    Args:
        train: DataFrame of training sessions with a 'device_category' column.
        validation: DataFrame of validation sessions with a 'device_category' column.

    Returns:
        A function mapping a list of sessions to a device name (or None).
    """
    # Fix: drop the label *column* — the original validation.drop call
    # omitted the axis and would try to drop a row labelled
    # 'device_category' (raising KeyError).
    train_sessions = train.drop(columns='device_category')
    validation_sessions = validation.drop(columns='device_category')
    devs = train['device_category'].unique()
    # Per-device 0/1 label series for train and validation.
    train_is_devt_dict = {dev: train['device_category'].apply(is_eq(dev)) for dev in devs}
    validation_is_devt_dict = {dev: validation['device_category'].apply(is_eq(dev)) for dev in devs}
    dev_sess_dict = {dev: sess for dev, sess in train.groupby('device_category')}
    dev_sess_reg_dict = {dev: create_dev_sess_regressor(train_sessions, train_is_devt_dict[dev]) for dev in devs}
    # Fix: iterate device names only — 'for dev, is_dev in devs' tried to
    # unpack each device-name string into two variables.
    opt_thr_dict = {dev: find_opt_threshold(dev_sess_reg_dict[dev], validation_sessions, validation_is_devt_dict[dev])
                    for dev in devs}
    dev_sess_cls_dict = {dev: create_dev_sess_classifier(dev_sess_reg_dict[dev], opt_thr_dict[dev]) for dev in devs}
    opt_seq_len_dict = {dev: find_opt_seq_len(dev, dev_sess_cls_dict[dev], dev_sess_dict) for dev in devs}
    dev_cls_dict = {dev: create_dev_classifier(dev_sess_cls_dict[dev], opt_seq_len_dict[dev]) for dev in devs}
    return create_multi_dev_classifier(dev_cls_dict)
# Script entry: load the pre-split datasets and build the classifier.
# NOTE(review): only the 'ack' feature and the label column are loaded —
# presumably a deliberate single-feature experiment; confirm.
train = pd.read_csv(os.path.abspath('data/train.csv'), usecols=['ack', 'device_category'], low_memory=False)
validation = pd.read_csv(os.path.abspath('data/validation.csv'), usecols=['ack', 'device_category'], low_memory=False)
test = pd.read_csv(os.path.abspath('data/test.csv'), usecols=['ack', 'device_category'], low_memory=False)
classifier = create_iot_classifier(train, validation)
print('@@ DONE @@')
|
[
"os.path.abspath"
] |
[((5919, 5952), 'os.path.abspath', 'os.path.abspath', (['"""data/train.csv"""'], {}), "('data/train.csv')\n", (5934, 5952), False, 'import os\n'), ((6033, 6071), 'os.path.abspath', 'os.path.abspath', (['"""data/validation.csv"""'], {}), "('data/validation.csv')\n", (6048, 6071), False, 'import os\n'), ((6146, 6178), 'os.path.abspath', 'os.path.abspath', (['"""data/test.csv"""'], {}), "('data/test.csv')\n", (6161, 6178), False, 'import os\n'), ((248, 274), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (263, 274), False, 'import os\n'), ((405, 431), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (420, 431), False, 'import os\n')]
|
import FWCore.ParameterSet.Config as cms
# CMS job: transfer DT keyed configuration records into a local SQLite
# conditions database and read them back for validation.
process = cms.Process("TEST")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
# All conditions I/O goes to a local SQLite file; authentication files
# are looked up in the working directory.
process.CondDBCommon.connect = 'sqlite_file:userconf.db'
process.CondDBCommon.DBParameters.authenticationPath = '.'
# Output service: defines which records/tags get written to the DB.
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDBCommon,
    # Separate SQLite file for the population log.
    logconnect = cms.untracked.string('sqlite_file:log.db'),
    toPut = cms.VPSet(
        cms.PSet(
            record = cms.string('DTCCBConfigRcd'),
            tag = cms.string('conf_test'),
            timetype = cms.untracked.string('runnumber')
        ),
        cms.PSet(
            # Keyed configuration "bricks": keyed by hash, may be
            # appended out of IOV order.
            record = cms.string('keyedConfBricks'),
            tag = cms.string('DT_keyedConfBricks_V01'),
            timetype = cms.untracked.string('hash'),
            withWrapper = cms.untracked.bool(True),
            outOfOrder = cms.untracked.bool(True)
        ),
        cms.PSet(
            # List of configuration keys per IOV, ordered by run number.
            record = cms.string('keyedConfListIOV'),
            tag = cms.string('DT_keyedConfListIOV_V01'),
            timetype = cms.untracked.string('runnumber'),
            withWrapper = cms.untracked.bool(True),
            outOfOrder = cms.untracked.bool(False)
        )
    )
)
# Event source: a single empty "event" at run 1 drives one transfer.
process.source = cms.Source("EmptyIOVSource",
    timetype = cms.string('runnumber'),
    firstValue = cms.uint64(1),
    lastValue = cms.uint64(1),
    interval = cms.uint64(1)
)
# ES source: read the two keyed-configuration tags back from the same DB.
process.essource = cms.ESSource("PoolDBESSource",
    process.CondDBCommon,
    DumpStat=cms.untracked.bool(True),
    toGet = cms.VPSet(
        cms.PSet(
            record = cms.string('DTKeyedConfigListRcd'),
            tag = cms.string('DT_keyedConfListIOV_V01')
        ),
        cms.PSet(
            record = cms.string('DTKeyedConfigContainerRcd'),
            tag = cms.string('DT_keyedConfBricks_V01')
        )
    )
)
# PopCon analyzer: copies the DT CCB configuration key set and its payloads
# from the "online" SQLite source into the output service defined above.
process.conf_o2o = cms.EDAnalyzer("DTUserKeyedConfigPopConAnalyzer",
    name = cms.untracked.string('DTCCBConfig'),
    Source = cms.PSet(
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('.')
        ),
        # Stand-in online DB for this test.
        onlineDB = cms.string('sqlite_file:dummy_online.db'),
        tag = cms.string('conf_test'),
        run = cms.int32(1),
        writeKeys = cms.bool(True),
        writeData = cms.bool(True),
        container = cms.string('keyedConfBricks'),
        # (configType, configKey) pairs selected for transfer.
        DTConfigKeys = cms.VPSet(
            cms.PSet(
                configType = cms.untracked.int32(1),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(2),
                configKey = cms.untracked.int32(926)
            ),
            cms.PSet(
                configType = cms.untracked.int32(3),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(4),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(5),
                configKey = cms.untracked.int32(542)
            ),
            cms.PSet(
                configType = cms.untracked.int32(6),
                configKey = cms.untracked.int32(1226)
            )
        ),
        onlineAuthentication = cms.string('.')
    ),
    SinceAppendMode = cms.bool(True),
    record = cms.string('DTCCBConfigRcd'),
    loggingOn = cms.untracked.bool(True),
    debug = cms.bool(False)
)
process.p = cms.Path(process.conf_o2o)
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.uint64",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.Path"
] |
[((52, 71), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""TEST"""'], {}), "('TEST')\n", (63, 71), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3367, 3393), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['process.conf_o2o'], {}), '(process.conf_o2o)\n', (3375, 3393), True, 'import FWCore.ParameterSet.Config as cms\n'), ((349, 391), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""sqlite_file:log.db"""'], {}), "('sqlite_file:log.db')\n", (369, 391), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1183, 1206), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""runnumber"""'], {}), "('runnumber')\n", (1193, 1206), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1225, 1238), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (1235, 1238), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1257, 1270), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (1267, 1270), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1289, 1302), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (1299, 1302), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1395, 1419), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1413, 1419), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1772, 1807), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""DTCCBConfig"""'], {}), "('DTCCBConfig')\n", (1792, 1807), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3223, 3237), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (3231, 3237), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3252, 3280), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DTCCBConfigRcd"""'], {}), "('DTCCBConfigRcd')\n", (3262, 3280), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3298, 3322), 'FWCore.ParameterSet.Config.untracked.bool', 
'cms.untracked.bool', (['(True)'], {}), '(True)\n', (3316, 3322), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3336, 3351), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (3344, 3351), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2005, 2046), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""sqlite_file:dummy_online.db"""'], {}), "('sqlite_file:dummy_online.db')\n", (2015, 2046), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2062, 2085), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""conf_test"""'], {}), "('conf_test')\n", (2072, 2085), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2101, 2113), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1)'], {}), '(1)\n', (2110, 2113), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2135, 2149), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (2143, 2149), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2171, 2185), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (2179, 2185), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2207, 2236), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""keyedConfBricks"""'], {}), "('keyedConfBricks')\n", (2217, 2236), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3178, 3193), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""."""'], {}), "('.')\n", (3188, 3193), True, 'import FWCore.ParameterSet.Config as cms\n'), ((447, 475), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DTCCBConfigRcd"""'], {}), "('DTCCBConfigRcd')\n", (457, 475), True, 'import FWCore.ParameterSet.Config as cms\n'), ((491, 514), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""conf_test"""'], {}), "('conf_test')\n", (501, 514), True, 'import FWCore.ParameterSet.Config as cms\n'), ((535, 568), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""runnumber"""'], {}), "('runnumber')\n", (555, 
568), True, 'import FWCore.ParameterSet.Config as cms\n'), ((607, 636), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""keyedConfBricks"""'], {}), "('keyedConfBricks')\n", (617, 636), True, 'import FWCore.ParameterSet.Config as cms\n'), ((652, 688), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DT_keyedConfBricks_V01"""'], {}), "('DT_keyedConfBricks_V01')\n", (662, 688), True, 'import FWCore.ParameterSet.Config as cms\n'), ((709, 737), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""hash"""'], {}), "('hash')\n", (729, 737), True, 'import FWCore.ParameterSet.Config as cms\n'), ((761, 785), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (779, 785), True, 'import FWCore.ParameterSet.Config as cms\n'), ((808, 832), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (826, 832), True, 'import FWCore.ParameterSet.Config as cms\n'), ((871, 901), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""keyedConfListIOV"""'], {}), "('keyedConfListIOV')\n", (881, 901), True, 'import FWCore.ParameterSet.Config as cms\n'), ((917, 954), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DT_keyedConfListIOV_V01"""'], {}), "('DT_keyedConfListIOV_V01')\n", (927, 954), True, 'import FWCore.ParameterSet.Config as cms\n'), ((975, 1008), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""runnumber"""'], {}), "('runnumber')\n", (995, 1008), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1032, 1056), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1050, 1056), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1079, 1104), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (1097, 1104), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1471, 1505), 'FWCore.ParameterSet.Config.string', 'cms.string', 
(['"""DTKeyedConfigListRcd"""'], {}), "('DTKeyedConfigListRcd')\n", (1481, 1505), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1517, 1554), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DT_keyedConfListIOV_V01"""'], {}), "('DT_keyedConfListIOV_V01')\n", (1527, 1554), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1589, 1628), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DTKeyedConfigContainerRcd"""'], {}), "('DTKeyedConfigContainerRcd')\n", (1599, 1628), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1640, 1676), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""DT_keyedConfBricks_V01"""'], {}), "('DT_keyedConfBricks_V01')\n", (1650, 1676), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1892, 1914), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (1911, 1914), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1949, 1974), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""."""'], {}), "('.')\n", (1969, 1974), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2323, 2345), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1)'], {}), '(1)\n', (2342, 2345), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2376, 2400), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(542)'], {}), '(542)\n', (2395, 2400), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2467, 2489), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(2)'], {}), '(2)\n', (2486, 2489), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2520, 2544), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(926)'], {}), '(926)\n', (2539, 2544), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2611, 2633), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(3)'], {}), '(3)\n', (2630, 2633), True, 'import FWCore.ParameterSet.Config as cms\n'), 
((2664, 2688), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(542)'], {}), '(542)\n', (2683, 2688), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2755, 2777), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(4)'], {}), '(4)\n', (2774, 2777), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2808, 2832), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(542)'], {}), '(542)\n', (2827, 2832), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2899, 2921), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(5)'], {}), '(5)\n', (2918, 2921), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2952, 2976), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(542)'], {}), '(542)\n', (2971, 2976), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3043, 3065), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(6)'], {}), '(6)\n', (3062, 3065), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3096, 3121), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1226)'], {}), '(1226)\n', (3115, 3121), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from gcloud_sync_ssh.host_config import HostConfig
def test_empty():
    # A HostConfig built with no options should have an empty minidict.
    empty_config = HostConfig()
    assert not empty_config.minidict()
def test_default_lines():
    """lines() renders the configured options with the default formatting."""
    hc = HostConfig(HostName="172.16.31.10", CertificateFile="exists-or-not-we-dont-check")
    ls = hc.lines()
    # Fix: the expected HostName must match the one the config was built
    # with — the original asserted an unrelated address ('4.4.4.4') and
    # could never pass.
    assert ls == ['  CertificateFile exists-or-not-we-dont-check\n', '  HostName 172.16.31.10\n']
def test_ordering_default():
    # Without an explicit ordering, keys come out alphabetically.
    cfg = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    expected = ['  ForwardX11 no\n', '  HostName 192.168.0.20\n', '  User mrzor\n']
    assert cfg.lines() == expected
def test_ordering_fully_specified():
    # A complete ordering is honoured verbatim.
    cfg = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    assert cfg.lines(ordering=['User', 'HostName', 'ForwardX11']) == [
        '  User mrzor\n',
        '  HostName 192.168.0.20\n',
        '  ForwardX11 no\n',
    ]
def test_ordering_partially_specified():
    # Listed keys come first; the remaining keys follow alphabetically.
    cfg = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    assert cfg.lines(ordering=['User']) == [
        '  User mrzor\n',
        '  ForwardX11 no\n',
        '  HostName 192.168.0.20\n',
    ]
def test_ordering_extra_keys():
    # Ordering entries absent from the config are silently ignored.
    cfg = HostConfig(HostName="192.168.0.20", User="mrzor", ForwardX11=False)
    assert cfg.lines(ordering=['User', 'BindAddress']) == [
        '  User mrzor\n',
        '  ForwardX11 no\n',
        '  HostName 192.168.0.20\n',
    ]
def test_custom_casing():
    # Caller-supplied casings replace the canonical key spelling.
    cfg = HostConfig(HostName="192.168.0.20", User="mrzor")
    rendered = cfg.lines(casings=["HoStNaMe", "USER"])
    assert rendered == ['  HoStNaMe 192.168.0.20\n', '  USER mrzor\n']
def test_custom_separator():
    # The key/value separator is configurable.
    cfg = HostConfig(User="narcissus")
    assert cfg.lines(separator="=") == ['  User=narcissus\n']
    assert cfg.lines(separator=" = ") == ['  User = narcissus\n']
def test_custom_indent():
    # The leading indent is configurable, including none at all.
    cfg = HostConfig(User="narcissus")
    assert cfg.lines(indent="") == ['User narcissus\n']
    assert cfg.lines(indent="\t") == ['\tUser narcissus\n']
def test_quoting():
    # force_quotes wraps values in double quotes; some values are quoted
    # even without it.
    # NOTE(review): '<NAME>' looks like an anonymisation placeholder for a
    # multi-word value — confirm against the original fixture.
    simple = HostConfig(User="narcissus")
    assert simple.lines(force_quotes=True) == ['  User "narcissus"\n']
    spaced = HostConfig(User="n<NAME>")
    assert spaced.lines() == ['  User "n<NAME>"\n']
    assert spaced.lines(force_quotes=True) == ['  User "n<NAME>"\n']
def test_multiple_values():
    # A list-valued option renders one line per value (3 LocalForward
    # lines + 1 User line).
    cfg = HostConfig(LocalForward=['lf1', 'lf2', 'lf3'], User="narcissus")
    assert len(cfg.lines()) == 4
|
[
"gcloud_sync_ssh.host_config.HostConfig"
] |
[((80, 92), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {}), '()\n', (90, 92), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((159, 246), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'HostName': '"""172.16.31.10"""', 'CertificateFile': '"""exists-or-not-we-dont-check"""'}), "(HostName='172.16.31.10', CertificateFile=\n 'exists-or-not-we-dont-check')\n", (169, 246), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((399, 466), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'HostName': '"""192.168.0.20"""', 'User': '"""mrzor"""', 'ForwardX11': '(False)'}), "(HostName='192.168.0.20', User='mrzor', ForwardX11=False)\n", (409, 466), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((667, 734), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'HostName': '"""192.168.0.20"""', 'User': '"""mrzor"""', 'ForwardX11': '(False)'}), "(HostName='192.168.0.20', User='mrzor', ForwardX11=False)\n", (677, 734), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((1040, 1107), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'HostName': '"""192.168.0.20"""', 'User': '"""mrzor"""', 'ForwardX11': '(False)'}), "(HostName='192.168.0.20', User='mrzor', ForwardX11=False)\n", (1050, 1107), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((1378, 1445), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'HostName': '"""192.168.0.20"""', 'User': '"""mrzor"""', 'ForwardX11': '(False)'}), "(HostName='192.168.0.20', User='mrzor', ForwardX11=False)\n", (1388, 1445), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((1725, 1774), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'HostName': '"""192.168.0.20"""', 'User': '"""mrzor"""'}), "(HostName='192.168.0.20', User='mrzor')\n", (1735, 1774), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((1974, 2002), 'gcloud_sync_ssh.host_config.HostConfig', 
'HostConfig', ([], {'User': '"""narcissus"""'}), "(User='narcissus')\n", (1984, 2002), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((2170, 2198), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'User': '"""narcissus"""'}), "(User='narcissus')\n", (2180, 2198), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((2344, 2372), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'User': '"""narcissus"""'}), "(User='narcissus')\n", (2354, 2372), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((2452, 2478), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'User': '"""n<NAME>"""'}), "(User='n<NAME>')\n", (2462, 2478), False, 'from gcloud_sync_ssh.host_config import HostConfig\n'), ((2635, 2699), 'gcloud_sync_ssh.host_config.HostConfig', 'HostConfig', ([], {'LocalForward': "['lf1', 'lf2', 'lf3']", 'User': '"""narcissus"""'}), "(LocalForward=['lf1', 'lf2', 'lf3'], User='narcissus')\n", (2645, 2699), False, 'from gcloud_sync_ssh.host_config import HostConfig\n')]
|
import json
import PySimpleGUI as sg
from ..Ventanas import principal as ventana, ayuda
from ..Componentes import menu_estadisticas, menu_tablero
from ..Componentes import menu_puntajes
from ..Componentes import menu_configuracion
from ..Handlers import usuario
def iniciar():
    ''' Starts execution of the game menu. '''
    with open('data/usuarios.json',"r", encoding="utf8") as file:
        users = json.load(file)
    # Pick the currently-connected user; assumes exactly one entry has
    # "conectado" == 1 — raises IndexError otherwise. TODO confirm.
    user = [user for user in users if user["conectado"] == 1][0]
    # Apply the user's saved colour palette before building the window.
    sg.theme(user['configuracion']['paleta_de_colores'])
    window = ventana.crear()
    loop(window)
def loop(window):
    ''' Keeps the window open and dispatches the user's input events. '''
    while True:
        event, _value = window.read()
        if(event == '-JUGAR-'):
            # Play: hide the menu while the sub-screen runs, then restore it.
            window.hide()
            menu_tablero.start()
            window.un_hide()
        elif(event == '-CONFIG-'):
            # Settings screen.
            window.hide()
            menu_configuracion.start()
            window.un_hide()
        elif(event == '-PUNTOS-'):
            # High-scores screen.
            window.hide()
            menu_puntajes.start()
            window.un_hide()
        elif(event == '-ESTAD-'):
            # Statistics screen.
            window.hide()
            menu_estadisticas.start()
            window.un_hide()
        elif(event == '-AYUDA-'):
            # Help is a separate window; block until it is closed.
            win = ayuda.crear()
            win.read()
            win.close()
        elif(event == '-SALIR-' or event == sg.WIN_CLOSED):
            # Mark the current user as disconnected before quitting.
            user = usuario.usuario_conectado()
            usuario.user_disconnected(user)
            window.close()
            break
|
[
"PySimpleGUI.theme",
"json.load"
] |
[((415, 430), 'json.load', 'json.load', (['file'], {}), '(file)\n', (424, 430), False, 'import json\n'), ((508, 560), 'PySimpleGUI.theme', 'sg.theme', (["user['configuracion']['paleta_de_colores']"], {}), "(user['configuracion']['paleta_de_colores'])\n", (516, 560), True, 'import PySimpleGUI as sg\n')]
|
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek
# Load AWS credentials from dl.cfg and expose them to boto/hadoop-aws.
config = configparser.ConfigParser()
config.read('dl.cfg')
# NOTE(review): config['AWS_ACCESS_KEY_ID'] indexes a *section*, not a key,
# so this assigns a SectionProxy (not a string) to the environment. The
# usual form is config['SECTION']['AWS_ACCESS_KEY_ID'] — confirm the layout
# of dl.cfg before relying on this.
os.environ['AWS_ACCESS_KEY_ID']=config['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Create (or reuse) a SparkSession with the hadoop-aws package configured."""
    session = (
        SparkSession
        .builder
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0")
        .getOrCreate()
    )
    return session
def process_song_data(spark, input_data, output_data):
    '''
    Process song data to build the songs and artists table and write them to parquet files
    Inputs:
        spark: spark session
        input_data: path to data files to extract the data
        output_data: path where the created tables will be stored
    '''
    # get filepath to song data file
    # NOTE(review): only the 'A/*/*' subtree is read — presumably to keep
    # the sample small; confirm whether the full dataset should be used.
    song_data = input_data + 'song_data/A/*/*/*.json'
    # read song data file
    df = spark.read.json(song_data)
    # extract columns to create songs table (deduplicated)
    songs_table = df.select('song_id', 'title', 'artist_id','year', 'duration').dropDuplicates()
    # write songs table to parquet files partitioned by year and artist
    songs_table.write.partitionBy('year', 'artist_id').parquet((output_data + 'songs/songs.parquet'), 'overwrite')
    # extract columns to create artists table (deduplicated)
    artists_table = df.select('artist_id','artist_name','artist_location','artist_latitude','artist_longitude').dropDuplicates()
    # write artists table to parquet files
    artists_table.write.parquet((output_data + 'artists/artists.parquet'), 'overwrite')
def process_log_data(spark, input_data, output_data):
    '''
    Process log data to build the users, time and songplays tables and write them to parquet files
    Inputs:
        spark: spark session
        input_data: path to data files to extract the data
        output_data: path where the created tables will be stored
    '''
    # Needed for songplay_id generation; imported locally because the
    # module-level import list does not include it.
    from pyspark.sql.functions import monotonically_increasing_id
    # get filepath to log data file
    log_data = input_data + 'log_data/*/*/*.json'
    # read log data file
    df = spark.read.json(log_data)
    # filter by actions for song plays
    actions_df = df.filter(df.page == 'NextSong').select('ts', 'userId', 'level', 'song', 'artist',
                                                         'sessionId', 'location', 'userAgent')
    # extract columns for users table
    users_table = df.select('userId', 'firstName', 'lastName', 'gender', 'level').dropDuplicates()
    # write users table to parquet files
    # (fix: the original wrote the undefined name `users`)
    users_table.write.parquet((output_data + 'users/users.parquet'), 'overwrite')
    # create timestamp column from original timestamp column (ms -> s)
    get_timestamp = udf(lambda x: str(int(int(x) / 1000)))
    df = actions_df.withColumn('timestamp', get_timestamp(actions_df.ts))
    # create datetime column from original timestamp column
    get_datetime = udf(lambda x: str(datetime.fromtimestamp(int(x) / 1000)))
    df = df.withColumn('start_time', get_datetime(df.ts))
    # derive the calendar fields used by the time table
    df = df.withColumn('hour', hour('start_time'))
    df = df.withColumn('day', dayofmonth('start_time'))
    df = df.withColumn('month', month('start_time'))
    df = df.withColumn('year', year('start_time'))
    df = df.withColumn('week', weekofyear('start_time'))
    df = df.withColumn('weekday', dayofweek('start_time'))
    time_table = df.select('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday').dropDuplicates()
    # write time table to parquet files partitioned by year and month
    time_table.write.partitionBy('year', 'month').parquet((output_data + 'time/time.parquet'), 'overwrite')
    # read in song data to use for songplays table
    song_df = spark.read.json(input_data + 'song_data/A/*/*/*.json')
    df = df.join(song_df, song_df.title == df.song)
    # extract columns from joined song and log datasets to create songplays table
    # (fixes: 'ssessionId' typo -> 'sessionId'; 'year'/'month' kept in the
    # selection so the partitionBy below has the columns it needs)
    songplays_table = df.select('start_time', 'userId', 'level', 'song_id', 'artist_id', 'sessionId',
                                'location', 'userAgent', 'year', 'month') \
                        .withColumn('songplay_id', monotonically_increasing_id())
    # write songplays table to parquet files partitioned by year and month
    songplays_table.write.partitionBy('year', 'month').parquet((output_data + 'songplays/songplays.parquet'), 'overwrite')
def main():
    """Run the full ETL: process song data first, then log data."""
    spark = create_spark_session()
    src_root = "s3a://udacity-dend/"
    dest_root = ""
    process_song_data(spark, src_root, dest_root)
    process_log_data(spark, src_root, dest_root)


if __name__ == "__main__":
    main()
|
[
"pyspark.sql.functions.dayofmonth",
"pyspark.sql.functions.month",
"pyspark.sql.SparkSession.builder.config",
"pyspark.sql.functions.weekofyear",
"pyspark.sql.functions.hour",
"pyspark.sql.functions.dayofweek",
"configparser.ConfigParser",
"pyspark.sql.functions.year"
] |
[((253, 280), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (278, 280), False, 'import configparser\n'), ((3268, 3286), 'pyspark.sql.functions.hour', 'hour', (['"""start_time"""'], {}), "('start_time')\n", (3272, 3286), False, 'from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n'), ((3318, 3342), 'pyspark.sql.functions.dayofmonth', 'dayofmonth', (['"""start_time"""'], {}), "('start_time')\n", (3328, 3342), False, 'from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n'), ((3376, 3395), 'pyspark.sql.functions.month', 'month', (['"""start_time"""'], {}), "('start_time')\n", (3381, 3395), False, 'from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n'), ((3428, 3446), 'pyspark.sql.functions.year', 'year', (['"""start_time"""'], {}), "('start_time')\n", (3432, 3446), False, 'from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n'), ((3479, 3503), 'pyspark.sql.functions.weekofyear', 'weekofyear', (['"""start_time"""'], {}), "('start_time')\n", (3489, 3503), False, 'from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n'), ((3539, 3562), 'pyspark.sql.functions.dayofweek', 'dayofweek', (['"""start_time"""'], {}), "('start_time')\n", (3548, 3562), False, 'from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n'), ((528, 620), 'pyspark.sql.SparkSession.builder.config', 'SparkSession.builder.config', (['"""spark.jars.packages"""', '"""org.apache.hadoop:hadoop-aws:2.7.0"""'], {}), "('spark.jars.packages',\n 'org.apache.hadoop:hadoop-aws:2.7.0')\n", (555, 620), False, 'from pyspark.sql import SparkSession\n')]
|
import json
import requests
import sys
def fetch_jsons(json_output=False):
print('[INFO] Fetching latest resource files.')
try:
insecure_deps = json.loads(requests.get('https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure.json').content)
print('[INFO] Fetched list of Insecure Dependencies.')
insecure_deps_full = json.loads(requests.get('https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure_full.json').content)
print('[INFO] Fetched list of Security Advisories')
print('[INFO] Fetch complete.')
return insecure_deps, insecure_deps_full
except:
print('[ERR] An error occurred while fetching resouce files. Maybe you\'re not connected to the internet?')
sys.exit(1)
if __name__ == "__main__":
insecure_deps, insecure_deps_full = fetch_jsons()
for deps in insecure_deps:
ver_list = insecure_deps[deps]
for ver in ver_list:
if len(ver.split(',')) > 2:
print(ver_list, ver)
|
[
"requests.get",
"sys.exit"
] |
[((778, 789), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (786, 789), False, 'import sys\n'), ((174, 276), 'requests.get', 'requests.get', (['"""https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure.json"""'], {}), "(\n 'https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure.json'\n )\n", (186, 276), False, 'import requests\n'), ((380, 487), 'requests.get', 'requests.get', (['"""https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure_full.json"""'], {}), "(\n 'https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure_full.json'\n )\n", (392, 487), False, 'import requests\n')]
|
# %%
# line printer
def printer(info):
print('\n\n================================= {} =================================================\n\n'.format(info))
# %%
import itertools
# to get a counter starting from 0 to infinte
counter = itertools.count() # return type is the iterator and count will start from 0 and you can use it with for loop
# or next() command
# if you want to get a counter starting from certain number
start = 10
counter = itertools.count(start=start, step=-5) # step function is optional and can be negative
# itertools.count() can be used with zip function like
counter = itertools.count(start=start, step=-5)
l = [100, 200, 300, 400]
zipped = zip(counter, l)
print(list(zipped))
# %%
from itertools import zip_longest
# zip is used to map two list and shortest list length will be considered, Iteration continues until the longest
# iterable is exhausted
l1 = ['sanket', 'sanchita', 'rohan', 'devi', 'adarsh', 'vishnu', 'prashant', 'chirag']
l2 = [1, 2, 3, 4, 5, 6]
print(list(zip(l1, l2)))
print(list(zip_longest(l1, l2)))
# %%
from itertools import cycle
# cycle is used as circular linked list where we can iterate through certain value over and over again
# it can be use with list, tuple, string
counter = cycle([1, 2, 3, 4])
counter = cycle(('On', 'Off'))
counter = cycle('san') # it will repeat 's' 'a' and 'n'
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
# %%
from itertools import repeat
# repeat can be used to repeat single value multiple time
counter = repeat(2, times=3) # repeat 2, 3 times-- times is optional element and if not provided repeat many times
print(next(counter))
print(next(counter))
print(next(counter))
# example
squares = map(pow, range(1, 10), repeat(2, times=3))
print(list(squares))
# %%
from itertools import starmap
'''
def starmap(function, iterable):
# starmap(pow, [(2,5), (3,2), (10,3)]) --> 32 9 1000
for args in iterable:
yield function(*args)
'''
def power(x, y):
return x ** y
# the above example in repeat can be used with starmap as
squares = starmap(power, [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)])
print(list(squares))
# how *args work
# for k in [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)]:
# print(*k)
# %%
from itertools import combinations, permutations
# used for getting all the possible combination
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
result = combinations(letters, 3) # produce the combination where (a,b) and (b,a) is same so only one will be mentioned
print(list(result))
# if order matters the use permutations
result = permutations(letters, 3)
print(list(result))
# for example if we want to craete 4 digit code using number where combiantion can also include same number multiple
# time
from itertools import product
# computes the cartesion product
# product(iterable, repeat=n) this is permutation with replacement
# for solution see https://www.hackerrank.com/challenges/itertools-product/problem
result = product(number, repeat=3) # arrange numbers in group of 3
print(list(result))
# in product function we have to provide repeat argumnt, the similar function that can be used is
# combinations_with_replacement
from itertools import combinations_with_replacement
result = combinations_with_replacement(number, 4) # arrange number in a group of 4 where each number can be used
# multiple times
print(list(result))
# for permutations with replacement use below link for solution
# https://stackoverflow.com/questions/46255220/how-to-produce-permutations-with-replacement-in-python
'''
import itertools
choices = [-1, 1]
n = 3
l = [choices] * n
list(itertools.product(*l))
'''
# %%
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
# if you want to iter through all letters, number, names you first have to craete a list add all those to a single list
# and then iter through new list, this can be solved using chain
from itertools import chain
iterator = chain(letters, number, names)
print(iterator) # object of iterator
# print(list(iterator)) # print the complete list
# %%
# islice is used to slice the iterable object which is memory efficient
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
import itertools
from itertools import islice
result = islice(letters, 1, 3)
print(list(result))
result = itertools.islice(range(10), 1, 5) # islice take (iterable, start, stop) or (iterable, stop)
# islice is used when , suppose you have a very long iterator which is hard to load into memory and then slicing, it can
# be costly, lets, suppose we have a file
import os
with open('collections_module/islice_text', 'r') as file:
# file object is an iterator
header = islice(file, 3)
for line in header:
print(line, end='')
#
# %%
import numpy as np
np.random.seed(42) # seeding is done for pseudo number generator
selectors = np.random.randint(1, 10000, 50)
'''
letters = np.array(['a', 'b', 'c', 'd', 'e'])
print(selectors)
print(letters[selectors])
'''
# filterfalse work as same as filter instead of returning true value it returns false values
from itertools import filterfalse
result = filterfalse(lambda x: x > 5000, selectors) # return an iterator
print(list(result))
# %%
# dropwhile can be used when you want to filter until the condition is met for the first time
# similar to dropwhile is takewhile but the opposite of the former
import numpy as np
np.random.seed(42)
selectors = np.random.randint(1, 10000, 50)
print(selectors)
from itertools import dropwhile
result = dropwhile(lambda x: x > 500, selectors)
print(len(list(result)))
# %%
# accumulate -- it is used to work on the iterable and is used to perform cumulative operations
import numpy as np
np.random.seed(42)
selectors = np.random.randint(1, 10, 10)
print(selectors)
from itertools import accumulate
import operator
result = accumulate(selectors, operator.mul)
print(list(result))
printer('string work')
# working with the string
selectors = np.random.choice(['a', 'b', 'c', 'f', 'g'], size=10)
print(selectors)
result = accumulate(selectors, lambda x, y: x + y)
print(list(result))
#%%
# groupby work as same as pandas.groupby
people = [
{
'name': '<NAME>',
'city': 'Gotham',
'state': 'NY'
},
{
'name': '<NAME>',
'city': 'Kings Landing',
'state': 'NY'
},
{
'name': '<NAME>',
'city': 'Boulder',
'state': 'CO'
},
{
'name': '<NAME>',
'city': 'Denver',
'state': 'CO'
},
{
'name': '<NAME>',
'city': 'Hinton',
'state': 'WV'
},
{
'name': '<NAME>',
'city': 'Rand',
'state': 'WV'
},
{
'name': '<NAME>',
'city': 'Asheville',
'state': 'NC'
},
{
'name': '<NAME>',
'city': 'Charlotte',
'state': 'NC'
},
{
'name': '<NAME>',
'city': 'Faketown',
'state': 'NC'
}
]
def get_state(person):
return person['state']
# for groupby to work efficiently, the key has to be sorted else it will not work as expected
from itertools import groupby
result = groupby(people, get_state)
for key, group in result:
# here group is an iterator
#
for g in group:
print(key,' ', g)
#%%
# in order to create multiple copies of iterator you can use tee
import numpy as np
np.random.seed(42) # seeding is done for pseudo number generator
selectors = np.random.randint(1, 10000, 50)
from itertools import filterfalse
from itertools import tee
result = filterfalse(lambda x: x < 500, selectors)
copy1, copy2 = tee(result) # don't use the original result iterator
print(list(copy1))
print(list(copy2))
|
[
"numpy.random.seed",
"numpy.random.randint",
"itertools.cycle",
"itertools.permutations",
"itertools.zip_longest",
"numpy.random.choice",
"itertools.product",
"itertools.chain",
"itertools.filterfalse",
"itertools.accumulate",
"itertools.count",
"itertools.combinations",
"itertools.islice",
"itertools.tee",
"itertools.groupby",
"itertools.repeat",
"itertools.starmap",
"itertools.combinations_with_replacement",
"itertools.dropwhile"
] |
[((244, 261), 'itertools.count', 'itertools.count', ([], {}), '()\n', (259, 261), False, 'import itertools\n'), ((456, 493), 'itertools.count', 'itertools.count', ([], {'start': 'start', 'step': '(-5)'}), '(start=start, step=-5)\n', (471, 493), False, 'import itertools\n'), ((609, 646), 'itertools.count', 'itertools.count', ([], {'start': 'start', 'step': '(-5)'}), '(start=start, step=-5)\n', (624, 646), False, 'import itertools\n'), ((1257, 1276), 'itertools.cycle', 'cycle', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1262, 1276), False, 'from itertools import cycle\n'), ((1287, 1307), 'itertools.cycle', 'cycle', (["('On', 'Off')"], {}), "(('On', 'Off'))\n", (1292, 1307), False, 'from itertools import cycle\n'), ((1318, 1330), 'itertools.cycle', 'cycle', (['"""san"""'], {}), "('san')\n", (1323, 1330), False, 'from itertools import cycle\n'), ((1595, 1613), 'itertools.repeat', 'repeat', (['(2)'], {'times': '(3)'}), '(2, times=3)\n', (1601, 1613), False, 'from itertools import repeat\n'), ((2147, 2211), 'itertools.starmap', 'starmap', (['power', '[(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)]'], {}), '(power, [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)])\n', (2154, 2211), False, 'from itertools import starmap\n'), ((2528, 2552), 'itertools.combinations', 'combinations', (['letters', '(3)'], {}), '(letters, 3)\n', (2540, 2552), False, 'from itertools import combinations, permutations\n'), ((2710, 2734), 'itertools.permutations', 'permutations', (['letters', '(3)'], {}), '(letters, 3)\n', (2722, 2734), False, 'from itertools import combinations, permutations\n'), ((3103, 3128), 'itertools.product', 'product', (['number'], {'repeat': '(3)'}), '(number, repeat=3)\n', (3110, 3128), False, 'from itertools import product\n'), ((3376, 3416), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['number', '(4)'], {}), '(number, 4)\n', (3405, 3416), False, 'from itertools import combinations_with_replacement\n'), ((4117, 4146), 
'itertools.chain', 'chain', (['letters', 'number', 'names'], {}), '(letters, number, names)\n', (4122, 4146), False, 'from itertools import chain\n'), ((4456, 4477), 'itertools.islice', 'islice', (['letters', '(1)', '(3)'], {}), '(letters, 1, 3)\n', (4462, 4477), False, 'from itertools import islice\n'), ((4979, 4997), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4993, 4997), True, 'import numpy as np\n'), ((5057, 5088), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)', '(50)'], {}), '(1, 10000, 50)\n', (5074, 5088), True, 'import numpy as np\n'), ((5326, 5368), 'itertools.filterfalse', 'filterfalse', (['(lambda x: x > 5000)', 'selectors'], {}), '(lambda x: x > 5000, selectors)\n', (5337, 5368), False, 'from itertools import filterfalse\n'), ((5600, 5618), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5614, 5618), True, 'import numpy as np\n'), ((5631, 5662), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)', '(50)'], {}), '(1, 10000, 50)\n', (5648, 5662), True, 'import numpy as np\n'), ((5723, 5762), 'itertools.dropwhile', 'dropwhile', (['(lambda x: x > 500)', 'selectors'], {}), '(lambda x: x > 500, selectors)\n', (5732, 5762), False, 'from itertools import dropwhile\n'), ((5911, 5929), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5925, 5929), True, 'import numpy as np\n'), ((5942, 5970), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (5959, 5970), True, 'import numpy as np\n'), ((6048, 6083), 'itertools.accumulate', 'accumulate', (['selectors', 'operator.mul'], {}), '(selectors, operator.mul)\n', (6058, 6083), False, 'from itertools import accumulate\n'), ((6167, 6219), 'numpy.random.choice', 'np.random.choice', (["['a', 'b', 'c', 'f', 'g']"], {'size': '(10)'}), "(['a', 'b', 'c', 'f', 'g'], size=10)\n", (6183, 6219), True, 'import numpy as np\n'), ((6246, 6287), 'itertools.accumulate', 'accumulate', (['selectors', '(lambda x, y: 
x + y)'], {}), '(selectors, lambda x, y: x + y)\n', (6256, 6287), False, 'from itertools import accumulate\n'), ((7351, 7377), 'itertools.groupby', 'groupby', (['people', 'get_state'], {}), '(people, get_state)\n', (7358, 7377), False, 'from itertools import groupby\n'), ((7580, 7598), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7594, 7598), True, 'import numpy as np\n'), ((7658, 7689), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)', '(50)'], {}), '(1, 10000, 50)\n', (7675, 7689), True, 'import numpy as np\n'), ((7761, 7802), 'itertools.filterfalse', 'filterfalse', (['(lambda x: x < 500)', 'selectors'], {}), '(lambda x: x < 500, selectors)\n', (7772, 7802), False, 'from itertools import filterfalse\n'), ((7819, 7830), 'itertools.tee', 'tee', (['result'], {}), '(result)\n', (7822, 7830), False, 'from itertools import tee\n'), ((1808, 1826), 'itertools.repeat', 'repeat', (['(2)'], {'times': '(3)'}), '(2, times=3)\n', (1814, 1826), False, 'from itertools import repeat\n'), ((4881, 4896), 'itertools.islice', 'islice', (['file', '(3)'], {}), '(file, 3)\n', (4887, 4896), False, 'from itertools import islice\n'), ((1044, 1063), 'itertools.zip_longest', 'zip_longest', (['l1', 'l2'], {}), '(l1, l2)\n', (1055, 1063), False, 'from itertools import zip_longest\n')]
|
#!/usr/bin/python
import subprocess
import sys
import os
# This is in the `unicode-character-database` package on Arch Linux.
UNICODE_DATABASE = '/usr/share/unicode-character-database/UnicodeData.txt'
WORDS = 'data/words.txt'
NOTO_EMOJI_DIR = '/home/jasper/Stash/noto-emoji'
def load_unicode_database():
data = {}
with open(UNICODE_DATABASE, 'rb') as db:
for line in db:
[code, name, _] = line.decode('utf-8').split(';', 2)
name = name.lower().replace(' ', '-')
code = code.lower()
data[name] = code
return data
def load_words():
words = {}
with open(WORDS, 'r') as f:
for line in f:
pieces = line.split(':')
if len(pieces) <= 1:
words[pieces[0].strip()] = pieces[0].strip()
else:
words[pieces[0].strip()] = pieces[1].strip()
return words
if __name__ == "__main__":
unicode_database = load_unicode_database()
words = load_words()
montage_files = []
for word, canonical in words.items():
code = unicode_database[canonical]
print('{} -> {} -> {}'.format(word, canonical, code), file=sys.stderr)
png_128 = "{}/png/128/emoji_u{}.png".format(NOTO_EMOJI_DIR, code)
if not os.path.isfile(png_128):
raise Exception("{} does not exist".format(png_128))
else:
montage_files += [png_128]
print('Invoking montage...', file=sys.stderr)
tmp_png = 'build/sprites-tmp-01.png'
montage_command = \
['montage', '-geometry', '32x32+0+0', '-background', 'none'] + \
montage_files + \
['png32:{}'.format(tmp_png)]
subprocess.run(montage_command, check=True)
print('Fixing alpha...', file=sys.stderr)
subprocess.run([
'convert', '-channel', 'A', '-threshold', '50%', tmp_png,
'png32:build/sprites.png'
], check=True)
|
[
"subprocess.run",
"os.path.isfile"
] |
[((1539, 1582), 'subprocess.run', 'subprocess.run', (['montage_command'], {'check': '(True)'}), '(montage_command, check=True)\n', (1553, 1582), False, 'import subprocess\n'), ((1630, 1747), 'subprocess.run', 'subprocess.run', (["['convert', '-channel', 'A', '-threshold', '50%', tmp_png,\n 'png32:build/sprites.png']"], {'check': '(True)'}), "(['convert', '-channel', 'A', '-threshold', '50%', tmp_png,\n 'png32:build/sprites.png'], check=True)\n", (1644, 1747), False, 'import subprocess\n'), ((1176, 1199), 'os.path.isfile', 'os.path.isfile', (['png_128'], {}), '(png_128)\n', (1190, 1199), False, 'import os\n')]
|
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List
import subprocess
import random
import string
import os
app = FastAPI()
class Item(BaseModel):
kube_str: str
yaml_file: List[str]
class Response(BaseModel):
Output: str
Error: str
@app.get("/general/{item}", response_model=Response)
async def general_kubectl(item: str):
if item.startswith("kubectl"):
input = []
else:
input = ["kubectl"]
input.extend(item.split(" "))
result = subprocess.run(input, capture_output=True, text=True)
return {"Output": result.stdout, "Error": result.stderr}
@app.get("/general_json", response_model=Response)
async def general_kubectl_json(request: Item):
if request.kube_str.count(".yaml") + request.kube_str.count(".yml") >=2:
return {"Output": "", "Error": "There should be only one YAML in kube_str"}
#Generate YAML file
file_name = ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=30))+".yaml"
file_path_name = "/cache/"+ file_name
with open(file_path_name, 'w') as f:
for item in request.yaml_file:
f.write("%s\n" % item)
#Create Kubectl command line string
if request.kube_str.startswith("kubectl"):
input = []
else:
input = ["kubectl"]
splitted_kube_str = [item if ("yaml" not in item and "yml" not in item) else file_path_name for item in request.kube_str.split(" ")]
input.extend(splitted_kube_str)
#Execute and remove YAML file
result = subprocess.run(input, capture_output=True, text=True)
if os.path.exists(file_path_name):
os.remove(file_path_name)
return {"Output": result.stdout, "Error": result.stderr}
|
[
"subprocess.run",
"os.remove",
"random.choices",
"os.path.exists",
"fastapi.FastAPI"
] |
[((146, 155), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (153, 155), False, 'from fastapi import FastAPI\n'), ((513, 566), 'subprocess.run', 'subprocess.run', (['input'], {'capture_output': '(True)', 'text': '(True)'}), '(input, capture_output=True, text=True)\n', (527, 566), False, 'import subprocess\n'), ((1557, 1610), 'subprocess.run', 'subprocess.run', (['input'], {'capture_output': '(True)', 'text': '(True)'}), '(input, capture_output=True, text=True)\n', (1571, 1610), False, 'import subprocess\n'), ((1618, 1648), 'os.path.exists', 'os.path.exists', (['file_path_name'], {}), '(file_path_name)\n', (1632, 1648), False, 'import os\n'), ((1658, 1683), 'os.remove', 'os.remove', (['file_path_name'], {}), '(file_path_name)\n', (1667, 1683), False, 'import os\n'), ((937, 1027), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.ascii_lowercase + string.digits)'], {'k': '(30)'}), '(string.ascii_uppercase + string.ascii_lowercase + string.\n digits, k=30)\n', (951, 1027), False, 'import random\n')]
|
from collections import OrderedDict
import pytest
from powersimdata.data_access.execute_table import ExecuteTable
from powersimdata.data_access.sql_store import SqlError
row_id = 9000
def _get_test_row():
global row_id
row_id += 1
return OrderedDict([("id", row_id)])
class NoEffectSqlStore(ExecuteTable):
def __exit__(self, exc_type, exc_value, traceback):
self.conn.rollback()
super().__exit__(exc_type, exc_value, traceback)
class RaiseErrorSqlStore(ExecuteTable):
def add_entry(self, scenario_info):
raise Exception("Error while executing sql")
@pytest.fixture
def store():
with NoEffectSqlStore() as store:
yield store
@pytest.mark.integration
@pytest.mark.db
def test_err_handle():
with pytest.raises(SqlError):
with RaiseErrorSqlStore() as store:
store.add_entry(None)
@pytest.mark.integration
@pytest.mark.db
def test_select_no_limit(store):
store.add_entry(_get_test_row())
store.add_entry(_get_test_row())
result = store.get_execute_table()
assert result.shape[0] == 2
@pytest.mark.integration
@pytest.mark.db
def test_select_with_limit(store):
n_rows = 6
limit = 3
for i in range(n_rows):
store.add_entry(_get_test_row())
result = store.get_execute_table(limit)
assert result.shape[0] == limit
@pytest.mark.integration
@pytest.mark.db
def test_add_entry(store):
info = _get_test_row()
store.add_entry(info)
status = store.get_status(info["id"])
assert status.loc[0, "status"] == "created"
@pytest.mark.integration
@pytest.mark.db
def test_update_entry(store):
info = _get_test_row()
store.add_entry(info)
sid = info["id"]
store.set_status(sid, "testing")
status = store.get_status(sid)
assert status.loc[0, "status"] == "testing"
@pytest.mark.integration
@pytest.mark.db
def test_delete_entry(store):
info = _get_test_row()
sid = info["id"]
store.add_entry(info)
store.delete_entry(sid)
status = store.get_status(sid)
assert status.shape == (0, 0)
|
[
"collections.OrderedDict",
"pytest.raises"
] |
[((255, 284), 'collections.OrderedDict', 'OrderedDict', (["[('id', row_id)]"], {}), "([('id', row_id)])\n", (266, 284), False, 'from collections import OrderedDict\n'), ((766, 789), 'pytest.raises', 'pytest.raises', (['SqlError'], {}), '(SqlError)\n', (779, 789), False, 'import pytest\n')]
|
import requests
import github3
import expecter
import json
import sys
from mock import patch
from io import BytesIO
from unittest import TestCase
from requests.structures import CaseInsensitiveDict
is_py3 = sys.version_info > (3, 0)
def load(name):
with path(name) as f:
j = json.load(f)
return j
def path(name, mode='r'):
return open('tests/json/{0}'.format(name), mode)
class CustomExpecter(expecter.expect):
def is_not_None(self):
assert self._actual is not None, (
'Expected anything but None but got it.'
)
def is_None(self):
assert self._actual is None, (
'Expected None but got %s' % repr(self._actual) # nopep8
)
def is_True(self):
assert self._actual is True, (
'Expected True but got %s' % repr(self._actual) # nopep8
)
def is_False(self):
assert self._actual is False, (
'Expected False but got %s' % repr(self._actual) # nopep8
)
def is_in(self, iterable):
assert self._actual in iterable, (
"Expected %s in %s but it wasn't" % (
repr(self._actual), repr(iterable)
)
)
@classmethod
def githuberror(cls):
return cls.raises(github3.GitHubError)
expect = CustomExpecter
class BaseCase(TestCase):
github_url = 'https://api.github.com/'
def setUp(self):
self.g = github3.GitHub()
self.args = ()
self.conf = {'allow_redirects': True}
self.mock = patch.object(requests.sessions.Session, 'request')
self.request = self.mock.start()
def tearDown(self):
self.mock.stop()
def login(self):
self.g.login('user', 'password')
def mock_assertions(self):
assert self.request.called is True
conf = self.conf.copy()
args, kwargs = self.request.call_args
expect(self.args) == args
if 'data' in self.conf:
if isinstance(self.conf['data'], dict):
for k, v in list(self.conf['data'].items()):
s = json.dumps({k: v})[1:-1]
expect(s).is_in(kwargs['data'])
else:
expect(self.conf['data']) == kwargs['data']
del self.conf['data']
for k in self.conf:
expect(k).is_in(kwargs)
expect(self.conf[k]) == kwargs[k]
self.request.reset_mock()
self.conf = conf
def response(self, path_name, status_code=200, enc='utf-8',
_iter=False, **headers):
r = requests.Response()
r.status_code = status_code
r.encoding = enc
if path_name:
with path(path_name) as f:
content = f.read().strip()
if _iter:
content = '[{0}]'.format(content)
r.raw = RequestsBytesIO(content.encode())
elif is_py3:
r.raw = RequestsBytesIO(content.encode())
else:
r.raw = RequestsBytesIO(content)
else:
r.raw = RequestsBytesIO()
if headers:
r.headers = CaseInsensitiveDict(headers)
self.request.return_value = r
def delete(self, url):
self.args = ('DELETE', url)
self.conf = {}
def get(self, url):
self.args = ('GET', url)
def patch(self, url):
self.args = ('PATCH', url)
def post(self, url):
self.args = ('POST', url)
def put(self, url):
self.args = ('PUT', url)
def not_called(self):
expect(self.request.called).is_False()
class RequestsBytesIO(BytesIO):
def read(self, chunk_size, *args, **kwargs):
return super(RequestsBytesIO, self).read(chunk_size)
|
[
"mock.patch.object",
"json.load",
"json.dumps",
"requests.Response",
"requests.structures.CaseInsensitiveDict",
"github3.GitHub"
] |
[((290, 302), 'json.load', 'json.load', (['f'], {}), '(f)\n', (299, 302), False, 'import json\n'), ((1429, 1445), 'github3.GitHub', 'github3.GitHub', ([], {}), '()\n', (1443, 1445), False, 'import github3\n'), ((1535, 1585), 'mock.patch.object', 'patch.object', (['requests.sessions.Session', '"""request"""'], {}), "(requests.sessions.Session, 'request')\n", (1547, 1585), False, 'from mock import patch\n'), ((2578, 2597), 'requests.Response', 'requests.Response', ([], {}), '()\n', (2595, 2597), False, 'import requests\n'), ((3142, 3170), 'requests.structures.CaseInsensitiveDict', 'CaseInsensitiveDict', (['headers'], {}), '(headers)\n', (3161, 3170), False, 'from requests.structures import CaseInsensitiveDict\n'), ((2098, 2116), 'json.dumps', 'json.dumps', (['{k: v}'], {}), '({k: v})\n', (2108, 2116), False, 'import json\n')]
|
from pathlib import Path
from typing import List, Optional, Union
from exofile.archive import ExoFile
from pandas.core.frame import DataFrame
from ephemere import constants as const
def load_archive(
query: bool = True,
exofile_param_file: Optional[Union[str, Path]] = None,
use_alt_exofile: bool = False,
keep_controv: bool = False,
warn_units: bool = False,
warn_local_file: bool = False,
convert_omega: bool = True,
return_pandas: bool = True,
**kwargs
) -> DataFrame:
# TODO: Either merge the warn_units in exofile or use warning filters here instead
# Masterfile PR: https://github.com/AntoineDarveau/exofile/pull/26
tbl = ExoFile.load(
query=query,
param=exofile_param_file,
use_alt_file=use_alt_exofile,
warn_units=warn_units,
warn_local_file=warn_local_file,
**kwargs,
)
if not keep_controv:
tbl = tbl[tbl[const.CONTROV_FLAG] == 0]
# All our RV calculations expect omega in radians, so convert now
if convert_omega and tbl[const.OMEGA_KEY].unit != "rad":
tbl[const.OMEGA_KEY] = tbl[const.OMEGA_KEY].to("rad")
tbl[const.OMEGA_KEY + "err1"] = tbl[const.OMEGA_KEY + "err1"].to("rad")
tbl[const.OMEGA_KEY + "err2"] = tbl[const.OMEGA_KEY + "err2"].to("rad")
return tbl.to_pandas() if return_pandas else tbl
def get_archive_names(names: List[str]) -> List[str]:
new_objs = names.copy()
# GL/GJ stars are all "GJ " in NASA archive (and in exofile)
def _replace_gj(oname: str):
# Version with spaces before otherwise the space-free version will match
gj_alts = ["GJ", "GL", "Gl"]
gj_alts_with_space = [gja + " " for gja in gj_alts]
gj_alts = tuple(gj_alts_with_space + gj_alts)
if oname.startswith(gj_alts):
for gja in gj_alts:
if oname.startswith(gja):
return oname.replace(gja, "GJ ")
else:
return oname
new_objs = [_replace_gj(o) for o in new_objs]
# Handle binary stars
def _format_binary(oname: str):
if oname.endswith((" A", " B")):
return oname
elif oname.endswith(("A", "B")):
return oname[:-1] + " " + oname[-1]
else:
return oname
new_objs = [_format_binary(o) for o in new_objs]
return new_objs
|
[
"exofile.archive.ExoFile.load"
] |
[((681, 837), 'exofile.archive.ExoFile.load', 'ExoFile.load', ([], {'query': 'query', 'param': 'exofile_param_file', 'use_alt_file': 'use_alt_exofile', 'warn_units': 'warn_units', 'warn_local_file': 'warn_local_file'}), '(query=query, param=exofile_param_file, use_alt_file=\n use_alt_exofile, warn_units=warn_units, warn_local_file=warn_local_file,\n **kwargs)\n', (693, 837), False, 'from exofile.archive import ExoFile\n')]
|