| id | content |
|---|---|
445991
|
import unittest
from winthingies.win32.const import EVENT_TRACE_FLAG_REGISTRY
from winthingies.trace import TraceProvider
from winthingies.trace import TraceSession
class TestProvider(unittest.TestCase):
def test_kernel_provider(self):
provider_kernel_trace = TraceProvider(
'Windows Kernel Trace',
"{9E814AAD-3204-11D2-9A82-006008A86939}",
match_any_keyword=EVENT_TRACE_FLAG_REGISTRY
)
t_session = TraceSession(
"NT Kernel Logger",
[provider_kernel_trace]
)
t_session.start()
t_session.stop()
def test_other_provider(self):
provider_kernel_trace = TraceProvider(
'Microsoft-Windows-Kernel-Registry',
"{70EB4F03-C1DE-4F73-A051-33D13D5413BD}"
)
t_session = TraceSession(
"Test 1",
[provider_kernel_trace]
)
t_session.start()
t_session.stop()
if __name__ == '__main__':
unittest.main()
|
446022
|
from random import randint
import codecs
import pickle
# First create a dictionary for each relation with the heads/tails
# Given a triple sample a negative triple by changing the head/tail from the corresponding triple dictionary
def load_binary_file(in_file, py_version=2):
if py_version == 2:
with open(in_file, 'rb') as f:
embeddings = pickle.load(f)
return embeddings
else:
with open(in_file, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
p = u.load()
return p
def load_triples(triple_file):
triples = []
text_file = codecs.open(triple_file, "r", "utf-8")
lines = text_file.readlines()
for line in lines:
line_arr = line.rstrip("\r\n").split("\t")
head = line_arr[0]
tail = line_arr[1]
rel = line_arr[2]
triples.append((head,tail,rel))
return triples
def load_triples_with_labels(triple_file):
triples = []
text_file = codecs.open(triple_file, "r", "utf-8")
lines = text_file.readlines()
for line in lines:
line_arr = line.rstrip("\r\n").split("\t")
head = line_arr[0]
tail = line_arr[1]
rel = line_arr[2]
label = line_arr[3]
triples.append((head,tail,rel,label))
return triples
def create_relation_dicts(triples):
relation_head_dict = {}
relation_tail_dict = {}
for triple in triples:
head = triple[0]
tail = triple[1]
rel = triple[2]
if rel not in relation_head_dict:
relation_head_dict[rel] = [head]
elif head not in relation_head_dict[rel]:
relation_head_dict[rel].append(head)
if rel not in relation_tail_dict:
relation_tail_dict[rel] = [tail]
elif tail not in relation_tail_dict[rel]:
relation_tail_dict[rel].append(tail)
return relation_head_dict, relation_tail_dict
def create_negative_triple(triple,relation_head_dict,relation_tail_dict,corrupt_head = True):
head = triple[0]
tail = triple[1]
rel = triple[2]
if corrupt_head:
neg_heads = [h for h in relation_head_dict[rel] if h != head]
if len(neg_heads) > 0 :
random_index = randint(0, len(neg_heads)-1)
neg_head = neg_heads[random_index]
return (neg_head,tail,rel)
else:
return None
else:
neg_tails = [t for t in relation_tail_dict[rel] if t != tail]
if len(neg_tails) > 0:
random_index = randint(0, len(neg_tails)-1)
neg_tail = neg_tails[random_index]
return (head, neg_tail, rel)
return None
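# A small usage sketch (toy triples; hypothetical, not part of the original data files):
# build the relation dictionaries and corrupt the head of one triple.
if __name__ == "__main__":
    toy_triples = [("a", "b", "r1"), ("c", "d", "r1"), ("e", "f", "r2")]
    head_dict, tail_dict = create_relation_dicts(toy_triples)
    # For ("a", "b", "r1") the only alternative head seen for "r1" is "c",
    # so this prints ("c", "b", "r1").
    print(create_negative_triple(toy_triples[0], head_dict, tail_dict, corrupt_head=True))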
|
446026
|
import pandas as pd
import json
class ParsePsort:
def __init__(self, max_entries):
self.max_entries = max_entries
def parse_psortdb(self):
"""
Parses the PSORTdb "Gram negative without outer membrane" file
and creates JSON files conforming to datanator_pattern/observation_compiled.json.
Uses self.max_entries (:obj:`int`), the number of rows to parse;
a JSON file is created for each of the first max_entries rows.
Returns:
None
"""
data=pd.read_csv('Computed-Gram_negative_without_outer_membrane-PSORTdb-3.00.tab',delimiter="\t",low_memory=False)
data = data.where(pd.notnull(data), None)
for i in range(self.max_entries):
d={}
#entity
d["entity"]={}
d["entity"]["type"]="protein"
d["entity"]["name"]=str(data.iloc[i,0])[str(data.iloc[i,0]).rfind("|")+2:]
d["entity"]["synonyms"]=[]
#identifiers
d["entity"]["identifiers"]=[]
seq_id = {}
seq_id["namespace"]="Seq_ID"
seq_id["value"]=str(data.iloc[i,0])[str(data.iloc[i,0]).find("ref")+4:str(data.iloc[i,0]).rfind("|")]
d["entity"]["identifiers"].append(seq_id)
#localizations
d["value"]={}
d["value"]["PPSVM_Localization"]=data.iloc[i,1]
d["value"]["Profile_Localization"]=data.iloc[i,3]
d["value"]["Signal_Localization"]=data.iloc[i,5]
d["value"]["SCL-BLASTe_Localization"]=data.iloc[i,7]
d["value"]["CMSVM_Localization"]=data.iloc[i,9]
d["value"]["SCL-BLAST_Localization"]=data.iloc[i,11]
d["value"]["OMPMotif_Localization"]=data.iloc[i,13]
d["value"]["OMSVM_Localization"]=data.iloc[i,15]
d["value"]["Motif_Localization"]=data.iloc[i,17]
d["value"]["CytoSVM_Localization"]=data.iloc[i,19]
d["value"]["CWSVM_Localization"]=data.iloc[i,21]
d["value"]["ModHMM_Localization"]=data.iloc[i,23]
d["value"]["ECSVM_Localization"]=data.iloc[i,25]
d["value"]["Cytoplasmic Membrane_Score"]=data.iloc[i,27]
d["value"]["Cellwall_Score"]=data.iloc[i,28]
d["value"]["Extracellular_Score"]=data.iloc[i,29]
d["value"]["Cytoplasmic_Score"]=data.iloc[i,30]
d["value"]["Final_Localization"]=data.iloc[i,31]
d["value"]["Final_Localization_2"]=data.iloc[i,32]
d["value"]["Secondary_Localization"]=data.iloc[i,34]
d["value"]["Final_Score"]=data.iloc[i,35]
#source
d["source"]={}
d["source"]["namespace"]="PSORT"
d["source"]["value"]="Version "+str(data.iloc[i,36])
with open("Gram_Negative_WO_Outer_Membrane/"+str(data.iloc[i,0])[str(data.iloc[i,0]).find("ref")+4:str(data.iloc[i,0]).rfind("|")]+".json","w+") as f:
json.dump(d,f,indent=4)
p1=ParsePsort(10)
p1.parse_psortdb()
|
446050
|
from typing import List, Dict, Set
from pathlib import PurePosixPath
from collections import deque
import re
from alipcs_py.alipcs import AliPCSApi, PcsFile
from alipcs_py.commands.list_files import list_files
from alipcs_py.commands.sifter import Sifter
from alipcs_py.commands.display import display_shared_links
from alipcs_py.commands.download import (
download,
Downloader,
DEFAULT_DOWNLOADER,
DownloadParams,
DEFAULT_DOWNLOADPARAMS,
)
from alipcs_py.commands.play import play, Player, DEFAULT_PLAYER
import requests # type: ignore
from rich import print
def share_files(api: AliPCSApi, *remotepaths: str, password: str = "", period: int = 0):
pcs_files = api.paths(*remotepaths)
assert all(pcs_files)
file_ids = [pf.file_id for pf in pcs_files if pf]
shared_link = api.share(*file_ids, password=password, period=period)
display_shared_links(shared_link)
def list_shared(api: AliPCSApi, show_all=True):
pcs_shared_links = api.list_shared_all()
if not pcs_shared_links:
return
display_shared_links(*pcs_shared_links)
def cancel_shared(api: AliPCSApi, *share_ids: str):
api.cancel_shared(*share_ids)
def _redirect(url: str) -> str:
if "alywp.net" not in url:
return url
resp = requests.get(url, allow_redirects=False)
return resp.headers.get("Location") or ""
def _extract_share_id(share_url: str) -> str:
m = re.search(r"/s/(\w+)", share_url)
return m.group(1) if m else ""
def _extract_file_id(share_url: str) -> str:
m = re.search(r"/folder/(\w+)", share_url)
return m.group(1) if m else ""
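# Example (hypothetical URL, for illustration only): for a share link like
#   https://www.aliyundrive.com/s/AbC123xyz/folder/60f1a2b3c4d5
# `_extract_share_id` returns "AbC123xyz" and `_extract_file_id` returns
# "60f1a2b3c4d5"; links without a "/folder/" part yield an empty file id.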
def save_shared_by_url(
api: AliPCSApi, remotedir: str, share_url: str, password: str = ""
):
share_url = _redirect(share_url)
share_id = _extract_share_id(share_url)
file_id = _extract_file_id(share_url)
file_ids = [file_id] if file_id else []
assert share_id
save_shared_by_file_ids(api, remotedir, share_id, file_ids, password=password)
def save_shared_by_file_ids(
api: AliPCSApi,
remotedir: str,
share_id: str,
file_ids: List[str],
password: str = "",
):
assert share_id
share_token = api.get_share_token(share_id, share_password=password)
file_ids = file_ids or ["root"]
sfs = api.meta(*file_ids, share_id=share_id, share_token=share_token)
for sf in sfs:
if not sf.path:
sf.path = sf.name
shared_files = deque(sfs)
# Record the remotedir of each shared_file
_remotedirs: Dict[str, str] = {}
for sp in shared_files:
_remotedirs[sp.file_id] = remotedir
# Map the remotedir to its pcs_file
dest_pcs_files: Dict[str, PcsFile] = {}
while shared_files:
shared_file = shared_files.popleft()
rd = _remotedirs[shared_file.file_id]
# Make sure remote dir exists
if rd not in dest_pcs_files:
dest_pcs_files[rd] = api.makedir_path(rd)
dest_pcs_file = dest_pcs_files[rd]
if not shared_file.is_root() and not remotepath_exists(
api, shared_file.name, rd
):
api.transfer_shared_files(
[shared_file.file_id],
dest_pcs_file.file_id,
share_id,
share_token,
auto_rename=False,
)
print(f"save: `{shared_file.path}` to `{rd}`")
else:
# Ignore existing file
if shared_file.is_file:
print(f"[yellow]WARNING[/]: `{shared_file.path}` already exists in `{rd}`")
continue
else: # shared_file.is_dir
sub_files = list(
api.list_path_iter(
shared_file.path,
file_id=shared_file.file_id,
share_id=share_id,
share_token=share_token,
)
)
rd = (PurePosixPath(rd) / shared_file.name).as_posix()
for sp in sub_files:
_remotedirs[sp.file_id] = rd
shared_files.extendleft(sub_files[::-1])
def save_shared(
api: AliPCSApi,
remotedir: str,
share_id: str = "",
share_url: str = "",
file_ids: List[str] = [],
password: str = "",
):
assert remotedir.startswith("/"), "`remotedir` must be an absolute path"
assert int(bool(share_id)) ^ int(
bool(share_url)
), "`share_id` and `share_url` only can be given one"
if share_url:
save_shared_by_url(api, remotedir, share_url, password=password)
else:
save_shared_by_file_ids(api, remotedir, share_id, file_ids, password=password)
def list_shared_files(
api: AliPCSApi,
*remotepaths: str,
share_id: str = "",
share_url: str = "",
password: str = "",
file_ids: List[str] = [],
desc: bool = False,
name: bool = False,
time: bool = False,
size: bool = False,
all: bool = True,
limit: int = 100,
recursive: bool = False,
sifters: List[Sifter] = [],
highlight: bool = False,
show_size: bool = False,
show_date: bool = False,
show_file_id: bool = False,
show_hash: bool = False,
show_absolute_path: bool = False,
csv: bool = False,
):
assert int(bool(share_id)) ^ int(
bool(share_url)
), "`share_id` and `share_url` only can be given one"
share_url = _redirect(share_url)
if share_url:
share_id = _extract_share_id(share_url)
if not file_ids:
file_id = _extract_file_id(share_url)
file_ids = [file_id] if file_id else []
if not remotepaths and not file_ids:
return
assert share_id
share_token = api.get_share_token(share_id, share_password=password)
list_files(
api,
*remotepaths,
file_ids=file_ids,
share_id=share_id,
share_token=share_token,
desc=desc,
name=name,
time=time,
size=size,
all=all,
limit=limit,
recursive=recursive,
sifters=sifters,
highlight=highlight,
show_size=show_size,
show_date=show_date,
show_file_id=show_file_id,
show_hash=show_hash,
show_absolute_path=show_absolute_path,
csv=csv,
)
def remotepath_exists(
api: AliPCSApi, name: str, rd: str, _cache: Dict[str, Set[str]] = {}
) -> bool:
names = _cache.get(rd)
if not names:
names = set([sp.name for sp in api.list_path_iter(rd)])
_cache[rd] = names
return name in names
def download_shared(
api: AliPCSApi,
remotepaths: List[str],
file_ids: List[str],
localdir: str,
share_id: str = "",
share_url: str = "",
password: str = "",
sifters: List[Sifter] = [],
recursive: bool = False,
from_index: int = 0,
downloader: Downloader = DEFAULT_DOWNLOADER,
downloadparams: DownloadParams = DEFAULT_DOWNLOADPARAMS,
out_cmd: bool = False,
encrypt_password: bytes = b"",
):
assert int(bool(share_id)) ^ int(
bool(share_url)
), "`share_id` and `share_url` only can be given one"
share_url = _redirect(share_url)
if share_url:
share_id = _extract_share_id(share_url)
if not file_ids:
file_id = _extract_file_id(share_url)
file_ids = [file_id] if file_id else []
if not remotepaths and not file_ids:
return
assert share_id
share_token = api.get_share_token(share_id, share_password=password)
download(
api,
remotepaths,
file_ids=file_ids,
localdir=localdir,
share_id=share_id,
share_token=share_token,
sifters=sifters,
recursive=recursive,
from_index=from_index,
downloader=downloader,
downloadparams=downloadparams,
out_cmd=out_cmd,
encrypt_password=encrypt_password,
)
def play_shared(
api: AliPCSApi,
remotepaths: List[str],
file_ids: List[str],
share_id: str,
share_url: str = "",
password: str = "",
sifters: List[Sifter] = [],
recursive: bool = False,
from_index: int = 0,
player: Player = DEFAULT_PLAYER,
player_params: List[str] = [],
quiet: bool = False,
shuffle: bool = False,
ignore_ext: bool = False,
out_cmd: bool = False,
local_server: str = "",
):
assert int(bool(share_id)) ^ int(
bool(share_url)
), "`share_id` and `share_url` only can be given one"
share_url = _redirect(share_url)
if share_url:
share_id = _extract_share_id(share_url)
if not file_ids:
file_id = _extract_file_id(share_url)
file_ids = [file_id] if file_id else []
if not remotepaths and not file_ids:
return
assert share_id
share_token = api.get_share_token(share_id, share_password=password)
play(
api,
remotepaths,
file_ids=file_ids,
share_id=share_id,
share_token=share_token,
sifters=sifters,
recursive=recursive,
from_index=from_index,
player=player,
player_params=player_params,
quiet=quiet,
shuffle=shuffle,
ignore_ext=ignore_ext,
out_cmd=out_cmd,
local_server=local_server,
)
|
446088
|
from django.contrib import admin
from manabi.apps.books.models import Textbook
from manabi.apps.flashcards.models import (
Card,
CardHistory,
Deck,
DeckCollection,
Fact,
)
class CardAdmin(admin.ModelAdmin):
raw_id_fields = ('fact',)
list_display = (
'__unicode__', 'last_due_at', 'due_at', 'last_reviewed_at',
)
class FactAdmin(admin.ModelAdmin):
raw_id_fields = ('synchronized_with',)
list_display = ('__unicode__', 'owner',)
list_filter = ('deck',)
readonly_fields = ('created_at', 'modified_at',)
class FactInline(admin.TabularInline):
model = Fact
fields = (
'expression', 'reading', 'meaning',
)
readonly_fields = ('created_at', 'modified_at',)
@admin.register(Deck)
class DeckAdmin(admin.ModelAdmin):
fields = (
'name', 'description', 'suspended', 'active', 'image', 'collection',
'collection_ordinal', 'randomize_card_order',
)
raw_id_fields = ('synchronized_with',)
list_display = ('__unicode__', 'owner', 'synchronized_with')
list_filter = ('shared',)
readonly_fields = ('created_at', 'modified_at',)
inlines = [FactInline]
def get_queryset(self, request):
queryset = super(DeckAdmin, self).get_queryset(request)
return queryset.filter(active=True)
@admin.register(DeckCollection)
class DeckCollectionAdmin(admin.ModelAdmin):
fields = ('name', 'description', 'image')
readonly_fields = ('created_at', 'modified_at',)
class TextbookAdmin(admin.ModelAdmin):
pass
#TODO admin.site.register(Deck, DeckAdmin)
#TODO admin.site.register(CardHistory)
#TODO admin.site.register(Fact, FactAdmin)
#TODO admin.site.register(Card, CardAdmin)
#TODO admin.site.register(Textbook)
|
446101
|
import logging
import os
import sys
from functools import wraps
from telegram.ext import CommandHandler
from telegram.ext.dispatcher import run_async
from config import config
logger = logging.getLogger(__name__)
def restricted(func):
@wraps(func)
def wrapped(bot, update, *args, **kwargs):
user_id = update.effective_user.id
if user_id != int(config.telegram.owner_id):
logger.warning('unauthorized access denied: %d', user_id)
return
return func(bot, update, *args, **kwargs)
return wrapped
@restricted
@run_async
def restart_bot(bot, update, args):
logger.info('restarting bot...')
update.message.reply_text('restarting {}'.format(' '.join(args) if args else '(no args)'))
# updater.stop() # this will just make the script hang
script_args = [sys.argv[0]] # keep just the first element of sys.argv
script_args.extend(args)
os.execl(sys.executable, sys.executable, *script_args)
@restricted
@run_async
def send_db(bot, update):
logger.info('sending db file')
with open(config.sqlite.filename, 'rb') as f:
update.message.reply_document(f)
@restricted
@run_async
def send_log(bot, update):
logger.info('sending log file')
with open(config.log.filename, 'rb') as f:
update.message.reply_document(f)
HANDLERS = (
CommandHandler('restart', restart_bot, pass_args=True),
CommandHandler('db', send_db),
CommandHandler('log', send_log)
)
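# Usage sketch (assumes the Updater is built elsewhere; `config.telegram.token`
# is a hypothetical config key, not defined in this module):
#
#     from telegram.ext import Updater
#     updater = Updater(config.telegram.token)
#     for handler in HANDLERS:
#         updater.dispatcher.add_handler(handler)
#     updater.start_polling()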
|
446124
|
import click
import rvo.db as db
import rvo.views as views
@click.command(short_help="Show transactions",
help="""
Transactions are logged information
about changes to and access of the documents.
`log' is used to list those transactions.
Having a hard time remembering what you did?
""")
@click.option('entries', '-e', '--entries', default=15, type=int,
help='Number of entries being shown')
@click.pass_context
def log(ctx, entries):
"""
Shows the latest `entries` transactions
:entries: int
:returns: bool
"""
coll = db.get_transactions_collection(ctx)
SUM = {}
c = 0
print("")
for doc in coll.find({}).sort("date", -1).limit(entries):
c += 1
SUM[c] = doc
views.transactions(SUM, c+1)
return True
|
446150
|
import os
from setuptools import setup
PACKAGE = "allure-robotframework"
classifiers = [
'Development Status :: 5 - Production/Stable',
'Framework :: Robot Framework',
'Framework :: Robot Framework :: Tool',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
]
setup_requires = [
"setuptools_scm"
]
install_requires = [
]
def prepare_version():
from setuptools_scm import get_version
configuration = {"root": "..", "relative_to": __file__}
version = get_version(**configuration)
install_requires.append("allure-python-commons=={version}".format(version=version))
return configuration
def get_readme(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
if __name__ == '__main__':
setup(
name=PACKAGE,
use_scm_version=prepare_version,
description="Allure Robot Framework integration",
license="Apache-2.0",
install_requires=install_requires,
setup_requires=setup_requires,
keywords="allure reporting robotframework",
packages=['allure_robotframework', 'AllureLibrary'],
package_dir={"allure_robotframework": "src/listener", 'AllureLibrary': 'src/library'},
py_modules=['allure_robotframework'],
url="https://github.com/allure-framework/allure-python",
author="<NAME>",
author_email="<EMAIL>",
long_description=get_readme('README.rst'),
classifiers=classifiers,
)
|
446156
|
class BinarySearchTree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert(self, to_add):
if self.value > to_add:
if self.left:
self.left.insert(to_add)
else:
self.left = BinarySearchTree(to_add)
else:
if self.right:
self.right.insert(to_add)
else:
self.right = BinarySearchTree(to_add)
def all_values_less_than(self, value):
if self.value >= value:
return False
left_less_than = True
if self.left:
left_less_than = self.left.all_values_less_than(value)
right_less_than = True
if self.right:
right_less_than = self.right.all_values_less_than(value)
return left_less_than and right_less_than
def all_values_geq_than(self, value):
# equal values are allowed here: insert() sends duplicates to the right subtree
if self.value < value:
return False
left_geq_than = True
if self.left:
left_geq_than = self.left.all_values_geq_than(value)
right_geq_than = True
if self.right:
right_geq_than = self.right.all_values_geq_than(value)
return left_geq_than and right_geq_than
def is_bst(self):
left_ok = True
if self.left:
left_ok = self.left.all_values_less_than(self.value) and self.left.is_bst()
right_ok = True
if self.right:
right_ok = self.right.all_values_geq_than(self.value) and self.right.is_bst()
return right_ok and left_ok
def __repr__(self):
return "({} L{} R{})".format(self.value, self.left, self.right)
|
446191
|
from easydict import EasyDict as edict
from tensorflow.python.keras import Input
from tensorflow.python.keras import Model
from tensorflow.python.keras import layers
from gans.models import model
class LatentToImageConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=7 * 7)(embedded_id)
embedded_id = layers.Reshape(target_shape=(7, 7, 1))(embedded_id)
x = layers.Dense(units=7 * 7 * 256, use_bias=False)(z)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Reshape((7, 7, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
class LatentToImageCifar10CConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=8 * 8)(embedded_id)
embedded_id = layers.Reshape(target_shape=(8, 8, 1))(embedded_id)
x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Reshape((8, 8, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2DTranspose(128, kernel_size=(4, 4), strides=(2, 2), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(128, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2DTranspose(128, kernel_size=(4, 4), strides=(2, 2), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(128, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(128, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(3, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
class LatentToImageNNUpsamplingCifar10CConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=8 * 8)(embedded_id)
embedded_id = layers.Reshape(target_shape=(8, 8, 1))(embedded_id)
x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Reshape((8, 8, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2D(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(3, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
class LatentToImageNNUpSamplingConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=7 * 7)(embedded_id)
embedded_id = layers.Reshape(target_shape=(7, 7, 1))(embedded_id)
x = layers.Dense(units=7 * 7 * 256, use_bias=False)(z)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Reshape((7, 7, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2D(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(1, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
|
446203
|
import numpy as np
from scipy import ndimage as nd
import tensorflow as tf
from prdepth import sampler
import prdepth.utils as ut
import cv2
H, W = sampler.H, sampler.W
IH, IW = sampler.IH, sampler.IW
PSZ = sampler.PSZ
STRIDE = sampler.STRIDE
HNPS, WNPS = sampler.HNPS, sampler.WNPS
class S2DOptimizer:
''' Optimizer class for sparse-to-dense with random sampling.
Optimizations are done in the DORN output resolution, which is lower than
the original image. The optimized global prediction is upsampled to the
original resolution.
'''
def __init__(self, depth_sampler):
self.patched_samples = depth_sampler.patched_samples
self.nsamples = depth_sampler.nsamples
self.PO = ut.PatchOp(1, IH, IW, PSZ, STRIDE)
# Variables for optimization
# Global estimation (DORN resolution).
self.image_current = tf.Variable(
tf.zeros([1, IH, IW, 1], dtype=tf.float32))
# Patches used to get image_current, i.e. if group average these
# patches, you would get image_current.
self.patched_before = tf.Variable(
tf.zeros([1, HNPS, WNPS, PSZ**2], dtype=tf.float32))
# Global estimation (original resolution).
self.resized_current = tf.image.resize_images(
self.image_current, [H, W], align_corners=True)
# Graph for initialization
patched_init = tf.reduce_mean(self.patched_samples, axis=0)
image_init = self.PO.group_patches(patched_init)
self._init_op = tf.group([
tf.assign(self.patched_before, patched_init).op,
tf.assign(self.image_current, image_init).op])
# Graph for updating sample selection (i.e., patched_before) based on
# the updated global estimation, which is obtained by carrying out a few
# gradient steps on the additional global cost function.
# In this sparse-to-dense application, the global estimation is updated
# using sparse depth and Eq 9. & 10. in the paper.
self._resized_updated_ph = tf.placeholder(
shape=[H, W], dtype=tf.float32)
image_current = tf.image.resize_images(
self._resized_updated_ph[None, :, :, None],
[IH, IW], align_corners=True)
patched_current = self.PO.extract_patches(image_current)
# Select the sample with the min distance to the (patch of) updated
# global prediction.
distance = ut.mean_diff(
patched_current[None], self.patched_samples, axis=-1)
min_index = tf.argmin(distance, axis=0)
indices = tf.meshgrid(
*[np.arange(i) for i in min_index.get_shape().as_list()], indexing='ij')
min_indices = tf.stack([min_index] + indices, axis=-1)
patched_best = tf.gather_nd(self.patched_samples, min_indices)
image_best = self.PO.group_patches(patched_best)
# Difference b/w the current prediction and the previous, used for
# stopping the optimization.
self._diff = ut.mean_diff(image_best, self.image_current)
with tf.control_dependencies([self._diff]):
self._sample_selection_op = tf.group([
tf.assign(self.patched_before, patched_best).op,
tf.assign(self.image_current, image_best).op])
def initialize(self, sess):
''' Initialize the prediction. '''
sess.run(self._init_op)
def update_global_estimation(self, sparse_depth, gamma, num_gd_steps, sess):
''' Update the global depth estimation using sparse depth.
This is done by carrying out a few gradient steps on the additional
global cost function (Eq. 9 & 10 in the paper). The sampling operation
in this case is just sampling at the measured locations in the sparse
depth map. The transpose of the sampling operation is nearest-neighbor
interpolation of the valid pixels in the sparse depth.
Args:
sparse_depth: a sparse depth map (numpy array).
gamma: step size for gradient descent.
num_gd_steps: number of gradient descent steps.
sess: TF session.
Returns:
Updated global estimation of the original resolution.
'''
# A map of indices, of which each pixel is the indices of the closest
# valid measurement on the sparse depth map to this pixel.
# This is used for filling values for all pixels of the sparse depth map
# using nearest neighbor.
if not hasattr(self, '_edt_indices'):
invalid = (sparse_depth == 0)
self._edt_indices = tuple(nd.distance_transform_edt(
invalid, return_distances=False, return_indices=True))
global_current = sess.run(self.resized_current).squeeze()
for i in range(num_gd_steps):
diff = global_current - sparse_depth
gradient = diff[self._edt_indices]
global_current = global_current - gamma * gradient
return global_current
def update_sample_selection(self, global_current, sess):
''' Update sample selection using the current global estimation.
Args:
global_current: the current global depth estimation of the original
resolution.
sess: TF session.
Returns:
Averaged squared difference of the current estimation and the
previous estimation.
'''
diff, _ = sess.run(
[self._diff, self._sample_selection_op],
feed_dict={self._resized_updated_ph: global_current})
return diff
class UpsamplingOptimizer(S2DOptimizer):
def update_global_estimation(self, lowres_depth, gamma, num_gd_steps, sess):
''' Update the global depth estimation using a low-resolution depth map.
This is done by carrying out a few gradient steps on the additional
global cost function (Eq. 9 & 10 in the paper). The sampling operation
in this case is bicubic downsampling. The transpose of the sampling
operation is bilinear interpolation of the low-resolution depth.
Args:
lowres_depth: a low-resolution depth map (numpy array).
gamma: step size for gradient descent.
num_gd_steps: number of gradient descent steps.
sess: TF session.
Returns:
Updated global estimation of the original resolution.
'''
global_current = sess.run(self.resized_current).squeeze()
lh, lw = lowres_depth.shape
for i in range(num_gd_steps):
down_current = cv2.resize(
global_current, (lw, lh), interpolation=cv2.INTER_CUBIC)
diff = down_current - lowres_depth
gradient = cv2.resize(diff, (W, H), interpolation=cv2.INTER_LINEAR)
global_current = global_current - gamma * gradient
return global_current
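# Standalone sketch of the sparse-depth update described in
# update_global_estimation (numpy only, no TF session; sizes are made up):
if __name__ == "__main__":
    dense = np.random.rand(8, 8).astype(np.float32)   # current global estimate
    sparse = np.zeros_like(dense)
    sparse[::4, ::4] = np.random.rand(2, 2)           # a few measured depths
    # Nearest-valid-pixel indices, as in the class above.
    edt_idx = tuple(nd.distance_transform_edt(
        sparse == 0, return_distances=False, return_indices=True))
    for _ in range(5):                                 # a few gradient steps
        gradient = (dense - sparse)[edt_idx]           # NN-filled residual
        dense = dense - 0.1 * gradient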
|
446222
|
from sepal.ee.image import band_intersection, evaluate, select_and_add_missing, when
def to_index(image, index_name):
return {
'ndvi': to_ndvi(image),
'ndmi': to_ndmi(image),
'ndwi': to_ndwi(image),
'mndwi': to_mndwi(image),
'evi': to_evi(image),
'evi2': to_evi2(image),
'savi': to_savi(image),
'nbr': to_nbr(image),
'ui': to_ui(image),
'ndbi': to_ndbi(image),
'ibi': to_ibi(image),
'nbi': to_nbi(image),
'ebbi': to_ebbi(image),
'bui': to_bui(image),
'ndfi': to_ndfi(image),
}[index_name]
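# Usage sketch (assumes `image` is an ee.Image whose bands have already been
# renamed to 'red', 'nir', etc., as expected by sepal.ee.image.evaluate):
#
#     ndvi = to_index(image, 'ndvi')
#     with_ndvi = image.addBands(ndvi)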
def to_ndvi(image):
"""
Calculates the Normalized Difference Vegetation Index (NDVI) for the provided image.
Required bands: ['red', 'nir']
Args:
image: Image to calculate NDVI for
Returns:
If image contains required bands, single band image named 'ndvi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['red', 'nir'],
expression='(nir - red) / (nir + red)',
name='ndvi'
)
def to_ndmi(image):
"""
Calculates the Normalized Difference Moisture Index (NDMI) for the provided image.
Required bands: ['nir', 'swir1']
Args:
image: Image to calculate NDMI for
Returns:
If image contains required bands, single band image named 'ndmi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['nir', 'swir1'],
expression='(nir - swir1) / (nir + swir1)',
name='ndmi'
)
def to_ndwi(image):
"""
Calculates the Normalized Difference Water Index (NDWI) for the provided image.
Required bands: ['green', 'nir']
Args:
image: Image to calculate NDWI for
Returns:
If image contains required bands, single band image named 'ndwi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['green', 'nir'],
expression='(green - nir) / (green + nir)',
name='ndwi'
)
def to_mndwi(image):
"""
Calculates the Modified Normalized Difference Water Index (MNDWI) for the provided image.
Required bands: ['green', 'swir1']
Args:
image: Image to calculate MNDWI for
Returns:
If image contains required bands, single band image named 'mndwi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['green', 'swir1'],
expression='(green - swir1) / (green + swir1)',
name='mndwi'
)
def to_ndfi(image):
"""
Calculates the Normalized Difference Fraction Index (NDFI) for the provided image.
Required bands: ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']
Args:
image: Image to calculate NDFI for
Returns:
If image contains required bands, single band image named 'ndfi', otherwise an image without bands.
"""
required_bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']
valid_image = band_intersection(image, required_bands).length().eq(6)
def calculate_ndfi():
gv = [500, 900, 400, 6100, 3000, 1000]
shade = [0, 0, 0, 0, 0, 0]
npv = [1400, 1700, 2200, 3000, 5500, 3000]
soil = [2000, 3000, 3400, 5800, 6000, 5800]
cloud = [9000, 9600, 8000, 7800, 7200, 6500]
unmixed = select_and_add_missing(image, required_bands) \
.unmix(
endmembers=[gv, shade, npv, soil, cloud],
sumToOne=True,
nonNegative=True
).rename(['gv', 'shade', 'npv', 'soil', 'cloud'])
return unmixed \
.expression(
'((i.gv / (1 - i.shade)) - (i.npv + i.soil)) / ((i.gv / (1 - i.shade)) + i.npv + i.soil)',
{'i': unmixed}
) \
.rename('ndfi') \
.float()
return when(valid_image, calculate_ndfi)
def to_evi(image, L=1, C1=6, C2=7.5, G=2.5):
"""
Calculates the Enhanced Vegetation Index (EVI) for the provided image.
Required bands: ['blue', 'red', 'nir']
Args:
image: Image to calculate EVI for
L: (optional) Canopy background adjustment
C1: (optional) Aerosol resistance term 1
C2: (optional) Aerosol resistance term 2
G: (optional) Gain factor
Returns:
If image contains required bands, single band image named 'evi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['blue', 'red', 'nir'],
expression='{G} * ((nir - red) / (nir + {C1} * red - {C2} * blue + {L}))'.format(L=L, C1=C1, C2=C2, G=G),
name='evi'
)
def to_evi2(image):
"""
Calculates the Enhanced Vegetation Index 2 (EVI2) for the provided image.
Required bands: ['red', 'nir']
Args:
image: Image to calculate EVI2 for
Returns:
If image contains required bands, single band image named 'evi2', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['red', 'nir'],
expression='2.5 * (nir - red) / (nir + 2.4 * red + 1)',
name='evi2'
)
def to_savi(image, L=0.5):
"""
Calculates the Soil-adjusted Vegetation Index (SAVI) for the provided image.
Required bands: ['red', 'nir']
Args:
image: Image to calculate SAVI for
L: (optional) Soil brightness correction factor
Returns:
If image contains required bands, single band image named 'savi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['red', 'nir'],
expression='(nir - red) * (1 + {L})/(nir + red + {L})'.format(L=L),
name='savi'
)
def to_nbr(image):
"""
Calculates the Normalized Burn Ratio (NBR) for the provided image.
Required bands: ['nir', 'swir2']
Args:
image: Image to calculate NBR for
Returns:
If image contains required bands, single band image named 'nbr', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['nir', 'swir2'],
expression='(nir - swir2) / (nir + swir2)',
name='nbr'
)
def to_ui(image):
"""
Calculates the Urban Index (UI) for the provided image.
Required bands: ['nir', 'swir2']
Args:
image: Image to calculate UI for
Returns:
If image contains required bands, single band image named 'ui', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['nir', 'swir2'],
expression='(swir2 - nir) / (swir2 + nir)',
name='ui'
)
def to_ndbi(image):
"""
Calculates the Normalized Difference Built-up Index (NDBI) for the provided image.
Required bands: ['nir', 'swir1']
Args:
image: Image to calculate NDBI for
Returns:
If image contains required bands, single band image named 'ndbi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['nir', 'swir1'],
expression='(swir1 - nir) / (swir1 + nir)',
name='ndbi'
)
def to_ibi(image, high_plant_cover=False, L=0.5):
"""
Calculates the Index-based Built-up Index (IBI) for the provided image.
Required bands: ['green', 'red', 'nir', 'swir1']
Args:
image: Image to calculate IBI for
high_plant_cover: Set to True if plant cover is over 30%. It will then use NDVI instead of SAVI in
the calculation
L: (optional) Soil brightness correction factor. Will have no effect when high plant cover.
Returns:
If image contains required bands, single band image named 'ibi', otherwise an image without bands.
"""
ndbi = to_ndbi(image)
mndwi = to_mndwi(image)
if high_plant_cover:
ndvi = to_ndvi(image)
return evaluate(
ndbi.addBands(ndvi).addBands(mndwi),
['ndbi', 'ndvi', 'mndwi'],
'(ndbi - (ndvi + mndwi) / 2) / (ndbi + (ndvi + mndwi) / 2)',
'ibi'
)
else:
savi = to_savi(image, L)
return evaluate(
ndbi.addBands(savi).addBands(mndwi),
['ndbi', 'savi', 'mndwi'],
'(ndbi - (savi + mndwi) / 2) / (ndbi + (savi + mndwi) / 2)',
'ibi'
)
def to_nbi(image):
"""
Calculates the New Built-up Index (NBI) for the provided image.
Required bands: ['red', 'nir', 'swir1']
Args:
image: Image to calculate NBI for
Returns:
If image contains required bands, single band image named 'nbi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['red', 'nir', 'swir1'],
expression='red * swir1 / nir',
name='nbi'
)
def to_ebbi(image):
"""
Calculates the Enhanced Built-up and Bareness Index (EBBI) for the provided image.
Required bands: ['nir', 'swir1', 'thermal']
Args:
image: Image to calculate EBBI for
Returns:
If image contains required bands, single band image named 'ebbi', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['nir', 'swir1', 'thermal'],
expression='(swir1 - nir) / (10 * sqrt(swir1 + thermal))',
name='ebbi'
)
def to_bui(image):
"""
Calculates the Built-up Index (BUI) for the provided image.
Required bands: ['red', 'swir1', 'swir2']
Args:
image: Image to calculate BUI for
Returns:
If image contains required bands, single band image named 'bui', otherwise an image without bands.
"""
return evaluate(
image=image,
required_bands=['red', 'swir1', 'swir2'],
expression='(red - swir1) / (red + swir1) + (swir2 - swir1) / (swir2 + swir1)',
name='bui'
)
|
446248
|
import requests
from . import FeedSource, _request_headers
class Biki(FeedSource):
def _fetch(self):
feed = {}
url = "https://openapi.biki.cc/open/api/get_ticker?symbol={quote}{base}"
for base in self.bases:
for quote in self.quotes:
if quote == base:
continue
response = requests.get(url=url.format(
quote=quote,
base=base
), headers=_request_headers, timeout=self.timeout)
result = response.json()
if 'msg' in result and result['msg'] != 'suc':
continue
self.add_rate(feed, base, quote, float(result['data']["last"]), float(result['data']["vol"]))
return feed
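# Note (inferred from the parsing above, not from API documentation): a
# successful response is expected to look like
#   {"msg": "suc", "data": {"last": <last price>, "vol": <volume>, ...}}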
|
446276
|
from __future__ import division
import numpy as np
import logging
from scipy.ndimage import zoom
from .base import Attack
from .base import generator_decorator
from ..utils import softmax
class GenAttack(Attack):
"""The GenAttack introduced in [1]_.
This attack performs a genetic search in order to find an adversarial
perturbation in a black-box scenario in as few queries as possible.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>,
"GenAttack: Practical Black-box Attacks with Gradient-Free
Optimization",
https://arxiv.org/abs/1805.11090
"""
@generator_decorator
def as_generator(
self,
a,
generations=10,
alpha=1.0,
p=5e-2,
N=10,
tau=0.1,
search_shape=None,
epsilon=0.3,
binary_search=20,
):
"""A black-box attack based on genetic algorithms.
Can either try to find an adversarial perturbation for a fixed epsilon
distance or perform a binary search over epsilon values in order to find
a minimal perturbation.
Parameters
----------
inputs : `numpy.ndarray`
Batch of inputs with shape as expected by the underlying model.
labels : `numpy.ndarray`
Class labels of the inputs as a vector of integers in [0, number of classes).
unpack : bool
If true, returns the adversarial inputs as an array, otherwise returns Adversarial objects.
generations : int
Number of generations, i.e. iterations, in the genetic algorithm.
alpha : float
Mutation-range.
p : float
Mutation probability.
N : int
Population size of the genetic algorithm.
tau: float
Temperature for the softmax sampling used to determine the parents
of the new crossover.
search_shape : tuple (default: None)
Set this to a smaller image shape than the true shape to search in
a smaller input space. The input will be scaled using a linear
interpolation to match the required input shape of the model.
binary_search : bool or int
Whether to perform a binary search over epsilon, using the given
epsilon only as a starting value. If False, epsilon is kept
fixed. Can also be an integer, specifying the number of binary
search steps (default 20).
epsilon : float
Limit on the perturbation size; if binary_search is True,
this value is only for initialization and automatically
adapted.
"""
assert a.target_class is not None, "GenAttack is a targeted attack."
if binary_search:
if isinstance(binary_search, bool):
k = 20
else:
k = int(binary_search)
yield from self._run_binary_search(
a, epsilon, k, generations, alpha, p, N, tau, search_shape
)
return
else:
yield from self._run_one(
a, generations, alpha, p, N, tau, search_shape, epsilon
)
return
def _run_one(self, a, generations, alpha, rho, N, tau, search_shape, epsilon):
min_, max_ = a.bounds()
x = a.unperturbed
search_shape = x.shape if search_shape is None else search_shape
assert len(search_shape) == len(x.shape), (
"search_shape must have the same rank as the original " "image's shape"
)
def get_perturbed(population_noises):
if population_noises[0].shape != x.shape:
factors = [float(d[1]) / d[0] for d in zip(search_shape, x.shape)]
population_noises = zoom(population_noises, zoom=(1, *factors), order=2)
# project into epsilon ball and valid bounds
return np.clip(
np.clip(population_noises, -epsilon, epsilon) + x, min_, max_
)
population = np.random.uniform(-epsilon, +epsilon, (N, *search_shape)).astype(
x.dtype
)
for g in range(generations):
x_perturbed = get_perturbed(population)
probs, is_adversarial = [], []
# TODO: Replace this with a single call to a.forward(...) once this
# is implemented
for it in x_perturbed:
l, i = yield from a.forward_one(it)
probs.append(softmax(l))
is_adversarial.append(i)
probs = np.array(probs)
masked_probs = probs.copy()
masked_probs[:, a.target_class] = 0
fitnesses = np.log(probs[:, a.target_class] + 1e-30) - np.log(
np.sum(masked_probs, 1) + 1e-30
)
# find elite member
elite_idx = np.argmax(fitnesses)
# TODO: Does this make sense in our framework? We can just ignore
# this and use the minimal distortion tracked by the a
# elite member already is adversarial example
if is_adversarial[elite_idx]:
yield from a.forward_one(x_perturbed[elite_idx])
return True
next_population = population.copy()
mutation_probabilities = softmax(fitnesses / tau)
# determine crossover between two parents
parents_idx = np.random.choice(
N, 2 * N - 2, replace=True, p=mutation_probabilities
).reshape(2, -1)
p = fitnesses[parents_idx[0]] / (
fitnesses[parents_idx[0]] + fitnesses[parents_idx[1]]
)
p = p.reshape(-1, *([1] * (len(population.shape) - 1)))
crossover = (
p * population[parents_idx[0]] + (1 - p) * population[parents_idx[1]]
)
# determine new mutation in this generation
b = (np.random.uniform(0, 1, (N - 1, 1, 1, 1)) < rho).astype(np.float32)
mutation = b * np.random.uniform(
-alpha * epsilon, +alpha * epsilon, (N - 1, *search_shape)
)
next_population[1:] = crossover + mutation
population = next_population
return False
def _run_binary_search(
self, a, epsilon, k, generations, alpha, p, N, tau, search_shape
):
def try_epsilon(epsilon):
success = yield from self._run_one(
a, generations, alpha, p, N, tau, search_shape, epsilon
)
return success
for i in range(k):
success = yield from try_epsilon(epsilon)
if success:
logging.info("successful for eps = {}".format(epsilon))
break
logging.info("not successful for eps = {}".format(epsilon))
epsilon = epsilon * 1.5
else:
logging.warning("exponential search failed")
return
bad = 0
good = epsilon
for i in range(k):
epsilon = (good + bad) / 2
success = yield from try_epsilon(epsilon)
if success:
good = epsilon
logging.info("successful for eps = {}".format(epsilon))
else:
bad = epsilon
logging.info("not successful for eps = {}".format(epsilon))
|
446318
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from torch.nn.modules.container import ModuleList
from transformers import BertModel
class ArgModule(nn.Module):
def __init__(self, arg_layer, n_layers):
"""
Module for extracting arguments based on given encoder output and predicates.
It uses ArgExtractorLayer as a base block and repeat the block N('n_layers') times
:param arg_layer: an instance of the ArgExtractorLayer() class (required)
:param n_layers: the number of sub-layers in the ArgModule (required).
"""
super(ArgModule, self).__init__()
self.layers = _get_clones(arg_layer, n_layers)
self.n_layers = n_layers
def forward(self, encoded, predicate, pred_mask=None):
"""
:param encoded: output from sentence encoder with the shape of (L, B, D),
where L is the sequence length, B is the batch size, D is the embedding dimension
:param predicate: output from predicate module with the shape of (L, B, D)
:param pred_mask: mask that prevents attention to tokens which are not predicates
with the shape of (B, L)
:return: tensor like Transformer Decoder Layer Output
"""
output = encoded
for layer_idx in range(self.n_layers):
output = self.layers[layer_idx](
target=output, source=predicate, key_mask=pred_mask)
return output
class ArgExtractorLayer(nn.Module):
def __init__(self,
d_model=768,
n_heads=8,
d_feedforward=2048,
dropout=0.1,
activation='relu'):
"""
A layer similar to Transformer decoder without decoder self-attention.
(only encoder-decoder multi-head attention followed by feed-forward layers)
:param d_model: model dimensionality (default=768 from BERT-base)
:param n_heads: number of heads in multi-head attention layer
:param d_feedforward: dimensionality of point-wise feed-forward layer
:param dropout: drop rate of all layers
:param activation: activation function after first feed-forward layer
"""
super(ArgExtractorLayer, self).__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.linear1 = nn.Linear(d_model, d_feedforward)
self.dropout1 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def forward(self, target, source, key_mask=None):
"""
Single Transformer Decoder layer without self-attention
:param target: a tensor which takes a role as a query
:param source: a tensor which takes a role as a key & value
:param key_mask: key mask tensor with the shape of (batch_size, sequence_length)
"""
# Multi-head attention layer (+ add & norm)
attended = self.multihead_attn(
target, source, source,
key_padding_mask=key_mask)[0]
skipped = target + self.dropout1(attended)
normed = self.norm1(skipped)
# Point-wise feed-forward layer (+ add & norm)
projected = self.linear2(self.dropout2(self.activation(self.linear1(normed))))
skipped = normed + self.dropout1(projected)
normed = self.norm2(skipped)
return normed
class Multi2OIE(nn.Module):
def __init__(self,
bert_config='bert-base-cased',
mh_dropout=0.1,
pred_clf_dropout=0.,
arg_clf_dropout=0.3,
n_arg_heads=8,
n_arg_layers=4,
pos_emb_dim=64,
pred_n_labels=3,
arg_n_labels=9):
super(Multi2OIE, self).__init__()
self.pred_n_labels = pred_n_labels
self.arg_n_labels = arg_n_labels
self.bert = BertModel.from_pretrained(
bert_config,
output_hidden_states=True)
d_model = self.bert.config.hidden_size
self.pred_dropout = nn.Dropout(pred_clf_dropout)
self.pred_classifier = nn.Linear(d_model, self.pred_n_labels)
self.position_emb = nn.Embedding(3, pos_emb_dim, padding_idx=0)
d_model += (d_model + pos_emb_dim)
arg_layer = ArgExtractorLayer(
d_model=d_model,
n_heads=n_arg_heads,
dropout=mh_dropout)
self.arg_module = ArgModule(arg_layer, n_arg_layers)
self.arg_dropout = nn.Dropout(arg_clf_dropout)
self.arg_classifier = nn.Linear(d_model, arg_n_labels)
def forward(self,
input_ids,
attention_mask,
predicate_mask=None,
predicate_hidden=None,
total_pred_labels=None,
arg_labels=None):
# predicate extraction
bert_hidden = self.bert(input_ids, attention_mask)[0]
pred_logit = self.pred_classifier(self.pred_dropout(bert_hidden))
# predicate loss
if total_pred_labels is not None:
loss_fct = nn.CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = pred_logit.view(-1, self.pred_n_labels)
active_labels = torch.where(
active_loss, total_pred_labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(total_pred_labels))
pred_loss = loss_fct(active_logits, active_labels)
# inputs for argument extraction
pred_feature = _get_pred_feature(bert_hidden, predicate_mask)
position_vectors = self.position_emb(_get_position_idxs(predicate_mask, input_ids))
bert_hidden = torch.cat([bert_hidden, pred_feature, position_vectors], dim=2)
bert_hidden = bert_hidden.transpose(0, 1)
# argument extraction
arg_hidden = self.arg_module(bert_hidden, bert_hidden, predicate_mask)
arg_hidden = arg_hidden.transpose(0, 1)
arg_logit = self.arg_classifier(self.arg_dropout(arg_hidden))
# argument loss
if arg_labels is not None:
loss_fct = nn.CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = arg_logit.view(-1, self.arg_n_labels)
active_labels = torch.where(
active_loss, arg_labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(arg_labels))
arg_loss = loss_fct(active_logits, active_labels)
# total loss
batch_loss = pred_loss + arg_loss
outputs = (batch_loss, pred_loss, arg_loss)
return outputs
def extract_predicate(self,
input_ids,
attention_mask):
bert_hidden = self.bert(input_ids, attention_mask)[0]
pred_logit = self.pred_classifier(bert_hidden)
return pred_logit, bert_hidden
def extract_argument(self,
input_ids,
predicate_hidden,
predicate_mask):
pred_feature = _get_pred_feature(predicate_hidden, predicate_mask)
position_vectors = self.position_emb(_get_position_idxs(predicate_mask, input_ids))
arg_input = torch.cat([predicate_hidden, pred_feature, position_vectors], dim=2)
arg_input = arg_input.transpose(0, 1)
arg_hidden = self.arg_module(arg_input, arg_input, predicate_mask)
arg_hidden = arg_hidden.transpose(0, 1)
return self.arg_classifier(arg_hidden)
class BERTBiLSTM(nn.Module):
def __init__(self,
bert_config='bert-base-cased',
lstm_dropout=0.3,
pred_clf_dropout=0.,
arg_clf_dropout=0.3,
pos_emb_dim=256,
pred_n_labels=3,
arg_n_labels=9):
super(BERTBiLSTM, self).__init__()
self.pred_n_labels = pred_n_labels
self.arg_n_labels = arg_n_labels
self.bert = BertModel.from_pretrained(
bert_config,
output_hidden_states=True)
d_model = self.bert.config.hidden_size
self.pred_dropout = nn.Dropout(pred_clf_dropout)
self.pred_classifier = nn.Linear(d_model, self.pred_n_labels)
self.position_emb = nn.Embedding(3, pos_emb_dim, padding_idx=0)
d_model += pos_emb_dim
self.arg_module = nn.LSTM(
input_size=d_model,
hidden_size=d_model,
num_layers=3,
dropout=lstm_dropout,
batch_first=True,
bidirectional=True)
self.arg_dropout = nn.Dropout(arg_clf_dropout)
self.arg_classifier = nn.Linear(d_model * 2, arg_n_labels)
def forward(self,
input_ids,
attention_mask,
predicate_mask=None,
predicate_hidden=None,
total_pred_labels=None,
arg_labels=None):
# predicate extraction
bert_hidden = self.bert(input_ids, attention_mask)[0]
pred_logit = self.pred_classifier(self.pred_dropout(bert_hidden))
# predicate loss
if total_pred_labels is not None:
loss_fct = nn.CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = pred_logit.view(-1, self.pred_n_labels)
active_labels = torch.where(
active_loss, total_pred_labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(total_pred_labels))
pred_loss = loss_fct(active_logits, active_labels)
# argument extraction
position_vectors = self.position_emb(_get_position_idxs(predicate_mask, input_ids))
bert_hidden = torch.cat([bert_hidden, position_vectors], dim=2)
arg_hidden = self.arg_module(bert_hidden)[0]
arg_logit = self.arg_classifier(self.arg_dropout(arg_hidden))
# argument loss
if arg_labels is not None:
loss_fct = nn.CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = arg_logit.view(-1, self.arg_n_labels)
active_labels = torch.where(
active_loss, arg_labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(arg_labels))
arg_loss = loss_fct(active_logits, active_labels)
# total loss
batch_loss = pred_loss + arg_loss
outputs = (batch_loss, pred_loss, arg_loss)
return outputs
def extract_predicate(self,
input_ids,
attention_mask):
bert_hidden = self.bert(input_ids, attention_mask)[0]
pred_logit = self.pred_classifier(bert_hidden)
return pred_logit, bert_hidden
def extract_argument(self,
input_ids,
predicate_hidden,
predicate_mask):
position_vectors = self.position_emb(_get_position_idxs(predicate_mask, input_ids))
arg_input = torch.cat([predicate_hidden, position_vectors], dim=2)
arg_hidden = self.arg_module(arg_input)[0]
return self.arg_classifier(arg_hidden)
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise RuntimeError("activation should be relu/gelu, not %s." % activation)
def _get_clones(module, n):
return ModuleList([copy.deepcopy(module) for _ in range(n)])
def _get_position_idxs(pred_mask, input_ids):
position_idxs = torch.zeros(pred_mask.shape, dtype=int, device=pred_mask.device)
for mask_idx, cur_mask in enumerate(pred_mask):
position_idxs[mask_idx, :] += 2
cur_nonzero = (cur_mask == 0).nonzero()
start = torch.min(cur_nonzero).item()
end = torch.max(cur_nonzero).item()
position_idxs[mask_idx, start:end + 1] = 1
pad_start = max(input_ids[mask_idx].nonzero()).item() + 1
position_idxs[mask_idx, pad_start:] = 0
return position_idxs
def _get_pred_feature(pred_hidden, pred_mask):
B, L, D = pred_hidden.shape
pred_features = torch.zeros((B, L, D), device=pred_mask.device)
for mask_idx, cur_mask in enumerate(pred_mask):
pred_position = (cur_mask == 0).nonzero().flatten()
pred_feature = torch.mean(pred_hidden[mask_idx, pred_position], dim=0)
pred_feature = torch.cat(L * [pred_feature.unsqueeze(0)])
pred_features[mask_idx, :, :] = pred_feature
return pred_features
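# Minimal shape-check sketch (hypothetical sizes): run one ArgModule forward
# pass on random tensors to confirm the (L, B, D) interface documented above.
if __name__ == "__main__":
    seq_len, batch, dim = 12, 2, 768
    layer = ArgExtractorLayer(d_model=dim, n_heads=8)
    module = ArgModule(layer, n_layers=2)
    encoded = torch.randn(seq_len, batch, dim)
    predicate = torch.randn(seq_len, batch, dim)
    out = module(encoded, predicate)  # no predicate mask
    print(out.shape)                  # torch.Size([12, 2, 768])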
|
446343
|
import intelhex
ih = intelhex.IntelHex()
for i in range(65536): ih[i] = i & 0x0FF
print(len(ih))
print(ih.get_memory_size())
|
446358
|
import logging
import os
import sys
from nltk.corpus import stopwords as nltk_stopwords
from hunmisc.utils.huntool_wrapper import Hundisambig, Ocamorph, OcamorphAnalyzer, MorphAnalyzer # nopep8
from stemming.porter2 import stem as porter_stem
from utils import get_cfg
class Lemmatizer():
def __init__(self, cfg):
self.cfg = cfg
self.analyzer, self.morph_analyzer = self.get_analyzer()
self.stopwords = set(nltk_stopwords.words('english'))
self.stopwords.add('as') # TODO
self.stopwords.add('root') # TODO
self.read_cache()
self.oov = set()
def clear_cache(self):
self.cache = {}
self.oov = set()
def _analyze(self, word):
stem = porter_stem(word)
lemma = list(self.analyzer.analyze(
[[word]]))[0][0][1].split('||')[0].split('<')[0]
cand_krs = self.morph_analyzer.analyze([[word]]).next().next()
candidates = [cand.split('||')[0].split('<')[0] for cand in cand_krs]
self.cache[word] = (stem, lemma, candidates)
def _lemmatize_with_stopwords(self, word, uppercase):
if word == 'have':
return 'HAS'
elif not uppercase:
return word
elif word in self.stopwords:
return word.upper()
else:
return word
def lemmatize(self, word, defined=None, stem_first=False, uppercase=False,
debug=False):
# if 'defined' is provided, will refuse to return lemmas not in it
# if the word is defined, we just return it
if defined is not None and word in defined:
return self._lemmatize_with_stopwords(word, uppercase)
# if the word is not in our cache, we run all analyses
if word not in self.cache:
self._analyze(word)
stem, lemma, candidates = self.cache[word]
# if stem_first flag is on, we rerun lemmatize on the stem
# and return the result unless it doesn't exist
if stem_first:
if defined is None:
logging.warning("stem_first=True and defined=None, \
'lemmatize' is now a blind Porter stemmer")
stemmed_lemma = self.lemmatize(
stem, defined=defined, stem_first=False, uppercase=uppercase)
if stemmed_lemma is not None:
return self._lemmatize_with_stopwords(stemmed_lemma, uppercase)
# we return the lemma unless it's not in defined
if defined is None or lemma in defined:
return self._lemmatize_with_stopwords(lemma, uppercase)
# we go over the other candidates as a last resort
for cand in candidates:
if cand in defined:
return self._lemmatize_with_stopwords(cand, uppercase)
# last resort is the porter stem:
if stem in defined:
return self._lemmatize_with_stopwords(stem, uppercase)
# if that doesn't work either, we return None
return None
def get_analyzer(self):
hunmorph_path = self.cfg.get('lemmatizer', 'hunmorph_path')
ocamorph_fn = os.path.join(hunmorph_path, "ocamorph")
morphdb_model_fn = os.path.join(hunmorph_path, "morphdb_en.bin")
hundisambig_fn = os.path.join(hunmorph_path, "hundisambig")
hunpos_model_fn = os.path.join(hunmorph_path, "en_wsj.model")
logging.warning('loading hunmorph using binaries in {0}'.format(hunmorph_path))
for fn in (ocamorph_fn, morphdb_model_fn, hundisambig_fn,
hunpos_model_fn):
if not os.path.exists(fn):
raise Exception("can't find hunmorph resource: {0}".format(fn))
ocamorph = Ocamorph(ocamorph_fn, morphdb_model_fn)
ocamorph_analyzer = OcamorphAnalyzer(ocamorph)
hundisambig = Hundisambig(hundisambig_fn, hunpos_model_fn)
morph_analyzer = MorphAnalyzer(ocamorph, hundisambig)
return morph_analyzer, ocamorph_analyzer
def read_cache(self):
self.clear_cache()
cache_fn = self.cfg.get('lemmatizer', 'cache_file')
if not os.path.exists(cache_fn):
return
logging.info('reading hunmorph cache...')
with open(cache_fn) as f_obj:
for line in f_obj:
try:
fields = line.decode('utf-8').strip().split('\t')
except (ValueError, UnicodeDecodeError), e:
raise Exception('error parsing line in tok2lemma file: \
{0}\n{1}'.format(e, line))
word, stem, lemma = fields[:3]
candidates = fields[3:]
self.cache[word] = (stem, lemma, candidates)
logging.info('done!')
def write_cache(self):
cache_fn = self.cfg.get('lemmatizer', 'cache_file')
logging.info('writing hunmorph cache...')
with open(cache_fn, 'w') as f_obj:
for word, (stem, lemma, candidates) in self.cache.iteritems():
f_obj.write(u"{0}\t{1}\t{2}\t{3}\n".format(
word, stem, lemma, "\t".join(candidates)).encode('utf-8'))
logging.info('done!')
def main():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s : " +
"%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
cfg_file = sys.argv[1] if len(sys.argv) > 1 else None
cfg = get_cfg(cfg_file)
lemmatizer = Lemmatizer(cfg)
while True:
word = raw_input('> ')
print lemmatizer.lemmatize(word)
if __name__ == "__main__":
main()
|
446361
|
import torch
import ci_sdr
def test_burn_single_source():
t1 = torch.tensor([1., 2, 4, 7, 1, 3, 7, 8, 0, 3, 4])
t2 = torch.clone(t1)
t2[:4] += 2
sdr = ci_sdr.pt.ci_sdr(t1, t2, filter_length=3)
assert sdr.shape == (), sdr.shape
torch.testing.assert_allclose(sdr, 13.592828750610352)
def test_burn_multi_source():
t1 = torch.tensor([
[1., 2, 4, 7, 1, 3, 7, 8, 0, 3, 4],
[5., 2, 7, 9, 3, 8, 4, 2, 9, 4, 5],
])
t2 = torch.clone(t1)
t2[:, :4] += 2
sdr = ci_sdr.pt.ci_sdr(t1, t2, filter_length=3, compute_permutation=False)
assert sdr.shape == (2,), sdr.shape
torch.testing.assert_allclose(sdr, [13.592828750610352, 17.48115348815918])
sdr = ci_sdr.pt.ci_sdr(
t1, t2[(1, 0), :], filter_length=3, compute_permutation=True)
assert sdr.shape == (2,), sdr.shape
torch.testing.assert_allclose(sdr, [13.592828750610352, 17.48115348815918])
|
446371
|
import sublime
import sublime_plugin
from ..lib import omnisharp
from ..lib import helpers
class OmniSharpNavigateTo(sublime_plugin.TextCommand):
data = None
def run(self, edit):
if self.data is None:
params = {}
params['ShowAccessModifiers'] = False
omnisharp.get_response(
self.view, '/currentfilemembersasflat', self._handle_file_members, params)
else:
self._show_file_members(edit)
def _handle_file_members(self, data):
print(data)
if data is None:
return
self.data = data
self.view.run_command('omni_sharp_navigate_to')
def _show_file_members(self, edit):
        self.quickitems = []
for i in self.data:
self.quickitems.append(i['Text'].strip())
if len(self.quickitems) > 0:
self.view.window().show_quick_panel(self.quickitems, self.on_done)
else:
self.data = None
def is_enabled(self):
return helpers.is_csharp(self.view)
def on_done(self, index):
if index == -1:
self.data = None
return
item = self.data[index]
self.view.run_command("goto_line", {"line": item["Line"]})
|
446379
|
from libzmx import FiniteSurface, Standard
from libzmx import (Property, Parameter, AuxParameter, ExtraParameter,
PickupFormat)
from libzmx import SemiDiameterParameter
class Toroidal(Standard):
surface_type = "TOROIDAL"
radius_of_rotation = Property(AuxParameter, 1)
num_poly_terms = Property(ExtraParameter, 1, int)
norm_radius = Property(ExtraParameter, 2)
class Grating(Standard):
surface_type = "DGRATING"
groove_freq = Property(AuxParameter, 1)
order = Property(AuxParameter, 2)
class GeneralisedFresnel(Standard):
surface_type = "GEN_FRES"
num_poly_terms = Property(ExtraParameter, 1, int)
norm_radius = Property(ExtraParameter, 2)
x1y0 = Property(ExtraParameter, 3)
x0y1 = Property(ExtraParameter, 4)
class RetroReflect(FiniteSurface):
# When glass=="mirror", exit rays coincide with incident rays
surface_type = "RETROREF"
glass = Property(Parameter, 4, str, 2, PickupFormat(2, False, False), 0)
semidia = Property(SemiDiameterParameter)
coating = Property(Parameter, 7, str)
thermal_expansivity = Property(Parameter, 8, float, True)
|
446381
|
import sys
import time
import random
from twisted.internet import fdesc
from twisted.internet import reactor
from twisted.internet import defer, abstract
from scapy.config import conf
from scapy.all import RandShort, IP, IPerror, ICMP, ICMPerror, TCP, TCPerror, UDP, UDPerror
from ooni.errors import ProtocolNotRegistered, ProtocolAlreadyRegistered, LibraryNotInstalledError
from ooni.utils import log
from ooni.utils.net import getDefaultIface, getAddresses
from ooni.settings import config
# Check to see if libdnet or libpcap are installed and set the according
# variables.
# In debian libdnet is called dumbnet instead of dnet, but scapy is
# expecting "dnet" so we try and import it under such name.
try:
import dumbnet
sys.modules['dnet'] = dumbnet
except ImportError:
pass
try:
conf.use_pcap = True
conf.use_dnet = True
from scapy.arch import pcapdnet
config.pcap_dnet = True
except ImportError as e:
log.err(e.message + ". Pypcap or dnet are not properly installed. Certain tests may not work.")
config.pcap_dnet = False
conf.use_pcap = False
conf.use_dnet = False
# This is required for unix systems that are different than linux (OSX for
# example) since scapy explicitly wants pcap and libdnet installed for it
# to work.
try:
from scapy.arch import pcapdnet
except ImportError:
log.err("Your platform requires having libdnet and libpcap installed.")
raise LibraryNotInstalledError
_PCAP_DNET_INSTALLED = config.pcap_dnet
if _PCAP_DNET_INSTALLED:
from scapy.all import PcapWriter
else:
class DummyPcapWriter:
def __init__(self, pcap_filename, *arg, **kw):
log.err("Initializing DummyPcapWriter. We will not actually write to a pcapfile")
@staticmethod
def write(self):
pass
PcapWriter = DummyPcapWriter
from scapy.all import Gen, SetGen, MTU
class ScapyFactory(abstract.FileDescriptor):
"""
Inspired by muxTCP scapyLink:
https://github.com/enki/muXTCP/blob/master/scapyLink.py
"""
def __init__(self, interface, super_socket=None, timeout=5):
abstract.FileDescriptor.__init__(self, reactor)
if interface == 'auto':
interface = getDefaultIface()
if not super_socket and sys.platform == 'darwin':
super_socket = conf.L3socket(iface=interface, promisc=True, filter='')
elif not super_socket:
super_socket = conf.L3socket(iface=interface)
self.protocols = []
fdesc._setCloseOnExec(super_socket.ins.fileno())
self.super_socket = super_socket
def writeSomeData(self, data):
"""
XXX we actually want to use this, but this requires overriding doWrite
or writeSequence.
"""
pass
def send(self, packet):
"""
Write a scapy packet to the wire.
"""
return self.super_socket.send(packet)
def fileno(self):
return self.super_socket.ins.fileno()
def doRead(self):
packet = self.super_socket.recv(MTU)
if packet:
for protocol in self.protocols:
protocol.packetReceived(packet)
def registerProtocol(self, protocol):
if not self.connected:
self.startReading()
if protocol not in self.protocols:
protocol.factory = self
self.protocols.append(protocol)
else:
raise ProtocolAlreadyRegistered
def unRegisterProtocol(self, protocol):
if protocol in self.protocols:
self.protocols.remove(protocol)
if len(self.protocols) == 0:
self.loseConnection()
else:
raise ProtocolNotRegistered
class ScapyProtocol(object):
factory = None
def packetReceived(self, packet):
"""
When you register a protocol, this method will be called with argument
the packet it received.
Every protocol that is registered will have this method called.
"""
raise NotImplementedError
class ScapySender(ScapyProtocol):
timeout = 5
# This deferred will fire when we have finished sending a receiving packets.
# Should we look for multiple answers for the same sent packet?
multi = False
# When 0 we stop when all the packets we have sent have received an
# answer
expected_answers = 0
def processPacket(self, packet):
"""
Hook useful for processing packets as they come in.
"""
def processAnswer(self, packet, answer_hr):
log.debug("Got a packet from %s" % packet.src)
log.debug("%s" % self.__hash__)
for i in range(len(answer_hr)):
if packet.answers(answer_hr[i]):
self.answered_packets.append((answer_hr[i], packet))
if not self.multi:
del (answer_hr[i])
break
if len(self.answered_packets) == len(self.sent_packets):
log.debug("All of our questions have been answered.")
self.stopSending()
return
if self.expected_answers and self.expected_answers == len(self.answered_packets):
log.debug("Got the number of expected answers")
self.stopSending()
def packetReceived(self, packet):
if self.timeout and time.time() - self._start_time > self.timeout:
self.stopSending()
if packet:
self.processPacket(packet)
# A string that has the same value for the request than for the
# response.
hr = packet.hashret()
if hr in self.hr_sent_packets:
answer_hr = self.hr_sent_packets[hr]
self.processAnswer(packet, answer_hr)
def stopSending(self):
result = (self.answered_packets, self.sent_packets)
self.d.callback(result)
self.factory.unRegisterProtocol(self)
def sendPackets(self, packets):
if not isinstance(packets, Gen):
packets = SetGen(packets)
for packet in packets:
hashret = packet.hashret()
if hashret in self.hr_sent_packets:
self.hr_sent_packets[hashret].append(packet)
else:
self.hr_sent_packets[hashret] = [packet]
self.sent_packets.append(packet)
self.factory.send(packet)
def startSending(self, packets):
# This dict is used to store the unique hashes that allow scapy to
# match up request with answer
self.hr_sent_packets = {}
# These are the packets we have received as answer to the ones we sent
self.answered_packets = []
# These are the packets we send
self.sent_packets = []
self._start_time = time.time()
self.d = defer.Deferred()
self.sendPackets(packets)
return self.d
class ScapySniffer(ScapyProtocol):
def __init__(self, pcap_filename, *arg, **kw):
self.pcapwriter = PcapWriter(pcap_filename, *arg, **kw)
def packetReceived(self, packet):
self.pcapwriter.write(packet)
def close(self):
self.pcapwriter.close()
class ParasiticTraceroute(ScapyProtocol):
def __init__(self):
self.numHosts = 7
self.rate = 15
self.hosts = {}
self.ttl_max = 15
self.ttl_min = 1
self.sent_packets = []
self.received_packets = []
self.matched_packets = {}
self.addresses = [str(x) for x in getAddresses()]
def sendPacket(self, packet):
self.factory.send(packet)
self.sent_packets.append(packet)
log.debug("Sent packet to %s with ttl %d" % (packet.dst, packet.ttl))
def packetReceived(self, packet):
try:
packet[IP]
except IndexError:
return
# Add TTL Expired responses.
if isinstance(packet.getlayer(3), TCPerror):
self.received_packets.append(packet)
# Live traceroute?
log.debug("%s replied with icmp-ttl-exceeded for %s" % (packet.src, packet[IPerror].dst))
return
elif packet.dst in self.hosts:
if random.randint(1, 100) > self.rate:
# Don't send a packet this time
return
try:
packet[IP].ttl = self.hosts[packet.dst]['ttl'].pop()
del packet.chksum # XXX Why is this incorrect?
self.sendPacket(packet)
k = (packet.id, packet[TCP].sport, packet[TCP].dport, packet[TCP].seq)
self.matched_packets[k] = {'ttl': packet.ttl}
return
except IndexError:
return
def maxttl(packet=None):
if packet:
return min(self.ttl_max, *map(lambda x: x - packet.ttl, [64, 128, 256])) - 1
else:
return self.ttl_max
def genttl(packet=None):
ttl = range(self.ttl_min, maxttl(packet))
random.shuffle(ttl)
return ttl
if len(self.hosts) < self.numHosts:
if packet.dst not in self.hosts \
and packet.dst not in self.addresses \
and isinstance(packet.getlayer(1), TCP):
self.hosts[packet.dst] = {'ttl': genttl()}
log.debug("Tracing to %s" % packet.dst)
return
if packet.src not in self.hosts \
and packet.src not in self.addresses \
and isinstance(packet.getlayer(1), TCP):
self.hosts[packet.src] = {'ttl': genttl(packet),
'ttl_max': maxttl(packet)}
log.debug("Tracing to %s" % packet.src)
return
        if packet.src in self.hosts and 'ttl_max' not in self.hosts[packet.src]:
self.hosts[packet.src]['ttl_max'] = ttl_max = maxttl(packet)
log.debug("set ttl_max to %d for host %s" % (ttl_max, packet.src))
ttl = []
for t in self.hosts[packet.src]['ttl']:
if t < ttl_max:
ttl.append(t)
self.hosts[packet.src]['ttl'] = ttl
return
def stopListening(self):
self.factory.unRegisterProtocol(self)
class MPTraceroute(ScapyProtocol):
dst_ports = [0, 22, 23, 53, 80, 123, 443, 8080, 65535]
ttl_min = 1
ttl_max = 30
def __init__(self):
self.sent_packets = []
self._recvbuf = []
self.received_packets = {}
self.matched_packets = {}
self.hosts = []
self.interval = 0.2
self.timeout = ((self.ttl_max - self.ttl_min) * len(self.dst_ports) * self.interval) + 5
self.numPackets = 1
def ICMPTraceroute(self, host):
if host not in self.hosts:
self.hosts.append(host)
d = defer.Deferred()
reactor.callLater(self.timeout, d.callback, self)
self.sendPackets(IP(dst=host, ttl=(self.ttl_min, self.ttl_max), id=RandShort()) / ICMP(id=RandShort()))
return d
def UDPTraceroute(self, host):
if host not in self.hosts:
self.hosts.append(host)
d = defer.Deferred()
reactor.callLater(self.timeout, d.callback, self)
for dst_port in self.dst_ports:
self.sendPackets(
IP(dst=host, ttl=(self.ttl_min, self.ttl_max), id=RandShort()) / UDP(dport=dst_port, sport=RandShort()))
return d
def TCPTraceroute(self, host):
if host not in self.hosts:
self.hosts.append(host)
d = defer.Deferred()
reactor.callLater(self.timeout, d.callback, self)
for dst_port in self.dst_ports:
self.sendPackets(
IP(dst=host, ttl=(self.ttl_min, self.ttl_max), id=RandShort()) / TCP(flags=2L, dport=dst_port,
sport=RandShort(),
seq=RandShort()))
return d
@defer.inlineCallbacks
def sendPackets(self, packets):
def sleep(seconds):
d = defer.Deferred()
reactor.callLater(seconds, d.callback, seconds)
return d
if not isinstance(packets, Gen):
packets = SetGen(packets)
for packet in packets:
for i in xrange(self.numPackets):
self.sent_packets.append(packet)
self.factory.super_socket.send(packet)
yield sleep(self.interval)
def matchResponses(self):
def addToReceivedPackets(key, packet):
"""
Add a packet into the received packets dictionary,
typically the key is a tuple of packet fields used
to correlate sent packets with received packets.
"""
# Initialize or append to the lists of packets
# with the same key
if key in self.received_packets:
self.received_packets[key].append(packet)
else:
self.received_packets[key] = [packet]
def matchResponse(k, p):
if k in self.received_packets:
if p in self.matched_packets:
log.debug("Matched sent packet to more than one response!")
self.matched_packets[p].extend(self.received_packets[k])
else:
self.matched_packets[p] = self.received_packets[k]
log.debug("Packet %s matched %s" % ([p], self.received_packets[k]))
return 1
return 0
for p in self._recvbuf:
l = p.getlayer(2)
if isinstance(l, IPerror):
l = p.getlayer(3)
if isinstance(l, ICMPerror):
addToReceivedPackets(('icmp', l.id), p)
elif isinstance(l, TCPerror):
addToReceivedPackets(('tcp', l.dport, l.sport), p)
elif isinstance(l, UDPerror):
addToReceivedPackets(('udp', l.dport, l.sport), p)
elif hasattr(p, 'src') and p.src in self.hosts:
l = p.getlayer(1)
if isinstance(l, ICMP):
addToReceivedPackets(('icmp', l.id), p)
elif isinstance(l, TCP):
addToReceivedPackets(('tcp', l.ack - 1, l.dport, l.sport), p)
elif isinstance(l, UDP):
addToReceivedPackets(('udp', l.dport, l.sport), p)
for p in self.sent_packets:
# for each sent packet, find corresponding
# received packets
l = p.getlayer(1)
i = 0
if isinstance(l, ICMP):
i += matchResponse(('icmp', p.id), p) # match by ipid
i += matchResponse(('icmp', l.id), p) # match by icmpid
if isinstance(l, TCP):
i += matchResponse(('tcp', l.dport, l.sport), p) # match by s|dport
i += matchResponse(('tcp', l.seq, l.sport, l.dport), p)
if isinstance(l, UDP):
i += matchResponse(('udp', l.dport, l.sport), p)
i += matchResponse(('udp', l.sport, l.dport), p)
if i == 0:
log.debug("No response for packet %s" % [p])
del self._recvbuf
def packetReceived(self, packet):
l = packet.getlayer(1)
if not l:
return
elif isinstance(l, ICMP) or isinstance(l, UDP) or isinstance(l, TCP):
self._recvbuf.append(packet)
def stopListening(self):
self.factory.unRegisterProtocol(self)
|
446383
|
try:
from collections import OrderedDict
except ImportError:
from .OrderedDictFallback import OrderedDictFallback as OrderedDict
|
446393
|
dest = [0x21, 0x58, 0x33, 0x57, 0x24, 0x2c, 0x66, 0x25,
0x45, 0x53, 0x34, 0x28, 0x08, 0x61, 0x11, 0x07,
0x14, 0x3d, 0x07, 0x62, 0x13, 0x72, 0x02, 0x4c]
flag = ""
random = 2019
for i in range(24):
random = (random * 23333 + 19260817) % 127
flag += chr(random ^ dest[i])
print(flag)
|
446422
|
from django.urls import reverse
def _get_relative_path_to_watch(pk: int = 1) -> str:
url: str = reverse("stream:watch", kwargs={"movie_id": pk})
return url.replace(str(pk), "")
|
446436
|
from typing import Dict, List, Tuple
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from classy.data.data_drivers import (
GenerationSample,
QASample,
SentencePairSample,
SequenceSample,
TokensSample,
)
from classy.evaluation.base import Evaluation
from classy.utils.commons import flatten
def accuracy(gold, pred) -> float:
return accuracy_score(gold, pred)
def p_r_f_support(gold, pred) -> Dict[str, float]:
result = {}
for avg in ["micro", "macro", "weighted"]:
p, r, f1, _ = precision_recall_fscore_support(gold, pred, average=avg)
for k, v in zip(["precision", "recall", "f1"], [p, r, f1]):
result[f"{avg}_{k}"] = v
return result
class SequenceSimpleEvaluation(Evaluation):
def __call__(self, path: str, predicted_samples: List[SequenceSample]) -> Dict:
gold = [sample.reference_annotation for sample in predicted_samples]
pred = [sample.predicted_annotation for sample in predicted_samples]
return {"accuracy": accuracy(gold, pred), **p_r_f_support(gold, pred)}
class SentencePairSimpleEvaluation(Evaluation):
def __call__(self, path: str, predicted_samples: List[SentencePairSample]) -> Dict:
gold = [sample.reference_annotation for sample in predicted_samples]
pred = [sample.predicted_annotation for sample in predicted_samples]
return {"accuracy": accuracy(gold, pred), **p_r_f_support(gold, pred)}
class TokenSimpleEvaluation(Evaluation):
def __call__(self, path: str, predicted_samples: List[TokensSample]) -> Dict:
gold = [sample.reference_annotation for sample in predicted_samples]
pred = [sample.predicted_annotation for sample in predicted_samples]
gold, pred = flatten(gold), flatten(pred)
return {"accuracy": accuracy(gold, pred), **p_r_f_support(gold, pred)}
class QASimpleEvaluation(Evaluation):
"""
Computes a simple exact-match accuracy
"""
def __call__(self, path: str, predicted_samples: List[QASample]) -> Dict:
n, d = 0, 0
for sample in predicted_samples:
d += 1
if sample.reference_annotation == sample.predicted_annotation:
n += 1
return {"exact-match-accuracy": f"{n / d:.2f}"}
class GenerationSimpleEvaluation(Evaluation):
"""
Computes a simple full-text accuracy
"""
def __call__(self, path: str, predicted_samples: List[GenerationSample]) -> Dict:
n, d = 0, 0
for sample in predicted_samples:
d += 1
if sample.reference_annotation == sample.predicted_annotation:
n += 1
return {"full-generation-accuracy": f"{n / d:.2f}"}
|
446453
|
from apps.blocklist.models import BlocklistApp, BlocklistPlugin
def run():
# only blocked plugins with all 3 are app based blocks, otherwise the
# min/max refer to the version of the plugin.
plugins = (BlocklistPlugin.objects.exclude(min='').exclude(min=None)
.exclude(max='').exclude(max=None)
.exclude(guid='').exclude(guid=None))
for plugin in plugins:
if plugin.guid and plugin.min and plugin.max:
BlocklistApp.objects.create(blplugin=plugin, guid=plugin.guid,
min=plugin.min, max=plugin.max)
# Null out the fields so the migration can be resumed if
# interrupted. This way when the guid field is removed, the min and
            # max won't be treated like plugin min and max when they are app min
# and max.
plugin.guid = None
plugin.min = None
plugin.max = None
plugin.save()
|
446476
|
n = int(input("Enter the number : "))
sum = 0
temp = n
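# A strong (Krishnamurthy) number equals the sum of the factorials of its digits,
# e.g. 145 = 1! + 4! + 5! = 1 + 24 + 120.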
while (n):
i = 1
fact = 1
rem = int(n % 10)
while(i <= rem):
fact = fact * i
i = i + 1
sum = sum + fact
n = int(n / 10)
if(sum == temp):
print(temp,end = "")
print(" is a strong number")
else:
print(temp,end = "")
print(" is not a strong number")
|
446477
|
import glob
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
from os.path import expanduser
class Config(object):
"""A ConfigParser wrapper to support defaults when calling instance
methods, and also tied to a single section"""
def __init__(self, section=None, values=None, extra_sources=()):
if section is None:
sources = self._getsources()
self.cp = SafeConfigParser()
self.cp.read(sources)
for fp in extra_sources:
self.cp.readfp(fp)
else:
self.cp = SafeConfigParser(values)
self.cp.add_section(section)
def _getsources(self):
sources = [expanduser('~/.bastion_ssh.ini')]
return sources
def _getany(self, method, section, option, default):
try:
return method(section, option)
except (NoSectionError, NoOptionError):
if default is not None:
return default
raise
def get(self, section, option, default=None):
return self._getany(self.cp.get, section, option, default)
def getint(self, section, option, default=None):
return (int)(self._getany(self.cp.get, section, option, default))
def getfloat(self, section, option, default=None):
return (float)(self._getany(self.cp.get, section, option, default))
    def getboolean(self, section, option, default=None):
        return self._getany(self.cp.getboolean, section, option, default)
def items(self, section, default=None):
try:
return self.cp.items(section)
except (NoSectionError, NoOptionError):
if default is not None:
return default
raise
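# Minimal usage sketch of the per-call defaults; the 'ssh' section and option names below
# are illustrative placeholders, not taken from a real bastion_ssh.ini.
if __name__ == '__main__':
    cfg = Config()                                       # reads ~/.bastion_ssh.ini when present
    host = cfg.get('ssh', 'host', default='localhost')   # returns the default when the key is missing
    timeout = cfg.getint('ssh', 'timeout', default=30)   # same lookup, cast to int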
|
446528
|
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, random_split
from torch.optim.lr_scheduler import LambdaLR
import pytorch_lightning as pl
import numpy as np
import math
from argparse import ArgumentParser
from gpt2 import GPT2
from utils import quantize
def _to_sequence(x):
"""shape batch of images for input into GPT2 model"""
x = x.view(x.shape[0], -1) # flatten images into sequences
x = x.transpose(0, 1).contiguous() # to shape [seq len, batch]
return x
class ImageGPT(pl.LightningModule):
def __init__(
self,
centroids,
embed_dim=16,
num_heads=2,
num_layers=8,
num_pixels=28,
num_vocab=16,
num_classes=10,
classify=False,
learning_rate=3e-3,
steps=10_000,
warmup_steps=500,
**kwargs,
):
super(ImageGPT, self).__init__()
self.save_hyperparameters()
self.gpt = GPT2(
embed_dim=embed_dim,
num_heads=num_heads,
num_layers=num_layers,
num_positions=num_pixels * num_pixels,
num_vocab=num_vocab,
num_classes=num_classes,
)
self.centroids = nn.Parameter(
torch.from_numpy(np.load(centroids)), requires_grad=False
)
self.criterion = nn.CrossEntropyLoss()
self.classify = classify
self.learning_rate = learning_rate
self.steps = steps
self.warmup_steps = warmup_steps
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--embed_dim", type=int, default=16)
parser.add_argument("--num_heads", type=int, default=2)
parser.add_argument("--num_layers", type=int, default=8)
parser.add_argument("--num_pixels", type=int, default=28)
parser.add_argument("--num_vocab", type=int, default=16)
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--classify", action="store_true", default=False)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--learning_rate", type=float, default=3e-3)
parser.add_argument("--steps", type=int, default=25_000)
return parser
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.gpt.parameters(), lr=self.learning_rate)
# no learning rate schedule for fine-tuning
if self.classify:
return optimizer
scheduler = {
"scheduler": LambdaLR(
optimizer, learning_rate_schedule(self.warmup_steps, self.steps)
),
"interval": "step",
}
return [optimizer], [scheduler]
def forward(self, x):
return self.gpt(x)
def training_step(self, batch, batch_idx):
x, y = batch
x = quantize(x, self.centroids)
x = _to_sequence(x)
if self.classify:
clf_logits, logits = self.gpt(x, classify=True)
clf_loss = self.criterion(clf_logits, y)
gen_loss = self.criterion(logits.view(-1, logits.size(-1)), x.view(-1))
# joint loss for classification
loss = clf_loss + gen_loss
else:
logits = self.gpt(x)
loss = self.criterion(logits.view(-1, logits.size(-1)), x.view(-1))
logs = {"loss": loss}
return {"loss": loss, "log": logs}
def validation_step(self, batch, batch_idx):
x, y = batch
x = quantize(x, self.centroids)
x = _to_sequence(x)
if self.classify or self.hparams.classify:
clf_logits, logits = self.gpt(x, classify=True)
clf_loss = self.criterion(clf_logits, y)
gen_loss = self.criterion(logits.view(-1, logits.size(-1)), x.view(-1))
# joint loss for classification
loss = clf_loss + gen_loss
_, preds = torch.max(clf_logits, 1)
correct = preds == y
return {"val_loss": loss, "correct": correct}
else:
logits = self.gpt(x)
loss = self.criterion(logits.view(-1, logits.size(-1)), x.view(-1))
return {"val_loss": loss}
def validation_epoch_end(self, outs):
avg_loss = torch.stack([x["val_loss"] for x in outs]).mean()
logs = {"val_loss": avg_loss}
if self.classify or self.hparams.classify:
correct = torch.cat([x["correct"] for x in outs])
logs["val_acc"] = correct.sum().float() / correct.shape[0]
return {"val_loss": avg_loss, "log": logs}
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outs):
result = self.validation_epoch_end(outs)
        # replace validation stats with test stats because we are reusing the function
result["log"]["test_loss"] = result["log"].pop("val_loss")
result["test_loss"] = result.pop("val_loss")
if self.hparams.classify:
result["log"]["test_acc"] = result["log"].pop("val_acc")
return result
def learning_rate_schedule(warmup_steps, total_steps):
"""Linear warmup for warmup_steps, with cosine annealing to 0 at total_steps"""
def learning_rate_fn(step):
if step < warmup_steps:
return float(step) / float(max(1, warmup_steps))
else:
progress = float(step - warmup_steps) / float(
max(1, total_steps - warmup_steps)
)
return 0.5 * (1.0 + math.cos(math.pi * progress))
return learning_rate_fn
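# Minimal sketch of wiring the schedule above into a LambdaLR scheduler; the model and step
# counts below are illustrative placeholders, not values from an actual training run.
if __name__ == "__main__":
    _model = torch.nn.Linear(4, 4)
    _optimizer = torch.optim.Adam(_model.parameters(), lr=3e-3)
    _scheduler = LambdaLR(_optimizer, learning_rate_schedule(warmup_steps=500, total_steps=10_000))
    for _step in range(3):   # stand-in for a training loop
        _optimizer.step()
        _scheduler.step()    # lr follows linear warmup, then cosine decay to 0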
|
446533
|
from django import template
register = template.Library()
@register.inclusion_tag(
'transitions/templatetags/available_transitions.html', takes_context=True
)
def available_transitions(context, obj, field):
"""Render available transitions for instance."""
get_available_transitions = getattr(
obj, 'get_available_transitions_for_{}'.format(field), lambda user: []
)
if get_available_transitions:
transitions = []
for transition in get_available_transitions(user=context.request.user):
transition.show_form = transition.has_form()
transitions.append(transition)
context.update({
'transitions': transitions
})
return context
@register.filter(name='choice_str')
def choice_str(obj, field_name):
field = obj._meta.get_field(field_name)
value = getattr(obj, field_name)
try:
return field.choices.desc_from_id(value)
except (AttributeError, ValueError):
return value
class HistoryForObjectNode(template.Node):
def __init__(self, job, obj, var_name='data'):
self.job = job
self.obj = obj
self.var_name = var_name
def render(self, context):
job = context[self.job]
obj = context[self.obj]
history = job.params['history_kwargs']
context[self.var_name] = history[obj.pk].items()
if not context[self.var_name]:
return '–'
return ''
@register.tag(name='history_for_object')
def history_for_object(parser, token):
error = False
try:
_, job, obj, _as, var_name = token.split_contents()
if _as != 'as':
error = True
except:
error = True
if error:
raise template.TemplateSyntaxError(
'history_for_object must be of the form, "history_for_object <job> <obj> as <var_name>"' # noqa
)
else:
return HistoryForObjectNode(job, obj, var_name)
|
446535
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import os
import numpy as np
import json
import gdown
from PIL import Image
import cv2
from .models.fcn_net import _build_fcn
from .util.utils import make_color_seg_map
__PREFIX__ = os.path.dirname(os.path.realpath(__file__))
__models__ = ["fcn-resnet50-coco", "fcn-resnet101-coco"]
def available_models():
"""Returns list of available models"""
return __models__
class FCN(object):
def __init__(self, model="fcn-resnet50-coco", wtspath="weights/", device="cpu",
save=None, show=True, draw_map=True, draw_blend=True, classfile=None,
colors=None, blend_alpha=.7, pretrained=True):
"""
        Fully Convolutional Network (FCN) Segmentation Module. This class exposes the inference method
        that is used to run inference on an image. For implementation details refer
        to the FCN readme.md. All major model parameters can be configured here.
Returns:
preds, segmentation_map (optional), blend_img (optional)
- preds (numpy array of shape (H, W)): A numpy array whose every pixel contains
index of the class that pixel is classified into. (H, W) are height and
width of the given image respectively. Check readme.md.
- color_img (optional; PIL Image): a PIL image of the segmentation map
- blend_img (optional; PIL Image): a PIL image of the segmentation map blended into the
original image.
Arguments:
- model (default: fcn-resnet50-coco): The pretrained model to be used.
For list of all supported models,
either check readme.md or available_models()
- wtspath (default: weights/): Path to .pth file. NOTE: Provide full path to .pth if providing
custom path. Else, leave the parameter unchanged, for the module
will automatically download the required weights in default directory
- device (default: cpu): device to run inference on.
- save (default: None): full path + filename that the results are to be saved as. For example,
to save a result image as path/to/result.png, save should be path/to/result.
- show (default: True): Show the results
- draw_map (default: True): Draw the segmentation map.
            - draw_blend (default: True): Blend the segmentation map into the original input image and
produce a new image
- classfile (default: None): path to classfile. No need to provide any value as classfile is already
available in repo
- colors (default: None): path to color pallette file. No need to provide any value as color pallette files
are already available in repo.
- blend_alpha (default: .7): alpha channel parameter of segmentation map that is blended into original input image
            - pretrained (default: True): Whether to use pretrained models or not. Currently the only mode supported.
"""
if model not in __models__:
raise ValueError(f"{model} not supported yet. Only {__models__} supported")
        if device != "cpu":
if not torch.cuda.is_available():
raise ValueError("device={} but cuda not detected".format(device))
device = "cuda"
if classfile is not None and not os.path.exists(classfile):
raise FileNotFoundError("{} not a valid path".format(classfile))
if colors is not None and not os.path.exists(colors):
raise FileNotFoundError("{} not a valid path".format(colors))
self.device = device
self.model_name = model
self.save = save
self.show = show
self.draw_blend = draw_blend
self.draw_map = draw_map
self.classfile = classfile
self.colors = colors
self.blend_alpha = blend_alpha
self.pretrained = pretrained
self._norm_mean = [0.485, 0.456, 0.406]
self._norm_std = [0.229, 0.224, 0.225]
self.tsfms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=self._norm_mean, std=self._norm_std)
])
_dataset_name = "voc2012"
with open(__PREFIX__+"/data/{}_classes.txt".format(_dataset_name), "r") as f:
self.class_names = f.read().split("\n")[:-1]
with open(__PREFIX__+"/data/{}_colors.txt".format(_dataset_name), "r") as f:
self.colors = f.read().split("\n")[:-1]
self.colors = np.loadtxt(__PREFIX__+"/data/{}_colors.txt".format(_dataset_name)).astype(np.uint8)
_backbone = self.model_name.split("-")[1]
self.model = _build_fcn(_backbone, 21, pretrained=False, aux=True)
resp = self._check_or_download_weights(__PREFIX__+"/"+wtspath)
#resp = self._check_or_download_weights(wtspath)
print(self.wtspath)
if resp == 0:
print("Weights downloaded.")
else:
print("Weights found.")
if self.pretrained:
self._load_weights(self.wtspath)
print("Model load complete.")
self.model.eval()
self.model = self.model.to(self.device)
def class_names(self):
return self.class_names
def _check_or_download_weights(self, wtspath):
if os.path.join(__PREFIX__, "weights") not in wtspath and not os.path.exists(wtspath):
raise FileNotFoundError("File not found. Either file doesnt exist or directory provided")
elif not os.path.exists(wtspath + self.model_name + ".pth"):
if os.path.exists(__PREFIX__+"/weights/") and len(os.listdir(__PREFIX__+"/weights/")) == 0:
os.rmdir(__PREFIX__+"/"+"weights/")
os.mkdir(__PREFIX__+"/weights/")
if not os.path.exists(__PREFIX__+"/weights/"):
os.mkdir(__PREFIX__+"/weights/")
with open(os.path.join(__PREFIX__, "config/weights_download.json")) as fp:
json_file = json.load(fp)
print("Fetching file ids for {}".format(self.model_name))
file_id = json_file[self.model_name]
url = 'https://drive.google.com/uc?id={}'.format(file_id)
wtspath = __PREFIX__ + "/weights/{}.pth".format(self.model_name)
gdown.download(url, wtspath, quiet=False)
self.wtspath = wtspath
return 0
else:
self.wtspath = wtspath + "{}.pth".format(self.model_name)
return 1
def _load_weights(self, wtspath):
source_state_dict = torch.load(self.wtspath, map_location=torch.device(self.device))
self.model.load_state_dict(source_state_dict)
    def inference(self, img, save=None, show=None, draw_blend=None, draw_map=None):
if save is None:
save = self.save
if show is None:
show = self.show
if draw_map is None:
draw_map = self.draw_map
if draw_blend is None:
draw_blend = self.draw_blend
if draw_blend is True and draw_map is False:
raise ValueError("draw_blend cannot be True with draw_map being False")
if show is True and draw_map is False:
raise ValueError("show cannot be True with draw_map being False")
if isinstance(img, str):
if os.path.exists(img):
img_name = os.path.basename(img)
img = Image.open(img).convert("RGB")
else:
raise FileNotFoundError(f"{img} no such path")
elif isinstance(img, Image.Image):
pass
elif isinstance(img, np.ndarray):
img = Image.fromarray(img)
orig_img = img
img = self.tsfms(img)
img = torch.unsqueeze(img, 0)
img = img.to(self.device)
with torch.no_grad():
output = self.model(img)["out"]
output = torch.squeeze(output, 0).cpu().numpy()
output = np.argmax(output, axis=0)
if draw_map:
color_img = make_color_seg_map(output, self.colors)
if draw_blend:
orig_img = orig_img.convert("RGBA")
color_img = color_img.convert("RGBA")
                blend = Image.blend(orig_img, color_img, alpha=self.blend_alpha)
if save is not None:
color_img.save(f"{save}_map.png")
try:
blend.save(f"{save}_blend.png")
except:
pass
if show is not None and show is True:
try:
blend.show()
except:
pass
try:
return output, color_img, blend
except:
try:
return output, color_img
except:
return output
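# Minimal usage sketch; the image path below is an illustrative placeholder and constructing
# FCN with the default pretrained=True downloads weights on first use.
if __name__ == "__main__":
    seg = FCN(model="fcn-resnet50-coco", device="cpu", show=False)
    preds, seg_map, blended = seg.inference("path/to/image.jpg", save=None, show=False)
    print(preds.shape)  # (H, W) array of per-pixel class indices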
|
446628
|
import numpy as np
from keras.preprocessing import sequence
from keras.datasets import imdb
import torch
from torch.autograd import Variable
from torch.optim import Adam
import Model.Constants as Constants
from Model.Modules import Encoder, Generator, Discriminator
from utils import check_cuda
max_features = 10000
maxlen = 20
batch_size = 128
epoch = 1
c_dim = 2
d_word_vec = 150
lambda_c = 0.1
lambda_z = 0.1
use_cuda = False
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(
num_words=max_features,
start_char=Constants.BOS,
oov_char=Constants.UNK,
index_from=Constants.EOS,
)
forward_dict = imdb.get_word_index()
for key, value in forward_dict.items():
forward_dict[key] = value + Constants.EOS
forward_dict[Constants.PAD_WORD] = Constants.PAD
forward_dict[Constants.UNK_WORD] = Constants.UNK
forward_dict[Constants.BOS_WORD] = Constants.BOS
forward_dict[Constants.EOS_WORD] = Constants.EOS
backward_dict = {}
for key, value in forward_dict.items():
backward_dict[value] = key
x_train = sequence.pad_sequences(
x_train,
maxlen=maxlen,
padding='post',
truncating='post',
value=Constants.PAD,
)
x_test = sequence.pad_sequences(
x_test,
maxlen=maxlen,
padding='post',
truncating='post',
value=Constants.PAD,
)
def get_batch(data, index, batch_size, testing=False):
tensor = torch.from_numpy(data[index:index+batch_size]).type(torch.LongTensor)
input_data = Variable(tensor, volatile=testing, requires_grad=False)
input_data = check_cuda(input_data, use_cuda)
output_data = input_data
return input_data, output_data
def get_batch_label(data, label, index, batch_size, testing=False):
tensor = torch.from_numpy(data[index:index+batch_size]).type(torch.LongTensor)
input_data = Variable(tensor, volatile=testing, requires_grad=False)
input_data = check_cuda(input_data, use_cuda)
label_tensor = torch.from_numpy(label[index:index+batch_size]).type(torch.LongTensor)
output_data = Variable(label_tensor, volatile=testing, requires_grad=False)
output_data = check_cuda(output_data, use_cuda)
return input_data, output_data
# Borrow from https://github.com/ethanluoyc/pytorch-vae/blob/master/vae.py
def latent_loss(z_mean, z_stddev):
mean_sq = z_mean * z_mean
stddev_sq = z_stddev * z_stddev
return 0.5 * torch.mean(mean_sq + stddev_sq - torch.log(stddev_sq) - 1)
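# latent_loss is the element-wise mean of the closed-form KL divergence between the
# encoder posterior and a unit Gaussian prior:
#   KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (mu^2 + sigma^2 - log(sigma^2) - 1)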
# Make instances
encoder = Encoder(
n_src_vocab=max_features,
use_cuda=use_cuda,
)
decoder = Generator(
n_target_vocab=max_features,
c_dim=c_dim,
use_cuda=use_cuda,
)
discriminator = Discriminator(
n_src_vocab=max_features,
maxlen=maxlen,
use_cuda=use_cuda,
)
encoder = check_cuda(encoder, use_cuda)
decoder = check_cuda(decoder, use_cuda)
discriminator = check_cuda(discriminator, use_cuda)
criterion = torch.nn.CrossEntropyLoss()
vae_parameters = list(encoder.parameters()) + list(decoder.parameters())
vae_opt = Adam(vae_parameters)
e_opt = Adam(encoder.parameters())
g_opt = Adam(decoder.parameters())
d_opt = Adam(discriminator.parameters())
def train_discriminator(discriminator):
# TODO: empirical Shannon entropy
print_epoch = 0
for epoch_index in range(epoch):
for batch, index in enumerate(range(0, len(x_train) - 1, batch_size)):
discriminator.train()
input_data, output_data = get_batch_label(
x_train,
y_train,
index,
batch_size
)
discriminator.zero_grad()
output = discriminator(input_data)
loss = criterion(output, output_data)
loss.backward()
d_opt.step()
if batch % 25 == 0:
print("[Discriminator] Epoch {} batch {}'s loss: {}".format(
epoch_index,
batch,
loss.data[0],
))
if print_epoch == epoch_index and print_epoch:
discriminator.eval()
print_epoch = epoch_index + 1
input_data, output_data = get_batch_label(x_test, y_test, 0, len(y_test), testing=True)
_, predicted = torch.max(discriminator(input_data).data, 1)
correct = (predicted == torch.from_numpy(y_test)).sum()
print("[Discriminator] Test accuracy {} %".format(
100 * correct / len(y_test)
))
def train_vae(encoder, decoder):
encoder.train()
decoder.train()
for epoch_index in range(epoch):
for batch, index in enumerate(range(0, len(x_train) - 1, batch_size)):
total_loss = 0
input_data, output_data = get_batch(x_train, index, batch_size)
encoder.zero_grad()
decoder.zero_grad()
vae_opt.zero_grad()
            # The last batch may not contain a full batch_size of samples,
            # so init. hidden with len(input_data) instead of batch_size
enc_hidden = encoder.init_hidden(len(input_data))
# Input of encoder is a batch of sequence.
enc_hidden = encoder(input_data, enc_hidden)
# Generate the random one-hot array from prior p(c)
# NOTE: Assume general distribution for now
random_one_dim = np.random.randint(c_dim, size=len(input_data))
one_hot_array = np.zeros((len(input_data), c_dim))
one_hot_array[np.arange(len(input_data)), random_one_dim] = 1
c = torch.from_numpy(one_hot_array).float()
var_c = Variable(c, requires_grad=False)
var_c = check_cuda(var_c, use_cuda)
# TODO: use iteration along first dim.
cat_hidden = (torch.cat([enc_hidden[0][0], var_c], dim=1).unsqueeze(0),
torch.cat([decoder.init_hidden_c_for_lstm(len(input_data))[0], var_c], dim=1).unsqueeze(0))
# Reshape output_data from (batch_size, seq_len) to (seq_len, batch_size)
output_data = output_data.permute(1, 0)
# Input of decoder is a batch of word-by-word.
for index, word in enumerate(output_data):
if index == len(output_data) - 1:
break
output, cat_hidden = decoder(word, cat_hidden)
next_word = output_data[index+1]
total_loss += criterion(output.view(-1, max_features), next_word)
# Train
avg_loss = total_loss.data[0] / maxlen
ll = latent_loss(encoder.z_mean, encoder.z_sigma)
total_loss += ll
total_loss.backward()
vae_opt.step()
if batch % 25 == 0:
print("[VAE] Epoch {} batch {}'s average language loss: {}, latent loss: {}".format(
epoch_index,
batch,
avg_loss,
ll.data[0],
))
def train_vae_with_attr_loss(encoder, decoder, discriminator):
for epoch_index in range(epoch):
for batch, index in enumerate(range(0, len(x_train) - 1, batch_size)):
encoder.zero_grad()
decoder.zero_grad()
e_opt.zero_grad()
g_opt.zero_grad()
vae_loss = 0
ll = 0
input_data, output_data = get_batch_label(x_train, y_train, index, batch_size)
enc_hidden = encoder.init_hidden(len(input_data))
enc_hidden = encoder(input_data, enc_hidden)
target = np.array([output_data.cpu().data.numpy()]).reshape(-1)
one_hot_array = np.eye(c_dim)[target]
c = torch.from_numpy(one_hot_array).float()
var_c = Variable(c, requires_grad=False)
var_c = check_cuda(var_c, use_cuda)
# TODO: use iteration along first dim.
cat_hidden = (torch.cat([enc_hidden[0][0], var_c], dim=1).unsqueeze(0),
torch.cat([decoder.init_hidden_c_for_lstm(len(input_data))[0], var_c], dim=1).unsqueeze(0))
batch_init_word = np.zeros((batch_size, max_features))
batch_init_word[np.arange(batch_size), Constants.BOS] = 1
batch_init_word = Variable(torch.from_numpy(batch_init_word), requires_grad=False).float()
batch_init_word = check_cuda(batch_init_word, use_cuda)
input_data = input_data.permute(1, 0)
for index in range(maxlen - 1):
if 'next_word' in locals():
word = next_word.squeeze(1)
word = check_cuda(word, use_cuda)
output, cat_hidden, pre_soft = decoder(
word,
cat_hidden,
low_temp=True,
one_hot_input=True
)
else:
word = batch_init_word
word = check_cuda(word, use_cuda)
output, cat_hidden, pre_soft = decoder(
word,
cat_hidden,
low_temp=True,
one_hot_input=True
)
# From one-hot to word embedding
next_word = output
correct_word = input_data[index+1]
vae_loss += criterion(pre_soft.view(-1, max_features), correct_word)
if len(batch_init_word.size()) == 2:
batch_init_word = batch_init_word.unsqueeze(1)
if len(next_word.size()) == 2:
next_word = next_word.unsqueeze(1)
batch_init_word = torch.cat([batch_init_word, next_word], dim=1)
# NOTE Latent loss
ll = latent_loss(encoder.z_mean, encoder.z_sigma)
# NOTE L_attr_c loss
generated_sentence = batch_init_word
discriminator.eval()
logit = discriminator(generated_sentence, dont_pass_emb=True)
l_attr_c = criterion(logit, output_data)
# NOTE L_attr_z loss
encoder.eval()
generated_sentence = decoder.one_hot_to_word_emb(generated_sentence)
encoded_gen = encoder.init_hidden(len(generated_sentence))
encoded_gen = encoder(generated_sentence, encoded_gen, dont_pass_emb=True)
l_attr_z = latent_loss(encoder.z_mean, encoder.z_sigma)
avg_loss = vae_loss.data[0] / maxlen
total_vae_loss = vae_loss + ll
extra_decoder_loss = lambda_c * l_attr_c + lambda_z * l_attr_z
total_vae_loss.backward()
#e_opt.step()
#extra_decoder_loss.backward()
#g_opt.step()
vae_opt.step()
if batch % 25 == 0:
print("[Attr] Epoch {} batch {}'s average language loss: {}, latent loss: {}".format(
epoch_index,
batch,
avg_loss,
ll.data[0],
))
print("l_attr_c loss: {}, l_attr_z loss: {}".format(
l_attr_c.data[0],
l_attr_z.data[0],
))
def main_alg(encoder, decoder, discriminator):
train_vae(encoder, decoder)
repeat_times = 10
for repeat_index in range(repeat_times):
train_discriminator(discriminator)
#train_discriminator(discriminator)
#train_vae(encoder, decoder)
train_vae_with_attr_loss(encoder, decoder, discriminator)
|
446654
|
import asyncio
from aiohttp import web
from aiohttp_request import ThreadContext, middleware_factory, grequest, get_request
def thread():
assert grequest['sense'] == 42
async def task():
# grequest is `lazy` version of request
assert grequest['sense'] == 42
# works for threads as well with ThreadContext
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, ThreadContext(thread))
async def hello(request):
# get_request is on-demand function to get current request
assert get_request() is request
request['sense'] = 42
await asyncio.ensure_future(task())
return web.Response(text="Hello, world")
app = web.Application(middlewares=[middleware_factory()])
app.add_routes([web.get('/', hello)])
web.run_app(app)
|
446668
|
from setuptools import find_packages
from setuptools import setup
import os
from glob import glob
package_name = 'simple_launch'
setup(
name=package_name,
version='1.0.0',
packages=find_packages('src', exclude=['test']),
package_dir={'': 'src'},
data_files=[
(os.path.join('share', package_name), ['package.xml']),
('share/ament_index/resource_index/packages', ['resource/' + package_name]),
# Include examples.
(os.path.join('share', package_name, 'example'), glob('example/[!_]*'))
],
install_requires=['setuptools'],
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/oKermorgant/simple_launch',
download_url='https://github.com/oKermorgant/simple_launch',
keywords=['ROS 2', 'launch'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Python helpers for the ROS 2 launch system',
license='MIT',
# scripts here.
entry_points={
'console_scripts': [
'frame_prefix_gazebo = simple_launch.frame_prefix_gazebo:main'
],
},
)
|
446699
|
class GRPCConfiguration:
def __init__(self, client_side: bool, server_string=None, user_agent=None,
message_encoding=None, message_accept_encoding=None,
max_message_length=33554432):
self._client_side = client_side
if client_side and server_string is not None:
raise ValueError("Passed client_side=True and server_string at the same time")
if not client_side and user_agent is not None:
raise ValueError("Passed user_agent put didn't pass client_side=True")
self._server_string = server_string
self._user_agent = user_agent
self._max_message_length = max_message_length
self._message_encoding = message_encoding
# TODO: this does not need to be passed in config, may be just a single global string
# with all encodings supported by grpclib
if message_accept_encoding is not None:
self._message_accept_encoding = ",".join(message_accept_encoding)
else:
self._message_accept_encoding = None
@property
def client_side(self):
return self._client_side
@property
def server_string(self):
return self._server_string
@property
def user_agent(self):
return self._user_agent
@property
def message_encoding(self):
return self._message_encoding
@property
def message_accept_encoding(self):
return self._message_accept_encoding
@property
def max_message_length(self):
return self._max_message_length
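# Minimal construction sketch; the user agent and encoding values below are illustrative.
if __name__ == "__main__":
    cfg = GRPCConfiguration(
        client_side=True,
        user_agent="example-client/1.0",
        message_encoding="gzip",
        message_accept_encoding=["identity", "gzip"],
    )
    assert cfg.message_accept_encoding == "identity,gzip"
    assert cfg.max_message_length == 33554432  # default cap of 32 MiB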
|
446712
|
import requests
from typing import Dict, Any, List, Union, Tuple, Any, Optional
from mist.action_run import execute_from_text
from mist.lang.config import config
import asyncio, json
from zipfile import ZipFile
from io import BytesIO
import os
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
''' CLIENT CLASS '''
class Client():
def __init__(self, base_url, verify):
self.base_url = base_url
self.verify = verify
def baseintegration_dummy(self, url, params) -> Tuple[str, dict]:
#TODO: download from git repo
req = requests.get(url)
if req.status_code == 200:
try:
if url.endswith(".zip"):
zipfile = ZipFile(BytesIO(req.content))
zipfile.extractall("./mist_tmp_zip")
os.chdir('./mist_tmp_zip')
with open("main.mist") as f:
mist_content = f.read()
else:
mist_content = req.content.decode("utf-8", "ignore")
output = asyncio.run(execute_from_text(mist_content, fn_params=params))
result = {"url": url, "raw_output": output}
try:
output_json = json.loads(output)
for k,v in output_json.items():
result[k] = v
except:
pass
return "ok", result
except Exception as e:
return "mist file error", {"url": url, "output": str(e)}
finally:
if os.path.abspath(os.path.curdir).endswith("mist_tmp_zip"):
os.chdir('..')
os.system("rm -rf ./mist_tmp_zip")
return "network error", {"url": url, "output": str(req.status_code)}
''' HELPER FUNCTIONS '''
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
message: str = ''
try:
# TODO: ADD HERE some code to test connectivity and authentication to your service.
# This should validate all the inputs given in the integration configuration panel,
# either manually or by using an API that uses them.
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e): # TODO: make sure you capture authentication errors
message = 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return message
def baseintegration_dummy_command(client: Client, url, params = "") -> Tuple[str, dict]: #CommandResults:
if not url:
raise ValueError('url not specified')
params_dict = {}
if params:
try:
params_json = json.loads(params)
for p in params_json:
f = p.strip().split("=")
params_dict[f[0].strip()] = f[1].strip()
except:
raise ValueError('Malformed params.')
# Call the Client function and get the raw response
result = client.baseintegration_dummy(url, params_dict)
# return CommandResults(
# outputs_prefix='BaseIntegration',
# outputs_key_field='',
# outputs=result,
# )
#return "BaseIntegration.Output", {}, result
return result
# TODO: ADD additional command functions that translate XSOAR inputs/outputs to Client
''' MAIN FUNCTION '''
def main():
    params = demisto.params()  # Instance configuration from the integration settings section
    args = demisto.args()  # Integration arguments (from the playbook)
    command = demisto.command()  # The command name (from the playbook)
client = Client(
"",
False
#argToList(params.get('urls')),
# params.get('credentials', {}).get('identifier'),
# params.get('credentials', {}).get('password'),
# params['database'],
# bool(params.get('use_ssl', False)),
# bool(params.get('insecure', False))
)
commands = {
'test-module': test_module,
'baseintegration-dummy': baseintegration_dummy_command
}
try:
return_outputs(*commands[command](client, **args)) # type: ignore[operator]
except Exception as e:
return_error(f'MistLang: {str(e)}', error=e)
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
446729
|
import math
from builtins import object
import numpy as np
import numba as nb
@nb.vectorize
def _pow(x, y):
return math.pow(x, y)
@nb.vectorize
def _log10(x):
return math.log10(x)
class ParameterTransformation(object):
def __init__(self, is_positive=False):
self._is_positive = is_positive
@property
def is_positive(self):
return self._is_positive
def forward(self, external_value):
raise NotImplementedError("You have to implement this")
def backward(self, internal_value):
raise NotImplementedError("You have to implement this")
class LogarithmicTransformation(ParameterTransformation):
def __init__(self):
super(LogarithmicTransformation, self).__init__(is_positive=True)
def forward(self, external_value, vector=False):
# Throw an error if taking the logarithm of a negative number (or nan)
with np.errstate(invalid='raise'):
# math is 4 times faster than numpy here
res = _log10(external_value)
return res
def backward(self, internal_value):
# math is 10x faster than numpy or numba
return _pow(10., internal_value)
_known_transformations = {'log10': LogarithmicTransformation}
def get_transformation(transformation_name):
"""
Returns an instance of a transformation by name
:param transformation_name:
:return: instance of transformation with provided name
"""
    if transformation_name not in _known_transformations:
raise ValueError("Transformation %s is not known" % transformation_name)
else:
return _known_transformations[transformation_name]()
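# Minimal round-trip sketch (numba is required, as imported above); the input value is illustrative.
if __name__ == "__main__":
    trans = get_transformation("log10")
    internal = trans.forward(100.0)       # -> 2.0
    external = trans.backward(internal)   # -> 100.0
    assert np.isclose(external, 100.0)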
|
446749
|
class PlayFabBaseObject():
pass
class PlayFabRequestCommon(PlayFabBaseObject):
"""
This is a base-class for all Api-request objects.
It is currently unfinished, but we will add result-specific properties,
and add template where-conditions to make some code easier to follow
"""
pass
class PlayFabResultCommon(PlayFabBaseObject):
"""
This is a base-class for all Api-result objects.
It is currently unfinished, but we will add result-specific properties,
and add template where-conditions to make some code easier to follow
"""
pass
|
446762
|
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_raises
from ...tools import power, rot_ksamp
from .. import MANOVA
class TestManova:
@pytest.mark.parametrize(
"n, obs_stat, obs_pvalue",
[(1000, 0.005062841807278008, 1.0), (100, 8.24e-5, 0.9762956529114515)],
)
def test_linear_oned(self, n, obs_stat, obs_pvalue):
np.random.seed(123456789)
x, y = rot_ksamp("linear", n, 1, k=2, noise=False)
stat, pvalue = MANOVA().test(x, y)
assert_almost_equal(stat, obs_stat, decimal=1)
assert_almost_equal(pvalue, obs_pvalue, decimal=1)
class TestManovaErrorWarn:
"""Tests errors and warnings derived from MGC."""
def test_no_indeptest(self):
# raises error if not indep test
x = np.arange(100).reshape(5, 20)
y = np.arange(50, 150).reshape(5, 20)
assert_raises(ValueError, MANOVA().test, x, y)
class TestManovaTypeIError:
def test_oned(self):
np.random.seed(123456789)
est_power = power(
"MANOVA",
sim_type="gauss",
sim="multimodal_independence",
case=1,
n=100,
alpha=0.05,
)
assert est_power <= 0.05
|
446788
|
from setuptools import setup, find_packages
import os
long_description = open('README.rst').read()
VERSION = '0.0.4'
package_data = {"github_bot_close_inactive_issues": ["logging.conf"]}
setup(name='github-bot-close-inactive-issues',
version=VERSION,
description='Bot for automatically closing inactive issues in GitHub repositories',
url='https://github.com/bstriner/github-bot',
download_url='https://github.com/bstriner/github-bot-close-inactive-issues/tarball/v{}'.format(VERSION),
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
package_data=package_data,
long_description=long_description,
keywords=['github', 'issues', 'inactive', 'abandoned'],
license='MIT',
classifiers=[
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
],
entry_points={
'console_scripts': [
'github-rate-limit=github_bot_close_inactive_issues.rate_limit:main',
'github-close-inactive-issues=github_bot_close_inactive_issues.close_inactive_issues:main'
]
},
install_requires=[
'PyGithub','pyyaml'
]
)
|
446804
|
import os
import json
val= json.load(open('quora_prepro_test.json', 'r'))
out=[]
outermost = {}
for i in range(0,len(val)):
# imgid = str(val[i]['ques_id'])[:-1]
# qcapid = val[i]['ques_cap_id']
# quesid = val[i]['question_index']
# ques = val[i]['question']
# imgpath = val[i]['image_filename']
# img_id = val[i]['image_index']
jimg = {}
#jimg['caption'] = capt
jimg['question'] = val[i]['question']
jimg['question1'] = val[i]['question1']
# jimg['img_path'] = imgpath
jimg['ques_id'] = val[i]['ques_id']
jimg['ques_id1'] = val[i]['ques_id1']
# jimg['para_id'] = paraid
jimg['para_id'] = int(val[i]['id'])
##########################################################
out.append(jimg)
outermost['questions'] = out
print(len(out))
json.dump(outermost, open('quora_prepro_test_updated_int.json', 'w'))
|
446811
|
import requests
import json
import hashlib
import time
from Crypto.Cipher import AES
from src.plugins import PluginManager
from lib.conf.config import settings
from concurrent.futures import ThreadPoolExecutor
class Base(object):
pool = ThreadPoolExecutor(10)
def api_encrypt(self):
"""
api接口验证
:return:
"""
key = settings.AUTHKEY
ctime = time.time()
auth_key = "%s|%s" % (key, ctime)
m = hashlib.md5()
m.update(bytes(auth_key, encoding='utf-8'))
md5_key = m.hexdigest()
md5_time_key = "%s|%s" % (md5_key, ctime)
return md5_time_key
def data_encrypt(self, data):
"""
post数据加密
:param data:
:return:
"""
key = settings.DATAKEY
cipher = AES.new(key, AES.MODE_CBC, key)
ba_data = bytearray(data, encoding='utf-8')
v1 = len(ba_data)
v2 = v1 % 16
if v2 == 0:
v3 = 16
else:
v3 = 16 - v2
for i in range(v3):
ba_data.append(v3)
final_data = ba_data.decode('utf-8')
# print(type(final_data), 'fina_data')
cipher_data = cipher.encrypt(bytes(final_data, encoding='utf-8'))
return cipher_data
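    # Hedged sketch (not in the original agent): the inverse of data_encrypt.
    # data_decrypt is a hypothetical helper; it assumes the receiver shares
    # settings.DATAKEY and strips the PKCS#7-style padding added above
    # (the last byte of the plaintext encodes the pad length).
    def data_decrypt(self, cipher_data):
        key = settings.DATAKEY
        cipher = AES.new(key, AES.MODE_CBC, key)
        padded = cipher.decrypt(cipher_data)
        pad_len = padded[-1]
        return padded[:-pad_len].decode('utf-8')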
def post_asset(self, server_info):
"""向API发送数据"""
server_info = json.dumps(server_info)
requests.post(settings.API, headers={'OpenKey': self.api_encrypt()}, data=self.data_encrypt(server_info))
class Agent(Base):
"""
Agent方式采集资产信息
"""
def execute(self):
server_info = PluginManager().exec_plugin()
hostname = server_info['Basic']['data']['hostname']
cert_hostname = open(settings.CERT_PATH, 'r', encoding='utf-8').read().strip()
if not cert_hostname:
            with open(settings.CERT_PATH, 'w', encoding='utf-8') as f:
                f.write(hostname)
else:
server_info['Basic']['data']['hostname'] = cert_hostname
self.post_asset(server_info)
# print(server_info)
class SSH_SALT(Base):
"""
SSH或RPC方式采集资产信息
"""
def get_host(self):
        # First fetch, from the backend, the list of hosts to collect info from
response = requests.get(settings.API)
        # {'status': True, 'data': ['host1', 'host2', ...]}
result = json.loads(response.text)
if not result['status']:
return
return result['data']
def run(self, host):
server_info = PluginManager(hostname=host).exec_plugin()
self.post_asset(server_info)
def execute(self):
host_list = self.get_host()
for host in host_list:
self.pool.submit(self.run, host)
|
446865
|
from visions.types import (
Boolean,
Categorical,
Complex,
DateTime,
Float,
Generic,
Integer,
Object,
String,
TimeDelta,
)
from visions.typesets.typeset import VisionsTypeset
class StandardSet(VisionsTypeset):
"""The standard visions typesets
Includes support for the following types:
- Float
- Integer
- Boolean
- Object
- String
- Complex
- Categorical
- DateTime
- TimeDelta
"""
def __init__(self) -> None:
types = {
Generic,
Boolean,
Float,
Object,
Complex,
Categorical,
DateTime,
TimeDelta,
Integer,
String,
}
super().__init__(types)
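# Hedged usage sketch (detect_type is an assumption taken from the visions
# documentation, not from this file):
#
#     import pandas as pd
#     typeset = StandardSet()
#     typeset.detect_type(pd.Series([1, 2, 3]))   # expected: Integer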
|
446900
|
from .kmatch import K
class KmatchTestMixin(object):
"""
A mixin for test classes to perform kmatch validation on dictionaries
"""
def assertKmatches(self, pattern, value, suppress_key_errors=False):
"""
Assert that the value matches the kmatch pattern.
:type pattern: list
:param pattern: The kmatch pattern
:type value: dict
:param value: The dictionary to evaluate
:type suppress_key_errors: bool
:param suppress_key_errors: Suppress KeyError exceptions on filters and return False instead. False by default
:raises:
* :class:`KeyError <exceptions.KeyError>` if key from pattern does not exist in input value and the \
suppress_key_errors class variable is False
* :class:`AssertionError <exceptions.AssertionError>` if the value **does not** match the pattern
"""
assert K(pattern, suppress_key_errors=suppress_key_errors).match(value)
def assertNotKmatches(self, pattern, value, suppress_key_errors=True):
"""
        Assert that the value does **not** match the kmatch pattern.
:type pattern: list
:param pattern: The kmatch pattern
:type value: dict
:param value: The dictionary to evaluate
:type suppress_key_errors: bool
:param suppress_key_errors: Suppress KeyError exceptions on filters and return False instead. True by default
:raises:
* :class:`KeyError <exceptions.KeyError>` if key from pattern does not exist in input value and the \
suppress_key_errors class variable is False
* :class:`AssertionError <exceptions.AssertionError>` if the value **does match** the pattern
"""
assert not K(pattern, suppress_key_errors=suppress_key_errors).match(value)
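# Hedged usage sketch (the ['==', key, value] pattern syntax is an assumption
# taken from the kmatch documentation, not from this file):
#
#     class ExampleTest(KmatchTestMixin, unittest.TestCase):
#         def test_status(self):
#             self.assertKmatches(['==', 'status', 'ok'], {'status': 'ok'})
#             self.assertNotKmatches(['==', 'status', 'ok'], {'status': 'down'})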
|
446905
|
from loguru import logger
from datetime import datetime
from pymysql import Connection
from pymysql.cursors import Cursor
from typing import Optional
def newHoldemRecord(db: Connection, userID: int, money: int, tableID: int, tableUUID: str) -> bool:
if db is None:
return False
now: datetime = datetime.utcnow()
currentTime: str = now.strftime("%Y-%m-%d %H:%M:%S")
try:
cursor: Cursor = db.cursor()
cursor.execute(f"INSERT INTO `holdemGameRecord` (`userID`, `moneyInvested`, `status`, `tableID`, `time`, `tableUUID`) VALUES ({userID}, {money}, 0, {tableID}, '{currentTime}', '{tableUUID}');")
db.commit()
except Exception as err:
logger.error(err)
return False
return True
def removeHoldemRecord(db: Connection, userID: int, tableUUID: str) -> bool:
if db is None:
return False
try:
cursor: Cursor = db.cursor()
cursor.execute(f"DELETE FROM `holdemGameRecord` WHERE `userID` = {userID} AND `tableUUID` = '{tableUUID}';")
db.commit()
except Exception as err:
logger.error(err)
return False
return True
def getHoldemRecord(db: Connection, userID: int, tableUUID: str) -> Optional[tuple]:
if db is None:
return None
try:
cursor: Cursor = db.cursor()
cursor.execute(f"SELECT * FROM `holdemGameRecord` WHERE `userID` = '{userID}' AND `tableUUID` = '{tableUUID}';")
result: tuple = cursor.fetchone()
except Exception as err:
logger.error(err)
return None
return result
def setHoldemRecordStatus(db: Connection, userID: int, tableUUID: str, status: int) -> bool:
"""
:param db:
:param userID:
:param tableUUID:
:param status:
0 represent in progress; 1 represent lose or fold; 2 represent win; 3 represent game close
:return:
"""
if db is None:
return False
try:
cursor: Cursor = db.cursor()
sql: str = f"UPDATE `holdemGameRecord` SET `status` = {status} WHERE `holdemGameRecord`.`userID` = '{userID}' AND `holdemGameRecord`.`tableUUID` = '{tableUUID}';"
cursor.execute(sql)
db.commit()
except Exception as err:
logger.error(err)
return False
return True
def addMoneyToHoldemRecord(db: Connection, userID: int, tableUUID: str, money: int) -> bool:
if db is None:
return False
try:
cursor: Cursor = db.cursor()
sql = f"UPDATE `holdemGameRecord` SET `moneyInvested` = `moneyInvested` + {money} WHERE `userID` = '{userID}' AND `tableUUID` = '{tableUUID}';"
cursor.execute(sql)
db.commit()
except Exception as err:
logger.error(err)
return False
return True
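# Hedged sketch (not part of the original module): the queries above build SQL
# with f-strings, which is open to SQL injection. pymysql supports
# parameterized queries via the args argument of cursor.execute(); a safer
# variant of newHoldemRecord (hypothetical name) could look like this:
def newHoldemRecordSafe(db: Connection, userID: int, money: int, tableID: int, tableUUID: str) -> bool:
    if db is None:
        return False
    currentTime: str = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
    try:
        cursor: Cursor = db.cursor()
        cursor.execute(
            "INSERT INTO `holdemGameRecord` "
            "(`userID`, `moneyInvested`, `status`, `tableID`, `time`, `tableUUID`) "
            "VALUES (%s, %s, 0, %s, %s, %s);",
            (userID, money, tableID, currentTime, tableUUID),
        )
        db.commit()
    except Exception as err:
        logger.error(err)
        return False
    return True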
|
446910
|
from os import makedirs
from os.path import exists
from traceback import print_exc
from gi.repository.GLib import get_user_config_dir, get_user_cache_dir, \
log_default_handler, LogLevelFlags
from gi.repository.Gtk import Orientation
try:
from configparser import ConfigParser, NoSectionError, NoOptionError
except ImportError:
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from formiko.renderer import PARSERS
class View():
EDITOR = 1
PREVIEW = 2
BOTH = 3
def __new__(cls, value):
value = int(value)
assert 0 < value < 4
return value
def smart_bool(value):
if value.lower() in ("1", "true", "yes", "on", "enable"):
return True
elif value.lower() in ("0", "false", "no", "off", "disable"):
return False
raise ValueError("%s is not boolean value" % value)
class SmartParser(ConfigParser):
def smart_get(self, obj, key, conv=str, sec='main'):
try:
val = self.get(sec, key)
setattr(obj, key, conv(val))
except NoSectionError:
pass
except NoOptionError:
pass
except Exception:
print_exc()
def smart_set(self, obj, key, sec='main'):
self.set(sec, key, str(getattr(obj, key)))
class EditorPreferences(object):
period_save = True
check_spelling = True
spell_lang = ""
spaces_instead_of_tabs = False
tab_width = 8
auto_indent = True
line_numbers = True
right_margin = True
current_line = False
text_wrapping = True
white_chars = False
class UserPreferences(object):
preview = Orientation.HORIZONTAL.numerator
auto_scroll = True
parser = 'rst'
writer = 'html4'
style = ''
custom_style = False
editor = EditorPreferences()
def __init__(self):
self.load()
def load(self):
directory = get_user_config_dir()
cp = SmartParser()
cp.read("%s/formiko.ini" % directory)
cp.smart_get(self, 'preview', int)
cp.smart_get(self, 'auto_scroll', smart_bool)
cp.smart_get(self, 'parser')
if self.parser not in PARSERS:
log_default_handler("Application", LogLevelFlags.LEVEL_WARNING,
"Unknow parser `%s' in config, set default."
% self.parser)
self.parser = 'rst'
cp.smart_get(self, 'writer')
cp.smart_get(self, 'style')
cp.smart_get(self, 'custom_style', smart_bool)
cp.smart_get(self.editor, 'period_save', smart_bool, 'editor')
cp.smart_get(self.editor, 'check_spelling', smart_bool, 'editor')
cp.smart_get(self.editor, 'spell_lang', str, 'editor')
cp.smart_get(self.editor, 'spaces_instead_of_tabs', smart_bool,
'editor')
cp.smart_get(self.editor, 'tab_width', int, 'editor')
cp.smart_get(self.editor, 'auto_indent', smart_bool, 'editor')
cp.smart_get(self.editor, 'line_numbers', smart_bool, 'editor')
cp.smart_get(self.editor, 'right_margin', smart_bool, 'editor')
cp.smart_get(self.editor, 'current_line', smart_bool, 'editor')
cp.smart_get(self.editor, 'text_wrapping', smart_bool, 'editor')
cp.smart_get(self.editor, 'white_chars', smart_bool, 'editor')
def save(self):
cp = SmartParser()
cp.add_section('main')
cp.set('main', 'preview', str(int(self.preview)))
cp.smart_set(self, 'auto_scroll')
cp.smart_set(self, 'parser')
cp.smart_set(self, 'writer')
cp.smart_set(self, 'style')
cp.smart_set(self, 'custom_style')
cp.add_section('editor')
cp.smart_set(self.editor, 'period_save', 'editor')
cp.smart_set(self.editor, 'check_spelling', 'editor')
cp.smart_set(self.editor, 'spell_lang', 'editor')
cp.smart_set(self.editor, 'spaces_instead_of_tabs', 'editor')
cp.smart_set(self.editor, 'tab_width', 'editor')
cp.smart_set(self.editor, 'auto_indent', 'editor')
cp.smart_set(self.editor, 'line_numbers', 'editor')
cp.smart_set(self.editor, 'right_margin', 'editor')
cp.smart_set(self.editor, 'current_line', 'editor')
cp.smart_set(self.editor, 'text_wrapping', 'editor')
cp.smart_set(self.editor, 'white_chars', 'editor')
directory = get_user_config_dir()
if not exists(directory):
makedirs(directory)
with open("%s/formiko.ini" % directory, 'w+') as fp:
cp.write(fp)
class UserCache(object):
width = 800
height = 600
paned = 400
is_maximized = False
view = View.BOTH
def __init__(self):
self.load()
def load(self):
directory = get_user_cache_dir()
cp = SmartParser()
cp.read("%s/formiko/window.ini" % directory)
cp.smart_get(self, 'width', int)
cp.smart_get(self, 'height', int)
cp.smart_get(self, 'paned', int)
cp.smart_get(self, 'is_maximized', smart_bool)
cp.smart_get(self, 'view', View)
def save(self):
cp = SmartParser()
cp.add_section('main')
cp.set('main', 'width', str(self.width))
cp.set('main', 'height', str(self.height))
cp.set('main', 'paned', str(self.paned))
cp.set('main', 'is_maximized', str(self.is_maximized))
cp.set('main', 'view', str(self.view))
directory = get_user_cache_dir()+"/formiko"
if not exists(directory):
makedirs(directory)
with open("%s/window.ini" % directory, 'w+') as fp:
cp.write(fp)
|
446929
|
import pickle
import random
import numpy as np
from scipy.stats import rankdata
import torch
import torch.autograd as autograd
import torch.utils.data as data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class AnswerSelection(nn.Module):
def __init__(self, conf):
super(AnswerSelection, self).__init__()
self.vocab_size = conf['vocab_size']
self.hidden_dim = conf['hidden_dim']
self.embedding_dim = conf['embedding_dim']
self.question_len = conf['question_len']
self.answer_len = conf['answer_len']
self.batch_size = conf['batch_size']
self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)
self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim // 2, num_layers=1, bidirectional=True, batch_first=True)
self.cnns = nn.ModuleList([nn.Conv1d(self.hidden_dim, 500, filter_size, stride=1, padding=filter_size-(i+1)) for i, filter_size in enumerate([1,3,5])])
self.question_maxpool = nn.MaxPool1d(self.question_len, stride=1)
self.answer_maxpool = nn.MaxPool1d(self.answer_len, stride=1)
self.dropout = nn.Dropout(p=0.2)
self.init_weights()
self.hiddenq = self.init_hidden(self.batch_size)
self.hiddena = self.init_hidden(self.batch_size)
def init_hidden(self, batch_len):
return (autograd.Variable(torch.randn(2, batch_len, self.hidden_dim // 2)).cuda(),
autograd.Variable(torch.randn(2, batch_len, self.hidden_dim // 2)).cuda())
def init_weights(self):
initrange = 0.1
self.word_embeddings.weight.data.uniform_(-initrange, initrange)
def forward(self, question, answer):
question_embedding = self.word_embeddings(question)
answer_embedding = self.word_embeddings(answer)
q_lstm, self.hiddenq = self.lstm(question_embedding, self.hiddenq)
a_lstm, self.hiddena = self.lstm(answer_embedding, self.hiddena)
q_lstm = q_lstm.contiguous()
a_lstm = a_lstm.contiguous()
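        # NOTE: the two assignments below overwrite the LSTM outputs with the
        # raw embeddings, so only the embeddings reach the CNNs (this only
        # lines up because hidden_dim == embedding_dim in the config).
        # Also note the view() below reinterprets (batch, len, dim) memory
        # rather than transposing; permute(0, 2, 1) would be the usual way to
        # get channels-first input for Conv1d.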
q_lstm = question_embedding
a_lstm = answer_embedding
q_lstm = q_lstm.view(-1,self.hidden_dim, self.question_len)
a_lstm = a_lstm.view(-1,self.hidden_dim, self.answer_len)
question_pool = []
answer_pool = []
for cnn in self.cnns:
question_conv = cnn(q_lstm)
answer_conv = cnn(a_lstm)
question_max_pool = self.question_maxpool(question_conv)
answer_max_pool = self.answer_maxpool(answer_conv)
question_activation = F.tanh(torch.squeeze(question_max_pool))
answer_activation = F.tanh(torch.squeeze(answer_max_pool))
question_pool.append(question_activation)
answer_pool.append(answer_activation)
question_output = torch.cat(question_pool, dim=1)
answer_output = torch.cat(answer_pool, dim=1)
question_output = self.dropout(question_output)
answer_output = self.dropout(answer_output)
similarity = F.cosine_similarity(question_output, answer_output, dim=1)
return similarity
def fit(self, questions, good_answers, bad_answers):
good_similarity = self.forward(questions, good_answers)
bad_similarity = self.forward(questions, bad_answers)
zeros = autograd.Variable(torch.zeros(good_similarity.size()[0]), requires_grad=False).cuda()
margin = autograd.Variable(torch.linspace(0.05,0.05,good_similarity.size()[0]), requires_grad=False).cuda()
        loss = torch.max(zeros, autograd.Variable.sub(margin, autograd.Variable.sub(good_similarity, bad_similarity)))  # hinge: max(0, margin - (good - bad))
#similarity = torch.stack([good_similarity,bad_similarity],dim=1)
#loss = torch.squeeze(torch.stack(map(lambda x: F.relu(0.05 - x[0] + x[1]), similarity), dim=0))
accuracy = torch.eq(loss,zeros).type(torch.DoubleTensor).mean()
return loss.sum(), accuracy.data[0]
class Evaluate():
def __init__(self, conf):
self.conf = conf
self.all_answers = self.load('answers')
self.vocab = self.load('vocabulary')
self.conf['vocab_size'] = len(self.vocab) + 1
if conf['mode'] == 'train':
print "Training"
self.model = AnswerSelection(self.conf)
if conf['resume']:
self.model.load_state_dict(torch.load("saved_model/answer_selection_model_cnnlstm"))
self.model.cuda()
self.train()
if conf['mode'] == 'test':
print "Testing"
self.model = AnswerSelection(self.conf)
self.validate()
def load(self, name):
return pickle.load(open('insurance_qa_python/'+name))
def pad_question(self, data):
return self.pad(data, self.conf.get('question_len', None))
def pad_answer(self, data):
return self.pad(data, self.conf.get('answer_len', None))
def id_to_word(self, sentence):
return [self.vocab.get(i,'<PAD>') for i in sentence]
def pad(self, data, max_length):
for i, item in enumerate(data):
if len(item) >= max_length:
data[i] = item[:max_length]
elif len(item) < max_length:
data[i] += [0] * (max_length - len(item))
return data
def train(self):
batch_size = self.conf['batch_size']
epochs = self.conf['epochs']
training_set = self.load('train')
questions = list()
good_answers = list()
for i, q in enumerate(training_set):
questions += [q['question']] * len(q['answers'])
good_answers += [self.all_answers[j] for j in q['answers']]
questions = torch.LongTensor(self.pad_question(questions))
good_answers = torch.LongTensor(self.pad_answer(good_answers))
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.conf['learning_rate'])
for i in xrange(epochs):
bad_answers = torch.LongTensor(self.pad_answer(random.sample(self.all_answers.values(), len(good_answers))))
train_loader = data.DataLoader(dataset=torch.cat([questions,good_answers,bad_answers],dim=1), batch_size=batch_size)
avg_loss = []
avg_acc = []
self.model.train()
for step, train in enumerate(train_loader):
batch_question = autograd.Variable(train[:,:self.conf['question_len']]).cuda()
batch_good_answer = autograd.Variable(train[:,self.conf['question_len']:self.conf['question_len']+self.conf['answer_len']]).cuda()
batch_bad_answer = autograd.Variable(train[:,self.conf['question_len']+self.conf['answer_len']:]).cuda()
optimizer.zero_grad()
self.model.hiddenq = self.model.init_hidden(len(train))
self.model.hiddena = self.model.init_hidden(len(train))
loss, acc = self.model.fit(batch_question, batch_good_answer, batch_bad_answer)
avg_loss.append(loss.data[0])
avg_acc.append(acc)
loss.backward()
torch.nn.utils.clip_grad_norm(self.model.parameters(), 0.25)
optimizer.step()
print "Epoch: {0} Epoch Average loss: {1} Accuracy {2}".format(str(i), str(np.mean(avg_loss)), str(np.mean(avg_acc)))
torch.save(self.model.state_dict(), "saved_model/answer_selection_model_cnnlstm")
if i % 50 == 0 and i > 0:
self.validate(validation=True)
def get_eval_sets(self, validation=False):
if validation:
return dict([(s, self.load(s)) for s in ['dev']])
return dict([(s, self.load(s)) for s in ['test1', 'test2']])
def validate(self, validation=False):
self.model.load_state_dict(torch.load("saved_model/answer_selection_model_cnnlstm"))
#self.model = torch.load("saved_model/answer_selection_model")
self.model.cuda()
self.model.lstm.flatten_parameters()
eval_datasets = self.get_eval_sets(validation)
for name, dataset in eval_datasets.iteritems():
#index = 0
#score_list = []
print "Now evaluating : " + name
#questions = list()
#answers = list()
self.model.eval()
'''
for i, d in enumerate(dataset):
indices = d['good'] + d['bad']
answers += [self.all_answers[i] for i in indices]
questions += [d['question']]*len(indices)
questions = torch.LongTensor(self.pad_question(questions))
answers = torch.LongTensor(self.pad_answer(answers))
test_loader = data.DataLoader(dataset=torch.cat([questions,answers],dim=1), batch_size=self.conf['batch_size'], shuffle=True)
for step, test in enumerate(test_loader):
batch_question = autograd.Variable(test[:,:self.conf['question_len']]).cuda()
batch_answer = autograd.Variable(test[:,self.conf['question_len']:]).cuda()
self.model.hiddena = self.model.init_hidden(batch_answer.size()[0])
self.model.hiddenq = self.model.init_hidden(batch_question.size()[0])
similarity = self.model.forward(question,answers)
score_list.append(similarity.cpu.data.numpy())
sdict = {}
'''
#Doesn't Work -- Maybe -- from Keras implementation
c_1, c_2 = 0, 0
for i, d in enumerate(dataset):
if i%10 == 0:
print "Progress : {0:.2f}%".format(float(i)/len(dataset)*100),"\r",
indices = d['good'] + d['bad']
answers = autograd.Variable(torch.LongTensor(self.pad_answer([self.all_answers[i] for i in indices]))).cuda()
question = autograd.Variable(torch.LongTensor(self.pad_question([d['question']]*len(indices)))).cuda()
self.model.hiddena = self.model.init_hidden(answers.size()[0])
self.model.hiddenq = self.model.init_hidden(question.size()[0])
similarity = self.model.forward(question,answers)
similarity = similarity.cpu().data.numpy()
max_r = np.argmax(similarity)
max_n = np.argmax(similarity[:len(d['good'])])
r = rankdata(similarity, method='max')
c_1 += 1 if max_r == max_n else 0
c_2 += 1 / float(r[max_r] - r[max_n] + 1)
top1 = c_1 / float(len(dataset))
mrr = c_2 / float(len(dataset))
print('Top-1 Precision: %f' % top1)
print('MRR: %f' % mrr)
conf = {
'question_len':20,
'answer_len':150,
'batch_size':256,
'epochs':10000,
'embedding_dim':512,
'hidden_dim':512,
'learning_rate':0.01,
'margin':0.05,
'mode':'test',
'resume':1
}
ev = Evaluate(conf)
|
446932
|
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
class ModuleGroup(models.Model):
class Meta:
verbose_name_plural = _('Module Groups')
verbose_name = _('Module Group')
ordering = ['sort']
def __str__(self):
return self.name
slug = models.SlugField(unique=True, verbose_name=_('Slug'))
name = models.CharField(max_length=64, verbose_name=_('Name'))
fa = models.SlugField(verbose_name=_('FontAwesome class'))
sort = models.IntegerField(default=1, verbose_name=_('Sort'))
description = models.TextField(default='', verbose_name=_('Description'),
blank=True)
class Module(models.Model):
class Meta:
verbose_name_plural = _('Modules')
verbose_name = _('Module')
ordering = ['group', 'sort']
def __str__(self):
return self.group.name + ' / ' + self.name
group = models.ForeignKey(ModuleGroup, to_field='slug', verbose_name=_('Group'),
related_name='modules',
on_delete=models.CASCADE)
name = models.CharField(max_length=64, verbose_name=_('Name'))
sort = models.IntegerField(default=1, verbose_name=_('Sort'))
description = models.TextField(default='', verbose_name=_('Description'),
blank=True)
slug = models.SlugField(unique=True, verbose_name=_('Slug'))
config = JSONField(null=True, blank=True, verbose_name=_('Config'))
user = models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name=_('User'))
is_enabled = models.BooleanField(default=True, verbose_name=_('Is Enabled'))
class ModulePermission(models.Model):
class Meta:
verbose_name_plural = _('Module permissions')
verbose_name = _('Module permission')
ordering = ['id']
def __str__(self):
return str(_(f'Module permission for {self.user}, {str(self.module)}'))
module = models.ForeignKey(Module, verbose_name=_('Module'),
related_name='module_permissions',
on_delete=models.CASCADE)
user = models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name=_('User'),
related_name='user_module_permissions',
on_delete=models.CASCADE)
can_insert = models.BooleanField(default=True, verbose_name=_('Can insert data'))
can_update = models.BooleanField(default=True, verbose_name=_('Can update data'))
can_delete = models.BooleanField(default=True, verbose_name=_('Can delete data'))
date_changed = models.DateTimeField(auto_now=True)
|
446945
|
"""
Import the log object into your project to use fastlog.
This can be done simply by using:
>>> from fastlog import log
or
>>> from fastlog import *
"""
from . import term
from . import log as _log

log = _log.FastLogger()
|
446953
|
import logging
from amitools.vamos.cfgcore import ConfigDict
from amitools.vamos.log import *
def do_log():
log_main.debug("debug")
log_main.info("info")
log_main.warning("warn")
log_main.error("error")
log_mem.debug("debug")
log_mem.info("info")
log_mem.warning("warn")
log_mem.error("error")
def vamos_log_setup_default_test(caplog):
log_cfg = ConfigDict(
{
"file": None,
"quiet": False,
"verbose": False,
"timestamps": True,
"levels": None,
}
)
assert log_setup(log_cfg)
do_log()
assert caplog.record_tuples == [
("main", logging.WARNING, "warn"),
("main", logging.ERROR, "error"),
("mem", logging.WARNING, "warn"),
("mem", logging.ERROR, "error"),
]
def vamos_log_setup_verbose_test(caplog):
log_cfg = ConfigDict(
{
"file": None,
"quiet": False,
"verbose": True, # verbose sets main to info
"timestamps": True,
"levels": None,
}
)
assert log_setup(log_cfg)
do_log()
assert caplog.record_tuples == [
("main", logging.INFO, "info"),
("main", logging.WARNING, "warn"),
("main", logging.ERROR, "error"),
("mem", logging.WARNING, "warn"),
("mem", logging.ERROR, "error"),
]
def vamos_log_setup_quiet_test(caplog):
log_cfg = ConfigDict(
{
"file": None,
"quiet": True, # set all levels to error
"verbose": False,
"timestamps": True,
"levels": None,
}
)
assert log_setup(log_cfg)
do_log()
assert caplog.record_tuples == [
("main", logging.ERROR, "error"),
("mem", logging.ERROR, "error"),
]
def vamos_log_setup_levels_test(caplog):
log_cfg = ConfigDict(
{
"file": None,
"quiet": False,
"verbose": False,
"timestamps": True,
"levels": {"mem": "info"},
}
)
assert log_setup(log_cfg)
do_log()
assert caplog.record_tuples == [
("main", logging.WARN, "warn"),
("main", logging.ERROR, "error"),
("mem", logging.INFO, "info"),
("mem", logging.WARN, "warn"),
("mem", logging.ERROR, "error"),
]
def vamos_log_setup_levels_fail_test(caplog):
log_cfg = ConfigDict(
{
"file": None,
"quiet": False,
"verbose": False,
"timestamps": True,
"levels": {"foo": "info"},
}
)
assert not log_setup(log_cfg)
assert caplog.record_tuples == [("config", logging.ERROR, "invalid logger: foo")]
def vamos_log_setup_levels_fail2_test(caplog):
log_cfg = ConfigDict(
{
"file": None,
"quiet": False,
"verbose": False,
"timestamps": True,
"levels": {"mem": "foo"},
}
)
assert not log_setup(log_cfg)
assert caplog.record_tuples == [("config", logging.ERROR, "invalid log level: foo")]
|
446962
|
import uvicore
from app1.models.post import Post
from app1.models.comment import Comment
from app1.models.tag import Tag
from app1.models.hashtag import Hashtag
from app1.models.image import Image
from uvicore.support.dumper import dump, dd
from uvicore import log
@uvicore.seeder()
async def seed():
log.item('Seeding table posts')
# Get all tags keyed by name
tags = await Tag.query().key_by('name').get()
    # Get all hashtags keyed by name
hashtags = await Hashtag.query().key_by('name').get()
#post = PostModel(slug='test-post1', title='Test Post1', other='other stuff1', creator_id=1)
#await post.save()
    # Inline creation also works, but nested relations must be Dicts,
    # i.e. create the post with comments=[{dict}]
    # WORKS!!!
await Post.insert_with_relations([
{
'slug': 'test-post1',
'title': 'Test Post1',
'body': 'This is the body for test post1. I like the color red and green.',
'other': 'other stuff1',
'creator_id': 1,
'owner_id': 2,
'comments': [
{
'title': 'Post1 Comment1',
'body': 'Body for post1 comment1',
#'post_id': 1, # No id needed, thats what post.create() does
'creator_id': 1,
}
],
# Many-To-Many tags works with existing Model, new Model or new Dict
'tags': [
# Existing Tag
tags['linux'],
tags['mac'],
tags['bsd'],
                tags['bsd'],  # Yes, it's a duplicate, testing that it doesn't fail
# New Tag as Model (tag created and linked)
Tag(name='test1', creator_id=4),
# New Tag as Dict (tag created and linked)
{'name': 'test2', 'creator_id': 4},
],
# Polymorphic One-To-One
'image': {
'filename': 'post1-image.png',
'size': 1234932,
},
# Polymorphic One-To-Many Attributes
'attributes': [
{'key': 'post1-test1', 'value': 'value for post1-test1'},
{'key': 'post1-test2', 'value': 'value for post1-test2'},
{'key': 'badge', 'value': 'IT'},
],
# Polymorphic Many-To-Many Hashtags
'hashtags': [
hashtags['important'],
hashtags['outdated'],
                hashtags['outdated'],  # Yes, it's a duplicate, testing that it doesn't fail
# New hashtag by model
Hashtag(name='test1'),
# New hashtag by dict
{'name': 'test2'},
],
},
])
# Example of adding attributes later
post = await Post.query().find(1)
# ISSUE: How can we update an attribute that is a dict?
    # If it weren't a dict we could get it (post.attributes['badge']), change an attribute, then call post.attributes['badge'].save(), probably
    # But if it's a dict, how can we update a value? Doing a .create/.add like so
# await post.add('attributes', [
# {'key': 'post1-test2', 'value': 'xxxx'},
# ])
    # Gives us an IntegrityError due to models.py insert() around line 92. It assumes a bulk insert and cannot upsert.
    # If we don't pass a list it does a single insert, which will also fail with IntegrityError. I would have to add code
    # to know how to SELECT to see if a row exists based on PK or, in the case of polymorphism, all 3 or more poly columns.
    # Example of adding attributes whose value is also a Dict - DOES NOT WORK YET, needs auto-serialization, though I could serialize manually to str
# await post.add('attributes', [
# {'key': 'post1-test3', 'value': {
# 'this': 'value',
# 'is': 'a dict itself!'
# }},
# {'key': 'post1-test4', 'value': ['one', 'two', 'three']}
# ])
# # Blow out all attributes and set this complete List
# await post.set('attributes', [
# {'key': 'post1-test3', 'value': 'value for post1-test3'},
# {'key': 'post1-test4', 'value': 'value for post1-test4'},
# ])
# Example of setting all a Polymorphic Many-To-Many Hashtags - WORKS
# await post.set('hashtags', [
# {'name': 'test1'},
# {'name': 'test2'},
# ])
# Example of setting all Many-To-Many Tags - WORKS
# await post.set('tags', [
# tags['linux'],
# ])
# Example of deleting all Polymorphic Many-To-Many Hashtags - DELETE DOES NOT WORK FOR POLY MTM (as currently designed)
#await post.delete('hashtags')
# Example of deleting all One-To-Many comments - DELETE DOES NOT WORK FOR OTM (as currently designed)
#await post.delete('comments')
    # Example of deleting a HasOne child - WORKS
#post = await Post.query().find(1)
#await post.delete('image')
    # Example of linking Polymorphic Many-To-Many Hashtags - WORKS
# await post.link('hashtags', [
# hashtags.get('obsolete')
# ])
# Example of linking tags (does not create, only links EXISTING tags) - WORKS
# await post.link('tags', [
# # Linking can be EXISTING Dict
# # {
# # 'id': 1,
# # 'name': 'linux',
# # 'creator_id': 1,
# # }
# # Or existing Model
# tags.get('linux'),
# tags.get('mac'),
# tags.get('bsd'),
# ])
# Test unlink
# await post.unlink('tags', tags.get('linux')) # As not list
# await post.unlink('tags', [tags.get('mac')]) # As list
# await post.unlink('tags') # All
# Create (if not exists) AND link tags
# await post.create('tags', [
# tags['linux'], # Already exists, won't re-create
# Tag(id=1, name='linux', creator_id=1), # Already exists, should just link
# Tag(name='test1', creator_id=4), # Does not exist, should create and link
# {
# 'name': 'test2',
# 'creator_id': 4,
# }
# ])
#post.create()
# Show Attributes
#post.attributes
# Create and link attributes
#post.create('attributes', [{'key': 'asdf', 'value': 'asdf'}])
# Delete and unlink attributes
# post.delete('attributes') # all
# post.delete('attributes', [attribute1, attribute2]) # by model
# post.delete('attributes', 'key1', 'key2') # not by pk, but secondary pk the "key" column somehow
# contacts table for a One-To-One Poly
# combined PK of table_name + table_pk for unique (so could get rid of ID column technically)
# id | table_name | table_pk | name | email | phone
# ------------------------------------------------------
# 1 | users | 1 | Matthew | @asdf | 555
# 2 | employee | 4 | Bob | @asdf | 444
# attributes table for a One-To-Many Poly
    # The only unique column has to be ID, or I suppose a combo of table_name+table_pk+key would do it; that would also be the composite index.
    # Then we could get rid of the ID column.
# id | table_name | table_pk | key | value
# -------------------------------------------
# 1 | users | 1 | name | matthew
# 2 | users | 1 | age | 37
# poly_tags pivot table for a Many-To-Many Poly
# entity_tags
# poly_tags
# tag_relations
# tag_linkage
# post_id | tag_id |
# table_name | table_pk | tag_id
# ------------------------------
# posts | 1 | 5
# posts | 1 | 6
# comments | 23 | 5
# comments | 23 | 7
# NO, add does not exist. Use create to make/link or link to just link
# .add() = create record and linkage
#post.query('attributes').add({'key': 'value'})
# this works NOW - it creates and links
#post.create('comments', ['asdf'])
# So this should create a tag and link it
#post.create('tags', ['tag1...])
# Easier than .tags()
    # Link and unlink should be ONLY for ManyToMany,
    # because in all other relations the ID is a foreign key on one of the tables.
    # So to unlink it, you have to DELETE the record; there is no "link".
# post.link('tags', tags)
# post.unlink('tags', tag[0]) #unlink one tag
# post.unlink('tags') # unlink all tags
# You can insert one so you can insert relations right after
post = await Post(slug='test-post2', title='Test Post2',
body='This is the body for test post2. My favorite frameworks are Laravel and Uvicore!',
other=None, creator_id=1, owner_id=2
).save()
    # Create AND link (if they don't exist) Many-To-Many tags
await post.link('tags', [
tags['linux'],
tags['bsd'],
])
# Create Polymorphic One-To-One
await post.create('image', {
#'imageable_type': 'posts', # NO, inferred
#'imageable_id': 2, # NO, inferred
'filename': 'post2-image.png',
'size': 2483282
})
# Create Polymorphic One-To-Many
    # NOTE: .add is simply an alias for .create()
await post.add('attributes', [
{'key': 'post2-test1', 'value': 'value for post2-test1'},
{'key': 'post2-test2', 'value': 'value for post2-test2'},
{'key': 'badge', 'value': 'IT'},
])
# Create Polymorphic Many-To-Many
await post.add('hashtags', [
hashtags['obsolete'],
hashtags['outdated'],
        hashtags['outdated'],  # Yes, it's a duplicate, testing that it doesn't fail
])
    # You can NOT insert relations right away; these tags will be IGNORED
    # Use a Dict with insert_with_relations if you want this
post = await Post(
slug='test-post3',
title='Test Post3',
body='This is the body for test post1. I like the programming in PHP, Python and anything Typescript.',
other='other stuff2-bad', # We'll update this away below
creator_id=2,
owner_id=1,
tags=[ # TAGS IGNORED
tags['linux'],
tags['bsd'],
]
).save()
# Test an update
post.other = 'other stuff3'
await post.save()
await post.add('attributes', [
{'key': 'badge', 'value': 'DEV'},
])
await post.add('hashtags', [
hashtags['important'],
])
# You can use .insert() as a List of model instances
# But obviously you cant then add in tags
# This WILL NOT insert relations at all
await Post.insert([
# 2 posts for admin
#Post(slug='test-post1', title='Test Post1', other='other stuff1', creator_id=1),
#Post(slug='test-post2', title='Test Post2', other=None, creator_id=1, owner_id=2),
# 3 posts for manager1
#Post(slug='test-post3', title='Test Post3', other='other stuff2', creator_id=2, owner_id=1),
Post(slug='test-post4', title='Test Post4',
        body='This is the body for test post1. My favorite motorcycles are super fast crotch rockets!',
other=None, creator_id=2, owner_id=1),
Post(slug='test-post5', title='Test Post5',
        body='This is the body for test post1. Everyone loves a cynic.',
other=None, creator_id=2, owner_id=2),
# 2 posts for user2
#Post(slug='test-post6', title='Test Post6', other='other stuff3', creator_id=5),
#Post(slug='test-post7', title='Test Post7', other=None, creator_id=5),
])
    # You can also use .insert() with a list of Dicts
    # This one inserts BelongsTo children FIRST (user, then contact, then post)
    # This is a multi-level nested insert (NOT bulk; done in a loop because of relations)
    # Creates the User first, then the Contact, then finally the Post with the new creator_id
await Post.insert_with_relations([
{
'slug': 'test-post6',
'title': 'Test Post6',
'body': 'This is the body for test post1. Everyone wants to fly.',
'other': 'other stuff6',
#NO - 'creator_id': 5,
'creator': {
'username': '<EMAIL>',
'email': '<EMAIL>',
'first_name': 'User',
'last_name': 'Two',
'creator_id': 2,
'password': '<PASSWORD>',
'contact': {
'name': '<NAME>',
'title': 'User2',
'address': '444 User Dr.',
'phone': '444-444-4444'
# NO user_id=5
},
'info': {
'extra1': 'user5 extra',
},
},
'owner_id': 3,
# Polymorphic One-To-One
'image': {
'filename': 'post6-image.png',
'size': 3345432,
},
# Polymorphic One-To-Many
'attributes': [
{'key': 'post6-test1', 'value': 'value for post6-test1'},
{'key': 'post6-test2', 'value': 'value for post6-test2'},
{'key': 'post6-test3', 'value': 'value for post6-test3'},
{'key': 'badge', 'value': 'IT'},
#{'key': 'test', 'value': 'Hi there, my name is <NAME>, what is your name? Again, my name is <NAME>, what is your name? Again, my name is <NAME>, what is your name?'},
],
# Polymorphic Many-To-Many
'hashtags': [
hashtags['outdated']
],
}
])
# This does NOT work yet, but would be nice. Especially if it can UPDATE an existing child
#post = await Post.query().find(6)
# await post.add('creator', {
# 'email': '<EMAIL>',
# 'contact': {
# 'name': '<NAME>',
# 'title': 'User2',
# 'address': '444 User Dr.',
# 'phone': '444-444-4444'
# # NO user_id=5
# },
# })
# You can insert a single model with .save()
post = Post(slug='test-post7', title='Test Post7',
body='This is the body for test post1. I like the to code alone.',
other=None, creator_id=5, owner_id=4)
await post.save()
await post.create('tags', [
tags.get('linux'),
tags.get('bsd'),
tags.get('laravel'),
])
|
447082
|
import click
import pytest
@pytest.mark.xfail(raises=AttributeError,
reason="App Engine doesn't provide a tty")
def test_progressbar_strip_regression(runner, monkeypatch):
label = ' padded line'
@click.command()
def cli():
with click.progressbar(tuple(range(10)), label=label) as progress:
for thing in progress:
pass
monkeypatch.setattr(click._termui_impl, 'isatty', lambda _: True)
assert label in runner.invoke(cli, []).output
|
447107
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ota', '0008_auto_20190108_1808'),
]
operations = [
migrations.CreateModel(
name='DeviceLogRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255)),
('username', models.CharField(max_length=255)),
('device_id', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterUniqueTogether(
name='devicelogrequest',
unique_together=set([('domain', 'username', 'device_id')]),
),
]
|
447148
|
from abc import ABC, abstractmethod
class MMWrapper(ABC):
"""
A super class for all molecular mechanics wrappers
Note
----
    Since MMWrapper is a superclass with abstract methods,
    the user cannot actually instantiate an MMWrapper object, only its child objects.
"""
kjmol_to_au = 0.0003808798034
nm_to_angstrom = 10.0000000
nm_to_bohr = 18.897161646321
kjmol_nm_to_au_bohr = kjmol_to_au/nm_to_bohr
au_bohr_to_kjmol_nm = nm_to_bohr/kjmol_to_au
def __init__(self, class_type,
sys_info=None,
sys_info_format=None):
self.system_info = sys_info
self.system_info_format = sys_info_format
self.class_type = class_type
self.main_info = None
self.main_charges = None
self.start_qmmm = 0
self.end_qmmm = 0
self.md_steps = 0
self.md_ensemble = 'NVE'
self.return_trajectory_interval = 0
self.return_trajectory_filename = 'output.nc'
self.trajectory_format = 'NetCDF'
self.return_checkpoint_interval = 0
self.return_checkpoint_filename = 'checkpoint.chk'
        self.return_system = True
self.return_system_filename = 'final.pdb'
self.return_info = []
self.return_info_interval = 0
self.return_info_filename = 'info.dat'
self.return_forces_filename = 'forces.pkl'
self.return_forces_interval = 0
super().__init__()
def get_energy_and_gradient(self, traj, geometry=None, include_coulomb='all', link_atoms=None, minimize=False, charges=None):
"""
Gets the energy and gradient from a MM computation
Parameters
----------
traj : MDtraj trajectory object
geometry : str
A string containing geometry information as XYZ coordinates. Not applicable for MM programs
include_coulomb : str
whether to include coulombic interactions.
'all' (default) includes coulombic forces for all particles,
'no_link' excludes coulombic forces for link atoms,
'only' excludes all other forces for all atoms,
'none' excludes coulombic forces for all particles.
link_atoms : list
indices of link_atoms
minimize : bool
whether to return the geometry optimized energy
charges : list
charges and corresponding positions in angstroms as xyz coordinates
Returns
-------
dict
A dictionary with energy('energy') and gradient('gradients') information
"""
topology, positions = self.convert_trajectory(traj)
if charges is not None:
self.set_external_charges(charges)
info = self.compute_info(topology, positions, include_coulomb=include_coulomb, link_atoms=link_atoms, minimize=minimize)
return info
def post_processing_input(self):
self.qmmm_steps = self.end_qmmm - self.start_qmmm
if (type(self.md_steps) is list and type(self.md_ensemble) is list):
self.other_md_ensembles = self.md_ensemble[0:-1]
self.other_ensemble_steps = self.md_steps[0:-1]
self.md_ensemble = self.md_ensemble[-1]
elif (type(self.md_steps) is int and type(self.md_ensemble) is str):
self.other_md_ensembles = None
self.other_ensemble_steps = None
if type(self.md_steps) is int:
self.end_steps = self.md_steps - self.end_qmmm
elif type(self.md_steps) is list:
self.end_steps = self.md_steps[-1] - self.end_qmmm
@abstractmethod
def compute_info(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def set_external_charges(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def initialize(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def take_step(self, force):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def get_main_info(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def get_main_charges(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def convert_trajectory(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def convert_input(self):
"""
Function implemented in individual child classes
"""
pass
@abstractmethod
def set_up_reporters(self):
"""
Function implemented in individual child classes
"""
pass
def get_geom_from_trajectory(self):
"""
Function not implemented for MM wrappers
"""
raise Exception('method not implemented for class')
def set_qm_geometry(self):
"""
Function not implemented for MM wrappers
"""
raise Exception('method not implemented for class')
def build_qm_param(self):
"""
Function not implemented for MM wrappers
"""
raise Exception('method not implemented for class')
def optimize_geometry(self):
"""
Function not implemented for MM wrappers
"""
raise Exception('method not implemented for class')
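# Hedged sketch (not part of the original module): a minimal concrete wrapper
# showing which abstract methods a child class must implement before it can be
# instantiated; the bodies are placeholders, not a real MM backend.
class DummyMMWrapper(MMWrapper):
    def compute_info(self, topology, positions, include_coulomb='all',
                     link_atoms=None, minimize=False):
        return {'energy': 0.0, 'gradients': None}
    def set_external_charges(self, charges):
        pass
    def initialize(self):
        pass
    def take_step(self, force):
        pass
    def get_main_info(self):
        return self.main_info
    def get_main_charges(self):
        return self.main_charges
    def convert_trajectory(self, traj):
        return None, None
    def convert_input(self):
        pass
    def set_up_reporters(self):
        pass
# Unlike MMWrapper itself, DummyMMWrapper('dummy') is now instantiable.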
|
447208
|
import os
import dill
from django.core.files.base import ContentFile
from django.db import models
from estimators import get_storage, get_upload_path, hashing
class PrimaryMixin(models.Model):
create_date = models.DateTimeField(
auto_now_add=True, blank=False, null=False)
class Meta:
abstract = True
class HashableFileQuerySet(models.QuerySet):
object_property_name = NotImplementedError()
def filter(self, *args, **kwargs):
"""filter lets django managers use `objects.filter` on a hashable object."""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
return super().filter(*args, **kwargs)
def _extract_model_params(self, defaults, **kwargs):
"""this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
"""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
lookup, params = super()._extract_model_params(defaults, **kwargs)
if obj is not None:
params[self.object_property_name] = obj
del params['object_hash']
return lookup, params
class HashableFileMixin(models.Model):
create_date = models.DateTimeField(
auto_now_add=True, blank=False, null=False)
object_hash = models.CharField(
max_length=64, unique=True, default=None, null=False, editable=False)
object_file = models.FileField(
upload_to=get_upload_path, storage=get_storage(), default=None, null=False, blank=True, editable=False)
_object_property_name = NotImplementedError()
_persisted = False
objects = HashableFileQuerySet.as_manager()
class Meta:
abstract = True
@property
def upload_path(self):
if self.object_file.name is not None:
dir_name, file_name = os.path.split(self.object_file.name)
return get_upload_path(self, file_name)
@property
def file_path(self):
if self.object_file.name is not None:
return os.path.join(self.object_file.storage.location, self.upload_path)
@property
def is_file_persisted(self):
return self.object_file.name is not None and self.object_file.storage.exists(self.file_path)
@classmethod
def _compute_hash(cls, obj):
return hashing.hash(obj)
@property
def object_property(self):
return getattr(self, self._object_property_name)
@object_property.setter
def object_property(self, obj):
return setattr(self, self._object_property_name, obj)
def get_object(self):
if self.object_property is None:
self.load()
return self.object_property
def set_object(self, value):
object_hash = self._compute_hash(value)
self.object_property = value
self.object_hash = object_hash
self.object_file.name = self.object_hash
def persist(self):
"""a private method that persists an estimator object to the filesystem"""
if self.object_hash:
data = dill.dumps(self.object_property)
f = ContentFile(data)
self.object_file.save(self.object_hash, f, save=False)
f.close()
self._persisted = True
return self._persisted
def load(self):
"""a private method that loads an estimator object from the filesystem"""
if self.is_file_persisted:
self.object_file.open()
temp = dill.loads(self.object_file.read())
self.set_object(temp)
self.object_file.close()
def save(self, *args, **kwargs):
if not self.is_file_persisted:
self.persist()
super().save(*args, **kwargs)
@classmethod
def get_or_create(cls, obj):
"""Deprecated in favor for the canonical `objects.get_or_create` method"""
raise DeprecationWarning('Please use `%s.objects.get_or_create()` instead' % cls)
@classmethod
def create_from_file(cls, filename):
"""Return an Estimator object given the path of the file, relative to the MEDIA_ROOT"""
obj = cls()
obj.object_file = filename
obj.load()
return obj
|
447211
|
import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
        self.access_menu(options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
447216
|
from __future__ import absolute_import, division, unicode_literals
from flask_restful.reqparse import RequestParser
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from uuid import UUID
from changes.api.base import APIView, error
from changes.models.build import Build
from changes.models.job import Job
from changes.models.source import Source
class SourceBuildIndexAPIView(APIView):
"""
Gets all the builds for a given source object
"""
get_parser = RequestParser()
get_parser.add_argument('source_id', type=UUID, location='args')
get_parser.add_argument('revision_sha', location='args')
get_parser.add_argument('repo_id', type=UUID, location='args')
get_parser.add_argument('tag', type=unicode, action='append', location='args')
def get(self):
args = self.get_parser.parse_args()
# this can take either a source id or a revision/repo id. For the
# latter, only non-patch sources are looked at
source_id = args.source_id
revision_sha = args.revision_sha
repo_id = args.repo_id
if source_id:
source = Source.query.filter(
Source.id == source_id,
).first()
elif revision_sha and repo_id:
source = Source.query.filter(
Source.revision_sha == revision_sha,
Source.repository_id == repo_id,
Source.patch_id == None # NOQA
).first()
else:
return error('invalid args')
if source is None:
return error("source not found", http_code=404)
filters = [Build.source_id == source.id]
if args.tag:
tags = filter(bool, args.tag)
# Avoid empty tags, which historically are meant to mean "no tag" restriction.
if tags:
filters.append(or_(*[Build.tags.any(t) for t in tags]))
builds = self.serialize(list(
Build.query.options(
joinedload('author')
).filter(
*filters
).order_by(Build.date_created.desc())
))
build_ids = [build['id'] for build in builds]
if len(builds) > 0:
jobs = self.serialize(list(Job.query.filter(
Job.build_id.in_(build_ids)
)))
for b in builds:
b['jobs'] = [j for j in jobs if j['build']['id'] == b['id']]
return self.paginate(builds, serialize=False)
|
447325
|
from eudplib import *
t = [Forward() for _ in range(115)]
def onPluginStart():
global reph_epd
reph_epd = f_epdread_epd(EPD(0x6D5CD8))
s = EUDArray([EPD(x) + 86 for x in t])
i = EUDVariable()
if EUDWhile()(i <= 114):
k = EUDVariable()
EUDWhile()(k <= 63 * 8)
EUDBreakIf([i == 114, k >= 15 * 8])
DoActions([
SetMemoryEPD(s[i] + k, SetTo, reph_epd),
reph_epd.AddNumber(1),
k.AddNumber(8),
])
EUDEndWhile()
DoActions([i.AddNumber(1), k.SetNumber(0)])
EUDEndWhile()
def beforeTriggerExec():
dummy = reph_epd.getValueAddr() - 8
for i in range(114):
t[i] << RawTrigger(actions=[SetMemory(dummy, SetTo, 0) for _ in range(64)])
t[114] << RawTrigger(actions=[SetMemory(dummy, SetTo, 0) for _ in range(15)])
|
447335
|
import unittest
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
import torchvision
import simplify
from simplify.layers import ConvB, ConvExpand
from utils import set_seed
class ConvBTest(unittest.TestCase):
def setUp(self):
set_seed(3)
def test_conv_b(self):
conv = nn.Conv2d(
3,
64,
3,
1,
padding=2,
padding_mode='zeros',
bias=True)
out1 = conv(torch.zeros((1, 3, 128, 128)))
bias = conv.bias.data.clone()
conv.bias.data.mul_(0)
conv = ConvB.from_conv(conv, bias[:, None, None].expand_as(out1[0]))
out2 = conv(torch.zeros((1, 3, 128, 128)))
out2.sum().backward()
assert (conv.weight.grad.shape == conv.weight.shape)
class ConvExpandTest(unittest.TestCase):
# TODO update __repr__ with correct output size
def setUp(self):
set_seed(3)
@torch.no_grad()
def test_expansion(self):
module = nn.Conv2d(3, 64, 3, 1, padding=1, bias=False)
optimizer = torch.optim.SGD(module.parameters(), lr=0.1)
x = torch.randn((57, 3, 128, 128))
prune.random_structured(module, 'weight', amount=0.5, dim=0)
prune.remove(module, 'weight')
y_src = module(x)
shape1 = module.weight.shape
nonzero_idx = ~(module.weight.sum(dim=(1, 2, 3)) == 0)
module.weight.data = module.weight.data[nonzero_idx]
shape2 = module.weight.shape
self.assertFalse(shape1 == shape2)
y_post = module(x)
self.assertFalse(torch.equal(y_src, y_post))
module = ConvB.from_conv(module, torch.zeros_like(y_post)[0])
module.register_parameter('bias', None)
idxs = torch.where(nonzero_idx)[0]
module = ConvExpand.from_conv(module, idxs, torch.zeros_like(y_src)[0])
module.out_channels = module.weight.shape[0]
for i in range(2):
with torch.enable_grad():
y_post = module(x)
y_post.sum().backward()
optimizer.step()
optimizer.zero_grad()
assert (module.weight.grad.shape == module.weight.shape)
def test_model(self):
model = torchvision.models.resnet50(True)
x = torch.randn((57, 3, 128, 128))
for name, module in model.named_modules():
if isinstance(module, nn.Conv2d):
prune.random_structured(module, 'weight', amount=0.8, dim=0)
prune.remove(module, 'weight')
zeros = torch.zeros(1, *x.shape[1:])
model.eval()
simplify.simplify(model, zeros, fuse_bn=False)
model.train()
with torch.enable_grad():
y = model(x)
y.sum().backward()
for p in model.parameters():
self.assertIsNotNone(p.grad)
self.assertTrue(p.shape == p.grad.shape)
|
447339
|
import os
import logging
import numpy as np
from xml.etree import ElementTree as ET
# setup logger
parent_dir, filename = os.path.split(__file__)
base_dir = os.path.basename(parent_dir)
logger = logging.getLogger(os.path.join(base_dir, filename))
class Parser(object):
def __init__(self, base_dir, xml_file):
self.base_dir = base_dir
xml_path = os.path.join(base_dir, xml_file)
self.root = ET.parse(xml_path).getroot()
self.get_image_path()
# some files in the dataset do not exist
if not os.path.exists(self.image_path):
logger.error(f"Could not find {self.image_path}")
raise FileNotFoundError(f"{self.image_path} does not exist")
@staticmethod
def _fetch_bounding_box(obj):
attrib = obj.find("bndbox")
xmin = int(attrib.find("xmin").text)
ymin = int(attrib.find("ymin").text)
xmax = int(attrib.find("xmax").text)
ymax = int(attrib.find("ymax").text)
return [xmin, ymin, xmax, ymax]
@staticmethod
def _fetch_difficulty(obj):
return int(obj.find("difficult").text)
@staticmethod
def _fetch_placement(obj):
return obj.find("name").text
def get_image_path(self):
image_dir = self.root.find("folder").text
filename = self.root.find("filename").text
parent_dir = os.path.dirname(os.path.dirname(self.base_dir))
self.image_path = os.path.join(parent_dir, image_dir, filename)
def fetch_metadata(self):
# initialize data
bboxes = []
difficulty = []
placement = []
        # metadata is located within each object attribute
for obj in self.root.findall("object"):
bboxes.append(self._fetch_bounding_box(obj))
difficulty.append(self._fetch_difficulty(obj))
placement.append(self._fetch_placement(obj))
# build data structure
        metadata = {
            "bboxes": np.array(bboxes).astype("int"),
            "difficult": np.array(difficulty),
            "placement": np.array(placement),
        }
return metadata
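# Hedged usage sketch (the directory layout is an assumption; get_image_path
# above resolves images two levels above base_dir, in the folder named by the
# XML's <folder> tag):
#
#     parser = Parser('dataset/annotations/xmls', 'image_0001.xml')
#     metadata = parser.fetch_metadata()
#     metadata['bboxes']   # (N, 4) int array of [xmin, ymin, xmax, ymax]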
|
447356
|
from __future__ import absolute_import, division
import os
from binascii import hexlify
import sqlalchemy
from sqlalchemy.engine import RowProxy
from sqlalchemy.event import listens_for
from sqlalchemy.exc import StatementError
from sqlalchemy.schema import CreateTable
from twisted.trial import unittest
from alchimia import TWISTED_STRATEGY, wrap_engine
from alchimia.engine import (
TwistedEngine, TwistedConnection, TwistedTransaction,
)
from .doubles import FakeThreadedReactor, ImmediateWorker
def create_engine(**kwargs):
if 'TEST_DB_URL' in os.environ:
TEST_DB_URL = os.environ['TEST_DB_URL']
else:
TEST_DB_URL = 'sqlite://'
if TEST_DB_URL.startswith("sqlite:"):
# per
# http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#serializable-isolation-savepoints-transactional-ddl,
# necessary to test savepoints in SQLite.
sub_engine = sqlalchemy.create_engine(TEST_DB_URL, **kwargs)
@listens_for(sub_engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@listens_for(sub_engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
return wrap_engine(FakeThreadedReactor(), sub_engine, ImmediateWorker)
engine = sqlalchemy.create_engine(
TEST_DB_URL, strategy=TWISTED_STRATEGY,
reactor=FakeThreadedReactor(), create_worker=ImmediateWorker,
**kwargs)
if TEST_DB_URL.startswith("postgresql"):
tmpdb_name = "testdb"+hexlify(os.urandom(16)).decode()
tmpdb_url = '/'.join(
TEST_DB_URL.split('/')[:-1] + [tmpdb_name])
conn = engine.connect().result
conn.execute("commit")
conn.execute("CREATE DATABASE {}".format(tmpdb_name))
conn.close()
engine = sqlalchemy.create_engine(
tmpdb_url, strategy=TWISTED_STRATEGY,
reactor=FakeThreadedReactor(), create_worker=ImmediateWorker,
**kwargs)
return engine
class TestEngineCreation(unittest.TestCase):
def test_simple_create_with_strategy(self):
engine = sqlalchemy.create_engine(
"sqlite://",
strategy=TWISTED_STRATEGY,
reactor=FakeThreadedReactor(),
)
assert isinstance(engine, TwistedEngine)
def test_wrap_engine(self):
sub_engine = sqlalchemy.create_engine("sqlite://")
twisted_engine = wrap_engine(FakeThreadedReactor(), sub_engine)
assert isinstance(twisted_engine, TwistedEngine)
class TestEngine(unittest.TestCase):
def test_connect(self):
engine = create_engine()
d = engine.connect()
connection = self.successResultOf(d)
assert isinstance(connection, TwistedConnection)
def test_execute(self):
engine = create_engine()
d = engine.execute("SELECT 42")
result = self.successResultOf(d)
d = result.scalar()
assert self.successResultOf(d) == 42
def test_table_names(self):
engine = create_engine()
d = engine.table_names()
assert self.successResultOf(d) == []
d = engine.execute("CREATE TABLE mytable (id int)")
self.successResultOf(d)
d = engine.table_names()
assert self.successResultOf(d) == ['mytable']
def test_table_names_with_connection(self):
# There's no easy way to tell which connection was actually used, so
# this test just provides coverage for the code path.
engine = create_engine()
conn = self.successResultOf(engine.connect())
d = engine.table_names(connection=conn)
assert self.successResultOf(d) == []
d = conn.execute("CREATE TABLE mytable (id int)")
self.successResultOf(d)
d = engine.table_names(connection=conn)
assert self.successResultOf(d) == ['mytable']
def test_has_table(self):
engine = create_engine()
d = engine.has_table('mytable')
assert self.successResultOf(d) is False
d = engine.execute("CREATE TABLE mytable (id int)")
self.successResultOf(d)
d = engine.has_table('mytable')
assert self.successResultOf(d) is True
class TestConnection(unittest.TestCase):
def get_connection(self):
engine = create_engine()
return self.successResultOf(engine.connect())
def execute_fetchall(self, conn, query_obj):
result = self.successResultOf(conn.execute(query_obj))
return self.successResultOf(result.fetchall())
def test_execute(self):
conn = self.get_connection()
d = conn.execute("SELECT 42")
result = self.successResultOf(d)
d = result.scalar()
assert self.successResultOf(d) == 42
def test_close(self):
conn = self.get_connection()
assert not conn.closed
result = self.successResultOf(conn.execute("SELECT 42"))
assert self.successResultOf(result.scalar()) == 42
self.successResultOf(conn.close())
assert conn.closed
failure = self.failureResultOf(
conn.execute("SELECT 42"), StatementError)
assert "This Connection is closed" in str(failure)
def test_in_transaction(self):
conn = self.get_connection()
assert not conn.in_transaction()
transaction = self.successResultOf(conn.begin())
assert isinstance(transaction, TwistedTransaction)
assert conn.in_transaction()
self.successResultOf(transaction.close())
assert not conn.in_transaction()
def test_repeated_begin(self):
conn = self.get_connection()
assert not conn.in_transaction()
trx1 = self.successResultOf(conn.begin())
assert conn.in_transaction()
trx2 = self.successResultOf(conn.begin())
assert conn.in_transaction()
self.successResultOf(trx2.close())
assert conn.in_transaction()
self.successResultOf(trx1.close())
assert not conn.in_transaction()
def test_savepoints(self):
conn = self.get_connection()
assert not conn.in_transaction()
self.successResultOf(conn.execute(
"create table effects (which integer)"
))
txn = self.successResultOf(conn.begin())
self.successResultOf(conn.execute("insert into effects values (1)"))
save = self.successResultOf(conn.begin_nested())
self.successResultOf(conn.execute("insert into effects values (2)"))
self.successResultOf(save.rollback())
self.assertEqual(
[(1,)],
self.successResultOf(
self.successResultOf(conn.execute("select * from effects"))
.fetchall()
)
)
self.successResultOf(txn.commit())
def test_transaction_commit(self):
metadata = sqlalchemy.MetaData()
tbl = sqlalchemy.Table(
'mytable', metadata,
sqlalchemy.Column("id", sqlalchemy.Integer(), primary_key=True),
sqlalchemy.Column("num", sqlalchemy.Integer()),
)
conn = self.get_connection()
self.successResultOf(conn.execute(CreateTable(tbl)))
trx = self.successResultOf(conn.begin())
self.successResultOf(conn.execute(tbl.insert().values(num=42)))
rows = self.execute_fetchall(conn, tbl.select())
assert len(rows) == 1
self.successResultOf(trx.commit())
rows = self.execute_fetchall(conn, tbl.select())
assert len(rows) == 1
def test_transaction_rollback(self):
metadata = sqlalchemy.MetaData()
tbl = sqlalchemy.Table(
'mytable', metadata,
sqlalchemy.Column("id", sqlalchemy.Integer(), primary_key=True),
sqlalchemy.Column("num", sqlalchemy.Integer()),
)
conn = self.get_connection()
self.successResultOf(conn.execute(CreateTable(tbl)))
trx = self.successResultOf(conn.begin())
self.successResultOf(conn.execute(tbl.insert().values(num=42)))
rows = self.execute_fetchall(conn, tbl.select())
assert len(rows) == 1
self.successResultOf(trx.rollback())
rows = self.execute_fetchall(conn, tbl.select())
assert len(rows) == 0
class TestResultProxy(unittest.TestCase):
def create_default_table(self):
engine = create_engine()
d = engine.execute("CREATE TABLE testtable (id int)")
self.successResultOf(d)
return engine
def test_fetchone(self):
engine = create_engine()
d = engine.execute("SELECT 42")
result = self.successResultOf(d)
d = result.fetchone()
row = self.successResultOf(d)
assert isinstance(row, RowProxy)
assert row[0] == 42
def test_fetchall(self):
engine = create_engine()
d = engine.execute("SELECT 10")
result = self.successResultOf(d)
d = result.fetchall()
rows = self.successResultOf(d)
assert len(rows) == 1
assert rows[0][0] == 10
def test_first(self):
engine = self.create_default_table()
d = engine.execute("INSERT INTO testtable (id) VALUES (2)")
self.successResultOf(d)
d = engine.execute("INSERT INTO testtable (id) VALUES (3)")
self.successResultOf(d)
d = engine.execute("SELECT * FROM testtable ORDER BY id ASC")
result = self.successResultOf(d)
d = result.first()
row = self.successResultOf(d)
assert len(row) == 1
assert row[0] == 2
def test_keys(self):
engine = create_engine()
d = engine.execute("CREATE TABLE testtable (id int, name varchar)")
self.successResultOf(d)
d = engine.execute("SELECT * FROM testtable")
result = self.successResultOf(d)
d = result.keys()
keys = self.successResultOf(d)
assert len(keys) == 2
assert 'id' in keys
assert 'name' in keys
def test_returns_rows(self):
engine = self.create_default_table()
d = engine.execute("INSERT INTO testtable values (2)")
result = self.successResultOf(d)
assert not result.returns_rows
d = engine.execute("SELECT * FROM testtable")
result = self.successResultOf(d)
assert result.returns_rows
def test_rowcount(self):
engine = self.create_default_table()
d = engine.execute("INSERT INTO testtable VALUES (1)")
self.successResultOf(d)
d = engine.execute("INSERT INTO testtable VALUES (2)")
self.successResultOf(d)
d = engine.execute("INSERT INTO testtable VALUES (3)")
self.successResultOf(d)
d = engine.execute("UPDATE testtable SET id = 7 WHERE id < 3")
result = self.successResultOf(d)
assert result.rowcount == 2
d = engine.execute("DELETE from testtable")
result = self.successResultOf(d)
assert result.rowcount == 3
def test_inserted_primary_key(self):
metadata = sqlalchemy.MetaData()
tbl = sqlalchemy.Table(
'testtable', metadata,
sqlalchemy.Column("id", sqlalchemy.Integer(), primary_key=True),
)
engine = create_engine()
d = engine.execute(CreateTable(tbl))
self.successResultOf(d)
d = engine.execute(tbl.insert().values())
result = self.successResultOf(d)
assert result.inserted_primary_key == [1]
def test_close(self):
engine = self.create_default_table()
d = engine.execute("INSERT INTO testtable VALUES (1)")
self.successResultOf(d)
d = engine.execute("INSERT INTO testtable VALUES (2)")
self.successResultOf(d)
d = engine.execute("SELECT * FROM testtable")
result = self.successResultOf(d)
self.successResultOf(result.close())
|
447359
|
from hyperadmin.mediatypes.passthrough import Passthrough
class ClientMixin(object):
"""
Contains logic for connecting to an endpoint on the API
"""
resource = None
url_name = None
client_site = None
def get_api_endpoint(self):
for endpoint in self.resource.get_view_endpoints():
if endpoint.name_suffix == self.url_name:
return endpoint
def get_api_kwargs(self):
return dict(self.kwargs)
def get_api_args(self):
return list(self.args)
def get_global_state(self):
        # with media type = Passthrough
        kwargs = {'media_types': {'*': Passthrough}}
        # patch permissions by setting your own client_site
if self.client_site is not None:
kwargs['site'] = self.client_site
return kwargs
def get_api_response(self):
if not hasattr(self, '_api_response'):
endpoint = self.get_api_endpoint()
            assert endpoint is not None, 'Failed to look up endpoint for: %s in %s' % (self.url_name, [e['name'] for e in self.resource.get_view_endpoints()])
api_args = self.get_api_args()
api_kwargs = self.get_api_kwargs()
self._api_response = endpoint.get_view()(self.request, *api_args, **api_kwargs)
return self._api_response
def get_state(self):
return self.get_api_response().state
def get_link(self):
return self.get_api_response().link
def get_context_data(self, **kwargs):
context = super(ClientMixin, self).get_context_data(**kwargs)
context['state'] = self.get_state()
context['link'] = self.get_link()
return context
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
#TODO use an alternative template response class instead
patch_params = self.get_global_state()
with self.client_site.state.patch_state(**patch_params):
response = super(ClientMixin, self).dispatch(request, *args, **kwargs)
if hasattr(response, 'render'):
response.render()
return response
#CONSIDER: should we expose CRUD functionality as default and have the process entirely controlled by permissions?
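
# Hypothetical usage sketch (the view class, template, and url_name below are
# assumptions, not part of hyperadmin): mix ClientMixin into a standard Django
# class-based view and point it at a resource when wiring up URLs.
from django.views.generic import TemplateView

class ProductListClientView(ClientMixin, TemplateView):
    template_name = "products/list.html"
    url_name = "list"  # matched against endpoint.name_suffix in get_api_endpoint
    # `resource` and `client_site` are typically assigned where the URLs are defined.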
|
447379
|
import traceback
import sys
class ViewErrorLoggingMiddleware:
def process_view(self, request, view_func, view_args, view_kwargs):
self.view_name = view_func.__name__
def process_exception(self, request, exception):
        print('=' * 60)
        print('[ERROR] exception in view "%s"' % self.view_name)
        traceback.print_exc(file=sys.stdout)
        print('=' * 60)
|
447390
|
import numpy as np
import utils
from HopfieldNetwork import HopfieldNetwork
def process_shape(initial_shape: np.ndarray, net: HopfieldNetwork):
noised_shapes = []
for i in range(5, 105, 5):
noise_level = i / 100
noised_shapes.append(utils.noise_shape(initial_shape, noise_level))
for i in range(len(noised_shapes)):
recognized, adjusted_shape, iterations = net.recognize(noised_shapes[i])
print('Noise %:', '%.2f' % ((i + 1) * 0.05))
print('Amount of iterations:', iterations)
print('Is image recognized:', recognized)
print(adjusted_shape)
print('\n\n')
def main():
first_shape = np.array([
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
])
second_shape = np.array([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
])
third_shape = np.array([
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
])
# transform shapes to 1 / -1 values
first_shape = utils.normalize(first_shape)
second_shape = utils.normalize(second_shape)
third_shape = utils.normalize(third_shape)
dimension = 10
# create network instance
net = HopfieldNetwork(dimension)
# teaching the net
net.teach(first_shape)
net.teach(second_shape)
net.teach(third_shape)
# recreating shapes with noise
process_shape(first_shape, net)
process_shape(second_shape, net)
process_shape(third_shape, net)
if __name__ == '__main__':
main()
|
447506
|
from interactor.database.models.scene_spec import make_spec
from interactor.database.base import Base
from sqlalchemy import Column, Integer, String, Text, Boolean
from delfick_project.norms import sb
class Scene(Base):
uuid = Column(String(64), nullable=True, index=True)
matcher = Column(Text(), nullable=False)
power = Column(Boolean(), nullable=True)
color = Column(Text(), nullable=True)
zones = Column(Text(), nullable=True)
chain = Column(Text(), nullable=True)
duration = Column(Integer(), nullable=True)
__repr_columns__ = ("uuid", "matcher")
def as_object(self):
dct = {
"uuid": self.uuid,
"matcher": self.matcher,
"power": self.power,
"color": self.color,
"zones": self.zones,
"chain": self.chain,
"duration": self.duration,
}
return self.Spec(storing=False).empty_normalise(**dct)
def as_dict(self, ignore=None):
return {
k: v
for k, v in self.as_object().as_dict().items()
if v is not None and k not in (ignore or ())
}
@classmethod
def Spec(kls, storing=True):
return make_spec(storing=storing)
@classmethod
def DelayedSpec(kls, storing=True):
spec = kls.Spec(storing=storing)
class delayed(sb.Spec):
def normalise_filled(self, meta, val):
val = sb.dictionary_spec().normalise(meta, val)
def normalise(uuid):
if "uuid" in val:
del val["uuid"]
return spec.normalise(meta, {"uuid": uuid, **val})
return normalise
return delayed()
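
# Usage sketch (illustrative; assumes delfick_project.norms' Meta and that
# make_spec accepts these fields): DelayedSpec normalises everything except the
# uuid up front and returns a callable that binds the uuid later.
if __name__ == "__main__":
    from delfick_project.norms import Meta

    partial = Scene.DelayedSpec(storing=False).normalise(
        Meta.empty(), {"matcher": '{"label": "kitchen"}', "power": True}
    )
    scene = partial("scene-uuid-1")  # finish normalising with the uuid attached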
|
447530
|
import tensorflow as tf
from tfsnippet.utils import add_name_arg_doc, is_tensor_object
__all__ = ['smart_cond']
@add_name_arg_doc
def smart_cond(cond, true_fn, false_fn, name=None):
"""
Execute `true_fn` or `false_fn` according to `cond`.
Args:
cond (bool or tf.Tensor): A bool constant or a tensor.
true_fn (() -> tf.Tensor): The function of the true branch.
false_fn (() -> tf.Tensor): The function of the false branch.
Returns:
tf.Tensor: The output tensor.
"""
if is_tensor_object(cond):
return tf.cond(cond, true_fn, false_fn, name=name)
else:
if cond:
return true_fn()
else:
return false_fn()
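
# Usage sketch: a Python bool resolves the branch eagerly, while a tensor
# condition falls through to tf.cond (TF 1.x graph mode is assumed here,
# matching the tfsnippet-era API above).
if __name__ == '__main__':
    x = tf.constant(3.0)
    eager_branch = smart_cond(True, lambda: x * 2, lambda: x / 2)  # calls true_fn directly
    flag = tf.placeholder(tf.bool, shape=())
    graph_branch = smart_cond(flag, lambda: x * 2, lambda: x / 2)  # builds a tf.cond node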
|
447532
|
import numpy as np
import cv2
# Recognize the code from a barcode location
class BarcodeRecognizer(object):
def __init__(self, useDebugMode=False):
self.useDebugMode = useDebugMode
    def recognize(self, image):
if image is None:
return False, None
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if gray is None:
return False, None
gray = cv2.resize(gray, (100, 100), interpolation=cv2.INTER_CUBIC)
valid = True
marginSize = 5
kernel = np.ones((7, 7), np.uint8)
# binarize the image
(ret, thresh) = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY)
width, height = thresh.shape
thresh[0:marginSize, 0:height] = 0
thresh[width - marginSize:width, 0:height] = 0
thresh[0:width, 0:marginSize] = 0
thresh[0:width, height - marginSize:height] = 0
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# find first corner
found = False
index = 0
leftCorner = 0
while found is False and index < width:
if thresh[index, index] == 255:
leftCorner = index
found = True
index = index + 1
index = 0
found = False
rightCorner = 0
while found is False and index < width:
if thresh[index, height - index - 1] == 255:
rightCorner = index
found = True
index = index + 1
        index = 0
        found = False
        bottomCorner = 0
        while found is False and index < width:
            if thresh[width - index - 1, index] == 255:
                bottomCorner = index
                found = True
            index = index + 1
index = 0
lastCorner = 0
found = False
while found is False and index < width:
if thresh[width - index - 1, height - index - 1] == 255:
lastCorner = index
found = True
index = index + 1
        if lastCorner < leftCorner \
                or lastCorner < rightCorner \
                or lastCorner < bottomCorner:
            valid = False
            return valid, None
        pts1 = np.float32([[leftCorner, leftCorner],
                           [rightCorner, height - rightCorner - 1],
                           [width - bottomCorner - 1, bottomCorner]])
pts2 = np.float32([[0, 0], [0, 100], [100, 0]])
affineTransform = cv2.getAffineTransform(pts1, pts2)
thresh = cv2.warpAffine(thresh, affineTransform, (width, height))
(ret, threshCentrated) = cv2.threshold(thresh, 125, 255, cv2.THRESH_BINARY)
valuesPredicted = np.zeros(25, np.int8)
index = 0
threshRecValue = 18 * 255
for i in range(0, 5):
for j in range(0, 5):
cv2.rectangle(threshCentrated, (i * 20, j * 20), (i * 20 + 20, j * 20 + 20), 128, 1)
sum = threshCentrated[i * 20 + 7:i * 20 + 13, j * 20 + 7:j * 20 + 13].sum().sum()
if sum > threshRecValue:
valuesPredicted[index] = 1
else:
valuesPredicted[index] = 0
index = index + 1
valuesPredicted[0] = valuesPredicted[4] = valuesPredicted[20] = 1
valuesPredicted[24] = 0
return valid, valuesPredicted
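
# Usage sketch; "code.png" is a placeholder path to a cropped barcode image.
if __name__ == '__main__':
    recognizer = BarcodeRecognizer()
    frame = cv2.imread("code.png")
    valid, values = recognizer.recognize(frame)
    if valid and values is not None:
        print(values.reshape(5, 5))  # the 5x5 grid of decoded cell values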
|
447552
|
from hive_metastore_client import HiveMetastoreClient
HIVE_HOST = "<ADD_HIVE_HOST_HERE>"
HIVE_PORT = 9083
# You must create a list with the columns' names to drop
columns = ["quantity"]
with HiveMetastoreClient(HIVE_HOST, HIVE_PORT) as hive_client:
# Dropping columns from table
hive_client.drop_columns_from_table(
db_name="store", table_name="order", columns=columns
)
|
447577
|
import os
import time
import tensorflow as tf
import qaData
from qaLSTMNet import QaLSTMNet
def restore():
    try:
        print("Loading the model, this takes about a minute...")
        saver.restore(sess, trainedModel)
    except Exception as e:
        print(e)
        print("Failed to load the model; starting training from scratch")
        train()
def train():
    print("Training from scratch; make sure the machine has at least 8 GB of free RAM and 2 GB of free GPU memory")
    # Prepare the training data
    print("Preparing the training data, this takes about five minutes...")
    qTrain, aTrain, lTrain, qIdTrain = qaData.loadData(trainingFile, word2idx, unrollSteps, True)
    qDevelop, aDevelop, lDevelop, qIdDevelop = qaData.loadData(developFile, word2idx, unrollSteps, True)
    trainQuestionCounts = qIdTrain[-1]
    for i in range(len(qIdDevelop)):
        qIdDevelop[i] += trainQuestionCounts
    tqs, tta, tfa = [], [], []
    for question, trueAnswer, falseAnswer in qaData.trainingBatchIter(qTrain + qDevelop, aTrain + aDevelop,
                                                                      lTrain + lDevelop, qIdTrain + qIdDevelop,
                                                                      batchSize):
        tqs.append(question), tta.append(trueAnswer), tfa.append(falseAnswer)
    print("Done loading!")
    # Start training
    print("Starting training; the full run takes about 12 hours")
    sess.run(tf.global_variables_initializer())
    lr = learningRate  # local variable to avoid shadowing the outer name
    for i in range(lrDownCount):
        optimizer = tf.train.GradientDescentOptimizer(lr)
        trainOp = optimizer.apply_gradients(zip(grads, tvars), global_step=globalStep)
for epoch in range(epochs):
for question, trueAnswer, falseAnswer in zip(tqs, tta, tfa):
startTime = time.time()
feed_dict = {
lstm.inputQuestions: question,
lstm.inputTrueAnswers: trueAnswer,
lstm.inputFalseAnswers: falseAnswer,
lstm.keep_prob: dropout
}
_, step, _, _, loss = \
sess.run([trainOp, globalStep, lstm.trueCosSim, lstm.falseCosSim, lstm.loss], feed_dict)
timeUsed = time.time() - startTime
print("step:", step, "loss:", loss, "time:", timeUsed)
saver.save(sess, saveFile)
lr *= lrDownRate
if __name__ == '__main__':
    # Define parameters
    trainingFile = "data/training.data"
    developFile = "data/develop.data"
    testingFile = "data/testing.data"
    resultFile = "predictRst.score"
    saveFile = "newModel/savedModel"
    trainedModel = "trainedModel/savedModel"
    embeddingFile = "word2vec/zhwiki_2017_03.sg_50d.word2vec"
    embeddingSize = 50  # dimensionality of the word vectors
    dropout = 1.0
    learningRate = 0.4  # learning rate
    lrDownRate = 0.5  # learning-rate decay factor
    lrDownCount = 4  # number of learning-rate decay steps
    epochs = 20  # full epochs to run before each learning-rate decay
    batchSize = 20  # number of questions per batch
    rnnSize = 100  # number of hidden units in the LSTM cell
    margin = 0.1  # M is the constant margin
    unrollSteps = 100  # maximum number of tokens per sentence
    max_grad_norm = 5  # gradient clipping: scale gradients down proportionally if their L2 norm exceeds max_grad_norm
    allow_soft_placement = True  # allow soft device placement
    gpuMemUsage = 0.75  # maximum fraction of GPU memory to use
    gpuDevice = "/gpu:0"  # GPU device name
    # Load the test data
    print("Loading the test data, this takes about a minute...")
    embedding, word2idx = qaData.loadEmbedding(embeddingFile)
    qTest, aTest, _, qIdTest = qaData.loadData(testingFile, word2idx, unrollSteps)
    print("Test data loaded")
    # Configure TensorFlow
with tf.Graph().as_default(), tf.device(gpuDevice):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpuMemUsage)
session_conf = tf.ConfigProto(allow_soft_placement=allow_soft_placement, gpu_options=gpu_options)
with tf.Session(config=session_conf).as_default() as sess:
            # Build the LSTM network
            print("Building the LSTM network, this takes about three minutes...")
            globalStep = tf.Variable(0, name="globle_step", trainable=False)
            lstm = QaLSTMNet(batchSize, unrollSteps, embedding, embeddingSize, rnnSize, margin)
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(lstm.loss, tvars), max_grad_norm)
            saver = tf.train.Saver()
            print("Done!")
            # Load an existing model, or train a new one
            if os.path.exists(trainedModel + '.index'):
                while True:
                    choice = input("Found a previously trained model. Load it? (y/n) ")
                    if choice.strip().lower() == 'y':
                        restore()
                        break
                    elif choice.strip().lower() == 'n':
                        train()
                        break
                    else:
                        print("Invalid input!\n")
            else:
                train()
            # Run the test and write out the results
            print("Running the test, this takes about three minutes...")
with open(resultFile, 'w') as file:
for question, answer in qaData.testingBatchIter(qTest, aTest, batchSize):
feed_dict = {
lstm.inputTestQuestions: question,
lstm.inputTestAnswers: answer,
lstm.keep_prob: dropout
}
_, scores = sess.run([globalStep, lstm.result], feed_dict)
for score in scores:
file.write("%.9f" % score + '\n')
print("所有步骤完成!程序结束")
|
447585
|
import argparse
import pickle
import numpy as np
import torch
from model import train
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
    parser.add_argument('--graph-file-path', type=str)
parser.add_argument('--random-walk-length', type=int, default=2)
parser.add_argument('--random-walk-restart-prob', type=float, default=0.5)
parser.add_argument('--num-random-walks', type=int, default=10)
parser.add_argument('--num-neighbors', type=int, default=5)
parser.add_argument('--num-layers', type=int, default=2)
parser.add_argument('--gat-num-heads', type=int, default=3)
parser.add_argument('--hidden-dims', type=int, default=512)
parser.add_argument('--batch-size', type=int, default=256)
parser.add_argument('--device', type=str, default='cpu')
parser.add_argument('--num-epochs', type=int, default=1) # 4
parser.add_argument('--batches-per-epoch', type=int, default=50) # 5000
parser.add_argument('--num-workers', type=int, default=0)
parser.add_argument('--lr', type=float, default=3e-5)
parser.add_argument('-k', type=int, default=10)
args = parser.parse_args()
# Load dataset
with open(args.graph_file_path, 'rb') as f:
dataset = pickle.load(f)
model, h_item = train(dataset, args)
# Write files
torch.save(model.state_dict(), 'MultiSAGE_weights.pth')
np.savez("h_items.npz", movie_vectors=h_item.numpy())
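    # Follow-up sketch (assumption: cosine similarity as the retrieval metric,
    # which the training script does not prescribe): query the saved item
    # vectors for the nearest neighbours of item 0.
    vectors = np.load("h_items.npz")["movie_vectors"]
    query = vectors[0]
    scores = vectors @ query / (
        np.linalg.norm(vectors, axis=1) * np.linalg.norm(query) + 1e-8
    )
    print(np.argsort(-scores)[:10])  # indices of the ten most similar items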
|
447596
|
import urllib2
#from google.appengine.api import oauth
print "==================Create System and device==================================="
print "[CreateSystem]"
print urllib2.urlopen('http://localhost:8888/createsystem?id=a&holder=b&devices=c&fbps=d&gateways=e&wuclasses=f').read()
print "[CreateDevice]"
print urllib2.urlopen('http://localhost:8888/createdevice?id=DEVICEa&wuobject=b&type=c&capacity=d&network=e&loc=lll').read()
print "[FindDevice]"
a=urllib2.urlopen('http://localhost:8888/finddevice').read()
print a
print "==================Delete one device==================================="
variable = raw_input("Press Enter to continue...")
print len(a)
print "[DeleteDevice]"
print urllib2.urlopen('http://localhost:8888/deletedevice?Did=55521cdc491d4312a60ec845').read()
print "[FindDevice]"
print urllib2.urlopen('http://localhost:8888/finddevice').read()
|
447675
|
from ckan.common import _
"""
A template file for comment notification emails.
"""
subject = _("New comment in dataset '{dataset}'")
message = _("""\
User {user} ({email}) has left a comment in dataset ({dataset}):
--
Subject:
{comment_subject}
Message:
{comment}
--
{link}
Best regards
Avoindata.fi support
<EMAIL>
""")
|
447681
|
from pylimit.pyratelimit import PyRateLimit
from pylimit.pyratelimit_exception import PyRateLimitException
from pylimit.redis_helper import RedisHelper
|
447693
|
import pybullet as p
import time
p.connect(p.GUI)
t = time.time() + 0.1
logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "haha")
while (time.time() < t):
p.submitProfileTiming("pythontest")
p.stopStateLogging(logId)
|
447706
|
from rest_framework import routers
from .api import TruckViewSet
router = routers.DefaultRouter()
router.register('api/truck', TruckViewSet, 'truck')
urlpatterns = router.urls
|
447801
|
import os
import sys
from subprocess import call
import argparse
import time
from time import sleep
from threading import Thread, Event
import ssl
import urllib.request
import dbus
from flask import Flask, render_template, jsonify, request
from flask_socketio import SocketIO, emit
debug = False
# Use an unverified SSL context; certificate verification stopped working out of the box on Python 3.8
ssl._create_default_https_context = ssl._create_unverified_context
firstRunTimestamp = str(int(time.time()))
if os.path.isfile('has-run-before'):
with open('has-run-before', 'r') as f:
firstRunTimestamp = f.read()
else:
with open('has-run-before', 'w+') as f:
f.write(firstRunTimestamp)
parser = argparse.ArgumentParser(description='patchOS control panel')
parser.add_argument('--port', dest='port', type=int, default=80)
args = parser.parse_args()
bus = dbus.SystemBus()
systemd = bus.get_object(
'org.freedesktop.systemd1',
'/org/freedesktop/systemd1'
)
manager = dbus.Interface(
systemd,
'org.freedesktop.systemd1.Manager'
)
clientCount = 0
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = debug
socketio = SocketIO(app, async_mode=None, logger=debug, engineio_logger=debug)
thread = Thread()
thread_stop_event = Event()
def getJackServiceStatus():
status = 'inactive'
try:
jackService = bus.get_object(
'org.freedesktop.systemd1',
object_path = manager.GetUnit('jack.service')
)
jackServiceInterface = dbus.Interface(
jackService,
dbus_interface = 'org.freedesktop.DBus.Properties'
)
status = jackServiceInterface.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
    except Exception:
pass
return status
def getJacktripServiceStatus():
serverStatus = 'inactive'
clientStatus = 'inactive'
try:
jacktripServerService = bus.get_object(
'org.freedesktop.systemd1',
object_path = manager.GetUnit('jacktrip-server.service')
)
jacktripClientService = bus.get_object(
'org.freedesktop.systemd1',
object_path = manager.GetUnit('jacktrip-client.service')
)
jacktripServerServiceInterface = dbus.Interface(
jacktripServerService,
dbus_interface ='org.freedesktop.DBus.Properties'
)
jacktripClientServiceInterface = dbus.Interface(
jacktripClientService,
dbus_interface = 'org.freedesktop.DBus.Properties'
)
serverStatus = jacktripServerServiceInterface.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
clientStatus = jacktripClientServiceInterface.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
    except Exception:
pass
if serverStatus == 'inactive' and clientStatus == 'inactive':
return {'status': 'inactive', 'mode': 'undefined'}
if serverStatus != 'inactive':
return {'status': serverStatus, 'mode': 'server'}
if clientStatus != 'inactive':
return {'status': clientStatus, 'mode': 'client'}
def checkStatusLoop():
    while not thread_stop_event.is_set():
status = getJacktripServiceStatus()
socketio.emit('status', {'jack': getJackServiceStatus(), 'jacktrip': status['status'], 'jacktripMode': status['mode']})
socketio.sleep(5)
@app.route("/")
def index():
externalIp = urllib.request.urlopen('https://ident.me').read().decode('utf8')
lastConnectedIp = '0.0.0.0'
if os.path.isfile(os.path.join(sys.path[0], 'server-ip')):
with open(os.path.join(sys.path[0], 'server-ip'), 'r') as f:
lastConnectedIp = f.read().replace('JACKTRIP_SERVER_IP=', '')
templateData = {
'version': firstRunTimestamp,
'lastConnectedIp': lastConnectedIp,
'externalIp': externalIp
}
return render_template('index.html', **templateData)
@socketio.on('jacktrip-start-server')
def jacktripStart():
manager.StartUnit('jacktrip-server.service', 'replace')
@socketio.on('jacktrip-start-client')
def jacktripStartClient(serverIp):
with open(os.path.join(sys.path[0], 'server-ip'), 'w') as f:
f.write('JACKTRIP_SERVER_IP=' + serverIp)
manager.StartUnit('jacktrip-client.service', 'replace')
@socketio.on('jacktrip-stop')
def jacktripStop():
manager.ResetFailedUnit('jacktrip-server.service')
manager.ResetFailedUnit('jacktrip-client.service')
manager.StopUnit('jacktrip-server.service', 'replace')
manager.StopUnit('jacktrip-client.service', 'replace')
@socketio.on('connect')
def onConnect():
global thread
global clientCount
global thread_stop_event
thread_stop_event.clear()
clientCount += 1
if not thread.is_alive():
thread = socketio.start_background_task(checkStatusLoop)
@socketio.on('disconnect')
def onDisConnect():
global thread
global clientCount
global thread_stop_event
clientCount -= 1
if clientCount <= 0:
thread_stop_event.set()
@socketio.on('status?')
def queryStatus():
status = getJacktripServiceStatus()
emit('status', {'jack': getJackServiceStatus(), 'jacktrip': status['status'], 'jacktripMode': status['mode']})
@socketio.on('externalIp?')
def queryExternalIp():
emit('externalIp', urllib.request.urlopen('https://ident.me').read().decode('utf8'))
@socketio.on('shutdown?')
def shutdown():
call("shutdown now", shell=True)
if __name__ == "__main__":
socketio.run(app, host='0.0.0.0', port=args.port)
|
447802
|
import os
import time
import pytest
from geopandas import GeoDataFrame
from sentinelhub import BBox, Geometry
from eogrow.core.area import UtmZoneAreaManager
from eogrow.core.config import interpret_config_from_path
from eogrow.utils.vector import count_points
pytestmark = pytest.mark.fast
@pytest.fixture(scope="session", name="large_area_config")
def large_area_config_fixture(config_folder):
filename = os.path.join(config_folder, "other", "large_area_global_config.json")
return interpret_config_from_path(filename)
def test_area_shape(storage, config):
area_manager = UtmZoneAreaManager.from_raw_config(config["area"], storage)
area_dataframe = area_manager.get_area_dataframe()
assert isinstance(area_dataframe, GeoDataFrame)
assert len(area_dataframe.index) == 3
geometry = area_manager.get_area_geometry()
assert isinstance(geometry, Geometry)
@pytest.mark.parametrize(
"simplification_factor,point_count", [(0, 128), (0.00001, 64), (0.0001, 25), (0.001, 10), (0.1, 5)]
)
def test_area_shape_simplification(storage, config, simplification_factor, point_count):
config["area"]["area_simplification_factor"] = simplification_factor
area_manager = UtmZoneAreaManager.from_raw_config(config["area"], storage)
geometry = area_manager.get_area_geometry()
assert count_points(geometry.geometry) == point_count
# No idea how to use @pytest.mark.parametrize over config, large_area_config
def test_bbox_split(storage, config, large_area_config):
for area_config, expected_zone_num, expected_bbox_num in [
(config["area"], 1, 2),
(large_area_config, 61, 311),
]:
area_manager = UtmZoneAreaManager.from_raw_config(area_config, storage)
start_time = time.time()
grid = area_manager.get_grid(add_bbox_column=True)
splitting_time = time.time() - start_time
_check_area_grid(
grid,
expected_zone_num,
expected_bbox_num,
check_bboxes=True,
expected_columns=["index_n", "index_x", "index_y", "total_num", "geometry", "BBOX"],
)
start_time = time.time()
grid = area_manager.get_grid()
assert time.time() - start_time < max(splitting_time / 2, 1) # Checking if data is kept in the class
_check_area_grid(
grid,
expected_zone_num,
expected_bbox_num,
check_bboxes=False,
expected_columns=["index_n", "index_x", "index_y", "total_num", "geometry"],
)
area_manager.cache_grid()
def _check_area_grid(grid, expected_zone_num, expected_bbox_num, check_bboxes, expected_columns):
assert isinstance(grid, list)
assert len(grid) == expected_zone_num
bbox_count = 0
for subgrid in grid:
assert isinstance(subgrid, GeoDataFrame)
assert subgrid.columns.tolist() == expected_columns
bbox_count += len(subgrid.index)
if check_bboxes:
assert "BBOX" in subgrid
assert all(isinstance(item, BBox) for item in subgrid["BBOX"].values)
assert bbox_count == expected_bbox_num
|
447828
|
import json
from django.http import JsonResponse
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import (
csrf_protect,
ensure_csrf_cookie,
)
from django.views.generic import View
from django_graph_api.graphql.request import Request
class GraphQLView(View):
"""
    Django view that handles GraphQL queries.
``GET`` returns the HTML for the GraphiQL API explorer.
``POST`` accepts a JSON body in the form of::
{
"query": <query>,
"variables": <variables>
}
and returns a JSON response with a "data" and/or "error" object.
"""
graphiql_version = '0.11.11'
graphql_url = '/graphql'
template_name = 'django_graph_api/graphiql.html'
schema = None
@method_decorator(ensure_csrf_cookie)
@method_decorator(csrf_protect)
def dispatch(self, *args, **kwargs):
return super(GraphQLView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
return TemplateResponse(
request=self.request,
template=[self.template_name],
context={
'graphiql_version': self.graphiql_version,
'graphql_url': self.graphql_url,
},
)
def post(self, request, *args, **kwargs):
# Python 2 json library raises ValueError; Python 3 raises more specific error.
JSONDecodeError = getattr(json, 'JSONDecodeError', ValueError)
try:
request_data = self.get_request_data()
graphql_request = Request(
document=request_data['query'],
variables=request_data.get('variables'),
operation_name=request_data.get('operationName'),
schema=self.schema
)
except (KeyError, JSONDecodeError):
return JsonResponse({
'errors': [
{'message': 'Data must be json with a "query" key and optional "variables" key'},
],
})
graphql_request.validate()
data = None
errors = graphql_request.errors
if not errors:
data, errors = graphql_request.execute()
response = {}
if data:
response['data'] = data
if errors:
response['errors'] = [error.serialize() for error in errors]
return JsonResponse(response)
def get_request_data(self):
"""
Takes an incoming request and parses it into a dictionary containing
query and variables. For now we only support json dictionaries in
the style of GraphiQL, i.e. {"query": query, "variables": null}
"""
body = self.request.body.decode('utf-8')
return json.loads(body)
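
# Hypothetical client-side sketch (the URL and query are assumptions): the JSON
# body post() expects, exercised through Django's test client inside a
# configured project; the test client does not enforce the CSRF check that
# protects real requests.
if __name__ == "__main__":
    from django.test import Client

    response = Client().post(
        "/graphql",
        data=json.dumps({"query": "{ __schema { types { name } } }", "variables": None}),
        content_type="application/json",
    )
    print(response.json())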
|
447856
|
STACK_COUNT = 6
INITIAL_STACK_SIZE = 6
MAX_STACK_SIZE = 15
STACK_RANGE = range(STACK_COUNT)
class GameState:
def __init__(self):
self.actions_taken = 0
        # List of lists of card values
        # Finished stacks become None - the won state contains 4 Nones and 2 empty lists
self.stacks = []
# List of booleans
# True if the stack at index i has a cheated card on top
self.cheats = []
# Initialize all stacks as empty
for i in STACK_RANGE:
self.stacks.append([])
self.cheats.append(False)
def clone(self):
"""
Clones the given GameState object
"""
clone = GameState()
for i in STACK_RANGE:
clone.cheats[i] = self.cheats[i]
if self.stacks[i] is None:
clone.stacks[i] = None
continue
for j in range(len(self.stacks[i])):
# Copy each card from each stack
card = self.stacks[i][j]
clone.stacks[i].append(card)
clone.actions_taken = self.actions_taken
return clone
def is_won(self):
"""
Determine if the current state is the won end state
"""
# If any stack has open cards, return False
for i in STACK_RANGE:
if self.stacks[i] is not None and len(self.stacks[i]) > 0:
return False
return True
def query_stack_top(self, index):
"""
Return the card that is on top of the given stack at index. Returns None if stack is empty or finished
Does not remove the card from the stack
"""
stack = self.stacks[index]
if stack is None or len(stack) == 0:
return None
return stack[-1]
def get_total_card_count(self):
"""
Returns the total card count in the stacks
"""
return sum([len(x) for x in self.stacks])
def pull_from_stack(self, index, count):
"""
Return given number of cards from the given stack
Removes the said cards from the stack
"""
stack = self.stacks[index]
start = stack[:-count]
end = stack[-count:]
# Set the "new" stack and return the extra
self.stacks[index] = start
return end
def parse_card_into_stack(self, index, card):
"""
Puts the given card at the top of the stack, used in the image parsing
"""
self.stacks[index].append(card)
def get_legal_actions(self, allow_cheats):
"""
Returns all legal actions as a 2-tuple:
(from, to)
"from" is a 2-tuple
(stack_index, card_index)
with card_index meaning the index of the card in the given stack
"to" is a 4-tuple
(cheat, collapse, stack_index, stack_size)
with cheat meaning whether the move is a "cheating" move,
collapse meaning a stack collapse action,
stack_index being the target stack index, and
stack_size being the current size of the target stack (for replaying the actions accurately)
"""
actions = []
# Loop through all stacks, and list out all legal actions
for stack_index in STACK_RANGE:
stack = self.stacks[stack_index]
if stack is None:
continue
# Check for being able to collapse the stack (a top slice of it)
can_collapse = True
collapse_check_value = 6
for card_index in range(len(stack))[::-1]:
card = stack[card_index]
# Early exit conditions for cards that are not the topmost
if card_index < len(stack) - 1:
# If the value of the card is not +1 of the value on top of it, break from this stack loop
# (no cards below can be moved either)
if stack[card_index + 1] + 1 != stack[card_index]:
break
                    # Any card below a cheated card cannot be moved; break
                    if self.cheats[stack_index]:
                        break
# Check for collapsing
if collapse_check_value == card and can_collapse:
collapse_check_value += 1
                if card == 14:  # Add a collapse action if there is a free slot
can_collapse = False
empty_stack_index = self.get_empty_stack()
if empty_stack_index >= 0:
actions.append((
(stack_index, card_index), (False,
True, empty_stack_index, 0)
))
else:
can_collapse = False
# Check if the card can be placed onto any other stack
for target_stack_index in STACK_RANGE:
                    # Cannot move onto the same stack (neither legally nor by cheating)
                    if stack_index == target_stack_index:
                        continue
                    # Cannot move onto a finished stack (neither legally nor by cheating)
                    if self.stacks[target_stack_index] is None:
                        continue
                    # Cannot move onto a cheated stack
                    if self.cheats[target_stack_index]:
continue
if self.can_place(card, target_stack_index):
# Check if the action will perform a collapse
# Check that the target stack supports it - must start from 14 at the bottom and end somewhere
# before 6
target_card_value = 14
for i in self.stacks[target_stack_index]:
if i == target_card_value:
target_card_value -= 1
else:
target_card_value = -1
# Target stack supports collapse - now check the source stack
for i in range(len(self.stacks[stack_index])):
if i < card_index:
continue
if self.stacks[stack_index][i] == target_card_value:
target_card_value -= 1
# The action will perform a collapse if the above checks result in a target_card_value of 5
action_is_collapse = target_card_value == 5
actions.append((
(stack_index, card_index), (False,
action_is_collapse, target_stack_index, len(self.stacks[target_stack_index]))
))
else:
# Check for cheat moves (only for other stacks that have cards and where we cannot normally move)
# Can only cheat the topmost card
# Can not re-cheat a cheated card
if allow_cheats and card_index == len(stack) - 1 and not self.cheats[stack_index]:
actions.append((
(stack_index, card_index), (True,
False, target_stack_index, len(
self.stacks[target_stack_index]))
))
return actions
def can_place(self, card, stack_index):
"""
Returns true if the given card can be placed onto the given stack (legally)
"""
# Can always place on empty stack
if len(self.stacks[stack_index]) == 0:
return True
target_card = self.stacks[stack_index][-1]
return target_card == card + 1
def apply_action(self, action):
"""
Applies the given action to this state. Assumes that the action is valid.
"""
self.actions_taken += 1
action_from = action[0]
action_to = action[1]
# Moving a card or stack onto another stack
from_stack_index = action_from[0]
from_card_index = action_from[1]
to_cheat_state = action_to[0]
to_collapsing = action_to[1]
to_stack_index = action_to[2]
cards_to_pull = len(
self.stacks[from_stack_index]) - from_card_index
cards = self.pull_from_stack(from_stack_index, cards_to_pull)
if to_collapsing:
self.stacks[to_stack_index] = None
else:
self.stacks[to_stack_index] += cards
# Set the cheat state of the topmost card. Has a real effect only if moving a cheat card
self.cheats[to_stack_index] = to_cheat_state
# If moving a cheated card to a valid position, un-cheat that stack
if self.cheats[from_stack_index]:
self.cheats[from_stack_index] = False
def get_heuristic_value(self):
"""
Returns a heuristic value for choosing a state over another
"""
        score = 0
        # Completed stacks are very good
        # Empty slots are good
for stack in self.stacks:
if stack is None:
score += 50
elif len(stack) == 0:
score += 10
        # High stacks are good (consecutive cards)
for stack in self.stacks:
if stack is not None and len(stack) > 5:
score += (len(stack) - 5) * 2
# Lots of cheated cards is bad
score -= sum(self.cheats) * 15
return score
def get_empty_stack(self):
"""
Returns the index of a stack that is empty, or -1 if none are.
"""
for i in STACK_RANGE:
if self.stacks[i] is not None and len(self.stacks[i]) == 0:
return i
return -1
def __eq__(self, other):
for i in STACK_RANGE:
if self.stacks[i] != other.stacks[i]:
return False
return True
def hash_string(self):
stacks_hash = "-".join([",".join([str(y) for y in x])
if (x is not None and len(x) > 0)
else("C" if x is None else "E")
for x in self.stacks])
cheats_hash = "".join("C" if x else "L" for x in self.cheats)
return stacks_hash + "-" + cheats_hash
def __hash__(self):
return hash(self.hash_string())
def __str__(self):
return ("Board:\n" +
"\n".join([", ".join(
map(lambda slot: str(slot), self.stacks[stack_index])) + (" C" if self.cheats[stack_index] else "")
if self.stacks[stack_index] is not None else "COLLAPSED"
for stack_index in STACK_RANGE]
))
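
# Usage sketch, assuming the integer card values in the 6..14 range implied by
# the collapse checks above: seed two stacks and apply the first legal action.
if __name__ == "__main__":
    state = GameState()
    state.parse_card_into_stack(0, 7)
    state.parse_card_into_stack(1, 8)
    actions = state.get_legal_actions(allow_cheats=False)
    if actions:
        state.apply_action(actions[0])  # moves the 7 onto the 8
    print(state)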
|
447858
|
import numpy as np
def check_grad(f, X, e, args=()):
"""
This function checks the gradients of f by comparing them to
finite difference approximations. The partial derivatives
returned by f and the finite difference approximations are
printed for comparison.
Parameters
----------
f : function to minimize. The function must return the value
of the function (float) and a numpy array of partial
derivatives of shape (D,) with respect to X, where D is
the dimensionality of the function.
X : numpy array - Shape : (D, 1)
argument for function f that the partial derivatives
relate to.
e : float
size of the perturbation used for the finite differences.
args : tuple
Tuple of parameters to be passed to the function f.
Return
------
d : the norm of the difference divided by the norm of
the sum of the gradients and finite differences.
dy : gradients
dh : finite differences
"""
    y, dy = f(X, *args)
    dy = dy.reshape(-1, 1)
    dh = np.zeros_like(X)
    for i in range(X.shape[0]):
        dx = np.zeros_like(X)
        dx[i] = dx[i] + e
        y2, _ = f(X + dx, *args)
        dx = -dx
        y1, _ = f(X + dx, *args)
        dh[i] = (y2 - y1) / (2 * e)
vec = np.hstack((dy,dh))
print("Gradients vs finite difference:")
print(vec)
d = np.linalg.norm(dh - dy) / np.linalg.norm(dh + dy)
return d, dy, dh
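
# Quick self-check on a quadratic, whose analytic gradient is 2*x; the relative
# difference should come out very small (around 1e-9) if check_grad is correct.
if __name__ == "__main__":
    def quadratic(x):
        return float(np.sum(x ** 2)), 2 * x

    x0 = np.random.randn(3, 1)
    d, dy, dh = check_grad(quadratic, x0, 1e-5)
    print("relative difference:", d)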
|
447873
|
import sys
import os
PADDING_LEN = 0x100 + 32
def unrle(val):
out = []
for i in range(len(val)//2):
a, b = val[i*2], val[i*2+1]
if a == 0:
a = 256
out.append(bytes([b]) * a)
return b''.join(out)
def pad():
return b'AB' * (PADDING_LEN//4)
def main():
rv = pad()
rv += unrle(bytes.fromhex('004041b4')[::-1])
#rv += unrle(bytes.fromhex('00000000004041b4')[::-1])
with os.fdopen(sys.stdout.fileno(), "wb", closefd=False) as stdout:
stdout.write(rv)
stdout.flush()
if __name__ == '__main__':
main()
|
447889
|
import asyncio
import hashlib
from yarl import URL
from feedsearch_crawler.crawler.lib import to_bytes
class DuplicateFilter:
"""
Filters duplicate URLs.
"""
def __init__(self):
# Dictionary whose keys are the hashed fingerprints of the URLs
self.fingerprints = dict()
# Locks the fingerprints dict when accessing keys.
self._seen_lock = asyncio.Lock()
async def url_seen(self, url: URL, method: str = "") -> bool:
"""
Checks if the URL has already been seen, and adds the URL fingerprint if not.
:param url: URL object
:param method: Optional HTTP method to use for hashing
:return: True if URL already seen
"""
url_str: str = self.parse_url(url)
fp = self.url_fingerprint_hash(url_str, method)
async with self._seen_lock:
if fp in self.fingerprints:
return True
self.fingerprints[fp] = url_str
return False
def parse_url(self, url: URL) -> str:
"""
Parse the URL object to a string. Used for functionality such as filtering query strings.
:param url: URL object
:return: URL as string
"""
return str(url)
@staticmethod
def url_fingerprint_hash(url: str, method: str = "") -> str:
"""
Create a fingerprint hash of a URL string along with the method if provided.
:param url: URL as string
:param method: Optional HTTP method
:return: Hashed string
"""
# noinspection InsecureHash
fp = hashlib.sha1()
fp.update(to_bytes(url))
if method:
fp.update(to_bytes(method))
return fp.hexdigest()
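
# Usage sketch driving the filter from asyncio:
async def _demo():
    dupe_filter = DuplicateFilter()
    url = URL("https://example.com/feed")
    print(await dupe_filter.url_seen(url, "GET"))  # False on first sighting
    print(await dupe_filter.url_seen(url, "GET"))  # True thereafter

if __name__ == "__main__":
    asyncio.run(_demo())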
|
447938
|
from keras.preprocessing import image as image_utils
from imagenet_utils import decode_predictions
from imagenet_utils import preprocess_input
from vgg16 import VGG16
import argparse
import cv2
import numpy as np
import os
import random
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--folder", required=True)
args = vars(ap.parse_args())
files = [os.path.join(args["folder"], f) for f in os.listdir(args["folder"])]
random.shuffle(files)
# Load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")
for file in files:
# Load the image using OpenCV
orig = cv2.imread(file)
    # Load the image using the Keras helper utility
print("[INFO] loading and preprocessing image...")
image = image_utils.load_img(file, target_size=(224, 224))
image = image_utils.img_to_array(image)
    # Convert (3, 224, 224) to (1, 3, 224, 224)
    # Here "1" is the number of images passed to the network;
    # we need it to pass a batch containing several images in a real project
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)
# Classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
(inID, label) = decode_predictions(preds)[0]
# Display the predictions
print("ImageNet ID: {}, Label: {}".format(inID, label))
cv2.putText(orig, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
cv2.imshow("Classification", orig)
cv2.waitKey(0)
|
447962
|
import numpy as np
from gym import error, spaces
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.base_env import ActionTuple
from mlagents_envs.side_channel.environment_parameters_channel import (EnvironmentParametersChannel,)
from mlagents_envs.side_channel.engine_configuration_channel import (EngineConfigurationChannel,)
from neroRL.environments.env import Env
from random import randint
class UnityWrapper(Env):
"""This class wraps Unity environments.
This wrapper has notable constraints:
- Only one agent (no multi-agent environments).
- Only one visual observation
- Only discrete and multi-discrete action spaces (no continuous action space)"""
    def __init__(self, env_path, reset_params, worker_id = 1, no_graphics = False, realtime_mode = False, record_trajectory = False):
"""Instantiates the Unity Environment from a specified executable.
Arguments:
env_path {string} -- Path to the executable of the environment
reset_params {dict} -- Reset parameters of the environment such as the seed
Keyword Arguments:
            worker_id {int} -- Port of the environment's instance (default: {1})
            no_graphics {bool} -- Whether to run the executable without rendering (default: {False})
realtime_mode {bool} -- Whether to run the environment in real time or as fast as possible (default: {False})
record_trajectory {bool} -- Whether to record the trajectory of an entire episode. This can be used for video recording. (default: {False})
"""
# Initialize channels
self.reset_parameters = EnvironmentParametersChannel()
self.engine_config = EngineConfigurationChannel()
# Prepare default reset parameters
self._default_reset_parameters = {}
for key, value in reset_params.items():
self._default_reset_parameters[key] = value
if key != "start-seed" or key != "num-seeds":
self.reset_parameters.set_float_parameter(key, value)
self._realtime_mode = realtime_mode
if realtime_mode:
self.engine_config.set_configuration_parameters(time_scale=1.0, width=1280, height=720)
else:
self.engine_config.set_configuration_parameters(time_scale=30.0, width=256, height=256)
# Whether to record the trajectory of an entire episode
self._record = record_trajectory
# Launch the environment's executable
        self._env = UnityEnvironment(file_name = env_path, worker_id = worker_id, no_graphics = no_graphics, side_channels=[self.reset_parameters, self.engine_config])
        # If the Unity Editor should be used instead of a build:
        # self._env = UnityEnvironment(file_name = None, worker_id = 0, no_graphics = no_graphics, side_channels=[self.reset_parameters, self.engine_config])
# Reset the environment
self._env.reset()
# Retrieve behavior configuration
self._behavior_name = list(self._env.behavior_specs)[0]
self._behavior_spec = self._env.behavior_specs[self._behavior_name]
# Check whether this Unity environment is supported
self._verify_environment()
# Set action space properties
if self._behavior_spec.action_spec.is_discrete():
num_action_branches = self._behavior_spec.action_spec.discrete_size
action_branch_dimensions = self._behavior_spec.action_spec.discrete_branches
if num_action_branches == 1:
self._action_space = spaces.Discrete(action_branch_dimensions[0])
else:
self._action_space = spaces.MultiDiscrete(action_branch_dimensions)
# Count visual and vector observations
self._num_vis_obs, self._num_vec_obs = 0, 0
self._vec_obs_indices = []
        for index, obs_spec in enumerate(self._behavior_spec.observation_specs):
            if len(obs_spec.shape) > 1:
                self._num_vis_obs = self._num_vis_obs + 1
                self._vis_obs_index = index
            else:
                self._num_vec_obs = self._num_vec_obs + 1
                self._vec_obs_indices.append(index)
# Set visual observation space property
if self._num_vis_obs == 1:
vis_obs_shape = self._behavior_spec.observation_specs[self._vis_obs_index].shape
self._visual_observation_space = spaces.Box(
low = 0,
high = 1.0,
shape = vis_obs_shape,
dtype = np.float32)
else:
self._visual_observation_space = None
# Set vector observation space property
if self._num_vec_obs > 0:
            # Determine the length of the vector observation by summing the lengths of the distinct ones
            vec_obs_length = sum(self._behavior_spec.observation_specs[i].shape[0] for i in self._vec_obs_indices)
            self._vector_observation_space = (vec_obs_length, )
        else:
            self._vector_observation_space = None
# Videos can only be recorded if the environment provides visual observations
        if self._record and self._visual_observation_space is None:
            raise UnityEnvironmentException("Videos cannot be rendered for a Unity environment that does not provide visual observations.")
@property
def unwrapped(self):
"""
Returns:
{UnityWrapper} -- Environment in its vanilla (i.e. unwrapped) state
"""
return self
@property
def action_space(self):
"""Returns the shape of the action space of the agent."""
return self._action_space
@property
def action_names(self):
return None
@property
def get_episode_trajectory(self):
"""Returns the trajectory of an entire episode as dictionary (vis_obs, vec_obs, rewards, actions).
"""
self._trajectory["action_names"] = self.action_names
return self._trajectory if self._trajectory else None
@property
def visual_observation_space(self):
return self._visual_observation_space
@property
def vector_observation_space(self):
        return self._vector_observation_space
def reset(self, reset_params = None):
"""Resets the environment based on a global or just specified config.
Keyword Arguments:
config {dict} -- Reset parameters to configure the environment (default: {None})
Returns:
{numpy.ndarray} -- Visual observation
{numpy.ndarray} -- Vector observation
"""
# Track rewards of an entire episode
self._rewards = []
        # Use the default reset parameters unless new ones were specified
        if reset_params is None:
            reset_params = self._default_reset_parameters
# Apply reset parameters
for key, value in reset_params.items():
# Skip reset parameters that are not used by the Unity environment
            if key != "start-seed" and key != "num-seeds":
self.reset_parameters.set_float_parameter(key, value)
        # Sample the seed to use
if reset_params["start-seed"] > -1:
seed = randint(reset_params["start-seed"], reset_params["start-seed"] + reset_params["num-seeds"] - 1)
else:
# Use unlimited seeds
seed = -1
self.reset_parameters.set_float_parameter("seed", seed)
# Reset and verify the environment
self._env.reset()
info, terminal_info = self._env.get_steps(self._behavior_name)
self._verify_environment()
# Retrieve initial observations
vis_obs, vec_obs, _, _ = self._process_agent_info(info, terminal_info)
# Prepare trajectory recording
self._trajectory = {
"vis_obs": [vis_obs * 255], "vec_obs": [vec_obs],
"rewards": [0.0], "actions": []
}
return vis_obs, vec_obs
def step(self, action):
"""Runs one timestep of the environment"s dynamics.
Once an episode is done, reset() has to be called manually.
Arguments:
action {List} -- A list of at least one discrete action to be executed by the agent
Returns:
{numpy.ndarray} -- Visual observation
{numpy.ndarray} -- Vector observation
{float} -- (Total) Scalar reward signaled by the environment
{bool} -- Whether the episode of the environment terminated
{dict} -- Further episode information (e.g. cumulated reward) retrieved from the environment once an episode completed
"""
# Carry out the agent's action
action_tuple = ActionTuple()
action_tuple.add_discrete(np.asarray(action).reshape([1, -1]))
self._env.set_actions(self._behavior_name, action_tuple)
self._env.step()
info, terminal_info = self._env.get_steps(self._behavior_name)
# Process step results
vis_obs, vec_obs, reward, done = self._process_agent_info(info, terminal_info)
self._rewards.append(reward)
# Record trajectory data
if self._record:
self._trajectory["vis_obs"].append(vis_obs * 255)
self._trajectory["vec_obs"].append(vec_obs)
self._trajectory["rewards"].append(reward)
self._trajectory["actions"].append(action)
# Episode information
if done:
info = {"reward": sum(self._rewards),
"length": len(self._rewards)}
else:
info = None
return vis_obs, vec_obs, reward, done, info
def close(self):
"""Shut down the environment."""
self._env.close()
def _process_agent_info(self, info, terminal_info):
"""Extracts the observations, rewards, dones, and episode infos.
Args:
info {DecisionSteps}: Current state
terminal_info {TerminalSteps}: Terminal state
Returns:
vis_obs {ndarray} -- Visual observation if available, else None
vec_obs {ndarray} -- Vector observation if available, else None
reward {float} -- Reward signal from the environment
done {bool} -- Whether the episode terminated or not
"""
# Determine if the episode terminated or not
if len(terminal_info) == 0:
done = False
use_info = info
else:
done = True
use_info = terminal_info
# Process visual observations
if self.visual_observation_space is not None:
vis_obs = use_info.obs[self._vis_obs_index][0]
else:
vis_obs = None
# Process vector observations
if self.vector_observation_space is not None:
for i, dim in enumerate(self._vec_obs_indices):
if i == 0:
vec_obs = use_info.obs[dim][0]
else:
vec_obs = np.concatenate((vec_obs, use_info.obs[dim][0]))
else:
vec_obs = None
return vis_obs, vec_obs, use_info.reward[0], done
def _verify_environment(self):
# Verify number of agent behavior types
if len(self._env.behavior_specs) != 1:
raise UnityEnvironmentException("The unity environment containts more than one agent type.")
# Verify number of agents
decision_steps, _ = self._env.get_steps(self._behavior_name)
if len(decision_steps) > 1:
raise UnityEnvironmentException("The unity environment contains more than one agent, which is not supported.")
# Verify action space type
if not self._behavior_spec.action_spec.is_discrete() or self._behavior_spec.action_spec.is_continuous():
raise UnityEnvironmentException("Continuous action spaces are not supported. "
"Only discrete and MultiDiscrete spaces are supported.")
# Verify that at least one observation is provided
num_vis_obs = 0
num_vec_obs = 0
for obs_spec in self._behavior_spec.observation_specs:
if len(obs_spec.shape) == 3:
num_vis_obs += 1
            elif len(obs_spec.shape) == 1:
num_vec_obs += 1
if num_vis_obs == 0 and num_vec_obs == 0:
raise UnityEnvironmentException("The unity environment does not contain any observations.")
# Verify number of visual observations
if num_vis_obs > 1:
raise UnityEnvironmentException("The unity environment contains more than one visual observation.")
class UnityEnvironmentException(error.Error):
"""Any error related to running the Unity environment."""
pass
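
# Hypothetical driver sketch; "build/Env.x86_64" and the reset parameters are
# placeholders for a real Unity build and its configuration.
if __name__ == "__main__":
    env = UnityWrapper("build/Env.x86_64", {"start-seed": 0, "num-seeds": 10})
    vis_obs, vec_obs = env.reset()
    done = False
    while not done:
        action = [env.action_space.sample()]  # sample a random action
        vis_obs, vec_obs, reward, done, info = env.step(action)
    env.close()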
|
447975
|
from dbnd_luigi.luigi_tracking import dbnd_luigi_run
if __name__ == "__main__":
dbnd_luigi_run()
|
447992
|
import rdkit.Chem.AllChem as rdkit
def test_get_num_atoms(case_data):
"""
Test :meth:`.Molecule.get_num_atoms`.
Parameters
----------
case_data : :class:`.CaseData`
A test case. Holds the molecule to test and its correct SMILES.
Returns
-------
None : :class:`NoneType`
"""
_test_get_num_atoms(case_data.molecule, case_data.smiles)
def _test_get_num_atoms(molecule, smiles):
"""
Test :meth:`.Molecule.get_num_atoms`.
Parameters
----------
molecule : :class:`.Molecule`
The molecule to test.
smiles : :class:`str`
The correct SMILES for `molecule`.
Returns
-------
None : :class:`NoneType`
"""
expected = rdkit.MolFromSmiles(smiles, sanitize=False)
assert molecule.get_num_atoms() == expected.GetNumAtoms()
|
448024
|
from django.contrib.auth.models import Permission
from django.urls import reverse_lazy
from django.utils.text import slugify
from model_bakery import baker
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from sponsors.models import Sponsor
from sponsors.models.enums import LogoPlacementChoices, PublisherChoices
class LogoPlacementAPIListTests(APITestCase):
url = reverse_lazy("logo_placement_list")
def setUp(self):
self.user = baker.make('users.User')
token = Token.objects.get(user=self.user)
self.permission = Permission.objects.get(name='Can access sponsor placement API')
self.user.user_permissions.add(self.permission)
self.authorization = f'Token {token.key}'
self.sponsors = baker.make(Sponsor, _create_files=True, _quantity=3)
def tearDown(self):
for sponsor in Sponsor.objects.all():
if sponsor.web_logo:
sponsor.web_logo.delete()
if sponsor.print_logo:
sponsor.print_logo.delete()
def test_list_logo_placement_as_expected(self):
sp1, sp2, sp3 = baker.make_recipe("sponsors.tests.finalized_sponsorship", sponsor=iter(self.sponsors), _quantity=3)
baker.make_recipe("sponsors.tests.logo_at_download_feature", sponsor_benefit__sponsorship=sp1)
baker.make_recipe("sponsors.tests.logo_at_sponsors_feature", sponsor_benefit__sponsorship=sp1)
baker.make_recipe("sponsors.tests.logo_at_sponsors_feature", sponsor_benefit__sponsorship=sp2)
baker.make_recipe("sponsors.tests.logo_at_pypi_feature", sponsor_benefit__sponsorship=sp3, link_to_sponsors_page=True, describe_as_sponsor=True)
response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
data = response.json()
self.assertEqual(200, response.status_code)
self.assertEqual(4, len(data))
self.assertEqual(2, len([p for p in data if p["flight"] == LogoPlacementChoices.SPONSORS_PAGE.value]))
self.assertEqual(1, len([p for p in data if p["flight"] == LogoPlacementChoices.DOWNLOAD_PAGE.value]))
self.assertEqual(1, len([p for p in data if p["flight"] == LogoPlacementChoices.SIDEBAR.value]))
self.assertEqual(2, len([p for p in data if p["sponsor"] == self.sponsors[0].name]))
self.assertEqual(1, len([p for p in data if p["sponsor"] == self.sponsors[1].name]))
self.assertEqual(1, len([p for p in data if p["sponsor"] == self.sponsors[2].name]))
self.assertEqual(
None,
[p for p in data if p["publisher"] == PublisherChoices.FOUNDATION.value][0]['sponsor_url']
)
self.assertEqual(
f"http://testserver/psf/sponsors/#{slugify(sp3.sponsor.name)}",
[p for p in data if p["publisher"] == PublisherChoices.PYPI.value][0]['sponsor_url']
)
self.assertCountEqual(
[sp1.sponsor.description, sp1.sponsor.description, sp2.sponsor.description],
[p['description'] for p in data if p["publisher"] == PublisherChoices.FOUNDATION.value]
)
self.assertEqual(
[f"{sp3.sponsor.name} is a {sp3.level_name} sponsor of the Python Software Foundation."],
[p['description'] for p in data if p["publisher"] == PublisherChoices.PYPI.value]
)
def test_invalid_token(self):
Token.objects.all().delete()
response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
self.assertEqual(401, response.status_code)
def test_superuser_user_have_permission_by_default(self):
self.user.user_permissions.remove(self.permission)
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
self.assertEqual(200, response.status_code)
def test_staff_have_permission_by_default(self):
self.user.user_permissions.remove(self.permission)
self.user.is_staff = True
self.user.save()
response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
self.assertEqual(200, response.status_code)
def test_user_must_have_required_permission(self):
self.user.user_permissions.remove(self.permission)
response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
self.assertEqual(403, response.status_code)
|
448067
|
import os
import time
import torch
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from utils import *
from options import get_args
from dataloader import nyudv2_dataloader
from models.loss import cal_spatial_loss, cal_temporal_loss
from models.backbone_dict import backbone_dict
from models import modules
from models import net
cudnn.benchmark = True
args = get_args('train')
os.environ['CUDA_VISIBLE_DEVICES'] = args.devices
# Create folder
makedir(args.checkpoint_dir)
makedir(args.logdir)
# create summary logger
logger = SummaryWriter(args.logdir)
# dataset, dataloader
TrainImgLoader = nyudv2_dataloader.getTrainingData_NYUDV2(args.batch_size, args.trainlist_path, args.root_path)
# model, optimizer
device = 'cuda' if torch.cuda.is_available() and args.use_cuda else 'cpu'
backbone = backbone_dict[args.backbone]()
Encoder = modules.E_resnet(backbone)
if args.backbone in ['resnet50']:
model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048], refinenet=args.refinenet)
elif args.backbone in ['resnet18', 'resnet34']:
model = net.model(Encoder, num_features=512, block_channel=[64, 128, 256, 512], refinenet=args.refinenet)
model = nn.DataParallel(model).cuda()
disc = net.C_C3D_1().cuda()
optimizer = build_optimizer(model = model,
learning_rate=args.lr,
optimizer_name=args.optimizer_name,
weight_decay = args.weight_decay,
epsilon=args.epsilon,
momentum=args.momentum
)
start_epoch = 0
if args.resume:
all_saved_ckpts = [ckpt for ckpt in os.listdir(args.checkpoint_dir) if ckpt.endswith(".pth.tar")]
print(all_saved_ckpts)
all_saved_ckpts = sorted(all_saved_ckpts, key=lambda x:int(x.split('_')[-1].split('.')[0]))
loadckpt = os.path.join(args.checkpoint_dir, all_saved_ckpts[-1])
start_epoch = int(all_saved_ckpts[-1].split('_')[-1].split('.')[0])
print("loading the lastest model in checkpoint_dir: {}".format(loadckpt))
state_dict = torch.load(loadckpt)
model.load_state_dict(state_dict)
elif args.loadckpt is not None:
print("loading model {}".format(args.loadckpt))
    start_epoch = int(args.loadckpt.split('_')[-1].split('.')[0])
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict)
else:
print("start at epoch {}".format(start_epoch))
def train():
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args.lr)
batch_time = AverageMeter()
losses = AverageMeter()
model.train()
end = time.time()
for batch_idx, sample in enumerate(TrainImgLoader):
image, depth = sample[0], sample[1]#(b,c,d,w,h)
depth = depth.cuda()
image = image.cuda()
image = torch.autograd.Variable(image)
depth = torch.autograd.Variable(depth)
optimizer.zero_grad()
global_step = len(TrainImgLoader) * epoch + batch_idx
gt_depth = depth
pred_depth = model(image)#(b, c, d, h, w)
# Calculate the total loss
spatial_losses=[]
for seq_idx in range(image.size(2)):
spatial_loss = cal_spatial_loss(pred_depth[:,:,seq_idx,:,:], gt_depth[:,:,seq_idx,:,:])
spatial_losses.append(spatial_loss)
spatial_loss = sum(spatial_losses)
pred_cls = disc(pred_depth)
gt_cls = disc(gt_depth)
temporal_loss = cal_temporal_loss(pred_cls, gt_cls)
loss = spatial_loss + 0.1 * temporal_loss
losses.update(loss.item(), image.size(0))
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
batchSize = depth.size(0)
print(('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})'
.format(epoch, batch_idx, len(TrainImgLoader), batch_time=batch_time, loss=losses)))
if (epoch+1)%1 == 0:
save_checkpoint(model.state_dict(), filename=args.checkpoint_dir + "ResNet18_checkpoints_small_" + str(epoch + 1) + ".pth.tar")
if __name__ == '__main__':
train()
|
448124
|
def isAnagram(string1, string2):
"""Checks if two strings are an anagram
An anagram is a word or phrase formed by rearranging the letters
of a different word or phrase.
This implementation ignores spaces and case.
@param string1 The first word or phrase
@param string2 The second word or phrase
@return A boolean representing if the strings are an anagram or not
"""
# Remove spaces
str1_nospace = string1.replace(" ", "")
str2_nospace = string2.replace(" ", "")
# Convert to lowercase and sort
list1 = list(str1_nospace.lower())
list1.sort()
list2 = list(str2_nospace.lower())
list2.sort()
# Check for equality
return (list1 == list2)
# Test cases
assert isAnagram('chair', 'archi') == True
assert isAnagram('Elbow', 'Below') == True
assert isAnagram('More', 'Moore') == False
assert isAnagram('Johnathan', 'Jonathan') == False
assert isAnagram('Dormitory', 'Dirty Room') == True
assert isAnagram('Conversation', 'Voices rant on') == True
|
448165
|
import json
import logging
import src.server.cea_608_encoder.caption_string_utility as utils
import src.server.cea_608_encoder.scene_utility as scene_utils
import src.server.config as config
# At a future time this could live in its own config file
# Leaving it here temporarily
supported_caption_formats = [
'CEA-608'
]
def write_caption_data_to_file(caption_data: dict, file_name: str):
"""Writes caption data to file as json.
:param caption_data: json with byte pairs
:param file_name: used for saving resulting file
"""
path = config.path_to_data_folder
try:
with open(path + file_name, 'w', encoding='utf-8') as file:
json.dump(caption_data, file, ensure_ascii=False, indent=4)
except IOError as err:
logging.error(f'Could not write JSON to file: {err}')
def consume(caption_data: dict, time_stamp: str) -> list:
"""Perform error handling around caption format and ensure
there are scenes to create byte pairs for.
:param caption_data: the full JSON blob from the front end
:param time_stamp: the date time the request is received
"""
errors = []
    if 'caption_format' not in caption_data:
        errors.append('You must specify a caption format')
    elif caption_data['caption_format'] not in supported_caption_formats:
        caption_format = caption_data['caption_format']
        errors.append(f'The supplied caption format {caption_format} is not supported.')
if 'scene_list' not in caption_data:
errors.append(f'Cannot encode byte pairs with an empty scene list.')
if errors:
return errors
scene_data = caption_data['scene_list']
caption_format = caption_data['caption_format']
file_name = caption_data['file_name'] + f'_output_{time_stamp}.json'
scene_bytes, scene_errors = consume_scenes(scene_data)
caption_data = {
'type': caption_format,
'scenes': scene_bytes
}
if scene_errors:
return scene_errors
write_caption_data_to_file(caption_data, file_name)
return None
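# A minimal sketch of the expected input for consume(), assuming only the fields
# referenced in the checks above (the real front-end payload may carry more keys):
#
#     caption_data = {
#         "caption_format": "CEA-608",
#         "file_name": "example_project",
#         "scene_list": [
#             {"scene_id": 1,
#              "start": {"time": 0.0},
#              "caption_list": [{"caption_id": 1, "caption_string": "Hello world"}]},
#         ],
#     }
#     consume(caption_data, "2021-01-01T00-00-00")  # returns None on success, else a list of errors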
def consume_scenes(scene_list: list) -> tuple:
"""Iterate over the list of scenes and create bytes for fields that
are set in the scene data. Call the consume function for caption
strings to return byte pairs for caption strings inside a scene.
:param scene_list:
:return: scene_data
"""
scene_data = []
errors = []
for scene in scene_list:
scene_errors = []
current_scene_data = {
'data': []
}
if 'scene_id' not in scene:
scene_errors.append(f'Every scene must have a scene ID')
if 'start' not in scene:
scene_errors.append(f'\tdoes not have a start time')
else:
start = scene['start']
current_scene_data['start'] = start
# append RCL.
current_scene_data['data'].extend(scene_utils.create_byte_pairs_for_control_command(
scene_utils.get_resume_caption_loading_bytes()
))
# append ENM.
current_scene_data['data'].extend(scene_utils.create_byte_pairs_for_control_command(
scene_utils.get_erase_non_displayed_memory_bytes()
))
# append the Char Bytepairs.
caption_list, caption_errors = consume_captions(scene['caption_list'])
current_scene_data['data'].extend(caption_list)
scene_errors.extend(caption_errors)
# append EOC.
current_scene_data['data'].extend(scene_utils.create_byte_pairs_for_control_command(
scene_utils.get_end_of_caption_bytes()
))
scene_data.append(current_scene_data)
if scene_errors:
scene_errors.insert(0, f'Errors encountered while consuming scene with ID: {scene["scene_id"]}')
errors.extend(scene_errors)
errors.extend(validate_scene_ids(scene_list))
errors.extend(validate_start_times(scene_list))
return scene_data, errors
def consume_captions(caption_list: list) -> tuple:
"""Iterate over the list of captions in a scene and create bytes pairs
for the list of caption strings and properties that the strings have.
:param caption_list:
:return: caption_bytes
"""
caption_bytes = []
errors = []
for caption in caption_list:
caption_errors = []
if 'caption_id' not in caption:
caption_errors.append(f'Every caption must have a caption ID')
foreground_color_and_underline_style_changes = {}
if 'foreground_color' in caption and 'color' in caption['foreground_color']:
foreground_color = caption['foreground_color']['color']
foreground_color_and_underline_style_changes['color'] = foreground_color
if 'underline' in caption:
underlined = caption['underline']
foreground_color_and_underline_style_changes['underline'] = underlined
if 'position' in caption:
text_position = caption['position']
if 'row' in text_position and not (text_position['row'] == ""):
text_row_position = text_position['row']
else:
                text_row_position = 11  # Default row position
if 'column' in text_position and not (text_position['column'] == ""):
text_column_position = text_position['column']
else:
                text_column_position = 0  # Default column position
if 'underline' in foreground_color_and_underline_style_changes \
and foreground_color_and_underline_style_changes['underline'] == "true":
text_underlined = True
else:
text_underlined = False
caption_position_bytes, preamble_errors = utils.create_byte_pairs_for_preamble_address(int(text_row_position),
int(text_column_position),
text_underlined)
caption_errors.extend(preamble_errors)
caption_bytes.extend(caption_position_bytes)
if foreground_color_and_underline_style_changes:
midrow_bytes, midrow_errors = utils.create_byte_pairs_for_midrow_style(
**foreground_color_and_underline_style_changes)
caption_bytes.extend(midrow_bytes)
caption_errors.extend(midrow_errors)
background_color_and_transparency_style_changes = {}
if 'background_color' in caption and 'color' in caption['background_color']:
color = caption['background_color']['color']
background_color_and_transparency_style_changes['color'] = color
if 'transparency' in caption:
transparency = caption['transparency']
background_color_and_transparency_style_changes['transparency'] = transparency
if background_color_and_transparency_style_changes:
background_bytes, background_errors = utils.create_bytes_for_scene_background_color(
**background_color_and_transparency_style_changes)
caption_bytes.extend(background_bytes)
caption_errors.extend(background_errors)
if 'caption_string' in caption and caption['caption_string']:
string, string_errors = utils.create_byte_pairs_for_caption_string(caption['caption_string'])
caption_bytes.extend(string)
caption_errors.extend(string_errors)
else:
caption_errors.append(f'\t\tYou must specify a caption string')
if caption_errors:
caption_errors.insert(0, f'\tErrors encountered while consuming caption with ID: {caption["caption_id"]}')
errors.extend(caption_errors)
errors.extend(validate_caption_ids(caption_list))
return caption_bytes, errors
def validate_scene_ids(scene_list: list):
"""Validates the scene IDs to look for duplicate IDs
:param scene_list:
"""
scene_ids = {}
conflicting_id_errors = []
for scene in scene_list:
for key,value in scene.items():
if key == "scene_id":
if value not in scene_ids:
scene_ids[value] = 1
else:
scene_ids[value] = scene_ids.get(value) + 1
for id, number_of_that_id in scene_ids.items():
if number_of_that_id > 1:
conflicting_id_errors.append(f'There are duplicate scene IDs {id}.')
return conflicting_id_errors
def validate_caption_ids(caption_list: list):
"""Validates the caption IDs to look for duplicate IDs
:param caption_list:
"""
caption_ids = {}
conflicting_id_errors = []
for caption in caption_list:
for key, value in caption.items():
if key == "caption_id":
if value not in caption_ids:
caption_ids[value] = 1
else:
caption_ids[value] = caption_ids.get(value) + 1
for id, number_of_that_id in caption_ids.items():
if number_of_that_id > 1:
conflicting_id_errors.append(f'There are duplicate caption IDs {id}.')
return conflicting_id_errors
def validate_start_times(scene_list: list) -> list:
"""Checks if multiple scenes have the same start time
:param scene_list:
"""
errors = []
start_times = {}
for scene in scene_list:
for key, value in scene.items():
if key == "start":
scene_time = value["time"]
if scene_time not in start_times:
start_times[scene_time] = [1, [scene["scene_id"]]]
else:
start_times[scene_time][0] = start_times[scene_time][0] + 1
start_times[scene_time][1].append(scene["scene_id"])
for time, number_and_ids in start_times.items():
if number_and_ids[0] > 1:
errors.append(f'Scenes with the IDs {number_and_ids[1]} are starting at the same time of {time}.')
return errors
|
448197
|
from unittest import TestCase
from app import app
from i18n.i18n import I18n
class MockApp(object):
def add_template_filter(self, fn):
pass
class IntegrationTestBase(TestCase):
def setUp(self):
I18n(app)
app.testing = True
self.app = app.test_client()
def _assertStatusCode(self, code, response):
self.assertEqual(code, response.status_code)
def assertSuccess(self, response):
self._assertStatusCode(200, response)
def assertCreated(self, response):
self._assertStatusCode(201, response)
def assertRedirect(self, response):
self._assertStatusCode(302, response)
def assertNotFound(self, response):
self._assertStatusCode(404, response)
def assertConflict(self, response):
self._assertStatusCode(409, response)
|
448223
|
import itertools
import os
import platform
import signal
import stat
import sys
import subprocess
import time
from .options import get_parser, parse_args, validate
SKIP_DIRS = [
'.bzr', '.cache', '.git', '.hg', '.pytest_cache', '.svn',
'__pycache__', 'build', 'dist', 'node_modules',
]
SKIP_EXT = ['.pyc', '.pyo']
try:
# Python2
filterfalse = itertools.ifilterfalse
except AttributeError:
# Python3
filterfalse = itertools.filterfalse
def is_ignorable(filename, ignores):
'''
Returns True if filename is in 'ignores' or ends with a SKIP_EXT
'''
return (
any(os.path.basename(filename) == i for i in ignores) or
any(filename.endswith(skip) for skip in SKIP_EXT)
)
def get_file_mtime(filename):
return os.lstat(filename)[stat.ST_MTIME]
def skip_dirs(dirs, skips):
for skip in skips:
if skip in dirs:
dirs.remove(skip)
file_stat_cache = {}
def has_file_changed(filename):
'''
Has the given file changed since last invocation?
'''
try:
mtime = get_file_mtime(filename)
except FileNotFoundError:
if filename in file_stat_cache:
del file_stat_cache[filename]
return True
if (
filename not in file_stat_cache or
file_stat_cache[filename] != mtime
):
file_stat_cache[filename] = mtime
return True
return False
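# Illustrative behaviour (hypothetical path): the first call to
# has_file_changed('./foo.py') returns True and caches the file's mtime; later
# calls return False until the mtime changes. If the file disappears, the next
# call returns True and its cache entry is dropped.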
def get_changed_files(ignores):
'''
Walks subdirs of cwd, looking for files which have changed since last
invocation.
'''
changed_files = []
for root, dirs, files in os.walk('.'):
skip_dirs(dirs, ignores)
for filename in files:
relname = os.path.join(root, filename)
if (
has_file_changed(relname) and
not is_ignorable(relname, ignores)
):
changed_files.append(relname)
return changed_files
def clear_screen():
if platform.system().lower().startswith('win'):
os.system('cls')
else:
os.system('clear')
def run_command_in_shell(command, shell):
subprocess.call(command, shell=True, executable=shell)
def run_command_in_interactive_shell(command, shell):
try:
subprocess.call([shell, '-i', '-c', command])
finally:
# The terminal was attached to the interactive shell we just
# started, and left in limbo when that shell terminated. Retrieve
        # it for this process group, so that we can still print and receive
# keypresses.
os.tcsetpgrp(0, os.getpgrp())
def run_command(command, shell, interactive):
if interactive:
run_command_in_interactive_shell(command, shell)
else:
run_command_in_shell(command, shell)
def act(changed_files, options, first_time):
'''
Runs the user's specified command.
'''
clear_screen()
print(options.command)
if options.verbose and not first_time:
print(', '.join(sorted(changed_files)))
# Launch the user's given command in an interactive shell, so that aliases
# & functions are interpreted just as when the user types at a terminal.
run_command(options.command, options.shell, options.interactive)
def step(options, first_time=False):
changed_files = get_changed_files(options.ignore)
if changed_files:
act(changed_files, options, first_time)
time.sleep(0.2)
def mainloop(options):
step(options, first_time=True)
while True:
step(options)
def main():
# This fn exposed as a command-line entry point by setup.py install/develop.
# Ignore SIGTTOU, which we receive after subprocesses launching interactive
# shells (which take our terminal with them) terminate (leaving the terminal
# in limbo.) If we ignore the resulting SIGTTOU, then we can get the
# terminal back and proceed. See
# http://stackoverflow.com/questions/25099895/from-python-start-a-shell-that-can-interpret-functions-and-aliases
signal.signal(signal.SIGTTOU, signal.SIG_IGN)
mainloop(
validate(
parse_args(
get_parser('rerun', SKIP_DIRS, SKIP_EXT), sys.argv[1:]
)
)
)
|
448258
|
from django.conf.urls.defaults import patterns, url, include
urlpatterns = patterns('',
url(r'static/', include('core.registration.static.urls')),
)
|
448277
|
from __future__ import print_function
import os
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import time
import datetime
from constant import *
def currentTime():
return datetime.datetime.now().isoformat()
def get_mask(re_sel):
    rm = torch.cuda.ByteTensor(len(re_sel), dimR)
    rm.zero_()
    for xy in re_sel:
        rm[xy[0]][xy[1]] = 1
return rm
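# Worked example (hypothetical values): with dimR >= 3 and re_sel = [(0, 2), (1, 0)],
# get_mask returns a zeroed ByteTensor of shape (2, dimR) with ones at
# positions (0, 2) and (1, 0).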
|
448298
|
import collections
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
import torch
import torch.utils.data as data
from nonechucks import *
import nonechucks
class SafeDatasetTest(unittest.TestCase):
"""Unit tests for `SafeDataset`."""
SafeDatasetPair = collections.namedtuple("SafeDatasetPair", ["unsafe", "safe"])
@classmethod
def get_safe_dataset_pair(cls, dataset, **kwargs):
"""Returns a `SafeDatasetPair` (a tuple of size 2), which contains
both the unsafe and safe versions of the dataset.
"""
return SafeDatasetTest.SafeDatasetPair(
dataset, nonechucks.SafeDataset(dataset, **kwargs)
)
def setUp(self):
tensor_data = data.TensorDataset(torch.arange(0, 10))
self._dataset = self.get_safe_dataset_pair(tensor_data)
@property
def dataset(self):
self._dataset.safe._reset_index()
return self._dataset
def test_build_index(self):
dataset = data.TensorDataset(torch.arange(0, 10))
dataset = self.get_safe_dataset_pair(dataset, eager_eval=True)
self.assertTrue(dataset.safe.is_index_built)
self.assertEqual(len(dataset.safe), len(dataset.unsafe))
def test_dataset_iterator(self):
counter = 0
for i in self.dataset.safe:
self.assertEqual(i[0].tolist(), counter)
counter += 1
def test_iter_calls_safe_get_item(self):
dataset = data.TensorDataset(torch.arange(0, 10))
dataset = self.get_safe_dataset_pair(dataset).safe
for sample in dataset:
pass
self.assertTrue(dataset.is_index_built)
# @mock.patch('torch.utils.data.TensorDataset.__getitem__')
# def test_default_map(self, mock_get_item):
# def side_effect(idx):
# return [10, 11, 12, 13, None, 14, None, None, 15, 16][idx]
# mock_get_item.side_effect = side_effect
# dataset = data.TensorDataset(torch.arange(0, 10))
# dataset = self.get_safe_dataset_pair(dataset)
# self.assertEqual(dataset.safe[4], 14)
# self.assertEqual(dataset.safe[5], 15)
# self.assertEqual(dataset.safe[4], 14)
def test_memoization(self):
pass
def test_import(self):
self.assertIsNotNone(SafeDataset)
self.assertIsNotNone(SafeSampler)
if __name__ == "__main__":
unittest.main()
|
448345
|
import numpy as np
from pytools import generate_nonnegative_integer_tuples_summing_to_at_most \
as gnitstam
# prepare plot and eval nodes on triangle
dims = 2
node_n = 40
node_tuples = list(gnitstam(node_n, dims))
plot_nodes = np.array(node_tuples, dtype=np.float64) / node_n
eval_nodes = 2*(plot_nodes - 0.5).T
# get triangle submesh
from modepy.tools import submesh
tri_subtriangles = np.array(submesh(node_tuples))
# evaluate each basis function, build global tri mesh
node_count = 0
all_nodes = []
all_triangles = []
all_values = []
from modepy.modes import simplex_onb
p = 3
stretch_factor = 1.5
for (i, j), basis_func in zip(
gnitstam(p, dims),
simplex_onb(dims, p),
):
all_nodes.append(plot_nodes + [stretch_factor*i, stretch_factor*j])
all_triangles.append(tri_subtriangles + node_count)
all_values.append(basis_func(eval_nodes))
node_count += len(plot_nodes)
all_nodes = np.vstack(all_nodes)
all_triangles = np.vstack(all_triangles)
all_values = np.hstack(all_values)
# plot
import mayavi.mlab as mlab
fig = mlab.figure(bgcolor=(1, 1, 1))
mlab.triangular_mesh(
all_nodes[:, 0],
all_nodes[:, 1],
0.2*all_values,
all_triangles)
x, y = np.mgrid[-1:p*stretch_factor + 1:20j, -1:p*stretch_factor + 1:20j]
mlab.mesh(x, y, 0*x, representation="wireframe", color=(0.4, 0.4, 0.4),
line_width=0.6)
mlab.view(-153, 58, 10, np.array([1.61, 2.49, -0.59]))
mlab.show()
|
448360
|
from .knn_classifier import knn_classifier_log_cpm
from .knn_classifier import knn_classifier_scran
from .logistic_regression import logistic_regression_log_cpm
from .logistic_regression import logistic_regression_scran
from .mlp import mlp_log_cpm
from .mlp import mlp_scran
|
448380
|
import copy
import logging
import math
import random
from typing import *
import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F
from transformers import modeling_bart as bart
from transformers.modeling_utils import BeamHypotheses, calc_banned_ngram_tokens, calc_banned_bad_words_ids, \
    top_k_top_p_filtering

logger = logging.getLogger(__name__)
def extract_backreferences(ids, num_embeddings, backpointer_idx):
ids_mask = ids >= num_embeddings
backreferences = ids.clone() - num_embeddings
backreferences[~ids_mask] = 0
backreferences += (~ids_mask).long() * torch.arange(
ids.size(1),
dtype=ids.dtype,
device=ids.device)
ids = ids.clone()
ids[ids_mask] = backpointer_idx
return ids, backreferences
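# Worked example (hypothetical values): with num_embeddings=10 and backpointer_idx=3,
# an input row [5, 7, 11, 6] is split into
#   ids            = [5, 7, 3, 6]   (the out-of-vocabulary id 11 is replaced by the backpointer token)
#   backreferences = [0, 1, 1, 3]   (in-vocabulary positions point to themselves; position 2 points back to position 1)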
class AMRBartEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
is a :class:`EncoderLayer`.
Args:
config: BartConfig
"""
def __init__(self, config: bart.BartConfig, embed_tokens, backpointer_idx):
super().__init__()
self.backpointer_idx = backpointer_idx
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = config.max_position_embeddings
self.embed_tokens = embed_tokens
if config.static_position_embeddings:
self.embed_positions = bart.SinusoidalPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx
)
else:
self.embed_positions = bart.LearnedPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx, #config.extra_pos_embeddings,
)
self.layers = nn.ModuleList([bart.EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = bart.LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()
# mbart has one extra layer_norm
self.layer_norm = bart.LayerNorm(config.d_model) if config.normalize_before else None
def forward(
self, input_ids, embedded=None, attention_mask=None,
):
"""
Args:
input_ids (LongTensor): tokens in the source language of shape
`(batch, src_len)`
attention_mask (torch.LongTensor): indicating which indices are padding tokens.
Returns:
Tuple comprised of:
- **x** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
                  Only populated if *self.output_hidden_states* is True.
- **all_attentions** (List[Tensor]): Attention weights for each layer.
During training might not be of length n_layers because of layer dropout.
"""
# check attention mask and invert
if attention_mask is not None:
attention_mask = bart.invert_mask(attention_mask)
input_ids, backreferences = extract_backreferences(
input_ids, self.embed_tokens.num_embeddings, self.backpointer_idx)
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_ids)
x = inputs_embeds + embed_pos
if embedded is not None:
x += embedded
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states, all_attentions = [], []
for encoder_layer in self.layers:
if self.output_hidden_states:
encoder_states.append(x)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
attn = None
else:
x, attn = encoder_layer(x, attention_mask)
if self.output_attentions:
all_attentions.append(attn)
if self.layer_norm:
x = self.layer_norm(x)
if self.output_hidden_states:
encoder_states.append(x)
# T x B x C -> B x T x C
encoder_states = [hidden_state.transpose(0, 1) for hidden_state in encoder_states]
x = x.transpose(0, 1)
return x, encoder_states, all_attentions
class AMRBartDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer
is a :class:`DecoderLayer`.
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: bart.BartConfig, embed_tokens: nn.Embedding, backpointer_idx, amr_mode=True):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.backpointer_idx = backpointer_idx
embed_dim = embed_tokens.embedding_dim
self.embed_tokens = embed_tokens
if config.static_position_embeddings:
self.embed_positions = bart.SinusoidalPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx
)
else:
self.embed_positions = bart.LearnedPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx, #config.extra_pos_embeddings,
)
self.layers = nn.ModuleList(
[bart.DecoderLayer(config) for _ in range(config.decoder_layers)]
) # type: List[DecoderLayer]
self.layernorm_embedding = bart.LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()
self.layer_norm = bart.LayerNorm(config.d_model) if config.add_final_layer_norm else None
self.pointer_k = nn.Linear(config.d_model, config.d_model)
# self.pointer_k.weight.data = self.layers[-1].self_attn.k_proj.weight.data.clone()
self.pointer_q = nn.Linear(config.d_model, config.d_model)
# self.pointer_q.weight.data = self.layers[-1].self_attn.q_proj.weight.data.clone()
# self.pointer_k = nn.Sequential(
# nn.Linear(config.d_model, config.decoder_ffn_dim),
# nn.GELU(),
# nn.Linear(config.decoder_ffn_dim, config.d_model),
# )
# self.pointer_q = nn.Sequential(
# nn.Linear(config.d_model, config.decoder_ffn_dim),
# nn.GELU(),
# nn.Linear(config.decoder_ffn_dim, config.d_model),
# )
self.amr_mode = amr_mode
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
decoder_cached_states=None,
use_cache=False,
**unused
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
input_ids (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
decoder_cached_states (dict or None): dictionary used for storing state during generation
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- hidden states
- attentions
"""
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = bart.invert_mask(encoder_padding_mask)
input_ids, backreferences = extract_backreferences(
input_ids,
self.embed_tokens.num_embeddings,
self.backpointer_idx)
# embed positions
embed_pos = self.embed_positions(input_ids, use_cache=use_cache)
positions = embed_pos
        # during cached (incremental) prediction, only the newest position is kept; earlier positions were already processed
if use_cache:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:] # happens after we embed them
# assert input_ids.ne(self.padding_idx).any()
x = self.embed_tokens(input_ids) * self.embed_scale
x += positions
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C (the decoder layers expect time-first tensors)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = ()
all_self_attns = ()
next_decoder_cache = []
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if self.output_hidden_states:
all_hidden_states += (x,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_state = decoder_cached_states[idx] if decoder_cached_states is not None else None
x, layer_self_attn, layer_past = decoder_layer(
x,
encoder_hidden_states,
encoder_attn_mask=encoder_padding_mask,
decoder_padding_mask=decoder_padding_mask,
layer_state=layer_state,
causal_mask=decoder_causal_mask,
)
if use_cache:
next_decoder_cache.append(layer_past.copy())
if self.layer_norm and (idx == len(self.layers) - 1): # last layer of mbart
x = self.layer_norm(x)
if self.output_attentions:
all_self_attns += (layer_self_attn,)
# Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
all_hidden_states = [hidden_state.transpose(0, 1) for hidden_state in all_hidden_states]
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
xq = self.pointer_q(x)
xk = self.pointer_k(x)
if decoder_cached_states is not None:
if 'prev_key' in decoder_cached_states[-1].get('pointer', {}):
last_state = decoder_cached_states[-1]['pointer']
xk = torch.cat([last_state['prev_key'], xk], dim=1)
next_state = {'pointer': {'prev_key': xk}}
if use_cache:
next_decoder_cache.append(next_state)
if self.amr_mode:
scores = torch.einsum('bqh,bkh->bqk', xq, xk)
if decoder_cached_states:
mask = torch.full_like(scores[0], float('-inf'))
mask = mask.triu(diagonal=xk.size(1) - 1)
else:
mask = torch.full_like(scores[0], float('-inf'))
mask = mask.triu()
scores += mask.unsqueeze(0)
else:
scores = torch.full((xq.size(0), xq.size(1), xk.size(1)), float('-inf'), device=xq.device)
if use_cache:
next_cache = ((encoder_hidden_states, encoder_padding_mask), next_decoder_cache)
else:
next_cache = None
return (x, scores), next_cache, all_hidden_states, list(all_self_attns)
class AMRBartModel(bart.PretrainedBartModel):
def __init__(self, config: bart.BartConfig, backpointer_idx=None):
super().__init__(config)
self.output_attentions = True
self.output_hidden_states = config.output_hidden_states
self.padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, self.padding_idx)
if backpointer_idx is not None:
self.backpointer_idx = backpointer_idx
else:
self.backpointer_idx = self.shared.num_embeddings - 1
self.encoder = AMRBartEncoder(config, self.shared, backpointer_idx=self.backpointer_idx)
self.decoder = AMRBartDecoder(config, self.shared, backpointer_idx=self.backpointer_idx)
self.init_weights()
@property
def sentence_mode(self):
return self.decoder.amr_mode
@sentence_mode.setter
def sentence_mode(self, value):
assert isinstance(value, bool)
self.decoder.amr_mode = value
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
encoder_outputs: Optional[Tuple] = None,
decoder_attention_mask=None,
decoder_cached_states=None,
use_cache=False,
):
# make masks if user doesn't supply
if not use_cache:
decoder_input_ids, decoder_padding_mask, causal_mask = bart._prepare_bart_decoder_inputs(
self.config,
input_ids,
decoder_input_ids=decoder_input_ids,
decoder_padding_mask=decoder_attention_mask,
causal_mask_dtype=self.shared.weight.dtype,
)
else:
decoder_padding_mask, causal_mask = None, None
assert decoder_input_ids is not None
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
assert isinstance(encoder_outputs, tuple)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
decoder_input_ids,
encoder_outputs[0],
attention_mask,
decoder_padding_mask,
decoder_causal_mask=causal_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
# Attention and hidden_states will be [] or None if they aren't needed
# decoder_outputs: Tuple = bart._filter_out_falsey_values(decoder_outputs)
assert isinstance(decoder_outputs[0][0], torch.Tensor)
assert isinstance(decoder_outputs[0][1], torch.Tensor)
encoder_outputs: Tuple = bart._filter_out_falsey_values(encoder_outputs)
return decoder_outputs + encoder_outputs
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_output_embeddings(self):
return bart._make_linear_from_emb(self.shared) # make it on the fly
class AMRBartForConditionalGeneration(bart.PretrainedBartModel):
base_model_prefix = "model"
def __init__(self, config: bart.BartConfig, backpointer_idx=None):
super().__init__(config)
base_model = AMRBartModel(config, backpointer_idx)
self.model = base_model
self.pad_index = base_model.shared.padding_idx
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.backpointer_idx = backpointer_idx
self._rev = None
def init_reverse_model(self):
rev = AMRBartForConditionalGeneration(self.model.config, self.backpointer_idx)
rev.model.shared = self.model.shared
rev.model.encoder = self.model.encoder
rev.model.decoder.embed_tokens = self.model.decoder.embed_tokens
rev.model.decoder.embed_positions = self.model.decoder.embed_positions
self.amr_mode = True
rev.amr_mode = False
self._rev = rev
@property
def rev(self):
if self._rev is None:
return self
else:
return self._rev
@property
def amr_mode(self):
return self.model.decoder.amr_mode
@amr_mode.setter
def amr_mode(self, value):
assert isinstance(value, bool)
self.model.decoder.amr_mode = value
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
lm_labels=None,
use_cache=False,
**unused
):
r"""
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels
in ``[0, ..., config.vocab_size]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# Mask filling only works for bart-large
from transformers import BartTokenizer, BartForConditionalGeneration
tokenizer = BartTokenizer.from_pretrained('bart-large')
TXT = "My friends are <mask> but they eat too many carbs."
model = BartForConditionalGeneration.from_pretrained('bart-large')
input_ids = tokenizer.batch_encode_plus([TXT], return_tensors='pt')['input_ids']
logits = model(input_ids)[0]
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
probs = logits[0, masked_index].softmax(dim=0)
values, predictions = probs.topk(5)
tokenizer.decode(predictions).split()
# ['good', 'great', 'all', 'really', 'very']
"""
# outputs = self.model(
# input_ids,
# attention_mask=attention_mask,
# decoder_input_ids=decoder_input_ids,
# encoder_outputs=encoder_outputs,
# decoder_attention_mask=decoder_attention_mask,
# decoder_cached_states=decoder_cached_states,
# use_cache=use_cache,
# )
# lm_logits = F.linear(outputs[0][0], self.model.shared.weight, bias=self.final_logits_bias)
# po_logits = outputs[0][1]
# po_padding = torch.full_like(po_logits[:, :, 0:1], float('-inf'))
# po_padding = po_padding.repeat(1, 1, 1024 - po_logits.size(-1))
# po_logits = torch.cat([po_logits, po_padding], -1)
# uni_logits = torch.cat([lm_logits, po_logits], -1)
#
# outputs = (uni_logits,) + outputs[1:] # Add cache, hidden states and attention if they are here
outputs = self.compute_logits(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
if lm_labels is not None:
uni_logits = outputs[0]
masked_lm_loss = F.nll_loss(
uni_logits.log_softmax(-1).contiguous().view(-1, uni_logits.size(-1)),
lm_labels.contiguous().view(-1),
ignore_index=self.pad_index)
outputs = (masked_lm_loss,) + outputs
return outputs
def compute_logits(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
use_cache=False,
):
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
lm_logits = F.linear(outputs[0][0], self.model.shared.weight, bias=self.final_logits_bias)
po_logits = outputs[0][1]
po_padding = torch.full_like(po_logits[:, :, 0:1], float('-inf'))
po_padding = po_padding.repeat(1, 1, 1024 - po_logits.size(-1))
po_logits = torch.cat([po_logits, po_padding], -1)
uni_logits = torch.cat([lm_logits, po_logits], -1)
outputs = (uni_logits,) + outputs[1:] # Add cache, hidden states and attention if they are here
return outputs
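    # Shape note (inferred from the code above): uni_logits has size
    # vocab_size + 1024 along its last dimension; the first vocab_size entries
    # score ordinary tokens, the remaining 1024 score back-pointer targets
    # (pointer scores are padded with -inf up to 1024 positions).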
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
**model_specific_kwargs
) -> torch.LongTensor:
r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code`_.
.. _`Facebook's XLM beam search code`:
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
Parameters:
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
The sequence used as a prompt for the generation. If `None` the method initializes
it as an empty `torch.LongTensor` of shape `(1,)`.
max_length: (`optional`) int
The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
min_length: (`optional`) int
The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
do_sample: (`optional`) bool
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
early_stopping: (`optional`) bool
if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
num_beams: (`optional`) int
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
temperature: (`optional`) float
The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
top_k: (`optional`) int
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
top_p: (`optional`) float
The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
repetition_penalty: (`optional`) float
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
pad_token_id: (`optional`) int
                Padding token. Default to specific model pad_token_id or None if it does not exist.
bos_token_id: (`optional`) int
BOS token. Defaults to `bos_token_id` as defined in the models config.
eos_token_id: (`optional`) int
EOS token. Defaults to `eos_token_id` as defined in the models config.
length_penalty: (`optional`) float
Exponential penalty to the length. Default to 1.
no_repeat_ngram_size: (`optional`) int
If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
bad_words_ids: (`optional`) list of lists of int
`bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences: (`optional`) int
The number of independently computed returned sequences for each element in the batch. Default to 1.
attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
Defaults to `None`.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id=None: (`optional`) int
If an encoder-decoder model starts decoding with a different token than BOS.
Defaults to `None` and is changed to `BOS` later.
use_cache: (`optional`) bool
If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.
model_specific_kwargs: (`optional`) dict
Additional model specific kwargs will be forwarded to the `forward` function of the model.
Return:
output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3)  # generate 3 sequences by sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
            input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
            batch_size = input_ids.shape[0]  # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
        # do not allow duplicate outputs when doing greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# current position and vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
vocab_size += 1024
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
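# worked example (illustrative values): batch_size=2, num_return_sequences=3 and
# do_sample=True give effective_batch_size=6 and effective_batch_mult=3, so each
# input row is tiled 3 times per beam in the expansion below; with
# do_sample=False the multiplier stays 1 and the extra sequences come from the
# beam search itself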
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert (
decoder_start_token_id is not None
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
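# e.g. with batch_size=2, num_beams=2 and effective_batch_mult=1, the index
# tensor above is [0, 0, 1, 1], so each expanded row re-uses the encoder
# output of the example it originated from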
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
return output
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens multiple times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty,
)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.prepare_logits_for_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
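# illustrative banning rule: with no_repeat_ngram_size=2 and a hypothesis
# ending in [..., 5, 7, ..., 5], token 7 is banned for the next step, since
# generating it would repeat the bigram (5, 7)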
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
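# e.g. with vocab_size=50000 and num_beams=4, a flat beam_token_id of 100003
# maps to beam_id=2 and token_id=3; for batch_idx=1 this gives
# effective_beam_id = 1 * 4 + 2 = 6, i.e. row 6 of the flattened
# (batch_size * num_beams) tensors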
# add to generated hypotheses if end of sentence or last iteration
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token if it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == num_beams:
break
# Check if we're done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len=cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1)
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# finalize all open beam hypotheses and add them to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are filled with pad_token
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
assert all(len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
@staticmethod
def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]:
# each layer's cached tensor carries the (batch * beams) dimension at index 1;
# select the surviving beams so the cache lines up with the re-ordered input_ids
return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
old_num_tokens = self.model.shared.num_embeddings
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self.model.shared = new_embeddings
self._resize_final_logits_bias(new_num_tokens, old_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
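# e.g. resizing from old_num_tokens=50265 to new_num_tokens=50267 appends a
# zero bias of shape (1, 2) to the buffer; shrinking instead truncates it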
def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
# first step, decoder_cached_states are empty
if not past[1]:
encoder_outputs, decoder_cached_states = past, None
else:
encoder_outputs, decoder_cached_states = past
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"decoder_cached_states": decoder_cached_states,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_logits_for_generation(self, logits, cur_len, max_length):
#if cur_len == 1:
# self._force_token_ids_generation(logits, self.config.bos_token_id)
if cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_ids_generation(logits, self.config.eos_token_id)
return logits
def _force_token_ids_generation(self, scores, token_ids) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0"""
if isinstance(token_ids, int):
token_ids = [token_ids]
all_but_token_ids_mask = torch.tensor(
[x for x in range(self.config.vocab_size) if x not in token_ids],
dtype=torch.long,
device=next(self.parameters()).device,
)
assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
# BART-style past: ((encoder_out, encoder_mask), decoder_cached_states);
# note this definition shadows the generic tuple-based _reorder_cache above
((enc_out, enc_mask), decoder_cached_states) = past
reordered_past = []
for layer_past in decoder_cached_states:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: bart._reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
new_enc_out = enc_out if enc_out is None else enc_out.index_select(0, beam_idx)
new_enc_mask = enc_mask if enc_mask is None else enc_mask.index_select(0, beam_idx)
past = ((new_enc_out, new_enc_mask), reordered_past)
return past
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return bart._make_linear_from_emb(self.model.shared) # make it on the fly
|
448396
|
import comet_ml, json
import numpy as np
import torch
from torch import optim
from misc import clear_gradients
from lib import create_agent
from util.env_util import create_env
from util.plot_util import load_checkpoint
from lib.distributions import kl_divergence
from local_vars import PROJECT_NAME, WORKSPACE, LOADING_API_KEY, LOGGING_API_KEY
alim = [-1, 1]
aint = 0.01
BATCH_SIZE = 256
N_ACTION_SAMPLES = 100
# Reacher Experiments:
# direct: 48edb0b9aca847c09c6893793c982884
# iterative: 58ec5bc5273044e59ae30a969c3d7de4
def estimate_opt_landscape(exp_key, states=None, ckpt_timestep=None, device_id=None):
"""
Estimates the optimization landscape for a checkpointed agent. Also gets the
policy estimates during inference optimization.
Args:
exp_key (str): the comet experiment ID
states (list of torch.Tensor, optional): the state(s) used for estimation
ckpt_timestep (int, optional): the checkpoint for estimation
device_id (int, optional): the GPU ID
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment (just to create agent)
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args, device_id=device_id)[0]
# load the checkpoint
load_checkpoint(agent, exp_key, ckpt_timestep)
if states is None:
# load a random state from the most recently collected episode
state_asset = None
if ckpt_timestep is not None:
# load the corresponding episode if it is present
state_asset_list = [a for a in asset_list if 'episode_step_' + str(ckpt_timestep) + '_state' in a['fileName']]
if len(state_asset_list) > 0:
state_asset = state_asset_list[0]
if state_asset is None:
# get most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = experiment.get_asset(state_asset['assetId'])
episode_states = json.loads(episode_states)
# state_timestep = [np.random.randint(len(episode_states)) for _ in range(100)]
state_timesteps = range(100)
states = [torch.from_numpy(np.array(episode_states[state_timestep])).view(1, -1).type(torch.FloatTensor) for state_timestep in state_timesteps]
# n_actions = int(((alim[1] - alim[0]) / aint) ** n_action_dims)
n_action_dims = env.action_space.shape[0]
a = np.arange(alim[0], alim[1], aint)
a_args = n_action_dims * [a]
a_coords = np.meshgrid(*a_args)
stacked_action_means = np.stack([a_coord.reshape(-1) for a_coord in a_coords]).T
n_batches = len(stacked_action_means) // BATCH_SIZE + 1
n_samples = agent.n_action_samples if N_ACTION_SAMPLES is None else N_ACTION_SAMPLES
q_estimates_list = []
log_ratios_list = []
approx_posts_list = []
for state_ind, state in enumerate(states):
if state_ind % 5 == 0:
print('Processing state ' + str(state_ind+1) + ' of ' + str(len(states)) + '.')
q_estimates = np.zeros(len(stacked_action_means))
log_ratios = np.zeros(len(stacked_action_means))
# perform inference on the state
batch_expanded_state = state.repeat(BATCH_SIZE, 1)
sample_expanded_state = batch_expanded_state.repeat(n_samples, 1)
agent.reset(batch_size=BATCH_SIZE); agent.eval()
agent.act(batch_expanded_state)
approx_posts = agent.inference_optimizer.dist_params
# loop over actions, get value estimates
for batch_ind in range(n_batches):
if batch_ind % 25 == 0:
print(' Processing batch ' + str(batch_ind+1) + ' of ' + str(n_batches) + '.')
# get a batch of actions
start_ind = batch_ind * BATCH_SIZE
end_ind = min((batch_ind + 1) * BATCH_SIZE, len(stacked_action_means))
action_mean_batch = stacked_action_means[start_ind:end_ind]
# evaluate the value estimate of the action in the state
if action_mean_batch.shape[0] != BATCH_SIZE:
# fill out the rest of the batch with zeros
temp_action_mean_batch = np.zeros((BATCH_SIZE, n_action_dims))
temp_action_mean_batch[:action_mean_batch.shape[0]] = action_mean_batch
action_mean_batch = temp_action_mean_batch
action_mean_batch = torch.from_numpy(action_mean_batch).type(torch.FloatTensor)
# reset approx post, sample actions
agent.reset(batch_size=BATCH_SIZE); agent.eval()
agent.approx_post.reset(batch_size=BATCH_SIZE, dist_params={'loc': action_mean_batch.clone().requires_grad_()})
# agent.inference_optimizer(agent, batch_expanded_state)
action_batch = agent.approx_post.sample(n_samples)
q_values = agent.q_value_estimator(agent, sample_expanded_state, action_batch)
q_values = q_values.view(n_samples, -1, 1).mean(dim=0)
kls = kl_divergence(agent.approx_post, agent.prior, n_samples=n_samples, sample=action_batch).sum(dim=1, keepdim=True)
q_estimates[start_ind:end_ind] = q_values[:end_ind-start_ind].view(-1).detach().cpu().numpy()
log_ratios[start_ind:end_ind] = kls[:end_ind-start_ind].view(-1).detach().cpu().numpy()
q_estimates = q_estimates.reshape(n_action_dims * [int((alim[1] - alim[0]) / aint)])
log_ratios = log_ratios.reshape(n_action_dims * [int((alim[1] - alim[0]) / aint)])
q_estimates_list.append(q_estimates)
log_ratios_list.append(log_ratios)
approx_posts_list.append(approx_posts)
return {'q_estimates': q_estimates_list,
'log_ratios': log_ratios_list,
'alpha_pi': agent.alphas['pi'].detach().cpu().numpy(),
'approx_posts': approx_posts_list}
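# example usage (sketch; the key is the direct Reacher experiment listed near
# the top of this file, and the ckpt_timestep value is hypothetical):
# landscape = estimate_opt_landscape('48edb0b9aca847c09c6893793c982884',
#                                    ckpt_timestep=100000, device_id=0)
# landscape['q_estimates']  # one action-grid of Q estimates per evaluated state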
def vis_inference(exp_key, action_indices, state_ind=0):
"""
Plots a 2D analysis of direct inference, comparing with gradient ascent.
Args:
exp_key (str): the experiment key
state_ind (int): state index to plot
action_indices (list): two action indices to vary
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args)[0]
# load the checkpoint
load_checkpoint(agent, exp_key)
# load the state from the most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = json.loads(experiment.get_asset(state_asset['assetId']))
state = torch.from_numpy(np.array(episode_states[state_ind])).view(1, -1).type(torch.FloatTensor)
print('STATE: ')
print(state)
# perform inference, get the direct approx post
agent.reset(); agent.eval()
agent.act(state)
loc = agent.approx_post.dist.loc.detach().clone(); scale = agent.approx_post.dist.scale.detach().clone()
direct_approx_post = {'loc': loc.clone().cpu().numpy(),
'scale': scale.clone().cpu().numpy()}
print('DIRECT APPROX. POST.: ')
print(direct_approx_post)
print('Performing gradient-based optimization...')
LR = 0.1
agent.n_action_samples = 100
# update the approx post using gradient descent on the 2 dims of the mean
sgd_objectives = [np.inf]
sgd_locs = [agent.approx_post.dist.loc.detach().cpu().numpy()]
# dist_params = {k: v.data.requires_grad_() for k, v in agent.approx_post.get_dist_params().items()}
sgd_loc = agent.approx_post.dist.loc.clone().detach().requires_grad_()
sgd_scale = agent.approx_post.dist.scale.clone().detach()
dist_params = {'loc': sgd_loc, 'scale': sgd_scale}
agent.approx_post.reset(dist_params=dist_params)
# dist_param_list = [param for _, param in dist_params.items()]
# just perform SGD on the mean
dist_param_list = [sgd_loc]
# optimizer = optim.SGD(dist_param_list, lr=LR, momentum=0.9)
optimizer = optim.Adam(dist_param_list, lr=LR)
optimizer.zero_grad()
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
sgd_objectives.append(-obj.detach())
for _ in range(250):
obj.sum().backward(retain_graph=True)
for a_dim in range(agent.approx_post.dist.loc.shape[1]):
if a_dim not in action_indices:
agent.approx_post.dist.loc.grad[:, a_dim] = 0.
optimizer.step()
optimizer.zero_grad()
agent.approx_post._sample = None
# reset the non-optimized dimensions
# for a_dim in range(agent.approx_post.dist.loc.shape[1]):
# if a_dim not in action_indices:
# agent.approx_post.dist.loc[:, a_dim] = loc[:, a_dim]
# agent.approx_post.dist.scale = scale
sgd_locs.append(agent.approx_post.dist.loc.clone().detach().cpu().numpy())
# reset the optimizer, pretty hacky...
# sgd_loc = agent.approx_post.dist.loc.clone().detach().requires_grad_()
# sgd_scale = agent.approx_post.dist.scale.clone().detach()
# dist_params = {'loc': sgd_loc, 'scale': sgd_scale}
# agent.approx_post.reset(dist_params=dist_params)
# # dist_param_list = [param for _, param in dist_params.items()]
# # just perform SGD on the mean
# dist_param_list = [sgd_loc]
# optimizer = optim.Adam(dist_param_list, lr=LR)
# optimizer.zero_grad()
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
sgd_objectives.append(-obj.detach())
clear_gradients(agent.generative_parameters())
agent.n_action_samples = 10
print('Done.')
print('Estimating objectives...')
agent.n_action_samples = 10
# get all action means
a = np.arange(alim[0], alim[1], aint)
a_args = 2 * [a]
a_coords = np.meshgrid(*a_args)
stacked_action_means = np.stack([a_coord.reshape(-1) for a_coord in a_coords]).T
n_batches = len(stacked_action_means) // BATCH_SIZE + 1
n_samples = agent.n_action_samples
batch_expanded_state = state.repeat(BATCH_SIZE, 1)
batch_expanded_loc = loc.repeat(BATCH_SIZE, 1)
batch_expanded_scale = scale.repeat(BATCH_SIZE, 1)
objectives = np.zeros((len(stacked_action_means), 1))
# estimate the objective at all action means
for batch_ind in range(n_batches):
if batch_ind % 25 == 0:
print(' Processing batch ' + str(batch_ind+1) + ' of ' + str(n_batches) + '.')
# get a batch of actions
start_ind = batch_ind * BATCH_SIZE
end_ind = min((batch_ind + 1) * BATCH_SIZE, len(stacked_action_means))
action_mean_batch = stacked_action_means[start_ind:end_ind]
if action_mean_batch.shape[0] != BATCH_SIZE:
# fill out the rest of the batch with zeros if at the end
temp_action_mean_batch = np.zeros((BATCH_SIZE, 2))
temp_action_mean_batch[:action_mean_batch.shape[0]] = action_mean_batch
action_mean_batch = temp_action_mean_batch
action_mean_batch = torch.from_numpy(np.arctanh(action_mean_batch + 1e-6)).type(torch.FloatTensor)
# reset approx post, sample actions
agent.reset(batch_size=BATCH_SIZE); agent.eval()
loc_batch = batch_expanded_loc
loc_batch[:, action_indices[0]] = action_mean_batch[:, 0]
loc_batch[:, action_indices[1]] = action_mean_batch[:, 1]
scale_batch = batch_expanded_scale
agent.approx_post.reset(batch_size=BATCH_SIZE, dist_params={'loc': loc_batch.clone().requires_grad_(),
'scale': scale_batch.clone()})
action_batch = agent.approx_post.sample(n_samples)
# evaluate the value estimate of the action in the state
objective = agent.estimate_objective(batch_expanded_state, action_batch).view(n_samples, -1, 1).mean(dim=0).detach().cpu().numpy()
objectives[start_ind:end_ind] = objective[:end_ind-start_ind]
objectives = objectives.reshape(2 * [int((alim[1] - alim[0]) / aint)])
agent.n_action_samples = 10
print('Done.')
return {'objectives': objectives,
'stacked_action_means': stacked_action_means,
'direct_approx_post': direct_approx_post,
'action_indices': action_indices,
'sgd_approx_post_means': sgd_locs,
'sgd_objectives': sgd_objectives}
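# example usage (sketch; direct-agent key from the top of this file, with an
# arbitrary pair of action dimensions):
# results = vis_inference('48edb0b9aca847c09c6893793c982884',
#                         action_indices=[0, 1], state_ind=0)
# results['objectives']  # 2D objective grid over the two varied action means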
# from util.analysis import vis_it_inference
# import numpy as np
# import pickle
#
# state = [0.7380273938179016,
# 0.9774200916290283,
# 0.014780346304178238,
# -0.053389377892017365,
# -0.20391426980495453,
# 0.09159323573112488,
# 1.2636744976043701,
# 0.49291884899139404,
# -0.8514286279678345,
# 0.027635907754302025,
# -0.523140549659729,
# -0.26849716901779175,
# 0.7275161147117615,
# 1.7905492782592773,
# 1.1246192455291748,
# 1.2539386749267578,
# -0.29752910137176514,
# 0.5522995591163635,
# -1.4331533908843994,
# 1.4389076232910156,
# 1.911720633506775,
# -1.2782995700836182,
# -3.6260697841644287,
# -2.3452537059783936,
# -0.010221259668469429,
# 3.8292510509490967,
# -1.393014907836914]
#
# np_state = np.array(state)
def vis_it_inference(exp_key, action_indices, state_ind=0, state=None):
"""
Plots a 2D analysis of iterative inference.
Args:
exp_key (str): the experiment key
state_ind (int): state index to plot
action_indices (list): two action indices to vary
state (np.array): the state to evaluate
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args)[0]
# load the checkpoint
load_checkpoint(agent, exp_key)
if state is None:
# load the state from the most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = json.loads(experiment.get_asset(state_asset['assetId']))
state = torch.from_numpy(np.array(episode_states[state_ind])).view(1, -1).type(torch.FloatTensor)
else:
state = torch.from_numpy(state).view(1,-1).type(torch.FloatTensor)
print('STATE: ')
print(state)
# perform iterative inference, get the approx post
agent.reset(); agent.eval()
agent.act(state)
loc = agent.approx_post.dist.loc.detach().clone(); scale = agent.approx_post.dist.scale.detach().clone()
it_approx_post = {'loc': loc.clone().cpu().numpy(),
'scale': scale.clone().cpu().numpy()}
print('ITERATIVE APPROX. POST.: ')
print(it_approx_post)
print('Performing iterative inference...')
# only optimize two of the means
agent.n_action_samples = 10
total_it_locs = []
total_it_objs = []
for inf_seed in range(10):
agent.reset(); agent.eval()
# random Gaussian init for the mean
agent.approx_post.reset(dist_params={'loc': 0.3*agent.approx_post.dist.loc.clone().detach().normal_(),
'scale': agent.approx_post.dist.scale.clone().detach()})
iterative_locs = []
iterative_objectives = []
# for inf_it in range(agent.inference_optimizer.n_inf_iters):
if False:  # set to True to run the gradient-based baseline instead of the amortized optimizer
# gradient-based
LR = 0.05
sgd_loc = agent.approx_post.dist.loc.clone().detach().requires_grad_()
sgd_scale = agent.approx_post.dist.scale.clone().detach()
dist_params = {'loc': sgd_loc, 'scale': sgd_scale}
agent.approx_post.reset(dist_params=dist_params)
# only perform SGD on the mean
dist_param_list = [sgd_loc]
optimizer = optim.Adam(dist_param_list, lr=LR)
optimizer.zero_grad()
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
iterative_locs.append(agent.approx_post.dist.loc.clone().detach().cpu().numpy())
for _ in range(50):
obj.sum().backward(retain_graph=True)
for a_dim in range(agent.approx_post.dist.loc.shape[1]):
if a_dim not in action_indices:
agent.approx_post.dist.loc.grad[:, a_dim] = 0.
optimizer.step()
optimizer.zero_grad()
agent.approx_post._sample = None
iterative_locs.append(agent.approx_post.dist.loc.clone().detach().cpu().numpy())
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
clear_gradients(agent.generative_parameters())
else:
# amortized
for inf_it in range(50):
# reset the approx post dist
it_loc = agent.approx_post.dist.loc.clone().detach()
for a_dim in range(it_loc.shape[1]):
if a_dim not in action_indices:
it_loc[:, a_dim] = loc[:, a_dim]
it_scale = scale
dist_params = {'loc': it_loc.requires_grad_(), 'scale': it_scale.requires_grad_()}
agent.approx_post.reset(dist_params=dist_params)
iterative_locs.append(agent.approx_post.dist.loc.detach().cpu().numpy())
# estimate the objective, backprop
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
obj.sum().backward(retain_graph=True)
# update
params, grads = agent.approx_post.params_and_grads()
inf_input = agent.inference_optimizer.inference_model(params=params, grads=grads, state=state)
agent.approx_post.step(inf_input)
agent.approx_post.retain_grads()
# reset the approx post dist
it_loc = agent.approx_post.dist.loc.clone().detach()
for a_dim in range(it_loc.shape[1]):
if a_dim not in action_indices:
it_loc[:, a_dim] = loc[:, a_dim]
it_scale = scale
dist_params = {'loc': it_loc, 'scale': it_scale}
agent.approx_post.reset(dist_params=dist_params)
iterative_locs.append(agent.approx_post.dist.loc.detach().cpu().numpy())
total_it_locs.append(np.array(iterative_locs))
total_it_objs.append(np.array(iterative_objectives))
total_it_locs = np.stack(total_it_locs)
total_it_objs = np.stack(total_it_objs)
print('Done.')
print('Estimating objectives...')
agent.n_action_samples = 10
# get all action means
a = np.arange(alim[0], alim[1], aint)
a_args = 2 * [a]
a_coords = np.meshgrid(*a_args)
stacked_action_means = np.stack([a_coord.reshape(-1) for a_coord in a_coords]).T
n_batches = len(stacked_action_means) // BATCH_SIZE + 1
n_samples = agent.n_action_samples
batch_expanded_state = state.repeat(BATCH_SIZE, 1)
batch_expanded_loc = loc.repeat(BATCH_SIZE, 1)
batch_expanded_scale = scale.repeat(BATCH_SIZE, 1)
objectives = np.zeros((len(stacked_action_means), 1))
# estimate the objective at all action means
for batch_ind in range(n_batches):
if batch_ind % 25 == 0:
print(' Processing batch ' + str(batch_ind+1) + ' of ' + str(n_batches) + '.')
# get a batch of actions
start_ind = batch_ind * BATCH_SIZE
end_ind = min((batch_ind + 1) * BATCH_SIZE, len(stacked_action_means))
action_mean_batch = stacked_action_means[start_ind:end_ind]
if action_mean_batch.shape[0] != BATCH_SIZE:
# fill out the rest of the batch with zeros if at the end
temp_action_mean_batch = np.zeros((BATCH_SIZE, 2))
temp_action_mean_batch[:action_mean_batch.shape[0]] = action_mean_batch
action_mean_batch = temp_action_mean_batch
action_mean_batch = torch.from_numpy(np.arctanh(action_mean_batch + 1e-6)).type(torch.FloatTensor)
# reset approx post, sample actions
agent.reset(batch_size=BATCH_SIZE); agent.eval()
loc_batch = batch_expanded_loc
loc_batch[:, action_indices[0]] = action_mean_batch[:, 0]
loc_batch[:, action_indices[1]] = action_mean_batch[:, 1]
scale_batch = batch_expanded_scale
agent.approx_post.reset(batch_size=BATCH_SIZE, dist_params={'loc': loc_batch.clone().requires_grad_(),
'scale': scale_batch.clone()})
action_batch = agent.approx_post.sample(n_samples)
# evaluate the value estimate of the action in the state
objective = agent.estimate_objective(batch_expanded_state, action_batch).view(n_samples, -1, 1).mean(dim=0).detach().cpu().numpy()
objectives[start_ind:end_ind] = objective[:end_ind-start_ind]
objectives = objectives.reshape(2 * [int((alim[1] - alim[0]) / aint)])
agent.n_action_samples = 10
print('Done.')
return {'objectives': objectives,
'stacked_action_means': stacked_action_means,
'action_indices': action_indices,
'iterative_approx_post_means': total_it_locs,
'iterative_objectives': total_it_objs,
'final_it_approx_post': it_approx_post}
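# example usage (sketch; iterative-agent key from the top of this file,
# np_state as constructed in the commented block above this function):
# results = vis_it_inference('58ec5bc5273044e59ae30a969c3d7de4',
#                            action_indices=[0, 1], state=np_state)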
def compare_inference(direct_exp_key, iterative_exp_key, action_indices, state_ind=0):
"""
Plots a 2D analysis of direct inference, comparing with an iterative inference agent.
Args:
direct_exp_key (str): the experiment key for the direct agent
iterative_exp_key (str): the experiment key for the iterative agent
action_indices (list): two action indices to vary
state_ind (int): state index to plot
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
direct_experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=direct_exp_key)
iterative_experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=iterative_exp_key)
# create the environment
dir_param_summary = direct_experiment.get_parameters_summary()
dir_env_name = [a for a in dir_param_summary if a['name'] == 'env'][0]['valueCurrent']
it_param_summary = iterative_experiment.get_parameters_summary()
it_env_name = [a for a in it_param_summary if a['name'] == 'env'][0]['valueCurrent']
assert it_env_name == dir_env_name
env = create_env(dir_env_name)
# create the agents
# direct
dir_asset_list = direct_experiment.get_asset_list()
dir_agent_config_asset_list = [a for a in dir_asset_list if 'agent_args' in a['fileName']]
dir_agent_args = None
if len(dir_agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
dir_agent_args = direct_experiment.get_asset(dir_agent_config_asset_list[0]['assetId'])
dir_agent_args = json.loads(dir_agent_args)
dir_agent_args = dir_agent_args if 'opt_type' in dir_agent_args['inference_optimizer_args'] else None
dir_agent = create_agent(env, agent_args=dir_agent_args)[0]
# iterative
it_asset_list = iterative_experiment.get_asset_list()
it_agent_config_asset_list = [a for a in it_asset_list if 'agent_args' in a['fileName']]
it_agent_args = None
if len(it_agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
it_agent_args = iterative_experiment.get_asset(it_agent_config_asset_list[0]['assetId'])
it_agent_args = json.loads(it_agent_args)
it_agent_args = it_agent_args if 'opt_type' in it_agent_args['inference_optimizer_args'] else None
it_agent = create_agent(env, agent_args=it_agent_args)[0]
# load the checkpoints
load_checkpoint(dir_agent, direct_exp_key)
load_checkpoint(it_agent, iterative_exp_key)
# load the state from the most recently collected episode
asset_times = [asset['createdAt'] for asset in dir_asset_list if 'state' in asset['fileName']]
state_asset = [a for a in dir_asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = json.loads(direct_experiment.get_asset(state_asset['assetId']))
state = torch.from_numpy(np.array(episode_states[state_ind])).view(1, -1).type(torch.FloatTensor)
print('STATE: ')
print(state)
# perform inference, get the direct approx post
print('Performing direct inference...')
dir_agent.reset(); dir_agent.eval()
dir_agent.act(state)
dir_loc = dir_agent.approx_post.dist.loc.detach().clone(); dir_scale = dir_agent.approx_post.dist.scale.detach().clone()
direct_approx_post = {'loc': dir_loc.clone().cpu().numpy(),
'scale': dir_scale.clone().cpu().numpy()}
print('DIRECT APPROX. POST.: ')
print(direct_approx_post)
print('Done.')
print('Performing iterative inference...')
it_agent.n_action_samples = 100
it_agent.q_value_estimator = dir_agent.q_value_estimator
it_agent.reset(); it_agent.eval()
iterative_locs = []
iterative_objectives = []
# for inf_it in range(it_agent.inference_optimizer.n_inf_iters):
for inf_it in range(25):
# reset the approx post dist
it_loc = it_agent.approx_post.dist.loc.clone().detach()
for a_dim in range(it_loc.shape[1]):
if a_dim not in action_indices:
it_loc[:, a_dim] = dir_loc[:, a_dim]
it_scale = dir_scale
dist_params = {'loc': it_loc.requires_grad_(), 'scale': it_scale.requires_grad_()}
it_agent.approx_post.reset(dist_params=dist_params)
iterative_locs.append(it_agent.approx_post.dist.loc.detach().cpu().numpy())
# estimate the objective, backprop
actions = it_agent.approx_post.sample(it_agent.n_action_samples)
obj = it_agent.estimate_objective(state, actions)
obj = - obj.view(it_agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
obj.sum().backward(retain_graph=True)
# update
params, grads = it_agent.approx_post.params_and_grads()
inf_input = it_agent.inference_optimizer.inference_model(params=params, grads=grads, state=state)
it_agent.approx_post.step(inf_input)
it_agent.approx_post.retain_grads()
# reset the approx post dist
it_loc = it_agent.approx_post.dist.loc.clone().detach()
for a_dim in range(it_loc.shape[1]):
if a_dim not in action_indices:
it_loc[:, a_dim] = dir_loc[:, a_dim]
it_scale = dir_scale
dist_params = {'loc': it_loc, 'scale': it_scale}
it_agent.approx_post.reset(dist_params=dist_params)
iterative_locs.append(it_agent.approx_post.dist.loc.detach().cpu().numpy())
print('Done.')
print('Estimating objectives...')
dir_agent.n_action_samples = 10
# get all action means
a = np.arange(alim[0], alim[1], aint)
a_args = 2 * [a]
a_coords = np.meshgrid(*a_args)
stacked_action_means = np.stack([a_coord.reshape(-1) for a_coord in a_coords]).T
n_batches = len(stacked_action_means) // BATCH_SIZE + 1
n_samples = dir_agent.n_action_samples
batch_expanded_state = state.repeat(BATCH_SIZE, 1)
batch_expanded_loc = dir_loc.repeat(BATCH_SIZE, 1)
batch_expanded_scale = dir_scale.repeat(BATCH_SIZE, 1)
objectives = np.zeros((len(stacked_action_means), 1))
# estimate the objective at all action means
for batch_ind in range(n_batches):
if batch_ind % 25 == 0:
print(' Processing batch ' + str(batch_ind+1) + ' of ' + str(n_batches) + '.')
# get a batch of actions
start_ind = batch_ind * BATCH_SIZE
end_ind = min((batch_ind + 1) * BATCH_SIZE, len(stacked_action_means))
action_mean_batch = stacked_action_means[start_ind:end_ind]
if action_mean_batch.shape[0] != BATCH_SIZE:
# fill out the rest of the batch with zeros if at the end
temp_action_mean_batch = np.zeros((BATCH_SIZE, 2))
temp_action_mean_batch[:action_mean_batch.shape[0]] = action_mean_batch
action_mean_batch = temp_action_mean_batch
action_mean_batch = torch.from_numpy(np.arctanh(action_mean_batch + 1e-6)).type(torch.FloatTensor)
# reset approx post, sample actions
dir_agent.reset(batch_size=BATCH_SIZE); dir_agent.eval()
loc_batch = batch_expanded_loc
loc_batch[:, action_indices[0]] = action_mean_batch[:, 0]
loc_batch[:, action_indices[1]] = action_mean_batch[:, 1]
scale_batch = batch_expanded_scale
dir_agent.approx_post.reset(batch_size=BATCH_SIZE, dist_params={'loc': loc_batch.clone().requires_grad_(),
'scale': scale_batch.clone()})
action_batch = dir_agent.approx_post.sample(n_samples)
# evaluate the value estimate of the action in the state
objective = dir_agent.estimate_objective(batch_expanded_state, action_batch).view(n_samples, -1, 1).mean(dim=0).detach().cpu().numpy()
objectives[start_ind:end_ind] = objective[:end_ind-start_ind]
objectives = objectives.reshape(2 * [int((alim[1] - alim[0]) / aint)])
dir_agent.n_action_samples = 10
print('Done.')
return {'objectives': objectives,
'stacked_action_means': stacked_action_means,
'direct_approx_post': direct_approx_post,
'action_indices': action_indices,
'iterative_approx_post_means': iterative_locs,
'iterative_objectives': iterative_objectives}
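# example usage (sketch; the two Reacher experiment keys listed at the top of
# this file):
# results = compare_inference('48edb0b9aca847c09c6893793c982884',
#                             '58ec5bc5273044e59ae30a969c3d7de4',
#                             action_indices=[0, 1], state_ind=0)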
def vis_mb_opt(exp_key, state_ind, rollout_horizon=None):
"""
Evaluates iterative amortized policy optimization on model-based value
estimates.
Args:
exp_key (str): the experiment key for the (model-based) agent
state_ind (int): state index to evaluate
rollout_horizon (int): the MB rollout horizon (None for agent default)
Returns dictionary containing:
states (np.array) [n iters, horizon, n action samples, state dim]
rewards (np.array)
q_values (np.array)
actions (np.array)
objectives (np.array)
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args)[0]
# set the rollout horizon to a different length
if rollout_horizon is not None:
agent.q_value_estimator.horizon = rollout_horizon
# load the checkpoint
load_checkpoint(agent, exp_key)
# load the state from the most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName'] and '.svg' not in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = json.loads(experiment.get_asset(state_asset['assetId']))
state = torch.from_numpy(np.array(episode_states[state_ind])).view(1, -1).type(torch.FloatTensor)
print('STATE: ')
print(state)
# perform iterative inference, get the approx post
agent.reset(); agent.eval()
agent.act(state)
rollout_states = agent.q_value_estimator.rollout_states
rollout_rewards = agent.q_value_estimator.rollout_rewards
rollout_q_values = agent.q_value_estimator.rollout_q_values
rollout_actions = agent.q_value_estimator.rollout_actions
states = torch.stack([torch.stack(rs, dim=0).detach().cpu() for rs in rollout_states], dim=0).numpy()
rewards = torch.stack([torch.stack(rr, dim=0).detach().cpu() for rr in rollout_rewards], dim=0).numpy()
q_values = torch.stack([torch.stack(rq, dim=0).detach().cpu() for rq in rollout_q_values], dim=0).numpy()
actions = torch.stack([torch.stack(ra, dim=0).detach().cpu() for ra in rollout_actions], dim=0).numpy()
objectives = agent.inference_optimizer.estimated_objectives
final_actions = agent.approx_post.sample(agent.n_action_samples)
final_obj = agent.estimate_objective(state, final_actions)
objectives.append( - final_obj.view(agent.n_action_samples, -1, 1).mean(dim=0))
objs = np.array([-obj.item() for obj in objectives])
return {'states': states,
'rewards': rewards,
'q_values': q_values,
'actions': actions,
'objectives': objs}
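# example usage (sketch; the experiment key is a placeholder for a model-based
# agent, and rollout_horizon=5 is an arbitrary override):
# mb_results = vis_mb_opt('<model-based-exp-key>', state_ind=0, rollout_horizon=5)
# mb_results['objectives']  # estimated objective across inference iterations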
|
448458
|
import json
import re
import requests
from azure.cli.core.commands import CliCommandType
from .cli_utils import az_cli
def load_command_table(self, _):
custom = CliCommandType(operations_tmpl="{}#{{}}".format(__name__))
with self.command_group("vm auto-shutdown", custom_command_type=custom) as g:
g.custom_command("enable", "enable_vm_autoshutdown")
g.custom_command("disable", "disable_vm_autoshutdown")
g.custom_command("show", "show_vm_autoshutdown")
def load_arguments(self, _):
with self.argument_context("vm auto-shutdown") as c:
c.argument("vm_name", options_list=["--name", "-n"])
with self.argument_context("vm auto-shutdown enable") as c:
c.argument("time", options_list=["--time", "-t"])
c.argument("timezone_id", options_list=["--timezone-id", "-tz"])
def enable_vm_autoshutdown(vm_name, resource_group_name, time, timezone_id="UTC"):
_, subscription_id = get_access_token()
properties = {}
properties["status"] = "Enabled"
properties["dailyRecurrence"] = {}
properties["dailyRecurrence"]["time"] = time
properties[
"targetResourceId"
] = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}".format(
subscription_id, resource_group_name, vm_name
)
properties["taskType"] = "ComputeVmShutdownTask"
properties["timeZoneId"] = timezone_id
schedule = az_cli(
[
"resource",
"create",
"-g",
resource_group_name,
"-n",
"shutdown-computevm-{}".format(vm_name),
"--resource-type",
"Microsoft.DevTestLab/schedules",
"-p",
json.dumps(properties),
]
)
return schedule
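# For reference, the properties payload assembled above serializes to
# (illustrative values):
# {
#   "status": "Enabled",
#   "dailyRecurrence": {"time": "1900"},
#   "targetResourceId": "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
#   "taskType": "ComputeVmShutdownTask",
#   "timeZoneId": "UTC"
# }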
def disable_vm_autoshutdown(vm_name, resource_group_name):
_, subscription_id = get_access_token()
resource_id = "/subscriptions/{}/resourcegroups/{}/providers/microsoft.devtestlab/schedules/shutdown-computevm-{}".format(
subscription_id, resource_group_name, vm_name
)
return az_cli(["resource", "delete", "--ids", resource_id])
def show_vm_autoshutdown(_, vm_name, resource_group_name):  # leading `_` absorbs an unused positional (presumably the cmd/context object)
access_token, subscription_id = get_access_token()
schedules = get_resources(
"Microsoft.DevTestLab",
"schedules",
access_token=access_token,
subscription_id=subscription_id,
api_version="2016-05-15",
)
search_string = "^/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}$".format(
subscription_id, resource_group_name, vm_name
)
regexp = re.compile(search_string)
active_schedules = [
x
for x in schedules
if x["properties"]["taskType"] == "ComputeVmShutdownTask"
and regexp.search(x["properties"]["targetResourceId"])
]
return active_schedules[0] if active_schedules else None
def get_access_token():
from azure.cli.core._profile import Profile
profile = Profile()
creds, subscription, _ = profile.get_raw_token()
return (creds[1], subscription)
def get_resources(
namespace, resource_type, access_token=None, subscription_id=None, api_version=None
):
if api_version is None:
api_version = get_latest_api_version(namespace, resource_type)
if subscription_id is None:
_, subscription_id = get_access_token()
if access_token is None:
access_token, _ = get_access_token()
url = "https://management.azure.com/subscriptions/{}/providers/{}/{}?api-version={}".format(
subscription_id, namespace, resource_type, api_version
)
headers = {"Authorization": "Bearer {}".format(access_token)}
resources = requests.get(url, headers=headers).json()["value"]
return resources
def get_latest_api_version(namespace, resource_type):
query = "resourceTypes[?resourceType=='{}'].apiVersions[0] | [0]".format(
resource_type
)
return az_cli(["provider", "show", "-n", namespace, "--query", query])
|