text stringlengths 957 885k |
|---|
# -*- coding: utf-8 -*-
"""
Test the publisher class
"""
import pytest
import numpy as np
import struct
import zmq
import time
import datetime as dt
from ..message import ArrayMessage
from ..sugar import Publisher
from .. import array as array_api
from .test_base import BaseContainerTests
from ...tests.test_helpers import ZeekoMutableMappingTests, ZeekoTestBase
class TestArrayMessage(ZeekoTestBase):
    """Test an array message object."""

    cls = ArrayMessage

    def test_init(self):
        """ArrayMessage object __init__"""
        p = self.cls("array", np.ones((10,)))

    def test_update(self):
        """Test update array items."""
        pub = self.cls("array", np.ones((10,)))
        # In-place assignment of a differently-shaped array must fail.
        with pytest.raises(ValueError):
            pub.array[:] = np.ones((20,))
        pub.array[:] = np.ones((10,))
        assert pub.array.shape == (10,)
        assert pub.name == "array"
        # ``np.float`` was just an alias for the builtin ``float`` and was
        # removed in NumPy 1.24; comparing against ``float`` is identical.
        assert pub.dtype == float
        assert pub.framecount == 0
        assert pub.md == {'shape':[10], 'dtype':'<f8', 'version': 1}
        assert isinstance(pub.metadata, str)
        assert pub.timestamp > 0
class TestPublisherMapping(ZeekoMutableMappingTests):
    """Test mapping characteristics of the publisher."""

    cls = Publisher

    @pytest.fixture
    def mapping(self, name, n, shape):
        """Return the publisher, populated with ``n`` random arrays."""
        items = [("{0:s}{1:d}".format(name, i), np.random.randn(*shape))
                 for i in range(n)]
        pub = Publisher([])
        # Use a distinct loop variable: the old code reused ``name`` and
        # silently shadowed the ``name`` fixture argument.
        for key, array in items:
            pub[key] = array
        return pub

    @pytest.fixture
    def keys(self, name, n):
        """Return the keys"""
        return ["{0:s}{1:d}".format(name, i) for i in range(n)]
class BasePublisherTests(BaseContainerTests):
    """Base tests for publishers."""

    @pytest.fixture(params=[2**18, 0, 50])
    def framecount(self, request):
        """Fixture for the framecount."""
        return request.param

    def get_last_message(self, receiver):
        """Get the value of the last message."""
        # ``last_message`` may be exposed as either a POSIX timestamp or a
        # ``datetime``; normalize to a float timestamp for comparisons.
        last_message = receiver.last_message
        assert isinstance(last_message, (float, dt.datetime))
        if isinstance(last_message, dt.datetime):
            last_message = time.mktime(last_message.timetuple())
        assert isinstance(last_message, float)
        return last_message

    def test_publisher_attrs(self, obj, framecount):
        """Test attributes"""
        assert obj.framecount == framecount
        last_message = self.get_last_message(obj)
        # The last-message timestamp should lie within +/- 10 s of "now".
        now = dt.datetime.now()
        last_timestamp = dt.datetime.fromtimestamp(last_message)
        assert now - dt.timedelta(seconds=10) < last_timestamp
        assert last_timestamp < now + dt.timedelta(seconds=10)

    def test_publish(self, obj, push, pull, name, framecount):
        """Test the array publisher."""
        obj.publish(push)
        # Each named array should arrive once, with the framecount advanced.
        for i, key in enumerate(obj.keys()):
            recvd_name, A = array_api.recv_named_array(pull)
            assert "{0:s}{1:d}".format(name, i) == recvd_name
            np.testing.assert_allclose(A, obj[key].array)
            assert A.framecount == framecount + 1

    def test_unbundled(self, obj, push, pull, name, framecount):
        """Test publisher in unbundled mode."""
        obj.publish(push)
        for i, key in enumerate(obj.keys()):
            recvd_name, A = array_api.recv_named_array(pull)
            assert "{0:s}{1:d}".format(name, i) == recvd_name
            np.testing.assert_allclose(A, obj[key].array)
            # Each message must stand alone: no further frames pending.
            assert not pull.getsockopt(zmq.RCVMORE)
            assert A.framecount == framecount + 1
class TestPublisher(BasePublisherTests):
    """Test the publisher."""

    cls = Publisher

    @pytest.fixture(params=[True, False], ids=['hardcopy', 'softcopy'])
    def hardcopy(self, request):
        """Whether to hardcopy or not."""
        return request.param

    @pytest.fixture
    def obj(self, name, n, shape, hardcopy, framecount):
        """Return the publisher, with arrays."""
        items = [("{0:s}{1:d}".format(name, i), np.random.randn(*shape))
                 for i in range(n)]
        pub = Publisher([])
        if hardcopy:
            pub.enable_hardcopy()
        pub.framecount = framecount
        # Use a distinct loop variable: the old code reused ``name`` and
        # silently shadowed the ``name`` fixture argument.
        for key, array in items:
            pub[key] = array
        return pub
@pytest.fixture
def n():
    """How many arrays each publisher fixture should carry."""
    array_count = 3
    return array_count
|
<reponame>Etxea/gestion_eide_web
from django.shortcuts import render
from django.views.generic import DetailView, ListView, CreateView, UpdateView, DeleteView
from django.views.generic.edit import DeletionMixin
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render_to_response
## Para el calendario
from calendar import Calendar
from models import *
from forms import *
class CursosListView(ListView):
    # Lists all Curso objects; template and context follow ListView defaults.
    model=Curso

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require an authenticated user for every HTTP method.
        return super(CursosListView, self).dispatch(*args, **kwargs)
class CursoDetailView(DetailView):
    # Detail page for a single Curso; login required.
    model = Curso

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(CursoDetailView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super(CursoDetailView, self).get_context_data(**kwargs)
        # Add an empty ClaseForm pre-bound to this Curso so the template
        # can create new Clase objects inline from the detail page.
        data = {'curso': self.object.id}
        context['clase_form'] = ClaseForm(initial=data)
        return context
class CursoDeleteView(DeleteView):
    # Deletes a Curso and returns to the course list; login required.
    model = Curso

    def get_success_url(self):
        # Resolve lazily: URLconf may not be loaded at class-definition time.
        return reverse_lazy("cursos_lista")

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(CursoDeleteView, self).dispatch(*args, **kwargs)
class ClaseCreateView(CreateView):
    """Create a Clase via ClaseForm; login required."""
    model = Clase
    form_class = ClaseForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # BUG FIX: the super() call previously named ClaseCursoCreateView —
        # a *subclass* of this view — which raises TypeError ("obj must be
        # an instance of ...") whenever this view is used directly.  super()
        # must be anchored at this class.
        return super(ClaseCreateView, self).dispatch(*args, **kwargs)
class ClaseDeleteView(DeleteView):
    # Deletes a Clase and returns to its parent Curso's detail page.
    model = Clase

    def get_success_url(self):
        # self.object is still available after deletion for URL resolution.
        return reverse_lazy("curso_detalle", kwargs={'pk': self.object.curso.pk})

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ClaseDeleteView, self).dispatch(*args, **kwargs)
class ClaseCursoCreateView(ClaseCreateView):
    """Create a Clase for a given Cliente (``cliente_id`` in the URL kwargs)."""

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ClaseCursoCreateView, self).dispatch(*args, **kwargs)

    # Seed the form's initial data with the client from the URL.
    def get_initial(self):
        super(ClaseCursoCreateView, self).get_initial()
        # get_object_or_404 (already imported above) turns a bad id into a
        # 404 instead of an unhandled DoesNotExist (HTTP 500).
        cliente = get_object_or_404(Cliente, pk=self.kwargs['cliente_id'])
        # Removed an unused ``user = self.request.user`` local.
        self.initial = {"cliente": cliente.id}
        return self.initial
def calendario_mes(request, curso_id, ano, mes):
    """Render 'cursos/mes.html' with a month calendar highlighting the days
    on which the given Curso has classes.

    ``ano`` and ``mes`` arrive as URL path strings and are coerced to int.
    """
    ano = int(ano)
    mes = int(mes)
    curso = Curso.objects.get(id=curso_id)
    # Weekdays on which this curso meets.  The model stores dia_semana
    # 1-based, while date.weekday() is 0-based (Monday == 0) — hence the -1.
    # assumes dia_semana uses Monday==1 — TODO confirm against the model.
    dias_semana = [clase.dia_semana - 1 for clase in curso.clase_set.all()]
    dias_clase = []
    c = Calendar()
    for d in c.itermonthdates(ano, mes):
        if d.weekday() in dias_semana:
            # itermonthdates also yields the leading/trailing days of the
            # adjacent months; keep only days of the requested month.
            if d.month == mes:
                dias_clase.append(d.day)
    # Removed a dead ``cal = HTMLCalendar()`` that was immediately
    # overwritten here without ever being used.
    cal = ClasesCalendar(dias_clase)
    calendario = cal.formatmonth(ano, mes)
    return render_to_response('cursos/mes.html',
                              {'calendario': calendario, "ano": ano, "mes": mes})
|
<reponame>DanielCohenHillel/pyEPR
# Zlatko
#
# Driver script: run a distributed energy-participation-ratio (EPR) analysis
# on an HFSS design with pyEPR, then post-process, compare against design
# targets, and plot the results.  The ``if 1:`` guards let individual stages
# be toggled off when re-running interactively.
from pyEPR import *
import matplotlib.pyplot as plt

if 1:
    # Specify the HFSS project to be analyzed
    project_info = ProjectInfo(r"C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\")
    project_info.project_name = '2017-10 re-sim SM22-R3C1'
    project_info.design_name = '3. sweep both'
    project_info.setup_name = None

    ## Describe the junctions in the HFSS design
    project_info.junctions['jBright'] = {'rect':'juncV', 'line': 'juncH_line', 'Lj_variable':'LJ1', 'length':0.0001}
    project_info.junctions['jDark'] = {'rect':'juncH', 'line': 'juncV_line', 'Lj_variable':'LJ2', 'length':0.0001}

    # Dissipative elements EPR
    project_info.dissipative.dielectric_surfaces = None # supply names here, there are more options in project_info.dissipative.

    # Run analysis
    epr_hfss = DistributedAnalysis(project_info)
    epr_hfss.do_EPR_analysis()

if 1: # Analysis result
    filename = epr_hfss.data_filename
    #filename = r'C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\/2017-10 re-sim SM22-R3C1/1. R3C1/1. R3C1_20171016_110756.hdf5'
    epr = QuantumAnalysis(filename)
    epr.plot_convergence_f_lin()
    # NOTE(review): _renorm_pj is a private pyEPR attribute — confirm this
    # renormalization flag is still supported by the installed version.
    epr._renorm_pj = True
    plt.close('all')
    epr.analyze_all_variations(cos_trunc = 10, fock_trunc = 8)
    epr.plot_hamiltonian_results()
    print(epr.data_filename)

#%%
if 1:
    import numpy as np
    import pandas as pd
    import seaborn as sns
    import matplotlib.pyplot as plt
    sns.reset_orig()

    #epr.hfss_variables.loc['_LJ2']
    # Shared heatmap styling: values are percentage errors, clipped at +/-20%.
    kw_map = dict(vmin = -20, vmax = 20, linewidths=0.5, annot=True,\
                  cmap='seismic' ) # RdYlGn_r
    # Design targets: mode frequencies (MHz), anharmonicities, cross-Kerrs,
    # indexed by mode label (D = dark, B = bright, C = cavity — presumably;
    # verify against the design notes).
    target_f = pd.Series([4688, 5300, 9003], index=['D','B','C'])
    target_alpha = pd.Series([148, 174], index=['D', 'B'])
    target_chi = pd.Series([85, 5, 0.33], index=['DB', 'BC', 'DC'])

    results = epr.results
    # Percentage error of numerically-diagonalized frequencies vs. targets.
    f_ND = results.get_frequencies_ND().rename(\
        index ={0:'D',1:'B',2:'C'})
    f_error = f_ND.apply(lambda x: 100*(x.values-target_f)/x, axis = 'index')

    fig, axs = plt.subplots(1, 3, figsize = (15,7.5))
    sns.heatmap(f_error.transpose(), ax = axs[0], **kw_map)

    # Diagonal chi entries (m == n) are the anharmonicities ...
    chis = results.get_chi_ND()
    chis = xarray_unravel_levels(chis, ['variation','m', 'n'])
    alpha_ND = sort_df_col(chis.sel_points(m = [0,1], n=[0,1]).to_pandas())
    alpha_ND.index = target_alpha.index
    alpha_ND_err = alpha_ND.apply(lambda x: 100*(x.values-target_alpha)/x, axis = 'index')
    sns.heatmap(alpha_ND_err.transpose(), ax = axs[1], **kw_map)

    # ... and the off-diagonal entries are the cross-Kerr couplings.
    chi_ND = sort_df_col(chis.sel_points(m = [0,1,0], n=[1,2,2]).to_pandas())
    chi_ND.index = target_chi.index
    chi_ND_err = chi_ND.apply(lambda x: 100*(x.values-target_chi)/x, axis = 'index')
    sns.heatmap(chi_ND_err.transpose(), ax = axs[2], **kw_map)

    axs[0].set_title('Freq.')
    axs[1].set_title('Anharmonicities')
    axs[2].set_title('cross-Kerrs')
|
<gh_stars>0
# coding: utf-8

# Exploratory notebook export: compare cosine distances between fastText
# embeddings of dirty vs. clean values of the 'HospitalName' column, to test
# whether misspelled variants of a name sit closer in embedding space than
# unrelated names do.

# In[1]:
from profiler.core import *

# ## 1. Instantiate Engine
# * workers : number of processes
# * tol : tolerance for differences when creating training data (set to 0 if data is completely clean)
# * eps : error bound for inverse covariance estimation (since we use conservative calculation when determining minimum sample size, we recommend to set eps <= 0.01)
# * embedtxt: if set to true, differentiate b/w textual data and categorical data, and use word embedding for the former

# In[2]:
pf = Profiler(workers=2, tol=1e-6, eps=0.05, embedtxt=True)

# ## 2. Load Data
# * name: any name you like
# * src: \[FILE; DF; DB (not implemented)\]
# * fpath: required if src == FILE
# * df: required if src == DF
# * check_param: print parameters used for data loading

# In[3]:
pf.session.load_data(name='hospital', src=FILE, fpath='data/hospital.csv', check_param=True, na_values='empty')

# ### 2.1 Change Data Types of Attributes
# * required input:
#     * a list of attributes
#     * a list of data types (must match the order of the attributes; can be CATEGORICAL, NUMERIC, TEXT, DATE)
# * optional input:
#     * a list of regular expression extractors

# In[4]:
pf.session.change_dtypes(['ProviderNumber', 'ZipCode', 'PhoneNumber', 'State', 'EmergencyService','Score', 'Sample'],
                         [CATEGORICAL, NUMERIC, CATEGORICAL, TEXT, TEXT, NUMERIC, NUMERIC],
                         [None, None, None, None, None, r'(\d+)%', r'(\d+)\spatients'])

# ### 2.2. Load/Train Embeddings for TEXT
# * path: path to saved/to-save embedding folder
# * load: set to true -- load saved vec from 'path'; set to false -- train locally
# * save: (only for load = False) save trained vectors to 'path'

# In[5]:
pf.session.load_embedding(save=True, path='data/hospital-naive/', load=False, concate=False, mode="ft")

# In[6]:
# load clean data (ground truth) into a second session for comparison
pf2 = Profiler(workers=2, tol=1e-6, eps=0.05, embedtxt=True)
pf2.session.load_data(name='hospital', src=FILE, fpath='data/hospital_clean_unflatten.csv', check_param=True, na_values='empty')
pf2.session.change_dtypes(['ProviderNumber', 'ZipCode', 'PhoneNumber', 'State', 'EmergencyService','Score', 'Sample'],
                          [CATEGORICAL, NUMERIC, CATEGORICAL, TEXT, TEXT, NUMERIC, NUMERIC],
                          [None, None, None, None, None, r'(\d+)%', r'(\d+)\spatients'])

# In[7]:
clean = pf2.session.ds.df['HospitalName']
dirty = pf.session.ds.df['HospitalName']
vec = pf.session.embed.models['HospitalName'].vec
vocab = pf.session.embed.models['HospitalName'].vocab

# In[11]:
# Give NaN a slot at the end of the vocabulary index.
vocab.loc[np.nan] = vocab.shape[0]-1

# In[8]:
import pandas as pd

# In[13]:
import sklearn

# 1. calculate cosine distances with all other values in the domain
distance = sklearn.metrics.pairwise.cosine_distances(vec)

# In[15]:
dis_with_same = []
dis_with_other = []
vocab['index'] = vocab['index'].astype(int)
for i, cell in enumerate(vocab.index):
    # BUG FIX: ``np.str`` was a deprecated alias for the builtin ``str`` and
    # was removed in NumPy 1.24; the builtin is equivalent.
    if not isinstance(cell, str):
        continue
    # for each word, find the corresponding correct word in clean data
    clean_cell = clean.loc[dirty.index[dirty==cell][0]]
    # find the index of the words in dirty vocab that equals to this word in groundtruth
    variations = dirty[(clean==clean_cell) & (dirty != cell)]
    variations = np.unique(variations[~pd.isnull(variations)])
    if len(variations) == 0:
        continue
    print("cell: %s"%cell)
    similar_idx = vocab.loc[variations,'index'].values
    same_idx = [vocab.loc[cell, 'index']]
    dis_with_same.extend(distance[i, similar_idx])
    print("\n-- distance with variational representations of the word -- ")
    maxdis = -1
    for word, dis in zip(variations, distance[i, vocab.loc[variations, 'index']]):
        print("%s (%.4f)"%(word, dis))
        maxdis = max(dis, maxdis)
    # All remaining vocabulary entries are unrelated words; collect their
    # distances and report any that are closer than the worst variant.
    nonsimilar_idx = np.array(list(set(vocab['index'].values) - set(similar_idx) - set(same_idx)))
    nonsimilar_dis = distance[i, nonsimilar_idx]
    dis_with_other.extend(nonsimilar_dis)
    print("\n-- nonsimilar words but with small distances --")
    for word, dis in zip(vocab.index.values[nonsimilar_idx[nonsimilar_dis<maxdis]], nonsimilar_dis[nonsimilar_dis<maxdis]):
        print("%s (%.4f)"%(word, dis))
    print("\n====")

# In[17]:
# Histograms of the two distance populations for visual comparison.
data1 = pd.DataFrame(data=dis_with_same)
ax1 = data1.hist(bins=np.arange(0,1,0.1))[0][0]
ax1.set_title('[Average-Fasttext Embedding] \nHistogram of cosine distance\n between similar words')
ax1.set_xlabel('cosine distance')
ax1.set_ylabel('count')

data2 = pd.DataFrame(data=dis_with_other)
ax2 = data2.hist(bins=np.arange(0,1,0.1))[0][0]
ax2.set_title('[Average-Fasttext Embedding] \nHistogram of cosine distance\n between non-similar words')
ax2.set_xlabel('cosine distance')
ax2.set_ylabel('count')

# In[ ]:
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions including those for writing to csv and json files
and doing simple HTTP requests using the requests library
classes:
SimpleHTTPJSON
SWVersions
functions:
to_file
Copyright 2017, <<EMAIL>>
See COPYRIGHT for details
"""
from __future__ import (
print_function,
unicode_literals
)
# stdlib
from collections import defaultdict
from csv import (
DictWriter as CSVDictWriter,
writer as CSVWriter
)
from json import (
dump as json_dump,
load as json_load
)
from os.path import dirname, join, realpath
from time import sleep
import warnings
# third party
import requests
from requests.exceptions import (
RequestException
)
HTTP_RETRY_COUNT = 50
HTTP_RETRY_DELAY = 1
HTTP_TIMEOUT = 60
class SimpleHTTPJSON(object):
    """
    Convenience base class for shortening HTTP pulls of JSON data.

    Very small class as it is meant to be extended.
    """

    # Advertise to servers that we want a JSON response.
    HTTP_HEADER_ACCEPT_JSON = {
        'Accept': 'application/json, text/javascript, */*; q=0.01'
    }

    def __init__(self):
        super(SimpleHTTPJSON, self).__init__()
        # Directory used by subclasses for cached JSON documents.
        self._cache_path = join(dirname(realpath(__file__)), '..', 'cache')

    def http_get_json(self,
                      url,
                      fatal_exception=True,
                      accepted_codes=(200,),
                      verify=True,
                      auth=None,
                      headers=None,
                      timeout=HTTP_TIMEOUT,
                      encoding='utf-8',
                      retry_delay=HTTP_RETRY_DELAY,
                      retries=HTTP_RETRY_COUNT):
        """GET ``url`` and return the decoded JSON object.

        Retries on request exceptions, sleeping ``retry_delay`` seconds
        between attempts.  Raises RuntimeError when every attempt fails or
        when the status code is not in ``accepted_codes`` (unless
        ``fatal_exception`` is False, in which case None is returned).

        NOTE: ``accepted_codes`` is now an immutable tuple default; the old
        list default was a shared mutable (membership-only use, so callers
        passing lists keep working).
        """
        retries += 1
        if auth is None:
            auth = []
        # Copy before updating so a caller-supplied dict is never mutated.
        headers = dict(headers) if headers else {}
        headers.update(self.HTTP_HEADER_ACCEPT_JSON)
        client = requests.session()
        client.encoding = encoding
        # Suppress e.g. urllib3 InsecureRequestWarning when verify=False.
        warnings.filterwarnings("ignore")
        live_retries = retries + 1
        success = False
        while live_retries != 0:
            try:
                response = client.get(
                    url,
                    verify=verify,
                    auth=auth,
                    headers=headers,
                    timeout=timeout)
                success = True
                break
            except RequestException as err:
                print('HTTP request error')
                print(err)
                print('Retrying ({}/{})'.format(retries - live_retries - 1,
                                                retries))
                live_retries -= 1
                sleep(retry_delay)
        if success is False:
            raise RuntimeError(
                'unable to make HTTP request after {} retries'.format(
                    retries))
        warnings.filterwarnings("always")
        if response.status_code not in accepted_codes:
            if fatal_exception is True:
                print('FATAL: code == %d' % (response.status_code))
                if response.status_code == 401:
                    print('FATAL: did you provide auth credentials?')
                elif response.status_code == 404:
                    print('FATAL: did you specify the correct URL?')
                raise RuntimeError(
                    'bad HTTP status code (%d)' % (response.status_code))
            else:
                return None
        try:
            return response.json()
        except ValueError:
            # Response body was not valid JSON.
            if fatal_exception is True:
                raise
            return None
class SWVersions(SimpleHTTPJSON):
    """
    Class for programmatically getting the current versions of popular
    software. This is done usually via HTTP requests, either directly to
    the vendor or to some third party service.
    """

    CHROME_VERSION_URL = 'https://omahaproxy.appspot.com/all.json?os=%s&channel=%s'
    VERGRABBER_URL = 'http://vergrabber.kingu.pl/vergrabber.json'

    def _cache_load(self, cache_file):
        """Load a previously cached JSON document from the cache directory."""
        with open(join(self._cache_path, cache_file), 'rb') as read_stream:
            return json_load(read_stream)

    def latest_chrome(self,
                      operating_system='mac',
                      channel='stable',
                      previous=False,
                      version_only=False):
        """
        Retrieve version information for the newest Chrome stable release for Mac.

        If previous is True, return both the current and previous version.
        If version_only is True, return a simple string or tuple of strings only,
        without including the release date information.
        """
        chrome_version = defaultdict(dict)
        url = self.CHROME_VERSION_URL % (operating_system, channel)
        response = self.http_get_json(url, verify=False)
        # The endpoint returns a list of OS entries; take the last entry's
        # most recent 'versions' record.
        response = response.pop()
        response = response['versions'].pop()
        if version_only is True:
            if previous is True:
                return response['current_version'], response[
                    'previous_version']
            return response['current_version']
        chrome_version['current']['version'] = response['current_version']
        chrome_version['current']['reldate'] = response['current_reldate']
        if previous is True:
            chrome_version['previous']['version'] = response[
                'previous_version']
            chrome_version['previous']['reldate'] = response[
                'previous_reldate']
        return chrome_version

    def get_version(self,
                    client_versions=True,
                    server_versions=True,
                    version_only=False,
                    applications=None):
        """
        Get latest versions of many common software packages. If applications is
        specified, get versions only for the applications in the list provided.
        """
        all_versions = {}
        filtered_versions = {}
        # BUG FIX: the old guard was ``not filter(None, (...))`` which is
        # always False on Python 3 (filter() returns an always-truthy lazy
        # object), so the sanity check never fired.
        if not (client_versions or server_versions):
            raise RuntimeError(
                'must request either client data, server data, or both')
        obj = self.http_get_json(self.VERGRABBER_URL, verify=False)
        if server_versions is True:
            all_versions.update(obj['server'])
        if client_versions is True:
            all_versions.update(obj['client'])
        stripped = defaultdict(dict)
        if version_only is True:
            # BUG FIX: dict.iteritems() does not exist on Python 3;
            # items() behaves correctly on both 2 and 3.
            for product, branches in all_versions.items():
                for branch, version_info in branches.items():
                    stripped[product][branch] = version_info['version']
            all_versions = stripped
        if applications is None:
            return all_versions
        for app in applications:
            filtered_versions[app] = all_versions.get(app, 'N/A')
        return filtered_versions
def to_file(dest,
            obj,
            csv_fields=None,
            uniq=True,
            filter_blanks=True,
            silent=False):
    """
    Dump ``obj`` to a file, format chosen by the extension of ``dest``:

    * ``.json`` -- a standard ``json.dump()``
    * ``.csv``  -- plain CSV, or with a header when ``csv_fields`` is given
      (or when rows are dicts, in which case the header is inferred from the
      first row's keys)
    * ``.lst``  -- one string per line; ``uniq`` de-duplicates and
      ``filter_blanks`` skips whitespace-only rows
    * anything else -- treated as a list of strings, one per line

    Raises RuntimeError for unsupported object/row types, re-raises OSError
    from open().
    """
    try:
        # Text mode: json/csv writers emit str on Python 3, so the old
        # 'wb' mode raised TypeError there.
        write_stream = open(dest, 'w')
    except OSError as err:
        print(err)
        raise
    if dest.endswith('.json'):
        # Basic JSON dump
        json_dump(obj, write_stream, sort_keys=False)
    elif dest.endswith('.csv'):
        # Write out a plain CSV file, or one with a header if csv_fields is
        # specified
        if isinstance(obj, (set, tuple, list)) is False:
            raise RuntimeError(
                'ERROR: csv files must be generated from a list/tuple/set')
        # (Removed a leftover debug json-dump print of the whole object.)
        # next(iter(...)) also works for sets, where obj[0] used to crash.
        first_row = next(iter(obj), None)
        if csv_fields is None and isinstance(first_row, dict):
            # Infer the header from the first row's keys; do not clobber an
            # explicitly supplied csv_fields as the old code did.
            csv_fields = list(first_row.keys())
        if csv_fields is not None:
            writer = CSVDictWriter(write_stream, fieldnames=csv_fields)
            writer.writeheader()
        else:
            writer = CSVWriter(write_stream)
        for row in obj:
            # BUG FIX: the old code tested ``obj is None`` here, so None
            # rows were never actually skipped.
            if row is None:
                continue
            if csv_fields is not None:
                if isinstance(row, dict):
                    writer.writerow(row)
                else:
                    # Sequence row with an explicit header: map by position.
                    writer.writerow(dict(zip(csv_fields, row)))
            else:
                writer.writerow(row)
    elif dest.endswith('.lst'):
        if isinstance(obj, (set, tuple, list)) is False:
            raise RuntimeError(
                'ERROR: raw/.lst dump object must be set/tuple/list')
        if uniq is True:
            obj = set(obj)
        for row in obj:
            # BUG FIX: the old code type-checked ``obj`` (always a
            # container) instead of ``row``, so this branch raised
            # unconditionally and .lst output never worked.
            if not isinstance(row, str):
                raise RuntimeError(
                    'ERROR: raw/.lst files must be list of strings')
            if filter_blanks is True and row.strip() == '':
                continue
            write_stream.write(row + '\n')
    else:
        # Unknown extension, assume list of strings
        print('WARN: unknown file extension, dumping as list of strings')
        for row in obj:
            if not isinstance(row, str):
                raise RuntimeError('ERROR: lst files must be list of strings')
            write_stream.write(row.strip() + '\n')
    write_stream.close()
    if silent is False:
        print('--- Object dumped to file %s ...' % (dest))
|
"""The new semantic analyzer (work in progress).
Bind names to definitions and do various other simple consistency
checks. It also detects special forms such as NamedTuple and cast().
Multiple analysis iterations may be needed to analyze forward
references and import cycles. Each iteration "fills in" additional
bindings and references until everything has been bound.
For example, consider this program:
x = 1
y = x
Here semantic analysis would detect that the assignment 'x = 1'
defines a new variable, the type of which is to be inferred (in a
later pass; type inference or type checking is not part of semantic
analysis). Also, it would bind both references to 'x' to the same
module-level variable (Var) node. The second assignment would also
be analyzed, and the type of 'y' marked as being inferred.
Semantic analysis of types is implemented in typeanal.py.
See semanal_main.py for the top-level logic.
Some important properties:
* After semantic analysis is complete, no PlaceholderNode and
PlaceholderType instances should remain. During semantic analysis,
if we encounter one of these, the current target should be deferred.
* A TypeInfo is only created once we know certain basic information about
a type, such as the MRO, existence of a Tuple base class (e.g., for named
tuples), and whether we have a TypedDict. We use a temporary
PlaceholderNode node in the symbol table if some such information is
missing.
* For assignments, we only add a non-placeholder symbol table entry once
we know the sort of thing being defined (variable, NamedTuple, type alias,
etc.).
* Every part of the analysis step must support multiple iterations over
the same AST nodes, and each iteration must be able to fill in arbitrary
things that were missing or incomplete in previous iterations.
* Changes performed by the analysis need to be reversible, since mypy
daemon strips and reuses existing ASTs (to improve performance and/or
reduce memory use).
"""
from contextlib import contextmanager
from typing import (
List, Dict, Set, Tuple, cast, TypeVar, Union, Optional, Callable, Iterator, Iterable,
)
from mypy.nodes import (
MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef,
ClassDef, Var, GDEF, FuncItem, Import, Expression, Lvalue,
ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr,
IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt,
RaiseStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt,
ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt,
GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr,
SliceExpr, CastExpr, RevealExpr, TypeApplication, Context, SymbolTable,
SymbolTableNode, ListComprehension, GeneratorExpr,
LambdaExpr, MDEF, Decorator, SetExpr, TypeVarExpr,
StrExpr, BytesExpr, PrintStmt, ConditionalExpr, PromoteExpr,
ComparisonExpr, StarExpr, ARG_POS, ARG_NAMED, type_aliases,
YieldFromExpr, NamedTupleExpr, NonlocalDecl, SymbolNode,
SetComprehension, DictionaryComprehension, TypeAlias, TypeAliasExpr,
YieldExpr, ExecStmt, BackquoteExpr, ImportBase, AwaitExpr,
IntExpr, FloatExpr, UnicodeExpr, TempNode, OverloadPart,
PlaceholderNode, COVARIANT, CONTRAVARIANT, INVARIANT,
nongen_builtins, get_member_expr_fullname, REVEAL_TYPE,
REVEAL_LOCALS, is_final_node, TypedDictExpr, type_aliases_target_versions,
EnumCallExpr
)
from mypy.tvar_scope import TypeVarScope
from mypy.typevars import fill_typevars
from mypy.visitor import NodeVisitor
from mypy.errors import Errors, report_internal_error
from mypy.messages import best_matches, MessageBuilder, pretty_or
from mypy import message_registry
from mypy.types import (
FunctionLike, UnboundType, TypeVarDef, TupleType, UnionType, StarType, function_type,
CallableType, Overloaded, Instance, Type, AnyType, LiteralType, LiteralValue,
TypeTranslator, TypeOfAny, TypeType, NoneType, PlaceholderType, TPDICT_NAMES
)
from mypy.type_visitor import TypeQuery
from mypy.nodes import implicit_module_attrs
from mypy.newsemanal.typeanal import (
TypeAnalyser, analyze_type_alias, no_subscript_builtin_alias,
TypeVariableQuery, TypeVarList, remove_dups, has_any_from_unimported_type,
check_for_explicit_any, type_constructors, fix_instance_types
)
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.plugin import (
Plugin, ClassDefContext, SemanticAnalyzerPluginInterface,
DynamicClassDefContext
)
from mypy.util import (
get_prefix, correct_relative_import, unmangle, module_prefix
)
from mypy.scope import Scope
from mypy.newsemanal.semanal_shared import (
SemanticAnalyzerInterface, set_callable_name, calculate_tuple_fallback, PRIORITY_FALLBACKS
)
from mypy.newsemanal.semanal_namedtuple import NamedTupleAnalyzer
from mypy.newsemanal.semanal_typeddict import TypedDictAnalyzer
from mypy.newsemanal.semanal_enum import EnumCallAnalyzer
from mypy.newsemanal.semanal_newtype import NewTypeAnalyzer
from mypy.reachability import (
infer_reachability_of_if_statement, infer_condition_value, ALWAYS_FALSE, ALWAYS_TRUE,
MYPY_TRUE, MYPY_FALSE
)
from mypy.mro import calculate_mro, MroError
# Import typing_extensions only while type checking to avoid a hard
# runtime dependency.
MYPY = False
if MYPY:
    from typing_extensions import Final

T = TypeVar('T')

# Map from obsolete name to the current spelling.
obsolete_name_mapping = {
    'typing.Function': 'typing.Callable',
    'typing.typevar': 'typing.TypeVar',
}  # type: Final

# Map from the full name of a missing definition to the test fixture (under
# test-data/unit/fixtures/) that provides the definition. This is used for
# generating better error messages when running mypy tests only.
SUGGESTED_TEST_FIXTURES = {
    'builtins.list': 'list.pyi',
    'builtins.dict': 'dict.pyi',
    'builtins.set': 'set.pyi',
    'builtins.bool': 'bool.pyi',
    'builtins.Exception': 'exception.pyi',
    'builtins.BaseException': 'exception.pyi',
    'builtins.isinstance': 'isinstancelist.pyi',
    'builtins.property': 'property.pyi',
    'builtins.classmethod': 'classmethod.pyi',
}  # type: Final

# Typing names suggested in "did you forget to import ...?" hints.
TYPES_FOR_UNIMPORTED_HINTS = {
    'typing.Any',
    'typing.Callable',
    'typing.Dict',
    'typing.Iterable',
    'typing.Iterator',
    'typing.List',
    'typing.Optional',
    'typing.Set',
    'typing.Tuple',
    'typing.TypeVar',
    'typing.Union',
    'typing.cast',
}  # type: Final

# Special cased built-in classes that are needed for basic functionality and need to be
# available very early on.
CORE_BUILTIN_CLASSES = ['object', 'bool', 'function']  # type: Final

# Used for tracking incomplete references
Tag = int
class NewSemanticAnalyzer(NodeVisitor[None],
SemanticAnalyzerInterface,
SemanticAnalyzerPluginInterface):
"""Semantically analyze parsed mypy files.
The analyzer binds names and does various consistency checks for an
AST. Note that type checking is performed as a separate pass.
"""
# Module name space
modules = None # type: Dict[str, MypyFile]
# Global name space for current module
globals = None # type: SymbolTable
# Names declared using "global" (separate set for each scope)
global_decls = None # type: List[Set[str]]
# Names declated using "nonlocal" (separate set for each scope)
nonlocal_decls = None # type: List[Set[str]]
# Local names of function scopes; None for non-function scopes.
locals = None # type: List[Optional[SymbolTable]]
# Nested block depths of scopes
block_depth = None # type: List[int]
# TypeInfo of directly enclosing class (or None)
type = None # type: Optional[TypeInfo]
# Stack of outer classes (the second tuple item contains tvars).
type_stack = None # type: List[Optional[TypeInfo]]
# Type variables bound by the current scope, be it class or function
tvar_scope = None # type: TypeVarScope
# Per-module options
options = None # type: Options
# Stack of functions being analyzed
function_stack = None # type: List[FuncItem]
# Set to True if semantic analysis defines a name, or replaces a
# placeholder definition. If some iteration makes no progress,
# there can be at most one additional final iteration (see below).
progress = False
deferred = False # Set to true if another analysis pass is needed
incomplete = False # Set to true if current module namespace is missing things
# Is this the final iteration of semantic analysis (where we report
# unbound names due to cyclic definitions and should not defer)?
_final_iteration = False
# These names couldn't be added to the symbol table due to incomplete deps.
missing_names = None # type: Set[str]
# Callbacks that will be called after semantic analysis to tweak things.
patches = None # type: List[Tuple[int, Callable[[], None]]]
loop_depth = 0 # Depth of breakable loops
cur_mod_id = '' # Current module id (or None) (phase 2)
is_stub_file = False # Are we analyzing a stub file?
_is_typeshed_stub_file = False # Are we analyzing a typeshed stub file?
imports = None # type: Set[str] # Imported modules (during phase 2 analysis)
# Note: some imports (and therefore dependencies) might
# not be found in phase 1, for example due to * imports.
errors = None # type: Errors # Keeps track of generated errors
plugin = None # type: Plugin # Mypy plugin for special casing of library features
statement = None # type: Node # Statement/definition being analyzed
    def __init__(self,
                 modules: Dict[str, MypyFile],
                 missing_modules: Set[str],
                 incomplete_namespaces: Set[str],
                 errors: Errors,
                 plugin: Plugin) -> None:
        """Construct semantic analyzer.

        We reuse the same semantic analyzer instance across multiple modules.

        Args:
            modules: Global modules dictionary
            missing_modules: Modules that could not be found
            incomplete_namespaces: Namespaces that are being populated during semantic analysis
                (can contain modules and classes within the current SCC; mutated by the caller)
            errors: Report analysis errors using this instance
            plugin: Mypy plugin for special casing of library features
        """
        # Start with a single non-function (module) scope.
        self.locals = [None]
        # Saved namespaces from previous iteration. Every top-level function/method body is
        # analyzed in several iterations until all names are resolved. We need to save
        # the local namespaces for the top level function and all nested functions between
        # these iterations. See also semanal_main.process_top_level_function().
        self.saved_locals = {} \
            # type: Dict[Union[FuncItem, GeneratorExpr, DictionaryComprehension], SymbolTable]
        self.imports = set()
        self.type = None
        self.type_stack = []
        self.tvar_scope = TypeVarScope()
        self.function_stack = []
        self.block_depth = [0]
        self.loop_depth = 0
        self.errors = errors
        self.modules = modules
        self.msg = MessageBuilder(errors, modules)
        self.missing_modules = missing_modules
        # These namespaces are still in process of being populated. If we encounter a
        # missing name in these namespaces, we need to defer the current analysis target,
        # since it's possible that the name will be there once the namespace is complete.
        self.incomplete_namespaces = incomplete_namespaces
        self.all_exports = []  # type: List[str]
        # Map from module id to list of explicitly exported names (i.e. names in __all__).
        self.export_map = {}  # type: Dict[str, List[str]]
        self.plugin = plugin
        # If True, process function definitions. If False, don't. This is used
        # for processing module top levels in fine-grained incremental mode.
        self.recurse_into_functions = True
        self.scope = Scope()
# mypyc doesn't properly handle implementing an abstractproperty
# with a regular attribute so we make them properties
    @property
    def is_typeshed_stub_file(self) -> bool:
        """Whether the file currently being analyzed is a typeshed stub."""
        return self._is_typeshed_stub_file
    @property
    def final_iteration(self) -> bool:
        """Whether this is the final pass (no more chances to resolve names)."""
        return self._final_iteration
#
# Preparing module (performed before semantic analysis)
#
def prepare_file(self, file_node: MypyFile) -> None:
"""Prepare a freshly parsed file for semantic analysis."""
if 'builtins' in self.modules:
file_node.names['__builtins__'] = SymbolTableNode(GDEF,
self.modules['builtins'])
if file_node.fullname() == 'builtins':
self.prepare_builtins_namespace(file_node)
if file_node.fullname() == 'typing':
self.prepare_typing_namespace(file_node)
def prepare_typing_namespace(self, file_node: MypyFile) -> None:
"""Remove dummy alias definitions such as List = TypeAlias(object) from typing.
They will be replaced with real aliases when corresponding targets are ready.
"""
for stmt in file_node.defs.copy():
if (isinstance(stmt, AssignmentStmt) and len(stmt.lvalues) == 1 and
isinstance(stmt.lvalues[0], NameExpr)):
# Assignment to a simple name, remove it if it is a dummy alias.
if 'typing.' + stmt.lvalues[0].name in type_aliases:
file_node.defs.remove(stmt)
def prepare_builtins_namespace(self, file_node: MypyFile) -> None:
"""Add certain special-cased definitions to the builtins module.
Some definitions are too special or fundamental to be processed
normally from the AST.
"""
names = file_node.names
# Add empty definition for core built-in classes, since they are required for basic
# operation. These will be completed later on.
for name in CORE_BUILTIN_CLASSES:
cdef = ClassDef(name, Block([])) # Dummy ClassDef, will be replaced later
info = TypeInfo(SymbolTable(), cdef, 'builtins')
info._fullname = 'builtins.%s' % name
names[name] = SymbolTableNode(GDEF, info)
bool_info = names['bool'].node
assert isinstance(bool_info, TypeInfo)
bool_type = Instance(bool_info, [])
special_var_types = [
('None', NoneType()),
# reveal_type is a mypy-only function that gives an error with
# the type of its arg.
('reveal_type', AnyType(TypeOfAny.special_form)),
# reveal_locals is a mypy-only function that gives an error with the types of
# locals
('reveal_locals', AnyType(TypeOfAny.special_form)),
('True', bool_type),
('False', bool_type),
('__debug__', bool_type),
] # type: List[Tuple[str, Type]]
for name, typ in special_var_types:
v = Var(name, typ)
v._fullname = 'builtins.%s' % name
file_node.names[name] = SymbolTableNode(GDEF, v)
#
# Analyzing a target
#
    def refresh_partial(self,
                        node: Union[MypyFile, FuncDef, OverloadedFuncDef],
                        patches: List[Tuple[int, Callable[[], None]]],
                        final_iteration: bool,
                        file_node: MypyFile,
                        options: Options,
                        active_type: Optional[TypeInfo] = None) -> None:
        """Refresh a stale target in fine-grained incremental mode.

        Args:
            node: the target to reanalyze -- either a whole module top level
                or a single (possibly overloaded) function
            patches: list that collects scheduled post-pass callbacks (stored
                on self for the duration of this call)
            final_iteration: if True, unresolved names can no longer be deferred
            active_type: surrounding class when the target is a method
        """
        self.patches = patches
        # Reset per-target bookkeeping; the caller inspects these afterwards.
        self.deferred = False
        self.incomplete = False
        self._final_iteration = final_iteration
        self.missing_names = set()
        with self.file_context(file_node, options, active_type):
            if isinstance(node, MypyFile):
                self.refresh_top_level(node)
            else:
                # A single function target: function bodies must be entered.
                self.recurse_into_functions = True
                self.accept(node)
        # Patches are only valid for the duration of this target.
        del self.patches
    def refresh_top_level(self, file_node: MypyFile) -> None:
        """Reanalyze a stale module top-level in fine-grained incremental mode."""
        # Function bodies are analyzed separately as their own targets.
        self.recurse_into_functions = False
        self.add_implicit_module_attrs(file_node)
        for d in file_node.defs:
            self.accept(d)
        if file_node.fullname() == 'typing':
            self.add_builtin_aliases(file_node)
        self.adjust_public_exports()
def add_implicit_module_attrs(self, file_node: MypyFile) -> None:
"""Manually add implicit definitions of module '__name__' etc."""
for name, t in implicit_module_attrs.items():
# unicode docstrings should be accepted in Python 2
if name == '__doc__':
if self.options.python_version >= (3, 0):
typ = UnboundType('__builtins__.str') # type: Type
else:
typ = UnionType([UnboundType('__builtins__.str'),
UnboundType('__builtins__.unicode')])
else:
assert t is not None, 'type should be specified for {}'.format(name)
typ = UnboundType(t)
existing = file_node.names.get(name)
if existing is not None and not isinstance(existing.node, PlaceholderNode):
# Already exists.
continue
an_type = self.anal_type(typ)
if an_type:
var = Var(name, an_type)
var._fullname = self.qualified_name(name)
var.is_ready = True
self.add_symbol(name, var, dummy_context())
else:
self.add_symbol(name,
PlaceholderNode(self.qualified_name(name), file_node),
dummy_context())
    def add_builtin_aliases(self, tree: MypyFile) -> None:
        """Add builtin type aliases to typing module.

        For historical reasons, the aliases like `List = list` are not defined
        in typeshed stubs for typing module. Instead we need to manually add the
        corresponding nodes on the fly. We explicitly mark these aliases as normalized,
        so that a user can write `typing.List[int]`.
        """
        assert tree.fullname() == 'typing'
        for alias, target_name in type_aliases.items():
            if type_aliases_target_versions[alias] > self.options.python_version:
                # This alias is not available on this Python version.
                continue
            name = alias.split('.')[-1]
            # Skip aliases that already have a real (non-placeholder) definition.
            if name in tree.names and not isinstance(tree.names[name].node, PlaceholderNode):
                continue
            # The tag lets us detect incomplete references created below.
            tag = self.track_incomplete_refs()
            n = self.lookup_fully_qualified_or_none(target_name)
            if n:
                if isinstance(n.node, PlaceholderNode):
                    self.mark_incomplete(name, tree)
                else:
                    # Found built-in class target. Create alias.
                    target = self.named_type_or_none(target_name, [])
                    assert target is not None
                    # Transform List to List[Any], etc.
                    fix_instance_types(target, self.fail)
                    alias_node = TypeAlias(target, alias,
                                           line=-1, column=-1,  # there is no context
                                           no_args=True, normalized=True)
                    self.add_symbol(name, alias_node, tree)
            elif self.found_incomplete_ref(tag):
                # Built-in class target may not be ready yet -- defer.
                self.mark_incomplete(name, tree)
            else:
                # Test fixtures may be missing some builtin classes, which is okay.
                # Kill the placeholder if there is one.
                if name in tree.names:
                    assert isinstance(tree.names[name].node, PlaceholderNode)
                    del tree.names[name]
def adjust_public_exports(self) -> None:
"""Make variables not in __all__ not be public"""
if '__all__' in self.globals:
for name, g in self.globals.items():
if name not in self.all_exports:
g.module_public = False
    @contextmanager
    def file_context(self,
                     file_node: MypyFile,
                     options: Options,
                     active_type: Optional[TypeInfo] = None) -> Iterator[None]:
        """Configure analyzer for analyzing targets within a file/class.

        Args:
            file_node: target file
            options: options specific to the file
            active_type: must be the surrounding class to analyze method targets
        """
        scope = self.scope
        self.options = options
        self.errors.set_file(file_node.path, file_node.fullname(), scope=scope)
        self.cur_mod_node = file_node
        self.cur_mod_id = file_node.fullname()
        scope.enter_file(self.cur_mod_id)
        self.is_stub_file = file_node.path.lower().endswith('.pyi')
        self._is_typeshed_stub_file = self.errors.is_typeshed_file(file_node.path)
        self.globals = file_node.names
        self.tvar_scope = TypeVarScope()
        # Fresh helper analyzers for this file's options.
        self.named_tuple_analyzer = NamedTupleAnalyzer(options, self)
        self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
        self.enum_call_analyzer = EnumCallAnalyzer(options, self)
        self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
        # Counter that keeps track of references to undefined things potentially caused by
        # incomplete namespaces.
        self.num_incomplete_refs = 0
        if active_type:
            # Analyzing a method target: enter the surrounding class and bring
            # its type variables into scope.
            scope.enter_class(active_type)
            self.enter_class(active_type.defn.info)
            for tvar in active_type.defn.type_vars:
                self.tvar_scope.bind_existing(tvar)
        yield
        # Tear down in the reverse order of the setup above.
        if active_type:
            scope.leave()
            self.leave_class()
            self.type = None
        scope.leave()
        del self.options
#
# Functions
#
    def visit_func_def(self, defn: FuncDef) -> None:
        """Analyze a function definition statement."""
        self.statement = defn
        # A function inside any block (if/try/...) is a conditional definition.
        defn.is_conditional = self.block_depth[-1] > 0
        # Set full names even for those definitions that aren't added
        # to a symbol table. For example, for overload items.
        defn._fullname = self.qualified_name(defn.name())
        # We don't add module top-level functions to symbol tables
        # when we analyze their bodies in the second phase of analysis,
        # since they were added in the first phase. Nested functions
        # get always added, since they aren't separate targets.
        if not self.recurse_into_functions or len(self.function_stack) > 0:
            if not defn.is_decorated and not defn.is_overload:
                self.add_function_to_symbol_table(defn)
        if not self.recurse_into_functions:
            return
        with self.scope.function_scope(defn):
            self.analyze_func_def(defn)
    def analyze_func_def(self, defn: FuncDef) -> None:
        """Analyze a function's signature, decorators/defaults, and body."""
        self.function_stack.append(defn)
        if defn.type:
            assert isinstance(defn.type, CallableType)
            self.update_function_type_variables(defn.type, defn)
        self.function_stack.pop()
        if self.is_class_scope():
            # Method definition
            assert self.type is not None
            defn.info = self.type
            if defn.type is not None and defn.name() in ('__init__', '__init_subclass__'):
                assert isinstance(defn.type, CallableType)
                # These methods implicitly return None even if annotated loosely.
                if isinstance(defn.type.ret_type, AnyType):
                    defn.type = defn.type.copy_modified(ret_type=NoneType())
            self.prepare_method_signature(defn, self.type)
        # Analyze function signature and initializers first.
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            if defn.type:
                self.check_classvar_in_signature(defn.type)
                assert isinstance(defn.type, CallableType)
                # Signature must be analyzed in the surrounding scope so that
                # class-level imported names and type variables are in scope.
                analyzer = self.type_analyzer()
                tag = self.track_incomplete_refs()
                result = analyzer.visit_callable_type(defn.type, nested=False)
                # Don't store not ready types (including placeholders).
                if self.found_incomplete_ref(tag) or has_placeholder(result):
                    self.defer()
                    return
                defn.type = result
                self.add_type_alias_deps(analyzer.aliases_used)
                self.check_function_signature(defn)
                if isinstance(defn, FuncDef):
                    assert isinstance(defn.type, CallableType)
                    defn.type = set_callable_name(defn.type, defn)
            # Argument default values are analyzed in the surrounding scope too.
            for arg in defn.arguments:
                if arg.initializer:
                    arg.initializer.accept(self)
        self.analyze_function_body(defn)
        if defn.is_coroutine and isinstance(defn.type, CallableType):
            if defn.is_async_generator:
                # Async generator types are handled elsewhere
                pass
            else:
                # A coroutine defined as `async def foo(...) -> T: ...`
                # has external return type `Coroutine[Any, Any, T]`.
                any_type = AnyType(TypeOfAny.special_form)
                ret_type = self.named_type_or_none('typing.Coroutine',
                                                   [any_type, any_type, defn.type.ret_type])
                assert ret_type is not None, "Internal error: typing.Coroutine not found"
                defn.type = defn.type.copy_modified(ret_type=ret_type)
def prepare_method_signature(self, func: FuncDef, info: TypeInfo) -> None:
"""Check basic signature validity and tweak annotation of self/cls argument."""
# Only non-static methods are special.
functype = func.type
if not func.is_static:
if not func.arguments:
self.fail('Method must have at least one argument', func)
elif isinstance(functype, CallableType):
self_type = functype.arg_types[0]
if isinstance(self_type, AnyType):
leading_type = fill_typevars(info) # type: Type
if func.is_class or func.name() in ('__new__', '__init_subclass__'):
leading_type = self.class_type(leading_type)
func.type = replace_implicit_first_type(functype, leading_type)
def set_original_def(self, previous: Optional[Node], new: Union[FuncDef, Decorator]) -> bool:
"""If 'new' conditionally redefine 'previous', set 'previous' as original
We reject straight redefinitions of functions, as they are usually
a programming error. For example:
def f(): ...
def f(): ... # Error: 'f' redefined
"""
if isinstance(new, Decorator):
new = new.func
if isinstance(previous, (FuncDef, Var, Decorator)) and new.is_conditional:
new.original_def = previous
return True
else:
return False
def update_function_type_variables(self, fun_type: CallableType, defn: FuncItem) -> None:
"""Make any type variables in the signature of defn explicit.
Update the signature of defn to contain type variable definitions
if defn is generic.
"""
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
a = self.type_analyzer()
fun_type.variables = a.bind_function_type_variables(fun_type, defn)
    def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
        """Analyze an overloaded function definition (or property with setter)."""
        self.statement = defn
        self.add_function_to_symbol_table(defn)
        if not self.recurse_into_functions:
            return
        # NB: Since _visit_overloaded_func_def will call accept on the
        # underlying FuncDefs, the function might get entered twice.
        # This is fine, though, because only the outermost function is
        # used to compute targets.
        with self.scope.function_scope(defn):
            self.analyze_overloaded_func_def(defn)
    def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
        """Analyze an OverloadedFuncDef: signatures, implementation, and checks."""
        # OverloadedFuncDef refers to any legitimate situation where you have
        # more than one declaration for the same function in a row. This occurs
        # with a @property with a setter or a deleter, and for a classic
        # @overload.
        defn._fullname = self.qualified_name(defn.name())
        # TODO: avoid modifying items.
        defn.items = defn.unanalyzed_items.copy()
        first_item = defn.items[0]
        first_item.is_overload = True
        first_item.accept(self)
        if isinstance(first_item, Decorator) and first_item.func.is_property:
            # This is a property.
            first_item.func.is_overload = True
            self.analyze_property_with_multi_part_definition(defn)
            typ = function_type(first_item.func, self.builtin_type('builtins.function'))
            assert isinstance(typ, CallableType)
            types = [typ]
        else:
            # This is a normal overload. Find the item signatures, the
            # implementation (if outside a stub), and any missing @overload
            # decorators.
            types, impl, non_overload_indexes = self.analyze_overload_sigs_and_impl(defn)
            defn.impl = impl
            if non_overload_indexes:
                self.handle_missing_overload_decorators(defn, non_overload_indexes,
                                                        some_overload_decorators=len(types) > 0)
            # If we found an implementation, remove it from the overload item list,
            # as it's special.
            if impl is not None:
                assert impl is defn.items[-1]
                defn.items = defn.items[:-1]
            elif not non_overload_indexes:
                self.handle_missing_overload_implementation(defn)
        if types:
            defn.type = Overloaded(types)
            defn.type.line = defn.line
        if not defn.items:
            # It was not a real overload after all, but function redefinition. We've
            # visited the redefinition(s) already.
            return
        # We know this is an overload def. Infer properties and perform some checks.
        self.process_final_in_overload(defn)
        self.process_static_or_class_method_in_overload(defn)
        self.add_symbol(defn.name(), defn, defn)
    def analyze_overload_sigs_and_impl(
            self,
            defn: OverloadedFuncDef) -> Tuple[List[CallableType],
                                              Optional[OverloadPart],
                                              List[int]]:
        """Find overload signatures, the implementation, and items with missing @overload.

        Assume that the first was already analyzed. As a side effect:
        analyzes remaining items and updates 'is_overload' flags.

        Returns a tuple (overload signatures, implementation or None,
        indexes of items that lack an @overload decorator).
        """
        types = []
        non_overload_indexes = []
        impl = None  # type: Optional[OverloadPart]
        for i, item in enumerate(defn.items):
            if i != 0:
                # Assume that the first item was already visited
                item.is_overload = True
                item.accept(self)
            # TODO: support decorated overloaded functions properly
            if isinstance(item, Decorator):
                callable = function_type(item.func, self.builtin_type('builtins.function'))
                assert isinstance(callable, CallableType)
                if not any(refers_to_fullname(dec, 'typing.overload')
                           for dec in item.decorators):
                    if i == len(defn.items) - 1 and not self.is_stub_file:
                        # Last item outside a stub is impl
                        impl = item
                    else:
                        # Oops it wasn't an overload after all. A clear error
                        # will vary based on where in the list it is, record
                        # that.
                        non_overload_indexes.append(i)
                else:
                    item.func.is_overload = True
                    types.append(callable)
            elif isinstance(item, FuncDef):
                # An undecorated function is only valid as the implementation.
                if i == len(defn.items) - 1 and not self.is_stub_file:
                    impl = item
                else:
                    non_overload_indexes.append(i)
        return types, impl, non_overload_indexes
def handle_missing_overload_decorators(self,
defn: OverloadedFuncDef,
non_overload_indexes: List[int],
some_overload_decorators: bool) -> None:
"""Generate errors for overload items without @overload.
Side effect: remote non-overload items.
"""
if some_overload_decorators:
# Some of them were overloads, but not all.
for idx in non_overload_indexes:
if self.is_stub_file:
self.fail("An implementation for an overloaded function "
"is not allowed in a stub file", defn.items[idx])
else:
self.fail("The implementation for an overloaded function "
"must come last", defn.items[idx])
else:
for idx in non_overload_indexes[1:]:
self.name_already_defined(defn.name(), defn.items[idx], defn.items[0])
if defn.impl:
self.name_already_defined(defn.name(), defn.impl, defn.items[0])
# Remove the non-overloads
for idx in reversed(non_overload_indexes):
del defn.items[idx]
def handle_missing_overload_implementation(self, defn: OverloadedFuncDef) -> None:
"""Generate error about missing overload implementation (only if needed)."""
if not self.is_stub_file:
if self.type and self.type.is_protocol and not self.is_func_scope():
# An overloded protocol method doesn't need an implementation.
for item in defn.items:
if isinstance(item, Decorator):
item.func.is_abstract = True
else:
item.is_abstract = True
else:
self.fail(
"An overloaded function outside a stub file must have an implementation",
defn)
    def process_final_in_overload(self, defn: OverloadedFuncDef) -> None:
        """Detect the @final status of an overloaded function (and perform checks)."""
        # If the implementation is marked as @final (or the first overload in
        # stubs), then the whole overloaded definition is @final.
        if any(item.is_final for item in defn.items):
            # We anyway mark it as final because it was probably the intention.
            defn.is_final = True
            # Only show the error once per overload
            bad_final = next(ov for ov in defn.items if ov.is_final)
            if not self.is_stub_file:
                self.fail("@final should be applied only to overload implementation",
                          bad_final)
            elif any(item.is_final for item in defn.items[1:]):
                # In stubs, only the first overload item may carry @final.
                bad_final = next(ov for ov in defn.items[1:] if ov.is_final)
                self.fail("In a stub file @final must be applied only to the first overload",
                          bad_final)
        if defn.impl is not None and defn.impl.is_final:
            defn.is_final = True
def process_static_or_class_method_in_overload(self, defn: OverloadedFuncDef) -> None:
class_status = []
static_status = []
for item in defn.items:
if isinstance(item, Decorator):
inner = item.func
elif isinstance(item, FuncDef):
inner = item
else:
assert False, "The 'item' variable is an unexpected type: {}".format(type(item))
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if defn.impl is not None:
if isinstance(defn.impl, Decorator):
inner = defn.impl.func
elif isinstance(defn.impl, FuncDef):
inner = defn.impl
else:
assert False, "Unexpected impl type: {}".format(type(defn.impl))
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if len(set(class_status)) != 1:
self.msg.overload_inconsistently_applies_decorator('classmethod', defn)
elif len(set(static_status)) != 1:
self.msg.overload_inconsistently_applies_decorator('staticmethod', defn)
else:
defn.is_class = class_status[0]
defn.is_static = static_status[0]
def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None:
"""Analyze a property defined using multiple methods (e.g., using @x.setter).
Assume that the first method (@property) has already been analyzed.
"""
defn.is_property = True
items = defn.items
first_item = cast(Decorator, defn.items[0])
for item in items[1:]:
if isinstance(item, Decorator) and len(item.decorators) == 1:
node = item.decorators[0]
if isinstance(node, MemberExpr):
if node.name == 'setter':
# The first item represents the entire property.
first_item.var.is_settable_property = True
# Get abstractness from the original definition.
item.func.is_abstract = first_item.func.is_abstract
else:
self.fail("Decorated property not supported", item)
if isinstance(item, Decorator):
item.func.accept(self)
def add_function_to_symbol_table(self, func: Union[FuncDef, OverloadedFuncDef]) -> None:
if self.is_class_scope():
assert self.type is not None
func.info = self.type
func._fullname = self.qualified_name(func.name())
self.add_symbol(func.name(), func, func)
    def analyze_function_body(self, defn: FuncItem) -> None:
        """Analyze the body of a function, entering and leaving its local scope."""
        is_method = self.is_class_scope()
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            # Bind the type variables again to visit the body.
            if defn.type:
                a = self.type_analyzer()
                a.bind_function_type_variables(cast(CallableType, defn.type), defn)
            self.function_stack.append(defn)
            self.enter(defn)
            # Add arguments to the local symbol table.
            for arg in defn.arguments:
                self.add_local(arg.variable, defn)
            # The first argument of a non-static, non-class method is like 'self'
            # (though the name could be different), having the enclosing class's
            # instance type.
            if is_method and not defn.is_static and not defn.is_class and defn.arguments:
                defn.arguments[0].variable.is_self = True
            defn.body.accept(self)
            # Leave in reverse order of entry above.
            self.leave()
            self.function_stack.pop()
def check_classvar_in_signature(self, typ: Type) -> None:
if isinstance(typ, Overloaded):
for t in typ.items(): # type: Type
self.check_classvar_in_signature(t)
return
if not isinstance(typ, CallableType):
return
for t in typ.arg_types + [typ.ret_type]:
if self.is_classvar(t):
self.fail_invalid_classvar(t)
# Show only one error per signature
break
def check_function_signature(self, fdef: FuncItem) -> None:
sig = fdef.type
assert isinstance(sig, CallableType)
if len(sig.arg_types) < len(fdef.arguments):
self.fail('Type signature has too few arguments', fdef)
# Add dummy Any arguments to prevent crashes later.
num_extra_anys = len(fdef.arguments) - len(sig.arg_types)
extra_anys = [AnyType(TypeOfAny.from_error)] * num_extra_anys
sig.arg_types.extend(extra_anys)
elif len(sig.arg_types) > len(fdef.arguments):
self.fail('Type signature has too many arguments', fdef, blocker=True)
    def visit_decorator(self, dec: Decorator) -> None:
        """Analyze a decorated function, special-casing well-known decorators."""
        self.statement = dec
        dec.func.is_conditional = self.block_depth[-1] > 0
        if not dec.is_overload:
            self.add_symbol(dec.name(), dec, dec)
        dec.func._fullname = self.qualified_name(dec.name())
        for d in dec.decorators:
            d.accept(self)
        # Indexes of decorators that are consumed here and removed from the AST.
        removed = []  # type: List[int]
        no_type_check = False
        for i, d in enumerate(dec.decorators):
            # A bunch of decorators are special cased here.
            if refers_to_fullname(d, 'abc.abstractmethod'):
                removed.append(i)
                dec.func.is_abstract = True
                self.check_decorated_function_is_method('abstractmethod', dec)
            elif (refers_to_fullname(d, 'asyncio.coroutines.coroutine') or
                  refers_to_fullname(d, 'types.coroutine')):
                removed.append(i)
                dec.func.is_awaitable_coroutine = True
            elif refers_to_fullname(d, 'builtins.staticmethod'):
                removed.append(i)
                dec.func.is_static = True
                dec.var.is_staticmethod = True
                self.check_decorated_function_is_method('staticmethod', dec)
            elif refers_to_fullname(d, 'builtins.classmethod'):
                removed.append(i)
                dec.func.is_class = True
                dec.var.is_classmethod = True
                self.check_decorated_function_is_method('classmethod', dec)
            elif (refers_to_fullname(d, 'builtins.property') or
                  refers_to_fullname(d, 'abc.abstractproperty')):
                removed.append(i)
                dec.func.is_property = True
                dec.var.is_property = True
                if refers_to_fullname(d, 'abc.abstractproperty'):
                    dec.func.is_abstract = True
                self.check_decorated_function_is_method('property', dec)
                if len(dec.func.arguments) > 1:
                    self.fail('Too many arguments', dec.func)
            elif refers_to_fullname(d, 'typing.no_type_check'):
                # Give the function Any type and skip analyzing its body.
                dec.var.type = AnyType(TypeOfAny.special_form)
                no_type_check = True
            elif (refers_to_fullname(d, 'typing.final') or
                  refers_to_fullname(d, 'typing_extensions.final')):
                if self.is_class_scope():
                    assert self.type is not None, "No type set at class scope"
                    if self.type.is_protocol:
                        self.msg.protocol_members_cant_be_final(d)
                    else:
                        dec.func.is_final = True
                        dec.var.is_final = True
                    removed.append(i)
                else:
                    self.fail("@final cannot be used with non-method functions", d)
        # Delete in reverse order so earlier indexes stay valid.
        for i in reversed(removed):
            del dec.decorators[i]
        if (not dec.is_overload or dec.var.is_property) and self.type:
            dec.var.info = self.type
            dec.var.is_initialized_in_class = True
        if not no_type_check and self.recurse_into_functions:
            dec.func.accept(self)
        if dec.decorators and dec.var.is_property:
            # Only the decorators handled above are supported on properties.
            self.fail('Decorated property not supported', dec)
def check_decorated_function_is_method(self, decorator: str,
context: Context) -> None:
if not self.type or self.is_func_scope():
self.fail("'%s' used with a non-method" % decorator, context)
#
# Classes
#
    def visit_class_def(self, defn: ClassDef) -> None:
        """Analyze a class definition inside a fresh class type-variable frame."""
        self.statement = defn
        with self.tvar_scope_frame(self.tvar_scope.class_frame()):
            self.analyze_class(defn)
    def analyze_class(self, defn: ClassDef) -> None:
        """Analyze a class definition, deferring if bases aren't ready yet."""
        fullname = self.qualified_name(defn.name)
        if not defn.info and not self.is_core_builtin_class(defn):
            # Add placeholder so that self-references in base classes can be
            # resolved. We don't want this to cause a deferral, since if there
            # are no incomplete references, we'll replace this with a TypeInfo
            # before returning.
            self.add_symbol(defn.name, PlaceholderNode(fullname, defn, True), defn,
                            can_defer=False)
        tag = self.track_incomplete_refs()
        # Restore base classes after previous iteration (things like Generic[T] might be removed).
        defn.base_type_exprs.extend(defn.removed_base_type_exprs)
        defn.removed_base_type_exprs.clear()
        self.update_metaclass(defn)
        bases = defn.base_type_exprs
        bases, tvar_defs, is_protocol = self.clean_up_bases_and_infer_type_variables(defn, bases,
                                                                                    context=defn)
        self.analyze_class_keywords(defn)
        result = self.analyze_base_classes(bases)
        if result is None or self.found_incomplete_ref(tag):
            # Something was incomplete. Defer current target.
            self.mark_incomplete(defn.name, defn)
            return
        base_types, base_error = result
        if any(isinstance(base, PlaceholderType) for base, _ in base_types):
            # We need to know the TypeInfo of each base to construct the MRO. Placeholder types
            # are okay in nested positions, since they can't affect the MRO.
            self.mark_incomplete(defn.name, defn)
            return
        # TypedDict and named tuple class bodies are handled by dedicated analyzers.
        is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn)
        if is_typeddict:
            if info is None:
                self.mark_incomplete(defn.name, defn)
            else:
                self.prepare_class_def(defn, info)
            return
        if self.analyze_namedtuple_classdef(defn):
            return
        # Create TypeInfo for class now that base classes and the MRO can be calculated.
        self.prepare_class_def(defn)
        defn.type_vars = tvar_defs
        defn.info.type_vars = [tvar.name for tvar in tvar_defs]
        if base_error:
            # Fall back to Any for classes with broken bases to limit errors.
            defn.info.fallback_to_any = True
        with self.scope.class_scope(defn.info):
            self.configure_base_classes(defn, base_types)
            defn.info.is_protocol = is_protocol
            self.analyze_metaclass(defn)
            defn.info.runtime_protocol = False
            for decorator in defn.decorators:
                self.analyze_class_decorator(defn, decorator)
            self.analyze_class_body_common(defn)
def is_core_builtin_class(self, defn: ClassDef) -> bool:
return self.cur_mod_id == 'builtins' and defn.name in CORE_BUILTIN_CLASSES
    def analyze_class_body_common(self, defn: ClassDef) -> None:
        """Parts of class body analysis that are common to all kinds of class defs."""
        # enter_class/leave_class must bracket the body traversal.
        self.enter_class(defn.info)
        defn.defs.accept(self)
        self.apply_class_plugin_hooks(defn)
        self.leave_class()
    def analyze_namedtuple_classdef(self, defn: ClassDef) -> bool:
        """Check if this class can define a named tuple.

        Return True if the class was handled as a named tuple (the caller
        should stop processing it), False otherwise.
        """
        if defn.info and defn.info.is_named_tuple:
            # Don't reprocess everything. We just need to process methods defined
            # in the named tuple class body.
            is_named_tuple, info = True, defn.info  # type: bool, Optional[TypeInfo]
        else:
            is_named_tuple, info = self.named_tuple_analyzer.analyze_namedtuple_classdef(defn)
        if is_named_tuple:
            if info is None:
                # Some dependency isn't ready yet -- defer.
                self.mark_incomplete(defn.name, defn)
            else:
                self.prepare_class_def(defn, info)
                with self.scope.class_scope(defn.info):
                    with self.named_tuple_analyzer.save_namedtuple_body(info):
                        self.analyze_class_body_common(defn)
            return True
        return False
def apply_class_plugin_hooks(self, defn: ClassDef) -> None:
"""Apply a plugin hook that may infer a more precise definition for a class."""
def get_fullname(expr: Expression) -> Optional[str]:
if isinstance(expr, CallExpr):
return get_fullname(expr.callee)
elif isinstance(expr, IndexExpr):
return get_fullname(expr.base)
elif isinstance(expr, RefExpr):
if expr.fullname:
return expr.fullname
# If we don't have a fullname look it up. This happens because base classes are
# analyzed in a different manner (see exprtotype.py) and therefore those AST
# nodes will not have full names.
sym = self.lookup_type_node(expr)
if sym:
return sym.fullname
return None
for decorator in defn.decorators:
decorator_name = get_fullname(decorator)
if decorator_name:
hook = self.plugin.get_class_decorator_hook(decorator_name)
if hook:
hook(ClassDefContext(defn, decorator, self))
if defn.metaclass:
metaclass_name = get_fullname(defn.metaclass)
if metaclass_name:
hook = self.plugin.get_metaclass_hook(metaclass_name)
if hook:
hook(ClassDefContext(defn, defn.metaclass, self))
for base_expr in defn.base_type_exprs:
base_name = get_fullname(base_expr)
if base_name:
hook = self.plugin.get_base_class_hook(base_name)
if hook:
hook(ClassDefContext(defn, base_expr, self))
def analyze_class_keywords(self, defn: ClassDef) -> None:
for value in defn.keywords.values():
value.accept(self)
    def enter_class(self, info: TypeInfo) -> None:
        """Push class scope state; must be paired with leave_class()."""
        # Remember previous active class
        self.type_stack.append(self.type)
        self.locals.append(None)  # Add class scope
        self.block_depth.append(-1)  # The class body increments this to 0
        self.type = info
    def leave_class(self) -> None:
        """ Restore analyzer state. """
        # Pop exactly what enter_class() pushed.
        self.block_depth.pop()
        self.locals.pop()
        self.type = self.type_stack.pop()
def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None:
decorator.accept(self)
if isinstance(decorator, RefExpr):
if decorator.fullname in ('typing.runtime', 'typing_extensions.runtime'):
if defn.info.is_protocol:
defn.info.runtime_protocol = True
else:
self.fail('@runtime can only be used with protocol classes', defn)
elif decorator.fullname in ('typing.final',
'typing_extensions.final'):
defn.info.is_final = True
    def clean_up_bases_and_infer_type_variables(
            self,
            defn: ClassDef,
            base_type_exprs: List[Expression],
            context: Context) -> Tuple[List[Expression],
                                       List[TypeVarDef],
                                       bool]:
        """Remove extra base classes such as Generic and infer type vars.

        For example, consider this class:

            class Foo(Bar, Generic[T]): ...

        Now we will remove Generic[T] from bases of Foo and infer that the
        type variable 'T' is a type argument of Foo.

        Note that this is performed *before* semantic analysis.

        Returns (remaining base expressions, inferred type variables, is protocol).
        """
        removed = []  # type: List[int]
        declared_tvars = []  # type: TypeVarList
        is_protocol = False
        for i, base_expr in enumerate(base_type_exprs):
            self.analyze_type_expr(base_expr)
            try:
                base = expr_to_unanalyzed_type(base_expr)
            except TypeTranslationError:
                # This error will be caught later.
                continue
            # Does this base declare type variables (Generic[...] / Protocol[...])?
            result = self.analyze_class_typevar_declaration(base)
            if result is not None:
                if declared_tvars:
                    self.fail('Only single Generic[...] or Protocol[...] can be in bases', context)
                removed.append(i)
                tvars, is_protocol = result
                declared_tvars.extend(tvars)
            if isinstance(base, UnboundType):
                sym = self.lookup_qualified(base.name, base)
                if sym is not None and sym.node is not None:
                    if (sym.node.fullname() in ('typing.Protocol',
                                                'typing_extensions.Protocol') and
                            i not in removed):
                        # also remove bare 'Protocol' bases
                        removed.append(i)
                        is_protocol = True
        # Type variables that occur in the remaining (non-removed) bases.
        all_tvars = self.get_all_bases_tvars(base_type_exprs, removed)
        if declared_tvars:
            if len(remove_dups(declared_tvars)) < len(declared_tvars):
                self.fail("Duplicate type variables in Generic[...] or Protocol[...]", context)
            declared_tvars = remove_dups(declared_tvars)
            if not set(all_tvars).issubset(set(declared_tvars)):
                self.fail("If Generic[...] or Protocol[...] is present"
                          " it should list all type variables", context)
                # In case of error, Generic tvars will go first
                declared_tvars = remove_dups(declared_tvars + all_tvars)
        else:
            declared_tvars = all_tvars
        for i in reversed(removed):
            # We need to actually remove the base class expressions like Generic[T],
            # mostly because otherwise they will create spurious dependencies in fine
            # grained incremental mode.
            defn.removed_base_type_exprs.append(defn.base_type_exprs[i])
            del base_type_exprs[i]
        # Bind the declared type variables in the class's type variable scope.
        tvar_defs = []  # type: List[TypeVarDef]
        for name, tvar_expr in declared_tvars:
            tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
            tvar_defs.append(tvar_def)
        return base_type_exprs, tvar_defs, is_protocol
    def analyze_class_typevar_declaration(self, base: Type) -> Optional[Tuple[TypeVarList, bool]]:
        """Analyze type variables declared using Generic[...] or Protocol[...].

        Args:
            base: Non-analyzed base class

        Return None if the base class does not declare type variables. Otherwise,
        return the type variables plus a flag that is true if the base is a
        Protocol[...] (rather than Generic[...]).
        """
        if not isinstance(base, UnboundType):
            return None
        unbound = base
        sym = self.lookup_qualified(unbound.name, unbound)
        if sym is None or sym.node is None:
            return None
        # Note the operator precedence below: a bare 'Generic' counts, while
        # 'Protocol' only counts when subscripted (Protocol[...]), since a bare
        # Protocol base declares no type variables.
        if (sym.node.fullname() == 'typing.Generic' or
                sym.node.fullname() == 'typing.Protocol' and base.args or
                sym.node.fullname() == 'typing_extensions.Protocol' and base.args):
            is_proto = sym.node.fullname() != 'typing.Generic'
            tvars = []  # type: TypeVarList
            for arg in unbound.args:
                tag = self.track_incomplete_refs()
                tvar = self.analyze_unbound_tvar(arg)
                if tvar:
                    tvars.append(tvar)
                elif not self.found_incomplete_ref(tag):
                    # Only complain if the argument didn't fail to resolve merely
                    # because of a not-yet-analyzed (incomplete) reference.
                    self.fail('Free type variable expected in %s[...]' %
                              sym.node.name(), base)
            return tvars, is_proto
        return None
    def analyze_unbound_tvar(self, t: Type) -> Optional[Tuple[str, TypeVarExpr]]:
        """Return (name, definition) if t refers to a free type variable.

        Return None if t is not a reference to a type variable, or if the
        type variable is already bound by the enclosing type variable scope.
        """
        if not isinstance(t, UnboundType):
            return None
        unbound = t
        sym = self.lookup_qualified(unbound.name, unbound)
        if sym and isinstance(sym.node, PlaceholderNode):
            # The target may still become a TypeVarExpr once it is analyzed;
            # record that we saw an incomplete reference so the caller can defer.
            self.record_incomplete_ref()
        if sym is None or not isinstance(sym.node, TypeVarExpr):
            return None
        elif sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
            # It's bound by our type variable scope
            return None
        else:
            assert isinstance(sym.node, TypeVarExpr)
            return unbound.name, sym.node
def get_all_bases_tvars(self,
base_type_exprs: List[Expression],
removed: List[int]) -> TypeVarList:
"""Return all type variable references in bases."""
tvars = [] # type: TypeVarList
for i, base_expr in enumerate(base_type_exprs):
if i not in removed:
try:
base = expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
base_tvars = base.accept(TypeVariableQuery(self.lookup_qualified, self.tvar_scope))
tvars.extend(base_tvars)
return remove_dups(tvars)
    def prepare_class_def(self, defn: ClassDef, info: Optional[TypeInfo] = None) -> None:
        """Prepare for the analysis of a class definition.

        Create an empty TypeInfo and store it in a symbol table, or if the 'info'
        argument is provided, store it instead (used for magic type definitions).
        """
        if not defn.info:
            defn.fullname = self.qualified_name(defn.name)
            # TODO: Nested classes
            info = info or self.make_empty_type_info(defn)
            defn.info = info
            info.defn = defn
            if not self.is_func_scope():
                info._fullname = self.qualified_name(defn.name)
            else:
                # Classes inside functions keep their short name; a mangled
                # global name is assigned below if needed.
                info._fullname = info.name()
        self.add_symbol(defn.name, defn.info, defn)
        if self.is_nested_within_func_scope():
            # We need to preserve local classes, let's store them
            # in globals under mangled unique names
            #
            # TODO: Putting local classes into globals breaks assumptions in fine-grained
            # incremental mode and we should avoid it. In general, this logic is too
            # ad-hoc and needs to be removed/refactored.
            if '@' not in defn.info._fullname:
                # First time: mangle the name using the definition line number.
                local_name = defn.info._fullname + '@' + str(defn.line)
                if defn.info.is_named_tuple:
                    # Module is already correctly set in _fullname for named tuples.
                    defn.info._fullname += '@' + str(defn.line)
                else:
                    defn.info._fullname = self.cur_mod_id + '.' + local_name
            else:
                # Preserve name from previous fine-grained incremental run.
                local_name = defn.info._fullname
            defn.fullname = defn.info._fullname
            self.globals[local_name] = SymbolTableNode(GDEF, defn.info)
    def make_empty_type_info(self, defn: ClassDef) -> TypeInfo:
        """Create a TypeInfo for defn with an empty symbol table.

        Core built-in classes defined in the 'builtins' module reuse the
        TypeInfo that was pre-allocated before semantic analysis.
        """
        if (self.is_module_scope()
                and self.cur_mod_id == 'builtins'
                and defn.name in CORE_BUILTIN_CLASSES):
            # Special case core built-in classes. A TypeInfo was already
            # created for it before semantic analysis, but with a dummy
            # ClassDef. Patch the real ClassDef object.
            info = self.globals[defn.name].node
            assert isinstance(info, TypeInfo)
        else:
            info = TypeInfo(SymbolTable(), defn, self.cur_mod_id)
            info.set_line(defn)
        return info
def analyze_base_classes(
self,
base_type_exprs: List[Expression]) -> Optional[Tuple[List[Tuple[Type, Expression]],
bool]]:
"""Analyze base class types.
Return None if some definition was incomplete. Otherwise, return a tuple
with these items:
* List of (analyzed type, original expression) tuples
* Boolean indicating whether one of the bases had a semantic analysis error
"""
is_error = False
bases = []
for base_expr in base_type_exprs:
if (isinstance(base_expr, RefExpr) and
base_expr.fullname in ('typing.NamedTuple',) + TPDICT_NAMES):
# Ignore magic bases for now.
continue
try:
base = self.expr_to_analyzed_type(base_expr, allow_placeholder=True)
except TypeTranslationError:
self.fail('Invalid base class', base_expr)
is_error = True
continue
if base is None:
return None
bases.append((base, base_expr))
return bases, is_error
    def configure_base_classes(self,
                               defn: ClassDef,
                               bases: List[Tuple[Type, Expression]]) -> None:
        """Set up base classes.

        This computes several attributes on the corresponding TypeInfo defn.info
        related to the base classes: defn.info.bases, defn.info.mro, and
        miscellaneous others (at least tuple_type, fallback_to_any, and is_enum.)
        """
        base_types = []  # type: List[Instance]
        info = defn.info

        # Any existing tuple base is recomputed by configure_tuple_base_class().
        info.tuple_type = None
        for base, base_expr in bases:
            if isinstance(base, TupleType):
                actual_base = self.configure_tuple_base_class(defn, base, base_expr)
                base_types.append(actual_base)
            elif isinstance(base, Instance):
                if base.type.is_newtype:
                    self.fail("Cannot subclass NewType", defn)
                base_types.append(base)
            elif isinstance(base, AnyType):
                if self.options.disallow_subclassing_any:
                    if isinstance(base_expr, (NameExpr, MemberExpr)):
                        msg = "Class cannot subclass '{}' (has type 'Any')".format(base_expr.name)
                    else:
                        msg = "Class cannot subclass value of type 'Any'"
                    self.fail(msg, base_expr)
                info.fallback_to_any = True
            else:
                self.fail('Invalid base class', base_expr)
                info.fallback_to_any = True
            if self.options.disallow_any_unimported and has_any_from_unimported_type(base):
                if isinstance(base_expr, (NameExpr, MemberExpr)):
                    prefix = "Base type {}".format(base_expr.name)
                else:
                    prefix = "Base type"
                self.msg.unimported_type_becomes_any(prefix, base, base_expr)
            check_for_explicit_any(base, self.options, self.is_typeshed_stub_file, self.msg,
                                   context=base_expr)

        # Add 'object' as implicit base if there is no other base class.
        if (not base_types and defn.fullname != 'builtins.object'):
            base_types.append(self.object_type())

        info.bases = base_types

        # Calculate the MRO.
        if not self.verify_base_classes(defn):
            # Give it an MRO consisting of just the class itself and object.
            defn.info.mro = [defn.info, self.object_type().type]
            return
        self.calculate_class_mro(defn, self.object_type)
    def configure_tuple_base_class(self,
                                   defn: ClassDef,
                                   base: TupleType,
                                   base_expr: Expression) -> Instance:
        """Set up a tuple-like base class (e.g. a NamedTuple base).

        Store the tuple type on defn.info and return the fallback Instance
        that is used as the actual base class.
        """
        info = defn.info

        # There may be an existing valid tuple type from previous semanal iterations.
        # Use equality to check if it is the case.
        if info.tuple_type and info.tuple_type != base:
            self.fail("Class has two incompatible bases derived from tuple", defn)
            defn.has_incompatible_baseclass = True
        info.tuple_type = base
        if isinstance(base_expr, CallExpr):
            defn.analyzed = NamedTupleExpr(base.partial_fallback.type)
            defn.analyzed.line = defn.line
            defn.analyzed.column = defn.column

        if base.partial_fallback.type.fullname() == 'builtins.tuple':
            # Fallback can only be safely calculated after semantic analysis, since base
            # classes may be incomplete. Postpone the calculation.
            self.schedule_patch(PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(base))

        return base.partial_fallback
    def calculate_class_mro(self, defn: ClassDef,
                            obj_type: Optional[Callable[[], Instance]] = None) -> None:
        """Calculate method resolution order for a class.

        `obj_type` may be omitted in the third pass when all classes are already analyzed.
        It exists just to fill in empty base class list during second pass in case of
        an import cycle.
        """
        try:
            calculate_mro(defn.info, obj_type)
        except MroError:
            self.fail_blocker('Cannot determine consistent method resolution '
                              'order (MRO) for "%s"' % defn.name, defn)
            # Leave the MRO empty; the blocker error above stops further checking.
            defn.info.mro = []
        # Allow plugins to alter the MRO to handle the fact that `def mro()`
        # on metaclasses permits MRO rewriting.
        if defn.fullname:
            hook = self.plugin.get_customize_class_mro_hook(defn.fullname)
            if hook:
                # The dummy Expression() is used because no single expression
                # triggered this hook invocation.
                hook(ClassDefContext(defn, Expression(), self))
    def update_metaclass(self, defn: ClassDef) -> None:
        """Lookup for special metaclass declarations, and update defn fields accordingly.

        * __metaclass__ attribute in Python 2
        * six.with_metaclass(M, B1, B2, ...)
        * @six.add_metaclass(M)

        Reports an error if more than one distinct metaclass is declared.
        """
        # Look for "__metaclass__ = <metaclass>" in Python 2
        python2_meta_expr = None  # type: Optional[Expression]
        if self.options.python_version[0] == 2:
            for body_node in defn.defs.body:
                if isinstance(body_node, ClassDef) and body_node.name == "__metaclass__":
                    self.fail("Metaclasses defined as inner classes are not supported", body_node)
                    break
                elif isinstance(body_node, AssignmentStmt) and len(body_node.lvalues) == 1:
                    lvalue = body_node.lvalues[0]
                    if isinstance(lvalue, NameExpr) and lvalue.name == "__metaclass__":
                        python2_meta_expr = body_node.rvalue

        # Look for six.with_metaclass(M, B1, B2, ...)
        with_meta_expr = None  # type: Optional[Expression]
        if len(defn.base_type_exprs) == 1:
            base_expr = defn.base_type_exprs[0]
            if isinstance(base_expr, CallExpr) and isinstance(base_expr.callee, RefExpr):
                base_expr.callee.accept(self)
                if (base_expr.callee.fullname == 'six.with_metaclass'
                        and len(base_expr.args) >= 1
                        and all(kind == ARG_POS for kind in base_expr.arg_kinds)):
                    with_meta_expr = base_expr.args[0]
                    # Replace the call with the real base classes (B1, B2, ...).
                    defn.base_type_exprs = base_expr.args[1:]

        # Look for @six.add_metaclass(M)
        add_meta_expr = None  # type: Optional[Expression]
        for dec_expr in defn.decorators:
            if isinstance(dec_expr, CallExpr) and isinstance(dec_expr.callee, RefExpr):
                dec_expr.callee.accept(self)
                if (dec_expr.callee.fullname == 'six.add_metaclass'
                        and len(dec_expr.args) == 1
                        and dec_expr.arg_kinds[0] == ARG_POS):
                    add_meta_expr = dec_expr.args[0]
                    break

        # All sources must agree (after dropping the 'not declared' entries).
        metas = {defn.metaclass, python2_meta_expr, with_meta_expr, add_meta_expr} - {None}
        if len(metas) == 0:
            return
        if len(metas) > 1:
            self.fail("Multiple metaclass definitions", defn)
            return
        defn.metaclass = metas.pop()
    def verify_base_classes(self, defn: ClassDef) -> bool:
        """Check base classes for inheritance cycles, invalid bases and duplicates.

        Return False if an error was reported that makes MRO calculation
        impossible; report blocker errors for the hard failures.
        """
        info = defn.info
        for base in info.bases:
            baseinfo = base.type
            if self.is_base_class(info, baseinfo):
                self.fail('Cycle in inheritance hierarchy', defn, blocker=True)
                # Clear bases to forcefully get rid of the cycle.
                info.bases = []
                # NOTE(review): no 'return False' here -- the loop continues over
                # the (original) base list; presumably safe because the blocker
                # error above stops further processing. Confirm this is intended.
            if baseinfo.fullname() == 'builtins.bool':
                self.fail("'%s' is not a valid base class" %
                          baseinfo.name(), defn, blocker=True)
                return False
        dup = find_duplicate(info.direct_base_classes())
        if dup:
            self.fail('Duplicate base class "%s"' % dup.name(), defn, blocker=True)
            return False
        return True
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
"""Determine if t is a base class of s (but do not use mro)."""
# Search the base class graph for t, starting from s.
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
    def analyze_metaclass(self, defn: ClassDef) -> None:
        """Resolve the declared metaclass and compute defn.info.metaclass_type.

        Also applies the implicit ABCMeta metaclass for protocols and marks
        Enum subclasses (via EnumMeta) on the TypeInfo.
        """
        if defn.metaclass:
            metaclass_name = None
            if isinstance(defn.metaclass, NameExpr):
                metaclass_name = defn.metaclass.name
            elif isinstance(defn.metaclass, MemberExpr):
                metaclass_name = get_member_expr_fullname(defn.metaclass)
            if metaclass_name is None:
                self.fail("Dynamic metaclass not supported for '%s'" % defn.name, defn.metaclass)
                return
            sym = self.lookup_qualified(metaclass_name, defn.metaclass)
            if sym is None:
                # Probably a name error - it is already handled elsewhere
                return
            if isinstance(sym.node, Var) and isinstance(sym.node.type, AnyType):
                # 'Any' metaclass -- just ignore it.
                #
                # TODO: A better approach would be to record this information
                #       and assume that the type object supports arbitrary
                #       attributes, similar to an 'Any' base class.
                return
            if isinstance(sym.node, PlaceholderNode):
                # The metaclass is not ready yet -- process this class later.
                self.defer()
                return
            if not isinstance(sym.node, TypeInfo) or sym.node.tuple_type is not None:
                self.fail("Invalid metaclass '%s'" % metaclass_name, defn.metaclass)
                return
            if not sym.node.is_metaclass():
                self.fail("Metaclasses not inheriting from 'type' are not supported",
                          defn.metaclass)
                return
            inst = fill_typevars(sym.node)
            assert isinstance(inst, Instance)
            defn.info.declared_metaclass = inst
        defn.info.metaclass_type = defn.info.calculate_metaclass_type()
        if any(info.is_protocol for info in defn.info.mro):
            if (not defn.info.metaclass_type or
                    defn.info.metaclass_type.type.fullname() == 'builtins.type'):
                # All protocols and their subclasses have ABCMeta metaclass by default.
                # TODO: add a metaclass conflict check if there is another metaclass.
                abc_meta = self.named_type_or_none('abc.ABCMeta', [])
                if abc_meta is not None:  # May be None in tests with incomplete lib-stub.
                    defn.info.metaclass_type = abc_meta
        if defn.info.metaclass_type is None:
            # Inconsistency may happen due to multiple baseclasses even in classes that
            # do not declare explicit metaclass, but it's harder to catch at this stage
            if defn.metaclass is not None:
                self.fail("Inconsistent metaclass structure for '%s'" % defn.name, defn)
        else:
            if defn.info.metaclass_type.type.has_base('enum.EnumMeta'):
                defn.info.is_enum = True
                if defn.type_vars:
                    self.fail("Enum class cannot be generic", defn)
#
# Imports
#
    def visit_import(self, i: Import) -> None:
        """Process an 'import m [as n]' statement."""
        for id, as_id in i.ids:
            if as_id is not None:
                # 'import m as n': binding 'n' is always re-exported.
                self.add_module_symbol(id, as_id, module_public=True, context=i)
            else:
                # Modules imported in a stub file without using 'as x' won't get exported
                # When implicit re-exporting is disabled, we have the same behavior as stubs.
                module_public = (
                    not self.is_stub_file
                    and self.options.implicit_reexport
                )
                # 'import a.b.c' binds only the top-level package name 'a'.
                base = id.split('.')[0]
                self.add_module_symbol(base, base, module_public=module_public,
                                       context=i, module_hidden=not module_public)
                self.add_submodules_to_parent_modules(id, module_public)
    def add_submodules_to_parent_modules(self, id: str, module_public: bool) -> None:
        """Recursively adds a reference to a newly loaded submodule to its parent.

        When you import a submodule in any way, Python will add a reference to that
        submodule to its parent. So, if you do something like `import A.B` or
        `from A import B` or `from A.B import Foo`, Python will add a reference to
        module A.B to A's namespace.

        Note that this "parent patching" process is completely independent from any
        changes made to the *importer's* namespace. For example, if you have a file
        named `foo.py` where you do `from A.B import Bar`, then foo's namespace will
        be modified to contain a reference to only Bar. Independently, A's namespace
        will be modified to contain a reference to `A.B`.
        """
        # Walk up the dotted name: patch A.B into A, then A into its parent, etc.
        while '.' in id:
            parent, child = id.rsplit('.', 1)
            parent_mod = self.modules.get(parent)
            if parent_mod and self.allow_patching(parent_mod, child):
                child_mod = self.modules.get(id)
                if child_mod:
                    sym = SymbolTableNode(GDEF, child_mod,
                                          module_public=module_public,
                                          no_serialize=True)
                else:
                    # Construct a dummy Var with Any type.
                    any_type = AnyType(TypeOfAny.from_unimported_type,
                                       missing_import_name=id)
                    var = Var(child, any_type)
                    var._fullname = child
                    var.is_ready = True
                    var.is_suppressed_import = True
                    sym = SymbolTableNode(GDEF, var,
                                          module_public=module_public,
                                          no_serialize=True)
                parent_mod.names[child] = sym
            id = parent
def allow_patching(self, parent_mod: MypyFile, child: str) -> bool:
if child not in parent_mod.names:
return True
node = parent_mod.names[child].node
if isinstance(node, Var) and node.is_suppressed_import:
return True
return False
    def visit_import_from(self, imp: ImportFrom) -> None:
        """Process a 'from m import x [as y]' statement."""
        import_id = self.correct_relative_import(imp)
        self.add_submodules_to_parent_modules(import_id, True)
        module = self.modules.get(import_id)
        for id, as_id in imp.names:
            node = module.names.get(id) if module else None
            missing = False
            possible_module_id = import_id + '.' + id
            imported_id = as_id or id

            # If the module does not contain a symbol with the name 'id',
            # try checking if it's a module instead.
            if not node:
                mod = self.modules.get(possible_module_id)
                if mod is not None:
                    kind = self.current_symbol_kind()
                    node = SymbolTableNode(kind, mod)
                    self.add_submodules_to_parent_modules(possible_module_id, True)
                elif possible_module_id in self.missing_modules:
                    missing = True
            # If it is still not resolved, check for a module level __getattr__
            if (module and not node and (module.is_stub or self.options.python_version >= (3, 7))
                    and '__getattr__' in module.names):
                # We use the fullname of the original definition so that we can
                # detect whether two imported names refer to the same thing.
                fullname = import_id + '.' + id
                gvar = self.create_getattr_var(module.names['__getattr__'], imported_id, fullname)
                if gvar:
                    self.add_symbol(imported_id, gvar, imp)
                    continue
            if node and not node.module_hidden:
                if isinstance(node.node, PlaceholderNode):
                    if self.final_iteration:
                        self.report_missing_module_attribute(import_id, id, imported_id, imp)
                        return
                    # The target isn't fully analyzed yet -- try again later.
                    self.record_incomplete_ref()
                existing_symbol = self.globals.get(imported_id)
                if (existing_symbol and not isinstance(existing_symbol.node, PlaceholderNode) and
                        not isinstance(node.node, PlaceholderNode)):
                    # Import can redefine a variable. They get special treatment.
                    if self.process_import_over_existing_name(
                            imported_id, existing_symbol, node, imp):
                        continue
                if (existing_symbol and isinstance(existing_symbol.node, MypyFile) and
                        existing_symbol.no_serialize):  # submodule added to parent module
                    # Special case: allow replacing submodules with variables. This pattern
                    # is used by some libraries.
                    del self.globals[imported_id]
                if existing_symbol and isinstance(node.node, PlaceholderNode):
                    # Imports are special, some redefinitions are allowed, so wait until
                    # we know what is the new symbol node.
                    continue
                # 'from m import x as x' exports x in a stub file or when implicit
                # re-exports are disabled.
                module_public = (
                    not self.is_stub_file
                    and self.options.implicit_reexport
                    or as_id is not None
                )
                module_hidden = not module_public and possible_module_id not in self.modules

                # NOTE: we take the original node even for final `Var`s. This is to support
                # a common pattern when constants are re-exported (same applies to import *).
                self.add_imported_symbol(imported_id, node, imp,
                                         module_public=module_public,
                                         module_hidden=module_hidden)
            elif module and not missing:
                # The module exists but has no such attribute.
                self.report_missing_module_attribute(import_id, id, imported_id, imp)
            else:
                # Missing module.
                missing_name = import_id + '.' + id
                self.add_unknown_imported_symbol(imported_id, imp, target_name=missing_name)
    def report_missing_module_attribute(self, import_id: str, source_id: str, imported_id: str,
                                        context: Node) -> None:
        """Report that module 'import_id' has no attribute 'source_id'.

        Suggests close name matches and (for 'typing' imports in tests) a
        fixture note. Defers instead if the module's namespace is incomplete.
        """
        # Missing attribute.
        if self.is_incomplete_namespace(import_id):
            # We don't know whether the name will be there, since the namespace
            # is incomplete. Defer the current target.
            self.mark_incomplete(imported_id, context)
            return
        message = "Module '{}' has no attribute '{}'".format(import_id, source_id)
        extra = self.undefined_name_extra_info('{}.{}'.format(import_id, source_id))
        if extra:
            message += " {}".format(extra)
        # Suggest alternatives, if any match is found.
        module = self.modules.get(import_id)
        if module:
            alternatives = set(module.names.keys()).difference({source_id})
            matches = best_matches(source_id, alternatives)[:3]
            if matches:
                suggestion = "; maybe {}?".format(pretty_or(matches))
                message += "{}".format(suggestion)
        self.fail(message, context)
        # Bind the name to a dummy so the rest of the file can still be analyzed.
        self.add_unknown_imported_symbol(imported_id, context)

        if import_id == 'typing':
            # The user probably has a missing definition in a test fixture. Let's verify.
            fullname = 'builtins.{}'.format(source_id.lower())
            if (self.lookup_fully_qualified_or_none(fullname) is None and
                    fullname in SUGGESTED_TEST_FIXTURES):
                # Yes. Generate a helpful note.
                self.add_fixture_note(fullname, context)
    def process_import_over_existing_name(self,
                                          imported_id: str, existing_symbol: SymbolTableNode,
                                          module_symbol: SymbolTableNode,
                                          import_node: ImportBase) -> bool:
        """Handle an import that redefines an existing name in this file.

        Return True if the import was converted into a dummy assignment that
        will be type checked later (so the caller should not add a new symbol).
        """
        if existing_symbol.node is module_symbol.node:
            # We added this symbol on previous iteration.
            return False
        if (existing_symbol.kind in (LDEF, GDEF, MDEF) and
                isinstance(existing_symbol.node, (Var, FuncDef, TypeInfo, Decorator, TypeAlias))):
            # This is a valid import over an existing definition in the file. Construct a dummy
            # assignment that we'll use to type check the import.
            lvalue = NameExpr(imported_id)
            lvalue.kind = existing_symbol.kind
            lvalue.node = existing_symbol.node
            rvalue = NameExpr(imported_id)
            rvalue.kind = module_symbol.kind
            rvalue.node = module_symbol.node
            if isinstance(rvalue.node, TypeAlias):
                # Suppress bogus errors from the dummy assignment if rvalue is an alias.
                # Otherwise mypy may complain that alias is invalid in runtime context.
                rvalue.is_alias_rvalue = True
            assignment = AssignmentStmt([lvalue], rvalue)
            for node in assignment, lvalue, rvalue:
                node.set_line(import_node)
            import_node.assignments.append(assignment)
            return True
        return False
def add_fixture_note(self, fullname: str, ctx: Context) -> None:
self.note('Maybe your test fixture does not define "{}"?'.format(fullname), ctx)
if fullname in SUGGESTED_TEST_FIXTURES:
self.note(
'Consider adding [builtins fixtures/{}] to your test description'.format(
SUGGESTED_TEST_FIXTURES[fullname]), ctx)
    def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str:
        """Resolve a (possibly relative) import to an absolute module id.

        Reports an error if the relative import climbs out of the package tree.
        """
        import_id, ok = correct_relative_import(self.cur_mod_id, node.relative, node.id,
                                               self.cur_mod_node.is_package_init_file())
        if not ok:
            self.fail("Relative import climbs too many namespaces", node)
        return import_id
    def visit_import_all(self, i: ImportAll) -> None:
        """Process a 'from m import *' statement."""
        i_id = self.correct_relative_import(i)
        if i_id in self.modules:
            m = self.modules[i_id]
            if self.is_incomplete_namespace(i_id):
                # Any names could be missing from the current namespace if the target module
                # namespace is incomplete.
                self.mark_incomplete('*', i)
            self.add_submodules_to_parent_modules(i_id, True)
            for name, node in m.names.items():
                if node is None:
                    continue
                # if '__all__' exists, all nodes not included have had module_public set to
                # False, and we can skip checking '_' because it's been explicitly included.
                if node.module_public and (not name.startswith('_') or '__all__' in m.names):
                    if isinstance(node.node, MypyFile):
                        # Star import of submodule from a package, add it as a dependency.
                        self.imports.add(node.node.fullname())
                    existing_symbol = self.lookup_current_scope(name)
                    if existing_symbol and not isinstance(node.node, PlaceholderNode):
                        # Import can redefine a variable. They get special treatment.
                        if self.process_import_over_existing_name(
                                name, existing_symbol, node, i):
                            continue
                    self.add_imported_symbol(name, node, i)
                    i.imported_names.append(name)
        else:
            # Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
            pass
#
# Assignment
#
    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        """Analyze an assignment statement.

        First classify the r.h.s. as a special form (type alias, TypeVar,
        NamedTuple, TypedDict, NewType, Enum); otherwise analyze it as a
        regular variable assignment.
        """
        self.statement = s

        tag = self.track_incomplete_refs()
        s.rvalue.accept(self)
        if self.found_incomplete_ref(tag) or self.should_wait_rhs(s.rvalue):
            # Initializer couldn't be fully analyzed. Defer the current node and give up.
            # Make sure that if we skip the definition of some local names, they can't be
            # added later in this scope, since an earlier definition should take precedence.
            for expr in names_modified_by_assignment(s):
                self.mark_incomplete(expr.name, expr)
            return

        # The r.h.s. is now ready to be classified, first check if it is a special form:
        special_form = False
        # * type alias
        if self.check_and_set_up_type_alias(s):
            s.is_alias_def = True
            special_form = True
        # * type variable definition
        elif self.process_typevar_declaration(s):
            special_form = True
        # * type constructors
        elif self.analyze_namedtuple_assign(s):
            special_form = True
        elif self.analyze_typeddict_assign(s):
            special_form = True
        elif self.newtype_analyzer.process_newtype_declaration(s):
            special_form = True
        elif self.analyze_enum_assign(s):
            special_form = True
        if special_form:
            self.record_special_form_lvalue(s)
            return

        # OK, this is a regular assignment, perform the necessary analysis steps.
        s.is_final_def = self.unwrap_final(s)
        self.analyze_lvalues(s)
        self.check_final_implicit_def(s)
        self.check_classvar(s)
        self.process_type_annotation(s)
        self.apply_dynamic_class_hook(s)
        self.store_final_status(s)
        if not s.type:
            self.process_module_assignment(s.lvalues, s.rvalue, s)
        self.process__all__(s)
    def should_wait_rhs(self, rv: Expression) -> bool:
        """Can we already classify this r.h.s. of an assignment or should we wait?

        This returns True if we don't have enough information to decide whether
        an assignment is just a normal variable definition or a special form.
        Always return False if this is a final iteration. This will typically cause
        the lvalue to be classified as a variable plus emit an error.
        """
        if self.final_iteration:
            # No chance, nothing has changed.
            return False
        if isinstance(rv, NameExpr):
            n = self.lookup(rv.name, rv)
            if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
                return True
        elif isinstance(rv, MemberExpr):
            fname = get_member_expr_fullname(rv)
            if fname:
                n = self.lookup_qualified(fname, rv, suppress_errors=True)
                if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
                    return True
        elif isinstance(rv, IndexExpr) and isinstance(rv.base, RefExpr):
            # E.g. 'X = Alias[int]' -- wait if Alias itself isn't ready.
            return self.should_wait_rhs(rv.base)
        elif isinstance(rv, CallExpr) and isinstance(rv.callee, RefExpr):
            # This is only relevant for builtin SCC where things like 'TypeVar'
            # may be not ready.
            return self.should_wait_rhs(rv.callee)
        return False
def can_be_type_alias(self, rv: Expression) -> bool:
"""Is this a valid r.h.s. for an alias definition?
Note: this function should be only called for expressions where self.should_wait_rhs()
returns False.
"""
if isinstance(rv, RefExpr) and self.is_type_ref(rv, bare=True):
return True
if isinstance(rv, IndexExpr) and self.is_type_ref(rv.base, bare=False):
return True
if self.is_none_alias(rv):
return True
return False
    def is_type_ref(self, rv: Expression, bare: bool = False) -> bool:
        """Does this expression refer to a type?

        This includes:
          * Special forms, like Any or Union
          * Classes (except subscripted enums)
          * Other type aliases
          * PlaceholderNodes with becomes_typeinfo=True (these can be not ready class
            definitions, and not ready aliases).

        If bare is True, this is not a base of an index expression, so some special
        forms are not valid (like a bare Union).

        Note: This method should be only used in context of a type alias definition.
        This method can only return True for RefExprs, to check if C[int] is a valid
        target for type alias call this method on expr.base (i.e. on C in C[int]).
        See also can_be_type_alias().
        """
        if not isinstance(rv, RefExpr):
            return False
        if isinstance(rv.node, TypeVarExpr):
            self.fail('Type variable "{}" is invalid as target for type alias'.format(
                rv.fullname), rv)
            return False

        if bare:
            # These three are valid even if bare, for example
            # A = Tuple is just equivalent to A = Tuple[Any, ...].
            valid_refs = {'typing.Any', 'typing.Tuple', 'typing.Callable'}
        else:
            valid_refs = type_constructors

        if isinstance(rv.node, TypeAlias) or rv.fullname in valid_refs:
            return True
        if isinstance(rv.node, TypeInfo):
            if bare:
                return True
            # Assignment color = Color['RED'] defines a variable, not an alias.
            return not rv.node.is_enum

        # The target may be a not-yet-analyzed placeholder that will become a type.
        if isinstance(rv, NameExpr):
            n = self.lookup(rv.name, rv)
            if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
                return True
        elif isinstance(rv, MemberExpr):
            fname = get_member_expr_fullname(rv)
            if fname:
                # The r.h.s. for variable definitions may not be a type reference but just
                # an instance attribute, so suppress the errors.
                n = self.lookup_qualified(fname, rv, suppress_errors=True)
                if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
                    return True
        return False
def is_none_alias(self, node: Expression) -> bool:
"""Is this a r.h.s. for a None alias?
We special case the assignments like Void = type(None), to allow using
Void in type annotations.
"""
if isinstance(node, CallExpr):
if (isinstance(node.callee, NameExpr) and len(node.args) == 1 and
isinstance(node.args[0], NameExpr)):
call = self.lookup_qualified(node.callee.name, node.callee)
arg = self.lookup_qualified(node.args[0].name, node.args[0])
if (call is not None and call.node and call.node.fullname() == 'builtins.type' and
arg is not None and arg.node and arg.node.fullname() == 'builtins.None'):
return True
return False
def record_special_form_lvalue(self, s: AssignmentStmt) -> None:
"""Record minimal necessary information about l.h.s. of a special form.
This exists mostly for compatibility with the old semantic analyzer.
"""
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
lvalue.is_special_form = True
if self.current_symbol_kind() == GDEF:
lvalue.fullname = self.qualified_name(lvalue.name)
lvalue.kind = self.current_symbol_kind()
def analyze_enum_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines an Enum."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, EnumCallExpr):
# Already analyzed enum -- nothing to do here.
return True
return self.enum_call_analyzer.process_enum_call(s, self.is_func_scope())
    def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool:
        """Check if s defines a namedtuple.

        Return True if it does (even if not yet fully analyzed).
        """
        if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, NamedTupleExpr):
            return True  # This is a valid and analyzed named tuple definition, nothing to do here.
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
            return False
        lvalue = s.lvalues[0]
        name = lvalue.name
        is_named_tuple, info = self.named_tuple_analyzer.check_namedtuple(s.rvalue, name,
                                                                          self.is_func_scope())
        if not is_named_tuple:
            return False
        # Yes, it's a valid namedtuple, but defer if it is not ready.
        if not info:
            self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
        return True
    def analyze_typeddict_assign(self, s: AssignmentStmt) -> bool:
        """Check if s defines a typed dict.

        Return True if it does (even if not yet fully analyzed).
        """
        if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, TypedDictExpr):
            return True  # This is a valid and analyzed typed dict definition, nothing to do here.
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
            return False
        lvalue = s.lvalues[0]
        name = lvalue.name
        is_typed_dict, info = self.typed_dict_analyzer.check_typeddict(s.rvalue, name,
                                                                       self.is_func_scope())
        if not is_typed_dict:
            return False
        # Yes, it's a valid typed dict, but defer if it is not ready.
        if not info:
            self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
        return True
    def analyze_lvalues(self, s: AssignmentStmt) -> None:
        """Analyze all l.h.s. expressions of a (possibly chained) assignment."""
        # We cannot use s.type, because analyze_simple_literal_type() will set it.
        explicit = s.unanalyzed_type is not None
        if self.is_final_type(s.unanalyzed_type):
            # We need to exclude bare Final.
            assert isinstance(s.unanalyzed_type, UnboundType)
            if not s.unanalyzed_type.args:
                explicit = False
        for lval in s.lvalues:
            self.analyze_lvalue(lval,
                                explicit_type=explicit,
                                is_final=s.is_final_def)
def apply_dynamic_class_hook(self, s: AssignmentStmt) -> None:
if len(s.lvalues) > 1:
return
lval = s.lvalues[0]
if not isinstance(lval, NameExpr) or not isinstance(s.rvalue, CallExpr):
return
call = s.rvalue
if not isinstance(call.callee, RefExpr):
return
fname = call.callee.fullname
if fname:
hook = self.plugin.get_dynamic_class_hook(fname)
if hook:
hook(DynamicClassDefContext(call, lval.name, self))
    def unwrap_final(self, s: AssignmentStmt) -> bool:
        """Strip Final[...] if present in an assignment.

        This is done to invoke type inference during type checking phase for this
        assignment. Also, Final[...] doesn't affect type in any way -- it is rather an
        access qualifier for given `Var`.

        Also perform various consistency checks.

        Returns True if Final[...] was present.
        """
        if not s.unanalyzed_type or not self.is_final_type(s.unanalyzed_type):
            return False
        assert isinstance(s.unanalyzed_type, UnboundType)
        if len(s.unanalyzed_type.args) > 1:
            self.fail("Final[...] takes at most one type argument", s.unanalyzed_type)
        invalid_bare_final = False
        if not s.unanalyzed_type.args:
            # Bare Final: the declared type must be inferred from the initializer.
            s.type = None
            if isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs:
                invalid_bare_final = True
                self.fail("Type in Final[...] can only be omitted if there is an initializer", s)
        else:
            # Unwrap: use the single argument of Final[...] as the declared type.
            s.type = s.unanalyzed_type.args[0]
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], RefExpr):
            self.fail("Invalid final declaration", s)
            return False
        lval = s.lvalues[0]
        assert isinstance(lval, RefExpr)
        if self.loop_depth > 0:
            self.fail("Cannot use Final inside a loop", s)
        if self.type and self.type.is_protocol:
            self.msg.protocol_members_cant_be_final(s)
        if (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs and
                not self.is_stub_file and not self.is_class_scope()):
            if not invalid_bare_final:  # Skip extra error messages.
                self.msg.final_without_value(s)
        return True
    def check_final_implicit_def(self, s: AssignmentStmt) -> None:
        """Do basic checks for final declaration on self in __init__.

        Additional re-definition checks are performed by `analyze_lvalue`.
        """
        if not s.is_final_def:
            return
        lval = s.lvalues[0]
        assert isinstance(lval, RefExpr)
        if isinstance(lval, MemberExpr):
            # Only `self.attr: Final = ...` is allowed for attribute targets.
            if not self.is_self_member_ref(lval):
                self.fail("Final can be only applied to a name or an attribute on self", s)
                s.is_final_def = False
                return
        else:
            # A plain name inside a function is only a valid final
            # declaration when that function is __init__.
            assert self.function_stack
            if self.function_stack[-1].name() != '__init__':
                self.fail("Can only declare a final attribute in class body or __init__", s)
                s.is_final_def = False
                return
    def store_final_status(self, s: AssignmentStmt) -> None:
        """If this is a locally valid final declaration, set the corresponding flag on `Var`."""
        if s.is_final_def:
            if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
                node = s.lvalues[0].node
                if isinstance(node, Var):
                    node.is_final = True
                    # Remember the literal value (if any) for Literal inference.
                    node.final_value = self.unbox_literal(s.rvalue)
                    if (self.is_class_scope() and
                            (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
                        # `x: Final[int]` in a class body with no initializer;
                        # the value must be assigned later in __init__.
                        node.final_unset_in_class = True
        else:
            # Special case: deferred initialization of a final attribute in __init__.
            # In this case we just pretend this is a valid final definition to suppress
            # errors about assigning to final attribute.
            for lval in self.flatten_lvalues(s.lvalues):
                if isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
                    assert self.type, "Self member outside a class"
                    cur_node = self.type.names.get(lval.name, None)
                    if cur_node and isinstance(cur_node.node, Var) and cur_node.node.is_final:
                        assert self.function_stack
                        top_function = self.function_stack[-1]
                        if (top_function.name() == '__init__' and
                                cur_node.node.final_unset_in_class and
                                not cur_node.node.final_set_in_init and
                                not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
                            cur_node.node.final_set_in_init = True
                            s.is_final_def = True
def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
res = [] # type: List[Expression]
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
else:
res.append(lv)
return res
def unbox_literal(self, e: Expression) -> Optional[Union[int, float, bool, str]]:
if isinstance(e, (IntExpr, FloatExpr, StrExpr)):
return e.value
elif isinstance(e, NameExpr) and e.name in ('True', 'False'):
return True if e.name == 'True' else False
return None
    def process_type_annotation(self, s: AssignmentStmt) -> None:
        """Analyze type annotation or infer simple literal type."""
        if s.type:
            lvalue = s.lvalues[-1]
            allow_tuple_literal = isinstance(lvalue, TupleExpr)
            analyzed = self.anal_type(s.type, allow_tuple_literal=allow_tuple_literal)
            # Don't store not ready types (including placeholders).
            if analyzed is None or has_placeholder(analyzed):
                self.defer()
                return
            s.type = analyzed
            if (self.type and self.type.is_protocol and isinstance(lvalue, NameExpr) and
                    isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs):
                # An annotated protocol member without an initializer is abstract.
                if isinstance(lvalue.node, Var):
                    lvalue.node.is_abstract_var = True
        else:
            if (any(isinstance(lv, NameExpr) and lv.is_inferred_def for lv in s.lvalues) and
                    self.type and self.type.is_protocol and not self.is_func_scope()):
                self.fail('All protocol members must have explicitly declared types', s)
            # Set the type if the rvalue is a simple literal (even if the above error occurred).
            if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
                if s.lvalues[0].is_inferred_def:
                    s.type = self.analyze_simple_literal_type(s.rvalue, s.is_final_def)
        if s.type:
            # Store type into nodes.
            for lvalue in s.lvalues:
                self.store_declared_types(lvalue, s.type)
    def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Optional[Type]:
        """Return builtins.int if rvalue is an int literal, etc.

        If this is a 'Final' context, we return "Literal[...]" instead.
        """
        if self.options.semantic_analysis_only or self.function_stack:
            # Skip this if we're only doing the semantic analysis pass.
            # This is mostly to avoid breaking unit tests.
            # Also skip inside a function; this is to avoid confusing
            # the code that handles dead code due to isinstance()
            # inside type variables with value restrictions (like
            # AnyStr).
            return None
        if isinstance(rvalue, FloatExpr):
            # Floats never get a Literal type, so return early.
            return self.named_type_or_none('builtins.float')
        value = None  # type: Optional[LiteralValue]
        type_name = None  # type: Optional[str]
        if isinstance(rvalue, IntExpr):
            value, type_name = rvalue.value, 'builtins.int'
        if isinstance(rvalue, StrExpr):
            value, type_name = rvalue.value, 'builtins.str'
        if isinstance(rvalue, BytesExpr):
            value, type_name = rvalue.value, 'builtins.bytes'
        if isinstance(rvalue, UnicodeExpr):
            value, type_name = rvalue.value, 'builtins.unicode'
        if type_name is not None:
            assert value is not None
            typ = self.named_type_or_none(type_name)
            if typ and is_final:
                # Final context: attach the literal value as the last known value.
                return typ.copy_modified(last_known_value=LiteralType(
                    value=value,
                    fallback=typ,
                    line=typ.line,
                    column=typ.column,
                ))
            return typ
        return None
    def analyze_alias(self, rvalue: Expression,
                      allow_placeholder: bool = False) -> Tuple[Optional[Type], List[str],
                                                                Set[str], List[str]]:
        """Check if 'rvalue' is a valid type allowed for aliasing (e.g. not a type variable).

        If yes, return the corresponding type, a list of
        qualified type variable names for generic aliases, a set of names the alias depends on,
        and a list of type variables if the alias is generic.

        A schematic example for the dependencies:
            A = int
            B = str
            analyze_alias(Dict[A, B])[2] == {'__main__.A', '__main__.B'}
        """
        dynamic = bool(self.function_stack and self.function_stack[-1].is_dynamic())
        global_scope = not self.type and not self.function_stack
        res = analyze_type_alias(rvalue,
                                 self,
                                 self.tvar_scope,
                                 self.plugin,
                                 self.options,
                                 self.is_typeshed_stub_file,
                                 allow_unnormalized=self.is_stub_file,
                                 allow_placeholder=allow_placeholder,
                                 in_dynamic_func=dynamic,
                                 global_scope=global_scope)
        typ = None  # type: Optional[Type]
        if res:
            typ, depends_on = res
            # Collect the type variables that appear free in the alias target.
            found_type_vars = typ.accept(TypeVariableQuery(self.lookup_qualified, self.tvar_scope))
            alias_tvars = [name for (name, node) in found_type_vars]
            qualified_tvars = [node.fullname() for (name, node) in found_type_vars]
        else:
            alias_tvars = []
            depends_on = set()
            qualified_tvars = []
        return typ, alias_tvars, depends_on, qualified_tvars
    def check_and_set_up_type_alias(self, s: AssignmentStmt) -> bool:
        """Check if assignment creates a type alias and set it up as needed.

        Return True if it is a type alias (even if the target is not ready),
        or False otherwise.

        Note: the resulting types for subscripted (including generic) aliases
        are also stored in rvalue.analyzed.
        """
        lvalue = s.lvalues[0]
        if len(s.lvalues) > 1 or not isinstance(lvalue, NameExpr):
            # First rule: Only simple assignments like Alias = ... create aliases.
            return False
        if s.unanalyzed_type is not None:
            # Second rule: Explicit type (cls: Type[A] = A) always creates variable, not alias.
            return False
        existing = self.current_symbol_table().get(lvalue.name)
        # Third rule: type aliases can't be re-defined. For example:
        #     A: Type[float] = int
        #     A = float  # OK, but this doesn't define an alias
        #     B = int
        #     B = float  # Error!
        # Don't create an alias in these cases:
        if existing and (isinstance(existing.node, Var) or  # existing variable
                         isinstance(existing.node, TypeAlias) and not s.is_alias_def or  # existing alias
                         (isinstance(existing.node, PlaceholderNode) and
                          # TODO: find a more robust way to track the order of definitions.
                          existing.node.node.line < s.line)):  # or previous incomplete definition
            # Note: if is_alias_def=True, this is just a node from previous iteration.
            if isinstance(existing.node, TypeAlias) and not s.is_alias_def:
                self.fail('Cannot assign multiple types to name "{}"'
                          ' without an explicit "Type[...]" annotation'
                          .format(lvalue.name), lvalue)
            return False
        non_global_scope = self.type or self.is_func_scope()
        if isinstance(s.rvalue, RefExpr) and non_global_scope:
            # Fourth rule (special case): Non-subscripted right hand side creates a variable
            # at class and function scopes. For example:
            #
            #   class Model:
            #       ...
            #   class C:
            #       model = Model # this is automatically a variable with type 'Type[Model]'
            #
            # without this rule, this typical use case will require a lot of explicit
            # annotations (see the second rule).
            return False
        rvalue = s.rvalue
        if not self.can_be_type_alias(rvalue):
            return False
        res = None  # type: Optional[Type]
        if self.is_none_alias(rvalue):
            # Special-case None: it is a valid alias target but not a general type.
            res = NoneType()
            alias_tvars, depends_on, qualified_tvars = \
                [], set(), []  # type: List[str], Set[str], List[str]
        else:
            tag = self.track_incomplete_refs()
            res, alias_tvars, depends_on, qualified_tvars = \
                self.analyze_alias(rvalue, allow_placeholder=True)
            if not res:
                return False
            if self.found_incomplete_ref(tag) or isinstance(res, PlaceholderType):
                # Since we have got here, we know this must be a type alias (incomplete refs
                # may appear in nested positions), therefore use becomes_typeinfo=True.
                self.add_symbol(lvalue.name, PlaceholderNode(self.qualified_name(lvalue.name),
                                                             rvalue, becomes_typeinfo=True), s)
                return True
        self.add_type_alias_deps(depends_on)
        # In addition to the aliases used, we add deps on unbound
        # type variables, since they are erased from target type.
        self.add_type_alias_deps(qualified_tvars)
        # The above are only direct deps on other aliases.
        # For subscripted aliases, type deps from expansion are added in deps.py
        # (because the type is stored).
        check_for_explicit_any(res, self.options, self.is_typeshed_stub_file, self.msg,
                               context=s)
        # When this type alias gets "inlined", the Any is not explicit anymore,
        # so we need to replace it with non-explicit Anys.
        res = make_any_non_explicit(res)
        no_args = isinstance(res, Instance) and not res.args
        fix_instance_types(res, self.fail)
        if isinstance(s.rvalue, (IndexExpr, CallExpr)):  # CallExpr is for `void = type(None)`
            s.rvalue.analyzed = TypeAliasExpr(res, alias_tvars, no_args)
            s.rvalue.analyzed.line = s.line
            # we use the column from resulting target, to get better location for errors
            s.rvalue.analyzed.column = res.column
        elif isinstance(s.rvalue, RefExpr):
            s.rvalue.is_alias_rvalue = True
        alias_node = TypeAlias(res, self.qualified_name(lvalue.name), s.line, s.column,
                               alias_tvars=alias_tvars, no_args=no_args)
        if existing:
            # Did alias get updated?
            if (isinstance(existing.node, PlaceholderNode) or
                    isinstance(existing.node, TypeAlias) and existing.node.target != res):
                self.progress = True
                # We need to defer so that this change can get propagated to base classes.
                self.defer()
            if isinstance(existing.node, TypeAlias):
                # Copy expansion to the existing alias, this matches how we update base classes
                # for a TypeInfo _in place_ if there are nested placeholders.
                existing.node.target = res
                existing.node.alias_tvars = alias_tvars
                existing.node.no_args = no_args
            else:
                # Otherwise just replace existing placeholder with type alias.
                existing.node = alias_node
        else:
            self.add_symbol(lvalue.name, alias_node, s)
        if isinstance(rvalue, RefExpr) and isinstance(rvalue.node, TypeAlias):
            alias_node.normalized = rvalue.node.normalized
        return True
def analyze_lvalue(self,
lval: Lvalue,
nested: bool = False,
explicit_type: bool = False,
is_final: bool = False) -> None:
"""Analyze an lvalue or assignment target.
Args:
lval: The target lvalue
nested: If true, the lvalue is within a tuple or list lvalue expression
explicit_type: Assignment has type annotation
"""
if isinstance(lval, NameExpr):
self.analyze_name_lvalue(lval, explicit_type, is_final)
elif isinstance(lval, MemberExpr):
self.analyze_member_lvalue(lval, explicit_type, is_final)
if explicit_type and not self.is_self_member_ref(lval):
self.fail('Type cannot be declared in assignment to non-self '
'attribute', lval)
elif isinstance(lval, IndexExpr):
if explicit_type:
self.fail('Unexpected type declaration', lval)
lval.accept(self)
elif isinstance(lval, TupleExpr):
items = lval.items
if len(items) == 0 and isinstance(lval, TupleExpr):
self.fail("can't assign to ()", lval)
self.analyze_tuple_or_list_lvalue(lval, explicit_type)
elif isinstance(lval, StarExpr):
if nested:
self.analyze_lvalue(lval.expr, nested, explicit_type)
else:
self.fail('Starred assignment target must be in a list or tuple', lval)
else:
self.fail('Invalid assignment target', lval)
    def analyze_name_lvalue(self,
                            lvalue: NameExpr,
                            explicit_type: bool,
                            is_final: bool) -> None:
        """Analyze an lvalue that targets a name expression.

        Arguments are similar to "analyze_lvalue".
        """
        if lvalue.node:
            # This has been bound already in a previous iteration.
            return
        name = lvalue.name
        if self.is_alias_for_final_name(name):
            if is_final:
                self.fail("Cannot redefine an existing name as final", lvalue)
            else:
                self.msg.cant_assign_to_final(name, self.type is not None, lvalue)
        kind = self.current_symbol_kind()
        names = self.current_symbol_table()
        existing = names.get(name)
        outer = self.is_global_or_nonlocal(name)
        if (not existing or isinstance(existing.node, PlaceholderNode)) and not outer:
            # Define new variable.
            var = self.make_name_lvalue_var(lvalue, kind, not explicit_type)
            added = self.add_symbol(name, var, lvalue)
            # Only bind expression if we successfully added name to symbol table.
            if added:
                lvalue.is_new_def = True
                lvalue.is_inferred_def = True
                lvalue.kind = kind
                lvalue.node = var
                if kind == GDEF:
                    lvalue.fullname = var._fullname
                else:
                    lvalue.fullname = lvalue.name
                if self.is_func_scope():
                    if unmangle(name) == '_':
                        # Special case for assignment to local named '_': always infer 'Any'.
                        typ = AnyType(TypeOfAny.special_form)
                        self.store_declared_types(lvalue, typ)
            if is_final and self.is_final_redefinition(kind, name):
                self.fail("Cannot redefine an existing name as final", lvalue)
        else:
            # Name already defined (or declared global/nonlocal): re-bind to it.
            self.make_name_lvalue_point_to_existing_def(lvalue, explicit_type, is_final)
def is_final_redefinition(self, kind: int, name: str) -> bool:
if kind == GDEF:
return self.is_mangled_global(name) and not self.is_initial_mangled_global(name)
elif kind == MDEF and self.type:
return unmangle(name) + "'" in self.type.names
return False
def is_alias_for_final_name(self, name: str) -> bool:
if self.is_func_scope():
if not name.endswith("'"):
# Not a mangled name -- can't be an alias
return False
name = unmangle(name)
assert self.locals[-1] is not None, "No locals at function scope"
existing = self.locals[-1].get(name)
return existing is not None and is_final_node(existing.node)
elif self.type is not None:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.type.names.get(orig_name)
return existing is not None and is_final_node(existing.node)
else:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.globals.get(orig_name)
return existing is not None and is_final_node(existing.node)
    def make_name_lvalue_var(self, lvalue: NameExpr, kind: int, inferred: bool) -> Var:
        """Return a Var node for an lvalue that is a name expression."""
        v = Var(lvalue.name)
        v.set_line(lvalue)
        v.is_inferred = inferred
        if kind == MDEF:
            # Class member: attach the enclosing TypeInfo.
            assert self.type is not None
            v.info = self.type
            v.is_initialized_in_class = True
        if kind != LDEF:
            v._fullname = self.qualified_name(lvalue.name)
        else:
            # fullname should never stay None
            v._fullname = lvalue.name
        v.is_ready = False  # Type not inferred yet
        return v
    def make_name_lvalue_point_to_existing_def(
            self,
            lval: NameExpr,
            explicit_type: bool,
            is_final: bool) -> None:
        """Update an lvalue to point to existing definition in the same scope.

        Arguments are similar to "analyze_lvalue".

        Assume that an existing name exists.
        """
        if is_final:
            # Redefining an existing name with final is always an error.
            self.fail("Cannot redefine an existing name as final", lval)
        original_def = self.lookup(lval.name, lval, suppress_errors=True)
        if original_def is None and self.type and not self.is_func_scope():
            # Workaround to allow "x, x = ..." in class body.
            original_def = self.type.get(lval.name)
        if explicit_type:
            # Don't re-bind if there is a type annotation.
            self.name_already_defined(lval.name, lval, original_def)
        else:
            # Bind to an existing name.
            if original_def:
                self.bind_name_expr(lval, original_def)
            else:
                self.name_not_defined(lval.name, lval)
            self.check_lvalue_validity(lval.node, lval)
def analyze_tuple_or_list_lvalue(self, lval: TupleExpr,
explicit_type: bool = False) -> None:
"""Analyze an lvalue or assignment target that is a list or tuple."""
items = lval.items
star_exprs = [item for item in items if isinstance(item, StarExpr)]
if len(star_exprs) > 1:
self.fail('Two starred expressions in assignment', lval)
else:
if len(star_exprs) == 1:
star_exprs[0].valid = True
for i in items:
self.analyze_lvalue(i, nested=True, explicit_type=explicit_type)
    def analyze_member_lvalue(self, lval: MemberExpr, explicit_type: bool, is_final: bool) -> None:
        """Analyze lvalue that is a member expression.

        Arguments:
            lval: The target lvalue
            explicit_type: Assignment has type annotation
            is_final: Is the target final
        """
        lval.accept(self)
        if self.is_self_member_ref(lval):
            assert self.type, "Self member outside a class"
            # cur_node: definition in this class only; node: includes superclasses.
            cur_node = self.type.names.get(lval.name)
            node = self.type.get(lval.name)
            if cur_node and is_final:
                # Overrides will be checked in type checker.
                self.fail("Cannot redefine an existing name as final", lval)
            # On first encounter with this definition, if this attribute was defined before
            # with an inferred type and it's marked with an explicit type now, give an error.
            if (not lval.node and cur_node and isinstance(cur_node.node, Var) and
                    cur_node.node.is_inferred and explicit_type):
                self.attribute_already_defined(lval.name, lval, cur_node)
            # If the attribute of self is not defined in superclasses, create a new Var, ...
            if (node is None
                    or (isinstance(node.node, Var) and node.node.is_abstract_var)
                    # ... also an explicit declaration on self also creates a new Var.
                    # Note that `explicit_type` might have been erased for bare `Final`,
                    # so we also check if `is_final` is passed.
                    or (cur_node is None and (explicit_type or is_final))):
                if self.type.is_protocol and node is None:
                    self.fail("Protocol members cannot be defined via assignment to self", lval)
                else:
                    # Implicit attribute definition in __init__.
                    lval.is_new_def = True
                    lval.is_inferred_def = True
                    v = Var(lval.name)
                    v.set_line(lval)
                    v._fullname = self.qualified_name(lval.name)
                    v.info = self.type
                    v.is_ready = False
                    v.explicit_self_type = explicit_type or is_final
                    lval.def_var = v
                    lval.node = v
                    # TODO: should we also set lval.kind = MDEF?
                    self.type.names[lval.name] = SymbolTableNode(MDEF, v, implicit=True)
        self.check_lvalue_validity(lval.node, lval)
def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
"""Does memberexpr to refer to an attribute of self?"""
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
    def check_lvalue_validity(self, node: Union[Expression, SymbolNode, None],
                              ctx: Context) -> None:
        """Report an error if the assignment target is a type variable or a class."""
        if isinstance(node, TypeVarExpr):
            self.fail('Invalid assignment target', ctx)
        elif isinstance(node, TypeInfo):
            self.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, ctx)
    def store_declared_types(self, lvalue: Lvalue, typ: Type) -> None:
        """Store a declared type into the Var node(s) of an lvalue.

        Recurses into tuple and star lvalues to distribute item types.
        """
        if isinstance(typ, StarType) and not isinstance(lvalue, StarExpr):
            self.fail('Star type only allowed for starred expressions', lvalue)
        if isinstance(lvalue, RefExpr):
            # An explicitly declared type overrides inference.
            lvalue.is_inferred_def = False
            if isinstance(lvalue.node, Var):
                var = lvalue.node
                var.type = typ
                var.is_ready = True
            # If node is not a variable, we'll catch it elsewhere.
        elif isinstance(lvalue, TupleExpr):
            if isinstance(typ, TupleType):
                if len(lvalue.items) != len(typ.items):
                    self.fail('Incompatible number of tuple items', lvalue)
                    return
                for item, itemtype in zip(lvalue.items, typ.items):
                    self.store_declared_types(item, itemtype)
            else:
                self.fail('Tuple type expected for multiple variables',
                          lvalue)
        elif isinstance(lvalue, StarExpr):
            # Historical behavior for the old parser
            if isinstance(typ, StarType):
                self.store_declared_types(lvalue.expr, typ.type)
            else:
                self.store_declared_types(lvalue.expr, typ)
        else:
            # This has been flagged elsewhere as an error, so just ignore here.
            pass
    def process_typevar_declaration(self, s: AssignmentStmt) -> bool:
        """Check if s declares a TypeVar; if yes, store it in symbol table.

        Return True if this looks like a type variable declaration (but maybe
        with errors), otherwise return False.
        """
        call = self.get_typevar_declaration(s)
        if not call:
            return False
        lvalue = s.lvalues[0]
        assert isinstance(lvalue, NameExpr)
        if s.type:
            self.fail("Cannot declare the type of a type variable", s)
            return False
        name = lvalue.name
        names = self.current_symbol_table()
        existing = names.get(name)
        if existing and not isinstance(existing.node, (TypeVarExpr, PlaceholderNode)):
            self.fail("Cannot redefine '%s' as a type variable" % name, s)
            return False
        if not self.check_typevar_name(call, name, s):
            return False
        # Constraining types: positional arguments after the name.
        n_values = call.arg_kinds[1:].count(ARG_POS)
        values = self.analyze_types(call.args[1:1 + n_values])
        res = self.process_typevar_parameters(call.args[1 + n_values:],
                                              call.arg_names[1 + n_values:],
                                              call.arg_kinds[1 + n_values:],
                                              n_values,
                                              s)
        if res is None:
            return False
        variance, upper_bound = res
        if self.options.disallow_any_unimported:
            for idx, constraint in enumerate(values, start=1):
                if has_any_from_unimported_type(constraint):
                    prefix = "Constraint {}".format(idx)
                    self.msg.unimported_type_becomes_any(prefix, constraint, s)
            if has_any_from_unimported_type(upper_bound):
                prefix = "Upper bound of type variable"
                self.msg.unimported_type_becomes_any(prefix, upper_bound, s)
        for t in values + [upper_bound]:
            check_for_explicit_any(t, self.options, self.is_typeshed_stub_file, self.msg,
                                   context=s)
        # mypyc suppresses making copies of a function to check each
        # possible type, so set the upper bound to Any to prevent that
        # from causing errors.
        if values and self.options.mypyc:
            upper_bound = AnyType(TypeOfAny.implementation_artifact)
        # Yes, it's a valid type variable definition! Add it to the symbol table.
        if existing and isinstance(existing.node, TypeVarExpr):
            # Existing definition from previous semanal iteration, use it.
            # TODO: This may be confused with a duplicate TypeVar definition.
            # Fix this and add corresponding tests.
            type_var = existing.node
            type_var.values = values
            type_var.upper_bound = upper_bound
            type_var.variance = variance
        else:
            type_var = TypeVarExpr(name, self.qualified_name(name),
                                   values, upper_bound, variance)
            type_var.line = call.line
            call.analyzed = type_var
            self.add_symbol(name, type_var, s)
        return True
def check_typevar_name(self, call: CallExpr, name: str, context: Context) -> bool:
name = unmangle(name)
if len(call.args) < 1:
self.fail("Too few arguments for TypeVar()", context)
return False
if (not isinstance(call.args[0], (StrExpr, BytesExpr, UnicodeExpr))
or not call.arg_kinds[0] == ARG_POS):
self.fail("TypeVar() expects a string literal as first argument", context)
return False
elif call.args[0].value != name:
msg = "String argument 1 '{}' to TypeVar(...) does not match variable name '{}'"
self.fail(msg.format(call.args[0].value, name), context)
return False
return True
def get_typevar_declaration(self, s: AssignmentStmt) -> Optional[CallExpr]:
"""Returns the TypeVar() call expression if `s` is a type var declaration
or None otherwise.
"""
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return None
if not isinstance(s.rvalue, CallExpr):
return None
call = s.rvalue
callee = call.callee
if not isinstance(callee, RefExpr):
return None
if callee.fullname != 'typing.TypeVar':
return None
return call
    def process_typevar_parameters(self, args: List[Expression],
                                   names: List[Optional[str]],
                                   kinds: List[int],
                                   num_values: int,
                                   context: Context) -> Optional[Tuple[int, Type]]:
        """Analyze the keyword arguments of a TypeVar() call.

        Return a (variance, upper bound) tuple, or None if an error was reported.
        """
        has_values = (num_values > 0)
        covariant = False
        contravariant = False
        upper_bound = self.object_type()  # type: Type
        for param_value, param_name, param_kind in zip(args, names, kinds):
            if not param_kind == ARG_NAMED:
                self.fail("Unexpected argument to TypeVar()", context)
                return None
            if param_name == 'covariant':
                # Only the literal name True is accepted as a value.
                if isinstance(param_value, NameExpr):
                    if param_value.name == 'True':
                        covariant = True
                    else:
                        self.fail("TypeVar 'covariant' may only be 'True'", context)
                        return None
                else:
                    self.fail("TypeVar 'covariant' may only be 'True'", context)
                    return None
            elif param_name == 'contravariant':
                if isinstance(param_value, NameExpr):
                    if param_value.name == 'True':
                        contravariant = True
                    else:
                        self.fail("TypeVar 'contravariant' may only be 'True'", context)
                        return None
                else:
                    self.fail("TypeVar 'contravariant' may only be 'True'", context)
                    return None
            elif param_name == 'bound':
                if has_values:
                    self.fail("TypeVar cannot have both values and an upper bound", context)
                    return None
                try:
                    # We want to use our custom error message below, so we suppress
                    # the default error message for invalid types here.
                    analyzed = self.expr_to_analyzed_type(param_value,
                                                          report_invalid_types=False)
                    if analyzed is None:
                        # It is fine to simply use a temporary Any because we don't need the bound
                        # for anything before main pass of semantic analysis is finished. We will
                        # incrementally populate `TypeVarExpr` if some part is missing during main
                        # pass iterations.
                        # NOTE: It is safe to not call self.defer() here, because the only way
                        # we can get None from self.anal_type() is if self.found_incomplete_refs()
                        # returned True. In turn, the only way it can happen is if someone called
                        # self.record_incomplete_ref(), and the latter unconditionally calls
                        # self.defer().
                        analyzed = AnyType(TypeOfAny.special_form)
                    upper_bound = analyzed
                    if isinstance(upper_bound, AnyType) and upper_bound.is_from_error:
                        self.fail("TypeVar 'bound' must be a type", param_value)
                        # Note: we do not return 'None' here -- we want to continue
                        # using the AnyType as the upper bound.
                except TypeTranslationError:
                    self.fail("TypeVar 'bound' must be a type", param_value)
                    return None
            elif param_name == 'values':
                # Probably using obsolete syntax with values=(...). Explain the current syntax.
                self.fail("TypeVar 'values' argument not supported", context)
                self.fail("Use TypeVar('T', t, ...) instead of TypeVar('T', values=(t, ...))",
                          context)
                return None
            else:
                self.fail("Unexpected argument to TypeVar(): {}".format(param_name), context)
                return None
        if covariant and contravariant:
            self.fail("TypeVar cannot be both covariant and contravariant", context)
            return None
        elif num_values == 1:
            self.fail("TypeVar cannot have only a single constraint", context)
            return None
        elif covariant:
            variance = COVARIANT
        elif contravariant:
            variance = CONTRAVARIANT
        else:
            variance = INVARIANT
        return (variance, upper_bound)
    def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance) -> TypeInfo:
        """Create a new TypeInfo with a single base class and a computed MRO."""
        class_def = ClassDef(name, Block([]))
        if self.is_func_scope() and not self.type:
            # Full names of generated classes should always be prefixed with the module names
            # even if they are nested in a function, since these classes will be (de-)serialized.
            # (Note that the caller should append @line to the name to avoid collisions.)
            # TODO: clean this up, see #6422.
            class_def.fullname = self.cur_mod_id + '.' + self.qualified_name(name)
        else:
            class_def.fullname = self.qualified_name(name)
        info = TypeInfo(SymbolTable(), class_def, self.cur_mod_id)
        class_def.info = info
        mro = basetype_or_fallback.type.mro
        if not mro:
            # Forward reference, MRO should be recalculated in third pass.
            mro = [basetype_or_fallback.type, self.object_type().type]
        info.mro = [info] + mro
        info.bases = [basetype_or_fallback]
        return info
def analyze_types(self, items: List[Expression]) -> List[Type]:
"""Analyze types from values expressions in type variable definition."""
result = [] # type: List[Type]
for node in items:
try:
analyzed = self.anal_type(expr_to_unanalyzed_type(node))
if analyzed is not None:
result.append(analyzed)
else:
# It is fine to simply use temporary Anys because we don't need values
# for anything before main pass of semantic analysis is finished.
result.append(AnyType(TypeOfAny.special_form))
except TypeTranslationError:
self.fail('Type expected', node)
result.append(AnyType(TypeOfAny.from_error))
return result
    def check_classvar(self, s: AssignmentStmt) -> None:
        """Check if assignment defines a class variable."""
        lvalue = s.lvalues[0]
        if len(s.lvalues) != 1 or not isinstance(lvalue, RefExpr):
            return
        if not s.type or not self.is_classvar(s.type):
            return
        if self.is_class_scope() and isinstance(lvalue, NameExpr):
            # Valid use of ClassVar: mark the target variable.
            node = lvalue.node
            if isinstance(node, Var):
                node.is_classvar = True
        elif not isinstance(lvalue, MemberExpr) or self.is_self_member_ref(lvalue):
            # In case of member access, report error only when assigning to self
            # Other kinds of member assignments should be already reported
            self.fail_invalid_classvar(lvalue)
def is_classvar(self, typ: Type) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname() == 'typing.ClassVar'
def is_final_type(self, typ: Optional[Type]) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname() in ('typing.Final',
'typing_extensions.Final')
    def fail_invalid_classvar(self, context: Context) -> None:
        """Report an error about a ClassVar used outside a class body assignment."""
        self.fail('ClassVar can only be used for assignments in class body', context)
    def process_module_assignment(self, lvals: List[Lvalue], rval: Expression,
                                  ctx: AssignmentStmt) -> None:
        """Propagate module references across assignments.

        Recursively handles the simple form of iterable unpacking; doesn't
        handle advanced unpacking with *rest, dictionary unpacking, etc.

        In an expression like x = y = z, z is the rval and lvals will be [x,
        y].
        """
        if (isinstance(rval, (TupleExpr, ListExpr))
                and all(isinstance(v, TupleExpr) for v in lvals)):
            # rval and all lvals are either list or tuple, so we are dealing
            # with unpacking assignment like `x, y = a, b`. Mypy didn't
            # understand our all(isinstance(...)), so cast them as TupleExpr
            # so mypy knows it is safe to access their .items attribute.
            seq_lvals = cast(List[TupleExpr], lvals)
            # given an assignment like:
            #     (x, y) = (m, n) = (a, b)
            # we now have:
            #     seq_lvals = [(x, y), (m, n)]
            #     seq_rval = (a, b)
            # We now zip this into:
            #     elementwise_assignments = [(a, x, m), (b, y, n)]
            # where each elementwise assignment includes one element of rval and the
            # corresponding element of each lval. Basically we unpack
            #     (x, y) = (m, n) = (a, b)
            # into elementwise assignments
            #     x = m = a
            #     y = n = b
            # and then we recursively call this method for each of those assignments.
            # If the rval and all lvals are not all of the same length, zip will just ignore
            # extra elements, so no error will be raised here; mypy will later complain
            # about the length mismatch in type-checking.
            elementwise_assignments = zip(rval.items, *[v.items for v in seq_lvals])
            for rv, *lvs in elementwise_assignments:
                self.process_module_assignment(lvs, rv, ctx)
        elif isinstance(rval, RefExpr):
            rnode = self.lookup_type_node(rval)
            if rnode and isinstance(rnode.node, MypyFile):
                for lval in lvals:
                    if not isinstance(lval, NameExpr):
                        continue
                    # respect explicitly annotated type
                    if (isinstance(lval.node, Var) and lval.node.type is not None):
                        continue
                    lnode = self.current_symbol_table().get(lval.name)
                    if lnode:
                        if isinstance(lnode.node, MypyFile) and lnode.node is not rnode.node:
                            self.fail(
                                "Cannot assign multiple modules to name '{}' "
                                "without explicit 'types.ModuleType' annotation".format(lval.name),
                                ctx)
                        # never create module alias except on initial var definition
                        elif lval.is_inferred_def:
                            lnode.kind = self.current_symbol_kind()
                            lnode.node = rnode.node
def process__all__(self, s: AssignmentStmt) -> None:
    """Export names if argument is a __all__ assignment."""
    if len(s.lvalues) != 1:
        return
    lval = s.lvalues[0]
    if not (isinstance(lval, NameExpr) and lval.name == '__all__'
            and lval.kind == GDEF):
        return
    if isinstance(s.rvalue, (ListExpr, TupleExpr)):
        self.add_exports(s.rvalue.items)
#
# Misc statements
#
def visit_block(self, b: Block) -> None:
    """Analyze all statements in a block, tracking nesting depth.

    Unreachable blocks (as marked by reachability inference) are skipped
    entirely.
    """
    if b.is_unreachable:
        return
    # Track how deeply nested we are within the current scope.
    self.block_depth[-1] += 1
    for s in b.body:
        self.accept(s)
    self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Optional[Block]) -> None:
    """Analyze a block if it is not None (e.g. an optional else body)."""
    if b:
        self.visit_block(b)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
    """Analyze a bare expression statement."""
    self.statement = s
    s.expr.accept(self)
def visit_return_stmt(self, s: ReturnStmt) -> None:
    """Analyze a return statement; 'return' is only valid inside a function."""
    self.statement = s
    if not self.is_func_scope():
        self.fail("'return' outside function", s)
    if s.expr:
        s.expr.accept(self)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
    """Analyze a raise statement, including an optional 'from' cause."""
    self.statement = s
    if s.expr:
        s.expr.accept(self)
    if s.from_expr:
        s.from_expr.accept(self)
def visit_assert_stmt(self, s: AssertStmt) -> None:
    """Analyze an assert statement, including the optional message."""
    self.statement = s
    if s.expr:
        s.expr.accept(self)
    if s.msg:
        s.msg.accept(self)
def visit_operator_assignment_stmt(self,
                                   s: OperatorAssignmentStmt) -> None:
    """Analyze an augmented assignment (e.g. x += y).

    A `__all__ += [...]` at module level also extends the exported names.
    """
    self.statement = s
    s.lvalue.accept(self)
    s.rvalue.accept(self)
    if (isinstance(s.lvalue, NameExpr) and s.lvalue.name == '__all__' and
            s.lvalue.kind == GDEF and isinstance(s.rvalue, (ListExpr, TupleExpr))):
        self.add_exports(s.rvalue.items)
def visit_while_stmt(self, s: WhileStmt) -> None:
    """Analyze a while loop; loop_depth gates break/continue validation."""
    self.statement = s
    s.expr.accept(self)
    self.loop_depth += 1
    s.body.accept(self)
    self.loop_depth -= 1
    self.visit_block_maybe(s.else_body)
def visit_for_stmt(self, s: ForStmt) -> None:
    """Analyze a for loop: iterable, index lvalues, optional index type
    comment, body and else clause.
    """
    self.statement = s
    s.expr.accept(self)
    # Bind index variables and check if they define new names.
    self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
    if s.index_type:
        if self.is_classvar(s.index_type):
            self.fail_invalid_classvar(s.index)
        # Tuple literals in the annotation are only valid for tuple targets.
        allow_tuple_literal = isinstance(s.index, TupleExpr)
        analyzed = self.anal_type(s.index_type, allow_tuple_literal=allow_tuple_literal)
        if analyzed is not None:
            self.store_declared_types(s.index, analyzed)
            s.index_type = analyzed
    self.loop_depth += 1
    self.visit_block(s.body)
    self.loop_depth -= 1
    self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
    """Report a blocking error for 'break' used outside any loop."""
    self.statement = s
    if self.loop_depth == 0:
        self.fail("'break' outside loop", s, True, blocker=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
    """Report a blocking error for 'continue' used outside any loop."""
    self.statement = s
    if self.loop_depth == 0:
        self.fail("'continue' outside loop", s, True, blocker=True)
def visit_if_stmt(self, s: IfStmt) -> None:
    """Analyze an if statement.

    First mark statically unreachable branches (e.g. version checks),
    then analyze each condition together with its branch body.
    """
    self.statement = s
    infer_reachability_of_if_statement(s, self.options)
    # s.expr and s.body are parallel lists (one condition per branch body),
    # so iterate them in lockstep instead of indexing by position.
    for cond, body in zip(s.expr, s.body):
        cond.accept(self)
        self.visit_block(body)
    self.visit_block_maybe(s.else_body)
def visit_try_stmt(self, s: TryStmt) -> None:
    """Analyze a try statement by delegating to analyze_try_stmt."""
    self.statement = s
    self.analyze_try_stmt(s, self)
def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
    """Analyze the parts of a try statement with the given visitor.

    Handler variables are bound as lvalues; exception types, handler
    bodies and the optional else/finally bodies are all traversed.
    """
    s.body.accept(visitor)
    # types, vars and handlers are parallel lists, one triple per
    # except clause (type and var may be None).
    for type, var, handler in zip(s.types, s.vars, s.handlers):
        if type:
            type.accept(visitor)
        if var:
            self.analyze_lvalue(var)
        handler.accept(visitor)
    if s.else_body:
        s.else_body.accept(visitor)
    if s.finally_body:
        s.finally_body.accept(visitor)
def visit_with_stmt(self, s: WithStmt) -> None:
    """Analyze a with statement.

    If the statement carries a type comment, match its item count against
    the number of non-None targets and attach the analyzed types to the
    targets via store_declared_types.
    """
    self.statement = s
    types = []  # type: List[Type]
    if s.unanalyzed_type:
        actual_targets = [t for t in s.target if t is not None]
        if len(actual_targets) == 0:
            # We have a type for no targets
            self.fail('Invalid type comment', s)
        elif len(actual_targets) == 1:
            # We have one target and one type
            types = [s.unanalyzed_type]
        elif isinstance(s.unanalyzed_type, TupleType):
            # We have multiple targets and multiple types
            if len(actual_targets) == len(s.unanalyzed_type.items):
                types = s.unanalyzed_type.items
            else:
                # But it's the wrong number of items
                self.fail('Incompatible number of types for `with` targets', s)
        else:
            # We have multiple targets and one type
            self.fail('Multiple types expected for multiple `with` targets', s)
    new_types = []  # type: List[Type]
    for e, n in zip(s.expr, s.target):
        e.accept(self)
        if n:
            self.analyze_lvalue(n, explicit_type=s.unanalyzed_type is not None)
            # Since we have a target, pop the next type from types
            if types:
                t = types.pop(0)
                if self.is_classvar(t):
                    self.fail_invalid_classvar(n)
                allow_tuple_literal = isinstance(n, TupleExpr)
                analyzed = self.anal_type(t, allow_tuple_literal=allow_tuple_literal)
                if analyzed is not None:
                    # TODO: Deal with this better
                    new_types.append(analyzed)
                    self.store_declared_types(n, analyzed)
    # Record the successfully analyzed types (may be fewer than targets
    # if some analyses returned None).
    s.analyzed_types = new_types
    self.visit_block(s.body)
def visit_del_stmt(self, s: DelStmt) -> None:
    """Analyze a del statement and reject invalid delete targets."""
    self.statement = s
    s.expr.accept(self)
    if not self.is_valid_del_target(s.expr):
        self.fail('Invalid delete target', s)
def is_valid_del_target(self, s: Expression) -> bool:
    """Check whether an expression can appear as the target of 'del'.

    Names, attributes and index expressions are valid; tuples and lists
    are valid if every contained item is.
    """
    if isinstance(s, (IndexExpr, NameExpr, MemberExpr)):
        return True
    if isinstance(s, (TupleExpr, ListExpr)):
        return all(self.is_valid_del_target(item) for item in s.items)
    return False
def visit_global_decl(self, g: GlobalDecl) -> None:
    """Record 'global x' declarations for the current scope."""
    self.statement = g
    for name in g.names:
        # A name cannot be declared both nonlocal and global.
        if name in self.nonlocal_decls[-1]:
            self.fail("Name '{}' is nonlocal and global".format(name), g)
        self.global_decls[-1].add(name)
def visit_nonlocal_decl(self, d: NonlocalDecl) -> None:
    """Validate and record 'nonlocal x' declarations.

    Checks that there is an enclosing binding for each name, that the
    name is not already defined in the local scope, and that it is not
    also declared global.
    """
    self.statement = d
    if not self.is_func_scope():
        self.fail("nonlocal declaration not allowed at module level", d)
    else:
        for name in d.names:
            # Search enclosing (non-innermost) function scopes for a binding.
            for table in reversed(self.locals[:-1]):
                if table is not None and name in table:
                    break
            else:
                # for/else: no enclosing binding was found.
                self.fail("No binding for nonlocal '{}' found".format(name), d)
            if self.locals[-1] is not None and name in self.locals[-1]:
                self.fail("Name '{}' is already defined in local "
                          "scope before nonlocal declaration".format(name), d)
            if name in self.global_decls[-1]:
                self.fail("Name '{}' is nonlocal and global".format(name), d)
            self.nonlocal_decls[-1].add(name)
def visit_print_stmt(self, s: PrintStmt) -> None:
    """Analyze a Python 2 print statement, including 'print >> target'."""
    self.statement = s
    for arg in s.args:
        arg.accept(self)
    if s.target:
        s.target.accept(self)
def visit_exec_stmt(self, s: ExecStmt) -> None:
    """Analyze a Python 2 exec statement and its optional namespaces."""
    self.statement = s
    s.expr.accept(self)
    if s.globals:
        s.globals.accept(self)
    if s.locals:
        s.locals.accept(self)
#
# Expressions
#
def visit_name_expr(self, expr: NameExpr) -> None:
    """Resolve a name reference and bind it to a symbol table node."""
    n = self.lookup(expr.name, expr)
    if n:
        self.bind_name_expr(expr, n)
def bind_name_expr(self, expr: NameExpr, sym: SymbolTableNode) -> None:
    """Bind name expression to a symbol table node.

    Type variables used outside a type context and placeholder nodes
    are reported/handled specially instead of being bound.
    """
    if isinstance(sym.node, TypeVarExpr) and self.tvar_scope.get_binding(sym):
        self.fail("'{}' is a type variable and only valid in type "
                  "context".format(expr.name), expr)
    elif isinstance(sym.node, PlaceholderNode):
        self.process_placeholder(expr.name, 'name', expr)
    else:
        expr.kind = sym.kind
        expr.node = sym.node
        expr.fullname = sym.fullname
def visit_super_expr(self, expr: SuperExpr) -> None:
    """Analyze a super() expression; only valid inside a class body."""
    if not self.type:
        self.fail('"super" used outside class', expr)
        return
    expr.info = self.type
    for arg in expr.call.args:
        arg.accept(self)
def visit_tuple_expr(self, expr: TupleExpr) -> None:
    """Analyze a tuple display; star items are valid in this position."""
    for item in expr.items:
        if isinstance(item, StarExpr):
            # Mark valid before accepting: visit_star_expr checks .valid.
            item.valid = True
        item.accept(self)
def visit_list_expr(self, expr: ListExpr) -> None:
    """Analyze a list display; star items are valid in this position."""
    for item in expr.items:
        if isinstance(item, StarExpr):
            # Mark valid before accepting: visit_star_expr checks .valid.
            item.valid = True
        item.accept(self)
def visit_set_expr(self, expr: SetExpr) -> None:
    """Analyze a set display; star items are valid in this position."""
    for item in expr.items:
        if isinstance(item, StarExpr):
            # Mark valid before accepting: visit_star_expr checks .valid.
            item.valid = True
        item.accept(self)
def visit_dict_expr(self, expr: DictExpr) -> None:
    """Analyze a dict display; a None key denotes a **-unpacked item."""
    for key, value in expr.items:
        if key is not None:
            key.accept(self)
        value.accept(self)
def visit_star_expr(self, expr: StarExpr) -> None:
    """Analyze a starred expression; only valid where a container visitor
    has marked it valid (assignment targets, displays)."""
    if not expr.valid:
        # XXX TODO Change this error message
        self.fail('Can use starred expression only as assignment target', expr)
    else:
        expr.expr.accept(self)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
    """Analyze 'yield from'; rejects use outside functions and inside
    coroutines, otherwise marks the enclosing function a generator."""
    if not self.is_func_scope():  # not sure
        self.fail("'yield from' outside function", e, True, blocker=True)
    else:
        if self.function_stack[-1].is_coroutine:
            self.fail("'yield from' in async function", e, True, blocker=True)
        else:
            self.function_stack[-1].is_generator = True
    if e.expr:
        e.expr.accept(self)
def visit_call_expr(self, expr: CallExpr) -> None:
    """Analyze a call expression.

    Some call expressions are recognized as special forms, including
    cast(...), reveal_type(...), reveal_locals(), Any(...), _promote(...),
    dict(x=y, ...) and divmod(...); for these a dedicated node is
    piggybacked onto the CallExpr as expr.analyzed.
    """
    expr.callee.accept(self)
    if refers_to_fullname(expr.callee, 'typing.cast'):
        # Special form cast(...).
        if not self.check_fixed_args(expr, 2, 'cast'):
            return
        # Translate first argument to an unanalyzed type.
        try:
            target = expr_to_unanalyzed_type(expr.args[0])
        except TypeTranslationError:
            self.fail('Cast target is not a type', expr)
            return
        # Piggyback CastExpr object to the CallExpr object; it takes
        # precedence over the CallExpr semantics.
        expr.analyzed = CastExpr(expr.args[1], target)
        expr.analyzed.line = expr.line
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'builtins.reveal_type'):
        if not self.check_fixed_args(expr, 1, 'reveal_type'):
            return
        expr.analyzed = RevealExpr(kind=REVEAL_TYPE, expr=expr.args[0])
        expr.analyzed.line = expr.line
        expr.analyzed.column = expr.column
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'builtins.reveal_locals'):
        # Store the local variable names into the RevealExpr for use in the
        # type checking pass
        local_nodes = []  # type: List[Var]
        if self.is_module_scope():
            # try to determine just the variable declarations in module scope
            # self.globals.values() contains SymbolTableNode's
            # Each SymbolTableNode has an attribute node that is nodes.Var
            # look for variable nodes that marked as is_inferred
            # Each symboltable node has a Var node as .node
            local_nodes = [n.node
                           for name, n in self.globals.items()
                           if getattr(n.node, 'is_inferred', False)
                           and isinstance(n.node, Var)]
        elif self.is_class_scope():
            # type = None  # type: Optional[TypeInfo]
            if self.type is not None:
                local_nodes = [st.node
                               for st in self.type.names.values()
                               if isinstance(st.node, Var)]
        elif self.is_func_scope():
            # locals = None  # type: List[Optional[SymbolTable]]
            if self.locals is not None:
                symbol_table = self.locals[-1]
                if symbol_table is not None:
                    local_nodes = [st.node
                                   for st in symbol_table.values()
                                   if isinstance(st.node, Var)]
        expr.analyzed = RevealExpr(kind=REVEAL_LOCALS, local_nodes=local_nodes)
        expr.analyzed.line = expr.line
        expr.analyzed.column = expr.column
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'typing.Any'):
        # Special form Any(...) no longer supported.
        self.fail('Any(...) is no longer supported. Use cast(Any, ...) instead', expr)
    elif refers_to_fullname(expr.callee, 'typing._promote'):
        # Special form _promote(...).
        if not self.check_fixed_args(expr, 1, '_promote'):
            return
        # Translate first argument to an unanalyzed type.
        try:
            target = expr_to_unanalyzed_type(expr.args[0])
        except TypeTranslationError:
            self.fail('Argument 1 to _promote is not a type', expr)
            return
        expr.analyzed = PromoteExpr(target)
        expr.analyzed.line = expr.line
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'builtins.dict'):
        expr.analyzed = self.translate_dict_call(expr)
    elif refers_to_fullname(expr.callee, 'builtins.divmod'):
        if not self.check_fixed_args(expr, 2, 'divmod'):
            return
        expr.analyzed = OpExpr('divmod', expr.args[0], expr.args[1])
        expr.analyzed.line = expr.line
        expr.analyzed.accept(self)
    else:
        # Normal call expression.
        for a in expr.args:
            a.accept(self)
        # __all__.append(...) / __all__.extend([...]) at module level also
        # add to the exported names.
        if (isinstance(expr.callee, MemberExpr) and
                isinstance(expr.callee.expr, NameExpr) and
                expr.callee.expr.name == '__all__' and
                expr.callee.expr.kind == GDEF and
                expr.callee.name in ('append', 'extend')):
            if expr.callee.name == 'append' and expr.args:
                self.add_exports(expr.args[0])
            elif (expr.callee.name == 'extend' and expr.args and
                    isinstance(expr.args[0], (ListExpr, TupleExpr))):
                self.add_exports(expr.args[0].items)
def translate_dict_call(self, call: CallExpr) -> Optional[DictExpr]:
    """Translate 'dict(x=y, ...)' to {'x': y, ...}.

    For other variants of dict(...), return None.  In the None cases the
    argument expressions are still analyzed here so they are not missed.
    """
    if not call.args:
        return None
    if not all(kind == ARG_NAMED for kind in call.arg_kinds):
        # Must still accept those args.
        for a in call.args:
            a.accept(self)
        return None
    expr = DictExpr([(StrExpr(cast(str, key)), value)  # since they are all ARG_NAMED
                     for key, value in zip(call.arg_names, call.args)])
    expr.set_line(call)
    expr.accept(self)
    return expr
def check_fixed_args(self, expr: CallExpr, numargs: int,
                     name: str) -> bool:
    """Verify that expr has specified number of positional args.

    Return True if the arguments are valid.
    """
    # Pluralize "argument" unless exactly one is expected.
    s = '' if numargs == 1 else 's'
    if len(expr.args) != numargs:
        self.fail("'%s' expects %d argument%s" % (name, numargs, s),
                  expr)
        return False
    if expr.arg_kinds != [ARG_POS] * numargs:
        self.fail("'%s' must be called with %s positional argument%s" %
                  (name, numargs, s), expr)
        return False
    return True
def visit_member_expr(self, expr: MemberExpr) -> None:
    """Analyze an attribute access expression (foo.bar).

    Binds module attributes (foo is a module) and class-level modules/
    types (foo is a class, possibly via self/cls); other member accesses
    are left for the type checker.
    """
    base = expr.expr
    base.accept(self)
    # Bind references to module attributes.
    if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
        # This branch handles the case foo.bar where foo is a module.
        # In this case base.node is the module's MypyFile and we look up
        # bar in its namespace. This must be done for all types of bar.
        file = base.node
        # TODO: Should we actually use this? Not sure if this makes a difference.
        # if file.fullname() == self.cur_mod_id:
        #     names = self.globals
        # else:
        #     names = file.names
        n = file.names.get(expr.name, None)
        if n and not n.module_hidden:
            n = self.rebind_symbol_table_node(n)
            if n:
                if isinstance(n.node, PlaceholderNode):
                    self.process_placeholder(expr.name, 'attribute', expr)
                    return
                # TODO: What if None?
                expr.kind = n.kind
                expr.fullname = n.fullname
                expr.node = n.node
        elif (file is not None and (file.is_stub or self.options.python_version >= (3, 7))
                and '__getattr__' in file.names):
            # If there is a module-level __getattr__, then any attribute on the module is valid
            # per PEP 484.
            getattr_defn = file.names['__getattr__']
            if not getattr_defn:
                typ = AnyType(TypeOfAny.from_error)  # type: Type
            elif isinstance(getattr_defn.node, (FuncDef, Var)):
                if isinstance(getattr_defn.node.type, CallableType):
                    typ = getattr_defn.node.type.ret_type
                else:
                    typ = AnyType(TypeOfAny.from_error)
            else:
                typ = AnyType(TypeOfAny.from_error)
            expr.kind = GDEF
            expr.fullname = '{}.{}'.format(file.fullname(), expr.name)
            expr.node = Var(expr.name, type=typ)
        else:
            if self.is_incomplete_namespace(file.fullname()):
                self.record_incomplete_ref()
                return
            # We only catch some errors here; the rest will be
            # caught during type checking.
            #
            # This way we can report a larger number of errors in
            # one type checker run. If we reported errors here,
            # the build would terminate after semantic analysis
            # and we wouldn't be able to report any type errors.
            full_name = '%s.%s' % (file.fullname() if file is not None else None, expr.name)
            mod_name = " '%s'" % file.fullname() if file is not None else ''
            if full_name in obsolete_name_mapping:
                self.fail("Module%s has no attribute %r (it's now called %r)" % (
                    mod_name, expr.name, obsolete_name_mapping[full_name]), expr)
    elif isinstance(base, RefExpr):
        # This branch handles the case C.bar (or cls.bar or self.bar inside
        # a classmethod/method), where C is a class and bar is a type
        # definition or a module resulting from `import bar` (or a module
        # assignment) inside class C. We look up bar in the class' TypeInfo
        # namespace. This is done only when bar is a module or a type;
        # other things (e.g. methods) are handled by other code in
        # checkmember.
        type_info = None
        if isinstance(base.node, TypeInfo):
            # C.bar where C is a class
            type_info = base.node
        elif isinstance(base.node, Var) and self.type and self.function_stack:
            # check for self.bar or cls.bar in method/classmethod
            func_def = self.function_stack[-1]
            if not func_def.is_static and isinstance(func_def.type, CallableType):
                formal_arg = func_def.type.argument_by_name(base.node.name())
                if formal_arg and formal_arg.pos == 0:
                    type_info = self.type
        elif isinstance(base.node, TypeAlias) and base.node.no_args:
            if isinstance(base.node.target, Instance):
                type_info = base.node.target.type
        if type_info:
            n = type_info.names.get(expr.name)
            if n is not None and isinstance(n.node, (MypyFile, TypeInfo, TypeAlias)):
                # NOTE(review): this inner `if not n` can never trigger since
                # n is known to be not None here; looks like dead code.
                if not n:
                    return
                expr.kind = n.kind
                expr.fullname = n.fullname
                expr.node = n.node
def visit_op_expr(self, expr: OpExpr) -> None:
    """Analyze a binary operation.

    For 'and'/'or', infer whether the right operand is statically
    unreachable or always evaluated based on the left operand's value.
    """
    expr.left.accept(self)
    if expr.op in ('and', 'or'):
        inferred = infer_condition_value(expr.left, self.options)
        if ((inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'and') or
                (inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'or')):
            # Short-circuit: right side never runs, so don't analyze it.
            expr.right_unreachable = True
            return
        elif ((inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'and') or
                (inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'or')):
            expr.right_always = True
    expr.right.accept(self)
def visit_comparison_expr(self, expr: ComparisonExpr) -> None:
    """Analyze every operand of a (possibly chained) comparison."""
    for operand in expr.operands:
        operand.accept(self)
def visit_unary_expr(self, expr: UnaryExpr) -> None:
    """Analyze the operand of a unary operation."""
    expr.expr.accept(self)
def visit_index_expr(self, expr: IndexExpr) -> None:
    """Analyze a subscript expression.

    Subscripting a class or type alias is treated as a type application;
    everything else is an ordinary index expression.
    """
    base = expr.base
    base.accept(self)
    if (isinstance(base, RefExpr)
            and isinstance(base.node, TypeInfo)
            and not base.node.is_generic()):
        expr.index.accept(self)
    elif ((isinstance(base, RefExpr) and isinstance(base.node, TypeAlias))
            or refers_to_class_or_function(base)):
        # We need to do full processing on every iteration, since some type
        # arguments may contain placeholder types.
        self.analyze_type_application(expr)
    else:
        expr.index.accept(self)
def analyze_type_application(self, expr: IndexExpr) -> None:
    """Analyze special form -- type application (either direct or via type aliasing).

    Attaches a TypeApplication node as expr.analyzed and rejects
    subscripting of non-generic builtins like list/dict/set.
    """
    types = self.analyze_type_application_args(expr)
    if types is None:
        return
    base = expr.base
    expr.analyzed = TypeApplication(base, types)
    expr.analyzed.line = expr.line
    # Types list, dict, set are not subscriptable, prohibit this if
    # subscripted either via type alias...
    if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
        alias = base.node
        if isinstance(alias.target, Instance):
            name = alias.target.type.fullname()
            if (alias.no_args and  # this avoids bogus errors for already reported aliases
                    name in nongen_builtins and not alias.normalized):
                self.fail(no_subscript_builtin_alias(name, propose_alt=False), expr)
    # ...or directly.
    else:
        n = self.lookup_type_node(base)
        if n and n.fullname in nongen_builtins:
            self.fail(no_subscript_builtin_alias(n.fullname, propose_alt=False), expr)
def analyze_type_application_args(self, expr: IndexExpr) -> Optional[List[Type]]:
    """Analyze type arguments (index) in a type application.

    Return None if anything was incomplete.
    """
    index = expr.index
    tag = self.track_incomplete_refs()
    self.analyze_type_expr(index)
    if self.found_incomplete_ref(tag):
        return None
    types = []  # type: List[Type]
    # A tuple index like C[int, str] carries multiple type arguments.
    if isinstance(index, TupleExpr):
        items = index.items
    else:
        items = [index]
    for item in items:
        try:
            typearg = expr_to_unanalyzed_type(item)
        except TypeTranslationError:
            self.fail('Type expected within [...]', expr)
            return None
        # We always allow unbound type variables in IndexExpr, since we
        # may be analysing a type alias definition rvalue. The error will be
        # reported elsewhere if it is not the case.
        analyzed = self.anal_type(typearg, allow_unbound_tvars=True,
                                  allow_placeholder=True)
        if analyzed is None:
            self.defer()
            return None
        types.append(analyzed)
    return types
def visit_slice_expr(self, expr: SliceExpr) -> None:
    """Analyze the begin, end and stride parts of a slice, if present."""
    for part in (expr.begin_index, expr.end_index, expr.stride):
        if part:
            part.accept(self)
def visit_cast_expr(self, expr: CastExpr) -> None:
    """Analyze a cast(...) node, replacing its target type if analysis
    succeeds."""
    expr.expr.accept(self)
    analyzed = self.anal_type(expr.type)
    if analyzed is not None:
        expr.type = analyzed
def visit_reveal_expr(self, expr: RevealExpr) -> None:
    """Analyze a reveal_type()/reveal_locals() special-form node."""
    if expr.kind != REVEAL_TYPE:
        # reveal_locals has no inner expression to traverse.
        return
    if expr.expr is not None:
        expr.expr.accept(self)
def visit_type_application(self, expr: TypeApplication) -> None:
    """Analyze a type application node (e.g. C[int]), re-analyzing each
    type argument in place when analysis succeeds."""
    expr.expr.accept(self)
    # enumerate instead of range(len(...)): we still need the index to
    # write the analyzed type back into the list.
    for i, typ in enumerate(expr.types):
        analyzed = self.anal_type(typ)
        if analyzed is not None:
            expr.types[i] = analyzed
def visit_list_comprehension(self, expr: ListComprehension) -> None:
    """Analyze a list comprehension via its underlying generator."""
    expr.generator.accept(self)
def visit_set_comprehension(self, expr: SetComprehension) -> None:
    """Analyze a set comprehension via its underlying generator."""
    expr.generator.accept(self)
def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None:
    """Analyze a dict comprehension in a new local scope.

    The key/value expressions and inner parts are analyzed inside the
    comprehension scope; the first sequence is analyzed in the outer
    scope afterwards (analyze_comp_for_2).
    """
    self.enter(expr)
    self.analyze_comp_for(expr)
    expr.key.accept(self)
    expr.value.accept(self)
    self.leave()
    self.analyze_comp_for_2(expr)
def visit_generator_expr(self, expr: GeneratorExpr) -> None:
    """Analyze a generator expression in a new local scope.

    The result expression and inner parts are analyzed inside the
    comprehension scope; the first sequence is analyzed in the outer
    scope afterwards (analyze_comp_for_2).
    """
    self.enter(expr)
    self.analyze_comp_for(expr)
    expr.left_expr.accept(self)
    self.leave()
    self.analyze_comp_for_2(expr)
def analyze_comp_for(self, expr: Union[GeneratorExpr,
                                       DictionaryComprehension]) -> None:
    """Analyses the 'comp_for' part of comprehensions (part 1).

    That is the part after 'for' in (x for x in l if p). This analyzes
    variables and conditions which are analyzed in a local scope.
    """
    for i, (index, sequence, conditions) in enumerate(zip(expr.indices,
                                                          expr.sequences,
                                                          expr.condlists)):
        if i > 0:
            # The first sequence belongs to the outer scope and is
            # handled by analyze_comp_for_2.
            sequence.accept(self)
        # Bind index variables.
        self.analyze_lvalue(index)
        for cond in conditions:
            cond.accept(self)
def analyze_comp_for_2(self, expr: Union[GeneratorExpr,
                                         DictionaryComprehension]) -> None:
    """Analyses the 'comp_for' part of comprehensions (part 2).

    That is the part after 'for' in (x for x in l if p). This analyzes
    the 'l' part which is analyzed in the surrounding scope.
    """
    expr.sequences[0].accept(self)
def visit_lambda_expr(self, expr: LambdaExpr) -> None:
    """Analyze a lambda's body like any other function body."""
    self.analyze_function_body(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
    """Analyze a ternary expression (if_expr is visited before cond)."""
    expr.if_expr.accept(self)
    expr.cond.accept(self)
    expr.else_expr.accept(self)
def visit_backquote_expr(self, expr: BackquoteExpr) -> None:
    """Analyze a Python 2 backquote (repr) expression."""
    expr.expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
    """Analyze a _promote(...) special form, updating its target type."""
    analyzed = self.anal_type(expr.type)
    if analyzed is not None:
        expr.type = analyzed
def visit_yield_expr(self, expr: YieldExpr) -> None:
    """Analyze a yield expression.

    Marks the enclosing function as a generator; in a coroutine this is
    only allowed on Python 3.6+ (async generator).
    """
    if not self.is_func_scope():
        self.fail("'yield' outside function", expr, True, blocker=True)
    else:
        if self.function_stack[-1].is_coroutine:
            if self.options.python_version < (3, 6):
                self.fail("'yield' in async function", expr, True, blocker=True)
            else:
                self.function_stack[-1].is_generator = True
                self.function_stack[-1].is_async_generator = True
        else:
            self.function_stack[-1].is_generator = True
    if expr.expr:
        expr.expr.accept(self)
def visit_await_expr(self, expr: AwaitExpr) -> None:
    """Analyze an await expression; only valid inside an 'async def'."""
    if not self.is_func_scope():
        self.fail("'await' outside function", expr)
    elif not self.function_stack[-1].is_coroutine:
        self.fail("'await' outside coroutine ('async def')", expr)
    expr.expr.accept(self)
#
# Lookup functions
#
def lookup(self, name: str, ctx: Context,
           suppress_errors: bool = False) -> Optional[SymbolTableNode]:
    """Look up an unqualified (no dots) name in all active namespaces.

    Resolution order: explicit global/nonlocal declarations, class
    attributes, function scopes (innermost first), module globals, then
    builtins.  Returns None (optionally reporting the error) when the
    name cannot be resolved.
    """
    implicit_name = False
    # 1a. Name declared using 'global x' takes precedence
    if name in self.global_decls[-1]:
        if name in self.globals:
            return self.globals[name]
        if not suppress_errors:
            self.name_not_defined(name, ctx)
        return None
    # 1b. Name declared using 'nonlocal x' takes precedence
    if name in self.nonlocal_decls[-1]:
        # for/else: error only if no enclosing scope binds the name.
        for table in reversed(self.locals[:-1]):
            if table is not None and name in table:
                return table[name]
        else:
            if not suppress_errors:
                self.name_not_defined(name, ctx)
            return None
    # 2. Class attributes (if within class definition)
    if self.type and not self.is_func_scope() and name in self.type.names:
        node = self.type.names[name]
        if not node.implicit:
            if self.is_active_symbol_in_class_body(node.node):
                return node
        else:
            # Defined through self.x assignment
            implicit_name = True
            implicit_node = node
    # 3. Local (function) scopes
    for table in reversed(self.locals):
        if table is not None and name in table:
            return table[name]
    # 4. Current file global scope
    if name in self.globals:
        return self.globals[name]
    # 5. Builtins
    b = self.globals.get('__builtins__', None)
    if b:
        assert isinstance(b.node, MypyFile)
        table = b.node.names
        if name in table:
            # Single-underscore-prefixed builtins are hidden.
            # NOTE(review): name[1] would raise IndexError for the
            # one-character name "_"; presumably "_" never appears in the
            # builtins table -- confirm.
            if name[0] == "_" and name[1] != "_":
                if not suppress_errors:
                    self.name_not_defined(name, ctx)
                return None
            node = table[name]
            return node
    # Give up.
    if not implicit_name and not suppress_errors:
        self.name_not_defined(name, ctx)
    else:
        if implicit_name:
            return implicit_node
    return None
def is_active_symbol_in_class_body(self, node: Optional[SymbolNode]) -> bool:
    """Can a symbol defined in class body accessed at current statement?

    Only allow access to class attributes textually after
    the definition, so that it's possible to fall back to the
    outer scope. Example:

        class X: ...
        class C:
            X = X  # Initializer refers to outer scope

    Nested classes are an exception, since we want to support
    arbitrary forward references in type annotations.
    """
    # TODO: Forward reference to name imported in class body is not
    #       caught.
    return (node is None
            or node.line < self.statement.line
            or not self.is_defined_in_current_module(node.fullname())
            or isinstance(node, TypeInfo))
def is_defined_in_current_module(self, fullname: Optional[str]) -> bool:
    """Return True if fullname refers to a name in the current module."""
    if fullname is None:
        return False
    return module_prefix(self.modules, fullname) == self.cur_mod_id
def lookup_qualified(self, name: str, ctx: Context,
                     suppress_errors: bool = False) -> Optional[SymbolTableNode]:
    """Look up a possibly dotted name, resolving each component in turn.

    Classes, modules and placeholders can be traversed; a Var of Any
    type yields an implicit Any symbol; anything else fails the lookup.
    """
    if '.' not in name:
        # Simple case: look up a short name.
        return self.lookup(name, ctx, suppress_errors=suppress_errors)
    parts = name.split('.')
    namespace = self.cur_mod_id
    sym = self.lookup(parts[0], ctx, suppress_errors=suppress_errors)
    if sym:
        for i in range(1, len(parts)):
            node = sym.node
            if isinstance(node, TypeInfo):
                nextsym = node.get(parts[i])
            elif isinstance(node, MypyFile):
                nextsym = self.get_module_symbol(node, parts[i:])
                namespace = node.fullname()
            elif isinstance(node, PlaceholderNode):
                return sym
            else:
                if isinstance(node, Var) and isinstance(node.type, AnyType):
                    # Allow access through Var with Any type without error.
                    return self.implicit_symbol(sym, name, parts[i:], node.type)
                # Lookup through invalid node, such as variable or function
                nextsym = None
            if not nextsym or nextsym.module_hidden:
                if not suppress_errors:
                    self.name_not_defined(name, ctx, namespace=namespace)
                return None
            sym = nextsym
    return sym
def lookup_type_node(self, expr: Expression) -> Optional[SymbolTableNode]:
    """Look up the symbol an expression refers to when read as a type.

    Returns None if the expression is not a valid (unbound) type.
    """
    try:
        t = expr_to_unanalyzed_type(expr)
    except TypeTranslationError:
        return None
    if isinstance(t, UnboundType):
        n = self.lookup_qualified(t.name, expr, suppress_errors=True)
        return n
    return None
def get_module_symbol(self, node: MypyFile, parts: List[str]) -> Optional[SymbolTableNode]:
    """Look up a symbol from the module symbol table.

    Falls back to a module-level __getattr__ (PEP 484) when the name is
    absent, the namespace is complete, and the module is a stub or the
    target Python version is 3.7+.
    """
    # TODO: Use this logic in more places?
    module = node.fullname()
    names = node.names
    # Rebind potential references to old version of current module in
    # fine-grained incremental mode.
    if module == self.cur_mod_id:
        names = self.globals
    sym = names.get(parts[0], None)
    if (not sym
            and '__getattr__' in names
            and not self.is_incomplete_namespace(module)
            and (node.is_stub or self.options.python_version >= (3, 7))):
        fullname = module + '.' + '.'.join(parts)
        gvar = self.create_getattr_var(names['__getattr__'],
                                       parts[0], fullname)
        if gvar:
            sym = SymbolTableNode(GDEF, gvar)
    return sym
def implicit_symbol(self, sym: SymbolTableNode, name: str, parts: List[str],
                    source_type: AnyType) -> SymbolTableNode:
    """Create symbol for a qualified name reference through Any type."""
    basename = sym.node.fullname() if sym.node is not None else None
    fullname = name if basename is None else basename + '.' + '.'.join(parts)
    # The resulting symbol is a Var whose Any type is derived from the
    # Any we looked through.
    var = Var(parts[-1], AnyType(TypeOfAny.from_another_any, source_type))
    var._fullname = fullname
    return SymbolTableNode(GDEF, var)
def create_getattr_var(self, getattr_defn: SymbolTableNode,
                       name: str, fullname: str) -> Optional[Var]:
    """Create a dummy variable using module-level __getattr__ return type.

    If not possible, return None.

    Note that multiple Var nodes can be created for a single name. We
    can use the from_module_getattr and the fullname attributes to
    check if two dummy Var nodes refer to the same thing. Reusing Var
    nodes would require non-local mutable state, which we prefer to
    avoid.
    """
    if isinstance(getattr_defn.node, (FuncDef, Var)):
        if isinstance(getattr_defn.node.type, CallableType):
            typ = getattr_defn.node.type.ret_type
        else:
            # __getattr__ without a callable type: fall back to Any.
            typ = AnyType(TypeOfAny.from_error)
        v = Var(name, type=typ)
        v._fullname = fullname
        v.from_module_getattr = True
        return v
    return None
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
    """Lookup a fully qualified name.

    Assume that the name is defined. This happens in the global namespace --
    the local module namespace is ignored.

    Note that this doesn't support visibility, module-level __getattr__, or
    nested classes.  Raises KeyError if any component is missing.
    """
    parts = name.split('.')
    n = self.modules[parts[0]]
    # Walk through intermediate modules; the last part is the symbol.
    for i in range(1, len(parts) - 1):
        next_sym = n.names[parts[i]]
        assert isinstance(next_sym.node, MypyFile)
        n = next_sym.node
    return n.names[parts[-1]]
def lookup_fully_qualified_or_none(self, fullname: str) -> Optional[SymbolTableNode]:
    """Lookup a fully qualified name that refers to a module-level definition.

    Don't assume that the name is defined. This happens in the global namespace --
    the local module namespace is ignored. This does not dereference indirect
    refs.

    Note that this can't be used for names nested in class namespaces.
    """
    # TODO: unify/clean-up/simplify lookup methods, see #4157.
    # TODO: support nested classes (but consider performance impact,
    #       we might keep the module level only lookup for thing like 'builtins.int').
    assert '.' in fullname
    module, name = fullname.rsplit('.', maxsplit=1)
    if module not in self.modules:
        return None
    filenode = self.modules[module]
    result = filenode.names.get(name)
    if result is None and self.is_incomplete_namespace(module):
        # TODO: More explicit handling of incomplete refs?
        self.record_incomplete_ref()
    return result
def builtin_type(self, fully_qualified_name: str) -> Instance:
    """Return an Instance of the named builtin type with Any type args."""
    sym = self.lookup_fully_qualified(fully_qualified_name)
    node = sym.node
    assert isinstance(node, TypeInfo)
    return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def object_type(self) -> Instance:
    """Return an Instance for builtins.object."""
    return self.named_type('__builtins__.object')
def str_type(self) -> Instance:
    """Return an Instance for builtins.str."""
    return self.named_type('__builtins__.str')
def named_type(self, qualified_name: str, args: Optional[List[Type]] = None) -> Instance:
    """Return an Instance of a named type, which must be resolvable.

    Without explicit args, type parameters are filled with Any.
    """
    sym = self.lookup_qualified(qualified_name, Context())
    assert sym, "Internal error: attempted to construct unknown type"
    node = sym.node
    assert isinstance(node, TypeInfo)
    if args:
        # TODO: assert len(args) == len(node.defn.type_vars)
        return Instance(node, args)
    return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def named_type_or_none(self, qualified_name: str,
                       args: Optional[List[Type]] = None) -> Optional[Instance]:
    """Like named_type, but return None if the name cannot be resolved.

    Simple (no-argument) type aliases to Instances are followed.
    """
    sym = self.lookup_fully_qualified_or_none(qualified_name)
    if not sym or isinstance(sym.node, PlaceholderNode):
        return None
    node = sym.node
    if isinstance(node, TypeAlias):
        assert isinstance(node.target, Instance)
        node = node.target.type
    assert isinstance(node, TypeInfo), node
    if args is not None:
        # TODO: assert len(args) == len(node.defn.type_vars)
        return Instance(node, args)
    return Instance(node, [AnyType(TypeOfAny.unannotated)] * len(node.defn.type_vars))
def lookup_current_scope(self, name: str) -> Optional[SymbolTableNode]:
    """Look up a name only in the innermost active scope.

    Checks the current function scope first, then the enclosing class,
    and finally module globals.
    """
    if self.locals[-1] is not None:
        return self.locals[-1].get(name)
    if self.type is not None:
        return self.type.names.get(name)
    return self.globals.get(name)
#
# Adding symbols
#
def add_symbol(self,
               name: str,
               node: SymbolNode,
               context: Context,
               module_public: bool = True,
               module_hidden: bool = False,
               can_defer: bool = True) -> bool:
    """Add symbol to the currently active symbol table.

    Generally additions to symbol table should go through this method or
    one of the methods below so that kinds, redefinitions, conditional
    definitions, and skipped names are handled consistently.

    Return True if we actually added the symbol, or False if we refused to do so
    (because something is not ready).

    If can_defer is True, defer current target if adding a placeholder.
    """
    # Consistency: derive the symbol kind from the active scope via the
    # shared helper instead of duplicating the scope checks here. The
    # helper's class-scope-first ordering is equivalent: a function scope
    # always yields LDEF, a class scope outside a function yields MDEF,
    # anything else yields GDEF.
    kind = self.current_symbol_kind()
    symbol = SymbolTableNode(kind,
                             node,
                             module_public=module_public,
                             module_hidden=module_hidden)
    return self.add_symbol_table_node(name, symbol, context, can_defer)
def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
    """Add a symbol to the class or module namespace, skipping function locals.

    This doesn't check for a previous definition and is only used for
    serialization of method-level classes: such classes can be exposed
    through an attribute type, but method-level symbol tables aren't
    serialized, so they must be added to an enclosing, serialized table.
    """
    # TODO: currently this is only used by named tuples. Use this method
    # also by typed dicts and normal classes, see issue #6422.
    if self.type is None:
        names, kind = self.globals, GDEF
    else:
        names, kind = self.type.names, MDEF
    names[name] = SymbolTableNode(kind, node)
def add_symbol_table_node(self,
                          name: str,
                          symbol: SymbolTableNode,
                          context: Optional[Context] = None,
                          can_defer: bool = True) -> bool:
    """Add symbol table node to the currently active symbol table.
    Return True if we actually added the symbol, or False if we refused
    to do so (because something is not ready or it was a no-op).
    Generate an error if there is an invalid redefinition.
    If context is None, unconditionally add node, since we can't report
    an error. Note that this is used by plugins to forcibly replace nodes!
    TODO: Prevent plugins from replacing nodes, as it could cause problems?
    Args:
        name: short name of symbol
        symbol: Node to add
        can_defer: if True, defer current target if adding a placeholder
        context: error context (see above about None value)
    """
    names = self.current_symbol_table()
    existing = names.get(name)
    if isinstance(symbol.node, PlaceholderNode) and can_defer:
        # A placeholder means the definition isn't ready yet; re-run this
        # target on a later iteration so the placeholder can be resolved.
        self.defer()
    if (existing is not None
            and context is not None
            and not is_valid_replacement(existing, symbol)):
        # There is an existing node, so this may be a redefinition.
        # If the new node points to the same node as the old one,
        # or if both old and new nodes are placeholders, we don't
        # need to do anything.
        old = existing.node
        new = symbol.node
        if not is_same_symbol(old, new):
            if isinstance(new, (FuncDef, Decorator, OverloadedFuncDef, TypeInfo)):
                # Keep the redefinition reachable (under a dummy name) for
                # later analysis passes.
                self.add_redefinition(names, name, symbol)
            if not (isinstance(new, (FuncDef, Decorator))
                    and self.set_original_def(old, new)):
                self.name_already_defined(name, context, existing)
    elif name not in self.missing_names and '*' not in self.missing_names:
        names[name] = symbol
        self.progress = True
        return True
    return False
def add_redefinition(self,
                     names: SymbolTable,
                     name: str,
                     symbol: SymbolTableNode) -> None:
    """Add a symbol table node that reflects a redefinition as a function or a class.

    Redefinitions are stored under dummy names of the form
    'name-redefinition[N]' (N = 2, 3, ...; omitted for the first one) so
    they remain reachable through AST traversal. They are always stored,
    valid or not; the caller is responsible for reporting errors on invalid
    redefinitions (e.g. a variable redefined as a class).
    """
    attempt = 1
    while True:
        suffix = '-redefinition' if attempt == 1 else '-redefinition{}'.format(attempt)
        candidate = name + suffix
        prev = names.get(candidate)
        if prev is None:
            names[candidate] = symbol
            return
        if prev.node is symbol.node:
            # This exact redefinition was already recorded.
            return
        attempt += 1
def add_module_symbol(self,
                      id: str,
                      as_id: str,
                      module_public: bool,
                      context: Context,
                      module_hidden: bool = False) -> None:
    """Add a symbol that is a reference to a module object.

    Falls back to a suppressed-import placeholder when the module is not
    in the build.
    """
    if id not in self.modules:
        self.add_unknown_imported_symbol(as_id, context, target_name=id)
        return
    self.add_symbol(as_id, self.modules[id], context,
                    module_public=module_public,
                    module_hidden=module_hidden)
def add_local(self, node: Union[Var, FuncDef, OverloadedFuncDef], context: Context) -> None:
    """Add a local variable or function to the current function scope."""
    assert self.is_func_scope()
    short_name = node.name()
    # Locals are not qualified: the full name is just the short name.
    node._fullname = short_name
    self.add_symbol(short_name, node, context)
def add_imported_symbol(self,
                        name: str,
                        node: SymbolTableNode,
                        context: Context,
                        module_public: bool = True,
                        module_hidden: bool = False) -> None:
    """Add an alias to an existing symbol through import.

    The alias shares the target's kind and node but carries its own
    visibility flags.
    """
    alias = SymbolTableNode(node.kind, node.node,
                            module_public=module_public,
                            module_hidden=module_hidden)
    self.add_symbol_table_node(name, alias, context)
def add_unknown_imported_symbol(self,
                                name: str,
                                context: Context,
                                target_name: Optional[str] = None) -> None:
    """Add symbol that we don't know what it points to because resolving an import failed.
    This can happen if a module is missing, or it is present, but doesn't have
    the imported attribute. The `target_name` is the name of symbol in the namespace
    it is imported from. For example, for 'from mod import x as y' the target_name is
    'mod.x'. This is currently used only to track logical dependencies.
    """
    existing = self.current_symbol_table().get(name)
    if existing and isinstance(existing.node, Var) and existing.node.is_suppressed_import:
        # This missing import was already added -- nothing to do here.
        return
    # Model the unknown symbol as a Var of type Any.
    var = Var(name)
    if self.options.logical_deps and target_name is not None:
        # This makes it possible to add logical fine-grained dependencies
        # from a missing module. We can't use this by default, since in a
        # few places we assume that the full name points to a real
        # definition, but this name may point to nothing.
        var._fullname = target_name
    elif self.type:
        var._fullname = self.type.fullname() + "." + name
        var.info = self.type
    else:
        var._fullname = self.qualified_name(name)
    var.is_ready = True
    # The Any carries the missing import name so errors can mention it.
    any_type = AnyType(TypeOfAny.from_unimported_type, missing_import_name=var._fullname)
    var.type = any_type
    var.is_suppressed_import = True
    self.add_symbol(name, var, context)
#
# Other helpers
#
@contextmanager
def tvar_scope_frame(self, frame: TypeVarScope) -> Iterator[None]:
    """Temporarily install 'frame' as the active type variable scope.

    The previous scope is restored on exit. Bug fix: restoration now runs
    in a finally block, so an exception raised inside the with-body can no
    longer leak the temporary scope.
    """
    old_scope = self.tvar_scope
    self.tvar_scope = frame
    try:
        yield
    finally:
        self.tvar_scope = old_scope
def defer(self) -> None:
    """Defer current analysis target to be analyzed again.
    This must be called if something in the current target is
    incomplete or has a placeholder node.
    This must not be called during the final analysis iteration!
    Instead, an error should be generated.
    """
    # NOTE(review): presumably the analysis driver re-queues targets that
    # set this flag; the driver itself is outside this file.
    self.deferred = True
def track_incomplete_refs(self) -> Tag:
    """Return a tag that can be used for tracking references to incomplete names."""
    snapshot = self.num_incomplete_refs
    return snapshot
def found_incomplete_ref(self, tag: Tag) -> bool:
    """Have we encountered an incomplete reference since 'tag' was taken?"""
    return tag != self.num_incomplete_refs
def record_incomplete_ref(self) -> None:
    """Count an incomplete reference and defer the current analysis target."""
    self.num_incomplete_refs += 1
    self.defer()
def mark_incomplete(self, name: str, node: Node,
                    becomes_typeinfo: bool = False) -> None:
    """Mark a definition as incomplete (and defer current analysis target).
    Also potentially mark the current namespace as incomplete.
    Args:
        name: The name that we weren't able to define (or '*' if the name is unknown)
        node: The node that refers to the name (definition or lvalue)
        becomes_typeinfo: Pass this to PlaceholderNode (used by special forms like
            named tuples that will create TypeInfos).
    """
    self.defer()
    if name == '*':
        # Unknown name: the whole namespace may be missing definitions.
        self.incomplete = True
    elif name not in self.current_symbol_table() and not self.is_global_or_nonlocal(name):
        # Record a placeholder so later references to the name can be
        # recognized and deferred rather than reported as undefined.
        fullname = self.qualified_name(name)
        self.add_symbol(name,
                        PlaceholderNode(fullname, node, becomes_typeinfo),
                        context=dummy_context())
        self.missing_names.add(name)
def is_incomplete_namespace(self, fullname: str) -> bool:
    """Is a module or class namespace potentially missing some definitions?

    If a name is missing from an incomplete namespace, the current analysis
    target will need to be deferred.
    """
    incomplete = fullname in self.incomplete_namespaces
    return incomplete
def process_placeholder(self, name: str, kind: str, ctx: Context) -> None:
    """Handle a reference that resolved to a placeholder node.

    Defers the current target so it gets another pass, unless this is the
    final iteration — then the name can never be resolved and an error is
    reported instead. 'kind' says whether this was a name or attribute
    expression (for a better error message).
    """
    if not self.final_iteration:
        self.defer()
    else:
        self.cannot_resolve_name(name, kind, ctx)
def cannot_resolve_name(self, name: str, kind: str, ctx: Context) -> None:
    """Report a name that could never be resolved (likely a cyclic definition)."""
    message = 'Cannot resolve {} "{}" (possible cyclic definition)'.format(kind, name)
    self.fail(message, ctx)
def rebind_symbol_table_node(self, n: SymbolTableNode) -> Optional[SymbolTableNode]:
    """If node refers to old version of module, return reference to new version.
    If the reference is removed in the new version, return None.
    """
    # TODO: Handle type variables and other sorts of references
    if isinstance(n.node, (FuncDef, OverloadedFuncDef, TypeInfo, Var, TypeAlias)):
        # TODO: Why is it possible for fullname() to be None, even though it's not
        #       annotated as Optional[str]?
        # TODO: Do this for all modules in the set of modified files
        # TODO: This doesn't work for things nested within classes
        if n.node.fullname() and get_prefix(n.node.fullname()) == self.cur_mod_id:
            # This is an indirect reference to a name defined in the current module.
            # Rebind it.
            return self.globals.get(n.node.name())
    # No need to rebind.
    return n
def qualified_name(self, name: str) -> str:
    """Return the fully qualified form of a short name in the current scope."""
    if self.type is not None:
        return '{}.{}'.format(self.type._fullname, name)
    if self.is_func_scope():
        # Function-local names are not qualified.
        return name
    return '{}.{}'.format(self.cur_mod_id, name)
def enter(self, function: Union[FuncItem, GeneratorExpr, DictionaryComprehension]) -> None:
    """Enter a function, generator or comprehension scope.

    Local symbol tables are cached per function so re-analysis after a
    deferral sees the same table.
    """
    table = self.saved_locals.setdefault(function, SymbolTable())
    self.locals.append(table)
    self.global_decls.append(set())
    self.nonlocal_decls.append(set())
    self.block_depth.append(-1)  # entering the block will increment this to 0
def leave(self) -> None:
    """Pop the scope frames pushed by enter()."""
    for stack in (self.locals, self.global_decls, self.nonlocal_decls, self.block_depth):
        stack.pop()
def is_func_scope(self) -> bool:
    """True if the innermost scope frame belongs to a function-like scope."""
    current_frame = self.locals[-1]
    return current_frame is not None
def is_nested_within_func_scope(self) -> bool:
    """Are we underneath a function scope, even if we are in a nested class also?"""
    return any(frame is not None for frame in self.locals)
def is_class_scope(self) -> bool:
    """True when directly inside a class body (not inside one of its methods)."""
    if self.is_func_scope():
        return False
    return self.type is not None
def is_module_scope(self) -> bool:
    """True at module top level (neither class nor function scope)."""
    return not self.is_class_scope() and not self.is_func_scope()
def current_symbol_kind(self) -> int:
    """Return the symbol kind (MDEF/LDEF/GDEF) matching the active scope."""
    if self.is_class_scope():
        return MDEF
    if self.is_func_scope():
        return LDEF
    return GDEF
def current_symbol_table(self) -> SymbolTable:
    """Return the symbol table that new definitions are currently added to."""
    if self.is_func_scope():
        frame = self.locals[-1]
        assert frame is not None
        return frame
    if self.type is not None:
        return self.type.names
    return self.globals
def is_global_or_nonlocal(self, name: str) -> bool:
    """Was 'name' declared global or nonlocal in the current function scope?"""
    if not self.is_func_scope():
        return False
    return name in self.global_decls[-1] or name in self.nonlocal_decls[-1]
def add_exports(self, exp_or_exps: Union[Iterable[Expression], Expression]) -> None:
    """Record string-literal expressions (e.g. from __all__) as module exports."""
    if isinstance(exp_or_exps, Expression):
        exp_or_exps = [exp_or_exps]
    for exp in exp_or_exps:
        # Only plain string literals are recorded; other expressions are ignored.
        if isinstance(exp, StrExpr):
            self.all_exports.append(exp.value)
def check_no_global(self,
                    name: str,
                    ctx: Context,
                    is_overloaded_func: bool = False) -> None:
    """Report an error if 'name' is already defined at module level.

    Overloaded functions get specialized messages about nonconsecutive or
    missing '@overload' parts.
    """
    if name not in self.globals:
        return
    # Bug fix: self.globals maps names to SymbolTableNode wrappers, so the
    # previous `isinstance(self.globals[name], OverloadedFuncDef)` could
    # never be true; the wrapped .node must be inspected instead.
    prev_is_overloaded = isinstance(self.globals[name].node, OverloadedFuncDef)
    if is_overloaded_func and prev_is_overloaded:
        self.fail("Nonconsecutive overload {} found".format(name), ctx)
    elif prev_is_overloaded:
        self.fail("Definition of '{}' missing 'overload'".format(name), ctx)
    else:
        self.name_already_defined(name, ctx, self.globals[name])
def name_not_defined(self, name: str, ctx: Context, namespace: Optional[str] = None) -> None:
    """Report an undefined name, or defer if its namespace may still be filled in.

    Besides the main error, this may emit extra hints: an obsolete-name
    suggestion, a missing-test-fixture note, and a "forgot to import"
    suggestion for well-known names.
    """
    if self.is_incomplete_namespace(namespace or self.cur_mod_id):
        # Target namespace is incomplete, so it's possible that the name will be defined
        # later on. Defer current target.
        self.record_incomplete_ref()
        return
    message = "Name '{}' is not defined".format(name)
    extra = self.undefined_name_extra_info(name)
    if extra:
        message += ' {}'.format(extra)
    self.fail(message, ctx)
    self.check_for_obsolete_short_name(name, ctx)
    if 'builtins.{}'.format(name) in SUGGESTED_TEST_FIXTURES:
        # The user probably has a missing definition in a test fixture. Let's verify.
        fullname = 'builtins.{}'.format(name)
        if self.lookup_fully_qualified_or_none(fullname) is None:
            # Yes. Generate a helpful note.
            self.add_fixture_note(fullname, ctx)
    modules_with_unimported_hints = {
        name.split('.', 1)[0]
        for name in TYPES_FOR_UNIMPORTED_HINTS
    }
    # Map lowercased candidate fullnames to their canonical spelling so the
    # hint works even when the user got the capitalization wrong.
    lowercased = {
        name.lower(): name
        for name in TYPES_FOR_UNIMPORTED_HINTS
    }
    for module in modules_with_unimported_hints:
        fullname = '{}.{}'.format(module, name).lower()
        if fullname not in lowercased:
            continue
        # User probably forgot to import these types.
        hint = (
            'Did you forget to import it from "{module}"?'
            ' (Suggestion: "from {module} import {name}")'
        ).format(module=module, name=lowercased[fullname].rsplit('.', 1)[-1])
        self.note(hint, ctx)
def check_for_obsolete_short_name(self, name: str, ctx: Context) -> None:
    """Suggest the new spelling when 'name' uniquely matches an obsolete name."""
    # NOTE(review): 'matches' compares un-lowercased obsolete names against
    # a lowercased set, so the exclusion below only takes effect for names
    # that are already all-lowercase — confirm whether that is intended.
    lowercased_names_handled_by_unimported_hints = {
        name.lower() for name in TYPES_FOR_UNIMPORTED_HINTS
    }
    matches = [
        obsolete_name for obsolete_name in obsolete_name_mapping
        if obsolete_name.rsplit('.', 1)[-1] == name
        and obsolete_name not in lowercased_names_handled_by_unimported_hints
    ]
    # Only hint when the short name is unambiguous.
    if len(matches) == 1:
        self.note("(Did you mean '{}'?)".format(obsolete_name_mapping[matches[0]]), ctx)
def already_defined(self,
                    name: str,
                    ctx: Context,
                    original_ctx: Optional[Union[SymbolTableNode, SymbolNode]],
                    noun: str) -> None:
    """Report a duplicate definition, pointing at the original when possible.

    'noun' is the error-message subject ('Name' or 'Attribute').
    """
    # Normalize original_ctx to the underlying SymbolNode (or None).
    if isinstance(original_ctx, SymbolTableNode):
        node = original_ctx.node  # type: Optional[SymbolNode]
    elif isinstance(original_ctx, SymbolNode):
        node = original_ctx
    else:
        node = None
    if isinstance(original_ctx, SymbolTableNode) and isinstance(original_ctx.node, MypyFile):
        # Since this is an import, original_ctx.node points to the module definition.
        # Therefore its line number is always 1, which is not useful for this
        # error message.
        extra_msg = ' (by an import)'
    elif node and node.line != -1 and self.is_local_name(node.fullname()):
        # TODO: Using previous symbol node may give wrong line. We should use
        #       the line number where the binding was established instead.
        extra_msg = ' on line {}'.format(node.line)
    else:
        extra_msg = ' (possibly by an import)'
    self.fail("{} '{}' already defined{}".format(noun, unmangle(name), extra_msg), ctx)
def name_already_defined(self,
                         name: str,
                         ctx: Context,
                         original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
                         ) -> None:
    """Report a duplicate name definition (delegates to already_defined)."""
    self.already_defined(name, ctx, original_ctx, noun='Name')
def attribute_already_defined(self,
                              name: str,
                              ctx: Context,
                              original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
                              ) -> None:
    """Report a duplicate attribute definition (delegates to already_defined)."""
    self.already_defined(name, ctx, original_ctx, noun='Attribute')
def is_local_name(self, name: str) -> bool:
    """Does name look like reference to a definition in the current module?"""
    return '.' not in name or self.is_defined_in_current_module(name)
def fail(self,
         msg: str,
         ctx: Context,
         serious: bool = False, *,
         blocker: bool = False) -> None:
    """Report an error at the given context.

    Non-serious errors are suppressed inside dynamically typed (unchecked)
    functions unless check_untyped_defs is enabled.
    """
    if (not serious and
            not self.options.check_untyped_defs and
            self.function_stack and
            self.function_stack[-1].is_dynamic()):
        return
    # In case it's a bug and we don't really have context
    assert ctx is not None, msg
    self.errors.report(ctx.get_line(), ctx.get_column(), msg, blocker=blocker)
def fail_blocker(self, msg: str, ctx: Context) -> None:
    """Report an error that blocks any further processing of this file."""
    self.fail(msg, ctx, blocker=True)
def note(self, msg: str, ctx: Context) -> None:
    """Report a note; suppressed inside unchecked functions, mirroring fail()."""
    if (not self.options.check_untyped_defs and
            self.function_stack and
            self.function_stack[-1].is_dynamic()):
        return
    self.errors.report(ctx.get_line(), ctx.get_column(), msg, severity='note')
def undefined_name_extra_info(self, fullname: str) -> Optional[str]:
    """Return a rename hint when 'fullname' is a known obsolete name, else None."""
    replacement = obsolete_name_mapping.get(fullname)
    if replacement is None:
        return None
    return "(it's now called '{}')".format(replacement)
def accept(self, node: Node) -> None:
    """Visit a node, converting any internal crash into a readable error report."""
    try:
        node.accept(self)
    except Exception as err:
        # Broad catch is deliberate: any analyzer crash becomes a reported
        # internal error instead of taking down the whole run silently.
        report_internal_error(err, self.errors.file, node.line, self.errors, self.options)
def expr_to_analyzed_type(self,
                          expr: Expression,
                          report_invalid_types: bool = True,
                          allow_placeholder: bool = False) -> Optional[Type]:
    """Translate an expression into a semantically analyzed type.

    Call expressions are only valid if they define a named tuple; in that
    case a TupleType is returned. Returns None (and defers) when the named
    tuple's TypeInfo is not ready yet.

    Raises:
        TypeTranslationError: if a call expression is not a valid namedtuple.
    """
    if isinstance(expr, CallExpr):
        expr.accept(self)
        is_named_tuple, info = self.named_tuple_analyzer.check_namedtuple(expr, None,
                                                                          self.is_func_scope())
        if not is_named_tuple:
            # Some form of namedtuple is the only valid type that looks like a call
            # expression. This isn't a valid type.
            raise TypeTranslationError()
        elif not info:
            # Named tuple recognized, but its TypeInfo isn't ready yet.
            self.defer()
            return None
        assert info.tuple_type, "NamedTuple without tuple type"
        fallback = Instance(info, [])
        return TupleType(info.tuple_type.items, fallback=fallback)
    typ = expr_to_unanalyzed_type(expr)
    return self.anal_type(typ, report_invalid_types=report_invalid_types,
                          allow_placeholder=allow_placeholder)
def analyze_type_expr(self, expr: Expression) -> None:
    """Semantically analyze an expression that appears only in a type position.

    mypy itself treats such expressions (alias indexes, base-class
    expressions) purely as types, but external AST consumers (e.g. mypyc)
    may need them analyzed as ordinary expressions as well. Analysis runs
    in a fresh type-variable scope to suppress spurious errors about type
    variable usage.
    """
    with self.tvar_scope_frame(TypeVarScope()):
        expr.accept(self)
def type_analyzer(self, *,
                  tvar_scope: Optional[TypeVarScope] = None,
                  allow_tuple_literal: bool = False,
                  allow_unbound_tvars: bool = False,
                  allow_placeholder: bool = False,
                  report_invalid_types: bool = True) -> TypeAnalyser:
    """Construct a TypeAnalyser configured for the current analysis context.

    Defaults to the analyzer's active type variable scope when none is
    passed explicitly.
    """
    if tvar_scope is None:
        tvar_scope = self.tvar_scope
    tpan = TypeAnalyser(self,
                        tvar_scope,
                        self.plugin,
                        self.options,
                        self.is_typeshed_stub_file,
                        allow_unbound_tvars=allow_unbound_tvars,
                        allow_tuple_literal=allow_tuple_literal,
                        report_invalid_types=report_invalid_types,
                        allow_unnormalized=self.is_stub_file,
                        allow_placeholder=allow_placeholder)
    # Propagate context flags the type analyzer can't derive on its own.
    tpan.in_dynamic_func = bool(self.function_stack and self.function_stack[-1].is_dynamic())
    tpan.global_scope = not self.type and not self.function_stack
    return tpan
def anal_type(self,
              typ: Type, *,
              tvar_scope: Optional[TypeVarScope] = None,
              allow_tuple_literal: bool = False,
              allow_unbound_tvars: bool = False,
              allow_placeholder: bool = False,
              report_invalid_types: bool = True,
              third_pass: bool = False) -> Optional[Type]:
    """Semantically analyze a type.
    Return None only if some part of the type couldn't be bound *and* it referred
    to an incomplete namespace. In case of other errors, report an error message
    and return AnyType.
    """
    a = self.type_analyzer(tvar_scope=tvar_scope,
                           allow_unbound_tvars=allow_unbound_tvars,
                           allow_tuple_literal=allow_tuple_literal,
                           allow_placeholder=allow_placeholder,
                           report_invalid_types=report_invalid_types)
    # Snapshot the incomplete-reference counter so we can tell whether this
    # particular type hit something that isn't defined yet.
    tag = self.track_incomplete_refs()
    typ = typ.accept(a)
    if self.found_incomplete_ref(tag):
        # Something could not be bound yet.
        return None
    # Record aliases used so fine-grained mode can re-check dependent targets.
    self.add_type_alias_deps(a.aliases_used)
    return typ
def class_type(self, self_type: Type) -> Type:
    """Wrap an instance type into the corresponding Type[...] (class object) type."""
    return TypeType.make_normalized(self_type)
def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
    """Queue a callback to run after semantic analysis, ordered by priority."""
    entry = (priority, patch)
    self.patches.append(entry)
def report_hang(self) -> None:
    """Report that semantic analysis failed to reach a fixed point."""
    message = 'Internal error: maximum semantic analysis iteration count reached'
    # No meaningful source location exists for this, hence line/column -1.
    self.errors.report(-1, -1, message, blocker=True)
def add_plugin_dependency(self, trigger: str, target: Optional[str] = None) -> None:
    """Add a plugin-generated dependency from 'trigger' to 'target'.

    If the target is not given explicitly, the current scope's target is used.
    """
    if target is None:
        target = self.scope.current_target()
    deps = self.cur_mod_node.plugin_deps
    deps.setdefault(trigger, set()).add(target)
def add_type_alias_deps(self,
                        aliases_used: Iterable[str],
                        target: Optional[str] = None) -> None:
    """Record type aliases the current node depends on.

    Used by fine-grained incremental mode to re-check the corresponding
    nodes. If 'target' is None, the current scope's target is used. Does
    nothing for an empty collection, keeping the alias_deps dict small.
    """
    if aliases_used:
        if target is None:
            target = self.scope.current_target()
        self.cur_mod_node.alias_deps[target].update(aliases_used)
def is_mangled_global(self, name: str) -> bool:
    """A global is mangled if at least one renamed (primed) variant exists."""
    first_variant = unmangle(name) + "'"
    return first_variant in self.globals
def is_initial_mangled_global(self, name: str) -> bool:
    """The first renamed variant of a global carries exactly one prime."""
    first_variant = unmangle(name) + "'"
    return name == first_variant
def parse_bool(self, expr: Expression) -> Optional[bool]:
    """Interpret an expression as a literal True/False reference, if possible."""
    if not isinstance(expr, NameExpr):
        return None
    # Anything other than the builtin bool constants yields None.
    return {'builtins.True': True, 'builtins.False': False}.get(expr.fullname)
class HasPlaceholders(TypeQuery[bool]):
    """Type query that is true if any component type is a PlaceholderType."""
    def __init__(self) -> None:
        # 'any' combines sub-results: one placeholder anywhere makes it True.
        super().__init__(any)
    def visit_placeholder_type(self, t: PlaceholderType) -> bool:
        return True
def has_placeholder(typ: Type) -> bool:
    """Check if a type contains any placeholder types (recursively)."""
    query = HasPlaceholders()
    return typ.accept(query)
def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike:
    """Return a copy of 'sig' whose implicit first argument type is 'new'.

    Overloads are rewritten item by item; signatures with no arguments are
    returned unchanged.
    """
    if isinstance(sig, Overloaded):
        items = [cast(CallableType, replace_implicit_first_type(item, new))
                 for item in sig.items()]
        return Overloaded(items)
    assert isinstance(sig, CallableType)
    if not sig.arg_types:
        return sig
    return sig.copy_modified(arg_types=[new] + sig.arg_types[1:])
def refers_to_fullname(node: Expression, fullname: str) -> bool:
    """Is node a name or member expression with the given full name?"""
    if not isinstance(node, RefExpr):
        return False
    if node.fullname == fullname:
        return True
    # Also accept an alias whose target is exactly the named class.
    target = node.node
    return (isinstance(target, TypeAlias)
            and isinstance(target.target, Instance)
            and target.target.type.fullname() == fullname)
def refers_to_class_or_function(node: Expression) -> bool:
    """Does a semantically analyzed node refer to a class or function?"""
    if not isinstance(node, RefExpr):
        return False
    return isinstance(node.node, (TypeInfo, FuncDef, OverloadedFuncDef))
def find_duplicate(list: List[T]) -> Optional[T]:
    """If the list has duplicates, return one of the duplicates.

    Otherwise, return None.
    """
    # Quadratic scan using '==' so unhashable items are supported.
    for index, item in enumerate(list):
        if index and item in list[:index]:
            return item
    return None
def remove_imported_names_from_symtable(names: SymbolTable,
                                        module: str) -> None:
    """Remove all imported names from the symbol table of a module.

    A name is considered imported when its node's fullname prefix differs
    from 'module'. Entries with no node are left untouched.
    """
    # Collect first, delete after — can't mutate 'names' while iterating it.
    removed = []  # type: List[str]
    for name, node in names.items():
        if node.node is None:
            continue
        fullname = node.node.fullname()
        # NOTE(review): if fullname has no '.', rfind() returns -1 and this
        # slice drops the last character rather than yielding an empty
        # prefix — presumably module members always have a dotted fullname;
        # confirm.
        prefix = fullname[:fullname.rfind('.')]
        if prefix != module:
            removed.append(name)
    for name in removed:
        del names[name]
def make_any_non_explicit(t: Type) -> Type:
    """Return a copy of 't' in which every explicit Any is made non-explicit."""
    translator = MakeAnyNonExplicit()
    return t.accept(translator)
class MakeAnyNonExplicit(TypeTranslator):
    """Type translator that rewrites explicit Any types as special-form Any."""
    def visit_any(self, t: AnyType) -> Type:
        if t.type_of_any == TypeOfAny.explicit:
            return t.copy_modified(TypeOfAny.special_form)
        # Non-explicit Any types pass through unchanged.
        return t
def apply_semantic_analyzer_patches(patches: List[Tuple[int, Callable[[], None]]]) -> None:
    """Call patch callbacks in the right order.

    Patches run in ascending priority; the sort is stable, so patches with
    equal priority run in the order they were scheduled. This should happen
    after semantic analyzer pass 3.
    """
    for _priority, patch_func in sorted(patches, key=lambda entry: entry[0]):
        patch_func()
def names_modified_by_assignment(s: AssignmentStmt) -> List[NameExpr]:
    """Return all unqualified (short) names assigned to in an assignment statement."""
    names = []  # type: List[NameExpr]
    for target in s.lvalues:
        names.extend(names_modified_in_lvalue(target))
    return names
def names_modified_in_lvalue(lvalue: Lvalue) -> List[NameExpr]:
    """Return all NameExpr assignment targets in an Lvalue (recursively)."""
    if isinstance(lvalue, NameExpr):
        return [lvalue]
    if isinstance(lvalue, StarExpr):
        # A starred target wraps a single inner lvalue.
        return names_modified_in_lvalue(lvalue.expr)
    if isinstance(lvalue, (ListExpr, TupleExpr)):
        found = []  # type: List[NameExpr]
        for element in lvalue.items:
            found += names_modified_in_lvalue(element)
        return found
    # Attribute/index targets don't bind short names.
    return []
def is_same_var_from_getattr(n1: Optional[SymbolNode], n2: Optional[SymbolNode]) -> bool:
    """Do n1 and n2 refer to the same Var derived from module-level __getattr__?"""
    if not (isinstance(n1, Var) and n1.from_module_getattr):
        return False
    if not (isinstance(n2, Var) and n2.from_module_getattr):
        return False
    return n1.fullname() == n2.fullname()
def dummy_context() -> Context:
    """Return a throwaway context node for errors with no real source location."""
    any_type = AnyType(TypeOfAny.special_form)
    return TempNode(any_type)
def is_valid_replacement(old: SymbolTableNode, new: SymbolTableNode) -> bool:
    """Can symbol table node replace an existing one?

    These are the only valid cases:

    1. Placeholder gets replaced with a non-placeholder
    2. Placeholder that isn't known to become type replaced with a
       placeholder that can become a type
    """
    if not isinstance(old.node, PlaceholderNode):
        return False
    if not isinstance(new.node, PlaceholderNode):
        return True
    return new.node.becomes_typeinfo and not old.node.becomes_typeinfo
def is_same_symbol(a: Optional[SymbolNode], b: Optional[SymbolNode]) -> bool:
    """Are the two nodes semantically the same definition?

    Equal nodes, two placeholders, or matching module-__getattr__ Vars all
    count as the same symbol.
    """
    if a == b:
        return True
    if isinstance(a, PlaceholderNode) and isinstance(b, PlaceholderNode):
        return True
    return is_same_var_from_getattr(a, b)
|
"""Multi-agent learning algorithms. Supports each single-agent learning
algorithm playing independently with itself and also supports simplified
action decoding, additive value decomposition (aka VDN), and centralized
value functions. Inheritance structure is:
MultiAgentLearner
-> IndependentQLearner
-> IndependentReinforceLearner
-> IndependentA2CLearner
Independent updates, additive value decomposition updates, and simplified
action decoding updates are implemented as functions for reuse across classes.
Centralized value function updates are implemented within the
IndependentA2CLearner, as it is the only class that uses them.
"""
from abc import ABC, abstractmethod
from enum import Enum
from typing import Optional, TypeVar, Generic
from .actors import PolicyActor
from .single_agent_learners import (
SingleAgentLearner,
QLearner,
ReinforceLearner,
A2CLearner,
ac_actor_update,
ac_critic_update,
)
T = TypeVar("T", QLearner, ReinforceLearner, A2CLearner)
class Sads(Enum):
    """Simplified action decoding (SAD) variants (see sad_transform).

    sad: append Alice's counterfactual greedy action to Bob's infostate.
    bsad: append a boolean flag saying whether Alice explored.
    asad: append the greedy action only when Alice explored.
    psad: private-information variant (handled in sad_transform).
    """
    sad = "sad"
    bsad = "bsad"
    asad = "asad"
    psad = "psad"
class MultiAgentLearner(ABC, Generic[T]):
    def __init__(self, alice: T, bob: T, sad: Optional[Sads]):
        """Base class for independent learning

        Args:
            alice: Player 1
            bob: Player 2
            sad: Which simplified action decoding variant to use (or none)

        Attributes:
            alice (T): Player 1
            bob (T): Player 2
            sad (Optional[Sads]): Which simplified action decoding variant
                to use (or none)
            num_episodes (int): The number of episodes to train for
            num_evals (int): The number of evaluations to do
            eval_schedule (tuple): Episode indices on which to do evaluations
        """
        self.alice: T = alice
        self.bob: T = bob
        self.sad = sad
        # Both players must share the same training/evaluation schedule.
        assert alice.num_episodes == bob.num_episodes
        self.num_episodes = alice.num_episodes
        assert alice.num_evals == bob.num_evals
        self.num_evals = alice.num_evals
        self.eval_schedule = alice.eval_schedule
    def act(self, context: tuple, num_legal_actions: int, train: bool) -> int:
        """Return the acting player's choice of action

        Requires parsing the information state from the game context:
        Alice acts on a two-event context, Bob on a three-event context.

        Args:
            context: Tuple of events that have occured so far
            num_legal_actions: Number of legal actions for acting player
            train: Whether learner is training (or evaluating)

        Raises:
            ValueError: If the context length matches neither player's turn.
        """
        if len(context) == 2:
            info_state = context[0]
            return self.alice.act(info_state, num_legal_actions, train)
        if len(context) == 3:
            info_state = sad_transform(self.alice, context, self.sad)
            return self.bob.act(info_state, num_legal_actions, train)
        # Bug fix: was `raise (Exception)` — raise a specific, informative
        # exception instead (still caught by any `except Exception`).
        raise ValueError("Unexpected context length: {}".format(len(context)))
    def update_rates(self) -> None:
        """Update learning and exploration rates for learners"""
        self.alice.update_rates()
        self.bob.update_rates()
    @abstractmethod
    def update_from_episode(self, episode: list) -> None:
        """Update learners from episode

        Args:
            episode: List of cards, actions and the payoffs
        """
class IndependentQLearner(MultiAgentLearner[QLearner]):
    def __init__(
        self, alice: QLearner, bob: QLearner, sad: Optional[Sads], avd: bool,
    ) -> None:
        """Independent Q-learning
        Args:
            See base class
            avd: Whether to use additive value decomposition (aka VDN)
        Attributes:
            See base class
            avd (bool): Whether to use additive value decomposition (aka VDN)
        """
        super().__init__(alice, bob, sad)
        self.avd = avd
    def update_from_episode(self, episode: list) -> None:
        """Update both learners from episode
        Args:
            See base class
        """
        if self.avd:
            c1, c2, a1, a2, payoff = episode
            alice_is_ = (c1, a1)
            bob_is_ = sad_transform(self.alice, tuple(episode), self.sad)
            # AVD requires all agents to act at every time step. For turn-based
            # games, this means that the non-acting agents must take a "no-op"
            # action (ie one that doesn't do anything). Alice/Bob did not see
            # the infostates at which they had no-ops during the episode so we
            # add them here.
            self.bob.actor.params.add_state(c2, 1)
            self.alice.actor.params.add_state(alice_is_, 1)
            # First joint transition: Alice acts (a1), Bob no-ops (action 0),
            # reward 0, both move to their next infostates; non-terminal.
            avd_update(
                self.alice, self.bob, c1, c2, a1, 0, 0, alice_is_, bob_is_, False
            )
            # Second joint transition: Alice no-ops, Bob acts (a2), the
            # episode payoff is the reward; terminal (no next infostates).
            avd_update(
                self.alice,
                self.bob,
                alice_is_,
                bob_is_,
                0,
                a2,
                payoff,
                None,
                None,
                True,
            )
        else:
            independent_updates_from_episode(self.alice, self.bob, self.sad, episode)
class IndependentReinforceLearner(MultiAgentLearner[ReinforceLearner]):
    """Independent REINFORCE learning.

    Args:
        See base class

    Attributes:
        See base class
    """
    def __init__(
        self, alice: ReinforceLearner, bob: ReinforceLearner, sad: Optional[Sads],
    ) -> None:
        super().__init__(alice, bob, sad)
    def update_from_episode(self, episode: list) -> None:
        """Update each learner independently from one episode.

        Args:
            See base class
        """
        independent_updates_from_episode(self.alice, self.bob, self.sad, episode)
class IndependentA2CLearner(MultiAgentLearner[A2CLearner]):
    def __init__(
        self,
        alice: A2CLearner,
        bob: A2CLearner,
        sad: Optional[Sads],
        use_central_critic: bool,
    ) -> None:
        """Independent advantage actor critic learning
        Alice's critic and Bob's critic must be the same.
        Args:
            See base class
            use_central_critic: Whether to use central critic
        Attributes:
            See base class
            use_central_critic (bool): Whether to use central critic
        """
        super().__init__(alice, bob, sad)
        self.use_central_critic = use_central_critic
        if use_central_critic:
            # Identity check (is): the two learners must share one critic object.
            assert alice.critic is bob.critic
            self.critic = alice.critic
    def update_from_episode(self, episode: list) -> None:
        """Update both learners from episode
        Args:
            See base class
        """
        if self.use_central_critic:
            c1, c2, a1, a2, payoff = episode
            p1_info_state = c1
            p2_info_state = sad_transform(self.alice, tuple(episode), self.sad)
            # The central critic sees full information; each actor only its
            # own information state.
            full_info1 = (c1, c2)
            full_info2 = (c1, c2, a1)
            ac_actor_update(
                self.alice.actor, self.critic, p1_info_state, full_info1, a1
            )
            # Critic update for the first transition (reward 0 mid-episode).
            ac_critic_update(
                self.bob.actor,
                self.critic,
                full_info1,
                a1,
                0,
                full_info2,
                p2_info_state,
            )
            ac_actor_update(self.bob.actor, self.critic, p2_info_state, full_info2, a2)
            # Terminal critic update toward the episode payoff.
            self.critic.update_params(full_info2, a2, payoff)
        else:
            independent_updates_from_episode(self.alice, self.bob, self.sad, episode)
def independent_updates_from_episode(
    alice: SingleAgentLearner,
    bob: SingleAgentLearner,
    sad: Optional[Sads],
    episode: list,
) -> None:
    """Apply one independent (per-agent) update from a single episode.

    Args:
        alice: Player 1
        bob: Player 2
        sad: Which simplified action decoding variant to use (or none)
        episode: list cards, actions, and payoffs
    """
    c1, _c2, a1, a2, payoff = episode
    bob_info_state = sad_transform(alice, tuple(episode), sad)
    alice.update_from_episode([(c1, a1, payoff)])
    bob.update_from_episode([(bob_info_state, a2, payoff)])
def avd_update(
    alice: QLearner,
    bob: QLearner,
    alice_is: tuple,
    bob_is: tuple,
    alice_a: int,
    bob_a: int,
    r: float,
    alice_is_: Optional[tuple],
    bob_is_: Optional[tuple],
    is_done: bool,
) -> None:
    """Perform additive value decomposition updates on Alice and Bob
    AVD updates a joint Q-function parameterized by additive decomposition
    Q(s_1, s_2, a_1, a_2) = Q(s_1, a_1) + Q(s_2, a_2)
    Args:
        alice: Player 1
        bob: Player 2
        alice_is: Alice's information state
        bob_is: Bob's information state
        alice_a: Alice's action
        bob_a: Bob's action
        r: The reward
        alice_is_: Alice's next information state
        bob_is_: Bob's next information state
        is_done: Whether the transition was terminal
    """
    # Current per-player Q-values of the chosen joint action.
    alice_q1 = alice.actor.params.vals[alice_is][alice_a]
    bob_q1 = bob.actor.params.vals[bob_is][bob_a]
    if is_done:
        q_next = 0
    else:
        # Bootstrapped joint next-state value is the sum of each player's
        # greedy Q-value (the additive decomposition).
        q_next = (
            alice.actor.params.vals[alice_is_].max()
            + bob.actor.params.vals[bob_is_].max()
        )
    # Each player's target subtracts the *other* player's current Q so the
    # sum of the two components moves toward r + q_next.
    alice.actor.params.update_params(alice_is, alice_a, r + q_next - bob_q1)
    bob.actor.params.update_params(bob_is, bob_a, r + q_next - alice_q1)
def sad_transform(
    alice: SingleAgentLearner, context: tuple, sad: Optional[Sads]
) -> tuple:
    """Transform the information state using SAD variant (or don't)

    Args:
        alice: Player 1
        context: The events of the game
        sad: Which simplified action decoding variant to use (or none)
    Returns:
        Player 2's information state
    """
    c1, c2, a1 = context[:3]
    base = (c2, a1)
    # Without a SAD variant, Bob only sees his card and Alice's action.
    if not sad:
        return base
    greedy = alice.actor.act_greedily(c1)
    did_explore = a1 != greedy
    variant = sad.value
    signal: Optional[int] = None
    if variant == "sad":
        # SAD: always attach Alice's counterfactual greedy action.
        signal = greedy
    elif variant == "bsad":
        # Binary SAD: attach a boolean "did Alice explore?" flag.
        signal = did_explore
    elif variant == "asad" and did_explore:
        # Action SAD: attach the greedy action only when Alice explored.
        signal = greedy
    elif variant == "psad" and did_explore:
        # Private SAD: attach Alice's private card only when she explored.
        signal = c1
    return base + (signal,)
|
import logging
import time
from collections import defaultdict
from os.path import join as joinpath
from typing import Dict, List, Optional
import numpy as np
from monty.json import MSONable
from monty.serialization import dumpfn
from pymatgen import Spin, Structure
from tabulate import tabulate
from amset.constants import bohr_to_cm, boltzmann_au, cm_to_bohr
from amset.constants import defaults as defaults
from amset.constants import ev_to_hartree, hartree_to_ev, spin_name
from amset.electronic_structure.common import get_angstrom_structure
from amset.electronic_structure.dos import FermiDos
from amset.electronic_structure.fd import dfdde
from amset.electronic_structure.tetrahedron import TetrahedralBandStructure
from amset.interpolation.momentum import MRTACalculator
from amset.io import write_mesh
from amset.log import log_list, log_time_taken
from amset.util import cast_dict_list, groupby, tensor_average
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Module-level logger and a compact format string for printing k-points.
logger = logging.getLogger(__name__)
_kpt_str = "[{k[0]:.5f} {k[1]:.5f} {k[2]:.5f}]"
class AmsetData(MSONable):
    """Holds the electronic structure, Fermi level, scattering rate and
    transport data for an AMSET calculation. Energies are in atomic units
    (Hartree) internally; output conversions happen in to_dict/to_data."""

    def __init__(
        self,
        structure: Structure,
        energies: Dict[Spin, np.ndarray],
        vvelocities_product: Dict[Spin, np.ndarray],
        velocities: Dict[Spin, np.ndarray],
        kpoint_mesh: np.ndarray,
        kpoints: np.ndarray,
        ir_kpoints_idx: np.ndarray,
        ir_to_full_kpoint_mapping: np.ndarray,
        tetrahedra: np.ndarray,
        ir_tetrahedra_info: np.ndarray,
        efermi: float,
        num_electrons: float,
        is_metal: bool,
        soc: bool,
        vb_idx: Optional[Dict[Spin, int]] = None,
    ):
        self.structure = structure
        # NOTE: the attribute name drops one "v" relative to the argument.
        self.velocities_product = vvelocities_product
        self.kpoint_mesh = kpoint_mesh
        self.intrinsic_fermi_level = efermi
        self.ir_to_full_kpoint_mapping = ir_to_full_kpoint_mapping
        self._soc = soc
        self.num_electrons = num_electrons
        self.is_metal = is_metal
        self.vb_idx = vb_idx
        self.spins = list(energies.keys())
        # Velocities are stored with the last two axes swapped.
        self.velocities = {s: v.transpose((0, 2, 1)) for s, v in velocities.items()}
        # The attributes below are populated later by calculate_dos,
        # set_doping_and_temperatures, set_scattering_rates,
        # calculate_fd_cutoffs and set_transport_properties.
        self.dos = None
        self.scattering_rates = None
        self.scattering_labels = None
        self.doping = None
        self.temperatures = None
        self.fermi_levels = None
        self.electron_conc = None
        self.hole_conc = None
        self.conductivity = None
        self.seebeck = None
        self.electronic_thermal_conductivity = None
        self.mobility = None
        self.overlap_calculator = None
        self.mrta_calculator = None
        self.fd_cutoffs = None
        # Group the full k-point indices by their irreducible representative.
        self.grouped_ir_to_full = groupby(
            np.arange(len(kpoints)), ir_to_full_kpoint_mapping
        )
        self.tetrahedral_band_structure = TetrahedralBandStructure.from_data(
            energies,
            kpoints,
            tetrahedra,
            structure,
            ir_kpoints_idx,
            ir_to_full_kpoint_mapping,
            *ir_tetrahedra_info
        )
        logger.info("Initializing momentum relaxation time factor calculator")
        self.mrta_calculator = MRTACalculator.from_data(kpoints, self.velocities)
@property
def energies(self):
    """Band energies per spin, as stored on the tetrahedral band structure."""
    return self.tetrahedral_band_structure.energies
@property
def kpoints(self):
    """Full k-point list, as stored on the tetrahedral band structure."""
    return self.tetrahedral_band_structure.kpoints
@property
def ir_kpoints(self):
    """The irreducible subset of k-points (full list indexed by ir indices)."""
    return self.kpoints[self.tetrahedral_band_structure.ir_kpoints_idx]
@property
def ir_kpoints_idx(self):
    """Indices of the irreducible k-points within the full k-point list."""
    return self.tetrahedral_band_structure.ir_kpoints_idx
def set_overlap_calculator(self, overlap_calculator):
    """Attach an overlap calculator after validating its band counts.

    Raises:
        RuntimeError: If the calculator's number of bands does not match.
    """
    if overlap_calculator is not None and not check_nbands_equal(
        overlap_calculator, self
    ):
        raise RuntimeError(
            "Overlap calculator does not have the correct number of bands\n"
            "If using wavefunction coefficients, ensure they were generated "
            "using the same energy_cutoff (not encut)"
        )
    self.overlap_calculator = overlap_calculator
def calculate_dos(
    self,
    estep: float = defaults["dos_estep"],
    progress_bar: bool = defaults["print_log"],
):
    """Calculate the density of states on a uniform grid and store a FermiDos.

    Args:
        estep: The DOS energy step in eV, where smaller numbers give more
            accuracy but are more expensive.
        progress_bar: Show a progress bar for DOS calculation.
    """
    # Energy window spanning all bands and spins (internal units: Hartree).
    emin = np.min([np.min(spin_eners) for spin_eners in self.energies.values()])
    emax = np.max([np.max(spin_eners) for spin_eners in self.energies.values()])
    # estep is given in eV, so convert before computing the grid size.
    epoints = int(round((emax - emin) / (estep * ev_to_hartree)))
    energies = np.linspace(emin, emax, epoints)
    # Each state holds two electrons unless spin is resolved explicitly
    # (spin-orbit coupling or two spin channels).
    dos_weight = 1 if self._soc or len(self.spins) == 2 else 2
    logger.info("DOS parameters:")
    log_list(
        [
            "emin: {:.2f} eV".format(emin * hartree_to_ev),
            "emax: {:.2f} eV".format(emax * hartree_to_ev),
            "dos weight: {}".format(dos_weight),
            "n points: {}".format(epoints),
        ]
    )
    logger.info("Generating tetrahedral DOS:")
    t0 = time.perf_counter()
    emesh, dos = self.tetrahedral_band_structure.get_density_of_states(
        energies=energies, progress_bar=progress_bar
    )
    log_time_taken(t0)
    # For metals, the electron count is needed to locate the Fermi level.
    num_electrons = self.num_electrons if self.is_metal else None
    self.dos = FermiDos(
        self.intrinsic_fermi_level,
        emesh,
        dos,
        self.structure,
        atomic_units=True,
        dos_weight=dos_weight,
        num_electrons=num_electrons,
    )
def set_doping_and_temperatures(self, doping: np.ndarray, temperatures: np.ndarray):
    """Set carrier concentrations/temperatures and solve the Fermi level at each (n, T).

    Args:
        doping: Carrier concentrations in cm^-3, or None for metallic systems
            (the intrinsic Fermi level is used instead).
        temperatures: Temperatures in Kelvin.
    Raises:
        RuntimeError: If the DOS has not been calculated yet.
        ValueError: If the Fermi level cannot be located at any tolerance.
    """
    # Explicit None check: relying on FermiDos truthiness is fragile.
    if self.dos is None:
        raise RuntimeError(
            "The DOS should be calculated (AmsetData.calculate_dos) before "
            "setting doping levels."
        )
    if doping is None:
        # Generally this is for metallic systems; here we use the intrinsic
        # Fermi level. BUG FIX: the original set self.doping to the list [0]
        # but kept using the None `doping` parameter below, so len(doping)
        # and doping[n] raised TypeError. Use a concrete array for both.
        doping = np.zeros(1)
        self.doping = doping
    else:
        # Convert cm^-3 -> bohr^-3 for internal use.
        self.doping = doping * (1 / cm_to_bohr) ** 3
    self.temperatures = temperatures
    n_dope = len(self.doping)
    n_temp = len(temperatures)
    self.fermi_levels = np.zeros((n_dope, n_temp))
    self.electron_conc = np.zeros((n_dope, n_temp))
    self.hole_conc = np.zeros((n_dope, n_temp))
    fermi_level_info = []
    tols = np.logspace(-5, 0, 6)
    for n, t in np.ndindex(self.fermi_levels.shape):
        for i, tol in enumerate(tols):
            # Finding the Fermi level is quite fickle. Enumerate multiple
            # tolerances and use the first one that works!
            try:
                if self.doping[n] == 0:
                    # Zero doping marks the metallic/intrinsic case.
                    self.fermi_levels[n, t] = self.dos.get_fermi_from_num_electrons(
                        self.num_electrons,
                        temperatures[t],
                        tol=tol / 1000,
                        precision=10,
                    )
                else:
                    (
                        self.fermi_levels[n, t],
                        self.electron_conc[n, t],
                        self.hole_conc[n, t],
                    ) = self.dos.get_fermi(
                        self.doping[n],
                        temperatures[t],
                        tol=tol,
                        precision=10,
                        return_electron_hole_conc=True,
                    )
                break
            except ValueError:
                if i == len(tols) - 1:
                    raise ValueError(
                        "Could not calculate Fermi level position."
                        "Try a denser k-point mesh."
                    )
        # Report concentrations in the user's cm^-3 units.
        fermi_level_info.append(
            (doping[n], temperatures[t], self.fermi_levels[n, t] * hartree_to_ev)
        )
    table = tabulate(
        fermi_level_info,
        headers=("conc [cm⁻³]", "temp [K]", "E_fermi [eV]"),
        numalign="right",
        stralign="center",
        floatfmt=(".2e", ".1f", ".4f"),
    )
    logger.info("Calculated Fermi levels:")
    logger.info(table)
def calculate_fd_cutoffs(
    self,
    fd_tolerance: Optional[float] = 0.01,
    cutoff_pad: float = 0.0,
    max_moment: int = 2,
    mobility_rates_only: bool = False,
):
    """Determine the energy window that is relevant for transport.

    Args:
        fd_tolerance: Fraction of the cumulative transport weight allowed
            outside the window (split between both tails). Falsy means the
            full energy range is kept.
        cutoff_pad: Extra padding (Hartree) added to both cutoffs.
        max_moment: Highest moment of (e - Ef) included in the weighting.
        mobility_rates_only: Restrict the window to one side of the gap when
            all doping is of a single carrier type.
    """
    energies = self.dos.energies
    vv = {s: v.transpose((0, 3, 1, 2)) for s, v in self.velocities_product.items()}
    _, vvdos = self.tetrahedral_band_structure.get_density_of_states(
        energies, integrand=vv, sum_spins=True, use_cached_weights=True
    )
    vvdos = tensor_average(vvdos)
    # three fermi integrals govern transport properties:
    #   1. df/de controls conductivity and mobility
    #   2. (e-u) * df/de controls Seebeck
    #   3. (e-u)^2 df/de controls electronic thermal conductivity
    # take the absolute sum of the integrals across all doping and
    # temperatures. this gives us the energies that are important for
    # transport
    if fd_tolerance:

        def get_min_max_cutoff(cumsum):
            # First/last energies where the normalized cumulative weight
            # leaves the fd_tolerance tails.
            min_idx = np.where(cumsum < fd_tolerance / 2)[0].max()
            max_idx = np.where(cumsum > (1 - fd_tolerance / 2))[0].min()
            return energies[min_idx], energies[max_idx]

        min_cutoff = np.inf
        max_cutoff = -np.inf
        for n, t in np.ndindex(self.fermi_levels.shape):
            ef = self.fermi_levels[n, t]
            temp = self.temperatures[t]
            dfde = -dfdde(energies, ef, temp * boltzmann_au)
            for moment in range(max_moment + 1):
                weight = np.abs((energies - ef) ** moment * dfde)
                weight_dos = weight * vvdos
                weight_cumsum = np.cumsum(weight_dos)
                weight_cumsum /= np.max(weight_cumsum)
                cmin, cmax = get_min_max_cutoff(weight_cumsum)
                # Keep the widest window over all (n, T) and moments.
                min_cutoff = min(cmin, min_cutoff)
                max_cutoff = max(cmax, max_cutoff)
    else:
        min_cutoff = energies.min()
        max_cutoff = energies.max()
    if mobility_rates_only:
        vbm = max([self.energies[s][self.vb_idx[s]].max() for s in self.spins])
        cbm = min([self.energies[s][self.vb_idx[s] + 1].min() for s in self.spins])
        mid_gap = (cbm + vbm) / 2
        if np.all(self.doping < 0):
            # only electron mobility so don't calculate valence band rates
            min_cutoff = max(min_cutoff, mid_gap)
        elif np.all(self.doping > 0):
            # BUG FIX: this branch previously re-tested doping < 0 and was
            # therefore unreachable; p-type doping is positive.
            # only hole mobility so don't calculate conduction band rates
            max_cutoff = min(max_cutoff, mid_gap)
    min_cutoff -= cutoff_pad
    max_cutoff += cutoff_pad
    logger.info("Calculated Fermi–Dirac cut-offs:")
    log_list(
        [
            "min: {:.3f} eV".format(min_cutoff * hartree_to_ev),
            "max: {:.3f} eV".format(max_cutoff * hartree_to_ev),
        ]
    )
    self.fd_cutoffs = (min_cutoff, max_cutoff)
def set_scattering_rates(
    self, scattering_rates: Dict[Spin, np.ndarray], scattering_labels: List[str]
):
    """Store scattering rates after validating their shapes per spin.

    Raises:
        ValueError: If the rate arrays do not match the stored dopings,
            temperatures, band structure, or the number of labels.
    """
    n_labels = len(scattering_labels)
    for spin in self.spins:
        spin_rates = scattering_rates[spin]
        # Expected trailing shape: (n_doping, n_temperature, n_bands, n_kpoints).
        expected = (
            len(self.doping),
            len(self.temperatures),
        ) + self.energies[spin].shape
        if spin_rates.shape[1:] != expected:
            raise ValueError(
                "Shape of scattering_type rates array does not match the "
                "number of dopings, temperatures, bands, or kpoints"
            )
        if spin_rates.shape[0] != n_labels:
            raise ValueError(
                "Number of scattering_type rates does not match number of "
                "scattering_type labels"
            )
    self.scattering_rates = scattering_rates
    self.scattering_labels = scattering_labels
def fill_rates_outside_cutoffs(self, fill_value=None):
    """Replace scattering rates outside the Fermi–Dirac cutoffs with a fill value.

    Args:
        fill_value: Value used outside the cutoffs. When None, the geometric
            mean of the rates inside the cutoffs is used per (s, n, t) slice.
    Raises:
        ValueError: If scattering rates have not been set.
    """
    if self.scattering_rates is None:
        raise ValueError("Scattering rates must be set before being filled")
    min_fd, max_fd = self.fd_cutoffs
    snt_fill = fill_value
    for spin, spin_energies in self.energies.items():
        # States outside the energy window contribute negligibly to transport.
        mask = (spin_energies < min_fd) | (spin_energies > max_fd)
        rate_info = defaultdict(list)
        for s, n, t in np.ndindex(self.scattering_rates[spin].shape[:3]):
            if fill_value is None:
                # get average log rate inside cutoffs (i.e. the geometric mean)
                snt_fill = np.log(self.scattering_rates[spin][s, n, t, ~mask])
                snt_fill = np.exp(snt_fill.mean())
            rate_info[self.scattering_labels[s]].append(snt_fill)
            self.scattering_rates[spin][s, n, t, mask] = snt_fill
        if len(self.spins) == 1:
            logger.info("Filling scattering rates [s⁻¹] outside FD cutoffs with:")
        else:
            logger.info(
                "Filling {} scattering rates [s⁻¹] outside FD cutoffs "
                "with:".format(spin_name[spin])
            )
        headers = ["conc [cm⁻³]", "temp [K]"]
        headers += ["{}".format(s) for s in self.scattering_labels]
        rate_table = []
        for i, (n, t) in enumerate(np.ndindex(self.fermi_levels.shape)):
            # Report doping back in cm^-3 for the log table.
            col = [self.doping[n] * (1 / bohr_to_cm) ** 3, self.temperatures[t]]
            col += [rate_info[s][i] for s in self.scattering_labels]
            rate_table.append(col)
        table = tabulate(
            rate_table,
            headers=headers,
            numalign="right",
            stralign="center",
            floatfmt=[".2e", ".1f"] + [".2e"] * len(self.scattering_labels),
        )
        logger.info(table)
def set_transport_properties(
    self,
    conductivity: np.ndarray,
    seebeck: np.ndarray,
    electronic_thermal_conductivity: np.ndarray,
    mobility: Optional[np.ndarray] = None,
):
    """Store the calculated transport tensors on this object.

    Args:
        conductivity: Conductivity tensor per doping/temperature.
        seebeck: Seebeck tensor per doping/temperature.
        electronic_thermal_conductivity: Electronic thermal conductivity tensor.
        mobility: Optional carrier mobilities (keyed by scattering mechanism
            elsewhere in this class — see to_data).
    """
    self.conductivity = conductivity
    self.seebeck = seebeck
    self.electronic_thermal_conductivity = electronic_thermal_conductivity
    self.mobility = mobility
def to_dict(self, include_mesh=defaults["write_mesh"]):
    """Summarise transport results (and optionally per-k-point mesh data) as a dict.

    Args:
        include_mesh: Also include the mesh data under the "mesh" key.
    Returns:
        Dict of transport properties; doping converted back to cm^-3 and
        energies/Fermi levels to eV.
    """
    data = {
        "doping": (self.doping * cm_to_bohr ** 3).round(),
        "temperatures": self.temperatures,
        "fermi_levels": self.fermi_levels * hartree_to_ev,
        "conductivity": self.conductivity,
        "seebeck": self.seebeck,
        "electronic_thermal_conductivity": self.electronic_thermal_conductivity,
        "mobility": self.mobility,
    }
    if include_mesh:
        rates = self.scattering_rates
        energies = self.energies
        vel = self.velocities
        # Restrict mesh quantities to the irreducible k-points only.
        ir_rates = {s: r[..., self.ir_kpoints_idx] for s, r in rates.items()}
        ir_energies = {
            s: e[:, self.ir_kpoints_idx] * hartree_to_ev
            for s, e in energies.items()
        }
        ir_vel = {s: v[:, self.ir_kpoints_idx] for s, v in vel.items()}
        mesh_data = {
            "energies": ir_energies,
            "kpoints": self.kpoints,
            "ir_kpoints": self.ir_kpoints,
            "ir_to_full_kpoint_mapping": self.ir_to_full_kpoint_mapping,
            "efermi": self.intrinsic_fermi_level * hartree_to_ev,
            "vb_idx": self.vb_idx,
            "num_electrons": self.num_electrons,
            # "dos": self.dos, # TODO: Convert dos to eV
            "velocities": ir_vel,  # TODO: convert units
            "scattering_rates": ir_rates,
            "scattering_labels": self.scattering_labels,
            "is_metal": self.is_metal,
            "fd_cutoffs": (
                self.fd_cutoffs[0] * hartree_to_ev,
                self.fd_cutoffs[1] * hartree_to_ev,
            ),
            "structure": get_angstrom_structure(self.structure),
            "soc": self._soc,
            "doping": data["doping"],
            "temperatures": data["temperatures"],
            "fermi_levels": data["fermi_levels"],
        }
        data["mesh"] = mesh_data
    return data
def to_data(self):
    """Flatten the transport tensors into rows for a text/CSV table.

    Returns:
        (data, headers): one row per (doping, temperature) containing the
        upper triangle of each 3x3 tensor, plus matching column headers.
    """
    data = []
    # Upper-triangle indices of a 3x3 tensor: xx, xy, xz, yy, yz, zz.
    triu = np.triu_indices(3)
    for n, t in np.ndindex(len(self.doping), len(self.temperatures)):
        row = [
            self.doping[n] * cm_to_bohr ** 3,
            self.temperatures[t],
            self.fermi_levels[n, t] * hartree_to_ev,
        ]
        row.extend(self.conductivity[n, t][triu])
        row.extend(self.seebeck[n, t][triu])
        row.extend(self.electronic_thermal_conductivity[n, t][triu])
        if self.mobility is not None:
            for mob in self.mobility.values():
                row.extend(mob[n, t][triu])
        data.append(row)
    headers = ["doping[cm^-3]", "temperature[K]", "Fermi_level[eV]"]
    ds = ("xx", "xy", "xz", "yy", "yz", "zz")
    # TODO: confirm unit of kappa
    for prop, unit in [("cond", "S/m"), ("seebeck", "µV/K"), ("kappa", "?")]:
        headers.extend(["{}_{}[{}]".format(prop, d, unit) for d in ds])
    if self.mobility is not None:
        # One header group per scattering mechanism.
        for name in self.mobility.keys():
            headers.extend(["{}_mobility_{}[cm^2/V.s]".format(name, d) for d in ds])
    return data, headers
def to_file(
    self,
    directory: str = ".",
    prefix: Optional[str] = None,
    write_mesh_file: bool = defaults["write_mesh"],
    file_format: str = defaults["file_format"],
    suffix_mesh: bool = True,
):
    """Write the transport results (and optionally the mesh data) to disk.

    Args:
        directory: Output directory.
        prefix: Optional file-name prefix.
        write_mesh_file: Also write "<prefix>mesh<suffix>.h5".
        file_format: One of "json", "yaml", "csv" or "txt".
        suffix_mesh: Append the k-point mesh dimensions to the file name.
    Returns:
        The transport file name, or (transport_filename, mesh_filename)
        when the mesh file is also written.
    Raises:
        ValueError: If transport properties are unset or the format is unknown.
    """
    if self.conductivity is None:
        raise ValueError("Can't write AmsetData, transport properties not set")
    if not prefix:
        prefix = ""
    else:
        prefix += "_"
    if suffix_mesh:
        # e.g. "_10x10x10" for a 10x10x10 k-point mesh.
        suffix = "_{}".format("x".join(map(str, self.kpoint_mesh)))
    else:
        suffix = ""
    if file_format in ["json", "yaml"]:
        data = self.to_dict()
        data = cast_dict_list(data)
        filename = joinpath(
            directory, "{}transport{}.{}".format(prefix, suffix, file_format)
        )
        dumpfn(data, filename, indent=4)
    elif file_format in ["csv", "txt"]:
        # don't write the data as JSON, instead write raw text files
        data, headers = self.to_data()
        filename = joinpath(
            directory, "{}transport{}.{}".format(prefix, suffix, file_format)
        )
        np.savetxt(filename, data, header=" ".join(headers))
    else:
        raise ValueError("Unrecognised output format: {}".format(file_format))
    if write_mesh_file:
        mesh_data = self.to_dict(include_mesh=True)["mesh"]
        mesh_filename = joinpath(directory, "{}mesh{}.h5".format(prefix, suffix))
        write_mesh(mesh_data, filename=mesh_filename)
        return filename, mesh_filename
    else:
        return filename
def check_nbands_equal(interpolator, amset_data):
    """Return whether *interpolator* reports the same band count as
    *amset_data* for every spin channel."""
    return np.all(
        [
            interpolator.nbands[spin] == amset_data.energies[spin].shape[0]
            for spin in amset_data.spins
        ]
    )
|
<reponame>harunpehlivan/shapeshop
"""The main code for:
* creating the training data,
* building and training the neural network model,
* and image generation.
"""
from __future__ import print_function
import numpy as np
import time
from time import sleep
import random
from keras import backend as K
from keras.utils import np_utils
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Convolution2D, Activation, MaxPooling2D, Dropout
import scipy
from scipy import ndimage
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
from helper import *
def preprocess(training_data_indicies):
    """Builds the dataset.

    Args:
        training_data_indicies: an array of 0s and 1s, where 1s indicate
            selected training images to include.
    Returns:
        X: the dataset (with a little additive noise), shuffled.
        Y: the one-hot dataset labels, shuffled consistently with X.
    """
    num_of_pictures = 10
    blank = np.zeros([1, 28, 28])
    num_total_training_images = len(training_data_indicies)
    # One builder per candidate training image, in the same order as the
    # original copy-pasted if-blocks (grid rows 1-3). Each call produces a
    # fresh image from a fresh copy of the blank canvas.
    builders = [
        # row 1
        lambda: boxify_center(np.copy(blank)),
        lambda: boxify_center_hollow(np.copy(blank)),
        lambda: lineify_center(np.copy(blank)),
        lambda: lineify_center_horizontal(np.copy(blank)),
        lambda: circleify_center(np.copy(blank)),
        lambda: circleify_center_hollow(np.copy(blank)),
        lambda: triangulify_center(np.copy(blank)),
        lambda: triangulify_center_hollow(np.copy(blank)),
        # row 2
        lambda: boxify_top_left(np.copy(blank)),
        lambda: boxify_bottom_right(np.copy(blank)),
        lambda: lineify_top_left(np.copy(blank)),
        lambda: lineify_bottom_right(np.copy(blank)),
        lambda: circleify_top_left(np.copy(blank)),
        lambda: circleify_bottom_right(np.copy(blank)),
        lambda: triangulify_top_left(np.copy(blank)),
        lambda: triangulify_bottom_right(np.copy(blank)),
        # row 3 (these builders take no canvas argument)
        noiseify,
        noiseify_blur,
    ]
    x_data = []
    y_data = []
    # 0 = do not include in training data
    # 1 = include in training data
    for _ in range(num_of_pictures):
        # Labels are compacted to 0..nb_classes-1: the counter only advances
        # for *included* images, matching to_categorical(y, nb_classes) below.
        counter = 0
        for idx, build in enumerate(builders):
            if training_data_indicies[idx % num_total_training_images] == 1:
                x_data.append(build())
                y_data.append(counter)
                counter = counter + 1
    nb_classes = np.sum(training_data_indicies)
    print(nb_classes)
    X_temp = np.array(x_data)
    y_temp = np.array(y_data)
    print(X_temp.shape)
    print(y_temp.shape)
    y_temp_2 = np_utils.to_categorical(y_temp, nb_classes)
    # Shuffle samples and labels with the same permutation; add slight noise
    # so duplicate copies of an image are not byte-identical.
    s = list(range(X_temp.shape[0]))
    random.shuffle(s)
    X = X_temp[s] + np.random.random(X_temp.shape) * 0.01
    Y = y_temp_2[s]
    return X, Y
def build_and_train_model(X, Y, nb_classes, model_type, epoch):
    """Builds and trains the neural network image classifier model.

    Written against the Keras 1.x functional API (Convolution2D,
    border_mode, nb_epoch, Model(input=..., output=...)).

    Args:
        X: the dataset.
        Y: the labels.
        nb_classes: number of classes in the image classifier.
        model_type: delineating between multilayer perceptron ("MLP") and
            convolutional neural network ("CNN").
        epoch: number of epochs for training.
    Returns:
        model: the trained model.
        input_layer: the input layer of the model.
    """
    batch_size = 4
    nb_epoch = epoch
    # Input images are 28x28 with a single channel (channels-first).
    img_rows, img_cols = 28, 28
    WIDTH = 64 * 2
    input_layer = Input(shape=(1, img_rows, img_cols))
    nb_filters = 32
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3)
    print(str(model_type))
    print(type(str(model_type)))
    print(len(str(model_type)))
    if str(model_type).strip() == "MLP":
        m = Flatten()(input_layer)
        m = Dense(WIDTH, activation='tanh')(m)
        m = Dense(WIDTH, activation='tanh')(m)
        m = Dense(nb_classes, activation='softmax')(m)
    if str(model_type).strip() == "CNN":
        m = Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid')(input_layer)
        m = Activation('relu')(m)
        m = Convolution2D(nb_filters, kernel_size[0], kernel_size[1])(m)
        m = Activation('relu')(m)
        m = MaxPooling2D(pool_size=pool_size)(m)
        m = Dropout(0.25)(m)
        m = Flatten()(m)
        m = Dense(128)(m)
        m = Activation('relu')(m)
        m = Dropout(0.5)(m)
        m = Dense(nb_classes)(m)
        m = Activation('softmax')(m)
    # NOTE(review): if model_type is neither "MLP" nor "CNN", `m` is unbound
    # and the next line raises NameError — confirm callers only pass these two.
    model = Model(input=input_layer, output=[m])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    print(model.summary())
    model.fit(X, Y, batch_size=batch_size, nb_epoch=nb_epoch, validation_split=0.2, shuffle=True, verbose=2)
    # presumably lets the fit() console output settle before returning —
    # TODO confirm this pause is still needed
    sleep(0.1)
    return model, input_layer
def draw_images(img_num, model, input_layer, initial_image_indicies, step_size):
    """Performs the class activation maximization image drawing/generation process.

    Args:
        img_num: iterator for drawing multiple images.
        model: the trained model.
        input_layer: the input layer of the trained model.
        initial_image_indicies: specifies which image to initialize the image
            generation process (zeros / ones / random / blurred random).
        step_size: the step_size used for gradient ascent.
    Returns:
        If success: True, the generated image.
        (NOTE(review): the explicit failure return is commented out below, so
        the loop only exits once loss exceeds 0.99 — it can spin forever if
        that threshold is never reached; confirm intended.)
    """
    # we build a loss function: the softmax output for the target class.
    loss = model.output[0, img_num]
    print(loss)
    img_width = 28
    img_height = 28
    # we compute the gradient of the input picture with respect to this loss
    grads = K.gradients(loss, input_layer)[0]
    # normalization trick: we normalize the gradient
    grads = normalize(grads)
    # this function returns the loss and grads given the input picture
    iterate = K.function([input_layer, K.learning_phase()], [loss, grads])
    # create initial image
    if initial_image_indicies[0] == 1:
        input_img_data = np.zeros([1, 1, img_width, img_height])
        print("initial image is zeros")
    if initial_image_indicies[1] == 1:
        input_img_data = np.ones([1, 1, img_width, img_height])
        print("initial image is ones")
    if initial_image_indicies[2] == 1:
        input_img_data = np.random.random((1, 1, img_width, img_height))*1.0
        print("initial image is random")
    if initial_image_indicies[3] == 1:
        input_img_data = ndimage.gaussian_filter(np.random.random((1, 1, img_width, img_height))*1.0, 1)
        print("initial image is random blur")
    # temp_time = time.time()
    # print('Time after initialization:' , temp_time - start_time)
    # we run gradient ascent
    step = step_size
    switched_on = True
    # Learning phase 0 = Keras test mode (disables dropout during iteration).
    L_PHASE = 0
    loss_value = 0.0
    # for idx in range(NUM_ITERS):
    while loss_value <= 0.99:
        # optional for zooming in when not using shapes, off by default
        # (switched_on is always True above, so this branch never runs here).
        if not switched_on:
            # NOTE(review): the w/2, h/2 arithmetic below relies on Python 2
            # integer division; under Python 3 these are float indices.
            image2 = scipy.misc.imresize(input_img_data[0], 2.0).transpose((2, 0, 1))
            d, w, h = image2.shape
            m = np.mean(image2[:, (w/2 - img_width/2):(w/2 + img_width/2),
                               (h/2 - img_height/2):(h/2 + img_height/2)])
            input_img_data[0] = image2[:, (w/2 - img_width/2):(w/2 + img_width/2), (h/2 - img_height/2):(h/2 + img_height/2)]/m
        loss_value, grads_value = iterate([input_img_data, L_PHASE])
        # Gradient-ascent step toward maximizing the class activation.
        input_img_data += grads_value * step
        print('Current loss value:', loss_value, '- Current intensity:', np.mean(input_img_data))
        if loss_value > 0.99:
            # Invert intensities so shapes render dark-on-light.
            img = 1-input_img_data[0, 0]
            loss_value, grads_value = iterate([input_img_data, L_PHASE])
            print('Current loss value:', loss_value, '- Current intensity:', np.mean(input_img_data))
            return True, img # draw an image
    # for debugging
    # if loss_value < 0.99:
    #     print('Current loss value:', loss_value, '- Current intensity:', np.mean(input_img_data))
    #     print('Did not make it to 0.99')
    #     return False # did not draw an image
def compute_error(training_data_indicies, results):
    """Computes the correlation coefficient for generated images.

    Args:
        training_data_indicies: an array of 0s and 1s, where 1s indicate
            selected training images to include.
        results: the generated images.
    Returns:
        errors: absolute Pearson correlation coefficient for each generated
            image against its source training image.
        training_data_indicies_nonzero: indices of the selected images.
    """
    # Rebuild the full candidate image grid in the same order as preprocess.
    x_data = []
    blank = np.zeros([1, 28, 28])
    # row 1
    x_data.append(boxify_center(np.copy(blank)))
    x_data.append(boxify_center_hollow(np.copy(blank)))
    x_data.append(lineify_center(np.copy(blank)))
    x_data.append(lineify_center_horizontal(np.copy(blank)))
    x_data.append(circleify_center(np.copy(blank)))
    x_data.append(circleify_center_hollow(np.copy(blank)))
    x_data.append(triangulify_center(np.copy(blank)))
    x_data.append(triangulify_center_hollow(np.copy(blank)))
    # row 2
    x_data.append(boxify_top_left(np.copy(blank)))
    x_data.append(boxify_bottom_right(np.copy(blank)))
    x_data.append(lineify_top_left(np.copy(blank)))
    x_data.append(lineify_bottom_right(np.copy(blank)))
    x_data.append(circleify_top_left(np.copy(blank)))
    x_data.append(circleify_bottom_right(np.copy(blank)))
    x_data.append(triangulify_top_left(np.copy(blank)))
    x_data.append(triangulify_bottom_right(np.copy(blank)))
    # row 3
    x_data.append(noiseify())
    x_data.append(noiseify_blur())
    # x_data.append(house(np.copy(blank)))
    training_data_indicies_nonzero = np.nonzero(training_data_indicies)[0]
    errors = []
    for i in range(results.shape[0]):
        # print(training_data_indicies)
        # print(training_data_indicies_nonzero)
        # print(training_data_indicies_nonzero[i])
        # Compare the i-th generated image with the i-th *selected* original.
        org = x_data[training_data_indicies_nonzero[i]].flatten()
        gen = results[i].flatten()
        # pearsonr returns (r, p-value); only |r| is kept below.
        error = pearsonr(org, gen)
        errors.append(error)
    errors = np.array(np.abs(errors))
    return errors[:, 0], training_data_indicies_nonzero
def save_image(data, cm, fn, dpi):
    """Saves a generated image to disk.

    Args:
        data: the image to save.
        cm: the colormap used when saving.
        fn: file name.
        dpi: resolution of saved image.
    Returns:
        None.
    """
    sizes = np.shape(data)
    height = float(sizes[0])
    width = float(sizes[1])
    # Create a borderless figure exactly covering the image pixels.
    fig = plt.figure()
    fig.set_size_inches(width/height, 1, forward=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.matshow(data, cmap=cm)
    plt.savefig(fn, dpi=dpi)
    # Close the figure to avoid accumulating open figures across calls.
    plt.close()
    return None
def model(training_data_indicies, initial_image_indicies, number_of_times_clicked, step_size, model_type, epoch):
    """Train a classifier on the selected shapes and generate one image per class.

    Args:
        training_data_indicies: an array of 0s and 1s, where 1s indicate selected training images to include.
        initial_image_indicies: specifies which image to initialize the image generation process.
        number_of_times_clicked: the experiment number (used in output file names).
        step_size: the step_size used for gradient ascent.
        model_type: delineating between multilayer perceptron and convolutional neural network.
        epoch: number of epochs for training.
    Returns:
        results: the generated images.
        errors: correlation coefficients for each generated image.
        training_data_indicies_nonzero: indices of the selected training images.
    """
    num_of_pictures = np.sum(training_data_indicies)
    nb_classes = num_of_pictures
    X, Y = preprocess(training_data_indicies)
    print(X.shape)
    print(Y.shape)
    model, input_layer = build_and_train_model(X, Y, nb_classes, model_type, epoch)
    img_num = 0
    results = []
    while img_num < num_of_pictures:
        start_time = time.time()
        print('START image', str(img_num))
        result_bool, img = draw_images(img_num, model, input_layer, initial_image_indicies, step_size)
        end_time = time.time()
        print('END image', str(img_num) + ":", end_time - start_time)
        if result_bool == True:
            # Advance first so saved files are numbered from 1.
            img_num += 1
            # Images are stored inverted; flip back for saving/returning.
            save_image(1-img, 'gray', 'static/results/' + str(number_of_times_clicked) + '_' + str(img_num) + '.png', 500)
            results.append(1-img)
    results = np.array(results)
    errors, training_data_indicies_nonzero = compute_error(training_data_indicies, results)
    return results, errors, training_data_indicies_nonzero
|
<reponame>matheuscas/pyfuzzy_toolbox
import arff
import time
import datetime
import csv
import numpy as np
from addict import Dict
def create_arff_dict(list_of_attributes_and_data, relation):
    """Merge per-source {'attributes', 'data'} entries into one arff-style Dict.

    Args:
        list_of_attributes_and_data: entries each holding 'attributes' and
            'data'; all entries are assumed to share the first entry's schema.
        relation: name of the arff relation.
    Returns:
        An addict.Dict with 'relation', 'attributes' and 'data' keys.
    """
    arff_dict = Dict()
    arff_dict.relation = relation
    # The original also assigned attributes = [] first; that assignment was
    # dead code (immediately overwritten) and has been removed.
    arff_dict.attributes = list_of_attributes_and_data[0]['attributes']
    arff_dict.data = [entry['data'] for entry in list_of_attributes_and_data]
    return arff_dict
def create_arff_file(arff_dict, name=None, timestamp=False):
    """Serialize *arff_dict* to '<name>[_<timestamp>].arff' and return the file name.

    Args:
        arff_dict: an addict.Dict with 'relation', 'attributes' and 'data'.
        name: base file name; defaults to the dict's relation.
        timestamp: when True, append a creation timestamp to the name.
    Returns:
        The created file name.
    """
    file_name = name if name else arff_dict.relation
    if timestamp:
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
        file_name = file_name + '_' + st
    file_name = file_name + '.arff'
    raw_dict = arff_dict.to_dict()
    # BUG FIX: the original opened the file without ever closing it; a
    # context manager guarantees the handle is flushed and released.
    with open(file_name, 'w') as arff_file:
        arff.dump(raw_dict, arff_file)
    return file_name
def create_csv_file(arff_dict, name=None, timestamp=False):
    """Write the attributes/data of *arff_dict* to a CSV file.

    Bug fixes versus the original: DictWriter.writerow was called with
    str(row) (a string, not the mapping it requires), the header used the
    raw (name, type) attribute tuples, and the `timestamp` parameter was
    ignored. Rows are now written with csv.writer and the header contains
    attribute names only; the '.csv' extension and timestamp handling mirror
    create_arff_file.

    Args:
        arff_dict: mapping with 'attributes' ([(name, type), ...]) and
            'data' (iterable of rows).
        name: base file name; defaults to the dict's relation.
        timestamp: when True, append a creation timestamp to the name.
    Returns:
        The created file name.
    """
    file_name = name if name else arff_dict.relation
    if timestamp:
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
        file_name = file_name + '_' + st
    file_name = file_name + '.csv'
    # Header = attribute names only (attributes are (name, type) pairs).
    header = [attribute[0] for attribute in arff_dict['attributes']]
    with open(file_name, 'w') as out_file:
        writer = csv.writer(out_file, delimiter=',')
        writer.writerow(header)
        for row in arff_dict['data']:
            writer.writerow(row)
    return file_name
def split_arff_dict(arff_dict, train_ratio=0.8):
    """Split *arff_dict* into train/test arff dicts (no shuffling).

    Args:
        arff_dict: an addict.Dict with 'relation', 'attributes' and 'data'.
        train_ratio: fraction of rows assigned to the training split.
    Returns:
        (train_arff_dict, test_arff_dict).
    """
    data_length = len(arff_dict['data'])
    # BUG FIX: the original (marked "# UNTESTED") computed
    # data_length - int(data_length * train_ratio / 10), which for
    # train_ratio=0.8 put ~92% of the rows in the training split instead
    # of the intended 80%.
    split_index = int(data_length * train_ratio)
    train_arff_dict = Dict()
    test_arff_dict = Dict()
    train_arff_dict.relation = 'train_data_' + arff_dict.relation
    test_arff_dict.relation = 'test_data_' + arff_dict.relation
    train_arff_dict['attributes'] = arff_dict['attributes']
    test_arff_dict['attributes'] = arff_dict['attributes']
    train_arff_dict['data'] = arff_dict['data'][:split_index]
    test_arff_dict['data'] = arff_dict['data'][split_index:]
    return train_arff_dict, test_arff_dict
def k_fold_split_arff_dict(arff_dict, k=10):
    """Split an arff dict into k folds, interleaving positive and negative rows.

    The class label is assumed to be the last element of each row, encoded
    either as 1.0/-1.0 or as 'positive'/'negative'.

    Args:
        arff_dict: mapping with 'attributes' and 'data' entries.
        k: number of folds.
    Returns:
        A list of {'attributes', 'data'} dicts, one per fold.
    """
    def is_positive(d):
        # Label is the final element of the row.
        return True if d[len(d) - 1] == 1.0 or d[len(d) - 1] == 'positive' else False
    def is_negative(d):
        return True if d[len(d) - 1] == -1.0 or d[len(d) - 1] == 'negative' else False
    positive_data = []
    negative_data = []
    for data in arff_dict['data']:
        if is_positive(data):
            positive_data.append(data)
        elif is_negative(data):
            negative_data.append(data)
    # NOTE(review): the fold size is derived from the *larger* class, yet the
    # loop below indexes both lists up to that length — this assumes balanced
    # classes, otherwise the shorter list raises IndexError. Also, "/" here
    # produces a float under Python 3 (this reads like Python 2 integer
    # division) — confirm intended semantics.
    size_fold = ((len(positive_data)) / k) if len(positive_data) >= len(
        negative_data) else ((len(negative_data)) / k)
    index_limit = len(positive_data) if len(positive_data) >= len(
        negative_data) else len(negative_data)
    k_folds_list = []
    fold = 1
    fold_data = []
    for dx in range(index_limit):
        # Interleave one positive and one negative row per step.
        fold_data.append(positive_data[dx])
        fold_data.append(negative_data[dx])
        if dx == (fold * size_fold - 1):
            # Fold boundary reached: emit the fold and start the next one.
            k_folds_list.append(fold_data)
            fold_data = []
            fold += 1
    k_folds_arff_dicts = []
    for k_fold_data in k_folds_list:
        k_folds_arff_dicts.append(
            {'attributes': arff_dict['attributes'], 'data': k_fold_data})
    return k_folds_arff_dicts
def create_train_data_from_k_folds_splits(k_folds_list, k_fold_test_index):
    """Merge every fold except the test fold into one training dict.

    :param k_folds_list: list of {'attributes', 'data'} fold dicts.
    :param k_fold_test_index: index of the fold held out for testing.
    :return: addict.Dict with 'attributes' (taken from the first fold)
        and 'data' (rows of all non-test folds, in fold order).
    """
    merged_rows = []
    for fold_index, fold in enumerate(k_folds_list):
        if fold_index == k_fold_test_index:
            continue
        merged_rows.extend(fold['data'])
    train_data_dict = Dict()
    train_data_dict.attributes = k_folds_list[0]['attributes']
    train_data_dict.data = merged_rows
    return train_data_dict
def equalizer_unfiltered_arff_data(unfiltered_arff, filtered_arff):
    """Project the unfiltered ARFF dict onto the filtered one's attributes.

    Keeps, for every data row of `unfiltered_arff`, only the columns whose
    attribute name also appears in `filtered_arff['attributes']`; column
    order follows the unfiltered dict.

    :return: plain dict with 'relation' ('equalized_arff_file'),
        'attributes' and 'data'.
    """
    kept_names = [attribute[0] for attribute in filtered_arff['attributes']]
    source_attributes = unfiltered_arff['attributes']
    projected_rows = [
        [value for position, value in enumerate(row)
         if source_attributes[position][0] in kept_names]
        for row in unfiltered_arff['data']
    ]
    projected_attributes = [
        attribute for attribute in source_attributes
        if attribute[0] in kept_names
    ]
    return {
        'relation': 'equalized_arff_file',
        'attributes': projected_attributes,
        'data': projected_rows,
    }
def get_nparray_from_arff_data(arff_data, polarity='positive'):
    """Collect the feature vectors of all rows with the given polarity.

    :param arff_data: mapping whose 'data' rows end with a polarity label.
    :param polarity: label value to select rows by (last column).
    :return: numpy matrix of shape (n_selected_rows, n_features), where the
        trailing label column is stripped from each row.
    """
    selected = [row for row in arff_data['data'] if row[-1] == polarity]
    # The last column is the polarity label, not a feature.
    feature_count = len(selected[0]) - 1
    m = np.matrix(np.zeros((len(selected), feature_count)))
    for row_index, row in enumerate(selected):
        m[row_index] = row[:-1]
    return m
|
import math
import numpy as np
import tensorflow as tf
import time
import os
import sys
sys.path.append('../')
from collections import Counter
from copy import deepcopy
from keras.utils import to_categorical
from tools.io import extract_pids, load_obj, store_obj, write_recommendations_to_file
print ('#' * 80)
print ('Track2Seq Model')
print ('#' * 80)
##################################################################
############################## SETUP #############################
##################################################################
# Global configuration (paths and names) shared by training and
# recommendation below.
t2s_config = load_obj('config.json', 'json')
input_folder = t2s_config['RESULTS_FOLDER'] # data of pre-processing steps
model_folder = t2s_config['T2S_MODEL_FOLDER'] # where model checkpoints are stored
model_name = t2s_config['T2S_MODEL_NAME'] # name of model
full_model_path = os.path.join(model_folder, model_name)
# generate folder
if not os.path.exists(full_model_path):
    print ('Created {} ...'.format(full_model_path))
    os.makedirs(full_model_path)
print ('Loading data ...')
# data: sequence of track ids (sliced into batches by BatchGenerator);
# vocab: mapping track -> integer id produced by pre-processing.
data = load_obj(os.path.join(input_folder, 'id_sequence.pckl'), 'pickle')
vocab = load_obj(os.path.join(input_folder, 'track2id.pckl'), 'pickle')
track2int = vocab
# Inverse vocabulary used to decode model predictions back into tracks.
int2track = {v:k for k,v in track2int.items()}
print ('There are {} tokens in the vocabulary'.format(len(int2track)))
##################################################################
######################### HYPER PARAMETERS #######################
##################################################################
seq_length = 50 # how long are training sequences
n_batch_size = 18 # how many sequences per batch
n_layers = 2 # amount of lstm layers
epochs = 1000 # epochs to train on
training = True # is training active - if not, recommendation process starts / continues
save_steps = 5000 # after how many steps should the progress be saved
latent_size = 128 # latent size of LSTM and embedding layer
skips = 5 # how many skips in between sequences
##################################################################
########################## TRAINING SETUP ########################
##################################################################
evaluation_set_fname = os.path.join(input_folder,'filled_dev_playlists_dict.pckl')
results_folder = 'recommendations/'
result_fname = os.path.join(results_folder, 'seq2track_recommendations.csv')
if not os.path.exists(results_folder):
    print('Creating results folder: {}'.format(results_folder))
    os.makedirs(results_folder)
##################################################################
####################### RECOMMENDATION SETUP #####################
##################################################################
# Team metadata written into the challenge submission file header.
challenge_track = t2s_config['TEAM_TRACK']
team_name = t2s_config['TEAM_NAME']
contact_info = t2s_config['TEAM_CONTACT']
##################################################################
############################# METHODS ############################
##################################################################
class DeviceCellWrapper(tf.contrib.rnn.RNNCell):
    """Pin a wrapped RNN cell's computation to a fixed device.

    Every invocation of the wrapped cell runs inside a ``tf.device``
    scope, which allows the layers of a multi-layer LSTM to be spread
    across several GPUs.
    """
    def __init__(self, device, cell):
        # Keep both the target device string and the delegate cell.
        self._device = device
        self._cell = cell
    @property
    def state_size(self):
        # Pure delegation: the wrapper adds no state of its own.
        return self._cell.state_size
    @property
    def output_size(self):
        # Pure delegation: output shape is the wrapped cell's.
        return self._cell.output_size
    def __call__(self, inputs, state, scope=None):
        # Force the delegate's ops onto the configured device.
        with tf.device(self._device):
            return self._cell(inputs, state, scope)
class BatchGenerator(object):
    """Resumable batch generator over a flat token-id sequence.

    The read position (`current_idx`) and epoch counter are persisted to
    disk in `store_folder`, so an interrupted training run can resume
    where it left off.
    """
    def __init__(self, data, seq_length, n_batch_size, n_vocab, step=5, test=False, store_folder='step_point/'):
        """
        data: can be either training, validation or test data
        seq_length: number of tracks that will be fed into the network
        step: number of words to be skipped over between training samples within each batch
        """
        # NOTE(review): the `test` parameter is accepted but never used —
        # confirm whether it was meant to disable checkpointing.
        self.data = data
        self.seq_length = seq_length
        self.n_batch_size = n_batch_size
        self.n_vocab = n_vocab
        self.store_folder = store_folder
        if not os.path.exists(self.store_folder):
            os.makedirs(self.store_folder)
        # current_idx will save progress and serve as pointer
        # will reset to 0 once end is reached
        if os.path.exists(os.path.join(self.store_folder, 'global_step_point.pckl')):
            self.current_idx = load_obj(os.path.join(self.store_folder, 'global_step_point.pckl'), 'pickle')
        else:
            self.current_idx = 0
        self.step = step
        # calculate steps per epoch
        self.steps_per_epoch = (len(self.data)//(self.n_batch_size) - 1) // self.step
        # reload or initialize epoch and step counter
        if os.path.exists(os.path.join(self.store_folder, 'global_epoch_point.pckl')):
            self.epoch_counter = load_obj(os.path.join(self.store_folder, 'global_epoch_point.pckl'), 'pickle')
        else:
            self.epoch_counter = 0
    def store_step_counter(self, s):
        # Persist the current step so training can resume mid-epoch.
        store_obj(s, os.path.join(self.store_folder, 'global_step_point.pckl'), 'pickle')
    def store_epoch_counter(self, e):
        # Persist the epoch counter (also cached on the instance).
        self.epoch_counter = e
        store_obj(self.epoch_counter, os.path.join(self.store_folder, 'global_epoch_point.pckl'), 'pickle')
    def generate(self):
        """Yield (x, y) batches forever; y is x shifted one token ahead.

        Consecutive batch rows start `step` tokens apart, so rows overlap
        when step < seq_length. The pointer wraps to 0 near the end of
        the data so every slice is full length.
        """
        x = np.zeros((self.n_batch_size, self.seq_length))
        y = np.zeros((self.n_batch_size, self.seq_length))
        while True:
            for i in range(self.n_batch_size):
                if self.current_idx + self.seq_length >= len(self.data):
                    # reset the index back to the start of the data set
                    self.current_idx = 0
                x[i, :] = self.data[self.current_idx:self.current_idx + self.seq_length]
                y[i, :] = self.data[self.current_idx + 1:self.current_idx + self.seq_length + 1]
                self.current_idx += self.step
            yield x, y
##################################################################
############################## MODEL #############################
##################################################################
class Seq2Track(object):
    """LSTM language model over track-id sequences (TF1 graph mode).

    With ``recommendation=False`` the graph is built for batched training
    (dropout enabled); with ``recommendation=True`` it is built with batch
    size 1 and sequence length 1 so tracks can be fed one at a time while
    the LSTM state is carried manually between session calls.
    """
    def __init__(self, n_batch_size, seq_length, n_vocab, n_layers, latent_size=128, recommendation=False):
        self.n_batch_size = n_batch_size
        self.seq_length = seq_length
        self.n_vocab = n_vocab
        self.n_layers = n_layers
        self.latent_size = latent_size
        if recommendation:
            # Recommendation mode feeds a single token per step.
            self.n_batch_size = 1
            self.seq_length = 1
        # define placeholders for X and y batches
        self.X = tf.placeholder(tf.int32, [None, self.seq_length], name='X')
        self.y = tf.placeholder(tf.int32, [None, self.seq_length], name='y')
        # generate embedding matrix for data representation and initialize randomly
        self.embedding_matrix = tf.get_variable('embedding_mat', [self.n_vocab, self.latent_size], tf.float32, tf.random_normal_initializer())
        self.embedding_inputs = tf.nn.embedding_lookup(self.embedding_matrix, self.X)
        # define an initial state for LSTM
        # since LSTM contain two states c and h we're working with the second dimension is 2
        self.initial_state = tf.placeholder(tf.float32, [self.n_layers, 2, self.n_batch_size, self.latent_size], name='initial_state')
        # states can be represented as tuples (c, h) per layer
        # to do so, we'll unstack the tensor on the layer axis
        state_list = tf.unstack(self.initial_state, axis=0)
        # and create a tuple representation for any (c, h) state representation per layer (n)
        # tuple(LSTMStateTuple(c0, h0), LSTMStateTuple(c1, h1), ..., LSTMStateTuple(cn, hn),)
        rnn_tuple_state = tuple(
            [tf.contrib.rnn.LSTMStateTuple(state_list[i][0], state_list[i][1]) for i in range(self.n_layers)]
        )
        # in case one layer is being used
        cell = tf.contrib.rnn.LSTMCell(self.latent_size, forget_bias=1.0) # different size possible?
        #devices = ['/gpu:0', '/gpu:1'] # multi gpu layout - amount of devices == amount of layers
        def build_cells(layers, recommendation=recommendation, dropout_prob=.6):
            # One LSTM cell per layer; dropout is applied only when training.
            cells = []
            for i in range(layers):
                cell = tf.contrib.rnn.LSTMCell(self.latent_size, forget_bias=1., state_is_tuple=True)
                #cell = DeviceCellWrapper(devices[i], tf.contrib.rnn.LSTMCell(self.latent_size, forget_bias=1., state_is_tuple=True))
                if not recommendation:
                    cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout_prob)
                cells.append(cell)
            return cells
        # otherwise create multirnn cells
        if self.n_layers > 1:
            cells = build_cells(self.n_layers, recommendation, .5)
            cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
        # generate state and y output per timestep
        self.output, self.state = tf.nn.dynamic_rnn(cell, self.embedding_inputs, dtype=tf.float32, initial_state=rnn_tuple_state)
        # reshape so output fits into softmax function
        # [n_batch_size * seq_length, latent_size]
        self.output = tf.reshape(self.output, [-1, self.latent_size])
        # now we need to calculate the activations
        with tf.variable_scope('lstm_vars', reuse=tf.AUTO_REUSE):
            self.W = tf.get_variable('W', [self.latent_size, self.n_vocab], tf.float32, tf.random_normal_initializer())
            self.b = tf.get_variable('b', [self.n_vocab], tf.float32, tf.constant_initializer(0.0))
            self.logits = tf.matmul(self.output, self.W) + self.b
        # seq2seq.sequence_loss method requires [n_batch_size, seq_length, n_vocab] shaped vector
        self.logits = tf.reshape(self.logits, [self.n_batch_size, self.seq_length, self.n_vocab])
        # targets are expected to be of shape [seq_len, 1] where the second dimension represents the class as int
        # we can introduce weights regarding the tracks, this might be interesting for
        # an emulated attention mechanism or if we use artist / genre level recommendations
        # could also be used to weigh the first tracks or last tracks of a sequence
        # with more importance
        self.loss = tf.contrib.seq2seq.sequence_loss(
            logits=self.logits,
            targets=self.y,
            # BUG FIX: the weights must match the *instance* dimensions.
            # The old code used the constructor arguments, which disagree
            # with the logits shape when recommendation=True overrides
            # batch size / sequence length to 1.
            weights=tf.ones([self.n_batch_size, self.seq_length], dtype=tf.float32),
            average_across_timesteps=True,
            average_across_batch=True)
        self.cost = tf.reduce_sum(self.loss)
        gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tf.trainable_variables()), 1.)
        # accuracy calculations follow
        self.softmax = tf.nn.softmax(tf.reshape(self.logits, [-1, self.n_vocab]))
        self.predict = tf.cast(tf.argmax(self.softmax, axis=1), tf.int32)
        correct_predictions = tf.equal(self.predict, tf.reshape(self.y, [-1]))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
        self.lr = .001
        with tf.variable_scope('lstm_vars', reuse=tf.AUTO_REUSE):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
            self.training_op = self.optimizer.apply_gradients(zip(gradients, tf.trainable_variables()))
    def recommend(self, sess, start_sequence, int2track, track2int, n=100):
        """Recommend up to ``n`` tracks for a seed track sequence.

        Feeds the seeds through the net to build up LSTM state, then runs
        ``n`` further steps, accumulating probability mass per candidate
        track; returns the highest-scoring tracks (seeds, '<eos>' and
        'unknown' excluded). Returns [] when no seed is in the vocabulary.
        """
        def reduced_argsort(arr, size=n+100):
            # Indices of the `size` largest probabilities (unsorted).
            return np.argpartition(arr, -size)[-size:]
        def subsample(preds, candidates, int2track, temp=.7):
            # Temperature sampling helper; currently unused by the loop below.
            if temp <= 0:
                candidates.append(int2track[np.argmax(preds)])
                return
            preds = np.asarray(preds[1:]).astype('float64')
            preds = np.log(preds) / temp
            exp_preds = np.exp(preds)
            preds = exp_preds / np.sum(exp_preds)
            probas = np.random.multinomial(1, preds, 1)
            sample = np.argmax(probas)
            candidates.append(int2track[sample])
        def artist_search(preds, candidates, int2track, seeds, c_count):
            # Accumulate probability mass per non-seed candidate track.
            samples = reduced_argsort(preds)
            for sample in samples:
                track = int2track[sample]
                if track in seeds:
                    continue
                if track in c_count:
                    c_count[track] += preds[sample]
                else:
                    c_count[track] = preds[sample]
                candidates.append(track)
            # return index of highest probability
            pointer = -1
            # filter out eos and unknown token for stream of conciousness
            while int2track[samples[pointer]] in ['<eos>', 'unknown']:
                pointer -= 1
            return samples[pointer]
        state = np.zeros((self.n_layers, 2, self.n_batch_size, self.latent_size))
        candidates = []
        c_count = {}
        # iterate over seeds and generate initial state for recommendation
        for track in start_sequence:
            x = np.zeros((1, 1))
            if track not in track2int:
                continue
            x[0, 0] = track2int[track]
            [probabilities, state] = sess.run(
                [self.softmax, self.state],
                feed_dict={
                    self.X: x,
                    self.initial_state: state
                })
            _ = artist_search(probabilities[0], candidates, int2track, start_sequence, c_count)
        track_pointer = -1
        track = start_sequence[track_pointer]
        while track not in track2int:
            track_pointer -= 1
            try:
                track = start_sequence[track_pointer]
            except IndexError:
                # BUG FIX (narrowed from bare `except:`): no seed track is
                # in the vocabulary, so nothing can be recommended.
                return []
        truth_flag = False
        truth_pointer = 0
        valid_sequence = [x for x in start_sequence if x in track2int]
        # BUG FIX: the loop variable used to be `n`, shadowing the argument
        # and shrinking the final most_common() call to n-1 entries.
        for _ in range(n):
            # NOTE(review): this random re-seeding overwrites the semi-guided
            # `track` assignments at the end of each iteration, making them
            # dead code; kept as-is pending confirmation of intent.
            track = np.random.choice(valid_sequence, 1)[0]
            x = np.zeros((1, 1))
            x[0, 0] = track2int[track]
            [probabilities, state] = sess.run(
                [self.softmax, self.state],
                feed_dict={
                    self.X: x,
                    self.initial_state: state
                })
            track_int = artist_search(probabilities[0], candidates, int2track, start_sequence, c_count)
            # Semi-guided prediction
            if truth_flag:
                truth_flag = False
                if truth_pointer == len(valid_sequence):
                    truth_pointer = 0
                track = start_sequence[truth_pointer]
            else:
                truth_flag = True
                track = int2track[track_int]
        # return most probable candidates
        return_candidates = [x[0] for x in Counter(c_count).most_common(n)]
        return [x for x in return_candidates if x not in ['<eos>', 'unknown']]
##################################################################
############################## MAIN ##############################
##################################################################
def main():
    """Train the Track2Seq model or, when the module-level `training` flag
    is False, generate recommendations for the evaluation playlists.

    All settings (paths, hyper parameters, team info) come from the
    module-level configuration above; progress is checkpointed so either
    mode can be interrupted and resumed.
    """
    # in case a specific GPU should be used
    #gpu_options = tf.GPUOptions(visible_device_list='0')
    #config = tf.ConfigProto(gpu_options=gpu_options)
    #sess = tf.Session(config=config)
    sess = tf.Session()
    # initialize data generator
    n_vocab = len(int2track)
    bg = BatchGenerator(
        data=data,
        seq_length=seq_length,
        n_batch_size=n_batch_size,
        n_vocab=n_vocab,
        step=skips,
        store_folder=os.path.join(full_model_path, 'step_point'))
    current_epoch = bg.epoch_counter
    # initialize model for training
    model = Seq2Track(
        n_batch_size=n_batch_size,
        seq_length=seq_length,
        n_vocab=n_vocab,
        n_layers=n_layers,
        latent_size=latent_size)
    # initialize model for prediction, reusing the training variables
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        pred_model = Seq2Track(
            n_batch_size=n_batch_size,
            seq_length=seq_length,
            n_vocab=n_vocab,
            n_layers=n_layers,
            latent_size=latent_size,
            recommendation=True)
    # pick up the process where we left off - if possible
    saver = tf.train.Saver(tf.global_variables())
    init_operation = tf.global_variables_initializer()
    sess.run(init_operation)
    # check if a model exists, if so - load it
    if os.path.exists(os.path.join(full_model_path, 'checkpoint')):
        saver.restore(sess, tf.train.latest_checkpoint(full_model_path))
    # training routine
    if training:
        # run epochs
        for e in range(current_epoch, epochs):
            avg_epoch_cost = [] # store average cost per epoch
            # for any epoch initialize state as zeros
            current_state = np.zeros((n_layers, 2, n_batch_size, latent_size))
            # NOTE(review): the stored value is a step counter here but is
            # loaded as a data index in BatchGenerator.__init__ — the units
            # do not match; confirm which meaning is intended.
            for step in range(bg.current_idx, bg.steps_per_epoch):
                X_batch, y_batch = next(bg.generate()) # generate fresh training batch
                # BUG FIX: the `step % 1000` branch was unreachable because it
                # followed `if step % 10 == 0` and every multiple of 1000 is a
                # multiple of 10; test the rarer condition first. `step != 0`
                # keeps step 0 on the cheap progress branch as before.
                if step % 1000 == 0 and step != 0: # show recommendation examples every 1000 steps
                    start_time = time.time()
                    cost, _, current_state, acc = sess.run(
                        [model.cost, model.training_op, model.state, model.accuracy],
                        feed_dict={
                            model.X: X_batch,
                            model.y: y_batch,
                            model.initial_state: current_state})
                    # Compute cost and accuracy
                    avg_epoch_cost.append(cost)
                    end_time = (time.time() - start_time)
                    print ('Epoch: {} - Step: {} / {} - Cost: {} - Accuracy: {} - Time: {}s'.format(
                        e, step, bg.steps_per_epoch, np.mean(avg_epoch_cost), acc, end_time))
                    # Show recommendations
                    # can be changed to incorporate any track that's in int2track
                    sample_seed_sequence = [
                        'spotify:track:14AaSKhUMiR5qbNvhjlj9L',
                        'spotify:track:2tznHmp70DxMyr2XhWLOW0',
                        'spotify:track:0uqPG793dkDDN7sCUJJIVC']
                    # BUG FIX: the old format argument was a generator
                    # expression, which printed as a generator object.
                    print ('Seeds: {} '.format([x for x in sample_seed_sequence]))
                    results = pred_model.recommend(sess, sample_seed_sequence, int2track, track2int, n=500)
                    print ('Recommendations: {}'.format([x for x in results]))
                elif step % 10 == 0: # show progress every 10 steps
                    start_time = time.time()
                    cost, _, current_state = sess.run(
                        [model.cost, model.training_op, model.state],
                        feed_dict={model.X: X_batch, model.y: y_batch, model.initial_state: current_state})
                    avg_epoch_cost.append(cost)
                    end_time = (time.time() - start_time)
                    print ('Epoch: {} - Step: {} / {} - Cost: {} - Time: {}s'.format(
                        e, step, bg.steps_per_epoch, np.mean(avg_epoch_cost), end_time))
                else:
                    cost, _, current_state = sess.run(
                        [model.cost, model.training_op, model.state],
                        feed_dict={
                            model.X: X_batch,
                            model.y: y_batch,
                            model.initial_state: current_state})
                    avg_epoch_cost.append(cost)
                # Save the model and the vocab
                if step != 0 and step % save_steps == 0:
                    # Save model
                    bg.store_step_counter(step)
                    bg.store_epoch_counter(e)
                    model_file_name = os.path.join(full_model_path, 'model')
                    saver.save(sess, model_file_name, global_step = step)
                    print('Model Saved To: {}'.format(model_file_name))
            # if epoch is over: checkpoint and rewind the generator
            bg.store_epoch_counter(e)
            bg.current_idx = 0
            bg.store_step_counter(0)
            model_file_name = os.path.join(full_model_path, 'model')
            saver.save(sess, model_file_name, global_step = step)
            print('Model Saved To: {}'.format(model_file_name))
    else:
        # Recommendation mode: resume from the result file (already-handled
        # playlist ids are skipped) and append new recommendations.
        pid_collection = extract_pids(result_fname)
        all_challenge_playlists = load_obj(evaluation_set_fname, 'pickle')
        init = tf.global_variables_initializer()
        sess.run(init)
        if os.path.exists(os.path.join(full_model_path, 'checkpoint')):
            saver.restore(sess, tf.train.latest_checkpoint(full_model_path))
        num_playlists = 0
        for k in all_challenge_playlists:
            num_playlists += len(all_challenge_playlists[k])
        print('Recommending tracks for {:,} playlists...'.format(num_playlists))
        avg_time = []
        for k in all_challenge_playlists:
            for ix, playlist in enumerate(all_challenge_playlists[k]):
                start_wall_time = time.time()
                if playlist['pid'] in pid_collection:
                    continue
                reco_per_playlist = []
                try:
                    # request more than needed (600) so the seed/eos filtering
                    # still leaves 500 tracks
                    reco_per_playlist = pred_model.recommend(sess, playlist['seed'], int2track, track2int, n=600)
                    if not reco_per_playlist:
                        print('Something went wrong with playlist {}'.format(playlist['pid']))
                        continue
                except KeyboardInterrupt:
                    sys.exit()
                except Exception as err:
                    print('Something went wrong with playlist {} (Error: {})'.format(playlist['pid'], err))
                    continue
                # store recommendations
                reco_per_playlist = reco_per_playlist[:500]
                pid_collection.append(playlist['pid'])
                time_elapsed = time.time() - start_wall_time
                avg_time.append(time_elapsed)
                print(
                    'Recommended {} songs ({} / {}). Avg time per playlist: {:.2f} seconds.'.format(
                        len(reco_per_playlist),
                        ix,
                        num_playlists,
                        np.mean(avg_time)))
                write_recommendations_to_file(challenge_track, team_name, contact_info, playlist['pid'], reco_per_playlist, result_fname)
                with open(result_fname, 'a') as f:
                    f.write(str(playlist['pid']) + ', ')
                    f.write(', '.join([x for x in reco_per_playlist]))
                    f.write('\n\n')
# Script entry point — trains or recommends depending on the module-level
# `training` flag.
if __name__ == "__main__":
    main()
|
#
# -*- coding: utf-8
#
# Copyright (c) 2017 <NAME>. All rights reserved.
#
@gem('Dravite.Euclid')
def gem():
#
#<copyright>
#
# Code copied from:
#
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
#
# As of 2017-03-02, when the code was copied, it said:
#
# "This page was last modified on 19 April 2016, at 15:15."
#
# "Text is available under the Creative Commons Attribution-ShareAlike License.; additional terms
# may apply. By using this site, you agree to the Terms of Use and Privacy Policy."
#
# The link for 'Creative Commons Attribution-ShareAlike License.' is to
#
# "https://creativecommons.org/licenses/by-sa/3.0/"
#
# As of 2017-03-02 this summary reads:
#
# (CC) Creative
# Commons
#
# Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0)
#
# This is a human-readable summary of (and not a substitute for) the license.
#
# You are free to:
#
# Share — copy and redistribute the material in any medium or format
# Adapt — remix, transform, and build upon the material
#
# for any purpose, even commercially.
#
# The licensor cannot revoke these freedoms as long as you follow the license terms.
#
# Under the following terms:
#
# Attribution — You must give appropriate credit, provide a link to the license, and indicate if
# changes were made. You may do so in any reasonable manner, but not in any way that suggests
# the licensor endorses you or your use.
#
# ShareAlike — If you remix, transform, or build upon the material, you must distribute your
# contributions under the same license as the original.
#
# No additional restrictions — You may not apply legal terms or technological measures that
# legally restrict others from doing anything the license permits.
#
# As of 2017-03-03, a copy of this summary has been saved as:
#
# "../OtherLicenses/cc/2017-03-02-by-sa_3.0.html".
#
# The actual license the summary links to is at:
#
# https://creativecommons.org/licenses/by-sa/3.0/legalcode
#
# As of 2017-03-02, a copy of this license has been saved as:
#
# "../OtherLicenses/cc/2017-03-02-by-sa_3.0_legalcode.html" in the same
#
#-----------------------------------------------------------------------------------------------------
#
# As stated above for "Creative Commons Attribution-ShareAlike 3.0 Unported" under the "Attribution"
# section:
#
# "Attribution — You must give appropriate credit, provide a link to the license, and indicate
# if changes were made. You may do so in any reasonable manner, but not in any way that
# suggests the licensor endorses you or your use.
#
# To comply with these terms:
#
# 1. Appropirate credite has been given by this copyright notice;
#
# 2. A link has been provided to the license;
#
# 3. This notice, hereby, *INDICATES THAT CHANGES HAVE BEEN* made to the original code;
#
# 4. We do NOT indicate the licensor has endorsed our copy of the code in any way.
#
#-----------------------------------------------------------------------------------------------------
#
# P.S.: An MIT license is much easier to comply with than a "Attribute-ShareAlike" license,
# which is why we use the MIT license for the Gem project for all our own original code.
#
# Of course the MIT license, does *NOT* apply to the orignal of the code, which has its
# own license as indicated above.
#
# To make things simple, all *changes* to the original code below are dual licensed under
# both (1) the MIT License that the rest of Gem is licensed; and (2) under the exact same
# "Creative Commons Attribution-ShareAlike 3.0 Unported" license as the original code.
#
# NOTE: Dual copyright only applies to the changes, not to the original code which is obviously
# only licensed under the original license.
#
@share
def greatest_common_demominator(b, n):
[x0, x1, y0, y1] = ((1, 0, 0, 1))
while n != 0:
[q, b, n] = ((b // n, n, b % n))
[x0, x1] = ((x1, x0 - q * x1))
[y0, y1] = ((y1, y0 - q * y1))
return ((b, x0, y0))
#
# x = modular_inverse(b) mod n, (x * b) % n == 1
#
@share
def modular_inverse(b, n):
[g, x, J] = greatest_common_demominator(b, n)
if g == 1:
return x % n
#</copyright>
|
#!/usr/bin/env python
#
# Copyright 2014 - 2016 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.utils.mathml.base as _base
import bce.utils.mathml.types as _types
# Operator identifiers. They are passed to OperatorComponent and mapped to
# their serialized MathML symbols by OperatorComponent._get_operator_symbol().
OPERATOR_PLUS = 1000
OPERATOR_MINUS = 1001
OPERATOR_MULTIPLY = 1002
OPERATOR_DOT = 1003
OPERATOR_LEFT_PARENTHESIS = 1004
OPERATOR_RIGHT_PARENTHESIS = 1005
OPERATOR_EQUAL = 1006
class OperatorComponent(_base.Base):
    """Operator component."""

    #  Lookup table: operator ID -> serialized MathML symbol.
    _OPERATOR_SYMBOLS = {
        OPERATOR_PLUS: "+",
        OPERATOR_MINUS: "−",
        OPERATOR_MULTIPLY: "×",
        OPERATOR_DOT: "⋅",
        OPERATOR_LEFT_PARENTHESIS: "(",
        OPERATOR_RIGHT_PARENTHESIS: ")",
        OPERATOR_EQUAL: "="
    }

    def __init__(self, operator_id):
        """Initialize the operator component.

        :type operator_id: int
        :param operator_id: The operator ID.
        """
        self.__op_id = operator_id
        _base.Base.__init__(self, _types.COMPONENT_TYPE_OPERATOR)

    def set_operator_id(self, operator_id):
        """Set the ID of the operator.

        :type operator_id: int
        :param operator_id: The operator ID.
        """
        self.__op_id = operator_id

    def get_operator_id(self):
        """Get the ID of the operator.

        :rtype : int
        :return: The operator ID.
        """
        return self.__op_id

    def _get_operator_symbol(self):
        """Get the serialized MathML symbol of the operator.

        :rtype : str
        :return: The serialized symbol.
        :raise ValueError: Raise this error if the operator type is invalid.
        """
        symbol = self._OPERATOR_SYMBOLS.get(self.__op_id)
        if symbol is None:
            raise ValueError("Invalid operator ID.")
        return symbol

    def is_plus(self):
        """Get whether the operator is a plus operator.

        :rtype : bool
        :return: Whether the operator is a plus operator.
        """
        return self.__op_id == OPERATOR_PLUS

    def is_minus(self):
        """Get whether the operator is a minus operator.

        :rtype : bool
        :return: Whether the operator is a minus operator.
        """
        return self.__op_id == OPERATOR_MINUS

    def is_multiply(self):
        """Get whether the operator is a multiply operator.

        :rtype : bool
        :return: Whether the operator is a multiply operator.
        """
        return self.__op_id == OPERATOR_MULTIPLY

    def is_dot(self):
        """Get whether the operator is a dot.

        :rtype : bool
        :return: Whether the operator is a dot.
        """
        return self.__op_id == OPERATOR_DOT

    def is_left_parenthesis(self):
        """Get whether the operator is a left parenthesis.

        :rtype : bool
        :return: Whether the operator is a left parenthesis.
        """
        return self.__op_id == OPERATOR_LEFT_PARENTHESIS

    def is_right_parenthesis(self):
        """Get whether the operator is a right parenthesis.

        :rtype : bool
        :return: Whether the operator is a right parenthesis.
        """
        return self.__op_id == OPERATOR_RIGHT_PARENTHESIS

    def is_equal(self):
        """Get whether the operator is an equal.

        :rtype : bool
        :return: Whether the operator is an equal.
        """
        return self.__op_id == OPERATOR_EQUAL

    def to_string(self, indent=0):
        """Serialize the component to string.

        :type indent: int
        :param indent: The indent space count.
        :rtype : str
        :return: The serialized string.
        """
        padding = " " * indent
        return padding + "<mo>" + self._get_operator_symbol() + "</mo>"
|
<filename>codewars/level5/PrimeswithTwoEvenandDoubleEvenJumps.py
'''
Think in all the primes that: if p is prime and p < n , all these following numbers (p + 2) , (p + h) and (p + 2h) are all primes, being h an even number such that: 2 <= h <= hMax
Your function, give_max_h() , will receive 2 arguments n and hMax .
It should find for which value or values of h , we encounter the maximum amount of primes that satisfy the above constraint, testing for all possible even values from 2 to hMax included.
So, give_max_h(n, hMax) should output a list of lists with the following structure: a) if you find a unique solution:
[[h0, max amount of primes]] being h0 such that 2 <= h0 <= hMax and is the value that has the highest amount of collected primes
b) if you have more than one solution, suppose 2, you found two values of h: h0, h1 such that: 2 <= h0 < h1 <= hMax
[[h0, max_amount_of_primes], [h1, max_amount_of_primes]] (lists should be sorted by the value of h) Let's see some cases: For Python and Ruby:
Case 1
give_max_h(30, 8) ------> [[6, 3]]
For Javascript:
Case 1
giveMax_h(30, 8) ------> [[6, 3]]
we have 4 different sets of steps to test [2, 2, 4], [2, 4, 8], [2, 6, 12] and [2, 8, 16]
///so that we select primes p in the range (2, 30) that fulfill: p, p + 2, p + 2 and p + 4 all primes --- > only with prime 3 (1 prime)
p, p + 2, p + 4 and p + 8 all primes ----> only with prime 3 (1 prime)
p, p + 2, p + 6 and p + 12 all primes -----> passed by primes 5, 11, 17 (3 primes)
p, p + 2, p + 8 and p + 16 all primes -----> only with prime 3 (1 prime)
So h is 6 with 3 found primes (max amount of primes) ([6, 3])///
Case 2) For Python and Ruby
give_max_h(100, 10) -----> [[6, 4]] # with h = 6 we found the highest amount of primes (4) : 5, 11, 17, 41
For Javascript
giveMax_h(100, 10) -----> [[6, 4]]
Case 3) For Python and Ruby
give_max_h(1000, 100) -----> [[30, 13], [42, 13]] # we have two values of h that produced the highest amount of primes (13)
For Javascript
giveMax_h(1000, 100) -----> [[30, 13], [42, 13]]
///h = 30 produced the primes 11, 29, 41, 71, 107, 137, 197, 419, 431, 461, 617, 827, 881
h = 42 produced the primes 5, 17, 29, 107, 149, 197, 227, 269, 347, 419, 599, 617, 659
///
Happy coding!!
(Advise: You will need a fast prime generator. Do not use primality tests, otherwise the runtimes would exceed our allowed 6000 ms to complete tests)
(To investigate beyond this kata: Do we have a convergence for the value of h, no matter the values of n and hMax are?)
'''
import time
import numba as nb
# Seed list of the primes below 100; genp() extends it in place.
p = [2, 3, 5, 7, 11 , 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
# @nb.autojit
def genp(n):
    """Extend the global prime list `p` with every prime in [101, n).

    Trial division against the primes found so far, stopping once the
    divisor exceeds sqrt(candidate) — any composite has a prime factor no
    larger than its square root, so no further checks are needed. `p` is
    kept sorted ascending, which the early break relies on.

    Returns 1 (kept for backward compatibility with existing callers).
    """
    for candidate in range(101, n):
        is_prime = True
        for divisor in p:
            if divisor * divisor > candidate:
                # No factor <= sqrt(candidate) exists -> candidate is prime.
                break
            if candidate % divisor == 0:
                is_prime = False
                break
        if is_prime:
            p.append(candidate)
    return 1
def give_max_h(n, kMax):
    """Find the even step(s) h in [2, kMax] maximizing the prime count.

    For each even h, counts the primes z < n such that z + 2, z + h and
    z + 2h are all prime. Returns [[h, count], ...] for the h value(s)
    achieving the maximum count, sorted by h ascending.
    """
    # Sieve far enough to cover the largest tested value, z + 2 * kMax.
    primes = sieve1(n + kMax * 2)
    if not primes:
        return []
    # PERF FIX: the kata warns about runtime; membership tests on the list
    # were O(len) each — a set makes them O(1).
    prime_set = set(primes)
    result = []
    for h in range(2, kMax + 1, 2):
        count = 0
        double_h = 2 * h
        for z in primes:
            if z < n and (z + 2 in prime_set) and (z + h in prime_set) and (z + double_h in prime_set):
                count += 1
        if count > 0:
            result.append([h, count])
    # Sort by count descending (stable, so ties stay in ascending-h order).
    result.sort(key=lambda pair: pair[1], reverse=True)
    if len(result) <= 1:
        return result
    best_count = result[0][1]
    return [pair for pair in result if pair[1] == best_count]
def sieve1(n):
    """Sieve of Eratosthenes: return the list of all primes in [2, n)."""
    flags = [True] * n
    for i in range(2, int(n ** .5) + 1):
        if flags[i]:
            for j in range(i * i, n, i):
                flags[j] = False
    return [x for x in range(2, n) if flags[x]]
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
def sieve(n):
    """ Input n>=6, Returns a list of primes, 2 <= p < n """
    # Mod-6 wheel sieve: only numbers of the form 6k+-1 are tracked, so the
    # flag array is a third the size of a naive sieve. Index arithmetic below
    # maps array index i to wheel value k = 3*i + 1 | 1.
    correction = (n % 6 > 1)
    # Round n to a multiple-of-6 boundary so the slice arithmetic lines up.
    n = {0:n, 1:n - 1, 2:n + 4, 3:n + 3, 4:n + 2, 5:n + 1}[n % 6]
    sieve = [True] * (n // 3)
    sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            # Strike multiples of k in both 6k+-1 residue classes with two
            # strided slice assignments.
            sieve[ ((k * k) // 3) ::2 * k] = [False] * ((n // 6 - (k * k) // 6 - 1) // k + 1)
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = [False] * ((n // 6 - (k * k + 4 * k - 2 * k * (i & 1)) // 6 - 1) // k + 1)
    return [2, 3] + [3 * i + 1 | 1 for i in range(1, n // 3 - correction) if sieve[i]]
if __name__ == '__main__':
    # Ad-hoc benchmark: trial-division genp() versus sieve1() on the same
    # bound, printing the two wall-clock durations and their ratio.
    beg = time.time()
    print(beg)
    genp(100000)
    # print(len(p))
    # genp(100000)
    mid = time.time()
    print(mid)
    fir = mid - beg
    print(mid - beg)
    # print(len(sieve1(100000)))
    sieve1(100000)
    # print(len(sieve(100000)))
    # print(len(p))
    # # print(p)
    #
    # print(give_max_h(1000, 100))
    # print(mid - beg)
    # print(time.time() - mid)
    # print(1 | 1)
    end = time.time()
    sec = end - mid
    print('new time:{}'.format(sec))
    # Speed-up factor of the sieve over trial division.
    print(fir / sec)
|
<reponame>xu1991/open<filename>searx/engines/wolframalpha_api.py
# Wolfram Alpha (Science)
#
# @website https://www.wolframalpha.com
# @provide-api yes (https://api.wolframalpha.com/v2/)
#
# @using-api yes
# @results XML
# @stable yes
# @parse url, infobox
from lxml import etree
from searx.url_utils import urlencode
# search-url
search_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'
site_url = 'https://www.wolframalpha.com/input/?{query}'
api_key = ''  # defined in settings.yml

# xpath variables used by response() to pull pods/subpods out of the XML
failure_xpath = '/queryresult[attribute::success="false"]'
input_xpath = '//pod[starts-with(attribute::id, "Input")]/subpod/plaintext'
pods_xpath = '//pod'
subpods_xpath = './subpod'
pod_primary_xpath = './@primary'
pod_id_xpath = './@id'
pod_title_xpath = './@title'
plaintext_xpath = './plaintext'
image_xpath = './img'
img_src_xpath = './@src'
img_alt_xpath = './@alt'

# pods to display as image in infobox
# these pods do return plaintext, but they look better and are more useful as images
image_pods = {'VisualRepresentation',
              'Illustration'}
# do search-request
def request(query, params):
    """Fill in the Wolfram|Alpha API url (plus a Referer pointing at the
    public site) and hand the params dict back to the searx engine runner."""
    encoded_query = urlencode({'input': query})
    params['url'] = search_url.format(query=encoded_query, api_key=api_key)
    params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))
    return params
# replace private user area characters to make text legible
def replace_pua_chars(text):
    """Map Wolfram|Alpha's private-use-area glyphs to readable equivalents."""
    substitutions = (
        (u'\uf522', u'\u2192'),  # right arrow
        (u'\uf7b1', u'\u2115'),  # set of natural numbers
        (u'\uf7b4', u'\u211a'),  # set of rational numbers
        (u'\uf7b5', u'\u211d'),  # set of real numbers
        (u'\uf7bd', u'\u2124'),  # set of integer numbers
        (u'\uf74c', 'd'),        # differential
        (u'\uf74d', u'\u212f'),  # euler's number
        (u'\uf74e', 'i'),        # imaginary number
        (u'\uf7d9', '='),        # equals sign
    )
    for pua, plain in substitutions:
        text = text.replace(pua, plain)
    return text
# get response from search-request
def response(resp):
    """Parse the Wolfram|Alpha XML response into a searx infobox result plus
    a plain link result.

    Returns an empty list when the query failed or produced no usable pods.
    """
    results = []

    search_results = etree.XML(resp.content)

    # return empty array if there are no results
    if search_results.xpath(failure_xpath):
        return []

    try:
        infobox_title = search_results.xpath(input_xpath)[0].text
    except IndexError:
        # The original used a bare except here, which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing Input pod is expected.
        infobox_title = ""

    pods = search_results.xpath(pods_xpath)
    result_chunks = []
    result_content = ""
    for pod in pods:
        pod_id = pod.xpath(pod_id_xpath)[0]
        pod_title = pod.xpath(pod_title_xpath)[0]
        pod_is_result = pod.xpath(pod_primary_xpath)

        subpods = pod.xpath(subpods_xpath)
        if not subpods:
            continue

        # Appends either a text or an image, depending on which one is more suitable
        for subpod in subpods:
            content = subpod.xpath(plaintext_xpath)[0].text
            image = subpod.xpath(image_xpath)

            if content and pod_id not in image_pods:
                # Primary pods (or the first pod with text) supply the
                # one-line content shown next to the link result.
                if pod_is_result or not result_content:
                    if pod_id != "Input":
                        result_content = "%s: %s" % (pod_title, content)

                # if no input pod was found, title is first plaintext pod
                if not infobox_title:
                    infobox_title = content

                content = replace_pua_chars(content)
                result_chunks.append({'label': pod_title, 'value': content})

            elif image:
                result_chunks.append({'label': pod_title,
                                      'image': {'src': image[0].xpath(img_src_xpath)[0],
                                                'alt': image[0].xpath(img_alt_xpath)[0]}})

    if not result_chunks:
        return []

    title = "Wolfram|Alpha (%s)" % infobox_title

    # append infobox
    results.append({'infobox': infobox_title,
                    'attributes': result_chunks,
                    'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})

    # append link to site
    results.append({'url': resp.request.headers['Referer'],
                    'title': title,
                    'content': result_content})

    return results
|
<reponame>lucawen/adb-perm
import os
import subprocess
import sys
import re
from shutil import which
# External executables this tool shells out to; presence checked at startup.
SYSTEM_PACKAGES_REQUIRED = ['adb', 'aapt']
def parse_device_list(str_item):
    """Parse ``adb devices -l`` output (bytes) into (serial, description)
    tuples, skipping the "List of devices attached" header line."""
    lines = [ln for ln in str_item.splitlines()[1:] if ln]
    devices = []
    for line in lines:
        fields = line.split()
        serial = fields[0].decode("utf-8")
        description = " ".join(f.decode("utf-8") for f in fields[1:])
        devices.append((serial, description))
    return devices
def parse_app_list(str_item):
    """Parse ``pm list packages -f`` output (bytes) into
    (package_name, apk_path) tuples.

    NOTE(review): the first output line is skipped, mirroring the original
    behaviour -- confirm the shell output really begins with a header line.
    """
    apps = []
    for raw in str_item.splitlines()[1:]:
        if not raw:
            continue
        line = raw.decode("utf-8")
        payload = line.split(':')[1]
        name = payload.split('=')[-1]
        location = payload.replace("=" + name, "")
        apps.append((name, location))
    return apps
def get_devices():
    """Run ``adb devices -l`` and return parsed (serial, description) tuples.

    Returns an empty list when adb writes anything to stderr.
    """
    proc = subprocess.Popen(
        ["adb", 'devices', '-l'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return parse_device_list(out) if not err else []
def select_device(devices):
    """Print an indexed menu of devices and block on stdin until the user
    types a valid 1-based index; return the chosen device's serial."""
    for i in range(len(devices)):
        id, name = devices[i]
        print("(%d) - %s [%s]" % (i+1, id, name))
    def num(s):
        # Parse s as an int; returning None keeps the prompt loop going.
        try:
            return int(s)
        except ValueError:
            return
    choosen = None
    while True:
        choosen = num(sys.stdin.readline())
        if (choosen is not None and choosen > 0 and choosen <= len(devices)):
            break
    # devices entries are (serial, description); only the serial is needed.
    return devices[choosen - 1][0]
def get_apps():
    """List third-party packages on the selected device via ``pm``.

    Returns (package_name, apk_path) tuples; empty list on an adb error or
    if the user interrupts while waiting.
    """
    command = "pm list packages -f -3"
    proc = subprocess.Popen(
        "adb shell {0}".format(command),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    try:
        out, err = proc.communicate()
    except KeyboardInterrupt:
        return []
    return parse_app_list(out) if not err else []
def download_apk(app_location, apk_name):
    """Pull the APK at app_location from the device into local file apk_name.

    Returns True on success (adb wrote nothing to stderr), False otherwise.
    """
    proc = subprocess.Popen(
        "adb pull {0} {1}".format(app_location, apk_name),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    _, err = proc.communicate()
    return not err
def parse_permissions_from_apk(str_item):
    """Extract permission names from ``aapt d permissions`` output (bytes).

    Only lines containing "uses-permission:" are considered; the first
    single-quoted token on such a line is taken as the permission name.
    The first output line is skipped (header).
    """
    marker = "uses-permission:"
    permissions = []
    for raw in str_item.splitlines()[1:]:
        if not raw:
            continue
        line = raw.decode('utf-8')
        if marker not in line:
            continue
        quoted = re.findall(r"'(.*?)'", line)
        if quoted:
            permissions.append(quoted[0])
    return permissions
def get_permissions_from_apk(apk_location):
    """Return the permission names declared in the APK at apk_location.

    Shells out to ``aapt d permissions``. On error an empty list is now
    returned: the original returned False, which made main() crash when the
    result was iterated by grant_permissions(). An empty list is equally
    falsy, so truthiness-based callers are unaffected.
    """
    proc = subprocess.Popen(
        "aapt d permissions {0}".format(apk_location),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if err:
        return []
    return parse_permissions_from_apk(out)
def grant_permissions(app_name, permissions):
    """Grant each runtime permission to app_name via ``adb shell pm grant``,
    stopping early if the user interrupts with Ctrl-C."""
    for permission in permissions:
        command = f"pm grant {app_name} {permission}"
        proc = subprocess.Popen(
            "adb shell {0}".format(command),
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        try:
            proc.communicate()
        except KeyboardInterrupt:
            break
def _check_required_packages(packages):
is_valid = all(which(it) is not None for it in packages)
if not is_valid:
print("required packages not found.")
print(", ".join(packages))
sys.exit(1)
def progress(count, total, suffix=''):
    """Draw a carriage-return style textual progress bar on stdout."""
    bar_len = 60
    filled = int(round(bar_len * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled + '-' * (bar_len - filled)
    sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
    sys.stdout.flush()
def main():
    """Entry point: pick a connected device, then for every third-party app
    pull its APK locally, read the permissions it declares, and grant them
    all via adb."""
    _check_required_packages(SYSTEM_PACKAGES_REQUIRED)
    devices = get_devices()
    if not devices:
        print("Could not find any device")
        sys.exit(1)
    device_selected = select_device(devices)
    # adb honours ANDROID_SERIAL, so every later adb call targets this device.
    os.environ['ANDROID_SERIAL'] = device_selected
    print("Device selected", device_selected)
    print("Getting apps")
    apps = get_apps()
    total = len(apps)
    count = 0
    for app_name, app_location in apps:
        count = count + 1
        apk_name = f"{app_name}.apk"
        # The APK is pulled locally only so aapt can inspect it; it is
        # deleted right after the permissions are read.
        is_apk = download_apk(app_location, apk_name)
        if is_apk:
            permissions = get_permissions_from_apk(apk_name)
            os.remove(apk_name)
            grant_permissions(app_name, permissions)
            print(app_name, "permissions grant")
        progress(count, total)

if __name__ == "__main__":
    main()
|
<reponame>kishorkunal-raj/qpid-dispatch
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""Interface between python and libqpid-dispatch.so.
This module contains python ctypes definitions to directly call functions in the
libqpid-dispatch.so library from python.
The C library also adds the following C extension types to this module:
- LogAdapter: Logs to the C logging system.
- IoAdapter: Receives messages from the router into python.
This module also prevents the proton python module from being accidentally loaded.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import sys, ctypes
from ctypes import c_char_p, c_long, py_object
import qpid_dispatch_site
from .compat import IS_PY2
class CError(Exception):
    """Exception raised if there is an error in a C call"""
    # Raised by QdDll._prototype's errcheck wrapper whenever
    # qd_error_code() reports a non-zero error after a library call.
    pass
class QdDll(ctypes.PyDLL):
    """
    Load the library, set up function prototypes.
    NOTE: We use the python calling convention because the C library
    internally makes python calls.
    """
    def __init__(self, handle):
        super(QdDll, self).__init__("qpid-dispatch", handle=handle)

        # Types
        self.qd_dispatch_p = ctypes.c_void_p

        # No check on qd_error_* functions, it would be recursive
        self._prototype(self.qd_error_code, c_long, [], check=False)
        self._prototype(self.qd_error_message, c_char_p, [], check=False)

        self._prototype(self.qd_log_entity, c_long, [py_object])
        # Router / listener / connector configuration entry points. The
        # argument and return types here must match the C declarations in
        # libqpid-dispatch exactly.
        self._prototype(self.qd_dispatch_configure_router, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_prepare, None, [self.qd_dispatch_p])
        self._prototype(self.qd_dispatch_configure_listener, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_connector, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_ssl_profile, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_sasl_plugin, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_tcp_listener, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_tcp_connector, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_http_listener, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_http_connector, ctypes.c_void_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_delete_tcp_listener, None, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_dispatch_delete_tcp_connector, None, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_dispatch_delete_http_listener, None, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_dispatch_delete_http_connector, None, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_connection_manager_delete_listener, None, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_connection_manager_delete_connector, None, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_connection_manager_delete_ssl_profile, ctypes.c_bool, [self.qd_dispatch_p, ctypes.c_void_p])
        self._prototype(self.qd_connection_manager_delete_sasl_plugin, ctypes.c_bool, [self.qd_dispatch_p, ctypes.c_void_p])
        # Addressing / routing configuration.
        self._prototype(self.qd_dispatch_configure_address, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_link_route, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_auto_link, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_exchange, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_configure_binding, None, [self.qd_dispatch_p, py_object])
        # Policy subsystem.
        self._prototype(self.qd_dispatch_configure_policy, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_register_policy_manager, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_policy_c_counts_alloc, c_long, [], check=False)
        self._prototype(self.qd_dispatch_policy_c_counts_free, None, [c_long], check=False)
        self._prototype(self.qd_dispatch_policy_c_counts_refresh, None, [c_long, py_object])
        self._prototype(self.qd_dispatch_policy_host_pattern_add, ctypes.c_bool, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_policy_host_pattern_remove, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_policy_host_pattern_lookup, c_char_p, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_dispatch_register_display_name_service, None, [self.qd_dispatch_p, py_object])
        # Management agent / runtime hooks.
        self._prototype(self.qd_dispatch_set_agent, None, [self.qd_dispatch_p, py_object])
        self._prototype(self.qd_router_setup_late, None, [self.qd_dispatch_p])
        self._prototype(self.qd_dispatch_router_lock, None, [self.qd_dispatch_p])
        self._prototype(self.qd_dispatch_router_unlock, None, [self.qd_dispatch_p])
        self._prototype(self.qd_connection_manager_start, None, [self.qd_dispatch_p])
        self._prototype(self.qd_entity_refresh_begin, c_long, [py_object])
        self._prototype(self.qd_entity_refresh_end, None, [])
        self._prototype(self.qd_log_recent_py, py_object, [c_long])

    def _prototype(self, f, restype, argtypes, check=True):
        """Set up the return and argument types and the error checker for a
        ctypes function"""
        def _do_check(result, func, args):
            # After every call, consult the library's error state and raise
            # CError so C failures surface as Python exceptions.
            if check and self.qd_error_code():
                raise CError(self.qd_error_message())
            if restype is c_char_p and result and not IS_PY2:
                # in python3 c_char_p returns a byte type for the error
                # message. We need to convert that to a string
                result = result.decode('utf-8')
            return result
        f.restype = restype
        f.argtypes = argtypes
        f.errcheck = _do_check
        return f

    def function(self, fname, restype, argtypes, check=True):
        # Convenience: look a symbol up by name and prototype it in one step.
        return self._prototype(getattr(self, fname), restype, argtypes, check)
# Prevent accidental loading of the proton python module inside dispatch.
# The proton-C library is linked with the dispatch C library, loading the proton
# python module loads a second copy of the library and mayhem ensues.
#
# Note the FORBIDDEN list is over-written to disable this tests in mock python
# testing code.
FORBIDDEN = ["proton"]

def check_forbidden():
    """Raise ImportError if any forbidden module has already been imported."""
    loaded = set(FORBIDDEN) & set(sys.modules)
    if loaded:
        raise ImportError("Forbidden modules loaded: '%s'." % "', '".join(loaded))
def import_check(name, *args, **kw):
    """Stand-in for builtins.__import__ that refuses FORBIDDEN module names
    and delegates everything else to the real import machinery."""
    if name not in FORBIDDEN:
        return builtin_import(name, *args, **kw)
    raise ImportError("Python code running inside a dispatch router cannot import '%s', use the 'dispatch' module for internal messaging" % name)
# Fail fast if a forbidden module is already loaded, then install the import
# hook so later imports are vetted too.
check_forbidden()

# The real __import__ lives in __builtin__ on py2 and builtins on py3.
if IS_PY2:
    import __builtin__ as builtins
else:
    import builtins
builtin_import = builtins.__import__
builtins.__import__ = import_check
|
<filename>networks/unet_for_TU.py
import math
from os.path import join as pjoin
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """Two 3x3 conv+BN+ReLU stages followed by a 2x2 max-pool.

    Spatial size is halved; channel count goes in_channels -> out_channels.
    """

    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        # nn.Sequential(*layers) yields the same indexed submodule names
        # ('conv.0', 'conv.1', ...) as the original inline construction.
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class SingleConv_no_pool(nn.Module):
    """Two 3x3 conv+BN+ReLU stages with NO pooling (spatial size preserved).

    Single-channel inputs are tiled to three channels before the convs --
    NOTE(review): this only matches convs built with in_channels == 3.
    """

    def __init__(self, in_channels, out_channels):
        super(SingleConv_no_pool, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        # Grayscale input: replicate the channel to fake an RGB image.
        if x.size()[1] == 1:
            x = x.repeat(1, 3, 1, 1)
        return self.conv(x)
class SingleConv_with_pool(nn.Module):
    """One 3x3 conv+BN+ReLU stage followed by a 2x2 max-pool (halves H, W)."""

    def __init__(self, in_channels, out_channels):
        super(SingleConv_with_pool, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class UNET_encoder(nn.Module):
    """UNet-style encoder for 3-channel input.

    forward() returns the deepest feature map plus the intermediate feature
    maps (deepest-first) for use as skip connections.
    """

    def __init__(self):
        super().__init__()
        width = 32
        self.width = width
        # Submodule names ('unit1'..'unit5', 'block1'..'block4') are part of
        # the checkpoint key layout -- do not rename them.
        self.root = nn.Sequential(OrderedDict([
            ('unit1', SingleConv_no_pool(3, width))
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit2', SingleConv_with_pool(width, width * 2))]
            ))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit3', DoubleConv(width * 2, width * 4))]
            ))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit4', DoubleConv(width * 4, width * 8))]
            ))),
            ('block4', nn.Sequential(OrderedDict(
                [('unit5', DoubleConv(width * 8, width * 16))]
            ))),
        ]))

    def forward(self, x):
        x = self.root(x)
        skips = [x]
        # Run every stage but the last, recording each output as a skip.
        for stage in list(self.body)[:-1]:
            x = stage(x)
            skips.append(x)
        x = self.body[-1](x)
        return x, skips[::-1]
class UNET_encoder_FETS(nn.Module):
    """UNet-style encoder for 4-channel input (FeTS variant).

    Identical to UNET_encoder except the stem takes 4 input channels.
    forward() returns the deepest feature map plus the intermediate feature
    maps (deepest-first) for skip connections.
    """

    def __init__(self):
        super().__init__()
        width = 32
        self.width = width
        # Submodule names are part of the checkpoint key layout -- keep them.
        self.root = nn.Sequential(OrderedDict([
            ('unit1', SingleConv_no_pool(4, width))
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit2', SingleConv_with_pool(width, width * 2))]
            ))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit3', DoubleConv(width * 2, width * 4))]
            ))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit4', DoubleConv(width * 4, width * 8))]
            ))),
            ('block4', nn.Sequential(OrderedDict(
                [('unit5', DoubleConv(width * 8, width * 16))]
            ))),
        ]))

    def forward(self, x):
        x = self.root(x)
        skips = [x]
        for stage in list(self.body)[:-1]:
            x = stage(x)
            skips.append(x)
        x = self.body[-1](x)
        return x, skips[::-1]
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output
import pandas as pd
import plotly.express as px
import numpy as np
from joblib import load
# Imports from this application
from app import app
#Load pipeline
# Pre-trained sklearn-style pipeline used by the predict callback below.
pipeline = load('./assets/final_pipeline.joblib')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
    [ #Gender dropdown menu (male, female)
        dcc.Markdown('## Predictions', className='mb-5'),
        dcc.Markdown('Gender'),
        dcc.Dropdown(
            id = 'Sex',
            options = [
                {'label': 'Male', 'value': 'Male'},
                {'label': 'Female', 'value': 'Female'},
            ],
            className = 'mb-5',
            value = 'Male',
            placeholder='Select a gender'
        ),
        #Age
        html.Hr(),
        html.Label('How old are you?',style={'display':'inline-block'}),
        dcc.Slider(
            id = 'Age',
            min = 10,
            max = 100,
            step = 1,
            value = 30,
            marks = {'10': '10','25':'25','50': '50', '75': '75','100': '100'}),
        html.Br(),
        # Placeholder text; overwritten live by the Age callback.
        html.P('10', id='age-selected-selected'),
        html.Br(),
        #Height
        html.Hr(),
        html.Label('How tall are you? (in inches!)',style={'display':'inline-block'}),
        dcc.Slider(
            id = 'Height',
            min = 50,
            max = 90,
            step = 1,
            value = 70,
            marks = {'50': '50','60':'60','70': '70', '80': '80', '90': '90'}),
        html.Br(),
        html.P('120', id='height-selected-selected'),
        html.Br(),
        #Weight
        html.Hr(),
        html.Label('How much do you weigh? (in lbs!)',style={'display':'inline-block'}),
        dcc.Slider(
            id = 'Weight',
            min = 55,
            max = 230,
            step = 1,
            value = 130,
            marks = {'55': '55','75':'75','100': '100', '125': '125', '150': '150', '175': '175', '200':'200',
                     '230':'230'}),
        html.Br(),
        html.P('130', id='weight-selected-slider'),
        html.Br(),
        #year
        html.P('Year of Olympics: ',style={'display':'inline-block'}),
        daq.NumericInput(id='Year',
                         value = 2000,
                         min = 1896,
                         max = 2016,
                         style={'display':'inline-block'}),
        #season
        dcc.Markdown('Season'),
        dcc.Dropdown(
            id = 'Season',
            options = [
                {'label': 'Winter', 'value': 'Winter'},
                {'label': 'Summer', 'value': 'Summer'},
            ],
            className = 'mb-5',
            value = 'Summer',
            placeholder='Select a season'
        ),
    ],
    md=4,
)
# Right column: the prediction chart filled in by the predict callback.
column2 = dbc.Col(
    [
        html.H2('Top 5 sports you would most likely medal in!', className='mb-5'),
        #html.Div(id='prediction-content', className='lead')
        dcc.Graph(id='prediction-content')
    ]
)
layout = dbc.Row([column1, column2])
@app.callback(
    Output(component_id='age-selected-selected', component_property='children'),
    [Input(component_id='Age', component_property='value')]
)
def update_output_div(input_value):
    # Echo the Age slider value as readable text under the slider.
    # NOTE(review): all three slider callbacks share this function name;
    # Dash registers them at decoration time so it works, but distinct
    # names would be clearer.
    return 'You are "{}" years old'.format(input_value)
@app.callback(
    Output(component_id='height-selected-selected', component_property='children'),
    [Input(component_id='Height', component_property='value')]
)
def update_output_div(input_value):
    # Echo the Height slider value as readable text under the slider.
    return 'You are "{}" inches tall'.format(input_value)
@app.callback(
    Output(component_id='weight-selected-slider', component_property='children'),
    [Input(component_id='Weight', component_property='value')]
)
def update_output_div(input_value):
    # Echo the Weight slider value as readable text under the slider.
    return 'You weigh "{}" pounds'.format(input_value)
@app.callback(
    Output('prediction-content', 'figure'),
    [Input('Sex','value'),
     Input('Age', 'value'),
     Input('Height', 'value'),
     Input('Weight', 'value'),
     Input('Year', 'value'),
     Input('Season','value')],
)
def predict(gender, age, height, weight, year, season):
    """Return a bar chart of the 5 sports with the highest predicted medal
    probability for the given athlete profile.

    The UI collects height in inches and weight in pounds; the model expects
    metric, so convert before predicting. (The locals were previously
    misnamed ``Height_in``/``Weight_lb`` although they hold the converted
    metric values.)
    """
    height_cm = height * 2.54  # inches -> centimetres
    weight_kg = weight / 2.2   # pounds -> kilograms
    df = pd.DataFrame(
        columns=['Sex', 'Age', 'Height', 'Weight', 'Year', 'Season'],
        data=[[gender, age, height_cm, weight_kg, year, season]]
    )
    y_pred = pipeline.predict_proba(df)[0]
    sports = pipeline.classes_
    # Indices of the five largest probabilities (argsort is ascending).
    top_5_idx = np.argsort(y_pred)[-5:]
    top_5_dict = {sports[i]: y_pred[i] for i in top_5_idx}
    output_df = pd.DataFrame.from_dict(top_5_dict, orient='index').reset_index()
    output_df.columns = ['Sport', 'Probability']
    sorted_df = output_df.sort_values(by='Probability', ascending=False)
    return px.bar(sorted_df, x='Sport', y='Probability')
|
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch
import torch.nn as nn
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from torch.autograd import Variable
import numpy as np
import misc as ms
from skimage import morphology as morph
from torch.autograd import Function
from losses import saliency
from core import blobs_utils as bu
# from core import proposals as prp
from . import base_model as bm
from addons.pycocotools import mask as maskUtils
# from core import score_functions as sf
import ann_utils as au
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
import torch
import torchvision
import argparse
import importlib
import numpy as np
from torch.utils.data import DataLoader
import scipy.misc
import torch.nn.functional as F
import os.path
import torch
import torch.nn as nn
import torch.sparse as sparse
import torch.nn.functional as F
from models import lcfcn
class WiseAffinity(bm.BaseModel):
    """Segmentation model that diffuses LCFCN_BO proposal masks with an
    affinity-based random walk.

    NOTE(review): ``predict`` still contains live ipdb breakpoints and
    references an undefined ``cam_rw`` after the proposal loop -- this is
    unfinished work-in-progress code and cannot run to completion.
    """
    def __init__(self, train_set, **model_options):
        super().__init__(train_set, **model_options)
        self.aff = AffinityHead()
        # Pre-trained affinity weights (hard-coded cluster path).
        self.aff.load_state_dict(torch.load("/mnt/datasets/public/issam/res38_aff.pth"))
        self.beta = 8.0   # exponent sharpening the affinity matrix
        self.logt = 8     # number of trans_mat squarings (walk length 2**logt)
        self.alpha = 16.0
        # self.cam = CAMHead()
        self.lcfcn = lcfcn.LCFCN_BO(train_set)
        path = "/mnt/projects/counting/Saves/main//dataset:Pascal2012_model:LCFCN_BO_metric:MAE_loss:lcfcnLoss_config:wtp//State_Dicts/best_model.pth"
        self.lcfcn.load_state_dict(torch.load(path))
        cropsize = 448
        radius = 5
        self.extract_aff_labels = ExtractAffinityLabelInRadius(cropsize=cropsize//8, radius=radius)
        # # FREEZE BATCH NORMS
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    @torch.no_grad()
    def predict(self, batch, predict_method="counts"):
        """Diffuse each proposal mask through the affinity random walk.

        NOTE(review): contains ipdb breakpoints and an undefined ``cam_rw``.
        """
        self.eval()
        img = batch["images"]
        # Pad H and W up to multiples of 8: the affinity head works on a
        # stride-8 feature grid (see the /8 arithmetic below).
        padded_size = (int(np.ceil(img.shape[2]/8)*8), int(np.ceil(img.shape[3]/8)*8))
        p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2])
        img = F.pad(img, p2d)
        dheight = int(np.ceil(img.shape[2]/8))
        dwidth = int(np.ceil(img.shape[3]/8))
        n, c, h, w = img.shape
        lcfcn_pointList = au.mask2pointList(batch["points"])["pointList"]
        counts = np.zeros(self.n_classes-1)
        # No annotated points: return an empty prediction dict.
        if len(lcfcn_pointList) == 0:
            return {"blobs": np.zeros((h,w), int), "annList":[], "counts":counts}
        propDict = au.pointList2propDict(lcfcn_pointList, batch,
                                         proposal_type="sharp",
                                         thresh=0.5)
        # Affinity matrix -> row-normalised transition matrix, squared logt
        # times to simulate a long random walk.
        aff_mat = torch.pow(self.aff.forward(img.cuda(), True), self.beta)
        trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
        for _ in range(self.logt):
            trans_mat = torch.matmul(trans_mat, trans_mat)
        import ipdb; ipdb.set_trace()  # breakpoint ac0c04d2 //
        for prop in propDict["propDict"]:
            mask = prop["annList"][0]["mask"]
            mask = torch.FloatTensor(mask)[None]
            mask = F.pad(mask, p2d)
            # Downsample the proposal mask to the stride-8 grid, diffuse it
            # through the walk, then upsample back to image resolution.
            mask_arr = F.avg_pool2d(mask, 8, 8)
            mask_vec = mask_arr.view(1, -1)
            mask_rw = torch.matmul(mask_vec.cuda(), trans_mat)
            mask_rw = mask_rw.view(1, dheight, dwidth)
            mask_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(mask_rw[None])
            import ipdb; ipdb.set_trace()  # breakpoint 89e7f819 //
        # NOTE(review): cam_rw is never assigned in this method -- the line
        # below raises NameError if ever reached.
        cam_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_rw)
        _, cam_rw_pred = torch.max(cam_rw, 1)
        # Crop the padding back off before returning.
        res = np.uint8(cam_rw_pred.cpu().data[0])[:h, :w]
        if predict_method == "annList":
            pass
        else:
            return img, res
        # scipy.misc.imsave(os.path.join(args.out_rw, name + '.png'), res)

    @torch.no_grad()
    def visualize(self, batch, predict_method="counts"):
        """Show the (padded) input image next to the refined prediction."""
        img, res = self.predict(batch)
        ms.images(img, res, denorm=1)
class AffinityNetBasic(bm.BaseModel):
    """Near-duplicate of WiseAffinity that wraps ``lcfcn.LCFCN`` instead of
    ``lcfcn.LCFCN_BO``.

    NOTE(review): same unfinished state as WiseAffinity -- live ipdb
    breakpoints and an undefined ``cam_rw`` in ``predict``.
    """
    def __init__(self, train_set, **model_options):
        super().__init__(train_set, **model_options)
        self.aff = AffinityHead()
        self.aff.load_state_dict(torch.load("/mnt/datasets/public/issam/res38_aff.pth"))
        self.beta = 8.0   # exponent sharpening the affinity matrix
        self.logt = 8     # number of trans_mat squarings
        self.alpha = 16.0
        # self.cam = CAMHead()
        self.lcfcn = lcfcn.LCFCN(train_set)
        path = "/mnt/projects/counting/Saves/main//dataset:Pascal2012_model:LCFCN_BO_metric:MAE_loss:lcfcnLoss_config:wtp//State_Dicts/best_model.pth"
        self.lcfcn.load_state_dict(torch.load(path))
        cropsize = 448
        radius = 5
        self.extract_aff_labels = ExtractAffinityLabelInRadius(cropsize=cropsize//8, radius=radius)
        # # FREEZE BATCH NORMS
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    @torch.no_grad()
    def predict(self, batch, predict_method="counts"):
        """Diffuse each proposal mask through the affinity random walk.

        NOTE(review): contains ipdb breakpoints and an undefined ``cam_rw``.
        """
        self.eval()
        img = batch["images"]
        # Pad H and W to multiples of 8 for the stride-8 affinity grid.
        padded_size = (int(np.ceil(img.shape[2]/8)*8), int(np.ceil(img.shape[3]/8)*8))
        p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2])
        img = F.pad(img, p2d)
        dheight = int(np.ceil(img.shape[2]/8))
        dwidth = int(np.ceil(img.shape[3]/8))
        n, c, h, w = img.shape
        lcfcn_pointList = au.mask2pointList(batch["points"])["pointList"]
        counts = np.zeros(self.n_classes-1)
        if len(lcfcn_pointList) == 0:
            return {"blobs": np.zeros((h,w), int), "annList":[], "counts":counts}
        propDict = au.pointList2propDict(lcfcn_pointList, batch,
                                         proposal_type="sharp",
                                         thresh=0.5)
        # Affinity -> transition matrix, squared logt times (random walk).
        aff_mat = torch.pow(self.aff.forward(img.cuda(), True), self.beta)
        trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
        for _ in range(self.logt):
            trans_mat = torch.matmul(trans_mat, trans_mat)
        import ipdb; ipdb.set_trace()  # breakpoint ac0c04d2 //
        for prop in propDict["propDict"]:
            mask = prop["annList"][0]["mask"]
            mask = torch.FloatTensor(mask)[None]
            mask = F.pad(mask, p2d)
            # Downsample the mask to stride 8, diffuse, upsample back.
            mask_arr = F.avg_pool2d(mask, 8, 8)
            mask_vec = mask_arr.view(1, -1)
            mask_rw = torch.matmul(mask_vec.cuda(), trans_mat)
            mask_rw = mask_rw.view(1, dheight, dwidth)
            mask_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(mask_rw[None])
            import ipdb; ipdb.set_trace()  # breakpoint 89e7f819 //
        # NOTE(review): cam_rw is never assigned -- NameError if reached.
        cam_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_rw)
        _, cam_rw_pred = torch.max(cam_rw, 1)
        res = np.uint8(cam_rw_pred.cpu().data[0])[:h, :w]
        if predict_method == "annList":
            pass
        else:
            return img, res
        # scipy.misc.imsave(os.path.join(args.out_rw, name + '.png'), res)

    @torch.no_grad()
    def visualize(self, batch, predict_method="counts"):
        """Show the (padded) input image next to the refined prediction."""
        img, res = self.predict(batch)
        ms.images(img, res, denorm=1)
class AffinityNet(bm.BaseModel):
    """CAM seeds refined by an affinity random walk: per-class activation
    maps are normalised, given a background score, diffused through the
    transition matrix, and argmax'd into a 21-way label map.

    NOTE(review): ``predict`` stops at a live ipdb breakpoint before
    returning, and ``CAMHead``/``AffinityHead`` are not defined in this file
    (a CAMHead sketch exists only in comments below).
    """
    def __init__(self, train_set, **model_options):
        super().__init__(train_set, **model_options)
        self.aff = AffinityHead()
        self.aff.load_state_dict(torch.load("/mnt/datasets/public/issam/res38_aff.pth"))
        self.beta = 8.0   # exponent sharpening the affinity matrix
        self.logt = 8     # number of trans_mat squarings
        self.alpha = 16.0 # exponent of the derived background score
        self.cam = CAMHead()
        self.cam.load_state_dict(torch.load("/mnt/datasets/public/issam/res38_cls.pth"))
        self.lcfcn = lcfcn.LCFCN(train_set)
        path = "/mnt/projects/counting/Saves/main//dataset:Pascal2012_model:LCFCN_BO_metric:MAE_loss:lcfcnLoss_config:wtp//State_Dicts/best_model.pth"
        self.lcfcn.load_state_dict(torch.load(path))
        self.extract_aff_labels = ExtractAffinityLabelInRadius()
        # # FREEZE BATCH NORMS
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    @torch.no_grad()
    def predict(self, batch, predict_method="counts"):
        """Build per-class CAMs, diffuse them through the affinity walk, and
        argmax into a label map. NOTE(review): hits an ipdb breakpoint."""
        self.eval()
        img = batch["images"]
        # Pad H and W to multiples of 8 for the stride-8 affinity grid.
        padded_size = (int(np.ceil(img.shape[2]/8)*8), int(np.ceil(img.shape[3]/8)*8))
        p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2])
        img = F.pad(img, p2d)
        dheight = int(np.ceil(img.shape[2]/8))
        dwidth = int(np.ceil(img.shape[3]/8))
        n, c, h, w = img.shape
        # Image-level presence labels derived from the per-class counts.
        label = (batch["counts"]>0)
        ###### CAM
        # cam = np.load(os.path.join(args.cam_dir, name + '.npy')).item()
        cam = self.cam.forward_cam(img.cuda())
        cam = F.interpolate(cam, (h,w), mode='bilinear', align_corners=False)[0]
        # ms.images(ms.gray2cmap(cam[18]))
        # Zero out CAMs of absent classes (20 foreground classes).
        cam = cam.cpu().numpy() * label.clone().view(20, 1, 1).numpy()
        sum_cam =cam
        # Normalise each class map to [0, 1].
        norm_cam = sum_cam / (np.max(sum_cam, (1, 2), keepdims=True) + 1e-5)
        cam_dict = {}
        for i in range(20):
            if label.squeeze()[i].item() > 1e-5:
                cam_dict[i] = norm_cam[i]
        # bg_score = [np.ones_like(norm_cam[0])*0.2]
        # pred_cam = np.argmax(np.concatenate((bg_score, norm_cam)), 0)
        # ms.images(img, pred_cam, denorm=1)
        ######
        # 21 channels: slot 0 is background, 1..20 the present classes.
        cam_full_arr = np.zeros((21, h, w), np.float32)
        for k, v in cam_dict.items():
            cam_full_arr[k+1] = v
        # Background score: low where any foreground CAM is strong.
        cam_full_arr[0] = (1 - np.max(cam_full_arr[1:], (0), keepdims=False))**self.alpha
        cam_full_arr = np.pad(cam_full_arr, ((0, 0), (0, p2d[3]), (0, p2d[1])), mode='constant')
        # Affinity -> transition matrix, squared logt times (random walk).
        aff_mat = torch.pow(self.aff.forward(img.cuda(), True), self.beta)
        trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
        for _ in range(self.logt):
            trans_mat = torch.matmul(trans_mat, trans_mat)
        # Downsample the CAM stack to stride 8, diffuse, upsample, argmax.
        cam_full_arr = torch.from_numpy(cam_full_arr)
        cam_full_arr = F.avg_pool2d(cam_full_arr, 8, 8)
        cam_vec = cam_full_arr.view(21, -1)
        cam_rw = torch.matmul(cam_vec.cuda(), trans_mat)
        cam_rw = cam_rw.view(1, 21, dheight, dwidth)
        cam_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_rw)
        _, cam_rw_pred = torch.max(cam_rw, 1)
        # ms.images(cam_rw)
        import ipdb; ipdb.set_trace()  # breakpoint 723d8a42 //
        cam_full_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_full_arr[None])
        # ms.images(img, cam_full_rw.max(1)[1], denorm=1)
        # ms.images(img, cam_rw.max(1)[1], denorm=1)
        # Crop the padding back off before returning.
        res = np.uint8(cam_rw_pred.cpu().data[0])[:h, :w]
        if predict_method == "annList":
            pass
        else:
            return img, res
        # scipy.misc.imsave(os.path.join(args.out_rw, name + '.png'), res)

    @torch.no_grad()
    def visualize(self, batch, predict_method="counts"):
        """Show the (padded) input image next to the refined prediction."""
        img, res = self.predict(batch)
        ms.images(img, res, denorm=1)
# import network.resnet38d
# from tool import pyutils
# class CAMHead(nn.Module):
# def __init__(self, num_classes):
# super().__init__()
# self.fc8 = nn.Conv2d(2048, num_classes-1, 1, bias=False)
# torch.nn.init.xavier_uniform_(self.fc8.weight)
# # # FREEZE BATCH NORMS
# for m in self.modules():
# if isinstance(m, nn.BatchNorm2d):
# m.weight.requires_grad = False
# m.bias.requires_grad = False
# def forward(self, x):
# x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)
# x = self.fc8(x)
# x = x.view(x.size(0), -1)
# return x
# def forward_cam(self, x):
# x = F.conv2d(x, self.fc8.weight)
# x = F.relu(x)
# return x
class CAM(bm.BaseModel):
    """Image-level classifier: a feature extractor followed by a CAM head.
    ``forward_cam`` exposes the dense class activation maps."""

    def __init__(self, train_set, **model_options):
        super().__init__(train_set, **model_options)
        self.feature_extracter = bm.FeatureExtracter()
        self.cam = CAMHead(train_set.n_classes)
        # Freeze all batch-norm affine parameters (pretrained backbone).
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.weight.requires_grad = False
                module.bias.requires_grad = False

    def forward(self, x):
        _, _, features = self.feature_extracter.extract_features(x)
        return self.cam(features)

    def forward_cam(self, x):
        _, _, features = self.feature_extracter.extract_features(x)
        return self.cam.forward_cam(features)

    @torch.no_grad()
    def visualize(self, batch, cam_index=None):
        """Show the annotated points for ``cam_index`` alongside its
        activation map (defaults to the first predicted class)."""
        cam = ms.resizeTo(self.forward_cam(batch["images"].cuda()), batch["images"])
        preds = self.predict(batch, "counts")
        print(preds)
        if cam_index is None:
            cam_index = preds["indices"][0]
        image_points = ms.get_image(batch["images"], (batch["points"]==(cam_index+1)).long(), denorm=1, enlarge=1)
        ms.images(image_points[0], ms.gray2cmap(ms.t2n(cam[:,cam_index])))

    @torch.no_grad()
    def predict(self, batch, predict_method="counts"):
        """Dispatch prediction by ``predict_method``; unknown methods fall
        through to ``self.predict_dict``."""
        self.sanity_checks(batch)
        self.eval()
        if predict_method == "counts":
            probs = torch.sigmoid(self(batch["images"].cuda())).data
            counts = probs > 0.5
            return {"counts": counts,
                    "indices": np.where(counts != 0)[1].tolist()}
        if predict_method in ("probs", "points", "blobs"):
            probs = F.softmax(self(batch["images"].cuda()), dim=1).data
            if predict_method == "probs":
                return {"probs": probs}
            blob_dict = bu.probs2blobs(probs)
            if predict_method == "points":
                return {"points": blob_dict["points"],
                        "pointList": blob_dict["pointList"],
                        "probs": probs}
            return blob_dict
        print("Used predict method {}".format(predict_method))
        return self.predict_dict[predict_method](self, batch)
class Hybrid(bm.BaseModel):
    """Model combining an affinity head and a CAM head on top of a shared
    feature extractor."""

    def __init__(self, train_set, **model_options):
        super().__init__(train_set, **model_options)
        self.feature_extracter = bm.FeatureExtracter()
        self.affinity = AffinityHead(train_set.n_classes)
        self.cam = CAMHead(train_set.n_classes)
        # Freeze all batch-norm affine parameters (pretrained backbone).
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    def forward(self, x):
        """Global-average-pool the deepest features and score them.

        NOTE(review): ``self.fc8`` is never defined on this class (it lives
        on CAMHead), so this raises AttributeError as written -- probably
        ``self.cam.fc8`` was intended; confirm before use.
        BUGFIX: removed a leftover `import ipdb; ipdb.set_trace()`
        debugger breakpoint.
        """
        x_8s, x_16s, x = self.feature_extracter.extract_features(x)
        x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)
        x = self.fc8(x)
        x = x.view(x.size(0), -1)
        return x

    def forward_cam(self, x):
        """Dense class activation maps from the deepest features (same
        ``self.fc8`` caveat as ``forward``)."""
        x_8s, x_16s, x = self.feature_extracter.extract_features(x)
        x = F.conv2d(x, self.fc8.weight)
        x = F.relu(x)
        return x
# def get_parameter_groups(self):
# groups = ([], [], [], [])
# for m in self.modules():
# if (isinstance(m, nn.Conv2d) or isinstance(m, nn.modules.normalization.GroupNorm)):
# if m.weight.requires_grad:
# if m in self.from_scratch_layers:
# groups[2].append(m.weight)
# else:
# groups[0].append(m.weight)
# if m.bias is not None and m.bias.requires_grad:
# if m in self.from_scratch_layers:
# groups[3].append(m.bias)
# else:
# groups[1].append(m.bias)
# return groups
def get_indices_of_pairs(radius, size):
    """Enumerate (source, neighbour) flat-index pairs for every pixel pair
    closer than ``radius`` on a ``size`` = (height, width) grid.

    Only "forward" displacements are used, so each unordered pair appears
    exactly once. Returns the flat source indices and the concatenated
    neighbour indices (one group per displacement).
    """
    # Displacements (dy, dx) with dy >= 0 inside the open disc of `radius`.
    offsets = [(0, dx) for dx in range(1, radius)]
    offsets += [(dy, dx)
                for dy in range(1, radius)
                for dx in range(-radius + 1, radius)
                if dx * dx + dy * dy < radius * radius]

    margin = radius - 1
    grid = np.arange(0, size[0] * size[1], dtype=np.int64).reshape(size[0], size[1])

    height_cropped = size[0] - margin
    width_cropped = size[1] - 2 * margin
    # Sources: every pixel far enough from the borders to have all neighbours.
    sources = grid[:-margin, margin:-margin].reshape(-1)

    neighbour_groups = [
        grid[dy:dy + height_cropped,
             margin + dx:margin + dx + width_cropped].reshape(-1)
        for dy, dx in offsets
    ]
    return sources, np.concatenate(neighbour_groups, axis=0)
class ExtractAffinityLabelInRadius():
def __init__(self, cropsize, radius=5):
self.radius = radius
self.search_dist = []
for x in range(1, radius):
self.search_dist.append((0, x))
for y in range(1, radius):
for x in range(-radius+1, radius):
if x*x + y*y < radius*radius:
self.search_dist.append((y, x))
self.radius_floor = radius-1
self.crop_height = cropsize - self.radius_floor
self.crop_width = cropsize - 2 * self.radius_floor
return
def __call__(self, label):
labels_from = label[:-self.radius_floor, self.radius_floor:-self.radius_floor]
labels_from = np.reshape(labels_from, [-1])
labels_to_list = []
valid_pair_list = []
for dy, dx in self.search_dist:
labels_to = label[dy:dy+self.crop_height, self.radius_floor+dx:self.radius_floor+dx+self.crop_width]
labels_to = np.reshape(labels_to, [-1])
valid_pair = np.logical_and(np.less(labels_to, 255), np.less(labels_from, 255))
labels_to_list.append(labels_to)
valid_pair_list.append(valid_pair)
bc_labels_from = np.expand_dims(labels_from, 0)
concat_labels_to = np.stack(labels_to_list)
concat_valid_pair = np.stack(valid_pair_list)
pos_affinity_label = np.equal(bc_labels_from, concat_labels_to)
bg_pos_affinity_label = np.logical_and(pos_affinity_label, np.equal(bc_labels_from, 0)).astype(np.float32)
fg_pos_affinity_label = np.logical_and(np.logical_and(pos_affinity_label, np.not_equal(bc_labels_from, 0)), concat_valid_pair).astype(np.float32)
neg_affinity_label = np.logical_and(np.logical_not(pos_affinity_label), concat_valid_pair).astype(np.float32)
return torch.from_numpy(bg_pos_affinity_label), torch.from_numpy(fg_pos_affinity_label), torch.from_numpy(neg_affinity_label)
def get_indices_in_radius(height, width, radius):
    """Return an (N, 2) array of (from, to) flat-index pairs for every pixel
    pair within ``radius`` on a height x width grid. Forward-only
    displacements are used, so each unordered pair is listed once."""
    offsets = [(0, dx) for dx in range(1, radius)]
    offsets += [(dy, dx)
                for dy in range(1, radius)
                for dx in range(-radius + 1, radius)
                if dx * dx + dy * dy < radius * radius]

    grid = np.arange(0, height * width, dtype=np.int64).reshape(height, width)
    margin = radius - 1
    rows = height - margin
    cols = width - 2 * margin
    sources = grid[:-margin, margin:-margin].reshape(-1)

    pair_groups = []
    for dy, dx in offsets:
        targets = grid[dy:dy + rows, margin + dx:margin + dx + cols].reshape(-1)
        pair_groups.append(np.stack((sources, targets), axis=1))
    return np.concatenate(pair_groups, axis=0)
class ResBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice).

    :param stride: stride of the first 3x3 conv.
    :param first_dilation: dilation of the first conv; defaults to
        ``dilation`` when not given.
    """

    def __init__(self, in_channels, mid_channels, out_channels, stride=1, first_dilation=None, dilation=1):
        super(ResBlock, self).__init__()
        # Identity shortcut only when the shape is preserved.
        self.same_shape = (in_channels == out_channels and stride == 1)
        if first_dilation is None:  # BUGFIX(idiom): was `== None`
            first_dilation = dilation
        self.bn_branch2a = nn.BatchNorm2d(in_channels)
        self.conv_branch2a = nn.Conv2d(in_channels, mid_channels, 3, stride,
                                       padding=first_dilation, dilation=first_dilation, bias=False)
        self.bn_branch2b1 = nn.BatchNorm2d(mid_channels)
        self.conv_branch2b1 = nn.Conv2d(mid_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False)
        if not self.same_shape:
            # 1x1 projection shortcut to match channels / stride.
            self.conv_branch1 = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)

    def forward(self, x, get_x_bn_relu=False):
        """Forward pass; optionally also return the pre-activation
        (BN+ReLU of the input), used as a feature tap downstream."""
        branch2 = self.bn_branch2a(x)
        branch2 = F.relu(branch2)
        x_bn_relu = branch2
        if not self.same_shape:
            branch1 = self.conv_branch1(branch2)
        else:
            branch1 = x
        branch2 = self.conv_branch2a(branch2)
        branch2 = self.bn_branch2b1(branch2)
        branch2 = F.relu(branch2)
        branch2 = self.conv_branch2b1(branch2)
        x = branch1 + branch2
        if get_x_bn_relu:
            return x, x_bn_relu
        return x

    def __call__(self, x, get_x_bn_relu=False):
        # Bypasses nn.Module.__call__ so the extra keyword reaches forward
        # (NOTE(review): this also skips module hooks).
        return self.forward(x, get_x_bn_relu=get_x_bn_relu)
class ResBlock_bot(nn.Module):
    """Pre-activation bottleneck residual block (1x1 -> 3x3 -> 1x1) with
    optional spatial dropout between the convolutions."""

    def __init__(self, in_channels, out_channels, stride=1, dilation=1, dropout=0.):
        super(ResBlock_bot, self).__init__()
        self.same_shape = (in_channels == out_channels and stride == 1)
        self.bn_branch2a = nn.BatchNorm2d(in_channels)
        self.conv_branch2a = nn.Conv2d(in_channels, out_channels//4, 1, stride, bias=False)
        self.bn_branch2b1 = nn.BatchNorm2d(out_channels//4)
        self.dropout_2b1 = torch.nn.Dropout2d(dropout)
        self.conv_branch2b1 = nn.Conv2d(out_channels//4, out_channels//2, 3, padding=dilation, dilation=dilation, bias=False)
        self.bn_branch2b2 = nn.BatchNorm2d(out_channels//2)
        self.dropout_2b2 = torch.nn.Dropout2d(dropout)
        self.conv_branch2b2 = nn.Conv2d(out_channels//2, out_channels, 1, bias=False)
        if not self.same_shape:
            # 1x1 projection shortcut to match channels / stride.
            self.conv_branch1 = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)

    def forward(self, x, get_x_bn_relu=False):
        branch2 = self.bn_branch2a(x)
        branch2 = F.relu(branch2)
        x_bn_relu = branch2
        # BUGFIX: the identity shortcut was missing -- the original always
        # called self.conv_branch1, which is not created when same_shape is
        # True (AttributeError; cf. ResBlock.forward).
        if not self.same_shape:
            branch1 = self.conv_branch1(branch2)
        else:
            branch1 = x
        branch2 = self.conv_branch2a(branch2)
        branch2 = self.bn_branch2b1(branch2)
        branch2 = F.relu(branch2)
        branch2 = self.dropout_2b1(branch2)
        branch2 = self.conv_branch2b1(branch2)
        branch2 = self.bn_branch2b2(branch2)
        branch2 = F.relu(branch2)
        branch2 = self.dropout_2b2(branch2)
        branch2 = self.conv_branch2b2(branch2)
        x = branch1 + branch2
        if get_x_bn_relu:
            return x, x_bn_relu
        return x

    def __call__(self, x, get_x_bn_relu=False):
        # Bypasses nn.Module.__call__ so the extra keyword reaches forward.
        return self.forward(x, get_x_bn_relu=get_x_bn_relu)
class Normalize():
    """Scale an RGB uint8 image to [0, 1] and standardise each channel with
    the given (ImageNet by default) mean and std."""

    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = mean
        self.std = std

    def __call__(self, img):
        raw = np.asarray(img)
        out = np.empty_like(raw, np.float32)
        for channel in range(3):
            out[..., channel] = (raw[..., channel] / 255. - self.mean[channel]) / self.std[channel]
        return out
class Net(nn.Module):
    """Backbone CNN built from pre-activation residual blocks (appears to be
    the wide "res38" architecture -- TODO confirm). ``forward_as_dict``
    exposes intermediate taps conv4/conv5/conv6 for downstream heads."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1a = nn.Conv2d(3, 64, 3, padding=1, bias=False)
        # Stage 2: stride 2, 128 channels.
        self.b2 = ResBlock(64, 128, 128, stride=2)
        self.b2_1 = ResBlock(128, 128, 128)
        self.b2_2 = ResBlock(128, 128, 128)
        # Stage 3: stride 2, 256 channels.
        self.b3 = ResBlock(128, 256, 256, stride=2)
        self.b3_1 = ResBlock(256, 256, 256)
        self.b3_2 = ResBlock(256, 256, 256)
        # Stage 4: stride 2, 512 channels.
        self.b4 = ResBlock(256, 512, 512, stride=2)
        self.b4_1 = ResBlock(512, 512, 512)
        self.b4_2 = ResBlock(512, 512, 512)
        self.b4_3 = ResBlock(512, 512, 512)
        self.b4_4 = ResBlock(512, 512, 512)
        self.b4_5 = ResBlock(512, 512, 512)
        # Stage 5: dilated convolutions, no further downsampling.
        self.b5 = ResBlock(512, 512, 1024, stride=1, first_dilation=1, dilation=2)
        self.b5_1 = ResBlock(1024, 512, 1024, dilation=2)
        self.b5_2 = ResBlock(1024, 512, 1024, dilation=2)
        # Bottleneck stages with dropout.
        self.b6 = ResBlock_bot(1024, 2048, stride=1, dilation=4, dropout=0.3)
        self.b7 = ResBlock_bot(2048, 4096, dilation=4, dropout=0.5)
        self.bn7 = nn.BatchNorm2d(4096)
        self.not_training = [self.conv1a]
        self.normalize = Normalize()

    def forward(self, x):
        """Return only the deepest feature map ('conv6')."""
        return self.forward_as_dict(x)['conv6']

    def forward_as_dict(self, x):
        """Run the full backbone and return the three feature taps."""
        out = self.conv1a(x)
        for block in (self.b2, self.b2_1, self.b2_2,
                      self.b3, self.b3_1, self.b3_2,
                      self.b4, self.b4_1, self.b4_2,
                      self.b4_3, self.b4_4, self.b4_5):
            out = block(out)
        out, conv4 = self.b5(out, get_x_bn_relu=True)
        out = self.b5_1(out)
        out = self.b5_2(out)
        out, conv5 = self.b6(out, get_x_bn_relu=True)
        out = self.b7(out)
        conv6 = F.relu(self.bn7(out))
        return {'conv4': conv4, 'conv5': conv5, 'conv6': conv6}
class CAMHead(Net):
    """Classification head over the Net backbone: a 1x1 conv scoring 20
    foreground classes, usable as a global classifier (``forward``) or to
    produce dense class activation maps (``forward_cam``)."""

    def __init__(self):
        super().__init__()
        self.dropout7 = torch.nn.Dropout2d(0.5)
        self.fc8 = nn.Conv2d(4096, 20, 1, bias=False)
        torch.nn.init.xavier_uniform_(self.fc8.weight)
        # Early stages stay frozen; only fc8 is trained from scratch.
        self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2]
        self.from_scratch_layers = [self.fc8]

    def forward(self, x):
        features = super().forward(x)
        features = self.dropout7(features)
        # Global average pool, then score each class with the 1x1 conv.
        pooled = F.avg_pool2d(
            features, kernel_size=(features.size(2), features.size(3)), padding=0)
        logits = self.fc8(pooled)
        return logits.view(logits.size(0), -1)

    def forward_cam(self, x):
        features = super().forward(x)
        # Dense per-location class scores (CAM), rectified.
        return F.relu(F.conv2d(features, self.fc8.weight))
class AffinityHead(Net):
    """AffinityNet head: predicts pairwise pixel affinities from the
    backbone's conv4/conv5/conv6 feature taps.

    ``forward(..., to_dense=True)`` returns a dense (area x area) affinity
    matrix suitable for random-walk propagation.

    NOTE(review): both forward paths call ``self.eval()`` and move data to
    CUDA -- this head looks inference-only as written; confirm before
    using it in a training loop.
    """
    def __init__(self):
        super().__init__()
        # 1x1 projections of the three feature taps; their concatenation is
        # 64 + 128 + 256 = 448 channels, matched by f9.
        self.f8_3 = torch.nn.Conv2d(512, 64, 1, bias=False)
        self.f8_4 = torch.nn.Conv2d(1024, 128, 1, bias=False)
        self.f8_5 = torch.nn.Conv2d(4096, 256, 1, bias=False)
        self.f9 = torch.nn.Conv2d(448, 448, 1, bias=False)
        torch.nn.init.kaiming_normal_(self.f8_3.weight)
        torch.nn.init.kaiming_normal_(self.f8_4.weight)
        torch.nn.init.kaiming_normal_(self.f8_5.weight)
        torch.nn.init.xavier_uniform_(self.f9.weight, gain=4)
        # Pair indices are precomputed for the default 448/8 = 56 feature
        # grid and rebuilt on the fly for other sizes (see forward).
        self.predefined_featuresize = int(448//8)
        self.ind_from, self.ind_to = get_indices_of_pairs(radius=5, size=(self.predefined_featuresize, self.predefined_featuresize))
        self.ind_from = torch.from_numpy(self.ind_from); self.ind_to = torch.from_numpy(self.ind_to)
        # Random-walk hyper-parameters: affinity exponent, number of matrix
        # squarings, background attenuation.
        self.beta = 8.0
        self.logt = 8
        self.alpha = 16.0
        # # FREEZE BATCH NORMS
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.weight.requires_grad = False
                m.bias.requires_grad = False
    @torch.no_grad()
    def get_trans_mat(self, batch):
        """Build the random-walk transition matrix for a padded batch.

        Returns the column-normalized affinity matrix raised (by ``logt``
        repeated squarings) to the 2**logt power, plus the downsampled
        grid size (stride-8 feature resolution).
        """
        img = ms.pad_image(batch["images"])
        n, c, h, w = img.shape
        aff_mat = torch.pow(self.forward(img.cuda(), True), self.beta)
        trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
        for _ in range(self.logt):
            trans_mat = torch.matmul(trans_mat, trans_mat)
        return {"trans_mat":trans_mat, "h":h/8, "w":w/8}
    def forward_trans(self, images):
        """Same transition matrix as ``get_trans_mat`` but gradient-enabled
        and using the to_dense=False forward path (in-place dense scatter)."""
        # img = ms.pad_image(images)
        # n, c, h, w = img.shape
        aff_mat = torch.pow(self.forward(images, False), self.beta)
        trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
        for _ in range(self.logt):
            trans_mat = torch.matmul(trans_mat, trans_mat)
        return trans_mat
    # @torch.no_grad()
    # def apply_affinity(self, batch, blob_probs=None):
    #     # img = ms.pad_image(batch["images"])
    #     img = batch["images"]
    #     n, c, h, w = img.shape
    #     dheight = int(np.ceil(img.shape[2]/8))
    #     dwidth = int(np.ceil(img.shape[3]/8))
    #     aff_mat = torch.pow(self.aff.forward(img.cuda(), True), self.beta)
    #     trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
    #     for _ in range(self.logt):
    #         trans_mat = torch.matmul(trans_mat, trans_mat)
    #     blob_probs_rw = torch.matmul(blob_probs.cuda(), trans_mat)
    #     blob_probs_rw = blob_probs_rw.view(1, dheight, dwidth)
    #     return blob_probs_rw
    def forward_as_dict(self, x):
        """Backbone forward (duplicates Net.forward_as_dict, but forces
        eval mode first)."""
        self.eval()
        x = self.conv1a(x)
        x = self.b2(x)
        x = self.b2_1(x)
        x = self.b2_2(x)
        x = self.b3(x)
        x = self.b3_1(x)
        x = self.b3_2(x)
        x = self.b4(x)
        x = self.b4_1(x)
        x = self.b4_2(x)
        x = self.b4_3(x)
        x = self.b4_4(x)
        x = self.b4_5(x)
        x, conv4 = self.b5(x, get_x_bn_relu=True)
        x = self.b5_1(x)
        x = self.b5_2(x)
        x, conv5 = self.b6(x, get_x_bn_relu=True)
        x = self.b7(x)
        conv6 = F.relu(self.bn7(x))
        return dict({'conv4': conv4, 'conv5': conv5, 'conv6': conv6})
    def forward(self, x_input, to_dense=False):
        """Compute pairwise affinities for the input image.

        Affinity between two locations is exp(-mean L1 distance) of their
        fused 448-d embeddings. With ``to_dense`` the sparse pair list is
        expanded into a symmetric (area x area) matrix with unit diagonal.
        """
        self.eval()
        d = self.forward_as_dict(x_input)
        # Project and fuse the three feature taps into one embedding.
        f8_3 = F.elu(self.f8_3(d['conv4']))
        f8_4 = F.elu(self.f8_4(d['conv5']))
        f8_5 = F.elu(self.f8_5(d['conv6']))
        x = F.elu(self.f9(torch.cat([f8_3, f8_4, f8_5], dim=1)))
        # Reuse precomputed pair indices when the feature size matches.
        if x.size(2) == self.predefined_featuresize and x.size(3) == self.predefined_featuresize:
            ind_from = self.ind_from
            ind_to = self.ind_to
        else:
            ind_from, ind_to = get_indices_of_pairs(5, (x.size(2), x.size(3)))
            ind_from = torch.from_numpy(ind_from); ind_to = torch.from_numpy(ind_to)
        x = x.view(x.size(0), x.size(1), -1)
        # Gather source embeddings (ff) and their neighbours (ft).
        ff = torch.index_select(x, dim=2, index=ind_from.cuda(non_blocking=True))
        ft = torch.index_select(x, dim=2, index=ind_to.cuda(non_blocking=True))
        ff = torch.unsqueeze(ff, dim=2)
        ft = ft.view(ft.size(0), ft.size(1), -1, ff.size(3))
        # Affinity = exp(-mean |ft - ff|) over the channel dimension.
        aff = torch.exp(-torch.mean(torch.abs(ft-ff), dim=1))
        if to_dense:
            aff = aff.view(-1).cpu()
            # Build a symmetric matrix: pairs, their transposes, and a
            # unit diagonal. NOTE(review): `sparse` is presumably
            # torch.sparse -- imported elsewhere; confirm.
            ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), -1).contiguous().view(-1)
            indices = torch.stack([ind_from_exp, ind_to])
            indices_tp = torch.stack([ind_to, ind_from_exp])
            area = x.size(2)
            indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()])
            aff_mat = sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1),
                                         torch.cat([aff, torch.ones([area]), aff])).to_dense().cuda()
            return aff_mat
        else:
            # Same symmetric matrix, built by direct dense scatter on GPU.
            aff_mat = torch.zeros((x.shape[-1],x.shape[-1])).cuda()
            aff = aff.view(-1)
            ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), -1).contiguous().view(-1)
            indices = torch.stack([ind_from_exp, ind_to])
            indices_tp = torch.stack([ind_to, ind_from_exp])
            area = x.size(2)
            indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()])
            rows_cols = torch.cat([indices.cuda(), indices_id.cuda(), indices_tp.cuda()], dim=1)
            values = torch.cat([aff, torch.ones([area]).cuda(), aff])
            aff_mat[rows_cols[0], rows_cols[1]] = values
            return aff_mat
        # return aff
class AFFNet(bm.BaseModel):
    """Inference pipeline combining a CAM head and an affinity head:
    per-class CAM scores are refined by a random walk over the predicted
    pixel affinities (AffinityNet inference)."""
    def __init__(self, train_set, **model_options):
        from models.iprm import PRM
        super().__init__(train_set, **model_options)
        self.cam = CAMHead()
        self.aff = AffinityHead()
        # NOTE(review): hard-coded absolute checkpoint paths -- these
        # should come from configuration.
        self.cam.load_state_dict(torch.load("/mnt/datasets/public/issam/res38_cls.pth"))
        self.aff.load_state_dict(torch.load("/mnt/datasets/public/issam/res38_aff.pth"))
        self.prm = PRM(train_set)
        # Random-walk hyper-parameters: affinity exponent, number of matrix
        # squarings, background attenuation.
        self.beta = 8.0
        self.logt = 8
        self.alpha = 16.0
    # def forward(self, x, to_dense=False):
    #     x_8s, x_16s, x_32s = self.feature_extracter.extract_features(x)
    #     import ipdb; ipdb.set_trace()  # breakpoint b2b482cc //
    #     self.aff([x_8s, x_16s, x_32s])
    @torch.no_grad()
    def predict(self, batch, predict_method="counts"):
        """Predict a segmentation map by propagating CAM scores with the
        affinity random walk.

        :param dict batch: must contain "images" (1xCxHxW) and "counts"
            (per-class counts; assumes 20 foreground classes -- TODO
            confirm against the dataset).
        :param str predict_method: only the default path is implemented;
            "annList" currently falls through and returns None.
        :return: (padded image tensor, uint8 label map)
        """
        self.eval()
        img = batch["images"]
        # Pad spatial dims up to a multiple of 8 (the network stride).
        padded_size = (int(np.ceil(img.shape[2]/8)*8), int(np.ceil(img.shape[3]/8)*8))
        p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2])
        img = F.pad(img, p2d)
        dheight = int(np.ceil(img.shape[2]/8))
        dwidth = int(np.ceil(img.shape[3]/8))
        n, c, h, w = img.shape
        label = (batch["counts"]>0)
        ###### CAM: per-class activation maps, masked by present classes.
        # cam = np.load(os.path.join(args.cam_dir, name + '.npy')).item()
        cam = self.cam.forward_cam(img.cuda())
        cam = F.interpolate(cam, (h,w), mode='bilinear', align_corners=False)[0]
        # ms.images(ms.gray2cmap(cam[18]))
        cam = cam.cpu().numpy() * label.clone().view(20, 1, 1).numpy()
        sum_cam =cam
        # Normalize each class map to [0, 1].
        norm_cam = sum_cam / (np.max(sum_cam, (1, 2), keepdims=True) + 1e-5)
        cam_dict = {}
        for i in range(20):
            if label.squeeze()[i].item() > 1e-5:
                cam_dict[i] = norm_cam[i]
        # bg_score = [np.ones_like(norm_cam[0])*0.2]
        # pred_cam = np.argmax(np.concatenate((bg_score, norm_cam)), 0)
        # ms.images(img, pred_cam, denorm=1)
        ######
        # Slot 0 is background: low wherever any foreground is confident.
        cam_full_arr = np.zeros((21, h, w), np.float32)
        for k, v in cam_dict.items():
            cam_full_arr[k+1] = v
        cam_full_arr[0] = (1 - np.max(cam_full_arr[1:], (0), keepdims=False))**self.alpha
        cam_full_arr = np.pad(cam_full_arr, ((0, 0), (0, p2d[3]), (0, p2d[1])), mode='constant')
        # Random walk: T = (column-normalized A^beta) squared logt times.
        aff_mat = torch.pow(self.aff.forward(img.cuda(), True), self.beta)
        trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)
        for _ in range(self.logt):
            trans_mat = torch.matmul(trans_mat, trans_mat)
        cam_full_arr = torch.from_numpy(cam_full_arr)
        cam_full_arr = F.avg_pool2d(cam_full_arr, 8, 8)
        cam_vec = cam_full_arr.view(21, -1)
        cam_rw = torch.matmul(cam_vec.cuda(), trans_mat)
        cam_rw = cam_rw.view(1, 21, dheight, dwidth)
        cam_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_rw)
        _, cam_rw_pred = torch.max(cam_rw, 1)
        # Argmax over classes, cropped back to the (padded) image size.
        res = np.uint8(cam_rw_pred.cpu().data[0])[:h, :w]
        if predict_method == "annList":
            pass  # TODO: annotation-list output is not implemented yet
        else:
            return img, res
        # scipy.misc.imsave(os.path.join(args.out_rw, name + '.png'), res)
    @torch.no_grad()
    def visualize(self, batch, predict_method="counts"):
        """Run ``predict`` and display the image with its label map."""
        img, res = self.predict(batch)
        ms.images(img, res, denorm=1)
|
<filename>library/selenium_actions.py
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import Select
from library.tools import Tools
'''
Selenium Methods
'''
class SeleniumActions:
    """Static helpers wrapping common Selenium WebDriver operations.

    Every helper swallows WebDriver exceptions itself and signals the
    outcome with a boolean (or None where an object/text is expected), so
    callers never need their own try/except blocks.
    """
    ##########
    #####
    ## Browser
    #####
    ##########
    @staticmethod
    def refresh_page(web_driver):
        """Refreshes the page and waits a second for it to settle.
        :param WebDriver web_driver:
        :rtype: bool
        :return: True on success
        """
        try:
            # BUGFIX: the Python bindings expose refresh() directly;
            # navigate().refresh() is the Java API and raised AttributeError.
            web_driver.refresh()
            Tools.sleep(1)
            return True
        except Exception:
            return False
    ##########
    #####
    ## Visibility
    #####
    ##########
    @staticmethod
    def wait_for_element(web_driver, web_element):
        """Waits up to 3 seconds for the element to become visible.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :rtype: bool
        :return: True when the element became visible in time
        """
        try:
            # BUGFIX: presence_of_element_located expects a (By, path)
            # locator tuple; visibility_of is the condition that accepts
            # an already-found WebElement.
            WebDriverWait(web_driver, 3).until(expected_conditions.visibility_of(web_element))
            return True
        except Exception:
            return False
    @staticmethod
    def element_is_visible(web_driver, web_element):
        """Checks whether the element is (or becomes) displayed.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :rtype: bool
        """
        SeleniumActions.wait_for_element(web_driver, web_element)
        return web_element is not None and web_element.is_displayed()
    @staticmethod
    def element_is_not_visible(web_element):
        """True when the element is absent or not displayed.
        :param WebElement web_element:
        :rtype: bool
        """
        return web_element is None or not web_element.is_displayed()
    ##########
    #####
    ## Click
    #####
    ##########
    @staticmethod
    def click_element(web_driver, web_element):
        """Tries to click a WebElement three ways: direct click, click
        after moving to the element, and finally sending ENTER.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :rtype: bool
        :return: True when one of the strategies succeeded
        """
        if not SeleniumActions.element_is_visible(web_driver, web_element):
            return False
        if SeleniumActions.click_was_successful(web_element):
            # BUGFIX: the success path previously fell through and
            # returned None instead of True.
            return True
        SeleniumActions.move_to_element(web_driver, web_element)
        if SeleniumActions.click_was_successful(web_element):
            return True
        return SeleniumActions.send_enter_to_element(web_driver, web_element)
    @staticmethod
    def click_was_successful(web_element):
        """Attempts a single click on the WebElement.
        :param WebElement web_element:
        :rtype: bool
        """
        try:
            web_element.click()
            return True
        except Exception:
            return False
    @staticmethod
    def move_to_element(web_driver, web_element):
        """Moves the pointer to the WebElement via ActionChains.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :rtype: bool
        """
        try:
            move_to = ActionChains(web_driver).move_to_element(web_element)
            move_to.perform()
            return True
        except Exception:
            return False
    @staticmethod
    def send_enter_to_element(web_driver, web_element):
        """Sends the ENTER key to a WebElement.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :rtype: bool
        :return: True on success
        """
        if not SeleniumActions.element_is_visible(web_driver, web_element):
            return False
        try:
            # BUGFIX: send_keys (snake_case) is the Python API -- sendKeys
            # raised AttributeError -- and the return values were inverted
            # (False on success, True on failure).
            web_element.send_keys(Keys.RETURN)
            return True
        except Exception:
            return False
    ##########
    #####
    ## Select
    #####
    ##########
    @staticmethod
    def select_by_index(web_element, index):
        """Selects the option at ``index`` in a <select> element.
        :rtype: bool
        """
        try:
            select = Select(web_element)
            select.select_by_index(index)
            return True
        except Exception:
            return False
    @staticmethod
    def select_by_value(web_element, value):
        """Selects the option with the given value attribute.
        :rtype: bool
        """
        try:
            select = Select(web_element)
            select.select_by_value(value)
            return True
        except Exception:
            return False
    @staticmethod
    def select_by_text(web_element, visible_text):
        """Selects the option with the given visible text.
        :rtype: bool
        """
        try:
            select = Select(web_element)
            select.select_by_visible_text(visible_text)
            return True
        except Exception:
            return False
    ##########
    #####
    ## Read
    #####
    ##########
    @staticmethod
    def read_web_element_text(web_element):
        """Returns the element's text, or None on failure.
        :param WebElement web_element:
        :rtype: str or None
        """
        try:
            return web_element.text
        except Exception:
            return None
    @staticmethod
    def read_select_text(web_element):
        """Returns the text of the currently selected option, or None.
        :param WebElement web_element:
        :rtype: str or None
        """
        try:
            select = Select(web_element)
            return select.first_selected_option.text
        except Exception:
            return None
    ##########
    #####
    ## Write
    #####
    ##########
    @staticmethod
    def write_to_element(web_driver, web_element, write_text):
        """Types ``write_text`` into a WebElement.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :param str write_text:
        :rtype: bool
        """
        if not SeleniumActions.element_is_visible(web_driver, web_element):
            return False
        try:
            web_element.send_keys(write_text)
            return True
        except Exception:
            return False
    ##########
    #####
    ## Clear Text
    #####
    ##########
    @staticmethod
    def clearTextField(web_driver, web_element):
        """Clears the text from a WebElement.
        :param WebDriver web_driver:
        :param WebElement web_element:
        :rtype: bool
        """
        # BUGFIX: the invisible-element path previously returned None
        # implicitly instead of False.
        if not SeleniumActions.element_is_visible(web_driver, web_element):
            return False
        try:
            web_element.clear()
            return True
        except Exception:
            return False
    ##########
    #####
    ## Navigate
    #####
    ##########
    @staticmethod
    def navigate_to_url(web_driver, url):
        """Navigates the browser to ``url``.
        :param WebDriver web_driver:
        :param str url:
        :rtype: bool
        """
        try:
            web_driver.get(url)
            return True
        except Exception:
            return False
    ##########
    #####
    ## Capture Element
    #####
    ##########
    @staticmethod
    def fetch_web_element(web_driver, element_path):
        """Fetches a web element from a path, trying XPath first and
        falling back to a CSS selector.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: WebElement
        :return: the element, or None (after reporting the failure)
        """
        if not Tools.object_has_value(element_path):
            error = "No element path"
            Tools.raise_exception(error)
            return None
        web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
        if web_element is None:
            web_element = SeleniumActions.find_by_css_path(web_driver, element_path)
        if web_element is not None:
            return web_element
        error = "No web element"
        Tools.raise_exception(error)
        return None
    ##########
    #####
    ## Find
    #####
    ##########
    @staticmethod
    def find_by_partial_link_text(web_driver, element_path):
        """Finds a WebElement from a partial link text.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: WebElement or None
        """
        try:
            return web_driver.find_element(By.PARTIAL_LINK_TEXT, element_path)
        except Exception as error:
            print("Error :: " + str(error) + "\nElement Path: \n" + str(element_path))
            return None
    @staticmethod
    def find_by_xpath(web_driver, element_path):
        """Finds a WebElement from an XPath expression.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: WebElement or None
        """
        try:
            return web_driver.find_element(By.XPATH, element_path)
        except Exception as error:
            print("Error :: " + str(error) + "\nElement Path: \n" + str(element_path))
            return None
    @staticmethod
    def find_by_css_path(web_driver, element_path):
        """Finds a WebElement from a CSS selector.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: WebElement or None
        """
        try:
            # Modern By-based API (find_element_by_* is deprecated/removed).
            return web_driver.find_element(By.CSS_SELECTOR, element_path)
        except Exception as error:
            print("Error :: " + str(error) + "\nElement Path: \n" + str(element_path))
            return None
    @staticmethod
    def find_by_id(web_driver, element_path):
        """Finds a WebElement from an id.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: WebElement or None
        """
        try:
            return web_driver.find_element(By.ID, element_path)
        except Exception as error:
            print("Error :: " + str(error) + "\nElement Path: \n" + str(element_path))
            return None
    @staticmethod
    def find_by_class_name(web_driver, element_path):
        """Finds a WebElement from a class name.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: WebElement or None
        """
        try:
            return web_driver.find_element(By.CLASS_NAME, element_path)
        except Exception as error:
            print("Error :: " + str(error) + "\nElement Path: \n" + str(element_path))
            return None
    @staticmethod
    def check_element_exists(web_driver, element_path):
        """Checks whether any element matches the XPath.
        :param WebDriver web_driver:
        :param str element_path:
        :rtype: list[WebElement] or None
        :return: the (possibly empty) list of matches, or None on error
        """
        try:
            return web_driver.find_elements(By.XPATH, element_path)
        except Exception as error:
            print("Error :: " + str(error) + "\nElement Path: \n" + str(element_path))
            return None
|
<reponame>qosf/quantum-bench<filename>quenchmark/meta_analysis.py
import datetime as dt
from functools import reduce
from difflib import SequenceMatcher as SM
from cached_property import cached_property
from github import Github
from config import OAUTH_TOKEN
class Repository(object):
osi_license_ids = ["MPL-2.0", "GPL-2.0", "MIT", "LGPL-3.0", "BSD-2-Clause",
"EPL-2.0", "Apache-2.0", "BSD-3-Clause", "GPL-3.0", "LGPL-2.1"]
def __init__(self, user_name, repo_name):
"""
Gets the repo with repo_name from user_name's profile
and already retrieves a list of all commits.
"""
# create a Github instance with OAuth token
self.github = Github(OAUTH_TOKEN)
# adding @ ensures finding users exactly
self.user = self.github.search_users('@'+user_name)[0]
self.repo = self.user.get_repo(repo_name)
self.commits = [commit for commit in self.repo.get_commits()]
@cached_property
def commit_count(self):
"""
Returns the total number of commits.
"""
return len([commit for commit in self.commits])
@cached_property
def is_young(self):
"""
Returns True if the repository
is younger than one year.
"""
return (dt.datetime.now() - self.repo.created_at) > dt.timedelta(weeks=52)
@cached_property
def has_recent_commits(self):
"""
Returns True if the repository had at
least 20 commits in the past year.
"""
last_twenty_commits = self.commits[:19]
extract_date = lambda text_date: dt.datetime.strptime(text_date.stats.last_modified[5:16], "%d %b %Y")
in_past_year = lambda date: (dt.datetime.now() - date) < dt.timedelta(weeks=52)
return any(map(in_past_year, map(extract_date, last_twenty_commits)))
@cached_property
def contributor_count(self):
"""
Returns the number of contributors
for this project.
"""
return len(self.get_contributors())
@cached_property
def osi_license(self):
"""
Returns True if the license associated
with this repository is a valid OSI license.
"""
license_name = self.repo.get_license().license.spdx_id
return license_name in self.osi_license_ids
@cached_property
def has_xtrnl_issues_or_prs(self):
"""
Returns True if the repository has Issues
and Pull Requests from external people.
"""
if not self.repo.has_issues or len(self.get_xtrnl_issues_and_prs()) == 0:
return False
else:
return True
@cached_property
def core_developers(self):
"""
Returns a list of names of core
developers (>10% of total additions OR deletions OR >15% of total commits).
"""
adds_and_dels = self.get_total_adds_and_dels()
is_core_dev = lambda contributor: (contributor['additions']/adds_and_dels['additions'] > 0.10) \
or (contributor['deletions']/adds_and_dels['deletions']*-1 > 0.10) \
or (contributor['commits']/self.commit_count > 0.15)
return list(filter(is_core_dev, self.get_contributors()))
@cached_property
def has_ignored_issues_and_prs(self):
    """True if more than 50% of external issues/PRs were ignored.

    An issue/PR counts as "ignored" when no core developer (nor a
    member of the owning company) commented on it within its first
    month. Returns False when there are no external issues/PRs at all
    (this also avoids a division by zero).
    """
    external_issues_and_prs = self.get_xtrnl_issues_and_prs()
    if not external_issues_and_prs:
        return False
    core_dev_names = [dev['name'] for dev in self.core_developers]
    ignorance_counter = 0
    for ext_issue in external_issues_and_prs:
        replied_on_time = False
        for comment in ext_issue.get_comments():
            # Comments come after the issue was created, so the age of a
            # comment is comment time minus issue time (the original had
            # the operands reversed, so this branch never triggered).
            if comment.created_at - ext_issue.created_at > dt.timedelta(weeks=4):
                break  # no qualified reply within the first month
            if comment.user.login in core_dev_names or self.is_part_of_company(comment.user):
                replied_on_time = True
                break
        if not replied_on_time:
            ignorance_counter += 1
    # If more than 50% of issues and PRs were ignored we consider the
    # project abandoned.
    return ignorance_counter / len(external_issues_and_prs) > 0.50
def get_contributors(self):
    """
    Return a list of dictionaries, one per contributor, with the
    contributor's name and their total additions, deletions and
    commit count (summed over all weekly stats entries).
    """
    def _totals(weeks):
        # Sum the weekly a/d/c counters in a single pass.
        adds = dels = commits = 0
        for week in weeks:
            adds += week.a
            dels += week.d
            commits += week.c
        return adds, dels, commits

    contributors = []
    for contributor in self.repo.get_stats_contributors():
        adds, dels, commits = _totals(contributor.weeks)
        contributors.append({'name': contributor.author.login,
                             'additions': adds,
                             'deletions': dels,
                             'commits': commits})
    return contributors
def get_xtrnl_issues_and_prs(self):
    """
    Return all issues and PRs opened by external developers.

    "External" means: not the repository owner, not part of the company
    that owns the repo, and not a core developer.
    """
    core_dev_names = [dev['name'] for dev in self.core_developers]

    def _is_external(issue):
        # One check per exclusion rule; any hit disqualifies the issue.
        if issue.user == self.repo.owner:
            return False
        if self.is_part_of_company(issue.user):
            return False
        return issue.user.login not in core_dev_names

    return [issue for issue in self.repo.get_issues(state='all')
            if _is_external(issue)]
def is_part_of_company(self, user):
    """
    Return True if *user* belongs to the company that owns the repo.

    Company names are matched fuzzily (>90% similarity) against both
    the owner's login and display name, to tolerate spelling or
    punctuation differences. Always returns a bool (the previous
    version fell through and returned None when the owner had no
    display name and the login did not match).
    """
    if user.company is None:
        return False
    # Fuzzy string comparison: True when the strings overlap >90%.
    fuzzy_compare = lambda string1, string2: SM(None, string1, string2).ratio() > 0.9
    if fuzzy_compare(user.company, self.user.login):
        return True
    # Some users have only a login and no display name.
    if self.user.name is not None and fuzzy_compare(user.company, self.user.name):
        return True
    return False
def get_total_adds_and_dels(self):
    """
    Return a dict with the total number of additions and deletions in
    this repository, aggregated over the weekly code-frequency stats.

    The stats are fetched once (the previous reduce-based version
    fetched them twice, and returned a raw stats object instead of an
    int when the history contained a single entry).
    """
    # NOTE(review): code-frequency deletions appear to be negative
    # (see the *-1 in core_developers) — totals keep that sign.
    weeks = list(self.repo.get_stats_code_frequency())
    return {
        'additions': sum(week.additions for week in weeks),
        'deletions': sum(week.deletions for week in weeks),
    }
def is_valid(self):
    """
    Run the full decision tree; True if the repo passes every check.

    A valid repo has an OSI license, more than one contributor, a
    commit history matching its age, external participation, and does
    not ignore external issues/PRs.
    """
    # (predicate-that-fails, failure message, success message) triples,
    # evaluated lazily and in order — first failure short-circuits.
    checks = (
        (lambda: not self.osi_license,
         'license not okay', 'license okay'),
        (lambda: self.contributor_count == 1,
         'only 1 contributor', '>1 contributor'),
        (lambda: not self.is_young and self.commit_count < 100,
         'project is old and has less than 100 commits.',
         'project either old and has more than 100 commits or young.'),
        (lambda: self.is_young and not self.has_recent_commits,
         'project is young but has too little recent commits',
         'project maybe young and enough recent commits'),
        (lambda: not self.has_xtrnl_issues_or_prs,
         'project has no xtrnl issues', 'project has xtrnl issues'),
        (lambda: self.has_ignored_issues_and_prs,
         'project is ignorant', 'project not ignorant'),
    )
    for failed, fail_msg, pass_msg in checks:
        if failed():
            print(fail_msg)
            return False
        print(pass_msg)
    return True  # satisfied every condition
|
r"""@package motsfinder.exprs.basics
Collection of basic numexpr.NumericExpression subclasses.
"""
from builtins import range
import math
from mpmath import mp
from ..numutils import binomial_coeffs
from .numexpr import NumericExpression, SimpleExpression
from .evaluators import EvaluatorBase, EvaluatorFactory, TrivialEvaluator
__all__ = [
"ConstantExpression",
"IdentityExpression",
"ScaleExpression",
"ProductExpression",
"ProductExpression2D",
"DivisionExpression",
"DivisionExpression2D",
"OffsetExpression",
"SumExpression",
"BlendExpression2D",
"EmbedExpression2D",
"SimpleSinExpression",
"SimpleCosExpression",
"SinSquaredExpression",
"SimpleSinhExpression",
"SimpleCoshExpression",
]
class ConstantExpression(NumericExpression):
    r"""Expression representing a constant value.

    Implements \f$ f(x) = c = \mathrm{const} \f$. The constant is
    exposed through the `c` attribute.
    """

    def __init__(self, value=0, name='const'):
        r"""Init function.

        @param value
            The constant value.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(ConstantExpression, self).__init__(name=name)
        ## The constant value this expression represents.
        self.c = value

    def _expr_str(self):
        return "%r" % self.c

    @property
    def nice_name(self):
        return "%s (%r)" % (self.name, self.c)

    def is_zero_expression(self):
        return self.c == 0

    def _evaluator(self, use_mp):
        value = self.c
        if value == 0:
            # The zero constant and all its derivatives vanish.
            return (self.zero,)
        # Value plus the (vanishing) first derivative.
        return (lambda x: value, self.zero)
class IdentityExpression(NumericExpression):
    r"""Identity expression with an optional multiplication factor.

    Implements \f$ f(x) = a x \f$. To scale another expression, use
    ScaleExpression instead.
    """

    def __init__(self, a=1.0, name='Id'):
        r"""Init function.

        @param a
            Factor to multiply the argument with.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(IdentityExpression, self).__init__(name=name)
        ## Factor to multiply the argument with.
        self.a = a

    def _expr_str(self):
        if self.a == 1.0:
            return "x"
        return "a x, where a=%r" % self.a

    @property
    def nice_name(self):
        if self.a == 1.0:
            return self.name
        return "%r * %s" % (self.a, self.name)

    def is_zero_expression(self):
        return self.a == 0

    def _evaluator(self, use_mp):
        factor = self.a
        if factor == 0:
            return (self.zero,)
        # Value, constant first derivative, vanishing higher derivatives.
        return (lambda x: factor * x, lambda x: factor, self.zero)
class ScaleExpression(NumericExpression):
    r"""Scale another expression by a constant factor.

    Implements \f$ f(x) = a g(x) \f$.
    """

    def __init__(self, expr, a, name='scale'):
        r"""Init function.

        @param expr
            The expression to scale.
        @param a
            Factor to multiply the `expr` with.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(ScaleExpression, self).__init__(e=expr, domain=expr.domain,
                                              verbosity=expr.verbosity,
                                              name=name)
        ## Factor to scale the expression by.
        self.a = a

    def _expr_str(self):
        if self.a == 0.0:
            return "0"
        return "a f(x), where a=%r, f(x)=%s" % (self.a, self.e.str())

    @property
    def nice_name(self):
        return "%s (%r)" % (self.name, self.a)

    def is_zero_expression(self):
        return self.a == 0

    def _evaluator(self, use_mp):
        factor = self.a
        if factor == 0:
            return (self.zero,)
        sub = self.e.evaluator(use_mp)
        def factory(n):
            # Scaling commutes with differentiation: (a g)^(n) = a g^(n).
            return lambda x: factor * sub.diff(x, n)
        return EvaluatorFactory(self, factory, [sub])
class ProductExpression(NumericExpression):
    r"""Product of two expressions of one variable.

    Implements \f$ f(x) = g(x) h(x) \f$; derivatives follow the general
    Leibniz rule.
    """

    def __init__(self, expr1, expr2, name='mult'):
        r"""Init function.

        @param expr1
            First expression.
        @param expr2
            Second expression.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(ProductExpression, self).__init__(e1=expr1, e2=expr2,
                                                domain=expr1.domain,
                                                name=name)

    def _expr_str(self):
        return "e1 * e2, where e1 = %s, e2 = %s" % (self.e1.str(), self.e2.str())

    def _evaluator(self, use_mp):
        g = self.e1.evaluator(use_mp)
        h = self.e2.evaluator(use_mp)
        fsum = mp.fsum if use_mp else math.fsum

        def value(x):
            return g(x) * h(x)

        def first_deriv(x):
            # (gh)' = g'h + gh'
            return g.diff(x, 1) * h(x) + g(x) * h.diff(x, 1)

        def second_deriv(x):
            # (gh)'' = g''h + 2 g'h' + gh''
            return (g.diff(x, 2) * h(x)
                    + 2 * g.diff(x, 1) * h.diff(x, 1)
                    + g(x) * h.diff(x, 2))

        def factory(n):
            if n == 0:
                return value
            if n == 1:
                return first_deriv
            if n == 2:
                return second_deriv
            # General Leibniz rule: (gh)^(n) = sum_k C(n,k) g^(n-k) h^(k).
            coeffs = binomial_coeffs(n)
            return lambda x: fsum(coeffs[k] * g.diff(x, n-k) * h.diff(x, k)
                                  for k in range(0, n+1))

        return EvaluatorFactory(self, factory, [g, h])
class ProductExpression2D(NumericExpression):
    r"""Multiply two expressions of one or two variables each.

    Represents e.g. \f$ f(\mathbf{x}) = g(x) h(y) \f$ or
    \f$ f(\mathbf{x}) = g(y) h(\mathbf{x}) \f$, etc.
    """

    def __init__(self, expr1, expr2, variables=('both', 'both'), name='mult'):
        r"""Init function.

        @param expr1
            First expression.
        @param expr2
            Second expression.
        @param variables
            tuple/list of two elements, each being either ``'both'``, `0`,
            or `1`, corresponding to `expr1` and `expr2`, respectively.
            ``'both'`` means the expression is a function of two variables,
            while `0` or `1` specify it to depend only on `x` or `y`,
            respectively.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(ProductExpression2D, self).__init__(e1=expr1, e2=expr2, name=name)
        self._v1, self._v2 = variables
        for v in (self._v1, self._v2):
            if v not in ('both', 0, 1):
                raise TypeError("Expressions can only depend on element 0, 1 or both.")
        self.domain = self._get_domain()

    @property
    def v1(self):
        r"""Variable(s) of first expression (one of 'both', 0, 1)."""
        return self._v1

    @property
    def v2(self):
        r"""Variable(s) of second expression (one of 'both', 0, 1)."""
        return self._v2

    def _get_domain(self):
        # A 2D sub-expression fixes the full domain directly.
        if self._v1 == 'both':
            return self.e1.domain
        if self._v2 == 'both':
            return self.e2.domain
        # Both sub-expressions are 1D: assemble a per-axis domain list.
        result = [None] * (max(self._v1, self._v2) + 1)
        result[self._v1] = self.e1.domain
        result[self._v2] = self.e2.domain
        return result

    def _expr_str(self):
        def var_name(v):
            return 'x' if v == 'both' else 'x%d' % (v + 1)
        return ("e1(%s) * e2(%s), where e1=%s, e2=%s"
                % (var_name(self._v1), var_name(self._v2),
                   self.e1.str(), self.e2.str()))

    def _evaluator(self, use_mp):
        return _ProductExpression2DEval(self, use_mp)
class _ProductExpression2DEval(EvaluatorBase):
    r"""Evaluator for ProductExpression2D.

    Derivatives are implemented up to second order in both variables
    (separately) if supported by the sub-expressions.

    The derivative order `n` is encoded as in the DivisionExpression2D
    evaluator: 0 = value, 1 = del_x, 2 = del_y, 3 = del_x del_x,
    4 = del_x del_y, 5 = del_y del_y.
    """
    def __init__(self, expr, use_mp):
        e1 = expr.e1.evaluator(use_mp)
        e2 = expr.e2.evaluator(use_mp)
        super(_ProductExpression2DEval, self).__init__(expr, use_mp, [e1, e2])
        # None means "depends on both variables"; otherwise the axis index.
        self._v1 = None if expr.v1 == 'both' else expr.v1
        self._v2 = None if expr.v2 == 'both' else expr.v2
        self.e1 = e1
        self.e2 = e2
    def prepare_evaluation_at(self, pts, orders=(0,)):
        """Forward evaluation points to both sub-evaluators.

        1D sub-expressions only receive the unique coordinates of their
        own axis (and the per-axis derivative orders).
        """
        ordersXY = self.orders_for_2d_axes(orders)
        if self._v1 is None:
            self.e1.prepare_evaluation_at(pts, orders=orders)
        else:
            pts1d = self.unique_for_axis(self._v1, pts)
            self.e1.prepare_evaluation_at(pts1d, orders=ordersXY[self._v1])
        if self._v2 is None:
            self.e2.prepare_evaluation_at(pts, orders=orders)
        else:
            pts1d = self.unique_for_axis(self._v2, pts)
            self.e2.prepare_evaluation_at(pts1d, orders=ordersXY[self._v2])
    def _x_changed(self, x):
        # No per-point state to update.
        pass
    def _evaluate_evaluator(self, e, x, v, n):
        """Evaluate sub-evaluator `e` at `x` for encoded order `n`.

        `v` is the sub-expression's axis (or None for a 2D one).
        """
        if v is None:
            return e.diff(x, n)
        x = x[v]
        if n == 0:
            return e(x)
        nX, nY = self.unpack_2d_diff_order(n)
        if v == 0:
            # A function of x alone: any y-derivative vanishes.
            return 0. if nY != 0 else e.diff(x, nX)
        if v == 1:
            # A function of y alone: any x-derivative vanishes.
            return 0. if nX != 0 else e.diff(x, nY)
    def _eval(self, n=0):
        e1, e2 = self.e1, self.e2
        v1, v2 = self._v1, self._v2
        x = self._x
        e1x = self._evaluate_evaluator(e1, x, v1, n=0)
        e2x = self._evaluate_evaluator(e2, x, v2, n=0)
        if n == 0:
            return e1x * e2x
        if n == 1:  # del_x: product rule
            dx1 = self._evaluate_evaluator(e1, x, v1, n=1)
            dx2 = self._evaluate_evaluator(e2, x, v2, n=1)
            return dx1 * e2x + e1x * dx2
        if n == 2:  # del_y: product rule
            dy1 = self._evaluate_evaluator(e1, x, v1, n=2)
            dy2 = self._evaluate_evaluator(e2, x, v2, n=2)
            return dy1 * e2x + e1x * dy2
        if n == 3:  # del_x del_x
            dx1 = self._evaluate_evaluator(e1, x, v1, n=1)
            dx2 = self._evaluate_evaluator(e2, x, v2, n=1)
            dxx1 = self._evaluate_evaluator(e1, x, v1, n=3)
            dxx2 = self._evaluate_evaluator(e2, x, v2, n=3)
            return dxx1 * e2x + 2 * dx1 * dx2 + e1x * dxx2
        if n == 4:  # del_x del_y (mixed partial product rule)
            dx1 = self._evaluate_evaluator(e1, x, v1, n=1)
            dx2 = self._evaluate_evaluator(e2, x, v2, n=1)
            dy1 = self._evaluate_evaluator(e1, x, v1, n=2)
            dy2 = self._evaluate_evaluator(e2, x, v2, n=2)
            dxy1 = self._evaluate_evaluator(e1, x, v1, n=4)
            dxy2 = self._evaluate_evaluator(e2, x, v2, n=4)
            return dxy1*e2x + dy1*dx2 + dx1*dy2 + e1x*dxy2
        if n == 5:  # del_y del_y
            dy1 = self._evaluate_evaluator(e1, x, v1, n=2)
            dy2 = self._evaluate_evaluator(e2, x, v2, n=2)
            dyy1 = self._evaluate_evaluator(e1, x, v1, n=5)
            dyy2 = self._evaluate_evaluator(e2, x, v2, n=5)
            return dyy1 * e2x + 2 * dy1 * dy2 + e1x * dyy2
        raise NotImplementedError
class DivisionExpression(NumericExpression):
    r"""Divide one 1D expression by another.

    Represents an expression of the form `f(x) = g(x)/h(x)`.
    Derivatives are implemented up to fourth order via the expanded
    quotient rule.
    """
    def __init__(self, expr1, expr2, name='divide'):
        r"""Init function.

        @param expr1 Numerator expression.
        @param expr2 Denominator expression.
        @param name Name of the expression (e.g. for print_tree()).
        """
        super().__init__(e1=expr1, e2=expr2, domain=expr1.domain, name=name)
    def _expr_str(self):
        return "e1 / e2, where e1 = %s, e2 = %s" % (self.e1.str(), self.e2.str())
    def _evaluator(self, use_mp):
        # f = numerator evaluator, g = denominator evaluator.
        f_ev = self.e1.evaluator(use_mp)
        g_ev = self.e2.evaluator(use_mp)
        def d0func(x):
            # f/g
            return f_ev(x) / g_ev(x)
        def d1func(x):
            # (f/g)' = f'/g - f g'/g^2
            g = g_ev(x)
            return f_ev.diff(x, 1) / g - f_ev(x)*g_ev.diff(x, 1) / g**2
        def d2func(x):
            # (f/g)'' = f''/g - (2 f'g' + f g'')/g^2 + 2 f g'^2/g^3
            f = f_ev(x)
            g = g_ev(x)
            df = f_ev.diff(x, 1)
            ddf = f_ev.diff(x, 2)
            dg = g_ev.diff(x, 1)
            ddg = g_ev.diff(x, 2)
            return ddf/g - (2*df*dg + f*ddg)/g**2 + 2*f*dg**2/g**3
        def d3func(x):
            # Third derivative of the quotient (fully expanded).
            f = f_ev(x)
            g = g_ev(x)
            df, ddf, d3f = [f_ev.diff(x, n) for n in range(1, 4)]
            dg, ddg, d3g = [g_ev.diff(x, n) for n in range(1, 4)]
            return (
                d3f/g - (3*(ddf*dg+df*ddg) + f*d3g)/g**2
                + 6 * (f*dg*ddg + df*dg**2) / g**3
                - 6 * f * dg**3 / g**4
            )
        def d4func(x):
            # Fourth derivative; f[n]/g[n] hold the n-th derivatives of
            # numerator/denominator at x.
            f = [f_ev.diff(x, n) for n in range(5)]
            g = [g_ev.diff(x, n) for n in range(5)]
            return (
                - 24*f[1]*g[1]**3 / g[0]**4
                + f[4] / g[0]
                + 6*f[0]*g[2]**2 / g[0]**3
                + 24*f[0]*g[1]**4 / g[0]**5
                + g[1]**2 * (12*f[2]/g[0]**3 - 36*f[0]*g[2]/g[0]**4)
                + (-6*f[2]*g[2] - 4*f[1]*g[3] - 4*f[3]*g[1] - f[0]*g[4]) / g[0]**2
                + (24*f[1]*g[2]*g[1] + 8*f[0]*g[1]*g[3]) / g[0]**3
            )
        return TrivialEvaluator(
            expr=self, f=[d0func, d1func, d2func, d3func, d4func],
            sub_evaluators=[f_ev, g_ev]
        )
class DivisionExpression2D(NumericExpression):
    r"""Divide one expression by another.

    Represents e.g. \f$ f(\mathbf{x}) = g(x)/h(y) \f$ or
    \f$ f(\mathbf{x}) = g(y)/h(\mathbf{x}) \f$, etc.
    """

    def __init__(self, expr1, expr2, variables=('both', 'both'),
                 singularity_handling='raise', eps=None, name='divide'):
        r"""Init function.

        @param expr1
            Numerator expression.
        @param expr2
            Denominator expression.
        @param variables
            tuple/list of two elements, each being either ``'both'``, `0`,
            or `1`, corresponding to `expr1` and `expr2`, respectively.
            ``'both'`` means the expression is a function of two variables,
            while `0` or `1` specify it to depend only on `x` or `y`,
            respectively.
        @param singularity_handling
            What to do when `expr2` vanishes at a point:
            * `"raise"` (default) raises a `ZeroDivisionError`
            * `"zero"` returns `0.0`, useful if the respective limit
              is (analytically) known to exist and be zero
            * `"one"` returns `1.0` (rarely useful)
            * `"+inf"` positive infinity
            * `"-inf"` negative infinity
        @param eps
            Small value below which to treat `expr2` as zero. By default,
            only exactly zero is considered zero.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(DivisionExpression2D, self).__init__(e1=expr1, e2=expr2, name=name)
        self._v1, self._v2 = variables
        for v in (self._v1, self._v2):
            if v not in ('both', 0, 1):
                raise TypeError("Expressions can only depend on element 0, 1 or both.")
        if singularity_handling not in ("raise", "zero", "one", "+inf", "-inf"):
            raise TypeError("Singularity handling must be one of "
                            "'raise', 'zero', 'one', '+inf', '-inf'.")
        self._sing_handling = singularity_handling
        self._eps = eps
        self.domain = self._get_domain()

    def _get_domain(self):
        # A 2D sub-expression fixes the full domain directly.
        if self._v1 == 'both':
            return self.e1.domain
        if self._v2 == 'both':
            return self.e2.domain
        # Both sub-expressions are 1D: assemble a per-axis domain list.
        result = [None] * (max(self._v1, self._v2) + 1)
        result[self._v1] = self.e1.domain
        result[self._v2] = self.e2.domain
        return result

    def _expr_str(self):
        def var_name(v):
            return 'x' if v == 'both' else 'x%d' % (v + 1)
        sing_handling = self._sing_handling
        if self._eps is not None:
            sing_handling += " (eps=%r)" % self._eps
        return ("e1(%s) / e2(%s), where e1=%s, e2=%s, singularity handling = %s"
                % (var_name(self._v1), var_name(self._v2),
                   self.e1.str(), self.e2.str(), sing_handling))

    @property
    def v1(self):
        r"""Variable(s) of first expression (one of 'both', 0, 1)."""
        return self._v1

    @property
    def v2(self):
        r"""Variable(s) of second expression (one of 'both', 0, 1)."""
        return self._v2

    @property
    def singularity_handling(self):
        r"""Specified singularity handling (see #__init__())."""
        return self._sing_handling

    @property
    def eps(self):
        r"""Specified epsilon (see #__init__())."""
        return self._eps

    def _evaluator(self, use_mp):
        return _DivisionExpression2DEval(self, use_mp)
class _DivisionExpression2DEval(EvaluatorBase):
    r"""Evaluator for DivisionExpression2D.

    Derivatives are implemented up to second order in both variables
    (separately) if supported by the sub-expressions.

    The derivative order `n` encodes: 0 = value, 1 = del_x, 2 = del_y,
    3 = del_x del_x, 4 = del_x del_y, 5 = del_y del_y.
    """
    def __init__(self, expr, use_mp):
        e1 = expr.e1.evaluator(use_mp)
        e2 = expr.e2.evaluator(use_mp)
        super(_DivisionExpression2DEval, self).__init__(expr, use_mp, [e1, e2])
        # None means "depends on both variables"; otherwise the axis index.
        self._v1 = None if expr.v1 == 'both' else expr.v1
        self._v2 = None if expr.v2 == 'both' else expr.v2
        # Value to return when the denominator vanishes; None means raise.
        if use_mp:
            sing_values = {'zero': mp.zero, 'one': mp.one,
                           '+inf': mp.inf, '-inf': -mp.inf}
        else:
            sing_values = {'zero': 0., 'one': 1.,
                           '+inf': float('+inf'), '-inf': float('-inf')}
        self._sing_handling = sing_values.get(expr.singularity_handling)
        # Magnitude below/at which the denominator counts as zero.
        self._eps = 0.0 if expr.eps is None else expr.eps
        self.e1 = e1
        self.e2 = e2
    def prepare_evaluation_at(self, pts, orders=(0,)):
        """Forward evaluation points to both sub-evaluators.

        1D sub-expressions only receive the unique coordinates of their
        own axis (and the per-axis derivative orders).
        """
        ordersXY = self.orders_for_2d_axes(orders)
        if self._v1 is None:
            self.e1.prepare_evaluation_at(pts, orders=orders)
        else:
            pts1d = self.unique_for_axis(self._v1, pts)
            self.e1.prepare_evaluation_at(pts1d, orders=ordersXY[self._v1])
        if self._v2 is None:
            self.e2.prepare_evaluation_at(pts, orders=orders)
        else:
            pts1d = self.unique_for_axis(self._v2, pts)
            self.e2.prepare_evaluation_at(pts1d, orders=ordersXY[self._v2])
    def _x_changed(self, x):
        # No per-point state to update.
        pass
    def _evaluate_evaluator(self, e, x, v, n):
        """Evaluate sub-evaluator `e` at `x` for encoded order `n`.

        `v` is the sub-expression's axis (or None for a 2D one).
        """
        if v is None:
            return e.diff(x, n)
        x = x[v]
        if n == 0:
            return e(x)
        nX, nY = self.unpack_2d_diff_order(n)
        if v == 0:
            # A function of x alone: any y-derivative vanishes.
            return 0. if nY != 0 else e.diff(x, nX)
        if v == 1:
            return 0. if nX != 0 else e.diff(x, nY)
    def _eval(self, n=0):
        e1, e2 = self.e1, self.e2
        v1, v2 = self._v1, self._v2
        x = self._x
        e1x = self._evaluate_evaluator(e1, x, v1, n=0)
        e2x = self._evaluate_evaluator(e2, x, v2, n=0)
        # The denominator is singular only when its *magnitude* is at most
        # eps. (The previous `e2x <= eps` misclassified every negative
        # denominator as singular, contradicting the documented "only
        # exactly zero is considered zero" default.)
        if abs(e2x) <= self._eps:
            if self._sing_handling is None:
                raise ZeroDivisionError
            return self._sing_handling
        if n == 0:
            return e1x/e2x
        if n == 1:  # del_x
            dx1 = self._evaluate_evaluator(e1, x, v1, n=1)
            dx2 = self._evaluate_evaluator(e2, x, v2, n=1)
            return dx1/e2x - dx2*e1x/e2x**2
        if n == 2:  # del_y
            dy1 = self._evaluate_evaluator(e1, x, v1, n=2)
            dy2 = self._evaluate_evaluator(e2, x, v2, n=2)
            return dy1/e2x - dy2*e1x/e2x**2
        if n == 3:  # del_x del_x
            dx1 = self._evaluate_evaluator(e1, x, v1, n=1)
            dx2 = self._evaluate_evaluator(e2, x, v2, n=1)
            dxx1 = self._evaluate_evaluator(e1, x, v1, n=3)
            dxx2 = self._evaluate_evaluator(e2, x, v2, n=3)
            return (-2*dx1*dx2/e2x + 2*dx2**2*e1x/e2x**2 + dxx1 - dxx2*e1x/e2x)/e2x
        if n == 4:  # del_x del_y
            dx1 = self._evaluate_evaluator(e1, x, v1, n=1)
            dx2 = self._evaluate_evaluator(e2, x, v2, n=1)
            dy1 = self._evaluate_evaluator(e1, x, v1, n=2)
            dy2 = self._evaluate_evaluator(e2, x, v2, n=2)
            dxy1 = self._evaluate_evaluator(e1, x, v1, n=4)
            dxy2 = self._evaluate_evaluator(e2, x, v2, n=4)
            return (-dx1*dy2/e2x - dx2*dy1/e2x + 2*dx2*dy2*e1x/e2x**2 + dxy1 - dxy2*e1x/e2x)/e2x
        if n == 5:  # del_y del_y
            dy1 = self._evaluate_evaluator(e1, x, v1, n=2)
            dy2 = self._evaluate_evaluator(e2, x, v2, n=2)
            dyy1 = self._evaluate_evaluator(e1, x, v1, n=5)
            dyy2 = self._evaluate_evaluator(e2, x, v2, n=5)
            return (-2*dy1*dy2/e2x + 2*dy2**2*e1x/e2x**2 + dyy1 - dyy2*e1x/e2x)/e2x
        raise NotImplementedError
class OffsetExpression(NumericExpression):
    r"""Sum of an expression and a constant.

    Implements \f$ f(x) = g(x) + c \f$; the constant can be set and
    retrieved via the `c` attribute.
    """

    def __init__(self, expr, c, name='offset'):
        r"""Init function.

        @param expr Expression to add to.
        @param c Constant to add.
        @param name Name of the expression (e.g. for print_tree()).
        """
        super().__init__(e=expr, domain=expr.domain, verbosity=expr.verbosity,
                         name=name)
        ## Constant offset added to the expression.
        self.c = c

    def _expr_str(self):
        sign = "+" if self.c >= 0 else "-"
        return "f(x) %s %r, where f(x)=%s" % (sign, abs(self.c), self.e.str())

    @property
    def nice_name(self):
        sign = "+" if self.c >= 0 else "-"
        return "%s (e %s %r)" % (self.name, sign, abs(self.c))

    def _evaluator(self, use_mp):
        sub = self.e.evaluator(use_mp)
        offset = self.c
        if offset == 0:
            # Adding zero changes nothing; reuse the sub-evaluator as-is.
            return sub
        def factory(n):
            # The constant only survives in the 0th derivative.
            if n == 0:
                return lambda x: sub.diff(x, n) + offset
            return lambda x: sub.diff(x, n)
        return EvaluatorFactory(self, factory, [sub])
class SumExpression(NumericExpression):
    r"""Sum of two expressions with an optional coefficient on the second.

    Implements \f$ f(x) = g(x) + a h(x) \f$; the coefficient \f$ a \f$
    can be set/retrieved via the `coeff` property.
    """

    def __init__(self, expr1, expr2, coeff=1.0, name='add'):
        r"""Init function.

        @param expr1
            First expression.
        @param expr2
            Second expression.
        @param coeff
            Coefficient for the second expression. Default is `1.0`.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(SumExpression, self).__init__(e1=expr1, e2=expr2, name=name)
        self._coeff = coeff
        self.domain = self.e1.domain

    @property
    def coeff(self):
        r"""Coefficient of the second term in the sum."""
        return self._coeff

    @coeff.setter
    def coeff(self, value):
        self._coeff = value

    def _expr_str(self):
        where = "e1=%s, e2=%s" % (self.e1.str(), self.e2.str())
        if self._coeff == 1.0:
            op = "+"
        elif self._coeff == -1.0:
            op = "-"
        else:
            op = "+ c"
            where += ", c=%r" % self._coeff
        return "e1 %s e2, where %s" % (op, where)

    @property
    def nice_name(self):
        coeff = self._coeff
        if coeff == 1.0:
            return "%s (e1 + e2)" % self.name
        if coeff == -1.0:
            return "%s (e1 - e2)" % self.name
        sign = "+" if coeff >= 0 else "-"
        return "%s (e1 %s %r * e2)" % (self.name, sign, abs(coeff))

    def _evaluator(self, use_mp):
        ev1 = self.e1.evaluator(use_mp)
        ev2 = self.e2.evaluator(use_mp)
        coeff = self._coeff
        if coeff == 1.0:
            def factory(n):
                # Differentiation is linear: (g + h)^(n) = g^(n) + h^(n).
                return lambda x: ev1.diff(x, n) + ev2.diff(x, n)
        else:
            def factory(n):
                return lambda x: ev1.diff(x, n) + coeff * ev2.diff(x, n)
        return EvaluatorFactory(self, factory, [ev1, ev2])
class BlendExpression2D(SumExpression):
    r"""Blend between two expressions based on a third.

    Represents an expression of the form \f[
        f(\mathbf{x}) = f_1(\mathbf{x}) (1 - \beta(\mathbf{x}))
                        + \beta(\mathbf{x}) f_2(\mathbf{x}).
    \f]
    """
    def __init__(self, expr1, expr2, blend_expr, variables=('both', 'both', 'both'),
                 name='blend'):
        r"""Init function.

        @param expr1
            First expression (\f$ f_1 \f$).
        @param expr2
            Second expression (\f$ f_2 \f$).
        @param blend_expr
            Blending expression (\f$ \beta \f$).
        @param variables
            As in ProductExpression2D, but with a third element for the
            `blend_expr`.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        v1, v2, v3 = variables
        # First term: f1 * (1 - beta).
        # NOTE(review): the literal `1` is passed where SumExpression
        # expects an expression (SumExpression reads `.domain` from its
        # first argument) — confirm the NumericExpression base wraps
        # plain numbers, otherwise this should be ConstantExpression(1).
        first = ProductExpression2D(expr1,
                                    SumExpression(1, blend_expr, coeff=-1.0),
                                    variables=(v1, v3))
        # Second term: beta * f2.
        second = ProductExpression2D(expr2, blend_expr, variables=(v2, v3))
        super(BlendExpression2D, self).__init__(expr1=first, expr2=second, name=name)
class EmbedExpression2D(NumericExpression):
    r"""Embed a 1D function into 2D along the x- or y-axis.

    Represents \f$ \mathbf{x} \mapsto f(x) \f$ or
    \f$ \mathbf{x} \mapsto f(y) \f$.
    """

    def __init__(self, expr, axis=0, name='embedding'):
        r"""Init function.

        @param expr
            Expression to embed.
        @param axis
            `0` to embed along the x-axis, `1` for the y-axis.
        @param name
            Name of the expression (e.g. for print_tree()).
        """
        super(EmbedExpression2D, self).__init__(e=expr, name=name)
        self._axis = axis
        self.domain = self._get_domain()

    def _get_domain(self):
        # Only the embedded axis carries the sub-expression's domain.
        domain = [None, None]
        domain[self._axis] = self.e.domain
        return domain

    def _expr_str(self):
        return "f(x%d), where f(x)=%s" % (self._axis, self.e.str())

    def is_zero_expression(self):
        return self.e.is_zero_expression()

    def _evaluator(self, use_mp):
        sub = self.e.evaluator(use_mp)
        axis = self._axis
        def factory(n):
            # NOTE(review): unpack_2d_diff_order is called on the
            # expression here (elsewhere it lives on evaluators) —
            # confirm it is available on NumericExpression.
            nX, nY = self.unpack_2d_diff_order(n)
            off_axis_order = nY if axis == 0 else nX
            if off_axis_order != 0:
                # Derivatives along the non-embedded axis vanish.
                return self.zero
            on_axis_order = (nX, nY)[axis]
            return lambda x: sub.diff(x[axis], on_axis_order)
        return EvaluatorFactory(self, factory, [sub])
class SimpleSinExpression(SimpleExpression):
    r"""Sine expression with arbitrary derivatives."""

    def __init__(self, domain=None, name='sin'):
        r"""Init function.

        @param domain Optional domain of the expression.
        @param name Name of the expression (e.g. for print_tree()).
        """
        # Derivatives of sin cycle with period 4: sin, cos, -sin, -cos.
        neg = lambda f: (lambda x: -f(x))
        mp_cycle = (mp.sin, mp.cos, neg(mp.sin), neg(mp.cos))
        fp_cycle = (math.sin, math.cos, neg(math.sin), neg(math.cos))
        super(SimpleSinExpression, self).__init__(
            mp_terms=lambda n: mp_cycle[n % 4],
            fp_terms=lambda n: fp_cycle[n % 4],
            desc="sin(x)",
            domain=domain,
            name=name
        )
class SimpleCosExpression(SimpleExpression):
    r"""Cosine expression with arbitrary derivatives."""

    def __init__(self, domain=None, name='cos'):
        r"""Init function.

        @param domain Optional domain of the expression.
        @param name Name of the expression (e.g. for print_tree()).
        """
        # Derivatives of cos cycle with period 4: cos, -sin, -cos, sin.
        neg = lambda f: (lambda x: -f(x))
        mp_cycle = (mp.cos, neg(mp.sin), neg(mp.cos), mp.sin)
        fp_cycle = (math.cos, neg(math.sin), neg(math.cos), math.sin)
        super(SimpleCosExpression, self).__init__(
            mp_terms=lambda n: mp_cycle[n % 4],
            fp_terms=lambda n: fp_cycle[n % 4],
            desc="cos(x)",
            domain=domain,
            name=name
        )
class SinSquaredExpression(SimpleExpression):
    r"""Sine squared (`sin(x)**2`) expression with first two derivatives."""
    def __init__(self, domain=None, name='sin^2'):
        r"""Init function.

        @param domain Optional domain of the expression.
        @param name Name of the expression (e.g. for print_tree()).
        """
        # d/dx sin^2(x) = sin(2x) and d^2/dx^2 sin^2(x) = 2 cos(2x).
        super(SinSquaredExpression, self).__init__(
            mp_terms=[lambda x: mp.sin(x)**2,
                      lambda x: mp.sin(2*x),
                      lambda x: 2*mp.cos(2*x)],
            fp_terms=[lambda x: math.sin(x)**2,
                      lambda x: math.sin(2*x),
                      lambda x: 2*math.cos(2*x)],
            desc="sin^2(x)",
            domain=domain,
            name=name
        )
class SimpleSinhExpression(SimpleExpression):
    r"""Hyperbolic sine (sinh) expression with arbitrary derivatives."""

    def __init__(self, domain=None, name='sinh'):
        r"""Init function.

        @param domain Optional domain of the expression.
        @param name Name of the expression (e.g. for print_tree()).
        """
        # Derivatives of sinh alternate between sinh and cosh.
        mp_pair = (mp.sinh, mp.cosh)
        fp_pair = (math.sinh, math.cosh)
        super().__init__(
            mp_terms=lambda n: mp_pair[n % 2],
            fp_terms=lambda n: fp_pair[n % 2],
            desc="sinh(x)",
            domain=domain,
            name=name
        )
class SimpleCoshExpression(SimpleExpression):
    r"""Hyperbolic cosine (cosh) expression with arbitrary derivatives."""

    def __init__(self, domain=None, name='cosh'):
        r"""Init function.

        @param domain Optional domain of the expression.
        @param name Name of the expression (e.g. for print_tree()).
        """
        # Derivatives of cosh alternate between cosh and sinh.
        mp_pair = (mp.cosh, mp.sinh)
        fp_pair = (math.cosh, math.sinh)
        super().__init__(
            mp_terms=lambda n: mp_pair[n % 2],
            fp_terms=lambda n: fp_pair[n % 2],
            desc="cosh(x)",
            domain=domain,
            name=name
        )
|
<reponame>paulbeka/bank2Budget
from __future__ import print_function
import ynab
from ynab.rest import ApiException
from pprint import pprint
import json
import itertools
import os
from datetime import datetime
# from urllib.parse import urljoin
# import os
# import sys
# # insert at 1, 0 is the script path (or '' in REPL)
# sys.path.insert(1, urljoin(os.getcwd(), 'config'))
# from config import ynabConfig
import settings
# Configure the YNAB client: the API token comes from local settings.
configuration = ynab.Configuration()
configuration.api_key['Authorization'] = settings.YNAB['APIKEY']  # personal access token from the YNAB Developer settings page
# The YNAB API expects the token with a 'Bearer' prefix in the Authorization header.
configuration.api_key_prefix['Authorization'] = 'Bearer'
# Shared transactions-endpoint client used by the Ynab class below.
api_instance = ynab.TransactionsApi(ynab.ApiClient(configuration))
class Ynab():
    """Pushes bank transactions into YNAB via its REST API."""

    def sayHello(self):
        """Smoke-test method: print a greeting."""
        print("hello")

    def loadJSONfromFile(self, file):
        """Load and return the JSON content of *file*."""
        with open(file) as f:
            r = json.load(f)
        return r

    def JSON_OPS_2_YNAB(self, src):
        """Post the bank operations in *src* (an iterable of dicts) to YNAB.

        Each operation must provide 'effectiveDate', 'amount', 'id' and
        'detail' keys. Posting stops at the first API error.
        """
        for op in src:
            date = op['effectiveDate']
            amount = int(op['amount'] * 1000)  # YNAB amounts are in milliunits of currency
            import_id = op['id']
            # The API limits payee names to 100 characters.
            payee_name = (op['detail'][:97] + '...') if len(op['detail']) > 99 else op['detail']
            now = datetime.now()
            current_time = now.strftime("%d-%m-%y @ %H:%M:%S")
            transaction = ynab.SaveTransactionWrapper(
                {
                    'account_id': settings.YNAB['BANKACCOUNTID'],
                    'date': date,
                    'amount': amount,
                    'import_id': import_id,
                    'payee_name': payee_name,
                    'memo': 'brought by Python script on ' + current_time,
                    'approved': True  # this property is not taken into account!?
                }
            )
            try:
                # See https://github.com/deanmcgregor/ynab-python/blob/master/docs/SaveTransaction.md
                # for details on the transaction fields.
                api_instance.create_transaction(settings.YNAB['BUDGETID'], transaction)
            except ApiException as e:
                # Error message now names the API call actually made
                # (it previously referenced get_transactions).
                print("Exception when calling TransactionsApi->create_transaction: %s\n" % e)
                break
    # SaveTransaction field reference (YNAB API docs): account_id, date,
    # amount (milliunits), payee_id/payee_name, category_id, memo, cleared,
    # approved, flag_color, import_id. Using the consistent import_id
    # format 'YNAB:[milliunit_amount]:[iso_date]:[occurrence]' prevents
    # duplicates across Direct Import / File Based Import.
<filename>comment_reporter/comment_report_nlg_service.py
import logging
import random
from collections import defaultdict
from typing import Dict, Iterable, List, Optional, Tuple
from .resources.general_topic_modeling_resource import GeneralTopicModelingResource
from .resources.sentiment_stats_resource import SentimentStatsResource
from .resources.general_summary_resource import GeneralSummaryResource
from .resources.hate_speech_stats_resource import HateSpeechResource
from .resources.generic_stats_resource import GenericStatsResource
from .constants import CONJUNCTIONS, get_error_message
from .core.aggregator import Aggregator
from .core.document_planner import NoInterestingMessagesException
from .core.models import Template
from .core.morphological_realizer import MorphologicalRealizer
from .core.pipeline import NLGPipeline, NLGPipelineComponent
from .core.realize_slots import SlotRealizer
from .core.registry import Registry
from .core.surface_realizer import BodyHTMLSurfaceRealizer, HeadlineHTMLSurfaceRealizer
from .core.template_reader import read_templates
from .core.template_selector import TemplateSelector
from .comment_report_document_planner import CommentReportBodyDocumentPlanner, CommentReportHeadlineDocumentPlanner
from .comment_report_importance_allocator import CommentReportImportanceSelector
from .comment_report_message_generator import CommentReportMessageGenerator, NoMessagesForSelectionException
from .english_uralicNLP_morphological_realizer import EnglishUralicNLPMorphologicalRealizer
from .finnish_uralicNLP_morphological_realizer import FinnishUralicNLPMorphologicalRealizer
from .resources.processor_resource import ProcessorResource
log = logging.getLogger("root")
class CommentReportNlgService(object):
    """NLG service that turns a set of comments into an HTML report.

    Wires the processor resources (templates, message parsers, slot
    realizers) into a Registry, then builds and runs the body/headline
    NLG pipelines on demand.
    """

    processor_resources: List[ProcessorResource] = []

    # These are (re)initialized every time run_pipeline is called
    body_pipeline = None
    headline_pipeline = None

    def __init__(self, random_seed: Optional[int] = None) -> None:
        """
        :param random_seed: seed for random number generation, for repeatability
        """
        # New registry and result importer
        self.registry = Registry()

        # Per-processor resources
        self.processor_resources = [
            GenericStatsResource(),
            HateSpeechResource(),
            GeneralSummaryResource(),
            SentimentStatsResource(),
            GeneralTopicModelingResource(),
        ]

        # Templates
        self.registry.register("templates", self._load_templates())

        # Misc language data
        self.registry.register("CONJUNCTIONS", CONJUNCTIONS)

        # PRNG seed
        self._set_seed(seed_val=random_seed)

        # Message Parsers
        self.registry.register("message-parsers", [])
        for processor_resource in self.processor_resources:
            self.registry.get("message-parsers").append(processor_resource.generate_messages)

        # Slot Realizers Components
        self.registry.register("slot-realizers", [])
        for processor_resource in self.processor_resources:
            components = [component(self.registry) for component in processor_resource.slot_realizer_components()]
            self.registry.get("slot-realizers").extend(components)

    def _load_templates(self) -> Dict[str, List[Template]]:
        """Merge the templates of all processor resources into one per-language dict."""
        log.info("Loading templates")
        templates: Dict[str, List[Template]] = defaultdict(list)
        for resource in self.processor_resources:
            for language, new_templates in read_templates(resource.templates_string())[0].items():
                templates[language].extend(new_templates)
        return templates

    @staticmethod
    def _get_components(pipeline_type: str) -> Iterable[NLGPipelineComponent]:
        """Yield the pipeline components in execution order.

        :param pipeline_type: "headline" selects the headline planner and
            surface realizer; any other value selects the body variants.
            (Renamed from ``type`` to avoid shadowing the builtin.)
        """
        yield CommentReportMessageGenerator()
        yield CommentReportImportanceSelector()
        if pipeline_type == "headline":
            yield CommentReportHeadlineDocumentPlanner()
        else:
            yield CommentReportBodyDocumentPlanner()
        yield TemplateSelector()
        yield Aggregator()
        yield SlotRealizer()
        yield MorphologicalRealizer(
            {"fi": FinnishUralicNLPMorphologicalRealizer(), "en": EnglishUralicNLPMorphologicalRealizer()}
        )
        if pipeline_type == "headline":
            yield HeadlineHTMLSurfaceRealizer()
        else:
            yield BodyHTMLSurfaceRealizer()

    def run_pipeline(
        self, output_language: str, comments: List[str], comment_language: Optional[str]
    ) -> Tuple[str, List[str]]:
        """Run the body pipeline on *comments*; return (html_body, error_names).

        Known failure modes never propagate: they are mapped to localized
        error messages and recorded in the returned error list.
        """
        log.info("Configuring Body NLG Pipeline")
        self.body_pipeline = NLGPipeline(self.registry, *self._get_components("body"))
        self.headline_pipeline = NLGPipeline(self.registry, *self._get_components("headline"))

        if not comment_language:
            comment_language = "all"

        errors: List[str] = []
        log.info("Running Body NLG pipeline: language={}".format(output_language))
        try:
            body = self.body_pipeline.run(
                (comments, comment_language), output_language, prng_seed=self.registry.get("seed")
            )
            log.info("Body pipeline complete")
        except NoMessagesForSelectionException as ex:
            log.error("%s", ex)
            body = get_error_message(output_language, "no-messages-for-selection")
            errors.append("NoMessagesForSelectionException")
        except NoInterestingMessagesException as ex:
            log.info("%s", ex)
            body = get_error_message(output_language, "no-interesting-messages-for-selection")
            errors.append("NoInterestingMessagesException")
        except Exception as ex:
            log.exception("%s", ex)
            body = get_error_message(output_language, "general-error")
            errors.append("{}: {}".format(ex.__class__.__name__, str(ex)))

        return body, errors

    def _set_seed(self, seed_val: Optional[int] = None) -> None:
        """Register the PRNG seed, drawing a random one when none is given.

        Bug fix: the previous ``if not seed_val`` check discarded an explicit
        seed of 0; only ``None`` now means "no preset seed".
        """
        log.info("Selecting seed for NLG pipeline")
        if seed_val is None:
            seed_val = random.randint(1, 10000000)
            log.info("No preset seed, using random seed {}".format(seed_val))
        else:
            log.info("Using preset seed {}".format(seed_val))
        self.registry.register("seed", seed_val)

    def get_languages(self) -> List[str]:
        """Return the language codes for which templates are available."""
        return list(self.registry.get("templates").keys())
|
# -*- coding: utf-8 -*-
# Demo of the standard-library xml.etree.ElementTree module:
# parsing, querying (including XPath), modifying and generating XML.
import xml.etree.ElementTree as ET
import os
tree = ET.parse('Chapter11_XML02.xml')  # parse the file
root = tree.getroot()  # get the root element
print(root.tag, root.attrib)  # tag and attributes of the root element
print(root[1][2].text)  # access a specific element by index
for child in root:  # iterate over the children's tags and attributes
    print(child.tag, child.attrib)
for neighbor in root.iter('neighbor'):  # recursively visit every 'neighbor' element's tag, text and attributes
    print(neighbor.tag, neighbor.text, neighbor.attrib)
for country in root.findall('country'):  # direct child elements matching 'country'
    rank = country.find('rank').text  # text of the first matching child element
    name = country.get('name')  # attribute access
    print(name, rank)
for rank in root.iter('rank'):
    new_rank = int(rank.text) + 1
    rank.text = str(new_rank)  # change the element text
    rank.set('updated', 'yes')  # modify an attribute
for country in root.findall('country'):  # findall returns a list, so removing while iterating is safe
    rank = int(country.find('rank').text)
    if rank > 50:
        root.remove(country)  # delete the element
new_xml = 'Chapter11_XML02-new.xml'
tree.write(new_xml)  # write the changes out as an XML document
os.remove(new_xml)
# Reading XML content via XPath
xml = r"""<?xml version="1.0"?>
<data>
    <country name="Liechtenstein">
        <rank>1</rank>
        <year>2008</year>
        <gdppc>141100</gdppc>
        <neighbor name="Austria" direction="E"/>
        <neighbor name="Switzerland" direction="W"/>
    </country>
    <country name="Singapore">
        <rank>4</rank>
        <year>2011</year>
        <gdppc>59900</gdppc>
        <neighbor name="Malaysia" direction="N"/>
    </country>
    <country name="Panama">
        <rank>68</rank>
        <year>2011</year>
        <gdppc>13600</gdppc>
        <neighbor name="<NAME>" direction="W"/>
        <neighbor name="Colombia" direction="E"/>
    </country>
</data>
"""
root2 = ET.fromstring(xml)  # parse from a string
print(root2.findall("."))  # the root element itself
print(root2.findall("./country/neighbor"))  # neighbor elements under country, under the root
print(root2.findall(".//neighbor[2]"))  # every neighbor that is the second neighbor child of its parent
print(root2.findall(".//year/..[@name='Singapore']"))  # parents of year elements that carry name='Singapore'
print(root2.findall(".//*[@name='Singapore']/year"))  # year elements under the element named Singapore
print(root2.findall(".//*[@name='Singapore']/year")[0].text)  # text of that year element
# Generating XML content
r = ET.Element("root")  # create the root element
a = ET.SubElement(r, 'sub1', attrib={'name': 'AAA', 'num': "111"}, con="test")  # extra keyword args become attributes too
b = ET.SubElement(r, "sub2")  # create a child element
b.attrib = {"name": "BBB"}  # assign attributes
c = ET.SubElement(r, "sub3")  # create a child element
c.text = "test3"  # set its text
ET.dump(r)  # print the element tree to stdout (debugging only)
tree2 = ET.ElementTree(r)  # create an ElementTree object
new_xml2 = 'Chapter13_StandradLibrary11_XML02-new2.xml'
tree2.write(new_xml2)  # write out the XML document
os.remove(new_xml2)
|
<filename>test/xiaoshizhi.py
# coding: utf-8
'''
小市值择时买卖
配置指定频率的调仓日,在调仓日每日指定时间,计算沪深300指数和中证500指数当前的20日涨
幅,如果2个指数的20日涨幅有一个为正,则进行选股调仓,之后如此循环往复。
止损策略:每日指定时间,计算沪深300指数和中证500指数当前的20日涨幅,如果2个指数涨幅
都为负,则清仓,重置调仓计数,待下次调仓条件满足再操作
版本:v1.2.7
日期:2016.08.13
作者:Morningstar
'''
import tradestat
#from blacklist import *
# blacklist.py
# Recommended: keep this code in a research-side blacklist.py and import it,
# so a live simulation can refresh the list without editing strategy code.
# Stocks currently considered highly unsuitable to buy.
# Notes: 1. The list is time-sensitive: best skipped in backtests, used live.
#        2. Ideally gathered by a dedicated module / data analysis and
#           refreshed on a schedule.
def get_blacklist():
    """Return the hard-coded stock blacklist (last updated 2016.7.10)."""
    # Keheng / Taikong Banye face suspension-of-listing risk if 2016 losses continue.
    return [
        "600656.XSHG", "300372.XSHE", "600403.XSHG", "600421.XSHG",
        "600733.XSHG", "300399.XSHE", "600145.XSHG", "002679.XSHE",
        "000020.XSHE", "002330.XSHE", "300117.XSHE", "300135.XSHE",
        "002566.XSHE", "002119.XSHE", "300208.XSHE", "002237.XSHE",
        "002608.XSHE", "000691.XSHE", "002694.XSHE", "002715.XSHE",
        "002211.XSHE", "000788.XSHE", "300380.XSHE", "300028.XSHE",
        "000668.XSHE", "300033.XSHE", "300126.XSHE", "300340.XSHE",
        "300344.XSHE", "002473.XSHE",
    ]
def before_trading_start(context):
    """Daily pre-market platform hook: currently only logs a separator line."""
    log.info("---------------------------------------------")
    #log.info("==> before trading start @ %s", str(context.current_dt))
    pass
def after_trading_end(context):
    """Daily post-market hook: report trade statistics and log orders that
    were still open (uncompleted) at the close."""
    #log.info("==> after trading end @ %s", str(context.current_dt))
    g.trade_stat.report(context)
    # Fetch today's still-open orders
    orders = get_open_orders()
    for _order in orders.values():
        log.info("canceled uncompleted order: %s" % (_order.order_id))
    pass
def initialize(context):
    """One-time strategy setup: fees, benchmark, tunables and the daily task."""
    log.info("==> initialize @ %s", str(context.current_dt))
    # Set commission rates
    set_commission(PerTrade(buy_cost=0.0003, sell_cost=0.0013, min_cost=5))
    # Benchmark: CSI 300 index '000300.XSHG'
    set_benchmark('000300.XSHG')
    # Use real prices (recommended for paper trading; comment out for backtests)
    set_option('use_real_price', True)
    g.period = 10 # rebalancing frequency, in days
    g.day_count = 0 # rebalancing-day counter, in days
    # Stock-selection parameters
    g.selected_stock_count = 100 # number of candidate stocks
    g.buy_stock_count = 5 # number of stocks to buy
    # Whether to filter candidates by P/E ratio.
    # Without the PE filter this backtest earns more with slightly larger
    # drawdown — a matter of preference.
    g.select_by_pe = True
    if g.select_by_pe:
        g.max_pe = 200
        g.min_pe = 2
    g.filter_gem = True # whether to filter out ChiNext (GEM) stocks
    g.filter_blacklist = False # whether to filter blacklisted stocks; off for backtests, on for live runs
    # Log the configuration
    log.info("调仓日频率: %d 日" % (g.period))
    log.info("备选股票数目: %d" % (g.selected_stock_count))
    log.info("购买股票数目: %d" % (g.buy_stock_count))
    log.info("是否根据PE选股: %s" % (g.select_by_pe))
    if g.select_by_pe:
        log.info("最大PE: %s" % (g.max_pe))
        log.info("最小PE: %s" % (g.min_pe))
    log.info("是否过滤创业板股票: %s" % (g.filter_gem))
    log.info("是否过滤黑名单股票: %s" % (g.filter_blacklist))
    if g.filter_blacklist:
        log.info("当前股票黑名单:%s" % str(get_blacklist()))
    # Load the trade-statistics module
    g.trade_stat = tradestat.trade_stat()
    # Run every trading day at 14:52
    run_daily(do_handle_data, '14:52')
'''
# 按分钟回测
def handle_data(context, data):
# 获得当前时间
hour = context.current_dt.hour
minute = context.current_dt.minute
# 每天下午14:53调仓
if hour == 14 and minute == 53:
'''
def do_handle_data(context):
    """Daily 14:52 task: index-based market-timing stop-loss plus periodic rebalancing."""
    log.info("调仓日计数 [%d]" % (g.day_count))
    # Look back at the indexes' 20-day growth
    hs300 = '000300.XSHG' # CSI 300 index: proxy for large caps
    zz500 = '000905.XSHG' # CSI 500 index: proxy for small caps
    gr_hs300 = get_growth_rate(hs300)
    gr_zz500 = get_growth_rate(zz500)
    log.info("当前沪深300指数的20日涨幅 [%.2f%%]" % (gr_hs300 * 100))
    log.info("当前中证500指数的20日涨幅 [%.2f%%]" % (gr_zz500 * 100))
    # If both indexes' 20-day growth is non-positive, sell all holdings and
    # reset the rebalancing counter.
    #
    # If a limit-down blocked the sell, the next run re-checks the market and
    # sells when it still should — at the cost of riding one more down day.
    #
    # If either index rose over the last 20 days, buy the top small-cap picks.
    if gr_hs300 <= 0 and gr_zz500 <= 0:
        if context.portfolio.positions:
            clear_position(context)
        g.day_count = 0
    else: #if ret_hs300 > 0 or ret_zz500 > 0:
        if g.day_count % g.period == 0:
            log.info("==> 满足条件进行调仓")
            buy_stocks = select_stocks(context)
            log.info("选股后可买股票: %s" % (buy_stocks))
            adjust_position(context, buy_stocks)
        g.day_count += 1
# Growth rate of a security over the last n days, based on the current price.
# n defaults to 20 days.
def get_growth_rate(security, n=20):
    """Return (current_price - close_n_days_ago) / close_n_days_ago."""
    lc = get_close_price(security, n)
    #c = data[security].close
    c = get_close_price(security, 1, '1m')  # latest 1-minute close as the current price
    return (c - lc) / lc
# Closing price as of n time units ago (unit defaults to one day).
def get_close_price(security, n, unit='1d'):
    # NOTE(review): ('close') is just the string 'close', not a tuple; the
    # platform API appears to accept a plain field name — confirm against the
    # JoinQuant attribute_history docs.
    return attribute_history(security, n, unit, ('close'), True)['close'][0]
# Custom order helper.
# Per the JoinQuant docs, order functions block: when order_target_value
# returns, submission is complete.
# Returns True if the order was submitted and fully filled,
# False if submission failed or the order was cancelled.
def order_target_value_(security, value):
    if value == 0:
        log.debug("Selling out %s" % (security))
    else:
        log.debug("Order %s to value %f" % (security, value))
    # If the stock is suspended, order creation fails and order_target_value
    # returns None. If the stock is limit-up/down, the order is created but
    # will be cancelled, so only a 'held' (fully filled) status is success.
    order = order_target_value(security, value)
    # Fix: compare with 'is not None' (not '!= None') and return the condition directly.
    return order is not None and order.status == OrderStatus.held
# Open a position: buy the given value of the security.
def open_position(security, value):
    """Thin wrapper; True when the buy order filled completely."""
    return order_target_value_(security, value)
# Close a position: sell the given holding entirely.
def close_position(position):
    """Sell out *position*; on success record the P&L in the trade stats."""
    # Update profit/loss statistics immediately after a successful close
    security = position.security
    if order_target_value_(security, 0): # may fail, e.g. if the stock is suspended
        g.trade_stat.watch(security, position)
        return True
    return False
# Sell out every holding.
def clear_position(context):
    """Liquidate the entire portfolio."""
    log.info("==> 清仓,卖出所有股票")
    for stock in context.portfolio.positions.keys():
        position = context.portfolio.positions[stock]
        close_position(position)
# Filter out suspended stocks, ST-type stocks and stocks carrying a delisting tag.
def filter_paused_and_st_stock(stock_list):
    current_data = get_current_data()
    # Keep a stock only if it trades normally and its name carries none of the
    # warning markers ('ST', '*', or the delisting character).
    return [stock for stock in stock_list
        if not current_data[stock].paused and not current_data[stock].is_st
        and 'ST' not in current_data[stock].name and '*' not in
        current_data[stock].name and '退' not in current_data[stock].name]
# Filter out stocks trading at their daily upper price limit.
def filter_limitup_stock(context, stock_list):
    last_prices = history(
        1, unit='1m', field='close', security_list=stock_list)
    current_data = get_current_data()
    # Stocks already held are kept even at limit-up, so they stay eligible and
    # are not replaced by a different pick merely because they were filtered.
    return [stock for stock in stock_list
        if stock in context.portfolio.positions.keys() or last_prices[
            stock][-1] < current_data[stock].high_limit]
# Filter out stocks trading at their daily lower price limit.
def filter_limitdown_stock(context, stock_list):
    last_prices = history(
        1, unit='1m', field='close', security_list=stock_list)
    current_data = get_current_data()
    # Held stocks are kept even at limit-down (mirrors filter_limitup_stock).
    return [stock for stock in stock_list
        if stock in context.portfolio.positions.keys() or last_prices[
            stock][-1] > current_data[stock].low_limit]
    #return [stock for stock in stock_list if last_prices[stock][-1] > current_data[stock].low_limit]
# Drop blacklisted stocks.
def filter_blacklist_stock(context, stock_list):
    """Return *stock_list* without any code present on the blacklist."""
    banned = set(get_blacklist())
    return [code for code in stock_list if code not in banned]
# Drop ChiNext (GEM) stocks, whose ticker codes start with '300'.
def filter_gem_stock(context, stock_list):
    """Return *stock_list* without ChiNext listings."""
    return [code for code in stock_list if not code.startswith('300')]
# Drop stocks whose n-day growth rate is not positive.
def filter_by_growth_rate(stock_list, n):
    """Keep only stocks that rose over the last *n* days."""
    return [stock for stock in stock_list if get_growth_rate(stock, n) > 0]
# Stock selection:
# take the smallest-market-cap candidates, run the filters, then keep only as
# many stocks as we are allowed to buy.
def select_stocks(context):
    """Return the list of candidate codes to buy (length <= g.buy_stock_count)."""
    q = None
    if g.select_by_pe:
        # Candidates sorted by market cap ascending, constrained to the PE band
        q = query(valuation.code).filter(
            valuation.pe_ratio > g.min_pe,
            valuation.pe_ratio < g.max_pe).order_by(valuation.market_cap.asc(
            )).limit(g.selected_stock_count)
    else:
        q = query(valuation.code).order_by(valuation.market_cap.asc()).limit(
            g.selected_stock_count)
    df = get_fundamentals(q)
    stock_list = list(df['code'])
    if g.filter_gem:
        stock_list = filter_gem_stock(context, stock_list)
    if g.filter_blacklist:
        stock_list = filter_blacklist_stock(context, stock_list)
    stock_list = filter_paused_and_st_stock(stock_list)
    stock_list = filter_limitup_stock(context, stock_list)
    stock_list = filter_limitdown_stock(context, stock_list)
    # Filtering by 20-day growth performed poorly, hence commented out
    #stock_list = filter_by_growth_rate(stock_list, 15)
    # Keep only as many stocks as we may buy
    stock_list = stock_list[:g.buy_stock_count]
    return stock_list
# Create or adjust positions to match the buy list.
# Holdings that could not be sold (e.g. suspended) remain held.
# Aims to keep the number of holdings at g.buy_stock_count.
def adjust_position(context, buy_stocks):
    # First sell everything not on the buy list
    for stock in context.portfolio.positions.keys():
        if stock not in buy_stocks:
            log.info("stock [%s] in position is not buyable" % (stock))
            position = context.portfolio.positions[stock]
            close_position(position)
        else:
            log.info("stock [%s] is already in position" % (stock))
    # Split the available cash evenly across the empty slots.
    # Only the cash is divided evenly; existing position sizes may differ.
    position_count = len(context.portfolio.positions)
    if g.buy_stock_count > position_count:
        value = context.portfolio.cash / (g.buy_stock_count - position_count)
        for stock in buy_stocks:
            if context.portfolio.positions[stock].total_amount == 0:
                if open_position(stock, value):
                    if len(context.portfolio.positions) == g.buy_stock_count:
                        break
|
<filename>app.py
# import necessary modules
import streamlit as st
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stat
import glob as glob
import os
import time
import altair as alt
# import local .py scripts with function definitions/declarations
import Compare_IPA as ipa
def folder_selector(folder_path='./', key=0):
    """Render a selectbox listing the entries of *folder_path*; return the chosen name."""
    entries = os.listdir(folder_path)
    chosen = st.selectbox('Select folder:', entries, key=key)
    return chosen
def is_this_correct(key=0):
    """Ask a Yes/No confirmation question; return True when 'Yes' is chosen."""
    answer = st.selectbox('Is this correct?', ['Yes', 'No'], key=key)
    return answer == 'Yes'
# Folder selection is currently hard-coded; the interactive selector is kept
# commented out for reference.
#st.write("Please select the folder with your transcriptions:")
#transcription_folder = folder_selector(key = 1)
#st.write('Your transcriptions are in /`%s`' % transcription_folder)
#st.write("Please select the folder with your descriptive transcriptions:")
#descriptive_transcript_folder = folder_selector(transcription_folder, key =2)
#st.write('Your descriptive transcriptions are in /`%s`' % transcription_folder, '/`%s`' % descriptive_transcript_folder)
#st.write("Please select the folder with your prescriptive transcription:")
#prescriptive_transcript_folder = folder_selector(transcription_folder, key =3)
#st.write('Your descriptive transcriptions are in /`%s`' % transcription_folder, '/`%s`' % prescriptive_transcript_folder)
transcription_folder = 'transcriptions'
descriptive_transcript_folder = 'descriptive'
prescriptive_transcript_folder = 'prescriptive'
st.write('By default your prescriptive transcription is in `%s`' % transcription_folder, '/`%s`' % prescriptive_transcript_folder, '/')
st.write('By default your descriptive transcriptions are in `%s`' % transcription_folder, '/`%s`' % descriptive_transcript_folder, '/')
#correct = is_this_correct(key = 1)
#if correct:
desc_folder_path = transcription_folder + "/" + descriptive_transcript_folder + "/"
presc_folder_path = transcription_folder + "/" + prescriptive_transcript_folder + "/"
#else:
#st.write("Please alter your information.")
# st.write()
desc_transcript_files = glob.glob(desc_folder_path + '*.txt') # take in all desc filepaths
presc_transcript_file = glob.glob(presc_folder_path + '*.txt') # take in presc transc file
desc_dictionaries = ipa.bring_in_data(desc_transcript_files)
presc_dictionary = ipa.bring_in_data(presc_transcript_file)
# Compare every descriptive transcript against the (single) prescriptive one
# and attach the resulting comparison DataFrame under the 'DF' key.
for index,dictionary in enumerate(desc_dictionaries):
    temp_desc_transcript = desc_dictionaries[index]["clean_transcript"]
    presc_transcript = presc_dictionary[0]["clean_transcript"]
    temp_df = ipa.string_list_phoneme_compare(temp_desc_transcript,
        presc_transcript)
    desc_dictionaries[index]['DF'] = temp_df
# NOTE(review): to_dict('record') relies on pandas' old first-letter orient
# matching; modern pandas expects 'records' — confirm the pinned pandas version.
survey_data = pd.read_csv("survey_data.csv") # import the data from the survey
survey_dicts = survey_data.to_dict('record') # turn each row into a respective dictionary entry
# Merge each participant's survey answers into their results dictionary.
# The double loop works even though the two collections differ in length
# (some participants never finished their participation).
for dct_index,dct in enumerate(desc_dictionaries):
    partic = dct['file_name']
    for new_dct_index,new_dct in enumerate(survey_dicts):
        if new_dct['partic_number'] == partic:
            desc_dictionaries[dct_index] = {**desc_dictionaries[dct_index], **survey_dicts[new_dct_index]}
# Flatten the per-participant fields into parallel lists for later statistics.
partic_names = []
study_data = []
years_instruct = []
order = []
word_data = pd.read_csv("dictionary.csv")
for index, dictionary in enumerate(desc_dictionaries):
    df = dictionary['DF']
    partic_name = dictionary['partic_number']
    years = dictionary['years_formal_instruct']
    order.append(index)
    partic_names.append(partic_name)
    study_data.append(df)
    years_instruct.append(years)
st.write("### Prescriptive Transcript:")
st.markdown("> " + presc_dictionary[0]['raw_transcript'])
# ---------------------------------------------------------------------------
# Interactive exploration UI: per-participant views or sample-wide statistics.
# ---------------------------------------------------------------------------
data_explore_keys = ["Nothing", "View participant data", "View descriptive statistics"]
data_explore = st.selectbox("What information would you like to look at?", data_explore_keys)
if data_explore == data_explore_keys[0]:
    st.write("Please select an option to view data!")
elif data_explore == data_explore_keys[1]: # view particpant data
    # Let the user pick a participant by id.
    partic_keys = []
    for i, dictionary in enumerate(desc_dictionaries):
        temp_key = dictionary['partic_number']
        partic_keys.append(temp_key)
    show_dictionary = st.selectbox("Select a participant's dictionary:", partic_keys)
    for i, dictionary in enumerate(desc_dictionaries):
        if dictionary['partic_number'] == show_dictionary:
            st.write("## Participant MetaData: ")
            st.write("### Participant Name: ")
            st.write(dictionary['partic_number'])
            st.write("### File Name & Path: ")
            st.write(dictionary['full_path'])
            st.write("## Summary of Pronunciation")
            st.write("### Raw Transcript: ")
            st.markdown("> " + dictionary['raw_transcript'])
            st.write("### Participants Summary: ")
            st.write("#### (How this participant matched up against the prescriptive IPA transcript)")
            st.write("")
            st.write(dictionary['DF'])
            st.write("## Survey Results")
            # questions in the survey (change method of import)
            questions=["What is your name?",
                "What is your age?",
                "How would you self-identify in terms of your Spanish language ability?",
                "Have you ever traveled to a Spanish-speaking country, and, if so, did you communicate in Spanish while in that Spanish-speaking country?",
                "Have you ever traveled to a Spanish-speaking country on an education-focused travel-abroad program?",
                "Did you have any significant exposure to Spanish before the age of 10? (significant could mean: a family member spoke to you; you lived in a Spanish-speaking country; you took a Spanish class; etc.)",
                "Have you ever received formal Spanish language instruction?",
                "If you have you received formal Spanish language instruction, approximately how many years? (Assume each college semester = 1 year; each High School course = 1 year) If not, please enter '0'.",
                "If you have you ever received formal Spanish language instruction, have you every been explicitly taught Spanish pronunciation?",
                "How often do you have exposure to Spanish (outside of a classroom setting)?",
                "How often do you speak in Spanish (outside of a classroom setting)?",
                "When was the last time you spoke Spanish (outside of a classroom setting)?",
                "Are you currently trying to learn Spanish?"]
            question_keys = []
            question_answers = []
            for i, key in enumerate(list(survey_data.keys())):
                question_keys.append(key)
                question_answers.append(dictionary[key])
            # st.selectbox("Please select the question: ", questions)
            partic_survey_results = pd.DataFrame(zip(question_keys, questions, question_answers), columns = ['question_key', 'questions', 'answer'])
            st.table(partic_survey_results)
            st.write("## Total Results")
            st.write("### Filter results by:")
            st.write("**Phoneme criteria:**")
            # Renamed from 'dict' to avoid shadowing the builtin.
            result_df = dictionary['DF']
            trick_ls = [result_df]
            # One checkbox per vowel phoneme; unchecked vowels pass '' (no filter).
            allophone0_bool = st.checkbox('/a/')
            if allophone0_bool:
                allophone0 = 'a'
            else:
                allophone0 = ''
            allophone1_bool = st.checkbox('/e/')
            if allophone1_bool:
                allophone1 = 'e'
            else:
                allophone1 = ''
            allophone2_bool = st.checkbox('/i/')
            if allophone2_bool:
                allophone2 = 'i'
            else:
                allophone2 = ''
            allophone3_bool = st.checkbox('/o/')
            if allophone3_bool:
                allophone3 = 'o'
            else:
                allophone3 = ''
            allophone4_bool = st.checkbox('/u/')
            if allophone4_bool:
                allophone4 = 'u'
            else:
                allophone4 = ''
            filtered_by_allophone = ipa.filter_by_allophone(trick_ls, allophone0 = allophone0, allophone1 = allophone1,
                allophone2 = allophone2, allophone3 = allophone3, allophone4 = allophone4)
            st.write("**Dictionary criteria:**")
            cognate_bool = st.checkbox('Cognates')
            if cognate_bool:
                column_criteria0 = 'cognate'
                equivelancy_criteria0 = '1'
            else:
                column_criteria0 = ''
                equivelancy_criteria0 = ''
            noncognate_bool = st.checkbox('Non-cognates')
            if noncognate_bool:
                # Fix: the column name was '', so this filter never applied.
                column_criteria1 = 'cognate'
                equivelancy_criteria1 = '0'
            else:
                column_criteria1 = ''
                equivelancy_criteria1 = ''
            term_vowel_bool = st.checkbox('Terminal vowels')
            if term_vowel_bool:
                column_criteria2 = 'term_vowel'
                equivelancy_criteria2 = '1'
            else:
                column_criteria2 = ''
                equivelancy_criteria2 = ''
            non_term_vowel_bool = st.checkbox("Non-terminal vowels")
            if non_term_vowel_bool:
                column_criteria3 = 'term_vowel'
                equivelancy_criteria3 = '0'
            else:
                column_criteria3 = ''
                equivelancy_criteria3 = ''
            init_vowel_bool = st.checkbox("Vowel initial")
            if init_vowel_bool:
                # Fix: filter on the initial-vowel column, not the terminal-vowel one.
                column_criteria4 = 'init_vowel'
                equivelancy_criteria4 = '1'
            else:
                column_criteria4 = ''
                equivelancy_criteria4 = ''
            non_init_vowel_bool = st.checkbox("Non-vowel initial")
            # Fix: previously tested init_vowel_bool (copy-paste bug).
            if non_init_vowel_bool:
                column_criteria5 = 'init_vowel'
                equivelancy_criteria5 = '0'
            else:
                column_criteria5 = ''
                equivelancy_criteria5 = ''
            filtered_by_dict = ipa.filter_by_dictionary_mult_criteria(word_data, filtered_by_allophone, column_criteria0 = column_criteria0, equivelancy_criteria0 = equivelancy_criteria0, column_criteria1 = column_criteria1, equivelancy_criteria1 = equivelancy_criteria1, column_criteria2 = column_criteria2, equivelancy_criteria2 = equivelancy_criteria2, column_criteria3 = column_criteria3, equivelancy_criteria3 = equivelancy_criteria3, column_criteria4 = column_criteria4, equivelancy_criteria4 = equivelancy_criteria4, column_criteria5 = column_criteria5, equivelancy_criteria5 = equivelancy_criteria5)
            st.write(filtered_by_dict[0])
            st.write("[note] if you want to view data filtering for *only* phoneme criteria or *only* dictionary criteria, select all boxes in the opposite criteria.")
elif data_explore == data_explore_keys[2]:
    descriptive_stats_options = ["Nothing","The wordlist", "Survey results", "Pronunciation outcomes"]
    descriptive_stats_choice = st.selectbox("Chose what stats you would like to explore", descriptive_stats_options)
    if descriptive_stats_choice == descriptive_stats_options[1]:
        st.write(word_data.keys())
        words = word_data['word']
        word_size = len(words)
        def filter_dict(dictionary_df, column_criteria, equivelancy_criteria):
            # Rows whose column equals the criterion; return their 'word' values.
            row_selects = dictionary_df[dictionary_df[column_criteria] == equivelancy_criteria]
            words = row_selects['word']
            return(words)
        terminal_vowels = filter_dict(word_data, 'term_vowel', 1)
        terminal_vowel_prop = len(terminal_vowels) / word_size
        init_vowels = filter_dict(word_data, 'init_vowel', 1)
        init_vowels_prop = len(init_vowels) / word_size
        cognates = filter_dict(word_data, 'cognate', 1)
        ls_types = ['initial letter', 'terminal letter', 'cognate status', 'initial letter', 'terminal letter', 'cognate status']
        # NOTE(review): this chart is built but never rendered (no
        # st.altair_chart call) and references 'animal'/'country'/'emoji'
        # fields that dictionary.csv likely lacks — confirm and either wire
        # it up or remove it.
        alt.Chart(word_data).mark_text(filled=True).encode(
            alt.X('term_vowel:O', axis=None),
            alt.Y('animal:O', axis=None),
            alt.Row('country:N', header=alt.Header(title='')),
            alt.SizeValue(60),
            text='emoji'
        ).properties(width=800, height=200)
        # https://vega.github.io/vega-lite/examples/isotype_bar_chart_emoji.html
    elif descriptive_stats_choice == descriptive_stats_options[3]:
        # write information on total dataset
        total_accuracy = ipa.get_proportions(study_data)
        total_accuracy_mean = np.mean(total_accuracy)
        total_accuracy_std = np.std(total_accuracy)
        st.write("### All participants")
        st.write("Across all of the sample, the particpants scored an average of " + str(round((total_accuracy_mean * 100), 2)) + "% vowel pronunciation accuracy with a standard deviation of: " + str(round((total_accuracy_std * 100), 2)) + "%")
        # time.sleep()
        # write information on cognates
        non_cognate_dfs = ipa.filter_by_dictionary(word_data, "cognate", 0, study_data)
        non_cognate_accuracy = ipa.get_proportions(non_cognate_dfs)
        non_cognate_accuracy_mean = np.mean(non_cognate_accuracy)
        non_cognate_accuracy_std = np.std(non_cognate_accuracy)
        st.write("For non-cognates words, the particpants scored an average of " + str(round((non_cognate_accuracy_mean * 100), 2)) + "% vowel pronunciation accuracy with a standard deviation of: " + str(round((non_cognate_accuracy_std * 100), 2)) + "%")
        # calculate mean and std for cognate pronunciation accuracy across the *entrie* sample
        cognate_dfs = ipa.filter_by_dictionary(word_data, "cognate", 1, study_data) # create a list of dfs that only accounts for the cognates in the study
        cognate_accuracy = ipa.get_proportions(cognate_dfs)
        cognate_accuracy_mean = np.mean(cognate_accuracy)
        cognate_accuracy_std = np.std(cognate_accuracy)
        st.write("For cognates, the particpants scored an average of " + str(round((cognate_accuracy_mean * 100), 2)) + "% vowel pronunciation accuracy with a standard deviation of: " + str(round((cognate_accuracy_std * 100), 2)) + "%")
        # Fix: the original rebound the scipy.stats alias ('stat = stat.ttest_ind(...)'),
        # which would break any later use of the module on a rerun.
        ttest_result = stat.ttest_ind(cognate_accuracy, non_cognate_accuracy, equal_var=False)
        pvalue = ttest_result[1]
        st.write("According to a two sample t-test (p < 0.05, equal varience = False):")
        if pvalue >= 0.05:
            st.markdown("> We fail to reject the null hypothesis. There is not enough evidence (p = " + str(pvalue) + ") to support a statistically significant difference of the average pronunciation accuarcy between cognates and non-cognaates in the sample.")
        else:
            st.markdown("> We choose to reject the null hypothesis. There is enough evidence (p = " + str(pvalue) + ") to support a statistically significant difference of the average pronunciation accuarcy between cognates and non-cognaates in the sample.")
        st.write("## Total Results")
        st.write("**Phoneme criteria:**")
        # NOTE(review): 'dictionary' here is the leftover variable from the
        # module-level loop (i.e. the last participant) — confirm that filtering
        # only the last participant's DF is intended. Renamed from 'dict'.
        result_df = dictionary['DF']
        trick_ls = [result_df]
        allophone0_bool = st.checkbox('/a/')
        if allophone0_bool:
            allophone0 = 'a'
        else:
            allophone0 = ''
        allophone1_bool = st.checkbox('/e/')
        if allophone1_bool:
            allophone1 = 'e'
        else:
            allophone1 = ''
        allophone2_bool = st.checkbox('/i/')
        if allophone2_bool:
            allophone2 = 'i'
        else:
            allophone2 = ''
        allophone3_bool = st.checkbox('/o/')
        if allophone3_bool:
            allophone3 = 'o'
        else:
            allophone3 = ''
        allophone4_bool = st.checkbox('/u/')
        if allophone4_bool:
            allophone4 = 'u'
        else:
            allophone4 = ''
        filtered_by_allophone = ipa.filter_by_allophone(trick_ls, allophone0 = allophone0, allophone1 = allophone1,
            allophone2 = allophone2, allophone3 = allophone3, allophone4 = allophone4)
        st.write("**Dictionary criteria:**")
        cognate_bool = st.checkbox('Cognates')
        if cognate_bool:
            column_criteria0 = 'cognate'
            equivelancy_criteria0 = '1'
        else:
            column_criteria0 = ''
            equivelancy_criteria0 = ''
        noncognate_bool = st.checkbox('Non-cognates')
        if noncognate_bool:
            # Fix: the column name was '', so this filter never applied.
            column_criteria1 = 'cognate'
            equivelancy_criteria1 = '0'
        else:
            column_criteria1 = ''
            equivelancy_criteria1 = ''
        term_vowel_bool = st.checkbox('Terminal vowels')
        if term_vowel_bool:
            column_criteria2 = 'term_vowel'
            equivelancy_criteria2 = '1'
        else:
            column_criteria2 = ''
            equivelancy_criteria2 = ''
        non_term_vowel_bool = st.checkbox("Non-terminal vowels")
        if non_term_vowel_bool:
            column_criteria3 = 'term_vowel'
            equivelancy_criteria3 = '0'
        else:
            column_criteria3 = ''
            equivelancy_criteria3 = ''
        init_vowel_bool = st.checkbox("Vowel initial")
        if init_vowel_bool:
            # Fix: filter on the initial-vowel column, not the terminal-vowel one.
            column_criteria4 = 'init_vowel'
            equivelancy_criteria4 = '1'
        else:
            column_criteria4 = ''
            equivelancy_criteria4 = ''
        non_init_vowel_bool = st.checkbox("Non-vowel initial")
        # Fix: previously tested init_vowel_bool (copy-paste bug).
        if non_init_vowel_bool:
            column_criteria5 = 'init_vowel'
            equivelancy_criteria5 = '0'
        else:
            column_criteria5 = ''
            equivelancy_criteria5 = ''
        filtered_by_dict = ipa.filter_by_dictionary_mult_criteria(word_data, filtered_by_allophone, column_criteria0 = column_criteria0, equivelancy_criteria0 = equivelancy_criteria0, column_criteria1 = column_criteria1, equivelancy_criteria1 = equivelancy_criteria1, column_criteria2 = column_criteria2, equivelancy_criteria2 = equivelancy_criteria2, column_criteria3 = column_criteria3, equivelancy_criteria3 = equivelancy_criteria3, column_criteria4 = column_criteria4, equivelancy_criteria4 = equivelancy_criteria4, column_criteria5 = column_criteria5, equivelancy_criteria5 = equivelancy_criteria5)
        st.write(filtered_by_dict[0])
        st.write("[note] if you want to view data filtering for *only* phoneme criteria or *only* dictionary criteria, select all boxes in the opposite criteria.")
#st.write("Files in " + desc_folder_path + ": ")
#for i,e in enumerate(desc_transcript_files):
#    st.write(e)
#st.write("Files in " + presc_folder_path + ": ")
#st.write(presc_transcript_file)
import unittest
import mock
import redisobj
class TestRedisDB(unittest.TestCase):
    @mock.patch("redis.StrictRedis")
    def setUp(self, redis_conn):
        """Patch the redis client class so RedisDB() never opens a real
        connection; keep the mocked instance for per-test configuration."""
        self.rdb = redisobj.RedisDB()
        self.mock_db = redis_conn.return_value
def test_repr(self):
self.assertEquals(str(self.rdb), "<RedisDB host:'localhost' port:'6379' db:'0' >")
def test_contains(self):
# mock redis' exists method
mock_exists_results = [True, False]
self.mock_db.exists.side_effect = lambda k: mock_exists_results.pop(0)
# test in-operator uses redis' exists method
self.assertTrue("test_key1" in self.rdb)
self.assertFalse("test_key2" in self.rdb)
def test_get_item(self):
# mock redis' exists & get method
self.mock_db.exists.return_value = True
# test __getitem__ strings
self.mock_db.type.return_value = "string"
mock_get_results = ["1", "2"]
self.mock_db.get.side_effect = lambda k: mock_get_results.pop(0)
for k, v in (("test_key1", "1"), ("test_key2", "2")):
self.assertEqual(self.rdb[k], v)
self.assertTrue(('get', (k,), {}) in self.mock_db.method_calls)
# test __getitem__ lists
self.mock_db.type.return_value = "list"
mock_lrange_results = [['a','b','c'], ['1','2','3']]
self.mock_db.lrange.side_effect = lambda k,i,j: mock_lrange_results.pop(0)
for k, v in (("test_list1", ['a','b','c']), ("test_list2", ['1','2','3'])):
self.assertEqual(self.rdb[k], v)
self.assertTrue(('lrange', (k,0,-1), {}) in self.mock_db.method_calls)
# test __getitem__ sets
self.mock_db.type.return_value = "set"
mock_smembers_results = [set(['a','b','c']), set(['1','2','3'])]
self.mock_db.smembers.side_effect = lambda k: mock_smembers_results.pop(0)
for k, v in (("test_set1", set(['a','b','c'])), ("test_set2", set(['1','2','3']))):
self.assertEqual(self.rdb[k], v)
self.assertTrue(('smembers', (k,), {}) in self.mock_db.method_calls)
# test __getitem__ hash
self.mock_db.type.return_value = "hash"
mock_hgetall_results = [{'a':'b','c':'d'}, {'a':'1','2':'3'}]
self.mock_db.hgetall.side_effect = lambda k: mock_hgetall_results.pop(0)
for k, v in (("test_hash1", {'a':'b','c':'d'}), ("test_hash2", {'a':'1','2':'3'})):
self.assertEqual(self.rdb[k], v)
self.assertTrue(('hgetall', (k,), {}) in self.mock_db.method_calls)
def test_set_item(self):
# test __setitem__ strings
for k, v in (("test_key1", "3"), ("test_key2", "4")):
self.rdb[k] = v
self.assertTrue(('set', (k,v), {}) in self.mock_db.method_calls)
# test __setitem__ lists
self.mock_db.get.return_value = True
self.mock_db.delete.return_value = True
self.mock_db.type.return_value = "list"
for k, v in (("test_list1", ['d','e','f']), ("test_list2", [4,5,6])):
self.rdb[k] = v
for i in v:
self.assertTrue(('rpush', (k,i), {}) in self.mock_db.method_calls)
# test __setitem__ sets
for k, v in (("test_set1", set(['d','e','f'])), ("test_set2", set(['4','5','6']))):
self.rdb[k] = v
for i in v:
self.assertTrue(('sadd', (k,i), {}) in self.mock_db.method_calls)
# test __setitem__ hash
for k, v in (("test_hash1", {'e':'f','g':'h'}), ("test_hash2", {'b':2,'3':4})):
self.rdb[k] = v
self.assertTrue(('hmset', (k,v), {}) in self.mock_db.method_calls)
def test_del_item(self):
# test __delitem__
self.mock_db.get.return_value = True
self.mock_db.delete.return_value = True
self.mock_db.exists.return_value = True
self.mock_db.type.return_value = "string"
del self.rdb["key"]
self.assertTrue(('delete', ("key",), {}) in self.mock_db.method_calls)
def test_keys(self):
# test key-retrieval
self.mock_db.keys.return_value = ["key1", "key2"]
self.assertEquals(self.rdb.keys(), ["key1", "key2"])
self.assertTrue(('keys', (), {}) in self.mock_db.method_calls)
def test_clear(self):
# test clear
self.rdb.clear()
self.assertTrue(('flushdb', (), {}) in self.mock_db.method_calls)
def test_keyerrors(self):
self.mock_db.exists.return_value = False
self.assertRaises(KeyError, lambda key: self.rdb[key], 'invalid_key')
def test_invalid_data_types(self):
# helper functions
def set_key(key, value): self.rdb[key] = value
def get_key(key): return self.rdb[key]
def del_key(key): del self.rdb[key]
# invalid value data type
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", 1.5)
# invalid key data type
self.assertRaises(redisobj.InvalidDataType, set_key, 1, "test")
self.assertRaises(redisobj.InvalidDataType, get_key, 1)
self.assertRaises(redisobj.InvalidDataType, del_key, 1)
# invalid lists data type
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", [1,2,[]])
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", [1,2,set([])])
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", [1,2,{}])
# invalid sets data type
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", set([1,2,1.5]))
# invalid hash data type
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", {1:'a'})
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", {1:['a']})
self.assertRaises(redisobj.InvalidDataType, set_key, "valid_key", {1:{'a':2}})
|
from __future__ import unicode_literals
from mopidy import httpclient, models
from mopidy_jellyfin.utils import cache
import mopidy_jellyfin
from .http import JellyfinHttpClient
from unidecode import unidecode
import os
import logging
from collections import OrderedDict, defaultdict
import sys
if sys.version.startswith('3'):
from urllib.parse import (
parse_qs,
quote,
urlencode,
urljoin,
urlsplit,
urlunsplit
)
else:
from urllib import urlencode
from urllib2 import quote
from urlparse import parse_qs, urljoin, urlsplit, urlunsplit
logger = logging.getLogger(__name__)
class JellyfinHandler(object):
    def __init__(self, config):
        """Initialize the handler from the Mopidy extension config.

        Reads connection/behavior settings from the ``jellyfin`` config
        section, builds the auth headers and HTTP client, follows any
        server redirect, and logs in.

        :param config: Mopidy extension configuration
        :type config: dict
        """
        self.config = config
        proxy = config.get('proxy')
        jellyfin = config.get('jellyfin')
        self.hostname = jellyfin.get('hostname')
        self.username = jellyfin.get('username')
        self.password = jellyfin.get('password', '')
        self.libraries = jellyfin.get('libraries')
        # If no libraries are provided, default to 'Music'
        if not self.libraries:
            self.libraries = 'Music'
        self.albumartistsort = jellyfin.get('albumartistsort')
        # If not overridden, default to using Album Artist sort method
        # This _really_ shouldn't be necessary, but it is for reasons
        if self.albumartistsort not in ['False', 'false']:
            self.albumartistsort = True
        else:
            self.albumartistsort = False
        max_bitrate = jellyfin.get('max_bitrate')
        if max_bitrate:
            # presumably config is in kbps and the server wants bps -- TODO confirm
            self.max_bitrate = str(max_bitrate * 1024)
        else:
            self.max_bitrate = '140000000'
        self.watched_status = jellyfin.get('watched_status')
        cert = None
        client_cert = jellyfin.get('client_cert', None)
        client_key = jellyfin.get('client_key', None)
        # Only use a client TLS certificate when both halves are provided.
        if client_cert is not None and client_key is not None:
            cert = (client_cert, client_key)
        self.album_format = jellyfin.get('album_format', False)
        if not self.album_format:
            self.album_format = '{Name}'
        # create authentication headers
        self.auth_data = self._auth_payload()
        headers = self._create_headers()
        self.http = JellyfinHttpClient(headers, cert, proxy)
        # Follow any server-side redirect so later calls hit the final host.
        response_url = self.http.check_redirect(self.hostname)
        if self.hostname != response_url:
            self.hostname = response_url
        self._login()
def _save_token(self, token):
# Save the authentication token where the frontend can also access it
cache_dir = mopidy_jellyfin.Extension.get_cache_dir(self.config)
token_file = os.path.join(cache_dir, 'token')
with open(token_file, 'w') as f:
f.write(token)
def _login(self):
"""Return token for a user.
"""
url = self.api_url('/Users/AuthenticateByName')
auth_details = self.http.post(
url, self.auth_data)
token = auth_details.get('AccessToken')
if token:
self.user_id = auth_details.get('User').get('Id')
headers = {'x-mediabrowser-token': token}
self.http.session.headers.update(headers)
self._save_token(token)
self.token = token
else:
logger.error('Unable to login to Jellyfin')
def _auth_payload(self):
"""Returns a dict with username and password.
"""
return {
'username': self.username,
'Pw': self.password
}
def _create_headers(self, token=None):
"""Return header dict that is needed to talk to the Jellyfin API.
"""
headers = {}
authorization = (
'MediaBrowser , '
'Client="Mopidy", '
'Device="{device}", '
'DeviceId="{device_id}", '
'Version="{version}"'
).format(
device=mopidy_jellyfin.Extension.device_name,
device_id=mopidy_jellyfin.Extension.device_id,
version=mopidy_jellyfin.__version__
)
headers['x-emby-authorization'] = authorization
if token:
headers['x-mediabrowser-token'] = self.token
return headers
def api_url(self, endpoint, url_params={}):
"""Returns a joined url.
Takes host, and endpoint and generates a valid jellyfin API url.
"""
scheme, netloc, path, query_string, fragment = urlsplit(self.hostname)
query_params = parse_qs(query_string)
path = path + endpoint
query_params['format'] = 'json'
query_params.update(url_params)
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
def get_music_root(self):
url = self.api_url(
'/Users/{}/Views'.format(self.user_id)
)
data = self.http.get(url)
media_folders = [
{'Name': library.get('Name'),
'Id': library.get('Id'),
'CollectionType': library.get('CollectionType')}
for library in data.get('Items')
if library.get('CollectionType') in ['books', 'music']
]
if media_folders:
logging.debug('Jellyfin: Found libraries')
return media_folders
else:
logging.debug(
'Jellyfin: All directories found: {}'.format(
[i.get('CollectionType')
for i in data.get('Items')
if 'CollectionType' in i.items()]
)
)
raise Exception('Jellyfin: Cant find music root directory')
def get_library_roots(self):
libraries = self.get_music_root()
return [
models.Ref.directory(
uri='jellyfin:directory:{}'.format(i.get('Id')),
name=i.get('Name')
) for i in libraries if i
]
def get_playlists(self):
url = self.api_url(
'/Users/{}/Views'.format(self.user_id)
)
data = self.http.get(url)
library_id = [
library.get('Id') for library in data.get('Items')
if library.get('Name') == 'Playlists'
]
if library_id:
library_id = library_id[0]
else:
return []
raw_playlists = self.get_directory(library_id)
return raw_playlists.get('Items')
def get_playlist_contents(self, playlist_id):
url_params = { 'UserId': self.user_id }
url = self.api_url('/Playlists/{}/Items'.format(playlist_id), url_params)
data = self.http.get(url).get('Items', [])
return data
def create_playlist(self, name):
url = self.api_url('/Playlists')
payload = {
'Name': name,
'UserId': self.user_id,
'MediaType': 'Audio'
}
return self.http.post(url, payload)
def delete_playlist(self, playlist_id):
url_params = { 'UserId': self.user_id }
url = self.api_url('/Items/{}'.format(playlist_id), url_params)
result = self.http.delete(url)
return result.ok
def update_playlist(self, playlist_id, new_ids):
curr_tracks = self.get_playlist_contents(playlist_id)
curr_length = len(curr_tracks)
new_length = len(new_ids)
if curr_length == new_length:
# If the playlist is the same length, assume a track has moved
self.move_playlist_items(playlist_id, curr_tracks, new_ids)
elif curr_length > new_length:
# If the new playlist is shorter than the old, delete tracks
self.delete_from_playlist(playlist_id, curr_tracks, new_ids)
elif curr_length < new_length:
# If the new playlist is longer than the old, add new tracks
self.add_to_playlist(playlist_id, curr_tracks, new_ids)
    def move_playlist_items(self, playlist_id, curr_tracks, new_ids):
        """Reorder a playlist after a single track has been moved.

        Scans current vs. desired order; the first mismatch whose
        displacement is not exactly +1 is treated as the moved track
        (a +1 shift is just fallout from a track moved above it), and
        one Move call is issued for it.

        :param playlist_id: ID of the playlist being edited
        :param curr_tracks: current playlist items from the server
        :type curr_tracks: list of dict
        :param new_ids: desired track ID ordering
        :type new_ids: list
        """
        # Loop through the current and new list, finding the moved track
        for index,item in enumerate(curr_tracks, 0):
            if item['Id'] != new_ids[index]:
                new_index = new_ids.index(item['Id'])
                # If an item has only moved down 1 slot, it was likely caused
                # by another track being moved above it
                if new_index - index != 1:
                    break
        # Playlists have their own unique item IDs
        # NOTE(review): relies on the loop variables after break -- assumes
        # curr_tracks is non-empty and a genuine mismatch exists.
        item_id = item['PlaylistItemId']
        url = self.api_url(f'/Playlists/{playlist_id}/Items/{item_id}/Move/{new_index}')
        self.http.post(url)
def delete_from_playlist(self, playlist_id, curr_tracks, new_ids):
curr_ids = [ track['Id'] for track in curr_tracks ]
# Find items that are in the old playlist but missing from the new one
del_items = list(set(curr_ids) - set(new_ids))
# Get the PlaylistItemId of each track to be deleted
del_ids = [ track['PlaylistItemId'] for track in curr_tracks if track['Id'] in del_items ]
url_params = {
'UserId': self.user_id,
'EntryIds': ','.join(del_ids)
}
del_url = self.api_url(f'Playlists/{playlist_id}/Items', url_params)
self.http.delete(del_url)
def add_to_playlist(self, playlist_id, curr_tracks, new_ids):
curr_ids = [ track['Id'] for track in curr_tracks ]
# Find items in the new playlist that are missing from the old one
add_ids = list(set(new_ids) - set(curr_ids))
url_params = {
'UserId': self.user_id,
'Ids': ','.join(add_ids)
}
new_url = self.api_url(f'/Playlists/{playlist_id}/Items', url_params)
self.http.post(new_url)
    def get_favorites(self):
        '''
        Pulls a list of favorite audio related content from the server and
        build playlists from it

        :returns: dict with 'All'/'Tracks'/'Albums'/'Artists' track lists
        :rtype: dict
        '''
        # Types of playlists to build
        playlists = {
            'All': [],
            'Tracks': [],
            'Albums': [],
            'Artists': []
        }
        url_params = {
            'Recursive': 'true',
            'Filters': 'IsFavorite',
        }
        fav_items_url = self.api_url(f'/Users/{self.user_id}/Items', url_params)
        fav_items = self.http.get(fav_items_url).get('Items', [])
        # Sort favorited tracks and albums into their buckets.
        for item in fav_items:
            item_type = item.get('Type')
            if item_type == 'Audio':
                playlists['Tracks'].append(self.create_track(item))
            elif item_type == 'MusicAlbum':
                # Get tracks from the favorited album
                tracks = self.get_directory(item.get('Id')).get('Items', [])
                playlists['Albums'].extend([
                    self.create_track(track) for track in tracks])
        # User ID needed for the artists query
        url_params['UserId'] = self.user_id
        # Artists aren't available in the previous call and have to be separate
        fav_artists_url = self.api_url(f'/Artists', url_params)
        fav_artists = self.http.get(fav_artists_url).get('Items', [])
        for artist in fav_artists:
            # Get tracks from the favorited artist
            artist_id = artist.get('Id')
            playlists['Artists'].extend(self.lookup_artist(artist_id))
        # 'All' should include the other 3 lists combined
        playlists['All'].extend(playlists['Tracks'])
        playlists['All'].extend(playlists['Albums'])
        playlists['All'].extend(playlists['Artists'])
        return playlists
    @cache()
    def browse_item(self, item_id):
        """Return browsable children of item_id as mopidy Ref objects.

        Dispatches on the item type: a music library yields its artists,
        an artist yields its albums, anything else yields its directory
        contents (tracks, artists, and folders).

        :param item_id: Jellyfin item ID to browse
        :returns: list of mopidy.models.Ref
        """
        item = self.get_item(item_id)
        if item.get('CollectionType', '') == 'music':
            # Pull all artists for this library
            artists = self.get_library_artists(item_id)
            ret_value = [self.get_artist_as_ref(artist) for artist in artists]
        elif item.get('Type', '') == 'MusicArtist':
            # Pull list of albums for a given artist
            ret_value = self.get_artist_contents(item_id)
        else:
            # Browse the directory tree
            contents = self.get_directory(item_id).get('Items')
            ret_value = []
            # Create an entry for each item depending on it's type
            for item in contents:
                if item.get('Type') in ('Audio', 'AudioBook'):
                    # Create tracks
                    ret_value.append(self.get_track_as_ref(item))
                elif item.get('Type') == 'MusicArtist':
                    # Create artists (probably never used)
                    ret_value.append(self.get_artist_as_ref(item))
                elif item.get('Type') in ('MusicAlbum', 'Folder'):
                    # Create browsable folders
                    ret_value.append(self.get_album_as_ref(item))
        return ret_value
@cache()
def get_all_artists(self):
# Get a list of all artists in the server. Used for mopidy-iris
artists = []
libraries = self.get_music_root()
for library in libraries:
if library.get('Name') in self.libraries:
library_id = library.get('Id')
artists += self.get_library_artists(library_id)
return artists
@cache()
def get_artist_contents(self, artist_id):
# Get a list of albums for the given artist
contents = []
ret_val = []
# Get album list
url_params = {
'UserId': self.user_id,
'IncludeItemTypes': 'MusicAlbum',
'Recursive': 'true'
}
if self.albumartistsort:
url_params['AlbumArtistIds'] = artist_id
else:
url_params['ArtistIds'] = artist_id
url = self.api_url('/Items', url_params)
result = self.http.get(url)
if result:
contents = result.get('Items')
ret_val = [self.get_album_as_ref(album) for album in contents]
return ret_val
@cache()
def get_library_artists(self, library_id):
# Get a list of all artists in the given library
url_params = {
'ParentId': library_id,
'UserId': self.user_id
}
if self.albumartistsort:
url = self.api_url('/Artists/AlbumArtists', url_params)
else:
url = self.api_url('/Artists', url_params)
artists = self.http.get(url).get('Items')
return artists
@cache()
def get_artist_as_ref(self, artist):
# Convert artist into mopidy object
artist_ref = models.Ref.artist(
uri='jellyfin:artist:{}'.format(
artist.get('Id')
),
name=artist.get('Name')
)
return artist_ref
@cache()
def get_album_as_ref(self, album):
# Convert album into mopidy object
return models.Ref.album(
uri='jellyfin:album:{}'.format(
album.get('Id')
),
name=album.get('Name')
)
def get_track_as_ref(self, track):
# Convert track into mopidy object
return models.Ref.track(
uri='jellyfin:track:{}'.format(
track.get('Id')
),
name=track.get('Name')
)
@cache()
def get_albums(self, query):
# Check query for artist name
if 'artist' in query:
raw_artist = query.get('artist')
elif 'albumartist' in query:
raw_artist = query.get('albumartist')
else:
return []
# URL encode artist string
artist = quote(raw_artist[0].encode('utf8')).replace('/', '-')
url_params= {
'UserId': self.user_id
}
url = self.api_url('/Artists/{}'.format(artist), url_params)
# Pull out artist_id
artist_data = self.http.get(url)
artist_id = artist_data.get('Id')
url_params = {
'UserId': self.user_id,
'IncludeItemTypes': 'MusicAlbum',
'Recursive': 'true'
}
# Get album list
if self.albumartistsort:
url_params['AlbumArtistIds'] = artist_id
else:
url_params['ArtistIds'] = artist_id
url = self.api_url('/Items', url_params)
result = self.http.get(url)
if result:
albums = result.get('Items')
return albums
@cache()
def get_all_albums(self):
# Get a list of all albums in the library. Used for mopidy-iris
url_params = {
'UserId': self.user_id,
'IncludeItemTypes': 'MusicAlbum',
'Recursive': 'true'
}
url = self.api_url('/Items', url_params)
albums = self.http.get(url).get('Items')
return albums
@cache()
def get_directory(self, id):
"""Get directory from Jellyfin API.
:param id: Directory ID
:type id: int
:returns Directory
:rtype: dict
"""
url_params= {
'ParentId': id,
'SortOrder': 'Ascending'
}
url = self.api_url('/Users/{}/Items'.format(self.user_id), url_params)
return self.http.get(url)
@cache()
def get_item(self, id):
"""Get item from Jellyfin API.
:param id: Item ID
:type id: int
:returns: Item
:rtype: dict
"""
data = self.http.get(
self.api_url(
'/Users/{}/Items/{}'.format(self.user_id, id)
)
)
logger.debug('Jellyfin item: {}'.format(data))
return data
    def create_track(self, track):
        """Create track from Jellyfin API track dict.
        :param track: Track from Jellyfin API
        :type track: dict
        :returns: Track
        :rtype: mopidy.models.Track
        """
        # TODO: add more metadata
        name = track.get('Name')
        # Audiobooks optionally get a played/unplayed marker in the title.
        if self.watched_status and track.get('Type') == 'AudioBook':
            if track['UserData'].get('PlayCount'):
                name = f'[X] - {name}'
            else:
                name = f'[] - {name}'
        return models.Track(
            uri='jellyfin:track:{}'.format(track.get('Id')),
            name=name,
            track_no=track.get('IndexNumber', 0),
            disc_no=track.get('ParentIndexNumber'),
            genre=','.join(track.get('Genres', [])),
            artists=self.create_artists(track),
            album=self.create_album(track),
            length=self.ticks_to_milliseconds(track.get('RunTimeTicks', 0))
        )
def create_album(self, item):
"""Create album object from Jellyfin item.
:param track: item
:type track: dict
:returns: Album
:rtype: mopidy.models.Album
"""
item_type = item.get('Type')
if item_type == 'Audio':
return models.Album(
name=item.get('Album'),
artists=self.create_artists(item),
uri=f'jellyfin:album:{item.get("AlbumId")}'
)
elif item_type == 'MusicAlbum':
return models.Album(
name=item.get('Name'),
artists=self.create_artists(item),
uri=f'jellyfin:album:{item.get("Id")}'
)
def create_artists(self, item={}, name=None):
"""Create artist object from jellyfin item.
:param track: item
:type track: dict
:param name: Name
:type name: str
:returns: List of artists
:rtype: list of mopidy.models.Artist
"""
item_type = item.get('Type', '')
if item_type == 'MusicArtist':
# Artists have a slightly different structure
return [
models.Artist(name=item.get('Name'),
uri=f'jellyfin:artist:{item.get("Id")}')
]
elif item_type:
# For tracks and albums
return [
models.Artist(name=artist, uri=f'jellyfin:artist:{item.get("Id")}')
for artist in item.get('Artists', [])
]
else:
# In case we only get a name
return [ models.Artist(name=name) ]
@cache()
def get_track(self, track_id):
"""Get track.
:param track_id: ID of a Jellyfin track
:type track_id: int
:returns: track
:rtype: mopidy.models.Track
"""
track = self.get_item(track_id)
return self.create_track(track)
def _get_search(self, itemtype, term):
"""Gets search data from Jellyfin API.
:param itemtype: Type to search for
:param term: Search term
:type itemtype: str
:type term: str
:returns: List of result dicts
:rtype: list
"""
if itemtype == 'any':
search_query = 'Audio,MusicAlbum,MusicArtist'
elif itemtype == 'artist' or itemtype == 'albumartist':
search_query = 'MusicArtist'
elif itemtype == 'album':
search_query = 'MusicAlbum'
elif itemtype == 'track_name':
search_query = 'Audio'
else:
raise Exception('Jellyfin search: no itemtype {}'.format(itemtype))
url_params = {
'SearchTerm': quote(term.encode('utf-8')),
'IncludeItemTypes': search_query
}
url = self.api_url('/Search/Hints', url_params)
data = self.http.get(url)
return [i for i in data.get('SearchHints', [])]
    @cache()
    def search(self, query):
        """Search Jellyfin for a term.
        :param query: Search query
        :type query: dict
        :returns: Search results
        :rtype: mopidy.models.SearchResult
        """
        # NOTE(review): query is a dict; confirm cache() can key on it.
        logger.debug('Searching in Jellyfin for {}'.format(query))
        # something to store the results in
        data = []
        tracks = []
        albums = []
        artists = []
        # Collect raw search hints for every field/term pair in the query.
        for itemtype, term in query.items():
            for item in term:
                data.extend(
                    self._get_search(itemtype, item)
                )
        # walk through all items and create stuff
        for item in data:
            if item.get('Type') == 'Audio':
                tracks.append(self.create_track(item))
            elif item.get('Type') == 'MusicAlbum':
                albums.append(self.create_album(item))
            elif item.get('Type') == 'MusicArtist':
                artists.extend(self.create_artists(item))
        return models.SearchResult(
            uri='jellyfin:search',
            tracks=tracks,
            artists=artists,
            albums=albums
        )
    @cache()
    def exact_search(self, query):
        """Exact-match search used by mopidy's lookup paths.

        Handles two query shapes: one containing an artist (optionally
        narrowed by an album), and one containing only an album name.

        :param query: Search query, e.g. {'artist': ['Name']}
        :type query: dict
        :returns: matching tracks/albums/artists
        :rtype: mopidy.models.SearchResult
        """
        # Variable prep
        tracks = []
        raw_artist = ''
        artist_ref = []
        albums = []
        # Check query for artist name
        if 'artist' in query:
            raw_artist = query.get('artist')
        elif 'albumartist' in query:
            raw_artist = query.get('albumartist')
        # Use if query has artist name
        if raw_artist:
            # URL encode artist string
            artist = quote(raw_artist[0].encode('utf8')).replace('/', '-')
            artist_ref = self.create_artists(name=raw_artist[0])
            url_params = { 'UserId': self.user_id }
            url = self.api_url('/Artists/{}'.format(artist), url_params)
            artist_data = self.http.get(url)
            artist_id = artist_data.get('Id')
            url_params = {
                'IncludeItemTypes': 'MusicAlbum',
                'Recursive': 'true',
                'UserId': self.user_id
            }
            # Get album list
            if self.albumartistsort:
                url_params['AlbumArtistIds'] = artist_id
            else:
                url_params['ArtistIds'] = artist_id
            album_url = self.api_url('/Items', url_params)
            album_data = self.http.get(album_url)
            if album_data:
                contents = album_data.get('Items')
                for item in contents:
                    if item.get('Type') == 'MusicAlbum':
                        album_obj = models.Album(
                            name=item.get('Name'),
                            artists=self.create_artists(item),
                            uri='jellyfin:album:{}'.format(item.get('Id'))
                        )
                        # Deduplicate albums before collecting them.
                        if album_obj not in albums:
                            albums.append(album_obj)
            # Get artist tracks
            url_params['IncludeItemTypes'] = 'Audio'
            track_url = self.api_url('/Items', url_params)
            track_data = self.http.get(track_url)
            if track_data:
                # If the query has an album, only match those tracks
                if query.get('album'):
                    tracks = [
                        self.create_track(track)
                        for track in track_data.get('Items')
                        if track.get('Album') == query.get('album')[0]
                    ]
                # Otherwise return all tracks
                else:
                    tracks = [
                        self.create_track(track)
                        for track in track_data.get('Items')
                    ]
        # Use if query only has an album name
        elif 'album' in query:
            album_name = query.get('album')[0]
            url_params = {
                'IncludeItemTypes': 'MusicAlbum',
                'IncludeMedia': 'true',
                'Recursive': 'true',
                'searchTerm': album_name
            }
            url = self.api_url('/Users/{}/Items'.format(self.user_id), url_params)
            album_data = self.http.get(url).get('Items')
            tracks = []
            # This can lead to false matches, but all we have at this point
            # is an album name to match against. Options are limited
            for album in album_data:
                if album.get('Name') == album_name:
                    album_obj = models.Album(
                        name=album.get('Name'),
                        artists=self.create_artists(album),
                        uri='jellyfin:album:{}'.format(album.get('Id'))
                    )
                    if album_obj not in albums:
                        albums.append(album_obj)
                    raw_tracks = self.get_directory(album.get('Id'))
                    tracks += [self.create_track(track)
                               for track in raw_tracks.get('Items', [])]
        return models.SearchResult(
            uri='jellyfin:search',
            tracks=tracks,
            albums=albums,
            artists=artist_ref,
        )
    @cache()
    def get_search_tracks(self, artist_ref, album_id):
        """Return mopidy Tracks for one album from a search result.

        :param artist_ref: optional list of mopidy Artist objects; when
            present only tracks credited to the first artist are returned
        :param album_id: Jellyfin album ID
        :returns: list of mopidy.models.Track
        """
        tracks = []
        url_params = {
            'IncludeItemTypes': 'Audio',
            'Recursive': 'true',
            'AlbumIds': album_id,
            'UserId': self.user_id
        }
        url = self.api_url('/Items', url_params)
        result = self.http.get(url)
        if result:
            raw_tracks = result.get('Items')
            if artist_ref:
                # If the artist was in the query,
                # ensure all tracks belong to that artist
                # (unidecode folds the query name to ASCII before comparing)
                # NOTE(review): track.get('Artists') may be None -> TypeError;
                # confirm the server always sets it.
                tracks = [
                    self.create_track(track)
                    for track in raw_tracks
                    if unidecode(artist_ref[0].name.lower()) in (
                        artist.lower() for artist in track.get('Artists'))
                ]
            else:
                # If the query doesn't contain an artist, return all tracks
                tracks = [
                    self.create_track(track)
                    for track in raw_tracks
                ]
        return tracks
def format_album(self, item):
# If an error occurs when parsing the custom album format for a given
# item, fallback to just using the name
try:
return self.album_format.format(**item)
except:
return item.get('Name')
    def lookup_artist(self, artist_id):
        """Lookup all artist tracks and sort them.
        :param artist_id: Artist ID
        :type artist_id: int
        :returns: List of tracks
        :rtype: list
        """
        url_params = {
            'SortOrder': 'Ascending',
            'Recursive': 'true',
            'IncludeItemTypes': 'Audio'
        }
        if self.albumartistsort:
            url_params['AlbumArtistIds'] = artist_id
        else:
            url_params['ArtistIds'] = artist_id
        url = self.api_url('/Users/{}/Items'.format(self.user_id), url_params)
        items = self.http.get(url)
        # sort tracks into album keys
        album_dict = defaultdict(list)
        for track in items.get('Items'):
            album_dict[track.get('Album')].append(track)
        # order albums in alphabet
        album_dict = OrderedDict(sorted(album_dict.items()))
        # sort tracks in album dict
        tracks = []
        for album, track_list in album_dict.items():
            # Within an album, order by track number and then name.
            track_list.sort(
                key=lambda k: (k.get('IndexNumber', 0), k.get('Name'))
            )
            # add tracks to list
            tracks.extend(track_list)
        return [self.create_track(i) for i in tracks]
@staticmethod
def ticks_to_milliseconds(ticks):
"""Converts Jellyfin track length ticks to milliseconds.
:param ticks: Ticks
:type ticks: int
:returns: Milliseconds
:rtype: int
"""
return int(ticks / 10000)
@staticmethod
def milliseconds_to_ticks(milliseconds):
"""Converts milliseconds to ticks.
:param milliseconds: Milliseconds
:type milliseconds: int
:returns: Ticks
:rtype: int
"""
return milliseconds * 10000
|
#!/usr/bin/env python3
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from unittest import mock
from cros.factory.device import device_utils
class VPDTest(unittest.TestCase):
  """Tests the DUT vpd interface with gooftool's VPDTool mocked out."""
  # pylint: disable=no-value-for-parameter

  def setUp(self):
    # Build a DUT interface; only its .vpd attribute is exercised here.
    self.dut = device_utils.CreateDUTInterface()
    self.vpd = self.dut.vpd

  @mock.patch('cros.factory.gooftool.vpd.VPDTool.GetAllData')
  @mock.patch('cros.factory.gooftool.vpd.VPDTool.GetValue')
  def testGet(self, get_value_mock, get_all_data_mock):
    """GetAll/get forward to VPDTool with the expected partition."""
    def GetValueSideEffect(*args, **unused_kwargs):
      # Fake VPD contents: key 'a' -> 'aa', key 'b' -> 123.
      if args[0] == 'a':
        return 'aa'
      if args[0] == 'b':
        return 123
      return None
    get_all_data_mock.return_value = dict(a='b', foo='bar', empty='')
    get_value_mock.side_effect = GetValueSideEffect
    self.assertEqual(dict(a='b', foo='bar', empty=''), self.vpd.rw.GetAll())
    get_all_data_mock.assert_called_once_with(partition='RW_VPD')
    self.assertEqual('aa', self.vpd.ro.get('a'))
    get_value_mock.assert_called_with('a', default_value=None,
                                      partition='RO_VPD')
    self.assertEqual(123, self.vpd.ro.get('b', 123))
    get_value_mock.assert_called_with('b', default_value=123,
                                      partition='RO_VPD')

  @mock.patch('cros.factory.gooftool.vpd.VPDTool.GetAllData')
  @mock.patch('cros.factory.gooftool.vpd.VPDTool.UpdateData')
  def testUpdate(self, update_data_mock, get_all_data_mock):
    """Update passes changed entries through to VPDTool.UpdateData."""
    get_all_data_mock.return_value = dict(a='b', foo='bar', empty='')
    self.vpd.rw.Update(dict(w='x', y='z', foo=None))
    get_all_data_mock.assert_called_once_with(partition='RW_VPD')
    update_data_mock.assert_called_once_with(dict(w='x', y='z', foo=None),
                                             partition='RW_VPD')

  @mock.patch('cros.factory.gooftool.vpd.VPDTool.GetAllData')
  @mock.patch('cros.factory.gooftool.vpd.VPDTool.UpdateData')
  def testUpdatePartial(self, update_data_mock, get_all_data_mock):
    """Update drops entries that already match the stored data."""
    # "a"="b" is already in vpd, update will skip it.
    # "unset" is already not in vpd, update will skip it.
    get_all_data_mock.return_value = dict(a='b', foo='bar', empty='')
    self.vpd.rw.Update(dict(a='b', w='x', y='z', unset=None))
    get_all_data_mock.assert_called_once_with(partition='RW_VPD')
    update_data_mock.assert_called_once_with(dict(w='x', y='z'),
                                             partition='RW_VPD')

  @mock.patch('cros.factory.gooftool.vpd.VPDTool.UpdateData')
  def testDeleteOne(self, update_data_mock):
    """Delete maps a single key to a None-valued update."""
    self.vpd.rw.Delete('a')
    update_data_mock.assert_called_once_with(dict(a=None), partition='RW_VPD')

  @mock.patch('cros.factory.gooftool.vpd.VPDTool.UpdateData')
  def testDeleteTwo(self, update_data_mock):
    """Delete accepts multiple keys in one call."""
    self.vpd.rw.Delete('a', 'b')
    update_data_mock.assert_called_once_with(dict(a=None, b=None),
                                             partition='RW_VPD')

  @mock.patch('cros.factory.gooftool.vpd.VPDTool.GetAllData')
  def testGetPartition(self, get_all_data_mock):
    """GetPartition('rw'/'ro') selects the matching VPD partition."""
    get_all_data_mock.return_value = dict(foo='bar')
    self.assertEqual(dict(foo='bar'),
                     self.vpd.GetPartition('rw').GetAll())
    get_all_data_mock.assert_called_with(partition='RW_VPD')
    get_all_data_mock.return_value = dict(bar='foo')
    self.assertEqual(dict(bar='foo'),
                     self.vpd.GetPartition('ro').GetAll())
    get_all_data_mock.assert_called_with(partition='RO_VPD')
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
"""Test configuration functions."""
import pytest
import logging
import ambianic
from ambianic.server import AmbianicServer
from ambianic import server
import os
import pathlib
def test_no_config():
    """Configuring from a directory without a config file yields nothing."""
    assert not server._configure('/')
def test_log_config_with_file():
    """A 'file' entry in the log config must attach a FileHandler."""
    log_config = {
        'file': '/tmp/test-log.txt'
    }
    server._configure_logging(config=log_config)
    handlers = logging.getLogger().handlers
    # Bug fix: the original asserted only inside the loop, so the test
    # passed vacuously when no FileHandler was registered at all.
    file_handlers = [h for h in handlers
                     if isinstance(h, logging.FileHandler)]
    assert file_handlers
    for h in file_handlers:
        assert h.baseFilename == log_config['file']
def test_log_config_without_file():
    """Without a 'file' entry no FileHandler should be registered."""
    server._configure_logging(config={})
    assert not any(
        isinstance(handler, logging.FileHandler)
        for handler in logging.getLogger().handlers
    )
def test_log_config_with_debug_level():
    """'DEBUG' in the config must become the effective root level."""
    server._configure_logging(config={'level': 'DEBUG'})
    effective = logging.getLogger().getEffectiveLevel()
    assert logging.getLevelName(effective) == 'DEBUG'
def test_log_config_with_warning_level():
    """'WARNING' in the config must become the effective root level."""
    server._configure_logging(config={'level': 'WARNING'})
    effective = logging.getLogger().getEffectiveLevel()
    assert logging.getLevelName(effective) == 'WARNING'
def test_log_config_without_level():
    """An empty config leaves the default log level in place."""
    server._configure_logging(config={})
    root = logging.getLogger()
    assert root.getEffectiveLevel() == server.DEFAULT_LOG_LEVEL
def test_log_config_bad_level1():
    """An unknown level name falls back to the default level."""
    server._configure_logging(config={'level': '_COOCOO_'})
    root = logging.getLogger()
    assert root.getEffectiveLevel() == server.DEFAULT_LOG_LEVEL
def test_log_config_bad_level2():
    """A non-string level value falls back to the default level."""
    server._configure_logging(config={'level': 2.56})
    root = logging.getLogger()
    assert root.getEffectiveLevel() == server.DEFAULT_LOG_LEVEL
def test_config_with_secrets():
    """Secrets file values are substituted into the loaded config."""
    server.SECRETS_FILE = 'test-config-secrets.yaml'
    server.CONFIG_FILE = 'test-config.yaml'
    # Renamed from `dir`, which shadowed the builtin.
    config_dir = os.path.dirname(os.path.abspath(__file__))
    conf = server._configure(config_dir)
    assert conf
    assert conf['logging']['level'] == 'DEBUG'
    assert conf['sources']['front_door_camera']['uri'] == 'secret_uri'
def test_config_without_secrets_failed_ref():
    """Missing secrets file + config referencing secrets: load must fail."""
    server.SECRETS_FILE = '__no__secrets__.lmay__'
    server.CONFIG_FILE = 'test-config.yaml'
    # 'conf_dir' instead of 'dir' — the original shadowed the builtin.
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    conf = server._configure(conf_dir)
    assert not conf
def test_config_without_secrets_no_ref():
    """Missing secrets file is fine when the config has no secret refs."""
    server.SECRETS_FILE = '__no__secrets__.lmay__'
    server.CONFIG_FILE = 'test-config2.yaml'
    # 'conf_dir' instead of 'dir' — the original shadowed the builtin.
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    conf = server._configure(conf_dir)
    assert conf
    assert conf['logging']['level'] == 'DEBUG'
    assert conf['sources']['front_door_camera']['uri'] == 'no_secret_uri'
def test_no_pipelines():
    """A config that defines no pipelines is rejected."""
    server.CONFIG_FILE = 'test-config-no-pipelines.yaml'
    # 'conf_dir' instead of 'dir' — the original shadowed the builtin.
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    conf = server._configure(conf_dir)
    assert not conf
|
import numpy as np
import keras.backend as K
import os, shutil
########################### Sentences loading ##############################
class MySentences(object):
    """Memory-friendly sentence iterator for a word2vec model.

    Iterating yields one tokenized sentence (whitespace-split list of
    tokens) per line, across every file found in *dirname*.
    """

    def __init__(self, dirname):
        """
        # Arguments
            dirname : directory path of sentence/data files.
        """
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            # Use a context manager: the original opened each file without
            # ever closing it, leaking one handle per file.
            with open(os.path.join(self.dirname, fname)) as fh:
                for line in fh:
                    yield line.split()
def read_sentences(path):
    """Load sentences for the attention model.

    # Arguments
        path : path of the sentences/data file, one sentence per line.
    # Returns
        numpy array of stripped sentence strings.
    """
    with open(path) as fh:
        stripped = [line.strip() for line in fh]
    return np.array(stripped)
########################## For Keras layers #################################
def softmax(x, axis=1):
    """Softmax activation function.

    # Arguments
        x : Tensor.
        axis : Integer, axis along which the softmax normalization is applied.
    # Returns
        Tensor, output of the softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim < 2:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
    if ndim == 2:
        return K.softmax(x)
    # Subtract the per-axis max for numerical stability, then normalize.
    shifted = x - K.max(x, axis=axis, keepdims=True)
    exps = K.exp(shifted)
    return exps / K.sum(exps, axis=axis, keepdims=True)
########################## For Keras layers #################################
def save_model_json(model, word_dim):
    """Serialize a model's architecture to JSON and its weights to HDF5.

    # Arguments
        model : keras model.
        word_dim : w2v word length, used to build the file names.
    # Returns
        file_name : base name (without extension) of the written files.
    """
    base = "model_{}".format(word_dim)
    with open(base + ".json", "w") as json_file:
        json_file.write(model.to_json())
    # serialize weights to HDF5
    model.save_weights(base + ".h5")
    print("Saved model to disk")
    return base
def load_model_json(file_name):
    """Load a keras model from <file_name>.json plus <file_name>.h5 weights.

    # Arguments
        file_name : base file name (without extension), as returned by
            save_model_json.
    # Returns
        model : keras model with weights restored.
    """
    # Local import: the original referenced model_from_json without any
    # import, which raised NameError the first time it was called.
    from keras.models import model_from_json
    # Context manager replaces the manual open/read/close triple.
    with open(file_name + ".json", "r") as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights(file_name + ".h5")
    print("Loaded model from disk")
    return model
############################## Glove model ################################
def similar_posneg(model, positive, negative, topn=10):
    """Glove analogy query: words most similar to positive minus negative.

    Averages the vectors of *positive* words and the negated vectors of
    *negative* words, normalizes the mean, and ranks all vocabulary words
    by dot-product similarity, excluding the query words themselves.
    """
    contributions = [model.word_vectors[model.dictionary[w]] for w in positive]
    contributions += [-1 * model.word_vectors[model.dictionary[w]] for w in negative]
    mean = np.array(contributions).mean(axis=0)
    mean /= np.linalg.norm(mean)
    dists = np.dot(model.word_vectors, mean)
    order = np.argsort(dists)[::-1]
    query_words = set(positive) | set(negative)
    ranked = [(model.inverse_dictionary[i], dists[i])
              for i in order
              if model.inverse_dictionary[i] not in query_words]
    return ranked[:topn]
############################### More utils #################################
def get_by_address(address):
    """Look up module-global objects by their id().

    # Arguments
        address : integer id/address of the object sought.
    # Returns
        List of matching globals (empty when nothing matches).
    """
    matches = []
    for obj in globals().values():
        if id(obj) == address:
            matches.append(obj)
    return matches
def delete_weights(folder='./models'):
    """Delete every regular file in *folder*; subdirectories are kept.

    Errors while removing an individual file are printed and skipped so
    one bad entry does not abort the sweep.
    """
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            if os.path.isfile(entry_path):
                os.unlink(entry_path)
        except Exception as err:
            print(err)
import os
import sys
import time
import subprocess
import win32gui
import re
import win32com.client as comclt
import sublime
from .helper import SingleHwnd, SingleProcess, WinProcess, move_mouse_to
from SasSubmit.settings import SessionInfo
def standardize_name(name):
    """Map a browser nickname to its Windows executable name.

    Unknown names are returned unchanged. Fixes the original's
    "iexplorer.exe": Internet Explorer's process is iexplore.exe.
    """
    exe_names = {
        "chrome": "chrome.exe",
        "firefox": "firefox.exe",
        "ie": "iexplore.exe",
    }
    return exe_names.get(name, name)
class StudioSession:
    """Drive a SAS Studio session running in a local web browser.

    Finds (or launches) the browser window hosting SAS Studio, brings it to
    the foreground, and pastes/submits code via simulated keystrokes
    (WScript.Shell SendKeys). Windows-only: relies on win32gui/win32com.
    """
    def __init__(self):
        pass
    def update_session_info(self):
        # Re-read session metadata and plugin settings before each operation.
        self.meta = SessionInfo()
        self.browser = self.meta.get("browser")
        self.browser_name = standardize_name(self.browser)
        self.settings = sublime.load_settings("SasSubmit.sublime-settings")
        self.link = self.settings.get("studio_address")
        self.browser_path = self.settings.get(self.browser+"_path")
        self.activate_subl_after_submit = self.settings.get("activate_subl_after_submit")
        self.subl_path = self.settings.get("subl_path")
    def get_win_process(self):
        # Snapshot of the currently running Windows processes.
        return WinProcess()
    def get_browser_process(self):
        # Browser processes that own at least one top-level window.
        return self.get_win_process().filter_by_name(self.browser_name, require_hwnd=True)
    def activate_via_looping(self, require_studio):
        """Scan browser windows and activate the first suitable one.

        When require_studio is True, only a window whose title contains
        "sas studio" qualifies. On success the hwnd is remembered in
        self.last_hwnd for the faster activate_via_hwnd path.
        """
        activation_success = False
        pses = self.get_browser_process()
        for ps in pses:
            for sh in ps.get_hwnds():
                time.sleep(0.1)
                try:
                    if require_studio:
                        sh.activate_if_title_icontains("sas studio",with_mouse=True,x_w=0.2,y_w=0.2)
                    else:
                        sh.activate(with_mouse=True,x_w=0.2,y_w=0.2)
                    activation_success = True
                    self.last_hwnd = sh.hwnd
                    break
                except Exception as e:
                    # This window could not be activated; try the next one.
                    pass
            if activation_success:
                pass
            else:
                # NOTE(review): raises after the first process whose windows
                # all failed, without trying remaining processes — confirm
                # this is intended.
                raise ValueError("Activating browser failed")
    def activate_via_hwnd(self, require_studio):
        """Re-activate the window remembered from a previous successful scan."""
        sh = SingleHwnd(self.last_hwnd)
        # Guard against the hwnd having been recycled by another process.
        if SingleProcess(sh.get_pid()).get_name() != self.browser_name:
            raise ValueError("browser changed since last check")
        if require_studio:
            sh.activate_if_title_icontains("sas studio",with_mouse=True,x_w=0.2,y_w=0.2)
        else:
            sh.activate(with_mouse=True,x_w=0.2,y_w=0.2)
    def activate(self, require_studio):
        """Activate the studio window: fast hwnd path first, scan as fallback."""
        try:
            self.activate_via_hwnd(require_studio=require_studio)
        except:
            self.activate_via_looping(require_studio=require_studio)
    def new_browser(self):
        # Launch the configured browser pointed at the studio address and
        # record its pid in the session metadata.
        if os.path.isfile(self.browser_path):
            proc = subprocess.Popen([self.browser_path, self.link])
            self.meta.set("pid",proc.pid,"studio")
        else:
            sublime.message_dialog("%s setting is not valid!" % (self.browser+"_path"))
    def new_instance(self, instance, root_path):
        """Start a fresh studio session: new metadata entry + new browser."""
        self.update_session_info()
        self.meta.new("studio")
        self.new_browser()
    def submit_to_broswer(self):
        """Paste the clipboard into SAS Studio's code tab and press F3 to run.

        Tries up to 10 times: when the foreground window is not SAS Studio,
        attempts to activate it first. Failures are reported via dialogs.
        """
        for i in range(10):
            handle = win32gui.GetForegroundWindow()
            title = SingleHwnd(handle).get_title()
            submit_success = False
            error_sent = False
            if re.match("SAS Studio", title):
                driver = comclt.Dispatch("WScript.Shell")
                time.sleep(0.01)
                # go to the code tab
                driver.SendKeys("%4")
                # insert new code
                time.sleep(0.01)
                driver.SendKeys("^a")
                time.sleep(0.01)
                driver.SendKeys("^v")
                # submit
                time.sleep(0.5)
                driver.SendKeys("{F3}")
                submit_success = True
                break
            else:
                try:
                    self.activate(require_studio=True)
                except:
                    error_sent = True
                    sublime.message_dialog("Activating %s failed, check if it is running!" % self.browser)
                    break
            time.sleep(0.1)
        if submit_success:
            if self.activate_subl_after_submit:
                # Hand focus back to Sublime Text after the submission.
                time.sleep(0.1)
                try:
                    _ = os.popen('\"%s\" --command "sas_submit_activate"' % self.subl_path)
                except Exception as e:
                    sublime.message_dialog(e)
        elif error_sent == False:
            sublime.message_dialog("Cannot submit to SAS, check if SAS is running!")
    def submit(self, instance, root_path):
        """Submit clipboard code to SAS Studio, restoring the mouse afterwards."""
        flags, hcursor, (x,y) = win32gui.GetCursorInfo()
        self.update_session_info()
        self.submit_to_broswer()
        move_mouse_to(x,y,with_click=False)
# SingleHwnd(19860206).activate()
# time.sleep(1)
# SingleHwnd(win32gui.GetForegroundWindow()).get_title()
# rect = win32gui.GetWindowRect(19860206) |
<filename>gtk/position-logger/position_logger.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
version = '0.1'
import os, sys
import gtk
import time
import linuxcnc
import gobject
class app:
    """Position-logger GUI for LinuxCNC (PyGTK / gobject, Python 2 era).

    Polls the LinuxCNC status channel every 100 ms, mirrors the selected
    axis positions into entry widgets, and lets the user log them as
    G0/G1 moves into a small G-code text buffer.
    """
    def __init__(self):
        self.builder = gtk.Builder()
        self.path = os.path.abspath(os.path.dirname(sys.argv[0]))
        self.ui = os.path.join(self.path, 'position_logger.glade')
        self.builder.add_from_file(self.ui)
        self.builder.connect_signals(self)
        self.statusbar = self.builder.get_object("statusbar")
        self.context_id = self.statusbar.get_context_id("status")
        self.status_count = 0
        self.s = linuxcnc.stat()  # create a connection to the status channel
        try:
            self.s.poll()
        except linuxcnc.error:
            self.statusbar.push(self.context_id, 'Start LinuxCNC then run this Program')
            self.emc = False
        else:
            self.statusbar.push(self.context_id, 'LinuxCNC is running')
            self.emc = True
        # Relative/Absolute display selector.
        self.display_cb = self.builder.get_object('display_cb')
        self.display = 'Relative'
        self.display_ls = gtk.ListStore(int, str)
        self.display_ls.append([0, "Relative"])
        self.display_ls.append([1, "Absolute"])
        self.display_cb.set_model(self.display_ls)
        self.cell = gtk.CellRendererText()
        self.display_cb.pack_start(self.cell, True)
        self.display_cb.add_attribute(self.cell, 'text', 1)
        self.display_cb.set_active(0)
        # One checkbox (log_N) and one entry (pos_N) per possible axis;
        # the loops replace eighteen hand-written get_object assignments.
        for i in range(9):
            setattr(self, 'log_%d' % i, self.builder.get_object('log_%d' % i))
            setattr(self, 'pos_%d' % i, self.builder.get_object('pos_%d' % i))
        if self.emc:
            for i in range(self.s.axes):
                # getattr replaces the original eval(): identical effect,
                # no dynamic code execution.
                getattr(self, 'log_%d' % i).set_active(True)
            gobject.timeout_add(100, self.update)  # refresh every 100 ms
        self.insert_text = self.builder.get_object('insert_text')
        self.textview = self.builder.get_object('textview')
        self.textbuffer = self.textview.get_buffer()
        self.textbuffer.set_text('; Generated by Position Logger')
        self.g0_rb = self.builder.get_object('g0_rb')
        self.g1_rb = self.builder.get_object('g1_rb')
        self.window = self.builder.get_object('window')
        self.window.show_all()
    def log_clicked(self, widget):
        """Append a G0/G1 line containing every checked axis to the buffer."""
        if self.g0_rb.get_active():
            que = 'G0 '
        else:
            que = 'G1 '
        # Axis letters in widget order: log_0/pos_0 is X ... log_8/pos_8 is W.
        for i, letter in enumerate('XYZABCUVW'):
            if getattr(self, 'log_%d' % i).get_active():
                que += letter + getattr(self, 'pos_%d' % i).get_text() + ' '
        end_iter = self.textbuffer.get_end_iter()
        self.textbuffer.insert(end_iter, '\n' + que)
    def insert_btn_clicked(self, widget):
        """Append the free-form text from the insert entry to the buffer."""
        end_iter = self.textbuffer.get_end_iter()
        self.textbuffer.insert(end_iter, '\n' + self.insert_text.get_text())
    def on_display_cb_changed(self, widget, data=None):
        """Track the Relative/Absolute selection from the combo box."""
        # get the index of the changed row
        self.index = widget.get_active()
        # get the model
        self.model = widget.get_model()
        # retrieve the item from column 1
        self.item = self.model[self.index][1]
        self.display = self.item
    def update(self):
        """Timeout callback: refresh entries; return True to keep the timer."""
        self.s.poll()
        if self.display == 'Relative':
            for i in range(self.s.axes):
                # Relative = machine position minus the active G5x offset.
                value = self.s.position[i] - self.s.g5x_offset[i]
                getattr(self, 'pos_%d' % i).set_text("{:.4f}".format(value))
        else:
            for i in range(self.s.axes):
                value = self.s.axis[i]["output"]
                getattr(self, 'pos_%d' % i).set_text("{:.4f}".format(value))
        return True
    def window_destroy(self, *args):
        # do some checks to see if unsaved and annoy
        gtk.main_quit(*args)
# Build the GUI and hand control to the GTK main loop (blocks until quit).
main = app()
gtk.main()
|
<gh_stars>1-10
from OCC.Extend.TopologyUtils import TopologyExplorer
from OCC.Core.GProp import GProp_GProps
from OCC.Core.BRepAdaptor import BRepAdaptor_Curve, BRepAdaptor_Surface, BRepAdaptor_CompCurve, BRepAdaptor_Curve2d
from OCC.Core.gp import *
from OCC.Core.BRepTools import *
from OCC.Core.BRep import *
from OCC.Core.TopoDS import *
import gmsh
import sys
import os
import numpy as np
import json
import glob
import fileinput
import random
import yaml
from OCC.Core.Bnd import Bnd_Box
from OCC.Core.BRepBndLib import brepbndlib_Add
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox, BRepPrimAPI_MakeCylinder
from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh
from OCC.Core.STEPControl import STEPControl_Reader, STEPControl_Writer, STEPControl_AsIs
from OCC.Core.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
from OCC.Core.TColStd import TColStd_Array1OfReal, TColStd_Array2OfReal
from OCC.Core.TColgp import TColgp_Array1OfPnt, TColgp_Array2OfPnt, TColgp_Array1OfPnt2d
from OCC.Core.BRepGProp import (brepgprop_SurfaceProperties,
brepgprop_VolumeProperties)
from OCC.Core.Geom2dAdaptor import Geom2dAdaptor_Curve
from OCCUtils.edge import Edge
from OCCUtils.Topology import Topo
np.set_printoptions(precision=17)
def read_step_file(filename, return_as_shapes=False, verbosity=False):
    """Read a STEP file and return its root shapes (one compound per root).

    Parameters
    ----------
    filename : str
        path of the STEP file; must exist.
    return_as_shapes : bool
        kept for interface compatibility; currently unused.
    verbosity : bool
        when True, print the reader's load/transfer check reports.

    Raises
    ------
    AssertionError
        when the file cannot be read by the STEP reader.
    """
    assert os.path.isfile(filename)
    step_reader = STEPControl_Reader()
    status = step_reader.ReadFile(filename)
    if status != IFSelect_RetDone:
        raise AssertionError("Error: can't read file.")
    if verbosity:
        failsonly = False
        step_reader.PrintCheckLoad(failsonly, IFSelect_ItemsByEntity)
        step_reader.PrintCheckTransfer(failsonly, IFSelect_ItemsByEntity)
    shapes = []
    # Pre-bind nr: the original's handler referenced it even when the
    # exception fired before the loop assigned it (NameError).
    nr = 0
    try:
        total_roots = step_reader.NbRootsForTransfer()
        for nr in range(1, total_roots + 1):
            if not step_reader.TransferRoot(nr):
                break
            shapes.append(step_reader.Shape(nr))  # each root is a compound
    except Exception:
        # Best-effort: keep whatever roots transferred before the failure.
        print("No Shape", nr)
    return shapes
def get_boundingbox(shape, tol=1e-6, use_mesh=True):
    """Compute the axis-aligned bounding box of a TopoDS_Shape.

    Parameters
    ----------
    shape : TopoDS_Shape or a subclass such as TopoDS_Face
        the shape to compute the bounding box from
    tol : float
        gap added around the computed box
    use_mesh : bool
        when True the shape is meshed first, which produces a more
        accurate bounding box

    Returns
    -------
    list of 9 floats: [xmin, ymin, zmin, xmax, ymax, zmax, dx, dy, dz]
    """
    box = Bnd_Box()
    box.SetGap(tol)
    if use_mesh:
        inc_mesh = BRepMesh_IncrementalMesh()
        inc_mesh.SetParallelDefault(True)
        inc_mesh.SetShape(shape)
        inc_mesh.Perform()
        assert inc_mesh.IsDone()
    brepbndlib_Add(shape, box, use_mesh)
    xmin, ymin, zmin, xmax, ymax, zmax = box.Get()
    dx, dy, dz = xmax - xmin, ymax - ymin, zmax - zmin
    return [xmin, ymin, zmin, xmax, ymax, zmax, dx, dy, dz]
# Index -> name tables translating OCC BRepAdaptor GetType() enum values
# into the string labels used in this module's feature dictionaries.
edge_map = {0: "Line", 1: "Circle", 2: "Ellipse", 3: "Hyperbola", 4: "Parabola", 5: "Bezier", 6: "BSpline", 7: "Other"}
surf_map = {0: "Plane", 1: "Cylinder", 2: "Cone", 3: "Sphere", 4: "Torus", 5: "Bezier", 6: "BSpline", 7: "Revolution", 8: "Extrusion", 9: "Other"}
# gmsh entity-type string -> the surf_map vocabulary above.
gmsh_map = {"Surface of Revolution": "Revolution", "Surface of Extrusion": "Extrusion", "Plane": "Plane", "Cylinder": "Cylinder",\
    "Cone": "Cone", "Torus": "Torus", "Sphere": "Sphere", "Bezier surface": "Bezier", "BSpline surface": "BSpline", "Unknown": "Other"}
# def convert_curve(curve):
# d1_feat = {"type": edge_map[curve.GetType()]}
# c_type = d1_feat["type"]
# if c_type == "Line":
# c = curve.Line()
# d1_feat["location"] = list(c.Location().Coord())
# d1_feat["direction"] = list(c.Direction().Coord())
# scale_factor = 1000.0
# #occ_node_s = occ_brt.Pnt(topods_Vertex(list(occ_topo.vertices())[elemNodeTags[0][0]-1 - occ_offset]))
# #occ_node_e = occ_brt.Pnt(topods_Vertex(list(occ_topo.vertices())[elemNodeTags[0][-1]-1 - occ_offset]))
# #print(occ_node_s.Coord(), curve.Value(curve.FirstParameter()).Coord(), v_nodes[elemNodeTags[0][0]-1], occ_node_e.Coord(), curve.Value(curve.LastParameter()).Coord(), v_nodes[elemNodeTags[0][-1]-1])
# #print(c.Location().Coord(), c.Direction().Coord())
# #print("E", np.allclose(np.array(curve.Value(curve.LastParameter()).Coord()), np.array(c.Location().Coord())+curve.LastParameter()*np.array(c.Direction().Coord())))
# #print("S", np.allclose(np.array(curve.Value(curve.FirstParameter()).Coord()), np.array(c.Location().Coord())+curve.FirstParameter()*np.array(c.Direction().Coord())))
# #print(e, nodeTags, nodeCoords, nodeParams, gmsh.model.getType(e[0], e[1]), elemTypes, elemTags, elemNodeTags)
# elif c_type == "Circle":
# c = curve.Circle()
# d1_feat["location"] = list(c.Location().Coord())
# d1_feat["z_axis"] = list(c.Axis().Direction().Coord())
# d1_feat["radius"] = c.Radius()
# d1_feat["x_axis"] = list(c.XAxis().Direction().Coord())
# d1_feat["y_axis"] = list(c.YAxis().Direction().Coord())
# scale_factor = 1.0
# #print(c.Location().Coord(), c.Axis().Direction().Coord(), c.Radius())
# elif c_type == "Ellipse":
# c = curve.Ellipse()
# d1_feat["focus1"] = list(c.Focus1().Coord())
# d1_feat["focus2"] = list(c.Focus2().Coord())
# d1_feat["x_axis"] = list(c.XAxis().Direction().Coord())
# d1_feat["y_axis"] = list(c.YAxis().Direction().Coord())
# d1_feat["z_axis"] = list(c.Axis().Direction().Coord())
# d1_feat["maj_radius"] = c.MajorRadius()
# d1_feat["min_radius"] = c.MinorRadius()
# scale_factor = 1.0
# #print(c.Focus1().Coord(), c.Focus2().Coord(), c.XAxis().Direction().Coord(), c.YAxis().Direction().Coord(), c.Axis().Direction().Coord(), c.MajorRadius(), c.MinorRadius())
# elif c_type == "BSpline":
# c = curve.BSpline()
# #print(dir(c))
# c.SetNotPeriodic()
# d1_feat["rational"] = c.IsRational()
# d1_feat["closed"] = c.IsClosed()
# #d1_feat["periodic"] = c.IsPeriodic()
# d1_feat["continuity"] = c.Continuity()
# d1_feat["degree"] = c.Degree()
# p = TColgp_Array1OfPnt(1, c.NbPoles())
# c.Poles(p)
# points = []
# for pi in range(p.Length()):
# points.append(list(p.Value(pi+1).Coord()))
# d1_feat["poles"] = points
#
# k = TColStd_Array1OfReal(1, c.NbPoles() + c.Degree() + 1)
# c.KnotSequence(k)
# knots = []
# for ki in range(k.Length()):
# knots.append(k.Value(ki+1))
# d1_feat["knots"] = knots
#
# w = TColStd_Array1OfReal(1, c.NbPoles())
# c.Weights(w)
# weights = []
# for wi in range(w.Length()):
# weights.append(w.Value(wi+1))
# d1_feat["weights"] = weights
#
# scale_factor = 1.0
# #print(c.Knots())
# #d1_feat[""] =
# #d1_feat[""] =
# #d1_feat[""] =
# #d1_feat[""] =
# #print(c.IsRational(), c.IsClosed(), c.IsPeriodic(), c.Continuity(), c.Degree())
# else:
# print("Unsupported type", c_type)
# return d1_feat
# def convert_2dcurve(curve):
# d1_feat = {"type": edge_map[curve.GetType()], "interval": [curve.FirstParameter(), curve.LastParameter()]}
# c_type = d1_feat["type"]
# if c_type == "Line":
# c = curve.Line()
# d1_feat["location"] = list(c.Location().Coord())
# d1_feat["direction"] = list(c.Direction().Coord())
# #scale_factor = 1000.0
# elif c_type == "Circle":
# c = curve.Circle()
# d1_feat["location"] = list(c.Location().Coord())
# d1_feat["radius"] = c.Radius()
# d1_feat["x_axis"] = list(c.XAxis().Direction().Coord())
# d1_feat["y_axis"] = list(c.YAxis().Direction().Coord())
# elif c_type == "Ellipse":
# c = curve.Ellipse()
# d1_feat["focus1"] = list(c.Focus1().Coord())
# d1_feat["focus2"] = list(c.Focus2().Coord())
# d1_feat["x_axis"] = list(c.XAxis().Direction().Coord())
# d1_feat["y_axis"] = list(c.YAxis().Direction().Coord())
# #d1_feat["z_axis"] = list(c.Axis().Direction().Coord())
# d1_feat["maj_radius"] = c.MajorRadius()
# d1_feat["min_radius"] = c.MinorRadius()
# #scale_factor = 1.0
# elif c_type == "BSpline":
# c = curve.BSpline()
# c.SetNotPeriodic()
# d1_feat["rational"] = c.IsRational()
# d1_feat["closed"] = c.IsClosed()
# #d1_feat["periodic"] = c.IsPeriodic()
# d1_feat["continuity"] = c.Continuity()
# d1_feat["degree"] = c.Degree()
# p = TColgp_Array1OfPnt2d(1, c.NbPoles())
# c.Poles(p)
# points = []
# for pi in range(p.Length()):
# points.append(list(p.Value(pi+1).Coord()))
# d1_feat["poles"] = points
#
# k = TColStd_Array1OfReal(1, c.NbPoles() + c.Degree() + 1)
# c.KnotSequence(k)
# knots = []
# for ki in range(k.Length()):
# knots.append(k.Value(ki+1))
# d1_feat["knots"] = knots
#
# w = TColStd_Array1OfReal(1, c.NbPoles())
# c.Weights(w)
# weights = []
# for wi in range(w.Length()):
# weights.append(w.Value(wi+1))
# d1_feat["weights"] = weights
# else:
# print("Unsupported type", c_type)
# return d1_feat
def convert_surface(surf):
    """Describe a BRepAdaptor surface as a feature dictionary.

    Only the surface kind (looked up through surf_map) is recorded; the
    detailed per-type parameter extraction existed only as commented-out
    dead code and is intentionally not reproduced here.
    """
    return {"type": surf_map[surf.GetType()]}
def mesh_model(model, max_size=1e-5, tolerance=1e-7, repair=False, terminal=1):
# In/Output definitions
#fil = model.split("/")[-1][:-5]
#folder = "/".join(model.split("/")[:-1])
scale_factor = 1000.0
verts = []
#norms = []
faces = []
#curvs = []
vert_map = {}
#d1_feats = []
d2_feats = []
#t_curves = []
#norm_map = {}
with fileinput.FileInput(model, inplace=True) as fi:
for line in fi:
print(line.replace("UNCERTAINTY_MEASURE_WITH_UNIT( LENGTH_MEASURE( 1.00000000000000E-06 )",
"UNCERTAINTY_MEASURE_WITH_UNIT( LENGTH_MEASURE( 1.00000000000000E-17 )"), end='')
#stats = {}
# OCC definitions
occ_steps = read_step_file(model)
total_edges = 0
total_surfs = 0
for l in range(len(occ_steps)):
topo = TopologyExplorer(occ_steps[l])
total_edges += len(list(topo.edges()))
total_surfs += len(list(topo.faces()))
# vol = brepgprop_VolumeProperties(occ_steps[l], occ_props, tolerance)
# print(dir(occ_props), dir(occ_props.PrincipalProperties()), dir(occ_props.volume()), occ_props.Mass())
# sur = brepgprop_SurfaceProperties(occ_steps[l], occ_props, tolerance)
# print(vol, "Test", sur)
if (total_surfs > 300):
print("Skipping model {}, too many surfaces: {}".format(os.path.basename(model), total_surfs))
return
#print(total_surfs, "surfaces")
#stats["#parts"] = len(occ_steps)
#stats["model"] = model
#print("Reading step %s with %i parts."%(model,len(occ_steps)))
#tot = 0
#for s in occ_steps:
# occ_topo = TopologyExplorer(s)
# print(s)
# print(len(list(occ_topo.edges())))
# tot += len(list(occ_topo.edges()))
occ_cnt = 0
bbox = get_boundingbox(occ_steps[occ_cnt], use_mesh=True)
diag = np.sqrt(bbox[6]**2+bbox[7]**2+bbox[8]**2)
max_length = diag * max_size#, 9e-06
tolerance = diag * tolerance
#print(fil, diag, max_length, tolerance)
# stats["bbox"] = bbox
# stats["max_length"] = float(max_length)
# stats["tolerance"] = float(tolerance)
# stats["diag"] = float(diag)
occ_topo = TopologyExplorer(occ_steps[occ_cnt])
#occ_top = Topo(occ_steps[occ_cnt])
#occ_props = GProp_GProps()
#occ_brt = BRep_Tool()
# Gmsh definitions
gmsh.initialize()
gmsh.option.setNumber("General.Terminal", terminal)
gmsh.clear()
if (tolerance > 1e6):
print("Ignoring large tolerance:", tolerance)
else:
gmsh.option.setNumber("Geometry.Tolerance", tolerance)
gmsh.option.setNumber("Geometry.OCCFixDegenerated", 0)
gmsh.option.setNumber("Geometry.OCCFixSmallEdges", 0)
gmsh.option.setNumber("Geometry.OCCFixSmallFaces", 0)
gmsh.option.setNumber("Geometry.OCCSewFaces", 0)
#gmsh.option.setNumber("Mesh.MeshSizeMax", max_length)
gmsh.option.setNumber("Mesh.MeshSizeFactor", max_size)
gmsh.option.setNumber("Mesh.AlgorithmSwitchOnFailure", 0) # Fallback to Mesh-Adapt ends hanging up sometimes.
# gmsh.option.setNumber("General.NumThreads", 6)
# gmsh.option.setNumber("Mesh.MaxNumThreads1D",6)
# gmsh.option.setNumber("Mesh.MaxNumThreads2D",6)
gmsh.option.setNumber("Mesh.MeshSizeFromCurvature", 1)
gmsh.option.setNumber("Mesh.MinimumElementsPerTwoPi",8)
gmsh.option.setNumber("General.ExpertMode",1)
gmsh.open(model)
gmsh_edges = gmsh.model.getEntities(1)
gmsh_surfs = gmsh.model.getEntities(2)
gmsh_entities = gmsh.model.getEntities()
# stats["#edges"] = total_edges
# stats["#surfs"] = total_surfs
# stats["volume"] = vol
# stats["surface"] = sur
# stats["curves"] = []
# stats["surfs"] = []
# stats["#points"] = 0
#print("Number of surfaces: %i, Number of curves: %i"%(total_surfs, total_edges))
#print(total_edges, total_surfs, len(gmsh_edges), len(gmsh_surfs))
if not total_edges == len(gmsh_edges):
print("Skipping due to wrong EDGES", model)
return
if not total_surfs == len(gmsh_surfs):
print("Skipping due to wrong SURFS", model)
return
gmsh.model.mesh.generate(2)
#print("Reading curvature")
v_cnt = 1
#v_nodes = []
occ_offset = 0
invalid_model = False
#c_cnt = 0
#v_cont_cnt = 0
#print(len(list(occ_topo.edges())), len(list(occ_topo.solids())), len(list(occ_topo.faces())), len(list(occ_topo.vertices())))
for e in gmsh_entities[:]:
#print(e)
nodeTags, nodeCoords, nodeParams = gmsh.model.mesh.getNodes(e[0], e[1], True)
#elemTypes, elemTags, elemNodeTags = gmsh.model.mesh.getElements(e[0], e[1])
n_id = e[1] - occ_offset
#print(e, occ_offset, n_id)
#print(e, nodeTags, nodeCoords, nodeParams, gmsh.model.getType(e[0], e[1]), elemTypes, elemTags, elemNodeTags)
if e[0] == 0: # Process points
#print(e[1], nodeCoords)
vert_map[e[1]] = v_cnt
verts.append([nodeCoords[0] * 1000.0, nodeCoords[1] * 1000.0, nodeCoords[2] * 1000.0])
v_cnt += 1
#stats["#points"] += 1
#pass
if e[0] == 1: # Process contours
if n_id - 1 == len(list(occ_topo.edges())):
#print("CNT", occ_cnt)
occ_cnt += 1
occ_offset = e[1] - 1
#n_id = 1
occ_topo = TopologyExplorer(occ_steps[occ_cnt])
#occ_top = Topo(occ_steps[occ_cnt])
#print("Defunct curve", n_id, len(list(occ_topo.edges())))
#continue
#print(n_id)
#curve = BRepAdaptor_Curve(list(occ_topo.edges())[n_id-1])
# Add type and parametric nodes/indices
#print("type", edge_map[curve.GetType()])
if gmsh.model.getType(e[0], e[1]) == "Unknown":
#print("Skipping OtherCurve", nodeTags)
continue
for i, n in enumerate(nodeTags):
if n >= v_cnt:
vert_map[n] = v_cnt
verts.append([nodeCoords[i*3] * 1000.0, nodeCoords[i*3+1] * 1000.0, nodeCoords[i*3+2] * 1000.0])
v_cnt += 1
#else:
#print(n, v_cnt)
#print(v_ind, type(v_ind), v_par, type(v_par))
#stats["curves"].append(edge_map[curve.GetType()])
#print(n_id, edge_map[curve.GetType()], gmsh.model.getType(e[0], e[1]))
#print(list(occ_topo.edges()), n_id-1)
#c_type = edge_map[curve.GetType()]#gmsh.model.getType(e[0], e[1])
# if not gmsh.model.getType(e[0], e[1]) == edge_map[curve.GetType()]:
# print("Skipped due to non matching edges ", model, gmsh.model.getType(e[0], e[1]), edge_map[curve.GetType()])
# #invalid_model = True
# #break
#d1_feat = convert_curve(curve)
#edg = list(occ_topo.edges())[n_id-1]
#for f in occ_top.faces_from_edge(edg):
#ee = (e)
#print(dir(ee))
#d1_feat = {}
#su = BRepAdaptor_Surface(f)
#c = BRepAdaptor_Curve2d(edg, f)
#t_curve = {"surface": f, "3dcurve": c_cnt, "2dcurve": convert_2dcurve(c)}
#print(edge_map[c.GetType()], surf_map[su.GetType()], edge_map[curve.GetType()])
#d1f = convert_2dcurve(c)
#print(d1f)
#ccnt += 1
#print(d1_feat)
#t_curves.append(t_curve)
# if len(elemNodeTags) > 0:
# #v_ind = [int(elemNodeTags[0][0]) - 1] # first vertex
# v_ind = [int(nodeTags[-2])-1]
# for no in nodeTags[:-2]:
# v_ind.append(int(no) - 1) # interior vertices
# v_ind.append(int(nodeTags[-1])-1)
# #v_ind.append(int(elemNodeTags[0][-1]) - 1) # last vertex
# #d1_feat["vert_indices"] = v_ind
# #v_par = [float(curve.FirstParameter())] # first param
# v_par = [float(nodeParams[-2]*scale_factor)]
# for no in nodeParams[:-2]:
# v_par.append(float(no*scale_factor)) # interior params
# v_par.append(float(nodeParams[-1]*scale_factor))
# #v_par.append(float(curve.LastParameter())) # last param
# #d1_feat["vert_parameters"] = v_par
# else:
# print("No nodetags", edge_map[curve.GetType()], elemNodeTags)
#print("VERTS", len(d1_feat["vert_indices"]), len(d1_feat["vert_parameters"]))
#d1_feats.append(d1_feat)
#c_cnt += 1
#t_curve = curve.Trim(curve.FirstParameter(), curve.LastParameter(), 0.0001).GetObject()
#print(curve.FirstParameter(), curve.LastParameter())
gmsh_entities = gmsh.model.getEntities(2)
#print("Processing {} surfaces".format(len(gmsh_entities)))
n_cnt = 1
occ_offset = 0
occ_cnt = 0
occ_topo = TopologyExplorer(occ_steps[occ_cnt])
#occ_top = Topo(occ_steps[occ_cnt])
f_cnt = 0
f_sum = 0
#first_face = True
#mean_curv = 0.0
#curv_cnt = 0
#gaus_curv = 0.0
s_cnt = 0
for e in gmsh_entities[:]:
#print(e)
nodeTags, nodeCoords, nodeParams = gmsh.model.mesh.getNodes(e[0], e[1], True)
elemTypes, elemTags, elemNodeTags = gmsh.model.mesh.getElements(e[0], e[1])
n_id = e[1] - occ_offset
#print(e, occ_offset, n_id)
#print(e, nodeTags, nodeCoords, nodeParams, gmsh.model.getType(e[0], e[1]), elemTypes, elemTags, elemNodeTags)
if e[0] == 2:
#print(e, gmsh.model.getType(e[0], e[1]), elemTypes)
if n_id - 1 == len(list(occ_topo.faces())):
#print("CNT", occ_cnt)
occ_cnt += 1
occ_offset = e[1] - 1
n_id = 1
occ_topo = TopologyExplorer(occ_steps[occ_cnt])
#occ_top = Topo(occ_steps[occ_cnt])
# if "getNormals" in dir(gmsh.model):
# nls = gmsh.model.getNormals(e[1], nodeParams)
# else:
# nls = gmsh.model.getNormal(e[1], nodeParams)
#curvMax, curvMin, dirMax, dirMin = gmsh.model.getPrincipalCurvatures(e[1], nodeParams)
#surf = BRepAdaptor_Surface(list(occ_topo.faces())[n_id-1])
norm_map = {}
for i, n in enumerate(nodeTags):
#norms.append([nls[i*3], nls[i*3+1], nls[i*3+2]])
#curvs.append([curvMin[i], curvMax[i], dirMin[i*3], dirMin[i*3+1], dirMin[i*3+2], dirMax[i*3], dirMax[i*3+1], dirMax[i*3+2]])
#curv_cnt += 1
#mean_curv += (curvMin[i] + curvMax[i])/2.0
#gaus_curv += (curvMin[i] * curvMax[i])
norm_map[n] = n_cnt
n_cnt += 1
if n in vert_map.keys():
#v = verts[vert_map[n]-1]
#print("Vert contained", n)
#v_cont_cnt += 1
# assert(v[0] == nodeCoords[i*3] * 1000.0 and v[1] == nodeCoords[i*3+1] * 1000.0 and v[2] == nodeCoords[i*3+2] * 1000.0)
continue
else:
vert_map[n] = v_cnt
#occ_node = surf.Value(nodeParams[i], nodeParams[i+1])
#vertices.append([occ_node.X(), occ_node.Y(), occ_node.Z()])
verts.append([nodeCoords[i*3] * 1000.0, nodeCoords[i*3+1] * 1000.0, nodeCoords[i*3+2] * 1000.0])
#print("S", occ_node.Coord(), [nodeCoords[i*3]*1000, nodeCoords[i*3+1]*1000, nodeCoords[i*3+2]*1000])
#print(occ_node.Coord(), nodeCoords[i*3:(i+1)*3])
v_cnt += 1
d2_faces = []
for i, t in enumerate(elemTypes):
for j in range(len(elemTags[i])):
faces.append([vert_map[elemNodeTags[i][j*3]], vert_map[elemNodeTags[i][j*3+1]], vert_map[elemNodeTags[i][j*3+2]], norm_map[elemNodeTags[i][j*3]], norm_map[elemNodeTags[i][j*3+1]], norm_map[elemNodeTags[i][j*3+2]]])
d2_faces.append(f_cnt)
f_cnt += 1
#print(len(list(occ_topo.faces())), n_id-1)
surf = BRepAdaptor_Surface(list(occ_topo.faces())[n_id-1])
#print("type", edge_map[curve.GetType()])
#if gmsh.model.getType(e[0], e[1]) == "Unknown":
# print("Skipping OtherCurve", nodeTags)
# continue
#print(surf)
g_type = gmsh_map[gmsh.model.getType(e[0], e[1])]
if g_type != "Other" and not g_type == surf_map[surf.GetType()]:
print("Skipped due to non matching surfaces ", model, g_type, surf_map[surf.GetType()])
return
#invalid_model = True
#break
#stats["surfs"].append(surf_map[surf.GetType()])
d2_feat = convert_surface(surf)
d2_feat["face_indices"] = d2_faces
# for tc in t_curves:
# if tc["surface"] == list(occ_topo.faces())[n_id-1]:
# tc["surface"] = s_cnt
# if len(elemNodeTags) > 0:
# #print(len(elemNodeTags[0]), len(nodeTags), len(nodeParams))
# v_ind = []#int(elemNodeTags[0][0])] # first vertex
# for no in nodeTags:
# v_ind.append(int(no) - 1) # interior vertices
# #v_ind.append(int(elemNodeTags[0][-1])) # last vertex
# d2_feat["vert_indices"] = v_ind
# v_par = []#float(surf.FirstParameter())] # first param
# for io in range(int(len(nodeParams)/2)):
# v_par.append([float(nodeParams[io*2]*scale_factor), float(nodeParams[io*2+1]*scale_factor)]) # interior params
# #v_par.append(float(surf.LastParameter())) # last param
# d2_feat["vert_parameters"] = v_par
# else:
# print("No nodetags", edge_map[surf.GetType()], elemNodeTags)
f_sum += len(d2_feat["face_indices"])
d2_feats.append(d2_feat)
s_cnt += 1
if invalid_model:
return
#stats["#sharp"] = 0
#stats["gaus_curv"] = float(gaus_curv / curv_cnt)
#stats["mean_curv"] = float(mean_curv / curv_cnt)
if not f_sum == len(faces):
print("Skipping due to wrong FACES", model)
return
# sharp flags not needed
# if True:
# vert2norm = {}
# for f in faces:
# #print(f)
# for fii in range(3):
# if f[fii] in vert2norm:
# vert2norm[f[fii]].append(f[fii+3])
# else:
# vert2norm[f[fii]] = [f[fii+3]]
# for d1f in d1_feats:
# sharp = True
# for vi in d1f["vert_indices"][1:-1]:
# #print(vi, vert2norm.keys())
# nos = list(set(vert2norm[vi + 1]))
# if len(nos) == 2:
# n0 = np.array(norms[nos[0]])
# n1 = np.array(norms[nos[1]])
# #print(np.linalg.norm(n0), np.linalg.norm(n1))
# if np.abs(n0.dot(n1)) > 0.95:
# sharp = False
# #break
# else:
# sharp = False
# if sharp:
# stats["#sharp"] += 1
# d1f["sharp"] = sharp
#stats["#verts"] = len(verts)
#stats["#faces"] = len(faces)
#stats["#norms"] = len(norms)
#with open("results/" + file + ".json", "w") as fil:
# json.dump(d1_feats, fil, sort_keys=True, indent=2)
#with open("results/" + file + "_faces.json", "w") as fil:
# json.dump(d2_feats, fil, sort_keys=True, indent=2)
features = {"surfaces": d2_feats}
if True:
# res_path = folder.replace("/step/", "/feat/")
# fip = fil.replace("_step_", "_features_")
# print("%s/%s.yml"%(res_path, fip))
# with open("%s/%s.yml"%(res_path, fip), "w") as fili:
# yaml.dump(features, fili, indent=2)
# res_path = folder.replace("/step/", "/stat/")
# fip = fil.replace("_step_", "_stats_")
# with open("%s/%s.yml"%(res_path, fip), "w") as fili:
# yaml.dump(stats, fili, indent=2)
# print("Generated model with %i vertices and %i faces." %(len(verts), len(faces)))
# res_path = folder.replace("/step/", "/obj/")
# fip = fil.replace("_step_", "_trimesh_")
# with open("%s/%s.obj"%(res_path, fip), "w") as fili:
# for v in verts:
# fili.write("v %f %f %f\n"%(v[0], v[1], v[2]))
# for vn in norms:
# #print(np.linalg.norm(vn))
# fili.write("vn %f %f %f\n"%(vn[0], vn[1], vn[2]))
# for vn in curvs:
# fili.write("vc %f %f %f %f %f %f %f %f\n"%(vn[0], vn[1], vn[2], vn[3], vn[4], vn[5], vn[6], vn[7]))
# for f in faces:
# fili.write("f %i//%i %i//%i %i//%i\n"%(f[0], f[3], f[1], f[4], f[2], f[5]))
faces = np.array(faces)
face_indices = faces[:, :3] - 1
#norm_indices = faces[:, 3:] - 1
gmsh.clear()
gmsh.finalize()
#print(curvs)
return {"features": features, "vertices": np.array(verts), "faces": faces, "face_indices": face_indices}
|
import numpy as np
def get_unit_drift_rate(raw_voltage_backend,
                        fftlength,
                        int_factor=1):
    """
    Compute the drift rate corresponding to a 1x1 pixel shift in the final
    data product: the fine-channel frequency resolution divided by the
    integrated time resolution.

    Parameters
    ----------
    raw_voltage_backend : RawVoltageBackend
        Backend object to infer observation parameters
    fftlength : int
        FFT length to be used in fine channelization
    int_factor : int, optional
        Integration factor to be used in fine channelization

    Returns
    -------
    unit_drift_rate : float
        Drift rate in Hz / s
    """
    # Fine channel width (Hz) and integrated spectrum cadence (s).
    freq_res = raw_voltage_backend.chan_bw / fftlength
    time_res = raw_voltage_backend.tbin * fftlength * int_factor
    return freq_res / time_res
def get_level(snr,
              raw_voltage_backend,
              fftlength,
              obs_length=None,
              num_blocks=None,
              length_mode='obs_length',):
    """
    Calculate required signal level as a function of desired SNR, assuming initial noise
    variance of 1. This is calculated for a single polarization. This further assumes the signal
    is non-drifting and centered on a finely channelized bin.

    Parameters
    ----------
    snr : float
        Signal-to-noise ratio (SNR)
    raw_voltage_backend : RawVoltageBackend
        Backend object to infer observation parameters
    fftlength : int, optional
        FFT length to be used in fine channelization
    obs_length : float, optional
        Length of observation in seconds, if in `obs_length` mode
    num_blocks : int, optional
        Number of data blocks to record, if in `num_blocks` mode
    length_mode : str, optional
        Mode for specifying length of observation, either `obs_length` in seconds or `num_blocks` in data blocks

    Returns
    -------
    level : float
        Level, or amplitude, for a real voltage cosine signal
    """
    # Resolve the number of recorded blocks from whichever length spec was given.
    if length_mode == 'obs_length':
        if obs_length is None:
            raise ValueError("Value not given for 'obs_length'.")
        num_blocks = raw_voltage_backend.get_num_blocks(obs_length)
    elif length_mode == 'num_blocks':
        if num_blocks is None:
            raise ValueError("Value not given for 'num_blocks'.")
    else:
        raise ValueError("Invalid option given for 'length_mode'.")
    # Get amplitude required for cosine signal to get required SNR
    int_factor = 1  # level has no dependence on integration factor
    fine_dt = raw_voltage_backend.tbin * fftlength * int_factor
    num_time_bins = int(raw_voltage_backend.time_per_block * num_blocks / fine_dt)
    # Chi-squared degrees of freedom for one integrated power spectrum.
    dof = 2 * raw_voltage_backend.num_pols * int_factor
    intensity_per_snr = np.sqrt(2 / dof) / num_time_bins**0.5
    return 1 / (raw_voltage_backend.num_branches * fftlength / 4)**0.5 * (snr * intensity_per_snr)**0.5
def get_leakage_factor(f_start,
                       raw_voltage_backend,
                       fftlength):
    """
    Get factor to scale up signal amplitude from spectral leakage based on the
    position of a signal in a fine channel. This calculates an inverse normalized
    sinc value based on the position of the signal with respect to finely channelized bins.
    Since intensity goes as voltage squared, this gives a scaling proportional to 1/sinc^2
    in finely channelized data products; this is the standard fine channel spectral response.

    Parameters
    ----------
    f_start : float
        Signal frequency, in Hz
    raw_voltage_backend : RawVoltageBackend
        Backend object to infer observation parameters
    fftlength : int, optional
        FFT length to be used in fine channelization

    Returns
    -------
    leakage_factor : float
        Factor to multiply to signal level / amplitude
    """
    # Fractional offset of the signal within its fine channel, folded to [0, 0.5].
    fine_chan_bw = raw_voltage_backend.chan_bw / fftlength
    offset_frac = np.modf((f_start - raw_voltage_backend.fch1) / fine_chan_bw)[0]
    offset_frac = np.min([offset_frac, 1 - offset_frac])
    # Amplitude correction is 1/sinc (intensity response goes as sinc^2).
    return 1 / np.sinc(offset_frac)
def get_total_obs_num_samples(obs_length=None,
                              num_blocks=None,
                              length_mode='obs_length',
                              num_antennas=1,
                              sample_rate=3e9,
                              block_size=134217728,
                              num_bits=8,
                              num_pols=2,
                              num_branches=1024,
                              num_chans=64):
    """
    Calculate number of required real voltage time samples for a given `obs_length` or `num_blocks`, without directly
    using a `RawVoltageBackend` object.

    Parameters
    ----------
    obs_length : float, optional
        Length of observation in seconds, if in `obs_length` mode
    num_blocks : int, optional
        Number of data blocks to record, if in `num_blocks` mode
    length_mode : str, optional
        Mode for specifying length of observation, either `obs_length` in seconds or `num_blocks` in data blocks
    num_antennas : int
        Number of antennas
    sample_rate : float
        Sample rate in Hz
    block_size : int
        Block size used in recording GUPPI RAW files
    num_bits : int
        Number of bits in requantized data (for saving into file). Can be 8 or 4.
    num_pols : int
        Number of polarizations recorded
    num_branches : int
        Number of branches in polyphase filterbank
    num_chans : int
        Number of coarse channels written to file

    Returns
    -------
    num_samples : int
        Number of samples
    """
    # Coarse-channel time resolution and bandwidth after the filterbank.
    time_bin = num_branches / sample_rate
    coarse_chan_bw = 1 / time_bin
    # Two components (real/imag) per polarization per sample.
    bytes_per_sample = 2 * num_pols * num_bits / 8
    if length_mode == 'obs_length':
        if obs_length is None:
            raise ValueError("Value not given for 'obs_length'.")
        num_blocks = int(obs_length * coarse_chan_bw * num_antennas * num_chans * bytes_per_sample / block_size)
    elif length_mode == 'num_blocks':
        if num_blocks is None:
            raise ValueError("Value not given for 'num_blocks'.")
    else:
        raise ValueError("Invalid option given for 'length_mode'.")
    samples_per_chan_per_block = int(block_size / (num_antennas * num_chans * bytes_per_sample))
    return num_blocks * samples_per_chan_per_block * num_branches
|
import numpy as np
import torch
from hbconfig import Config
from sklearn.datasets import make_moons
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import datasets, transforms
from AutoAugment.autoaugment import ImageNetPolicy
from miniimagenet_loader import read_dataset
def get_loader(mode):
    """Builds and returns (train_loader, valid_loader) for the dataset
    selected by ``Config.model.dataset``.

    Parameters
    ----------
    mode : str
        "train" selects the training split for datasets that distinguish
        splits (MNIST/CIFAR ``train=``, SVHN ``split=``).

    Returns
    -------
    tuple
        ``(train_loader, valid_loader)`` — DataLoaders for most datasets;
        whatever ``read_dataset`` returns for "miniimagenet".
    """
    global train_loader, valid_loader
    config = Config
    transform_list_train = []
    transform_list_test = []
    is_train = mode == "train"
    # Build the transform pipelines; augmentation (ImageNetPolicy) is
    # prepended when enabled in the config.
    if config.train.use_augmentation:
        transform_list_train.extend([transforms.Resize((config.data.image_size, config.data.image_size)), ImageNetPolicy()])
    transform_list_train.extend([transforms.Resize((config.data.image_size, config.data.image_size)), transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    transform_train = transforms.Compose(transform_list_train)
    if config.predict.use_augmentation:
        transform_list_test.extend([transforms.Resize((config.data.image_size, config.data.image_size)), ImageNetPolicy()])
    transform_list_test.extend([transforms.Resize((config.data.image_size, config.data.image_size)), transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    transform_test = transforms.Compose(transform_list_test)
    if config.model.dataset == "mnist":
        mnist = datasets.MNIST(root=config.data.mnist_path, download=True, transform=transform_train, train=is_train)
        # train-validation split
        train_mnist, valid_mnist = train_valid_split(mnist)
        train_loader = DataLoader(dataset=train_mnist, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
        valid_loader = DataLoader(dataset=valid_mnist, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
    elif config.model.dataset == "svhn":
        svhn = datasets.SVHN(root=config.data.svhn_path, download=True, transform=transform_train, split=mode)
        train_svhn, valid_svhn = train_valid_split(svhn)
        train_loader = DataLoader(dataset=train_svhn, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
        valid_loader = DataLoader(dataset=valid_svhn, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
    elif config.model.dataset == "cifar10":
        cifar10 = datasets.CIFAR10(root=config.data.cifar10_path, download=True, transform=transform_train, train=is_train)
        train_cifar, valid_cifar = train_valid_split(cifar10)
        train_loader = DataLoader(dataset=train_cifar, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
        valid_loader = DataLoader(dataset=valid_cifar, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
    elif config.model.dataset == "moons":
        train_moons, valid_moons = train_valid_split(moons_dataset())
        train_loader = DataLoader(dataset=train_moons, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
        valid_loader = DataLoader(dataset=valid_moons, batch_size=config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
    elif config.model.dataset == "miniimagenet":
        train_loader, valid_loader = read_dataset(Config.data.miniimagenet_path, transform_train, transform_test)
    elif config.model.dataset == "miniimagenet_all":
        train_loader = datasets.ImageFolder(root=config.data.miniimagenet_path_train, transform=transform_train)
        # BUG FIX: was `transform=transform_list_test` (a raw list).
        # ImageFolder needs the composed, callable pipeline.
        valid_loader = datasets.ImageFolder(root=config.data.miniimagenet_path_valid, transform=transform_test)
        train_loader = DataLoader(dataset=train_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
        valid_loader = DataLoader(dataset=valid_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
    elif config.model.dataset == "miniimagenet_concat":
        # Fold train + validation image folders together, then re-split.
        concat_dataset = ConcatDataset([datasets.ImageFolder(config.data.miniimagenet_path_train, transform=transform_train),
                                        datasets.ImageFolder(config.data.miniimagenet_path_valid, transform=transform_train)])
        train_, valid_ = train_valid_split(concat_dataset)
        train_loader = torch.utils.data.DataLoader(train_, batch_size=config.train.batch_size, shuffle=True, num_workers=config.data.num_workers, pin_memory=True)
        valid_loader = torch.utils.data.DataLoader(valid_, batch_size=config.train.batch_size, shuffle=True, num_workers=config.data.num_workers, pin_memory=True)
    elif config.model.dataset == "imagenet":
        train_loader = datasets.ImageFolder(root=config.data.imagenet_path_train, transform=transform_train)
        # BUG FIX: was `transform=transform_list_test` (a raw list); use the
        # composed pipeline here as well.
        valid_loader = datasets.ImageFolder(root=config.data.imagenet_path_val, transform=transform_test)
        train_loader = DataLoader(dataset=train_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
        valid_loader = DataLoader(dataset=valid_loader, batch_size=Config.train.batch_size, shuffle=config.train.shuffle, num_workers=config.data.num_workers)
    return train_loader, valid_loader
"""
Create train, valid, test iterators for CIFAR-10 [1].
Easily extended to MNIST, CIFAR-100 and Imagenet.
[1]: https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4
"""
class GenHelper(Dataset):
    """Lightweight view over a parent dataset through an index mapping."""

    def __init__(self, mother, length, mapping):
        # `mapping` translates this view's indices into parent-dataset indices.
        self.mother = mother
        self.length = length
        self.mapping = mapping

    def __getitem__(self, index):
        """Return the parent item that `index` maps to."""
        parent_index = self.mapping[index]
        return self.mother[parent_index]

    def __len__(self):
        """Number of items exposed by this view."""
        return self.length
def train_valid_split(dataset, split_fold=10, random_seed=None):
    '''
    Generic helper that takes a data.Dataset object and splits it into
    training and validation views without copying data.

    :param dataset: any object supporting len() and integer indexing
    :param split_fold: reciprocal of the validation fraction (10 -> 1/10 validation)
    :param random_seed: optional seed for a reproducible split
    :return: (train, valid) GenHelper views over `dataset`
    '''
    # PEP 8: compare to None with `is not`, not `!=`.
    if random_seed is not None:
        np.random.seed(random_seed)
    dslen = len(dataset)
    indices = list(range(dslen))
    valid_size = dslen // split_fold
    # Shuffle once, then carve the index list into the two mappings.
    np.random.shuffle(indices)
    train_mapping = indices[valid_size:]
    valid_mapping = indices[:valid_size]
    train = GenHelper(dataset, dslen - valid_size, train_mapping)
    valid = GenHelper(dataset, valid_size, valid_mapping)
    return train, valid
def to_categorical(y, num_classes):
    """Return the one-hot (uint8) encoding of integer labels `y`."""
    identity = np.eye(num_classes, dtype='uint8')
    return identity[y]
class PrepareData(Dataset):
    """Wrap (X, y) arrays/tensors as a torch Dataset of (X[i], y[i]) pairs."""

    def __init__(self, X, y):
        # Convert numpy inputs to tensors; keep tensors as-is.
        # BUG FIX: the original only assigned inside `if not torch.is_tensor(...)`,
        # so passing an existing tensor left self.X / self.y unset and raised
        # AttributeError on first access.
        self.X = X if torch.is_tensor(X) else torch.from_numpy(X)
        self.y = y if torch.is_tensor(y) else torch.from_numpy(y)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]
def moons_dataset():
    """Build the two-moons toy dataset, one-hot labelled, as a PrepareData Dataset."""
    features, labels = make_moons(n_samples=1000, noise=.1)
    onehot = to_categorical(labels, 2)
    return PrepareData(X=features, y=onehot)
|
import requests
from datetime import datetime, timedelta
import apiKey
import json
keyMash = apiKey.apiMashape()
def trovaGiornata():
    """Return the current Serie A matchday: the 1-based index of the first
    round whose end date has not passed yet."""
    resp = requests.get("https://sportsop-soccer-sports-open-data-v1.p.mashape.com/v1/leagues/serie-a/seasons/16-17/rounds", headers={"X-Mashape-Key": keyMash, "Accept": "application/json"})
    data = resp.json()
    # Midnight of today, to compare against date-only round boundaries.
    today = datetime.now()
    today = datetime(today.year, today.month, today.day)
    matchday = 1
    for rnd in data['data']['rounds']:
        end = rnd['end_date']
        end = datetime(int(end[:4]), int(end[5:7]), int(end[8:10]))
        if today <= end:
            break
        matchday += 1
    return matchday
def partiteGior(gior):
    """Return [header, body]: a title line plus one 'home-away result' line
    per match of matchday `gior`."""
    round_slug = 'giornata-'+str(gior)
    resp = requests.get(("https://sportsop-soccer-sports-open-data-v1.p.mashape.com/v1/leagues/serie-a/seasons/16-17/rounds/"+round_slug+"/matches"), headers={"X-Mashape-Key": keyMash, "Accept": "application/json"})
    data = resp.json()
    lines = []
    for match in data['data']['matches']:
        lines.append(match['home']['team']+'-'+match['away']['team']+' '+match['match_result']+'\n')
    body = ''.join(lines)
    header = 'Ecco le partite della '+str(gior)+'a giornata'
    return [header, body]
def partiteOggiDom(gior, st):
    """Return 'HH:MM home-away' lines for the matches of matchday `gior`
    played today (st == 'oggi') or tomorrow (any other value)."""
    round_slug = 'giornata-'+str(gior)
    resp = requests.get(("https://sportsop-soccer-sports-open-data-v1.p.mashape.com/v1/leagues/serie-a/seasons/16-17/rounds/"+round_slug), headers={"X-Mashape-Key": keyMash, "Accept": "application/json"})
    data = resp.json()
    now = datetime.now()
    # Target date at midnight: today, or tomorrow for anything else.
    target = datetime(now.year, now.month, now.day)
    if st != 'oggi':
        target = target + timedelta(days=1)
    schedule = ''
    for match in data['data']['rounds'][0]['matches']:
        stamp = match['date_match']
        match_day = datetime(int(stamp[:4]), int(stamp[5:7]), int(stamp[8:10]))
        if match_day == target:
            hour = match['date_match'][11:16]
            schedule += hour+' '+match['home_team']+'-'+match['away_team']+' '+'\n'
    return schedule
def classifica():
    """Return the league table as text, one line per team:
    'pos. team (points)(V:wins,P:losses,Pa:draws)'."""
    resp = requests.get("https://sportsop-soccer-sports-open-data-v1.p.mashape.com/v1/leagues/serie-a/seasons/16-17/standings", headers={"X-Mashape-Key":keyMash, "Accept": "application/json"})
    data = resp.json()
    table = ''
    for row in data['data']['standings']:
        overall = row['overall']
        # Label order follows the original: V = wins, P = losses, Pa = draws.
        table += (str(row['position'])+'. '+row['team']+' ('+str(overall['points'])
                  +')(V:'+str(overall['wins'])+',P:'+str(overall['losts'])
                  +',Pa:'+str(overall['draws'])+')\n')
    return table
def live(giorn, num):
    """Return an HTML-flavoured summary of live scores for matchday `giorn`.

    `num` is the number of API requests already made today; at 20 or more
    the daily-quota message is returned instead of calling the API.
    """
    cont = num
    mes = 'Ecco i risultati live delle partite: '
    if cont < 20:
        ri = requests.get("https://heisenbug-seriea-essentials-v1.p.mashape.com/api/live/seriea?gamenumber="+str(giorn), headers={"X-Mashape-Key":keyMash, "Accept": "application/json"})
        d = ri.json()
        for i in d['matches']:
            # Per-match accumulators: goal logs (gt1/gt2) and scores (st1/st2).
            gt1=''; gt2=''; st1=''; st2=''
            if 'teamScore' in i['team1']:
                st1 = str(i['team1']['teamScore'])
            if 'goals' in i['team1']:
                # One "minute player" line per goal, newline-separated.
                for g in i['team1']['goals']:
                    if gt1 != '':
                        gt1+='\n'+g['minute']+' '+g['player']
                    else:
                        gt1=g['minute']+' '+g['player']
            if 'teamScore' in i['team2']:
                st2 = str(i['team2']['teamScore'])
            if 'goals' in i['team2']:
                for g in i['team2']['goals']:
                    if gt2 != '':
                        gt2+='\n'+g['minute']+' '+g['player']
                    else:
                        gt2=g['minute']+' '+g['player']
            # No score for team1 means the match has not started: skip it.
            if st1 == '':
                continue
            else:
                mes+='\n<b>'+i['team1']['teamName']+' '+st1+' - '+st2+' '+i['team2']['teamName']+'</b>'
                if gt1 != '':mes+='\n'+gt1
                if gt2 != '':mes+='\n'+gt2
        # Message still untouched -> every match was skipped above.
        if mes == 'Ecco i risultati live delle partite: ':
            mes = 'Le partite della prossima giornata non sono ancora iniziate'
    else:
        mes='Numero massimo richieste gionaliere raggiunto\nContatta @infopz per maggiori infomazioni'
    return mes
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from utils.collections import AttrDict
import six
import yaml
import torch
import torch.nn as nn
from torch.nn import init
import numpy as np
import copy
from ast import literal_eval
# Global config object; import `cfg` elsewhere to read/override these defaults.
__C = AttrDict()
cfg = __C
# ---------------------------------------------------------------------------
# Model options
# ---------------------------------------------------------------------------
__C.MODEL = AttrDict()
__C.MODEL.NUM_CLASSES = -1
__C.MODEL.TYPE = ''
__C.MODEL.SIZE = '300'
__C.MODEL.CONV_BODY = ''
__C.MODEL.REFINE = False
__C.MODEL.LOAD_PRETRAINED_WEIGHTS = False
__C.MODEL.PRETRAIN_WEIGHTS = ''
__C.MODEL.OBJECT_SCORE = 0.01
# ---------------------------------------------------------------------------
# Training options
# ---------------------------------------------------------------------------
__C.TRAIN = AttrDict()
__C.TRAIN.OVERLAP = 0.5
__C.TRAIN.OHEM = True
__C.TRAIN.NEG_RATIO = 3
__C.TRAIN.FOCAL_LOSS = False
__C.TRAIN.FOCAL_LOSS_TYPE = 'SOFTMAX'
__C.TRAIN.BGR_MEAN = [104, 117, 123]
__C.TRAIN.BATCH_SIZE = 1
__C.TRAIN.CHANNEL_SIZE = '48'
__C.TRAIN.WARMUP = True
__C.TRAIN.WARMUP_EPOCH = 2
__C.TRAIN.DEVICE_IDS = [0]
__C.TRAIN.TRAIN_ON = True
# ---------------------------------------------------------------------------
# Prior/anchor settings for the 300x300 ("SMALL") input configuration
# ---------------------------------------------------------------------------
__C.SMALL = AttrDict()
__C.SMALL.FEATURE_MAPS = [[38, 38], [19, 19], [10, 10], [5, 5], [3, 3], [1, 1]]
__C.SMALL.ARM_CHANNELS = [512, 1024, 512, 256, 256, 256]
__C.SMALL.ODM_CHANNELS = [256, 256, 256, 256]
__C.SMALL.NUM_ANCHORS = [4, 6, 6, 6, 4, 4]
__C.SMALL.STEPS = [[8, 8], [16, 16], [32, 32], [64, 64], [100, 100],
                   [300, 300]]
__C.SMALL.MIN_SIZES = [30, 60, 111, 162, 213, 264]
__C.SMALL.MAX_SIZES = [60, 111, 162, 213, 264, 315]
__C.SMALL.ASPECT_RATIOS = [[2, 0.5], [2, 3, 0.5, 0.333], [2, 3, 0.5, 0.333],
                           [2, 3, 0.5, 0.333], [2, 0.5], [2, 0.5]]
__C.SMALL.VARIANCE = [0.1, 0.2]
__C.SMALL.CLIP = True
__C.SMALL.IMG_WH = [300, 300]
__C.SMALL.INPUT_FIXED = True
__C.SMALL.USE_MAX_SIZE = True
# ---------------------------------------------------------------------------
# Prior/anchor settings for the 512x512 ("BIG") input configuration
# ---------------------------------------------------------------------------
__C.BIG = AttrDict()
__C.BIG.FEATURE_MAPS = [[64, 64], [32, 32], [16, 16], [8, 8], [4, 4], [2, 2],
                        [1, 1]]
__C.BIG.ARM_CHANNELS = [512, 1024, 512, 256, 256, 256, 256]
__C.BIG.ODM_CHANNELS = [256, 256, 256, 256]
__C.BIG.NUM_ANCHORS = [4, 6, 6, 6, 6, 4, 4]
__C.BIG.STEPS = [[8, 8], [16, 16], [32, 32], [64, 64], [128, 128], [256, 256],
                 [512, 512]]
__C.BIG.MIN_SIZES = [35.84, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8]
__C.BIG.MAX_SIZES = [76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6]
__C.BIG.ASPECT_RATIOS = [[2, 0.5], [2, 3, 0.5, 0.333], [2, 3, 0.5, 0.333],
                         [2, 3, 0.5, 0.333], [2, 3, 0.5, 0.333], [2, 0.5],
                         [2, 0.5]]
__C.BIG.VARIANCE = [0.1, 0.2]
__C.BIG.CLIP = True
__C.BIG.IMG_WH = [512, 512]
__C.BIG.INPUT_FIXED = True
__C.BIG.USE_MAX_SIZE = True
# ---------------------------------------------------------------------------
# Optimizer / schedule options
# ---------------------------------------------------------------------------
__C.SOLVER = AttrDict()
__C.SOLVER.WEIGHT_DECAY = 0.0005
__C.SOLVER.BASE_LR = 0.001
__C.SOLVER.GAMMA = 0.1
__C.SOLVER.MOMENTUM = 0.9
__C.SOLVER.EPOCH_STEPS = []
__C.SOLVER.END_EPOCH = 1
__C.SOLVER.START_EPOCH = 0
# ---------------------------------------------------------------------------
# Dataset locations and split definitions
# ---------------------------------------------------------------------------
__C.DATASETS = AttrDict()
VOCROOT = 'data/datasets/VOCdevkit0712/'
COCOROOT = 'data/datasets/coco2015'
__C.DATASETS.TRAIN_TYPE = []
__C.DATASETS.VAL_TYPE = []
__C.DATASETS.DATAROOT = VOCROOT
__C.DATASETS.DATA_TYPE = ''
__C.DATASETS.SETS = AttrDict()
__C.DATASETS.SETS.VOC = [['0712', '0712_trainval']]
__C.DATASETS.SETS.VOC0712PLUS = [['0712', '0712_trainval_test']]
__C.DATASETS.SETS.VOC0712 = [['2012', '2012_trainval']]
__C.DATASETS.SETS.VOC2007 = [['0712', "2007_test"]]
__C.DATASETS.SETS.COCO = [['2014', 'train'], ['2014', 'valminusminival']]
__C.DATASETS.SETS.COCOval = [['2014', 'minival']]
__C.DATASETS.SETS.VOCROOT = VOCROOT
__C.DATASETS.SETS.COCOROOT = COCOROOT
# ---------------------------------------------------------------------------
# Inference / evaluation options
# ---------------------------------------------------------------------------
__C.TEST = AttrDict()
__C.TEST.INPUT_WH = [300, 300]
__C.TEST.CONFIDENCE_THRESH = 0.01
__C.TEST.NMS_TYPE = 'NMS'
__C.TEST.NMS_OVERLAP = 0.45
__C.TEST.BATCH_SIZE = 16
# Class name tuples; index 0 is always the background class.
VOC_CLASSES = (
    '__background__', # always index 0
    'aeroplane',
    'bicycle',
    'bird',
    'boat',
    'bottle',
    'bus',
    'car',
    'cat',
    'chair',
    'cow',
    'diningtable',
    'dog',
    'horse',
    'motorbike',
    'person',
    'pottedplant',
    'sheep',
    'sofa',
    'train',
    'tvmonitor')
COCO_CLASSES = ('__background__', 'person', 'bicycle', 'car', 'motorbike',
                'aeroplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
                'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
                'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                'kite', 'baseball bat', 'baseball glove', 'skateboard',
                'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
                'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                'donut', 'cake', 'chair', 'sofa', 'pottedplant', 'bed',
                'diningtable', 'toilet', 'tvmonitor', 'laptop', 'mouse',
                'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
                'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
                'scissors', 'teddy bear', 'hair drier', 'toothbrush')
def merge_cfg_from_file(cfg_filename):
    """Load a yaml config file and merge it into the global config.

    Uses yaml.safe_load: plain ``yaml.load`` without an explicit Loader can
    construct arbitrary Python objects from an untrusted file, and that call
    form is deprecated in PyYAML >= 5.1.
    """
    with open(cfg_filename, 'r') as f:
        yaml_cfg = AttrDict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
# Backwards-compatible alias.
cfg_from_file = merge_cfg_from_file
def merge_cfg_from_cfg(cfg_other):
    """Merge `cfg_other` (an AttrDict) into the global config.

    Values in `cfg_other` clobber the corresponding defaults; keys that do
    not already exist in the global config raise KeyError (see
    _merge_a_into_b).
    """
    _merge_a_into_b(cfg_other, __C)
def _merge_a_into_b(a, b, stack=None):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Raises KeyError for keys of `a` absent from `b`, and ValueError (via
    _check_and_coerce_cfg_value_type) on uncoercible type mismatches.
    """
    assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
    assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
    for k, v_ in a.items():
        # Dotted path for error messages, e.g. 'TRAIN.BATCH_SIZE'.
        full_key = '.'.join(stack) + '.' + k if stack is not None else k
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('Non-existent config key: {}'.format(full_key))
        v = copy.deepcopy(v_)
        v = _decode_cfg_value(v)
        v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
        # Recursively merge dicts; plain values overwrite b's entry.
        # (The original wrapped this call in `try/except BaseException: raise`,
        # which only re-raised and had no effect; removed.)
        if isinstance(v, AttrDict):
            stack_push = [k] if stack is None else stack + [k]
            _merge_a_into_b(v, b[k], stack=stack_push)
        else:
            b[k] = v
def _decode_cfg_value(v):
    """Decodes a raw config value (e.g., from a yaml config file or command
    line argument) into a Python object.
    """
    # yaml-parsed mappings become AttrDict so attribute access works.
    if isinstance(v, dict):
        return AttrDict(v)
    # Only strings need further interpretation.
    if not isinstance(v, six.string_types):
        return v
    # literal_eval turns "1", "[1, 2]", "True", "None", ... into real Python
    # objects.  A bare word like foo is not a valid literal and raises
    # ValueError, while something like foo/bar raises SyntaxError; in both
    # cases the value really is a string, so we keep it unchanged.
    try:
        return literal_eval(v)
    except (ValueError, SyntaxError):
        return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, six.string_types):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key))
return value_a |
from datetime import datetime, timedelta
import dateutil
import time
import prometheus_client as pc
from sqlalchemy import asc, desc
from flask import (
render_template,
flash,
redirect,
url_for,
request,
g,
jsonify,
current_app,
Response,
)
from app import db, documents
from app.models import (
Provider,
Circuit,
Maintenance,
MaintCircuit,
ApschedulerJobs,
MaintUpdate,
)
from app.main import bp
from app.main.forms import AddCircuitForm, AddCircuitContract, EditCircuitForm
from app.jobs.main import process, failed_messages
# @todo: expose some interesting metrics
# Prometheus counter incremented on every request to '/'; exported at /metrics.
MAIN_REQUESTS = pc.Counter(
    'main_page_requests_total', 'total requests for the / route.'
)
@bp.route('/metrics')
def metrics():
    """Expose Prometheus metrics in the text exposition format (v0.0.4)."""
    return Response(
        pc.generate_latest(), mimetype='text/plain; version=0.0.4; charset=utf-8'
    )
@bp.route('/', methods=['GET', 'POST'])
def main():
    """Dashboard: upcoming maintenances (paginated) plus the past week's.

    POST schedules an immediate mail-processing run and redirects back.
    """
    MAIN_REQUESTS.inc()
    # Next scheduled run of the background 'run_loop' job, if it exists.
    next_run = db.session.query(ApschedulerJobs).filter_by(id='run_loop').first()
    if next_run:
        next_run = datetime.utcfromtimestamp(next_run.next_run_time)
    if request.method == 'POST':
        # One-off job; replace_existing avoids piling up duplicate 'run_now' jobs.
        current_app.apscheduler.add_job(
            id='run_now', replace_existing=True, func=process
        )
        flash('emails are currently being processed')
        return redirect(url_for('main.main'))
    page = request.args.get('page', 1, type=int)
    now = datetime.now().date()
    last_week = now - timedelta(days=7)
    # Future maintenances, oldest first, paginated.
    maintenances = (
        MaintCircuit.query.filter(MaintCircuit.date > now)
        .order_by(asc(MaintCircuit.date))
        .paginate(page, current_app.config['POSTS_PER_PAGE'], False)
    )
    # Maintenances from the last 7 days, newest first.
    recent = (
        MaintCircuit.query.filter(MaintCircuit.date <= now)
        .filter(MaintCircuit.date > last_week)
        .order_by(desc(MaintCircuit.date))
        .all()
    )
    next_url = (
        url_for('main.main', page=maintenances.next_num)
        if maintenances.has_next
        else None
    )
    prev_url = (
        url_for('main.main', page=maintenances.prev_num)
        if maintenances.has_prev
        else None
    )
    return render_template(
        'main.html',
        title='main',
        upcoming=maintenances.items,
        recent=recent,
        prev_url=prev_url,
        next_url=next_url,
        next_run=next_run,
    )
@bp.route('/maintenances', methods=['GET', 'POST'])
def maintenances():
    """Paginated list of all maintenance records."""
    page = request.args.get('page', 1, type=int)
    paged = Maintenance.query.paginate(
        page, current_app.config['POSTS_PER_PAGE'], False
    )
    next_url = None
    if paged.has_next:
        next_url = url_for('main.maintenances', page=paged.next_num)
    prev_url = None
    if paged.has_prev:
        prev_url = url_for('main.maintenances', page=paged.prev_num)
    return render_template(
        'maintenances.html',
        title='main',
        maintenances=paged.items,
        prev_url=prev_url,
        next_url=next_url,
    )
@bp.route('/providers', methods=['GET', 'POST'])
def providers():
    """Paginated list of all providers."""
    page = request.args.get('page', 1, type=int)
    paged = Provider.query.paginate(
        page, current_app.config['POSTS_PER_PAGE'], False
    )
    next_url = url_for('main.providers', page=paged.next_num) if paged.has_next else None
    prev_url = url_for('main.providers', page=paged.prev_num) if paged.has_prev else None
    return render_template(
        'providers.html',
        title='providers',
        providers=paged.items,
        next_url=next_url,
        prev_url=prev_url,
    )
@bp.route('/circuits', methods=['GET', 'POST'])
def circuits():
    """List circuits and handle the add-circuit form (with contract upload)."""
    provider_choices = [(p.id, p.name) for p in Provider.query.all()]
    form = AddCircuitForm()
    form.provider.choices = provider_choices
    if form.validate_on_submit():
        # Persist the uploaded contract document first, then the circuit row.
        filename = documents.save(request.files['circuit_contract'])
        new_circuit = Circuit(
            provider_cid=form.provider_cid.data,
            a_side=form.a_side.data,
            z_side=form.z_side.data,
            provider_id=form.provider.data,
            contract_filename=filename,
        )
        db.session.add(new_circuit)
        db.session.commit()
        flash('circuit added successfully!')
        return redirect(url_for('main.circuits'))
    page = request.args.get('page', 1, type=int)
    paged = Circuit.query.paginate(page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.circuits', page=paged.next_num) if paged.has_next else None
    prev_url = url_for('main.circuits', page=paged.prev_num) if paged.has_prev else None
    return render_template(
        'circuits.html',
        title='circuits',
        form=form,
        circuits=paged.items,
        next_url=next_url,
        prev_url=prev_url,
    )
@bp.route('/maintenances/<maintenance_id>', methods=['GET', 'POST'])
def maintenance_detail(maintenance_id):
    """Detail view for one maintenance: its updates and affected circuits.

    Returns 404 when the maintenance id is unknown.
    """
    maintenance = Maintenance.query.filter_by(id=maintenance_id).first_or_404()
    updates = MaintUpdate.query.filter_by(maintenance_id=maintenance.id).all()
    # FIX: removed an unused `page = request.args.get(...)` local — this view
    # does not paginate.
    circuits = MaintCircuit.query.filter_by(maint_id=maintenance_id).all()
    return render_template(
        'maintenance_detail.html',
        title='Maintenance Info',
        circuits=circuits,
        maintenance=maintenance,
        updates=updates,
    )
@bp.route('/circuits/<circuit_id>', methods=['GET', 'POST'])
def circuit_detail(circuit_id):
    """Show one circuit, allow edits, and list its maintenances (paginated)."""
    circuit = Circuit.query.filter_by(id=circuit_id).first_or_404()
    form = EditCircuitForm()
    form.provider.choices = [(p.id, p.name) for p in Provider.query.all()]
    if form.validate_on_submit():
        # Replace the stored contract only when a new file was uploaded.
        if request.files.get('circuit_contract'):
            circuit.contract_filename = documents.save(request.files['circuit_contract'])
        circuit.a_side = form.a_side.data
        circuit.z_side = form.z_side.data
        circuit.provider_id = form.provider.data
        db.session.add(circuit)
        db.session.commit()
        flash('circuit updated successfully!')
        return redirect(url_for('main.circuit_detail', circuit_id=circuit_id))
    page = request.args.get('page', 1, type=int)
    # Maintenance history for this circuit, newest first.
    maints = (
        MaintCircuit.query.filter_by(circuit_id=circuit_id)
        .order_by(desc(MaintCircuit.date))
        .paginate(page, current_app.config['POSTS_PER_PAGE'], False)
    )
    next_url = None
    if maints.has_next:
        next_url = url_for('main.circuit_detail', page=maints.next_num, circuit_id=circuit_id)
    prev_url = None
    if maints.has_prev:
        prev_url = url_for('main.circuit_detail', page=maints.prev_num, circuit_id=circuit_id)
    return render_template(
        'circuit_detail.html',
        circuit=circuit,
        maints=maints.items,
        next_url=next_url,
        prev_url=prev_url,
        form=form,
    )
@bp.route('/providers/<provider_id>')
def provider_detail(provider_id):
    """Detail view for one provider with a paginated list of its circuits."""
    page = request.args.get('page', 1, type=int)
    provider = Provider.query.filter_by(id=provider_id).first_or_404()
    # The template receives both the full circuit list and the current page
    # object (note: the pagination object itself is passed as `circuits`).
    all_circuits = Circuit.query.filter_by(provider_id=provider_id).all()
    paged = Circuit.query.filter_by(provider_id=provider_id).paginate(
        page, current_app.config['POSTS_PER_PAGE'], False
    )
    next_url = None
    if paged.has_next:
        next_url = url_for('main.provider_detail', page=paged.next_num, provider_id=provider_id)
    prev_url = None
    if paged.has_prev:
        prev_url = url_for('main.provider_detail', page=paged.prev_num, provider_id=provider_id)
    return render_template(
        'provider_detail.html',
        circuits=paged,
        provider=provider,
        next_url=next_url,
        prev_url=prev_url,
        all_circuits=all_circuits,
    )
@bp.route('/failed')
def failed():
    """Render the failed-messages page."""
    template = 'failed.html'
    return render_template(template)
@bp.route('/failedmessages')
def failedmessages():
    """Delegate to the jobs module's failed-messages report."""
    report = failed_messages()
    return report
|
#!/usr/bin/env python
# coding: utf-8
import simpy
import datetime
import pandas as pd
import logging
from enum import Enum
import random
from itertools import repeat
from ruamel.yaml import YAML
from datetime import timedelta
# File-based logging: every simulation event is appended to a dedicated file
# (mode='w' truncates it on each run).
log_filename = "logs-10.log"
mainLogger = logging.getLogger()  # root logger, shared by the whole module
fhandler = logging.FileHandler(filename=log_filename, mode='w')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
mainLogger.addHandler(fhandler)
mainLogger.setLevel(logging.DEBUG)
mainLogger.debug("test")  # smoke-test entry confirming the handler is wired up
class Metric(Enum):
    """Names of the time-series metrics collected during a simulation run."""
    RW = "Requests Waiting"  # depth of the resource wait queue
    BS = "Busy Slots"        # resource capacity currently in use
    AU = "Active Users"      # number of simulated users created so far
class User:
    """A simulated user that executes its scenario's steps as a simpy process."""

    def __init__(self, id, scenario, world):
        # `scenario` is a mapping parsed from the session YAML (has 'name' and
        # 'steps' keys — see World.load_scenarios); `world` owns the simpy env.
        self.id = id
        self.scenario = scenario
        self._world = world
        self.taskid = 0  # monotonically increasing per-user task counter
        self.create()
        # Start the run process everytime an instance is created.
        # create itself as a processs
        self.action = self._world.env.process(self.run())

    def create(self):
        """Stamp the entry time, build the display name, register with the monitor."""
        self.enteringAt = self._world.env.now
        self.name = "User-%03d" % self.id
        mainLogger.info(f"user created {self.name}")
        self._world.user_monitor.report_new_user(self)

    def run_old(self):
        # NOTE(review): legacy flat task loop, superseded by `run` below.
        # Its final debug line logs `mark` (the task's *start* time) as the end
        # time — presumably a leftover bug; harmless while this stays unused.
        while True:
            self.taskid += 1
            for task in self.scenario.tasks:
                taskname = task['Name']
                task_duration = task['Duration']
                mark = self._world.env.now
                mainLogger.debug(f"{self.name} starts task {taskname} at %d" % mark)
                if 'Res' in task:
                    self._world.user_monitor.report_start(
                        self.name,
                        self.scenario,
                        taskname,
                        self.taskid)
                    # We yield the process that process() returns
                    # to wait for it to finish
                    amount = task['Res']
                    yield self._world.env.process(self.process_task(task_duration, amount))
                    self._world.user_monitor.report_stop(
                        self.name,
                        self.scenario,
                        taskname,
                        self.taskid)
                else:
                    # wait some time even if no tracked
                    yield self._world.env.timeout(task_duration)
                mainLogger.debug(f"{self.name} ends task {taskname} at %d" % mark)

    def run(self):
        """Walk the scenario: optional 'init' step, 'loop' step (bounded when a
        'repeat' count is present), then optional 'finally' step."""
        scenario = self.scenario
        mainLogger.debug(f"entering scenario: {scenario['name']}")
        mainLogger.debug(f"steps: {scenario['steps']}")
        if 'init' in scenario['steps']:
            mainLogger.debug("has init")
            mainLogger.debug("run_step_tasks init")
            process = self.run_step_tasks(scenario['steps']['init']['tasks'])
            yield self._world.env.process(process)
        if 'loop' in scenario['steps']:
            mainLogger.debug("has loop")
            step_loop = scenario['steps']['loop']
            if 'repeat' in step_loop:
                counter = 0
                while counter < step_loop['repeat']:
                    mainLogger.debug("run_step_tasks loop")
                    process = self.run_step_tasks(scenario['steps']['loop']['tasks'])
                    yield self._world.env.process(process)
                    counter += 1
            else:
                # NOTE(review): the log message says "infinite", but this
                # branch performs exactly one pass — confirm intended behavior.
                mainLogger.debug("run_step_tasks loop infinite")
                process = self.run_step_tasks(scenario['steps']['loop']['tasks'])
                yield self._world.env.process(process)
        if 'finally' in scenario['steps']:
            mainLogger.debug("has finally")
            mainLogger.debug("run_step_tasks finally")
            process = self.run_step_tasks(scenario['steps']['finally']['tasks'])
            yield self._world.env.process(process)

    def run_step_tasks(self, tasks):
        """Run each task of a step sequentially, each as its own sub-process."""
        mainLogger.debug(f"entering run_step_tasks {tasks}")
        for task in tasks:
            mainLogger.debug(f"run_step_tasks::task: {task}")
            yield self._world.env.process(self.run_task(task))

    def run_task(self, task):
        """Run one task, honoring optional 'repeat', 'resources', 'parallel'
        and 'wait' keys from the YAML task definition."""
        mainLogger.debug(f"entering run_task {task} id:{self.taskid}")
        max_count = 1
        if 'repeat' in task:
            max_count = task['repeat']
        counter = 0
        while counter < max_count:
            self.taskid += 1
            mainLogger.debug(f"run task {task['name']} for {task['duration']}")
            if 'resources' in task:
                res_amount = task['resources']
                if 'parallel' in task:
                    mainLogger.debug("run_task in parallel")
                    # 'parallel' multiplies the resource demand of this task
                    res_amount = res_amount * task['parallel']
                mainLogger.debug(f"task resources amount {res_amount}")
                self._world.user_monitor.report_start(
                    self.name,
                    self.scenario['name'],
                    task['name'],
                    self.taskid)
                process = self.process_task(task['duration'], res_amount)
                yield self._world.env.process(process)
                self._world.user_monitor.report_stop(
                    self.name,
                    self.scenario['name'],
                    task['name'],
                    self.taskid)
                mainLogger.debug("task processing completed")
            else:
                mainLogger.debug(f"wait after task for {task['duration']}")
                yield self._world.env.timeout(task['duration'])
                mainLogger.debug("wait after task completed")
            if 'wait' in task:
                mainLogger.debug(f"manual task for {task['wait']}")
                yield self._world.env.timeout(task['wait'])
                mainLogger.debug("manual task completed")
            # increment counter
            counter += 1

    def process_task(self, duration, amount):
        """Acquire `amount` resource units, hold them for `duration` ticks, release."""
        mainLogger.debug("entering process task at %d" % self._world.env.now)
        with Job(self._world.res, amount) as req:
            yield req
            yield self._world.env.timeout(duration)
        mainLogger.debug("exiting process task at %d" % self._world.env.now)
class Clock:
    """Maps integer simulation ticks onto wall-clock datetimes.

    The epoch is captured at construction time; tick ``n`` corresponds to
    ``base + n * tick_interval``.
    """

    def __init__(self, tick_interval):
        self.tick_interval = tick_interval
        self.base_epoch = datetime.datetime.now().timestamp()
        mainLogger.info(f"Clock created - base {self.base_epoch}")

    def to_date(self, tick):
        """Return the datetime corresponding to simulation ``tick``."""
        offset = self.tick_interval * tick
        base = datetime.datetime.fromtimestamp(self.base_epoch)
        return base + offset
class UsersMonitor:
    """Collects per-task start/stop events reported by User processes."""

    def __init__(self, world):
        self._world = world
        self.start_data = []  # one dict per reported task start
        self.stop_data = []   # one dict per reported task completion
        self.users = []       # every user ever registered

    def report_new_user(self, user):
        """Register a freshly created user."""
        self.users.append(user)

    def report_start(self, username, scenarioname, taskname, taskid):
        """Record that a task started at the current simulation time."""
        now = self._world.env.now
        record = dict(
            StartMark=now,
            Start=self._world.clock.to_date(now),
            Username=username,
            Scenario=scenarioname,
            Task=taskname,
            TaskId=taskid
        )
        self.start_data.append(record)

    def report_stop(self, username, scenarioname, taskname, taskid):
        """Record that a task finished at the current simulation time."""
        now = self._world.env.now
        record = dict(
            FinishMark=now,
            Finish=self._world.clock.to_date(now),
            Username=username,
            Scenario=scenarioname,
            Task=taskname,
            TaskId=taskid
        )
        self.stop_data.append(record)

    def collect(self):
        """Left-join starts with stops and return a DataFrame with durations."""
        starts = pd.DataFrame(self.start_data)
        stops = pd.DataFrame(self.stop_data)
        merged = pd.merge(starts, stops, how='left',
                          on=['Username', 'Scenario', 'Task', 'TaskId'])
        merged['Duration'] = merged['FinishMark'] - merged['StartMark']
        return merged
# wake up every tick and collect
class UsersGenerator:
    """Ramps up the simulated user population, one batch per tick.

    Creates up to ``rampup_batch_size`` users every tick until
    ``max_nb_users`` exist, assigning scenarios round-robin from the world's
    (pre-shuffled, weight-expanded) scenario index.
    """

    def __init__(self, world, max_nb_users=10, rampup_batch_size=1):
        self._world = world
        self._max_nb_users = max_nb_users
        self._rampup_batch_size = rampup_batch_size
        mainLogger.info("creating user generator for %s users", self._max_nb_users)
        self.data = []          # sampled Active Users metric rows
        self.active_users = []  # users created so far
        self.user_count = 0
        # Register the generator itself as a simpy process.
        self.action = world.env.process(self.run())

    def run(self):
        """Process body: create one batch of users per tick until the cap."""
        while True:
            if self.user_count < self._max_nb_users:
                # FIX: the original ran `range(1, batch_size)` and then called
                # create_user() once more unconditionally — correct in total
                # count but obscure — and it could overshoot max_nb_users when
                # the remaining headroom was smaller than a full batch.
                batch = min(self._rampup_batch_size,
                            self._max_nb_users - self.user_count)
                for _ in range(batch):
                    self.create_user()
                self.report()
            tick_duration = 1
            yield self._world.env.timeout(tick_duration)

    def create_user(self):
        """Instantiate one user with the next scenario from the index."""
        i_scenario_index = self.user_count % len(self._world.scenarios_index)
        i_scenario = self._world.scenarios_index[i_scenario_index]
        scenario = self._world.scenarios[i_scenario]
        # first user is labelled -001
        self.user_count += 1
        user = User(self.user_count,
                    scenario,
                    self._world)
        self.active_users.append(user)
        mark = self._world.env.now
        mainLogger.debug(f"{len(self.active_users)} active users at %d" % mark)

    def report(self):
        """Sample the Active Users metric at the current tick."""
        mark = self._world.env.now
        active_users_count = len(self.active_users)
        self.data.append(
            dict(
                Mark=mark,
                Timestamp=self._world.clock.to_date(mark),
                Metric=Metric.AU.value,
                Value=active_users_count
            )
        )

    def collect(self):
        """Return all sampled Active Users rows as a DataFrame."""
        return pd.DataFrame(self.data)
# In[13]:
class Job:
    """Context manager that acquires ``items`` units from a resource container
    on entry and returns them on exit.

    Delegates to the container's own get/put request context managers.
    """

    def __init__(self, res, items=1):
        self.res = res      # Container-style resource with get()/put()
        self.items = items  # amount of capacity this job consumes
        mainLogger.debug(f"creating job with amount {self.items}")

    def __enter__(self):
        mainLogger.debug("__enter__")
        return self.res.get(self.items).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        mainLogger.debug("__exit__")
        # FIX: the original string was missing its f-prefix, so the
        # placeholders were logged literally instead of being interpolated.
        mainLogger.debug(f"exc_type {exc_type} exc_val {exc_val} exc_tb {exc_tb}")
        self.res.put(self.items).__exit__(exc_type, exc_val, exc_tb)
class SystemResource(simpy.resources.container.Container):
    """Container that records usage and queue depth on every get/put."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        mainLogger.info(f"create resource with capacity {self.capacity}")
        self.processing_data = []  # (time, units in use) samples
        self.waiting_data = []     # (time, wait-queue length) samples
        mainLogger.info(f"initial level {self.level}")

    def _snapshot(self, verb, amount):
        """Log current resource state and append usage/wait samples."""
        now = self._env.now
        mainLogger.debug(f"received {verb} resource - amount {amount} at %d" % now)
        mainLogger.debug(f"level (available) {self.level} at %d" % now)
        mainLogger.debug(f"{len(self.get_queue)} waiting at %d" % now)
        mainLogger.debug(f"{self.used()} processing at %d" % now)
        self.processing_data.append((now, self.used()))
        self.waiting_data.append((now, len(self.get_queue)))

    def get(self, *args, **kwargs):
        """Record state, then forward the get request to the container."""
        self._snapshot("request", args[0])
        return super().get(*args, **kwargs)

    def put(self, *args, **kwargs):
        """Record state, then forward the put request to the container."""
        self._snapshot("release", args[0])
        return super().put(*args, **kwargs)

    def used(self):
        """Capacity currently in use (capacity minus available level)."""
        return self.capacity - self.level
# NOTE(review): legacy variant kept for reference; the active implementation
# is the Container-based SystemResource above. Presumably safe to delete once
# confirmed unused.
class SystemResource_old(simpy.Resource):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        mainLogger.info(f"create resource with capacity {self.capacity}")
    def request(self, *args, **kwargs):
        # log, then delegate to simpy.Resource
        mainLogger.debug("request resource at %d" % self._env.now)
        return super().request(*args, **kwargs)
    def release(self, *args, **kwargs):
        # log, then delegate to simpy.Resource
        mainLogger.debug("release resource at %d" % self._env.now)
        return super().release(*args, **kwargs)
# wake up every tick and collect
class SystemMonitoringAgent:
    """Simpy process that samples resource metrics once per tick."""

    def __init__(self, world):
        self._world = world
        mainLogger.info("creating agent")
        self.data = []  # accumulated metric rows
        # Register the sampling loop as a simpy process.
        self.action = world.env.process(self.run())

    def run(self):
        """Every tick, record Busy Slots and Requests Waiting."""
        while True:
            now = self._world.env.now
            busy = self._world.res.used()
            waiting = len(self._world.res.get_queue)
            mainLogger.debug(f"level {self._world.res.level} at %d" % now)
            mainLogger.debug(f"{busy} occupied slots at %d" % now)
            mainLogger.debug(f"{waiting} requests waiting at %d" % now)
            # Append BS first, then RW (same order as collected reports expect).
            for metric, value in ((Metric.BS, busy), (Metric.RW, waiting)):
                self.data.append(
                    dict(
                        Mark=now,
                        Timestamp=self._world.clock.to_date(now),
                        Metric=metric.value,
                        Value=value
                    )
                )
            tick_duration = 1
            yield self._world.env.timeout(tick_duration)

    def collect(self):
        """Return all sampled rows as a DataFrame."""
        return pd.DataFrame(self.data)
class World:
    """Wires together the simpy environment, resource, users, and monitors."""

    def __init__(self,
                 session_configuration,
                 nb_users=20,
                 resource_capacity=5,
                 rampup_batch_size=1,
                 tick_interval=timedelta(minutes=1)):
        mainLogger.info("creating simulation")
        self.load_scenarios(session_configuration)
        self._tick_interval = tick_interval
        self.env = simpy.Environment()
        # FIX: Clock.__init__ requires the tick interval; the original called
        # Clock() with no argument, which raises TypeError at construction.
        self.clock = Clock(tick_interval)
        self.res = SystemResource(self.env,
                                  init=resource_capacity,
                                  capacity=resource_capacity)
        self.user_monitor = UsersMonitor(self)
        self.user_gen = UsersGenerator(self,
                                       max_nb_users=nb_users,
                                       rampup_batch_size=rampup_batch_size)
        self.res_agent = SystemMonitoringAgent(self)

    # new
    def load_scenarios(self, session_configuration):
        """Parse the YAML session config and build a weighted scenario index."""
        yaml = YAML(typ='safe')  # default, if not specified, is 'rt' (round-trip)
        session = yaml.load(session_configuration)
        self.session_name = session['session']['name']
        mainLogger.info(f"session name: {self.session_name}")
        self.scenarios = session['session']['scenarios']
        # Each scenario appears `weight` times in the index so round-robin
        # assignment over the shuffled index respects the configured weights.
        self.scenarios_index = []
        for i in range(len(self.scenarios)):
            weight = self.scenarios[i]['weight']
            self.scenarios_index.extend(repeat(i, weight))
        # randomize index
        random.shuffle(self.scenarios_index)
        mainLogger.info(f"scenarios_index: {self.scenarios_index}")

    def start(self, sim_duration=20):
        """Run the simulation until `sim_duration` ticks have elapsed."""
        mainLogger.info("starting simulation")
        self.env.run(until=sim_duration)
|
# <gh_stars>0  (stray repository metadata; commented out so the module stays importable)
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: evmos/inflation/v1/genesis.proto
# NOTE(review): machine-generated module — change the .proto definition and
# re-run protoc instead of editing this file by hand.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from evmosproto.gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from evmosproto.evmos.inflation.v1 import inflation_pb2 as evmos_dot_inflation_dot_v1_dot_inflation__pb2
# File descriptor carrying the serialized genesis.proto schema.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='evmos/inflation/v1/genesis.proto',
  package='evmos.inflation.v1',
  syntax='proto3',
  serialized_options=b'Z*github.com/tharsis/evmos/x/inflation/types',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n evmos/inflation/v1/genesis.proto\x12\x12\x65vmos.inflation.v1\x1a\x14gogoproto/gogo.proto\x1a\"evmos/inflation/v1/inflation.proto\"\x85\x01\n\x0cGenesisState\x12\x30\n\x06params\x18\x01 \x01(\x0b\x32\x1a.evmos.inflation.v1.ParamsB\x04\xc8\xde\x1f\x00\x12\x0e\n\x06period\x18\x02 \x01(\x04\x12\x18\n\x10\x65poch_identifier\x18\x03 \x01(\t\x12\x19\n\x11\x65pochs_per_period\x18\x04 \x01(\x03\"\xc6\x01\n\x06Params\x12\x12\n\nmint_denom\x18\x01 \x01(\t\x12Q\n\x17\x65xponential_calculation\x18\x02 \x01(\x0b\x32*.evmos.inflation.v1.ExponentialCalculationB\x04\xc8\xde\x1f\x00\x12O\n\x16inflation_distribution\x18\x03 \x01(\x0b\x32).evmos.inflation.v1.InflationDistributionB\x04\xc8\xde\x1f\x00:\x04\x98\xa0\x1f\x00\x42,Z*github.com/tharsis/evmos/x/inflation/typesb\x06proto3'
  ,
  dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,evmos_dot_inflation_dot_v1_dot_inflation__pb2.DESCRIPTOR,])
_GENESISSTATE = _descriptor.Descriptor(
  name='GenesisState',
  full_name='evmos.inflation.v1.GenesisState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='params', full_name='evmos.inflation.v1.GenesisState.params', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='period', full_name='evmos.inflation.v1.GenesisState.period', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='epoch_identifier', full_name='evmos.inflation.v1.GenesisState.epoch_identifier', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='epochs_per_period', full_name='evmos.inflation.v1.GenesisState.epochs_per_period', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=115,
  serialized_end=248,
)
_PARAMS = _descriptor.Descriptor(
  name='Params',
  full_name='evmos.inflation.v1.Params',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='mint_denom', full_name='evmos.inflation.v1.Params.mint_denom', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='exponential_calculation', full_name='evmos.inflation.v1.Params.exponential_calculation', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='inflation_distribution', full_name='evmos.inflation.v1.Params.inflation_distribution', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'\230\240\037\000',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=251,
  serialized_end=449,
)
# Resolve cross-message and cross-file field type references.
_GENESISSTATE.fields_by_name['params'].message_type = _PARAMS
_PARAMS.fields_by_name['exponential_calculation'].message_type = evmos_dot_inflation_dot_v1_dot_inflation__pb2._EXPONENTIALCALCULATION
_PARAMS.fields_by_name['inflation_distribution'].message_type = evmos_dot_inflation_dot_v1_dot_inflation__pb2._INFLATIONDISTRIBUTION
DESCRIPTOR.message_types_by_name['GenesisState'] = _GENESISSTATE
DESCRIPTOR.message_types_by_name['Params'] = _PARAMS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes generated from the descriptors above.
GenesisState = _reflection.GeneratedProtocolMessageType('GenesisState', (_message.Message,), {
  'DESCRIPTOR' : _GENESISSTATE,
  '__module__' : 'evmos.inflation.v1.genesis_pb2'
  # @@protoc_insertion_point(class_scope:evmos.inflation.v1.GenesisState)
  })
_sym_db.RegisterMessage(GenesisState)
Params = _reflection.GeneratedProtocolMessageType('Params', (_message.Message,), {
  'DESCRIPTOR' : _PARAMS,
  '__module__' : 'evmos.inflation.v1.genesis_pb2'
  # @@protoc_insertion_point(class_scope:evmos.inflation.v1.Params)
  })
_sym_db.RegisterMessage(Params)
DESCRIPTOR._options = None
_GENESISSTATE.fields_by_name['params']._options = None
_PARAMS.fields_by_name['exponential_calculation']._options = None
_PARAMS.fields_by_name['inflation_distribution']._options = None
_PARAMS._options = None
# @@protoc_insertion_point(module_scope)
|
from __future__ import unicode_literals
import csv
import datetime
import json
import logging
from collections import defaultdict
from enum import Enum, unique
from random import randint
from django.conf import settings
from django.contrib import messages
from django.core import serializers
from django.core.mail import EmailMultiAlternatives
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.db.models import Case, IntegerField, Q, Value, When
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template.loader import get_template
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import slugify
from localflavor.us.models import USStateField
from modelcluster.fields import ParentalKey
from wagtail.contrib.table_block.blocks import TableBlock
from wagtail.contrib.wagtailfrontendcache.utils import purge_page_from_cache
from wagtail.contrib.wagtailroutablepage.models import RoutablePageMixin, route
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
PageChooserPanel,
StreamFieldPanel
)
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailcore.models import Orderable, Page
from wagtail.wagtailcore.signals import page_published, page_unpublished
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from local_groups.forms import GroupCreateForm
from local_groups.models import Group
logger = logging.getLogger(__name__)
# Re-exported Django settings for convenient module-level access.
DEFAULT_FROM_EMAIL = settings.DEFAULT_FROM_EMAIL
SPLASH_DONATE_URL_DEFAULT = settings.SPLASH_DONATE_URL_DEFAULT
@unique
class AlertLevels(Enum):
    """Bootstrap-style alert levels as (sort order, display label) pairs."""
    success = (1, 'Success (Green)')
    info = (2, 'Info (Blue)')
    warning = (3, 'Warning (Yellow)')
    danger = (4, 'Danger (Red)')
class AboutPage(Page):
    """Static "About" page with board, staff, and donors sections."""
    board_description = RichTextField()
    board_list = RichTextField(
        blank=True,
        null=True
    )
    donors_description = RichTextField()
    staff_description = RichTextField()
    staff_list = RichTextField(
        blank=True,
        null=True
    )
    # Image used for social-media previews — presumably og:image; confirm in template.
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    content_panels = Page.content_panels + [
        FieldPanel('board_description'),
        FieldPanel('board_list'),
        FieldPanel('staff_description'),
        FieldPanel('staff_list'),
        FieldPanel('donors_description')
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class BasePage(Page):
    """Generic one-body content page with an optional social-preview image."""
    body = RichTextField(null=True, blank=True)
    # Image used for social-media previews — presumably og:image; confirm in template.
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    content_panels = Page.content_panels + [
        FieldPanel('body', classname="full")
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class MemberNewsletterIndexPage(Page):
    """Container page: lives under an IndexPage and holds only newsletter pages."""
    parent_page_types = ['pages.IndexPage']
    subpage_types = ['pages.MemberNewsletterPage']
class MemberNewsletterPage(Page):
    """One member newsletter issue, composed from a StreamField of blocks
    (rich-text sections, call-to-action buttons, images, and tables)."""
    # Choices for the colored call-to-action button in button_block.
    button_colors = (
        ('blue', 'Blue'),
        ('green', 'Green'),
        ('red', 'Red'),
    )
    header = RichTextField(
        blank=True,
        null=True
    )
    body = StreamField([
        ('white_block', blocks.RichTextBlock()),
        ('blue_block', blocks.RichTextBlock()),
        ('button_block', blocks.StructBlock([
            (('content'), blocks.RichTextBlock(
                required=False
            )),
            (('button_copy'), blocks.CharBlock(
                max_length=16,
                required=True
            )),
            (('button_url'), blocks.URLBlock(
                required=True
            )),
            (('button_color'), blocks.ChoiceBlock(
                choices=button_colors,
                max_length=16,
                required=False
            ))
        ])),
        ('image_block', blocks.StructBlock([
            ('header', blocks.RichTextBlock(required=False)),
            ('image', ImageChooserBlock()),
            ('caption', blocks.RichTextBlock(required=False)),
        ])),
        ('table_block', blocks.StructBlock([
            ('header', blocks.RichTextBlock(required=False)),
            ('table', TableBlock()),
            ('caption', blocks.RichTextBlock(required=False)),
        ])),
    ])
    # max length is based on Twitter 280 minus a link which is max 24
    share_copy = models.CharField(
        max_length=256,
        blank=True,
        null=True,
        help_text="""
        Copy that will be included in social posts when the share
        buttons at the bottom of the email are used."""
    )
    content_panels = Page.content_panels + [
        FieldPanel('header'),
        FieldPanel('share_copy'),
        StreamFieldPanel('body'),
    ]
    parent_page_types = ['pages.MemberNewsletterIndexPage']
    subpage_types = []
class MicrositePage(Page):
    """One-off landing page with fully CMS-controlled styling.

    Editors can override colors, header/footer, a call-to-action button, and
    up to two content areas; every color field takes a 6-digit CSS hex code.
    """
    color_help_text = '6 digit CSS color code.'
    # FIX: this was an accidental chained assignment
    # (`accent_border_color = button_background_color = models.CharField(...)`)
    # that bound both names to a single Field instance before
    # `button_background_color` was redefined below. Harmless at runtime only
    # by luck of the rebinding — each field now gets its own instance.
    accent_border_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    button_background_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    button_text = models.CharField(max_length=128, blank=True, null=True)
    button_text_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    button_url = models.URLField(null=True, blank=True)
    button_url_new_window = models.BooleanField(
        default=False,
        help_text='Open new window for button url.'
    )
    custom_favicon = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    custom_footer_background_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    custom_footer_background_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    custom_footer_content = RichTextField(null=True, blank=True)
    custom_footer_show = models.BooleanField(
        default=False,
        help_text='Show custom footer.'
    )
    custom_footer_text_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    custom_header_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    custom_header_show = models.BooleanField(
        default=False,
        help_text='Show custom header with image, button, links etc.'
    )
    custom_header_background_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    facebook_url = models.URLField(null=True, blank=True)
    primary_content = RichTextField()
    primary_content_background_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    primary_content_background_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    primary_content_embed_code = models.TextField(
        blank=True,
        null=True,
        help_text='Raw HTML embed code for signup form, etc.'
    )
    primary_content_text_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    secondary_content = RichTextField(null=True, blank=True)
    secondary_content_background_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    secondary_content_background_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    secondary_content_show = models.BooleanField(
        default=False,
        help_text='Show secondary content.'
    )
    secondary_content_text_color = models.CharField(
        max_length=6,
        blank=True,
        null=True,
        help_text=color_help_text
    )
    show_accent_border = models.BooleanField(
        default=False,
        help_text='Show solid accent border at top of page.'
    )
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    standard_header_show = models.BooleanField(
        default=True,
        help_text='Show standard global header at top of page.'
    )
    standard_footer_show = models.BooleanField(
        default=True,
        help_text='Show standard global footer at bottom of page.'
    )
    twitter_url = models.URLField(null=True, blank=True)
    # Editor UI: grouped, collapsible panels mirroring the page sections.
    content_panels = Page.content_panels + [
        ImageChooserPanel('custom_favicon'),
        MultiFieldPanel(
            [
                FieldPanel('show_accent_border'),
                FieldPanel('accent_border_color'),
                FieldPanel('standard_header_show'),
                FieldPanel('custom_header_show'),
                FieldPanel('custom_header_background_color'),
                FieldPanel('twitter_url'),
                FieldPanel('facebook_url'),
                ImageChooserPanel('custom_header_image'),
                FieldPanel('button_text'),
                FieldPanel('button_url'),
                FieldPanel('button_url_new_window'),
                FieldPanel('button_background_color'),
                FieldPanel('button_text_color'),
            ],
            heading="Header",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('primary_content'),
                FieldPanel('primary_content_embed_code'),
                FieldPanel('primary_content_background_color'),
                FieldPanel('primary_content_text_color'),
                ImageChooserPanel('primary_content_background_image'),
            ],
            heading="Primary Content",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('secondary_content_show'),
                FieldPanel('secondary_content'),
                FieldPanel('secondary_content_background_color'),
                FieldPanel('secondary_content_text_color'),
                ImageChooserPanel('secondary_content_background_image'),
            ],
            heading="Secondary Content",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('standard_footer_show'),
                FieldPanel('custom_footer_show'),
                FieldPanel('custom_footer_content'),
                FieldPanel('custom_footer_background_color'),
                FieldPanel('custom_footer_text_color'),
                ImageChooserPanel('custom_footer_background_image'),
            ],
            heading="Footer",
            classname="collapsible"
        )
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
@register_snippet
@python_2_unicode_compatible # provide equivalent __unicode__ and __str__ methods on Python 2
class NotificationBanner(models.Model):
    """Site-wide notification banner snippet: message, link, and a show toggle."""
    content = models.CharField(max_length=128)
    link_text = models.CharField(max_length=128)
    link_url = models.URLField()
    # Off by default so a freshly created banner is hidden until toggled on.
    show = models.BooleanField(
        default=False,
        help_text='Show notification banner on all pages.'
    )
    # Wagtail admin edit-form layout.
    panels = [
        FieldPanel('content'),
        FieldPanel('link_text'),
        FieldPanel('link_url'),
        FieldPanel('show'),
    ]
    def __str__(self):
        return self.content
@register_snippet
@python_2_unicode_compatible # provide equivalent __unicode__ and __str__ methods on Python 2
class SplashModal(models.Model):
    """Site-wide donation splash modal snippet, toggled on/off via ``show``."""
    # Constants consumed by the field declarations below.
    button_text_max = 128
    color_help_text = '6 digit CSS color code.'
    color_max_length = 6
    donate_button_text_default = 'Donate'
    donate_button_text_help_text = 'Defaults to "Donate" if field is empty.'
    donate_recurring_help_text = 'Make recurring donation the default.'
    donate_url_help_text = (
        '%s is the default if field is empty.' % SPLASH_DONATE_URL_DEFAULT
    )
    show_help_text = 'Show splash modal on all pages.'
    title_help_text = 'Internal title for CMS use. Not public.'
    title_max_length = 128
    background_color = models.CharField(
        blank=True,
        help_text=color_help_text,
        max_length=color_max_length,
        null=True,
    )
    background_image = models.ForeignKey(
        'wagtailimages.Image',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    body = RichTextField()
    donate_button_text = models.CharField(
        blank=True,
        help_text=donate_button_text_help_text,
        max_length=button_text_max,
        null=True,
    )
    donate_recurring = models.BooleanField(
        default=False,
        help_text=donate_recurring_help_text,
    )
    donate_url = models.URLField(
        blank=True,
        help_text=donate_url_help_text,
        null=True,
    )
    # Off by default so new modals are hidden until explicitly enabled.
    show = models.BooleanField(
        default=False,
        help_text=show_help_text,
    )
    title = models.CharField(
        help_text=title_help_text,
        max_length=title_max_length,
    )
    # Wagtail admin edit-form layout.
    panels = [
        FieldPanel('title'),
        FieldPanel('show'),
        FieldPanel('body'),
        ImageChooserPanel('background_image'),
        FieldPanel('background_color'),
        FieldPanel('donate_button_text'),
        FieldPanel('donate_url'),
        FieldPanel('donate_recurring'),
    ]
    def __str__(self):
        return self.title
class TemplatePage(Page):
    """Page rendered with an editor-specified template path."""
    template = models.CharField(max_length=128)
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    def get_template(self, request):
        # Use the editor-provided template path when set; otherwise fall back
        # to Wagtail's default template resolution for this page type.
        if self.template:
            return self.template
        return super(TemplatePage, self).get_template(request)
    content_panels = Page.content_panels + [
        FieldPanel('template')
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class IndexPage(Page):
    """Homepage with a StreamField body, three call-to-action content blocks,
    and the three most recent news posts injected into the context.
    """
    body = StreamField([
        ('heading', blocks.CharBlock(classname="full title")),
        ('paragraph', blocks.RichTextBlock()),
        ('raw_html', blocks.RawHTMLBlock())
    ])
    # Shared defaults / help text for the three content blocks below.
    background_color_default = '218fff'
    block_text_help_text = '''
    Main copy in content block/module to provide information on the
    call-to-action.
    '''
    block_1_text_max_length = 140
    block_2_text_max_length = 100
    block_3_text_max_length = 60
    button_colors = (
        ('blue', 'Blue'),
        ('green', 'Green'),
        ('red', 'Red'),
    )
    button_text_help_text = '''
    Call-to-action text to display on the button. Use action-oriented verbs if
    possible.
    '''
    button_text_max_length = 16
    button_url_help_text = '''
    Button will display if both url and text fields are filled in.
    '''
    button_url_new_window_help_text = 'Open new window for button url.'
    color_css_help_text = '6 digit CSS color code.'
    color_css_max_length = 6
    color_select_max_length = 128
    embed_code_help_text = 'Raw HTML embed code for video, etc.'
    # Content block 1 (always shown).
    block_1_background_color = models.CharField(
        default=background_color_default,
        max_length=color_css_max_length,
        help_text=color_css_help_text
    )
    block_1_background_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    block_1_button_color = models.CharField(
        blank=True,
        max_length=color_select_max_length,
        null=True,
        choices=button_colors,
    )
    block_1_button_text = models.CharField(
        blank=True,
        help_text=button_text_help_text,
        max_length=button_text_max_length,
        null=True,
    )
    block_1_button_url = models.URLField(
        blank=True,
        help_text=button_url_help_text,
        null=True,
    )
    block_1_button_url_new_window = models.BooleanField(
        default=False,
        help_text=button_url_new_window_help_text
    )
    block_1_embed_code = models.TextField(
        blank=True,
        null=True,
        help_text=embed_code_help_text
    )
    block_1_text = models.CharField(
        blank=True,
        help_text=block_text_help_text,
        max_length=block_1_text_max_length,
        null=True,
    )
    # Content block 2 (opt-in via block_2_show).
    block_2_background_color = models.CharField(
        default=background_color_default,
        max_length=color_css_max_length,
        help_text=color_css_help_text
    )
    block_2_background_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    block_2_button_color = models.CharField(
        blank=True,
        max_length=color_select_max_length,
        null=True,
        choices=button_colors,
    )
    block_2_button_text = models.CharField(
        blank=True,
        help_text=button_text_help_text,
        max_length=button_text_max_length,
        null=True,
    )
    block_2_button_url = models.URLField(
        blank=True,
        help_text=button_url_help_text,
        null=True,
    )
    block_2_button_url_new_window = models.BooleanField(
        default=False,
        help_text=button_url_new_window_help_text
    )
    block_2_embed_code = models.TextField(
        blank=True,
        null=True,
        help_text=embed_code_help_text
    )
    block_2_show = models.BooleanField(
        default=False
    )
    block_2_text = models.CharField(
        blank=True,
        help_text=block_text_help_text,
        max_length=block_2_text_max_length,
        null=True,
    )
    # Content block 3 (opt-in via block_3_show).
    block_3_background_color = models.CharField(
        default=background_color_default,
        max_length=color_css_max_length,
        help_text=color_css_help_text
    )
    block_3_background_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    block_3_button_color = models.CharField(
        blank=True,
        max_length=color_select_max_length,
        null=True,
        choices=button_colors,
    )
    block_3_button_text = models.CharField(
        blank=True,
        help_text=button_text_help_text,
        max_length=button_text_max_length,
        null=True,
    )
    block_3_button_url = models.URLField(
        blank=True,
        help_text=button_url_help_text,
        null=True,
    )
    block_3_button_url_new_window = models.BooleanField(
        default=False,
        help_text=button_url_new_window_help_text
    )
    block_3_embed_code = models.TextField(
        blank=True,
        null=True,
        help_text=embed_code_help_text
    )
    block_3_show = models.BooleanField(
        default=False
    )
    block_3_text = models.CharField(
        blank=True,
        help_text=block_text_help_text,
        max_length=block_3_text_max_length,
        null=True,
    )
    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                FieldPanel('block_1_text'),
                FieldPanel('block_1_button_url'),
                FieldPanel('block_1_button_url_new_window'),
                FieldPanel('block_1_button_text'),
                FieldPanel('block_1_button_color'),
                ImageChooserPanel('block_1_background_image'),
                FieldPanel('block_1_background_color'),
                FieldPanel('block_1_embed_code'),
            ],
            heading="Content Block 1",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('block_2_show'),
                FieldPanel('block_2_text'),
                FieldPanel('block_2_button_url'),
                FieldPanel('block_2_button_url_new_window'),
                FieldPanel('block_2_button_text'),
                FieldPanel('block_2_button_color'),
                ImageChooserPanel('block_2_background_image'),
                FieldPanel('block_2_background_color'),
                FieldPanel('block_2_embed_code'),
            ],
            heading="Content Block 2",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('block_3_show'),
                FieldPanel('block_3_text'),
                FieldPanel('block_3_button_url'),
                FieldPanel('block_3_button_url_new_window'),
                FieldPanel('block_3_button_text'),
                FieldPanel('block_3_button_color'),
                ImageChooserPanel('block_3_background_image'),
                FieldPanel('block_3_background_color'),
                FieldPanel('block_3_embed_code'),
            ],
            heading="Content Block 3",
            classname="collapsible"
        ),
    ]
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    parent_page_types = ['wagtailcore.Page']
    def get_context(self, *args, **kwargs):
        """Add the first 3 news posts (context['news']) from the NewsIndex page."""
        context = super(IndexPage, self).get_context(*args, **kwargs)
        # BUGFIX: ``first()`` returns None (it never raises Page.DoesNotExist)
        # when no live NewsIndex exists, so the previous
        # ``except Page.DoesNotExist`` was dead code and the attribute access
        # on None raised an uncaught AttributeError. Guard explicitly instead.
        news_index = NewsIndex.objects.live().first()
        if news_index is not None:
            context['news'] = news_index.get_news_posts()[0:3]
        return context
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
# CANDIDATES
class CandidateEndorsementPage(Page):
    """Endorsement page for a single candidate, with primary/general election
    dates and results plus social/donation links.
    """
    result_choices = (
        ('win', 'Win'),
        ('loss', 'Loss'),
    )
    result_max_length = 16
    body = RichTextField(verbose_name="Bio")
    # Legacy link to the old Candidate model; newer pages leave this NULL.
    candidate = models.ForeignKey(
        'endorsements.Candidate',
        help_text='Ignore - legacy field for old pages.',
        null=True,
        blank=True,
        on_delete=models.SET_NULL
    )
    donate_url = models.URLField(blank=True, null=True)
    election = models.ForeignKey(
        'endorsements.Election',
        null=True,
        blank=True,
        on_delete=models.SET_NULL
    )
    facebook_url = models.URLField(blank=True, null=True)
    general_election_date = models.DateField(blank=True, null=True)
    general_election_result = models.CharField(
        max_length=result_max_length,
        choices=result_choices,
        null=True,
        blank=True
    )
    instagram_url = models.URLField(blank=True, null=True)
    office = models.CharField(blank=True, max_length=128, null=True)
    photo = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    primary_election_date = models.DateField(blank=True, null=True)
    primary_election_result = models.CharField(
        max_length=result_max_length,
        choices=result_choices,
        null=True,
        blank=True
    )
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    state_or_territory = USStateField(blank=True, null=True)
    twitter_url = models.URLField(blank=True, null=True)
    volunteer_url = models.URLField(blank=True, null=True)
    website_url = models.URLField(blank=True, null=True)
    parent_page_types = ['pages.CandidateEndorsementIndexPage']
    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                FieldPanel('body', classname="full"),
                ImageChooserPanel('photo'),
            ],
            heading="Candidate",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('office'),
                FieldPanel('state_or_territory'),
                FieldPanel('election'),
                FieldPanel('primary_election_date'),
                FieldPanel('primary_election_result'),
                FieldPanel('general_election_date'),
                FieldPanel('general_election_result'),
            ],
            heading="Election",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('donate_url'),
                FieldPanel('volunteer_url'),
                FieldPanel('website_url'),
                FieldPanel('twitter_url'),
                FieldPanel('facebook_url'),
                FieldPanel('youtube_url'),
                FieldPanel('instagram_url'),
            ],
            heading="Links",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [FieldPanel('candidate')],
            heading="Legacy fields",
            classname="collapsible collapsed"
        ),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
    '''
    Get election date for general or primary depending on which is relevant
    '''
    def _get_election_date(self):
        # Return primary date if it is active
        if (
            self.general_election_result is None and
            self.primary_election_date is not None and
            self.primary_election_result is None
        ):
            return self.primary_election_date
        # Return primary date if candidate lost primary
        elif (
            self.general_election_result is None and
            self.primary_election_result == 'loss'
        ):
            return self.primary_election_date
        # Return general date otherwise
        else:
            # May be None when no general election date has been entered.
            return self.general_election_date
    election_date = property(_get_election_date)
    '''
    Get election result for general or primary depending on which is relevant
    Only return result for when the endorsed campaign is over. If endorsed
    campaign won primary and is moving on to the general, then return None.
    '''
    def _get_result(self):
        # Return general election result if it exists
        if (self.general_election_result is not None):
            return self.general_election_result
        # Return primary election result if candidate lost primary
        elif (self.primary_election_result == 'loss'):
            return self.primary_election_result
        # Return None otherwise
        else:
            return None
    result = property(_get_result)
    """Check if there is a pending election result"""
    def _has_pending_result(self):
        """Set a cutoff date for 1 day after election date"""
        # True once more than one day has passed since the relevant election
        # date (i.e. the election is over but a result may not be entered yet).
        # NOTE(review): ``election_date`` can be None when no dates are set,
        # which would raise TypeError here — confirm dates are always
        # populated for live endorsement pages.
        days_offset = 1
        cutoff_date = self.election_date + datetime.timedelta(days=days_offset)
        today = datetime.date.today()
        return today > cutoff_date
    has_pending_result = property(_has_pending_result)
'''
Purge candidate endorsement index & results pages when endorsement changes
http://docs.wagtail.io/en/v1.10.1/reference/contrib/frontendcache.html
'''
def candidate_endorsement_page_changed(candidate_endorsement_page):
    """Invalidate frontend-cached pages that surface candidate endorsements.

    Purges every live CandidateEndorsementIndexPage and every live
    ElectionTrackingPage, since both list endorsement data.
    """
    # Index pages list all current endorsements.
    for index_page in CandidateEndorsementIndexPage.objects.live():
        purge_page_from_cache(index_page)
    # Election-tracking (results) pages surface endorsement outcomes.
    for results_page in ElectionTrackingPage.objects.live():
        purge_page_from_cache(results_page)
@receiver(pre_delete, sender=CandidateEndorsementPage)
def candidate_endorsement_deleted_handler(instance, **kwargs):
    # Purge cached listing/results pages when an endorsement page is deleted.
    candidate_endorsement_page_changed(instance)
@receiver(page_published, sender=CandidateEndorsementPage)
def candidate_endorsement_published_handler(instance, **kwargs):
    # Purge cached listing/results pages when an endorsement page is published.
    candidate_endorsement_page_changed(instance)
@receiver(page_unpublished, sender=CandidateEndorsementPage)
def candidate_endorsement_unpublished_handler(instance, **kwargs):
    # Purge cached listing/results pages when an endorsement page is unpublished.
    candidate_endorsement_page_changed(instance)
class CandidateEndorsementIndexPage(Page):
    """Listing page for current (non-legacy, non-finished) candidate endorsements."""
    body = RichTextField(blank=True, null=True)
    content_heading = models.CharField(max_length=128, blank=True, null=True)
    secondary_copy = RichTextField(
        blank=True,
        null=True,
        help_text='Copy to go below Past Election Results section.'
    )
    button_show = models.BooleanField(
        default=False,
        help_text="""Show nominations platform Get Started button. Will only
        display if secondary copy is present."""
    )
    content_panels = Page.content_panels + [
        FieldPanel('content_heading'),
        FieldPanel('body'),
        MultiFieldPanel(
            [
                FieldPanel('secondary_copy'),
                FieldPanel('button_show'),
            ],
            heading="Secondary Content"
        ),
    ]
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    subpage_types = ['pages.CandidateEndorsementPage']
    def get_context(self, *args, **kwargs):
        """Add context['candidates']: live child endorsements, current races only."""
        context = super(CandidateEndorsementIndexPage, self).get_context(
            *args,
            **kwargs
        )
        # Filter out legacy pages and past elections
        candidates = self.get_children().live().filter(
            candidateendorsementpage__candidate__isnull=True,
            candidateendorsementpage__general_election_result__isnull=True,
        ).exclude(
            candidateendorsementpage__primary_election_result='loss',
        ).select_related(
            'candidateendorsementpage',
        ).order_by(
            'candidateendorsementpage__state_or_territory',
            'candidateendorsementpage__title',
        )
        """Sort by election date, with pending candidates at bottom of list"""
        # NOTE(review): the key assumes election_date is never None for these
        # pages; a page with no dates entered would raise TypeError when the
        # dates are compared — confirm dates are always populated.
        candidates_sorted = sorted(
            candidates,
            key=lambda x: (
                x.candidateendorsementpage.has_pending_result,
                x.candidateendorsementpage.election_date,
            ),
        )
        context['candidates'] = candidates_sorted
        return context
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
# INITIATIVES
class InitiativeEndorsementPage(Page):
    """Endorsement page for a ballot initiative, with election result and
    legacy-field support for pages migrated from the old Initiative model.
    """
    category_choices = (
        ('corporate-tax', 'Corporate Tax'),
        ('death-penalty', 'Death Penalty'),
        ('education', 'Education'),
        ('election-reform', 'Election Reform'),
        ('environment', 'Environment'),
        ('health-care', 'Health Care'),
        ('labor', 'Labor'),
        ('marijuana', 'Marijuana'),
        ('minimum-wage', 'Minimum Wage'),
        ('money-in-politics', 'Money in Politics'),
    )
    category_max_length = 32
    initiative_name_max_length = 128
    initiative_title_max_length = 128
    result_choices = (
        ('win', 'Win'),
        ('loss', 'Loss'),
    )
    result_max_length = 16
    body = RichTextField()
    category = models.CharField(
        blank=True,
        choices=category_choices,
        null=True,
        # CONSISTENCY FIX: use the declared constant (same value, 32) instead
        # of a hard-coded literal, matching every sibling CharField here.
        max_length=category_max_length
    )
    election = models.ForeignKey(
        'endorsements.Election',
        null=True,
        blank=True,
        on_delete=models.SET_NULL
    )
    election_date = models.DateField(blank=True, null=True)
    election_result = models.CharField(
        blank=True,
        choices=result_choices,
        max_length=result_max_length,
        null=True,
    )
    featured = models.BooleanField(
        default=False,
        help_text='Check box to feature initiative at top of list.',
    )
    how_to_vote = models.BooleanField(
        default=True,
        help_text='Ignore - legacy field for old pages.',
        verbose_name="Vote Yes?",
    )
    # Legacy link to the old Initiative model; newer pages leave this NULL.
    initiative = models.ForeignKey(
        'endorsements.Initiative',
        blank=True,
        help_text='Ignore - legacy field for old pages.',
        null=True,
        on_delete=models.SET_NULL
    )
    initiative_name = models.CharField(
        blank=True,
        help_text='Ignore - legacy field for old pages.',
        null=True,
        max_length=initiative_name_max_length,
    )
    initiative_title = models.CharField(
        blank=True,
        help_text='Ignore - legacy field for old pages.',
        null=True,
        max_length=initiative_title_max_length,
    )
    signup_tagline = models.CharField(
        blank=True,
        help_text='Ignore - legacy field for old pages.',
        max_length=128,
        null=True
    )
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    state_or_territory = USStateField(blank=True, null=True)
    website_url = models.URLField(blank=True, null=True)
    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                FieldPanel('body', classname="full"),
                FieldPanel('website_url'),
                FieldPanel('featured'),
            ],
            heading="Initiative",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('state_or_territory'),
                FieldPanel('election'),
                FieldPanel('election_date'),
                FieldPanel('election_result'),
            ],
            heading="Election",
            classname="collapsible"
        ),
        MultiFieldPanel(
            [
                FieldPanel('initiative_title'),
                FieldPanel('initiative_name'),
                FieldPanel('initiative'),
                FieldPanel('signup_tagline'),
                FieldPanel('category'),
                FieldPanel('how_to_vote'),
            ],
            heading="Legacy fields",
            classname="collapsible collapsed"
        ),
    ]
    parent_page_types = ['pages.InitiativeEndorsementIndexPage']
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
    def _get_display_title(self):
        """Build title for initiative and support legacy pages"""
        # Legacy pages carry title/name fields; compose "Yes/No on X: Y".
        if self.initiative_title and self.initiative_name:
            initiative_title = (
                str("Yes" if self.how_to_vote else "No") + ' on ' + self.initiative_title
                + ': ' + self.initiative_name
            )
        else:
            initiative_title = self.title
        return initiative_title
    get_display_title = property(_get_display_title)
    def get_context(self, *args, **kwargs):
        """Support legacy pages too"""
        # Resolve the state from the legacy Initiative model when present.
        if self.initiative:
            state_or_territory = self.initiative.state_initials
        else:
            state_or_territory = self.state_or_territory
        # Other current (non-legacy, no result yet) initiatives in this state.
        state_initiatives = InitiativeEndorsementPage.objects.live().filter(
            initiative__isnull=True,
            election_result__isnull=True,
            state_or_territory=state_or_territory
        ).order_by(
            '-featured',
            'initiative_title',
        ).exclude(id=self.id)
        context = super(InitiativeEndorsementPage, self).get_context(
            *args,
            **kwargs
        )
        context['state_initiatives'] = state_initiatives
        return context
'''
Purge initiative endorsement index & results pages when endorsement changes
http://docs.wagtail.io/en/v1.10.1/reference/contrib/frontendcache.html
'''
def initiative_endorsement_page_changed(initiative_endorsement_page):
    """Invalidate frontend-cached pages that surface initiative endorsements.

    Purges the page's parent (the initiative index) and every live
    ElectionTrackingPage, since both list endorsement data.
    """
    # The parent index page lists all initiative endorsements.
    purge_page_from_cache(initiative_endorsement_page.get_parent())
    # Election-tracking (results) pages surface initiative outcomes.
    for results_page in ElectionTrackingPage.objects.live():
        purge_page_from_cache(results_page)
@receiver(page_published, sender=InitiativeEndorsementPage)
def initiative_endorsement_published_handler(instance, **kwargs):
    # Purge cached index/results pages when an initiative page is published.
    initiative_endorsement_page_changed(instance)
@receiver(page_unpublished, sender=InitiativeEndorsementPage)
def initiative_endorsement_unpublished_handler(instance, **kwargs):
    # Purge cached index/results pages when an initiative page is unpublished.
    initiative_endorsement_page_changed(instance)
class InitiativeEndorsementIndexPage(Page):
    """Listing page for current (non-legacy, no result yet) initiative endorsements."""
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    subpage_types = ['pages.InitiativeEndorsementPage']
    def get_context(self, *args, **kwargs):
        """Add context['initiatives']: live children, featured first."""
        context = super(InitiativeEndorsementIndexPage, self).get_context(
            *args,
            **kwargs
        )
        # Filter out legacy pages and past elections
        context['initiatives'] = self.get_children().live().filter(
            initiativeendorsementpage__initiative__isnull=True,
            initiativeendorsementpage__election_result__isnull=True,
        ).select_related(
            'initiativeendorsementpage'
        ).order_by(
            '-initiativeendorsementpage__featured',
            'initiativeendorsementpage__state_or_territory',
            'initiativeendorsementpage__initiative_title',
        )
        return context
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
# ISSUES
class IssuePage(Page):
    """Page describing a single political issue."""
    body = RichTextField()
    # Legacy link to the old Issue model.
    issue = models.ForeignKey('endorsements.Issue', null=True, blank=True, on_delete=models.SET_NULL)
    signup_tagline = models.CharField(max_length=128, blank=True, null=True)
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    parent_page_types = ['pages.IssueIndexPage']
    content_panels = Page.content_panels + [
        FieldPanel('body', classname="full"),
        FieldPanel('issue'),
        FieldPanel('signup_tagline')
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class IssueIndexPage(Page):
    """Index that serves a single hard-coded IssuePage instead of a listing."""
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    subpage_types = ['pages.IssuePage']
    def serve(self, request):
        # trickeryyyy... delegate rendering to one specific child page.
        # NOTE(review): raises IssuePage.DoesNotExist (HTTP 500) if no page
        # titled 'Income Inequality' exists — confirm that page is permanent.
        return IssuePage.objects.get(title='Income Inequality').serve(request)
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
# News / Statements / Press Releases
class NewsIndex(Page):
    """Paginated listing of NewsPost children (news/statements/press releases)."""
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    parent_page_types = ['pages.IndexPage']
    subpage_types = ['pages.NewsPost']
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
    '''
    Add extra paths for pagination
    Return url fragments after main page path, such as '/' and '/?page=1'
    for urls '/press/' and '/press/?page=1'
    http://docs.wagtail.io/en/v1.10.1/reference/contrib/frontendcache.html
    '''
    def get_cached_paths(self):
        # Yield the main URL
        yield '/'
        # Yield one URL per page in paginator to make sure all pages are purged
        for page_number in range(1, self.get_news_paginator().num_pages + 1):
            yield '/?page=' + str(page_number)
    def get_context(self, request):
        """Add context['resources']: the requested paginator page of posts."""
        context = super(NewsIndex, self).get_context(request)
        # context['news_posts'] = self.get_children().live().order_by('-id')
        paginator = self.get_news_paginator()
        page = request.GET.get('page')
        try:
            resources = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            resources = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results
            resources = paginator.page(paginator.num_pages)
        context['resources'] = resources
        return context
    def get_news_paginator(self):
        # Show 5 resources per page
        count = 5
        return Paginator(self.get_news_posts(), count)
    def get_news_posts(self):
        """Return all live NewsPost pages, newest first."""
        all_posts = NewsPost.objects.live()
        """Sort by most recent first. Use go_live_at for legacy pages"""
        # NOTE(review): if both public_date_time and go_live_at are None for
        # some post, comparing None against datetimes raises TypeError on
        # Python 3 — confirm live posts always have a date.
        sorted_posts = sorted(
            all_posts,
            key=lambda x: (x.public_date_time if x.public_date_time else x.go_live_at),
            reverse=True,
        )
        return sorted_posts
class NewsPost(Page):
    """A single news item, statement, or press release under a NewsIndex."""
    POST_TYPE_CHOICES = (
        ('news', 'News'),
        ('statement', 'Statement'),
        ('press-release', 'Press Release'),
    )
    # Optional editor-facing override for the publicly shown date/time.
    display_date_time = models.DateTimeField(
        blank=True,
        null=True,
        verbose_name='Date & Time for display',
    )
    post_type = models.CharField(choices=POST_TYPE_CHOICES, null=True, blank=True, max_length=32, default='news')
    header_photo = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    header_photo_byline = models.CharField(max_length=256, blank=True, null=True)
    abstract = RichTextField()
    body = RichTextField()
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    parent_page_types = ['pages.NewsIndex']
    subpage_types = []
    content_panels = Page.content_panels + [
        FieldPanel('display_date_time'),
        FieldPanel('post_type'),
        ImageChooserPanel('header_photo'),
        FieldPanel('header_photo_byline'),
        FieldPanel('abstract'),
        FieldPanel('body', classname="full"),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
    '''
    Get date & time for NewsPost, for public display and sorting purposes
    '''
    def _get_public_date_time(self):
        """
        Return display date if available, otherwise first published at date
        """
        if self.display_date_time:
            date = self.display_date_time
        else:
            # May be None for a never-published draft.
            date = self.first_published_at
        return date
    public_date_time = property(_get_public_date_time)
'''
Purge news index page and homepage whenever a news post changes
http://docs.wagtail.io/en/v1.10.1/reference/contrib/frontendcache.html
'''
def news_post_changed(news_post):
    """Invalidate frontend-cached pages that list news posts.

    Purges every live NewsIndex page and every live homepage (IndexPage),
    both of which render news listings.
    """
    # The news index paginates all posts.
    for listing_page in NewsIndex.objects.live():
        purge_page_from_cache(listing_page)
    # The homepage shows the three most recent posts.
    for home_page in IndexPage.objects.live():
        purge_page_from_cache(home_page)
@receiver(page_published, sender=NewsPost)
def news_published_handler(instance, **kwargs):
    # Purge cached listing pages when a news post is published.
    news_post_changed(instance)
@receiver(page_unpublished, sender=NewsPost)
def news_unpublished_handler(instance, **kwargs):
    # Purge cached listing pages when a news post is unpublished.
    news_post_changed(instance)
@receiver(pre_delete, sender=NewsPost)
def news_deleted_handler(instance, **kwargs):
    # Purge cached listing pages when a news post is deleted.
    news_post_changed(instance)
@register_snippet
class CandidateRace(models.Model):
    """Snippet recording the vote tally and result for one candidate's race."""
    RESULT_CHOICES = (
        (None, ''),
        ('win', 'Win'),
        ('lose', 'Lose'),
    )
    # Legacy link to the old Candidate model; newer entries use the page FK.
    candidate = models.ForeignKey('endorsements.Candidate', null=True, blank=True, on_delete=models.SET_NULL)
    candidate_endorsement_page = models.ForeignKey(
        'pages.CandidateEndorsementPage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    result = models.CharField(max_length=5, choices=RESULT_CHOICES, null=True, blank=True)
    candidate_votes = models.IntegerField(default=0)
    opponent_votes = models.IntegerField(default=0)
    other_votes = models.IntegerField(default=0)
    margin_win_loss = models.CharField(max_length=128, null=True, blank=True)
    source = models.URLField(null=True, blank=True)
    notes = RichTextField(blank=True)
    last_updated = models.DateTimeField(auto_now=True)
    '''
    Get candidate name
    '''
    def _get_candidate_name(self):
        """Display name from the legacy model or the linked endorsement page."""
        # Support legacy candidate model
        if self.candidate:
            return self.candidate.name
        # Return page title
        elif self.candidate_endorsement_page:
            return self.candidate_endorsement_page.title
        # Otherwise return empty string - this should not happen in practice
        else:
            return ''
    candidate_name = property(_get_candidate_name)
    '''
    Get candidate photo_url
    '''
    def _get_candidate_photo_url(self):
        """Photo URL from the legacy model or the linked endorsement page."""
        # Support legacy candidate model
        if self.candidate:
            return self.candidate.photo.url
        # Return page photo url
        elif self.candidate_endorsement_page:
            return self.candidate_endorsement_page.photo.file.url
        # Otherwise return empty string - this should not happen in practice
        else:
            return ''
    candidate_photo_url = property(_get_candidate_photo_url)
    '''
    Get office
    '''
    def _get_office(self):
        """Office sought, from the legacy model or the linked endorsement page."""
        # Support legacy candidate model
        if self.candidate:
            return self.candidate.office
        # Return page office
        elif self.candidate_endorsement_page:
            return self.candidate_endorsement_page.office
        # Otherwise return empty string - this should not happen in practice
        else:
            return ''
    office = property(_get_office)
    '''
    Get state or territory
    '''
    def _get_state_or_territory(self):
        """State/territory (plus district for legacy records) as a string."""
        # Support legacy candidate model
        if self.candidate:
            state_or_territory = self.candidate.state
            if self.candidate.district is not None:
                state_or_territory += ' ' + self.candidate.district
            return state_or_territory
        # Return page state or territory
        elif self.candidate_endorsement_page:
            # BUGFIX: call the display method; previously the bound method
            # itself was returned (only worked when auto-called in templates),
            # inconsistent with the string returned by the legacy branch.
            return self.candidate_endorsement_page.get_state_or_territory_display()
        # Otherwise return empty string - this should not happen in practice
        else:
            return ''
    state_or_territory = property(_get_state_or_territory)
    def __unicode__(self):
        # NOTE(review): Python 3 ignores __unicode__; confirm whether this
        # should be __str__ (other models here use python_2_unicode_compatible).
        return self.candidate_name
    def _total_votes(self):
        """Total votes recorded across all outcomes."""
        return self.candidate_votes + self.opponent_votes + self.other_votes
    def candidate_votes_percentage(self):
        """Endorsed candidate's vote share formatted as e.g. '45%'."""
        # BUGFIX: bare ``except:`` narrowed — the only expected failure is a
        # zero total (all vote fields are non-null IntegerFields, default 0).
        try:
            value = self.candidate_votes / float(self._total_votes())
        except ZeroDivisionError:
            value = 0
        return "{0:.0%}".format(value)
    def opponent_votes_percentage(self):
        """Opponent's vote share formatted as e.g. '55%'."""
        try:
            value = self.opponent_votes / float(self._total_votes())
        except ZeroDivisionError:
            value = 0
        return "{0:.0%}".format(value)
    panels = [
        FieldPanel('candidate'),
        PageChooserPanel(
            'candidate_endorsement_page',
            'pages.CandidateEndorsementPage'
        ),
        FieldPanel('result'),
        FieldPanel('candidate_votes'),
        FieldPanel('opponent_votes'),
        FieldPanel('other_votes'),
        FieldPanel('margin_win_loss'),
        FieldPanel('source'),
        FieldPanel('notes')
    ]
@register_snippet
class InitiativeRace(models.Model):
    """Snippet recording the vote tally and result for one ballot initiative."""
    RESULT_CHOICES = (
        (None, ''),
        ('win', 'Win'),
        ('lose', 'Lose'),
    )
    # Legacy link to the old Initiative model; newer entries use the page FK.
    initiative = models.ForeignKey('endorsements.Initiative', null=True, blank=True, on_delete=models.SET_NULL)
    initiative_endorsement_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    result = models.CharField(max_length=5, choices=RESULT_CHOICES, null=True, blank=True)
    initiative_votes = models.IntegerField(default=0)
    opponent_votes = models.IntegerField(default=0)
    other_votes = models.IntegerField(default=0)
    margin_win_loss = models.CharField(max_length=128, null=True, blank=True)
    source = models.URLField(null=True, blank=True)
    notes = RichTextField(blank=True)
    last_updated = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        # NOTE(review): raises AttributeError when ``initiative`` is NULL (the
        # FK is nullable), and Python 3 ignores __unicode__ — confirm intent.
        return self.initiative.name
    def _total_votes(self):
        """Total votes recorded across all outcomes."""
        return self.initiative_votes + self.opponent_votes + self.other_votes
    def initiative_votes_percentage(self):
        """The initiative's vote share formatted as e.g. '45%'."""
        # BUGFIX: bare ``except:`` narrowed — the only expected failure is a
        # zero total (all vote fields are non-null IntegerFields, default 0).
        try:
            value = self.initiative_votes / float(self._total_votes())
        except ZeroDivisionError:
            value = 0
        return "{0:.0%}".format(value)
    def opponent_votes_percentage(self):
        """The opposition's vote share formatted as e.g. '55%'."""
        try:
            value = self.opponent_votes / float(self._total_votes())
        except ZeroDivisionError:
            value = 0
        return "{0:.0%}".format(value)
    panels = [
        FieldPanel('initiative'),
        PageChooserPanel(
            'initiative_endorsement_page',
            'pages.InitiativeEndorsementPage'
        ),
        FieldPanel('result'),
        FieldPanel('initiative_votes'),
        FieldPanel('opponent_votes'),
        FieldPanel('other_votes'),
        FieldPanel('margin_win_loss'),
        FieldPanel('source'),
        FieldPanel('notes')
    ]
class CandidateRaceSnippet(Orderable, models.Model):
    """Orderable through-model attaching CandidateRace snippets to an ElectionTrackingPage."""
    page = ParentalKey('pages.ElectionTrackingPage', related_name='candidate_race_snippets')
    candidate_race = models.ForeignKey('pages.CandidateRace', related_name='+')
    class Meta:
        verbose_name = "Candidate Race"
    panels = [
        SnippetChooserPanel('candidate_race'),
    ]
    def __unicode__(self):
        # NOTE(review): ``unicode`` is a Python 2 builtin — this raises
        # NameError on Python 3, where __unicode__ is ignored anyway; confirm
        # whether this should be a __str__ using str().
        return unicode(self.candidate_race)
class InitiativeeRaceSnippet(Orderable, models.Model):
    """Orderable through-model attaching InitiativeRace snippets to an ElectionTrackingPage.

    NOTE(review): class name has a typo ("Initiativee") — renaming would
    require a migration and is out of scope here.
    """
    page = ParentalKey('pages.ElectionTrackingPage', related_name='initiative_race_snippets')
    initiative_race = models.ForeignKey('pages.InitiativeRace', related_name='+')
    class Meta:
        verbose_name = "Initiative Race"
    panels = [
        SnippetChooserPanel('initiative_race'),
    ]
    def __unicode__(self):
        # NOTE(review): ``unicode`` is a Python 2 builtin — this raises
        # NameError on Python 3, where __unicode__ is ignored anyway; confirm
        # whether this should be a __str__ using str().
        return unicode(self.initiative_race)
class ElectionTrackingPage(RoutablePageMixin, Page):
    """Election-results page, routable by year (e.g. /results/2017/).

    Cleanup: the original used bare string literals as comments inside
    ``get_context``; those are evaluated and discarded at runtime, so they
    have been converted to real comments.  No behavior change.
    """
    abstract = RichTextField()
    body = RichTextField()
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        FieldPanel('abstract'),
        FieldPanel('body', classname="full"),
        InlinePanel(
            'candidate_race_snippets',
            label="Candidates (Ignore - legacy field for old pages)"
        ),
        InlinePanel('initiative_race_snippets', label="Initiatives"),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]

    def get_context(self, *args, **kwargs):
        """Build the template context: selected year, page title, and the
        endorsement pages / legacy race snippets sorted for display."""
        context = super(ElectionTrackingPage, self).get_context(*args, **kwargs)
        # Get year from URL for non-legacy pages. Otherwise default to 2019.
        # TODO: rethink how to handle this going forward. This is a temp
        # solution for 2019.
        if self.url == settings.RESULTS_2016_URL:
            year = '2016'
        elif self.url == settings.RESULTS_2017_URL:
            year = '2017'
        elif 'year' in kwargs:
            # Supplied by year_view() via RoutablePageMixin.serve().
            year = kwargs['year']
        else:
            year = '2019'
        context['year'] = year
        # Set page title based on year.
        context['custom_page_title'] = '%s Election Results' % year
        # Get list of endorsements published with results.
        # TODO: remove legacy support once we have consolidated results pages.
        if self.url in [settings.RESULTS_2016_URL, settings.RESULTS_2017_URL]:
            # Legacy pages render from snippets only.
            context['primary_pages'] = []
            context['candidate_endorsement_pages'] = []
            context['initiative_endorsement_pages'] = []
        else:
            # Primary victories that don't have a general result yet.
            primary_pages = CandidateEndorsementPage.objects.live().filter(
                general_election_result__isnull=True,
                primary_election_result='win'
            ).order_by(
                '-primary_election_date',
                'state_or_territory',
                'office',
                'title',
            )
            context['primary_pages'] = primary_pages
            # Candidates with a final outcome (general result, or a primary
            # loss).  election_date appears to be a Python-side attribute,
            # hence the sorted() instead of an order_by — TODO confirm.
            candidate_pages = CandidateEndorsementPage.objects.live().filter(
                Q(general_election_result__isnull=False) |
                Q(primary_election_result='loss')
            ).order_by(
                'state_or_territory',
                'office',
                'title',
            )
            candidate_pages_sorted = sorted(
                candidate_pages,
                key=lambda x: x.election_date,
                reverse=True,
            )
            context['candidate_endorsement_pages'] = candidate_pages_sorted
            initiative_pages = InitiativeEndorsementPage.objects.live().filter(
                election_result__isnull=False,
            ).order_by(
                '-election_result',
                '-election_date',
                'state_or_territory',
                'initiative_title',
            )
            context['initiative_endorsement_pages'] = initiative_pages
        # Legacy snippet listings: wins first, then undecided, then losses.
        context['candidate_race_snippets'] = self.candidate_race_snippets.select_related(
            'candidate_race', 'candidate_race__candidate'
        ).annotate(
            win_sort_order=Case(
                When(candidate_race__result='win', then=Value(1)),
                When(candidate_race__result=None, then=Value(2)),
                When(candidate_race__result='lose', then=Value(3)),
                output_field=IntegerField()
            )
        ).order_by(
            'win_sort_order',
            '-candidate_race__candidate__primary_date',
            'candidate_race__candidate__state',
            'candidate_race__candidate__office',
            'candidate_race__candidate__district',
            'candidate_race__candidate__name'
        )
        context['initiative_race_snippets'] = self.initiative_race_snippets.select_related(
            'initiative_race', 'initiative_race__initiative'
        ).annotate(
            win_sort_order=Case(
                When(initiative_race__result='win', then=Value(1)),
                When(initiative_race__result=None, then=Value(2)),
                When(initiative_race__result='lose', then=Value(3)),
                output_field=IntegerField()
            )
        ).order_by(
            'win_sort_order',
            '-initiative_race__last_updated',
            'initiative_race__initiative__state',
            'initiative_race__initiative__title',
        )
        return context

    @route(r'^$')
    def default_view(self, request, view=None, *args, **kwargs):
        # Plain serve; get_context() falls back to the default year.
        return super(ElectionTrackingPage, self).serve(request)

    @route(r'^(?P<year>\d+)\/?$')
    def year_view(self, request, year, view=None, *args, **kwargs):
        # Forward the captured year to get_context() through
        # RoutablePageMixin.serve(request, view, args, kwargs).
        kwargs['year'] = year
        return super(ElectionTrackingPage, self).serve(request, view, args, kwargs)
class TypeformPage(Page):
    """Page embedding a Typeform form via its URL."""
    abstract = RichTextField()
    body = RichTextField(null=True, blank=True)
    typeform_url = models.URLField()
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        FieldPanel('abstract', classname="full"),
        FieldPanel('body', classname="full"),
        FieldPanel('typeform_url'),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class YoutubePage(Page):
    """Page embedding a single YouTube video (by video id) above a
    StreamField body."""
    abstract = RichTextField()
    body = StreamField([
        ('rich_text', blocks.RichTextBlock()),
        ('heading', blocks.CharBlock(classname="full title")),
        ('paragraph', blocks.RichTextBlock()),
        ('raw_html', blocks.RawHTMLBlock())
    ])
    # The YouTube video id only (not the full URL).
    youtube_video_id = models.CharField(max_length=30)
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        FieldPanel('abstract', classname="full"),
        StreamFieldPanel('body'),
        FieldPanel('youtube_video_id'),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class StateSplashPage(Page):
    """Simple splash page with abstract and rich-text body."""
    abstract = RichTextField()
    body = RichTextField(null=True, blank=True)
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        FieldPanel('abstract', classname="full"),
        FieldPanel('body', classname="full"),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class ContentPage(Page):
    """Generic content page: abstract plus StreamField body."""
    abstract = RichTextField()
    body = StreamField([
        ('heading', blocks.CharBlock(classname="full title")),
        ('paragraph', blocks.RichTextBlock()),
        ('raw_html', blocks.RawHTMLBlock())
    ])
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        FieldPanel('abstract', classname="full"),
        StreamFieldPanel('body')
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class FullContentPage(Page):
    """Full-width variant of ContentPage.

    NOTE(review): field-for-field identical to ContentPage — presumably only
    the template differs; confirm before consolidating.
    """
    abstract = RichTextField()
    body = StreamField([
        ('heading', blocks.CharBlock(classname="full title")),
        ('paragraph', blocks.RichTextBlock()),
        ('raw_html', blocks.RawHTMLBlock())
    ])
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')

    content_panels = Page.content_panels + [
        FieldPanel('abstract', classname="full"),
        StreamFieldPanel('body')
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class DonationPage(Page):
    """Page that renders donor names parsed from an uploaded CSV file."""
    abstract = RichTextField(null=True, blank=True)
    body = RichTextField(null=True, blank=True)
    csv_file = models.FileField(null=True, blank=True)

    content_panels = Page.content_panels + [
        FieldPanel('abstract', classname="full"),
        FieldPanel('body', classname="full"),
        FieldPanel('csv_file'),
    ]

    def get_context(self, *args, **kwargs):
        """Add the parsed donor rows (one dict per CSV row) to the context.

        NOTE(review): assumes ``csv_file`` yields text lines when iterated
        (Django's FieldFile does once opened) — confirm callers upload text.
        """
        context = super(DonationPage, self).get_context(*args, **kwargs)
        reader = csv.DictReader(
            self.csv_file,
            fieldnames=[
                'first_name_2016',
                'last_name_2016',
                'first_name_q1_2017',
                'last_name_q1_2017',
                'first_name_q2_2017',
                'last_name_q2_2017',
                'first_name_q3_2017',
                'last_name_q3_2017',
                'first_name_q4_2017',
                'last_name_q4_2017',
                'first_name_q1_2018',
                'last_name_q1_2018',
                'first_name_q2_2018',
                'last_name_q2_2018',
                'first_name_q3_2018',
                'last_name_q3_2018',
                'first_name_q4_2018',
                'last_name_q4_2018',
                'first_name_q1_2019',
                'last_name_q1_2019',
            ]
        )
        # Discard the header row.  next(reader) replaces the Python-2-only
        # reader.next() method, and works identically on Python 2.6+ and 3.
        next(reader)
        context['donations'] = list(reader)
        return context
# LOCAL GROUPS
class GroupPage(RoutablePageMixin, Page):
    """Local-groups hub: map/index view, group signup form, success page and
    per-group detail view.

    NOTE: this module targets Python 2 (print statements, dict.iteritems and
    tuple-unpacking lambdas below are Python-2-only syntax).
    """
    # Toggles the curated "featured groups" listing on the index view.
    featured_groups_show = models.BooleanField(
        default=False,
        help_text='Show Featured Groups list.'
    )
    content_panels = Page.content_panels + [
        FieldPanel('featured_groups_show'),
    ]

    @route(r'^$')
    def index_view(self, request):
        """Get approved Local Groups and order by GROUP_TYPES tuple in model"""
        groups = Group.objects.filter(status__exact='approved').order_by(
            '-group_type'
        )
        # Serialize to GeoJSON for the map widget, then strip properties that
        # should not be exposed to the client.
        geojson_data = serializers.serialize("geojson", groups)
        data = json.loads(geojson_data)
        for d in data['features']:
            del d['properties']['rep_postal_code']
            del d['properties']['last_meeting']
            del d['properties']['constituency']
            del d['properties']['pk']
            # Dates are not JSON-serializable as-is; stringify.
            d['properties']['signup_date'] = str(d['properties']['signup_date'])
        groups_data = json.dumps(data)
        """Get featured groups if enabled, and sort/group by state"""
        if self.featured_groups_show:
            # 'ZZZ' pushes entries with a missing state/country/city to the
            # end of the alphabetical sort.
            groups_sorted = sorted(
                groups,
                key=lambda x: (
                    (x.state if x.state is not None else 'ZZZ'),
                    (x.get_country_display() if x.country is not None else 'ZZZ'),
                    (x.city if x.city is not None else 'ZZZ'),
                    x.name,
                ),
            )
            featured_groups_by_state = defaultdict(list)
            for group in groups_sorted:
                """Add to list if group rating is 3 or better"""
                if group.group_rating is not None and group.group_rating >= 3:
                    featured_groups_by_state[group.state].append(
                        group
                    )
            # Python 2 only: iteritems() and the (k, v) tuple parameter.
            featured_groups = sorted(
                featured_groups_by_state.iteritems(),
                key=lambda (k, v): (
                    (k if k is not None else 'ZZZ'),
                ),
            )
        else:
            featured_groups = None
        return render(request, 'pages/group_index_page.html', {
            'page': self,
            'groups': groups_data,
            'featured_groups': featured_groups,
        })

    @route(r'^new/$')
    def add_group_view(self, request):
        """Render and process the "create a new group" form; on success,
        email the group representative and redirect to the success page."""
        # if this is a POST request we need to process the form data
        form = GroupCreateForm(request.POST or None)
        if request.method == 'POST':
            # create a form instance and populate it with data from the request:
            # check whether it's valid:
            if form.is_valid():
                group = form.save(commit=False)
                # Get new group id
                group.group_id = str(self.get_new_group_id())
                slug = slugify(group.name)
                # TODO: unique slugs that aren't ugly
                if not Group.objects.exclude(pk=group.pk).filter(slug=slug).exists():
                    group.slug = slug
                group.save()
                form.save_m2m()
                # process the data in form.cleaned_data as required
                plaintext = get_template('pages/email/add_group_success.txt')
                htmly = get_template('pages/email/add_group_success.html')
                d = {'group_id': group.group_id}
                subject="Let's get your group on the map!"
                from_email = 'Our Revolution <%s>' % DEFAULT_FROM_EMAIL
                to_email = ['"%s %s" <%s>' % (
                    form.cleaned_data['rep_first_name'],
                    form.cleaned_data['rep_last_name'],
                    form.cleaned_data['rep_email'],
                )]
                text_content = plaintext.render(d)
                html_content = htmly.render(d)
                msg = EmailMultiAlternatives(
                    subject,
                    text_content,
                    from_email,
                    to_email,
                )
                msg.attach_alternative(html_content, "text/html")
                msg.send()
                # redirect to a new URL:
                return HttpResponseRedirect('/groups/success')
            else:
                print form.errors
                messages.error(request, 'Please correct the errors marked in the form below.')
        return render(request, 'pages/add_group.html', {'form': form})

    '''
    Get new group id that is random and not in use
    '''
    def get_new_group_id(self):
        '''
        Find random integer between 1000 and 9998 and not in use by another group
        TODO: more scalable solution that doesn't use random guess approach and
        supports more than 8999 groups
        '''
        group_id = None
        while (group_id is None or Group.objects.filter(group_id=str(group_id)).exists()):
            group_id = randint(1000,9998)
        return group_id

    @route(r'^success/$')
    def group_success_view(self, request):
        """Static thank-you page shown after a successful group signup."""
        return render(request, 'pages/group_success_page.html', {
            'page': self
        })

    @route(r'^(.+)/$')
    def group_view(self, request, group_slug):
        """Detail page for one group; 404s unless the group is approved."""
        group = get_object_or_404(Group, slug=group_slug)
        if group.status != 'approved':
            raise Http404
        return render(request, 'pages/group_page.html', {
            'page': self,
            'group':group
        })
# Organizing Hub Resource Page
class GroupResourcePage(Page):
    """Organizing Hub resource page; nestable under IndexPage or itself."""
    body = RichTextField(
        help_text='''
        All H# tags will be automatically converted to a table of contents.
        '''
    )
    parent_page_types = ['pages.IndexPage', 'pages.GroupResourcePage']
    sub_heading = models.TextField(
        blank=True,
        null=True,
        help_text='Optional text content to appear below page title.'
    )
    subpage_types = ['pages.GroupResourcePage']
    social_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    content_panels = Page.content_panels + [
        FieldPanel('sub_heading'),
        FieldPanel('body'),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class PeoplesSummitStreamPage(Page):
    """Livestream page for a People's Summit event."""
    stream_id = models.CharField(max_length=30)
    facebook_stream_url = models.CharField(max_length=250, null=True, blank=True)
    livestream_title = models.TextField()
    livestream_time = models.TextField()
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    # Marks the stream as finished (presumably switches the template to a
    # replay/over state — TODO confirm template usage).
    is_over = models.BooleanField(default=False)

    content_panels = Page.content_panels + [
        FieldPanel('stream_id'),
        FieldPanel('facebook_stream_url'),
        FieldPanel('livestream_title'),
        FieldPanel('livestream_time'),
        FieldPanel('is_over'),
    ]
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
class PeoplesSummitIndexPage(Page):
    """Index page for People's Summit content; no extra content fields."""
    social_image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
    content_panels = Page.content_panels
    promote_panels = Page.promote_panels + [
        ImageChooserPanel('social_image')
    ]
|
<reponame>gungorbudak/seten-cli<filename>seten/cli.py<gh_stars>0
"""
This file is part of Seten which is released under the MIT License (MIT).
See file LICENSE or go to https://github.com/gungorbudak/seten-cli/blob/master/LICENSE
for full license details.
"""
import os
import argparse
from time import time
from seten.mapping import generate
from seten.enrichment import collect_collections
from seten.enrichment import collect_scores
from seten.enrichment import enrichment_handler
from seten.utils import output_results
def main():
    """CLI entry point for Seten: parse arguments, map binding signals to
    genes, then run enrichment for every input file x gene set collection.

    NOTE: targets Python 2 (print statements throughout).
    """
    # parse terminal arguments
    parser = argparse.ArgumentParser(
        description='Gene set enrichment on \
        CLIP-seq RNA-binding protein binding signals datasets',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # all available gene set collections
    COLLS_CHOICES = [
        'biocarta', 'kegg', 'reactome', # pathways
        'gobp', 'gomf', 'gocc', # gene ontology
        'hpo', # phenotype
        'malacards' # disease
    ]
    # all available organisms
    ORG_CHOICES = [
        'hsa_hg19', 'hsa_hg38', # human builds
        'mmu_mm10', # mouse
        'rno_rn6', # rat
        'dme_dme3', # fruit fly
        'cel_cel235', # worm
        'sce_r6411' # yeast
    ]
    # enrichment, scoring and correction choices
    ENR_CHOICES = ['gse', 'fe']
    SCR_CHOICES = ['min', 'max', 'mean', 'median', 'sum']
    CORR_CHOICES = ['fdr', 'bh', 'by', 'bon']
    parser.add_argument(
        'data',
        help='path to an input file or a directory of input files, \
        input files can be UCSC BED formatted text files, or \
        two-column gene - score pairs'
    )
    parser.add_argument(
        '--colls',
        metavar='LIST',
        default=['kegg', 'gobp'],
        choices=COLLS_CHOICES,
        nargs='+',
        help='gene set collections to do enrichment analysis on'
    )
    parser.add_argument(
        '--coll-file',
        metavar='FILE',
        default=None,
        help='GMT-formatted gene set collection file \
        to do enrichment analysis on'
    )
    # --org and --org-file are alternatives: a built-in organism id or a
    # custom mapping file.
    org_group = parser.add_mutually_exclusive_group()
    org_group.add_argument(
        '--org',
        metavar='ORG',
        default='hsa_hg19',
        choices=ORG_CHOICES,
        help='organism'
    )
    org_group.add_argument(
        '--org-file',
        metavar='FILE',
        default=None,
        help='organism file, Tab-separated rows of \
        chromosomal location and gene name'
    )
    parser.add_argument(
        '--enr-mtd',
        metavar='MTD',
        default='gse',
        choices=ENR_CHOICES,
        help='enrichment method, gene set enrichment (gse) or \
        functional enrichment (fe) using Fisher\'s exact test'
    )
    parser.add_argument(
        '--scr-mtd',
        metavar='MTD',
        default='max',
        choices=SCR_CHOICES,
        help='method to compute a gene level score from \
        multiple binding scores for the same gene'
    )
    parser.add_argument(
        '--corr-mtd',
        metavar='MTD',
        default='fdr',
        choices=CORR_CHOICES,
        help='correction method after Fisher\'s exact test for \
        functional enrichment, Benjamini & Hochberg (fdr or bh), \
        Benjamini & Yekutieli (by) and Bonferroni (bon)'
    )
    parser.add_argument(
        '--pc',
        metavar='PVAL',
        default=0.05,
        type=float,
        help='p-value cutoff for significant gene set enrichment \
        or corrected functional enrichment results'
    )
    parser.add_argument(
        '--gsc',
        metavar='NUM',
        default=350,
        type=int,
        help='gene set cutoff, maximum number of genes in \
        gene sets in selected gene set collections'
    )
    parser.add_argument(
        '--oc',
        metavar='NUM',
        default=5,
        type=int,
        help='overlap cutoff, minimum number of overlapping genes \
        between the dataset and each gene set'
    )
    parser.add_argument(
        '--sc',
        metavar='PVAL',
        default=0.05,
        type=float,
        help='significance cutoff for significant Mann-Whitney U test \
        result in gene set enrichment iterations'
    )
    parser.add_argument(
        '--iter',
        metavar='NUM',
        default=1000,
        type=int,
        help='number of iterations for gene set enrichment analysis'
    )
    parser.add_argument(
        '--proc',
        metavar='NUM',
        default=4,
        type=int,
        help='number of processes to use for analyses'
    )
    parser.add_argument(
        '--out',
        metavar='DIR',
        default='output',
        help='path to the output directory for storing results'
    )
    args = parser.parse_args()
    # timer starts
    start_time = time()
    # collect paths to data files here
    data_paths = []
    # is the data a directory?
    if os.path.isdir(args.data):
        for data_file in os.listdir(args.data):
            data_paths.append(os.path.join(args.data, data_file))
    else:
        data_paths.append(args.data)
    # generate the mapping
    mapping = generate(args.org, args.org_file)
    # collect gene sets and collection size
    colls, colls_size = collect_collections(
        args.org, args.org_file, args.colls, args.coll_file, COLLS_CHOICES)
    print '[#]', colls_size, 'unique genes found in gene set collections'
    # run for each data file
    for data_path in data_paths:
        # collect scores
        scores = collect_scores(
            data_path, mapping=mapping, scr_method=args.scr_mtd)
        print '[#]', len(scores.keys()), 'unique genes found in', data_path
        # start analyses for each collection
        for coll in colls:
            # collection timer
            start_collection_time = time()
            # collect results
            results = enrichment_handler(scores, coll, colls_size,
                gene_set_cutoff=args.gsc, overlap_cutoff=args.oc,
                significance_cutoff=args.sc, iters=args.iter,
                corr_method=args.corr_mtd, enr_method=args.enr_mtd,
                processes=args.proc)
            # output results
            output_results(results, data_path, args.out,
                coll['collectionId'], args.enr_mtd, args.pc)
            # collection timer ends
            print ' '.join([
                '[#] Completed in',
                str(round(time() - start_collection_time, 2)),
                'seconds'
            ])
    # timer ends
    print '[#] Took', round(time() - start_time, 2), 'seconds in total'


if __name__ == '__main__':
    main()
|
<filename>tools/blender26x/mh_utils/import_obj.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Project Name: MakeHuman
# Product Home Page: http://www.makehuman.org/
# Code Home Page: http://code.google.com/p/makehuman/
# Authors: <NAME>
# Script copyright (C) MakeHuman Team 2001-2011
# Coding Standards: See http://sites.google.com/site/makehumandocs/developers-guide
import bpy
import os
import sys
import math
import random
from mathutils import Vector
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
from . import globvars as the
from . import utils
from . import proxy
#----------------------------------------------------------
# importBaseObj(context):
# Simple obj importer which reads only verts, faces, and texture verts
#----------------------------------------------------------
def importBaseObj(context):
    """Import the MakeHuman base mesh (base.obj) and tag it for the add-on."""
    the.Proxy = None
    path = os.path.join(context.scene.MhProgramPath, "data/3dobjs/base.obj")
    obj = importObj(path, context)
    # Bookkeeping properties read by the rest of the add-on.
    obj["NTargets"] = 0
    obj["ProxyFile"] = 0
    obj["ObjFile"] = path
    obj["MhxMesh"] = True
    utils.setupVertexPairs(context, True)
    print("Base object imported")
    return obj
def importBaseMhclo(context):
    """Import the base mesh via its .mhclo proxy definition."""
    the.Proxy = proxy.CProxy()
    mhclo_path = os.path.join(context.scene.MhProgramPath, "data/3dobjs/base.mhclo")
    the.Proxy.read(mhclo_path)
    obj = importObj(the.Proxy.obj_file, context)
    # Bookkeeping properties read by the rest of the add-on.
    obj["NTargets"] = 0
    obj["ProxyFile"] = mhclo_path
    obj["ObjFile"] = the.Proxy.obj_file
    obj["MhxMesh"] = True
    utils.setupVertexPairs(context, True)
    print("Base object imported")
    print(the.Proxy)
    return obj
#----------------------------------------------------------
# importObj(filepath, context):
# Simple obj importer which reads only verts, faces, and texture verts
#----------------------------------------------------------
def importObj(filepath, context):
    """Simple OBJ importer: reads verts, faces, texture verts, groups ("g")
    and material assignments ("usemtl"), then builds a Blender object with
    vertex groups and (optionally) materials.

    Fixes: the file is now opened with a context manager in plain "r" mode —
    the "U" (universal-newlines) flag is deprecated and was removed in
    Python 3.11, and the old code leaked the handle if parsing raised; the
    BMesh probe's bare ``except:`` is narrowed to AttributeError.
    """
    scn = context.scene
    obname = utils.nameFromPath(filepath)
    verts = []
    faces = []
    texverts = []
    texfaces = []
    groups = {}      # group name -> list of face indices
    materials = {}   # material name -> list of face indices
    group = []
    matlist = []
    nf = 0           # running face index
    with open(filepath, "r") as fp:
        print("Importing %s" % filepath)
        for line in fp:
            words = line.split()
            if len(words) == 0:
                pass
            elif words[0] == "v":
                # OBJ is y-up; swap to Blender's z-up (y = -z, z = y).
                verts.append( (float(words[1]), -float(words[3]), float(words[2])) )
            elif words[0] == "vt":
                texverts.append( (float(words[1]), float(words[2])) )
            elif words[0] == "f":
                (f,tf) = parseFace(words)
                faces.append(f)
                if tf:
                    texfaces.append(tf)
                group.append(nf)
                matlist.append(nf)
                nf += 1
            elif words[0] == "g":
                # Switch the current vertex-group bucket.
                name = words[1]
                try:
                    group = groups[name]
                except KeyError:
                    group = []
                    groups[name] = group
            elif words[0] == "usemtl":
                # Switch the current material bucket.
                name = words[1]
                try:
                    matlist = materials[name]
                except KeyError:
                    matlist = []
                    materials[name] = matlist
            else:
                pass
    print("%s successfully imported" % filepath)

    me = bpy.data.meshes.new(obname)
    me.from_pydata(verts, [], faces)
    me.update()
    ob = bpy.data.objects.new(obname, me)

    # Blender 2.63+ exposes mesh.polygons (BMesh); older builds use .faces.
    try:
        me.polygons
        the.BMeshAware = True
        print("Using BMesh")
    except AttributeError:
        the.BMeshAware = False
        print("Not using BMesh")

    if texverts:
        if the.BMeshAware:
            addUvLayerBMesh(obname, me, texverts, texfaces)
        else:
            addUvLayerNoBMesh(obname, me, texverts, texfaces)

    if scn.MhLoadMaterial == 'Groups':
        addMaterials(groups, me, "Group")
    elif scn.MhLoadMaterial == 'Materials':
        addMaterials(materials, me, "Material")

    # One vertex group per OBJ group, weighting every vertex of its faces.
    for (name,group) in groups.items():
        vgrp = ob.vertex_groups.new(name=name)
        if vgrp.name != name:
            print("WARNING: Group name %s => %s" % (name, vgrp.name))
        if the.BMeshAware:
            for nf in group:
                f = me.polygons[nf]
                for v in f.vertices:
                    vgrp.add([v], 1.0, 'REPLACE')
        else:
            for nf in group:
                f = me.faces[nf]
                for v in f.vertices:
                    vgrp.add([v], 1.0, 'REPLACE')

    scn.objects.link(ob)
    ob.select = True
    scn.objects.active = ob
    ob.shape_key_add(name="Basis")
    bpy.ops.object.shade_smooth()
    return ob
def parseFace(words):
    """Parse an OBJ "f" record into 0-based vertex / texture-vertex indices.

    ``words`` is the whitespace-split line, e.g. ["f", "1/1", "2/3", "3/2"].
    Returns ``(face, texface)``; ``texface`` stays empty for entries without
    a texture index ("v" or "v//vn" forms).  Negative (relative) OBJ indices
    are not supported.
    """
    face = []
    texface = []
    for n in range(1, len(words)):
        li = words[n].split("/")
        face.append( int(li[0])-1 )
        # Missing part -> IndexError; empty part ("v//vn") -> ValueError.
        # Narrowed from a bare except, which would also swallow e.g.
        # KeyboardInterrupt.
        try:
            texface.append( int(li[1])-1 )
        except (IndexError, ValueError):
            pass
    return (face, texface)
def addUvLayerBMesh(obname, me, texverts, texfaces):
    """Create a UV layer on mesh ``me`` (BMesh API, Blender >= 2.63) and fill
    it from the OBJ texture faces.

    Loop data is laid out one entry per face corner, so ``n`` advances once
    per corner; a quad contributes a fourth corner.  Assumes ``texfaces``
    matches the mesh's face order and arity — indices past the fourth corner
    are ignored.
    """
    uvtex = me.uv_textures.new(name=obname)
    uvloop = me.uv_layers[-1]
    data = uvloop.data
    n = 0
    for tf in texfaces:
        data[n].uv = texverts[tf[0]]
        n += 1
        data[n].uv = texverts[tf[1]]
        n += 1
        data[n].uv = texverts[tf[2]]
        n += 1
        if len(tf) == 4:
            data[n].uv = texverts[tf[3]]
            n += 1
    return
def addUvLayerNoBMesh(obname, me, texverts, texfaces):
    """Create a UV layer on mesh ``me`` (pre-BMesh API, Blender < 2.63).

    Pre-BMesh UV data is addressed per face with fixed uv1..uv4 slots rather
    than per loop corner.
    """
    uvtex = me.uv_textures.new(name=obname)
    data = uvtex.data
    for n in range(len(texfaces)):
        tf = texfaces[n]
        data[n].uv1 = texverts[tf[0]]
        data[n].uv2 = texverts[tf[1]]
        data[n].uv3 = texverts[tf[2]]
        if len(tf) == 4:
            data[n].uv4 = texverts[tf[3]]
def addMaterials(groups, me, string):
    """Append one material per group/material name to mesh ``me`` and set
    each listed face's material_index accordingly.

    ``groups`` maps name -> list of face indices; ``string`` is only used in
    the warning message ("Group" or "Material").
    """
    mn = 0
    for (name,group) in groups.items():
        # Reuse an existing material of the same name if Blender has one.
        # bpy collections raise KeyError on a missing key — narrowed from a
        # bare except that would also have masked unrelated errors.
        try:
            mat = bpy.data.materials[name]
        except KeyError:
            mat = bpy.data.materials.new(name=name)
        if mat.name != name:
            print("WARNING: %s name %s => %s" % (string, name, mat.name))
        # NOTE(review): this randomizes the color even when the material
        # already existed — confirm that is intended.
        mat.diffuse_color = (random.random(), random.random(), random.random())
        me.materials.append(mat)
        if the.BMeshAware:
            for nf in group:
                f = me.polygons[nf]
                f.material_index = mn
        else:
            for nf in group:
                f = me.faces[nf]
                f.material_index = mn
        mn += 1
    return
def init():
    # Register the scene-level property that controls how OBJ groups /
    # materials are translated into Blender materials on import.
    bpy.types.Scene.MhLoadMaterial = EnumProperty(
        items = [('None','None','None'), ('Groups','Groups','Groups'), ('Materials','Materials','Materials')],
        name="Load as materials",
        default = 'None')
|
import argparse as ap
import sys
import gym
import numpy as np
import pong
import pgagent
import actorcriticagent2
import actorcriticagent
class Params(object):
    """Plain container for agent/environment hyper-parameters.

    All attributes default to None/False and are filled in by get_params().
    """
    def __init__(self):
        self.gamma = None        # discount factor
        self.lr = None           # learning rate (actor)
        self.lr2 = None          # learning rate (critic; ac2 agent only)
        self.er = None           # entropy rate
        self.verbose = False     # render the environment while training
        self.state_dim = None    # observation space dimension
        self.actions_dim = None  # action space dimension
        self.agent = None        # agent name ('ac2' or 'pg')
        # Previously only assigned externally by get_params(); declared here
        # so the attribute always exists (no AttributeError on early access).
        self.swa = None          # stochastic weight averaging flag (pg agent)
def get_params(args, state_dim, actions_dim):
    """Translate parsed CLI arguments plus env dimensions into a Params."""
    p = Params()
    for attr, value in (
        ('gamma', args.gamma),
        ('lr', args.lr),
        ('lr2', args.lr2),
        ('er', args.er),
        ('verbose', args.v),
        ('state_dim', state_dim),
        ('actions_dim', actions_dim),
        ('agent', args.agent),
        ('swa', args.swa),
    ):
        setattr(p, attr, value)
    return p
def get_env(name):
    """Create the training environment.

    'cartpole' -> gym CartPole-v0 with a raised episode cap;
    'pong'     -> the local Pong implementation.

    Raises ValueError for an unknown name; previously this fell through and
    returned None, which surfaced later as a confusing AttributeError.
    """
    if name == 'cartpole':
        env = gym.make('CartPole-v0')
        # Default cap is 200 steps; allow longer episodes.
        env._max_episode_steps = 500
        return env
    elif name == 'pong':
        return pong.Pong(800, 600, int(400/2), int(200/2))
    raise ValueError(f"unknown environment name: {name!r}")
def get_agent(params):
    """Build the agent selected by ``params.agent`` ('ac2' or 'pg').

    Raises ValueError for an unknown agent name; previously this fell
    through and returned None, which surfaced later as an AttributeError.
    """
    if params.agent == 'ac2':
        return actorcriticagent2.DummyAgent(params.state_dim,
                                            params.actions_dim,
                                            params.gamma,
                                            params.lr,
                                            params.lr2,
                                            params.er,
                                            layers_actor=[64, 256, 512, 1024, 2048, 256, 64],
                                            layers_critic=[64, 256, 512, 64])
    if params.agent == 'pg':
        return pgagent.DummyAgent(
            params.state_dim,
            params.actions_dim,
            params.gamma,
            params.lr,
            params.er,
            layers=[64, 256, 512, 1024, 2048, 256, 64],
            swa=params.swa
        )
    raise ValueError(f"unknown agent name: {params.agent!r}")
if __name__ == "__main__":
    parser = ap.ArgumentParser(description="Pong game RL")
    parser.add_argument("--gamma", type=float, required=True)
    parser.add_argument("--lr", help="Learning rate",
                        type=float, required=True)
    parser.add_argument("--lr2", help="Learning rate", type=float)
    parser.add_argument("--er", help="Entropy rate", type=float, required=True)
    parser.add_argument("--env", help="Environment name",
                        type=str, default='cartpole')
    parser.add_argument("--agent", help="Agent name",
                        type=str, default='ac2')
    parser.add_argument("-i", help="Model to load", type=str)
    parser.add_argument("-o", help="Model to save to", type=str)
    parser.add_argument("-v", help="Visualize pong game",
                        action="store_true", default=False)
    parser.add_argument("--swa", help="Enable stochastic weights average",
                        action="store_true", default=False)
    args = parser.parse_args()

    # Setup numpy print options
    np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})

    env = get_env(args.env)
    params = get_params(
        args, env.observation_space.shape[0], env.action_space.n)
    agent = get_agent(params)

    # Resuming from a checkpoint is best-effort.  Narrowed from a bare
    # `except:` so KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        agent.load(args.i)
    except Exception:
        print("########## Could no load model")

    score_history = []
    episodes = 100000
    for i in range(episodes):
        # Roll out one episode, recording transitions for training.
        score = 0
        done = False
        obs = env.reset()
        while not done:
            action, probs = agent.action(obs)
            obs_next, reward, done, info = env.step(action)
            agent.record(obs, action, probs, reward)
            obs = obs_next
            score += reward
            if params.verbose:
                env.render()
        # Periodic best-effort checkpoint every 10 episodes.
        if (i + 1) % 10 == 0:
            try:
                agent.save(args.o)
            except Exception:
                pass
        score_history.append(score)
        agent.train()
        avg_score = np.mean(score_history[-100:])
        print(f"Episode {i}, score {score}, avg_score {avg_score}")
        print(score_history)
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""High level API for extracting OBO content."""
from functools import lru_cache
from typing import List, Mapping, Optional, Tuple, Union
import pandas as pd
from .cache_utils import cached_df, cached_mapping, cached_multidict
from .getters import get
from .identifier_utils import normalize_curie
from .path_utils import prefix_directory_join
from .struct import Reference, TypeDef, get_reference_tuple
__all__ = [
# Nomenclature
'get_name_id_mapping',
'get_id_name_mapping',
# Synonyms
'get_id_synonyms_mapping',
# Properties
'get_properties_df',
'get_filtered_properties_df',
'get_filtered_properties_mapping',
# Relations
'get_filtered_relations_df',
'get_id_multirelations_mapping',
'get_relations_df',
# Xrefs
'get_filtered_xrefs',
'get_xrefs_df',
]
def get_name_by_curie(curie: str) -> Optional[str]:
    """Look up the name for a CURIE; None when it cannot be normalized."""
    prefix, identifier = normalize_curie(curie)
    if not prefix or not identifier:
        return None
    return get_name(prefix, identifier)
def get_name(prefix: str, identifier: str) -> Optional[str]:
    """Look up the name of the entity ``prefix:identifier``; None if unknown."""
    id_to_name = get_id_name_mapping(prefix)
    return id_to_name.get(identifier)
@lru_cache()
def get_id_name_mapping(prefix: str, **kwargs) -> Mapping[str, str]:
    """Get an identifier to name mapping for the OBO file.

    Memoized in-process by ``lru_cache`` and persisted to
    ``<prefix>/cache/names.tsv`` by ``cached_mapping``, so the expensive OBO
    parse happens at most once.
    """
    path = prefix_directory_join(prefix, 'cache', "names.tsv")

    @cached_mapping(path=path, header=[f'{prefix}_id', 'name'])
    def _get_id_name_mapping() -> Mapping[str, str]:
        obo = get(prefix, **kwargs)
        return obo.get_id_name_mapping()

    return _get_id_name_mapping()
@lru_cache()
def get_name_id_mapping(prefix: str, **kwargs) -> Mapping[str, str]:
    """Get a name to identifier mapping for the OBO file (inverse of
    get_id_name_mapping; later duplicates of a name win)."""
    inverted = {}
    for identifier, name in get_id_name_mapping(prefix=prefix, **kwargs).items():
        inverted[name] = identifier
    return inverted
def get_id_synonyms_mapping(prefix: str, **kwargs) -> Mapping[str, List[str]]:
    """Get the OBO file and output a synonym dictionary.

    The identifier -> synonyms multidict is persisted to
    ``<prefix>/cache/synonyms.tsv`` by ``cached_multidict``.
    """
    path = prefix_directory_join(prefix, 'cache', "synonyms.tsv")
    header = [f'{prefix}_id', 'synonym']

    @cached_multidict(path=path, header=header)
    def _get_multidict() -> Mapping[str, List[str]]:
        obo = get(prefix, **kwargs)
        return obo.get_id_synonyms_mapping()

    return _get_multidict()
def get_properties_df(prefix: str, **kwargs) -> pd.DataFrame:
    """Extract all properties as a DataFrame (file-cached as TSV)."""
    cache_path = prefix_directory_join(prefix, 'cache', "properties.tsv")

    @cached_df(path=cache_path, dtype=str)
    def _build() -> pd.DataFrame:
        df = get(prefix, **kwargs).get_properties_df()
        df.dropna(inplace=True)
        return df

    return _build()
def get_filtered_properties_mapping(prefix: str, prop: str, **kwargs) -> Mapping[str, str]:
    """Extract a single property per term as a dict (file-cached as TSV)."""
    cache_path = prefix_directory_join(prefix, 'cache', 'properties', f"{prop}.tsv")

    @cached_mapping(path=cache_path, header=[f'{prefix}_id', prop])
    def _build() -> Mapping[str, str]:
        return get(prefix, **kwargs).get_filtered_properties_mapping(prop)

    return _build()
def get_filtered_properties_df(prefix: str, prop: str, **kwargs) -> pd.DataFrame:
    """Extract a single property per term as a DataFrame (file-cached)."""
    cache_path = prefix_directory_join(prefix, 'cache', 'properties', f"{prop}.tsv")

    @cached_df(path=cache_path, dtype=str)
    def _build() -> pd.DataFrame:
        return get(prefix, **kwargs).get_filtered_properties_df(prop)

    return _build()
def get_relations_df(prefix: str, **kwargs) -> pd.DataFrame:
    """Get all relations from the OBO as a DataFrame (file-cached)."""
    cache_path = prefix_directory_join(prefix, 'cache', 'relations.tsv')

    @cached_df(path=cache_path, dtype=str)
    def _build() -> pd.DataFrame:
        return get(prefix, **kwargs).get_relations_df()

    return _build()
def get_filtered_relations_df(
    prefix: str,
    relation: Union[Reference, TypeDef, Tuple[str, str]],
    **kwargs,
) -> pd.DataFrame:
    """Get all rows of one relation as a DataFrame (file-cached per relation)."""
    relation = get_reference_tuple(relation)
    cache_path = prefix_directory_join(prefix, 'cache', 'relations', f'{relation[0]}:{relation[1]}.tsv')

    @cached_df(path=cache_path, dtype=str)
    def _build() -> pd.DataFrame:
        return get(prefix, **kwargs).get_filtered_relations_df(relation)

    return _build()
def get_id_multirelations_mapping(prefix: str, type_def: TypeDef, **kwargs) -> Mapping[str, List[Reference]]:
    """Map each identifier to its targets for the given relation type."""
    obo = get(prefix, **kwargs)
    return obo.get_id_multirelations_mapping(type_def)
def get_filtered_xrefs(prefix: str, xref_prefix: str, **kwargs) -> Mapping[str, str]:
    """Get xrefs into one target namespace as a dict (file-cached as TSV)."""
    cache_path = prefix_directory_join(prefix, 'cache', 'xrefs', f"{xref_prefix}.tsv")
    columns = [f'{prefix}_id', f'{xref_prefix}_id']

    @cached_mapping(path=cache_path, header=columns)
    def _build() -> Mapping[str, str]:
        return get(prefix, **kwargs).get_filtered_xrefs_mapping(xref_prefix)

    return _build()
def get_xrefs_df(prefix: str, **kwargs) -> pd.DataFrame:
    """Get all xrefs as a DataFrame (file-cached as TSV)."""
    cache_path = prefix_directory_join(prefix, 'cache', 'xrefs.tsv')

    @cached_df(path=cache_path, dtype=str)
    def _build() -> pd.DataFrame:
        return get(prefix, **kwargs).get_xrefs_df()

    return _build()
|
<reponame>zhangxl97/leetcode<filename>1_100/Q_51_60.py<gh_stars>1-10
from typing import List
from tabulate import tabulate
from time import time
class Solution:
    """Solutions to LeetCode problems 51-60."""

    # 51. N-Queens
    def solveNQueens(self, n: int) -> List[List[str]]:
        """Return every placement of n non-attacking queens.

        Each solution is a list of n strings, one per row, with 'Q' for the
        queen and '.' elsewhere.
        """
        return [
            ['.' * col + 'Q' + '.' * (n - col - 1) for col in cols]
            for cols in self._queen_column_assignments(n)
        ]

    # 52. N-Queens II
    def totalNQueens(self, n: int) -> int:
        """Count the distinct solutions to the n-queens puzzle."""
        # Shares the backtracking core with solveNQueens instead of duplicating it.
        return sum(1 for _ in self._queen_column_assignments(n))

    @staticmethod
    def _queen_column_assignments(n: int):
        """Yield one tuple of column indices (row 0..n-1) per n-queens solution."""
        used_cols = set()
        used_sum = set()   # row + col identifies an anti-diagonal
        used_diff = set()  # row - col identifies a diagonal
        placement: List[int] = []

        def backtrack(row: int):
            if row == n:
                yield tuple(placement)
                return
            for col in range(n):
                if col in used_cols or (row + col) in used_sum or (row - col) in used_diff:
                    continue
                used_cols.add(col)
                used_sum.add(row + col)
                used_diff.add(row - col)
                placement.append(col)
                yield from backtrack(row + 1)
                placement.pop()
                used_cols.remove(col)
                used_sum.remove(row + col)
                used_diff.remove(row - col)

        yield from backtrack(0)

    # 53. Maximum Subarray, Easy
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any contiguous subarray (Kadane's algorithm).

        Assumes nums is non-empty, per the problem statement.
        """
        best = ending_here = nums[0]
        for value in nums[1:]:
            # Either extend the best subarray ending at the previous index,
            # or start a fresh subarray at this element.
            ending_here = max(ending_here + value, value)
            best = max(best, ending_here)
        return best

    # 54. Spiral Matrix, Medium
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return all elements of the matrix in clockwise spiral order."""
        order: List[int] = []
        while matrix:
            # Take the top row, then rotate the remainder counter-clockwise
            # (transpose + vertical flip) so the next edge becomes the top row.
            order.extend(matrix[0])
            rotated = [list(column) for column in zip(*matrix[1:])]
            rotated.reverse()
            matrix = rotated
        return order

    # 55. Jump Game, Medium
    def canJump(self, nums: List[int]) -> bool:
        """Return True if the last index is reachable starting from index 0."""
        reach = 0  # furthest index reachable so far
        last = len(nums) - 1
        for i, step in enumerate(nums):
            if i > reach:
                # This index (and everything past it) is unreachable.
                return False
            reach = max(reach, i + step)
            if reach >= last:
                return True
        return reach >= last

    # 56. Merge Intervals, Medium
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge all overlapping intervals; result is sorted by start."""
        intervals.sort()
        merged: List[List[int]] = []
        for start, end in intervals:
            if merged and start <= merged[-1][1]:
                # Overlaps (or touches) the previous interval: extend it.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged

    # 57. Insert Interval, Medium
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        """Insert newInterval and merge any overlaps.

        Reuses merge() -- the original body was a verbatim copy of it.
        """
        intervals.append(newInterval)
        return self.merge(intervals)

    # 58. Length of Last Word, Easy
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last whitespace-separated word, or 0 if none."""
        # str.split() with no argument handles runs of whitespace and an
        # all-blank string (returns []), unlike split(' ').
        words = s.split()
        return len(words[-1]) if words else 0

    # 59. Spiral Matrix II
    def generateMatrix(self, n: int) -> List[List[int]]:
        """Return an n x n matrix filled with 1..n*n in clockwise spiral order."""
        result = [[0] * n for _ in range(n)]
        # Walk a coordinate grid in spiral order (same peel-and-rotate trick as
        # spiralOrder) and write consecutive values into those positions.
        coords = [[(r, c) for c in range(n)] for r in range(n)]
        value = 1
        while coords:
            for r, c in coords[0]:
                result[r][c] = value
                value += 1
            rotated = [list(column) for column in zip(*coords[1:])]
            rotated.reverse()
            coords = rotated
        return result

    # 60. Permutation Sequence
    def getPermutation(self, n: int, k: int) -> str:
        """Return the k-th (1-indexed) permutation of the digits 1..n.

        Uses the factorial number system: the leading digit is fixed by
        k // (n-1)!, and the remainder recurses on the remaining digits.
        """
        import math

        digits = [str(d) for d in range(1, n + 1)]
        remainder = k - 1  # switch to 0-indexed rank
        result = []
        for block in range(n - 1, -1, -1):
            index, remainder = divmod(remainder, math.factorial(block))
            result.append(digits.pop(index))
        return ''.join(result)
def main():
    """Ad-hoc driver used while working through the problems one at a time."""
    solver = Solution()
    # Problems 51-59 were exercised here previously; currently checking 60.
    print(solver.getPermutation(4, 9))


if __name__ == "__main__":
    main()
|
from IPython.core.display import HTML
from IPython.core.display import display
import os
import copy
from qtpy.QtWidgets import QMainWindow, QFileDialog
from qtpy import QtGui
from collections import OrderedDict
from __code import load_ui
from .initialization import Initializer
from .event_handler import MetadataTableHandler
from __code.metadata_overlapping_images.export_images import ExportImages
from .display import DisplayImages, DisplayScalePyqtUi, DisplayMetadataPyqtUi
from .export_table import ExportTable
from __code.metadata_overlapping_images import HELP_PAGE
class MetadataOverlappingImagesUi(QMainWindow):
    """Main window of the "metadata overlapping images" notebook tool.

    Displays a stack of images with optional overlays (two metadata text
    labels, a scale bar and a metadata graph) and can export the annotated
    images or the metadata table.  Nearly every slot below delegates to a
    helper class (Initializer, MetadataTableHandler, DisplayImages,
    DisplayScalePyqtUi, DisplayMetadataPyqtUi, ExportImages, ExportTable).
    """

    # Table column indices used as the x/y axes of the metadata graph.
    x_axis_column_index = 0
    y_axis_column_index = 2

    # Glyph shown in front of the x/y-axis context-menu entries.
    xy_axis_menu_logo = {'enable': u"\u2713 ", # \u25CF (dark circle)
                         'disable': " "}

    # Per-column rules for reformatting raw metadata strings: strip a leading
    # and/or trailing substring, then apply up to two arithmetic operations.
    # Keys are table column indices; index_of_metadata == -1 means "unset".
    metadata_operation = {0: {"first_part_of_string_to_remove": "",
                              "last_part_of_string_to_remove": "",
                              "math_1": "+",
                              "value_1": "",
                              "math_2": "+",
                              "value_2": "",
                              "index_of_metadata": -1,
                              },
                          2: {"first_part_of_string_to_remove": "",
                              "last_part_of_string_to_remove": "",
                              "math_1": "+",
                              "value_1": "",
                              "math_2": "+",
                              "value_2": "",
                              "index_of_metadata": -1,
                              },
                          3: {"first_part_of_string_to_remove": "",
                              "last_part_of_string_to_remove": "",
                              "math_1": "+",
                              "value_1": "",
                              "math_2": "+",
                              "value_2": "",
                              "index_of_metadata": -1,
                              },
                          }

    data_dict = {}       # working images/metadata dictionary (see __init__)
    data_dict_raw = {}   # pristine deep copy used for move/rotate operations
    timestamp_dict = {}
    default_scale_roi = None
    rotation_angle = 0
    histogram_level = []

    # scale pyqtgraph
    scale_pyqt_ui = None
    scale_legend_pyqt_ui = None
    metadata1_pyqt_ui = None # metadata 1 text
    metadata2_pyqt_ui = None # metadata 2 text
    graph_pyqt_ui = None

    # size of tables
    guide_table_width = [40, 400, 150, 150]

    live_image = []
    display_ui = []

    # guide and profile pg ROIs
    list_guide_pyqt_roi = list()
    list_profile_pyqt_roi = list()
    list_table_widget_checkbox = list()

    list_metadata = []
    dict_list_metadata = OrderedDict() # {0: '10', 1: 'hfir', ...}

    # NOTE(review): the list assignment below is immediately shadowed by the
    # dict assignment that follows; only the dict form is ever visible.
    list_scale_units = ["mm", u"\u00B5m", "nm"]
    list_scale_units = {'string': ["mm", u"\u00B5m", "nm"],
                        'html': ["mm", "<span>µm</span>", "nm"]}

    # Named colors in the three formats the overlays need.
    rgba_color = {'white': (255, 255, 255, 255, None),
                  'red': (255, 0, 0, 255, None),
                  'green': (0, 255, 0, 255, None),
                  'blue': (0, 0, 255, 255, None),
                  'black': (0, 0, 0, 255, None)}
    rgb_color = {'white': (255, 255, 255),
                 'red': (255, 0, 0),
                 'green': (0, 255, 0),
                 'blue': (0, 0, 255),
                 'black': (0, 0, 0)}
    html_color = {'white': "#FFF",
                  'red': "#F00",
                  'green': "#0F0",
                  'blue': "#00F",
                  'black': "#000"}

    # ui of pop up window that allows to define metadata column value (format it)
    metadata_string_format_ui = None

    def __init__(self, parent=None, working_dir='', data_dict=None):
        """Load the .ui file, run all Initializer steps and show the first image.

        parent      -- optional QWidget parent
        working_dir -- base folder offered in the export file dialogs
        data_dict   -- normalization data dictionary (layout in comment below)
        """
        display(HTML('<span style="font-size: 20px; color:blue">Check UI that popped up \
(maybe hidden behind this browser!)</span>'))

        super(MetadataOverlappingImagesUi, self).__init__(parent)
        # The .ui file lives three directory levels up from this module, in ui/.
        ui_full_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
                                    os.path.join('ui', 'ui_metadata_overlapping_images.ui'))
        self.ui = load_ui(ui_full_path, baseinstance=self)
        self.setWindowTitle("Metadata Overlapping Images")

        self.working_dir = working_dir
        self.data_dict = data_dict # Normalization data dictionary {'file_name': [],
                                   #'data': [[...],[...]]],
                                   #'metadata': [],
                                   #'shape': {}}

        # untouched array of images (used to move and rotate images)
        self.data_dict_raw = copy.deepcopy(data_dict)

        # initialization
        o_initialization = Initializer(parent=self)
        o_initialization.pyqtgraph()
        o_initialization.parameters()
        o_initialization.statusbar()
        o_initialization.table()
        o_initialization.widgets()
        o_initialization.event()

        # display first images
        self.slider_file_changed(0)
        self.text_metadata_1_enable_pressed(self.ui.checkBox.isChecked())
        self.text_metadata_2_enable_pressed(self.ui.checkBox_2.isChecked())

    # ========================================================================================
    # MAIN UI EVENTs

    def metadata_table_right_click(self, position):
        # Context menu on the metadata table (handled by MetadataTableHandler).
        o_metadata_table = MetadataTableHandler(parent=self)
        o_metadata_table.right_click(position)

    def previous_image_button_clicked(self):
        self.change_slider(offset=-1)
        self.update_metadata_pyqt_ui()

    def next_image_button_clicked(self):
        self.change_slider(offset = +1)
        self.update_metadata_pyqt_ui()

    def help_button_clicked(self):
        # Open the online help page in the default browser.
        import webbrowser
        webbrowser.open(HELP_PAGE)

    def closeEvent(self, event=None):
        # Close the metadata-formatting popup as well, if it was ever opened.
        if self.metadata_string_format_ui:
            self.metadata_string_format_ui.close()

    def slider_file_changed(self, slider_value):
        """Refresh the display when the file slider selects another image."""
        self.display_image()
        self.ui.image_slider_value.setText(str(slider_value))
        self.check_status_next_prev_image_button()
        self.update_metadata_pyqt_ui()

    def slider_file_clicked(self):
        current_slider_value = self.ui.file_slider.value()
        self.slider_file_changed(current_slider_value)
        self.update_metadata_pyqt_ui()

    def scale_checkbox_clicked(self, status):
        # Toggle the scale-bar widgets and redraw the scale overlay.
        self.ui.scale_groupbox.setEnabled(status)
        self.ui.scale_position_frame.setEnabled(status)
        o_display = DisplayScalePyqtUi(parent=self)
        o_display.run()

    def metadata_checkbox_clicked(self, status):
        # Toggle all metadata-related widgets; the graph group stays enabled
        # only if its own checkbox is also checked.
        self.ui.metadata_groupbox.setEnabled(status)
        self.ui.metadata_position_frame.setEnabled(status)
        self.ui.enable_graph_checkbox.setEnabled(status)
        self.ui.text_graph_tabWidget.setEnabled(status)
        self.ui.toolBox.setEnabled(status)
        if status:
            self.ui.graph_groupBox.setEnabled(self.ui.enable_graph_checkbox.isChecked())
        else:
            self.ui.graph_groupBox.setEnabled(False)
        o_display = DisplayMetadataPyqtUi(parent=self)
        o_display.run()

    def select_metadata_checkbox_clicked(self, status):
        self.ui.select_metadata_combobox.setEnabled(status)
        self.update_metadata_pyqt_ui()

    # --- The slots below all just redraw the metadata overlay after a widget change. ---

    def font_size_slider_pressed(self):
        self.update_metadata_pyqt_ui()

    def font_size_slider_moved(self, value):
        self.update_metadata_pyqt_ui()

    def graph_font_size_slider_pressed(self):
        self.update_metadata_pyqt_ui()

    def graph_font_size_slider_moved(self, value):
        self.update_metadata_pyqt_ui()

    def metadata_list_changed(self, index, column):
        o_event = MetadataTableHandler(parent=self)
        o_event.metadata_list_changed(index, column)

    # --- Scale-bar slots: every change redraws the scale overlay. ---

    def scale_orientation_clicked(self):
        # Orientation changes the maximum allowed size, so recompute it first.
        o_init = Initializer(parent=self)
        o_init.set_scale_spinbox_max_value()
        self.update_scale_pyqt_ui()

    def scale_thickness_value_changed(self, value):
        self.update_scale_pyqt_ui()

    def scale_color_changed(self, value):
        self.update_scale_pyqt_ui()

    def scale_size_changed(self, value):
        self.update_scale_pyqt_ui()

    def scale_real_size_changed(self):
        """update the label of the scale"""
        self.update_scale_pyqt_ui()

    def scale_units_changed(self):
        self.update_scale_pyqt_ui()

    def scale_position_moved(self, new_value):
        self.update_scale_pyqt_ui()

    def scale_position_clicked(self):
        self.update_scale_pyqt_ui()

    # --- Metadata text / graph slots: every change redraws the metadata overlay. ---

    def metadata_position_moved(self, new_value):
        self.update_metadata_pyqt_ui()

    def metadata_position_clicked(self):
        self.update_metadata_pyqt_ui()

    def metadata2_position_moved(self, new_value):
        self.update_metadata_pyqt_ui()

    def metadata2_position_clicked(self):
        self.update_metadata_pyqt_ui()

    def metadata_color_changed(self, value):
        self.update_metadata_pyqt_ui()

    def metadata_name_return_pressed(self):
        self.update_metadata_pyqt_ui()

    def graph_position_moved(self, value):
        self.update_metadata_pyqt_ui()

    def graph_position_clicked(self):
        self.update_metadata_pyqt_ui()

    def graph_color_changed(self, value):
        self.update_metadata_pyqt_ui()

    def graph_axis_label_changed(self, new_value):
        self.update_metadata_pyqt_ui()

    def metadata_text_or_graph_clicked(self):
        # Show the graph-size controls only when the "graph" option is selected.
        status = self.ui.metadata_graph_option.isChecked()
        self.ui.metadata_graph_size_label.setVisible(status)
        self.ui.metadata_graph_size_slider.setVisible(status)
        self.update_metadata_pyqt_ui()

    def metadata_graph_size_pressed(self):
        self.update_metadata_pyqt_ui()

    def metadata_graph_size_moved(self, slider_value):
        self.update_metadata_pyqt_ui()

    def table_cell_changed(self, row, column):
        self.update_metadata_pyqt_ui()

    def export_table_clicked(self):
        # Ask for an output folder, then dump the metadata table there.
        _export_folder = QFileDialog.getExistingDirectory(self,
                                                          directory=os.path.dirname(self.working_dir),
                                                          caption="Select Output Folder",
                                                          options=QFileDialog.ShowDirsOnly)
        QtGui.QGuiApplication.processEvents()
        if _export_folder:
            o_export = ExportTable(parent=self,
                                   export_folder=_export_folder)
            o_export.run()

    def export_button_clicked(self):
        # Ask for an output folder, then export the annotated images there.
        _export_folder = QFileDialog.getExistingDirectory(self,
                                                          directory=os.path.dirname(self.working_dir),
                                                          caption="Select Output Folder",
                                                          options=QFileDialog.ShowDirsOnly)
        QtGui.QGuiApplication.processEvents()
        if _export_folder:
            o_export = ExportImages(parent=self,
                                    export_folder=_export_folder)
            o_export.run()

    # def import_table_pressed(self):
    #     _table_file = QFileDialog.getOpenFileName(self,
    #                                               directory=os.path.dirname(self.working_dir),
    #                                               caption="Select Input File")
    #     QtGui.QGuiApplication.processEvents()
    #
    #     if type(_table_file) is tuple:
    #         _table_file = _table_file[0]
    #
    #     if _table_file:
    #         o_import = TableLoader(parent=self,
    #                                filename=str(_table_file))
    #         o_import.load_table()
    #         o_import.populate()
    #         self.update_metadata_pyqt_ui()

    def enable_graph_button_clicked(self, new_state):
        # Toggle the graph group and its position controls, then redraw.
        self.ui.graph_groupBox.setEnabled(new_state)
        self.ui.metadata_position_frame_3.setEnabled(new_state)
        self.ui.graph_position_y.setEnabled(new_state)
        self.ui.graph_position_x.setEnabled(new_state)
        self.ui.label_15.setEnabled(new_state)
        self.ui.label_16.setEnabled(new_state)
        self.update_metadata_pyqt_ui()

    def display_red_vertical_marker_clicked(self):
        self.update_metadata_pyqt_ui()

    def text_metadata_1_enable_pressed(self, status):
        # Toggle every widget belonging to metadata text #1, then redraw.
        self.ui.metadata_position_frame.setEnabled(status)
        self.ui.metadata_position_x.setEnabled(status)
        self.ui.metadata_position_y.setEnabled(status)
        self.ui.label_10.setEnabled(status)
        self.ui.label_11.setEnabled(status)
        self.ui.label_14.setEnabled(status)
        self.ui.font_size_slider.setEnabled(status)
        self.ui.prefix_label_1.setEnabled(status)
        self.ui.suffix_label_1.setEnabled(status)
        self.ui.prefix_lineEdit_1.setEnabled(status)
        self.ui.suffix_lineEdit_1.setEnabled(status)
        self.ui.metadata_1_name_groupBox.setEnabled(status)
        self.update_metadata_pyqt_ui()

    def text_metadata_2_enable_pressed(self, status):
        # Same as above, for metadata text #2.
        self.ui.metadata_position_frame_2.setEnabled(status)
        self.ui.metadata_position_x_2.setEnabled(status)
        self.ui.metadata_position_y_2.setEnabled(status)
        self.ui.label_18.setEnabled(status)
        self.ui.label_19.setEnabled(status)
        self.ui.label_20.setEnabled(status)
        self.ui.font_size_slider_2.setEnabled(status)
        self.ui.prefix_label_2.setEnabled(status)
        self.ui.suffix_label_2.setEnabled(status)
        self.ui.prefix_lineEdit_2.setEnabled(status)
        self.ui.suffix_lineEdit_2.setEnabled(status)
        self.ui.metadata_2_name_groupBox.setEnabled(status)
        self.update_metadata_pyqt_ui()

    def metadata_1_suffix_prefix_changed(self, new_text):
        self.update_metadata_pyqt_ui()

    def metadata_2_suffix_prefix_changed(self, new_text):
        self.update_metadata_pyqt_ui()

    # ========================================================================================

    def update_metadata_pyqt_ui(self):
        """Clear and redraw the metadata text/graph overlay."""
        o_display = DisplayMetadataPyqtUi(parent=self)
        o_display.clear_pyqt_items()
        o_display.run()

    def update_scale_pyqt_ui(self):
        """Clear and redraw the scale-bar overlay."""
        # if self.scale_pyqt_ui:
        #     self.ui.image_view.removeItem(self.scale_pyqt_ui)
        # if self.scale_legend_pyqt_ui:
        #     self.ui.image_view.removeItem(self.scale_legend_pyqt_ui)
        o_display = DisplayScalePyqtUi(parent=self)
        o_display.clear_pyqt_items()
        o_display.run()

    def display_image(self, recalculate_image=False):
        """display the image selected by the file slider"""
        DisplayImages(parent=self, recalculate_image=recalculate_image)

    def check_status_next_prev_image_button(self):
        """this will enable or not the prev or next button next to the slider file image"""
        current_slider_value = self.ui.file_slider.value()
        min_slider_value = self.ui.file_slider.minimum()
        max_slider_value = self.ui.file_slider.maximum()

        _prev = True
        _next = True

        if current_slider_value == min_slider_value:
            _prev = False
        elif current_slider_value == max_slider_value:
            _next = False

        self.ui.previous_image_button.setEnabled(_prev)
        self.ui.next_image_button.setEnabled(_next)

    def change_slider(self, offset=+1):
        """Move the file slider by *offset* without re-triggering its signal."""
        self.ui.file_slider.blockSignals(True)
        current_slider_value = self.ui.file_slider.value()
        new_row_selected = current_slider_value + offset
        self.ui.image_slider_value.setText(str(new_row_selected))
        self.ui.file_slider.setValue(new_row_selected)
        self.check_status_next_prev_image_button()
        self.display_image()
        self.ui.file_slider.blockSignals(False)
|
<filename>eemt/eemt/parser.py
from subprocess import Popen, PIPE
from math import pow
import os
import re
import sys
import math
import decimal
class TiffParser(object):
    """Extract size, corner coordinates and projection info from GeoTIFFs via gdalinfo.

    NOTE(review): this module is Python 2 only -- it uses print statements,
    xrange and backtick repr syntax, all of which are syntax errors in
    Python 3.
    """

    def __init__(self):
        """Initialize empty containers; call loadTiff()/read_meta() to populate them."""
        # store file name
        self.fileName = ""
        # coords list [upleft, lowerleft, upright, lowerright, center]
        # NOTE(review): loadTiff() actually appends (lower-right, upper-left)
        # pairs -- the comment above appears stale; confirm against callers.
        self.projCoords = list()
        self.deciCoords = list()
        # number of x and y pixels
        self.nPixelX = 0
        self.nPixelY = 0
        # projection info filled in by read_meta(): keys 'zone' and 'region'
        self.proj_info = dict()

    def getDecimalCoords(self):
        # (lat, lon) tuples in decimal degrees, built by loadTiff()
        return self.deciCoords

    def getProjCoords(self):
        # projected corner coordinate strings, built by loadTiff()
        return self.projCoords

    def getName(self):
        # base name of the loaded file without its .tif suffix
        return self.fileName

    def getProjInfo(self):
        return self.proj_info

    def loadTiff(self, tiffFile):
        """ Read dem file info via gdalinfo command."""
        # store file name
        self.fileName = os.path.basename(tiffFile.split('.tif')[0])
        # initialize daymetR package
        cmdInfo = ['gdalinfo', tiffFile]
        # Regular expressions for upper-left coords extraction; the W/N suffixes
        # mean these assume data in the western/northern hemispheres (Daymet).
        ulCoords = re.compile(r"""Upper\s+Left\s+\(\s*(\-?\d+\.\d+),\s(-?\d+\.\d+)\)\s+\(-?(\d+)d\s*(\d+)\'(\s?\d+\.\d+)\"W,\s-?(\d+)d\s*(\d+)\'(\s?\d+\.\d+)\"N""", re.X | re.I)
        # Regular expressions for lower-right coords extraction
        lrCoords = re.compile(r"""Lower\s+Right\s+\(\s*(\-?\d+\.\d+),\s(-?\d+\.\d+)\)\s+\(-?(\d+)d\s*(\d+)\'(\s?\d+\.\d+)\"W,\s-?(\d+)d\s*(\d+)\'(\s?\d+\.\d+)\"N""", re.X | re.I)
        # Execute the command
        process = Popen(cmdInfo, stdout=PIPE, shell=False)
        output, err = process.communicate()
        if process.returncode != 0:
            raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % (cmdInfo, process.returncode, output, err))
        # Process gdalinfo output by lines, scanning bottom-up so the corner
        # coordinate lines are handled before the "Size is" line terminates the loop.
        output = output.split('\n')
        lx = uly = rx = lry = 0
        for i in xrange(len(output) - 1, -1, -1):
            if output[i].startswith("Size is"):
                # Extract # of pixels along X,Y axis
                self.nPixelX = int(output[i].split(' ')[2][:-1])
                self.nPixelY = int(output[i].split(' ')[3])
                break
            if output[i].startswith("Upper Left"):
                temp = output[i].split('(')
                # "Lower Right" is three lines below "Upper Left" in gdalinfo output.
                temp2 = output[i+3].split('(')
                lx = float(temp[1].split(',')[0].strip())
                uly = float(temp[1].split(',')[1].split(')')[0].strip())
                rx = float(temp2[1].split(',')[0].strip())
                lry = float(temp2[1].split(',')[1].split(')')[0].strip())
                # Backticks are Python 2 repr(): stores each corner as an "x,y" string.
                bottom_right = `rx`+","+`lry`
                top_left = `lx`+","+`uly`
                self.projCoords.append((bottom_right,top_left))
            match = lrCoords.search(output[i])
            if match:
                lat = 0.0
                lon = 0.0
                # caculate lon & lat in decimal: degrees + minutes/60 + seconds/3600
                for j in range(3):
                    lon -= float(match.group(j + 3)) / pow(60, j)
                    lat += float(match.group(j + 6)) / pow(60, j)
                self.deciCoords.append((lat, lon))
                # upper left is three lines above
                match = ulCoords.search(output[i-3])
                lat = 0.0
                lon = 0.0
                for j in range(3):
                    lon -= float(match.group(j + 3)) / pow(60, j)
                    lat += float(match.group(j + 6)) / pow(60, j)
                self.deciCoords.append((lat, lon))

    def read_meta(self,dem):
        """
        Uses gdalinfo output to determine the projection zone and region of the original data.
        Then passes this information to convert_opentopo() to convert the data to Daymet's projection.

        NOTE(review): the convert_opentopo() call is commented out below, so this
        method currently only fills self.proj_info -- docstring is aspirational.
        """
        # Try opening the file and searching
        #proj_info = dict()
        # Add the filenames to the end of the list
        command = ['gdalinfo', dem]
        # Execute the gdalinfo command
        process = Popen(command, stdout=PIPE, shell=False)
        # Check for errors
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            print stderr
            print 'Failed to get original projection information from input data. Aborting'
            sys.exit(1)
        stdout = stdout.split('\n')
        for line in stdout:
            # Zone Information
            if line.startswith('PROJCS'):
                # Remove the punctation and break the individual words apart
                line = line.translate(None, ',[]"/')
                line = line.split()
                line = line[-1]
                # Remove the last character for North
                self.proj_info['zone'] = line[:-1]
            # Region Information
            elif line.startswith(' AUTHORITY'):
                # Strip out the punctuation and split into space separated words
                line = ' '.join(re.split('[,"]', line))
                line = line.split()
                print(line[-2])
                self.proj_info['region'] = line[-2]
            # NOTE(review): this branch repeats the condition above verbatim,
            # so it is unreachable dead code.
            elif line.startswith(' AUTHORITY'):
                # Strip out the punctuation and split into space separated words
                line = ' '.join(re.split('[,"]', line))
                line = line.split()
                print(line[-2])
                self.proj_info['region'] = line[-2]
        # Convert the DEMs to Daymet's projection
        print 'Converting DEM to Daymet\'s projection.'
        #convert_opentopo(proj_info)
        print 'Finished warping OpenTopography.\n'

    def convert_opentopo(self,proj_dir,tiff):
        """
        Creates another .tif file with the name .converted.tif for every .tif file located
        in the passed directory.The converted.tif file is supposed to be converted into the Daymet
        custom projection. Depends on theread_meta() method executing correctly. It doesn't check
        for the converted files before executing. Once the files are generated, script will call
        gdalinfo and try to parse the new coordinates from the output. The corner coordinates are
        returned in a list. Since everything is related to Daymet, it assumes the data is in the
        North and West hemispheres.
        """
        # Command string to convert the DEM files from Open Topography to DAYMET's projection
        #command = ['gdalwarp', '-s_srs', 'EPSG:' + self.proj_info['region'], '-overwrite', '-t_srs',"+proj=lcc +lat_1=25 +lat_2=60 +lat_0=42.5 +lon_0=-100 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs",'-r', 'bilinear', '-of', 'GTiff', '-tr', '10', '-10']
        #Warp DEM to WGS84 Web Mercator Projection
        # NOTE(review): commands below are built as strings and run with
        # shell=True -- unsafe if the paths can contain shell metacharacters.
        dem_file=tiff
        command = 'gdalwarp -overwrite -t_srs EPSG:3857 -r bilinear -of GTiff -dstnodata nan '
        dem_temp=proj_dir + "/" + "temp_warped.tif"
        command=command+ " " + dem_file
        command=command+ " " + dem_temp
        print(command)
        process = Popen(command, stdout=PIPE,shell=True)
        stdout,stderr = process.communicate()
        #Compress Output
        dem_output=proj_dir + "/" + self.getName() + "_converted.tif"
        command="gdal_translate -co compress=LZW " + dem_temp + " " + dem_output
        print(os.getcwd())
        print(command)
        process=Popen(command,stdout=PIPE,shell=True)
        stdout,stderr = process.communicate()
        #Remove the temporary warped file
        # NOTE(review): the removal is commented out, so temp_warped.tif is
        # left on disk after every call.
        command= "rm " + dem_temp
        print(command)
        #process=Popen(command,stdout=PIPE,shell=True)
        #stdout,stderr = process.communicate()
        return dem_output

    def window_daymet(self):
        # Snap the projected corner coordinates outward/inward to 1 km multiples
        # and build a gdal_translate window command for the Daymet DEM.
        # NOTE(review): this method looks unfinished -- `output` is not defined
        # anywhere in this scope (NameError when executed), and projCoords holds
        # "x,y" strings that decimal.Decimal() cannot parse directly. TODO: confirm
        # the intended inputs before using.
        coords = self.projCoords
        ul = [str(math.floor(decimal.Decimal(coords[1][0]) / 1000) * 1000), str(math.ceil(decimal.Decimal(coords[1][1]) / 1000) * 1000)]
        lr = [str(math.ceil(decimal.Decimal(coords[0][0]) / 1000) * 1000), str(math.floor(decimal.Decimal(coords[0][1]) / 1000) * 1000)]
        command = ['gdal_translate', '-projwin', ul[0], ul[1], lr[0], lr[1], 'na_dem.tif', os.path.join(output, 'na_dem.part.tif')]
        print(command)
|
import numpy as np
import matplotlib.pyplot as plot
from week2.lr_utils import pre_process_data
from week2.lr_utils import sigmoid
# Parameter initialization for logistic regression.
def initialize_with_zeros(dim):
    """Return a (dim, 1) zero weight vector and a scalar bias of 0.

    Note: np.zeros takes the shape as a single tuple -- np.zeros(dim, 1)
    would raise.
    """
    weights = np.zeros((dim, 1))
    bias = 0
    return weights, bias
# Helper for gradient descent: one forward/backward pass.
def propagate(w, b, x, y):
    """
    Compute the logistic-regression cost and its gradients.

    w -- weights, shape (num_px * num_px * 3, 1)
    b -- bias, scalar
    x -- data, shape (num_px * num_px * 3, number of examples)
    y -- labels corresponding to x

    Returns ({"dw": ..., "db": ...}, cost) where cost is the scalar
    mean cross-entropy over the examples.
    """
    m = x.shape[1]  # number of examples

    # Forward pass: predicted probabilities.
    a = sigmoid(np.dot(w.T, x) + b)
    # Mean cross-entropy cost; np.squeeze drops the singleton dimensions.
    cost = np.squeeze(-1 / m * np.sum(y * np.log(a) + (1 - y) * np.log(1 - a)))

    # Backward pass: gradients of the cost w.r.t. w and b.
    residual = a - y
    grads = {
        "dw": 1 / m * np.dot(x, residual.T),
        "db": 1 / m * np.sum(residual),
    }
    return grads, cost
# Gradient descent driver.
# Hyperparameters: num_iterations, learning_rate.
def optimize(w, b, x, y, num_iterations, learning_rate, print_cost=False):
    """
    Run gradient descent on (w, b).

    w -- weights, shape (num_px * num_px * 3, 1)
    b -- bias, scalar
    x -- data, shape (num_px * num_px * 3, number of examples)
    y -- labels corresponding to x

    Returns (params, grads, costs): the learned parameters, the final
    gradients (kept only for reporting), and the cost sampled every
    100 iterations for plotting a learning curve.
    """
    costs = []
    dw = db = 0  # final gradients, returned for reporting only
    for iteration in range(num_iterations):
        grads, cost = propagate(w, b, x, y)
        dw, db = grads["dw"], grads["db"]
        # Simultaneous update of both parameters.
        w = w - learning_rate * dw
        b = b - learning_rate * db
        if iteration % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %i: %f" % (iteration, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs
# Prediction using the learned parameters (w, b).
def predict(w, b, x):
    """
    Predict binary labels for every column of x.

    w -- weights, shape (num_px * num_px * 3, 1)
    b -- bias, scalar
    x -- data, shape (num_px * num_px * 3, number of examples)

    Returns y_prediction, a (1, m) float array of 0.0/1.0 labels obtained
    by thresholding the sigmoid probabilities at 0.5.
    """
    w = w.reshape(x.shape[0], 1)
    probabilities = sigmoid(np.dot(w.T, x) + b)
    # Vectorized threshold: strictly greater than 0.5 maps to 1.0, else 0.0.
    y_prediction = (probabilities > 0.5).astype(float)
    return y_prediction
# Full pipeline: initialization -> gradient descent -> prediction.
# Hyperparameters: num_iterations, learning_rate.
def model(x_train, y_train, x_test, y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Train a logistic-regression classifier and report train/test accuracy.

    x_train -- (num_px * num_px * 3, m_train)
    y_train -- training labels, shape (1, m_train)
    x_test  -- (num_px * num_px * 3, m_test)
    y_test  -- test labels, shape (1, m_test)

    Returns a dict with the sampled costs, both prediction arrays, the
    learned parameters and the hyperparameters used.
    """
    w, b = initialize_with_zeros(x_train.shape[0])
    parameters, grads, costs = optimize(w, b, x_train, y_train, num_iterations, learning_rate, print_cost)
    w, b = parameters["w"], parameters["b"]

    y_prediction_test = predict(w, b, x_test)
    y_prediction_train = predict(w, b, x_train)

    # Accuracy = 100 - mean absolute error (labels are 0/1, so this is % correct).
    print("train accuracy: {}%".format(100 - np.mean(np.abs(y_prediction_train - y_train)) * 100))
    print("test accuracy: {}%".format(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100))

    return {"costs": costs,
            "Y_prediction_test": y_prediction_test,
            "y_prediction_train": y_prediction_train,
            "w": w,
            "b": b,
            "learning_rate": learning_rate,
            "num_iterations": num_iterations}
def main():
    """Train the logistic-regression model on the cat dataset and show one test image."""
    train_set_x, train_set_y, test_set_x, test_set_y = pre_process_data()
    info = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005,
                 print_cost=True)

    # Display one test image; the column layout is flattened 64x64x3 pixels.
    index = 3
    plot.imshow(test_set_x[:, index].reshape((64, 64, 3)))
    # plot.show() blocks until the window is closed; without it nothing renders.
    plot.show()
    print(info)


# Guarded so that importing this module no longer triggers a full training run
# (the original called main() unconditionally at import time).
if __name__ == "__main__":
    main()
|
import logging
from typing import Sequence, Any, Mapping, MutableMapping
import copy
import json
from uuid import uuid4
from enum import Enum, auto
from dss.stepfunctions import _step_functions_start_execution
from dss.util.time import RemainingTime
from dss.util.types import JSON
logger = logging.getLogger(__name__)
class DSSVisitationException(Exception):
    """Base error for failures raised while running a visitation job or walker."""
    pass


class DSSVisitationExceptionRetry(DSSVisitationException):
    """Variant signalling a failure that may be retried (name-based inference --
    presumably consumed by the step function's retry policy; confirm against the
    state-machine definition)."""
    pass
class WalkerStatus(Enum):
    """Lifecycle states a walker advances through; the member *name* (e.g.
    'init') is what gets stored in the propagated step-function state (see
    Visitation._state_spec)."""
    init = auto()
    walk = auto()
    finished = auto()
    end = auto()


# A read-only state specification: field name -> default value or factory.
Spec = Mapping[str, Any]
class Visitation:
"""
Base class vor AWS Step Function job-workers datastore batch processing. This is meant to serve as a highly
parallelized, high throughput architecture to visit blobs in the datastore and perform generic processing.
Although Visitation is somewhat specialized for datastore processing, subclasses may largely override the
propagated state and behaviour of the job and walker step functions, hijacking the parallel architecture for
other purposes.
Subclasses should be registered in registered_visitations to make them available to the job and walker step
functions.
"""
"""Step function state specification shared by job and workers"""
_state_spec = dict(
_visitation_class_name=str,
_status=WalkerStatus.init.name,
_number_of_workers=int,
execution_name=str,
work_ids=list,
work_id=str,
work_result=None
)
state_spec: Spec = dict()
walker_state_spec: Spec = dict()
def __init__(self, state_spec: Spec, state: Spec, remaining_time: RemainingTime) -> None:
"""
Pull in fields defined in state specifications and set as instance properties
"""
self.state_spec = state_spec
self._remaining_time = remaining_time
state = copy.deepcopy(state)
self.work_result: MutableMapping[str, Any] = None
for k, default in state_spec.items():
v = state.get(k, None)
if v is None:
if callable(default):
v = default()
else:
v = copy.deepcopy(default)
setattr(self, k, v)
@classmethod
def _with_state(cls, state: dict, remaining_time: RemainingTime) -> 'Visitation':
"""
Pull in state specific to the job.
"""
state_spec = {
** Visitation._state_spec,
** cls.state_spec,
** cls.walker_state_spec,
}
return cls(state_spec, state, remaining_time)
def get_state(self) -> dict:
"""
Return step function state at the end of each lambda defined in the job and walker step functions.
"""
return {
k: getattr(self, k)
for k in self.state_spec
}
@classmethod
def start(cls, number_of_workers: int, **kwargs) -> JSON:
name = '{}--{}'.format(cls.__name__, str(uuid4()))
execution_input = {
**kwargs,
'_visitation_class_name': cls.__name__,
'_number_of_workers': number_of_workers,
'execution_name': name
}
# Invoke directly without reaper/retry
execution = _step_functions_start_execution('dss-visitation-{stage}', name, json.dumps(execution_input))
return dict(arn=execution['executionArn'], name=name, input=execution_input)
def job_initialize(self) -> None:
"""
Implement for initialization or sanity checking for a job.
"""
pass
def job_finalize(self) -> None:
"""
Implement for finalization work for a successful job. Called once each worker has completed. The default
implementation aggregates the work results.
"""
work_result = self.work_result
if isinstance(work_result, Sequence):
work_result = self._aggregate(work_result)
self.work_result = work_result
def _aggregate(self, work_result: Sequence) -> Any:
"""
Aggregates the given work results and returns the aggregate. Subclasses may want to override this method in
order to customize how work results are aggregated. The default implementation returns the argument.
"""
return work_result
def job_finalize_failed(self) -> None:
"""
Implement for finalization work for a failed job. This is your opportunity to cry, notify, and ruminate.
"""
pass
def walker_initialize(self) -> None:
"""
Implement this method for initialization or sanity checking specifically for a walker.
"""
pass
def walker_walk(self) -> None:
"""
Subclasses must implement this method. Called for walker thread.
"""
raise NotImplementedError
def walker_finalize(self) -> None:
"""
Implement this method for finalization work specific to a walker.
"""
pass
def walker_finalize_failed(self) -> None:
"""
Aliment this method for finalization work specific to a failed walker.
"""
pass
def remaining_runtime(self) -> float:
return self._remaining_time.get()
    # See MyPy recommendations for silencing spurious warnings of missing properties that have been mixed in:
    # https://mypy.readthedocs.io/en/latest/cheat_sheet.html#when-you-re-puzzled-or-when-things-are-complicated
    def __getattribute__(self, name: str) -> Any:
        """Pass-through override; exists only so MyPy tolerates mixed-in attributes."""
        return super().__getattribute__(name)
    def __setattr__(self, key: str, val: Any) -> None:
        """Pass-through override; exists only so MyPy tolerates mixed-in attributes."""
        super().__setattr__(key, val)
|
<reponame>brl0/kartothek
import math
import types
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from kartothek.io.eager import store_dataframes_as_dataset
from kartothek.io_components.metapartition import SINGLE_TABLE, MetaPartition
from kartothek.io_components.read import dispatch_metapartitions
def test_dispatch_metapartitions(dataset, store_session):
    """Dispatch lazily yields one MetaPartition per partition, metadata attached."""
    gen = dispatch_metapartitions(dataset.uuid, store_session)
    assert isinstance(gen, types.GeneratorType)
    by_label = OrderedDict((mp.label, mp) for mp in gen)
    assert len(by_label) == 2
    for label in ("cluster_1", "cluster_2"):
        mp = by_label[label]
        assert isinstance(mp, MetaPartition)
        assert dict(mp.dataset_metadata) == dict(dataset.metadata)
    assert set(by_label["cluster_2"].table_meta.keys()) == {SINGLE_TABLE, "helper"}
def test_dispatch_metapartitions_label_filter(dataset, store_session):
    """Only partitions whose label passes the filter are dispatched."""

    def only_cluster_1(part_label):
        return "cluster_1" in part_label

    gen = dispatch_metapartitions(
        dataset.uuid, store_session, label_filter=only_cluster_1
    )
    assert isinstance(gen, types.GeneratorType)
    by_label = OrderedDict((mp.label, mp) for mp in gen)
    assert len(by_label) == 1
    mp = by_label["cluster_1"]
    assert isinstance(mp, MetaPartition)
    assert dict(mp.dataset_metadata) == dict(dataset.metadata)
def test_dispatch_metapartitions_without_dataset_metadata(dataset, store_session):
    """With load_dataset_metadata=False every MetaPartition carries empty metadata."""
    gen = dispatch_metapartitions(
        dataset.uuid, store_session, load_dataset_metadata=False
    )
    assert isinstance(gen, types.GeneratorType)
    mps = list(gen)
    assert len(mps) == 2
    for mp in mps:
        assert mp.dataset_metadata == {}
@pytest.mark.parametrize(
    "predicates,error_msg",
    [([], "Empty predicates"), ([[]], "Invalid predicates: Conjunction 0 is empty")],
)
def test_dispatch_metapartition_undefined_behaviour(
    dataset, store_session, predicates, error_msg
):
    """Malformed predicates are rejected with a descriptive ValueError."""
    with pytest.raises(ValueError, match=error_msg):
        list(dispatch_metapartitions(dataset.uuid, store_session, predicates=predicates))
@pytest.mark.parametrize(
    "predicates",
    [
        [[("P", "==", 2)]],
        [[("P", "in", [2])]],
        [[("P", "!=", 1)]],
        [[("P", ">", 1)]],
        # Only apply filter to columns for which we have an index
        [[("P", ">=", 2), ("TARGET", "==", 500)]],
    ],
)
def test_dispatch_metapartitions_query_partition_on(
    dataset_partition_keys, store_session, predicates
):
    """Predicates on the partition column restrict dispatch to matching partitions."""
    mps = list(
        dispatch_metapartitions(
            dataset_partition_keys.uuid, store_session, predicates=predicates
        )
    )
    assert [mp.label for mp in mps] == ["P=2/cluster_2"]
@pytest.mark.parametrize(
    "predicates",
    [
        # These predicates are OR connected, therefore they need to allow all partitions
        [[("P", "==", 2)], [("TARGET", "==", 500)]],
        [[("P", "in", [2])], [("TARGET", "in", [500])]],
        [[("L", "==", 2)], [("TARGET", "==", 500)]],
    ],
)
def test_dispatch_metapartitions_query_no_effect(
    dataset_partition_keys, store_session, predicates
):
    """OR-connected predicates that cover everything must not drop partitions."""
    mps = list(
        dispatch_metapartitions(
            dataset_partition_keys.uuid, store_session, predicates=predicates
        )
    )
    assert len(mps) == 2
def test_dispatch_metapartitions_concat_regression(store):
    """dispatch_by merges same-key partitions; the deprecated concat flag still works."""
    dataset = store_dataframes_as_dataset(
        dfs=[pd.DataFrame({"p": [0], "x": [0]}), pd.DataFrame({"p": [0], "x": [1]})],
        dataset_uuid="test",
        store=store,
        partition_on=["p"],
    )
    without_concat = list(
        dispatch_metapartitions(
            dataset.uuid, store, concat_partitions_on_primary_index=False
        )
    )
    assert len(without_concat) == 2
    # The legacy flag must still concatenate, but warn about deprecation
    with pytest.deprecated_call():
        concatenated = list(
            dispatch_metapartitions(
                dataset.uuid, store, concat_partitions_on_primary_index=True
            )
        )
    assert len(concatenated) == 1
    dispatched = list(dispatch_metapartitions(dataset.uuid, store, dispatch_by=["p"]))
    assert len(dispatched) == 1
def test_dispatch_metapartitions_dups_with_predicates(store):
    """A predicate matching all rows dispatches the same partitions as no predicate."""
    dataset = store_dataframes_as_dataset(
        dfs=[pd.DataFrame({"p": [0, 1], "x": 0})],
        dataset_uuid="test",
        store=store,
        secondary_indices=["p"],
    )
    without_predicates = list(dispatch_metapartitions(dataset.uuid, store))
    with_predicates = list(
        dispatch_metapartitions(dataset.uuid, store, predicates=[[("p", "in", [0, 1])]])
    )
    assert without_predicates == with_predicates
def test_dispatch_metapartitions_dups_with_predicates_dispatch_by(store):
    """An all-matching predicate combined with dispatch_by changes nothing."""
    dataset = store_dataframes_as_dataset(
        dfs=[pd.DataFrame({"p": [0, 1], "x": 0})],
        dataset_uuid="test",
        store=store,
        secondary_indices=["p", "x"],
    )
    without_predicates = list(
        dispatch_metapartitions(dataset.uuid, store, dispatch_by="x")
    )
    with_predicates = list(
        dispatch_metapartitions(
            dataset.uuid, store, predicates=[[("p", "in", [0, 1])]], dispatch_by="x"
        )
    )
    assert without_predicates == with_predicates
def test_dispatch_metapartitions_sorted_dispatch_by(store):
    """Groups produced via dispatch_by arrive in increasing order of the dispatch column."""
    df = pd.DataFrame(
        {"p": np.random.randint(high=100000, low=-100000, size=(100,)), "x": 0}
    )
    # Integers are sorted when using too small values (maybe connected to the
    # singleton implementation of integers in CPython??)
    # Verify this is not happening, otherwise we'll get immediately a sorted
    # index (which is nice in this case but not generally true, of course)
    # NOTE(review): this relies on the random draw not already being sorted —
    # theoretically flaky, though vanishingly unlikely for 100 values.
    arr = set(df["p"].unique())
    assert list(arr) != sorted(arr)
    dataset = store_dataframes_as_dataset(
        dfs=[df], dataset_uuid="test", store=store, secondary_indices=["p", "x"]
    )
    wout_preds = list(dispatch_metapartitions(dataset.uuid, store, dispatch_by="p"))
    # Each dispatched group must carry exactly one conjunction on "p", and the
    # groups must arrive in strictly increasing order of that value.
    last = -math.inf
    for mps in wout_preds:
        for mp in mps:
            current = mp.logical_conjunction
            assert len(current) == 1
            current = current[0][2]
            assert current > last
            last = current
|
<filename>api/anubis/lms/submissions.py
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Union
from anubis.lms.assignments import get_assignment_due_date
from anubis.models import (
Assignment,
AssignmentRepo,
AssignmentTest,
Course,
InCourse,
Submission,
SubmissionBuild,
SubmissionTestResult,
User,
db,
)
from anubis.rpc.batch import rpc_bulk_regrade
from anubis.utils.cache import cache
from anubis.utils.data import is_debug, split_chunks
from anubis.utils.http import error_response, success_response
from anubis.utils.logging import logger
from anubis.utils.rpc import enqueue_autograde_pipeline, rpc_enqueue
def bulk_regrade_submissions(submissions: List[Submission]) -> List[dict]:
    """
    Enqueue a regrade job for each submission in the batch.

    :param submissions: submissions to regrade
    :return: one regrade response dict per submission
    """
    return [
        regrade_submission(submission, queue="regrade")
        for submission in submissions
    ]
def regrade_submission(submission: Union[Submission, str], queue: str = "default") -> dict:
    """
    Regrade a submission

    Resolves a submission id to its model if necessary, resets the submission's
    grading state and child result rows, and enqueues it on the autograde
    pipeline.

    :param submission: Union[Submissions, str]
    :param queue: name of the RQ queue to enqueue the autograde job on
    :return: dict response
    """
    # If the submission is a string, then we consider it to be a submission id
    if isinstance(submission, str):
        # Try to query for the submission
        submission = Submission.query.filter(
            Submission.id == submission,
        ).first()
        # If there was no submission found, then return an error status
        if submission is None:
            return error_response("could not find submission")
    # processed == False means a grading run is still in flight for this
    # submission; refuse to start another regrade until it finishes.
    if not submission.processed:
        return error_response("submission currently being processed")
    # Update the submission fields to reflect the regrade
    submission.processed = False
    submission.state = "regrading"
    submission.last_updated = datetime.now()
    # Reset the accompanying database objects (test results, build)
    init_submission(submission)
    # Enqueue the submission job
    enqueue_autograde_pipeline(submission.id, queue=queue)
    return success_response({"message": "regrade started"})
def fix_dangling():
    """
    Try to connect repos that do not have an owner.
    A dangling submission is a submission that has not been matched to
    a student. This happens when a student either does not give anubis
    a github username, or provides an incorrect one. When this happens,
    all submissions that come in for that repo are tracked, but not graded.
    The purpose of this function is to try to match assignment repos to
    submissions that lack an owner.
    :return: list of data dicts for the submissions that were fixed
    """
    # Running list of fixed submissions
    fixed = []
    # Pass 1: Assignment Repos that do not have an owner_id
    dangling_repos = AssignmentRepo.query.filter(
        AssignmentRepo.owner_id == None,
    ).all()
    # Iterate over all dangling repos
    for dangling_repo in dangling_repos:
        # Attempt to find an owner by matching github usernames
        owner = User.query.filter(User.github_username == dangling_repo.github_username).first()
        # If an owner was found, then fix it
        if owner is not None:
            # Update the dangling repo
            dangling_repo.owner_id = owner.id
            db.session.add_all((dangling_repo, owner))
            db.session.commit()
            # Find all the submissions that belong to that
            # repo, fix then grade them.
            for submission in dangling_repo.submissions:
                # Give the submission an owner
                submission.owner_id = owner.id
                db.session.add(submission)
                db.session.commit()
                # Update running tally of fixed submissions
                fixed.append(submission.data)
                # Get the due date
                due_date = get_assignment_due_date(owner.id, dangling_repo.assignment.id)
                # Grade only if late submissions are accepted or it was on time
                if dangling_repo.assignment.accept_late and submission.created < due_date:
                    # Enqueue a autograde job for the submission
                    enqueue_autograde_pipeline(submission.id)
                # Reject the submission if it was late
                else:
                    reject_late_submission(submission)
    # Pass 2: submissions lacking an owner
    dangling_submissions = Submission.query.filter(Submission.owner_id == None).all()
    # Iterate through all submissions lacking an owner
    for submission in dangling_submissions:
        # Try to find a repo to match
        dangling_repo = AssignmentRepo.query.filter(AssignmentRepo.id == submission.assignment_repo_id).first()
        # NOTE(review): dangling_repo may be None if the submission has no
        # matching repo row — the attribute access below would then raise
        # AttributeError. Confirm the upstream invariant or add a guard.
        # Try to find an owner student
        owner = User.query.filter(User.github_username == dangling_repo.github_username).first()
        # If an owner was found, then fix and regrade all relevant
        if owner is not None:
            # Give the repo an owner
            dangling_repo.owner_id = owner.id
            db.session.add_all((dangling_repo, owner))
            db.session.commit()
            # Give the submission an owner
            submission.owner_id = owner.id
            db.session.add(submission)
            db.session.commit()
            # Update running tally of fixed submissions
            fixed.append(submission.data)
            # Get the due date
            due_date = get_assignment_due_date(owner.id, submission.assignment.id)
            # Grade only if late submissions are accepted or it was on time
            if submission.assignment.accept_late and submission.created < due_date:
                # Enqueue a autograde job for the submission
                enqueue_autograde_pipeline(submission.id)
            # Reject the submission if it was late
            else:
                reject_late_submission(submission)
    return fixed
@cache.memoize(timeout=5, unless=is_debug, source_check=True)
def get_submissions(
    user_id=None,
    course_id=None,
    assignment_id=None,
    limit=None,
    offset=None,
) -> Optional[Tuple[List[Dict[str, str]], int]]:
    """
    Get all submissions for a given netid. Cache the results. Optionally specify
    a class_name and / or assignment_name for additional filtering.

    :param offset: pagination offset (applied after limit)
    :param limit: maximum number of submissions to return
    :param user_id: id of the submission owner; returns None if no such user
    :param course_id: optional course filter
    :param assignment_id: id of assignment
    :return: (list of full submission dicts, total count before pagination)
        or None when the user does not exist
    """
    # Load user
    owner = User.query.filter(User.id == user_id).first()
    # Verify user exists
    if owner is None:
        return None
    # Build filters
    filters = []
    if course_id is not None and course_id != "":
        filters.append(Course.id == course_id)
    # NOTE(review): this User.id filter is redundant with the owner_id filter
    # below, but harmless.
    if user_id is not None and user_id != "":
        filters.append(User.id == user_id)
    if assignment_id is not None:
        filters.append(Assignment.id == assignment_id)
    query = (
        Submission.query.join(Assignment)
        .join(Course)
        .join(InCourse)
        .join(User)
        .filter(Submission.owner_id == owner.id, *filters)
        .order_by(Submission.created.desc())
    )
    # Total count is taken before limit/offset so callers can paginate
    all_total = query.count()
    if limit is not None:
        query = query.limit(limit)
    if offset is not None:
        query = query.offset(offset)
    submissions = query.all()
    return [s.full_data for s in submissions], all_total
def recalculate_late_submissions(student: User, assignment: Assignment):
    """
    Re-evaluate this student's submissions for the assignment against the
    (possibly updated) due date: reject accepted-but-now-late submissions
    and re-grade rejected-but-now-on-time ones.

    :param student: the student whose submissions are recalculated
    :param assignment: the assignment to recalculate for
    :return:
    """
    # Get the due date (with grace) for this student
    due_date = get_assignment_due_date(student, assignment, grace=True)
    # BUG FIX: both queries were previously unscoped and matched submissions
    # from every student and every assignment in the database. Scope them to
    # this student's submissions for this assignment.
    # Submissions that are currently accepted but now past the due date
    s_reject = Submission.query.filter(
        Submission.owner_id == student.id,
        Submission.assignment_id == assignment.id,
        Submission.created > due_date,
        Submission.accepted == True,
    ).all()
    # Submissions that were rejected but are now within the due date
    s_accept = Submission.query.filter(
        Submission.owner_id == student.id,
        Submission.assignment_id == assignment.id,
        Submission.created < due_date,
        Submission.accepted == False,
    ).all()
    # Enqueue bulk regrades for the newly accepted submissions in chunks
    s_accept_ids = [submission.id for submission in s_accept]
    for chunk in split_chunks(s_accept_ids, 32):
        rpc_enqueue(rpc_bulk_regrade, "regrade", args=[chunk])
    # Reject the submissions that need to be updated
    for submission in s_reject:
        reject_late_submission(submission)
    # Commit the changes
    db.session.commit()
def reject_late_submission(submission: Submission):
    """
    Mark a submission and all of its child result rows as rejected for lateness.

    * Does not commit changes *

    :param submission: the submission to reject
    :return:
    """
    rejection_message = "Late submissions not accepted"
    # Mark every test result as failed/rejected
    for test_result in submission.test_results:
        test_result: SubmissionTestResult
        test_result.passed = False
        test_result.message = rejection_message
        test_result.stdout = ""
        db.session.add(test_result)
    # Mark the build as failed/rejected
    submission.build.passed = False
    submission.build.stdout = rejection_message
    db.session.add(submission.build)
    # Mark the submission itself as rejected and fully processed
    submission.accepted = False
    submission.processed = True
    submission.state = rejection_message
    db.session.add(submission)
def init_submission(submission: Submission, commit: bool = True):
    """
    Create adjacent submission models.

    Deletes any existing test-result and build rows for the submission, then
    creates a fresh SubmissionTestResult per assignment test plus a fresh
    SubmissionBuild, and resets the submission's grading state.

    :param submission: the submission to (re)initialize
    :param commit: commit both the deletions and the new rows when True
    :return:
    """
    logger.debug("initializing submission {}".format(submission.id))
    # If the models already exist, yeet (delete them so we start clean)
    if len(submission.test_results) != 0:
        SubmissionTestResult.query.filter_by(submission_id=submission.id).delete()
    if submission.build is not None:
        SubmissionBuild.query.filter_by(submission_id=submission.id).delete()
    if commit:
        # Commit deletions (if necessary)
        db.session.commit()
    # Find tests for the current assignment
    tests = AssignmentTest.query.filter_by(assignment_id=submission.assignment_id).all()
    logger.debug("found tests: {}".format(list(map(lambda x: x.data, tests))))
    # One fresh (empty) result row per assignment test
    for test in tests:
        tr = SubmissionTestResult(submission_id=submission.id, assignment_test_id=test.id)
        db.session.add(tr)
    sb = SubmissionBuild(submission_id=submission.id)
    db.session.add(sb)
    # Reset grading state: accepted until proven late, not yet processed
    submission.accepted = True
    submission.processed = False
    submission.state = "Waiting for resources..."
    db.session.add(submission)
    if commit:
        # Commit new models
        db.session.commit()
|
<filename>tests/test_transpy.py<gh_stars>0
from typing import Tuple
from logging import getLogger, NullHandler, Logger
import unittest
import unittest.mock as mock
from transpydata.TransPy import TransPy
from transpydata.config.datainput import IDataInput
from transpydata.config.dataprocess import IDataProcess
from transpydata.config.dataoutput import IDataOutput
class TestTransPy(unittest.TestCase):
    """Unit tests for TransPy.run() orchestration over mocked data services."""
    def test_initialized_and_disposed(self):
        """Every data service is initialized and disposed exactly once per run."""
        datainput, dataprocess, dataoutput = self._get_mocked_dataservices()
        trans_py = self._get_transpy_instance(datainput, dataprocess, dataoutput)
        trans_py.run()
        datainput.initialize.assert_called_once()
        datainput.dispose.assert_called_once()
        dataprocess.initialize.assert_called_once()
        dataprocess.dispose.assert_called_once()
        dataoutput.initialize.assert_called_once()
        dataoutput.dispose.assert_called_once()
    def test_batch_run(self):
        """All-batch config uses only the *_all methods and chains their results."""
        datainput, dataprocess, dataoutput = self._get_mocked_dataservices()
        datainput.get_all.return_value = ['dinA', 'dinB']
        dataprocess.process_all.return_value = ['dprA', 'dprB']
        dataoutput.send_all.return_value = ['doutA', 'doutB']
        config = {
            'datainput_by_one': False,
            'dataprocess_by_one': False,
            'dataoutput_by_one': False,
            'datainput_source': ['inA', 'inB']
        }
        trans_py = self._get_transpy_instance(datainput, dataprocess,
                                              dataoutput, config)
        result = trans_py.run()
        self.assertFalse(datainput.get_one.called,
                         'One item processing should not be called')
        datainput.get_all.assert_called_once()
        self.assertFalse(dataprocess.process_one.called,
                         'One item processing should not be called')
        dataprocess.process_all.assert_called_once_with(datainput.get_all.return_value)
        self.assertFalse(dataoutput.send_one.called,
                         'One item processing should not be called')
        dataoutput.send_all.assert_called_once_with(dataprocess.process_all.return_value)
        self.assertListEqual(result, dataoutput.send_all.return_value)
    def test_by_one_run(self):
        """All-by-one config feeds each source item through the *_one methods."""
        datainput, dataprocess, dataoutput = self._get_mocked_dataservices()
        datainput_returns = ['dinA', 'dinB']
        dataprocess_returns = ['dprA', 'dprB']
        dataoutput_returns = ['doutA', 'doutB']
        datainput.get_one.side_effect = datainput_returns
        dataprocess.process_one.side_effect = dataprocess_returns
        dataoutput.send_one.side_effect = dataoutput_returns
        config = {
            'datainput_by_one': True,
            'dataprocess_by_one': True,
            'dataoutput_by_one': True,
            'datainput_source': ['inA', 'inB']
        }
        trans_py = self._get_transpy_instance(datainput, dataprocess,
                                              dataoutput, config)
        result = trans_py.run()
        self.assertFalse(datainput.get_all.called,
                         'Batch processing should not be called')
        datainput.get_one.assert_any_call(config['datainput_source'][0])
        datainput.get_one.assert_any_call(config['datainput_source'][1])
        self.assertFalse(dataprocess.process_all.called,
                         'Batch processing should not be called')
        dataprocess.process_one.assert_any_call(datainput_returns[0])
        dataprocess.process_one.assert_any_call(datainput_returns[1])
        self.assertFalse(dataoutput.send_all.called,
                         'Batch processing should not be called')
        dataoutput.send_one.assert_any_call(dataprocess_returns[0])
        dataoutput.send_one.assert_any_call(dataprocess_returns[1])
        self.assertListEqual(result, dataoutput_returns)
    def test_input_and_process_by_one(self):
        """Mixed config: input/process by one, output batched over collected results."""
        datainput, dataprocess, dataoutput = self._get_mocked_dataservices()
        datainput_returns = ['dinA', 'dinB']
        dataprocess_returns = ['dprA', 'dprB']
        dataoutput_returns = ['doutA', 'doutB']
        datainput.get_one.side_effect = datainput_returns
        dataprocess.process_one.side_effect = dataprocess_returns
        dataoutput.send_all.return_value = dataoutput_returns
        config = {
            'datainput_by_one': True,
            'dataprocess_by_one': True,
            'dataoutput_by_one': False,
            'datainput_source': ['inA', 'inB']
        }
        trans_py = self._get_transpy_instance(datainput, dataprocess,
                                              dataoutput, config)
        result = trans_py.run()
        self.assertFalse(datainput.get_all.called,
                         'Batch processing should not be called')
        datainput.get_one.assert_any_call(config['datainput_source'][0])
        datainput.get_one.assert_any_call(config['datainput_source'][1])
        self.assertFalse(dataprocess.process_all.called,
                         'Batch processing should not be called')
        dataprocess.process_one.assert_any_call(datainput_returns[0])
        dataprocess.process_one.assert_any_call(datainput_returns[1])
        self.assertFalse(dataoutput.send_one.called,
                         'One item processing should not be called')
        dataoutput.send_all.assert_called_once_with(dataprocess_returns)
        self.assertListEqual(result, dataoutput_returns)
    def test_process_and_output_by_one(self):
        """Mixed config: batched input, then per-item process and output."""
        datainput, dataprocess, dataoutput = self._get_mocked_dataservices()
        datainput_returns = ['dinA', 'dinB']
        dataprocess_returns = ['dprA', 'dprB']
        dataoutput_returns = ['doutA', 'doutB']
        datainput.get_all.return_value = datainput_returns
        dataprocess.process_one.side_effect = dataprocess_returns
        dataoutput.send_one.side_effect = dataoutput_returns
        config = {
            'datainput_by_one': False,
            'dataprocess_by_one': True,
            'dataoutput_by_one': True,
            'datainput_source': ['inA', 'inB']
        }
        trans_py = self._get_transpy_instance(datainput, dataprocess,
                                              dataoutput, config)
        result = trans_py.run()
        self.assertFalse(datainput.get_one.called,
                         'One item processing should not be called')
        datainput.get_all.assert_called_once()
        self.assertFalse(dataprocess.process_all.called,
                         'Batch processing should not be called')
        dataprocess.process_one.assert_any_call(datainput_returns[0])
        dataprocess.process_one.assert_any_call(datainput_returns[1])
        self.assertFalse(dataoutput.send_all.called,
                         'Batch processing should not be called')
        dataoutput.send_one.assert_any_call(dataprocess_returns[0])
        dataoutput.send_one.assert_any_call(dataprocess_returns[1])
        self.assertListEqual(result, dataoutput_returns)
    def test_logging(self):
        """run() emits info-level log messages through the configured logger."""
        datainput, dataprocess, dataoutput = self._get_mocked_dataservices()
        datainput_returns = ['dinA', 'dinB']
        dataprocess_returns = ['dprA', 'dprB']
        dataoutput_returns = ['doutA', 'doutB']
        datainput.get_all.return_value = datainput_returns
        dataprocess.process_one.side_effect = dataprocess_returns
        dataoutput.send_one.side_effect = dataoutput_returns
        config = {
            'datainput_by_one': False,
            'dataprocess_by_one': False,
            'dataoutput_by_one': False
        }
        trans_py = self._get_transpy_instance(datainput, dataprocess,
                                              dataoutput, config)
        # Swap in an autospec'd Logger so the info calls can be asserted
        mock_logger = mock.create_autospec(Logger)
        trans_py.logger = mock_logger
        result = trans_py.run()
        mock_logger.info.assert_called()
    def _get_mocked_dataservices(self) -> Tuple[mock.NonCallableMagicMock,
                                                mock.NonCallableMagicMock,
                                                mock.NonCallableMagicMock]:
        """Build autospec'd input/process/output services with their method-name hooks."""
        datainput = mock.create_autospec(IDataInput)
        datainput.process_one_method_name.return_value = 'get_one'
        datainput.process_all_method_name.return_value = 'get_all'
        dataprocess = mock.create_autospec(IDataProcess)
        dataprocess.process_one_method_name.return_value = 'process_one'
        dataprocess.process_all_method_name.return_value = 'process_all'
        dataoutput = mock.create_autospec(IDataOutput)
        dataoutput.process_one_method_name.return_value = 'send_one'
        dataoutput.process_all_method_name.return_value = 'send_all'
        return (datainput, dataprocess, dataoutput)
    def _get_transpy_instance(self, datainput, dataprocess,
                              dataoutput, config: dict = None) -> TransPy:
        """Assemble a TransPy wired to the given services, optionally configured."""
        trans_py = TransPy()
        trans_py.logger = self._get_null_logger()
        trans_py.datainput = datainput
        trans_py.dataprocess = dataprocess
        trans_py.dataoutput = dataoutput
        if config is not None:
            trans_py.configure(config)
        return trans_py
    def _get_null_logger(self):
        """Return a logger that swallows all output (keeps test runs quiet)."""
        logger = getLogger('dummy')
        if not logger.hasHandlers():
            logger.addHandler(NullHandler())
        return logger
|
<gh_stars>0
from django.contrib.auth.models import User
from django.views import View
from articles.models import Tag
from utils.pages import Paginator
from django.template import loader
from articles.models import Article
from django.http import JsonResponse
from utils.decorators import fail_safe_api
from utils.models import nested_model_to_dict
from utils.request import parse_body, set_user
from utils.koora import getValueFor, setTagsFor, uploadImageFor
class ListAPIView(View):
    """JSON API view for listing (GET) and creating (POST) articles."""

    def dispatch(self, request, *args, **kwargs):
        """
        Attach the user to the request and, for authenticated users, parse the
        request body before delegating to the normal View dispatch.
        """
        set_user(request)
        if request.user.is_authenticated:
            parse_body(request, for_method=request.method)
        return super(ListAPIView, self).dispatch(request, *args, **kwargs)

    @fail_safe_api(for_model=Article)
    def get(self, request):
        """
        Return a paginated JSON listing of articles, optionally filtered by
        search query, tag, category and/or the profile being visited (uid).
        """
        searchQuery = request.GET.get("searchQuery", False)
        tag = request.GET.get("tag", False)
        category = request.GET.get("category", False)
        user = request.user
        visitee = None
        uid = request.GET.get("uid", False)
        if uid:
            visitee = User.objects.get(id=uid)
        # Visitors only see public articles; owners may request any atype
        is_viewing_self = (user.username == visitee.username) if visitee else True
        atype = request.GET.get('atype', False) if is_viewing_self else 'public'
        to_show = visitee if uid else user
        required_articles = getattr(Article.objects, atype or 'public')(user=to_show if atype else None)
        # Track which filters were applied so the client can echo them back
        query = {}
        if searchQuery:
            required_articles = list(filter(lambda article: article.contains_tag(searchQuery), required_articles))
            query['searchQuery'] = searchQuery
        if category:
            required_articles = list(filter(lambda article: getValueFor(article.category) == category, required_articles))
            query['category'] = category
        if tag:
            required_articles = list(filter(lambda article: article.has_tag(tag), required_articles))
            query['tag'] = tag
        # NOTE(review): template is never used below; kept because
        # loader.get_template raises if the template is missing — confirm
        # whether this side effect is intentional.
        template = loader.get_template("articles/articles.html")
        # BUG FIX: the previous bare "except:" clauses also swallowed
        # SystemExit/KeyboardInterrupt; only int() conversion errors are
        # expected here.
        try:
            page = int(request.GET.get("page", 1))
        except (TypeError, ValueError):
            page = 1
        try:
            size = int(request.GET.get("size", 3))
        except (TypeError, ValueError):
            size = 3
        paginator = Paginator(required_articles, size)
        articles_count = len(required_articles)
        required_page = paginator.page(page)
        content = {
            "status": 200,
            "data": {
                "page": nested_model_to_dict(required_page),
                "page_range": list(paginator.page_range()) if required_articles else None,
                "query": list(query.items()),
                "hasResults": (articles_count > 0)
            },
            "meta": {
                "count": articles_count
            }
        }
        return JsonResponse(content)

    @fail_safe_api(for_model=Article, needs_authentication=True)
    def post(self, request):
        """
        Create a new article (optionally private and/or drafted), attach its
        tags and optional image, and return it as JSON.
        """
        title = request.POST['title']
        content = request.POST['content']
        category = request.POST['category']
        image = request.FILES.get('article_image', False)
        post_type = request.POST.get('post_type', 'public')
        post_mode = request.POST.get('post_mode', 'publish')
        is_private = post_type == 'private'
        is_drafted = post_mode == 'draft'
        article = Article.objects.create(user=request.user, title=title, content=content, category=category, is_drafted=is_drafted, is_private=is_private)
        if image:
            uploadImageFor(article, image, article.slug)
        tags = request.POST.get('tags', '').strip().split(",")
        setTagsFor(article, tags)
        article.save()
        # Renamed from "content" to avoid shadowing the POSTed article body
        response_content = {
            "status": 200,
            "message": "article {}".format('drafted' if is_drafted else 'created'),
            "data": {
                "article": nested_model_to_dict(article),
            },
            "meta": {
                "count": 1,
            }
        }
        return JsonResponse(response_content)
|
<filename>structureimpute/explore/plot_two_shape_common_tx_pct.py
from __future__ import print_function
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
sns.set_context("poster")
plt.rcParams["font.family"] = "Helvetica"
import sys, os
from nested_dict import nested_dict
import pandas as pd
import numpy as np
from pyfasta import Fasta
import os, subprocess
import re
import torch
import time
from termcolor import colored
import util
import argparse
def plot_shape_tx_null_pct(out1=None, out2=None, out1_label='True', out2_label='Predict', savefn=None, species='human'):
    """
    Scatter-plot the per-transcript NULL percentage of two icSHAPE .out files
    over their common transcripts.

    :param out1: path to the first icSHAPE out file
    :param out2: path to the second icSHAPE out file
    :param out1_label: axis label for the first file
    :param out2_label: axis label for the second file
    :param savefn: path of the plot file to write
    :param species: species used to load the reference fasta
    :return: (stat1, stat2) summary-statistic dicts for the two files
    """
    out_dict1 = util.read_icshape_out(out1)
    out_dict2 = util.read_icshape_out(out2)
    tx_common = set(out_dict1.keys()) & set(out_dict2.keys())
    null_pct1_ls = []
    null_pct2_ls = []
    # 'NULL', '-1.0' and '-1' all denote missing reactivity values
    for tx in tx_common:
        reactivity1 = out_dict1[tx]['reactivity_ls']
        reactivity2 = out_dict2[tx]['reactivity_ls']
        null_pct1 = (reactivity1.count('NULL')+reactivity1.count('-1.0')+reactivity1.count('-1')) / float(out_dict1[tx]['length'])
        # BUG FIX: the '-1' count previously read from out_dict1 instead of
        # out_dict2, inflating/deflating the second file's null percentage.
        null_pct2 = (reactivity2.count('NULL')+reactivity2.count('-1.0')+reactivity2.count('-1')) / float(out_dict2[tx]['length'])
        null_pct1_ls.append(null_pct1)
        null_pct2_ls.append(null_pct2)
    print('{}: n={}'.format(out1, len(out_dict1)))
    print('{}: n={}'.format(out2, len(out_dict2)))
    print('common tx: n={}'.format(len(tx_common)))
    fa_dict = util.read_fa(fa=None, species=species, pureID=1)
    # Summary stats (trimming 5'/3' ends, consistent with the pipeline defaults)
    stat1 = util.shape_dict_stat(out_dict1, fa_dict, None, RNA_type=None, trim5Len=5, trim3Len=30)
    stat2 = util.shape_dict_stat(out_dict2, fa_dict, None, RNA_type=None, trim5Len=5, trim3Len=30)
    print(pd.DataFrame.from_dict(stat1,orient='index'), pd.DataFrame.from_dict(stat2, orient='index'))
    df = pd.DataFrame.from_dict({out1_label:null_pct1_ls, out2_label:null_pct2_ls})
    print(df.head())
    fig,ax=plt.subplots(figsize=(6,6))
    sns.scatterplot(x=out1_label, y=out2_label, data=df, ax=ax, s=10)
    plt.xlabel('{} (null_pct: {:.2f})'.format(out1_label, stat1['total_bases(NULL_pct)']))
    plt.ylabel('{} (null_pct: {:.2f})'.format(out2_label, stat2['total_bases(NULL_pct)']))
    plt.tight_layout()
    plt.savefig(savefn)
    plt.close()
    return stat1,stat2
def main():
    """Parse the command line and generate the common-transcript null-pct scatter."""
    description = 'Plot null pct scatter of common tx between two icshape.out'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--icshape1', type=str, default='/home/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape.c200T2M0m0.out', help='icSHAPE out file1')
    parser.add_argument('--icshape2', type=str, default='/home/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape.c200T2M0m0.allfragment.0.5+exceed0.5.txt2.predict.out', help='icSHAPE out file2')
    parser.add_argument('--out1_label', type=str, default='True', help='icSHAPE out file1 label')
    parser.add_argument('--out2_label', type=str, default='Predict', help='icSHAPE out file2 label')
    parser.add_argument('--savefn', type=str, default='/home/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape.c200T2M0m0.allfragment.0.5+exceed0.5.txt2.predict.out.scatter.pdf', help='Save plot file')
    parser.add_argument('--species', type=str, default='human', help='Species')
    args = parser.parse_args()
    util.print_args(description, args)
    plot_shape_tx_null_pct(
        out1=args.icshape1,
        out2=args.icshape2,
        out1_label=args.out1_label,
        out2_label=args.out2_label,
        savefn=args.savefn,
        species=args.species,
    )
if __name__ == '__main__':
main() |
<filename>torchreid/models/motnet.py<gh_stars>0
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import re
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils import model_zoo
import torchvision.models as tvm
class MOTNet(nn.Module):
    """Pairwise association network for multi-object tracking.

    Image tiles are embedded with a ResNet-18 backbone; grouped bilinear
    appearance affinities between the tiles of a sample are concatenated
    with sinusoidal positional embeddings of the detection boxes, and a
    small MLP scores each pairing (2-way logits: non-match / match).
    """

    def __init__(self, backbone_config=None, groups=16, feat_dim=16, **kwargs):
        """
        :param backbone_config: optional kwargs forwarded to
            ``torchvision.models.resnet18``. ``None`` (the default) means no
            extra arguments — a ``None`` sentinel replaces the original
            mutable ``{}`` default argument.
        :param groups: number of channel groups for the appearance affinities.
        :param feat_dim: size of the positional embedding per box pair.
        """
        super(MOTNet, self).__init__()
        # Build the kwargs dict per call to avoid the shared-mutable-default
        # pitfall of ``backbone_config={}``.
        self.backbone = tvm.resnet18(**(backbone_config or {}))
        self._classifier = nn.Sequential(
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 2),
        )
        # Projects the backbone's 1000-way output into the appearance space.
        self.a_fc = nn.Linear(1000, 1024)
        self.groups = groups
        self.feat_dim = feat_dim
        # Base wavelength of the sinusoidal position encoding.
        self.wave_length = 1000.0

    def init_weights(self):
        """Kaiming-initialize all linear layers (a=1) with zero bias."""
        for fc in self._classifier:
            if isinstance(fc, nn.Linear):
                nn.init.kaiming_normal_(fc.weight, a=1)
                nn.init.constant_(fc.bias, 0)
        for fc in [self.a_fc]:
            nn.init.kaiming_normal_(fc.weight, a=1)
            nn.init.constant_(fc.bias, 0)

    def pos_vec(self, dts):
        """Sinusoidal embedding of pairwise box geometry.

        :param dts: (n, m, 4) boxes as (xmin, ymin, xmax, ymax).
        :return: (n, m * m * feat_dim) flattened embeddings of the relative
            position and scale of every box pair within each sample.
        """
        n, m, c = dts.shape
        dts = dts.reshape(n * m, c)
        xmin, ymin, xmax, ymax = torch.chunk(dts, 4, dim=1)
        bbox_width_ref = xmax - xmin
        bbox_height_ref = ymax - ymin
        center_x_ref = 0.5 * (xmin+xmax)
        center_y_ref = 0.5 * (ymin+ymax)
        # Pairwise (n*m, n*m) log-ratios via broadcasting against transposes.
        delta_x = center_x_ref - center_x_ref.transpose(0, 1)
        delta_x = delta_x / bbox_width_ref
        delta_x = (delta_x.abs() + 1e-3).log()
        delta_y = center_y_ref - center_y_ref.transpose(0, 1)
        delta_y = delta_y / bbox_height_ref
        delta_y = (delta_y.abs() + 1e-3).log()
        delta_width = bbox_width_ref / bbox_width_ref.transpose(0, 1)
        delta_width = delta_width.log()
        delta_height = bbox_height_ref / bbox_height_ref.transpose(0, 1)
        delta_height = delta_height.log()
        position_matrix = torch.stack(
            [delta_x, delta_y, delta_width, delta_height], dim=2
        )
        # Geometric frequency ladder: wave_length ** (8 * k / feat_dim).
        feat_range = torch.arange(
            0, self.feat_dim / 8, device=position_matrix.device
        )
        dim_mat = torch.full(
            (len(feat_range), ), self.wave_length, device=position_matrix.device
        ).pow(8.0 / self.feat_dim * feat_range)
        dim_mat = dim_mat.view(1, 1, 1, -1).expand(*position_matrix.shape, -1)
        position_mat = position_matrix.unsqueeze(3).expand(
            -1, -1, -1, dim_mat.shape[3]
        )
        position_mat = position_mat * 100.0
        div_mat = position_mat / dim_mat
        sin_mat, cos_mat = div_mat.sin(), div_mat.cos()
        # [num_rois, num_nongt_rois, 4, feat_dim / 4]
        embedding = torch.cat([sin_mat, cos_mat], dim=3)
        # [num_rois, num_nongt_rois, feat_dim]
        embedding = embedding.reshape(
            embedding.shape[0], embedding.shape[1],
            embedding.shape[2] * embedding.shape[3]
        )
        embedding = embedding.permute(2, 0, 1)
        # Keep only the intra-sample pairs (the m x m diagonal blocks).
        out = []
        for i in range(n):
            out.append(embedding[:, i*m:i*m+m, i*m:i*m+m].reshape(-1))
        out = torch.stack(out)
        return out

    def appr_vec(self, v):
        """Grouped appearance-affinity features.

        :param v: (n, m, c) per-tile appearance features.
        :return: (n, groups * m * m) flattened per-group Gram matrices of the
            tiles within each sample.
        """
        n, m, c = v.shape
        out = []
        for i in range(n):
            # Split channels into groups, then form one m x m affinity matrix
            # per group with a batched inner product.
            v_ = v[i].reshape(m, self.groups, -1)
            v_ = v_.permute(1, 0, 2)
            mat = torch.bmm(v_, v_.transpose(1, 2))
            out.append(mat.reshape(-1))
        out = torch.stack(out)
        return out

    def classifier(self, x, softmax=False):
        """Run the match/non-match MLP; optionally softmax the logits."""
        out = self._classifier(x)
        if softmax:
            return F.softmax(out, dim=1)
        else:
            return out

    def features(self, im_tiles):
        """Embed image tiles: backbone logits -> ReLU -> 1024-D projection."""
        return self.a_fc(F.relu(self.backbone(im_tiles)))

    def forward(self, data, raw=True):
        """Score current-vs-reference detection pairings.

        :param data: dict with 'cur_im'/'ref_im' (image tiles if ``raw``,
            otherwise precomputed features) and 'cur_dt'/'ref_dt' boxes.
        :param raw: when True, run the backbone on the image tiles first.
        :return: logits during training, softmax probabilities in eval mode.
        """
        if raw:
            im_tiles = torch.cat([data['cur_im'], data['ref_im']], dim=1)
            n, m, c, h, w = im_tiles.shape
            im_tiles = im_tiles.reshape(n * m, c, h, w)
            feats = self.features(im_tiles)
            feats = feats.reshape(n, m, -1)
        else:
            feats = torch.cat([data['cur_im'], data['ref_im']], dim=1)
        appr_features = self.appr_vec(feats)
        dt_tiles = torch.cat(
            [data['cur_dt'].unsqueeze(1), data['ref_dt']], dim=1
        )
        pos_features = self.pos_vec(dt_tiles)
        features = torch.cat([appr_features, pos_features], dim=1)
        out = self.classifier(features, softmax=not self.training)
        return out

    def loss(self, out, targets):
        """Cross-entropy over the 2-way association logits."""
        return F.cross_entropy(out, targets)
|
<reponame>koconnor4/pyDIA
import sys
import os
import numpy as np
from astropy.io import fits
from pyraf import iraf
from io_functions import read_fits_file, write_image
from image_functions import compute_saturated_pixel_mask, subtract_sky
def transform_coeffs(deg, dx, xx, yy):
    """Fit a 2-D polynomial transform of total degree ``deg`` by least squares.

    Solves the normal equations for coefficients a[m, n] such that
    dx ~= sum_{m+n<=deg} a[m, n] * xx**m * yy**n.

    :param deg: polynomial degree of the transform.
    :param dx: residuals to fit (1-D array).
    :param xx: normalized x coordinates (1-D array, same length as dx).
    :param yy: normalized y coordinates (1-D array, same length as dx).
    :return: (deg+1, deg+1) coefficient array; entries with m+n > deg stay 0.
    """
    a = np.zeros((deg + 1, deg + 1))
    # Floor division: a plain '/' yields a float under Python 3, and
    # np.zeros((float, float)) raises TypeError.
    nterms = (deg + 1) * (deg + 2) // 2
    M = np.zeros((nterms, nterms))
    v = np.zeros(nterms)
    i = 0
    # Build the normal equations, one row per polynomial term (m, n).
    for m in range(deg + 1):
        for n in range(deg + 1 - m):
            v[i] = np.sum(dx * xx ** m * yy ** n)
            j = 0
            for p in range(deg + 1):
                for q in range(deg + 1 - p):
                    M[i, j] = np.sum(xx ** (m + p) * yy ** (n + q))
                    j += 1
            i += 1
    c = np.linalg.solve(M, v)
    # Unpack the flat solution vector back into the (m, n) coefficient grid.
    i = 0
    for m in range(deg + 1):
        for n in range(deg + 1 - m):
            a[m, n] = c[i]
            i += 1
    return a
def compute_xy_shift(pos1, pos2, threshold, dx=0.0, dy=0.0, degree=0):
    """Iteratively match two star lists and fit a polynomial coordinate transform.

    For each polynomial degree up to ``degree``, every star in ``pos1`` is
    matched to its nearest neighbour in ``pos2`` under the current transform,
    the match radius is tightened (scale 21 down to 1 in steps of 4, times
    ``threshold``), and the coefficients are refitted from the matched pairs
    with transform_coeffs().

    :param pos1: (N, 2) x/y star positions in the first frame.
    :param pos2: (M, 2) x/y star positions in the second frame.
    :param threshold: base matching radius in pixels.
    :param dx: initial constant x offset (degree-0 seed).
    :param dy: initial constant y offset (degree-0 seed).
    :param degree: maximum polynomial degree of the transform.
    :return: (a, b) coefficient arrays for the x and y transforms.
    """
    x1 = pos1[:, 0]
    y1 = pos1[:, 1]
    x2 = pos2[:, 0]
    y2 = pos2[:, 1]
    # Normalized coordinates of the first list; the transform polynomials
    # are evaluated in these fractional coordinates.
    xx = (x1 - np.mean(x1)) / np.mean(x1)
    yy = (y1 - np.mean(y1)) / np.mean(y1)
    print('Matching positions for', len(x1), 'stars')
    match = np.zeros_like(x1, dtype=np.int32)
    deltax = np.zeros_like(x1)
    deltay = np.zeros_like(x1)
    for deg in range(degree + 1):
        a = np.zeros((deg + 1, deg + 1))
        b = np.zeros((deg + 1, deg + 1))
        if deg == 0:
            # Seed the constant term with the user-supplied offsets.
            a[0, 0] = dx
            b[0, 0] = dy
        else:
            # Warm-start from the previous (lower-degree) fit.
            for m in range(deg):
                for n in range(deg - m):
                    a[m, n] = a_prev[m, n]
                    b[m, n] = b_prev[m, n]
        # Progressively tighten the matching radius.
        for scale in range(21, 0, -4):
            xoffset = np.zeros_like(x1)
            yoffset = np.zeros_like(x1)
            # Evaluate the current transform at every pos1 star.
            for m in range(deg + 1):
                for n in range(deg + 1 - m):
                    xoffset += a[m, n] * (xx ** m) * (yy ** n)
                    yoffset += b[m, n] * (xx ** m) * (yy ** n)
            # Nearest-neighbour match of each pos1 star under the transform.
            for j1 in range(len(x1)):
                r2 = (x1[j1] - x2 - xoffset[j1]) ** 2 + (
                    y1[j1] - y2 - yoffset[j1]) ** 2
                mm = np.where(r2 == np.min(r2))
                try:
                    match[j1] = np.where(r2 == np.min(r2))[0][0]
                except:
                    # NOTE(review): bare except — any failure here dumps
                    # diagnostics and aborts the whole process.
                    print( r2)
                    print( np.min(np.sqrt(r2)))
                    print( mm)
                    print( mm[0])
                    sys.exit(0)
                deltax[j1] = x1[j1] - x2[match[j1]] - xoffset[j1]
                deltay[j1] = y1[j1] - y2[match[j1]] - yoffset[j1]
            deltar = np.sqrt(deltax ** 2 + deltay ** 2)
            # Keep only pairs within the current (scaled) match radius.
            good = np.where(deltar < scale * threshold)[0]
            dx = x1 - x2[match]
            dy = y1 - y2[match]
            # Refit the transforms from the well-matched stars only.
            a = transform_coeffs(deg, dx[good], xx[good], yy[good])
            b = transform_coeffs(deg, dy[good], xx[good], yy[good])
            print('degree', deg, 'using', good.shape[0], 'stars')
            print('threshold = ', scale * threshold, 'pixels')
            print('a = ', a)
            print('b = ', b)
            print('std = ', np.std(deltar), '(all) ',
                  np.std(deltar[good]), '(matched)')
            a_prev = a
            b_prev = b
    return a, b
def detect_stars(f, params):
    """Detect stars in image ``f`` with IRAF daofind/phot.

    Lowers the detection threshold (starting at 100 and halving each pass)
    until at least ``2 * params.nstamps`` stars are found or the threshold
    drops to 1.5, then returns the stars sorted by magnitude.

    :param f: image wrapper; ``f.fullname`` is the FITS path, ``f.fw`` the FWHM.
    :param params: pipeline parameters (output dir, gain, read noise, ...).
    :return: (nstars, 3) array of x, y, mag, or None if nothing was detected.
    """
    print('Detecting stars in', f.name)
    print('Current directory is', os.getcwd())
    fp = params.loc_output + os.path.sep
    fn = f.fullname
    iraf.digiphot()
    iraf.daophot()
    print('FWHM = ', f.fw)
    nstars = 0
    thresh = 100
    while (nstars < 2 * params.nstamps) and (thresh > 1.5):
        print('thresh = ', thresh)
        # Remove stale output files; the IRAF tasks refuse to overwrite.
        for d in ['temp.stars', 'temp.phot']:
            if os.path.exists(fp + d):
                os.system('/bin/rm ' + fp + d)
        iraf.daofind(image=fn, output=fp + 'temp.stars', interactive='no',
                     verify='no', threshold=thresh, sigma=30, fwhmpsf=f.fw,
                     datamin=params.pixel_min, datamax=params.pixel_max,
                     epadu=params.gain, readnoise=params.readnoise,
                     noise='poisson')
        iraf.phot(image=fn, output=fp + 'temp.phot', coords=fp + 'temp.stars',
                  interactive='no', verify='no', sigma=30, fwhmpsf=f.fw,
                  datamin=params.pixel_min, datamax=params.pixel_max,
                  epadu=params.gain, readnoise=params.readnoise,
                  noise='poisson', Stdout='/dev/null')
        nstars = 0
        if os.path.exists(fp + 'temp.phot'):
            # Sort brightest-first and renumber before dumping the catalogue.
            iraf.psort(infiles=fp + 'temp.phot', field='MAG')
            iraf.prenumber(infile=fp + 'temp.phot')
            s = iraf.pdump(infiles=fp + 'temp.phot', Stdout=1,
                           fields='ID,XCENTER,YCENTER,MAG', expr='yes')
            stars = np.zeros([len(s), 3])
            i = 0
            for line in s:
                mag = line.split()[3]
                if not (mag == 'INDEF'):
                    # List comprehension instead of map(): under Python 3,
                    # np.array(map(...)) builds a useless 0-d object array.
                    stars[i, :] = np.array(
                        [float(v) for v in line.split()[1:4]])
                    i += 1
            nstars = i
        thresh = thresh * 0.5
    if nstars == 0:
        print('Error: could not detect stars in', fn)
        return None
    # Trim the preallocated array to the stars actually parsed.
    stars = stars[:i, :].copy()
    sys.old_stdout = sys.stdout  # NOTE(review): looks vestigial — left as-is
    return stars
def choose_stamps(f, params):
    """Select ``params.nstamps`` stamp positions on bright, clean stars.

    Candidate stars come from detect_stars() (brightest first); stars at
    least ``params.stamp_edge_distance`` pixels from the detector edge are
    preferred, and a stamp is accepted only when its region contains no
    flagged pixels (mask convention assumed: values >= 1 are good — TODO
    confirm against compute_saturated_pixel_mask).

    :param f: image wrapper with a ``.image`` array.
    :param params: pipeline parameters.
    :return: (nstamps, 3) array of stamp star x, y, mag.
    """
    mask = compute_saturated_pixel_mask(f.image, 6, params)
    stars = detect_stars(f, params)
    (xmax, ymax) = f.image.shape
    n_good = 0
    # The builtin int replaces np.int, which was removed in NumPy >= 1.24.
    snum = np.zeros(params.nstamps).astype(int)
    md = params.stamp_edge_distance
    q = np.where(
        (stars[:, 0] > md) & (stars[:, 0] < xmax - md) & (stars[:, 1] > md) & (
            stars[:, 1] < ymax - md))
    if len(q[0]) >= params.nstamps:
        gstars = stars[q]
    else:
        print('Warning: using stamps close to edge of detector')
        gstars = stars
    md = int(params.stamp_half_width)
    i = 0
    # Walk the (brightest-first) candidates until enough clean stamps found.
    while (n_good < params.nstamps) & (i < gstars.shape[0]):
        if ((gstars[i, 0] > md) & (gstars[i, 0] < xmax - md) & (
                gstars[i, 1] > md) & (gstars[i, 1] < ymax - md)):
            mstamp = mask[
                int(gstars[i, 0] + 0.5) - md:int(gstars[i, 0] + 0.5) + md,
                int(gstars[i, 1] + 0.5) - md:int(gstars[i, 1] + 0.5) + md]
            q = np.where(mstamp < 1)
            # Accept the stamp only if no mask value below 1 lies inside it.
            if len(q[0]) == 0:
                snum[n_good] = i
                n_good += 1
        i += 1
    if n_good < params.nstamps:
        print('Warning: stamps may contain saturated pixels')
        stamps = gstars[:params.nstamps, :]
    else:
        stamps = gstars[snum]
    return stamps
def rewrite_psg(file1, file2):
    """Copy a DAOPHOT .psg group file and report the minimum star separation.

    ``file1`` is copied verbatim to ``file2`` while the star records
    (whitespace columns: id, group, x, y, ...) are scanned for the smallest
    distance between the first star of each group and the later stars of
    the same group.

    :param file1: input .psg file path.
    :param file2: output path for the copy.
    :return: the minimum separation found (pixels), truncated to an int;
        100 if no group has more than one star.
    """
    min_separation = 100.0
    q = open(file2, 'w')
    lastgroup = -1
    for line in open(file1, 'r'):
        if line[0] == '#':
            q.write(line)
        else:
            group = int(line.split()[1])
            if group > lastgroup:
                # First star of a new group: remember its position.
                lastgroup = group
                x0 = float(line.split()[2])
                # Bug fix: y is column 3 — the original read column 2 (x)
                # for both coordinates.
                y0 = float(line.split()[3])
            else:
                x = float(line.split()[2])
                y = float(line.split()[3])  # bug fix: was column 2 (x)
                separation = np.sqrt((x - x0) ** 2 + (y - y0) ** 2)
                if separation < min_separation:
                    min_separation = separation
            q.write(line)
    q.close()
    return int(min_separation)
def compute_psf_image(params, g, psf_deg=1, psf_rad=8, star_file='phot.mags',
                      psf_image='psf.fits', edge_dist=5):
    """Build a DAOPHOT PSF model for reference image ``g`` and measure stars.

    Workflow: detect stars with daofind (or transform the positions from
    ``params.star_file`` onto this frame), run aperture photometry, select
    PSF stars, iterate psf/substar to remove the PSF stars' neighbours,
    write the final PSF to ``psf_image`` and the photometry catalogue to
    ``<params.loc_output>/ref.mags``.

    :param params: pipeline parameter object (output dir, gain, read noise,
        star-file options, ...).
    :param g: reference image wrapper (``.image``, ``.mask``, ``.fw``,
        ``.fullname``).
    :param psf_deg: unused; kept for interface compatibility.
    :param psf_rad: unused; kept for interface compatibility.
    :param star_file: unused; kept for interface compatibility.
    :param psf_image: file name of the output PSF FITS image.
    :param edge_dist: minimum allowed distance (pixels) from the image edge.
    :return: array of star photometry, one row per star (columns depend on
        the branch taken; see code).
    """
    iraf.digiphot()
    iraf.daophot()
    fp = params.loc_output + os.path.sep
    # Work on a masked copy of the reference image.
    f_im = g.image * g.mask
    f = fp + 'temp.ref.fits'
    write_image(f_im, f)
    g.fw = np.max([1.5, g.fw])
    logfile = fp + 'psf.log'
    fd = fits.getdata(f)
    xmax = fd.shape[0] - edge_dist
    ymax = fd.shape[1] - edge_dist
    # Remove stale files from a previous run; the IRAF tasks won't overwrite.
    for d in ['temp.stars', 'temp.phot', 'temp.phot1', 'temp.phot2',
              'temp.pst', 'temp.opst', 'temp.opst2', 'temp.psf.fits',
              'temp.psf1.fits', 'temp.psf2.fits', 'temp.psg', 'temp.psg2',
              'temp.psg3', 'temp.psg5', 'temp.rej', 'temp.rej2',
              'temp.sub.fits', 'temp.sub1.fits', 'temp.sub2.fits',
              'temp.opst1', 'temp.opst3', 'temp.rej3', 'temp.nst',
              'temp.stars1', 'ref.mags', psf_image, 'temp.als', 'temp.als2']:
        if os.path.exists(fp + d):
            os.remove(fp + d)
    # locate stars
    iraf.daofind(image=f, output=fp + 'temp.stars', interactive='no',
                 verify='no', threshold=3, sigma=params.star_detect_sigma,
                 fwhmpsf=g.fw, datamin=1, datamax=params.pixel_max,
                 epadu=params.gain, readnoise=params.readnoise,
                 noise='poisson')
    if params.star_file:
        als_recenter = 'no'
        all_template_stars = np.genfromtxt(params.star_file)
        all_new_stars = np.genfromtxt(fp + 'temp.stars')
        # Keep only the brightest star_file_number_match stars on each side.
        if all_new_stars.shape[0] > params.star_file_number_match:
            new_stars = all_new_stars[all_new_stars[:, 2].argsort()][
                :params.star_file_number_match]
        else:
            new_stars = all_new_stars
        if all_template_stars.shape[0] > params.star_file_number_match:
            template_stars = all_template_stars[
                all_template_stars[:, 3].argsort()][
                :params.star_file_number_match]
        else:
            template_stars = all_template_stars
        # Fit the coordinate transform from the template frame to this one.
        tx, ty = compute_xy_shift(new_stars, template_stars[:, 1:3], 0.5,
                                  degree=params.star_file_transform_degree)
        if params.star_file_has_magnitudes:
            star_positions = all_template_stars[:, 1:4]
            xx = (star_positions[:, 0] - np.mean(new_stars[:, 0])) / np.mean(
                new_stars[:, 0])
            yy = (star_positions[:, 1] - np.mean(new_stars[:, 1])) / np.mean(
                new_stars[:, 1])
            # Apply the fitted polynomial transform to the template positions.
            for m in range(params.star_file_transform_degree + 1):
                for n in range(params.star_file_transform_degree + 1 - m):
                    star_positions[:, 0] += tx[m, n] * xx ** m * yy ** n
                    star_positions[:, 1] += ty[m, n] * xx ** m * yy ** n
            np.savetxt(fp + 'temp.stars.1', star_positions,
                       fmt='%10.3f %10.3f %10.3f')
        else:
            star_positions = all_template_stars[:, 1:3]
            xx = (star_positions[:, 0] - np.mean(new_stars[:, 0])) / np.mean(
                new_stars[:, 0])
            yy = (star_positions[:, 1] - np.mean(new_stars[:, 1])) / np.mean(
                new_stars[:, 1])
            for m in range(params.star_file_transform_degree + 1):
                for n in range(params.star_file_transform_degree + 1 - m):
                    star_positions[:, 0] += tx[m, n] * xx ** m * yy ** n
                    star_positions[:, 1] += ty[m, n] * xx ** m * yy ** n
            np.savetxt(fp + 'temp.stars.1', star_positions,
                       fmt='%10.3f %10.3f')
        all_template_stars[:, 1] = star_positions[:, 0]
        all_template_stars[:, 2] = star_positions[:, 1]
    else:
        als_recenter = 'yes'
        star_positions = np.genfromtxt(fp + 'temp.stars')
        np.savetxt(fp + 'temp.stars.1', star_positions[:, :2],
                   fmt='%10.3f %10.3f')
    iraf.phot(image=f, output=fp + 'temp.phot', coords=fp + 'temp.stars.1',
              interactive='no', verify='no', sigma=params.star_detect_sigma,
              fwhmpsf=g.fw, apertures=g.fw, datamin=1,
              datamax=2 * params.pixel_max, epadu=params.gain,
              annulus=3 * g.fw, dannulus=3.0, readnoise=params.readnoise,
              noise='poisson')
    print('fw = ', g.fw)
    # select PSF stars
    iraf.pstselect(image=f, photfile=fp + 'temp.phot', pstfile=fp + 'temp.pst',
                   maxnpsf=40, interactive='no', verify='no', datamin=1,
                   fitrad=2.0, datamax=params.pixel_max, epadu=params.gain,
                   psfrad=np.max([4.0, g.fw]), readnoise=params.readnoise,
                   noise='poisson')
    if params.star_file and params.star_file_has_magnitudes:
        # We don't need to do the photometry - only make the PSF
        # Initial PSF estimate to generate PSF groups
        iraf.psf(image=f, photfile=fp + 'temp.phot', pstfile=fp + 'temp.pst',
                 psfimage=fp + 'temp.psf', function=params.psf_profile_type,
                 opstfile=fp + 'temp.opst', groupfile=fp + 'temp.psg',
                 interactive='no', verify='no', varorder=0,
                 psfrad=2 * np.max([g.fw, 1.8]), datamin=-10000,
                 datamax=0.95 * params.pixel_max, scale=1.0)
        # construct a file of the psf neighbour stars
        slist = []
        psf_stars = np.loadtxt(fp + 'temp.opst', usecols=(0, 1, 2))
        for star in range(psf_stars.shape[0]):
            xp = psf_stars[star, 1]
            yp = psf_stars[star, 2]
            # The builtin int replaces np.int, removed in NumPy >= 1.24.
            xmin = np.max([int(xp - 10 * g.fw), 0])
            xmax = np.min([int(xp + 10 * g.fw), f_im.shape[0]])
            ymin = np.max([int(yp - 10 * g.fw), 0])
            ymax = np.min([int(yp + 10 * g.fw), f_im.shape[1]])
            # Collect all catalogue stars falling near this PSF star.
            p = star_positions[np.logical_and(
                np.logical_and(star_positions[:, 0] > xmin,
                               star_positions[:, 0] < xmax),
                np.logical_and(star_positions[:, 1] > ymin,
                               star_positions[:, 1] < ymax))]
            slist.append(p)
        group_stars = np.concatenate(slist)
        np.savetxt(fp + 'temp.nst', group_stars, fmt='%10.3f %10.3f %10.3f')
        # subtract PSF star neighbours
        iraf.substar(image=f, photfile=fp + 'temp.nst',
                     psfimage=fp + 'temp.psf', exfile=fp + 'temp.opst',
                     fitrad=2.0, subimage=fp + 'temp.sub1', verify='no',
                     datamin=1, datamax=params.pixel_max, epadu=params.gain,
                     readnoise=params.readnoise, noise='poisson')
        # final PSF
        iraf.psf(image=fp + 'temp.sub1', photfile=fp + 'temp.phot',
                 pstfile=fp + 'temp.opst', psfimage=psf_image, psfrad=5 * g.fw,
                 function=params.psf_profile_type, opstfile=fp + 'temp.opst2',
                 groupfile=fp + 'temp.psg2', interactive='no', verify='no',
                 varorder=0, datamin=1, datamax=0.95 * params.pixel_max,
                 scale=1.0)
        np.savetxt(fp + 'ref.mags', all_template_stars,
                   fmt='%7d %10.3f %10.3f %10.3f')
        stars = all_template_stars
    else:
        # initial PSF estimate
        iraf.psf(image=f, photfile=fp + 'temp.phot', pstfile=fp + 'temp.pst',
                 psfimage=fp + 'temp.psf', function=params.psf_profile_type,
                 opstfile=fp + 'temp.opst', groupfile=fp + 'temp.psg1',
                 interactive='no', verify='no', varorder=0, psfrad=5 * g.fw,
                 datamin=1, datamax=0.95 * params.pixel_max, scale=1.0)
        # separation distance of near neighbours
        separation = np.max(
            [rewrite_psg(fp + 'temp.psg1', fp + 'temp.psg2'), 3])
        print('separation = ', separation)
        # subtract all stars using truncated PSF
        iraf.allstar(image=f, photfile=fp + 'temp.phot',
                     psfimage=fp + 'temp.psf', allstarfile=fp + 'temp.als',
                     rejfile='', subimage=fp + 'temp.sub', verify='no',
                     psfrad=3 * g.fw, fitrad=2.0, recenter='yes',
                     groupsky='yes', fitsky='yes', sannulus=7, wsannulus=10,
                     datamin=1, datamax=params.pixel_max, epadu=params.gain,
                     readnoise=params.readnoise, noise='poisson')
        if params.star_file:
            os.system('cp ' + fp + 'temp.phot ' + fp + 'temp.phot2')
        else:
            # locate new stars revealed after the first subtraction pass
            iraf.daofind(image=fp + 'temp.sub', output=fp + 'temp.stars1',
                         interactive='no', verify='no', threshold=3,
                         sigma=params.star_detect_sigma, fwhmpsf=3 * g.fw,
                         datamin=1, datamax=params.pixel_max,
                         epadu=params.gain, readnoise=params.readnoise,
                         noise='poisson')
            # magnitudes for new stars
            iraf.phot(image=fp + 'temp.sub', output=fp + 'temp.phot1',
                      coords=fp + 'temp.stars1', interactive='no', verify='no',
                      sigma=params.star_detect_sigma, fwhmpsf=g.fw, datamin=1,
                      datamax=params.pixel_max, epadu=params.gain,
                      readnoise=params.readnoise, noise='poisson')
            # join star lists together
            iraf.pconcat(infiles=fp + 'temp.phot,' + fp + 'temp.phot1',
                         outfile=fp + 'temp.phot2')
        # new PSF estimate to generate PSF groups
        iraf.psf(image=f, photfile=fp + 'temp.phot2', pstfile=fp + 'temp.pst',
                 psfimage=fp + 'temp.psf2', function=params.psf_profile_type,
                 opstfile=fp + 'temp.opst2', groupfile=fp + 'temp.psg3',
                 interactive='no', verify='no', varorder=0, psfrad=5 * g.fw,
                 datamin=-10000, datamax=0.95 * params.pixel_max, scale=1.0)
        # magnitudes for PSF group stars
        iraf.nstar(image=f, groupfile=fp + 'temp.psg3',
                   psfimage=fp + 'temp.psf2', nstarfile=fp + 'temp.nst',
                   rejfile='', verify='no', psfrad=5 * g.fw, fitrad=2.0,
                   recenter='no', groupsky='yes', fitsky='yes', sannulus=7,
                   wsannulus=10, datamin=1, datamax=params.pixel_max,
                   epadu=params.gain, readnoise=params.readnoise,
                   noise='poisson')
        # subtract PSF star neighbours
        iraf.substar(image=f, photfile=fp + 'temp.nst',
                     psfimage=fp + 'temp.psf2', exfile=fp + 'temp.opst2',
                     fitrad=2.0, subimage=fp + 'temp.sub1', verify='no',
                     datamin=1, datamax=params.pixel_max, epadu=params.gain,
                     readnoise=params.readnoise, noise='poisson')
        # final PSF
        iraf.psf(image=fp + 'temp.sub1', photfile=fp + 'temp.phot2',
                 pstfile=fp + 'temp.opst2', psfimage=psf_image,
                 psfrad=5 * g.fw, function=params.psf_profile_type,
                 opstfile=fp + 'temp.opst3', groupfile=fp + 'temp.psg5',
                 interactive='no', verify='no', varorder=0, datamin=1,
                 datamax=0.95 * params.pixel_max, scale=1.0)
        # final photometry
        iraf.allstar(image=g.fullname, photfile=fp + 'temp.phot2',
                     psfimage=psf_image, allstarfile=fp + 'temp.als2',
                     rejfile='', subimage=fp + 'temp.sub2', verify='no',
                     psfrad=5 * g.fw, recenter=als_recenter, groupsky='yes',
                     fitsky='yes', sannulus=7, wsannulus=10, fitrad=3.0,
                     datamin=params.pixel_min, datamax=params.pixel_max,
                     epadu=params.gain, readnoise=params.readnoise,
                     noise='poisson')
        # Read the fitted PSF magnitude zero-point back from the header lines.
        psfmag = 10.0
        for line in open(fp + 'temp.als2', 'r'):
            sline = line.split()
            if sline[1] == 'PSFMAG':
                psfmag = float(sline[3])
                break
        if params.star_file:
            iraf.psort(infiles=fp + 'temp.als2', field='ID')
            os.system('cp ' + fp + 'temp.als2 ' + fp + 'temp.als3')
        else:
            # Discard stars near the edge or with undefined magnitudes.
            selection = 'XCE >= ' + str(edge_dist) + ' && XCE <= ' + str(
                xmax) + ' && YCE >= ' + str(edge_dist) + ' && YCE <= ' + str(
                ymax) + ' && MAG != INDEF'
            iraf.pselect(infiles=fp + 'temp.als2', outfiles=fp + 'temp.als3',
                         expr=selection)
            iraf.psort(infiles=fp + 'temp.als3', field='MAG')
            iraf.prenumber(infile=fp + 'temp.als3')
        s = iraf.pdump(infiles=fp + 'temp.als3', Stdout=1,
                       fields='ID,XCENTER,YCENTER,MAG,MERR,MSKY,SHARPNESS,CHI',
                       expr='yes')
        sf = [k.replace('INDEF', '-1') for k in s]
        stars = np.zeros([len(sf), 5])
        for i, line in enumerate(sf):
            # List comprehension instead of map(): under Python 3,
            # np.array(map(...)) builds a useless 0-d object array.
            stars[i, :] = np.array([float(v) for v in sf[i].split()[1:6]])
        s = iraf.pdump(infiles=fp + 'temp.als3', Stdout=1,
                       fields='ID,XCENTER,YCENTER,MAG,MERR,SHARPNESS,CHI,MSKY',
                       expr='yes')
        sf = [k.replace('INDEF', '-1') for k in s]
        with open(fp + 'ref.mags', 'w') as fid:
            for s in sf:
                fid.write(s + '\n')
    return stars
def group_stars_ccd(params, star_positions, reference):
    """Assign stars to a regular grid of spatial groups on the CCD.

    The detector is tiled into square cells of ``params.ccd_group_size``
    pixels and star indices are bucketed cell by cell.

    :param params: pipeline parameters (uses ``ccd_group_size``).
    :param star_positions: (N, >=2) array of star x, y positions.
    :param reference: path of the reference FITS image (sets the CCD size).
    :return: (indx, g_bound, mposx, mposy) where ``indx`` holds star indices
        ordered group by group, ``g_bound[k]`` is the end offset of group k
        in ``indx``, and ``mposx``/``mposy`` are the cell centre coordinates.
    """
    print('grouping stars')
    d, h = read_fits_file(reference)
    ccd_size = d.shape
    print(d.shape)
    xpos = np.abs(star_positions[:, 0])
    ypos = np.abs(star_positions[:, 1])
    g_size = params.ccd_group_size
    # Floor division: a plain '/' yields floats under Python 3, which would
    # break range() and np.zeros() below.
    n_groups_x = (ccd_size[1] - 1) // g_size + 1
    n_groups_y = (ccd_size[0] - 1) // g_size + 1
    print(np.min(xpos), np.min(ypos))
    print(np.max(xpos), np.max(ypos))
    print(n_groups_x, n_groups_y)
    # The builtin int replaces np.int, which was removed in NumPy >= 1.24.
    indx = (xpos * 0).astype(int)
    c = 0
    k = 0
    mposx = np.zeros(n_groups_x * n_groups_y)
    mposy = np.zeros(n_groups_x * n_groups_y)
    g_bound = np.zeros(n_groups_x * n_groups_y).astype(int)
    for i in range(n_groups_x):
        for j in range(n_groups_y):
            print('group', i, j, i * g_size, (i + 1) * g_size, j * g_size,
                  (j + 1) * g_size)
            mposx[k] = (i + 0.5) * g_size
            mposy[k] = (j + 0.5) * g_size
            p = np.where((xpos >= i * g_size) & (xpos < (i + 1) * g_size) & (
                ypos >= j * g_size) & (ypos < (j + 1) * g_size))[0]
            if p.shape[0]:
                pn = p.shape[0]
                indx[c:c + pn] = p
                c += pn
                print(k, pn, c)
            g_bound[k] = c
            k += 1
    return indx, g_bound, mposx, mposy
|
<reponame>yuriyshapovalov/Prototypes
# datetime - basic date and time types
import datetime
class DatetimeTest:
    """Demo that exercises and prints the ``datetime.date``,
    ``datetime.datetime`` and ``datetime.time`` APIs."""

    def main(self):
        # Bug fix: 'self' was missing from the signature, so the call
        # DatetimeTest().main() raised TypeError.
        print("Date object")
        x = datetime.date(2012, 11, 4)
        print("datetime.date(2012, 11, 4) = {}".format(x))
        x = datetime.date.today()
        print("datetime.date.today() = {}".format(x))
        x = datetime.date.fromtimestamp(1234956789)
        print("datetime.date.fromtimestamp(1234956789) = {}".format(x))
        x = datetime.date.fromordinal(1111211)
        print("datetime.date.fromordinal(1111211) = {}".format(x))
        x = datetime.date.max
        print("datetime.date.max = {}".format(x))
        x = datetime.date.min
        print("datetime.date.min = {}".format(x))
        x = datetime.date.resolution
        print("datetime.date.resolution = {}".format(x))
        d = datetime.date(2012, 11, 28)
        x = d.year
        print("({}) datetime.date.year = {}".format(d, x))
        x = d.month
        print("({}) datetime.date.month = {}".format(d, x))
        x = d.day
        print("({}) datetime.date.day = {}".format(d, x))
        x = d.replace(year=2013)
        print("({}) datetime.date.replace(year=2013) = {}".format(d, x))
        x = d.timetuple()
        print("({}) datetime.date.timetuple() = {}".format(d, x))
        x = d.toordinal()
        print("({}) datetime.date.toordinal() = {}".format(d, x))
        x = d.weekday()
        print("({}) datetime.date.weekday() = {}".format(d, x))
        x = d.isoweekday()
        print("({}) datetime.date.isoweekday() = {}".format(d, x))
        x = d.isocalendar()
        print("({}) datetime.date.isocalendar() = {}".format(d, x))
        x = d.isoformat()
        print("({}) datetime.date.isoformat() = {}".format(d, x))
        x = d.__str__()
        print("({}) datetime.date.__str__ = {}".format(d, x))
        x = d.ctime()
        print("({}) datetime.date.ctime() = {}".format(d, x))
        x = d.strftime("%d %a %b")
        print("({}) datetime.date.strftime('%d %a %b') = {}".format(d, x))
        x = d.__format__("%A-%B-%w")
        print("({}) datetime.date.__format__() = {}".format(d, x))
        print("\nDatetime object")
        d = datetime.datetime(2013, 10, 28, 17, 45, 29)
        print("datetime(2013, 10, 28, 17, 45, 29) = {}".format(d))
        x = datetime.datetime.today()
        print("datetime.today() = {}".format(x))
        x = datetime.datetime.now()
        print("datetime.now() = {}".format(x))
        x = datetime.datetime.utcnow()
        print("datetime.utcnow() = {}".format(x))
        x = datetime.datetime.fromtimestamp(1239456789)
        print("datetime.fromtimestamp(1239456789) = {}".format(x))
        x = datetime.datetime.utcfromtimestamp(347283)
        print("datetime.utcfromtimestamp(347283) = {}".format(x))
        t = datetime.time(17, 31, 10, 391)
        x = datetime.datetime.combine(d, t)
        print("datetime.combine('date', 'time') = {}".format(x))
        x = d.strptime("2013 11", "%Y %m")
        print("({}) datetime.strptime('2013 11', '%Y %m') = {}".format(d, x))
        print("\nTime object")
        t = datetime.time(21, 54, 11, 2318)
        print("datetime.time(21, 54, 11) = {}".format(t))
        y = t.min
        print("datetime.min = {}".format(y))
        y = t.max
        print("datetime.max = {}".format(y))
        y = t.resolution
        print("datetime.resolution = {}".format(y))
        y = t.hour
        print("datetime.hour = {}".format(y))
        y = t.minute
        print("datetime.minute = {}".format(y))
        y = t.second
        print("datetime.second = {}".format(y))
        y = t.microsecond
        print("datetime.microsecond = {}".format(y))
        y = t.tzinfo
        print("datetime.tzinfo = {}".format(y))
        y = t.replace(hour=7, second=21)
        print("({}) datetime.replace(hour=7, seconds=21) = {}".format(t, y))
        y = t.isoformat()
        print("datetime.isoformat = {}".format(y))
        y = t.utcoffset()
        print("datetime.utcoffset = {}".format(y))
        y = t.dst()
        print("datetime.dst = {}".format(y))
# Script entry point: run the datetime demo.
if __name__ == '__main__':
    DatetimeTest().main()
|
#===============================================================================
# Copyright 2007 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""Support functions and classes implementing the KinoSearch-like external sort
merging model. This module does not contain any user-level objects.
"""
import os, tempfile
from heapq import heapify, heapreplace, heappop
from whoosh.filedb.structfile import StructFile, pack_ushort, unpack_ushort
from whoosh.system import _INT_SIZE, _USHORT_SIZE
from whoosh.util import utf8encode, utf8decode
from whoosh.util.struct2 import Struct
# Utility functions
# Pre-compiled struct for two unsigned 32-bit big-endian integers
# (document number, frequency); the bound methods are cached for speed.
_2int_struct = Struct("!II")
pack2ints = _2int_struct.pack
unpack2ints = _2int_struct.unpack
def encode_posting(fieldnum, text, doc, freq, datastring):
    """Encode a posting as a single sortable string.

    Layout: packed ushort field number, UTF-8 encoded text, a NUL
    separator, two packed ints (doc number, frequency), then the payload
    datastring.
    """
    parts = [
        pack_ushort(fieldnum),
        utf8encode(text)[0],
        chr(0),
        pack2ints(doc, freq),
        datastring,
    ]
    return "".join(parts)
def decode_posting(posting):
    """Decode an encoded posting string.

    Inverse of encode_posting(); returns a
    (field_number, text, document_number, frequency, datastring) tuple.
    """
    fieldnum = unpack_ushort(posting[:_USHORT_SIZE])[0]
    # The NUL byte terminates the UTF-8 text section.
    nul = posting.find(chr(0), _USHORT_SIZE)
    text = utf8decode(posting[_USHORT_SIZE:nul])[0]
    meta_start = nul + 1
    meta_end = meta_start + 2 * _INT_SIZE
    doc, freq = unpack2ints(posting[meta_start:meta_end])
    return fieldnum, text, doc, freq, posting[meta_end:]
def merge(run_readers, max_chunk_size):
    """Heap-based k-way merge of sorted on-disk runs.

    Generator yielding decoded postings in globally sorted order, buffering
    roughly ``max_chunk_size`` bytes of encoded postings between flushes.

    :param run_readers: RunReader objects, one per sorted run.
    :param max_chunk_size: output buffer size (bytes) before flushing.
    """
    # Initialize a list of terms we're "current"ly looking at, by taking the
    # first posting from each buffer.
    #
    # The format of the list is [("encoded_posting", reader_number), ...]
    #
    # The list is sorted, and the runs are already sorted, so the first term in
    # this list should be the absolute "lowest" term.
    current = [(r.next(), i) for i, r
               in enumerate(run_readers)]
    heapify(current)
    # The number of active readers (readers with more postings to available),
    # initially equal to the total number of readers/buffers.
    active = len(run_readers)
    # Initialize the output buffer, and a variable to keep track of the output
    # buffer size. This buffer accumulates postings from the various buffers in
    # proper sorted order.
    output = []
    outputBufferSize = 0
    while active > 0:
        # Get the first ("encoded_posting", reader_number) pair and add it to
        # the output buffer.
        p, i = current[0]
        output.append(p)
        outputBufferSize += len(p)
        # If the output buffer is full, "flush" it by yielding the accumulated
        # postings back to the parent writer and clearing the output buffer.
        if outputBufferSize > max_chunk_size:
            for p in output:
                yield decode_posting(p)
            output = []
            outputBufferSize = 0
        # We need to replace the posting we just added to the output by getting
        # the next posting from the same buffer.
        # NOTE(review): entries of run_readers are never set to None anywhere,
        # so this guard always passes.
        if run_readers[i] is not None:
            # Take the first posting from buffer i and insert it into the
            # "current" list in sorted order. The current list must always stay
            # sorted, so the first item is always the lowest.
            # RunReader.next() returns None (falsy) when the run is exhausted.
            p = run_readers[i].next()
            if p:
                heapreplace(current, (p, i))
            else:
                heappop(current)
                active -= 1
    # If there are still terms in the "current" list after all the readers are
    # empty, dump them into the output buffer.
    if len(current) > 0:
        output.extend([p for p, i in current])
    # If there's still postings in the output buffer, yield them all to the
    # parent writer.
    if len(output) > 0:
        for p in output:
            yield decode_posting(p)
# Classes
class RunReader(object):
    """An iterator that yields posting strings from a "run" on disk.

    This class buffers the reads to improve efficiency. Note the Python
    2-style protocol: ``next()`` returns None at end-of-run instead of
    raising StopIteration (merge() relies on this).
    """

    def __init__(self, stream, count, buffer_size):
        """
        :param stream: the file from which to read.
        :param count: the number of postings in the stream.
        :param buffer_size: the size (in bytes) of the read buffer to use.
        """
        self.stream = stream
        self.count = count  # postings remaining to be read from the stream
        self.buffer_size = buffer_size
        self.buffer = []    # postings read ahead and awaiting delivery
        self.pointer = 0    # index of the next posting within self.buffer
        self.finished = False

    def close(self):
        # Close the underlying run file.
        self.stream.close()

    def _fill(self):
        # Clears and refills the buffer.
        # If this reader is exhausted, do nothing.
        if self.finished:
            return
        # Clear the buffer.
        buffer = self.buffer = []
        # Reset the index at which the next() method
        # reads from the buffer.
        self.pointer = 0
        # How much we've read so far.
        so_far = 0
        count = self.count
        # Read whole postings until the byte budget or the stream runs out.
        while so_far < self.buffer_size:
            if count <= 0:
                break
            p = self.stream.read_string2()
            buffer.append(p)
            so_far += len(p)
            count -= 1
        self.count = count

    def __iter__(self):
        return self

    def next(self):
        # Return the next posting string, refilling the buffer as needed.
        assert self.pointer <= len(self.buffer)
        if self.pointer == len(self.buffer):
            self._fill()
        # If after refilling the buffer is still empty, we're at the end of the
        # file and should stop. Probably this should raise StopIteration
        # instead of returning None.
        if len(self.buffer) == 0:
            self.finished = True
            return None
        r = self.buffer[self.pointer]
        self.pointer += 1
        return r
class PostingPool(object):
    """Represents the "pool" of all postings to be sorted. As documents are
    added, this object writes out "runs" of sorted encoded postings. When all
    documents have been added, this object merge sorts the runs from disk,
    yielding decoded postings to the SegmentWriter.
    """

    def __init__(self, limit):
        """
        :param limit: the maximum amount of memory to use at once for adding
            postings and the merge sort.
        """
        self.limit = limit
        self.size = 0            # bytes of encoded postings currently buffered
        self.postings = []       # in-memory buffer of encoded postings
        self.finished = False    # set once the pool has been iterated
        self.runs = []           # list of (run file, posting count) pairs
        self.tempfilenames = []  # temp file paths to delete after merging
        self.count = 0           # postings buffered since the last flush

    def add_posting(self, field_num, text, doc, freq, datastring):
        """Adds a posting to the pool.
        """
        if self.finished:
            raise Exception("Can't add postings after you iterate over the pool")
        # Spill the in-memory buffer to a sorted run on disk when full.
        if self.size >= self.limit:
            #print "Flushing..."
            self._flush_run()
        posting = encode_posting(field_num, text, doc, freq, datastring)
        self.size += len(posting)
        self.postings.append(posting)
        self.count += 1

    def _flush_run(self):
        # Called when the memory buffer (of size self.limit) fills up.
        # Sorts the buffer and writes the current buffer to a "run" on disk.
        if self.size > 0:
            tempfd, tempname = tempfile.mkstemp(".whooshrun")
            runfile = StructFile(os.fdopen(tempfd, "w+b"))
            self.postings.sort()
            for p in self.postings:
                runfile.write_string2(p)
            runfile.flush()
            # Rewind so the RunReader created later starts at the beginning.
            runfile.seek(0)
            self.runs.append((runfile, self.count))
            self.tempfilenames.append(tempname)
            #print "Flushed run:", self.runs
            self.postings = []
            self.size = 0
            self.count = 0

    def __iter__(self):
        # Iterating the PostingPool object performs a merge sort of the runs
        # that have been written to disk and yields the sorted, decoded
        # postings.
        if self.finished:
            raise Exception("Tried to iterate on PostingPool twice")
        run_count = len(self.runs)
        if self.postings and run_count == 0:
            # Special case: we never accumulated enough postings to flush to
            # disk, so the postings are still in memory: just yield them from
            # there.
            self.postings.sort()
            for p in self.postings:
                yield decode_posting(p)
            return
        if not self.postings and run_count == 0:
            # No postings at all
            return
        if self.postings:
            # Flush any leftover in-memory postings as a final run.
            self._flush_run()
            run_count = len(self.runs)
        #This method does an external merge to yield postings from the (n > 1)
        #runs built up during indexing and merging.
        # Divide up the posting pool's memory limit between the number of runs
        # plus an output buffer.
        max_chunk_size = int(self.limit / (run_count + 1))
        run_readers = [RunReader(run_file, count, max_chunk_size)
                       for run_file, count in self.runs]
        for decoded_posting in merge(run_readers, max_chunk_size):
            yield decoded_posting
        # Every reader must have been drained exactly to its posting count.
        for rr in run_readers:
            assert rr.count == 0
            rr.close()
        for tempfilename in self.tempfilenames:
            os.remove(tempfilename)
        # And we're done.
        self.finished = True
|
"""
https://stackoverflow.com/questions/29362142/django-rest-framework-hyperlinkedidentityfield-with-multiple-lookup-args
http://www.tomchristie.com/rest-framework-2-docs/api-guide/relations
https://github.com/miki725/formslayer/blob/master/formslayer/pdf/relations.py#L7-L46
https://stackoverflow.com/questions/32038643/custom-hyperlinked-url-field-for-more-than-one-lookup-field-in-a-serializer-of-d
https://stackoverflow.com/questions/43964007/django-rest-framework-get-or-create-for-primarykeyrelatedfield
"""
from collections import OrderedDict

from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from django.shortcuts import get_object_or_404

from rest_framework.fields import SkipField
from rest_framework.relations import PKOnlyObject
from rest_framework.response import Response
from rest_framework.serializers import (
    Field,
    HyperlinkedRelatedField,
    HyperlinkedIdentityField,
    HyperlinkedModelSerializer,
    ImageField,
    ValidationError,
    ListSerializer,
    LIST_SERIALIZER_KWARGS,
)
from .utils import (
deep_update,
get_real_path,
get_real_field_path,
get_class_name,
get_model_field_path,
get_path_options,
get_model_path,
get_mapped_path,
get_nested_attr,
has_circular_reference,
is_model_field,
assert_no_none,
has_ancestor,
HashableList,
HashableDict,
# get_nested,
DictDiffer,
)
class RepresentationMixin:
    """Serializer mixin that builds the representation field by field.

    Mirrors DRF's default ``to_representation`` loop but routes every field
    through ``to_representation_for_field`` so subclasses can customise
    per-field serialization, forwarding any extra positional/keyword args.
    """

    def to_representation(self, instance, *args, **kwargs):
        rep = OrderedDict()
        for field in self._readable_fields:
            try:
                attribute = field.get_attribute(instance)
            except SkipField:
                # The field opted out of serialization for this instance.
                continue
            # PKOnlyObject wraps a bare pk; a null pk means a null value.
            if isinstance(attribute, PKOnlyObject):
                is_none = attribute.pk is None
            else:
                is_none = attribute is None
            if is_none:
                rep[field.field_name] = None
            else:
                rep[field.field_name] = self.to_representation_for_field(
                    field, attribute, *args, **kwargs
                )
        return rep

    def to_representation_for_field(self, field, obj, *args, **kwargs):
        """Hook point: serialize a single field's attribute value."""
        return field.to_representation(obj, *args, **kwargs)
class ConditionalFieldsMixin(RepresentationMixin):
    """
    Returns serializer fields only if their configured conditions pass.

    ``conditional_fields`` maps a field name (or the special key ``"default"``)
    to a list of condition classes.  Each class is instantiated and its
    ``has_object_condition(key, value, obj, representation, request)`` is
    consulted; the field is dropped if any condition returns False.
    """

    conditional_fields = None

    def _passes_conditions(self, condition_classes, key, value, obj,
                           representation, request):
        """Return False as soon as any condition explicitly returns False."""
        for condition_class in condition_classes:
            condition = condition_class()
            result = condition.has_object_condition(
                key, value, obj, representation, request
            )
            if result is False:
                return False
        return True

    def filter_conditional_fields(self, representation, obj):
        """Return ``representation`` minus the fields failing their conditions."""
        if self.conditional_fields is None:
            return representation
        new_rep = OrderedDict()
        request = self.context["request"]
        for key, value in representation.items():
            if key in self.conditional_fields:
                condition_classes = self.conditional_fields[key]
            elif "default" in self.conditional_fields:
                # Fields without an explicit entry fall back to the defaults.
                condition_classes = self.conditional_fields["default"]
            else:
                condition_classes = []
            if not self._passes_conditions(
                condition_classes, key, value, obj, representation, request
            ):
                continue
            new_rep[key] = value
        return new_rep

    def to_representation(self, obj):
        representation = super().to_representation(obj)
        return self.filter_conditional_fields(representation, obj)
class ExplicitFieldsMixin(RepresentationMixin):
    """
    Remove all non-specified fields from the serializer output.

    Clients request fields via the ``fields`` query parameter using
    ``<model>.<field>`` paths; ``<model>.all`` requests every allowed field
    when ``implicit_fields_allowed`` is True.
    """

    explicit_fields_query_param = "fields"
    implicit_fields_query_param_value = "all"
    implicit_fields_allowed = True

    def get_explicit_field_path(self, field_name):
        """Return the ``<model>.<field>`` path used in query parameters."""
        model = self.Meta.model
        model_name = model.__name__.lower()
        return "{}.{}".format(model_name, field_name)

    @property
    def all_query_params(self):
        """
        When testing, the request object that is used is an HTTPRequest.
        When using the server, rest framework wraps this with a Request object, and this
        object has the 'query_params' attribute.
        """
        req = self.context["request"]
        if hasattr(req, "query_params"):
            return req.query_params
        return req.GET

    @property
    def explicit_field_paths_requested(self):
        """Paths the client asked for (``['']`` when the param is absent)."""
        target_param = self.explicit_fields_query_param
        return self.all_query_params.get(target_param, "").split(",")

    @property
    def explicit_field_paths_allowed(self):
        """Paths for every field declared on the serializer's Meta."""
        return [self.get_explicit_field_path(f) for f in self.Meta.fields]

    @property
    def explicit_fields(self):
        """Requested ∩ allowed paths, or all allowed for ``<model>.all``."""
        paths_allowed = self.explicit_field_paths_allowed
        paths_requested = self.explicit_field_paths_requested
        implicit_param = self.implicit_fields_query_param_value
        all_paths = self.get_explicit_field_path(implicit_param)
        if all_paths in paths_requested and self.implicit_fields_allowed is True:
            return paths_allowed
        return [path for path in paths_requested if path in paths_allowed]

    def filter_explicit_fields(self, representation):
        """Drop entries whose path was not explicitly requested."""
        # Compute the allowed set once: the property walks the query params
        # on every access, so calling it per field was quadratic work.
        explicit = set(self.explicit_fields)
        filtered = OrderedDict()
        for field_name, field_value in representation.items():
            if self.get_explicit_field_path(field_name) in explicit:
                filtered[field_name] = field_value
        return filtered

    def to_representation(self, obj):
        representation = super().to_representation(obj)
        return self.filter_explicit_fields(representation)
class DebugOnlyResponseMixin:
    """
    Returns the response only when DEBUG mode is on; otherwise raises a 404.
    """

    def get(self, request, format=None):
        if settings.DEBUG is False:
            # Http404 is an exception class: it must be *raised*, not
            # returned, for Django to translate it into a 404 response.
            raise Http404()
        return super().get(request, format)
class EndpointsAllowedMixin:
    """
    Only returns endpoints whose name appears in ``endpoints_allowed``.
    """

    endpoints_allowed = []

    def get_endpoints(self, request, format):
        endpoints = super().get_endpoints(request, format)
        allowed = self.endpoints_allowed
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating it raises RuntimeError.
        for name in list(endpoints.keys()):
            if name not in allowed:
                del endpoints[name]
        return endpoints
class EndpointsRemovedMixin:
    """
    Removes the named endpoints from the response.
    """

    endpoints_removed = []

    def get_endpoints(self, request, format):
        endpoints = super().get_endpoints(request, format)
        removed = self.endpoints_removed
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating it raises RuntimeError.
        for name in list(endpoints.keys()):
            if name in removed:
                del endpoints[name]
        return endpoints
class SkippedFieldsMixin:
    """
    Dynamically removes fields from a serializer.

    Pass ``skipped_fields=[...]`` to the constructor to drop those fields.
    https://stackoverflow.com/questions/27935558/dynamically-exclude-or-include-a-field-in-django-rest-framework-serializer
    """

    def __init__(self, *args, **kwargs):
        to_skip = kwargs.pop("skipped_fields", None)
        super().__init__(*args, **kwargs)
        self.remove_skipped_fields(to_skip)

    def remove_skipped_fields(self, skipped_fields=None):
        """Pop each named field from ``self.fields`` when present."""
        for name in skipped_fields or ():
            if name in self.fields:
                self.fields.pop(name)
class GetOrCreateMixin:
    """
    Allows a get-or-create flow for an object through the serializer.
    https://stackoverflow.com/questions/25026034/django-rest-framework-modelserializer-get-or-create-functionality
    """

    def is_valid(self, raise_exception=False):
        if not hasattr(self, "initial_data"):
            # Instantiated with just an object and no data={...}:
            # proceed as usual.
            return super().is_valid(raise_exception)
        try:
            # Instantiated with data={...}: try to look the object up.
            existing = self.Meta.model.objects.get(**self.initial_data)
        except (ObjectDoesNotExist, MultipleObjectsReturned):
            # The object was not found, or the data is ambiguous about
            # which object it identifies: validate the data as usual.
            pass
        else:
            # Found it: attach the instance so validation targets it.
            self.instance = existing
        return super().is_valid(raise_exception)
class OrderByFieldNameMixin:
    """
    Returns querysets ordered by ``order_by_field_name`` when it is set.
    """

    order_by_field_name = None

    def get_queryset(self):
        qs = super().get_queryset()
        if self.order_by_field_name is None:
            return qs
        return qs.order_by(self.order_by_field_name)
class ExcludeKwargsMixin:
    """
    Returns querysets with the objects matching ``exclude_kwargs`` removed.
    """

    exclude_kwargs = {}

    def get_queryset(self):
        base = super().get_queryset()
        return base.exclude(**self.exclude_kwargs)
class CheckQuerysetObjectPermissionsMixin:
    """
    Check object permissions for every object in the queryset.
    NOTE: Requires that the permission classes include an object permission check.
    """

    def get_queryset(self):
        qs = super().get_queryset()
        # A permission failure raises, aborting the request.
        for instance in qs:
            self.check_object_permissions(self.request, instance)
        return qs
class ValidateCurrentUserMixin:
    """
    Adds a ``current_user`` property and a helper that validates a value
    against the user of the current request.
    """

    @property
    def current_user(self):
        """Return the user of the request held in the serializer context.

        Raises AttributeError when the context or user is missing, and
        KeyError when the context holds no request.
        """
        context = getattr(self, "context", None)
        if context is None:
            raise AttributeError("There is no context.")
        request = context.get("request", None)
        if request is None:
            # Message grammar fixed (was "There is not request in context.").
            raise KeyError("There is no request in context.")
        user = getattr(request, "user", None)
        if user is None:
            raise AttributeError("There is no user in the request.")
        return user

    def validate_with_current_user(self, value):
        """Raise ValidationError unless ``value`` is the current user."""
        if self.current_user != value:
            raise ValidationError(
                "The user specified does not match the current session."
            )
class NestedUserFieldsValidatorsMixin(ValidateCurrentUserMixin):
    """
    Creates a validator per entry of ``nested_user_fields``, checking that
    the value found at the dotted attribute path matches the current user.
    """

    nested_user_fields = {}

    def create_validator_for_nested_user_field(self, bits):
        """Build a validator that walks ``bits`` and checks the final attribute."""

        def validator(value):
            target = value
            parent = value
            for bit in bits:
                parent = target
                target = getattr(target, bit, None)
                if target is None:
                    raise AttributeError(
                        "The attribute '{}' does not exist on object {}.".format(
                            bit, parent
                        )
                    )
            self.validate_with_current_user(target)
            return value

        return validator

    def set_validators_for_nested_user_fields(self):
        """Attach a ``validate_<field>`` callable for each configured field."""
        for field_name, path in self.nested_user_fields.items():
            validator_name = "validate_{}".format(field_name)
            # Do not clobber an explicitly defined validator.
            if getattr(self, validator_name, None) is None:
                validator = self.create_validator_for_nested_user_field(
                    path.split(".")
                )
                setattr(self, validator_name, validator)

    def __init__(self, *args, **kwargs):
        # Validators must exist before DRF inspects the serializer.
        self.set_validators_for_nested_user_fields()
        super().__init__(*args, **kwargs)
class ValidateUserFieldMixin(ValidateCurrentUserMixin):
    """
    Adds a 'user' field validator requiring the value to be the
    currently authenticated user.
    """

    def validate_user(self, value):
        """DRF field validator for a ``user`` field."""
        self.validate_with_current_user(value)
        return value
class SerializerClassByActionMixin:
    """
    Pick the serializer class based on the viewset action verb.
    """

    serializer_class_by_action = {}

    def get_serializer_class(self):
        # The fallback is computed unconditionally, matching how DRF
        # normally resolves the class.
        fallback = super().get_serializer_class()
        return self.serializer_class_by_action.get(self.action, fallback)
class PermissionClassesByActionMixin:
    """
    Build the permission list from the action verb.

    ``permission_classes_by_action`` may contain an "all" entry (always
    applied) and a "default" entry (used when the action has no entry).
    https://stackoverflow.com/questions/36001485/django-rest-framework-different-permission-per-methods-within-same-view
    """

    permission_classes_by_action = {}

    def get_permissions(self):
        config = self.permission_classes_by_action
        # Deduplicate via a set, as the original list(set(...)) did.
        classes = set(config.get("all", []))
        classes.update(config.get(self.action, config.get("default", [])))
        if not classes:
            return super().get_permissions()
        return [permission() for permission in classes]
class MultipleFieldLookupMixin:
    """
    Apply this mixin to any view or viewset to get multiple field filtering
    based on a `lookup_fields` attribute, instead of the default single
    field filtering.
    """

    def get_object(self):
        # Base queryset with any filter backends applied.
        queryset = self.filter_queryset(self.get_queryset())
        # Build the lookup dict, ignoring empty URL kwargs.
        lookups = {
            field: self.kwargs[field]
            for field in self.lookup_fields
            if self.kwargs[field]
        }
        # Lookup the object
        return get_object_or_404(queryset, **lookups)
class HyperlinkListMixin:
    """
    List only the URL attribute of each serialized object.
    """

    def list(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            result = [obj["url"] for obj in serializer.data]
            return self.get_paginated_response(result)
        serializer = self.get_serializer(queryset, many=True)
        result = [obj["url"] for obj in serializer.data]
        # Response was previously an undefined name here; the import is
        # provided by the module's import block.
        return Response(result)
class ParameterisedViewMixin:
    """
    Used in conjunction with the ParameterisedFieldMixin to enable multiple
    custom lookup_fields for queries.

    ``lookup_fields`` is a list of ``(model_field, url_kwarg)`` pairs; dotted
    model fields (e.g. ``"user.username"``) are translated to Django's
    ``__`` traversal syntax.
    """

    lookup_fields = [("pk", "pk")]

    def __init__(self, *args, **kwargs):
        # Allow a per-instance override of the lookup configuration.
        self.lookup_fields = kwargs.pop("lookup_fields", self.lookup_fields)
        super().__init__(*args, **kwargs)

    def get_object_kwargs(self):
        """Map each (possibly dotted) lookup field to its URL kwarg value."""
        object_kwargs = {}
        for lookup_field, lookup_url_kwarg in self.lookup_fields:
            # "a.b" -> "a__b"; a no-op for plain field names, so the
            # original's duplicated if/else branches were collapsed.
            object_kwargs[lookup_field.replace(".", "__")] = self.kwargs[
                lookup_url_kwarg
            ]
        return object_kwargs

    def get_object(self):
        """
        Filter the queryset to return an object using the parameterised
        procedure instead of the default, so queries can involve more than
        a single string.
        """
        queryset = self.get_queryset()
        queryset = self.filter_queryset(queryset)
        obj = get_object_or_404(queryset, **self.get_object_kwargs())
        self.check_object_permissions(self.request, obj)
        return obj
class ParameterisedFieldMixin:
    """
    Used in conjunction with the ParameterisedViewMixin to enable multiple custom
    lookup_fields for serializing.
    """

    # (model_field, url_kwarg) pairs; dotted model fields traverse relations.
    lookup_fields = [("pk", "pk")]

    def __init__(self, *args, **kwargs):
        # Allow a per-instance override of the lookup configuration.
        self.lookup_fields = kwargs.pop("lookup_fields", self.lookup_fields)
        super().__init__(*args, **kwargs)

    def use_pk_only_optimization(self):
        """ Return true if all lookup fields for the models is its PK """
        # NOTE(review): despite the docstring, this returns True when ANY
        # lookup tuple has the url kwarg "pk" (and a truthy model field),
        # not when ALL of them do -- confirm which semantics are intended
        # before changing it; DRF uses this to skip full object fetches.
        result = False
        for field_tuple in self.lookup_fields:
            if field_tuple[0] and field_tuple[1] == "pk":
                result = True
        return result

    def get_object_kwargs(self, view_kwargs):
        # Translate dotted model paths ("a.b") into ORM traversal ("a__b")
        # and pull each value from the resolved view kwargs.
        lookup_kwargs = {}
        for lookup_field, lookup_url_kwarg in self.lookup_fields:
            if "." in lookup_field:
                lookup_field = lookup_field.replace(".", "__")
            lookup_kwargs[lookup_field] = view_kwargs[lookup_url_kwarg]
        return lookup_kwargs

    def get_object(self, view_name, view_args, view_kwargs):
        """ Given a URL, return a corresponding object. """
        # get_queryset/filter_queryset come from the related-field base class
        # this mixin is combined with (presumably HyperlinkedRelatedField --
        # confirm against the serializers that use this mixin).
        queryset = self.get_queryset()
        queryset = self.filter_queryset(queryset)
        lookup_kwargs = self.get_object_kwargs(view_kwargs)
        return get_object_or_404(queryset, **lookup_kwargs)

    def get_url_kwargs(self, obj):
        # Walk each dotted model path on the object to collect the kwargs
        # needed to reverse its URL.
        url_kwargs = {}
        for model_field, url_param in self.lookup_fields:
            attr = obj
            for field in model_field.split("."):
                attr = getattr(attr, field)
            url_kwargs[url_param] = attr
        return url_kwargs

    def get_url(self, obj, view_name, request, format):
        """
        Given an object, return the URL that hyperlinks to the object.
        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # # Unsaved objects will not yet have a valid URL.
        if hasattr(obj, "pk") and obj.pk in (None, ""):
            return None
        url_kwargs = self.get_url_kwargs(obj)
        # self.reverse is provided by the DRF related-field base class.
        return self.reverse(
            view_name, kwargs=url_kwargs, request=request, format=format
        )
class MeAliasMixin:
    """Let clients write "me" in payloads, query params and URL kwargs as an
    alias for the authenticated user, resolved via ``me_alias_lookup_field``."""

    def initial(self, request, *args, **kwargs):
        """
        This is the 'dispatch' method for rest_framework. This has <request.data> etc.
        This augments the request.data to change any values from "me" to
        request.user.username.
        (TODO: Check what the url_kwarg is to determine what part of request.user.<attr>
        to use)
        NOTE: This affects multipart/form-data when we augment its contents and causes
        the formData to be invalid/corrupt.
        """
        if request.user.is_authenticated:
            # NOTE(review): only existing keys are reassigned while iterating,
            # which is safe for a plain dict, but DRF's request.data may be an
            # immutable QueryDict -- confirm mutation is permitted here.
            for k, v in request.data.items():
                if isinstance(v, str):
                    if "/me/" in v:
                        # Replace a "/me/" path segment with the user's value.
                        request.data[k] = v.replace(
                            "/me/",
                            "/{}/".format(
                                getattr(request.user, self.me_alias_lookup_field)
                            ),
                        )
                    elif "me" == v:
                        # The whole value is the alias.
                        request.data[k] = v.replace(
                            "me", getattr(request.user, self.me_alias_lookup_field)
                        )
        return super().initial(request, *args, **kwargs)

    def dispatch(self, request, *args, **kwargs):
        # Duplicate and replace the query params
        new_kwargs = dict(**kwargs)
        new_query_params = request.GET.copy()
        if request.user.is_authenticated:
            for k, v in new_query_params.items():
                if v == "me":
                    new_query_params[k] = getattr(
                        request.user, self.me_alias_lookup_field
                    )
            request.GET = new_query_params
        # Duplicate and replace the kwargs
        for k, v in new_kwargs.items():
            if v == "me":
                # For "rel__attr"-style kwargs, resolve the trailing attribute
                # name against the user.
                k_bits = k.split("__")
                suffix = k_bits.pop()
                if suffix:
                    # NOTE(review): str.split never yields an empty last
                    # element unless k ends with "__", so the else branch
                    # below is effectively dead -- confirm intent.
                    new_kwargs[k] = getattr(request.user, suffix)
                else:
                    if hasattr(request.user, k):
                        new_kwargs[k] = getattr(request.user, k)
                    else:
                        new_kwargs[k] = request.user
        return super().dispatch(request, *args, **new_kwargs)
|
import sys
from board import *
from gameConfig import *
import gameConfig
from bomberman import *
from getch import *
from bomb import bomb_plant
import brick
import os
import time
import random
from enemy import Enemy
from termcolor import colored
# Main game file: runs the game loop, prints the score and Bomberman's lives,
# updates bombs and enemies, and draws the board together with its bricks.
class Game:
    """Holds per-game state; currently just the difficulty level."""

    def __init__(self, level):
        # level: starting level, normally taken from gameConfig.level.
        self.level = level
# Module-level game state shared by the functions below.
Game_obj = Game(gameConfig.level)  # current game, seeded from the config level
count = 1  # main-loop sentinel: the loop at the bottom runs while count != 0
flag = 0
tr = 0  # epoch second at which the last bomb was planted
brick_obj = brick.Brick(Game_obj)  # brick/level manager for this game
def print_board():
    """Render one frame: banner, coloured grid, and the lives/score/level HUD."""
    global flag1
    # End the game when Bomberman is out of lives.
    if(obj.lives <= 0):
        loose()
    # os.system('tput reset')
    # if(len(enemybin)==0):
    #     youwin()
    print(colored("\t\t\t\t BOMBERMAN \t\t\t\t\t", "green", attrs=["bold"]))
    # Rebuild the grid, then stamp bricks, the player, bombs and enemies on it.
    bp.make_board()
    brick_obj.entity_build()
    obj.curr()
    for i in bomb_bin:
        i.set_bomb()
    for i in enemybin:
        i.curr_Enemy()
    # bricks.Bricks()
    check_Game_Score()
    # Draw each cell with a colour depending on what occupies it:
    # 'E' enemy (red), digit bomb countdown (yellow), '%'/'B' bomb
    # (dark yellow), 'e' (dark green), 'P' player (dark blue), else plain.
    for x in range(Length):
        for y in range(Width):
            if global_arr[x][y] == 'E':
                print(colored(global_arr[x][y], "red", attrs=["bold"]), end="")
            elif global_arr[x][y].isdigit():
                print(
                    colored(
                        global_arr[x][y],
                        "yellow",
                        attrs=["bold"]),
                    end="")
            elif global_arr[x][y] == '%' or global_arr[x][y] == 'B':
                print(
                    colored(
                        global_arr[x][y],
                        "yellow",
                        attrs=[
                            "bold",
                            "dark"]),
                    end="")
            elif (global_arr[x][y] == 'e'):
                print(
                    colored(
                        global_arr[x][y],
                        "green",
                        attrs=[
                            "bold",
                            "dark"]),
                    end="")
            elif (global_arr[x][y] == 'P'):
                print(
                    colored(
                        global_arr[x][y],
                        "blue",
                        attrs=[
                            "bold",
                            "dark"]),
                    end="")
            else:
                print(global_arr[x][y], end="")
        print()
    # print("".join(str(global_arr[x][y]) for y in range(Width)))
    # HUD: remaining lives, the score and the current level.
    print(
        colored(
            "Lives", "blue"), colored(
            obj.lives, "blue"), "        ", colored(
            "Score", "green"), colored(
            obj.score, "green"))
    p = brick_obj.get_level()
    print(colored("Level", "green"), colored(p - 1, "yellow"))
def enemy_update():
    """Advance enemies one tick; handle level completion and player kills."""
    # print(gameConfig.level)
    if not len(enemybin):
        # All enemies are dead: the level is cleared.
        # brick_obj.set_level()
        os.system('tput reset')
        q = brick_obj.get_level()
        # print(q)
        getch.Set_Frame_rate(q)  # Speed of enemy increase in every level
        if(q <= 5 and q > 0):
            # Show the level banner briefly, then rebuild for the next level.
            print(
                colored(
                    "Level", 'green'), ' ', colored(
                    q, 'yellow'))
            time.sleep(1)
            levelup()
        if q > 5:
            # Beyond the last level: the player has won.
            you_win()
    for entity in enemybin:
        [r, t] = entity.get_pos()
        [p, q] = obj.get_pos()
        # print(r,t)
        # Enemy touching the player: respawn the player, remove the enemy.
        if(entity.check_murder(p, q, r, t, obj)):
            obj.set_Pos(2, 2)
            obj.curr()
            entity.remove_Enemy()
        # Random walk: one step in a random direction each tick.
        locomotion = random.randint(0, 3)
        if locomotion == 1:
            entity.left_Move()
        elif locomotion == 2:
            entity.right_Move()
        elif locomotion == 3:
            entity.up_Move()
        else:
            entity.down_Move()
        entity.curr_Enemy()
def levelup():
    """Advance to the next level: rebuild the board and respawn enemies."""
    # Force the brick manager to regenerate enemies and walls.
    brick_obj.Enemymade = True
    brick_obj.Wallmade = True
    bp.make_board()
    brick_obj.entity_build()
    # Respawn one enemy at each recorded spawn position.  The original code
    # ran this loop twice (copy-paste), doubling the enemy count and leaving
    # the second batch unpositioned; the unused ``getchar = getch`` local was
    # removed as well.
    for k in range(len(enemybin2)):
        enemybin.append(Enemy())
        enemybin[k].set_Pos(enemybin2[k][0], enemybin2[k][1])
def check_Game_Score():
    """Update planted bombs (countdown and explosion) and apply kill scoring."""
    for entity in bomb_bin:
        # Seconds remaining until the bomb (planted at epoch second tr) blows.
        explode_time = 3 + tr - int(time.time())
        if(explode_time >= 0):
            bomb_plant.start_counter(explode_time - 1)
        if(explode_time < 1):
            # Timer expired: draw the blast marker and run the explosion.
            bomb_plant.start_counter('B')
            bomb_plant.explosion()
        elif(explode_time < 0):
            # NOTE(review): unreachable -- explode_time < 0 implies < 1, so
            # the branch above always wins and bomb_plant.explode() never runs.
            bomb_plant.explode()
        # Bomberman caught in the blast: respawn at the start position.
        if(obj.hanged_bomber(entity.present_Coordiate_Bomb())):
            obj.set_Pos(2, 2)
            obj.curr()
        # Enemies caught in the blast are removed and award points.
        # NOTE(review): removing from enemybin while iterating it can skip
        # the following enemy -- consider iterating over a copy.
        for j in enemybin:
            if(j.hanged_enemy(entity.present_Coordiate_Bomb())):
                enemybin.remove(j)
                obj.score += 100
    # All bombs handled this tick: clear the list in place.
    del bomb_bin[:]
def loose():
    """Print the game-over banner and terminate the process."""
    # The original assigned a local ``count = 0`` here; without a ``global``
    # declaration it had no effect (and sys.exit follows anyway), so it was
    # removed.
    print(colored("!!!!!! GAME OVER !!!!!!", 'red'))
    sys.exit()
def you_win():
    """Print the victory banner and terminate the process."""
    # The original assigned a local ``count = 0`` here; without a ``global``
    # declaration it had no effect, so it was removed.
    print(colored("!! -_- ^_^ You Win -_- ^_^ !!", 'green'))
    sys.exit()
# Main game loop: advance enemies, redraw, read one key, dispatch the action.
while(count != 0):
    enemy_update()
    print_board()
    ch = getch()  # blocking single-character read
    if(ch == 'q'):
        # Quit the game.
        count = 0
    elif(ch == 'w'):
        obj.remove()
        obj.up_Move()
        obj.curr()
    elif(ch == 's'):
        obj.remove()
        obj.down_Move()
        obj.curr()
    elif(ch == 'a'):
        obj.remove()
        obj.left_Move()
        obj.curr()
    elif(ch == 'd'):
        obj.remove()
        obj.right_Move()
        obj.curr()
    elif(ch == 'b'):
        # Plant a bomb (only one may exist at a time) and start its timer.
        if(len(bomb_bin) == 0):
            bomb_bin.append(bomb_plant)
            bomb_bin[0].set_coordinate()
            tr = int(time.time())
    # Clear the terminal before the next frame is drawn.
    os.system('clear')
|
from typing import List, Mapping
from .text import get_env_file_names, capitalize, snake, add_python_indentation
import os
class CodeGen():
    """Simple template-based code generator.

    Holds a mapping of file names to contents plus a replacement dictionary;
    ``generate`` applies the replacements to both names and contents and
    writes the results to disk.
    """

    def __init__(self):
        # file name -> file content
        self.file_content_dict: Mapping[str, str] = {}
        # placeholder -> replacement value
        self.replacement_dict: Mapping[str, str] = {}

    def _create_dir_if_not_exist(self, file_name: str):
        """Ensure the parent directory of ``file_name`` exists."""
        abs_file_name = os.path.abspath(file_name)
        abs_dir = os.path.dirname(abs_file_name)
        if not os.path.exists(abs_dir):
            os.makedirs(abs_dir)

    def read_file_from_disk(self, file_name: str) -> str:
        """Return the full content of ``file_name``."""
        # ``with`` guarantees the handle is closed even if read() raises.
        with open(file_name, 'r') as f_read:
            return f_read.read()

    def write_file_to_disk(self, file_name: str, content: str):
        """Write ``content`` to ``file_name``, closing the handle reliably."""
        with open(file_name, 'w') as f_write:
            f_write.write(content)

    def get_content(self, file_name: str) -> str:
        """Return the stored content for ``file_name``; raise if unknown."""
        if file_name not in self.file_content_dict:
            raise ValueError(
                '{} does not exist in file_content_dict'.format(file_name)
            )
        return self.file_content_dict[file_name]

    def set_content(self, file_name: str, content: str):
        """Store (or overwrite) the content for ``file_name``."""
        self.file_content_dict[file_name] = content

    def replace_text(self, text: str, replacement_dict: Mapping[str, str]) -> str:
        """Return ``text`` with every key of ``replacement_dict`` replaced."""
        new_text = text
        for key, val in replacement_dict.items():
            new_text = new_text.replace(key, val)
        return new_text

    def load_from_template(self, template_dir_name: str):
        """Read every registered file from a template directory."""
        self._load(template_dir_name)

    def load(self, dir_name: str):
        """Read every registered file from an already-generated directory."""
        return self._load(dir_name)

    def _load(self, dir_name: str):
        # The keys of file_content_dict define which files to read.
        for file_name in self.file_content_dict:
            abs_file_name = os.path.abspath(os.path.join(dir_name, file_name))
            self.file_content_dict[file_name] = self.read_file_from_disk(abs_file_name)

    def save(self, dir_name: str):
        """Write all stored files under ``dir_name``, creating directories."""
        for file_name, content in self.file_content_dict.items():
            abs_file_name = os.path.abspath(os.path.join(dir_name, file_name))
            self._create_dir_if_not_exist(abs_file_name)
            self.write_file_to_disk(abs_file_name, content)

    def generate(self, dir_name: str):
        """Apply ``replacement_dict`` to names and contents, then save."""
        new_file_content_dict: Mapping[str, str] = {}
        for file_name, content in self.file_content_dict.items():
            for key, val in self.replacement_dict.items():
                content = content.replace(key, val)
                file_name = file_name.replace(key, val)
            new_file_content_dict[file_name] = content
        self.file_content_dict = new_file_content_dict
        self.save(dir_name)
class FastApiService(CodeGen):
    """Code generator for a FastAPI service skeleton."""

    def __init__(self, service_name: str):
        super().__init__()
        self.service_name = service_name

    def load(self, dir_name: str):
        """Load an existing generated service (only main.py is tracked)."""
        self.file_content_dict = {
            '{}/main.py'.format(self.service_name): '',
        }
        super().load(dir_name)

    def load_from_template(self, template_dir_name: str):
        """Register and read every file of the service template."""
        template_files = [
            'zarubaServiceName/Dockerfile',
            'zarubaServiceName/main.py',
            'zarubaServiceName/Pipfile',
            'zarubaServiceName/Pipfile.lock',
            'zarubaServiceName/start.sh',
            'zarubaServiceName/template.env',
            'zarubaServiceName/helpers/__init__.py',
            'zarubaServiceName/helpers/transport/__init__.py',
            'zarubaServiceName/helpers/transport/interface.py',
            'zarubaServiceName/helpers/transport/local.py',
            'zarubaServiceName/helpers/transport/rmq.py',
            'zarubaServiceName/repos/__init__.py',
            'zarubaServiceName/schemas/__init__.py',
            'zarubaServiceName/.gitignore',
            'zarubaServiceName/.dockerignore',
        ]
        self.file_content_dict = {name: '' for name in template_files}
        super().load_from_template(template_dir_name)

    def generate(self, dir_name: str):
        """Render the template placeholders with the real service name."""
        self.replacement_dict = {
            'zarubaServiceName': self.service_name,
            'ZARUBA_SERVICE_NAME': snake(self.service_name).upper(),
        }
        super().generate(dir_name)
class FastApiModule(CodeGen):
    """Code generator for a module inside a FastAPI service.

    Besides generating the module files, it can splice route/event/RPC
    handlers into the module controller and register the module in the
    service's main.py.
    """

    def __init__(self, service_name: str, module_name: str):
        super().__init__()
        self.service_name = service_name
        self.module_name = module_name
        # Partial templates, filled by load_from_template.  The original
        # initialised misspelled attributes (``handle_event_parial`` and
        # ``handle_route``) that no method ever read, so using the splice
        # methods without calling load_from_template raised AttributeError;
        # the names below are the ones the methods actually use.
        self.import_module_partial = ''
        self.load_module_partial = ''
        self.handle_rpc_partial = ''
        self.handle_event_partial = ''
        self.handle_route_partial = ''

    def load(self, dir_name: str):
        """Load the controller of an already-generated module."""
        self.file_content_dict = {
            '{}/{}/controller.py'.format(self.service_name, self.module_name): '',
        }
        super().load(dir_name)

    def load_from_template(self, template_dir_name: str):
        """Read the module template files plus the splice partials."""
        self.file_content_dict = {
            'zarubaServiceName/zarubaModuleName/__init__.py': '',
            'zarubaServiceName/zarubaModuleName/controller.py': '',
        }
        super().load_from_template(template_dir_name)
        partial_path = os.path.join(os.path.abspath(template_dir_name), 'partials')
        self.import_module_partial = self.read_file_from_disk(os.path.join(partial_path, 'import_module.py'))
        self.load_module_partial = self.read_file_from_disk(os.path.join(partial_path, 'load_module.py'))
        self.handle_rpc_partial = self.read_file_from_disk(os.path.join(partial_path, 'handle_rpc.py'))
        self.handle_event_partial = self.read_file_from_disk(os.path.join(partial_path, 'handle_event.py'))
        self.handle_route_partial = self.read_file_from_disk(os.path.join(partial_path, 'handle_route.py'))

    def _insert_into_controller(self, dir_name: str, script: str, anchor: str):
        """Insert ``script`` just after the controller method named ``anchor``.

        ``anchor`` is e.g. ``'def handle_route'``; the line is matched at one
        indentation level (a method of the controller class).
        """
        controller_file_name = self._get_controller_file_name(dir_name)
        controller_file_content = self.read_file_from_disk(controller_file_name)
        controller_script_lines = controller_file_content.split('\n')
        insert_index = -1
        for line_index, line in enumerate(controller_script_lines):
            if line.startswith(add_python_indentation(anchor, 1)):
                insert_index = line_index + 1
                break
        if insert_index == -1:
            raise ValueError('Cannot find {} method in {}'.format(
                anchor.replace('def ', ''), controller_file_name
            ))
        controller_script_lines.insert(insert_index, script)
        self.write_file_to_disk(controller_file_name, '\n'.join(controller_script_lines))

    def add_route(self, dir_name: str, http_method: str, url: str):
        """Splice a route handler for ``http_method``/``url`` into the controller."""
        handle_route_script = self.replace_text(
            add_python_indentation(self.handle_route_partial, 2),
            {
                'zarubaHttpMethod': http_method,
                'zarubaUrl': url,
                'zaruba_url': snake(url.replace('/', '_').replace('-', '_')).strip('_'),
            }
        )
        self._insert_into_controller(
            dir_name, '\n{}\n'.format(handle_route_script), 'def handle_route'
        )

    def add_event_handler(self, dir_name: str, event_name: str):
        """Splice an event handler for ``event_name`` into the controller."""
        handle_event_script = self.replace_text(
            add_python_indentation(self.handle_event_partial, 2),
            {
                'zarubaEventName': event_name,
                'zaruba_event_name': snake(event_name),
            }
        )
        self._insert_into_controller(
            dir_name, '\n{}\n'.format(handle_event_script), 'def handle_event'
        )

    def add_rpc_handler(self, dir_name: str, rpc_name: str):
        """Splice an RPC handler for ``rpc_name`` into the controller."""
        # NOTE(review): the RPC partial reuses the event placeholder names and
        # is inserted after ``handle_event`` -- confirm this is intended.
        handle_rpc_script = self.replace_text(
            add_python_indentation(self.handle_rpc_partial, 2),
            {
                'zarubaEventName': rpc_name,
                'zaruba_event_name': snake(rpc_name),
            }
        )
        self._insert_into_controller(
            dir_name, '\n{}\n'.format(handle_rpc_script), 'def handle_event'
        )

    def _get_controller_file_name(self, dir_name: str) -> str:
        """Absolute path of this module's controller.py under ``dir_name``."""
        controller_file_name = '{}/{}/controller.py'.format(self.service_name, self.module_name)
        return os.path.abspath(os.path.join(dir_name, controller_file_name))

    def add_python_indentation(self, text: str, level: int) -> str:
        """Indent every line of ``text`` by ``level`` 4-space steps.

        NOTE(review): duplicates the module-level helper of the same name;
        kept for backward compatibility with any external callers.
        """
        spaces = (level * 4) * ' '
        return '\n'.join(spaces + line for line in text.split('\n'))

    def generate(self, dir_name: str):
        """Render the module template and register it in the service."""
        self.replacement_dict = {
            'zarubaServiceName': self.service_name,
            'zarubaModuleName': self.module_name,
        }
        super().generate(dir_name)
        self._register_module(dir_name)

    def _register_module(self, dir_name: str):
        """Insert import/load snippets for this module into the service main.py."""
        # prepare partials
        replacement_dict = {
            'zarubaModuleName': self.module_name,
            'ZarubaModuleName': capitalize(self.module_name),
            'zaruba_module_name': snake(self.module_name),
        }
        import_module_script = self.replace_text(self.import_module_partial, replacement_dict)
        load_module_script = self.replace_text(self.load_module_partial, replacement_dict)
        # Load the service, patch its main.py, and write everything back.
        service = FastApiService(self.service_name)
        service.load(dir_name)
        main_file_name = '{}/main.py'.format(self.service_name)
        main_file_content = service.get_content(main_file_name)
        main_file_lines = main_file_content.split('\n')
        main_file_lines = self._insert_import_module_script(main_file_lines, import_module_script)
        main_file_lines = self._insert_load_module_script(main_file_lines, load_module_script)
        service.set_content(main_file_name, '\n'.join(main_file_lines))
        service.save(dir_name)

    def _insert_import_module_script(self, lines: List[str], import_module_script: str) -> List[str]:
        """Insert the import snippet right after the leading ``from ...`` block."""
        import_module_line_index = 0
        for line_index, line in enumerate(lines):
            if not line.startswith('from '):
                import_module_line_index = line_index
                break
        lines.insert(import_module_line_index, import_module_script)
        return lines

    def _insert_load_module_script(self, lines: List[str], load_module_script: str) -> List[str]:
        """Append the load snippet at the end, separated by a blank line."""
        lines.append('')
        lines.append(load_module_script)
        return lines
class FastApiCrud(CodeGen):
def __init__(self, service_name: str, module_name: str, entity_name: str, field_names: List[str]):
super().__init__()
self.service_name = service_name
self.module_name = module_name
self.entity_name = entity_name
self.field_names = field_names
self.controller_handle_event_partial = ''
self.controller_import_partial = ''
self.repo_field_declaration_partial = ''
self.repo_field_update_partial = ''
self.repo_field_insert_partial = ''
self.init_repo_partial = ''
self.controller_handle_route_partial = ''
self.controller_init_property_partial = ''
self.import_repo_partial = ''
self.schema_field_declaration_partial = ''
def load(self, dir_name: str):
self.file_content_dict = {
'{}/repos/db{}.py'.format(self.service_name, capitalize(self.entity_name)): '',
'{}/repos/{}.py'.format(self.service_name, self.entity_name): '',
'{}/schemas/{}.py'.format(self.service_name, self.entity_name): '',
'{}/{}/handle{}Event.py'.format(self.service_name, self.module_name, capitalize(self.entity_name)): '',
'{}/{}/handle{}Route.py'.format(self.service_name, self.module_name, capitalize(self.entity_name)): '',
}
super().load(dir_name)
def load_from_template(self, template_dir_name: str):
self.file_content_dict = {
'zarubaServiceName/repos/dbZarubaEntityName.py': '',
'zarubaServiceName/repos/zarubaEntityName.py': '',
'zarubaServiceName/schemas/zarubaEntityName.py': '',
'zarubaServiceName/zarubaModuleName/handleZarubaEntityNameEvent.py': '',
'zarubaServiceName/zarubaModuleName/handleZarubaEntityNameRoute.py': '',
}
super().load_from_template(template_dir_name)
partial_path = os.path.join(os.path.abspath(template_dir_name), 'partials')
self.controller_handle_event_partial = self.read_file_from_disk(os.path.join(partial_path, 'controller_handle_event.py'))
self.controller_import_partial = self.read_file_from_disk(os.path.join(partial_path, 'controller_import.py'))
self.repo_field_declaration_partial = self.read_file_from_disk(os.path.join(partial_path, 'repo_field_declaration.py'))
self.repo_field_update_partial = self.read_file_from_disk(os.path.join(partial_path, 'repo_field_update.py'))
self.repo_field_insert_partial = self.read_file_from_disk(os.path.join(partial_path, 'repo_field_insert.py'))
self.init_repo_partial = self.read_file_from_disk(os.path.join(partial_path, 'init_repo.py'))
self.controller_handle_route_partial = self.read_file_from_disk(os.path.join(partial_path, 'controller_handle_route.py'))
self.controller_init_property_partial = self.read_file_from_disk(os.path.join(partial_path, 'controller_init_property.py'))
self.import_repo_partial = self.read_file_from_disk(os.path.join(partial_path, 'import_repo.py'))
self.schema_field_declaration_partial = self.read_file_from_disk(os.path.join(partial_path, 'schema_field_declaration.py'))
def generate(self, dir_name: str):
self.replacement_dict = {
'zarubaServiceName': self.service_name,
'zarubaModuleName': self.module_name,
'zarubaEntityName': self.entity_name,
'ZarubaEntityName': capitalize(self.entity_name),
'zaruba_entity_name': snake(self.entity_name),
'zaruba_field_name': self.field_names[0] if len(self.field_names) > 0 else 'id',
}
self._complete_repo()
self._complete_schema()
super().generate(dir_name)
self._register_handler(dir_name)
self._adjust_service(dir_name)
    def _adjust_service(self, dir_name: str):
        """Wire the new entity repo into the service's main.py.

        Inserts the repo import, instantiates the repo before the module
        controller declaration, and passes the repo into the controller
        constructor call.  Raises ValueError when the controller
        declaration line cannot be found.
        """
        replace_dict = {
            'zarubaEntityName': self.entity_name,
            'ZarubaEntityName': capitalize(self.entity_name),
            'zaruba_entity_name': snake(self.entity_name),
        }
        # get import script
        import_script = self.replace_text(self.import_repo_partial, replace_dict).strip()
        init_script = self.replace_text(self.init_repo_partial, replace_dict).strip()
        # load service
        service = FastApiService(self.service_name)
        service.load(dir_name)
        main_file_name = '{}/main.py'.format(self.service_name)
        main_script = service.get_content(main_file_name)
        main_lines = main_script.split('\n')
        # add import: insert before the first non-`from ...` line.
        # NOTE(review): if every line starts with 'from ', the import is
        # inserted at index 0 — confirm that is the intended fallback.
        insert_import_index = 0
        for line_index, line in enumerate(main_lines):
            if not line.startswith('from '):
                insert_import_index = line_index
                break
        main_lines.insert(insert_import_index, import_script)
        # adjust init script: find `<module>_controller = ...`
        controller_declaration_index = -1
        for line_index, line in enumerate(main_lines):
            if line.startswith('{}_controller ='.format(snake(self.module_name))):
                controller_declaration_index = line_index
                break
        if controller_declaration_index == -1:
            raise ValueError('Cannot find {}_controller declaration {}'.format(snake(self.module_name), main_file_name))
        # adjust controller declaration: append `<entity>_repo=<entity>_repo`
        # to the constructor call.
        # NOTE(review): str.replace substitutes EVERY ')' on the line; this
        # assumes the declaration line contains exactly one ')' — confirm.
        controller_declaration_line = main_lines[controller_declaration_index]
        controller_declaration_line = controller_declaration_line.replace(')', ', {entity_repo}={entity_repo})'.format(
            entity_repo = '{}_repo'.format(snake(self.entity_name)),
        ))
        main_lines[controller_declaration_index] = controller_declaration_line
        # add repo init right above the (now adjusted) declaration line
        main_lines.insert(controller_declaration_index, init_script)
        # save changes
        main_script = '\n'.join(main_lines)
        service.set_content(main_file_name, main_script)
        service.save(dir_name)
def _register_handler(self, dir_name: str):
module = FastApiModule(self.service_name, self.module_name)
module.load(dir_name)
controller_file_name = '{}/{}/controller.py'.format(self.service_name, self.module_name)
self._insert_controller_import(module, controller_file_name)
self._adjust_controller_constructor(module, controller_file_name)
self._add_controller_event_handler(module, controller_file_name)
self._add_controller_route_handler(module, controller_file_name)
module.save(dir_name)
def _insert_controller_import(self, module: FastApiModule, controller_file_name: str):
import_script = self.replace_text(
self.controller_import_partial,
{
'zarubaModuleName': self.module_name,
'zarubaEntityName': self.entity_name,
'ZarubaEntityName': capitalize(self.entity_name),
'zaruba_entity_name': snake(self.entity_name),
}
).strip()
controller_script = module.get_content(controller_file_name)
controller_script_lines = controller_script.split('\n')
insert_index = 0
for line_index, line in enumerate(controller_script_lines):
if not line.startswith('from '):
insert_index = line_index
break
controller_script_lines.insert(insert_index, import_script)
controller_script = '\n'.join(controller_script_lines)
module.set_content(controller_file_name, controller_script)
    def _adjust_controller_constructor(self, module: FastApiModule, controller_file_name: str):
        """Add the entity repo parameter and property to Controller.__init__.

        Finds `class Controller(` then its `def __init__(`; the property
        assignment snippet is inserted after the `self.enable_event` line
        when present, otherwise right after the `def __init__(` line.
        Raises ValueError when the constructor cannot be located.
        """
        init_property_script = self.replace_text(
            self.controller_init_property_partial,
            {
                'zaruba_entity_name': snake(self.entity_name),
            }
        ).strip()
        init_property_script = add_python_indentation(init_property_script, 2)
        controller_script = module.get_content(controller_file_name)
        controller_script_lines = controller_script.split('\n')
        controller_class_index = -1
        constructor_index = -1
        insert_index = -1
        for line_index, line in enumerate(controller_script_lines):
            if line.startswith('class Controller('):
                controller_class_index = line_index
                continue
            if controller_class_index > -1 and line.startswith(add_python_indentation('def __init__(', 1)):
                constructor_index = line_index
                insert_index = line_index + 1
                continue
            if constructor_index > -1 and line.startswith(add_python_indentation('self.enable_event', 2)):
                insert_index = line_index + 1
                break
            # Stop scanning once we leave the constructor body (dedent).
            if constructor_index > -1 and not line.startswith(add_python_indentation('', 2)):
                break
        if insert_index == -1:
            raise ValueError('Cannot find Controller constructor in {}'.format(controller_file_name))
        # update constructor: append `<entity>_repo: <Entity>Repo` to the signature.
        # NOTE(review): str.replace substitutes EVERY '):' on the line; this
        # assumes the signature line contains exactly one '):' — confirm.
        constructor_line = controller_script_lines[constructor_index]
        constructor_line = constructor_line.replace('):', ', {entity_name}_repo: {EntityName}Repo):'.format(
            entity_name = snake(self.entity_name),
            EntityName = capitalize(self.entity_name)
        ))
        controller_script_lines[constructor_index] = constructor_line
        # insert the property-assignment snippet
        controller_script_lines.insert(insert_index, init_property_script)
        controller_script = '\n'.join(controller_script_lines)
        module.set_content(controller_file_name, controller_script)
def _add_controller_event_handler(self, module: FastApiModule, controller_file_name: str):
handler_script = self.replace_text(
self.controller_handle_event_partial,
{
'zaruba_entity_name': snake(self.entity_name),
}
).strip()
handler_script = add_python_indentation(handler_script, 3)
controller_script = module.get_content(controller_file_name)
controller_script_lines = controller_script.split('\n')
controller_class_index = -1
controller_start_index = -1
for line_index, line in enumerate(controller_script_lines):
if line.startswith('class Controller('):
controller_class_index = line_index
continue
if controller_class_index > -1 and line.startswith(add_python_indentation('def start(', 1)):
controller_start_index = line_index
insert_index = line_index + 1
continue
if controller_start_index > -1 and line.startswith(add_python_indentation('if self.enable_event', 2)):
insert_index = line_index + 1
break
if controller_start_index > -1 and not line.startswith(add_python_indentation('', 2)):
break
if insert_index == -1:
raise ValueError('Cannot find Controller constructor in {}'.format(controller_file_name))
# insert
controller_script_lines.insert(insert_index, handler_script)
controller_script = '\n'.join(controller_script_lines)
module.set_content(controller_file_name, controller_script)
def _add_controller_route_handler(self, module: FastApiModule, controller_file_name: str):
handler_script = self.replace_text(
self.controller_handle_route_partial,
{
'zaruba_entity_name': snake(self.entity_name),
}
).strip()
handler_script = add_python_indentation(handler_script, 3)
controller_script = module.get_content(controller_file_name)
controller_script_lines = controller_script.split('\n')
controller_class_index = -1
controller_start_index = -1
for line_index, line in enumerate(controller_script_lines):
if line.startswith('class Controller('):
controller_class_index = line_index
continue
if controller_class_index > -1 and line.startswith(add_python_indentation('def start(', 1)):
controller_start_index = line_index
insert_index = line_index + 1
continue
if controller_start_index > -1 and line.startswith(add_python_indentation('if self.enable_route', 2)):
insert_index = line_index + 1
break
if controller_start_index > -1 and not line.startswith(add_python_indentation('', 2)):
break
if insert_index == -1:
raise ValueError('Cannot find Controller constructor in {}'.format(controller_file_name))
# insert
controller_script_lines.insert(insert_index, handler_script)
controller_script = '\n'.join(controller_script_lines)
module.set_content(controller_file_name, controller_script)
def _complete_repo(self):
if len(self.field_names) == 0:
return
self._complete_repo_field_declaration()
self._complete_repo_field_insert()
self._complete_repo_field_update()
def _complete_repo(self):
if len(self.field_names) == 0:
return
self._complete_repo_field_declaration()
self._complete_repo_field_insert()
self._complete_repo_field_update()
def _complete_repo_field_declaration(self):
# get field declaration
field_declaration_lines = []
for field_name in self.field_names:
new_line = self.replace_text(
self.repo_field_declaration_partial,
{
'zaruba_field_name': snake(field_name),
}
)
new_line = add_python_indentation(new_line.strip('\n'), 1)
field_declaration_lines.append(new_line)
field_declaration_script = '\n'.join(field_declaration_lines)
# get db repo script
db_repo_file_name = 'zarubaServiceName/repos/dbZarubaEntityName.py'
db_repo_script = self.get_content(db_repo_file_name)
db_repo_lines = db_repo_script.split('\n')
# look for insert index
entity_class_index = -1
insert_index = -1
table_name_index = -1
for line_index, line in enumerate(db_repo_lines):
if line.startswith('class DBZarubaEntityNameEntity'):
entity_class_index = line_index
if entity_class_index != -1 and line.startswith(add_python_indentation('__tablename__', 1)):
table_name_index = line_index
if entity_class_index != -1 and line.startswith(add_python_indentation('id = Column(', 1)):
insert_index = line_index + 1
break
if insert_index == -1 and table_name_index != -1:
insert_index = table_name_index + 1
if insert_index == -1 and entity_class_index != -1:
insert_index = entity_class_index + 1
if insert_index == -1:
raise ValueError('Cannot find DBZarubaEntityNameEntity class in {}'.fomrat(db_repo_file_name))
# insert new line
db_repo_lines.insert(insert_index, field_declaration_script)
db_repo_script = '\n'.join(db_repo_lines)
self.set_content(db_repo_file_name, db_repo_script)
def _complete_repo_field_insert(self):
# get field insert
field_insert_lines = []
for field_name in self.field_names:
new_line = self.replace_text(
self.repo_field_insert_partial,
{
'zaruba_field_name': snake(field_name),
}
)
new_line = add_python_indentation(new_line.strip('\n'), 4)
field_insert_lines.append(new_line)
field_insert_script = '\n'.join(field_insert_lines)
# get db repo script
db_repo_file_name = 'zarubaServiceName/repos/dbZarubaEntityName.py'
db_repo_script = self.get_content(db_repo_file_name)
db_repo_lines = db_repo_script.split('\n')
# look for insert index
repo_class_index = -1
method_index = -1
instance_index = -1
insert_index = -1
for line_index, line in enumerate(db_repo_lines):
if line.startswith('class DBZarubaEntityNameRepo'):
repo_class_index = line_index
if repo_class_index != -1 and line.startswith(add_python_indentation('def insert(self,', 1)):
method_index = line_index
if method_index != -1 and line.startswith(add_python_indentation('db_entity = DBZarubaEntityNameEntity', 3)):
instance_index = line_index
if instance_index != -1 and line.startswith(add_python_indentation('id=str(', 4)):
insert_index = line_index + 1
break
if insert_index == -1 and instance_index != -1:
insert_index = instance_index + 1
if insert_index == -1:
raise ValueError('Cannot find data-insert on DBZarubaEntityNameRepo.insert in {}'.fomrat(db_repo_file_name))
# insert new line
db_repo_lines.insert(insert_index, field_insert_script)
db_repo_script = '\n'.join(db_repo_lines)
self.set_content(db_repo_file_name, db_repo_script)
def _complete_repo_field_update(self):
# get field update
field_update_lines = []
for field_name in self.field_names:
new_line = self.replace_text(
self.repo_field_update_partial,
{
'zaruba_field_name': snake(field_name),
'zaruba_entity_name': snake(self.entity_name),
}
)
new_line = add_python_indentation(new_line.strip('\n'), 3)
field_update_lines.append(new_line)
field_update_script = '\n'.join(field_update_lines)
# get db repo script
db_repo_file_name = 'zarubaServiceName/repos/dbZarubaEntityName.py'
db_repo_script = self.get_content(db_repo_file_name)
db_repo_lines = db_repo_script.split('\n')
# look for update index
repo_class_index = -1
method_index = -1
insert_index = -1
for line_index, line in enumerate(db_repo_lines):
if line.startswith('class DBZarubaEntityNameRepo'):
repo_class_index = line_index
if repo_class_index != -1 and line.startswith(add_python_indentation('def update(self,', 1)):
method_index = line_index
if method_index != -1 and line.startswith(add_python_indentation('db_entity.updated_at', 3)):
insert_index = line_index
break
if method_index != -1 and insert_index == -1 and line.startswith(add_python_indentation('db.add(db_entity)', 3)):
insert_index = line_index
break
if insert_index == -1:
raise ValueError('Cannot find data-update on DBZarubaEntityNameRepo.update in {}'.fomrat(db_repo_file_name))
# update new line
db_repo_lines.insert(insert_index, field_update_script)
db_repo_script = '\n'.join(db_repo_lines)
self.set_content(db_repo_file_name, db_repo_script)
def _complete_schema(self):
if len(self.field_names) == 0:
return
# get schema field declaration
schema_field_declaration_lines = []
for field_name in self.field_names:
new_line = self.replace_text(
self.schema_field_declaration_partial,
{
'zaruba_field_name': snake(field_name),
}
)
new_line = add_python_indentation(new_line.strip('\n'), 1)
schema_field_declaration_lines.append(new_line)
schema_field_declaration = '\n'.join(schema_field_declaration_lines)
# get schema script
schema_script_file_name = 'zarubaServiceName/schemas/zarubaEntityName.py'
schema_script = self.get_content(schema_script_file_name)
schema_script_lines = schema_script.split('\n')
# insert schema field declaration to schema script
insert_index = -1
for line_index, line in enumerate(schema_script_lines):
if line.startswith('class ZarubaEntityNameData('):
insert_index = line_index + 1
if schema_script_lines[insert_index] == add_python_indentation('pass', 1):
schema_script_lines.pop(insert_index)
break
if insert_index == -1:
raise ValueError('Cannot find ZarubaEntityNameData class in {}'.format(schema_script_file_name))
schema_script_lines.insert(insert_index, schema_field_declaration)
schema_script = '\n'.join(schema_script_lines)
self.set_content(schema_script_file_name, schema_script) |
#!/usr/bin/env python3
import re
from columnplot.utility import file_exists
from columnplot.variables import INVALID_DATA
import dateutil.parser
from datetime import datetime
class ColumnGenerator(object):
    """Parse a columnar data file (CSV or whitespace-separated) into typed columns.

    Each column is classified as numeric, datetime, or string.  A string
    column whose number of distinct labels exceeds ``max_strelms`` is
    invalidated by prepending ``INVALID_DATA``.  The data file is read
    twice: once for the data and once for the (optional) title line.
    """
    def __init__(self, datapath, params):
        """Read and parse *datapath* using the option dict *params*.

        params keys used here: 'enable_graphtitle', 'force_graphtitle',
        'max_strelms', 'timefmt', 'ignore_colnrs', 'delimiter'.
        """
        file_exists(datapath)
        self.__enable_titleline = params['enable_graphtitle']
        self.__force_titleline = params['force_graphtitle']
        self.__max_strelms = params['max_strelms']
        self.__timefmt = params['timefmt']
        self.__ignore_colnrs = params['ignore_colnrs']
        delimiter = params['delimiter']
        self.__datapath = datapath
        self.__comment = self.__get_comment()
        self.__delimiter = self.__get_delimiter(datapath, delimiter)
        # Parsing happens eagerly at construction time.
        self.__data = self.__get_data(datapath, self.__comment, self.__delimiter, self.__timefmt)
        self.__title = self.__get_title(datapath, self.__comment, self.__delimiter)
    #
    # private
    #
    def __check_datatype(self, c_datatype, p_datatype, i, data_type, linenr, data_str):
        """Record column *i*'s type on first sight; exit the process on a clash.

        c_datatype: type detected for the current value; p_datatype: type
        previously recorded for the column ('ini' means not yet determined).
        """
        if p_datatype == 'ini':
            data_type[i] = c_datatype
            return
        else:
            # A value whose type differs from the column's established type
            # is treated as a fatal input error.
            print('[Error] Detect different type at line {} in column {} : {}'.format(str(linenr), str(i + 1), data_str))
            exit(1)
    def __get_iscsv(self, datapath):
        # CSV detection is purely by file extension.
        return True if re.match('.*\.csv$', datapath) else False
    def __get_comment(self):
        # Regex matching comment lines: optional leading whitespace then '#'.
        return '^\s*#'
    def __get_delimiter(self, datapath, delimiter):
        """Return the explicit delimiter, or infer one from the file extension."""
        if delimiter:
            return delimiter
        is_csv = self.__get_iscsv(datapath)
        return ',' if is_csv else '\s+'
    def __get_data(self, datapath, comment, delimiter, timefmt):
        """Read *datapath* and return a list of typed columns.

        Each value is tried as float first, then as datetime (via the
        supplied formats, or dateutil when timefmt is falsy), and finally
        kept as a string.  Columns listed in ignore_colnrs (1-based,
        assumed sorted ascending — TODO confirm) are skipped.
        """
        data = list()
        data_str = list()       # distinct labels seen per string column
        data_type = list()
        label_count = list()
        is_invalid_data = list()
        ignore_firstnr = self.__ignore_colnrs[0] if self.__ignore_colnrs else 0
        ignore_lastnr = self.__ignore_colnrs[-1] if self.__ignore_colnrs else 0
        is_comment = re.compile(comment)
        pat_number = re.compile('\d')
        linenr = 0
        load_first_data = False
        with open(datapath, 'r') as f:
            if self.__force_titleline:
                # Skip the forced title line; it is handled by __get_title.
                linenr = 1
                f.readline()
            for line in f:
                linenr += 1
                if is_comment.match(line):
                    continue
                idx_invalid_nr = 0
                invalid_nr = ignore_firstnr
                ignore_data = False
                splitdata = list()
                # Split the row, dropping ignored column numbers (1-based).
                for i, sd in enumerate(re.split(delimiter, line.strip('\n')), start=1):
                    if i == invalid_nr:
                        if i < ignore_lastnr:
                            idx_invalid_nr += 1
                            invalid_nr = self.__ignore_colnrs[idx_invalid_nr]
                        continue
                    sd = sd.strip()
                    if sd == '':
                        # A blank field invalidates the whole row.
                        ignore_data = True
                        break
                    splitdata.append(sd)
                if ignore_data or not splitdata:
                    continue
                if not load_first_data:
                    # First data row determines the number of columns.
                    for _ in splitdata:
                        data.append(list())
                        data_type.append('ini') # the value is 'ini', 'num', 'date', or 'str'
                        data_str.append(list())
                        label_count.append(0)
                        is_invalid_data.append(False)
                    load_first_data = True
                for i, d in enumerate(splitdata):
                    if is_invalid_data[i]:
                        continue
                    try:
                        # save numerical value
                        data[i].append(float(d))
                        if data_type[i] != 'num':
                            self.__check_datatype('num', data_type[i], i, data_type, linenr, d)
                        continue
                    except Exception:
                        pass
                    # Datetime parsing is skipped entirely when timefmt == [''].
                    if timefmt != ['']:
                        try:
                            if timefmt:
                                # Try each explicit format until one matches.
                                for fmt in timefmt:
                                    try:
                                        dt = datetime.strptime(d, fmt)
                                        break
                                    except Exception:
                                        pass
                                else:
                                    raise
                            else:
                                # Heuristic: only attempt dateutil on values
                                # containing at least one digit.
                                if not re.search(pat_number, d):
                                    raise
                                # takes a long time
                                dt = dateutil.parser.parse(d)
                            # save datetime
                            data[i].append(dt)
                            if data_type[i] != 'date':
                                self.__check_datatype('date', data_type[i], i, data_type, linenr, d)
                            continue
                        except Exception:
                            pass
                    # Fall back to treating the value as a string label.
                    if d not in data_str[i]:
                        label_count[i] += 1
                        data_str[i].append(d)
                    if label_count[i] > self.__max_strelms:
                        # invalidate column i
                        data[i].insert(0, INVALID_DATA)
                        is_invalid_data[i] = True
                    else:
                        # save string
                        data[i].append(d)
                        if data_type[i] != 'str':
                            self.__check_datatype('str', data_type[i], i, data_type, linenr, d)
        return data
    def __get_title(self, datapath, comment, delimiter):
        """Return column titles from the first (comment/forced) line.

        Falls back to 1-based column numbers (ints) when the first line is
        not a title line.  Only the first line is considered (break below).
        """
        title = list()
        is_comment = re.compile(comment)
        with open(datapath, 'r') as f:
            for line in f:
                if self.__force_titleline or (self.__enable_titleline and is_comment.match(line)):
                    # Strip the comment marker and split into column titles.
                    line = re.sub(comment, '', line.strip('\n'))
                    splittitle = re.split(delimiter, line)
                    for i, t in enumerate(splittitle, start=1):
                        if i in self.__ignore_colnrs:
                            continue
                        title.append(t.strip())
                else:
                    # No title line: use the column numbers themselves.
                    splitnr = len(re.split(delimiter, line))
                    for i in range(1, splitnr + 1):
                        if i in self.__ignore_colnrs:
                            continue
                        title.append(i)
                break
        return title
    #
    # public
    #
    def get_data(self):
        """Return the parsed columns (list of lists)."""
        return self.__data
    def get_title(self):
        """Return the column titles (strings, or ints when no title line)."""
        return self.__title
if __name__ == '__main__':
    # Manual smoke tests; not part of the library API.
    # test1
    from config import create_columnplot_param
    params = create_columnplot_param()
    ColumnGenerator('data/test.csv', params)
    # NOTE(review): the exit() below makes everything after it dead code —
    # presumably left in deliberately as scaffolding for manual testing.
    exit()
    print(ColumnGenerator('data/simple.csv', params).get_data())
    print(ColumnGenerator('data/graph.dat', params).get_data())
    print(ColumnGenerator('data/graph_notmatchx.dat', params).get_title())
    # test2: compare parse time with and without an explicit time format.
    import time
    params_datefmt = params.copy()
    params_datefmt['timefmt'] = ['%Y-%m-%d %H:%M:%S.%f']
    t1 = time.time()
    ColumnGenerator('data/test.csv', params_datefmt)
    t2 = time.time()
    ColumnGenerator('data/test.csv', params)
    t3 = time.time()
    print("with fmt : %f[s]" % (t2 - t1))
    print("without fmt : %f[s]" % (t3 - t2))
|
import unittest
from bin_optimize import optimize
class Testing(unittest.TestCase):
    """Unit tests for bin_optimize.optimize.

    NOTE(review): based on these fixtures, optimize(bins, key) appears to
    remove the bin named *key* and redistribute its (item, size) tuples
    among the remaining bins — confirm against bin_optimize itself.
    """
    def test_happy_path(self):
        """Redistributing each bin of each test set yields the expected layouts."""
        test_set = [
            {'b1': [('a1', 600), ('a5', 250), ('a10', 400)],
             'b2': [('a2', 400), ('a6', 500), ('a11', 200)],
             'b3': [('a3', 700), ('a7', 200), ('a12', 300)],
             'b4': [('a4', 200), ('a8', 200), ('a13', 200),
                    ('a7', 200), ('a9', 400)]},
            {'b1': [('a1', 300), ('a5', 300), ('a9', 250), ('a12', 400)],
             'b2': [('a2', 400), ('a6', 500), ('a10', 200)],
             'b3': [('a3', 700), ('a7', 200), ('a11', 300)],
             'b4': [('a4', 800), ('a8', 400)]
             },
            {'b1': [('a1', 6), ('a5', 4.5), ('a9', 4)],
             'b2': [('a2', 4), ('a6', 5), ('a10', 2)],
             'b3': [('a3', 7), ('a7', 2), ('a11', 3)],
             'b4': [('a4', 2), ('a8', 2), ('a12', 2), ('a13', 2), ('a15', 4)]}
        ]
        # One expected result per (test set, bin) pair, in iteration order.
        expected = \
            [{'b2': [('a2', 400), ('a6', 500), ('a11', 200), ('a1', 600)],
              'b3': [('a3', 700), ('a7', 200), ('a12', 300), ('a10', 400)],
              'b4': [('a4', 200), ('a8', 200), ('a13', 200), ('a7', 200),
                     ('a9', 400), ('a5', 250)]},
             {'b1': [('a1', 600), ('a5', 250), ('a10', 400), ('a6', 500)],
              'b3': [('a3', 700), ('a7', 200), ('a12', 300), ('a11', 200)],
              'b4': [('a4', 200), ('a8', 200), ('a13', 200), ('a7', 200),
                     ('a9', 400), ('a2', 400)]},
             {'b1': [('a1', 600), ('a5', 250), ('a10', 400), ('a7', 200)],
              'b2': [('a2', 400), ('a6', 500), ('a11', 200), ('a12', 300)],
              'b4': [('a4', 200), ('a8', 200), ('a13', 200), ('a7', 200),
                     ('a9', 400), ('a3', 700)]},
             {'b1': [('a1', 600), ('a5', 250), ('a10', 400), ('a7', 200)],
              'b2': [('a2', 400), ('a6', 500), ('a11', 200), ('a13', 200),
                     ('a8', 200)],
              'b3': [('a3', 700), ('a7', 200), ('a12', 300), ('a4', 200),
                     ('a9', 400)]},
             {'b2': [('a2', 400), ('a6', 500), ('a10', 200), ('a1', 300),
                     ('a9', 250)],
              'b3': [('a3', 700), ('a7', 200), ('a11', 300), ('a12', 400)],
              'b4': [('a4', 800), ('a8', 400), ('a5', 300)]},
             {'b1': [('a1', 300), ('a5', 300), ('a9', 250), ('a12', 400),
                     ('a6', 500)],
              'b3': [('a3', 700), ('a7', 200), ('a11', 300), ('a10', 200)],
              'b4': [('a4', 800), ('a8', 400), ('a2', 400)]},
             {'b1': [('a1', 300), ('a5', 300), ('a9', 250), ('a12', 400),
                     ('a7', 200)],
              'b2': [('a2', 400), ('a6', 500), ('a10', 200), ('a11', 300)],
              'b4': [('a4', 800), ('a8', 400), ('a3', 700)]},
             {'b1': [('a1', 300), ('a5', 300), ('a9', 250), ('a12', 400)],
              'b2': [('a2', 400), ('a6', 500), ('a10', 200), ('a4', 800)],
              'b3': [('a3', 700), ('a7', 200), ('a11', 300), ('a8', 400)]},
             {'b2': [('a2', 4), ('a6', 5), ('a10', 2), ('a1', 6)],
              'b3': [('a3', 7), ('a7', 2), ('a11', 3), ('a5', 4.5)],
              'b4': [('a4', 2), ('a8', 2), ('a12', 2), ('a13', 2), ('a15', 4),
                     ('a9', 4)]},
             {'b1': [('a1', 6), ('a5', 4.5), ('a9', 4)],
              'b3': [('a3', 7), ('a7', 2), ('a11', 3), ('a10', 2), ('a6', 5)],
              'b4': [('a4', 2), ('a8', 2), ('a12', 2), ('a13', 2), ('a15', 4),
                     ('a2', 4)]},
             {'b1': [('a1', 6), ('a5', 4.5), ('a9', 4)],
              'b2': [('a2', 4), ('a6', 5), ('a10', 2), ('a11', 3), ('a7', 2)],
              'b4': [('a4', 2), ('a8', 2), ('a12', 2), ('a13', 2), ('a15', 4),
                     ('a3', 7)]},
             {'b1': [('a1', 6), ('a5', 4.5), ('a9', 4), ('a8', 2)],
              'b2': [('a2', 4), ('a6', 5), ('a10', 2), ('a12', 2), ('a15', 4)],
              'b3': [('a3', 7), ('a7', 2), ('a11', 3), ('a13', 2), ('a4', 2)]}]
        result = []
        # Optimize every bin of every test set in insertion order and compare
        # against the flat list of expected redistributions.
        for rec in test_set:
            for k, _ in rec.items():
                resp = optimize(rec, k)
                result.append(resp)
        self.assertEqual(expected, result)
    def test_nbr_bins(self):
        """Bin names may be non-string keys (ints)."""
        bins = {123: [('a1', 6), ('a2', 4.5), ('a3', 4)],
                345: [('a4', 4), ('a5', 5), ('a6', 2)],
                567: [('a7', 7), ('a8', 2), ('a9', 3)],
                789: [('a10', 2), ('a11', 2),
                      ('a12', 2), ('a13', 2), ('a14', 4)]}
        expected = {345: [('a4', 4), ('a5', 5), ('a6', 2), ('a1', 6)],
                    567: [('a7', 7), ('a8', 2), ('a9', 3), ('a2', 4.5)],
                    789: [('a10', 2), ('a11', 2),
                          ('a12', 2), ('a13', 2), ('a14', 4), ('a3', 4)]}
        resp = optimize(bins, 123)
        self.assertEqual(expected, resp)
    def test_nbr_bins_empty_size(self):
        """Zero sizes and a size-less 1-tuple ('a3',) are still redistributed."""
        bins = {123: [('a1', 0), ('a2', 4.5), ('a3', )],
                345: [('a4', 4), ('a5', 5), ('a6', 2)],
                567: [('a7', 0), ('a8', 2), ('a9', 3)],
                789: [('a10', 2), ('a11', 2),
                      ('a12', 2), ('a13', 2), ('a14', 4)]}
        expected = {345: [('a4', 4), ('a5', 5), ('a6', 2)],
                    567: [('a7', 0), ('a8', 2), ('a9', 3), ('a1', 0),
                          ('a2', 4.5), ('a3', )],
                    789: [('a10', 2), ('a11', 2), ('a12', 2), ('a13', 2),
                          ('a14', 4)]}
        resp = optimize(bins, 123)
        self.assertEqual(expected, resp)
    def test_nbr_weights(self):
        """Item identifiers may be non-string values (ints)."""
        bins = {123: [(102, 6), (107, 4.5), (112, 4)],
                345: [(103, 4), (108, 5), (113, 2)],
                567: [(104, 7), (109, 2), (114, 3)],
                789: [(105, 2), (110, 2),
                      (106, 2), (111, 2), (115, 4)]}
        expected = {345: [(103, 4), (108, 5), (113, 2), (102, 6)],
                    567: [(104, 7), (109, 2), (114, 3), (107, 4.5)],
                    789: [(105, 2), (110, 2),
                          (106, 2), (111, 2), (115, 4), (112, 4)]}
        resp = optimize(bins, 123)
        self.assertEqual(expected, resp)
    def test_incorrect_tuple(self):
        """Non-tuple bin contents raise TypeError."""
        test_set = {'b1': [2], 'b2': 4}
        with self.assertRaises(TypeError):
            optimize(test_set, 'b1')
    def test_incorrect_bin(self):
        """An unknown bin key raises KeyError."""
        test_set = {'b1': [2], 'b2': [4]}
        with self.assertRaises(KeyError):
            optimize(test_set, 'E')
    def test_empty_bin(self):
        """Optimizing an empty bin simply removes it."""
        test_set = {'b1': [('a1', 2)], 'b2': []}
        self.assertEqual({'b1': [('a1', 2)]}, optimize(test_set, 'b2'))
|
import unittest
from unittest.mock import patch
from string import printable
import json
from cdflow import (
CDFLOW_IMAGE_ID, InvalidURLError, fetch_account_scheme, get_image_id,
parse_s3_url
)
import boto3
from moto import mock_s3
from hypothesis import assume, given
from hypothesis.strategies import dictionaries, fixed_dictionaries, text
from test.strategies import VALID_ALPHABET, image_id, s3_bucket_and_key
class TestGetReleaseCommandsImage(unittest.TestCase):
    """Tests for cdflow.get_image_id resolution: env var > config > default."""

    @given(dictionaries(
        keys=text(alphabet=printable), values=text(alphabet=printable)
    ))
    def test_get_default_image_id(self, environment):
        """Without CDFLOW_IMAGE_ID in the environment, the default image is used."""
        assume('CDFLOW_IMAGE_ID' not in environment)
        # Renamed local from `image_id` to avoid shadowing the imported
        # `image_id` hypothesis strategy from test.strategies.
        resolved_image_id = get_image_id(environment, {})
        assert resolved_image_id == CDFLOW_IMAGE_ID

    @given(fixed_dictionaries({
        'environment': dictionaries(
            keys=text(alphabet=printable), values=text(alphabet=printable)
        ),
        'image_id': image_id(),
    }))
    def test_get_image_id_from_environment(self, fixtures):
        """CDFLOW_IMAGE_ID in the environment wins over everything else."""
        environment = fixtures['environment']
        environment['CDFLOW_IMAGE_ID'] = fixtures['image_id']
        # Renamed local from `image_id` (see note above in this method's sibling).
        resolved_image_id = get_image_id(environment, {})
        assert resolved_image_id == fixtures['image_id']

    def test_get_image_id_from_config_file(self):
        """`terraform-version` in the config selects a matching tagged image."""
        terraform_version = '0.12.18'
        config = {
            'terraform-version': terraform_version
        }
        resolved_image_id = get_image_id({}, config)
        assert resolved_image_id == \
            f'mergermarket/cdflow-commands:terraform{terraform_version}'
class TestParseS3Url(unittest.TestCase):
    """Tests for cdflow.parse_s3_url."""

    @given(s3_bucket_and_key())
    def test_gets_bucket_name_and_key(self, s3_bucket_and_key):
        """A well-formed s3:// URL is split into its bucket and key parts."""
        expected_bucket, expected_key = s3_bucket_and_key
        url = 's3://{}/{}'.format(expected_bucket, expected_key)
        parsed_bucket, parsed_key = parse_s3_url(url)
        assert parsed_bucket == expected_bucket
        assert parsed_key == expected_key

    @given(text())
    def test_invalid_url_protocol_throws_exception(self, invalid_url):
        """URLs that do not start with s3:// are rejected."""
        assume(not invalid_url.startswith('s3://'))
        self.assertRaises(InvalidURLError, parse_s3_url, invalid_url)

    @given(text(alphabet=VALID_ALPHABET))
    def test_invalid_url_format_throws_exception(self, invalid_url):
        """s3:// URLs lacking a key component are rejected."""
        assume('/' not in invalid_url)
        self.assertRaises(
            InvalidURLError, parse_s3_url, 's3://{}'.format(invalid_url)
        )
class TestFetchAccountScheme(unittest.TestCase):
    """Tests for cdflow.fetch_account_scheme, including its upgrade forwarding.

    A scheme may carry an 'upgrade-account-scheme' section with team and
    component whitelists plus a 'new-url'; when the caller's team or
    component is whitelisted, the scheme at 'new-url' is fetched instead.
    """
    def setUp(self):
        # moto's in-memory S3 stands in for real AWS for each test.
        self.mock_s3 = mock_s3()
        self.mock_s3.start()
    def tearDown(self):
        self.mock_s3.stop()
    @given(fixed_dictionaries({
        's3_bucket_and_key': s3_bucket_and_key(),
        'account_prefix': text(alphabet=VALID_ALPHABET, min_size=1),
    }))
    def test_fetches_without_forwarding_account_scheme(self, fixtures):
        """Empty whitelists: the stored scheme is returned as-is."""
        s3_client = boto3.client('s3')
        s3_resource = boto3.resource('s3')
        account_prefix = fixtures['account_prefix']
        bucket = fixtures['s3_bucket_and_key'][0]
        key = fixtures['s3_bucket_and_key'][1]
        s3_client.create_bucket(Bucket=bucket)
        account_scheme_content = {
            'accounts': {
                f'{account_prefix}dev': {
                    'id': '222222222222',
                    'role': 'admin'
                },
                f'{account_prefix}prod': {
                    'id': '111111111111',
                    'role': 'admin'
                }
            },
            'release-account': f'{account_prefix}dev',
            'release-bucket': f'{account_prefix}-account-resources',
            'environments': {
                'live': f'{account_prefix}prod',
                '*': f'{account_prefix}dev'
            },
            'default-region': 'eu-west-12',
            'ecr-registry': '1234567.dkr.ecr.eu-west-1.amazonaws.com',
            'lambda-bucket': 'cdflow-lambda-releases',
            'upgrade-account-scheme': {
                'team-whitelist': [],
                'component-whitelist': [],
                'new-url': 's3://new_bucket/new_key',
            }
        }
        account_scheme_object = s3_resource.Object(bucket, key)
        account_scheme_object.put(
            Body=json.dumps(account_scheme_content).encode('utf-8'),
        )
        team = 'a-team'
        component = 'a-component'
        account_scheme = fetch_account_scheme(
            s3_resource, bucket, key, team, component,
        )
        # The original (non-forwarded) scheme must come back unchanged.
        expected_keys = sorted(account_scheme_content.keys())
        assert list(sorted(account_scheme.keys())) == expected_keys
        release_bucket = account_scheme_content['release-bucket']
        assert account_scheme['release-bucket'] == release_bucket
    def test_fetches_forwarded_account_scheme_if_component_whitelisted(self):
        """Component in the whitelist: the scheme at new-url is returned."""
        s3_client = boto3.client('s3')
        s3_resource = boto3.resource('s3')
        team = 'a-team'
        component = 'a-component'
        old_bucket = 'releases'
        old_key = 'account-scheme.json'
        new_bucket = 'new-releases'
        new_key = 'upgraded-account-scheme.json'
        s3_client.create_bucket(Bucket=old_bucket)
        s3_client.create_bucket(Bucket=new_bucket)
        # Old scheme whitelists our component and points at the new scheme.
        old_account_scheme_content = json.dumps({
            'accounts': {
                'orgdev': {
                    'id': '222222222222',
                    'role': 'admin'
                },
                'orgprod': {
                    'id': '111111111111',
                    'role': 'admin'
                }
            },
            'release-account': 'orgdev',
            'release-bucket': 'org-account-resources',
            'environments': {
                'live': 'orgprod',
                '*': 'orgdev'
            },
            'default-region': 'eu-west-12',
            'ecr-registry': '1234567.dkr.ecr.eu-west-1.amazonaws.com',
            'lambda-bucket': 'cdflow-lambda-releases',
            'upgrade-account-scheme': {
                'team-whitelist': [],
                'component-whitelist': [component],
                'new-url': f's3://{new_bucket}/{new_key}',
            }
        })
        new_account_scheme_content = {
            'accounts': {
                'orgprod': {
                    'id': '0987654321',
                    'role': 'admin-role',
                },
                'orgrelease': {
                    'id': '1234567890',
                    'role': 'test-role',
                    'region': 'region-override',
                },
            },
            'environments': {},
            'release-account': 'orgrelease',
            'release-bucket': new_bucket,
            'default-region': 'test-region-1',
            'terraform-backend-s3-bucket': 'backend-bucket',
            'terraform-backend-s3-dynamodb-table': 'backend-table',
            'lambda-buckets': {
                'test-region-1': 'test-bucket-1',
                'test-region-2': 'test-bucket-2'
            },
        }
        old_account_scheme_object = s3_resource.Object(old_bucket, old_key)
        old_account_scheme_object.put(
            Body=old_account_scheme_content.encode('utf-8'),
        )
        new_account_scheme_object = s3_resource.Object(new_bucket, new_key)
        new_account_scheme_object.put(
            Body=json.dumps(new_account_scheme_content).encode('utf-8'),
        )
        account_scheme = fetch_account_scheme(
            s3_resource, old_bucket, old_key, team, component,
        )
        # The forwarded (new) scheme must be the one returned.
        expected_keys = sorted(new_account_scheme_content.keys())
        assert list(sorted(account_scheme.keys())) == expected_keys
        assert account_scheme['release-bucket'] == new_bucket
    def test_fetches_forwarded_account_scheme_if_team_whitelisted(self):
        """Team in the whitelist: the scheme at new-url is returned."""
        s3_client = boto3.client('s3')
        s3_resource = boto3.resource('s3')
        team = 'a-team'
        component = 'a-component'
        old_bucket = 'releases'
        old_key = 'account-scheme.json'
        new_bucket = 'new-releases'
        new_key = 'upgraded-account-scheme.json'
        s3_client.create_bucket(Bucket=old_bucket)
        s3_client.create_bucket(Bucket=new_bucket)
        # Old scheme whitelists our team and points at the new scheme.
        old_account_scheme_content = json.dumps({
            'accounts': {
                'orgdev': {
                    'id': '222222222222',
                    'role': 'admin'
                },
                'orgprod': {
                    'id': '111111111111',
                    'role': 'admin'
                }
            },
            'release-account': 'orgdev',
            'release-bucket': 'org-account-resources',
            'environments': {
                'live': 'orgprod',
                '*': 'orgdev'
            },
            'default-region': 'eu-west-12',
            'ecr-registry': '1234567.dkr.ecr.eu-west-1.amazonaws.com',
            'lambda-bucket': 'cdflow-lambda-releases',
            'upgrade-account-scheme': {
                'team-whitelist': [team],
                'component-whitelist': [],
                'new-url': f's3://{new_bucket}/{new_key}',
            }
        })
        new_account_scheme_content = {
            'accounts': {
                'orgprod': {
                    'id': '0987654321',
                    'role': 'admin-role',
                },
                'orgrelease': {
                    'id': '1234567890',
                    'role': 'test-role',
                    'region': 'region-override',
                },
            },
            'environments': {},
            'release-account': 'orgrelease',
            'release-bucket': new_bucket,
            'default-region': 'test-region-1',
            'terraform-backend-s3-bucket': 'backend-bucket',
            'terraform-backend-s3-dynamodb-table': 'backend-table',
            'lambda-buckets': {
                'test-region-1': 'test-bucket-1',
                'test-region-2': 'test-bucket-2'
            },
        }
        old_account_scheme_object = s3_resource.Object(old_bucket, old_key)
        old_account_scheme_object.put(
            Body=old_account_scheme_content.encode('utf-8'),
        )
        new_account_scheme_object = s3_resource.Object(new_bucket, new_key)
        new_account_scheme_object.put(
            Body=json.dumps(new_account_scheme_content).encode('utf-8'),
        )
        account_scheme = fetch_account_scheme(
            s3_resource, old_bucket, old_key, team, component,
        )
        # The forwarded (new) scheme must be the one returned.
        expected_keys = sorted(new_account_scheme_content.keys())
        assert list(sorted(account_scheme.keys())) == expected_keys
        assert account_scheme['release-bucket'] == new_bucket
    @patch('cdflow.sys')
    def test_does_not_forward_account_scheme_if_component_flag_passed(
        self, mock_sys,
    ):
        """An explicit --component CLI flag suppresses forwarding."""
        s3_client = boto3.client('s3')
        s3_resource = boto3.resource('s3')
        team = 'a-team'
        component = 'a-component'
        # Simulate `--component <name>` being present on the command line.
        mock_sys.argv = ['--component', component]
        old_bucket = 'releases'
        old_key = 'account-scheme.json'
        new_bucket = 'new-releases'
        new_key = 'upgraded-account-scheme.json'
        s3_client.create_bucket(Bucket=old_bucket)
        old_account_scheme_content = {
            'accounts': {
                'orgdev': {
                    'id': '222222222222',
                    'role': 'admin'
                },
                'orgprod': {
                    'id': '111111111111',
                    'role': 'admin'
                }
            },
            'release-account': 'orgdev',
            'release-bucket': old_bucket,
            'environments': {
                'live': 'orgprod',
                '*': 'orgdev'
            },
            'default-region': 'eu-west-12',
            'ecr-registry': '1234567.dkr.ecr.eu-west-1.amazonaws.com',
            'lambda-bucket': 'cdflow-lambda-releases',
            'upgrade-account-scheme': {
                'team-whitelist': [team],
                'component-whitelist': [],
                'new-url': f's3://{new_bucket}/{new_key}',
            }
        }
        old_account_scheme_object = s3_resource.Object(old_bucket, old_key)
        old_account_scheme_object.put(
            Body=json.dumps(old_account_scheme_content).encode('utf-8'),
        )
        account_scheme = fetch_account_scheme(
            s3_resource, old_bucket, old_key, team, component,
        )
        # Despite the team whitelist match, the old scheme must be returned.
        expected_keys = sorted(old_account_scheme_content.keys())
        assert list(sorted(account_scheme.keys())) == expected_keys
        assert account_scheme['release-bucket'] == old_bucket
|
from __future__ import unicode_literals
import re
import json
import uuid
import types
import inspect
import six
import sqlalchemy
from sqlalchemy import event
from sqlalchemy.ext import declarative
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import Query, sessionmaker, configure_mappers
from sqlalchemy.types import TypeDecorator, String, DateTime, CHAR, Unicode
from sideboard.lib import log, config
__all__ = ['UUID', 'JSON', 'CoerceUTF8', 'declarative_base', 'SessionManager',
'CrudException', 'crudable', 'crud_validation', 'text_length_validation', 'regex_validation']
def _camelcase_to_underscore(value):
""" Converts camelCase string to underscore_separated (aka joined_lower).
>>> _camelcase_to_underscore('fooBarBaz')
'foo_bar_baz'
>>> _camelcase_to_underscore('fooBarBazXYZ')
'foo_bar_baz_xyz'
"""
s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', value)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def _underscore_to_camelcase(value, cap_segment=None):
""" Converts underscore_separated string (aka joined_lower) into camelCase string.
>>> _underscore_to_camelcase('foo_bar_baz')
'FooBarBaz'
>>> _underscore_to_camelcase('foo_bar_baz', cap_segment=0)
'FOOBarBaz'
>>> _underscore_to_camelcase('foo_bar_baz', cap_segment=1)
'FooBARBaz'
>>> _underscore_to_camelcase('foo_bar_baz', cap_segment=1000)
'FooBarBaz'
"""
return "".join([s.title() if idx != cap_segment else s.upper() for idx, s in enumerate(value.split('_'))])
class CoerceUTF8(TypeDecorator):
    """Column type that transparently decodes byte strings to unicode
    before they are handed to the database driver (stored as Unicode).
    """
    impl = Unicode

    def process_bind_param(self, value, dialect):
        # Only raw byte strings need decoding; unicode text and None pass
        # through untouched.
        if isinstance(value, bytes):
            return value.decode('utf-8')
        return value
class UUID(TypeDecorator):
    """Platform-independent UUID column type.

    On PostgreSQL the native UUID type is used; on every other backend
    values are stored as 32-character hex strings in a CHAR(32) column.
    """
    impl = CHAR

    def load_dialect_impl(self, dialect):
        # Pick the storage type per backend.
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.UUID())
        return dialect.type_descriptor(CHAR(32))

    def process_bind_param(self, value, dialect):
        # Outbound: PostgreSQL receives the dashed string form, other
        # backends receive the 32-char hex digest; None passes through.
        if value is None:
            return value
        if dialect.name == 'postgresql':
            return str(value)
        if isinstance(value, uuid.UUID):
            return value.hex
        return uuid.UUID(value).hex

    def process_result_value(self, value, dialect):
        # Inbound: normalize whatever the backend stored back into the
        # canonical dashed string representation.
        if value is None:
            return value
        return str(uuid.UUID(value))
class JSON(TypeDecorator):
    """Column type that serializes Python values to JSON text.

    An optional *comparator* callable overrides equality detection when
    SQLAlchemy checks whether a stored value has changed.
    """
    impl = String

    def __init__(self, comparator=None):
        self.comparator = comparator
        super(JSON, self).__init__()

    def process_bind_param(self, value, dialect):
        # Strings are assumed to already be JSON and are stored verbatim.
        if value is None:
            return None
        if isinstance(value, six.string_types):
            return value
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        if value is None:
            return None
        return json.loads(str(value))

    def copy_value(self, value):
        # NOTE(review): ``self.mutable`` belongs to SQLAlchemy's legacy
        # mutable-type API — confirm it is still provided by the version in
        # use. The json round-trip produces a deep copy.
        if self.mutable:
            return json.loads(json.dumps(value))
        return value

    def compare_values(self, x, y):
        # Delegate to the custom comparator when one was supplied.
        if self.comparator:
            return self.comparator(x, y)
        return x == y
# UTCDateTime is only defined (and exported) when pytz is installed.
try:
    from pytz import UTC
except ImportError:
    pass
else:
    class UTCDateTime(TypeDecorator):
        # Stores datetimes as naive-UTC values and re-attaches the UTC
        # tzinfo on the way back out.
        impl = DateTime

        def process_bind_param(self, value, engine):
            # Convert any aware datetime to UTC and strip tzinfo before
            # storage (columns hold naive UTC values); None passes through.
            if value is not None:
                return value.astimezone(UTC).replace(tzinfo=None)

        def process_result_value(self, value, engine):
            # Re-attach UTC so callers always get aware datetimes back.
            if value is not None:
                return value.replace(tzinfo=UTC)

    __all__.append('UTCDateTime')
def check_constraint_naming_convention(constraint, table):
    """Creates a unique name for an unnamed CheckConstraint.

    The generated name is the SQL text of the CheckConstraint with
    non-alphanumeric, non-underscore operators converted to text, and all
    other non-alphanumeric, non-underscore substrings replaced by underscores.
    If the generated name is longer than 32 characters, a uuid5 based on the
    generated name will be returned instead.

    >>> check_constraint_naming_convention(CheckConstraint('failed_logins > 3'), Table('account', MetaData()))
    'failed_logins_gt_3'

    See: http://docs.sqlalchemy.org/en/latest/core/constraints.html#configuring-constraint-naming-conventions
    """
    # The text of the replacements doesn't matter, so long as it's unique.
    # Multi-character operators are listed first so they are replaced before
    # their single-character prefixes.
    replacements = [
        ('||/', 'cr'), ('<=', 'le'), ('>=', 'ge'), ('<>', 'nq'), ('!=', 'ne'),
        ('||', 'ct'), ('<<', 'ls'), ('>>', 'rs'), ('!!', 'fa'), ('|/', 'sr'),
        ('@>', 'cn'), ('<@', 'cb'), ('&&', 'an'), ('<', 'lt'), ('=', 'eq'),
        ('>', 'gt'), ('!', 'ex'), ('"', 'qt'), ('#', 'hs'), ('$', 'dl'),
        ('%', 'pc'), ('&', 'am'), ('\'', 'ap'), ('(', 'lpr'), (')', 'rpr'),
        ('*', 'as'), ('+', 'pl'), (',', 'cm'), ('-', 'da'), ('.', 'pd'),
        ('/', 'sl'), (':', 'co'), (';', 'sc'), ('?', 'qn'), ('@', 'at'),
        ('[', 'lbk'), ('\\', 'bs'), (']', 'rbk'), ('^', 'ca'), ('`', 'tk'),
        ('{', 'lbc'), ('|', 'pi'), ('}', 'rbc'), ('~', 'td')]

    constraint_name = str(constraint.sqltext).strip()
    for operator, text in replacements:
        constraint_name = constraint_name.replace(operator, text)
    # Raw string pattern: the original non-raw '[\W\s]+' relied on Python
    # preserving invalid escape sequences, which is deprecated (and an error
    # in future Python versions).
    constraint_name = re.sub(r'[\W\s]+', '_', constraint_name)
    # Keep generated names within typical identifier length limits.
    if len(constraint_name) > 32:
        constraint_name = uuid.uuid5(uuid.NAMESPACE_OID, str(constraint_name)).hex
    return constraint_name
# SQLAlchemy doesn't expose its default constructor as a nicely importable
# function, so we grab it from the function defaults.
if six.PY2:
    # getargspec was removed in Python 3; keep it only on the PY2 path.
    _spec_args, _spec_varargs, _spec_kwargs, _spec_defaults = inspect.getargspec(declarative.declarative_base)
else:
    _declarative_spec = inspect.getfullargspec(declarative.declarative_base)
    _spec_args, _spec_defaults = _declarative_spec.args, _declarative_spec.defaults
# Defaults align with the tail of the argument list, so zipping the reversed
# sequences pairs each defaulted arg with its default; we want 'constructor'.
declarative_base_constructor = dict(zip(reversed(_spec_args), reversed(_spec_defaults)))['constructor']
def declarative_base(*orig_args, **orig_kwargs):
    """
    Replacement for SQLAlchemy's declarative_base, which adds these features:
    1) This is a decorator.
    2) This allows your base class to set a constructor.
    3) This provides a default constructor which automatically sets defaults
       instead of waiting to do that until the object is committed.
    4) Automatically setting __tablename__ to snake-case.
    5) Automatic integration with the SessionManager class.
    """
    orig_args = list(orig_args)

    def _decorate_base_class(klass):
        class Mixed(klass, CrudMixin):
            def __init__(self, *args, **kwargs):
                """
                Variant on SQLAlchemy model __init__ which sets default values on
                initialization instead of immediately before the model is saved.
                """
                # A '_model' kwarg (from serialized crud payloads) is accepted
                # but must name this class; it is not passed to the constructor.
                if '_model' in kwargs:
                    assert kwargs.pop('_model') == self.__class__.__name__
                declarative_base_constructor(self, *args, **kwargs)
                # Apply column defaults immediately rather than at flush time.
                for attr, col in self.__table__.columns.items():
                    if kwargs.get(attr) is None and col.default:
                        self.__dict__.setdefault(attr, col.default.execute())

        orig_kwargs['cls'] = Mixed
        if 'name' not in orig_kwargs:
            orig_kwargs['name'] = klass.__name__
        if 'constructor' not in orig_kwargs:
            # Prefer the decorated class's own __init__ when it defines one.
            orig_kwargs['constructor'] = klass.__init__ if '__init__' in klass.__dict__ else Mixed.__init__

        Mixed = declarative.declarative_base(*orig_args, **orig_kwargs)
        # Register the base class by defining module so SessionManager
        # subclasses in the same module can discover it (_SessionInitializer).
        Mixed.BaseClass = _SessionInitializer._base_classes[klass.__module__] = Mixed
        # Derive snake_case table names from the CamelCase class name.
        Mixed.__tablename__ = declarative.declared_attr(lambda cls: _camelcase_to_underscore(cls.__name__))
        return Mixed

    # Bare decoration (@declarative_base) passes exactly one class that is not
    # a Connectable; any other call signature returns the decorator itself.
    is_class_decorator = not orig_kwargs and \
        len(orig_args) == 1 and \
        inspect.isclass(orig_args[0]) and \
        not isinstance(orig_args[0], sqlalchemy.engine.Connectable)

    if is_class_decorator:
        return _decorate_base_class(orig_args.pop())
    else:
        return _decorate_base_class
class _SessionInitializer(type):
    # Maps defining-module name -> declarative base class registered by
    # @declarative_base, so SessionManager subclasses can find their base.
    _base_classes = {}

    def __new__(cls, name, bases, attrs):
        SessionClass = type.__new__(cls, name, bases, attrs)
        # Only concrete subclasses (those that define 'engine') are wired up;
        # the abstract SessionManager base itself is skipped.
        if hasattr(SessionClass, 'engine'):
            if not hasattr(SessionClass, 'BaseClass'):
                # Look up the base class registered for this subclass's module.
                for module, bc in _SessionInitializer._base_classes.items():
                    if module == SessionClass.__module__:
                        SessionClass.BaseClass = bc
                        break
                else:
                    raise AssertionError('no BaseClass specified and @declarative_base was never invoked in {}'.format(SessionClass.__module__))
            if not hasattr(SessionClass, 'session_factory'):
                SessionClass.session_factory = sessionmaker(bind=SessionClass.engine, autoflush=False, autocommit=False,
                                                            query_cls=SessionClass.QuerySubclass)
            SessionClass.initialize_db()
            SessionClass.crud = make_crud_service(SessionClass)
        return SessionClass
@six.add_metaclass(_SessionInitializer)
class SessionManager(object):
    # Subclasses add methods here; each one is bound onto every new session.
    class SessionMixin(object):
        pass

    # Subclasses may override this to customize the Query class sessions use.
    class QuerySubclass(Query):
        pass

    def __init__(self):
        self.session = self.session_factory()
        # Bind every SessionMixin callable onto the session instance,
        # refusing to shadow an attribute the session already has.
        for name, val in self.SessionMixin.__dict__.items():
            if not name.startswith('__'):
                assert not hasattr(self.session, name) and hasattr(val, '__call__')
                setattr(self.session, name, types.MethodType(val, self.session))

    def __enter__(self):
        return self.session

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit only on a clean exit; always close the session.
        try:
            if exc_type is None:
                self.session.commit()
        finally:
            self.session.close()

    def __del__(self):
        # NOTE(review): reaches into the private ``_connections`` attribute of
        # the session transaction — confirm against the SQLAlchemy version in use.
        if self.session.transaction._connections:
            log.error('SessionManager went out of scope without underlying connection being closed; did you forget to use it as a context manager?')
            self.session.close()

    @classmethod
    def initialize_db(cls, drop=False, create=True):
        """Create (and optionally first drop) all tables for BaseClass."""
        configure_mappers()
        cls.BaseClass.metadata.bind = cls.engine
        if drop:
            cls.BaseClass.metadata.drop_all(cls.engine, checkfirst=True)
        if create:
            cls.BaseClass.metadata.create_all(cls.engine, checkfirst=True)

    @classmethod
    def all_models(cls):
        return cls.BaseClass.__subclasses__()  # TODO: subclasses of subclasses; this needs to be recursive or something

    @classmethod
    def resolve_model(cls, name):
        """Resolve *name* (a string or a model class) to a model class or,
        failing that, a raw table.

        Tries the name as given, CamelCased, and with an ALLCAPS first
        segment; each variant is also tried de-pluralized ('...s'/'...ies').
        Raises ValueError when nothing matches.
        """
        if inspect.isclass(name) and issubclass(name, cls.BaseClass):
            return name

        subclasses = {ModelClass.__name__: ModelClass for ModelClass in cls.all_models()}
        permutations = [name, _underscore_to_camelcase(name), _underscore_to_camelcase(name, cap_segment=0)]
        for name in permutations:
            if name in subclasses:
                return subclasses[name]

            if name.lower().endswith('s'):
                singular = name.rstrip('sS')
                if singular in subclasses:
                    return subclasses[singular]

            if name.lower().endswith('ies'):
                singular = name[:-3] + 'y'
                if singular in subclasses:
                    return subclasses[singular]

        # Fall back to raw table lookup by the same name permutations.
        for name in permutations:
            if name in cls.BaseClass.metadata.tables:
                return cls.BaseClass.metadata.tables[name]

        raise ValueError('Unrecognized model: {}'.format(name))
# Python 2's ``from x import *`` requires byte-string names in __all__.
if six.PY2:
    __all__ = [s.encode('ascii') for s in __all__]

# NOTE(review): bottom-of-file import — presumably deferred to avoid a
# circular import between this module and _crud; confirm before moving it.
from sideboard.lib.sa._crud import CrudMixin, make_crud_service, crudable, CrudException, crud_validation, text_length_validation, regex_validation
|
<filename>bbavectors/datasets/dataset_custom.py
from .base import BaseDataset
import os
import cv2
import glob
import numpy as np
from DOTA_devkit.ResultMerge_multi_process import mergebypoly
class CUSTOM(BaseDataset):
    """Dataset reader for DOTA-style oriented-box annotations with three
    vehicle categories (images/ + labelTxt/ directory layout).
    """

    def __init__(self, data_dir, phase, input_h=None, input_w=None, down_ratio=None):
        super(CUSTOM, self).__init__(
            data_dir, phase, input_h, input_w, down_ratio)
        self.category = [
            'small-vehicle',
            'medium-vehicle',
            'large-vehicle'
        ]
        # One display color per class (used for visualization).
        self.color_pans = [
            (255, 0, 0),
            (0, 255, 0),
            (0, 0, 255),
        ]
        self.num_classes = len(self.category)
        self.cat_ids = {cat: i for i, cat in enumerate(self.category)}
        self.img_ids = self.load_img_ids()
        self.image_path = os.path.join(data_dir, 'images')
        self.label_path = os.path.join(data_dir, 'labelTxt')

    def load_img_ids(self):
        """Return image ids (file names without extension) under images/."""
        files = os.listdir(os.path.join(self.data_dir, 'images'))
        return [f.strip().rsplit(".", 1)[0] for f in files]

    def load_image(self, index):
        """Load the image for img_ids[index]; the extension is discovered
        via glob so any image format in images/ works.
        """
        img_id = self.img_ids[index]
        pattern = os.path.join(self.image_path, img_id + '.*')
        matches = glob.glob(pattern)
        # Bug fix: the original asserted os.path.exists() on matches[0],
        # which could never fail — a missing file raised IndexError instead.
        # Check the glob result itself so the failure message is meaningful.
        assert matches, 'image {} not existed'.format(pattern)
        return cv2.imread(matches[0])

    def load_annoFolder(self, img_id):
        """Return the annotation (labelTxt) file path for an image id."""
        return os.path.join(self.label_path, img_id + '.txt')

    def load_annotation(self, index):
        """Parse the DOTA-format label file for img_ids[index].

        Returns a dict with 'pts' (float32 corner points per box), 'cat'
        (int32 class ids) and 'dif' (int32 difficulty flags). Corners are
        clamped into image bounds and boxes smaller than 10px in either
        extent are dropped.
        """
        image = self.load_image(index)
        h, w, c = image.shape
        valid_pts = []
        valid_cat = []
        valid_dif = []
        with open(self.load_annoFolder(self.img_ids[index]), 'r') as f:
            for i, line in enumerate(f.readlines()):
                # Expected: x1 y1 x2 y2 x3 y3 x4 y4 category difficulty
                obj = line.split(' ')
                if len(obj) > 8:
                    # Clamp every corner into the image bounds.
                    x1 = min(max(float(obj[0]), 0), w - 1)
                    y1 = min(max(float(obj[1]), 0), h - 1)
                    x2 = min(max(float(obj[2]), 0), w - 1)
                    y2 = min(max(float(obj[3]), 0), h - 1)
                    x3 = min(max(float(obj[4]), 0), w - 1)
                    y3 = min(max(float(obj[5]), 0), h - 1)
                    x4 = min(max(float(obj[6]), 0), w - 1)
                    y4 = min(max(float(obj[7]), 0), h - 1)
                    # TODO: filter small instances
                    xmin = max(min(x1, x2, x3, x4), 0)
                    xmax = max(x1, x2, x3, x4)
                    ymin = max(min(y1, y2, y3, y4), 0)
                    ymax = max(y1, y2, y3, y4)
                    if ((xmax - xmin) > 10) and ((ymax - ymin) > 10):
                        valid_pts.append(
                            [[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
                        valid_cat.append(self.cat_ids[obj[8]])
                        valid_dif.append(int(obj[9]))
        # Fix: removed the redundant f.close() — the 'with' block already
        # closes the file.
        annotation = {}
        annotation['pts'] = np.asarray(valid_pts, np.float32)
        annotation['cat'] = np.asarray(valid_cat, np.int32)
        annotation['dif'] = np.asarray(valid_dif, np.int32)
        return annotation

    def merge_crop_image_results(self, result_path, merge_path):
        """Merge per-crop detection results back into whole-image results."""
        mergebypoly(result_path, merge_path)
|
<reponame>cjsteel/python3-venv-ansible-2.10.5
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type

# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_proxy_policy
short_description: Configure proxy policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and proxy_policy category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.8"
author:
- <NAME> (@chillancezen)
- <NAME> (@JieX19)
- <NAME> (@fgtdev-hblu)
- <NAME> (@frankshen01)
- <NAME> (@mamunozgonzalez)
- <NAME> (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_proxy_policy:
description:
- Configure proxy policies.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
action:
description:
- Accept or deny traffic matching the policy parameters.
type: str
choices:
- accept
- deny
- redirect
application_list:
description:
- Name of an existing Application list. Source application.list.name.
type: str
av_profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
type: str
comments:
description:
- Optional comments.
type: str
disclaimer:
description:
- 'Web proxy disclaimer setting: by domain, policy, or user.'
type: str
choices:
- disable
- domain
- policy
- user
dlp_sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
type: str
dstaddr:
description:
- Destination address objects.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name
firewall.vip.name firewall.vipgrp.name firewall.vip46.name firewall.vipgrp46.name system.external-resource.name.
required: true
type: str
dstaddr_negate:
description:
- When enabled, destination addresses match against any address EXCEPT the specified destination addresses.
type: str
choices:
- enable
- disable
dstaddr6:
description:
- IPv6 destination address objects.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name firewall.vip64.name
firewall.vipgrp64.name system.external-resource.name.
required: true
type: str
dstintf:
description:
- Destination interface names.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
type: str
global_label:
description:
- Global web-based manager visible label.
type: str
groups:
description:
- Names of group objects.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
http_tunnel_auth:
description:
- Enable/disable HTTP tunnel authentication.
type: str
choices:
- enable
- disable
icap_profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
type: str
internet_service:
description:
- Enable/disable use of Internet Services for this policy. If enabled, destination address and service are not used.
type: str
choices:
- enable
- disable
internet_service_custom:
description:
- Custom Internet Service name.
type: list
suboptions:
name:
description:
- Custom name. Source firewall.internet-service-custom.name.
required: true
type: str
internet_service_id:
description:
- Internet Service ID.
type: list
suboptions:
id:
description:
- Internet Service ID. Source firewall.internet-service.id.
required: true
type: int
internet_service_negate:
description:
- When enabled, Internet Services match against any internet service EXCEPT the selected Internet Service.
type: str
choices:
- enable
- disable
ips_sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
type: str
label:
description:
- VDOM-specific GUI visible label.
type: str
logtraffic:
description:
- Enable/disable logging traffic through the policy.
type: str
choices:
- all
- utm
- disable
logtraffic_start:
description:
- Enable/disable policy log traffic start.
type: str
choices:
- enable
- disable
policyid:
description:
- Policy ID.
required: true
type: int
poolname:
description:
- Name of IP pool object.
type: list
suboptions:
name:
description:
- IP pool name. Source firewall.ippool.name.
required: true
type: str
profile_group:
description:
- Name of profile group. Source firewall.profile-group.name.
type: str
profile_protocol_options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
type: str
profile_type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
type: str
choices:
- single
- group
proxy:
description:
- Type of explicit proxy.
type: str
choices:
- explicit-web
- transparent-web
- ftp
- ssh
- ssh-tunnel
- wanopt
redirect_url:
description:
- Redirect URL for further explicit web proxy processing.
type: str
replacemsg_override_group:
description:
- Authentication replacement message override group. Source system.replacemsg-group.name.
type: str
scan_botnet_connections:
description:
- Enable/disable scanning of connections to Botnet servers.
type: str
choices:
- disable
- block
- monitor
schedule:
description:
- Name of schedule object. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
type: str
service:
description:
- Name of service objects.
type: list
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
service_negate:
description:
- When enabled, services match against any service EXCEPT the specified destination services.
type: str
choices:
- enable
- disable
session_ttl:
description:
- TTL in seconds for sessions accepted by this policy (0 means use the system ).
type: int
spamfilter_profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
type: str
srcaddr:
description:
- Source address objects (must be set when using Web proxy).
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name system
.external-resource.name.
required: true
type: str
srcaddr_negate:
description:
- When enabled, source addresses match against any address EXCEPT the specified source addresses.
type: str
choices:
- enable
- disable
srcaddr6:
description:
- IPv6 source address objects.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name system.external-resource.name.
required: true
type: str
srcintf:
description:
- Source interface names.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
type: str
ssh_filter_profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
type: str
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
type: str
status:
description:
- Enable/disable the active status of the policy.
type: str
choices:
- enable
- disable
transparent:
description:
- Enable to use the IP address of the client to connect to the server.
type: str
choices:
- enable
- disable
users:
description:
- Names of user objects.
type: list
suboptions:
name:
description:
- Group name. Source user.local.name.
required: true
type: str
utm_status:
description:
- Enable the use of UTM profiles/sensors/lists.
type: str
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
waf_profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
type: str
webcache:
description:
- Enable/disable web caching.
type: str
choices:
- enable
- disable
webcache_https:
description:
- Enable/disable web caching for HTTPS (Requires deep-inspection enabled in ssl-ssh-profile).
type: str
choices:
- disable
- enable
webfilter_profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
type: str
webproxy_forward_server:
description:
- Name of web proxy forward server. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
type: str
webproxy_profile:
description:
- Name of web proxy profile. Source web-proxy.profile.name.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure proxy policies.
fortios_firewall_proxy_policy:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
firewall_proxy_policy:
action: "accept"
application_list: "<your_own_value> (source application.list.name)"
av_profile: "<your_own_value> (source antivirus.profile.name)"
comments: "<your_own_value>"
disclaimer: "disable"
dlp_sensor: "<your_own_value> (source dlp.sensor.name)"
dstaddr:
-
name: "default_name_10 (source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name firewall.vip
.name firewall.vipgrp.name firewall.vip46.name firewall.vipgrp46.name system.external-resource.name)"
dstaddr_negate: "enable"
dstaddr6:
-
name: "default_name_13 (source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name firewall.vip64.name firewall
.vipgrp64.name system.external-resource.name)"
dstintf:
-
name: "default_name_15 (source system.interface.name system.zone.name)"
global_label: "<your_own_value>"
groups:
-
name: "default_name_18 (source user.group.name)"
http_tunnel_auth: "enable"
icap_profile: "<your_own_value> (source icap.profile.name)"
internet_service: "enable"
internet_service_custom:
-
name: "default_name_23 (source firewall.internet-service-custom.name)"
internet_service_id:
-
id: "25 (source firewall.internet-service.id)"
internet_service_negate: "enable"
ips_sensor: "<your_own_value> (source ips.sensor.name)"
label: "<your_own_value>"
logtraffic: "all"
logtraffic_start: "enable"
policyid: "31"
poolname:
-
name: "default_name_33 (source firewall.ippool.name)"
profile_group: "<your_own_value> (source firewall.profile-group.name)"
profile_protocol_options: "<your_own_value> (source firewall.profile-protocol-options.name)"
profile_type: "single"
proxy: "explicit-web"
redirect_url: "<your_own_value>"
replacemsg_override_group: "<your_own_value> (source system.replacemsg-group.name)"
scan_botnet_connections: "disable"
schedule: "<your_own_value> (source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name)"
service:
-
name: "default_name_43 (source firewall.service.custom.name firewall.service.group.name)"
service_negate: "enable"
session_ttl: "45"
spamfilter_profile: "<your_own_value> (source spamfilter.profile.name)"
srcaddr:
-
name: "default_name_48 (source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name system
.external-resource.name)"
srcaddr_negate: "enable"
srcaddr6:
-
name: "default_name_51 (source firewall.address6.name firewall.addrgrp6.name system.external-resource.name)"
srcintf:
-
name: "default_name_53 (source system.interface.name system.zone.name)"
ssh_filter_profile: "<your_own_value> (source ssh-filter.profile.name)"
ssl_ssh_profile: "<your_own_value> (source firewall.ssl-ssh-profile.name)"
status: "enable"
transparent: "enable"
users:
-
name: "default_name_59 (source user.local.name)"
utm_status: "enable"
uuid: "<your_own_value>"
waf_profile: "<your_own_value> (source waf.profile.name)"
webcache: "enable"
webcache_https: "disable"
webfilter_profile: "<your_own_value> (source webfilter.profile.name)"
webproxy_forward_server: "<your_own_value> (source web-proxy.forward-server.name web-proxy.forward-server-group.name)"
webproxy_profile: "<your_own_value> (source web-proxy.profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_firewall_proxy_policy_data(json):
    """Return a copy of *json* restricted to the known proxy-policy option
    names, dropping unknown keys and any option whose value is None.

    (The parameter is named ``json`` for consistency with the other
    generated FortiOS modules; it is a plain dict, not the json module.)
    """
    option_list = ['action', 'application_list', 'av_profile',
                   'comments', 'disclaimer', 'dlp_sensor',
                   'dstaddr', 'dstaddr_negate', 'dstaddr6',
                   'dstintf', 'global_label', 'groups',
                   'http_tunnel_auth', 'icap_profile', 'internet_service',
                   'internet_service_custom', 'internet_service_id', 'internet_service_negate',
                   'ips_sensor', 'label', 'logtraffic',
                   'logtraffic_start', 'policyid', 'poolname',
                   'profile_group', 'profile_protocol_options', 'profile_type',
                   'proxy', 'redirect_url', 'replacemsg_override_group',
                   'scan_botnet_connections', 'schedule', 'service',
                   'service_negate', 'session_ttl', 'spamfilter_profile',
                   'srcaddr', 'srcaddr_negate', 'srcaddr6',
                   'srcintf', 'ssh_filter_profile', 'ssl_ssh_profile',
                   'status', 'transparent', 'users',
                   'utm_status', 'uuid', 'waf_profile',
                   'webcache', 'webcache_https', 'webfilter_profile',
                   'webproxy_forward_server', 'webproxy_profile']

    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Lists are rewritten in place (element by element); dicts are rebuilt
    with hyphenated keys; scalar values pass through unchanged.
    """
    if isinstance(data, list):
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
        return data
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def firewall_proxy_policy(data, fos):
    """Create/update or delete a firewall proxy-policy through *fos*.

    The desired state comes from the top-level 'state' when set, falling
    back to the legacy per-section 'state'; anything else fails the module.
    """
    vdom = data['vdom']
    # Top-level 'state' wins; the per-section 'state' is the deprecated form.
    state = data.get('state') or data['firewall_proxy_policy'].get('state') or True

    payload = underscore_to_hyphen(
        filter_firewall_proxy_policy_data(data['firewall_proxy_policy']))

    if state == "present":
        return fos.set('firewall',
                       'proxy-policy',
                       data=payload,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('firewall',
                          'proxy-policy',
                          mkey=payload['policyid'],
                          vdom=vdom)
    fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
    """A response counts as successful when FortiOS reports success, or when
    a DELETE came back 404 (the object was already gone)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
    """Dispatch the firewall_proxy_policy task and normalize the result
    into the (failed, changed, response) triple Ansible expects."""
    if data['firewall_proxy_policy']:
        resp = firewall_proxy_policy(data, fos)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('firewall_proxy_policy'))

    # Trust the device's revision_changed flag when present; otherwise
    # assume a successful call changed something.
    changed = resp['revision_changed'] if 'revision_changed' in resp else True
    return (
        not is_successful_status(resp),
        resp['status'] == "success" and changed,
        resp,
    )
def main():
    """Entry point: build the argument spec, talk to FortiOS over the
    httpapi connection, and exit with the API result.

    The ``firewall_proxy_policy`` sub-spec mirrors the FortiOS
    ``firewall proxy-policy`` schema (generated code).
    """
    # 'policyid' is the mkey used to address an existing policy on delete.
    mkeyname = 'policyid'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_proxy_policy": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "action": {"required": False, "type": "str",
                           "choices": ["accept",
                                       "deny",
                                       "redirect"]},
                "application_list": {"required": False, "type": "str"},
                "av_profile": {"required": False, "type": "str"},
                "comments": {"required": False, "type": "str"},
                "disclaimer": {"required": False, "type": "str",
                               "choices": ["disable",
                                           "domain",
                                           "policy",
                                           "user"]},
                "dlp_sensor": {"required": False, "type": "str"},
                "dstaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "dstaddr_negate": {"required": False, "type": "str",
                                   "choices": ["enable",
                                               "disable"]},
                "dstaddr6": {"required": False, "type": "list",
                             "options": {
                                 "name": {"required": True, "type": "str"}
                             }},
                "dstintf": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "global_label": {"required": False, "type": "str"},
                "groups": {"required": False, "type": "list",
                           "options": {
                               "name": {"required": True, "type": "str"}
                           }},
                "http_tunnel_auth": {"required": False, "type": "str",
                                     "choices": ["enable",
                                                 "disable"]},
                "icap_profile": {"required": False, "type": "str"},
                "internet_service": {"required": False, "type": "str",
                                     "choices": ["enable",
                                                 "disable"]},
                "internet_service_custom": {"required": False, "type": "list",
                                            "options": {
                                                "name": {"required": True, "type": "str"}
                                            }},
                "internet_service_id": {"required": False, "type": "list",
                                        "options": {
                                            "id": {"required": True, "type": "int"}
                                        }},
                "internet_service_negate": {"required": False, "type": "str",
                                            "choices": ["enable",
                                                        "disable"]},
                "ips_sensor": {"required": False, "type": "str"},
                "label": {"required": False, "type": "str"},
                "logtraffic": {"required": False, "type": "str",
                               "choices": ["all",
                                           "utm",
                                           "disable"]},
                "logtraffic_start": {"required": False, "type": "str",
                                     "choices": ["enable",
                                                 "disable"]},
                "policyid": {"required": True, "type": "int"},
                "poolname": {"required": False, "type": "list",
                             "options": {
                                 "name": {"required": True, "type": "str"}
                             }},
                "profile_group": {"required": False, "type": "str"},
                "profile_protocol_options": {"required": False, "type": "str"},
                "profile_type": {"required": False, "type": "str",
                                 "choices": ["single",
                                             "group"]},
                "proxy": {"required": False, "type": "str",
                          "choices": ["explicit-web",
                                      "transparent-web",
                                      "ftp",
                                      "ssh",
                                      "ssh-tunnel",
                                      "wanopt"]},
                "redirect_url": {"required": False, "type": "str"},
                "replacemsg_override_group": {"required": False, "type": "str"},
                "scan_botnet_connections": {"required": False, "type": "str",
                                            "choices": ["disable",
                                                        "block",
                                                        "monitor"]},
                "schedule": {"required": False, "type": "str"},
                "service": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "service_negate": {"required": False, "type": "str",
                                   "choices": ["enable",
                                               "disable"]},
                "session_ttl": {"required": False, "type": "int"},
                "spamfilter_profile": {"required": False, "type": "str"},
                "srcaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "srcaddr_negate": {"required": False, "type": "str",
                                   "choices": ["enable",
                                               "disable"]},
                "srcaddr6": {"required": False, "type": "list",
                             "options": {
                                 "name": {"required": True, "type": "str"}
                             }},
                "srcintf": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "ssh_filter_profile": {"required": False, "type": "str"},
                "ssl_ssh_profile": {"required": False, "type": "str"},
                "status": {"required": False, "type": "str",
                           "choices": ["enable",
                                       "disable"]},
                "transparent": {"required": False, "type": "str",
                                "choices": ["enable",
                                            "disable"]},
                "users": {"required": False, "type": "list",
                          "options": {
                              "name": {"required": True, "type": "str"}
                          }},
                "utm_status": {"required": False, "type": "str",
                               "choices": ["enable",
                                           "disable"]},
                "uuid": {"required": False, "type": "str"},
                "waf_profile": {"required": False, "type": "str"},
                "webcache": {"required": False, "type": "str",
                             "choices": ["enable",
                                         "disable"]},
                "webcache_https": {"required": False, "type": "str",
                                   "choices": ["disable",
                                               "enable"]},
                "webfilter_profile": {"required": False, "type": "str"},
                "webproxy_forward_server": {"required": False, "type": "str"},
                "webproxy_profile": {"required": False, "type": "str"}
            }
        }
    }
    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    versions_check_result = None
    # The module only works over an httpapi connection (socket path present).
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        fos = FortiOSHandler(connection, module, mkeyname)
        is_error, has_changed, result = fortios_firewall(module.params, fos)
        versions_check_result = connection.get_system_version()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    # Warn (not fail) when the target FortiOS version differs from the one
    # this generated module was built against.
    if versions_check_result and versions_check_result['matched'] is False:
        module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import time
from flask.globals import request
from app.home import blueprint
from flask import render_template, redirect, url_for
from flask_login import login_required, current_user
from app import login_manager
from jinja2 import TemplateNotFound
from flask import jsonify
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import praw
import squarify
from flask import Flask, render_template
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import os
from app.settings import APP_STATIC
from data import *
from app.base.models import User, Picks
from app import db
# NOTE(review): import-time side effect -- downloads the NLTK stopwords
# corpus (network access) every time this module is imported.
nltk.download('stopwords')
# Result is discarded; presumably meant only to warm the stopwords cache -- confirm.
set(stopwords.words('english'))
@blueprint.route('/index')
#@login_required
def index1():
    """Serve the Reddit scraper landing page."""
    landing_page = 'core/reddit-index.html'
    return render_template(landing_page)
@blueprint.route('/index1')
#@login_required
def index():
    """Render the dashboard with every stored Picks row as a plain dict.

    NOTE(review): ``temp = i`` aliases the ORM instance rather than copying
    it, so ``del temp._sa_instance_state`` mutates the session-managed
    object itself -- works for rendering, but confirm it does not upset the
    SQLAlchemy session.
    """
    # db.drop_all()
    # db.create_all()
    #found=Picks.query.all()
    arr=[]
    for i in Picks.query.all():
        print(i.__dict__)
        temp = i
        #temp.time = int(time.mktime(temp.time.timetuple())) * 1000
        del temp._sa_instance_state
        arr.append(temp.__dict__)
    return render_template('index.html', time=12345, df=arr)
@blueprint.route('/reddit-index')
def my_form():
    """Serve the scraper settings form (GET side of /reddit-index)."""
    form_page = 'core/reddit-index.html'
    return render_template(form_page)
@blueprint.route('/reddit-index', methods=['POST'])
def my_form_input():
    """Collect scraper settings from the submitted form.

    Each field falls back to its default when the form value is blank.
    Fixes: the original indexed ``request.form['key']`` directly, which made
    Flask abort with 400 when a field was missing entirely even though a
    default clearly existed; it also shadowed the ``input`` builtin.
    """
    defaults = {
        'subs': ['wallstreetbets'],
        'post_flairs': {'Daily Discussion', 'Weekend Discussion', 'Discussion'},
        'goodAuth': {'AutoModerator'},
        'uniqueCmt': True,
        'ignoreAuthP': {'example'},
        'ignoreAuthC': {'example,'},
        'upvoteRatio': 0.70,
        'ups': 20,
        'limit': 500,
        'upvotes': 2,
        'picks': 10,
        'picks_ayz': 5,
    }
    # ``or`` keeps the original truthiness semantics: blank strings (and now
    # missing keys) fall back to the default.
    form_input = {key: (request.form.get(key) or default)
                  for key, default in defaults.items()}
    print("input is", form_input)
    return render_template('core/reddit-index.html')
@ blueprint.route('/data', methods=['POST', 'GET'])
def my_form_post():
    """Scrape hot r/wallstreetbets posts, count ticker mentions, run VADER
    sentiment on the top picks, persist results, and render the data page.

    The scraping parameters below are hard-coded -- the values posted via
    ``my_form_input`` are NOT used here; presumably intended, confirm.
    """
    import time
    start_time = time.time()
    ctime = time.ctime()
    print('time is', time.ctime())
    # NOTE(review): hard-coded/placeholder Reddit credentials -- move to config.
    reddit = praw.Reddit(user_agent="Comment Extraction",
                         client_id="ZM9jcd0nyXvtlA",
                         client_secret="<KEY>",
                         username="",
                         password="")
    '''############################################################################'''
    # set the program parameters
    subs = ['wallstreetbets']  # sub-reddit to search
    # posts flairs to search || None flair is automatically considered
    post_flairs = {'Daily Discussion', 'Weekend Discussion', 'Discussion'}
    # authors whom comments are allowed more than once
    goodAuth = {'AutoModerator'}
    uniqueCmt = True  # allow one comment per author per symbol
    ignoreAuthP = {'example'}  # authors to ignore for posts
    ignoreAuthC = {'example'}  # authors to ignore for comment
    upvoteRatio = 0.70  # upvote ratio for post to be considered, 0.70 = 70%
    ups = 20  # define # of upvotes, post is considered if upvotes exceed this #
    limit = 5  # define the limit, comments 'replace more' limit
    upvotes = 2  # define # of upvotes, comment is considered if upvotes exceed this #
    picks = 10  # define # of picks here, prints as "Top ## picks are:"
    picks_ayz = 5  # define # of picks for sentiment analysis
    '''############################################################################'''
    posts, count, c_analyzed, tickers, titles, a_comments = 0, 0, 0, {}, [], {}
    cmt_auth = {}
    num = 0
    comm = 0
    for sub in subs:
        subreddit = reddit.subreddit(sub)
        hot_python = subreddit.hot()  # sorting posts by hot
        # Extracting comments, symbols from subreddit
        print("running", str(hot_python))
        for submission in hot_python:
            flair = submission.link_flair_text
            author = submission.author.name
            # custom write func
            # NOTE(review): debugging artifact -- the file is reopened per
            # submission, and the comprehension below pulls from the same
            # ``hot_python`` listing the outer loop is iterating; verify.
            file = open(os.path.join(APP_STATIC, "output/sample.py"),
                        "w", encoding='utf-8')
            hotlist = [i for i in hot_python]
            file.write("start time was %s num is %d and hotlist is %s " %
                       (str(time.ctime()), num, str(hotlist)))
            print('num is', num)
            file.close()
            num += 1
            # checking: post upvote ratio # of upvotes, post flair, and author
            if submission.upvote_ratio >= upvoteRatio and submission.ups > ups and (flair in post_flairs or flair is None) and author not in ignoreAuthP:
                submission.comment_sort = 'new'
                comments = submission.comments
                titles.append(submission.title)
                posts += 1
                try:
                    submission.comments.replace_more(limit=limit)
                    for comment in comments:
                        file = open(os.path.join(
                            APP_STATIC, "output/sample.py"), "a", encoding='utf-8')
                        file.write("comnum is %d and comm is %s " %
                                   (comm, str(comment)))
                        file.close()
                        comm += 1
                        #print("comnum is", comm)
                        # try except for deleted account?
                        try:
                            auth = comment.author.name
                        except:
                            pass
                        # NOTE(review): if the author lookup failed, ``auth``
                        # still holds the previous comment's author (or is
                        # undefined on the very first comment) -- confirm.
                        c_analyzed += 1
                        # checking: comment upvotes and author
                        if comment.score > upvotes and auth not in ignoreAuthC:
                            split = comment.body.split(" ")
                            for word in split:
                                word = word.replace("$", "")
                                # upper = ticker, length of ticker <= 5, excluded words,
                                # ``blacklist`` and ``us`` come from the star-import of ``data``.
                                if word.isupper() and len(word) <= 5 and word not in blacklist and word in us:
                                    # unique comments, try/except for key errors
                                    if uniqueCmt and auth not in goodAuth:
                                        try:
                                            if auth in cmt_auth[word]:
                                                break
                                        except:
                                            pass
                                    # counting tickers
                                    if word in tickers:
                                        tickers[word] += 1
                                        a_comments[word].append(comment.body)
                                        cmt_auth[word].append(auth)
                                        count += 1
                                    else:
                                        tickers[word] = 1
                                        cmt_auth[word] = [auth]
                                        a_comments[word] = [comment.body]
                                        count += 1
                except Exception as e:
                    print(e)
    # sorts the dictionary
    symbols = dict(
        sorted(tickers.items(), key=lambda item: item[1], reverse=True))
    top_picks = list(symbols.keys())[0:picks]
    # NOTE(review): rebinds the local name ``time`` from module to float; no
    # later use of the module in this scope, but rename before extending.
    time = (time.time() - start_time)
    # print top picks
    print("It took {t:.2f} seconds to analyze {c} comments in {p} posts in {s} subreddits.\n".format(
        t=time, c=c_analyzed, p=posts, s=len(subs)))
    print("Posts analyzed saved in titles")
    # for i in titles: print(i) # prints the title of the posts analyzed
    print(f"\n{picks} most mentioned picks: ")
    times = []
    top = []
    for i in top_picks:
        print(f"{i}: {symbols[i]}")
        times.append(symbols[i])
        top.append(f"{i}: {symbols[i]}")
    # Applying Sentiment Analysis
    scores, s = {}, {}
    vader = SentimentIntensityAnalyzer()
    # adding custom words from data.py
    vader.lexicon.update(new_words)
    picks_sentiment = list(symbols.keys())[0:picks_ayz]
    for symbol in picks_sentiment:
        stock_comments = a_comments[symbol]
        for cmnt in stock_comments:
            score = vader.polarity_scores(cmnt)
            if symbol in s:
                s[symbol][cmnt] = score
            else:
                s[symbol] = {cmnt: score}
            if symbol in scores:
                for key, _ in score.items():
                    scores[symbol][key] += score[key]
            else:
                scores[symbol] = score
        # calculating avg.
        for key in score:
            scores[symbol][key] = scores[symbol][key] / symbols[symbol]
            scores[symbol][key] = "{pol:.3f}".format(pol=scores[symbol][key])
    picksdb = Picks(pick=scores)
    timesdb = Picks(pick=[times, top, top_picks])
    # print(picks)
    db.session.add(picksdb)
    db.session.add(timesdb)
    db.session.commit()
    # printing sentiment analysis
    print(f"\nSentiment analysis of top {picks_ayz} picks:")
    df = pd.DataFrame(scores)
    df.index = ['Bearish', 'Neutral', 'Bullish', 'Total/Compound']
    df = df.T
    print(df)
    # Date Visualization
    # most mentioned picks
    squarify.plot(sizes=times, label=top, alpha=.7)
    plt.axis('off')
    plt.title(f"{picks} most mentioned picks")
    # plt.show()
    # Sentiment analysis
    df = df.astype(float)
    colors = ['red', 'springgreen', 'forestgreen', 'coral']
    df.plot(kind='bar', color=colors,
            title=f"Sentiment analysis of top {picks_ayz} picks:")
    # plt.show()
    print('done')
    file = open(os.path.join(APP_STATIC, "output/final_output.py"),
                "w", encoding='utf-8')
    file.write("start time was %s /n/n top picks are %s and df is %s" %
               (str(ctime), str(top_picks), str(df)))
    print('num is', num)
    file.close()
    return render_template('core/reddit-data.html', result='done', final=df, t=ctime, c=c_analyzed, p=posts, s=len(subs))
@blueprint.route('/visualize', methods=['POST', 'GET'])
def visualize():
    """Render the data page with a static placeholder payload."""
    context = {'result': 'done', 'final': 'ok'}
    return render_template('core/reddit-data.html', **context)
@blueprint.route('/status_bar', methods=['POST', 'GET'])
def status_bar():
    """Return the scraper's current progress log to the data page.

    Fixes: the file handle is now closed via a context manager even if
    ``read()`` raises.
    """
    with open(os.path.join(APP_STATIC, "output/sample.py"), "r") as progress_file:
        stat = progress_file.read()
    # NOTE(review): adding a hard-coded admin user on every status poll looks
    # like leftover debug code -- it is added to the session but never
    # committed; confirm and remove.
    admin = User(username='admin', email='<EMAIL>', password='<PASSWORD>')
    db.session.add(admin)
    print(User.query.all())
    return render_template('core/reddit-data.html', final=stat, result='read complete')
@blueprint.route('/output', methods=['POST', 'GET'])
def output():
    """Show the final results written by ``my_form_post``.

    Fixes: the file handle is now closed via a context manager even if
    ``read()`` raises.
    """
    with open(os.path.join(APP_STATIC, 'output/final_output.py'), "r") as result_file:
        stat = result_file.read()
    print("stat is %s" % stat)
    return render_template('core/reddit-output.html', arg=stat)
@blueprint.route('/test', methods=['POST', 'GET'])
def test():
    """Smoke-test endpoint: persist one dummy Picks row and acknowledge."""
    dummy_row = Picks(pick='hoho', bearish='whooter', bullish='what')
    db.session.add(dummy_row)
    db.session.commit()
    return jsonify({'result': 'ohk'})
@blueprint.route('/test2', methods=['POST', 'GET'])
def test2():
    """Debug endpoint: look up the dummy 'hoho' row and acknowledge.

    Fixes: removed the unused ``hoho`` local and the duplicated identical
    query (the original ran the same filter twice just to print the row).
    """
    found = Picks.query.filter_by(pick='hoho').first()
    print(found)
    return 'ohkk'
@blueprint.route('/core/settings', methods=['GET'])
def settingsGet():
    """Render the settings page, exposing the DB helper callables."""
    helpers = {'delete_db': delete_db, 'create_db': create_db}
    return render_template('core/settings.html', **helpers)
@blueprint.route('/core/settings', methods=['POST'])
def settings():
    """Look up a Picks row by the submitted id and render it."""
    record_id = request.form['query']
    found = Picks.query.filter_by(id=record_id).first()
    print(found)
    return render_template('core/settings.html', found=found, delete_db=delete_db, create_db=create_db)
def delete_db():
    """Report a DB wipe; the destructive call is intentionally disabled."""
    #db.drop_all()
    message = 'DB deleted'
    return message
def create_db():
    """Create every table defined on the SQLAlchemy metadata."""
    db.create_all()
    return 'All DB created'
@blueprint.route('/core/<template>')
def route_core_template(template):
    """Render a template from the core/ folder by name.

    NOTE(review): when the caller already supplies a '.html' suffix the
    'core/' prefix is NOT added, so the lookup happens at the template
    root -- presumably unintended; confirm before changing.

    Fixes: the final handler was a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        if not template.endswith('.html'):
            template = 'core/' + template + '.html'
        return render_template(template)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        return render_template('page-500.html'), 500
@blueprint.route('/<template>')
def route_template(template):
    """Render an arbitrary top-level template by name, appending '.html'.

    Fixes: the final handler was a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        if not template.endswith('.html'):
            template += '.html'
        return render_template(template)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        return render_template('page-500.html'), 500
|
<reponame>wimp-project/backend
"""empty message
Revision ID: b6c3c9b60c69
Revises:
Create Date: 2020-03-28 15:59:25.954564
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b6c3c9b60c69'  # Alembic id of this migration
down_revision = None  # None: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: commercial activities, feedback, products,
    users, and the offer/visit join tables referencing them."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('commercial_activity',
    sa.Column('commercial_activity_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('address', sa.String(), nullable=True),
    sa.Column('position_lat', sa.Float(), nullable=True),
    sa.Column('position_lon', sa.Float(), nullable=True),
    sa.Column('queue_time', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('commercial_activity_id')
    )
    op.create_table('feedback',
    sa.Column('feedback_id', sa.Integer(), nullable=False),
    sa.Column('feedback_type', sa.Enum('low_availability', 'no_availability', 'queue_awaiting', name='feedbacktype'), nullable=True),
    sa.Column('feedback_value', sa.String(), nullable=True),
    sa.Column('comment', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('feedback_id')
    )
    op.create_table('product',
    sa.Column('product_id', sa.Integer(), nullable=False),
    sa.Column('product_description', sa.String(), nullable=False),
    sa.Column('product_image_url', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('product_id')
    )
    op.create_table('user',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('surname', sa.String(), nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('password', sa.String(), nullable=True),
    sa.Column('salt', sa.String(), nullable=True),
    sa.Column('position_lat', sa.Float(), nullable=True),
    sa.Column('position_lon', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('user_id')
    )
    op.create_table('offer',
    sa.Column('offer_id', sa.Integer(), nullable=False),
    sa.Column('product_id', sa.Integer(), nullable=True),
    sa.Column('commercial_activity_id', sa.Integer(), nullable=True),
    sa.Column('availability', sa.Float(), nullable=True),
    sa.ForeignKeyConstraint(['commercial_activity_id'], ['commercial_activity.commercial_activity_id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['product.product_id'], ),
    sa.PrimaryKeyConstraint('offer_id')
    )
    # NOTE(review): 'comercial_activity_id' (missing an 'm') is misspelled,
    # but the FK still targets the correctly-spelled table; renaming would
    # need a follow-up migration and matching model changes.
    op.create_table('visit',
    sa.Column('visit_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('comercial_activity_id', sa.Integer(), nullable=True),
    sa.Column('visit_time', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['comercial_activity_id'], ['commercial_activity.commercial_activity_id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.user_id'], ),
    sa.PrimaryKeyConstraint('visit_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), dependents first so FK
    constraints are never violated."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('visit', 'offer', 'user', 'product', 'feedback',
                       'commercial_activity'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
|
<gh_stars>100-1000
import numpy as np
import pytest
import torch
from PIL import Image
from torchvision.transforms import transforms
from continuum.datasets import InMemoryDataset
from continuum.scenarios import TransformationIncremental
NB_CLASSES = 6
@pytest.fixture
def numpy_data():
    """Build a tiny random dataset: 100 copies of one 2x2 RGB image plus
    random integer labels in [0, NB_CLASSES)."""
    nb_data = 100  # not too small to have all classes
    base_image = np.random.randint(100, size=(2, 2, 3)).astype(dtype=np.uint8)
    images = np.array([base_image] * nb_data)
    labels = np.random.randint(NB_CLASSES, size=(nb_data))
    return images, labels.astype(int)
'''
Test the initialization with three tasks
'''
def test_init(numpy_data):
    """Each task must transform the same raw data differently, and applying
    the task's transform manually must reproduce the scenario's output.

    NOTE(review): ``train='train'`` passes a string where the other tests
    pass nothing -- confirm InMemoryDataset accepts it as intended.
    """
    x, y = numpy_data
    dummy = InMemoryDataset(x, y, train='train')
    # Task 0: identity; tasks 1-2: fixed-angle rotations.
    Trsf_0 = []
    Trsf_1 = [transforms.RandomAffine(degrees=[45, 45])]
    Trsf_2 = [transforms.RandomAffine(degrees=[90, 90])]
    list_transf = [Trsf_0, Trsf_1, Trsf_2]
    scenario = TransformationIncremental(
        cl_dataset=dummy, incremental_transformations=list_transf
    )
    ref_data = None
    raw_ref_data = None
    for task_id, taskset in enumerate(scenario):
        samples, _, _ = taskset.get_random_samples(10)
        # we need raw data to apply same transformation as the TransformationIncremental class
        raw_samples, _, _ = taskset.get_raw_samples(range(10))
        if task_id == 0:
            ref_data = samples
            raw_ref_data = raw_samples
        else:
            # we verify that data has changed
            assert not torch.all(ref_data.eq(samples))
            assert (raw_samples == raw_ref_data
                    ).all()  # raw data should be the same in this scenario
            # we test transformation on one data point and verify if it is applied
            trsf = list_transf[task_id][0]
            raw_sample = Image.fromarray(raw_ref_data[0].astype("uint8"))
            trsf_data = trsf(raw_sample)
            trsf_data = transforms.ToTensor()(trsf_data)
            assert torch.all(trsf_data.eq(samples[0]))
'''
Test the initialization with three tasks with degree range
'''
def test_init_range(numpy_data):
    """Constructing a scenario with degree *ranges* must not raise.

    NOTE(review): the scenario is built but never asserted on -- this is a
    construction-only smoke test.
    """
    x, y = numpy_data
    dummy = InMemoryDataset(x, y)
    Trsf_0 = []
    Trsf_1 = [transforms.RandomAffine(degrees=[40, 50])]
    Trsf_2 = [transforms.RandomAffine(degrees=[85, 95])]
    list_transf = [Trsf_0, Trsf_1, Trsf_2]
    scenario = TransformationIncremental(
        cl_dataset=dummy, incremental_transformations=list_transf
    )
@pytest.mark.parametrize("shared_label_space", [False, True])
def test_init_shared_label_space(numpy_data, shared_label_space):
    """With a shared label space every task reuses labels 0..NB_CLASSES-1;
    otherwise each task gets its own contiguous label range."""
    x, y = numpy_data
    dummy = InMemoryDataset(x, y)
    Trsf_0 = []
    Trsf_1 = [transforms.RandomAffine(degrees=[40, 50])]
    Trsf_2 = [transforms.RandomAffine(degrees=[85, 95])]
    dummy_transf = [Trsf_0, Trsf_1, Trsf_2]
    scenario = TransformationIncremental(
        cl_dataset=dummy,
        incremental_transformations=dummy_transf,
        shared_label_space=shared_label_space
    )
    for task_id, taskset in enumerate(scenario):
        # per-task class count is constant either way
        assert taskset.nb_classes == NB_CLASSES
        classes = taskset.get_classes()
        if shared_label_space:
            assert classes.max() == NB_CLASSES - 1
            assert classes.min() == 0
        else:
            # task t occupies [t*NB_CLASSES, (t+1)*NB_CLASSES - 1]
            assert classes.max() == (NB_CLASSES * (task_id + 1)) - 1
            assert classes.min() == (NB_CLASSES * task_id)
def test_get_task_transformation(numpy_data):
    """get_task_transformation must compose the task-specific transforms
    *followed by* the base transformations, in that order."""
    x, y = numpy_data
    dummy = InMemoryDataset(x, y)
    Trsf_0 = []
    Trsf_1 = [transforms.RandomAffine(degrees=[40, 50])]
    Trsf_2 = [transforms.RandomAffine(degrees=[85, 95])]
    dummy_transf = [Trsf_0, Trsf_1, Trsf_2]
    base_transformations = [
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ]
    scenario = TransformationIncremental(
        cl_dataset=dummy,
        incremental_transformations=dummy_transf,
        base_transformations=base_transformations
    )
    for task_id, taskset in enumerate(scenario):
        # first task specific transformation then base_transformation
        tot_transf_task = transforms.Compose(dummy_transf[task_id] + base_transformations)
        # we compare the str representation of the composition
        assert tot_transf_task.__repr__() == scenario.get_task_transformation(task_id).__repr__()
def test_init_fail2(numpy_data):
    """Omitting the required ``incremental_transformations`` argument must
    raise a TypeError."""
    train = numpy_data
    dummy = InMemoryDataset(*train)
    # No transformation is set
    with pytest.raises(TypeError):
        scenario = TransformationIncremental(cl_dataset=dummy)
def test_indexing():
    """Indexing the scenario by task id must yield a task set whose samples
    carry that task's discrete rotation (checked via the lit pixel)."""
    x = np.zeros((20, 2, 2, 3), dtype=np.uint8)
    x[:, 0, 0] = 1  # add a 1 on the top-left
    y = np.ones((20,), dtype=np.int32)
    dataset = InMemoryDataset(x, y)
    trsfs = [
        [_discrete_rotation(0)],
        [_discrete_rotation(1)],
        [_discrete_rotation(2)],
        [_discrete_rotation(3)],
    ]
    scenario = TransformationIncremental(
        cl_dataset=dataset,
        incremental_transformations=trsfs
    )
    for task_id in range(len(scenario)):
        task_set = scenario[task_id]
        x, _, t = task_set[0]
        _check_rotation(x, task_id)
@pytest.mark.parametrize("indexes_slice", [
    slice(0, 1), slice(0, 3),
    slice(0, 4, 2)
])
def test_advanced_indexing(indexes_slice):
    """
    This code creates dummy images of 2x2 all likewise:
    [
        1 0
        0 0
    ]
    Then we apply discrete rotations to produce the four possible variations
    (1 on the top-right, bottom-right, bottom-left in addition of the original
    top-left). We then sample multiple tasks together and check that the associated
    task label of the sample matches the rotations it was applied to.
    """
    x = np.zeros((20, 2, 2, 3), dtype=np.uint8)
    x[:, 0, 0] = 1  # add a 1 on the top-left
    y = np.ones((20,), dtype=np.int32)
    dataset = InMemoryDataset(x, y)
    trsfs = [
        [_discrete_rotation(0)],
        [_discrete_rotation(1)],
        [_discrete_rotation(2)],
        [_discrete_rotation(3)],
    ]
    scenario = TransformationIncremental(
        cl_dataset=dataset,
        incremental_transformations=trsfs
    )
    # Normalize the slice into the explicit set of task ids it selects.
    start = indexes_slice.start if indexes_slice.start is not None else 0
    stop = indexes_slice.stop if indexes_slice.stop is not None else len(scenario) + 1
    step = indexes_slice.step if indexes_slice.step is not None else 1
    task_index = set(list(range(start, stop, step)))
    task_set = scenario[indexes_slice]
    seen_tasks = set()
    for i in range(len(task_set)):
        x, _, t = task_set[i]
        # every sample's rotation must match its reported task label
        _check_rotation(x, t)
        seen_tasks.add(t)
    assert seen_tasks == task_index
def _discrete_rotation(rot):
    """Return a transform that blanks the image and lights a single pixel at
    the corner selected by ``rot`` (0..3, clockwise from top-left)."""
    positions = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}

    def _fun(x):
        row, col = positions[rot]
        canvas = np.array(x)
        canvas.fill(0)
        canvas[row, col, :] = 1
        return Image.fromarray(canvas.astype(np.uint8))
    return _fun
def _check_rotation(x, rot):
if rot == 0:
one = (0, 0)
elif rot == 1:
one = (0, 1)
elif rot == 2:
one = (1, 1)
elif rot == 3:
one = (1, 0)
else:
assert False, rot
for i in range(2):
for j in range(2):
if (i, j) == one:
v = 1
else:
v = 0
assert int(255 * x[0, i, j]) == v, (x[0, i, j], rot, (i, j), v)
|
import os
import pdb
import random
import time
import torch
from collections import OrderedDict
from options.train_options import TrainOptions
from semia.dataset import ImgDataset, TestImgDataset
from semia.model import SemIAModel
from util.util import read_image, pil2tensor, pil2np, np2tensor
from util.visualizer import Visualizer
from util.visualizer import feat_vis, vis_patch_match
if __name__ == "__main__":
    # Load configuration
    opt = TrainOptions().parse()
    # Load dataset from single image
    dataset = ImgDataset()
    dataset.initialize(opt)
    # record input_image size
    opt.width, opt.height = dataset.width, dataset.height
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, drop_last=True)
    # Load test dataset
    test_dataset = TestImgDataset()
    test_dataset.initialize(opt)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=1,
                                                  drop_last=False)
    # Create complete model
    model = SemIAModel(opt)
    visualizer = Visualizer(opt)
    total_steps = 0
    # Main training loop. ``i`` is the training-iteration index; it drives the
    # zero-reconstruction cadence, logging cadence, and the LR schedule below.
    for i, data in enumerate(dataloader):
        total_steps += 1
        start_time = time.time()
        if i % opt.zero_rec_freq == 0:
            # Zero reconstruction: using augmented image as input and condition
            mode_g = 'generator_rec'
            model.set_input(data, mode_g)
        else:
            # Sample mode: using input_image as input, tgt_image as condition
            mode_g = 'generator'
            model.set_input(data, mode_g)
        # train discriminator once before optimizing generator
        for j in range(opt.Dsteps):
            model.run_discriminator_one_step()
            # print([[d.mean() for d in p] for p in model.d_preds])
        # Record fake image before optimizing generator(same as sampling)
        if total_steps % opt.display_freq == 0:  # or total_steps % (opt.zero_rec_freq * 10) == 0:
            visuals = OrderedDict([('0_Sample/tgt_img', model.tgt_img),
                                   ('0_Sample/src_img', model.src_img),
                                   ('0_Sample/src_seg', model.src_seg),
                                   ('0_Sample/tgt_seg', model.tgt_seg),
                                   ('0_Sample/fake_sample', model.get_latest_generated())])
        # Training
        # train auxclassifier
        if opt.use_aux:
            if mode_g == 'generator_rec':
                for j in range(opt.Asteps):
                    model.run_aux_one_step()
        # train generator
        for j in range(opt.Gsteps):
            model.run_generator_one_step(mode_g=mode_g)
        # train discriminator once after optimizing generator
        for j in range(opt.Dsteps):
            model.run_discriminator_one_step()
        iter_time = time.time() - start_time
        # display sample results after optimization and features
        if total_steps % opt.display_freq == 0:  # or i % (opt.zero_rec_freq * 10) == 0:
            visuals.update({'0_Sample/fake_image': model.get_latest_generated()})
            if opt.E_use_FiLM or opt.D_use_FiLM:
                for j, feats in enumerate(model.rel_feats):
                    visuals.update({'Feats/rel_feats_ratio_{}'.format(str(j)): feat_vis(feats[0])})
                    visuals.update({'Feats/rel_feats_diff_{}'.format(str(j)): feat_vis(feats[1])})
                    visuals.update({'Feats/alphas_{}'.format(str(j)): feat_vis(feats[2])})
                    visuals.update({'Feats/betas_{}'.format(str(j)): feat_vis(feats[3])})
            else:
                # rel_feats is attn_maps
                pass
            visuals.update({'D_preds/d_preds_0_real_patch': model.real_patch})
            visuals.update({'D_preds/d_preds_1_fake_patch': model.fake_patch})
            for k, preds in enumerate(model.d_preds):
                # ``head_idx`` replaces the ambiguous single-letter ``l``.
                for head_idx, p in enumerate(preds):
                    visuals.update({'D_preds/d_preds_{}_{}'.format(str(k), str(head_idx)): feat_vis(p)})
            visualizer.display_current_results(visuals, total_steps)
        # display reconstruction results
        if i % (opt.display_freq * opt.zero_rec_freq) == 0:  # or total_steps % (opt.zero_rec_freq * 10) == 0:
            visuals = OrderedDict([('Rec/src_img', model.src_img),
                                   ('Rec/fake_sample', model.get_latest_generated())])
            visualizer.display_current_results(visuals, total_steps)
        # save patch match pairs to exp_path
        if opt.debug and total_steps % opt.vis_patch_freq == 0:
            if model.patch_vis is not None:
                vis_patch_match(data['src_img'], model.get_latest_generated(), model.patch_vis,
                                opt.exp_path, total_steps)
        # inference(evaluation) during training
        if total_steps % opt.inference_freq == 0:
            visuals = OrderedDict([('Eval/0_src_img', data['src_img'])])
            # BUG FIX: this loop previously rebound ``i``, clobbering the
            # outer training-loop index used below for the print cadence and
            # update_learning_rate(i).
            for test_idx, test_data in enumerate(test_dataloader):
                name = test_dataset.test_names[test_idx]
                model.set_input(test_data, 'inference')
                eval_image = model.evaluate()
                # print(eval_image)
                visuals.update({'Eval/0_tgt_seg/{}'.format(name): test_data['tgt_seg'],
                                'Eval/0_tgt_img/{}'.format(name): eval_image})
            visualizer.display_current_results(visuals, total_steps)
            if total_steps > opt.stable_iter:
                save_dir = os.path.join(opt.output_dir, str(total_steps))
                if not os.path.isdir(save_dir):
                    os.mkdir(save_dir)
                visualizer.save_images(save_dir, visuals, "img")
        # loss curve and console log
        if total_steps % opt.print_freq == 0 or i % (opt.print_freq * opt.zero_rec_freq) == 0:
            losses = model.get_latest_losses()
            visualizer.print_current_errors(total_steps,
                                            losses, iter_time)
            visualizer.plot_current_errors(losses, total_steps)
        if total_steps % opt.save_latest_freq == 0:
            print('saving the latest model (total_steps %d)' %
                  (total_steps))
            model.save('latest')
        if total_steps % opt.save_model_freq == 0:
            print('saving the model (total_steps %d)' %
                  (total_steps))
            model.save(total_steps)
        model.update_learning_rate(i)
    print('Training was successfully finished.')
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import rospy
import tf
import message_filters
import cv2
import numpy as np
import torch
import torch.nn as nn
import threading
import sys
from Queue import Queue
from fcn.config import cfg
from fcn.train_test import test_image
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import String
from sensor_msgs.msg import Image, CameraInfo
from transforms3d.quaternions import mat2quat, quat2mat, qmult
from scipy.optimize import minimize
from utils.blob import pad_im, chromatic_transform, add_noise
from geometry_msgs.msg import PoseStamped, PoseArray
from ycb_renderer import YCBRenderer
from utils.se3 import *
from utils.nms import nms
lock = threading.Lock()
class ImageListener:
    """Listens to synchronized RGB-D ROS topics and runs DeepIM pose refinement.

    The subscriber callback only caches the newest color/depth pair under the
    module-level ``lock``; ``run_network()`` (driven externally, possibly from
    several threads) consumes it, queries PoseCNN detections via TF, and
    publishes refined poses.
    """

    def __init__(self, network, dataset):
        self.net = network
        self.dataset = dataset
        self.cv_bridge = CvBridge()
        self.count = 0
        self.objects = []      # tracked objects: {'frame_name', 'poses': Queue, 'detected'}
        self.frame_names = []  # TF frame name of each tracked object
        self.frame_lost = []   # consecutive undetected frames per tracked object
        self.renders = dict()  # one YCBRenderer per worker thread (keyed by thread name)
        self.num_lost = 50     # drop a track after this many undetected frames
        self.queue_size = 10   # pose history length used by average_poses()

        # input buffers (written by callback_rgbd under `lock`)
        self.im = None
        self.depth = None
        self.rgb_frame_id = None

        topic_prefix = '/deepim'
        suffix = '_%02d' % (cfg.instance_id)
        prefix = '%02d_' % (cfg.instance_id)
        self.suffix = suffix
        self.prefix = prefix
        self.topic_prefix = topic_prefix

        # initialize a node
        rospy.init_node('deepim_image_listener' + suffix)
        self.br = tf.TransformBroadcaster()
        self.listener = tf.TransformListener()
        rospy.sleep(3.0)
        self.pose_pub = rospy.Publisher('deepim_pose_image' + suffix, Image, queue_size=1)

        # create pose publisher for each known object class
        self.pubs = []
        for i in range(self.dataset.num_classes):
            # strip a leading numeric id (e.g. '002_') when the 4th char is '_'
            if self.dataset.classes[i][3] == '_':
                cls = prefix + self.dataset.classes[i][4:]
            else:
                cls = prefix + self.dataset.classes[i]
            self.pubs.append(rospy.Publisher(topic_prefix + '/raw/objects/prior_pose/' + cls, PoseStamped, queue_size=1))

        if cfg.TEST.ROS_CAMERA == 'D435':
            # use RealSense D435
            rgb_sub = message_filters.Subscriber('/camera/color/image_raw', Image, queue_size=10)
            depth_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image, queue_size=10)
            msg = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
            self.target_frame = 'measured/camera_color_optical_frame'
        elif cfg.TEST.ROS_CAMERA == 'Azure':
            rgb_sub = message_filters.Subscriber('/rgb/image_raw', Image, queue_size=10)
            depth_sub = message_filters.Subscriber('/depth_to_rgb/image_raw', Image, queue_size=10)
            msg = rospy.wait_for_message('/rgb/camera_info', CameraInfo)
            self.target_frame = 'rgb_camera_link'
        else:
            # use kinect
            rgb_sub = message_filters.Subscriber('/%s/rgb/image_color' % (cfg.TEST.ROS_CAMERA), Image, queue_size=2)
            depth_sub = message_filters.Subscriber('/%s/depth_registered/image' % (cfg.TEST.ROS_CAMERA), Image, queue_size=2)
            msg = rospy.wait_for_message('/%s/rgb/camera_info' % (cfg.TEST.ROS_CAMERA), CameraInfo)
            self.target_frame = '%s_depth_optical_frame' % (cfg.TEST.ROS_CAMERA)

        # update camera intrinsics from the live camera info message
        K = np.array(msg.K).reshape(3, 3)
        self.dataset._intrinsic_matrix = K
        print(self.dataset._intrinsic_matrix)

        # pre-allocate CUDA tensors reused by test_image() on every call
        num = dataset.num_classes
        height = cfg.TRAIN.SYN_HEIGHT
        width = cfg.TRAIN.SYN_WIDTH
        input_blob_color = torch.cuda.FloatTensor(num, 6, height, width).detach()
        image_real_blob_color = torch.cuda.FloatTensor(num, 3, height, width).detach()
        image_tgt_blob_color = torch.cuda.FloatTensor(num, 3, height, width).detach()
        image_src_blob_color = torch.cuda.FloatTensor(num, 3, height, width).detach()
        input_blob_depth = torch.cuda.FloatTensor(num, 6, height, width).detach()
        image_real_blob_depth = torch.cuda.FloatTensor(num, 3, height, width).detach()
        image_tgt_blob_depth = torch.cuda.FloatTensor(num, 3, height, width).detach()
        image_src_blob_depth = torch.cuda.FloatTensor(num, 3, height, width).detach()
        affine_matrices = torch.cuda.FloatTensor(num, 2, 3).detach()
        zoom_factor = torch.cuda.FloatTensor(num, 4).detach()
        flow_blob = torch.cuda.FloatTensor(num, 2, height, width).detach()
        pcloud_tgt_cuda = torch.cuda.FloatTensor(height, width, 3).detach()
        pcloud_src_cuda = torch.cuda.FloatTensor(height, width, 3).detach()
        flow_map_cuda = torch.cuda.FloatTensor(height, width, 2).detach()
        self.test_data = {'input_blob_color': input_blob_color,
                          'image_real_blob_color': image_real_blob_color,
                          'image_tgt_blob_color': image_tgt_blob_color,
                          'image_src_blob_color': image_src_blob_color,
                          'input_blob_depth': input_blob_depth,
                          'image_real_blob_depth': image_real_blob_depth,
                          'image_tgt_blob_depth': image_tgt_blob_depth,
                          'image_src_blob_depth': image_src_blob_depth,
                          'affine_matrices': affine_matrices,
                          'zoom_factor': zoom_factor,
                          'flow_blob': flow_blob,
                          'pcloud_tgt_cuda': pcloud_tgt_cuda,
                          'pcloud_src_cuda': pcloud_src_cuda,
                          'flow_map_cuda': flow_map_cuda}

        # approximate time sync of RGB and depth (0.1 s slop)
        queue_size = 1
        slop_seconds = 0.1
        ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub], queue_size, slop_seconds)
        ts.registerCallback(self.callback_rgbd)
    # callback to save images
    def callback_rgbd(self, rgb, depth):
        """Cache the latest synchronized RGB and depth frames.

        Depth is normalized to float meters; unsupported encodings are logged
        (throttled) and the frame is dropped.
        """
        if depth.encoding == '32FC1':
            depth_cv = self.cv_bridge.imgmsg_to_cv2(depth)
        elif depth.encoding == '16UC1':
            # millimeters -> meters
            depth_cv = self.cv_bridge.imgmsg_to_cv2(depth).copy().astype(np.float32)
            depth_cv /= 1000.0
        else:
            rospy.logerr_throttle(
                1, 'Unsupported depth type. Expected 16UC1 or 32FC1, got {}'.format(
                    depth.encoding))
            return
        im = self.cv_bridge.imgmsg_to_cv2(rgb, 'bgr8')
        # copy under the lock so run_network() never sees a half-written frame
        with lock:
            self.im = im.copy()
            self.depth = depth_cv.copy()
            self.rgb_frame_id = rgb.header.frame_id
    def average_poses(self):
        """Average each tracked object's recent pose history.

        :returns: ``(poses, flags)`` -- ``poses`` is (num, 9) float32 with the
            row layout of query_posecnn_detection(): cols 0/1 copied from the
            oldest queue entry, cols 2:6 the averaged quaternion (w, x, y, z),
            cols 6: the mean translation. ``flags[i]`` is 1 iff object ``i``
            was detected in the current frame.
        """
        num = len(self.objects)
        poses = np.zeros((num, 9), dtype=np.float32)
        flags = np.zeros((num, ), dtype=np.int32)
        for i in range(num):
            # Queue.queue is the underlying deque; snapshot without consuming
            plist = list(self.objects[i]['poses'].queue)
            n = len(plist)
            quaternions = np.zeros((n, 4), dtype=np.float32)
            translations = np.zeros((n, 3), dtype=np.float32)
            for j in range(n):
                quaternions[j, :] = plist[j][2:6]
                translations[j, :] = plist[j][6:]
            poses[i, 0] = plist[0][0]
            poses[i, 1] = plist[0][1]
            # averageQuaternions -- presumably from the `utils.se3` star
            # import; TODO confirm
            poses[i, 2:6] = averageQuaternions(quaternions)
            poses[i, 6:] = np.mean(translations, axis=0)
            if self.objects[i]['detected']:
                flags[i] = 1
        return poses, flags
    # find posecnn pose estimation results
    def query_posecnn_detection(self):
        """Collect PoseCNN detections published on TF.

        For every class and up to ``max_objects`` instances, two TF frames are
        looked up: ``<frame>_roi`` (2D box packed into the transform) and
        ``<frame>`` (6D pose).

        :returns: ``(frame_names, frame_lost, rois_est, poses_est)`` --
            ``rois_est`` is (n, 7) ``[0, class, x1, y1, x2, y2, trans[2]]``
            (the last column is presumably a score used by nms -- verify),
            ``poses_est`` is (n, 9) ``[0, class, quat w x y z, trans x y z]``.
        """
        # detection information of the target object
        frame_names = []
        frame_lost = []
        rois_est = np.zeros((0, 7), dtype=np.float32)
        poses_est = np.zeros((0, 9), dtype=np.float32)
        # look for multiple object instances
        max_objects = 5
        for i in range(self.dataset.num_classes):
            # check posecnn frame
            if self.dataset.classes[i][3] == '_':
                source_frame_base = 'posecnn/' + self.prefix + self.dataset.classes[i][4:]
            else:
                source_frame_base = 'posecnn/' + self.prefix + self.dataset.classes[i]
            for object_id in range(max_objects):
                # check posecnn frame
                suffix_frame = '_%02d' % (object_id)
                source_frame = source_frame_base + suffix_frame
                try:
                    # detection: the '_roi' transform packs the box as
                    # rot[:4] * trans[0]; trans[1] carries the publish time in
                    # seconds and is used for the staleness check below
                    trans, rot = self.listener.lookupTransform(self.target_frame, source_frame + '_roi', rospy.Time(0))
                    n = trans[0]
                    secs = trans[1]
                    now = rospy.Time.now()
                    if abs(now.secs - secs) > 1.0:
                        print 'posecnn pose for %s time out %f %f' % (source_frame, now.secs, secs)
                        continue
                    roi = np.zeros((1, 7), dtype=np.float32)
                    roi[0, 0] = 0
                    roi[0, 1] = i
                    roi[0, 2] = rot[0] * n
                    roi[0, 3] = rot[1] * n
                    roi[0, 4] = rot[2] * n
                    roi[0, 5] = rot[3] * n
                    roi[0, 6] = trans[2]
                    rois_est = np.concatenate((rois_est, roi), axis=0)
                    # pose: quaternion reordered from TF's (x, y, z, w)
                    # to (w, x, y, z)
                    trans, rot = self.listener.lookupTransform(self.target_frame, source_frame, rospy.Time(0))
                    pose = np.zeros((1, 9), dtype=np.float32)
                    pose[0, 0] = 0
                    pose[0, 1] = i
                    pose[0, 2] = rot[3]
                    pose[0, 3] = rot[0]
                    pose[0, 4] = rot[1]
                    pose[0, 5] = rot[2]
                    pose[0, 6:] = trans
                    poses_est = np.concatenate((poses_est, pose), axis=0)
                    frame_names.append(source_frame)
                    frame_lost.append(0)
                    print('find posecnn detection ' + source_frame)
                except:
                    # frame not (yet) on TF: this instance is not detected
                    continue
        if rois_est.shape[0] > 0:
            # non-maximum suppression within class
            index = nms(rois_est, 0.2)
            rois_est = rois_est[index, :]
            poses_est = poses_est[index, :]
            frame_names = [frame_names[i] for i in index]
            frame_lost = [frame_lost[i] for i in index]
        return frame_names, frame_lost, rois_est, poses_est
# run deepim
def run_network(self):
with lock:
if self.im is None:
return
im = self.im.copy()
depth_cv = self.depth.copy()
rgb_frame_id = self.rgb_frame_id
thread_name = threading.current_thread().name
if not thread_name in self.renders:
print(thread_name)
self.renders[thread_name] = YCBRenderer(width=cfg.TRAIN.SYN_WIDTH, height=cfg.TRAIN.SYN_HEIGHT, gpu_id=cfg.gpu_id, render_marker=False)
self.renders[thread_name].load_objects(self.dataset.model_mesh_paths_target,
self.dataset.model_texture_paths_target,
self.dataset.model_colors_target)
self.renders[thread_name].set_camera_default()
self.renders[thread_name].set_light_pos([0, 0, 0])
self.renders[thread_name].set_light_color([1, 1, 1])
print self.dataset.model_mesh_paths_target
cfg.renderer = self.renders[thread_name]
# check the posecnn pose
frame_names, frame_lost, rois_est, poses_est = self.query_posecnn_detection()
# cannot initialize
if len(self.objects) == 0 and poses_est.shape[0] == 0:
return
# initialization
if len(self.objects) == 0:
self.frame_names = frame_names
self.frame_lost = frame_lost
self.objects = []
for i in range(poses_est.shape[0]):
obj = {'frame_name': frame_names[i], 'poses': Queue(maxsize=self.queue_size), 'detected': True}
obj['poses'].put(poses_est[i, :])
self.objects.append(obj)
else:
# match detection and tracking (simple version)
# for each detected objects
flags_detection = np.zeros((len(frame_names), ), dtype=np.int32)
flags_tracking = np.zeros((len(self.frame_names), ), dtype=np.int32)
for i in range(len(frame_names)):
for j in range(len(self.frame_names)):
if frame_names[i] == self.frame_names[j]:
# data associated
flags_detection[i] = 1
flags_tracking[j] = 1
self.objects[j]['detected'] = True
break
# undetected
index = np.where(flags_tracking == 0)[0]
index_remove = []
for i in range(len(index)):
ind = index[i]
self.frame_lost[ind] += 1
self.objects[ind]['detected'] = False
if self.frame_lost[ind] >= self.num_lost:
index_remove.append(ind)
# remove item
num = len(self.frame_names)
if len(index_remove) > 0:
self.frame_names = [self.frame_names[i] for i in range(num) if i not in index_remove]
self.frame_lost = [self.frame_lost[i] for i in range(num) if i not in index_remove]
self.objects = [self.objects[i] for i in range(num) if i not in index_remove]
# add new object to track
ind = np.where(flags_detection == 0)[0]
if len(ind) > 0:
for i in range(len(ind)):
self.frame_names.append(frame_names[ind[i]])
self.frame_lost.append(0)
obj = {'frame_name': frame_names[i], 'poses': Queue(maxsize=self.queue_size), 'detected': True}
obj['poses'].put(poses_est[ind[i], :])
self.objects.append(obj)
if len(self.objects) == 0:
return
# run network
poses, flags = self.average_poses()
# only refine pose for detected objects
index = np.where(flags == 1)[0]
if len(index) == 0:
return
poses_input = poses[index, :]
im_pose_color, pose_result = test_image(self.net, self.dataset, im, depth_cv, poses_input, self.test_data)
pose_msg = self.cv_bridge.cv2_to_imgmsg(im_pose_color)
pose_msg.header.stamp = rospy.Time.now()
pose_msg.header.frame_id = rgb_frame_id
pose_msg.encoding = 'rgb8'
self.pose_pub.publish(pose_msg)
points = self.dataset._points_all
intrinsic_matrix = self.dataset._intrinsic_matrix
# add poses to queue
poses = pose_result['poses_est'][-1]
for i in range(poses.shape[0]):
ind = index[i]
if self.objects[ind]['poses'].full():
self.objects[ind]['poses'].get()
self.objects[ind]['poses'].put(poses[i, :])
poses, flags = self.average_poses()
# poses
for i in range(poses.shape[0]):
cls = int(poses[i, 1])
if cls >= 0:
quat = [poses[i, 3], poses[i, 4], poses[i, 5], poses[i, 2]]
name = self.frame_names[i].replace('posecnn', 'deepim')
print self.dataset.classes[cls], name
self.br.sendTransform(poses[i, 6:], quat, rospy.Time.now(), name, self.target_frame)
# create pose msg
msg = PoseStamped()
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = self.target_frame
msg.pose.orientation.x = poses[i, 3]
msg.pose.orientation.y = poses[i, 4]
msg.pose.orientation.z = poses[i, 5]
msg.pose.orientation.w = poses[i, 2]
msg.pose.position.x = poses[i, 6]
msg.pose.position.y = poses[i, 7]
msg.pose.position.z = poses[i, 8]
pub = self.pubs[cls]
pub.publish(msg)
#'''
# reinitialization if necessary
if poses_est.shape[0] > 0:
# extract 3D points
x3d = np.ones((4, points.shape[1]), dtype=np.float32)
x3d[0, :] = points[cls,:,0]
x3d[1, :] = points[cls,:,1]
x3d[2, :] = points[cls,:,2]
# projection 1
RT = np.zeros((3, 4), dtype=np.float32)
RT[:3, :3] = quat2mat(poses[i, 2:6])
RT[:, 3] = poses[i, 6:]
x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
x = np.divide(x2d[0, :], x2d[2, :])
y = np.divide(x2d[1, :], x2d[2, :])
x1 = np.min(x)
y1 = np.min(y)
x2 = np.max(x)
y2 = np.max(y)
area = (x2 - x1 + 1) * (y2 - y1 + 1)
# posecnn roi
ind = np.where(rois_est[:, 1] == cls)[0]
if len(ind) > 0:
x1_p = rois_est[ind, 2]
y1_p = rois_est[ind, 3]
x2_p = rois_est[ind, 4]
y2_p = rois_est[ind, 5]
area_p = (x2_p - x1_p + 1) * (y2_p - y1_p + 1)
# compute overlap
xx1 = np.maximum(x1, x1_p)
yy1 = np.maximum(y1, y1_p)
xx2 = np.minimum(x2, x2_p)
yy2 = np.minimum(y2, y2_p)
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
overlap = inter / (area + area_p - inter)
max_overlap = np.max(overlap)
max_ind = np.argmax(overlap)
print('overlap with posecnn box %.2f' % (max_overlap))
if max_overlap < 0.4:
self.objects[i]['poses'].queue.clear()
self.objects[i]['poses'].put(poses_est[ind[max_ind], :].flatten())
print('===================================reinitialize=======================================')
|
# Repository: tahmidbintaslim/screenlamp
# <NAME> 2017
#
# screenlamp is a Python toolkit
# for hypothesis-driven virtual screening.
#
# Copyright (C) 2017 Michigan State University
# License: Apache v2
#
# Software author: <NAME> <http://sebastianraschka.com>
# Software author email: <EMAIL>
#
# Software source repository: https://github.com/rasbt/screenlamp
# Documentation: https://psa-lab.github.io/screenlamp
#
# screenlamp was developed in the
# Protein Structural Analysis & Design Laboratory
# (http://www.kuhnlab.bmb.msu.edu)
#
# If you are using screenlamp in your research, please cite
# the following journal article:
#
# Raschka, Sebastian, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>,
# and <NAME>. 2017
#
# Enabling the hypothesis-driven prioritization of
# ligand candidates in big databases:
# Screenlamp and its application to GPCR inhibitor
# discovery for invasive species control.
#
import subprocess
import os
import argparse
import yaml
###############################################################################
# Command-line interface.
parser = argparse.ArgumentParser(
    description='An example screenlamp pipeline ... [placeholder].',
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', '--config_file',
                    type=str,
                    required=True,
                    help='Path to the pipeline configuration file')
parser.add_argument('-s', '--start_at_step',
                    type=int,
                    required=False,
                    default=0,
                    help='Start the pipeline at a particular step')
parser.add_argument('-i', '--incremental',
                    type=str,
                    required=False,
                    default='false',
                    help='incremental mode. If enabled, stops before each step'
                         ' to ask the user to continue')

args = parser.parse_args()
start_at = args.start_at_step
config_path = args.config_file

print(args.incremental)
if args.incremental.lower() not in {'true', 'false'}:
    raise AttributeError('incremental must be true or false')
# Bug fix: compare the lower-cased value. Previously '-i True' passed the
# validation above (which lower-cases) but was compared verbatim against
# 'true', silently disabling incremental mode.
incremental = args.incremental.lower() == 'true'
# Load the pipeline configuration. safe_load avoids constructing arbitrary
# Python objects from YAML tags (yaml.load without an explicit Loader is
# deprecated and rejected by modern PyYAML).
with open(config_path, 'r') as stream:
    ymldct = yaml.safe_load(stream)

PROJECT_PATH = ymldct['general settings']['project output directory']
SCREENLAMP_TOOLS_DIR = ymldct['general settings']['screenlamp tools directory']
INPUT_MOL2_PATH = ymldct['general settings']['input mol2 directory']
N_CPUS = str(ymldct['general settings']['number of cpus'])
DATATABLE_PATH = ymldct['molecule property filter settings']['datatable path']
DATATABLE_FILTER = ymldct['molecule property filter settings']['column filter']
FUNCTIONAL_GROUP_PRESENCE = ymldct[
    'functional group presence filter settings']['selection key']
FUNCTIONAL_GROUP_DISTANCE_SELECTION = ymldct[
    'functional group distance filter settings']['selection key']
# str() so numeric YAML values survive ' '.join(cmd) / the subprocess argv
# (consistent with N_CPUS and FGROUP_MATCH_DISTANCE below)
FUNCTIONAL_GROUP_DISTANCE = str(ymldct[
    'functional group distance filter settings']['distance'])
OMEGA_EXECUTABLE = ymldct['OMEGA settings']['OMEGA executable']
ROCS_EXECUTABLE = ymldct['ROCS settings']['ROCS executable']
ROCS_RANKBY = ymldct['ROCS settings']['ROCS run rankby']
ROCS_SORTBY = ymldct['ROCS settings']['ROCS results sort by']
ROCS_THRESHOLD = str(ymldct['ROCS settings']['ROCS score threshold'])
QUERY_PATH = ymldct['ROCS settings']['query molecule path']
FGROUP_MATCH_DISTANCE = str(ymldct['functional group matching '
                                   'selection settings'][
                                       'maximum pairwise atom distance'])
# NOTE(review): the key above reads 'functional group matching selection
# settings' while the keys below read 'functional group match selection
# settings' -- confirm which spelling the shipped config files actually use.
WRITE_MATCH_OVERLAYS = False
if ymldct['functional group match selection settings']['write mol2 files'] in (
        'true', True):
    WRITE_MATCH_OVERLAYS = True
FGROUP_ATOMTYPE = ymldct['functional group match selection settings'][
    'atomtype selection keys']
FGROUP_CHARGE = ymldct['functional group match selection settings'][
    'charge selection keys']

if not os.path.exists(PROJECT_PATH):
    os.makedirs(PROJECT_PATH)
###############################################################################
# Step 00: report how many molecules are in the raw input directory.
if start_at <= 0:
    s = """
################################################
COUNT MOLECULES IN DATATABLE_PATH
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'count_mol2.py'),
           '--input', INPUT_MOL2_PATH]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 01: select molecule IDs from the data table via the configured column
# filter, copy the matching mol2 structures, and count them.
if start_at <= 1:
    s = """
################################################
Step 01: SELECT MOLECULES FROM DATA TABLE
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'datatable_to_id.py'),
           '--input', DATATABLE_PATH,
           '--output', os.path.join(PROJECT_PATH, '01_ids_from_database.txt'),
           '--id_column', 'ZINC_ID',
           '--selection', DATATABLE_FILTER]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\n')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'id_to_mol2.py'),
           '--input', INPUT_MOL2_PATH,
           '--id_file', os.path.join(PROJECT_PATH, '01_ids_from_database.txt'),
           '--output', os.path.join(PROJECT_PATH, '01_selected-mol2s'),
           '--includelist', 'True']
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\nSELECTED MOL2s:')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'count_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '01_selected-mol2s')]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 02: keep only molecules containing the configured functional groups,
# copy the matching mol2 files, and count them.
if start_at <= 2:
    s = """
################################################
Step 02: PREFILTER BY FUNCTIONAL GROUP PRESENCE
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR,
                                  'funcgroup_presence_to_id.py'),
           '--input', os.path.join(PROJECT_PATH, '01_selected-mol2s'),
           '--output', os.path.join(PROJECT_PATH,
                                    '02_fgroup-presence_mol2ids.txt'),
           '--selection', FUNCTIONAL_GROUP_PRESENCE,
           '--processes', N_CPUS]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\n')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'id_to_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '01_selected-mol2s'),
           '--id_file', os.path.join(PROJECT_PATH,
                                     '02_fgroup-presence_mol2ids.txt'),
           '--output', os.path.join(PROJECT_PATH, '02_fgroup-presence_mol2s'),
           '--includelist', 'True']
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\nSELECTED MOL2s:')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'count_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '02_fgroup-presence_mol2s')]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 03: keep only molecules whose functional groups satisfy the configured
# pairwise distance constraint, copy the matching mol2 files, and count them.
if start_at <= 3:
    s = """
################################################
Step 03: PREFILTER BY FUNCTIONAL GROUP DISTANCE
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR,
                                  'funcgroup_distance_to_id.py'),
           '--input', os.path.join(PROJECT_PATH, '02_fgroup-presence_mol2s'),
           '--output', os.path.join(PROJECT_PATH,
                                    '03_fgroup_distance_mol2ids.txt'),
           '--selection', FUNCTIONAL_GROUP_DISTANCE_SELECTION,
           '--distance', FUNCTIONAL_GROUP_DISTANCE,
           '--processes', N_CPUS]
    # Consistency fix: every other step echoes its command before running it;
    # this first call previously did not.
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\n')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'id_to_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '02_fgroup-presence_mol2s'),
           '--id_file', os.path.join(PROJECT_PATH,
                                     '03_fgroup_distance_mol2ids.txt'),
           '--output', os.path.join(PROJECT_PATH,
                                    '03_fgroup_distance_mol2s'),
           '--includelist', 'True']
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\nSELECTED MOL2s:')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'count_mol2.py'),
           '--input', os.path.join(PROJECT_PATH,
                                   '03_fgroup_distance_mol2s')]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 04: generate multi-conformer versions of the filtered molecules with
# OpenEye OMEGA, then count the output.
if start_at <= 4:
    s = """
################################################
Step 04: OMEGA conformers
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'generate_conformers_omega.py'),
           '--input', os.path.join(PROJECT_PATH,
                                   '03_fgroup_distance_mol2s'),
           '--output', os.path.join(PROJECT_PATH, '04_omega_conformers'),
           '--executable', OMEGA_EXECUTABLE,
           '--processes', N_CPUS]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    print('\n\nSELECTED MOL2s:')
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'count_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '04_omega_conformers')]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 05: overlay the conformers onto the query molecule with ROCS and count
# the resulting overlay files.
if start_at <= 5:
    s = """
################################################
Step 05: ROCS OVERLAYS
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'overlay_molecules_rocs.py'),
           '--input', os.path.join(PROJECT_PATH, '04_omega_conformers'),
           '--output', os.path.join(PROJECT_PATH, '05_rocs_overlays'),
           '--executable', ROCS_EXECUTABLE,
           '--query', QUERY_PATH,
           '--settings', ('-rankby %s -maxhits 0'
                          ' -besthits 0 -progress percent' %
                          ROCS_RANKBY),
           '--processes', N_CPUS]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'count_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '05_rocs_overlays')]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 06: sort the ROCS overlays by the configured score and apply the score
# threshold.
if start_at <= 6:
    s = """
################################################
Step 06: SORT ROCS OVERLAYS
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR, 'sort_rocs_mol2.py'),
           '--input', os.path.join(PROJECT_PATH, '05_rocs_overlays'),
           '--output', os.path.join(PROJECT_PATH, '06_rocs_overlays_sorted'),
           '--query', QUERY_PATH,
           '--sortby', ROCS_SORTBY,
           '--selection', ROCS_THRESHOLD]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 07: match functional groups between the query and each overlay within
# the configured maximum pairwise atom distance.
if start_at <= 7:
    s = """
################################################
Step 07: MATCHING FUNCTIONAL GROUPS
################################################
"""
    print(s)
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR,
                                  'funcgroup_matching.py'),
           '--input', os.path.join(PROJECT_PATH, '06_rocs_overlays_sorted'),
           '--output', os.path.join(PROJECT_PATH, '07_funcgroup_matching'),
           '--max_distance', FGROUP_MATCH_DISTANCE,
           '--processes', N_CPUS]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
###############################################################################
# Step 08: select matches by atom type / charge; optionally write the matched
# overlay mol2 files (in_path empty disables writing).
if start_at <= 8:
    s = """
################################################
Step 08: SELECTING FUNCTIONAL GROUP MATCHES
################################################
"""
    print(s)
    if WRITE_MATCH_OVERLAYS:
        in_path = os.path.join(PROJECT_PATH, '06_rocs_overlays_sorted')
    else:
        in_path = ''
    cmd = ['python', os.path.join(SCREENLAMP_TOOLS_DIR,
                                  'funcgroup_matching_selection.py'),
           '--input', os.path.join(PROJECT_PATH, '07_funcgroup_matching'),
           '--output', os.path.join(PROJECT_PATH, '08_funcgroup_selection'),
           '--atomtype_selection', FGROUP_ATOMTYPE,
           '--charge_selection', FGROUP_CHARGE,
           '--input_mol2', in_path]
    print('Running command:\n%s\n' % ' '.join(cmd))
    if incremental:
        input('Press Enter to proceed or CTRL+C to quit')
    subprocess.call(cmd)
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
yum install python3-devel
pip3 install psutil prometheus_client pyyaml
*/1 * * * * /usr/bin/python3 /opt/monit/linux_proc.py
"""
import sys,os,socket,psutil,yaml,datetime,urllib
from collections import Counter
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
cur_path = os.path.dirname(os.path.realpath(__file__))
yaml_path = os.path.join(cur_path, "linux_proc.yaml")

# Registration mode: `linux_proc.py <pid>` records the process's executable,
# last cmdline argument and working directory in linux_proc.yaml, then exits.
if len(sys.argv) == 2:
    print(f'pid:{sys.argv[1]}')
    ps = psutil.Process(int(sys.argv[1]))
    iexe = ps.cmdline()[0]
    iparam = ps.cmdline()[-1]
    icwd = ps.cwd()
    psdict = {'iexe': iexe, 'iparam': iparam, 'icwd': icwd}
    if not os.path.exists(yaml_path):
        # First registration: derive the instance id from the cloud metadata
        # endpoint, falling back to "<hostname>_<local ip>".
        try:
            # Bug fix: the top-level `import urllib` does not provide
            # urllib.request; the AttributeError was silently swallowed and
            # the metadata endpoint was never queried.
            import urllib.request
            res = urllib.request.urlopen('http://100.100.100.200/latest/meta-data/instance-id', timeout=1)
            iid = res.read().decode('utf-8')
        except Exception:
            # hostname + the local IP chosen by a UDP "connect" (no traffic sent)
            iid = f"{socket.gethostname()}_{[(s.connect(('114.114.114.114', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]}"
        cfg = {'instance': iid, 'apps': [psdict]}
    else:
        with open(yaml_path, 'r') as fy:
            cfg = yaml.load(fy, Loader=yaml.FullLoader)
        cfg['apps'].append(psdict)
    with open(yaml_path, 'w+') as fw:
        yaml.dump(cfg, fw)
    sys.exit()
# Monitoring mode: load the registered apps.
with open(yaml_path, 'r') as fy:
    cfg = yaml.load(fy, Loader=yaml.FullLoader)

# Refresh the instance id once a day (config file older than 86400 s).
if datetime.datetime.now().timestamp() - os.path.getmtime(yaml_path) > 86400:
    try:
        # Bug fix: same missing `urllib.request` import as in the
        # registration branch above.
        import urllib.request
        res = urllib.request.urlopen('http://100.100.100.200/latest/meta-data/instance-id', timeout=1)
        iid = res.read().decode('utf-8')
    except Exception:
        iid = f"{socket.gethostname()}_{[(s.connect(('172.16.31.10', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]}"
    cfg['instance'] = iid
    with open(yaml_path, 'w') as fw:
        yaml.dump(cfg, fw)
    print('update:' + yaml_path)
print(cfg)
# Prometheus registry and the static gauges.
REGISTRY = CollectorRegistry(auto_describe=False)
linux_proc_error = Gauge('linux_proc_error', "LINUX_进程异常指标",
                         ["instance", "A00_iid", "iexe", "iparam", "icwd"],
                         registry=REGISTRY)
linux_proc_info_list = ["instance", "A00_iid", "iexe", "iparam", "icwd",
                        "pid", "name", "status", "is_running", "exe",
                        "cmdline", "parent", "username", "port"]
linux_proc_info = Gauge("linux_proc_info", "LINUX_进程信息指标",
                        linux_proc_info_list, registry=REGISTRY)
# Per-process samples collected below: metric name -> {pid: value}.
metric_list = ["io_read_count","io_write_count","io_read_bytes","io_write_bytes","cpu_user","cpu_system","cpu_children_user","cpu_children_system","cpu_iowait","memory_rss","memory_vms","memory_shared","memory_swap","memory_text","memory_data","num_open_files","num_fds_limit","num_fds","cpu_num","num_threads","num_children","cpu_percent","memory_percent","durn"]
metric_dict = {metric_name: {} for metric_name in metric_list}
instance = cfg['instance']
A00_iid = cfg['instance']
inum = 0  # number of registered apps found running
cpu_count = psutil.cpu_count()
# Collect metrics for every registered app.
for app in cfg['apps']:
    iexe = app['iexe']
    iparam = app['iparam']
    icwd = app['icwd']
    # a process matches when cwd, executable and the recorded argument agree
    proc_app = [i for i in psutil.process_iter() if icwd == i.cwd() and iparam in i.cmdline() and iexe in i.cmdline()]
    if len(proc_app) >= 1:
        inum = inum + 1
        if len(proc_app) > 1:
            # several candidates: prefer the one directly parented by init
            pids = [i for i in proc_app if i.ppid() == 1]
            if len(pids) >= 1:
                appinfo = pids[0]
            else:
                # otherwise take the most common parent pid among candidates
                app_pid = Counter([i.ppid() for i in proc_app]).most_common(1)[0][0]
                appinfo = psutil.Process(app_pid)
                # NOTE(review): reconstructed into this branch -- app_pid is
                # only defined here; confirm against the original indentation
                print(iexe, iparam, 'ppid:', app_pid)
            if appinfo in proc_app:
                pass
            else:
                # multiple processes and the chosen parent is not in the
                # candidate list; fall back to monitoring the first match
                appinfo = proc_app[0]
            #linux_proc_error.labels(instance, A00_iid, iexe, iparam, icwd).set(len(proc_app))
            #continue
        else:
            appinfo = proc_app[0]
        # static/process-identity info gauge (value is always 1)
        pid = appinfo.pid
        name = appinfo.name()
        status = appinfo.status()
        is_running = appinfo.is_running()
        exe = appinfo.exe()
        cmdline = ' '.join(appinfo.cmdline())
        parent = f'{appinfo.parent().pid}/{appinfo.parent().name()}'
        durn = datetime.datetime.now().timestamp() - appinfo.create_time()
        username = appinfo.username()
        connections = appinfo.connections('all')
        port = '/'.join(sorted([f'{x.laddr.port}' for x in connections if x.status == 'LISTEN'], key=int))
        linux_proc_info.labels(instance, A00_iid, iexe, iparam, icwd, pid, name, status, is_running, exe, cmdline, parent, username, port).set(1)
        # numeric per-pid samples (cpu_iowait / memory_full_info / rlimit are
        # Linux-specific and may need elevated privileges -- see psutil docs)
        io_counters = appinfo.io_counters()
        metric_dict["io_read_count"][pid] = io_counters.read_count
        metric_dict["io_write_count"][pid] = io_counters.write_count
        metric_dict["io_read_bytes"][pid] = io_counters.read_bytes
        metric_dict["io_write_bytes"][pid] = io_counters.write_bytes
        cpu_times = appinfo.cpu_times()
        metric_dict["cpu_user"][pid] = cpu_times.user
        metric_dict["cpu_system"][pid] = cpu_times.system
        metric_dict["cpu_children_user"][pid] = cpu_times.children_user
        metric_dict["cpu_children_system"][pid] = cpu_times.children_system
        metric_dict["cpu_iowait"][pid] = cpu_times.iowait
        memory_info = appinfo.memory_full_info()
        metric_dict["memory_rss"][pid] = memory_info.rss
        metric_dict["memory_vms"][pid] = memory_info.vms
        metric_dict["memory_shared"][pid] = memory_info.shared
        metric_dict["memory_swap"][pid] = memory_info.swap
        metric_dict["memory_text"][pid] = memory_info.text
        metric_dict["memory_data"][pid] = memory_info.data
        metric_dict["num_open_files"][pid] = len(appinfo.open_files())
        metric_dict["num_fds_limit"][pid] = appinfo.rlimit(psutil.RLIMIT_NOFILE)[0]
        metric_dict["num_fds"][pid] = appinfo.num_fds()
        metric_dict["cpu_num"][pid] = appinfo.cpu_num()
        metric_dict["num_threads"][pid] = appinfo.num_threads()
        metric_dict["num_children"][pid] = len(appinfo.children())
        # blocking 1 s sampling window per app
        metric_dict["cpu_percent"][pid] = appinfo.cpu_percent(interval=1)
        #metric_dict["cpu_total_percent"][pid] = round(metric_dict["cpu_percent"][pid] / (cpu_count * 100),2) * 100
        metric_dict["memory_percent"][pid] = appinfo.memory_percent()
        metric_dict["durn"][pid] = datetime.datetime.now().timestamp() - appinfo.create_time()
        # one conn_<state> metric per observed connection state
        connections_sum = Counter([con.status for con in connections])
        for k, v in connections_sum.items():
            if f'conn_{k.lower()}' not in metric_dict:
                metric_dict[f'conn_{k.lower()}'] = {pid: v}
            else:
                metric_dict[f'conn_{k.lower()}'][pid] = v
    else:
        # registered app not running: report the error gauge (value 0)
        linux_proc_error.labels(instance, A00_iid, iexe, iparam, icwd).set(len(proc_app))
#print(inum, metric_dict)
if inum != 0:
    # one dynamic gauge per collected metric, labelled by pid
    for mk, mv in metric_dict.items():
        linux_proc_metric = Gauge(f'linux_proc_{mk}', f"LINUX_进程指标:{mk}", ["instance", "A00_iid", "pid"], registry=REGISTRY)
        for ik, iv in mv.items():
            linux_proc_metric.labels(instance, A00_iid, ik).set(iv)
# NOTE(review): push reconstructed at top level so error gauges are delivered
# even when no app matched -- confirm against the original indentation.
push_to_gateway('172.23.0.83:9091', job='push_linux_proc', grouping_key={'instance': instance}, registry=REGISTRY)
|
"""
Provide the meta model for Asset Administration Shell V3.0 Release Candidate 2.
We could not implement the following constraints since they depend on the
registry and cannot be verified without it:
* :constraintref:`AASd-006`
* :constraintref:`AASd-007`
Some of the constraints are not enforceable as they depend on the wider context
such as language understanding, so we could not formalize them:
* :constraintref:`AASd-012`
We could not formalize the constraints which prescribed how to deal with
the default values as the semantic of the default values has not been defined
in the meta-model:
* :constraintref:`AASd-115`
"""
from enum import Enum
from re import match
from typing import List, Optional
from icontract import invariant, DBC
from aas_core_meta.marker import (
abstract,
serialization,
implementation_specific,
reference_in_the_book,
is_superset_of,
verification,
)
__book_url__ = (
"https://plattform-i40.coyocloud.com/files/"
"442df5bf-72e7-4ad4-ade1-4ddee70dd392/4d299251-a723-4858-afe7-9f656af07bcb/"
"DetailsOfTheAssetAdministrationShell_Part1_"
"V3%200RC02_EN%20Working%20Version%20docx"
)
__book_version__ = "V3.0RC02"
# region Verification
@verification
def matches_xs_date_time_stamp_utc(text: str) -> bool:
    """
    Check whether :paramref:`text` follows the ``xs:dateTimeStamp`` pattern
    with the time zone fixed to UTC (a literal ``Z``).
    Only the lexical pattern is verified; day-of-month validity and leap
    seconds are *not* checked here.
    See: https://www.w3.org/TR/xmlschema11-2/#dateTimeStamp
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    d = "[0-9]"
    year = "-?(([1-9]" + d + d + d + "+)|(0" + d + d + d + "))"
    month = "((0[1-9])|(1[0-2]))"
    day = "((0[1-9])|([12]" + d + ")|(3[01]))"
    hour = "(([01]" + d + ")|(2[0-3]))"
    minute = "[0-5]" + d
    second = "([0-5]" + d + ")(\\." + d + "+)?"
    midnight = "24:00:00(\\.0+)?"
    pattern = (
        "^" + year + "-" + month + "-" + day
        + "T"
        + "((" + hour + ":" + minute + ":" + second + ")|" + midnight + ")"
        + "Z$"
    )
    return match(pattern, text) is not None
# noinspection PyUnusedLocal
@verification
@implementation_specific
def is_xs_date_time_stamp_utc(text: str) -> bool:
    """
    Check that :paramref:`text` is a ``xs:dateTimeStamp`` with time zone set to UTC.
    The ``text`` is assumed to match a pre-defined pattern for ``xs:dateTimeStamp`` with
    the time zone set to UTC. In this function, we check for days of month (*e.g.*,
    February 29th).
    See: https://www.w3.org/TR/xmlschema11-2/#dateTimeStamp
    :param text: Text to be checked
    :returns: True if the :paramref:`text` is a valid ``xs:dateTimeStamp`` in UTC
    """
    # Implementation-specific stub: each downstream implementation supplies
    # the actual calendar check; the meta-model only defines the contract.
    raise NotImplementedError()
@verification
def matches_MIME_type(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of MIME type.
    The definition has been taken from:
    https://www.rfc-editor.org/rfc/rfc7231#section-3.1.1.1,
    https://www.rfc-editor.org/rfc/rfc7230#section-3.2.3 and
    https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6.
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    tchar = "[!#$%&'*+\\-.^_`|~0-9a-zA-Z]"
    token = f"({tchar})+"
    # Renamed from ``type`` so that the built-in ``type`` is not shadowed.
    media_type_name = f"{token}"
    media_subtype_name = f"{token}"
    ows = "[ \t]*"
    obs_text = "[\\x80-\\xff]"
    qd_text = f"([\t !#-\\[\\]-~]|{obs_text})"
    quoted_pair = f"\\\\([\t !-~]|{obs_text})"
    quoted_string = f'"({qd_text}|{quoted_pair})*"'
    parameter = f"{token}=({token}|{quoted_string})"
    media_type = f"^{media_type_name}/{media_subtype_name}({ows};{ows}{parameter})*$"
    return match(media_type, text) is not None
# noinspection SpellCheckingInspection
@verification
def matches_RFC_8089_path(text: str) -> bool:
    """
    Check whether :paramref:`text` is a ``file:`` URI path per RFC 8089.
    The grammar has been taken from:
    https://datatracker.ietf.org/doc/html/rfc8089
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # --- Host (RFC 3986 productions) ---
    h16 = "[0-9A-Fa-f]{1,4}"
    dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
    ipv4address = f"{dec_octet}\\.{dec_octet}\\.{dec_octet}\\.{dec_octet}"
    ls32 = f"({h16}:{h16}|{ipv4address})"
    ipv6address = (
        f"(({h16}:){{6}}{ls32}|::({h16}:){{5}}{ls32}|({h16})?::({h16}:){{4}}"
        f"{ls32}|(({h16}:)?{h16})?::({h16}:){{3}}{ls32}|(({h16}:){{2}}{h16})?::"
        f"({h16}:){{2}}{ls32}|(({h16}:){{3}}{h16})?::{h16}:{ls32}|(({h16}:){{4}}"
        f"{h16})?::{ls32}|(({h16}:){{5}}{h16})?::{h16}|(({h16}:){{6}}{h16})?::)"
    )
    unreserved = "[a-zA-Z0-9\\-._~]"
    sub_delims = "[!$&'()*+,;=]"
    ipvfuture = f"[vV][0-9A-Fa-f]+\\.({unreserved}|{sub_delims}|:)+"
    ip_literal = f"\\[({ipv6address}|{ipvfuture})\\]"
    pct_encoded = "%[0-9A-Fa-f][0-9A-Fa-f]"
    reg_name = f"({unreserved}|{pct_encoded}|{sub_delims})*"
    host = f"({ip_literal}|{ipv4address}|{reg_name})"
    # --- Path ---
    pchar = f"({unreserved}|{pct_encoded}|{sub_delims}|[:@])"
    segment = f"({pchar})*"
    segment_nz = f"({pchar})+"
    path_absolute = f"/({segment_nz}(/{segment})*)?"
    # --- Assemble the file URI ---
    file_auth = f"(localhost|{host})"
    auth_path = f"({file_auth})?{path_absolute}"
    file_hier_part = f"(//{auth_path}|{path_absolute})"
    file_uri = f"file:{file_hier_part}"
    return match(f"^{file_uri}$", text) is not None
# noinspection SpellCheckingInspection
@verification
def matches_BCP_47(text: str) -> bool:
    """
    Check whether :paramref:`text` is a valid BCP 47 language tag.
    See: https://en.wikipedia.org/wiki/IETF_language_tag
    """
    alphanum = "[a-zA-Z0-9]"
    # Tags registered before RFC 4646 which do not fit the regular grammar
    irregular = (
        "(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|"
        "i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|"
        "i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)"
    )
    regular = (
        "(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|"
        "zh-min|zh-min-nan|zh-xiang)"
    )
    grandfathered = f"({irregular}|{regular})"
    extlang = "[a-zA-Z]{3}(-[a-zA-Z]{3}){2}"
    language = f"([a-zA-Z]{{2,3}}(-{extlang})?|[a-zA-Z]{{4}}|[a-zA-Z]{{5,8}})"
    script = "[a-zA-Z]{4}"
    region = "([a-zA-Z]{2}|[0-9]{3})"
    variant = f"(({alphanum}){{5,8}}|[0-9]({alphanum}){{3}})"
    singleton = "[0-9A-WY-Za-wy-z]"
    extension = f"{singleton}(-({alphanum}){{2,8}})+"
    privateuse = f"[xX](-({alphanum}){{1,8}})+"
    langtag = (
        f"{language}(-{script})?(-{region})?(-{variant})*(-{extension})*(-"
        f"{privateuse})?"
    )
    language_tag = f"({langtag}|{privateuse}|{grandfathered})"
    return match(f"^{language_tag}$", text) is not None
@verification
@implementation_specific
def lang_strings_have_unique_languages(lang_strings: List["Lang_string"]) -> bool:
    """
    Check that no two :paramref:`lang_strings` share
    the same :attr:`~Lang_string.language`.
    """
    # NOTE (mristin, 2022-04-7):
    # Reference implementation; it will not be transpiled.
    seen_languages = set()
    for lang_string in lang_strings:
        language = lang_string.language
        if language in seen_languages:
            return False
        seen_languages.add(language)
    return True
@verification
@implementation_specific
def qualifier_types_are_unique(qualifiers: List["Qualifier"]) -> bool:
    """
    Check that :attr:`~Qualifier.type`'s of :paramref:`qualifiers` are unique.
    :param qualifiers: to be checked
    :return: True if all :attr:`~Qualifier.type`'s are unique
    """
    # NOTE (mristin, 2022-04-1):
    # Reference implementation only; each implementation adapts it separately.
    seen_types = set()
    for qualifier in qualifiers:
        if qualifier.type in seen_types:
            return False
        seen_types.add(qualifier.type)
    return True
# noinspection SpellCheckingInspection
@verification
def matches_xs_any_URI(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:anyURI``.
    See: https://www.w3.org/TR/xmlschema11-2/#anyURI and
    https://datatracker.ietf.org/doc/html/rfc3987
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    scheme = "[a-zA-Z][a-zA-Z0-9+\\-.]*"
    # NOTE: code points beyond the BMP must use the 8-digit ``\\U`` escape.
    # Python's ``re`` reads exactly four hex digits after ``\\u``, so e.g.
    # ``\\u10000`` would be parsed as U+1000 followed by a literal "0" and
    # silently corrupt the character classes.
    ucschar = (
        "[\\xa0-\\ud7ff\\uf900-\\ufdcf\\ufdf0-\\uffef"
        "\\U00010000-\\U0001fffd\\U00020000-\\U0002fffd"
        "\\U00030000-\\U0003fffd\\U00040000-\\U0004fffd"
        "\\U00050000-\\U0005fffd\\U00060000-\\U0006fffd"
        "\\U00070000-\\U0007fffd\\U00080000-\\U0008fffd"
        "\\U00090000-\\U0009fffd\\U000a0000-\\U000afffd"
        "\\U000b0000-\\U000bfffd\\U000c0000-\\U000cfffd"
        "\\U000d0000-\\U000dfffd\\U000e1000-\\U000efffd]"
    )
    iunreserved = f"([a-zA-Z0-9\\-._~]|{ucschar})"
    pct_encoded = "%[0-9A-Fa-f][0-9A-Fa-f]"
    sub_delims = "[!$&'()*+,;=]"
    iuserinfo = f"({iunreserved}|{pct_encoded}|{sub_delims}|:)*"
    h16 = "[0-9A-Fa-f]{1,4}"
    dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
    ipv4address = f"{dec_octet}\\.{dec_octet}\\.{dec_octet}\\.{dec_octet}"
    ls32 = f"({h16}:{h16}|{ipv4address})"
    ipv6address = (
        f"(({h16}:){{6}}{ls32}|::({h16}:){{5}}{ls32}|({h16})?::({h16}:){{4}}"
        f"{ls32}|(({h16}:)?{h16})?::({h16}:){{3}}{ls32}|(({h16}:){{2}}{h16})?::"
        f"({h16}:){{2}}{ls32}|(({h16}:){{3}}{h16})?::{h16}:{ls32}|(({h16}:){{4}}"
        f"{h16})?::{ls32}|(({h16}:){{5}}{h16})?::{h16}|(({h16}:){{6}}{h16})?::)"
    )
    unreserved = "[a-zA-Z0-9\\-._~]"
    ipvfuture = f"[vV][0-9A-Fa-f]+\\.({unreserved}|{sub_delims}|:)+"
    ip_literal = f"\\[({ipv6address}|{ipvfuture})\\]"
    ireg_name = f"({iunreserved}|{pct_encoded}|{sub_delims})*"
    ihost = f"({ip_literal}|{ipv4address}|{ireg_name})"
    port = "[0-9]*"
    iauthority = f"({iuserinfo}@)?{ihost}(:{port})?"
    ipchar = f"({iunreserved}|{pct_encoded}|{sub_delims}|[:@])"
    isegment = f"({ipchar})*"
    ipath_abempty = f"(/{isegment})*"
    isegment_nz = f"({ipchar})+"
    ipath_absolute = f"/({isegment_nz}(/{isegment})*)?"
    ipath_rootless = f"{isegment_nz}(/{isegment})*"
    ipath_empty = f"({ipchar}){{0}}"
    ihier_part = (
        f"(//{iauthority}{ipath_abempty}|{ipath_absolute}|"
        f"{ipath_rootless}|{ipath_empty})"
    )
    # Same ``\\U`` escape requirement as for ``ucschar`` above.
    iprivate = "[\\ue000-\\uf8ff\\U000f0000-\\U000ffffd\\U00100000-\\U0010fffd]"
    iquery = f"({ipchar}|{iprivate}|[/?])*"
    ifragment = f"({ipchar}|[/?])*"
    isegment_nz_nc = f"({iunreserved}|{pct_encoded}|{sub_delims}|@)+"
    ipath_noscheme = f"{isegment_nz_nc}(/{isegment})*"
    irelative_part = (
        f"(//{iauthority}{ipath_abempty}|{ipath_absolute}|"
        f"{ipath_noscheme}|{ipath_empty})"
    )
    irelative_ref = f"{irelative_part}(\\?{iquery})?(\\#{ifragment})?"
    iri = f"{scheme}:{ihier_part}(\\?{iquery})?(\\#{ifragment})?"
    iri_reference = f"({iri}|{irelative_ref})"
    pattern = f"^{iri_reference}$"
    return match(pattern, text) is not None
# noinspection SpellCheckingInspection
@verification
def matches_xs_base_64_binary(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:base64Binary``.
    See: https://www.w3.org/TR/xmlschema11-2/#base64Binary
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Base64 characters whose bit-string value ends in '0000'
    b04_char = "[AQgw]"
    b04 = f"{b04_char}\\x20?"
    # Base64 characters whose bit-string value ends in '00'
    b16_char = "[AEIMQUYcgkosw048]"
    b16 = f"{b16_char}\\x20?"
    b64_char = "[A-Za-z0-9+/]"
    b64 = f"{b64_char}\\x20?"
    b64quad = f"({b64}{b64}{b64}{b64})"
    # b64_final_quad represents three octets of binary data without trailing space.
    b64_final_quad = f"({b64}{b64}{b64}{b64_char})"
    # padded_8 represents a single octet at the end of the data.
    # NOTE: previously this embedded a literal space through an unescaped
    # "\x20" inside the f-string; we use the regex escape "\\x20" for
    # consistency with the other fragments (the matched language is the same).
    padded_8 = f"{b64}{b04}=\\x20?="
    # padded_16 represents a two-octet at the end of the data.
    padded_16 = f"{b64}{b64}{b16}="
    b64final = f"({b64_final_quad}|{padded_16}|{padded_8})"
    base64_binary = f"({b64quad}*{b64final})?"
    pattern = f"^{base64_binary}$"
    return match(pattern, text) is not None
@verification
def matches_xs_boolean(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:boolean``.
    See: https://www.w3.org/TR/xmlschema11-2/#boolean
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Exactly one of the four lexical forms; case-sensitive.
    return match("^(true|false|1|0)$", text) is not None
@verification
def matches_xs_date(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:date``.
    See: https://www.w3.org/TR/xmlschema11-2/#date
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    digit = "[0-9]"
    year_frag = f"-?(([1-9]{digit}{digit}{digit}+)|(0{digit}{digit}{digit}))"
    month_frag = "((0[1-9])|(1[0-2]))"
    day_frag = f"((0[1-9])|([12]{digit})|(3[01]))"
    minute_frag = f"[0-5]{digit}"
    # NOTE: the ``((hh:mm)|14:00)`` part must be grouped behind the sign.
    # Without the inner parentheses, an unsigned "14:00" would be accepted
    # as a time zone while a signed "+14:00" would be rejected
    # (cf. the timezoneFrag production in the XSD spec).
    timezone_frag = rf"(Z|(\+|-)((0{digit}|1[0-3]):{minute_frag}|14:00))"
    date_lexical_rep = f"{year_frag}-{month_frag}-{day_frag}{timezone_frag}?"
    pattern = f"^{date_lexical_rep}$"
    return match(pattern, text) is not None
@verification
def matches_xs_date_time(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:dateTime``.
    See: https://www.w3.org/TR/xmlschema11-2/#dateTime
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    digit = "[0-9]"
    year_frag = f"-?(([1-9]{digit}{digit}{digit}+)|(0{digit}{digit}{digit}))"
    month_frag = "((0[1-9])|(1[0-2]))"
    day_frag = f"((0[1-9])|([12]{digit})|(3[01]))"
    hour_frag = f"(([01]{digit})|(2[0-3]))"
    minute_frag = f"[0-5]{digit}"
    second_frag = f"([0-5]{digit})(\\.{digit}+)?"
    end_of_day_frag = "24:00:00(\\.0+)?"
    # NOTE: the ``((hh:mm)|14:00)`` part must be grouped behind the sign;
    # otherwise an unsigned "14:00" would count as a time zone and "+14:00"
    # would be rejected (cf. timezoneFrag in the XSD spec).
    timezone_frag = rf"(Z|(\+|-)((0{digit}|1[0-3]):{minute_frag}|14:00))"
    date_time_lexical_rep = (
        f"{year_frag}-{month_frag}-{day_frag}"
        f"T"
        f"(({hour_frag}:{minute_frag}:{second_frag})|{end_of_day_frag})"
        f"{timezone_frag}?"
    )
    pattern = f"^{date_time_lexical_rep}$"
    return match(pattern, text) is not None
@verification
def matches_xs_date_time_stamp(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:dateTimeStamp``.
    See: https://www.w3.org/TR/xmlschema11-2/#dateTimeStamp
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    digit = "[0-9]"
    year_frag = f"-?(([1-9]{digit}{digit}{digit}+)|(0{digit}{digit}{digit}))"
    month_frag = "((0[1-9])|(1[0-2]))"
    day_frag = f"((0[1-9])|([12]{digit})|(3[01]))"
    hour_frag = f"(([01]{digit})|(2[0-3]))"
    minute_frag = f"[0-5]{digit}"
    second_frag = f"([0-5]{digit})(\\.{digit}+)?"
    end_of_day_frag = "24:00:00(\\.0+)?"
    # NOTE: the ``((hh:mm)|14:00)`` part must be grouped behind the sign;
    # otherwise an unsigned "14:00" would count as a time zone and "+14:00"
    # would be rejected (cf. timezoneFrag in the XSD spec).
    timezone_frag = rf"(Z|(\+|-)((0{digit}|1[0-3]):{minute_frag}|14:00))"
    # Unlike ``xs:dateTime``, the time zone is mandatory here.
    date_time_stamp_lexical_rep = (
        f"{year_frag}-{month_frag}-{day_frag}"
        f"T"
        f"(({hour_frag}:{minute_frag}:{second_frag})|{end_of_day_frag})"
        f"{timezone_frag}"
    )
    pattern = f"^{date_time_stamp_lexical_rep}$"
    return match(pattern, text) is not None
# noinspection SpellCheckingInspection
@verification
def matches_xs_decimal(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:decimal``.
    See: https://www.w3.org/TR/xmlschema11-2/#decimal
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Either a decimal-point form ("12.34", ".5") or a plain integer form,
    # each with an optional sign.
    pattern = r"^((\+|-)?([0-9]+\.[0-9]+|\.[0-9]+)|(\+|-)?[0-9]+)$"
    return match(pattern, text) is not None
@verification
def matches_xs_double(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:double``.
    See: https://www.w3.org/TR/xmlschema11-2/#double
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # NOTE (mristin, 2022-04-6):
    # See: https://www.w3.org/TR/xmlschema11-2/#nt-doubleRep
    double_rep = (
        r"(\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([Ee](\+|-)?[0-9]+)?|(\+|-)?INF|NaN"
    )
    # NOTE: ``double_rep`` contains a top-level alternation, so it must be
    # wrapped in a group before anchoring; otherwise "^" and "$" bind only to
    # the first and last alternatives and e.g. "1.5junk" would match.
    pattern = f"^({double_rep})$"
    return match(pattern, text) is not None
@verification
def matches_xs_duration(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:duration``.
    See: https://www.w3.org/TR/xmlschema11-2/#duration
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # NOTE (mristin, 2022-04-6):
    # See https://www.w3.org/TR/xmlschema11-2/#nt-durationRep
    # The outer alternation distinguishes durations with a date part
    # (Y/M/D, optionally followed by a "T..." time part) from time-only
    # durations that start directly with "T". Every branch requires at
    # least one component, so a bare "P" or "PT" does not match.
    # fmt: off
    duration_rep = (
        r"-?P((([0-9]+Y([0-9]+M)?([0-9]+D)?"
        r"|([0-9]+M)([0-9]+D)?"
        r"|([0-9]+D)"
        r")"
        r"(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+M)([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+(\.[0-9]+)?S)"
        r")"
        r")?"
        r")"
        r"|(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+M)([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+(\.[0-9]+)?S)"
        r")"
        r")"
        r")"
    )
    # fmt: on
    pattern = f"^{duration_rep}$"
    return match(pattern, text) is not None
@verification
def matches_xs_float(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:float``.
    See: https://www.w3.org/TR/xmlschema11-2/#float
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    float_rep = (
        r"(\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([Ee](\+|-)?[0-9]+)?" r"|(\+|-)?INF|NaN"
    )
    # NOTE: ``float_rep`` contains a top-level alternation, so it must be
    # wrapped in a group before anchoring; otherwise "^" and "$" bind only to
    # the first and last alternatives and e.g. "1.5junk" would match.
    pattern = f"^({float_rep})$"
    return match(pattern, text) is not None
@verification
def matches_xs_g_day(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:gDay``.
    See: https://www.w3.org/TR/xmlschema11-2/#gDay
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # "---DD" with an optional time zone suffix
    # (see https://www.w3.org/TR/xmlschema11-2/#nt-gDayRep).
    return (
        match(
            r"^---(0[1-9]|[12][0-9]|3[01])"
            r"(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$",
            text,
        )
        is not None
    )
@verification
def matches_xs_g_month(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:gMonth``.
    See: https://www.w3.org/TR/xmlschema11-2/#gMonth
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # "--MM" with an optional time zone suffix
    # (see https://www.w3.org/TR/xmlschema11-2/#nt-gMonthRep).
    return (
        match(
            r"^--(0[1-9]|1[0-2])"
            r"(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$",
            text,
        )
        is not None
    )
@verification
def matches_xs_g_month_day(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:gMonthDay``.
    See: https://www.w3.org/TR/xmlschema11-2/#gMonthDay
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # "--MM-DD" with an optional time zone suffix
    # (see https://www.w3.org/TR/xmlschema11-2/#nt-gMonthDayRep).
    timezone = r"(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?"
    return (
        match(r"^--(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])" + timezone + "$", text)
        is not None
    )
@verification
def matches_xs_g_year(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:gYear``.
    See: https://www.w3.org/TR/xmlschema11-2/#gYear
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # At least four digits (optionally signed), then an optional time zone
    # (see https://www.w3.org/TR/xmlschema11-2/#nt-gYearRep).
    timezone = r"(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?"
    return match(r"^-?([1-9][0-9]{3,}|0[0-9]{3})" + timezone + "$", text) is not None
@verification
def matches_xs_g_year_month(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:gYearMonth``.
    See: https://www.w3.org/TR/xmlschema11-2/#gYearMonth
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # "YYYY-MM" (year of four or more digits, optionally signed) with an
    # optional time zone (see https://www.w3.org/TR/xmlschema11-2/#nt-gYearMonthRep).
    timezone = r"(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?"
    return (
        match(r"^-?([1-9][0-9]{3,}|0[0-9]{3})-(0[1-9]|1[0-2])" + timezone + "$", text)
        is not None
    )
@verification
def matches_xs_hex_binary(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:hexBinary``.
    See: https://www.w3.org/TR/xmlschema11-2/#hexBinary
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Zero or more octets, each written as exactly two hexadecimal digits
    # (see https://www.w3.org/TR/xmlschema11-2/#nt-hexBinary).
    return match(r"^([0-9a-fA-F]{2})*$", text) is not None
@verification
def matches_xs_time(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:time``.
    See: https://www.w3.org/TR/xmlschema11-2/#time
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # "hh:mm:ss(.fff)" or the end-of-day "24:00:00", followed by an optional
    # time zone (see https://www.w3.org/TR/xmlschema11-2/#nt-timeRep).
    pattern = (
        r"^(([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]+)?|(24:00:00(\.0+)?))"
        r"(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$"
    )
    return match(pattern, text) is not None
@verification
def matches_xs_day_time_duration(text: str) -> bool:
    """
    Check that :paramref:`text` conforms to the pattern of an ``xs:dayTimeDuration``.
    See: https://www.w3.org/TR/xmlschema11-2/#dayTimeDuration
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # NOTE (mristin, 2022-04-6):
    # See https://www.w3.org/TR/xmlschema11-2/#nt-durationRep and
    # https://www.w3.org/TR/xmlschema11-2/#dayTimeDuration related to pattern
    # intersection
    # Unlike the full ``xs:duration`` pattern, the year (Y) and month (M)
    # date components are excluded here; only days and the time part remain.
    # fmt: off
    day_time_duration_rep = (
        r"-?P(("
        r"([0-9]+D)"
        r"(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+M)([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+(\.[0-9]+)?S)"
        r")"
        r")?"
        r")"
        r"|(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+M)([0-9]+(\.[0-9]+)?S)?"
        r"|([0-9]+(\.[0-9]+)?S)"
        r")"
        r")"
        r")"
    )
    # fmt: on
    pattern = f"^{day_time_duration_rep}$"
    return match(pattern, text) is not None
@verification
def matches_xs_year_month_duration(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:yearMonthDuration``.
    See: https://www.w3.org/TR/xmlschema11-2/#yearMonthDuration
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Either a year part with an optional month part, or a month part alone.
    return match(r"^-?P((([0-9]+Y)([0-9]+M)?)|([0-9]+M))$", text) is not None
@verification
def matches_xs_integer(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:integer``.
    See: https://www.w3.org/TR/xmlschema11-2/#integer
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Optional sign followed by at least one digit.
    return match(r"^[\-+]?[0-9]+$", text) is not None
@verification
def matches_xs_long(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:long``.
    See: https://www.w3.org/TR/xmlschema11-2/#long
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Optional sign followed by at least one digit; the 64-bit value range
    # itself is not checked by the pattern.
    return match(r"^[\-+]?[0-9]+$", text) is not None
@verification
def matches_xs_int(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:int``.
    See: https://www.w3.org/TR/xmlschema11-2/#int
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Optional sign followed by at least one digit; the 32-bit value range
    # itself is not checked by the pattern.
    return match(r"^[\-+]?[0-9]+$", text) is not None
@verification
def matches_xs_short(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:short``.
    See: https://www.w3.org/TR/xmlschema11-2/#short
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Optional sign followed by at least one digit; the 16-bit value range
    # itself is not checked by the pattern.
    return match(r"^[\-+]?[0-9]+$", text) is not None
@verification
def matches_xs_byte(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:byte``.
    See: https://www.w3.org/TR/xmlschema11-2/#byte
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Optional sign followed by at least one digit; the 8-bit value range
    # itself is not checked by the pattern.
    return match(r"^[\-+]?[0-9]+$", text) is not None
@verification
def matches_xs_non_negative_integer(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:nonNegativeInteger``.
    See: https://www.w3.org/TR/xmlschema11-2/#nonNegativeInteger
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # "-0" is the only admitted minus-signed form.
    return match(r"^(-0|\+?[0-9]+)$", text) is not None
@verification
def matches_xs_positive_integer(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:positiveInteger``.
    See: https://www.w3.org/TR/xmlschema11-2/#positiveInteger
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Optional "+", optional leading zeros, and at least one non-zero digit.
    return match(r"^\+?0*[1-9][0-9]*$", text) is not None
@verification
def matches_xs_unsigned_long(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:unsignedLong``.
    See: https://www.w3.org/TR/xmlschema11-2/#unsignedLong
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Same lexical space as nonNegativeInteger; the value range is not checked.
    return match(r"^(-0|\+?[0-9]+)$", text) is not None
@verification
def matches_xs_unsigned_int(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:unsignedInt``.
    See: https://www.w3.org/TR/xmlschema11-2/#unsignedInt
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Same lexical space as nonNegativeInteger; the value range is not checked.
    return match(r"^(-0|\+?[0-9]+)$", text) is not None
@verification
def matches_xs_unsigned_short(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:unsignedShort``.
    See: https://www.w3.org/TR/xmlschema11-2/#unsignedShort
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Same lexical space as nonNegativeInteger; the value range is not checked.
    return match(r"^(-0|\+?[0-9]+)$", text) is not None
@verification
def matches_xs_unsigned_byte(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:unsignedByte``.
    See: https://www.w3.org/TR/xmlschema11-2/#unsignedByte
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Same lexical space as nonNegativeInteger; the value range is not checked.
    return match(r"^(-0|\+?[0-9]+)$", text) is not None
@verification
def matches_xs_non_positive_integer(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:nonPositiveInteger``.
    See: https://www.w3.org/TR/xmlschema11-2/#nonPositiveInteger
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # Zero (optionally "+0") or a minus-signed digit sequence.
    return match(r"^(\+0|0|-[0-9]+)$", text) is not None
@verification
def matches_xs_negative_integer(text: str) -> bool:
    """
    Check whether :paramref:`text` is a lexical ``xs:negativeInteger``.
    See: https://www.w3.org/TR/xmlschema11-2/#negativeInteger
    :param text: Text to be checked
    :returns: True if the :paramref:`text` conforms to the pattern
    """
    # A minus sign, optional leading zeros, and at least one non-zero digit.
    return match(r"^(-0*[1-9][0-9]*)$", text) is not None
# noinspection PyUnusedLocal
@verification
@implementation_specific
def value_consistent_with_xsd_type(value: str, value_type: "Data_type_def_XSD") -> bool:
    """
    Check that the :paramref:`value` conforms to its :paramref:`value_type`.
    :param value: which needs to conform
    :param value_type: pre-defined value type
    :return: True if the :paramref:`value` conforms
    """
    # NOTE (mristin, 2022-04-1):
    # We specify the pattern-matching functions above, and they should be handy to check
    # for most obvious pattern mismatches.
    #
    # However, bear in mind that the pattern checks are not enough! For example,
    # consider a ``xs:dateTimeStamp``. You need to check not only that the value
    # follows the pattern, but also that the day-of-month and leap seconds are taken
    # into account.
    # This function is implementation-specific; like the other such stubs in
    # this module (e.g. ``is_xs_date_time_stamp_utc``), raise explicitly
    # instead of falling through and implicitly returning None despite the
    # ``-> bool`` annotation.
    raise NotImplementedError()
@verification
@implementation_specific
def is_model_reference_to(
    reference: "Model_reference", expected_type: "Key_elements"
) -> bool:
    """Check that the target of the model reference matches the expected ``target``."""
    # NOTE (mristin, 2022-03-28):
    # This implementation is given here only as reference. It needs to be adapted
    # for each implementation separately.
    #
    # The conjunction (``and``) is essential: with ``or``, every non-empty
    # reference would pass regardless of its last key, and an empty reference
    # would crash on ``keys[-1]`` instead of returning False.
    return len(reference.keys) != 0 and reference.keys[-1].type == expected_type
@verification
@implementation_specific
def id_shorts_are_unique(referables: List["Referable"]) -> bool:
    """
    Check that no two :paramref:`referables` share
    the same :attr:`~Referable.id_short`.
    """
    # NOTE (mristin, 2022-04-7):
    # Reference implementation; it will not be transpiled.
    seen = set()
    for referable in referables:
        id_short = referable.id_short
        # Unset id_shorts do not participate in the uniqueness check.
        if id_short is None:
            continue
        if id_short in seen:
            return False
        seen.add(id_short)
    return True
@verification
@implementation_specific
def extension_names_are_unique(extensions: List["Extension"]) -> bool:
    """Check that no two :paramref:`extensions` share the same name."""
    # NOTE (mristin, 2022-04-7):
    # Reference implementation; it will not be transpiled.
    observed = set()
    for extension in extensions:
        name = extension.name
        if name in observed:
            return False
        observed.add(name)
    return True
@verification
@implementation_specific
def submodel_elements_have_identical_semantic_ids(
    elements: List["Submodel_element"],
) -> bool:
    """Check that all semantic IDs are identical, if specified."""
    # NOTE (mristin, 2022-04-7):
    # Reference implementation; it will not be transpiled.
    first_semantic_id = None
    for element in elements:
        # Elements without a semantic ID are ignored.
        if element.semantic_id is None:
            continue
        if first_semantic_id is None:
            first_semantic_id = element.semantic_id
        elif first_semantic_id != element.semantic_id:
            return False
    return True
# noinspection PyUnusedLocal
@verification
@implementation_specific
def submodel_element_is_of_type(
    element: "Submodel_element", element_type: "Submodel_element_elements"
) -> bool:
    """
    Check that the run-time type of the :paramref:`element` coincides with
    :paramref:`element_type`.
    """
    # Implementation-specific stub: the concrete run-time type check is
    # provided by each downstream implementation.
    raise NotImplementedError()
@verification
@implementation_specific
def properties_or_ranges_have_value_type(
    elements: List["Submodel_element"], value_type: "Data_type_def_XSD"
) -> bool:
    """
    Check that every :class:`.Property` and :class:`.Range` among
    :paramref:`elements` has the given :paramref:`value_type`.
    """
    # NOTE (mristin, 2022-04-7):
    # Reference implementation; it will not be transpiled.
    return all(
        element.value_type == value_type
        for element in elements
        if isinstance(element, (Property, Range))
    )
@verification
@implementation_specific
def concept_description_category_is_valid(category: str) -> bool:
    """
    Check that the :paramref:`category` is a valid category for
    a :class:`.Concept_description`.
    """
    # NOTE (mristin, 2022-04-7):
    # Reference implementation; it will not be transpiled. A concrete
    # implementation should use a hash set or a trie for efficient lookups.
    valid_categories = (
        "VALUE",
        "PROPERTY",
        "REFERENCE",
        "DOCUMENT",
        "CAPABILITY",
        "RELATIONSHIP",
        "COLLECTION",
        "FUNCTION",
        "EVENT",
        "ENTITY",
        "APPLICATION_CLASS",
        "QUALIFIER",
        "VIEW",
    )
    return category in valid_categories
# endregion
# The icontract invariant rejects empty strings.
@invariant(lambda self: len(self) >= 1)
class Non_empty_string(str, DBC):
    """Represent a string with at least one character."""
# Two invariants: the lexical pattern (matches_xs_date_time_stamp_utc) and
# the calendar validity check (is_xs_date_time_stamp_utc, e.g. February 29th).
@invariant(lambda self: is_xs_date_time_stamp_utc(self))
@invariant(lambda self: matches_xs_date_time_stamp_utc(self))
class Date_time_stamp_UTC(str, DBC):
    """Represent an ``xs:dateTimeStamp`` with the time zone fixed to UTC."""
# Subclasses ``bytearray``, so instances behave like mutable byte buffers.
@reference_in_the_book(section=(5, 7, 12, 2))
class Blob_type(bytearray, DBC):
    """Group of bytes to represent file content (binaries and non-binaries)"""
# Inherits the at-least-one-character invariant from Non_empty_string.
@reference_in_the_book(section=(5, 7, 12, 2))
class Identifier(Non_empty_string, DBC):
    """
    string
    """
# noinspection SpellCheckingInspection
# Conformance is enforced through the matches_BCP_47 invariant.
@invariant(lambda self: matches_BCP_47(self))
class BCP_47_language_tag(str, DBC):
    """
    Represent a language tag conformant to BCP 47.
    See: https://en.wikipedia.org/wiki/IETF_language_tag
    """
# A non-empty string constrained to the RFC 7231 media-type grammar
# through the matches_MIME_type invariant.
@reference_in_the_book(section=(5, 7, 12, 2))
@invariant(lambda self: matches_MIME_type(self))
class Content_type(Non_empty_string, DBC):
    """
    string
    .. note::
        Any content type as in RFC2046.
        A media type (also MIME type and content type) […] is a two-part
        identifier for file formats and format contents transmitted on
        the Internet. The Internet Assigned Numbers Authority (IANA) is
        the official authority for the standardization and publication of
        these classifications. Media types were originally defined in
        Request for Comments 2045 in November 1996 as a part of MIME
        (Multipurpose Internet Mail Extensions) specification, for denoting
        type of email message content and attachments.
    """
# Constrained primitive: the invariant checks conformance to the RFC 8089
# "file" URI scheme.
@invariant(lambda self: matches_RFC_8089_path(self))
@reference_in_the_book(section=(5, 7, 12, 2))
class Path_type(Non_empty_string, DBC):
    """
    string
    .. note::
        Any string conformant to RFC8089 , the “file” URI scheme (for
        relative and absolute file paths)
    """
    # NOTE: the redundant ``pass`` was removed; the docstring alone is
    # a valid class body, matching the sibling constrained primitives.
# Constrained primitive: a non-empty string naming the type of a qualifier
# (see ``Qualifier.type``).
@reference_in_the_book(section=(5, 7, 12, 2))
class Qualifier_type(Non_empty_string, DBC):
    """
    string
    """
# Constrained primitive: holds the lexical representation of an XSD atomic
# value; the concrete XSD type is given separately via ``Data_type_def_XSD``.
class Value_data_type(str, DBC):
    """
    any xsd atomic type as specified via :class:`.Data_type_def_XSD`
    """
class Resource(DBC):
    """
    Resource represents an address to a file (a locator). The value is an URI that
    can represent an absolute or relative path
    """
    path: "Path_type"
    """
    Path and name of the resource (with file extension).
    The path can be absolute or relative.
    """
    content_type: Optional["Content_type"]
    """
    Content type of the content of the file.
    The content type states which file extensions the file can have.
    """
    def __init__(
        self,
        path: "Path_type",
        content_type: Optional["Content_type"] = None,
    ) -> None:
        # NOTE (review): ``path`` was previously annotated as ``Asset_kind``,
        # which is an enumeration of Type/Instance and cannot denote a file
        # locator; ``Path_type`` matches the docstring ("Path and name of
        # the resource").
        self.path = path
        self.content_type = content_type
# Abstract mixin: inheritors call Has_semantics.__init__ explicitly in their
# own constructors.
@abstract
@reference_in_the_book(section=(5, 7, 2, 6))
class Has_semantics(DBC):
    """
    Element that can have a semantic definition.
    """
    semantic_id: Optional["Global_reference"]
    """
    Identifier of the semantic definition of the element. It is called semantic ID
    of the element.
    """
    def __init__(self, semantic_id: Optional["Global_reference"] = None) -> None:
        self.semantic_id = semantic_id
@reference_in_the_book(section=(5, 7, 2, 1), index=1)
class Extension(Has_semantics):
    """
    Single extension of an element.
    """
    name: Non_empty_string
    """
    Name of the extension.
    :constraint AASd-077:
    The name of an extension within :class:`.Has_extensions` needs to be unique.
    """
    value_type: Optional["Data_type_def_XSD"]
    """
    Type of the value of the extension.
    Default: xsd:string
    """
    value: Optional["Value_data_type"]
    """
    Value of the extension
    """
    refers_to: Optional["Model_reference"]
    """
    Reference to an element the extension refers to.
    """
    def __init__(
        self,
        name: Non_empty_string,
        semantic_id: Optional["Global_reference"] = None,
        value_type: Optional["Data_type_def_XSD"] = None,
        value: Optional["Value_data_type"] = None,
        refers_to: Optional["Model_reference"] = None,
    ) -> None:
        Has_semantics.__init__(self, semantic_id=semantic_id)
        self.name = name
        # NOTE (review): the documented default ``xsd:string`` for value_type
        # is not applied here — presumably handled by consumers; confirm.
        self.value_type = value_type
        self.value = value
        self.refers_to = refers_to
# fmt: off
@abstract
@reference_in_the_book(section=(5, 7, 2, 1))
# The invariant enforces AASd-077 only when extensions are provided.
@invariant(
    lambda self:
    not (self.extensions is not None) or extension_names_are_unique(self.extensions),
    "Constraint AASd-077: The name of an extension within Has_extensions "
    "needs to be unique."
)
# fmt: on
class Has_extensions(DBC):
    """
    Element that can be extended by proprietary extensions.
    Note: Extensions are proprietary, i.e. they do not support global interoperability.
    """
    extensions: Optional[List["Extension"]]
    """
    An extension of the element.
    """
    def __init__(self, extensions: Optional[List["Extension"]] = None) -> None:
        # Stored as given; None means "no extensions provided".
        self.extensions = extensions
# fmt: off
@abstract
# The invariant enforces AASd-027 only when id_short is set.
@invariant(
    lambda self:
    not (self.id_short is not None) or len(self.id_short) <= 128,
    "Constraint AASd-027: ID-short shall have a maximum length of 128 characters."
)
@reference_in_the_book(section=(5, 7, 2, 2))
@serialization(with_model_type=True)
# fmt: on
class Referable(Has_extensions):
    """
    An element that is referable by its :attr:`~id_short`.
    This identifier is not globally unique.
    This identifier is unique within the name space of the element.
    """
    id_short: Optional[Non_empty_string]
    """
    In case of identifiables this attribute is a short name of the element.
    In case of referable this ID is an identifying string of
    the element within its name space.
    .. note::
        In case the element is a property and the property has a semantic definition
        (:class:`.Has_semantics`) conformant to IEC61360 the :attr:`~id_short`
        is typically identical to the short name in English.
    :constraint AASd-027:
    :attr:`~id_short` of :class:`.Referable`'s shall have a maximum length
    of 128 characters.
    """
    display_name: Optional["Lang_string_set"]
    """
    Display name. Can be provided in several languages.
    If no display name is defined in the language requested by the application,
    then the display name is selected in the following order if available:
    * the preferred name in the requested language of the concept description defining
    the semantics of the element
    * If there is a default language list defined in the application,
    then the corresponding preferred name in the language is chosen
    according to this order.
    * the English preferred name of the concept description defining
    the semantics of the element
    * the short name of the concept description
    * the :attr:`~id_short` of the element
    """
    category: Optional[Non_empty_string]
    """
    The category is a value that gives further meta information
    w.r.t. to the class of the element.
    It affects the expected existence of attributes and the applicability of
    constraints.
    .. note::
        The category is not identical to the semantic definition
        (:class:`.Has_semantics`) of an element. The category
        *e.g.* could denote that the element is a measurement value whereas the
        semantic definition of the element would
        denote that it is the measured temperature.
    """
    description: Optional["Lang_string_set"]
    """
    Description or comments on the element.
    The description can be provided in several languages.
    If no description is defined, then the definition of the concept
    description that defines the semantics of the element is used.
    Additional information can be provided, *e.g.*, if the element is
    qualified and which qualifier types can be expected in which
    context or which additional data specification templates are
    provided.
    """
    checksum: Optional["Non_empty_string"]
    """
    Checksum to be used to determine if an Referable (including its
    aggregated child elements) has changed.
    The checksum is calculated by the user's tool environment.
    The checksum has no semantic meaning for an asset administration
    shell model and there is no requirement for asset administration
    shell tools to manage the checksum
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
    ) -> None:
        Has_extensions.__init__(self, extensions=extensions)
        self.id_short = id_short
        self.display_name = display_name
        self.category = category
        self.description = description
        self.checksum = checksum
@abstract
@reference_in_the_book(section=(5, 7, 2, 3))
class Identifiable(Referable):
    """An element that has a globally unique identifier."""
    ID: "Identifier"
    """The globally unique identification of the element."""
    administration: Optional["Administrative_information"]
    """
    Administrative information of an identifiable element.
    .. note::
        Some of the administrative information like the version number might need to
        be part of the identification.
    """
    def __init__(
        self,
        ID: "Identifier",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        administration: Optional["Administrative_information"] = None,
    ) -> None:
        # Delegate all Referable attributes to the parent constructor.
        Referable.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
        )
        self.ID = ID
        self.administration = administration
@reference_in_the_book(section=(5, 7, 2, 4), index=1)
class Modeling_kind(Enum):
    """Enumeration for denoting whether an element is a template or an instance."""
    Template = "TEMPLATE"
    """
    Software element which specifies the common attributes shared by all instances of
    the template.
    [SOURCE: IEC TR 62390:2005-01, 3.1.25] modified
    """
    Instance = "INSTANCE"
    """
    Concrete, clearly identifiable component of a certain template.
    .. note::
        It becomes an individual entity of a template, for example a
        device model, by defining specific property values.
    .. note::
        In an object oriented view, an instance denotes an object of a
        template (class).
    [SOURCE: IEC 62890:2016, 3.1.16 65/617/CDV] modified
    """
@abstract
@reference_in_the_book(section=(5, 7, 2, 4))
class Has_kind(DBC):
    """
    An element with a kind is an element that can either represent a template or an
    instance.
    Default for an element is that it is representing an instance.
    """
    kind: Optional["Modeling_kind"]
    """
    Kind of the element: either type or instance.
    Default Value = Instance
    """
    def __init__(self, kind: Optional["Modeling_kind"] = None) -> None:
        # The documented default is applied here: a missing kind becomes Instance.
        self.kind = kind if kind is not None else Modeling_kind.Instance
@abstract
@reference_in_the_book(section=(5, 7, 2, 9))
class Has_data_specification(DBC):
    """
    Element that can be extended by using data specification templates.
    A data specification template defines a named set of additional attributes an
    element may or shall have. The data specifications used are explicitly specified
    with their global ID.
    """
    data_specifications: Optional[List["Global_reference"]]
    """
    Global reference to the data specification template used by the element.
    """
    def __init__(
        self, data_specifications: Optional[List["Global_reference"]] = None
    ) -> None:
        self.data_specifications = data_specifications
# fmt: off
# The invariant states AASd-005 in contrapositive form:
# a set revision implies a set version.
@invariant(
    lambda self:
    not (self.revision is not None) or self.version is not None,
    "Constraint AASd-005: If version is not specified then also revision shall "
    "be unspecified. This means, a revision requires a version. If there is "
    "no version there is no revision neither. Revision is optional."
)
@reference_in_the_book(section=(5, 7, 2, 5))
# fmt: on
class Administrative_information(Has_data_specification):
    """
    Administrative meta-information for an element like version
    information.
    :constraint AASd-005:
    If :attr:`~version` is not specified then also :attr:`~revision` shall be
    unspecified. This means, a revision requires a version. If there is no version
    there is no revision neither. Revision is optional.
    """
    version: Optional[Non_empty_string]
    """Version of the element."""
    revision: Optional[Non_empty_string]
    """Revision of the element."""
    def __init__(
        self,
        data_specifications: Optional[List["Global_reference"]] = None,
        version: Optional[Non_empty_string] = None,
        revision: Optional[Non_empty_string] = None,
    ) -> None:
        Has_data_specification.__init__(self, data_specifications=data_specifications)
        self.version = version
        self.revision = revision
# fmt: off
@abstract
# The invariant enforces AASd-021 only when qualifiers are provided.
@invariant(
    lambda self:
    not (self.qualifiers is not None)
    or qualifier_types_are_unique(self.qualifiers),
    "Constraint AASd-021: Every qualifiable can only have one qualifier with "
    "the same type."
)
@reference_in_the_book(section=(5, 7, 2, 7))
@serialization(with_model_type=True)
# fmt: on
class Qualifiable(DBC):
    """
    The value of a qualifiable element may be further qualified by one or more
    qualifiers or complex formulas.
    """
    qualifiers: Optional[List["Qualifier"]]
    """
    Additional qualification of a qualifiable element.
    :constraint AASd-021:
    Every qualifiable can only have one qualifier with the same
    :attr:`~Qualifier.type`.
    """
    def __init__(self, qualifiers: Optional[List["Qualifier"]] = None) -> None:
        self.qualifiers = qualifiers
# fmt: off
# The invariant enforces AASd-020 only when a value is set.
@invariant(
    lambda self:
    not (self.value is not None)
    or value_consistent_with_xsd_type(self.value, self.value_type),
    "Constraint AASd-020: The value shall be consistent to the data type as defined "
    "in value_type."
)
@reference_in_the_book(section=(5, 7, 2, 8))
@serialization(with_model_type=True)
# fmt: on
class Qualifier(Has_semantics):
    """
    A qualifier is a type-value-pair that makes additional statements w.r.t. the value
    of the element.
    :constraint AASd-006:
    If both the :attr:`~value` and the :attr:`~value_id` of
    a :class:`.Qualifier` are present then the :attr:`~value` needs
    to be identical to the value of the referenced coded value
    in :attr:`~value_id`.
    :constraint AASd-020:
    The value of :attr:`~value` shall be consistent to the data type as
    defined in :attr:`~value_type`.
    """
    type: "Qualifier_type"
    """
    The qualifier type describes the type of the qualifier that is applied to
    the element.
    """
    value_type: "Data_type_def_XSD"
    """
    Data type of the qualifier value.
    """
    value: Optional["Value_data_type"]
    """
    The qualifier value is the value of the qualifier.
    """
    value_id: Optional["Global_reference"]
    """
    Reference to the global unique ID of a coded value.
    """
    # NOTE (review): AASd-006 is documented above but no invariant enforces it
    # in this view — confirm it is checked elsewhere (it requires dereferencing
    # value_id, which cannot be done locally).
    def __init__(
        self,
        type: "Qualifier_type",
        value_type: "Data_type_def_XSD",
        semantic_id: Optional["Global_reference"] = None,
        value: Optional["Value_data_type"] = None,
        value_id: Optional["Global_reference"] = None,
    ) -> None:
        Has_semantics.__init__(self, semantic_id=semantic_id)
        self.type = type
        self.value_type = value_type
        self.value = value
        self.value_id = value_id
# fmt: off
@reference_in_the_book(section=(5, 7, 3))
@serialization(with_model_type=True)
# Every reference in submodels must point to a Submodel.
@invariant(
    lambda self:
    not (self.submodels is not None)
    or (
        all(
            is_model_reference_to(reference, Key_elements.Submodel)
            for reference in self.submodels
        )
    )
)
# derived_from, when set, must point to another Asset_administration_shell.
@invariant(
    lambda self:
    not (self.derived_from is not None)
    or (
        is_model_reference_to(
            self.derived_from,
            Key_elements.Asset_administration_shell
        )
    )
)
# fmt: on
class Asset_administration_shell(Identifiable, Has_data_specification):
    """An asset administration shell."""
    asset_information: "Asset_information"
    """Meta-information about the asset the AAS is representing."""
    submodels: Optional[List["Model_reference"]]
    """
    References to submodels of the AAS.
    A submodel is a description of an aspect of the asset the AAS is representing.
    The asset of an AAS is typically described by one or more submodels.
    Temporarily no submodel might be assigned to the AAS.
    """
    derived_from: Optional["Model_reference"]
    """The reference to the AAS the AAS was derived from."""
    def __init__(
        self,
        ID: Identifier,
        asset_information: "Asset_information",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        administration: Optional["Administrative_information"] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        submodels: Optional[List["Model_reference"]] = None,
        derived_from: Optional["Model_reference"] = None,
    ) -> None:
        # Multiple inheritance: each parent constructor is invoked explicitly.
        Identifiable.__init__(
            self,
            ID=ID,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            administration=administration,
        )
        Has_data_specification.__init__(self, data_specifications=data_specifications)
        self.derived_from = derived_from
        self.asset_information = asset_information
        self.submodels = submodels
@reference_in_the_book(section=(5, 7, 4))
class Asset_information(DBC):
    """
    In :class:`.Asset_information` identifying meta data of the asset that is
    represented by an AAS is defined.
    The asset may either represent an asset type or an asset instance.
    The asset has a globally unique identifier plus – if needed – additional domain
    specific (proprietary) identifiers. However, to support the corner case of very
    first phase of lifecycle where a stabilised/constant global asset identifier does
    not already exist, the corresponding attribute
    :attr:`~global_asset_id` is optional.
    """
    asset_kind: "Asset_kind"
    """
    Denotes whether the Asset is of kind "Type" or "Instance".
    """
    global_asset_id: Optional["Global_reference"]
    """
    Reference to either an Asset object or a global reference to the asset the AAS is
    representing.
    This attribute is required as soon as the AAS is exchanged via partners in the life
    cycle of the asset. In a first phase of the life cycle the asset might not yet have
    a global ID but already an internal identifier. The internal identifier would be
    modelled via :attr:`~specific_asset_id`.
    """
    specific_asset_id: Optional["Identifier_key_value_pair"]
    """
    Additional domain-specific, typically proprietary, Identifier for the asset.
    For example, serial number.
    """
    default_thumbnail: Optional["Resource"]
    """
    Thumbnail of the asset represented by the asset administration shell.
    Used as default.
    """
    def __init__(
        self,
        asset_kind: "Asset_kind",
        global_asset_id: Optional["Global_reference"] = None,
        specific_asset_id: Optional["Identifier_key_value_pair"] = None,
        default_thumbnail: Optional["Resource"] = None,
    ) -> None:
        self.asset_kind = asset_kind
        self.global_asset_id = global_asset_id
        self.specific_asset_id = specific_asset_id
        self.default_thumbnail = default_thumbnail
@reference_in_the_book(section=(5, 7, 4), index=2)
class Asset_kind(Enum):
    """
    Enumeration for denoting whether an element is a type or an instance.
    """
    Type = "Type"
    """
    hardware or software element which specifies the common attributes shared by all
    instances of the type
    [SOURCE: IEC TR 62390:2005-01, 3.1.25]
    """
    Instance = "Instance"
    """
    concrete, clearly identifiable component of a certain type
    .. note::
        It becomes an individual entity of a type, for example a device, by defining
        specific property values.
    .. note::
        In an object oriented view, an instance denotes an object of a class
        (of a type).
    [SOURCE: IEC 62890:2016, 3.1.16] 65/617/CDV
    """
@reference_in_the_book(section=(5, 7, 4), index=3)
class Identifier_key_value_pair(Has_semantics):
    """
    An :class:`.Identifier_key_value_pair` describes a generic identifier as
    key-value pair.
    """
    key: Non_empty_string
    """Key of the identifier"""
    value: Non_empty_string
    """The value of the identifier with the corresponding key."""
    external_subject_id: Optional["Global_reference"]
    """The (external) subject the key belongs to or has meaning to."""
    def __init__(
        self,
        key: Non_empty_string,
        value: Non_empty_string,
        semantic_id: Optional["Global_reference"] = None,
        external_subject_id: Optional["Global_reference"] = None,
    ) -> None:
        # Consistency fix: pass semantic_id by keyword, matching the calling
        # convention of every other Has_semantics subclass in this file.
        Has_semantics.__init__(self, semantic_id=semantic_id)
        self.key = key
        self.value = value
        self.external_subject_id = external_subject_id
# fmt: off
@reference_in_the_book(section=(5, 7, 5))
# Contained submodel elements must have pairwise distinct short IDs.
@invariant(
    lambda self:
    not (self.submodel_elements is not None)
    or (id_shorts_are_unique(self.submodel_elements))
)
@invariant(
    lambda self:
    not (self.submodel_elements is not None)
    or all(
        element.id_short is not None
        for element in self.submodel_elements
    ),
    "Short IDs need to be defined for all the submodel elements."
)
# fmt: on
class Submodel(
    Identifiable, Has_kind, Has_semantics, Qualifiable, Has_data_specification
):
    """
    A submodel defines a specific aspect of the asset represented by the AAS.
    A submodel is used to structure the digital representation and technical
    functionality of an Administration Shell into distinguishable parts. Each submodel
    refers to a well-defined domain or subject matter. Submodels can become
    standardized and, thus, become submodels templates.
    """
    submodel_elements: Optional[List["Submodel_element"]]
    """A submodel consists of zero or more submodel elements."""
    def __init__(
        self,
        ID: Identifier,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        administration: Optional["Administrative_information"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        submodel_elements: Optional[List["Submodel_element"]] = None,
    ) -> None:
        # Multiple inheritance: each parent constructor is invoked explicitly.
        Identifiable.__init__(
            self,
            ID=ID,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            administration=administration,
        )
        Has_kind.__init__(self, kind=kind)
        Has_semantics.__init__(self, semantic_id=semantic_id)
        Qualifiable.__init__(self, qualifiers=qualifiers)
        Has_data_specification.__init__(self, data_specifications=data_specifications)
        self.submodel_elements = submodel_elements
@abstract
@reference_in_the_book(section=(5, 7, 6))
class Submodel_element(
    Referable, Has_kind, Has_semantics, Qualifiable, Has_data_specification
):
    """
    A submodel element is an element suitable for the description and differentiation of
    assets.
    It is recommended to add a semantic ID to a submodel element.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
    ) -> None:
        # Multiple inheritance: each parent constructor is invoked explicitly.
        Referable.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
        )
        Has_kind.__init__(self, kind=kind)
        Has_semantics.__init__(self, semantic_id=semantic_id)
        Qualifiable.__init__(self, qualifiers=qualifiers)
        Has_data_specification.__init__(self, data_specifications=data_specifications)
@reference_in_the_book(section=(5, 7, 7, 16))
@abstract
class Relationship_element(Submodel_element):
    """
    A relationship element is used to define a relationship between two elements
    being either referable (model reference) or external (global reference).
    """
    first: "Reference"
    """
    Reference to the first element in the relationship taking the role of the subject.
    """
    second: "Reference"
    """
    Reference to the second element in the relationship taking the role of the object.
    """
    def __init__(
        self,
        first: "Reference",
        second: "Reference",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
    ) -> None:
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.first = first
        self.second = second
# fmt: off
@reference_in_the_book(section=(5, 7, 7, 17))
# Contained elements must have pairwise distinct, non-None short IDs.
@invariant(
    lambda self:
    not (self.value is not None)
    or id_shorts_are_unique(self.value)
)
@invariant(
    lambda self:
    not (self.value is not None)
    or all(
        element.id_short is not None
        for element in self.value
    ),
    "Short IDs need to be defined for all the elements."
)
@invariant(
    lambda self:
    not (
        self.value is not None
        and (
            self.type_value_list_element == Submodel_element_elements.Property
            or self.type_value_list_element == Submodel_element_elements.Range
        )
    ) or (
        self.value_type_list_element is not None
        and properties_or_ranges_have_value_type(
            self.value,
            self.value_type_list_element
        )
    ),
    "Constraint AASd-109: If type value list element is equal to "
    "Property or Range value type list element shall be set "
    "and all first level child elements shall have the value type as specified in "
    "value type list element.")
@invariant(
    lambda self:
    not (self.value is not None)
    or all(
        submodel_element_is_of_type(element, self.type_value_list_element)
        for element in self.value
    ),
    "Constraint AASd-108: All first level child elements shall have "
    "the same submodel element type as specified in type value list element."
)
@invariant(
    lambda self:
    not (self.value is not None)
    or submodel_elements_have_identical_semantic_ids(self.value),
    "Constraint AASd-114: If two first level child elements "
    "have a semantic ID then they shall be identical."
)
@invariant(
    lambda self:
    not (
        self.value is not None
        and self.semantic_id_list_element is not None
    ) or (
        all(
            not (child.semantic_id is not None)
            or child.semantic_id == self.semantic_id_list_element
            for child in self.value
        )
    ),
    "Constraint AASd-107: If a first level child element has a semantic ID "
    "it shall be identical to semantic ID list element."
)
# fmt: on
class Submodel_element_list(Submodel_element):
    """
    A submodel element list is an ordered collection of submodel elements.
    :constraint AASd-107:
    If a first level child element in a :class:`.Submodel_element_list` has
    a :attr:`~Submodel_element.semantic_id` it
    shall be identical to :attr:`~Submodel_element_list.semantic_id_list_element`.
    :constraint AASd-114:
    If two first level child elements in a :class:`.Submodel_element_list` have
    a :attr:`~Submodel_element.semantic_id` then they shall be identical.
    :constraint AASd-115:
    If a first level child element in a :class:`.Submodel_element_list` does not
    specify a :attr:`~Submodel_element.semantic_id` then the value is assumed to be
    identical to :attr:`~Submodel_element_list.semantic_id_list_element`.
    :constraint AASd-108:
    All first level child elements in a :class:`.Submodel_element_list` shall have
    the same submodel element type as specified in :attr:`~type_value_list_element`.
    :constraint AASd-109:
    If :attr:`~type_value_list_element` is equal to
    :attr:`Submodel_element_elements.Property` or
    :attr:`Submodel_element_elements.Range`
    :attr:`~value_type_list_element` shall be set and all first
    level child elements in the :class:`.Submodel_element_list` shall have
    the value type as specified in :attr:`~value_type_list_element`.
    """
    # NOTE (review): AASd-115 above is an interpretation rule (an assumption on
    # missing semantic IDs), not a checkable condition, hence no invariant for it.
    type_value_list_element: "Submodel_element_elements"
    """
    The submodel element type of the submodel elements contained in the list.
    """
    order_relevant: Optional["bool"]
    """
    Defines whether order in list is relevant. If :attr:`~order_relevant` = ``False``
    then the list is representing a set or a bag.
    Default: ``True``
    """
    value: Optional[List["Submodel_element"]]
    """
    Submodel element contained in the list.
    The list is ordered.
    """
    semantic_id_list_element: Optional["Global_reference"]
    """
    The submodel element type of the submodel elements contained in the list.
    """
    value_type_list_element: Optional["Data_type_def_XSD"]
    """
    The value type of the submodel element contained in the list.
    """
    def __init__(
        self,
        type_value_list_element: "Submodel_element_elements",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        order_relevant: Optional["bool"] = None,
        value: Optional[List["Submodel_element"]] = None,
        semantic_id_list_element: Optional["Global_reference"] = None,
        value_type_list_element: Optional["Data_type_def_XSD"] = None,
    ) -> None:
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.type_value_list_element = type_value_list_element
        # NOTE (review): the documented default ``True`` for order_relevant is
        # not applied here — presumably handled by consumers; confirm.
        self.order_relevant = order_relevant
        self.value = value
        self.semantic_id_list_element = semantic_id_list_element
        self.value_type_list_element = value_type_list_element
# fmt: off
@reference_in_the_book(section=(5, 7, 7, 18))
# Contained elements must have pairwise distinct, non-None short IDs.
@invariant(
    lambda self:
    not (self.value is not None)
    or id_shorts_are_unique(self.value)
)
@invariant(
    lambda self:
    not (self.value is not None)
    or all(
        element.id_short is not None
        for element in self.value
    ),
    "Short IDs need to be defined for all the elements."
)
# fmt: on
class Submodel_element_struct(Submodel_element):
    """
    A submodel element struct is a logical encapsulation of multiple values. It has
    a number of submodel elements.
    """
    value: Optional[List["Submodel_element"]]
    """
    Submodel element contained in the struct.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        value: Optional[List["Submodel_element"]] = None,
    ) -> None:
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.value = value
@abstract
# NOTE (review): this invariant rejects ``category is None`` even though
# category is Optional on Referable — confirm whether a default category is
# applied before validation, or whether data elements must always set it.
@invariant(
    lambda self: self.category == "CONSTANT"
    or self.category == "PARAMETER"
    or self.category == "VARIABLE",
    "Constraint AASd-090: For data elements category shall be one "
    "of the following values: CONSTANT, PARAMETER or VARIABLE",
)
@reference_in_the_book(section=(5, 7, 7, 5))
class Data_element(Submodel_element):
    """
    A data element is a submodel element that is not further composed out of
    other submodel elements.
    A data element is a submodel element that has a value. The type of value differs
    for different subtypes of data elements.
    A controlled value is a value whose meaning is given in an external source
    (see “ISO/TS 29002-10:2009(E)”).
    :constraint AASd-090:
    For data elements :attr:`~category` shall be one of the following
    values: ``CONSTANT``, ``PARAMETER`` or ``VARIABLE``.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
    ) -> None:
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
# fmt: off
@reference_in_the_book(section=(5, 7, 7, 13))
# The value, when set, must be consistent with the declared value_type
# (the AASd-020 analogue for properties).
@invariant(
    lambda self:
    not (self.value is not None)
    or value_consistent_with_xsd_type(self.value, self.value_type)
)
# fmt: on
class Property(Data_element):
    """
    A property is a data element that has a single value.
    :constraint AASd-007:
    If both, the :attr:`~value` and the :attr:`~value_id` are
    present then the value of :attr:`~value` needs to be identical to
    the value of the referenced coded value in :attr:`~value_id`.
    """
    # NOTE (review): AASd-007 is documented above but no invariant enforces it
    # here — it requires dereferencing value_id, which cannot be done locally;
    # confirm it is checked elsewhere.
    value_type: "Data_type_def_XSD"
    """
    Data type of the value
    """
    value: Optional["Value_data_type"]
    """
    The value of the property instance.
    """
    value_id: Optional["Global_reference"]
    """
    Reference to the global unique ID of a coded value.
    """
    def __init__(
        self,
        value_type: "Data_type_def_XSD",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        value: Optional["Value_data_type"] = None,
        value_id: Optional["Global_reference"] = None,
    ) -> None:
        Data_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.value_type = value_type
        self.value = value
        self.value_id = value_id
@reference_in_the_book(section=(5, 7, 7, 11))
class Multi_language_property(Data_element):
    """
    A property is a data element that has a multi-language value.

    :constraint AASd-012:
        If both the :attr:`~value` and the :attr:`~value_id` are present then for each
        string in a specific language the meaning must be the same as specified in
        :attr:`~value_id`.
    """
    value: Optional["Lang_string_set"]
    """
    The value of the property instance.
    """
    value_id: Optional["Global_reference"]
    """
    Reference to the global unique ID of a coded value.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        value: Optional["Lang_string_set"] = None,
        value_id: Optional["Global_reference"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Data_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.value = value
        self.value_id = value_id
# fmt: off
@reference_in_the_book(section=(5, 7, 7, 14))
# Each bound, when present, must be parseable as the declared XSD value type.
@invariant(
    lambda self:
    not (self.min is not None)
    or value_consistent_with_xsd_type(self.min, self.value_type)
)
@invariant(
    lambda self:
    not (self.max is not None)
    or value_consistent_with_xsd_type(self.max, self.value_type)
)
# fmt: on
class Range(Data_element):
    """
    A range data element is a data element that defines a range with min and max.
    """
    value_type: "Data_type_def_XSD"
    """
    Data type of the min und max
    """
    min: Optional["Value_data_type"]
    """
    The minimum value of the range.

    If the min value is missing, then the value is assumed to be negative infinite.
    """
    max: Optional["Value_data_type"]
    """
    The maximum value of the range.

    If the max value is missing, then the value is assumed to be positive infinite.
    """
    def __init__(
        self,
        value_type: "Data_type_def_XSD",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        # NOTE: ``min``/``max`` intentionally shadow the builtins to mirror
        # the attribute names from the book.
        min: Optional["Value_data_type"] = None,
        max: Optional["Value_data_type"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Data_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.value_type = value_type
        self.min = min
        self.max = max
@reference_in_the_book(section=(5, 7, 7, 15))
class Reference_element(Data_element):
    """
    A reference element is a data element that defines a logical reference to another
    element within the same or another AAS or a reference to an external object or
    entity.
    """
    value: Optional["Reference"]
    """
    Global reference to an external object or entity or a logical reference to
    another element within the same or another AAS (i.e. a model reference to
    a Referable).
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        value: Optional["Reference"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Data_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.value = value
# TODO (mristin, 2022-03-26):
# Uncomment once the discussion regarding the covariant return types has been resolved.
# @reference_in_the_book(section=(5, 7, 7, 9))
# class Global_reference_element(Reference_element):
# """
# A global reference element is a data element that references an external object or entity.
# """
#
# value: Optional["Global_reference"]
# """
# Global reference to an external object or entity.
# """
#
# def __init__(
# self,
# id_short: Non_empty_string,
# extensions: Optional[List["Extension"]] = None,
# display_name: Optional["Lang_string_set"] = None,
# category: Optional[Non_empty_string] = None,
# description: Optional["Lang_string_set"] = None,
# kind: Optional["Modeling_kind"] = None,
# semantic_id: Optional["Global_reference"] = None,
# qualifiers: Optional[List[Qualifier]] = None,
# data_specifications: Optional[List["Global_reference"]] = None,
# value: Optional["Global_reference"] = None,
# ) -> None:
# Reference_element.__init__(
# self,
# extensions=extensions,
# id_short=id_short,
# display_name=display_name,
# category=category,
# description=description,
# kind=kind,
# semantic_id=semantic_id,
# qualifiers=qualifiers,
# data_specifications=data_specifications,
# )
# self.value = value
#
#
# @reference_in_the_book(section=(5, 7, 7, 10))
# class Model_reference_element(Reference_element):
# """
# A model reference element is a data element that defines
# a logical reference to another element within the same or another AAS
# """
#
# value: Optional["Model_reference"]
# """
# A logical reference to another element within the same or another AAS
# """
#
# def __init__(
# self,
# id_short: Non_empty_string,
# extensions: Optional[List["Extension"]] = None,
# display_name: Optional["Lang_string_set"] = None,
# category: Optional[Non_empty_string] = None,
# description: Optional["Lang_string_set"] = None,
# kind: Optional["Modeling_kind"] = None,
# semantic_id: Optional["Global_reference"] = None,
# qualifiers: Optional[List[Qualifier]] = None,
# data_specifications: Optional[List["Global_reference"]] = None,
# value: Optional["Model_reference"] = None,
# ) -> None:
# Reference_element.__init__(
# self,
# extensions=extensions,
# id_short=id_short,
# display_name=display_name,
# category=category,
# description=description,
# kind=kind,
# semantic_id=semantic_id,
# qualifiers=qualifiers,
# data_specifications=data_specifications,
# )
# self.value = value
@reference_in_the_book(section=(5, 7, 7, 4))
class Blob(Data_element):
    """
    A :class:`.Blob` is a data element that represents a file that is contained with its
    source code in the value attribute.
    """
    MIME_type: Content_type
    """
    MIME type of the content of the :class:`.Blob`.

    The MIME type states which file extensions the file can have.
    Valid values are e.g. ``application/json``, ``application/xls``, ``image/jpg``.
    The allowed values are defined as in RFC2046.

    .. note::
        NOTE(review): the name is inconsistent with :attr:`File.content_type`;
        renaming would break callers, so it is left as-is.
    """
    value: Optional["Blob_type"]
    """
    The value of the :class:`.Blob` instance of a blob data element.

    .. note::
        In contrast to the file property the file content is stored directly as value
        in the :class:`.Blob` data element.
    """
    def __init__(
        self,
        MIME_type: Content_type,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        value: Optional["Blob_type"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Data_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.MIME_type = MIME_type
        self.value = value
@reference_in_the_book(section=(5, 7, 7, 8))
class File(Data_element):
    """
    A File is a data element that represents an address to a file.

    The value is an URI that can represent an absolute or relative path.
    """
    content_type: "Content_type"
    """
    Content type of the content of the file.

    The content type states which file extensions the file can have.
    """
    value: Optional["Path_type"]
    """
    Path and name of the referenced file (with file extension).

    The path can be absolute or relative.
    """
    def __init__(
        self,
        content_type: "Content_type",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        value: Optional["Path_type"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Data_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.content_type = content_type
        self.value = value
@reference_in_the_book(section=(5, 7, 7, 1))
class Annotated_relationship_element(Relationship_element):
    """
    An annotated relationship element is a relationship element that can be annotated
    with additional data elements.
    """
    annotation: Optional[List[Data_element]]
    """
    A data element that represents an annotation that holds for the relationship
    between the two elements
    """
    def __init__(
        self,
        first: "Reference",
        second: "Reference",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        annotation: Optional[List[Data_element]] = None,
    ) -> None:
        """Initialize with the given properties."""
        # ``first``/``second`` are required by the parent relationship element;
        # only the annotation list is added here.
        Relationship_element.__init__(
            self,
            first=first,
            second=second,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.annotation = annotation
@reference_in_the_book(section=(5, 7, 7, 6), index=1)
class Entity_type(Enum):
    """
    Enumeration for denoting whether an entity is a self-managed entity or a co-managed
    entity.
    """
    Co_managed_entity = "COMANAGEDENTITY"
    """
    For co-managed entities there is no separate AAS. Co-managed entities need to be
    part of a self-managed entity.
    """
    Self_managed_entity = "SELFMANAGEDENTITY"
    """
    Self-Managed Entities have their own AAS but can be part of the bill of material of
    a composite self-managed entity. The asset of an I4.0 Component is a self-managed
    entity per definition.
    """
# fmt: off
@reference_in_the_book(section=(5, 7, 7, 6))
# Constraint AASd-014: for a self-managed entity exactly one of
# global_asset_id / specific_asset_id must be set; otherwise both must be unset.
# (The original erroneously tested ``global_asset_id`` in all four places, which
# made the self-managed arm unsatisfiable.)
@invariant(
    lambda self:
    (
        self.entity_type == Entity_type.Self_managed_entity
        and (
            (
                self.global_asset_id is not None
                and self.specific_asset_id is None
            ) or (
                self.global_asset_id is None
                and self.specific_asset_id is not None
            )
        )
    ) or (
        self.global_asset_id is None
        and self.specific_asset_id is None
    ),
    "Constraint AASd-014: Either the attribute global asset ID or "
    "specific asset ID must be set if entity type is set to 'SelfManagedEntity'. "
    "They are not existing otherwise."
)
# fmt: on
class Entity(Submodel_element):
    """
    An entity is a submodel element that is used to model entities.

    :constraint AASd-014:
        Either the attribute :attr:`~global_asset_id` or :attr:`~specific_asset_id`
        of an :class:`.Entity` must be set if :attr:`~entity_type` is set to
        ``SelfManagedEntity``. They are not existing otherwise.
    """
    entity_type: "Entity_type"
    """
    Describes whether the entity is a co-managed entity or a self-managed entity.
    """
    statements: Optional[List["Submodel_element"]]
    """
    Describes statements applicable to the entity by a set of submodel elements,
    typically with a qualified value.
    """
    global_asset_id: Optional["Reference"]
    """
    Reference to the asset the entity is representing.
    """
    specific_asset_id: Optional["Identifier_key_value_pair"]
    """
    Reference to an identifier key value pair representing a specific identifier
    of the asset represented by the asset administration shell.
    """
    def __init__(
        self,
        entity_type: "Entity_type",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        statements: Optional[List["Submodel_element"]] = None,
        global_asset_id: Optional["Reference"] = None,
        specific_asset_id: Optional["Identifier_key_value_pair"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.statements = statements
        self.entity_type = entity_type
        self.global_asset_id = global_asset_id
        self.specific_asset_id = specific_asset_id
@reference_in_the_book(section=(5, 7, 7, 2), index=2)
class State_of_event(Enum):
    """
    State of an event.
    """
    on = "ON"
    """
    Event is on.
    """
    off = "OFF"
    """
    Event is off.
    """
@reference_in_the_book(section=(5, 7, 7, 2), index=2)
class State_of_event(Enum):
"""
State of an event
"""
on = "ON"
"""
Event is on
"""
off = "OFF"
"""
Event is off.
"""
@reference_in_the_book(section=(5, 7, 7, 2), index=3)
class Event_payload(DBC):
    """
    Defines the necessary information of an event instance sent out or received.

    .. note::
        The payload is not part of the information model as exchanged via
        the AASX package format but used in re-active Asset Administration Shells.
    """
    source: "Model_reference"
    """
    Reference to the source event element, including identification of
    :class:`.Asset_administration_shell`, :class:`.Submodel`,
    :class:`.Submodel_element`'s.
    """
    source_semantic_id: Optional["Global_reference"]
    """
    :attr:`~Has_semantics.semantic_id` of the source event element, if available
    """
    observable_reference: "Model_reference"
    """
    Reference to the referable, which defines the scope of the event.

    Can be :class:`.Asset_administration_shell`, :class:`.Submodel` or
    :class:`.Submodel_element`.
    """
    observable_semantic_id: Optional["Global_reference"]
    """
    :attr:`~Has_semantics.semantic_id` of the referable which defines the scope of
    the event, if available.
    """
    topic: Optional["Non_empty_string"]
    """
    Information for the outer message infrastructure for scheduling the event to
    the respective communication channel.
    """
    subject_id: Optional["Global_reference"]
    """
    Subject, who/which initiated the creation.
    """
    time_stamp: "Date_time_stamp_UTC"
    """
    Timestamp in UTC, when this event was triggered.
    """
    payload: Optional["Non_empty_string"]
    """
    Event specific payload.
    """
    def __init__(
        self,
        source: "Model_reference",
        observable_reference: "Model_reference",
        time_stamp: "Date_time_stamp_UTC",
        source_semantic_id: Optional["Global_reference"] = None,
        observable_semantic_id: Optional["Global_reference"] = None,
        topic: Optional["Non_empty_string"] = None,
        subject_id: Optional["Global_reference"] = None,
        payload: Optional["Non_empty_string"] = None,
    ) -> None:
        """Initialize with the given properties."""
        # Plain value object: no parent constructor, only direct assignments.
        self.source = source
        self.observable_reference = observable_reference
        self.time_stamp = time_stamp
        self.source_semantic_id = source_semantic_id
        self.observable_semantic_id = observable_semantic_id
        self.topic = topic
        self.subject_id = subject_id
        self.payload = payload
@abstract
@reference_in_the_book(section=(5, 7, 7, 7))
class Event_element(Submodel_element):
    """
    An event element.

    .. note::
        Abstract base for concrete event elements such as
        :class:`.Basic_event_element`.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
    ) -> None:
        """Initialize with the given properties."""
        # No attributes of its own; everything is delegated to the parent.
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
@reference_in_the_book(section=(5, 7, 7, 2))
class Basic_event_element(Event_element):
    """
    A basic event element.
    """
    observed: "Model_reference"
    """
    Reference to the :class:`.Referable`, which defines the scope of the event.

    Can be :class:`.Asset_administration_shell`, :class:`.Submodel`, or
    :class:`.Submodel_element`. Reference to a referable, e.g. a data element or
    a submodel, that is being observed.
    """
    direction: "Direction"
    """
    Direction of event.

    Can be ``{ Input, Output }``.
    """
    state: "State_of_event"
    """
    State of event.

    Can be ``{ On, Off }``.
    """
    message_topic: Optional["Non_empty_string"]
    """
    Information for the outer message infrastructure for scheduling the event to the
    respective communication channel.
    """
    message_broker: Optional["Model_reference"]
    """
    Information, which outer message infrastructure shall handle messages for
    the :class:`.Event_element`.

    Refers to a :class:`.Submodel`, :class:`.Submodel_element_list`,
    :class:`.Submodel_element_struct` or :class:`.Entity`, which contains
    :class:`.Data_element`'s describing the proprietary specification for
    the message broker.

    .. note::
        For different message infrastructure, e.g. OPC UA or MQTT or AMQP, these
        proprietary specification could be standardized by having respective Submodels.
    """
    last_update: Optional["Date_time_stamp_UTC"]
    """
    Timestamp in UTC, when the last event was received (input direction) or sent
    (output direction).
    """
    min_interval: Optional["Date_time_stamp_UTC"]
    """
    For input direction, reports on the maximum frequency, the software entity behind
    the respective Referable can handle input events. For output events, specifies
    the maximum frequency of outputting this event to an outer infrastructure.

    Might be not specified, that is, there is no minimum interval.
    """
    max_interval: Optional["Date_time_stamp_UTC"]
    """
    For input direction: not applicable.

    For output direction: maximum interval in time, the respective Referable shall send
    an update of the status of the event, even if no other trigger condition for
    the event was not met. Might be not specified, that is, there is no maximum interval.
    """
    def __init__(
        self,
        observed: "Model_reference",
        direction: "Direction",
        state: "State_of_event",
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List[Qualifier]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        message_topic: Optional["Non_empty_string"] = None,
        message_broker: Optional["Model_reference"] = None,
        last_update: Optional["Date_time_stamp_UTC"] = None,
        min_interval: Optional["Date_time_stamp_UTC"] = None,
        max_interval: Optional["Date_time_stamp_UTC"] = None,
    ) -> None:
        """Initialize with the given properties."""
        Event_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.observed = observed
        self.direction = direction
        self.state = state
        self.message_topic = message_topic
        self.message_broker = message_broker
        self.last_update = last_update
        self.min_interval = min_interval
        self.max_interval = max_interval
@reference_in_the_book(section=(5, 7, 7, 12))
class Operation(Submodel_element):
    """
    An operation is a submodel element with input and output variables.
    """
    input_variables: Optional[List["Operation_variable"]]
    """
    Input parameter of the operation.
    """
    output_variables: Optional[List["Operation_variable"]]
    """
    Output parameter of the operation.
    """
    inoutput_variables: Optional[List["Operation_variable"]]
    """
    Parameter that is input and output of the operation.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        input_variables: Optional[List["Operation_variable"]] = None,
        output_variables: Optional[List["Operation_variable"]] = None,
        inoutput_variables: Optional[List["Operation_variable"]] = None,
    ) -> None:
        """Initialize with the given properties."""
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
        self.input_variables = input_variables
        self.output_variables = output_variables
        self.inoutput_variables = inoutput_variables
@reference_in_the_book(section=(5, 7, 7, 13), index=1)
class Operation_variable(DBC):
    """
    An operation variable is a submodel element that is used as input or output variable
    of an operation.

    .. note::
        :class:`.Operation_variable` is introduced as separate class to enable future
        extensions, e.g. for adding a default value, cardinality (option/mandatory).
    """
    value: "Submodel_element"
    """
    Describes the needed argument for an operation via a submodel element
    """
    def __init__(self, value: "Submodel_element") -> None:
        """Initialize with the given ``value``."""
        self.value = value
@reference_in_the_book(section=(5, 7, 7, 4))
class Capability(Submodel_element):
    """
    A capability is the implementation-independent description of the potential of an
    asset to achieve a certain effect in the physical or virtual world.

    .. note::
        The :attr:`~semantic_id` of a capability is typically an ontology.
        Thus, reasoning on capabilities is enabled.
    """
    def __init__(
        self,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        kind: Optional["Modeling_kind"] = None,
        semantic_id: Optional["Global_reference"] = None,
        qualifiers: Optional[List["Qualifier"]] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
    ) -> None:
        """Initialize with the given properties."""
        # No attributes of its own; everything is delegated to the parent.
        Submodel_element.__init__(
            self,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            kind=kind,
            semantic_id=semantic_id,
            qualifiers=qualifiers,
            data_specifications=data_specifications,
        )
# fmt: off
@reference_in_the_book(section=(5, 7, 8))
@serialization(with_model_type=True)
@invariant(
    lambda self:
    not (self.category is not None)
    or concept_description_category_is_valid(self.category),
    # NOTE: the message previously contained a garbled "',;'" between
    # 'CAPABILITY' and 'RELATIONSHIP'; fixed to a plain comma-separated list.
    "Constraint AASd-051: A concept description shall have one of "
    "the following categories: 'VALUE', 'PROPERTY', 'REFERENCE', 'DOCUMENT', "
    "'CAPABILITY', 'RELATIONSHIP', 'COLLECTION', 'FUNCTION', 'EVENT', 'ENTITY', "
    "'APPLICATION_CLASS', 'QUALIFIER', 'VIEW'.")
# fmt: on
class Concept_description(Identifiable, Has_data_specification):
    """
    The semantics of a property or other elements that may have a semantic description
    is defined by a concept description. The description of the concept should follow a
    standardized schema (realized as data specification template).

    :constraint AASd-051:
        A :class:`.Concept_description` shall have one of the following categories
        ``VALUE``, ``PROPERTY``, ``REFERENCE``, ``DOCUMENT``, ``CAPABILITY``,
        ``RELATIONSHIP``, ``COLLECTION``, ``FUNCTION``, ``EVENT``, ``ENTITY``,
        ``APPLICATION_CLASS``, ``QUALIFIER``, ``VIEW``.

        Default: ``PROPERTY``.
    """
    is_case_of: Optional[List["Global_reference"]]
    """
    Reference to an external definition the concept is compatible to or was derived from

    .. note::
        Compare to is-case-of relationship in ISO 13584-32 & IEC EN 61360
    """
    def __init__(
        self,
        ID: Identifier,
        extensions: Optional[List["Extension"]] = None,
        id_short: Optional[Non_empty_string] = None,
        display_name: Optional["Lang_string_set"] = None,
        category: Optional[Non_empty_string] = None,
        description: Optional["Lang_string_set"] = None,
        checksum: Optional["Non_empty_string"] = None,
        administration: Optional["Administrative_information"] = None,
        data_specifications: Optional[List["Global_reference"]] = None,
        is_case_of: Optional[List["Global_reference"]] = None,
    ) -> None:
        """Initialize with the given properties."""
        # Both parents are initialized explicitly since this class mixes in
        # identifiability and data-specification support.
        Identifiable.__init__(
            self,
            ID=ID,
            extensions=extensions,
            id_short=id_short,
            display_name=display_name,
            category=category,
            description=description,
            checksum=checksum,
            administration=administration,
        )
        Has_data_specification.__init__(self, data_specifications=data_specifications)
        self.is_case_of = is_case_of
@abstract
@reference_in_the_book(section=(5, 7, 10, 4))
@serialization(with_model_type=True)
class Reference(DBC):
    """
    Reference to either a model element of the same or another AAs or to an external
    entity.
    """
@reference_in_the_book(section=(5, 7, 10, 2))
@serialization(with_model_type=True)
class Global_reference(Reference):
    """
    Reference to an external entity.
    """
    value: "Identifier"
    """
    Unique identifier

    The identifier can be a concatenation of different identifiers, for example
    representing an IRDI path etc.
    """
    def __init__(self, value: "Identifier") -> None:
        """Initialize with the given ``value``."""
        self.value = value
# A model reference must contain at least one key.
@invariant(lambda self: len(self.keys) >= 1)
@reference_in_the_book(section=(5, 7, 10, 3))
@serialization(with_model_type=True)
class Model_reference(Reference):
    """
    Reference to a model element of the same or another AAS.

    A model reference is an ordered list of keys, each key referencing an element.
    The complete list of keys may for example be concatenated to a path that then gives
    unique access to an element.
    """
    keys: List["Key"]
    """
    Unique references in their name space.
    """
    referred_semantic_id: Optional["Global_reference"]
    """
    :attr:`Has_semantics.semantic_id` of the referenced model element.
    """
    def __init__(
        self,
        keys: List["Key"],
        referred_semantic_id: Optional["Global_reference"] = None,
    ) -> None:
        """Initialize with the given properties."""
        self.keys = keys
        self.referred_semantic_id = referred_semantic_id
@reference_in_the_book(section=(5, 7, 10, 3), index=1)
class Key(DBC):
    """A key is a reference to an element by its ID."""
    type: "Key_elements"
    """
    Denote which kind of entity is referenced.

    In case type = FragmentReference the key represents a bookmark or a similar local
    identifier within its parent element as specified by the key that precedes this key.

    In all other cases the key references a model element of the same or of another AAS.
    The name of the model element is explicitly listed.
    """
    value: Non_empty_string
    """The key value, for example an IRDI or an URI"""
    # NOTE: ``type`` intentionally shadows the builtin to mirror the book's
    # attribute name.
    def __init__(self, type: "Key_elements", value: Non_empty_string) -> None:
        """Initialize with the given ``type`` and ``value``."""
        self.type = type
        self.value = value
@reference_in_the_book(section=(5, 7, 10, 3), index=5)
class Identifiable_elements(Enum):
    """
    Enumeration of all identifiable elements within an asset administration shell.
    """
    Asset_administration_shell = "AssetAdministrationShell"
    Concept_description = "ConceptDescription"
    Submodel = "Submodel"
@reference_in_the_book(section=(5, 7, 10, 3), index=4)
class Submodel_element_elements(Enum):
    """
    Enumeration of all submodel elements within an asset administration shell.
    """
    Annotated_relationship_element = "AnnotatedRelationshipElement"
    Basic_event_element = "BasicEventElement"
    Blob = "Blob"
    Capability = "Capability"
    Data_element = "DataElement"
    """
    Data Element.

    .. note::
        Data Element is abstract, *i.e.* if a key uses :attr:`~Data_element`
        the reference may be a :class:`.Property`, a :class:`.File` etc.
    """
    Entity = "Entity"
    Event_element = "EventElement"
    """
    Event element

    .. note::
        :class:`.Event_element` is abstract.
    """
    File = "File"
    Multi_language_property = "MultiLanguageProperty"
    """
    Property with a value that can be provided in multiple languages
    """
    Operation = "Operation"
    Property = "Property"
    Range = "Range"
    """
    Range with min and max
    """
    Reference_element = "ReferenceElement"
    """
    Reference
    """
    Relationship_element = "RelationshipElement"
    """
    Relationship
    """
    Submodel_element = "SubmodelElement"
    """
    Submodel Element

    .. note::
        Submodel Element is abstract, i.e. if a key uses
        :attr:`Submodel_element` the reference may be a :class:`.Property`,
        a :class:`.Submodel_element_list`, an :class:`.Operation` etc.
    """
    Submodel_element_list = "SubmodelElementList"
    """
    List of Submodel Elements
    """
    Submodel_element_struct = "SubmodelElementStruct"
    """
    Struct of Submodel Elements
    """
@reference_in_the_book(section=(5, 7, 10, 3), index=3)
# This enumeration must remain a superset of the submodel-element and
# identifiable-element enumerations; the decorator enforces that statically.
@is_superset_of(enums=[Submodel_element_elements, Identifiable_elements])
class Referable_elements(Enum):
    """
    Enumeration of all referable elements within an asset administration shell
    """
    Annotated_relationship_element = "AnnotatedRelationshipElement"
    Asset_administration_shell = "AssetAdministrationShell"
    Basic_event_element = "BasicEventElement"
    Blob = "Blob"
    Capability = "Capability"
    Concept_description = "ConceptDescription"
    Data_element = "DataElement"
    """
    Data element.

    .. note::
        Data Element is abstract, *i.e.* if a key uses :attr:`~Data_element`
        the reference may be a :class:`.Property`, a :class:`.File` *etc.*
    """
    Entity = "Entity"
    Event_element = "EventElement"
    """
    Event.

    .. note::
        :class:`.Event_element` is abstract.
    """
    File = "File"
    Multi_language_property = "MultiLanguageProperty"
    """
    Property with a value that can be provided in multiple languages
    """
    Operation = "Operation"
    Property = "Property"
    Range = "Range"
    """
    Range with min and max
    """
    Reference_element = "ReferenceElement"
    """
    Reference
    """
    Relationship_element = "RelationshipElement"
    """
    Relationship
    """
    Submodel = "Submodel"
    Submodel_element = "SubmodelElement"
    """
    Submodel Element

    .. note::
        Submodel Element is abstract, *i.e.* if a key uses :attr:`~Submodel_element`
        the reference may be a :class:`.Property`, an :class:`.Operation` *etc.*
    """
    Submodel_element_list = "SubmodelElementList"
    """
    List of Submodel Elements
    """
    Submodel_element_struct = "SubmodelElementStruct"
    """
    Struct of Submodel Elements
    """
@reference_in_the_book(section=(5, 7, 10, 3), index=2)
@is_superset_of(enums=[Referable_elements])
class Key_elements(Enum):
    """
    Enumeration of different key value types within a key.

    .. note::
        Extends :class:`.Referable_elements` (see :func:`is_superset_of`) with
        the two non-referable kinds :attr:`~Fragment_reference` and
        :attr:`~Global_reference`.
    """

    Fragment_reference = "FragmentReference"
    """
    Bookmark or a similar local identifier of a subordinate part of
    a primary resource
    """
    Global_reference = "GlobalReference"
    Annotated_relationship_element = "AnnotatedRelationshipElement"
    Asset_administration_shell = "AssetAdministrationShell"
    Basic_event_element = "BasicEventElement"
    Blob = "Blob"
    Capability = "Capability"
    Concept_description = "ConceptDescription"
    Data_element = "DataElement"
    """
    Data element.

    .. note::
        Data Element is abstract, *i.e.* if a key uses :attr:`~Data_element`
        the reference may be a Property, a File *etc.*
    """
    Entity = "Entity"
    Event_element = "EventElement"
    """
    Event.

    .. note::
        :class:`.Event_element` is abstract.
    """
    File = "File"
    Multi_language_property = "MultiLanguageProperty"
    """Property with a value that can be provided in multiple languages"""
    Operation = "Operation"
    Property = "Property"
    Range = "Range"
    """Range with min and max"""
    Reference_element = "ReferenceElement"
    """
    Reference
    """
    Relationship_element = "RelationshipElement"
    """
    Relationship
    """
    Submodel = "Submodel"
    Submodel_element = "SubmodelElement"
    """
    Submodel Element

    .. note::
        Submodel Element is abstract, *i.e.* if a key uses :attr:`~Submodel_element`
        the reference may be a :class:`.Property`, an :class:`.Operation` *etc.*
    """
    Submodel_element_list = "SubmodelElementList"
    """
    List of Submodel Elements
    """
    Submodel_element_struct = "SubmodelElementStruct"
    """
    Struct of Submodel Elements
    """
@reference_in_the_book(section=(5, 7, 11, 3))
class Data_type_def_XSD(Enum):
    """
    Enumeration listing all xsd anySimpleTypes

    The values are the qualified names of the XSD built-in datatypes as
    defined in the W3C XML Schema Part 2 (Datatypes) specification.
    """

    Any_URI = "xs:anyURI"
    Base_64_binary = "xs:base64Binary"
    Boolean = "xs:boolean"
    Date = "xs:date"
    Date_time = "xs:dateTime"
    Date_time_stamp = "xs:dateTimeStamp"
    Decimal = "xs:decimal"
    Double = "xs:double"
    Duration = "xs:duration"
    Float = "xs:float"
    G_day = "xs:gDay"
    G_month = "xs:gMonth"
    G_month_day = "xs:gMonthDay"
    G_year = "xs:gYear"
    G_year_month = "xs:gYearMonth"
    Hex_binary = "xs:hexBinary"
    String = "xs:string"
    Time = "xs:time"
    Day_time_duration = "xs:dayTimeDuration"
    Year_month_duration = "xs:yearMonthDuration"
    Integer = "xs:integer"
    Long = "xs:long"
    Int = "xs:int"
    Short = "xs:short"
    Byte = "xs:byte"
    # Bug fix: the XSD built-in type is spelled "xs:nonNegativeInteger"
    # (lower-case leading letter, consistent with "xs:nonPositiveInteger"
    # below); the previous value "xs:NonNegativeInteger" is not a valid
    # XSD type name.
    Non_negative_integer = "xs:nonNegativeInteger"
    Positive_integer = "xs:positiveInteger"
    Unsigned_long = "xs:unsignedLong"
    Unsigned_int = "xs:unsignedInt"
    Unsigned_short = "xs:unsignedShort"
    Unsigned_byte = "xs:unsignedByte"
    Non_positive_integer = "xs:nonPositiveInteger"
    Negative_integer = "xs:negativeInteger"
@reference_in_the_book(section=(5, 7, 12, 3), index=4)
class Data_type_def_RDF(Enum):
    """
    Enumeration listing all RDF types
    """

    Lang_string = "rdf:langString"
    """
    String with a language tag

    .. note::
        RDF requires IETF BCP 47 language tags, i.e. simple two-letter language tags
        for Locales like “de” conformant to ISO 639-1 are allowed as well as language
        tags plus extension like “de-DE” for country code, dialect etc. like in “en-US”
        or “en-GB” for English (United Kingdom) and English (United States).
        IETF language tags are referencing ISO 639, ISO 3166 and ISO 15924.
    """
@reference_in_the_book(section=(5, 7, 12, 2))
@is_superset_of(enums=[Data_type_def_XSD, Data_type_def_RDF])
class Data_type_def(Enum):
    """
    string with values of enumerations :class:`.Data_type_def_XSD`,
    :class:`.Data_type_def_RDF`
    """

    Any_URI = "xs:anyURI"
    Base_64_binary = "xs:base64Binary"
    Boolean = "xs:boolean"
    Date = "xs:date"
    Date_time = "xs:dateTime"
    Date_time_stamp = "xs:dateTimeStamp"
    Decimal = "xs:decimal"
    Double = "xs:double"
    Duration = "xs:duration"
    Float = "xs:float"
    G_day = "xs:gDay"
    G_month = "xs:gMonth"
    G_month_day = "xs:gMonthDay"
    G_year = "xs:gYear"
    G_year_month = "xs:gYearMonth"
    Hex_binary = "xs:hexBinary"
    String = "xs:string"
    Time = "xs:time"
    Day_time_duration = "xs:dayTimeDuration"
    Year_month_duration = "xs:yearMonthDuration"
    Integer = "xs:integer"
    Long = "xs:long"
    Int = "xs:int"
    Short = "xs:short"
    Byte = "xs:byte"
    # Bug fix: the XSD built-in type is spelled "xs:nonNegativeInteger"
    # (lower-case leading letter, consistent with "xs:nonPositiveInteger"
    # below); the previous value "xs:NonNegativeInteger" is not a valid
    # XSD type name and must mirror :attr:`Data_type_def_XSD.Non_negative_integer`.
    Non_negative_integer = "xs:nonNegativeInteger"
    Positive_integer = "xs:positiveInteger"
    Unsigned_long = "xs:unsignedLong"
    Unsigned_int = "xs:unsignedInt"
    Unsigned_short = "xs:unsignedShort"
    Unsigned_byte = "xs:unsignedByte"
    Non_positive_integer = "xs:nonPositiveInteger"
    Negative_integer = "xs:negativeInteger"
    Lang_string = "rdf:langString"
@reference_in_the_book(section=(5, 7, 12, 1))
class Lang_string(DBC):
    """Strings with language tags"""

    language: BCP_47_language_tag
    """Language tag conforming to BCP 47"""

    text: str
    """Text in the :attr:`~language`"""

    def __init__(self, language: BCP_47_language_tag, text: str) -> None:
        """Initialize with the given :paramref:`language` and :paramref:`text`."""
        self.language = language
        self.text = text
@reference_in_the_book(section=(5, 7, 12, 2))
# Invariants: at least one entry, and no two entries for the same language.
@invariant(lambda self: lang_strings_have_unique_languages(self.lang_strings))
@invariant(lambda self: len(self.lang_strings) >= 1)
class Lang_string_set(DBC):
    """
    Array of elements of type langString

    .. note::
        langString is a RDF data type.

        A langString is a string value tagged with a language code.
        It depends on the serialization rules for a technology how
        this is realized.
    """

    lang_strings: List[Lang_string]
    """Strings in different languages"""

    def __init__(self, lang_strings: List[Lang_string]) -> None:
        """Initialize with the given :paramref:`lang_strings`."""
        self.lang_strings = lang_strings
@reference_in_the_book(section=(5, 7, 9))
class Environment:
    """
    Container for the sets of different identifiables.

    .. note::
        w.r.t. file exchange: There is exactly one environment independent on how many
        files the contained elements are split. If the file is split then there
        shall be no element with the same identifier in two different files.
    """

    asset_administration_shells: Optional[List[Asset_administration_shell]]
    """
    Asset administration shell
    """

    submodels: Optional[List[Submodel]]
    """
    Submodel
    """

    concept_descriptions: Optional[List[Concept_description]]
    """
    Concept description
    """

    def __init__(
        self,
        asset_administration_shells: Optional[List[Asset_administration_shell]] = None,
        submodels: Optional[List[Submodel]] = None,
        concept_descriptions: Optional[List[Concept_description]] = None,
    ) -> None:
        """Initialize with the given identifiables; all collections default to None."""
        self.asset_administration_shells = asset_administration_shells
        self.submodels = submodels
        self.concept_descriptions = concept_descriptions
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#################
import gzip
from ..core.met import MET
from .csv_read import read_aim_csv, read_opc_csv, read_csv
from .txt_read import read_aim_txt, read_opc_txt
from .nc_read import read_mpl
#################
"""
mypysmps.io.read
================
Automatic reading of files by detecting format:
read
determine_filetype
Created on Thu Jul 9 14:37 2020
@author: flovan / fvanden
Revision history: 09.07.2020 - Created
20.07.2020 - filetype added to allow for different
file organisations
                  30.09.2020 - metadata added
"""
## -------------------------------------------------------------------------- ##
def read(filename, fileorg='AIM', **kwargs):
    """
    Read a SMPS file and return a SMPS object

    Parameters
    ----------
    filename : str
        path and name of file to read
    fileorg : str
        refers to the organisation of the file

    Returns
    -------
    smps : smps
        mysmps.core.smps object

    Raises
    ------
    ValueError
        if a gzip file cannot be read while compressed
    TypeError
        if the file organisation or file format is unknown or unsupported
    """
    filetype = determine_filetype(filename)
    # Gzip: retry on the uncompressed stream. Bug fix: the recursive call now
    # forwards `fileorg`, which previously silently fell back to 'AIM'.
    if filetype == 'GZ':
        gzfile = gzip.open(filename, 'rb')
        try:
            smps = read(gzfile, fileorg=fileorg, **kwargs)
        except Exception as err:
            # Chain the underlying error instead of hiding it (was a bare except).
            raise ValueError(
                'Gzip file cannot be read compressed, '
                'uncompress and try again') from err
        finally:
            gzfile.close()
        return smps
    # CSV
    if filetype == 'CSV':
        if fileorg == 'AIM':
            return read_aim_csv(filename, fileorg=fileorg, **kwargs)
        elif fileorg == 'OPC':
            return read_opc_csv(filename, fileorg=fileorg, **kwargs)
        elif fileorg == 'MET':
            # Bug fix: the MET object was constructed but never returned.
            vardict = read_csv(filename, fileorg=fileorg, **kwargs)
            return MET(**vardict)
        else:
            try:
                return read_csv(filename, fileorg=fileorg, **kwargs)
            except Exception as err:
                raise TypeError(
                    'Unknown or unsupported file organisation: ' + fileorg) from err
    # TXT
    if filetype == 'TXT':
        if fileorg == 'AIM':
            return read_aim_txt(filename, fileorg=fileorg, **kwargs)
        elif fileorg == 'OPC':
            return read_opc_txt(filename, fileorg=fileorg, **kwargs)
        else:
            raise TypeError('Unknown or unsupported file organisation: ' + fileorg)
    # NetCDF (both classic and HDF5-based containers)
    if filetype == "NETCDF3" or filetype == "NETCDF4":
        if fileorg == 'MPL':
            return read_mpl(filename, fileorg=fileorg, **kwargs)
    raise TypeError('Unknown or unsupported file format: ' + filetype)
def determine_filetype(filename):
    """
    Return the filetype of a given file by examining the extension and the
    first few bytes.

    Adapted from pyart.io.auto_read.py script by : https://arm-doe.github.io/pyart/

    The following filetypes are detected (returned labels):

    * 'CSV'
    * 'TXT'
    * 'XLSX'
    * 'NETCDF3'
    * 'NETCDF4'
    * 'HDF4'
    * 'GZ'
    * 'ZIP'

    Parameters
    ----------
    filename : str or file-like
        Name of file to examine, or an already-open binary file object.

    Returns
    -------
    filetype : str
        Type of file, "UNKNOWN" if it cannot be determined.
    """
    # Read the first 12 bytes. A file object (e.g. handed over by read() for
    # gzip members) cannot be open()-ed, so peek at it and rewind instead.
    try:
        with open(filename, 'rb') as f:
            begin = f.read(12)
    except TypeError:
        f = filename
        begin = f.read(12)
        f.seek(-12, 1)
    # Extension checks only apply to path names (previously a file object
    # crashed here on slicing). The comparison is case-insensitive, unifying
    # the former separate 'txt'/'TXT' checks and also recognising '.CSV'.
    if isinstance(filename, str):
        extension = filename[-3:].lower()
        # CSV - no file signature as far as I know
        if extension == "csv":
            return "CSV"
        if extension == "txt":
            return "TXT"
    # xlsx
    xlsx_signature = b'PK\x03\x04\x14\x00\x08\x08\x08\x00ss'
    if begin == xlsx_signature:
        return "XLSX"
    # NetCDF3, read with read_cfradial
    if begin[:3] == b"CDF":
        return "NETCDF3"
    # NetCDF4, read with read_cfradial, contained in a HDF5 container
    # HDF5 format signature from HDF5 specification documentation
    hdf5_signature = b'\x89\x48\x44\x46\x0d\x0a\x1a\x0a'
    if begin[:8] == hdf5_signature:
        return "NETCDF4"
    # HDF4 file
    # HDF4 format signature from HDF4 specification documentation
    hdf4_signature = b'\x0e\x03\x13\x01'
    if begin[:4] == hdf4_signature:
        return "HDF4"
    # gzip filetype
    gzip_signature = b'\x1f\x8b'
    if begin[:2] == gzip_signature:
        return 'GZ'
    # zip filetype
    zip_signature = b'PK\x03\x04\x14\x00\x08\x00\x08\x00\x84y'
    if begin == zip_signature:
        return 'ZIP'
    # Cannot determine filetype
    return "UNKNOWN"
<filename>database_engine.py
from sqlalchemy import create_engine
from local_settings import *
import sys
import redis
import json
class Adaptor:
    """
    Validates source tables against a backend RDBMS and records, per table,
    the timestamp column and the single-column primary key needed downstream.
    """

    # Class-level defaults kept for backward compatibility; real state is
    # (re)initialised per instance in __init__.
    platform = None
    batchsize = 0
    valid = False
    db_engine = None
    tables = []
    tabdetails = {}

    def __init__(self, platform, batchsize):
        """
        Parameters
        ----------
        platform : str
            Backend RDBMS platform identifier, e.g. 'POSTGRESQL'.
        batchsize : int
            Number of rows processed per batch.
        """
        self.platform = platform
        self.batchsize = batchsize
        self.valid = False
        self.db_engine = None
        # Bug fix: 'tables' and 'tabdetails' were shared class attributes, so
        # every instance appended to the same list/dict. Give each instance
        # its own containers.
        self.tables = []
        self.tabdetails = {}

    def validate_tables(self, args):
        """Check every table in `args`; set self.valid and return True only if all pass."""
        if self.platform is None:
            raise Exception("Backend RDBMS platform not set")
        for table in args:
            if not self.db_table_check(table):
                return False
        self.valid = True
        return True

    def db_table_check(self, tablename):
        """
        Verify that `tablename` ('schemaname.tablename') has a timestamp column
        and a single-column primary key.

        Returns True when both conditions hold, False otherwise.  Only
        POSTGRESQL is fully implemented; the other supported platforms
        currently just select their engine string and return False.
        """
        if self.platform not in ('POSTGRESQL', 'DB2LUW', 'DB2ZOS', 'MSSQL', 'ORACLE'):
            print("unsupported DBMS platform.Please set platform in localsettings.py")
            sys.exit(1)
        parts = tablename.split('.')
        # Bug fix: the original tested `not tab and not sch` (failing only
        # when *both* parts were empty) and indexed parts[1] before checking
        # it exists, raising IndexError for names without a dot.
        if len(parts) != 2 or not parts[0] or not parts[1]:
            print("Table names should be in schemaname.tablename format", tablename)
            sys.exit(1)
        tab_schema, tab_name = parts
        if self.platform == 'POSTGRESQL':
            # Local import: the module only imports create_engine at the top.
            from sqlalchemy import text
            self.db_engine = 'postgresql'
            connection_url = (self.db_engine + '://' + DB_USER + ':' + DB_PASSWORD
                              + '@' + DB_HOST + ':' + str(DB_PORT) + '/' + DB_NAME)
            engine = create_engine(connection_url)
            conn = engine.connect()
            timestamp_pass = False
            pk_pass = False
            pk_single_col = False
            try:
                self.tables.append(tablename)
                # Bound parameters instead of %-interpolation: schema/table
                # names come from the caller and must not be pasted into SQL.
                result = conn.execute(
                    text("select column_name,data_type from information_schema.columns "
                         "where table_schema=:sch and table_name=:tab"),
                    {"sch": tab_schema, "tab": tab_name})
                for row in result:
                    if 'timestamp' in row['data_type']:
                        self.tabdetails[tab_name + '_ts'] = row['column_name']
                        timestamp_pass = True
                        break
                # Count the primary-key columns; exactly one is required.
                result1 = conn.execute(
                    text("SELECT count(*) as pk_col_count "
                         "FROM information_schema.key_column_usage AS c "
                         "LEFT JOIN information_schema.table_constraints AS t "
                         "ON t.constraint_name = c.constraint_name "
                         "WHERE t.table_schema=:sch AND t.table_name=:tab "
                         "AND t.constraint_type = 'PRIMARY KEY'"),
                    {"sch": tab_schema, "tab": tab_name})
                for row in result1:
                    if row['pk_col_count'] == 1:
                        pk_pass = True
                        pk_single_col = True
                # Record the primary-key column name for the table.
                result2 = conn.execute(
                    text("SELECT c.column_name, c.ordinal_position "
                         "FROM information_schema.key_column_usage AS c "
                         "LEFT JOIN information_schema.table_constraints AS t "
                         "ON t.constraint_name = c.constraint_name "
                         "WHERE t.table_schema=:sch AND t.table_name=:tab "
                         "AND t.constraint_type = 'PRIMARY KEY'"),
                    {"sch": tab_schema, "tab": tab_name})
                for row in result2:
                    self.tabdetails[tab_name + '_pk'] = row['column_name']
                return timestamp_pass and pk_pass and pk_single_col
            except Exception as exc:
                # Bug fix: the original printed the Exception *class* instead
                # of the caught error and implicitly returned None.
                print(exc)
                return False
            finally:
                conn.close()
        elif self.platform == 'DB2LUW':
            self.db_engine = 'ibm_db_sa'
            connection_url = self.db_engine + '://' + DB_USER + ':' + DB_PASSWORD + '@' + DB_HOST + '/' + DB_NAME
            return False  # TODO: DB2 LUW checks not implemented yet
        elif self.platform == 'DB2ZOS':
            self.db_engine = 'ibm_db_sa'
            connection_url = self.db_engine + '://' + DB_USER + ':' + DB_PASSWORD + '@' + DB_HOST + '/' + DB_NAME
            print("ok")
            return False  # TODO: DB2 z/OS checks not implemented yet
        elif self.platform == 'MSSQL':
            self.db_engine = 'mssql'
            connection_url = self.db_engine + '://' + DB_USER + ':' + DB_PASSWORD + '@' + DB_HOST + '/' + DB_NAME
            print("ok")
            return False  # TODO: MSSQL checks not implemented yet
        elif self.platform == 'MYSQL':
            # NOTE(review): 'MYSQL' is not in the accepted-platform list above,
            # so this branch is currently unreachable — confirm intent.
            self.db_engine = 'mysql'
            connection_url = self.db_engine + '://' + DB_USER + ':' + DB_PASSWORD + '@' + DB_HOST + '/' + DB_NAME
            print("ok")
            return False
        elif self.platform == 'ORACLE':
            # Bug fix: the dialect string contained spurious spaces
            # ('oracle + cx_oracle'), which is not a valid SQLAlchemy
            # dialect+driver specifier.
            self.db_engine = 'oracle+cx_oracle'
            connection_url = self.db_engine + '://' + DB_USER + ':' + DB_PASSWORD + '@' + DB_HOST + '/' + DB_NAME
            print("ok")
            return False  # TODO: Oracle checks not implemented yet
|
<reponame>UBT-AI2/rtlode<filename>generator/dispatcher.py
from myhdl import block, Signal, instances, always_comb, intbv, ConcatSignal, always
from generator.config import Config
from generator.cdc_utils import AsyncFifoConsumer, AsyncFifoProducer
from framework.fifo import FifoProducer, FifoConsumer, fifo
from generator.priority_encoder import priority_encoder_one_hot
from generator.solver import solver
from generator.utils import clone_signal
@block
def solver_driver(clk, rst,
                  rdy_signal, rdy_ack, data_in, fifo_producer,
                  fin_signal, fin_ack, data_out, data_out_data, fifo_consumer):
    """
    Glue logic between the shared CPU-facing fifos and one solver instance.

    ``assign_data_in`` forwards a dataset from the shared input fifo
    (``data_in``) into this solver's private input fifo (``fifo_producer``)
    when this driver is ready (``rdy_signal``) and has been granted access by
    the arbiter (``rdy_ack``).  ``assign_data_out`` latches a finished result
    from the solver's output fifo (``fifo_consumer``) into ``data_out_data``
    and raises ``fin_signal`` until the shared output side acknowledges it
    (``fin_ack``).

    :return: myhdl instances
    """
    @always(clk.posedge)
    def assign_data_in():
        if rst:
            # Reset: nothing in flight, advertise readiness for new work.
            fifo_producer.wr.next = False
            rdy_signal.next = True
        else:
            if data_in.rd and not data_in.empty and rdy_ack:
                # Granted by the arbiter: capture the incoming dataset and
                # start the write into the private input fifo.
                fifo_producer.data.next = data_in.data
                fifo_producer.wr.next = True
                rdy_signal.next = False
            elif fifo_producer.wr and not fifo_producer.full:
                # Write accepted by the fifo; become ready again.
                fifo_producer.wr.next = False
                rdy_signal.next = True

    @always(clk.posedge)
    def assign_data_out():
        if rst:
            # Reset: keep reading from the solver output fifo, no result held.
            fifo_consumer.rd.next = True
            fin_signal.next = False
        else:
            if fifo_consumer.rd and not fifo_consumer.empty:
                # A result is available: hold it in data_out_data and raise
                # fin_signal for the output arbiter; stop reading meanwhile.
                data_out_data.next = fifo_consumer.data
                fin_signal.next = True
                fifo_consumer.rd.next = False
            elif data_out.wr and not data_out.full and fin_ack:
                # Result handed off to the shared output fifo; resume reading.
                fin_signal.next = False
                fifo_consumer.rd.next = True

    return instances()
@block
def dispatcher(config: Config, data_in: AsyncFifoConsumer, data_out: AsyncFifoProducer):
    """
    Logic to handle data stream read and write from / to cpu. Including dispatching single
    solver instances to solve a given ivp and collecting results to send back to cpu.

    :return: myhdl instances
    """
    # Both CPU-facing fifos must live in the same clock / reset domain.
    assert data_in.clk == data_out.clk
    assert data_in.rst == data_out.rst
    clk = data_in.clk
    rst = data_in.rst
    # One rdy / fin flag per solver. The concatenated vectors feed one-hot
    # priority encoders which arbitrate access to the shared input and output
    # streams (ConcatSignal needs at least two signals, hence the 1-solver case).
    rdy_signals = [Signal(bool(0)) for _ in range(config.nbr_solver)]
    rdy_signals_vec = ConcatSignal(*reversed(rdy_signals)) if config.nbr_solver > 1 else rdy_signals[0]
    rdy_priority = Signal(intbv(0)[config.nbr_solver:])
    fin_signals = [Signal(bool(0)) for _ in range(config.nbr_solver)]
    fin_signals_vec = ConcatSignal(*reversed(fin_signals)) if config.nbr_solver > 1 else fin_signals[0]
    fin_priority = Signal(intbv(0)[config.nbr_solver:])
    rdy_priority_encoder = priority_encoder_one_hot(rdy_signals_vec, rdy_priority)
    fin_priority_encoder = priority_encoder_one_hot(fin_signals_vec, fin_priority)
    # Per-solver copy of the output word; assign_data_out multiplexes these
    # onto the shared data_out.data.
    solver_data_out = [clone_signal(data_out.data) for _ in range(config.nbr_solver)]
    # Small private input fifo per solver (depth 2**2).
    solver_input_producers = [
        FifoProducer(clone_signal(data_in.data))
        for _ in range(config.nbr_solver)
    ]
    solver_input_consumers = [
        FifoConsumer(clone_signal(data_in.data))
        for _ in range(config.nbr_solver)
    ]
    solver_input_fifos = [
        fifo(clk, rst, solver_input_producers[i], solver_input_consumers[i], buffer_size_bits=2)
        for i in range(config.nbr_solver)
    ]
    # Matching private output fifo per solver.
    solver_output_producers = [
        FifoProducer(clone_signal(data_out.data))
        for _ in range(config.nbr_solver)
    ]
    solver_output_consumers = [
        FifoConsumer(clone_signal(data_out.data))
        for _ in range(config.nbr_solver)
    ]
    solver_output_fifos = [
        fifo(clk, rst, solver_output_producers[i], solver_output_consumers[i], buffer_size_bits=2)
        for i in range(config.nbr_solver)
    ]
    # The actual solver cores, each fed from / drained into its private fifos.
    solver_inst = [
        solver(config, clk, rst,
               data_in=solver_input_consumers[i],
               data_out=solver_output_producers[i])
        for i in range(config.nbr_solver)
    ]

    @always_comb
    def rd_driver():
        # Pop the shared input fifo whenever some solver has been granted it.
        data_in.rd.next = rdy_priority != 0

    @always_comb
    def wr_driver():
        # Push the shared output fifo whenever some solver has a result granted.
        data_out.wr.next = fin_priority != 0

    # One driver per solver; rdy_priority(i) / fin_priority(i) are the
    # per-solver grant bits from the one-hot encoders.
    solver_wrapper_inst = [
        solver_driver(clk, rst,
                      rdy_signals[i], rdy_priority(i), data_in, solver_input_producers[i],
                      fin_signals[i], fin_priority(i), data_out, solver_data_out[i], solver_output_consumers[i])
        for i in range(config.nbr_solver)
    ]

    @always_comb
    def assign_data_out():
        # Multiplex the granted solver's held result onto the shared bus.
        for i in range(config.nbr_solver):
            if fin_priority[i]:
                data_out.data.next = solver_data_out[i]

    return instances()
|
<gh_stars>1-10
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: <NAME> <<EMAIL>>
"""
"""
Tests some of the api in cuda4py.cufft package.
"""
import cuda4py as cu
import cuda4py.cufft as cufft
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
    """Tests for the cuda4py.cufft bindings; requires a CUDA-capable device."""

    def setUp(self):
        # Pin CUDA_DEVICE to "0" when unset, remembering the previous value so
        # tearDown can restore the environment exactly.
        logging.basicConfig(level=logging.DEBUG)
        self.old_env = os.environ.get("CUDA_DEVICE")
        if self.old_env is None:
            os.environ["CUDA_DEVICE"] = "0"
        self.ctx = cu.Devices().create_some_context()
        self.path = os.path.dirname(__file__)
        if not len(self.path):
            self.path = "."

    def tearDown(self):
        # Restore CUDA_DEVICE and drop the context; the explicit gc.collect()
        # forces release of CUDA resources before the next test runs.
        if self.old_env is None:
            del os.environ["CUDA_DEVICE"]
        else:
            os.environ["CUDA_DEVICE"] = self.old_env
        del self.old_env
        del self.ctx
        gc.collect()

    def test_constants(self):
        # The binding constants must match the values from the cufft header.
        self.assertEqual(cufft.CUFFT_SUCCESS, 0)
        self.assertEqual(cufft.CUFFT_INVALID_PLAN, 1)
        self.assertEqual(cufft.CUFFT_ALLOC_FAILED, 2)
        self.assertEqual(cufft.CUFFT_INVALID_TYPE, 3)
        self.assertEqual(cufft.CUFFT_INVALID_VALUE, 4)
        self.assertEqual(cufft.CUFFT_INTERNAL_ERROR, 5)
        self.assertEqual(cufft.CUFFT_EXEC_FAILED, 6)
        self.assertEqual(cufft.CUFFT_SETUP_FAILED, 7)
        self.assertEqual(cufft.CUFFT_INVALID_SIZE, 8)
        self.assertEqual(cufft.CUFFT_UNALIGNED_DATA, 9)
        self.assertEqual(cufft.CUFFT_INCOMPLETE_PARAMETER_LIST, 10)
        self.assertEqual(cufft.CUFFT_INVALID_DEVICE, 11)
        self.assertEqual(cufft.CUFFT_PARSE_ERROR, 12)
        self.assertEqual(cufft.CUFFT_NO_WORKSPACE, 13)
        self.assertEqual(cufft.CUFFT_R2C, 0x2a)
        self.assertEqual(cufft.CUFFT_C2R, 0x2c)
        self.assertEqual(cufft.CUFFT_C2C, 0x29)
        self.assertEqual(cufft.CUFFT_D2Z, 0x6a)
        self.assertEqual(cufft.CUFFT_Z2D, 0x6c)
        self.assertEqual(cufft.CUFFT_Z2Z, 0x69)
        self.assertEqual(cufft.CUFFT_FORWARD, -1)
        self.assertEqual(cufft.CUFFT_INVERSE, 1)

    def test_errors(self):
        # cufft error codes must be registered in the shared ERRORS table.
        idx = cu.CU.ERRORS[cufft.CUFFT_INVALID_PLAN].find(" | ")
        self.assertGreater(idx, 0)

    def test_version(self):
        fft = cufft.CUFFT(self.ctx)
        ver = fft.version
        logging.debug("cuFFT version is %d", ver)
        self.assertTrue(ver == int(ver))

    def test_auto_allocation(self):
        # auto_allocation defaults to True and must round-trip when toggled.
        fft = cufft.CUFFT(self.ctx)
        self.assertTrue(fft.auto_allocation)
        fft.auto_allocation = False
        self.assertFalse(fft.auto_allocation)
        fft.auto_allocation = True
        self.assertTrue(fft.auto_allocation)

    def test_make_plan_many(self):
        # Default layout plan.
        fft = cufft.CUFFT(self.ctx)
        fft.auto_allocation = False
        sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C)
        logging.debug(
            "make_plan_many (default layout) for 256x128 x8 returned %d", sz)
        logging.debug("size is %d", fft.size)
        self.assertEqual(fft.execute, fft.exec_c2c)
        # Explicit (tight) layout plan.
        fft = cufft.CUFFT(self.ctx)
        fft.auto_allocation = False
        sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C,
                                (256, 128), 1, 256 * 128,
                                (256, 128), 1, 256 * 128)
        logging.debug(
            "make_plan_many (tight layout) for 256x128 x8 returned is %d", sz)
        logging.debug("size is %d", fft.size)

    def _test_exec(self, dtype):
        # Real-to-complex forward + complex-to-real inverse round trip,
        # validated against numpy.fft where available.
        x = numpy.zeros([32, 64], dtype=dtype)
        x[:] = numpy.random.rand(x.size).reshape(x.shape) - 0.5
        y = numpy.ones((x.shape[0], x.shape[1] // 2 + 1),
                       dtype={numpy.float32: numpy.complex64,
                              numpy.float64: numpy.complex128}[dtype])
        x_gold = x.copy()
        try:
            y_gold = numpy.fft.rfft2(x)
        except TypeError:
            y_gold = None  # for pypy
        xbuf = cu.MemAlloc(self.ctx, x)
        ybuf = cu.MemAlloc(self.ctx, y)
        # Forward transform
        fft = cufft.CUFFT(self.ctx)
        fft.auto_allocation = False
        sz = fft.make_plan_many(x.shape, 1,
                                {numpy.float32: cufft.CUFFT_R2C,
                                 numpy.float64: cufft.CUFFT_D2Z}[dtype])
        tmp = cu.MemAlloc(self.ctx, sz)
        fft.workarea = tmp
        self.assertEqual(fft.workarea, tmp)
        self.assertEqual(fft.execute,
                         {numpy.float32: fft.exec_r2c,
                          numpy.float64: fft.exec_d2z}[dtype])
        fft.execute(xbuf, ybuf)
        ybuf.to_host(y)
        if y_gold is not None:
            delta = y - y_gold
            max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
                                             delta.imag * delta.imag)).max()
            logging.debug("Forward max_diff is %.6e", max_diff)
            self.assertLess(max_diff, {numpy.float32: 1.0e-3,
                                       numpy.float64: 1.0e-6}[dtype])
        # Inverse transform
        fft = cufft.CUFFT(self.ctx)
        fft.auto_allocation = False
        sz = fft.make_plan_many(x.shape, 1,
                                {numpy.float32: cufft.CUFFT_C2R,
                                 numpy.float64: cufft.CUFFT_Z2D}[dtype])
        fft.workarea = cu.MemAlloc(self.ctx, sz)
        y /= x.size  # correct scale before inverting
        ybuf.to_device_async(y)
        xbuf.memset32_async(0)  # reset the resulting vector
        self.assertEqual(fft.execute,
                         {numpy.float32: fft.exec_c2r,
                          numpy.float64: fft.exec_z2d}[dtype])
        fft.execute(ybuf, xbuf)
        xbuf.to_host(x)
        max_diff = numpy.fabs(x - x_gold).max()
        logging.debug("Inverse max_diff is %.6e", max_diff)
        self.assertLess(max_diff, {numpy.float32: 1.0e-3,
                                   numpy.float64: 1.0e-6}[dtype])

    def test_exec_float(self):
        logging.debug("ENTER: test_exec_float")
        self._test_exec(numpy.float32)
        logging.debug("EXIT: test_exec_float")

    def test_exec_double(self):
        logging.debug("ENTER: test_exec_double")
        self._test_exec(numpy.float64)
        logging.debug("EXIT: test_exec_double")

    def _test_exec_complex(self, dtype):
        # Complex-to-complex forward + inverse round trip, validated against
        # numpy.fft where available.
        x = numpy.zeros([32, 64], dtype=dtype)
        x.real = numpy.random.rand(x.size).reshape(x.shape) - 0.5
        x.imag = numpy.random.rand(x.size).reshape(x.shape) - 0.5
        y = numpy.ones_like(x)
        x_gold = x.copy()
        try:
            y_gold = numpy.fft.fft2(x)
        except TypeError:
            y_gold = None  # for pypy
        xbuf = cu.MemAlloc(self.ctx, x)
        ybuf = cu.MemAlloc(self.ctx, y)
        # Forward transform
        fft = cufft.CUFFT(self.ctx)
        fft.auto_allocation = False
        sz = fft.make_plan_many(x.shape, 1,
                                {numpy.complex64: cufft.CUFFT_C2C,
                                 numpy.complex128: cufft.CUFFT_Z2Z}[dtype])
        tmp = cu.MemAlloc(self.ctx, sz)
        fft.workarea = tmp
        self.assertEqual(fft.workarea, tmp)
        self.assertEqual(fft.execute, {numpy.complex64: fft.exec_c2c,
                                       numpy.complex128: fft.exec_z2z}[dtype])
        fft.execute(xbuf, ybuf, cufft.CUFFT_FORWARD)
        ybuf.to_host(y)
        if y_gold is not None:
            delta = y - y_gold
            max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
                                             delta.imag * delta.imag)).max()
            logging.debug("Forward max_diff is %.6e", max_diff)
            self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
                                       numpy.complex128: 1.0e-6}[dtype])
        # Inverse transform
        y /= x.size  # correct scale before inverting
        ybuf.to_device_async(y)
        xbuf.memset32_async(0)  # reset the resulting vector
        fft.execute(ybuf, xbuf, cufft.CUFFT_INVERSE)
        xbuf.to_host(x)
        delta = x - x_gold
        max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
                                         delta.imag * delta.imag)).max()
        logging.debug("Inverse max_diff is %.6e", max_diff)
        self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
                                   numpy.complex128: 1.0e-6}[dtype])

    def test_exec_complex_float(self):
        logging.debug("ENTER: test_exec_complex_float")
        self._test_exec_complex(numpy.complex64)
        logging.debug("EXIT: test_exec_complex_float")

    def test_exec_complex_double(self):
        logging.debug("ENTER: test_exec_complex_double")
        self._test_exec_complex(numpy.complex128)
        logging.debug("EXIT: test_exec_complex_double")
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
<filename>tensorflow_probability/python/experimental/mcmc/progress_bar_reducer.py
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""`ProgressBarReducer` for showing progress bars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import reducer as reducer_base
__all__ = [
'ProgressBarReducer',
'make_tqdm_progress_bar_fn',
]
def make_tqdm_progress_bar_fn(description='', leave=True):
  """Make a `progress_bar_fn` that uses `tqdm`.

  Args:
    description: `str` to display next to the progress bar, default is "".
    leave: Boolean whether to leave the progress bar up after finished.

  Returns:
    tqdm_progress_bar_fn: A function that takes an integer `num_steps` and
      returns a `tqdm` progress bar iterator.
  """
  def tqdm_progress_bar_fn(num_steps):
    """Return an iterator advancing a tqdm bar over `num_steps` steps."""
    try:
      import tqdm  # pylint: disable=g-import-not-at-top
    except ImportError:
      raise ImportError('Please install tqdm via pip install tqdm')
    bar = tqdm.tqdm(range(num_steps), desc=description, leave=leave)
    return iter(bar)
  return tqdm_progress_bar_fn
class ProgressBarReducer(reducer_base.Reducer):
  """`Reducer` that displays a progress bar.

  Note this is not XLA-compatible (`tf.function(jit_compile=True)`).
  Numpy and JAX substrates are not supported.

  Example usage:

  ```
  kernel = ...
  current_state = ...
  num_results = ...
  pbar = tfp.experimental.mcmc.ProgressBarReducer(num_results)
  _, final_state, kernel_results = tfp.experimental.mcmc.sample_fold(
    num_steps=num_results,
    current_state=current_state,
    kernel=kernel,
    reducer=pbar,
  )
  ```
  """

  def __init__(
      self,
      num_results,
      progress_bar_fn=make_tqdm_progress_bar_fn()):
    """Instantiates a reducer that displays a progress bar.

    Args:
      num_results: Integer number of results to expect (as passed to sample
        chain).
      progress_bar_fn: A function that takes an integer `num_results` and
        returns an iterator that advances a progress bar. Defaults to `tqdm`
        progress bars (make sure they are pip installed before using.)
    """
    # NOTE: the default progress_bar_fn is built once at class-definition
    # time; it is a stateless factory, so sharing it across instances is safe.
    self._parameters = dict(
        num_results=num_results,
        progress_bar_fn=progress_bar_fn,
    )

  def initialize(self, initial_chain_state, initial_kernel_results=None):  # pylint: disable=unused-argument
    """Initialize progress bars.

    All arguments are ignored.

    Args:
      initial_chain_state: A (possibly nested) structure of `Tensor`s or Python
        `list`s of `Tensor`s representing the current state(s) of the Markov
        chain(s). It is used to infer the structure of future trace results.
      initial_kernel_results: A (possibly nested) structure of `Tensor`s
        representing internal calculations made in a related `TransitionKernel`.
      It is used to infer the structure of future trace results.

    Returns:
      state: empty list.
    """
    num_results = tf.convert_to_tensor(self.num_results)
    # The bar is created inside tf.py_function so it also works when this
    # runs inside a (non-jit) tf.function trace.
    def init_bar(num_results):
      self.bar = self.progress_bar_fn(int(num_results))
    tf.py_function(init_bar, (num_results,), ())
    return []

  def one_step(self, new_chain_state, current_reducer_state,
               previous_kernel_results):  # pylint: disable=unused-argument
    """Advance progress bar by one result.

    All arguments are ignored.

    Args:
      new_chain_state: A (possibly nested) structure of incoming chain state(s)
        with shape and dtype compatible with those used to initialize the
        `TracingState`.
      current_reducer_state: `TracingState`s representing all previously traced
        results.
      previous_kernel_results: A (possibly nested) structure of `Tensor`s
        representing internal calculations made in a related
        `TransitionKernel`.

    Returns:
      new_reducer_state: empty list.
    """
    def update_bar():
      # Swallow StopIteration: extra steps past num_results are a no-op.
      try:
        next(self.bar)
      except StopIteration:
        pass
    tf.py_function(update_bar, (), ())
    return []

  @property
  def num_results(self):
    return self._parameters['num_results']

  @property
  def progress_bar_fn(self):
    return self._parameters['progress_bar_fn']

  @property
  def parameters(self):
    return self._parameters
|
<gh_stars>1-10
"""
Topics to be explored:
- Lattice Approximations of Continuous Space Manifolds
- Finding an embedding of a neural network in R^3
- Neural Field Models for particle dynamics and stochastic
dynamics on neural manifolds
- Intrinsic Dimensionality of a Graph
An idea that occurred to me yesterday relates to the
"*planar dimensionality of a graph*" which means the
minimal number of dimensions necessary in which to
project the graph such that no edges intersect with
each other. For example, the intrinsic dimensionality
of a planar graph is $2$. A graph for which intersections
only exist between one single node $n_i$ and any number
of other nodes $n_j, j\ne i$, embedding this graph in
3 dimensions will remove any line intersections simply
by the definition of a line emanating from a point
(because the only place the line segments representing
edges intersect is at the node itself and therefore
they intersect nowhere else).
Once you can find the dimensionality of a graph as well
as an appropriate embedding of the graph in those
dimensions (using some force-based spring layout model) then things get interesting.
If the graph has intrinsic dimensionality $n$, by
projecting the graph into dimensions $n+1$ and
force laying out the graph in these dimensions
you obtain LATTICE APPROXIMATION OF A CONTINUOUS SPACE CURVE.
The position of a node along dimension $n+1$
converges such that the euclidean distance between
any two nodes in this $n+1$ space is exactly equal to
their edges distance.
**Now we have found the most perfect intrinsic spatial
embedding of a graph** because:
1. The distance between all the nodes in this space is
exactly equal to the weight of their edges
2. The space approximation created by the graph lattice is continuous.
*NOW* we can start playing with the physics of this high
dimensional graph manifold, for example, by fitting
a field function to the data
"""
import sys
import numpy as np
import scipy.cluster.hierarchy as sch
import pylab
import scipy
import matplotlib.pyplot as plt
import networkx as nx
import numpy.ma as ma
from scipy.integrate import odeint
import matplotlib.cm as cm
import os
import sys
import numpy as np
import scipy.stats
sys.path.append('../src/')
import data_config as dc
"""
By embedding a graph in 3+1 dimensions
we can find a continuous surface on which the
network lives
This is enabled by a theorem in network science
that the probability of edge collisions
for a graph embedded in three dimensions is
zero
"""
import numpy as np
import numpy.ma as ma
from scipy.integrate import odeint
import cv2
def get_dr(y):
    """Pairwise directed displacement vectors.

    dr[i, j] = y[i] - y[j], i.e. the vector pointing from node j to node i
    (the direction of the force j exerts on i).

    Args:
        y: (n, d) array of node positions.

    Returns:
        (n, n, d) float32 array of displacements.
    """
    positions = np.asarray(y)
    # Broadcasting replaces the explicit tile/transpose construction.
    dr = positions[:, np.newaxis, :] - positions[np.newaxis, :, :]
    return dr.astype(np.float32)
def get_radii(y):
    """Pairwise Euclidean distance matrix.

    R[i, j] = ||y[i] - y[j]||, as float32. The displacement computation of
    `get_dr` is inlined here.
    """
    positions = np.asarray(y)
    diff = (positions[:, np.newaxis, :] - positions[np.newaxis, :, :]).astype(np.float32)
    return np.sqrt(np.sum(diff ** 2, axis=2)).astype(np.float32)
def spring_layout(y, t, w, k, n, d, T):
    """
    ODE right-hand side for the damped spring system (called by odeint).

    y: an (n*2,d) dimensional matrix where y[:n]_i
       is the position of the ith node in d dimensions
       and y[n:]_i is the velocity of the ith node
    w: (n,n) matrix of edge weights
    t, T: unused here; kept for odeint call-site compatibility.
    k: spring constant.
    Returns the flattened time derivative (velocities, accelerations).
    """
    # State arrives flattened from odeint; restore (positions, velocities).
    y = np.copy(y.reshape((n*2, d)))
    x = y[:n]
    v = y[n:]
    dR = get_dr(x)
    # F=0 <=> R=w
    # we also add a damping term
    # NOTE(review): np.linalg.norm(dR) is the Frobenius norm of the WHOLE
    # (n,n,d) tensor, not the per-pair distances — per-pair normalization
    # would use get_radii(x). Confirm intent before changing.
    F = -k*(dR-w*dR/(np.linalg.norm(dR)))
    Fnet = np.sum(F, axis=1) - v  # unit damping proportional to velocity
    a = Fnet  # nodes have unit mass
    # Setting velocities
    y[:n] = np.copy(y[n:])
    # Entering the acceleration into the velocity slot
    y[n:] = np.copy(a)
    # Flattening it out for scipy.odeint to work
    return np.array(y).reshape(n*2*d)
def sim_particles(t, r, v, w, k=1.):
    """Integrate the spring system with scipy.integrate.odeint.

    Args:
        t: 1-D array of time samples.
        r: (n, d) initial positions.
        v: (n, d) initial velocities.
        w: (n, n) edge-weight (rest-length) matrix.
        k: spring constant.

    Returns:
        (len(t), n*2, d) array; positions in [:, :n, :], velocities in [:, n:, :].
    """
    d = r.shape[-1]
    n = r.shape[0]
    # Stack positions above velocities into one flat state vector for odeint.
    y0 = np.zeros((n*2, d))
    y0[:n] = r
    y0[n:] = v
    y0 = y0.reshape(n*2*d)
    # Trailing axis lets w broadcast against the (n, n, d) displacement tensor.
    w = np.array([w]).reshape((w.shape[0], w.shape[1], 1))
    yf = odeint(
        spring_layout,
        y0,
        t,
        args=(w, k, n, d, t.shape[0])).reshape(t.shape[0], n*2, d)
    return yf
def get_data():
    """Load the Kato calcium-imaging data and the C. elegans connectome.

    Returns a dict with the raw traces, their correlation matrix, the
    connectome adjacency matrix, and the networkx graph itself.
    """
    kato = dc.kato.data()
    data = kato[0]["deltaFOverF_bc"].T
    mean = np.mean(data, axis=1, keepdims=True)
    # NOTE(review): `standardized` is computed but never used; the correlation
    # below is taken on the raw, un-centered data — confirm this is intended.
    standardized = (data-mean)
    correlation = data.T.dot(data)
    connectome = dc.connectome_networkx.data().to_directed()
    adjacency = nx.to_numpy_matrix(connectome)
    return {
        "data": data,
        # NOTE(review): key is misspelled ("correletion"); left unchanged
        # because callers may rely on the misspelling.
        "correletion": correlation,
        "adjacency": adjacency,
        "network": connectome
    }
def simulate():
    """Run the spring-layout simulation on a synthetic 5x5 grid graph.

    The connectome adjacency is loaded (for parity with the original code)
    but the simulation itself runs on the grid graph.

    Returns:
        (timesteps, 2*N, 3) trajectory array from sim_particles; positions
        in [:, :N, :], velocities in [:, N:, :].

    Cleanup vs. the original: removed the commented-out two-particle test
    fixture and the unused locals (`d`, `k`, the `t`/`t_f` shadowing).
    """
    data = get_data()
    adjacency = data["adjacency"]  # loaded as before; unused below
    # 100 samples over 10 seconds of simulated time.
    t = np.linspace(0, 10, num=100).astype(np.float32)
    n = 5
    G = nx.grid_2d_graph(n, n)
    N = n * n
    # Edge weights double as spring rest lengths; scaled up for spacing.
    w = nx.to_numpy_matrix(G) * 10
    # Random initial positions in the unit cube, zero initial velocity.
    r = np.random.rand(N, 3)
    v = r * 0.
    return sim_particles(t, r, v, w)
if __name__ == "__main__":
    # Re-run the simulation unless a cached trajectory exists on disk.
    alreadysimulated = os.path.isfile("../data/spaceembedding.npy")
    # NOTE(review): the cache branch is deliberately disabled (always resimulates).
    if False:  # alreadysimulated:
        rf = np.load("../data/spaceembedding.npy")
    else:
        rf = simulate()
        np.save("../data/spaceembedding.npy", rf)
    data = get_data()
    H = nx.grid_2d_graph(5, 5)
    # NOTE(review): this initial layout is immediately discarded — the final
    # simulated positions on the next line are used instead.
    pos = np.array(nx.spring_layout(H, dim=3).values())
    pos = rf[-1, :25]
    from mayavi import mlab
    # reorder nodes from 0,len(G)-1
    G = nx.convert_node_labels_to_integers(H)
    scalars = np.array(G.nodes())+5
    mlab.figure(1, bgcolor=(0, 0, 0))
    mlab.clf()
    # Draw nodes as points and edges as grey tubes.
    pts = mlab.points3d(pos[:, 0], pos[:, 1], pos[:, 2],
                        scalars,
                        scale_factor=0.01,
                        scale_mode='none',
                        colormap='Blues',
                        resolution=20)
    pts.mlab_source.dataset.lines = np.array(G.edges())
    tube = mlab.pipeline.tube(pts, tube_radius=0.01)
    mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
    mlab.savefig('mayavi2_spring.png')
    mlab.show()  # interactive window
|
<gh_stars>0
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools
from copy import deepcopy
from pathlib import Path
from typing import List, Type
import jsonschema
from nncf.config.schema import ROOT_NNCF_CONFIG_SCHEMA
from nncf.config.schema import validate_single_compression_algo_schema
from nncf.config.structure import NNCFExtraConfigStruct
from nncf.common.os import safe_open
try:
import jstyleson as json
except ImportError:
import json
from addict import Dict
from nncf.common.utils.logger import logger
class NNCFConfig(dict):
    """A regular dictionary object extended with some utility functions."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps struct id -> registered NNCFExtraConfigStruct instance.
        self.__nncf_extra_structs = {}  # type: Dict[str, NNCFExtraConfigStruct]

    @classmethod
    def from_dict(cls, nncf_dict):
        """
        Load NNCF config from dict;
        The dict must contain only json supported primitives.
        """
        NNCFConfig.validate(nncf_dict)
        return cls(deepcopy(nncf_dict))

    @classmethod
    def from_json(cls, path) -> 'NNCFConfig':
        """Load and validate an NNCF config from a JSON(-with-comments) file."""
        file_path = Path(path).resolve()
        with safe_open(file_path) as f:
            loaded_json = json.load(f)
        return cls.from_dict(loaded_json)

    def register_extra_structs(self, struct_list: 'List[NNCFExtraConfigStruct]'):
        """Attach non-JSON config structures.

        :raises RuntimeError: if a struct with the same id is already registered.
        """
        for struct in struct_list:
            struct_id = struct.get_id()
            if struct_id in self.__nncf_extra_structs:
                # Bugfix: the id was previously never interpolated into the
                # message (the literal "{}" was raised).
                raise RuntimeError(
                    "{} is already registered as extra struct in NNCFConfig!".format(struct_id))
            self.__nncf_extra_structs[struct_id] = struct

    def get_extra_struct(self, struct_cls: 'Type[NNCFExtraConfigStruct]') -> 'NNCFExtraConfigStruct':
        """Return the registered struct for the given struct class (by its id)."""
        return self.__nncf_extra_structs[struct_cls.get_id()]

    def get_all_extra_structs_for_copy(self) -> 'List[NNCFExtraConfigStruct]':
        """Return all registered extra structs as a new list."""
        return list(self.__nncf_extra_structs.values())

    @staticmethod
    def validate(loaded_json):
        """Validate a config dict against the NNCF JSON schema.

        :raises jsonschema.ValidationError: with a shortened message on failure.
        """
        try:
            jsonschema.validate(loaded_json, schema=ROOT_NNCF_CONFIG_SCHEMA)
        except jsonschema.ValidationError as e:
            logger.error("Invalid NNCF config supplied!")
            # The default exception's __str__ result will contain the entire schema,
            # which is too large to be readable.
            import nncf.config.schema as config_schema
            msg = e.message + ". See documentation or {} for an NNCF configuration file JSON schema definition".format(
                config_schema.__file__)
            raise jsonschema.ValidationError(msg)
        compression_section = loaded_json.get("compression")
        if compression_section is None:
            # No compression specified
            return
        try:
            if isinstance(compression_section, dict):
                validate_single_compression_algo_schema(compression_section)
            else:
                # Passed a list of dicts
                for compression_algo_dict in compression_section:
                    validate_single_compression_algo_schema(compression_algo_dict)
        except jsonschema.ValidationError:
            # No need to trim the exception output here since only the compression algo
            # specific sub-schema will be shown, which is much shorter than the global schema
            logger.error("Invalid NNCF config supplied!")
            raise
def product_dict(d):
    """Yield every combination of the values of `d` as individual dicts.

    Example: {'a': [1, 2], 'b': [3]} -> {'a': 1, 'b': 3}, {'a': 2, 'b': 3}.
    """
    names = d.keys()
    for combo in itertools.product(*d.values()):
        yield dict(zip(names, combo))
|
from contextlib import suppress
from textwrap import wrap
from PyQt5 import QtCore
from PyQt5.QtCore import QEvent, QObject, Qt, QSize
from PyQt5.QtGui import QColor, QTextOption, QKeySequence, QContextMenuEvent, QBrush
from PyQt5.QtWidgets import QAbstractScrollArea, QAction, QComboBox, QFrame, QPlainTextEdit, QSizePolicy, QTableWidget, \
QTableWidgetItem, QWidget, QApplication, QShortcut, QStackedWidget
from cif.text import retranslate_delimiter
from tools.misc import essential_keys, text_field_keys
# Cell background colors used throughout the CIF table widgets:
light_green = QColor(217, 255, 201)
blue = QColor(102, 150, 179)
yellow = QColor(250, 247, 150)  # #faf796
# Column indices of the main CIF table (CIF value, data value, edit field):
[COL_CIF,
 COL_DATA,
 COL_EDIT
 ] = range(3)
class QHLine(QFrame):
    """A thin horizontal separator line (a raised QFrame in HLine shape)."""

    def __init__(self):
        super().__init__()
        self.setFrameShape(QFrame.HLine)
        # Raised shadow gives a visible (non-black) line.
        self.setFrameShadow(QFrame.Raised)
# noinspection PyUnresolvedReferences
class ItemTextMixin:
    """Mixin for QTableWidget subclasses: read a cell's text regardless of
    whether the cell holds a plain item, a QPlainTextEdit or a QComboBox."""

    def text(self, row: int, column: int) -> str:
        """
        Returns the text inside a table cell, or '' if the cell is empty.
        """
        # Try each accessor in turn; a missing item/widget (or a widget of
        # the wrong type) raises AttributeError and counts as empty.
        readers = (
            lambda: self.item(row, column).text(),
            lambda: self.item(row, column).data(0),
            lambda: self.cellWidget(row, column).toPlainText(),   # QPlainTextEdit
            lambda: self.cellWidget(row, column).currentText(),   # QComboBox
        )
        value = ''
        for read in readers:
            try:
                value = read()
            except AttributeError:
                value = ''
            if value:
                break
        return value
class MyCifTable(QTableWidget, ItemTextMixin):
    """Main CIF key/value table: one row per CIF keyword, with a CIF column,
    a data column and an editable column. Emits `row_deleted(key)` when a
    row is removed."""
    row_deleted = QtCore.pyqtSignal(str)

    def __init__(self, parent: QWidget = None, *args, **kwargs):
        self.parent = parent
        super().__init__(*args, **kwargs)
        self.setParent(parent)
        self.installEventFilter(self)
        self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        item = MyTableWidgetItem()
        self.setItemPrototype(item)
        # Context-menu actions:
        self.actionDeletePair = QAction("Delete Row", self)
        self.actionCopy = QAction("Copy", self)
        self.actionCopyVhead = QAction("Copy CIF Keyword", self)
        self.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.addAction(self.actionDeletePair)
        self.addAction(self.actionCopy)
        self.addAction(self.actionCopyVhead)
        self.actionDeletePair.triggered.connect(self.delete_row)
        self.actionCopy.triggered.connect(self.copy_item)
        self.actionCopyVhead.triggered.connect(self.copy_vhead_item)
        del_shortcut = QShortcut(QKeySequence('Ctrl+Del'), self)
        del_shortcut.activated.connect(self.delete_row)
        # CIF keyword of each row, in row order:
        self.vheaderitems: list = []
        # This is the index number of the vheader that got clicked last:
        self.vheader_clicked = -1
        # vertical header click:
        vheader = self.verticalHeader()
        vheader.setSectionsClickable(True)
        # noinspection PyUnresolvedReferences
        vheader.sectionClicked.connect(self.vheader_section_click)

    def setCellWidget(self, row: int, column: int, widget) -> None:
        """Place a widget in a cell, remembering its row and locking the
        non-editable columns."""
        widget.row = row
        if (column == COL_CIF) or (column == COL_DATA):
            # noinspection PyUnresolvedReferences
            widget.setUneditable()
        super(MyCifTable, self).setCellWidget(row, column, widget)

    @property
    def rows_count(self):
        # Number of rows as seen by the model.
        return self.model().rowCount()

    @property
    def columns_count(self):
        # Number of columns as seen by the model.
        return self.model().columnCount()

    def delete_content(self):
        """
        Deletes all content in the table.
        """
        self.setRowCount(0)
        # clear() would also delete the header text and set 1, 2, 3 — so
        # only the contents are cleared here:
        self.clearContents()
        self.vheaderitems.clear()

    def vheader_section_click(self, section):
        """Toggle the clicked vertical header between the CIF keyword and its
        human-readable description."""
        item = self.verticalHeaderItem(section)
        itemtext = item.text()
        # be sure not to get vheader with name of last click:
        if section != self.vheader_clicked and self.vheader_clicked > -1:
            self.restore_vertical_header()
            self.vheader_clicked = -1
            return
        # get back previous name
        if self.vheader_clicked > -1:
            item.setText(self.vheaderitems[self.vheader_clicked])
            self.vheader_clicked = -1
            return
        try:
            txt = essential_keys[itemtext]
            if txt:
                item.setText(txt)
                self.vheader_clicked = section
                return
        except KeyError:
            # Keyword has no description; leave the header as-is.
            pass

    def add_separation_line(self, row_num: int) -> None:
        """
        Adds a blue separation line between cif content and empty cif keywords.
        """
        item_vhead = MyTableWidgetItem('These below are already in:')
        item1 = MyTableWidgetItem('')
        item2 = MyTableWidgetItem('')
        item3 = MyTableWidgetItem('')
        diag = QBrush(blue)
        diag.setStyle(Qt.DiagCrossPattern)
        item_vhead.setBackground(diag)
        item1.setBackground(diag)
        item1.setUneditable()
        item2.setBackground(diag)
        item2.setUneditable()
        item3.setBackground(diag)
        item3.setUneditable()
        self.setVerticalHeaderItem(row_num, item_vhead)
        self.setItem(row_num, COL_CIF, item1)
        self.setItem(row_num, COL_DATA, item2)
        self.setItem(row_num, COL_EDIT, item3)
        self.resizeRowToContents(row_num)

    def restore_vertical_header(self):
        """Reset every vertical header cell back to its CIF keyword."""
        for row_num, key in enumerate(self.vheaderitems):
            item_key = MyTableWidgetItem(key)
            self.setVerticalHeaderItem(row_num, item_key)

    def eventFilter(self, widget: QObject, event: QEvent):
        """
        Event filter for tab navigation: Tab jumps to the edit column,
        Shift-Tab moves one row up.
        """
        if event.type() == QEvent.KeyRelease and event.key() == Qt.Key_Backtab:
            row = self.currentRow()
            if row > 0:
                self.setCurrentCell(row - 1, 2)
            return True
        if event.type() == QEvent.KeyRelease and event.key() == Qt.Key_Tab:
            row = self.currentRow()
            self.setCurrentCell(row, 2)
            return True
        if event.type() == QEvent.Wheel:
            pass
        return QObject.eventFilter(self, widget, event)

    def setText(self, key: str, column: int, txt: str, row: int = None, color=None):
        """
        Set text in current table cell regardless of the containing item.
        Short texts go into a plain item; long texts / known text-field keys
        get a MyQPlainTextEdit cell widget.
        """
        txt = retranslate_delimiter(txt)
        if row is None:
            row = self.vheaderitems.index(key)
        if isinstance(self.cellWidget(row, column), MyComboBox):
            self.cellWidget(row, column).setText(txt)
            return
        item = MyTableWidgetItem(txt)
        self.setItem(row, column, item)
        lentext = max([len(txt), len(self.getText(0, row)), len(self.getText(1, row))])
        if not (key in text_field_keys) and (lentext < 35):
            # This is a regular table cell:
            item.setText(txt)
            if (column == COL_CIF) or (column == COL_DATA):
                # noinspection PyUnresolvedReferences
                item.setUneditable()
            if color:
                item.setBackground(color)
        else:
            # This is a text field:
            textedit = MyQPlainTextEdit(self)
            self.setCellWidget(row, column, textedit)
            textedit.setText(txt, color=color)
            if (column == COL_CIF) or (column == COL_DATA):
                textedit.setUneditable()
            self.resizeRowToContents(row)
            if color:
                textedit.setBackground(color)

    def getText(self, row: int, col: int):
        """Return the text of cell (row, col); see ItemTextMixin.text()."""
        return self.text(row, col)

    def getTextFromKey(self, key: str, col: int):
        """
        Get text from field by cif keyword.
        :param key: CIF keyword like _chemical_formula_moiety
        :param col: column number to get text from.
        :return: text
        """
        row = self.vheaderitems.index(key)
        return self.text(row, col)

    def row_from_key(self, key: str) -> int:
        """Row index of the given CIF keyword."""
        return self.vheaderitems.index(key)

    def itemFromKey(self, key: str, col: int) -> QTableWidgetItem:
        """Returns the tableitem of the cell by key and column"""
        row = self.vheaderitems.index(key)
        return self.item(row, col)

    def widget_from_key(self, key: str, column: int) -> QWidget:
        """Cell widget of the given CIF keyword and column."""
        row = self.vheaderitems.index(key)
        return self.cellWidget(row, column)

    def setBackground(self, key: str, column: int, color: QColor):
        """Color the cell of `key`/`column`, whether it is an item or a widget."""
        row = self.vheaderitems.index(key)
        self.setCurrentCell(row, column)
        item = self.currentItem()
        if item:
            item.setBackground(color)
            if column == COL_DATA:
                item.setUneditable()
        else:
            widget = self.cellWidget(row, column)
            if widget:
                with suppress(Exception):
                    widget.setBackground(color)

    def copy_vhead_item(self):
        """
        Copies the CIF keyword of the current row to the clipboard.
        """
        row = self.currentRow()
        clipboard = QApplication.clipboard()
        clipboard.setText(self.vheaderitems[row])

    def copy_item(self):
        """
        Copies the content of the current field to the clipboard.
        """
        item = self.currentItem()
        if item is None:
            # Nothing selected; previously this raised AttributeError.
            return
        clipboard = QApplication.clipboard()
        clipboard.setText(item.text())

    def delete_row(self, row: int = None):
        """
        Deletes the given (or current) row, but gemmi can not delete items
        from the block at the moment!
        """
        # Bugfix: `if not row` treated row 0 (and the `False` emitted by
        # QAction.triggered) as "no row given". Fall back to the current row
        # only when no valid index was supplied.
        if row is None or isinstance(row, bool) or row < 0:
            row = self.currentRow()
        key = self.vheaderitems[row]
        del self.vheaderitems[row]
        self.removeRow(row)
        self.row_deleted.emit(key)

    def vheader_text(self, row):
        """Text currently shown in the vertical header of `row`."""
        vhead = self.model().headerData(row, Qt.Vertical)
        return str(vhead)
class MyQPlainTextEdit(QPlainTextEdit):
    """
    A special plaintextedit with convenient methods to set the background
    color and other things.
    """

    def __init__(self, parent=None, minheight: int = 80, *args, **kwargs):
        """
        Plaintext edit field for most of the table cells.
        :param parent: the owning table (usually a MyCifTable).
        :param minheight: minimum height of the widget.
        """
        super().__init__(parent, *args, **kwargs)
        self.setParent(parent)
        self.row: int = -1  # table row of this widget; set by setCellWidget
        self.minheight = minheight
        self.parent: MyCifTable = parent
        self.setFocusPolicy(Qt.StrongFocus)
        self.setFrameShape(QFrame.NoFrame)
        self.setTabChangesFocus(True)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setWordWrapMode(QTextOption.WrapAtWordBoundaryOrAnywhere)

    def __str__(self):
        return self.toPlainText()

    def contextMenuEvent(self, event: QContextMenuEvent):
        """Standard context menu plus CIF-specific copy/delete actions."""
        menu = self.createStandardContextMenu(event.pos())
        actionCopyVhead = menu.addAction("Copy CIF Keyword")
        deleterow = menu.addAction("Delete Row")
        actionCopyVhead.triggered.connect(self.copy_vhead_item)
        deleterow.triggered.connect(self._delete_row)
        menu.exec(event.globalPos())

    def _delete_row(self):
        # Delegate to the owning table, which keeps the CIF key bookkeeping.
        self.parent.delete_row(self.row)

    def copy_vhead_item(self, row=None):
        """
        Copies the CIF keyword of the current row to the clipboard.
        The `row` argument is ignored (kept for signal compatibility).
        """
        if hasattr(self.parent, 'vheaderitems'):
            row = self.parent.currentRow()
            clipboard = QApplication.clipboard()
            clipboard.setText(self.parent.vheaderitems[row])

    def setBackground(self, color):
        """
        Set background color of the text field.
        """
        self.setStyleSheet("QPlainTextEdit {{background-color: {};}}".format(str(color.name())))

    def setUneditable(self):
        """Make the field read-only (used for the CIF and data columns)."""
        self.setReadOnly(True)

    def setText(self, text: str, color=None):
        """
        Set text of a Plaintextfield with lines broken at literal "\\n"
        sequences (the CIF line-break convention).
        """
        if color:
            self.setBackground(color)
        parts = text.split(r'\n')
        # Bugfix: setPlainText() replaces the entire content, so the original
        # loop kept only the LAST segment. Set the first segment and append
        # the rest to actually produce line breaks.
        self.setPlainText(parts[0] if parts else '')
        for part in parts[1:]:
            self.appendPlainText(part)

    def eventFilter(self, widget: QObject, event: QEvent):
        """
        Event filter to ignore wheel events to prevent accidental changes.
        NOTE(review): this filter is never installed via installEventFilter()
        in this class — confirm whether that is intentional.
        """
        if event.type() == QEvent.Wheel and widget and not widget.hasFocus():
            event.ignore()
            return True
        return QObject.eventFilter(self, widget, event)

    def getText(self):
        """Return the plain text content."""
        return self.toPlainText()

    def sizeHint(self) -> QSize:
        """Text field sizes are scaled to text length"""
        if not self.getText():
            return QSize(self.width(), self.minheight)
        else:
            size = QSize(100, int(0.33 * len(self.getText()) + 30))
            if size.height() > 500:
                # Prevent extreme height for long text:
                return QSize(100, 500)
            return size
class MyComboBox(QComboBox):
    """
    A special QComboBox with convenient methods to set the background color
    and other things.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.parent: MyCifTable = parent
        self.setParent(parent)
        self.row: int = -1  # table row of this widget; set by setCellWidget
        self.setFocusPolicy(Qt.StrongFocus)
        self.setSizeAdjustPolicy(QComboBox.AdjustToMinimumContentsLength)
        self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
        self.setEditable(True)  # only editable as new template
        self.installEventFilter(self)
        self.actionDelete = QAction("Delete Row", self)
        self.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.addAction(self.actionDelete)
        self.actionDelete.triggered.connect(self._delete_row)

    def __str__(self):
        return self.currentText()

    def _delete_row(self):
        # Delegate to the owning table, which keeps the CIF key bookkeeping.
        self.parent.delete_row(self.row)

    def eventFilter(self, widget: QObject, event: QEvent):
        """
        Event filter to ignore wheel events in comboboxes to prevent
        accidental changes to them.
        """
        if event.type() == QEvent.Wheel:
            event.ignore()
            return True
        return QObject.eventFilter(self, widget, event)

    def setUneditable(self):
        """Make the combobox read-only.

        Bugfix: the previous implementation called self.setFlags(self.flags() ^ ...),
        but flags()/setFlags() are QTableWidgetItem methods that do not exist
        on QComboBox, so placing a combobox in a read-only column raised
        AttributeError. Disabling the edit line matches the intent.
        """
        self.setEditable(False)

    def setText(self, txt: str):
        """Set the edit text, hard-wrapped to 30 characters per line."""
        self.setEditText('\n'.join(wrap(txt, width=30)))

    def addItem(self, *__args):
        """Add an item (text, userData), hard-wrapping the text at 60 chars."""
        text = '\n'.join(wrap(__args[0], width=60))
        super().addItem(text, __args[1])
class MyTableWidgetItem(QTableWidgetItem):
    """QTableWidgetItem that can be switched to a read-only, selectable state."""

    def __init__(self, *args, **kwargs):
        # Forwarding args and kwargs is essential here; otherwise the
        # horizontal header text goes missing.
        super().__init__(*args, **kwargs)

    def setUneditable(self):
        """Clear the editable flag while keeping the item selectable."""
        # noinspection PyTypeChecker
        without_edit = self.flags() ^ Qt.ItemIsEditable
        self.setFlags(without_edit)
        # noinspection PyTypeChecker
        self.setFlags(self.flags() | Qt.ItemIsSelectable)
class MyEQTableWidget(QTableWidget, ItemTextMixin):
    """
    A table widget for the equipment list.
    """

    def __init__(self, parent: QTableWidget = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.parent = parent
        self.setParent(parent)
        self.setWordWrap(QTextOption.WrapAtWordBoundaryOrAnywhere)

    def eventFilter(self, widget: QObject, event: QEvent):
        """No-op filter; kept for interface symmetry with the other tables."""
        return QObject.eventFilter(self, widget, event)

    def add_row_if_needed(self):
        """Keep at least two trailing empty rows available for new entries."""
        rowcount = self.rowCount()
        filled = 0
        for row in range(rowcount):
            key = ''
            try:
                key = self.text(row, 0)
            except (AttributeError, TypeError):
                pass
            if key:  # don't count empty key rows
                filled += 1
        if rowcount - filled < 2:
            self.add_equipment_row()

    def add_equipment_row(self, key_text: str = '', value_text: str = ''):
        """
        Add a new row with content to the table (Equipment or Property).
        """
        if not isinstance(value_text, str):
            return
        if not isinstance(key_text, str):
            return
        # Create an empty row at the bottom of the table:
        row_num = self.rowCount()
        self.insertRow(row_num)
        key_item = MyQPlainTextEdit(parent=self)
        key_item.row = row_num
        key_item.setPlainText(key_text)
        # This is critical, because otherwise add_row_if_needed does not
        # work as expected:
        key_item.textChanged.connect(self.add_row_if_needed)
        self.setCellWidget(row_num, 0, key_item)
        tab_item = MyQPlainTextEdit(self)
        tab_item.setPlainText(retranslate_delimiter(value_text))
        self.setCellWidget(row_num, 1, tab_item)

    def adjustToContents(self):
        """Resize all rows to fit their content."""
        self.resizeRowsToContents()

    def delete_row(self, row: int = None):
        """Remove the given (or current) row and renumber the cell widgets."""
        # Bugfix: `if not row` treated row 0 (and the `False` emitted by a
        # triggered signal) as "no row given"; only fall back when no valid
        # index was supplied.
        if row is None or isinstance(row, bool) or row < 0:
            row = self.currentRow()
        self.removeRow(row)
        self.set_row_numbers()

    def set_row_numbers(self):
        """Re-sync each cell widget's `.row` attribute with its table row."""
        for row in range(self.rowCount()):
            self.setCurrentCell(row, 1)
            for col in range(self.columnCount()):
                try:
                    self.cellWidget(row, col).row = row
                # Bugfix: a missing cell widget (None) raises AttributeError,
                # which the previous `except ValueError` never caught.
                except (AttributeError, ValueError):
                    print('Row or Column of MyEQTableWidget does not exist.')
class MyPropTableWidget(QTableWidget):
    """
    A table widget for the properties table.
    """

    def __init__(self, parent: QWidget, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        self.parent = parent
        self.setParent(parent)

    def delete_row(self, row: int = None):
        """Remove the given (or current) row and renumber the cell widgets."""
        # Bugfix: `if not row` treated row 0 (and the `False` emitted by a
        # triggered signal) as "no row given"; only fall back when no valid
        # index was supplied.
        if row is None or isinstance(row, bool) or row < 0:
            row = self.currentRow()
        self.removeRow(row)
        # The row numbers must be set again because one row was deleted.
        self.set_row_numbers()

    def set_row_numbers(self):
        """Re-sync each cell widget's `.row` attribute with its table row."""
        for row in range(self.rowCount()):
            for col in range(self.columnCount()):
                self.setCurrentCell(row, col)
                try:
                    self.cellWidget(row, col).row = row
                # Bugfix: a missing cell widget (None) raises AttributeError,
                # which the previous `except ValueError` never caught.
                except (AttributeError, ValueError):
                    # Bugfix: the message previously named MyEQTableWidget.
                    print('Row or Column of MyPropTableWidget does not exist.')
class MyMainStackedWidget(QStackedWidget):
    """Navigation facade over the application's stacked pages."""

    # Page indices within the stack, in layout order:
    (_MAIN, _CIF_TEXT, _INFO, _SOURCES,
     _OPTIONS, _LOOPS, _CHECKCIF, _COD) = range(8)

    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.setParent(parent)

    def got_to_main_page(self):
        self.setCurrentIndex(self._MAIN)

    def go_to_cif_text_page(self):
        self.setCurrentIndex(self._CIF_TEXT)

    def go_to_info_page(self):
        self.setCurrentIndex(self._INFO)

    def go_to_data_sources_page(self):
        self.setCurrentIndex(self._SOURCES)

    def go_to_options_page(self):
        self.setCurrentIndex(self._OPTIONS)

    def go_to_loops_page(self):
        self.setCurrentIndex(self._LOOPS)

    def on_loops_page(self):
        return self.currentIndex() == self._LOOPS

    def go_to_checkcif_page(self):
        self.setCurrentIndex(self._CHECKCIF)

    def got_to_cod_page(self):
        self.setCurrentIndex(self._COD)

    @property
    def current_page(self):
        return self.currentIndex()

    def on_checkcif_page(self):
        return self.current_page == self._CHECKCIF
|
import gym
from gym import spaces
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from gym.utils import seeding
# import panda as pd
import scipy.io as sio
'''
本环境是利用下行链路
每一次计算强化学习动作是一个时隙。
这个时隙之内速度,位置不变?
此版本为最简单的版本
支持断点重传
始终是跟最大的相连
本次尝试利用范围内的相对位置作为状态
'''
class Downlink_2d_v2(gym.Env):
    def __init__(self):
        # UAV and user parameters
        self.NUAV = 1
        self.NSP = 5  # number of ground nodes (service points)
        self.Vmax = 30  # maximum speed in m/s
        self.Vmaxz = 10
        self.amax = 30  # maximum acceleration in m/s^2
        self.delta = 0.1  # slot length (s)
        self.T = 1000  # total time
        self.time = 0
        self.N = self.T/self.delta
        self.Pmax = 10  # maximum transmit power (dBm)
        self.choose = 'Urban'  # environment type used by PLoss()
        self.SNRlimit = 0
        self.K = 0.01  # air drag coefficient
        self.alpha = 4  # path-loss exponent
        # Environment / channel parameters
        self.done = 0
        self.B = 1e6  # bandwidth 1 MHz
        self.N0 = -130  # noise floor (dBm)
        self.m = 1900  # mass in g
        self.R_th = 1.3*self.B
        f = 3e9  # carrier frequency
        c = 3e8  # speed of light
        self.lossb = 20*math.log10(f*4*math.pi/c)  # free-space loss constant
        # Initial state; positive acceleration is up, negative is down.
        self.a = np.array([0, 0])  # acceleration
        self.v = np.array([10, 10])  # velocity
        self.placex = 0  # UAV position x
        self.placey = 0  # UAV position y
        self.placez = 100  # UAV position z
        self.SPplacex = np.random.randint(-200, 200, self.NSP)  # node positions x
        self.SPplacey = np.random.randint(-200, 200, self.NSP)  # node positions y
        self.G = np.random.uniform(20, 80, self.NSP)  # data volume per node (MB)
        self.P = 10  # initial transmit power (dBm)
        self.P_data = 5  # processing power (W)
        # NOTE(review): PLoss() returns a (PLmax, D) tuple, so this attribute
        # holds a tuple, not just the loss list — confirm where it is used.
        self.PLmax = self.PLoss()
        self.rate, self.SNR = self.Rate()
        # Connect to the node with the best rate; -1 means no link meets the limit.
        self.cline = np.argmax(self.rate)
        if self.SNR[self.cline] <= self.SNRlimit:
            self.cline = -1
        # Action space: x/y acceleration scales and transmit power scale.
        ax = spaces.Box(low=-self.amax, high=self.amax, shape=(3,), dtype=np.float32)
        ay = spaces.Box(low=-self.amax, high=self.amax, shape=(3,), dtype=np.float32)
        p = spaces.Box(low=0, high=self.Pmax, shape=(1,), dtype=np.float32)
        self.action_space = np.array([ax, ay, p])
        v_spacex = spaces.Box(low=-self.Vmax, high=self.Vmax, shape=(1,), dtype=np.float32)
        v_spacey = spaces.Box(low=-self.Vmax, high=self.Vmax, shape=(1,), dtype=np.float32)
        p_spacex = spaces.Box(low=-200, high=200, shape=(1,), dtype=np.float32)
        p_spacey = spaces.Box(low=-200, high=200, shape=(1,), dtype=np.float32)
        o_spacex = spaces.Box(low=-200, high=200, shape=(1,), dtype=np.float32)
        o_spacey = spaces.Box(low=-200, high=200, shape=(1,), dtype=np.float32)
        SNR = spaces.Box(low=-50, high=150, shape=(1,), dtype=np.float32)
        Gleft = spaces.Box(low=0, high=100, shape=(1,), dtype=np.float32)
        # Observation layout: first 12 slots are (x, y, SNR, remaining data)
        # for the 3 strongest in-range nodes, then the UAV's own x/y, vx/vy,
        # ax/ay and transmit power.
        self.observation_space = np.array([p_spacex, p_spacey, SNR, Gleft,
                                           p_spacex, p_spacey, SNR, Gleft,
                                           p_spacex, p_spacey, SNR, Gleft,
                                           o_spacex, o_spacey,
                                           v_spacex, v_spacey,
                                           ax, ay,
                                           p])
        self.data = [self.placex, self.placey, self.v[0], self.v[1], self.a[0], self.a[1], self.P, 0, self.cline, 0, 0]
def reset(self):
# 初始化强化学习
self.done = 0
# 初始化无人机 和 node节点 数据
self.a = [0, 0] # 加速度
self.v = [10, 10] # 速度
self.placex = 0 # 无人机位置x
self.placey = 0 # 无人机位置y
self.placez = 100 # 无人机位置z
self.SPplacex = np.random.randint(-200, 200, self.NSP) # 节点位置x
self.SPplacey = np.random.randint(-200, 200, self.NSP) # 节点位置y
self.G = np.random.uniform(20, 80, self.NSP) # 每个节点的数据量
self.time = 0
self.rate, self.SNR = self.Rate()
self.cline = np.argmax(self.SNR)
if self.SNR[self.cline] <= self.SNRlimit:
self.cline = -1
S = np.array([])
repositionx,repositiony = self.relative_position()
cline = list(np.where(self.SNR > self.SNRlimit)[0])
choose = -np.sort(-self.SNR[cline])
if len(cline) <= 3:
for c in choose:
num = np.where(c == self.SNR)
S = np.append(S,[repositionx[num],repositiony[num],self.SNR[num],self.G[num]])
for _ in range(len(S), 12):
S = np.append(S,[np.float(0)])
else:
choose=choose[0:3]
for c in choose:
num = np.where(c == self.SNR)
S = np.append(S, [repositionx[num], repositiony[num], self.SNR[num], self.G[num]])
S = np.append(S,[self.placex, self.placey, self.v[0], self.v[1], self.a[0], self.a[1], self.P])
return S
def step(self, a):
acc = a[0:2]*self.amax
self.P = (a[2])*self.Pmax/2
P = 10**(self.P/10)/1000 # W 为单位
# 速度、位置变化
# self.a = self.a + acc - self.K*self.v
self.a = acc
self.v += self.a*self.delta
self.placex += self.v[0]*self.delta # 无人机位置x
self.placey += self.v[1]*self.delta # 无人机位置y
# 判断所链接的用户
self.rate, self.SNR = self.Rate()
self.cline = np.argmax(self.rate)
# 判断信噪比
PS = self.P_calfly() # 动力消耗功率
if self.SNR[self.cline] <= self.SNRlimit:
self.cline = -1
reward = -1e-4
else:
Gidea = self.rate[self.cline]*self.delta/1e6
if Gidea < self.G[self.cline]:
output = Gidea
self.G[self.cline] -= output
reward = output / (P + PS + self.P_data)
else:
output = self.G[self.cline]
self.G[self.cline] = 0
reward = output / self.delta / (P + PS + self.P_data)
# 删除
# self.SPplacex = np.delete(self.SPplacex, self.cline)
# self.SPplacey = np.delete(self.SPplacey, self.cline)
# self.G = np.delete(self.G, self.cline)
teskleft = np.sum(self.G)
if teskleft == 0:
self.done = 1
# 限制最大用时
# if self.time == self.T:
# self.done = 1
self.time += 1
# 限制约束范围
if self.placex > 400 or self.placex < -400 or self.placey > 400 or self.placey< -400:
reward = -1e-4*np.maximum(abs(self.placex)-200, 0) - np.maximum(abs(self.placey)-200, 0)
# 限制速度范围
if self.v[0] > 40 or self.v[0] < -40 or self.v[1] > 40 or self.v[1] < -40:
reward = - 5e-4*(np.maximum(abs(self.placex)-40, 0) + np.maximum(abs(self.placey)-40, 0))
#飞机最小速度约束
# if np.sum(self.v) < 5:
# done = 1
# reward = -100
S_ = np.array([])
repositionx,repositiony = self.relative_position()
cline = list(np.where(self.SNR > self.SNRlimit)[0])
choose = -np.sort(-self.SNR[cline])
if len(cline) <= 3:
for c in choose:
num = np.where(c == self.SNR)
S_ = np.append(S_,[repositionx[num],repositiony[num],self.SNR[num],self.G[num]])
for _ in range(len(S_), 12):
S_ = np.append(S_, [np.float(0)])
else:
choose=choose[0:3]
for c in choose:
num = np.where(c == self.SNR)
S_ = np.append(S_, [repositionx[num], repositiony[num], self.SNR[num], self.G[num]])
S_ = np.append(S_,[self.placex, self.placey, self.v[0], self.v[1], self.a[0], self.a[1], self.P])
self.record(a, PS, self.cline, self.rate[self.cline], reward, self.done)
self.time += 1
return S_, reward, self.done, {}
# 计算瞬时信噪比
def Rate(self):
PLmax, D = self.PLoss()
rate = np.zeros(self.NSP)
SNR = np.zeros(self.NSP)
for i in range(self.NSP):
SNR[i] = self.P-PLmax[i]-self.N0
rate[i] = self.B*math.log2(1+self.IdB(SNR[i]))
return rate, SNR
    # Instantaneous air-to-ground path loss
    def PLoss(self):
        """Air-to-ground path loss (dB) toward every node.

        Uses polynomial-fitted (a, b) LoS-probability parameters for the
        environment selected by self.choose.

        Returns:
            (PLmax, D): list of per-node path losses in dB, and the 3-D
            distance of the LAST node only (D is not per-node).
        """
        # Polynomial coefficient tables for the (a, b) fit:
        caij = np.zeros(shape=[4, 4])
        cbij = np.zeros(shape=[4, 4])
        caij[0, :] = [9.34e-1, 2.30e-1, -2.25e-3, 1.86e-5]
        caij[1, :] = [1.97e-2, 2.44e-3, 6.58e-6, 0]
        caij[2, :] = [-1.24e-4, -3.34e-6, 0, 0]
        caij[3, :] = [2.73e-7, 0, 0, 0]
        cbij[0, :] = [1.17, -7.56e-2, 1.98e-3, -1.78e-5]
        cbij[1, :] = [-5.79e-3, 1.81e-4, 1.65e-3, 0]
        cbij[2, :] = [1.73e-5, -2.02e-2, 0, 0]
        cbij[3, :] = [-2e-8, 0, 0, 0]
        # (alpha, beta, gamma) environment triples:
        subruban = [0.1, 750, 8]
        Urban = [0.3, 500, 15]
        DenseUrban = [0.5, 300, 20]
        HighUrban = [0.5, 300, 50]
        a, b, inta_los, inta_Nlos = 0, 0, 0, 0
        if self.choose == 'subruban':
            a, b = self.cal_a_b(subruban, caij, cbij)
            inta_los = 0.1
            inta_Nlos = 21
        elif self.choose == 'Urban':
            a, b = self.cal_a_b(Urban, caij, cbij)
            inta_los = 1
            inta_Nlos = 20
        elif self.choose == 'DenseUrban':
            a, b = self.cal_a_b(DenseUrban, caij, cbij)
            inta_los = 1.6
            inta_Nlos = 23
        elif self.choose == 'HighUrban':
            a, b = self.cal_a_b(HighUrban, caij, cbij)
            inta_los = 2.3
            inta_Nlos = 34
        PLmax = []
        for time in range(0, self.NSP):
            # Horizontal distance (L, currently unused), altitude, 3-D distance.
            L = math.sqrt((self.placex - self.SPplacex[time]) ** 2 + (self.placey - self.SPplacey[time]) ** 2)
            H = self.placez
            D = math.sqrt((self.placex - self.SPplacex[time]) ** 2 +
                          (self.placey - self.SPplacey[time]) ** 2 +
                          (self.placez) ** 2)
            # Elevation angle in degrees and resulting LoS probability.
            theta = 180*math.asin(H / D)/math.pi
            Plos = (1 / (1 + a * math.exp(-b * (theta - a))))
            PNlos = 1 - Plos
            # Distance loss + constant + LoS/NLoS excess losses.
            PLmax.append(10 * math.log10(D**self.alpha) + self.lossb + Plos * inta_los + PNlos * inta_Nlos)
        return PLmax,D
def cal_a_b(self, choose, caij, cbij):
alpha = choose[0]
belta = choose[1]
gama = choose[2]
a = 0
b = 0
for j in range(0, 4):
for i in range(3 - j):
a += ((alpha * belta) ** i) * (gama ** j) * caij[i, j]
b += ((alpha * belta) ** i) * (gama ** j) * cbij[i, j]
return a, b
# 计算顺时功率
def P_calfly(self):
C1 = 9.26e-4
C2 = 2250
g = 9.8
normV = np.linalg.norm(self.v)
norma = np.linalg.norm(self.a)
cos = np.sum(self.v*self.a)
Ps = C1*normV**3+C2/normV*(1+(norma**2-cos**2/normV**2)/(g**2))
return Ps
def relative_position(self):
replacex = self.SPplacex - self.placex
replacey = self.SPplacey - self.placey
return replacex, replacey
# 计算dB
def dB(self,a):
b = 10*math.log10(a/10)
return b
def IdB(self, a):
b = math.pow(10,a/10)
return b
# 画图三维
def drawplot(self):
fig = plt.figure(1)
ax = Axes3D(fig)
ax.scatter(self.placex, self.placey, 100)
ax.scatter(self.SPplacex, self.SPplacey, np.zeros_like(self.SPplacex))
ax.text(self.placex, self.placey, self.placez,
'loc='+str([self.placex, self.placey, self.placez])+'\n'
+'V='+str(self.v)+'\n'+'P='+str(self.P))
if self.cline != -1:
ax.plot([self.placex, self.SPplacex[self.cline]], [self.placey, self.SPplacey[self.cline]],
[self.placez, 0], '--')
ax.text((self.placex + self.SPplacex[self.cline])/2, (self.placey+self.SPplacey[self.cline])/2,
(self.placez + 0)/2, str(self.rate[self.cline]))
ax.text(self.SPplacex[self.cline], self.SPplacex[self.cline], self.SPplacex[self.cline],
'loc='+str(self.SPplacex[self.cline])+str(self.SPplacex[self.cline])+'\n'
+'G='+str(self.G[self.cline])+'\n')
ax.set_xlim(-400, 400)
ax.set_ylim(-400, 400)
ax.set_zlim(0, 150)
plt.show()
    def trajectory(self):
        """Plot the recorded flight path (drawn at z=100) over the ground
        service points, using the telemetry accumulated in ``self.data``.
        """
        fig = plt.figure(1)
        ax = Axes3D(fig)
        # Columns 0/1 of the telemetry rows are the UAV x/y positions (see record()).
        trax = self.data[:, 0]
        tray = self.data[:, 1]
        ax.set_xlim(-400, 400)
        ax.set_ylim(-400, 400)
        ax.set_zlim(0, 120)
        ax.plot3D(trax, tray, 100*np.ones_like(trax), 'r')
        ax.scatter3D(self.SPplacex, self.SPplacey, np.zeros_like(self.SPplacex), 'g')
        # NOTE(review): the label loop runs over NUAV, but SPplacex/SPplacey/G
        # are indexed per service point (NSP elsewhere, e.g. Rate/PLoss) --
        # confirm NUAV == NSP, otherwise this mislabels or raises IndexError.
        for cline in range(self.NUAV):
            ax.text(self.SPplacex[cline], self.SPplacey[cline], 0,
                    'loc=' + str(self.SPplacex[cline]) + str(self.SPplacey[cline]) + '\n'
                    + 'G=' + str(self.G[cline]))
        plt.show()
    def render(self, mode='human'):
        """Gym-compatible render hook; this environment renders nothing here
        (use drawplot()/trajectory() for visualisation)."""
        return {}
def record(self, a, ps, cline, rate, reward, done):
basic_data = [self.SPplacex,self.SPplacey, self.G]
data = [self.placex, self.placey, self.v[0], self.v[1]
, a[0], a[1], self.P, ps, cline, rate/1e6, reward]
self.data = np.vstack((self.data, data))
# if done == 1:
# sio.savemat("/home/zachary/matlab程序/UAV/2d.mat", self.data)
# sio.savemat("/home/zachary/matlab程序/UAV/basic.mat", basic_data)
def putout(self):
basic_data = np.vstack((self.SPplacex, self.SPplacey, self.G))
return basic_data
if __name__ == '__main__':
    # Smoke-test drive of the environment with a hand-coded greedy policy.
    env = Downlink_2d_v2()
    env.reset()
    rate, snr = env.Rate()
    print(rate/1e6)
    print(snr)
    print(env.G)
    ###########################3
    tarx = [env.placex]
    tary = [env.placey]
    def road(env):
        # Greedy controller: accelerate towards the service point with the
        # largest remaining task G; zero any component that would exceed Vmax.
        dx = env.SPplacex
        dy = env.SPplacey
        num = np.argmax(env.G)
        aimx, aimy = dx[num]-env.placex, dy[num]-env.placey
        print(env.placex, env.placey)
        norm = np.sqrt(aimx**2+aimy**2)
        aimx = aimx/norm
        aimy = aimy/norm
        if np.abs(env.v[0] + aimx * env.delta * env.amax) > env.Vmax:
            aimx = 0
        if np.abs(env.v[1] + aimy * env.delta * env.amax) > env.Vmax:
            aimy = 0
        return np.array([aimx, aimy, 1])
    records = []
    recordv = []
    recorda = []
    recorddone = []
    recordcline = []
    recordrate = []
    recordreward = []
    recordG = []
    recordepisode = []
    recordSP = [env.SPplacex,env.SPplacey]
    done = 0
    try:
        # NOTE(review): ``done`` is never reset and env.reset() is not called
        # per episode, so after the first terminal step every remaining
        # episode's while-loop body is skipped -- confirm this warm-up
        # behaviour is intended.
        for episode in range(1000):
            while done == 0:
                action = road(env)
                S_, reward, done, info = env.step(action)
                records.append([S_[0],S_[1]])
                recordv.append([S_[2],S_[3]])
                recorda.append(action)
                recordreward.append(reward)
                recorddone.append(done)
                recordcline.append(env.cline)
                recordG.append(env.G)
                recordepisode.append(episode)
                print(reward)
                # (disabled) live 3-D visualisation of each step:
                # fig = plt.figure(1)
                # # plt.cla()
                # ax = Axes3D(fig)
                # ax.scatter3D(tarx, tary, 100*np.ones_like(tarx), 'r', marker='*')
                # ax.scatter3D(env.SPplacex, env.SPplacey, np.zeros_like(env.SPplacex))
                # ax.text(env.placex, env.placey, env.placez,
                #         'loc=' + str([env.placex, env.placey, env.placez]) + '\n'
                #         + 'V=' + str(env.v) + '\n' + 'a=' +str([action[0]*30, action[1]*30])
                #         )
                # if env.cline != -1:
                #     ax.plot([env.placex, env.SPplacex[env.cline]], [env.placey, env.SPplacey[env.cline]],
                #             [env.placez, 0], '--')
                #     ax.text((env.placex + env.SPplacex[env.cline]) / 2, (env.placey + env.SPplacey[env.cline]) / 2,
                #             (env.placez + 0) / 2, str(env.rate[env.cline]/1e6))
                #     ax.text(env.SPplacex[env.cline], env.SPplacey[env.cline], 0,
                #             'loc=' + str(env.SPplacex[env.cline]) + str(env.SPplacex[env.cline]) + '\n'
                #             + 'G=' + str(env.G[env.cline]) + '\n')
                # ax.set_xlim(-400, 400)
                # ax.set_ylim(-400, 400)
                # ax.set_zlim(0, 150)
                # plt.pause(1)
    except KeyboardInterrupt:
        # Ctrl-C dumps everything gathered so far for offline analysis in MATLAB.
        sio.savemat('/home/zachary/matlab程序/UAV/warmdata.mat', {'s': records,'v': recordv,'a': recorda,
                                                               'SP': [env.SPplacex, env.SPplacey],
                                                               'cline':recordcline, 'G':recordG, 'episode': recordepisode
                                                               })
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
def split(a, axis, factor):
    """Shard ``a`` along ``axis`` into ``factor`` device-mapped chunks.

    The axis is reshaped to (factor, size // factor) and an identity pmap
    marks the new leading sub-axis as device-mapped.
    """
    size = a.shape[axis]
    assert size % factor == 0
    shape = a.shape[:axis] + (factor, size // factor) + a.shape[axis + 1:]
    chunked = a.reshape(shape)
    return jax.pmap(lambda x: x, in_axes=axis, out_axes=axis)(chunked)
def replica(a, factor):
    """Replicate ``a`` unchanged onto ``factor`` devices (out_axes=None)."""
    dummy = jnp.ones(factor)
    return jax.pmap(lambda x, y: x, in_axes=(None, 0), out_axes=None)(a, dummy)
def unsplit(a, axis):
    """Inverse of split(): merge axes ``axis`` and ``axis + 1`` into one."""
    merged = a.shape[axis] * a.shape[axis + 1]
    return a.reshape(a.shape[:axis] + (merged,) + a.shape[axis + 2:])
def test_matmul_k_partition():
    """Matmul with the contraction (k) dimension partitioned across devices."""
    def matmul_k_partition(lhs, rhs):
        @partial(jax.pmap,
                 axis_name='k',
                 in_axes=(1, 0),
                 out_axes=None)
        def matmul(lhs, rhs):
            # Each device holds one k-slice; psum accumulates partial products.
            res = lhs @ rhs
            return jax.lax.psum(res, axis_name='k')
        return matmul(lhs, rhs)
    n_dev = len(jax.devices())
    a = jnp.ones((1024, 1024))
    b = jnp.ones((1024, 1024))
    # Bug fix: split() requires the shard factor; it was called with only
    # (array, axis), which raises TypeError.
    a = split(a, 1, n_dev)
    b = split(b, 0, n_dev)
    c = matmul_k_partition(a, b)
    # NOTE(review): ``sharding_spec`` exists on the legacy ShardedDeviceArray;
    # newer jax exposes ``.sharding`` instead -- confirm the jax version used.
    print(c.shape, c.sharding_spec)
def test_mlp_forward():
    """Two-layer MLP forward pass with megatron-style weight partitioning."""
    @partial(jax.pmap, in_axes=(None, 1), out_axes=1)
    def matmul_r_s1_s1(x, w):
        # Replicated activations x column-sharded weight -> column-sharded out.
        return x @ w
    @partial(jax.pmap, in_axes=(1, 0), out_axes=None, axis_name='k')
    def matmul_s1_s0_r(x, w):
        # Column-sharded activations x row-sharded weight; psum replicates out.
        res = x @ w
        return jax.lax.psum(res, axis_name='k')
    N = 1024
    D = 1024
    n_dev = len(jax.devices())
    x = jnp.ones((N, D))
    w1 = jnp.ones((D, D))
    w2 = jnp.ones((D, D))
    # Bug fix: replica()/split() require the shard factor argument; the
    # original calls omitted it and raised TypeError.
    x = replica(x, n_dev)
    w1 = split(w1, 1, n_dev)
    w2 = split(w2, 0, n_dev)
    x = matmul_r_s1_s1(x, w1)
    x = matmul_s1_s0_r(x, w2)
@partial(jax.custom_vjp, nondiff_argnums=(1,))
def f_operator(x, axis_name):
    """Identity in the forward pass; psum over ``axis_name`` in the backward.

    The "f" operator of megatron-style tensor parallelism: activations pass
    through unchanged, gradients are all-reduced (mirror image of g_operator).
    """
    return x
def f_operator_fwd(x, axis_name):
    # Bug fix: f_operator takes (x, axis_name); the original call dropped
    # axis_name and raised TypeError.
    return f_operator(x, axis_name), ()
def f_operator_bwd(axis_name, res, g):
    # Bug fix: the original reduced the undefined name ``x`` (NameError);
    # the incoming cotangent ``g`` is what must be all-reduced.
    return jax.lax.psum(g, axis_name=axis_name),
f_operator.defvjp(f_operator_fwd, f_operator_bwd)
@partial(jax.custom_vjp, nondiff_argnums=(1,))
def g_operator(x, axis_name):
    """psum over ``axis_name`` in the forward pass; identity in the backward.

    The "g" operator of megatron-style tensor parallelism: outputs are
    all-reduced, gradients pass through unchanged (mirror image of f_operator).
    """
    return jax.lax.psum(x, axis_name=axis_name)
def g_operator_fwd(x, axis_name):
    # No residuals are needed: the backward pass is the identity.
    return g_operator(x, axis_name), ()
def g_operator_bwd(axis_name, res, g):
    # The all-reduce's cotangent passes straight through.
    return g,
g_operator.defvjp(g_operator_fwd, g_operator_bwd)
def test_mlp_model_parallel():
    """Check megatron-style model parallelism against a serial 2-layer MLP."""
    lr = 0.1
    n_epoch = 1
    def loss_serial(x, y, w1, w2):
        # Reference implementation: plain MLP with MSE loss.
        x = x @ w1
        x = jax.nn.relu(x)
        x = x @ w2
        return ((x - y) ** 2).mean()
    def step_serial(x, y, w1, w2):
        g_w1, g_w2 = jax.grad(loss_serial, argnums=(2, 3))(x, y, w1, w2)
        return w1 - lr * g_w1, w2 - lr * g_w2
    def train_serial(x, y, w1, w2):
        for i in range(n_epoch):
            w1, w2 = step_serial(x, y, w1, w2)
        return w1, w2
    def loss_parallel(x, y, w1, w2):
        # f: identity fwd / psum bwd; g: psum fwd / identity bwd.
        x = f_operator(x, axis_name='model_parallel')
        x = x @ w1
        x = jax.nn.relu(x)
        x = x @ w2
        x = g_operator(x, axis_name='model_parallel')
        return ((x - y) ** 2).mean()
    @partial(jax.pmap, in_axes=(None, None, 1, 0), out_axes=(1, 0),
             axis_name='model_parallel')
    def step_parallel(x, y, w1, w2):
        g_w1, g_w2 = jax.grad(loss_parallel, argnums=(2, 3))(x, y, w1, w2)
        return w1 - lr * g_w1, w2 - lr * g_w2
    def train_parallel(x, y, w1, w2):
        # Shard w1 by columns and w2 by rows across all devices.
        model_parallel = len(jax.devices())
        w1 = split(w1, 1, model_parallel)
        w2 = split(w2, 0, model_parallel)
        for i in range(n_epoch):
            w1, w2 = step_parallel(x, y, w1, w2)
        return unsplit(w1, 1), unsplit(w2, 0)
    N = 8
    D = 128
    np.random.seed(0)
    x = np.random.uniform(size=(N, D))
    y = np.random.uniform(size=(N, D))
    w1 = np.random.uniform(size=(D, D))
    w2 = np.random.uniform(size=(D, D))
    w1_serial, w2_serial = train_serial(x, y, w1, w2)
    w1_parallel, w2_parallel = train_parallel(x, y, w1, w2)
    np.testing.assert_allclose(w1_serial, w1_parallel, rtol=1e-4)
    np.testing.assert_allclose(w2_serial, w2_parallel, rtol=1e-4)
def test_mlp_data_parallel():
    """Check data parallelism (batch sharding + gradient pmean) against serial."""
    lr = 0.1
    n_epoch = 1
    def loss_serial(x, y, w1, w2):
        x = x @ w1
        x = jax.nn.relu(x)
        x = x @ w2
        return ((x - y) ** 2).mean()
    def step_serial(x, y, w1, w2):
        g_w1, g_w2 = jax.grad(loss_serial, argnums=(2, 3))(x, y, w1, w2)
        return w1 - lr * g_w1, w2 - lr * g_w2
    def train_serial(x, y, w1, w2):
        for i in range(n_epoch):
            w1, w2 = step_serial(x, y, w1, w2)
        return w1, w2
    def loss_parallel(x, y, w1, w2):
        # Same model as the serial loss; each device sees its own batch shard.
        x = x @ w1
        x = jax.nn.relu(x)
        x = x @ w2
        return ((x - y) ** 2).mean()
    @partial(jax.pmap, in_axes=(0, 0, None, None), out_axes=(None, None),
             axis_name='data_parallel')
    def step_parallel(x, y, w1, w2):
        g_w1, g_w2 = jax.grad(loss_parallel, argnums=(2, 3))(x, y, w1, w2)
        # Average the per-shard gradients so the update matches full-batch SGD.
        g_w1 = jax.lax.pmean(g_w1, axis_name='data_parallel')
        g_w2 = jax.lax.pmean(g_w2, axis_name='data_parallel')
        return w1 - lr * g_w1, w2 - lr * g_w2
    def train_parallel(x, y, w1, w2):
        data_parallel = len(jax.devices())
        x = split(x, 0, data_parallel)
        y = split(y, 0, data_parallel)
        for i in range(n_epoch):
            w1, w2 = step_parallel(x, y, w1, w2)
        return w1, w2
    N = 8
    D = 128
    np.random.seed(0)
    x = np.random.uniform(size=(N, D))
    y = np.random.uniform(size=(N, D))
    w1 = np.random.uniform(size=(D, D))
    w2 = np.random.uniform(size=(D, D))
    w1_serial, w2_serial = train_serial(x, y, w1, w2)
    w1_parallel, w2_parallel = train_parallel(x, y, w1, w2)
    np.testing.assert_allclose(w1_serial, w1_parallel, rtol=1e-4)
    np.testing.assert_allclose(w2_serial, w2_parallel, rtol=1e-4)
def test_mlp_data_model_parallel():
    """Check combined data + model parallelism (nested pmap) against serial.

    NOTE(review): model_parallel is hard-coded to 2 and the nesting requires
    len(jax.devices()) to be a multiple of 2 -- confirm the device topology.
    """
    lr = 0.1
    n_epoch = 1
    def loss_serial(x, y, w1, w2):
        x = x @ w1
        x = jax.nn.relu(x)
        x = x @ w2
        return ((x - y) ** 2).mean()
    def step_serial(x, y, w1, w2):
        g_w1, g_w2 = jax.grad(loss_serial, argnums=(2, 3))(x, y, w1, w2)
        return w1 - lr * g_w1, w2 - lr * g_w2
    def train_serial(x, y, w1, w2):
        for i in range(n_epoch):
            w1, w2 = step_serial(x, y, w1, w2)
        return w1, w2
    def loss_parallel(x, y, w1, w2):
        # Inner (model-parallel) axis: f/g operators as in test_mlp_model_parallel.
        x = f_operator(x, axis_name='model_parallel')
        x = x @ w1
        x = jax.nn.relu(x)
        x = x @ w2
        x = g_operator(x, axis_name='model_parallel')
        return ((x - y) ** 2).mean()
    @partial(jax.pmap, in_axes=(None, None, 1, 0), out_axes=(1, 0),
             axis_name='model_parallel')
    def step_model_parallel(x, y, w1, w2):
        g_w1, g_w2 = jax.grad(loss_parallel, argnums=(2, 3))(x, y, w1, w2)
        return g_w1, g_w2
    @partial(jax.pmap, in_axes=(0, 0, None, None), out_axes=(None, None),
             axis_name='data_parallel')
    def step_data_parallel(x, y, w1, w2):
        # Outer (data-parallel) axis: nested pmap, then gradient averaging.
        g_w1, g_w2 = step_model_parallel(x, y, w1, w2)
        g_w1 = jax.lax.pmean(g_w1, axis_name='data_parallel')
        g_w2 = jax.lax.pmean(g_w2, axis_name='data_parallel')
        return w1 - lr * g_w1, w2 - lr * g_w2
    def train_parallel(x, y, w1, w2):
        model_parallel = 2
        data_parallel = len(jax.devices()) // model_parallel
        x = split(x, 0, data_parallel)
        y = split(y, 0, data_parallel)
        w1 = split(w1, 1, model_parallel)
        w2 = split(w2, 0, model_parallel)
        for i in range(n_epoch):
            w1, w2 = step_data_parallel(x, y, w1, w2)
        return unsplit(w1, 1), unsplit(w2, 0)
    N = 8
    D = 128
    np.random.seed(0)
    x = np.random.uniform(size=(N, D))
    y = np.random.uniform(size=(N, D))
    w1 = np.random.uniform(size=(D, D))
    w2 = np.random.uniform(size=(D, D))
    w1_serial, w2_serial = train_serial(x, y, w1, w2)
    w1_parallel, w2_parallel = train_parallel(x, y, w1, w2)
    np.testing.assert_allclose(w1_serial, w1_parallel, rtol=1e-4)
    np.testing.assert_allclose(w2_serial, w2_parallel, rtol=1e-4)
if __name__ == "__main__":
    # Run the numerically-checked parallel-vs-serial training tests.
    test_mlp_model_parallel()
    test_mlp_data_parallel()
    test_mlp_data_model_parallel()
|
# ---------------------------------
# Data preparation
# ----------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Visualise the MNIST data
# Download MNIST via keras.datasets
from keras.datasets import mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
# Flatten each image to a 1-D row (2-D data matrix)
train_x = train_x.reshape(train_x.shape[0], -1)
# Keep only the first 1000 samples
train_x = pd.DataFrame(train_x[:1000, :])
train_y = train_y[:1000]
#%%
# -----------------------------------
# PCA
# -----------------------------------
from sklearn.decomposition import PCA
# Fit a PCA transform on the training data and project it
pca = PCA()
x_pca = pca.fit_transform(train_x)
# Scatter the first two principal components, coloured per digit class
f, ax = plt.subplots(1)
for i in range(10):
    mask = train_y == i
    plt.scatter(x_pca[mask, 0], x_pca[mask, 1], label=i, s=10, alpha=0.5)
ax.legend(bbox_to_anchor=(1.00, 1), loc='upper left')
plt.show()
#%%
# -----------------------------------
# LDA (Linear Discriminant Analysis)
# -----------------------------------
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# Derive the two axes that best separate the classes via linear discriminant analysis
lda = LDA(n_components=2)
x_lda = lda.fit_transform(train_x, train_y)
# Plot the 2-D projection per class.
# Separation looks good, but note LDA uses the target labels, so this
# comparison is heavily biased in its favour versus the unsupervised methods.
f, ax = plt.subplots(1)
for i in range(10):
    mask = train_y == i
    plt.scatter(x_lda[mask, 0], x_lda[mask, 1], label=i, s=10, alpha=0.5)
ax.legend(bbox_to_anchor=(1.00, 1), loc='upper left')
plt.show()
#%%
# -----------------------------------
# t-sne
# -----------------------------------
from sklearn.manifold import TSNE
# Transform with t-SNE
tsne = TSNE(n_components=2)
x_tsne = tsne.fit_transform(train_x)
# Scatter the 2-D embedding, coloured per digit class
f, ax = plt.subplots(1)
for i in range(10):
    mask = train_y == i
    plt.scatter(x_tsne[mask, 0], x_tsne[mask, 1], label=i, s=10, alpha=0.5)
ax.legend(bbox_to_anchor=(1.00, 1), loc='upper left')
plt.show()
#%%
# -----------------------------------
# UMAP
# -----------------------------------
import umap
# Transform with UMAP
um = umap.UMAP()
x_umap = um.fit_transform(train_x)
# Scatter the 2-D embedding, coloured per digit class
f, ax = plt.subplots(1)
for i in range(10):
    mask = train_y == i
    plt.scatter(x_umap[mask, 0], x_umap[mask, 1], label=i, s=10, alpha=0.5)
ax.legend(bbox_to_anchor=(1.00, 1), loc='upper left')
plt.show()
|
# servicedirectory/src/sd-api/classes/daos.py
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from bson.objectid import ObjectId
from commons.daos import BaseDao
from bson.errors import InvalidId
import logging
from pymongo import DESCENDING, ASCENDING
logger = logging.getLogger(__name__)
class ServiceClassDao(BaseDao):
    """
    Dao to handle 'ServiceClasses' collection documents
    """
    coll = "serviceclasses"
    def __init__(self, *args, **kwargs):
        # No extra state; delegates entirely to BaseDao.
        super(ServiceClassDao, self).__init__(*args, **kwargs)
    def update(self, obj):
        """
        Update an existing class document in place, keyed by its '_id'.

        Uses the legacy pymongo ``update`` API with a $set of every field of
        ``obj`` except '_id'; upsert is disabled, so no document is created.
        When a database error happens, OperationFailure is raised by the
        pymongo driver.

        :param obj: the partial class document; must contain '_id'
        :return: truthy when an existing document was modified, falsy
                 otherwise (note: the old docstring claimed "the updated
                 object or None", which did not match the code)
        """
        class_2_update = obj.copy()
        update_ret = self.dbcoll.update({'_id': class_2_update.pop('_id')},
                                        {'$set': class_2_update},
                                        upsert=False)
        return update_ret.get('ok') and update_ret.get('updatedExisting')
class ServiceInstanceDao(BaseDao):
    """
    Dao to handle the ServiceInstances for every ServiceClass
    """
    coll = "serviceinstances"
    def __init__(self, *args, **kwargs):
        super(ServiceInstanceDao, self).__init__(*args, **kwargs)
        # Ensure unique index in instances collection is created:
        # (class_name, version, uri) must be unique.
        self.dbcoll.ensure_index([("class_name", ASCENDING), ("version", ASCENDING),
                                 ("uri", ASCENDING)], unique=True, name='class_name_uri_version')
    def find(self, obj_id):
        """Find one instance by id; returns None for malformed/absent ids."""
        try:
            instance_id = ObjectId(obj_id)
            return super(ServiceInstanceDao, self).find(instance_id)
        except (InvalidId, TypeError):
            return None
    def find_by_class_name_and_id(self, class_name, obj_id):
        """Find one instance by id, scoped to ``class_name``; None on bad id."""
        try:
            instance_id = ObjectId(obj_id)
            return self.dbcoll.find_one({'_id': instance_id, 'class_name': class_name})
        except (InvalidId, TypeError):
            return None
    def find_all(self, class_name):
        """
        Find all the instances under the Class "class_name"
        :param class_name name of the class to be used
        :return a list with the recovered instances
        """
        return list(self.dbcoll.find(class_name))
    def find_instances(self, query_obj):
        # Newest version first.
        return list(self.dbcoll.find(query_obj).sort('version', DESCENDING))
    def update(self, obj):
        """
        Find a instance document by _id and update the object if found
        _Write Concern Not Supported. Default w=1. We use the following params:
        upsert=False Do not create the object if document does not exists.
        :param obj The instance document to be modified; must contain '_id'
        :return True if the instance is updated, otherwise False
        NOTE(review): only InvalidId is caught -- an obj missing '_id' raises
        KeyError from pop(), unlike find() which also catches TypeError.
        """
        instance_2_update = obj.copy()
        try:
            instance_id = ObjectId(instance_2_update.pop('_id'))
        except InvalidId:
            return False
        # update will launch DuplicateKeyError (unique index above)
        update_ret = self.dbcoll.update({'_id': instance_id},
                                        instance_2_update, upsert=False)
        return update_ret.get('ok') and update_ret.get('updatedExisting')
    def delete(self, obj_id):
        """Delete one instance by id; False on malformed id."""
        try:
            return super(ServiceInstanceDao, self).delete(ObjectId(obj_id))
        except InvalidId:
            return False
    def delete_by_class_name(self, class_name):
        # Bulk-remove every instance of a class; returns None (fire-and-forget).
        self.dbcoll.remove({'class_name': class_name})
|
"""
拼接并发送邮件
"""
import smtplib
from datetime import datetime, date
from email.mime.text import MIMEText
from email.header import Header
from email.utils import formataddr
from pathlib import Path
import psycopg2
import requests
import sentry_sdk
from jinja2 import Environment, PackageLoader
import config
from app.utils.entities import Content, Image
from app.utils.weather_crawler import WeatherCrawler
from app.utils.screenshot_lib import Driver
# Forward captured exceptions to Sentry.
sentry_sdk.init(dsn=config.sentry_dsn)
def get_edm_config():
    """Fetch today's (subject, title) row from official_edmconfig.

    Returns ("", "") when no row exists for today.

    Bug fix: the connection leaked whenever a row was found -- the early
    ``return row`` skipped conn.close(). try/finally now always closes it.
    """
    day = date.today().isoformat()
    conn = psycopg2.connect(
        database=config.PG_DB, user=config.PG_USER,
        password=config.PG_PASSWORD, host=config.PG_HOST
    )
    try:
        with conn.cursor() as cur:
            cur.execute(
                "SELECT subject,title FROM official_edmconfig "
                "WHERE day=%s;",
                (day,)
            )
            row = cur.fetchone()
        if row:
            return row
        return "", ""
    finally:
        conn.close()
def update_edm_config(poetry, hitokoto):
    """Persist today's poetry and hitokoto strings into official_edmconfig.

    Rolls back (and logs) on any database error; the connection is always
    closed.
    """
    today = date.today().isoformat()
    conn = psycopg2.connect(
        database=config.PG_DB, user=config.PG_USER,
        password=config.PG_PASSWORD, host=config.PG_HOST
    )
    sql = (
        "UPDATE official_edmconfig SET poetry=%s,hitokoto=%s "
        "WHERE day=%s;"
    )
    try:
        with conn.cursor() as cur:
            cur.execute(sql, (poetry, hitokoto, today))
        conn.commit()
    except Exception as e:
        conn.rollback()
        print(f"Update db error: {e}")
    finally:
        conn.close()
def render_html() -> str:
    """
    Gather all content (weather, screenshots, quotes, poetry) and render the
    "hei.html" Jinja2 template into an HTML string.
    """
    # Collect the data
    # Base content container
    content = Content()
    # Weather information
    with WeatherCrawler() as wea:
        wea: WeatherCrawler
        # Title comes from today's EDM config row, with a fallback greeting.
        _, title = get_edm_config()
        if not title:
            content.title = f"早安,亲爱的你"
        else:
            content.title = title
        wea_tips = wea.get_tips() or "快来看今天的天气呀"
        weather_data = wea.get_days_wea()
    # Page screenshots
    image = get_image_code()
    # "Hitokoto" one-liner
    content.hitokoto_say = get_hitokoto_say()
    print(f"获得一言: {content.hitokoto_say}")
    # Poem of the day; persist both quotes for the record.
    content.shici_say = get_gushici_say()
    update_edm_config(poetry=content.shici_say, hitokoto=content.hitokoto_say)
    # Render the HTML template
    env = Environment(loader=PackageLoader("app"))
    template = env.get_template("hei.html")
    html_content = template.render(
        content=content, weather_data=weather_data, image=image,
        wea_tips=wea_tips,
    )
    return html_content
def _fetch_one_liner(url, key, default_msg, err_label):
    """GET ``url``, return JSON field ``key``; fall back to ``default_msg``.

    Shared implementation for get_hitokoto_say / get_gushici_say (they were
    byte-for-byte duplicates apart from URL, key and messages).
    """
    try:
        # timeout added: requests.get has no default timeout and can hang forever.
        resp = requests.get(url, timeout=10)
        if resp.status_code == 200:
            return resp.json()[key]
        return default_msg
    except Exception as e:
        print(f"Exception in {err_label}, errors: {e}")
        return default_msg
def get_hitokoto_say() -> str:
    """Return a 'hitokoto' one-liner, or a default phrase on any failure."""
    return _fetch_one_liner(config.HITOKOTO_URL, "hitokoto",
                            "看,你的眼里有星辰大海!", "get hitokoto say")
def get_gushici_say() -> str:
    """Return today's classical-poetry line, or a default verse on failure."""
    return _fetch_one_liner(config.JINRISHICI_URL, "content",
                            "北方有佳人,绝世而独立。", "get jinri shici")
def get_image_code() -> Image:
    """
    Screenshot the ONE homepage and the xingzuowu (horoscope) page and
    return them as base64 data-URIs on an Image object.
    """
    img = Image()
    # (attribute, file stem, url, css class to capture, driver flag)
    shots = [
        ("one", "one", config.ONE_URL, "carousel-inner", {"one": True}),
        ("xingzuowu", "xzw", config.XINGZUOWU_URL, "c_main", {"xzw": True}),
    ]
    for attr, stem, url, css_class, flags in shots:
        filename = f"{config.IMAGE_FILE_PATH}/{stem}.png"
        with Driver() as webdriver:
            webdriver.save_screenshot(
                url=url, filename=filename, class_name=css_class, **flags,
            )
            setattr(img, attr,
                    f"data:image/png;base64,{webdriver.to_base64(filename)}")
    return img
def send_email(html):
    """Send ``html`` as a UTF-8 HTML email through QQ SMTP on port 587.

    NOTE(review): no starttls() is issued before login -- confirm the QQ
    server accepts plain LOGIN on 587 (kept as-is to preserve behaviour).
    """
    def _format_address(name, addr):
        # RFC 2047-encode the display name so non-ASCII survives transport.
        return formataddr((Header(name, "utf-8").encode(), addr))
    message = MIMEText(html, "html", "utf-8")
    message["From"] = _format_address("Ikaros", config.sender)
    message["To"] = _format_address("柠柠", config.receiver)
    subject, _ = get_edm_config()
    if not subject:
        subject = "玲玲大宝宝"
    message["Subject"] = Header(subject, "utf-8")
    try:
        # Bug fix: the SMTP connection was never quit/closed; the context
        # manager guarantees QUIT + close even when sendmail raises.
        with smtplib.SMTP("smtp.qq.com", port=587) as smtp_obj:
            smtp_obj.ehlo("smtp.qq.com")
            smtp_obj.login(config.sender, config.email_password)
            smtp_obj.sendmail(config.sender, [config.receiver], message.as_string())
            print("邮件发送成功")
    except smtplib.SMTPException:
        print("Error: 无法发送邮件")
def handler():
    """
    End-to-end flow: render the HTML, archive a daily copy, send the email.

    Returns False when rendering fails (after reporting to Sentry),
    True otherwise.
    """
    print(f"Begin task at {datetime.now().isoformat()}")
    # Render the HTML
    try:
        html = render_html()
        # Archive each day's HTML under <IMAGE_FILE_PATH>/<YYYYMM>/.
        month = date.today().strftime("%Y%m")
        p = Path(config.IMAGE_FILE_PATH) / month
        if not p.exists():
            p.mkdir(parents=True)
        # Bug fix: write with an explicit encoding -- the HTML contains CJK
        # text and the platform-default codec may not accept it.
        with open(f"{p}/{date.today().isoformat()}.html", "w", encoding="utf-8") as f:
            f.write(html)
    except Exception as e:
        sentry_sdk.capture_exception(e)
        print(f"Exception in render html. errors: {e}")
        return False
    # Send the email
    send_email(html)
    print(f"End task at {datetime.now().isoformat()}")
    return True
if __name__ == "__main__":
    # Entry point: run the full render-and-send flow once.
    handler()
|
# availablenick/getren
import unittest
import flask_testing
import datetime
import os
import time
from flask import Flask
from sqlalchemy.exc import InvalidRequestError
import models_test
from app import create_test_app, test_db
from app.config import Test_Config
from app.models import User, Course, Video, Attends, Watches, Text
# Module-level counter ensuring test_db.create_all() runs only once per class run.
TESTS = 0
class MyTest_User_Course(flask_testing.TestCase):
    """Base fixture for User/Course tests: fresh tables and reset id sequences.

    NOTE(review): clear_please() is not defined in this chunk -- presumably a
    module-level helper that truncates all tables; confirm in the full file.
    """
    def create_app(self):
        # Build the Flask test app and bind the test database to it.
        app = create_test_app()
        test_db.init_app(app)
        return app
    def setUp(self):
        # Create all tables exactly once per test run, then empty them.
        global TESTS
        if TESTS == 0:
            test_db.create_all()
        clear_please(test_db)
        TESTS+=1
    def tearDown(self):
        #test_db.session.remove()
        #test_db.drop_all()
        # Empty the tables and restart the PostgreSQL id sequences so the next
        # test can rely on ids starting at 1.
        clear_please(test_db)
        seqs = ['user_id_seq', 'course_id_seq']
        for seq in seqs:
            query = f"ALTER SEQUENCE {seq} RESTART"
            test_db.engine.execute(query)
class MyTest_Video(flask_testing.TestCase):
    """Base fixture for Video tests; resets video/course id sequences."""
    def create_app(self):
        # Reset the shared counter so this class re-creates tables once.
        global TESTS
        TESTS = 0
        app = create_test_app()
        test_db.init_app(app)
        return app
    def setUp(self):
        global TESTS
        if TESTS == 0:
            test_db.create_all()
        clear_please(test_db)
        TESTS+=1
    def tearDown(self):
        # Empty tables and restart id sequences for deterministic ids.
        clear_please(test_db)
        seqs = ['video_id_seq', 'course_id_seq']
        for seq in seqs:
            query = f"ALTER SEQUENCE {seq} RESTART"
            test_db.engine.execute(query)
class MyTest_Attends(flask_testing.TestCase):
    """Base fixture for Attends tests; resets user/course id sequences."""
    def create_app(self):
        # Reset the shared counter so this class re-creates tables once.
        global TESTS
        TESTS = 0
        app = create_test_app()
        test_db.init_app(app)
        return app
    def setUp(self):
        global TESTS
        if TESTS == 0:
            test_db.create_all()
        clear_please(test_db)
        TESTS+=1
    def tearDown(self):
        # Empty tables and restart id sequences for deterministic ids.
        clear_please(test_db)
        seqs = ['user_id_seq', 'course_id_seq']
        for seq in seqs:
            query = f"ALTER SEQUENCE {seq} RESTART"
            test_db.engine.execute(query)
class MyTest_Watches(flask_testing.TestCase):
    """Base fixture for Watches tests; resets user/course/video id sequences."""
    def create_app(self):
        # Reset the shared counter so this class re-creates tables once.
        global TESTS
        TESTS = 0
        app = create_test_app()
        test_db.init_app(app)
        return app
    def setUp(self):
        global TESTS
        if TESTS == 0:
            test_db.create_all()
        clear_please(test_db)
        TESTS+=1
    def tearDown(self):
        # Empty tables and restart id sequences for deterministic ids.
        clear_please(test_db)
        seqs = ['user_id_seq', 'course_id_seq', 'video_id_seq']
        for seq in seqs:
            query = f"ALTER SEQUENCE {seq} RESTART"
            test_db.engine.execute(query)
class MyTest_Text(flask_testing.TestCase):
    """Base fixture for Text tests; resets the text id sequence.

    Unlike the other bases, create_app() does not reset TESTS here.
    """
    def create_app(self):
        app = create_test_app()
        test_db.init_app(app)
        return app
    def setUp(self):
        global TESTS
        if TESTS == 0:
            test_db.create_all()
        clear_please(test_db)
        TESTS+=1
    def tearDown(self):
        #test_db.session.remove()
        #test_db.drop_all()
        # Empty tables and restart the text id sequence.
        clear_please(test_db)
        seqs = ['text_id_seq']
        for seq in seqs:
            query = f"ALTER SEQUENCE {seq} RESTART"
            test_db.engine.execute(query)
class UserTest(MyTest_User_Course):
    """CRUD tests for the User model (hit and miss paths).

    NOTE(review): '<EMAIL>' / '<PASSWORD>' look like anonymised fixture
    placeholders from data scrubbing -- confirm the real fixture values.
    """
    def test_1_create(self):
        user = User.register("<EMAIL>", "12345678")
        assert user is not None
    def test_2_fill_register(self):
        # Registering then updating profile data by id 1 (sequence reset in fixture).
        user = User.register("<EMAIL>", "12345678")
        update_dict = {'name': 'Getren', 'birthdate': datetime.datetime.strptime("2020-11-11", '%Y-%m-%d'),
                       'federal_state': 'SP', 'city': 'São Paulo', 'job': 'Fisioterapeuta'}
        user = User.update_data(1, update_dict)
        assert user is not None
    def test_3_fill_register_miss(self):
        # Updating a non-existent user must return None.
        update_dict = {'name': 'Getren', 'birthdate': datetime.datetime.strptime("2020-11-11", '%Y-%m-%d'),
                       'federal_state': 'SP', 'city': 'São Paulo', 'job': 'Fisioterapeuta'}
        user = User.update_data(1, update_dict)
        assert user is None
    def test_4_confirmation(self):
        user = User.register("<EMAIL>", "12345678")
        user = User.confirm_user("<EMAIL>")
        assert user is not None
    def test_5_confirmation_miss(self):
        user = User.confirm_user("<EMAIL>")
        assert user is None
    def test_6_update_password(self):
        user = User.register("<EMAIL>", "<PASSWORD>")
        user = User.update_password("<EMAIL>", "<PASSWORD>")
        assert user is not None
    def test_7_update_miss(self):
        user = User.update_password("<EMAIL>", "<PASSWORD>")
        assert user is None
    def test_8_get_by_id_hit(self):
        user = User.register("<EMAIL>", "12345678")
        user = User.get_by_id(1)
        assert user is not None
    def test_9_get_by_id_miss(self):
        user = User.get_by_id(1)
        assert user is None
    # def test_10_repeated(self):
    #     user = User.register("<EMAIL>", "12345678")
    #     new_user = User.register("<EMAIL>", "81723981723")
    #     assert user is not None and new_user is None
class CourseTest(MyTest_User_Course):
    """CRUD and filtered-search tests for the Course model.

    NOTE(review): the numeric prefixes are reused (two test_04_* and two
    test_05_* methods) -- the names differ so nothing is shadowed, but the
    intended ordering is ambiguous.
    """
    def test_01_add(self):
        course = Course.add({"name" : "Curso de teste"})
        assert course is not None
    def test_02_add_fail(self):
        # Unknown keys must be rejected.
        course = Course.add({"error" : "fail"})
        assert course is None
    def test_03_get_all(self):
        course = Course.add({"name" : "Curso de teste"})
        courses = Course.get_by_filter("all")
        assert list(courses[0].keys()) == ['id', 'name', 'number_of_videos',
                                           'duration', 'price', 'is_watchable']
    def test_04_get_expired(self):
        # One course expired in 2020, one far in the future.
        course = Course.add({"name": "Curso de teste", "expires_at": "2020-11-20"})
        course = Course.add({"name": "Curso de teste 2", "expires_at": "4020-12-10"})
        courses = Course.get_by_filter("expired")
        assert len(courses) == 1 and courses[0]['name'] == "Curso de teste"
    def test_05_get_active(self):
        course = Course.add({"name": "Curso de teste", "expires_at": "2020-11-20"})
        course = Course.add({"name": "Curso de teste 2", "expires_at": "4020-12-10"})
        courses = Course.get_by_filter("active")
        assert len(courses) == 1 and courses[0]['name'] == "Curso de teste 2"
    def test_05_get_with_search(self):
        course = Course.add({"name": "Curso de teste", "expires_at": "2020-11-20"})
        course = Course.add({"name": "Batata", "expires_at": "4020-12-10"})
        courses = Course.get_by_filter("Batata")
        assert len(courses) == 1 and courses[0]['name'] == "Batata"
    def test_06_get_with_multiple_word_search(self):
        # "%20" mimics a URL-encoded space in the search string.
        course = Course.add({"name": "Fisioterapia para velhinhos", "expires_at": "2020-11-20"})
        course = Course.add({"name": "Batata", "expires_at": "4020-12-10"})
        courses = Course.get_by_filter("Fisioterapia%20velhinhos")
        assert len(courses) == 1 and courses[0]['name'] == "Fisioterapia para velhinhos"
    def test_04_get_by_id(self):
        course = Course.add({"name" : "Curso de teste"})
        new_course = Course.get_by_id(1)
        fail_course = Course.get_by_id(2)
        assert new_course is not None and new_course.id == course.id and fail_course is None
    def test_06_update_data(self):
        course = Course.add({"name" : "Curso de teste"})
        course = Course.update_data(1, {"name": "Curso de teste atualizado"})
        updated_course = Course.get_by_id(1)
        assert updated_course.name == "Curso de teste atualizado"
    def test_07_update_data_fail(self):
        # Both an unknown id and an unknown field must fail.
        course = Course.add({"name" : "Curso de teste"})
        updated_course = Course.update_data(2, {"name": "Curso de teste atualizado"})
        updated_course_2 = Course.update_data(1, {"error": "Curso de teste não atualizado"})
        assert updated_course is None and updated_course_2 is None
    def test_08_delete(self):
        course = Course.add({"name" : "Curso de teste"})
        is_deleted = Course.delete(1)
        deleted_course = Course.get_by_id(1)
        assert is_deleted == True and deleted_course is None
    def test_09_delete_fail(self):
        # Deleting an unknown id must leave existing courses untouched.
        course = Course.add({"name" : "Curso de teste"})
        Course.delete(2)
        deleted_course = Course.get_by_id(1)
        assert deleted_course is not None
class VideoTest(MyTest_Video):
    """CRUD tests for the Video model, always attached to course id 1."""
    def test_01_add(self):
        course = Course.add({'name': "Curso de Teste"})
        video = Video.add(1, {'youtube_code': 'test_code', 'course_order': 1})
        assert video is not None
    def test_02_add_fail(self):
        # Adding to a non-existent course must fail.
        video = Video.add(2, {'youtube_code': 'test_code', 'course_order': 1})
        assert video is None
    def test_03_get_videos_as_dict(self):
        course = Course.add({'name': "Curso de Teste"})
        course = Course.get_by_id(1)
        video = Video.add(1, {'youtube_code': 'test_code', 'course_order': 1})
        videos = course.get_videos_as_dict()
        assert list(videos[0].keys()) == ['id', 'youtube_code', 'title', \
            'description', 'duration', 'thumbnail', 'course_order'] and videos[0]['youtube_code'] == 'test_code'
    def test_04_get_by_id(self):
        course = Course.add({'name': "Curso de Teste"})
        video = Video.add(1, {'youtube_code': 'test_code', 'course_order': 1})
        new_video = Video.get_by_id(1)
        video_fail = Video.get_by_id(2)
        assert new_video is not None and new_video.id == video.id and video_fail is None
class AttendsTest(MyTest_Attends):
    """Enrollment (Attends) tests: user 1 enrolling in course 1."""
    def test_1_enroll(self):
        course = Course.add({'name': 'Curso 1'})
        user = User.register(email = '<EMAIL>', password = '<PASSWORD>')
        attends = Attends.add(1, {'course_id': 1, 'is_paid': False})
        assert attends is not None and attends.user_id == 1 and attends.course.name == 'Curso 1'
    def test_2_enroll_fail(self):
        # Non-existent user id / course id must fail.
        course = Course.add({'name': 'Curso 1'})
        user = User.register(email = '<EMAIL>', password = '<PASSWORD>')
        attends = Attends.add(3, {'course_id': 3})
        assert attends is None
class WatchesTest(MyTest_Watches):
    """Watch-progress (Watches) tests: user 1 watching video 1."""
    def test_01_add(self):
        course = Course.add({'name': 'Curso de Teste'})
        user = User.register('<EMAIL>', '12345678')
        video = Video.add(1, {'title': 'Video 1'})
        watches = Watches.add(1, 1)
        assert watches is not None
    def test_02_add_fail(self):
        # Neither the user nor the video exist here.
        watches = Watches.add(1, 1)
        assert watches is None
    def test_03_as_dict(self):
        course = Course.add({'name': 'Curso de Teste'})
        user = User.register('<EMAIL>', '12345678')
        video = Video.add(1, {'title': 'Video 1'})
        watches = Watches.add(1, 1)
        watches_dict = watches.as_dict()
        assert list(watches_dict.keys()) == ['user_id', 'video_id', 'watched_time', 'finished'] and \
            watches_dict['finished'] == False
    def test_04_get_by_id(self):
        course = Course.add({'name': 'Curso de Teste'})
        user = User.register('<EMAIL>', '12345678')
        video = Video.add(1, {'title': 'Video 1'})
        watches = Watches.add(1, 1)
        watches = Watches.get_by_ids(1, 1)
        assert watches is not None
    def test_05_get_by_id_fail(self):
        watches = Watches.get_by_ids(3, 5)
        assert watches is None
    def test_06_update_data(self):
        course = Course.add({'name': 'Curso de Teste'})
        user = User.register('<EMAIL>', '12345678')
        video = Video.add(1, {'title': 'Video 1'})
        watches = Watches.add(1, 1)
        updated = Watches.update_data(1, 1, {'watched_time': 200, 'finished': True})
        assert updated.finished == True and updated.watched_time == 200
    def test_07_update_fail(self):
        # NOTE(review): this test has no assertion -- presumably it should
        # assert that updating with the misspelled key 'finish' fails.
        course = Course.add({'name': 'Curso de Teste'})
        user = User.register('<EMAIL>', '12345678')
        video = Video.add(1, {'title': 'Video 1'})
        watches = Watches.add(1, 1)
        updated = Watches.update_data(1, 1, {'watched_time': 200, 'finish': False})
class TextTest(MyTest_Text):
    """Exercises the Text (static page content) model."""

    def test_01_add(self):
        # A long body checks that sizeable text is stored without truncation.
        # NOTE: backslash line-continuations below are *inside* the string
        # literal, so the continuation lines intentionally start at column 0.
        text = Text.add('home', 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. \
Ut vel massa arcu. Ut tincidunt vestibulum eros, congue tempus dolor ultricies sodales. \
Praesent vel dui pellentesque, condimentum nulla id, efficitur metus. Morbi at porta nisl,\
ac venenatis massa. Mauris ut ultrices libero. Vivamus vitae augue vulputate, ultricies enim \
sit amet, imperdiet nunc. Curabitur egestas eget erat eu elementum. Nullam non ullamcorper\
arcu. Duis pulvinar eu felis eget placerat. Nullam sed lacus vel nisi porttitor interdum \
scelerisque id velit. Pellentesque facilisis, magna ac porttitor feugiat, ligula nulla scelerisque \
nibh, eu tincidunt ipsum urna sed nisi. Donec tincidunt nulla a molestie fermentum. Suspendisse.')
        assert text is not None

    # NOTE(review): the tests below are disabled; presumably
    # Text.get_from_section / Text.update_body are unimplemented or broken —
    # confirm before re-enabling.
    # def test_03_get_from_section(self):
    # text = Text.add('home', 'Lorem ipsum')
    # home_text = Text.get_from_section('home')
    # assert home_text is not None and home_text.body == 'Lorem ipsum'
    # def test_04_get_from_section_fail(self):
    # text = Text.get_from_section('index')
    # assert text is None
    # def test_05_update(self):
    # text = Text.add('home', 'Lorem ipsum')
    # updated = Text.update_body('home', 'Texto atualizado')
    # text = Text.get_from_section('home')
    # assert text is not None and text.body == 'Texto atualizado'
    # def test_06_update_fail(self):
    # text = Text.add('home', 'Lorem ipsum')
    # updated = Text.update_body('faq', 'Texto atualizado')
    # text = Text.get_from_section('home')
    # assert text is not None and text.body == 'Lorem ipsum' and updated is None
def clear_please(db):
    """Wipe every table used by the tests, then commit.

    Association/child tables are cleared before their parents so no
    foreign-key rows ever dangle mid-cleanup.
    """
    for model in (Attends, Watches, Video, User, Course, Text):
        model.query.delete()
    db.session.commit()
# Allow running this test module directly (python <file>.py) in addition to
# discovery via a test runner.
if __name__ == "__main__":
    unittest.main()
|
<filename>gusty/parsing/parsers.py<gh_stars>100-1000
import yaml, ast, importlib.util, frontmatter, nbformat, jupytext
from gusty.parsing.loaders import GustyYAMLLoader
from gusty.importing import airflow_version
if airflow_version > 1:
from airflow.operators.python import PythonOperator
else:
from airflow.operators.python_operator import PythonOperator
def parse_generic(file_path):
    """Return the job spec for a task file.

    Prefers the YAML frontmatter block if one is present; otherwise the
    whole file body is parsed as YAML with the gusty loader.
    """
    parsed = frontmatter.load(file_path)
    if parsed.metadata:
        return parsed.metadata
    return yaml.load(parsed.content, Loader=GustyYAMLLoader)
def parse_py(file_path):
    """Parse a .py task file into a job spec for a PythonOperator.

    The file may begin with a '# ---' comment block (read by jupytext as a
    raw cell) holding YAML settings.  If the settings name a
    ``python_callable``, that function is imported from the file; otherwise
    the whole file is executed as the task's callable.
    """
    job_spec = {}
    # Operator import path differs between Airflow 1.x and 2.x.
    if airflow_version > 1:
        job_spec.update({"operator": "airflow.operators.python.PythonOperator"})
    else:
        job_spec.update(
            {"operator": "airflow.operators.python_operator.PythonOperator"}
        )
    file_contents = jupytext.read(file_path)["cells"][0]
    # if spec contains metadata header...
    if file_contents["cell_type"] == "raw":
        assert (
            file_contents["source"] is not None
        ), "You need a comment block starting and ending with '# ---' at the top of {file_path}".format(
            file_path=file_path
        )
        # BUG FIX: this assertion previously wrapped its condition in a
        # one-element tuple — assert ("---" in ..., ) — which is always
        # truthy, so the check could never fire.
        assert (
            "---" in file_contents["source"]
        ), "You need a comment block starting and ending with '# ---' at the top of {file_path}".format(
            file_path=file_path
        )
        source = file_contents["source"].replace("---", "")
        settings = yaml.load(source, Loader=GustyYAMLLoader)
        job_spec.update(**settings)
        # search for a python callable if one is specified
        if "python_callable" in job_spec:
            with open(file_path) as f:
                tree = ast.parse(f.read())

            class Visitor(ast.NodeVisitor):
                # Records whether the requested callable is defined in the file.
                def __init__(self):
                    self.has_callable = None

                def visit_FunctionDef(self, node):
                    ast.NodeVisitor.generic_visit(self, node)
                    if node.name == job_spec["python_callable"]:
                        self.has_callable = True

            v = Visitor()
            v.visit(tree)
            if v.has_callable:
                # Import the module under a sanitized name and pull the
                # callable off it.
                mod_file = importlib.util.spec_from_file_location(
                    "".join(i for i in file_path if i.isalnum()), file_path
                )
                mod = importlib.util.module_from_spec(mod_file)
                mod_file.loader.exec_module(mod)
                job_spec.update(
                    {"python_callable": getattr(mod, job_spec["python_callable"])}
                )
            else:
                assert (
                    False
                ), "{file_path} specifies python_callable {callable} but {callable} not found in {file_path}".format(
                    file_path=file_path, callable=job_spec["python_callable"]
                )
        # Default to sourcing this file for a PythonOperator
        else:
            job_spec.update({"python_callable": lambda: exec(open(file_path).read())})
    # If no metadata then we also default to sourcing this file for a PythonOperator
    else:
        job_spec.update({"python_callable": lambda: exec(open(file_path).read())})
    return job_spec
def parse_ipynb(file_path):
    """Extract the job spec from a notebook's first ```yaml/```yml markdown cell."""
    cells = nbformat.read(file_path, as_version=4)["cells"]
    yaml_cells = [
        cell
        for cell in cells
        if cell["cell_type"] == "markdown"
        and cell["source"].startswith(("```yaml", "```yml"))
    ]
    assert len(yaml_cells) > 0, "Please add a yaml block to %s" % file_path
    source = yaml_cells[0]["source"]
    # Strip the code-fence markers (same order as the original chained calls).
    for fence in ("```yaml", "```yml", "```"):
        source = source.replace(fence, "")
    return yaml.safe_load(source)
def parse_sql(file_path):
    """Job spec from a SQL file: frontmatter settings plus the body under 'sql'."""
    parsed = frontmatter.load(file_path)
    spec = parsed.metadata
    spec["sql"] = parsed.content
    return spec
|
# coding: utf8
"""Core functionality of tankobon."""
import concurrent.futures as cfutures
import gzip
import logging
import pathlib
import shutil
from typing import cast, Callable, Dict, List, Optional, Union
import fpdf # type: ignore
import imagesize # type: ignore
import natsort # type: ignore
import requests.exceptions
from . import models, utils
from .exceptions import MangaNotFoundError, PagesNotFoundError
# Package-wide logger.
_log = logging.getLogger("tankobon")

# A4 page size in millimetres (fpdf's default unit), used by Downloader.pdfify.
A4_WIDTH = 210
A4_HEIGHT = 297

# Minimum number of leading hash characters accepted by Cache.fullhash.
SHORT_HASH_LEN = 8
class Cache(utils.PersistentDict):
    """A manga cache.

    Args:
        root: The root of the cache.

    Attributes:
        root: See args.
        alias: A map of manga url to manga hash.
    """

    # Name of the (gzip-compressed) index file inside the cache root.
    INDEX = "index.json.gz"

    def __init__(self, root: Union[str, pathlib.Path] = utils.ROOT):
        if isinstance(root, str):
            root = pathlib.Path(root)
        self.root = root
        index = self.root / self.INDEX
        old_index = self.root / "index.json"
        if old_index.is_file():
            # indexes are now compressed by default so compress the old one
            # (one-time migration: gzip the legacy plain-text index, then
            # remove the uncompressed original).
            with gzip.open(index, "wt") as f:
                f.write(old_index.read_text())
            old_index.unlink()
        super().__init__(self.root / self.INDEX, compress=True)
        self.alias: Dict[str, str] = {}
        # alias urls to their hashes, so manga can be looked up by URL too
        for hash, manga in self.data.items():
            self.alias[manga["meta"].url] = hash

    def fullhash(self, part: str) -> str:
        """Get the full SHA512 hash of a manga when only given at least the first 8 letters of the hash.

        Args:
            part: The first 8 letters of the hash.

        Returns:
            The full hash, or an empty string if part was not found.

        Raises:
            ValueError, if the length part is less than 8.
        """
        if len(part) < SHORT_HASH_LEN:
            raise ValueError(f"part {part} is too short")
        # Linear prefix scan over every known hash; first match wins.
        for hash in self.data:
            if hash.startswith(part):
                return hash
        return ""

    def dump(self, manga: models.Manga):
        """Save this manga within the cache.

        Args:
            manga: The manga object to save.
        """
        self.data[manga.meta.hash] = manga.dump()
        self.alias[manga.meta.url] = manga.meta.hash
        # Ensure the per-manga directory exists alongside the index entry.
        (self.root / manga.meta.hash).mkdir(exist_ok=True)

    def load(self, hash: str) -> models.Manga:
        """Load a manga by its hash.

        Args:
            hash: The manga hash.

        Returns:
            The Manga object.

        Raises:
            MangaNotFoundError, if the manga does not exist in the cache.
        """
        if hash not in self.data:
            raise MangaNotFoundError(f"{hash} does not exist in cache")
        return models.Manga.load(self.data[hash])

    def delete(self, hash: str):
        """Delete a manga from the cache.

        Args:
            hash: The manga hash.

        Raises:
            MangaNotFoundError, if the manga does not exist in the cache.
        """
        if hash not in self.data:
            raise MangaNotFoundError(f"{hash} does not exist in cache")
        # Remove the URL alias, the on-disk directory, then the index entry.
        del self.alias[self.data[hash]["meta"].url]
        shutil.rmtree(str(self.root / hash))
        del self.data[hash]
class Downloader:
    """A manga downloader.

    Args:
        path: The path to where the manga chapters will be downloaded.
            For every manga chapter, a corrosponding folder is created if it does not exist.
    """

    # Per-download-folder manifest mapping chapter id -> lang -> page filenames.
    MANIFEST = "manifest.json"

    def __init__(self, path: Union[str, pathlib.Path]):
        if isinstance(path, str):
            path = pathlib.Path(path)
        self.path = path
        self.config = utils.CONFIG
        self.session = utils.UserSession()
        self.manifest = utils.PersistentDict(self.path / self.MANIFEST)
        # repair manifest, if it uses absolute paths (older versions stored
        # full paths; only bare filenames are stored now)
        for cid, langs in self.manifest.items():
            for lang, pages in langs.items():
                for index, page in enumerate(pages):
                    page_path = pathlib.Path(page)
                    if page_path.is_absolute():
                        self.manifest[cid][lang][index] = page_path.name
                    else:
                        # Entries are homogeneous per chapter: the first
                        # relative entry means the rest need no repair.
                        break

    def close(self):
        """Release the HTTP session and flush the manifest to disk."""
        self.session.close()
        self.manifest.close()

    def downloaded(self, chapter: models.Chapter) -> bool:
        """Check whether a chapter has been downloaded or not."""
        try:
            self.manifest[chapter.id][chapter.lang]
        except KeyError:
            return False
        else:
            return True

    def download(
        self,
        chapter: models.Chapter,
        *,
        force: bool = False,
        progress: Optional[Callable[[int], None]] = None,
    ):
        """Download pages for a chapter.

        Args:
            chapter: The Chapter object to download.
            force: Whether or not to re-download the chapter if it already exists.
                Defaults to False.
            progress: A callback function which is called with the page number every time a page is downloaded.
                Defaults to None.

        Raises:
            PagesNotFoundError, if the chapter to be downloaded has no pages.
        """
        # Some hosts require the chapter page as the referer.
        self.session.headers.update({"Referer": chapter.url})
        if not chapter.pages:
            raise PagesNotFoundError(f"chapter {chapter.id} does not have any pages")
        entry = self.manifest.setdefault(chapter.id, {})
        if chapter.lang in entry and not force:
            # bail out: dont re-download chapter
            return
        chapter.pages = cast(list, chapter.pages)
        chapter_path = self.path / chapter.id / chapter.lang
        # BUG FIX: exist_ok=True — the directory may already exist when
        # force re-downloading, or after a previously interrupted download;
        # plain mkdir(parents=True) raised FileExistsError in those cases.
        chapter_path.mkdir(parents=True, exist_ok=True)
        pages = []
        total = len(chapter.pages)
        with cfutures.ThreadPoolExecutor(
            max_workers=self.config["download.rate_limit"]
        ) as pool:
            futures = {
                pool.submit(self.session.get, url): count
                for count, url in enumerate(chapter.pages)
            }
            # progress count is different from page number as the page futures may not be in order.
            for p_count, future in enumerate(cfutures.as_completed(futures)):
                count = futures[future]
                _log.info(
                    f"downloader: [{chapter.id}] downloading page {count} of {total}"
                )
                try:
                    resp = future.result()
                    resp.raise_for_status()
                except requests.exceptions.RequestException as e:
                    _log.critical(f"downloader: failed to download page {count}: {e}")
                    # cant partially download, so remove chapter folder
                    shutil.rmtree(chapter_path)
                    raise e
                path = utils.save_response(chapter_path / str(count), resp)
                pages.append(str(path.name))
                if progress is not None:
                    progress(p_count)
        # Record the downloaded filenames only once every page succeeded.
        self.manifest[chapter.id][chapter.lang] = pages

    def download_cover(self, manga: models.Manga):
        """Download a manga's cover to the download path as 'cover.(ext)'.

        Args:
            manga: The manga to download a cover for.
        """
        self.session.headers.update({"Referer": manga.meta.url})
        with self.session.get(manga.meta.cover) as resp:
            utils.save_response(self.path / "cover", resp)

    def pdfify(
        self,
        chapters: List[str],
        dest: Union[str, pathlib.Path],
        lang: str = "en",
    ):
        """Create a PDF out of several (downloaded) chapters.
        The PDF will be A4 sized (vertical).

        Args:
            chapters: The chapters to create a PDF for.
            lang: The language of the chapters.
                Defaults to 'en'.
            dest: Where to write the PDF to.
        """
        document = fpdf.FPDF()
        for cid in natsort.natsorted(chapters):
            _log.info(f"pdf: adding chapter {cid}")
            pages = self.manifest[cid][lang]
            # BUG FIX: was len(pages) - 1, which made the log message read
            # "page X of N-1" for a chapter with N pages.
            total = len(pages)
            for page in natsort.natsorted(pages):
                _log.debug(f"adding page {page} of {total}")
                page_path = self.path / cid / lang / page
                width, height = imagesize.get(page_path)
                # Scale to fit within A4 while preserving the aspect ratio.
                ratio = min(A4_WIDTH / width, A4_HEIGHT / height)
                document.add_page()
                document.image(str(page_path), 0, 0, w=width * ratio, h=height * ratio)
        document.output(str(dest), "F")

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        # Always release the session/manifest, even on error.
        self.close()
|
<gh_stars>0
# Tests numpy methods of <class 'function'>
from __future__ import print_function, absolute_import, division
import itertools
import math
import platform
from functools import partial
import numpy as np
from numba import unittest_support as unittest
from numba.compiler import Flags
from numba import jit, njit, typeof, types
from numba.numpy_support import version as np_version
from numba.errors import TypingError
from numba.config import IS_WIN32, IS_32BITS
from .support import TestCase, CompilationCache, MemoryLeakMixin
from .matmul_usecase import needs_blas
# Compilation flags shared by the tests below: enable the no-Python-object
# runtime (NRT) so functions compile in nopython mode.
no_pyobj_flags = Flags()
no_pyobj_flags.set("nrt")
# ---------------------------------------------------------------------------
# Plain module-level wrappers around the numpy API under test: numba's @jit
# compiles whole functions, so each numpy call gets its own tiny named
# wrapper that the test methods below hand to the compiler.
# ---------------------------------------------------------------------------
def sinc(x):
    return np.sinc(x)

def angle1(x):
    return np.angle(x)

def angle2(x, deg):
    return np.angle(x, deg)

def delete(arr, obj):
    return np.delete(arr, obj)

def diff1(a):
    return np.diff(a)

def diff2(a, n):
    return np.diff(a, n)

def bincount1(a):
    return np.bincount(a)

def bincount2(a, w):
    return np.bincount(a, weights=w)

def searchsorted(a, v):
    return np.searchsorted(a, v)

def searchsorted_left(a, v):
    return np.searchsorted(a, v, side='left')

def searchsorted_right(a, v):
    return np.searchsorted(a, v, side='right')

def digitize(*args):
    return np.digitize(*args)

def histogram(*args):
    return np.histogram(*args)

def machar(*args):
    # NOTE: deliberately ignores *args — only the call itself is under test.
    return np.MachAr()

def iinfo(*args):
    return np.iinfo(*args)

def finfo(*args):
    return np.finfo(*args)

def finfo_machar(*args):
    return np.finfo(*args).machar

def correlate(a, v):
    return np.correlate(a, v)

def convolve(a, v):
    return np.convolve(a, v)

def tri_n(N):
    return np.tri(N)

def tri_n_m(N, M=None):
    return np.tri(N, M)
def tri_n_k(N, k=0):
    # BUG FIX: np.tri(N, k) passed k positionally as M (number of columns),
    # so the diagonal offset was never exercised; pass it by keyword.
    return np.tri(N, k=k)
# Continuation of the @jit-able numpy wrappers (see the block above).
def tri_n_m_k(N, M=None, k=0):
    return np.tri(N, M, k)

def tril_m(m):
    return np.tril(m)

def tril_m_k(m, k=0):
    return np.tril(m, k)

def triu_m(m):
    return np.triu(m)

def triu_m_k(m, k=0):
    return np.triu(m, k)

def vander(x, N=None, increasing=False):
    return np.vander(x, N, increasing)

def partition(a, kth):
    return np.partition(a, kth)

def cov(m, y=None, rowvar=True, bias=False, ddof=None):
    return np.cov(m, y, rowvar, bias, ddof)

def corrcoef(x, y=None, rowvar=True):
    return np.corrcoef(x, y, rowvar)

def ediff1d(ary, to_end=None, to_begin=None):
    return np.ediff1d(ary, to_end, to_begin)

def roll(a, shift):
    return np.roll(a, shift)

def asarray(a):
    return np.asarray(a)

def asarray_kws(a, dtype):
    return np.asarray(a, dtype=dtype)

def extract(condition, arr):
    return np.extract(condition, arr)

def np_trapz(y):
    return np.trapz(y)

def np_trapz_x(y, x):
    return np.trapz(y, x)

def np_trapz_dx(y, dx):
    return np.trapz(y, dx=dx)

def np_trapz_x_dx(y, x, dx):
    return np.trapz(y, x, dx)

def interp(x, xp, fp):
    return np.interp(x, xp, fp)

def np_repeat(a, repeats):
    return np.repeat(a, repeats)

def array_repeat(a, repeats):
    # Method form (ndarray.repeat) as opposed to the np.repeat free function.
    return np.asarray(a).repeat(repeats)

def np_bartlett(M):
    return np.bartlett(M)

def np_blackman(M):
    return np.blackman(M)

def np_hamming(M):
    return np.hamming(M)

def np_hanning(M):
    return np.hanning(M)

def np_kaiser(M, beta):
    return np.kaiser(M, beta)
class TestNPFunctions(MemoryLeakMixin, TestCase):
"""
Tests for various Numpy functions.
"""
    def setUp(self):
        super(TestNPFunctions, self).setUp()
        # Fresh compilation cache per test plus a seeded RNG so data-driven
        # tests are reproducible across runs.
        self.ccache = CompilationCache()
        self.rnd = np.random.RandomState(42)

    def run_unary(self, pyfunc, x_types, x_values, flags=no_pyobj_flags,
                  func_extra_types=None, func_extra_args=None,
                  ignore_sign_on_zero=False, abs_tol=None, **kwargs):
        """
        Runs tests for a unary function operating in the numerical real space.

        Parameters
        ----------
        pyfunc : a python function definition holding that calls the numpy
                 functions to be tested.
        x_types: the types of the values being tested, see numba.types
        x_values: the numerical values of the values to be tested
        flags: flags to pass to the CompilationCache::ccache::compile function
        func_extra_types: the types of additional arguments to the numpy
                          function
        func_extra_args: additional arguments to the numpy function
        ignore_sign_on_zero: boolean as to whether to allow zero values
                             with incorrect signs to be considered equal
        prec: the required precision match, see assertPreciseEqual

        Notes:
        ------
        x_types and x_values must have the same length
        """
        for tx, vx in zip(x_types, x_values):
            if func_extra_args is None:
                # No extra arguments: use a single empty tuple so the inner
                # loop below runs exactly once per (type, value) pair.
                func_extra_types = func_extra_args = [()]
            for xtypes, xargs in zip(func_extra_types, func_extra_args):
                cr = self.ccache.compile(pyfunc, (tx,) + xtypes,
                                         flags=flags)
                cfunc = cr.entry_point
                got = cfunc(vx, *xargs)
                expected = pyfunc(vx, *xargs)
                try:
                    scalty = tx.dtype
                except AttributeError:
                    # tx is already a scalar type, not an array type.
                    scalty = tx
                # Single-precision inputs get the looser comparison mode.
                prec = ('single'
                        if scalty in (types.float32, types.complex64)
                        else 'double')
                msg = 'for input %r with prec %r' % (vx, prec)
                self.assertPreciseEqual(got, expected,
                                        prec=prec,
                                        msg=msg,
                                        ignore_sign_on_zero=ignore_sign_on_zero,
                                        abs_tol=abs_tol, **kwargs)
    def test_sinc(self):
        """
        Tests the sinc() function.
        This test is purely to assert numerical computations are correct.
        """
        # Ignore sign of zeros, this will need masking depending on numpy
        # version once the fix to numpy complex division is in upstream
        # See: https://github.com/numpy/numpy/pull/6699
        isoz = True
        # Testing sinc(1.) leads to sin(pi)/pi, which is below machine
        # precision in practice on most machines. Small floating point
        # differences in sin() etc. may lead to large differences in the result
        # that are at a range that is inaccessible using standard width
        # floating point representations.
        # e.g. Assume float64 type.
        # sin(pi) ~= 1e-16, but should be zero
        # sin(pi)/pi ~= 1e-17, should be zero, error carried from above
        # float64 has log10(2^53)~=15.9 digits of precision and the magnitude
        # change in the alg is > 16 digits (1.0...0 -> 0.0...0),
        # so comparison via ULP is invalid.
        # We therefore opt to assume that values under machine precision are
        # equal in this case.
        tol = "eps"
        pyfunc = sinc

        def check(x_types, x_values, **kwargs):
            self.run_unary(pyfunc, x_types, x_values,
                           ignore_sign_on_zero=isoz, abs_tol=tol,
                           **kwargs)

        # real domain scalar context
        x_values = [1., -1., 0.0, -0.0, 0.5, -0.5, 5, -5, 5e-21, -5e-21]
        x_types = [types.float32, types.float64] * (len(x_values) // 2)
        check(x_types, x_values)
        # real domain vector context: a single array value typed via typeof()
        x_values = [np.array(x_values, dtype=np.float64)]
        x_types = [typeof(v) for v in x_values]
        check(x_types, x_values)
        # complex domain scalar context
        x_values = [1.+0j, -1+0j, 0.0+0.0j, -0.0+0.0j, 0+1j, 0-1j, 0.5+0.0j,  # noqa
                    -0.5+0.0j, 0.5+0.5j, -0.5-0.5j, 5+5j, -5-5j,  # noqa
                    # the following are to test sin(x)/x for small x
                    5e-21+0j, -5e-21+0j, 5e-21j, +(0-5e-21j)  # noqa
                    ]
        x_types = [types.complex64, types.complex128] * (len(x_values) // 2)
        check(x_types, x_values, ulps=2)
        # complex domain vector context
        x_values = [np.array(x_values, dtype=np.complex128)]
        x_types = [typeof(v) for v in x_values]
        check(x_types, x_values, ulps=2)
def test_angle(self, flags=no_pyobj_flags):
"""
Tests the angle() function.
This test is purely to assert numerical computations are correct.
"""
pyfunc1 = angle1
pyfunc2 = angle2
def check(x_types, x_values):
# angle(x)
self.run_unary(pyfunc1, x_types, x_values)
# angle(x, deg)
xtra_values = [(True,), (False,)]
xtra_types = [(types.bool_,)] * len(xtra_values)
self.run_unary(pyfunc2, x_types, x_values,
func_extra_types=xtra_types,
func_extra_args=xtra_values,)
# real domain scalar context
x_values = [1., -1., 0.0, -0.0, 0.5, -0.5, 5, -5]
x_types = [types.float32, types.float64] * (len(x_values) // 2 + 1)
check(x_types, x_values)
# real domain vector context
x_values = [np.array(x_values, dtype=np.float64)]
x_types = [typeof(v) for v in x_values]
check(x_types, x_values)
# complex domain scalar context
x_values = [1.+0j, -1+0j, 0.0+0.0j, -0.0+0.0j, 1j, -1j, 0.5+0.0j, # noqa
-0.5+0.0j, 0.5+0.5j, -0.5-0.5j, 5+5j, -5-5j] # noqa
x_types = [types.complex64, types.complex128] * (len(x_values) // 2 + 1)
check(x_types, x_values)
# complex domain vector context
x_values = np.array(x_values)
x_types = [types.complex64, types.complex128]
check(x_types, x_values)
    # hits "Invalid PPC CTR loop!" issue on power systems, see e.g. #4026
    @unittest.skipIf(platform.machine() == 'ppc64le', "LLVM bug")
    def test_delete(self):
        def arrays():
            # Yields (array, obj) argument pairs covering np.delete's
            # supported input space.
            #
            # an array-like type
            yield [1, 2, 3, 4, 5], 3
            yield [1, 2, 3, 4, 5], [2, 3]
            # 1d array, scalar
            yield np.arange(10), 3
            yield np.arange(10), -3  # Negative obj
            # 1d array, list
            yield np.arange(10), [3, 5, 6]
            yield np.arange(10), [2, 3, 4, 5]
            # 3d array, scalar
            yield np.arange(3 * 4 * 5).reshape(3, 4, 5), 2
            # 3d array, list
            yield np.arange(3 * 4 * 5).reshape(3, 4, 5), [5, 30, 27, 8]
            # slices
            yield [1, 2, 3, 4], slice(1, 3, 1)
            yield np.arange(10), slice(10)

        pyfunc = delete
        cfunc = jit(nopython=True)(pyfunc)
        # Compiled result must match the pure-numpy result for every pair.
        for arr, obj in arrays():
            expected = pyfunc(arr, obj)
            got = cfunc(arr, obj)
            self.assertPreciseEqual(expected, got)

    def test_delete_exceptions(self):
        pyfunc = delete
        cfunc = jit(nopython=True)(pyfunc)
        # Exceptions leak references, so the mixin's leak check is disabled.
        self.disable_leak_check()
        # Non-integer obj scalar is rejected at typing time.
        with self.assertRaises(TypingError) as raises:
            cfunc([1, 2], 3.14)
        self.assertIn(
            'obj should be of Integer dtype',
            str(raises.exception)
        )
        # Non-integer obj sequence is rejected at typing time.
        with self.assertRaises(TypingError) as raises:
            cfunc(np.arange(10), [3.5, 5.6, 6.2])
        self.assertIn(
            'obj should be of Integer dtype',
            str(raises.exception)
        )
        # Scalar arr is rejected at typing time.
        with self.assertRaises(TypingError) as raises:
            cfunc(2, 3)
        self.assertIn(
            'arr must be either an Array or a Sequence',
            str(raises.exception)
        )
        # Out-of-bounds obj raises at run time.
        with self.assertRaises(IndexError) as raises:
            cfunc([1, 2], 3)
        self.assertIn(
            'obj must be less than the len(arr)',
            str(raises.exception),
        )
    def diff_arrays(self):
        """
        Some test arrays for np.diff()
        """
        # 1d, 2d and 3d inputs with non-linear (cubed) values.
        a = np.arange(12) ** 3
        yield a
        b = a.reshape((3, 4))
        yield b
        c = np.arange(24).reshape((3, 2, 4)) ** 3
        yield c

    def test_diff1(self):
        pyfunc = diff1
        cfunc = jit(nopython=True)(pyfunc)
        for arr in self.diff_arrays():
            expected = pyfunc(arr)
            got = cfunc(arr)
            self.assertPreciseEqual(expected, got)
        # 0-dim array: np.diff needs at least one dimension, so typing fails.
        a = np.array(42)
        with self.assertTypingError():
            cfunc(a)

    def test_diff2(self):
        pyfunc = diff2
        cfunc = jit(nopython=True)(pyfunc)
        for arr in self.diff_arrays():
            size = arr.shape[-1]
            # Orders straddling the axis length exercise the empty-result
            # code paths as well as the normal ones.
            for n in (0, 1, 2, 3, size - 1, size, size + 1, 421):
                expected = pyfunc(arr, n)
                got = cfunc(arr, n)
                self.assertPreciseEqual(expected, got)

    def test_diff2_exceptions(self):
        pyfunc = diff2
        cfunc = jit(nopython=True)(pyfunc)
        # Exceptions leak references
        self.disable_leak_check()
        # 0-dim array
        arr = np.array(42)
        with self.assertTypingError():
            cfunc(arr, 1)
        # Invalid `n`
        arr = np.arange(10)
        for n in (-1, -2, -42):
            with self.assertRaises(ValueError) as raises:
                cfunc(arr, n)
            self.assertIn("order must be non-negative", str(raises.exception))
    def bincount_sequences(self):
        """
        Some test sequences for np.bincount()
        """
        # Plain list, small array, and a large random int8 array.
        a = [1, 2, 5, 2, 3, 20]
        b = np.array([5, 8, 42, 5])
        c = self.rnd.randint(0, 100, size=300).astype(np.int8)
        return (a, b, c)

    def test_bincount1(self):
        pyfunc = bincount1
        cfunc = jit(nopython=True)(pyfunc)
        for seq in self.bincount_sequences():
            expected = pyfunc(seq)
            got = cfunc(seq)
            self.assertPreciseEqual(expected, got)

    def test_bincount1_exceptions(self):
        pyfunc = bincount1
        cfunc = jit(nopython=True)(pyfunc)
        # Exceptions leak references
        self.disable_leak_check()
        # Negative input
        with self.assertRaises(ValueError) as raises:
            cfunc([2, -1])
        self.assertIn("first argument must be non-negative",
                      str(raises.exception))

    def test_bincount2(self):
        pyfunc = bincount2
        cfunc = jit(nopython=True)(pyfunc)
        for seq in self.bincount_sequences():
            w = [math.sqrt(x) - 2 for x in seq]
            # weights as list, then array, mixed types, check upcast is ok
            for weights in (w, np.array(w), seq, np.array(seq)):
                expected = pyfunc(seq, weights)
                got = cfunc(seq, weights)
                self.assertPreciseEqual(expected, got)

    def test_bincount2_exceptions(self):
        pyfunc = bincount2
        cfunc = jit(nopython=True)(pyfunc)
        # Exceptions leak references
        self.disable_leak_check()
        # Negative input
        with self.assertRaises(ValueError) as raises:
            cfunc([2, -1], [0, 0])
        self.assertIn("first argument must be non-negative",
                      str(raises.exception))
        # Mismatching input sizes
        with self.assertRaises(ValueError) as raises:
            cfunc([2, -1], [0])
        self.assertIn("weights and list don't have the same length",
                      str(raises.exception))
    def test_searchsorted(self):
        pyfunc = searchsorted
        cfunc = jit(nopython=True)(pyfunc)
        pyfunc_left = searchsorted_left
        cfunc_left = jit(nopython=True)(pyfunc_left)
        pyfunc_right = searchsorted_right
        cfunc_right = jit(nopython=True)(pyfunc_right)

        def check(a, v):
            # Default side, side='left' and side='right' must all match numpy.
            expected = pyfunc(a, v)
            got = cfunc(a, v)
            self.assertPreciseEqual(expected, got)
            expected = pyfunc_left(a, v)
            got = cfunc_left(a, v)
            self.assertPreciseEqual(expected, got)
            expected = pyfunc_right(a, v)
            got = cfunc_right(a, v)
            self.assertPreciseEqual(expected, got)

        # First with integer values (no NaNs)
        bins = np.arange(5) ** 2
        values = np.arange(20) - 1
        for a in (bins, list(bins)):
            # Scalar values
            for v in values:
                check(a, v)
            # Array values
            for v in (values, values.reshape((4, 5))):
                check(a, v)
            # Sequence values
            check(a, list(values))
        # Second with float values (including NaNs)
        bins = np.float64(list(bins) + [float('nan')] * 7) / 2.0
        values = np.arange(20) - 0.5
        for a in (bins, list(bins)):
            # Scalar values
            for v in values:
                check(a, v)
            # Array values
            for v in (values, values.reshape((4, 5))):
                check(a, v)
            # Sequence values
            check(a, list(values))
        # nonsense value for 'side' raises TypingError
        def bad_side(a, v):
            return np.searchsorted(a, v, side='nonsense')
        cfunc = jit(nopython=True)(bad_side)
        with self.assertTypingError():
            cfunc([1,2], 1)
        # non-constant value for 'side' raises TypingError
        def nonconst_side(a, v, side='left'):
            return np.searchsorted(a, v, side=side)
        cfunc = jit(nopython=True)(nonconst_side)
        with self.assertTypingError():
            cfunc([1,2], 1, side='right')
    def test_digitize(self):
        pyfunc = digitize
        cfunc = jit(nopython=True)(pyfunc)

        def check(*args):
            expected = pyfunc(*args)
            got = cfunc(*args)
            self.assertPreciseEqual(expected, got)

        # Values include edges, infinities and NaN; shuffled so ordering
        # cannot mask indexing errors.
        values = np.float64((0, 0.99, 1, 4.4, 4.5, 7, 8, 9, 9.5,
                             float('inf'), float('-inf'), float('nan')))
        assert len(values) == 12
        self.rnd.shuffle(values)
        bins1 = np.float64([1, 3, 4.5, 8])
        bins2 = np.float64([1, 3, 4.5, 8, float('inf'), float('-inf')])
        bins3 = np.float64([1, 3, 4.5, 8, float('inf'), float('-inf')]
                           + [float('nan')] * 10)
        if np_version >= (1, 10):
            all_bins = [bins1, bins2, bins3]
            xs = [values, values.reshape((3, 4))]
        else:
            # Numpy < 1.10 had trouble with NaNs and N-d arrays
            all_bins = [bins1, bins2]
            xs = [values]
        # 2-ary digitize()
        for bins in all_bins:
            bins.sort()
            for x in xs:
                # Both ascending and descending bin orders are supported.
                check(x, bins)
                check(x, bins[::-1])
        # 3-ary digitize()
        for bins in all_bins:
            bins.sort()
            for right in (True, False):
                check(values, bins, right)
                check(values, bins[::-1], right)
        # Sequence input
        check(list(values), bins1)
    def test_histogram(self):
        pyfunc = histogram
        cfunc = jit(nopython=True)(pyfunc)

        def check(*args):
            pyhist, pybins = pyfunc(*args)
            chist, cbins = cfunc(*args)
            self.assertPreciseEqual(pyhist, chist)
            # There can be a slight discrepancy in the linspace() result
            # when `bins` is an integer...
            self.assertPreciseEqual(pybins, cbins, prec='double', ulps=2)

        def check_values(values):
            # Explicit bins array
            # (note Numpy seems to not support NaN bins)
            bins = np.float64([1, 3, 4.5, 8])
            check(values, bins)
            check(values.reshape((3, 4)), bins)
            # Explicit number of bins
            check(values, 7)
            # Explicit number of bins and bins range
            check(values, 7, (1.0, 13.5))
            # Implicit bins=10
            check(values)

        # Includes out-of-range values and negative zero; shuffled so
        # ordering cannot mask binning errors.
        values = np.float64((0, 0.99, 1, 4.4, 4.5, 7, 8,
                             9, 9.5, 42.5, -1.0, -0.0))
        assert len(values) == 12
        self.rnd.shuffle(values)
        check_values(values)
    def _test_correlate_convolve(self, pyfunc):
        # Shared driver for the np.correlate / np.convolve tests below.
        cfunc = jit(nopython=True)(pyfunc)
        # only 1d arrays are accepted, test varying lengths
        # and varying dtype
        lengths = (1, 2, 3, 7)
        dts = [np.int8, np.int32, np.int64, np.float32, np.float64,
               np.complex64, np.complex128]
        for dt1, dt2, n, m in itertools.product(dts, dts, lengths, lengths):
            a = np.arange(n, dtype=dt1)
            v = np.arange(m, dtype=dt2)
            # For complex dtypes, give the imaginary part non-zero values.
            if np.issubdtype(dt1, np.complexfloating):
                a = (a + 1j * a).astype(dt1)
            if np.issubdtype(dt2, np.complexfloating):
                v = (v + 1j * v).astype(dt2)
            expected = pyfunc(a, v)
            got = cfunc(a, v)
            self.assertPreciseEqual(expected, got)
        # 2-D input (in either position) must be rejected at typing time.
        _a = np.arange(12).reshape(4, 3)
        _b = np.arange(12)
        for x, y in [(_a, _b), (_b, _a)]:
            with self.assertRaises(TypingError) as raises:
                cfunc(x, y)
            msg = 'only supported on 1D arrays'
            self.assertIn(msg, str(raises.exception))

    def test_correlate(self):
        self._test_correlate_convolve(correlate)
        # correlate supports 0 dimension arrays
        _a = np.ones(shape=(0,))
        _b = np.arange(5)
        cfunc = jit(nopython=True)(correlate)
        for x, y in [(_a, _b), (_b, _a), (_a, _a)]:
            expected = correlate(x, y)
            got = cfunc(x, y)
            self.assertPreciseEqual(expected, got)
    def test_convolve(self):
        self._test_correlate_convolve(convolve)

    def test_convolve_exceptions(self):
        # Exceptions leak references
        self.disable_leak_check()
        # convolve raises if either array has a 0 dimension
        _a = np.ones(shape=(0,))
        _b = np.arange(5)
        cfunc = jit(nopython=True)(convolve)
        for x, y in [(_a, _b), (_b, _a)]:
            with self.assertRaises(ValueError) as raises:
                cfunc(x, y)
            # The error message names whichever argument was empty.
            if len(x) == 0:
                self.assertIn("'a' cannot be empty", str(raises.exception))
            else:
                self.assertIn("'v' cannot be empty", str(raises.exception))

    def _check_output(self, pyfunc, cfunc, params, abs_tol=None):
        # Compare compiled output against pure python for one kwargs dict.
        expected = pyfunc(**params)
        got = cfunc(**params)
        self.assertPreciseEqual(expected, got, abs_tol=abs_tol)
def test_vander_basic(self):
pyfunc = vander
cfunc = jit(nopython=True)(pyfunc)
_check_output = partial(self._check_output, pyfunc, cfunc)
def _check(x):
n_choices = [None, 0, 1, 2, 3, 4]
increasing_choices = [True, False]
# N and increasing defaulted
params = {'x': x}
_check_output(params)
# N provided and increasing defaulted
for n in n_choices:
params = {'x': x, 'N': n}
_check_output(params)
# increasing provided and N defaulted:
for increasing in increasing_choices:
params = {'x': x, 'increasing': increasing}
_check_output(params)
# both n and increasing supplied
for n in n_choices:
for increasing in increasing_choices:
params = {'x': x, 'N': n, 'increasing': increasing}
_check_output(params)
_check(np.array([1, 2, 3, 5]))
_check(np.arange(7) - 10.5)
_check(np.linspace(3, 10, 5))
_check(np.array([1.2, np.nan, np.inf, -np.inf]))
_check(np.array([]))
_check(np.arange(-5, 5) - 0.3)
# # boolean array
_check(np.array([True] * 5 + [False] * 4))
# cycle through dtypes to check type promotion a la numpy
for dtype in np.int32, np.int64, np.float32, np.float64:
_check(np.arange(10, dtype=dtype))
# non array inputs
_check([0, 1, 2, 3])
_check((4, 5, 6, 7))
_check((0.0, 1.0, 2.0))
_check(())
# edge cases
_check((3, 4.444, 3.142))
_check((True, False, 4))
def test_vander_exceptions(self):
    """np.vander error paths: bad N type, negative N, non-1D input."""
    pyfunc = vander
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    x = np.arange(5) - 0.5

    def _check_n(N):
        # non-integer N must be rejected at typing time
        with self.assertTypingError() as raises:
            cfunc(x, N=N)
        assert "Second argument N must be None or an integer" in str(raises.exception)

    for N in 1.1, True, np.inf, [1, 2]:
        _check_n(N)

    # negative N is a runtime ValueError, matching NumPy
    with self.assertRaises(ValueError) as raises:
        cfunc(x, N=-1)
    assert "Negative dimensions are not allowed" in str(raises.exception)

    def _check_1d(x):
        # x must be 1-D (array or sequence)
        with self.assertRaises(ValueError) as raises:
            cfunc(x)
        self.assertEqual("x must be a one-dimensional array or sequence.", str(raises.exception))

    x = np.arange(27).reshape((3, 3, 3))
    _check_1d(x)
    x = ((2, 3), (4, 5))
    _check_1d(x)
def test_tri_n_basic(self):
    """np.tri with only N supplied (M and k defaulted)."""
    pyfunc = tri_n
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)

    def n_variations():
        return np.arange(-4, 8)  # number of rows

    # N supplied, M and k defaulted
    for n in n_variations():
        params = {'N': n}
        _check(params)
def test_tri_n_m_basic(self):
    """np.tri with N and M supplied (k defaulted)."""
    pyfunc = tri_n_m
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)

    def n_variations():
        return np.arange(-4, 8)  # number of rows

    def m_variations():
        return itertools.chain.from_iterable(([None], range(-5, 9)))  # number of columns

    # N supplied, M and k defaulted
    for n in n_variations():
        params = {'N': n}
        _check(params)
    # N and M supplied, k defaulted
    for n in n_variations():
        for m in m_variations():
            params = {'N': n, 'M': m}
            _check(params)
def test_tri_n_k_basic(self):
    """np.tri with N and k supplied (M defaulted)."""
    pyfunc = tri_n_k
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)

    def n_variations():
        return np.arange(-4, 8)  # number of rows

    def k_variations():
        return np.arange(-10, 10)  # offset

    # N supplied, M and k defaulted
    for n in n_variations():
        params = {'N': n}
        _check(params)
    # N and k supplied, M defaulted
    for n in n_variations():
        for k in k_variations():
            params = {'N': n, 'k': k}
            _check(params)
def test_tri_n_m_k_basic(self):
    """np.tri with all of N, M and k supplied (and partial combinations)."""
    pyfunc = tri_n_m_k
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)

    def n_variations():
        return np.arange(-4, 8)  # number of rows

    def m_variations():
        return itertools.chain.from_iterable(([None], range(-5, 9)))  # number of columns

    def k_variations():
        return np.arange(-10, 10)  # offset

    # N supplied, M and k defaulted
    for n in n_variations():
        params = {'N': n}
        _check(params)
    # N and M supplied, k defaulted
    for n in n_variations():
        for m in m_variations():
            params = {'N': n, 'M': m}
            _check(params)
    # N and k supplied, M defaulted
    for n in n_variations():
        for k in k_variations():
            params = {'N': n, 'k': k}
            _check(params)
    # N, M and k supplied
    for n in n_variations():
        for k in k_variations():
            for m in m_variations():
                params = {'N': n, 'M': m, 'k': k}
                _check(params)
def test_tri_exceptions(self):
    """np.tri: non-integer k must fail at typing time."""
    pyfunc = tri_n_m_k
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()

    def _check(k):
        with self.assertTypingError() as raises:
            cfunc(5, 6, k=k)
        assert "k must be an integer" in str(raises.exception)

    for k in 1.5, True, np.inf, [1, 2]:
        _check(k)
def _triangular_matrix_tests_m(self, pyfunc):
    """Driver for tril/triu variants taking only the matrix argument."""
    cfunc = jit(nopython=True)(pyfunc)

    def _check(arr):
        expected = pyfunc(arr)
        got = cfunc(arr)
        # TODO: Contiguity of result not consistent with numpy
        self.assertEqual(got.dtype, expected.dtype)
        np.testing.assert_array_equal(got, expected)

    # NOTE: _triangular_matrix_tests_inner is a staticmethod that takes the
    # test instance explicitly, hence `self` being passed as an argument.
    return self._triangular_matrix_tests_inner(self, pyfunc, _check)
def _triangular_matrix_tests_m_k(self, pyfunc):
    """Driver for tril/triu variants taking the matrix plus an offset k."""
    cfunc = jit(nopython=True)(pyfunc)

    def _check(arr):
        # sweep k over a range of offsets, including the defaulted case
        for k in itertools.chain.from_iterable(([None], range(-10, 10))):
            if k is None:
                params = {}
            else:
                params = {'k': k}
            expected = pyfunc(arr, **params)
            got = cfunc(arr, **params)
            # TODO: Contiguity of result not consistent with numpy
            self.assertEqual(got.dtype, expected.dtype)
            np.testing.assert_array_equal(got, expected)

    # NOTE: _triangular_matrix_tests_inner is a staticmethod that takes the
    # test instance explicitly, hence `self` being passed as an argument.
    return self._triangular_matrix_tests_inner(self, pyfunc, _check)
@staticmethod
def _triangular_matrix_tests_inner(self, pyfunc, _check):
    """Shared input matrix generator for the tril/triu drivers.

    NOTE: a staticmethod that receives the test instance explicitly as
    its first argument (called as ``self._triangular_matrix_tests_inner(
    self, ...)`` by the drivers above).
    """
    def check_odd(a):
        # 63 elements: reshape into 2-D and 4-D views, plus a transpose
        _check(a)
        a = a.reshape((9, 7))
        _check(a)
        a = a.reshape((7, 1, 3, 3))
        _check(a)
        _check(a.T)

    def check_even(a):
        # 64 elements: reshape into 2-D and 4-D views, plus a transpose
        _check(a)
        a = a.reshape((4, 16))
        _check(a)
        a = a.reshape((4, 2, 2, 4))
        _check(a)
        _check(a.T)

    check_odd(np.arange(63) + 10.5)
    check_even(np.arange(64) - 10.5)
    # edge cases
    _check(np.arange(360).reshape(3, 4, 5, 6))
    _check(np.array([]))
    _check(np.arange(9).reshape((3, 3))[::-1])
    _check(np.arange(9).reshape((3, 3), order='F'))
    arr = (np.arange(64) - 10.5).reshape((4, 2, 2, 4))
    _check(arr)
    _check(np.asfortranarray(arr))
def _triangular_matrix_exceptions(self, pyfunc):
    """Shared check: tril/triu reject a non-integer k at typing time."""
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    a = np.ones((5, 6))
    with self.assertTypingError() as raises:
        cfunc(a, k=1.5)
    assert "k must be an integer" in str(raises.exception)
def test_tril_basic(self):
    """np.tril: both the m-only and the m-and-k signatures."""
    self._triangular_matrix_tests_m(tril_m)
    self._triangular_matrix_tests_m_k(tril_m_k)
def test_tril_exceptions(self):
    """np.tril: non-integer k is rejected."""
    self._triangular_matrix_exceptions(tril_m_k)
def test_triu_basic(self):
    """np.triu: both the m-only and the m-and-k signatures."""
    self._triangular_matrix_tests_m(triu_m)
    self._triangular_matrix_tests_m_k(triu_m_k)
def test_triu_exceptions(self):
    """np.triu: non-integer k is rejected."""
    self._triangular_matrix_exceptions(triu_m_k)
def partition_sanity_check(self, pyfunc, cfunc, a, kth):
    """Compare partition results without requiring identical element order."""
    # as NumPy uses a different algorithm, we do not expect to match outputs exactly...
    expected = pyfunc(a, kth)
    got = cfunc(a, kth)
    # ... but we do expect the unordered collection of elements up to kth to tie out
    self.assertPreciseEqual(np.unique(expected[:kth]), np.unique(got[:kth]))
    # ... likewise the unordered collection of elements from kth onwards
    self.assertPreciseEqual(np.unique(expected[kth:]), np.unique(got[kth:]))
def test_partition_fuzz(self):
    """Randomized np.partition checks over array/list/tuple inputs.

    Inspired by the test of the same name in:
    https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py
    """
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    for j in range(10, 30):
        for i in range(1, j - 2):
            d = np.arange(j)
            self.rnd.shuffle(d)
            # fold values into a smaller range to create duplicates
            d = d % self.rnd.randint(2, 30)
            idx = self.rnd.randint(d.size)
            kth = [0, idx, i, i + 1, -idx, -i]  # include some negative kth's
            tgt = np.sort(d)[kth]
            self.assertPreciseEqual(cfunc(d, kth)[kth], tgt)  # a -> array
            self.assertPreciseEqual(cfunc(d.tolist(), kth)[kth], tgt)  # a -> list
            self.assertPreciseEqual(cfunc(tuple(d.tolist()), kth)[kth], tgt)  # a -> tuple
            for k in kth:
                self.partition_sanity_check(pyfunc, cfunc, d, k)
def test_partition_exception_out_of_range(self):
    """np.partition: out-of-range kth raises ValueError.

    Inspired by the test of the same name in:
    https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py
    """
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    # Test out of range values in kth raise an error
    a = np.arange(10)

    def _check(a, kth):
        with self.assertRaises(ValueError) as e:
            cfunc(a, kth)
        assert str(e.exception) == "kth out of bounds"

    _check(a, 10)
    _check(a, -11)
    _check(a, (3, 30))
def test_partition_exception_non_integer_kth(self):
    """np.partition: non-integer kth fails at typing time.

    Inspired by the test of the same name in:
    https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py
    """
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()

    def _check(a, kth):
        with self.assertTypingError() as raises:
            cfunc(a, kth)
        self.assertIn("Partition index must be integer", str(raises.exception))

    a = np.arange(10)
    _check(a, 9.0)
    _check(a, (3.3, 4.4))
    _check(a, np.array((1, 2, np.nan)))  # nan forces a float array
def test_partition_exception_a_not_array_like(self):
    """np.partition: non-array-like first argument fails at typing time."""
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()

    def _check(a, kth):
        with self.assertTypingError() as raises:
            cfunc(a, kth)
        self.assertIn('The first argument must be an array-like', str(raises.exception))

    _check(4, 0)
    _check('Sausages', 0)
def test_partition_exception_a_zero_dim(self):
    """np.partition: 0-D array input fails at typing time."""
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()

    def _check(a, kth):
        with self.assertTypingError() as raises:
            cfunc(a, kth)
        self.assertIn('The first argument must be at least 1-D (found 0-D)', str(raises.exception))

    _check(np.array(1), 0)
def test_partition_exception_kth_multi_dimensional(self):
    """np.partition: kth with more than one dimension raises ValueError."""
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()

    def _check(a, kth):
        with self.assertRaises(ValueError) as raises:
            cfunc(a, kth)
        self.assertIn('kth must be scalar or 1-D', str(raises.exception))

    _check(np.arange(10), kth=np.arange(6).reshape(3, 2))
def test_partition_empty_array(self):
    """np.partition on empty inputs (including multidimensional 0-size).

    Inspired by the test of the same name in:
    https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py
    """
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)

    def check(a, kth=0):
        expected = pyfunc(a, kth)
        got = cfunc(a, kth)
        self.assertPreciseEqual(expected, got)

    # check axis handling for multidimensional empty arrays
    a = np.array([])
    a.shape = (3, 2, 1, 0)
    # include this with some other empty data structures
    for arr in a, (), np.array([]):
        check(arr)
def test_partition_basic(self):
    """np.partition against sorted references over many input shapes.

    Inspired by the test of the same name in:
    https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py
    """
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    # empty and single-element inputs come back unchanged
    d = np.array([])
    got = cfunc(d, 0)
    self.assertPreciseEqual(d, got)
    d = np.ones(1)
    got = cfunc(d, 0)
    self.assertPreciseEqual(d, got)
    # kth not modified
    kth = np.array([30, 15, 5])
    okth = kth.copy()
    cfunc(np.arange(40), kth)
    self.assertPreciseEqual(kth, okth)
    # two-element inputs, every kth
    for r in ([2, 1], [1, 2], [1, 1]):
        d = np.array(r)
        tgt = np.sort(d)
        for k in 0, 1:
            self.assertPreciseEqual(cfunc(d, k)[k], tgt[k])
            self.partition_sanity_check(pyfunc, cfunc, d, k)
    # three-element inputs, every kth
    for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
              [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
        d = np.array(r)
        tgt = np.sort(d)
        for k in 0, 1, 2:
            self.assertPreciseEqual(cfunc(d, k)[k], tgt[k])
            self.partition_sanity_check(pyfunc, cfunc, d, k)
    d = np.ones(50)
    self.assertPreciseEqual(cfunc(d, 0), d)
    # sorted
    d = np.arange(49)
    for k in 5, 15:
        self.assertEqual(cfunc(d, k)[k], k)
        self.partition_sanity_check(pyfunc, cfunc, d, k)
    # rsorted, with input flavours: array, list and tuple
    d = np.arange(47)[::-1]
    for a in d, d.tolist(), tuple(d.tolist()):
        self.assertEqual(cfunc(a, 6)[6], 6)
        self.assertEqual(cfunc(a, 16)[16], 16)
        self.assertPreciseEqual(cfunc(a, -6), cfunc(a, 41))
        self.assertPreciseEqual(cfunc(a, -16), cfunc(a, 31))
        self.partition_sanity_check(pyfunc, cfunc, d, -16)
    # median of 3 killer, O(n^2) on pure median 3 pivot quickselect
    # exercises the median of median of 5 code used to keep O(n)
    d = np.arange(1000000)
    x = np.roll(d, d.size // 2)
    mid = x.size // 2 + 1
    self.assertEqual(cfunc(x, mid)[mid], mid)
    d = np.arange(1000001)
    x = np.roll(d, d.size // 2 + 1)
    mid = x.size // 2 + 1
    self.assertEqual(cfunc(x, mid)[mid], mid)
    # max
    d = np.ones(10)
    d[1] = 4
    self.assertEqual(cfunc(d, (2, -1))[-1], 4)
    self.assertEqual(cfunc(d, (2, -1))[2], 1)
    d[1] = np.nan
    assert np.isnan(cfunc(d, (2, -1))[-1])  # NaN sorts to the end
    # equal elements
    d = np.arange(47) % 7
    tgt = np.sort(np.arange(47) % 7)
    self.rnd.shuffle(d)
    for i in range(d.size):
        self.assertEqual(cfunc(d, i)[i], tgt[i])
        self.partition_sanity_check(pyfunc, cfunc, d, i)
    d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
                  7, 7, 7, 7, 7, 9])
    kth = [0, 3, 19, 20]
    self.assertEqual(tuple(cfunc(d, kth)[kth]), (0, 3, 7, 7))
    td = [(dt, s) for dt in [np.int32, np.float32] for s in (9, 16)]
    for dt, s in td:
        d = np.arange(s, dtype=dt)
        self.rnd.shuffle(d)
        d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
        # BUG FIX: this was ``map(self.rnd.shuffle, d1)``, which on
        # Python 3 builds a lazy iterator that is never consumed, so the
        # rows were never shuffled.  Shuffle each row explicitly instead.
        for row in d1:
            self.rnd.shuffle(row)
        for i in range(d.size):
            p = cfunc(d, i)
            self.assertEqual(p[i], i)
            # all before are smaller
            np.testing.assert_array_less(p[:i], p[i])
            # all after are larger
            np.testing.assert_array_less(p[i], p[i + 1:])
            # sanity check
            self.partition_sanity_check(pyfunc, cfunc, d, i)
def assert_partitioned(self, pyfunc, cfunc, d, kth):
    """Assert d is correctly partitioned at every index in kth.

    For each k: everything before d[k] must be strictly less, everything
    from d[k] onwards must be >= d[k].
    """
    prev = 0
    for k in np.sort(kth):
        np.testing.assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
        assert (d[k:] >= d[k]).all(), "kth %d, %r not greater equal %d" % (k, d[k:], d[k])
        prev = k + 1
        self.partition_sanity_check(pyfunc, cfunc, d, k)
def test_partition_iterative(self):
    """np.partition with multiple / repeated / unsorted kth values.

    Inspired by the test of the same name in:
    https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py
    """
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    assert_partitioned = partial(self.assert_partitioned, pyfunc, cfunc)
    d = np.array([3, 4, 2, 1])
    p = cfunc(d, (0, 3))
    assert_partitioned(p, (0, 3))
    assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
    # negative kth values are equivalent to their positive counterparts
    self.assertPreciseEqual(p, cfunc(d, (-3, -1)))
    # partitioning at every index fully sorts
    d = np.arange(17)
    self.rnd.shuffle(d)
    self.assertPreciseEqual(np.arange(17), cfunc(d, list(range(d.size))))
    # test unsorted kth
    d = np.arange(17)
    self.rnd.shuffle(d)
    keys = np.array([1, 3, 8, -2])
    self.rnd.shuffle(d)
    p = cfunc(d, keys)
    assert_partitioned(p, keys)
    # result must not depend on the order of kth
    self.rnd.shuffle(keys)
    self.assertPreciseEqual(cfunc(d, keys), p)
    # equal kth
    d = np.arange(20)[::-1]
    assert_partitioned(cfunc(d, [5] * 4), [5])
    assert_partitioned(cfunc(d, [5] * 4 + [6, 13]), [5] * 4 + [6, 13])
def test_partition_multi_dim(self):
    """np.partition on multi-dimensional input (partitions the last axis)."""
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)

    def check(a, kth):
        expected = pyfunc(a, kth)
        got = cfunc(a, kth)
        # the element at kth must match exactly...
        self.assertPreciseEqual(expected[:, :, kth], got[:, :, kth])
        # ...and each 1-D lane must hold the same unordered halves
        for s in np.ndindex(expected.shape[:-1]):
            self.assertPreciseEqual(np.unique(expected[s][:kth]), np.unique(got[s][:kth]))
            self.assertPreciseEqual(np.unique(expected[s][kth:]), np.unique(got[s][kth:]))

    def a_variations(a):
        yield a
        yield a.T
        yield np.asfortranarray(a)
        yield np.full_like(a, fill_value=np.nan)
        yield np.full_like(a, fill_value=np.inf)
        yield (((1.0, 3.142, -np.inf, 3),),)  # multi-dimensional tuple input

    a = np.linspace(1, 10, 48)
    a[4:7] = np.nan
    a[8] = -np.inf
    a[9] = np.inf
    a = a.reshape((4, 3, 4))
    for arr in a_variations(a):
        for k in range(-3, 3):
            check(arr, k)
def test_partition_boolean_inputs(self):
    """np.partition with boolean arrays and boolean kth values."""
    pyfunc = partition
    cfunc = jit(nopython=True)(pyfunc)
    for d in np.linspace(1, 10, 17), np.array((True, False, True)):
        for kth in True, False, -1, 0, 1:
            self.partition_sanity_check(pyfunc, cfunc, d, kth)
@unittest.skipUnless(np_version >= (1, 10), "cov needs Numpy 1.10+")
@needs_blas
def test_cov_invalid_ddof(self):
    """np.cov: invalid ddof values (wrong type, non-finite, non-integral)."""
    pyfunc = cov
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    m = np.array([[0, 2], [1, 1], [2, 0]]).T
    # non-scalar / complex ddof -> typing error
    for ddof in np.arange(4), 4j:
        with self.assertTypingError() as raises:
            cfunc(m, ddof=ddof)
        self.assertIn('ddof must be a real numerical scalar type', str(raises.exception))
    # non-finite ddof -> runtime ValueError
    for ddof in np.nan, np.inf:
        with self.assertRaises(ValueError) as raises:
            cfunc(m, ddof=ddof)
        self.assertIn('Cannot convert non-finite ddof to integer', str(raises.exception))
    # fractional ddof -> runtime ValueError
    for ddof in 1.1, -0.7:
        with self.assertRaises(ValueError) as raises:
            cfunc(m, ddof=ddof)
        self.assertIn('ddof must be integral value', str(raises.exception))
def corr_corrcoef_basic(self, pyfunc, first_arg_name):
    """Shared driver for np.cov/np.corrcoef with all optional args defaulted.

    ``first_arg_name`` is 'm' for cov and 'x' for corrcoef.
    """
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)

    def input_variations():
        # array inputs
        yield np.array([[0, 2], [1, 1], [2, 0]]).T
        yield self.rnd.randn(100).reshape(5, 20)
        yield np.asfortranarray(np.array([[0, 2], [1, 1], [2, 0]]).T)
        yield self.rnd.randn(100).reshape(5, 20)[:, ::2]
        yield np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])
        yield np.full((4, 5), fill_value=True)
        yield np.array([np.nan, 0.5969, -np.inf, 0.9918, 0.7964])
        yield np.linspace(-3, 3, 33).reshape(33, 1)
        # non-array inputs
        yield ((0.1, 0.2), (0.11, 0.19), (0.09, 0.21))  # UniTuple
        yield ((0.1, 0.2), (0.11, 0.19), (0.09j, 0.21j))  # Tuple
        yield (-2.1, -1, 4.3)
        yield (1, 2, 3)
        yield [4, 5, 6]
        yield ((0.1, 0.2, 0.3), (0.1, 0.2, 0.3))
        yield [(1, 2, 3), (1, 3, 2)]
        yield 3.142
        yield ((1.1, 2.2, 1.5),)
        # empty data structures
        yield np.array([])
        yield np.array([]).reshape(0, 2)
        yield np.array([]).reshape(2, 0)
        yield ()

    # all inputs other than the first are defaulted
    for input_arr in input_variations():
        _check({first_arg_name: input_arr})
@unittest.skipUnless(np_version >= (1, 10), "corrcoef needs Numpy 1.10+")
@needs_blas
def test_corrcoef_basic(self):
    """np.corrcoef: defaulted-argument cases via the shared driver."""
    pyfunc = corrcoef
    self.corr_corrcoef_basic(pyfunc, first_arg_name='x')
@unittest.skipUnless(np_version >= (1, 10), "cov needs Numpy 1.10+")
@needs_blas
def test_cov_basic(self):
    """np.cov: defaulted-argument cases via the shared driver."""
    pyfunc = cov
    self.corr_corrcoef_basic(pyfunc, first_arg_name='m')
@unittest.skipUnless(np_version >= (1, 10), "cov needs Numpy 1.10+")
@needs_blas
def test_cov_explicit_arguments(self):
    """np.cov: the full cross-product of y/rowvar/bias/ddof arguments."""
    pyfunc = cov
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)
    m = self.rnd.randn(105).reshape(15, 7)
    y_choices = None, m[::-1]
    rowvar_choices = False, True
    bias_choices = False, True
    ddof_choice = None, -1, 0, 1, 3.0, True
    for y, rowvar, bias, ddof in itertools.product(y_choices, rowvar_choices, bias_choices, ddof_choice):
        params = {'m': m, 'y': y, 'ddof': ddof, 'bias': bias, 'rowvar': rowvar}
        _check(params)
@unittest.skipUnless(np_version >= (1, 10), "corrcoef needs Numpy 1.10+")
@needs_blas
def test_corrcoef_explicit_arguments(self):
    """np.corrcoef: the cross-product of y/rowvar arguments."""
    pyfunc = corrcoef
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)
    x = self.rnd.randn(105).reshape(15, 7)
    y_choices = None, x[::-1]
    rowvar_choices = False, True
    for y, rowvar in itertools.product(y_choices, rowvar_choices):
        params = {'x': x, 'y': y, 'rowvar': rowvar}
        _check(params)
def cov_corrcoef_edge_cases(self, pyfunc, first_arg_name):
    """Shared edge-case driver for np.cov/np.corrcoef (mixed shapes/types).

    # some of these examples borrowed from numpy doc string examples:
    # https://github.com/numpy/numpy/blob/v1.15.0/numpy/lib/function_base.py#L2199-L2231
    # some borrowed from TestCov and TestCorrCoef:
    # https://github.com/numpy/numpy/blob/80d3a7a/numpy/lib/tests/test_function_base.py
    """
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)
    m = np.array([-2.1, -1, 4.3])
    y = np.array([3, 1.1, 0.12])
    params = {first_arg_name: m, 'y': y}
    _check(params)
    m = np.array([1, 2, 3])  # test case modified such that m is 1D
    y = np.array([[1j, 2j, 3j]])
    params = {first_arg_name: m, 'y': y}
    _check(params)
    m = np.array([1, 2, 3])
    y = (1j, 2j, 3j)
    params = {first_arg_name: m, 'y': y}
    _check(params)
    params = {first_arg_name: y, 'y': m}  # flip real and complex inputs
    _check(params)
    m = np.array([1, 2, 3])
    y = (1j, 2j, 3)  # note last item is not complex
    params = {first_arg_name: m, 'y': y}
    _check(params)
    params = {first_arg_name: y, 'y': m}  # flip real and complex inputs
    _check(params)
    m = np.array([])
    y = np.array([])
    params = {first_arg_name: m, 'y': y}
    _check(params)
    m = 1.1
    y = 2.2
    params = {first_arg_name: m, 'y': y}
    _check(params)
    m = self.rnd.randn(10, 3)
    y = np.array([-2.1, -1, 4.3]).reshape(1, 3) / 10
    params = {first_arg_name: m, 'y': y}
    _check(params)
    # The following tests pass with numpy version >= 1.10, but fail with 1.9
    m = np.array([-2.1, -1, 4.3])
    y = np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]])
    params = {first_arg_name: m, 'y': y}
    _check(params)
    for rowvar in False, True:
        m = np.array([-2.1, -1, 4.3])
        y = np.array([[3, 1.1, 0.12], [3, 1.1, 0.12], [4, 1.1, 0.12]])
        params = {first_arg_name: m, 'y': y, 'rowvar': rowvar}
        _check(params)
        params = {first_arg_name: y, 'y': m, 'rowvar': rowvar}  # swap m and y
        _check(params)
@unittest.skipUnless(np_version >= (1, 10), "corrcoef needs Numpy 1.10+")
@needs_blas
def test_corrcoef_edge_cases(self):
    """np.corrcoef edge cases, including scalar inputs."""
    pyfunc = corrcoef
    self.cov_corrcoef_edge_cases(pyfunc, first_arg_name='x')
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)
    # scalar inputs (incl. non-finite)
    for x in (np.nan, -np.inf, 3.142, 0):
        params = {'x': x}
        _check(params)
@unittest.skipUnless(np_version >= (1, 11), "behaviour per Numpy 1.11+")
@needs_blas
def test_corrcoef_edge_case_extreme_values(self):
    """np.corrcoef with values spanning ~200 orders of magnitude."""
    pyfunc = corrcoef
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)
    # extreme values
    x = ((1e-100, 1e100), (1e100, 1e-100))
    params = {'x': x}
    _check(params)
    # Note
    # ----
    # Numpy 1.10 output is:
    # [[ 0. -0.]
    #  [-0.  0.]]
    #
    # Numpy 1.11+ output is:
    # [[ 1. -1.]
    #  [-1.  1.]]
    #
    # Numba implementation replicates Numpy 1.11+ behaviour
@unittest.skipUnless(np_version >= (1, 10), "cov needs Numpy 1.10+")
@needs_blas
def test_cov_edge_cases(self):
    """np.cov edge cases, including an out-of-range (but legal) ddof."""
    pyfunc = cov
    self.cov_corrcoef_edge_cases(pyfunc, first_arg_name='m')
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14)
    # invalid ddof
    m = np.array([[0, 2], [1, 1], [2, 0]]).T
    params = {'m': m, 'ddof': 5}
    _check(params)
@unittest.skipUnless(np_version >= (1, 10), "cov needs Numpy 1.10+")
@needs_blas
def test_cov_exceptions(self):
    """np.cov error paths: >2-D inputs, incompatible m/y, single-row m."""
    pyfunc = cov
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()

    def _check_m(m):
        with self.assertTypingError() as raises:
            cfunc(m)
        self.assertIn('m has more than 2 dimensions', str(raises.exception))

    m = np.ones((5, 6, 7))
    _check_m(m)
    m = ((((1, 2, 3), (2, 2, 2)),),)
    _check_m(m)
    m = [[[5, 6, 7]]]
    _check_m(m)

    def _check_y(m, y):
        with self.assertTypingError() as raises:
            cfunc(m, y=y)
        self.assertIn('y has more than 2 dimensions', str(raises.exception))

    m = np.ones((5, 6))
    y = np.ones((5, 6, 7))
    _check_y(m, y)
    m = np.array((1.1, 2.2, 1.1))
    y = (((1.2, 2.2, 2.3),),)
    _check_y(m, y)

    m = np.arange(3)
    y = np.arange(4)
    with self.assertRaises(ValueError) as raises:
        cfunc(m, y=y)
    self.assertIn('m and y have incompatible dimensions', str(raises.exception))
    # Numpy raises ValueError: all the input array dimensions except for the
    # concatenation axis must match exactly.

    m = np.array([-2.1, -1, 4.3]).reshape(1, 3)
    with self.assertRaises(RuntimeError) as raises:
        cfunc(m)
    self.assertIn('2D array containing a single row is unsupported', str(raises.exception))
@unittest.skipUnless(np_version >= (1, 12), "ediff1d needs Numpy 1.12+")
def test_ediff1d_basic(self):
    """np.ediff1d over array variations with to_begin/to_end combinations."""
    pyfunc = ediff1d
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)

    def to_variations(a):
        # choices for to_begin / to_end: absent, same dtype, different dtype
        yield None
        yield a
        yield a.astype(np.int16)

    def ary_variations(a):
        yield a
        yield a.reshape(3, 2, 2)
        yield a.astype(np.int32)

    for ary in ary_variations(np.linspace(-2, 7, 12)):
        params = {'ary': ary}
        _check(params)
        for a in to_variations(ary):
            params = {'ary': ary, 'to_begin': a}
            _check(params)
            params = {'ary': ary, 'to_end': a}
            _check(params)
            for b in to_variations(ary):
                params = {'ary': ary, 'to_begin': a, 'to_end': b}
                _check(params)
@unittest.skipUnless(np_version >= (1, 12), "ediff1d needs Numpy 1.12+")
def test_ediff1d_edge_cases(self):
    """np.ediff1d edge cases, with NumPy-version-dependent casting behaviour.

    NOTE: NumPy 1.16 has a variety of behaviours for type conversion, see
    https://github.com/numpy/numpy/issues/13103, as this is not resolved
    Numba replicates behaviours for <= 1.15 and conversion in 1.16.0 for
    finite inputs.
    """
    pyfunc = ediff1d
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)

    def _check_raises_type_error(params, arg):
        # Numba raises at typing time, NumPy >= 1.16 raises ValueError
        with self.assertRaises(TypingError) as raises:
            cfunc(**params)
        msg = 'dtype of %s must be compatible with input ary' % arg
        self.assertIn(msg, str(raises.exception))
        with self.assertRaises(ValueError) as raises:
            pyfunc(**params)
        excstr = str(raises.exception)
        self.assertIn("cannot convert", excstr)
        self.assertIn("to array with dtype", excstr)
        self.assertIn("as required for input ary", excstr)

    def input_variations():
        yield ((1, 2, 3), (4, 5, 6))
        yield [4, 5, 6]
        yield np.array([])
        yield ()
        if np_version < (1, 16):
            # non-finite inputs only round-trip cleanly pre-1.16
            yield np.array([np.nan, np.inf, 4, -np.inf, 3.142])
            parts = np.array([np.nan, 2, np.nan, 4, 5, 6, 7, 8, 9])
            a = parts + 1j * parts[::-1]
            yield a.reshape(3, 3)

    for i in input_variations():
        params = {'ary': i, 'to_end': i, 'to_begin': i}
        _check(params)
    # to_end / to_begin are boolean
    params = {'ary': [1], 'to_end': (False,), 'to_begin': (True, False)}
    _check(params)
    ## example of unsafe type casting (np.nan to np.int32)
    ## fixed here: https://github.com/numpy/numpy/pull/12713 for np 1.16
    to_begin = np.array([1, 2, 3.142, np.nan, 5, 6, 7, -8, np.nan])
    params = {'ary': np.arange(-4, 6), 'to_begin': to_begin}
    if np_version < (1, 16):
        _check(params)
    else:
        # np 1.16 raises, cannot cast float64 array to intp array
        _check_raises_type_error(params, 'to_begin')
    # scalar inputs
    params = {'ary': 3.142}
    _check(params)
    params = {'ary': 3, 'to_begin': 3.142}
    if np_version < (1, 16):
        _check(params)
    else:
        _check_raises_type_error(params, 'to_begin')
    # now use 2 floats
    params = {'ary': 3., 'to_begin': 3.142}
    _check(params)
    params = {'ary': np.arange(-4, 6), 'to_begin': -5, 'to_end': False}
    if IS_WIN32 and not IS_32BITS and np_version >= (1, 16):
        # XFAIL on 64-bits windows + numpy 1.16. See #3898
        with self.assertRaises(TypingError) as raises:
            _check(params)
        expected_msg = "dtype of to_begin must be compatible with input ary"
        self.assertIn(expected_msg, str(raises.exception))
    else:
        _check(params)
    # the following would fail on one of the BITS32 builds (difference in
    # overflow handling):
    # params = {'ary': np.array([5, 6], dtype=np.int16), 'to_end': [1e100]}
    # _check(params)
@unittest.skipUnless(np_version >= (1, 12), "ediff1d needs Numpy 1.12+")
def test_ediff1d_exceptions(self):
    """np.ediff1d: boolean input is unsupported (matching NumPy)."""
    pyfunc = ediff1d
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    with self.assertTypingError() as e:
        cfunc(np.array((True, True, False)))
    msg = "Boolean dtype is unsupported (as per NumPy)"
    assert msg in str(e.exception)
def test_roll_basic(self):
    """np.roll over array/sequence/scalar inputs and many shift values."""
    pyfunc = roll
    cfunc = jit(nopython=True)(pyfunc)

    def a_variations():
        yield np.arange(7)
        yield np.arange(3 * 4 * 5).reshape(3, 4, 5)
        yield [1.1, 2.2, 3.3]
        yield (True, False, True)
        yield False
        yield 4
        yield (9,)
        yield np.asfortranarray(np.array([[1.1, np.nan], [np.inf, 7.8]]))
        yield np.array([])
        yield ()

    def shift_variations():
        # booleans plus a range of positive/negative integer shifts
        return itertools.chain.from_iterable(((True, False), range(-10, 10)))

    for a in a_variations():
        for shift in shift_variations():
            expected = pyfunc(a, shift)
            got = cfunc(a, shift)
            self.assertPreciseEqual(expected, got)
def test_roll_exceptions(self):
    """np.roll: non-integer shift fails at typing time."""
    pyfunc = roll
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    for shift in 1.1, (1, 2):
        with self.assertTypingError() as e:
            cfunc(np.arange(10), shift)
        msg = "shift must be an integer"
        assert msg in str(e.exception)
def test_extract_basic(self):
    """np.extract over many condition/array shape and type combinations."""
    pyfunc = extract
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    a = np.arange(10)
    self.rnd.shuffle(a)
    for threshold in range(-3, 13):
        cond = a > threshold
        _check({'condition': cond, 'arr': a})
    a = np.arange(60).reshape(4, 5, 3)
    cond = a > 11.2
    _check({'condition': cond, 'arr': a})
    a = ((1, 2, 3), (3, 4, 5), (4, 5, 6))
    cond = np.eye(3).flatten()
    _check({'condition': cond, 'arr': a})
    a = [1.1, 2.2, 3.3, 4.4]
    cond = [1, 1, 0, 1]
    _check({'condition': cond, 'arr': a})
    # conditions built from mixed truthy/falsy scalar types
    a = np.linspace(-2, 10, 6)
    element_pool = (True, False, np.nan, -1, -1.0, -1.2, 1, 1.0, 1.5j)
    for cond in itertools.combinations_with_replacement(element_pool, 4):
        _check({'condition': cond, 'arr': a})
        _check({'condition': np.array(cond).reshape(2, 2), 'arr': a})
    a = np.array([1, 2, 3])
    cond = np.array([])
    _check({'condition': cond, 'arr': a})
    # condition may be longer than arr as long as the extra entries are falsy
    a = np.array([1, 2, 3])
    cond = np.array([1, 0, 1, 0])  # but [1, 0, 1, 0, 1] raises
    _check({'condition': cond, 'arr': a})
    a = np.array([[1, 2, 3], [4, 5, 6]])
    cond = [1, 0, 1, 0, 1, 0]  # but [1, 0, 1, 0, 1, 0, 1] raises
    _check({'condition': cond, 'arr': a})
    a = np.array([[1, 2, 3], [4, 5, 6]])
    cond = np.array([1, 0, 1, 0, 1, 0, 0, 0]).reshape(2, 2, 2)
    _check({'condition': cond, 'arr': a})
    a = np.asfortranarray(np.arange(60).reshape(3, 4, 5))
    cond = np.repeat((0, 1), 30)
    _check({'condition': cond, 'arr': a})
    _check({'condition': cond, 'arr': a[::-1]})
    # 0-D and scalar inputs
    a = np.array(4)
    for cond in 0, 1:
        _check({'condition': cond, 'arr': a})
    a = 1
    cond = 1
    _check({'condition': cond, 'arr': a})
    a = np.array(1)
    cond = np.array([True, False])
    _check({'condition': cond, 'arr': a})
    # complex condition values
    a = np.arange(4)
    cond = np.array([1, 0, 1, 0, 0, 0]).reshape(2, 3) * 1j
    _check({'condition': cond, 'arr': a})
def test_extract_exceptions(self):
    """np.extract error paths: empty arr, condition longer with truthy tail."""
    pyfunc = extract
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    a = np.array([])
    cond = np.array([1, 2, 3])
    with self.assertRaises(ValueError) as e:
        cfunc(cond, a)
    self.assertIn('Cannot extract from an empty array', str(e.exception))

    def _check(cond, a):
        # condition longer than arr with truthy overhang must raise
        msg = 'condition shape inconsistent with arr shape'
        with self.assertRaises(ValueError) as e:
            cfunc(cond, a)
        self.assertIn(msg, str(e.exception))

    a = np.array([[1, 2, 3], [1, 2, 3]])
    cond = [1, 0, 1, 0, 1, 0, 1]
    _check(cond, a)
    a = np.array([1, 2, 3])
    cond = np.array([1, 0, 1, 0, 1])
    _check(cond, a)
    a = np.array(60)  # note, this is 0D
    cond = 0, 1
    _check(cond, a)
    a = np.arange(4)
    cond = np.array([True, False, False, False, True])
    _check(cond, a)
    a = np.arange(4)
    cond = np.array([True, False, True, False, False, True, False])
    _check(cond, a)
def test_np_trapz_basic(self):
    """np.trapz with only y supplied, over many input kinds."""
    pyfunc = np_trapz
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    y = [1, 2, 3]
    _check({'y': y})
    y = (3, 1, 2, 2, 2)
    _check({'y': y})
    y = np.arange(15).reshape(3, 5)
    _check({'y': y})
    y = np.linspace(-10, 10, 60).reshape(4, 3, 5)
    _check({'y': y}, abs_tol=1e-13)
    # shuffle along the first axis and re-check
    self.rnd.shuffle(y)
    _check({'y': y}, abs_tol=1e-13)
    y = np.array([])
    _check({'y': y})
    y = np.array([3.142, np.nan, np.inf, -np.inf, 5])
    _check({'y': y})
    y = np.arange(20) + np.linspace(0, 10, 20) * 1j
    _check({'y': y})
    y = np.array([], dtype=np.complex128)
    _check({'y': y})
    y = (True, False, True)
    _check({'y': y})
def test_np_trapz_x_basic(self):
    """np.trapz with y and x supplied, incl. broadcastable x shapes."""
    pyfunc = np_trapz_x
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    y = [1, 2, 3]
    x = [4, 6, 8]
    _check({'y': y, 'x': x})
    # x shorter than y: NumPy integrates over the overlapping region
    y = [1, 2, 3, 4, 5]
    x = (4, 6)
    _check({'y': y, 'x': x})
    y = (1, 2, 3, 4, 5)
    x = [4, 5, 6, 7, 8]
    _check({'y': y, 'x': x})
    y = np.array([1, 2, 3, 4, 5])
    x = [4, 4]
    _check({'y': y, 'x': x})
    y = np.array([])
    x = np.array([2, 3])
    _check({'y': y, 'x': x})
    y = (1, 2, 3, 4, 5)
    x = None
    _check({'y': y, 'x': x})
    y = np.arange(20).reshape(5, 4)
    x = np.array([4, 5])
    _check({'y': y, 'x': x})
    y = np.arange(20).reshape(5, 4)
    x = np.array([4, 5, 6, 7])
    _check({'y': y, 'x': x})
    y = np.arange(60).reshape(5, 4, 3)
    x = np.array([4, 5])
    _check({'y': y, 'x': x})
    y = np.arange(60).reshape(5, 4, 3)
    x = np.array([4, 5, 7])
    _check({'y': y, 'x': x})
    y = np.arange(60).reshape(5, 4, 3)
    self.rnd.shuffle(y)
    x = y + 1.1
    self.rnd.shuffle(x)
    _check({'y': y, 'x': x})
    # complex-valued x
    y = np.arange(20)
    x = y + np.linspace(0, 10, 20) * 1j
    _check({'y': y, 'x': x})
    y = np.array([1, 2, 3])
    x = np.array([1 + 1j, 1 + 2j])
    _check({'y': y, 'x': x})
@unittest.skip('NumPy behaviour questionable')
def test_trapz_numpy_questionable(self):
    """np.trapz on boolean input — NumPy's own behaviour is disputed.

    See https://github.com/numpy/numpy/issues/12858
    """
    pyfunc = np_trapz
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    # passes (NumPy and Numba return 2.0)
    # NOTE: originally `.astype(np.int)`; `np.int` was an alias for the
    # builtin `int` and was removed in NumPy 1.20, so use `int` directly
    # (identical behaviour, no AttributeError on modern NumPy).
    y = np.array([True, False, True, True]).astype(int)
    _check({'y': y})
    # fails (NumPy returns 1.5; Numba returns 2.0)
    y = np.array([True, False, True, True])
    _check({'y': y})
def test_np_trapz_dx_basic(self):
    """np.trapz with y and dx supplied (scalar, array, non-finite dx)."""
    pyfunc = np_trapz_dx
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    y = [1, 2, 3]
    dx = 2
    _check({'y': y, 'dx': dx})
    y = [1, 2, 3, 4, 5]
    dx = [1, 4, 5, 6]
    _check({'y': y, 'dx': dx})
    y = [1, 2, 3, 4, 5]
    dx = [1, 4, 5, 6]
    _check({'y': y, 'dx': dx})
    y = np.linspace(-2, 5, 10)
    dx = np.nan
    _check({'y': y, 'dx': dx})
    y = np.linspace(-2, 5, 10)
    dx = np.inf
    _check({'y': y, 'dx': dx})
    y = np.linspace(-2, 5, 10)
    dx = np.linspace(-2, 5, 9)
    _check({'y': y, 'dx': dx}, abs_tol=1e-13)
    y = np.arange(60).reshape(4, 5, 3) * 1j
    dx = np.arange(40).reshape(4, 5, 2)
    _check({'y': y, 'dx': dx})
    x = np.arange(-10, 10, .1)
    r = cfunc(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
    # check integral of normal equals 1
    np.testing.assert_almost_equal(r, 1, 7)
    y = np.arange(20)
    dx = 1j
    _check({'y': y, 'dx': dx})
    y = np.arange(20)
    dx = np.array([5])
    _check({'y': y, 'dx': dx})
def test_np_trapz_x_dx_basic(self):
    """np.trapz with both x and dx supplied: dx must be ignored."""
    pyfunc = np_trapz_x_dx
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    # dx should be ignored
    for dx in (None, 2, np.array([1, 2, 3, 4, 5])):
        y = [1, 2, 3]
        x = [4, 6, 8]
        _check({'y': y, 'x': x, 'dx': dx})
        # x with fewer sample points than y
        y = [1, 2, 3, 4, 5]
        x = [4, 6]
        _check({'y': y, 'x': x, 'dx': dx})
        y = [1, 2, 3, 4, 5]
        x = [4, 5, 6, 7, 8]
        _check({'y': y, 'x': x, 'dx': dx})
        # multi-dimensional shuffled y with a nan planted in x
        y = np.arange(60).reshape(4, 5, 3)
        self.rnd.shuffle(y)
        x = y * 1.1
        x[2, 2, 2] = np.nan
        _check({'y': y, 'x': x, 'dx': dx})
def test_np_trapz_x_dx_exceptions(self):
    """Broadcast failures and invalid y inputs must raise."""
    pyfunc = np_trapz_x_dx
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    def check_not_ok(params):
        # The jitted call must fail to broadcast y against x / dx.
        with self.assertRaises(ValueError) as e:
            cfunc(*params)
        self.assertIn('unable to broadcast', str(e.exception))
    # x with an incompatible number of sample points
    y = [1, 2, 3, 4, 5]
    for x in [4, 5, 6, 7, 8, 9], [4, 5, 6]:
        check_not_ok((y, x, 1.0))
    # multi-dimensional x with a mismatched trailing dimension
    y = np.arange(60).reshape(3, 4, 5)
    x = np.arange(36).reshape(3, 4, 3)
    check_not_ok((y, x, 1.0))
    y = np.arange(60).reshape(3, 4, 5)
    x = np.array([4, 5, 6, 7])
    check_not_ok((y, x, 1.0))
    # dx arrays that cannot broadcast against the intervals of y
    y = [1, 2, 3, 4, 5]
    dx = np.array([1.0, 2.0])
    check_not_ok((y, None, dx))
    y = np.arange(60).reshape(3, 4, 5)
    dx = np.arange(60).reshape(3, 4, 5)
    check_not_ok((y, None, dx))
    # 0d array y is rejected at typing time
    with self.assertTypingError() as e:
        y = np.array(4)
        check_not_ok((y, None, 1.0))
    self.assertIn('y cannot be 0D', str(e.exception))
    # scalar y is rejected at typing time
    for y in 5, False, np.nan:
        with self.assertTypingError() as e:
            cfunc(y, None, 1.0)
        self.assertIn('y cannot be a scalar', str(e.exception))
@unittest.skipUnless(np_version >= (1, 10), "interp needs Numpy 1.10+")
def test_interp_basic(self):
    """np.interp over a broad matrix of input types and shapes."""
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-10)
    # simple linear calibration
    x = np.linspace(-5, 5, 25)
    xp = np.arange(-4, 8)
    fp = xp + 1.5
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    self.rnd.shuffle(x)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    self.rnd.shuffle(fp)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # alg changed in 1.16 and other things were found not-quite-right
    # in inf/nan handling, skip for now
    x[:5] = np.nan
    x[-5:] = np.inf
    self.rnd.shuffle(x)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    fp[:5] = np.nan
    fp[-5:] = -np.inf
    self.rnd.shuffle(fp)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # integer inputs
    x = np.arange(-4, 8)
    xp = x + 1
    fp = x + 2
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # tuples accepted for all three arguments
    x = (2.2, 3.3, -5.0)
    xp = (2, 3, 4)
    fp = (5, 6, 7)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    x = ((2.2, 3.3, -5.0), (1.2, 1.3, 4.0))
    xp = np.linspace(-4, 4, 10)
    fp = np.arange(-5, 5)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # Fortran-ordered x with non-finite entries
    x = np.array([1.4, np.nan, np.inf, -np.inf, 0.0, -9.1])
    x = x.reshape(3, 2, order='F')
    xp = np.linspace(-4, 4, 10)
    fp = np.arange(-5, 5)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # scalar x
    for x in range(-2, 4):
        xp = [0, 1, 2]
        fp = (3, 4, 5)
        _check(params={'x': x, 'xp': xp, 'fp': fp})
    # empty x
    x = np.array([])
    xp = [0, 1, 2]
    fp = (3, 4, 5)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # multi-dimensional x
    x = np.linspace(0, 25, 60).reshape(3, 4, 5)
    xp = np.arange(20)
    fp = xp - 10
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # nan scalar x
    x = np.nan
    xp = np.arange(5)
    fp = np.full(5, np.nan)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    x = np.nan
    xp = [3]
    fp = [4]
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # aliased arguments
    x = np.arange(-4, 8)
    xp = x
    fp = x
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # boolean x
    x = [True, False]
    xp = np.arange(-4, 8)
    fp = xp
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # infinite x against finite calibration
    x = [-np.inf, -1.0, 0.0, 1.0, np.inf]
    xp = np.arange(-4, 8)
    fp = xp * 2.2
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # infinite calibration points
    x = np.linspace(-10, 10, 10)
    xp = np.array([-np.inf, -1.0, 0.0, 1.0, np.inf])
    fp = xp * 2.2
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # constant fp
    x = self.rnd.randn(100)
    xp = np.linspace(-3, 3, 100)
    fp = np.full(100, fill_value=3.142)
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # all x entirely outside the calibration range, both sides
    for factor in 1, -1:
        x = np.array([5, 6, 7]) * factor
        xp = [1, 2]
        fp = [3, 4]
        _check(params={'x': x, 'xp': xp, 'fp': fp})
    # boolean fp
    x = 1
    xp = [1]
    fp = [True]
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # identity interpolation
    x = np.linspace(0, 1, 5)
    y = np.linspace(0, 1, 5)
    x0 = np.linspace(0, 1, 50)
    out = cfunc(x0, x, y)
    np.testing.assert_almost_equal(out, x0)
    # exact hits on the calibration points
    x = np.array([1, 2, 3, 4])
    xp = np.array([1, 2, 3, 4])
    fp = np.array([1, 2, 3.01, 4])
    _check(params={'x': x, 'xp': xp, 'fp': fp})
    # single infinite calibration value
    xp = [1]
    fp = [np.inf]
    _check(params={'x': 1, 'xp': xp, 'fp': fp})
    # alg changed in 1.16 and other things were found not-quite-right
    # in inf/nan handling, skip for now
    x = np.array([1, 2, 2.5, 3, 4])
    xp = np.array([1, 2, 3, 4])
    fp = np.array([1, 2, np.nan, 4])
    _check({'x': x, 'xp': xp, 'fp': fp})
    x = np.array([1, 1.5, 2, 2.5, 3, 4, 4.5, 5, 5.5])
    xp = np.array([1, 2, 3, 4, 5])
    fp = np.array([np.nan, 2, np.nan, 4, np.nan])
    _check({'x': x, 'xp': xp, 'fp': fp})
    x = np.array([1, 2, 2.5, 3, 4])
    xp = np.array([1, 2, 3, 4])
    fp = np.array([1, 2, np.inf, 4])
    _check({'x': x, 'xp': xp, 'fp': fp})
    x = np.array([1, 1.5, np.nan, 2.5, -np.inf, 4, 4.5, 5, np.inf, 0, 7])
    xp = np.array([1, 2, 3, 4, 5, 6])
    fp = np.array([1, 2, np.nan, 4, 3, np.inf])
    _check({'x': x, 'xp': xp, 'fp': fp})
    # nearly-identical sample points against a dense calibration
    x = np.array([3.10034867, 3.0999066, 3.10001529])
    xp = np.linspace(0, 10, 1 + 20000)
    fp = np.sin(xp / 2.0)
    _check({'x': x, 'xp': xp, 'fp': fp})
    # interpolated cosine must approximate the exact one
    x = self.rnd.uniform(0, 2 * np.pi, (100,))
    xp = np.linspace(0, 2 * np.pi, 1000)
    fp = np.cos(xp)
    exact = np.cos(x)
    got = cfunc(x, xp, fp)
    np.testing.assert_allclose(exact, got, atol=1e-5)
    # very dense calibration
    x = self.rnd.randn(10)
    xp = np.linspace(-10, 10, 1000)
    fp = np.ones_like(xp)
    _check({'x': x, 'xp': xp, 'fp': fp})
    # very sparse calibration
    x = self.rnd.randn(1000)
    xp = np.linspace(-10, 10, 10)
    fp = np.ones_like(xp)
    _check({'x': x, 'xp': xp, 'fp': fp})
def _make_some_values_non_finite(self, a):
    """Overwrite ~3% of `a` in place: 1% each of nan, -inf and inf."""
    p = a.size // 100
    np.put(a, self.rnd.choice(range(a.size), p, replace=False), np.nan)
    np.put(a, self.rnd.choice(range(a.size), p, replace=False), -np.inf)
    np.put(a, self.rnd.choice(range(a.size), p, replace=False), np.inf)
def arrays(self, ndata):
    """Yield sample-point grids of varying density, stretch and noise.

    Used by the interp stress tests; each yielded array is a set of x
    values to interpolate against an `ndata`-sized calibration.
    """
    # much_finer_grid
    yield np.linspace(2.0, 7.0, 1 + ndata * 5)
    # finer_grid
    yield np.linspace(2.0, 7.0, 1 + ndata)
    # similar_grid
    yield np.linspace(2.1, 6.8, 1 + ndata // 2)
    # coarser_grid
    yield np.linspace(2.1, 7.5, 1 + ndata // 2)
    # much_coarser_grid
    yield np.linspace(1.1, 9.5, 1 + ndata // 5)
    # finer_stretched_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) * 1.09
    # similar_stretched_grid
    yield np.linspace(3.1, 8.3, 1 + ndata // 2) * 1.09
    # finer_compressed_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) * 0.91
    # similar_compressed_grid
    yield np.linspace(3.1, 8.3, 1 + ndata // 2) * 0.91
    # warped_grid
    yield np.linspace(3.1, 5.3, 1 + ndata // 2) + 0.3 * np.sin(
        np.arange(1 + ndata / 2) * np.pi / (1 + ndata / 2))
    # very_low_noise_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal(
        size=1 + ndata, scale=0.5 / ndata)
    # low_noise_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal(
        size=1 + ndata, scale=2.0 / ndata)
    # med_noise_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal(
        size=1 + ndata, scale=5.0 / ndata)
    # high_noise_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal(
        size=1 + ndata, scale=20.0 / ndata)
    # very_high_noise_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal(
        size=1 + ndata, scale=50.0 / ndata)
    # extreme_noise_grid
    yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal(
        size=1 + ndata, scale=200.0 / ndata)
    # random_fine_grid
    yield self.rnd.rand(1 + ndata) * 9.0 + 0.6
    # random_grid
    yield self.rnd.rand(1 + ndata * 2) * 4.0 + 1.3
@unittest.skipUnless(np_version >= (1, 10), "interp needs Numpy 1.10+")
def test_interp_stress_tests(self):
    """Compare jitted interp against NumPy over many grid variations."""
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    ndata = 20000
    xp = np.linspace(0, 10, 1 + ndata)
    fp = np.sin(xp / 2.0)
    for x in self.arrays(ndata):
        atol = 1e-14  # using abs_tol as otherwise fails on 32bit builds
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
        # no longer require xp to be monotonically increasing
        # (in keeping with numpy) even if the output might not
        # be meaningful; shuffle all inputs
        self.rnd.shuffle(x)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
        self.rnd.shuffle(xp)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
        self.rnd.shuffle(fp)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
        # add some values non finite
        self._make_some_values_non_finite(x)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
        self._make_some_values_non_finite(xp)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
        self._make_some_values_non_finite(fp)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        self.assertPreciseEqual(expected, got, abs_tol=atol)
@unittest.skipUnless(np_version >= (1, 12), "complex interp: Numpy 1.12+")
def test_interp_complex_stress_tests(self):
    """Stress complex-valued interp with non-finite real/imag parts."""
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    ndata = 2000
    xp = np.linspace(0, 10, 1 + ndata)
    # Build a complex fp whose real and imaginary parts each contain a
    # shuffled mix of inf / -inf / nan values.
    real = np.sin(xp / 2.0)
    real[:200] = self.rnd.choice([np.inf, -np.inf, np.nan], 200)
    self.rnd.shuffle(real)
    imag = np.cos(xp / 2.0)
    imag[:200] = self.rnd.choice([np.inf, -np.inf, np.nan], 200)
    self.rnd.shuffle(imag)
    fp = real + 1j * imag
    for x in self.arrays(ndata):
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        np.testing.assert_allclose(expected, got, equal_nan=True)
        # Shuffle all inputs and re-evaluate. The comparison below used
        # to reuse the stale `expected` / `got` values computed above,
        # so the shuffled configuration was never actually exercised.
        self.rnd.shuffle(x)
        self.rnd.shuffle(xp)
        self.rnd.shuffle(fp)
        expected = pyfunc(x, xp, fp)
        got = cfunc(x, xp, fp)
        np.testing.assert_allclose(expected, got, equal_nan=True)
@unittest.skipUnless(np_version >= (1, 10), "interp needs Numpy 1.10+")
def test_interp_exceptions(self):
    """Invalid interp inputs raise the documented errors."""
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    # empty calibration arrays
    x = np.array([1, 2, 3])
    xp = np.array([])
    fp = np.array([])
    with self.assertRaises(ValueError) as e:
        cfunc(x, xp, fp)
    msg = "array of sample points is empty"
    self.assertIn(msg, str(e.exception))
    # mismatched xp / fp lengths
    x = 1
    xp = np.array([1, 2, 3])
    fp = np.array([1, 2])
    with self.assertRaises(ValueError) as e:
        cfunc(x, xp, fp)
    msg = "fp and xp are not of the same size."
    self.assertIn(msg, str(e.exception))
    # multi-dimensional xp rejected at typing time
    x = 1
    xp = np.arange(6).reshape(3, 2)
    fp = np.arange(6)
    with self.assertTypingError() as e:
        cfunc(x, xp, fp)
    msg = "xp must be 1D"
    self.assertIn(msg, str(e.exception))
    # multi-dimensional fp rejected at typing time
    x = 1
    xp = np.arange(6)
    fp = np.arange(6).reshape(3, 2)
    with self.assertTypingError() as e:
        cfunc(x, xp, fp)
    msg = "fp must be 1D"
    self.assertIn(msg, str(e.exception))
    # complex x is not supported
    x = 1 + 1j
    xp = np.arange(6)
    fp = np.arange(6)
    with self.assertTypingError() as e:
        cfunc(x, xp, fp)
    complex_dtype_msg = (
        "Cannot cast array data from complex dtype "
        "to float64 dtype"
    )
    self.assertIn(complex_dtype_msg, str(e.exception))
    # complex xp is not supported either
    x = 1
    xp = (np.arange(6) + 1j).astype(np.complex64)
    fp = np.arange(6)
    with self.assertTypingError() as e:
        cfunc(x, xp, fp)
    self.assertIn(complex_dtype_msg, str(e.exception))
@unittest.skipUnless((1, 10) <= np_version < (1, 12), 'complex interp: Numpy 1.12+')
def test_interp_pre_112_exceptions(self):
    """On NumPy < 1.12, complex fp must be rejected at typing time."""
    compiled = jit(nopython=True)(interp)
    # Exceptions leak references
    self.disable_leak_check()
    sample_points = np.arange(6)
    calib_x = np.arange(6)
    calib_y = np.arange(6) * 1j
    with self.assertTypingError() as raises:
        compiled(sample_points, calib_x, calib_y)
    expected_msg = (
        "Cannot cast array data from complex dtype "
        "to float64 dtype"
    )
    self.assertIn(expected_msg, str(raises.exception))
@unittest.skipUnless(np_version >= (1, 10), "interp needs Numpy 1.10+")
def test_interp_non_finite_calibration(self):
    """Non-finite endpoints in the calibration arrays.

    Examples taken from https://github.com/numpy/numpy/issues/12951.
    """
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    _check = partial(self._check_output, pyfunc, cfunc)
    # infinite endpoints in fp
    _check({'x': np.array([0.2, 9.5]),
            'xp': np.array([0, 1, 9, 10]),
            'fp': np.array([-np.inf, 0.1, 0.9, np.inf])})
    # infinite endpoints in xp
    _check({'x': np.array([0.2, 9.5]),
            'xp': np.array([-np.inf, 1, 9, np.inf]),
            'fp': np.array([0, 0.1, 0.9, 1])})
@unittest.skipUnless(np_version >= (1, 10), "interp needs Numpy 1.10+")
def test_interp_supplemental_tests(self):
    """Checks inspired by NumPy's own TestInterp suite."""
    # inspired by class TestInterp
    # https://github.com/numpy/numpy/blob/f5b6850f231/numpy/lib/tests/test_function_base.py
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    # constant fp: any in-range or out-of-range query returns 1
    for size in range(1, 10):
        xp = np.arange(size, dtype=np.double)
        yp = np.ones(size, dtype=np.double)
        incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
        decpts = incpts[::-1]
        incres = cfunc(incpts, xp, yp)
        decres = cfunc(decpts, xp, yp)
        inctgt = np.array([1, 1, 1, 1], dtype=float)
        dectgt = inctgt[::-1]
        np.testing.assert_almost_equal(incres, inctgt)
        np.testing.assert_almost_equal(decres, dectgt)
    # identity interpolation for various scalar query types
    x = np.linspace(0, 1, 5)
    y = np.linspace(0, 1, 5)
    x0 = 0
    np.testing.assert_almost_equal(cfunc(x0, x, y), x0)
    x0 = 0.3
    np.testing.assert_almost_equal(cfunc(x0, x, y), x0)
    x0 = np.float32(0.3)
    np.testing.assert_almost_equal(cfunc(x0, x, y), x0)
    x0 = np.float64(0.3)
    np.testing.assert_almost_equal(cfunc(x0, x, y), x0)
    x0 = np.nan
    np.testing.assert_almost_equal(cfunc(x0, x, y), x0)
    # 0d array query
    x = np.linspace(0, 1, 5)
    y = np.linspace(0, 1, 5)
    x0 = np.array(0.3)
    np.testing.assert_almost_equal(cfunc(x0, x, y), x0)
    # sin calibration: interp at pi should give ~0
    xp = np.arange(0, 10, 0.0001)
    fp = np.sin(xp)
    np.testing.assert_almost_equal(cfunc(np.pi, xp, fp), 0.0)
@unittest.skipUnless(np_version >= (1, 12), "complex interp: Numpy 1.12+")
def test_interp_supplemental_complex_tests(self):
    """Complex fp interpolation at a scalar query point.

    Inspired by class TestInterp:
    https://github.com/numpy/numpy/blob/f5b6850f231/numpy/lib/tests/test_function_base.py

    The skip reason previously said "Numpy 1.10+" although the guard
    requires 1.12 (matching the other complex-interp tests); the message
    is corrected to 1.12+.
    """
    pyfunc = interp
    cfunc = jit(nopython=True)(pyfunc)
    x = np.linspace(0, 1, 5)
    # linear complex calibration, so the exact interpolated value at x0
    # is analytically known
    y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j
    x0 = 0.3
    y0 = x0 + (1 + x0) * 1.0j
    np.testing.assert_almost_equal(cfunc(x0, x, y), y0)
def test_asarray(self):
    """np.asarray over all supported input kinds, plus pass-through."""
    def input_variations():
        """
        To quote from: https://docs.scipy.org/doc/numpy/reference/generated/numpy.asarray.html
        Input data, in any form that can be converted to an array.
        This includes:
        * lists
        * lists of tuples
        * tuples
        * tuples of tuples
        * tuples of lists
        * ndarrays
        """
        yield 1j
        yield 1.2
        yield False
        yield 1
        yield [1, 2, 3]
        yield [(1, 2, 3), (1, 2, 3)]
        yield (1, 2, 3)
        yield ((1, 2, 3), (1, 2, 3))
        yield ([1, 2, 3], [1, 2, 3])
        yield np.array([])
        yield np.arange(4)
        yield np.arange(12).reshape(3, 4)
        yield np.arange(12).reshape(3, 4).T
    # used to check that if the input is already an array and the dtype is
    # the same as that of the input/omitted then the array itself is
    # returned.
    def check_pass_through(jitted, expect_same, params):
        returned = jitted(**params)
        if expect_same:
            self.assertTrue(returned is params['a'])
        else:
            self.assertTrue(returned is not params['a'])
            # should be numerically the same, just different dtype
            np.testing.assert_allclose(returned, params['a'])
            self.assertTrue(returned.dtype == params['dtype'])
    for pyfunc in [asarray, asarray_kws]:
        cfunc = jit(nopython=True)(pyfunc)
        _check = partial(self._check_output, pyfunc, cfunc)
        # run every input variation, with and without an explicit dtype
        for x in input_variations():
            params = {'a': x}
            if 'kws' in pyfunc.__name__:
                for dt in [None, np.complex128]:
                    params['dtype'] = dt
                    _check(params)
            else:
                _check(params)
        # check the behaviour over a dtype change (or not!)
        x = np.arange(10, dtype=np.float32)
        params = {'a': x}
        if 'kws' in pyfunc.__name__:
            params['dtype'] = None
            check_pass_through(cfunc, True, params)
            params['dtype'] = np.complex128
            check_pass_through(cfunc, False, params)
            params['dtype'] = np.float32
            check_pass_through(cfunc, True, params)
        else:
            check_pass_through(cfunc, True, params)
def test_repeat(self):
    """np.repeat / ndarray.repeat over many dtypes and repeat shapes."""
    # np.repeat(a, repeats)
    np_pyfunc = np_repeat
    np_nbfunc = njit(np_pyfunc)
    # a.repeat(repeats)
    array_pyfunc = array_repeat
    array_nbfunc = njit(array_pyfunc)
    for pyfunc, nbfunc in ((np_pyfunc, np_nbfunc),
                           (array_pyfunc, array_nbfunc)):
        def check(a, repeats):
            self.assertPreciseEqual(pyfunc(a, repeats), nbfunc(a, repeats))
        # test array arguments
        target_numpy_values = [
            np.ones(1),
            np.arange(1000),
            np.array([[0, 1], [2, 3]]),
            np.array([]),
            np.array([[], []]),
        ]
        target_numpy_types = [
            np.uint32,
            np.int32,
            np.uint64,
            np.int64,
            np.float32,
            np.float64,
            np.complex64,
            np.complex128,
        ]
        # cartesian product of every value with every dtype
        target_numpy_inputs = (np.array(a,dtype=t) for a,t in
                               itertools.product(target_numpy_values,
                                                 target_numpy_types))
        # scalars and sequences are accepted too
        target_non_numpy_inputs = [
            1,
            1.0,
            True,
            1j,
            [0, 1, 2],
            (0, 1, 2),
        ]
        for i in itertools.chain(target_numpy_inputs, target_non_numpy_inputs):
            check(i, repeats=0)
            check(i, repeats=1)
            check(i, repeats=2)
            check(i, repeats=3)
            check(i, repeats=100)
        # check broadcasting when repeats is an array/list
        one = np.arange(1)
        for i in ([0], [1], [2]):
            check(one, repeats=i)
            check(one, repeats=np.array(i))
        two = np.arange(2)
        for i in ([0, 0], [0, 1], [1, 0], [0, 1], [1, 2], [2, 1], [2, 2]):
            check(two, repeats=i)
            check(two, repeats=np.array(i))
        check(two, repeats=np.array([2, 2], dtype=np.int32))
        check(np.arange(10), repeats=np.arange(10))
def test_repeat_exception(self):
    """Invalid repeat arguments raise the documented errors."""
    # np.repeat(a, repeats)
    np_pyfunc = np_repeat
    np_nbfunc = njit(np_pyfunc)
    # a.repeat(repeats)
    array_pyfunc = array_repeat
    array_nbfunc = njit(array_pyfunc)
    # Exceptions leak references
    self.disable_leak_check()
    for pyfunc, nbfunc in ((np_pyfunc, np_nbfunc),
                           (array_pyfunc, array_nbfunc)):
        # negative repeat argument
        with self.assertRaises(ValueError) as e:
            nbfunc(np.ones(1), -1)
        self.assertIn("negative dimensions are not allowed",
                      str(e.exception))
        # float repeat argument has custom error message
        with self.assertRaises(TypingError) as e:
            nbfunc(np.ones(1), 1.0)
        self.assertIn(
            "The repeats argument must be an integer "
            "or an array-like of integer dtype",
            str(e.exception))
        # negative repeat argument as array
        with self.assertRaises(ValueError) as e:
            nbfunc(np.ones(2), np.array([1, -1]))
        self.assertIn("negative dimensions are not allowed",
                      str(e.exception))
        # broadcasting error, repeats too large
        with self.assertRaises(ValueError) as e:
            nbfunc(np.ones(2), np.array([1, 1, 1]))
        self.assertIn("operands could not be broadcast together",
                      str(e.exception))
        # broadcasting error, repeats too small
        with self.assertRaises(ValueError) as e:
            nbfunc(np.ones(5), np.array([1, 1, 1, 1]))
        self.assertIn("operands could not be broadcast together",
                      str(e.exception))
        # float repeat argument has custom error message
        with self.assertRaises(TypingError) as e:
            nbfunc(np.ones(2), [1.0, 1.0])
        self.assertIn(
            "The repeats argument must be an integer "
            "or an array-like of integer dtype",
            str(e.exception))
        # booleans and strings are rejected at typing time
        for rep in [True, "a", "1"]:
            with self.assertRaises(TypingError):
                nbfunc(np.ones(1), rep)
def test_windowing(self):
    """Window functions: bartlett, blackman, hamming, hanning, kaiser."""
    def check_window(func):
        np_pyfunc = func
        np_nbfunc = njit(func)
        # valid integer window lengths, including the degenerate ones
        for M in [0, 1, 5, 12]:
            expected = np_pyfunc(M)
            got = np_nbfunc(M)
            self.assertPreciseEqual(expected, got)
        # non-integer M must be rejected at typing time.
        # BUG FIX: this used to call np_nbfunc(1.1) regardless of the
        # loop variable, so the 'a' and 1j cases were never exercised.
        for M in ['a', 1.1, 1j]:
            with self.assertRaises(TypingError) as raises:
                np_nbfunc(M)
            self.assertIn("M must be an integer", str(raises.exception))
    check_window(np_bartlett)
    check_window(np_blackman)
    check_window(np_hamming)
    check_window(np_hanning)
    # Test np.kaiser separately, since it takes a second (beta) argument
    np_pyfunc = np_kaiser
    np_nbfunc = njit(np_kaiser)
    for M in [0, 1, 5, 12]:
        for beta in [0.0, 5.0, 14.0]:
            expected = np_pyfunc(M, beta)
            got = np_nbfunc(M, beta)
            # 32-bit builds accumulate slightly more rounding error
            if IS_32BITS:
                self.assertPreciseEqual(expected, got, prec='double', ulps=2)
            else:
                self.assertPreciseEqual(expected, got, prec='exact')
    # invalid M for kaiser
    for M in ['a', 1.1, 1j]:
        with self.assertRaises(TypingError) as raises:
            np_nbfunc(M, 1.0)
        self.assertIn("M must be an integer", str(raises.exception))
    # invalid beta for kaiser
    for beta in ['a', 1j]:
        with self.assertRaises(TypingError) as raises:
            np_nbfunc(5, beta)
        self.assertIn("beta must be an integer or float", str(raises.exception))
class TestNPMachineParameters(TestCase):
    """Tests for np.finfo, np.iinfo and np.MachAr support."""
    # tests np.finfo, np.iinfo, np.MachAr
    # Source template used to build a jittable function with a hardcoded
    # numpy type; filled in by create_harcoded_variant().
    template = '''
def foo():
    ty = np.%s
    return np.%s(ty)
'''
    # 'bits' attribute only exists on NumPy >= 1.12
    bits = ('bits',) if np_version >= (1, 12) else ()

    def check(self, func, attrs, *args):
        """Compile `func` and compare each listed attr against pure Python."""
        pyfunc = func
        cfunc = jit(nopython=True)(pyfunc)
        expected = pyfunc(*args)
        got = cfunc(*args)
        # check result
        for attr in attrs:
            self.assertPreciseEqual(getattr(expected, attr),
                                    getattr(got, attr))

    def create_harcoded_variant(self, basefunc, ty):
        """Build a zero-arg function calling `basefunc` on a literal type.

        NOTE(review): the name keeps its historical 'harcoded' typo since
        it is part of the class's interface.
        """
        #create an instance of using the function with a hardcoded type
        #and eval it into existence, return the function for use
        tystr = ty.__name__
        basestr = basefunc.__name__
        funcstr = self.template % (tystr, basestr)
        eval(compile(funcstr, '<string>', 'exec'))
        return locals()['foo']

    def test_MachAr(self):
        attrs = ('ibeta', 'it', 'machep', 'eps', 'negep', 'epsneg', 'iexp',
                 'minexp', 'xmin', 'maxexp', 'xmax', 'irnd', 'ngrd',
                 'epsilon', 'tiny', 'huge', 'precision', 'resolution',)
        self.check(machar, attrs)

    def test_finfo(self):
        types = [np.float32, np.float64, np.complex64, np.complex128]
        attrs = self.bits + ('eps', 'epsneg', 'iexp', 'machep', 'max',
                             'maxexp', 'negep', 'nexp', 'nmant', 'precision',
                             'resolution', 'tiny',)
        for ty in types:
            self.check(finfo, attrs, ty(1))
            hc_func = self.create_harcoded_variant(np.finfo, ty)
            self.check(hc_func, attrs)
        # check unsupported attr raises
        with self.assertRaises(TypingError) as raises:
            cfunc = jit(nopython=True)(finfo_machar)
            cfunc(7.)
        msg = "Unknown attribute 'machar' of type finfo"
        self.assertIn(msg, str(raises.exception))
        # check invalid type raises
        with self.assertTypingError():
            cfunc = jit(nopython=True)(finfo)
            cfunc(np.int32(7))

    def test_iinfo(self):
        # check types and instances of types
        types = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16,
                 np.uint32, np.uint64]
        attrs = ('min', 'max') + self.bits
        for ty in types:
            self.check(iinfo, attrs, ty(1))
            hc_func = self.create_harcoded_variant(np.iinfo, ty)
            self.check(hc_func, attrs)
        # check invalid type raises
        with self.assertTypingError():
            cfunc = jit(nopython=True)(iinfo)
            cfunc(np.float64(7))
# --- file boundary: terran/tracking/face.py ---
import numpy as np
from filterpy.kalman import KalmanFilter
from scipy.optimize import linear_sum_assignment
from terran.face.detection import Detection, face_detection
def linear_assignment(cost_matrix):
    """Solve the assignment problem, mimicking Scikit-Learn v0.21's API.

    Returns a single (K, 2) array of (row, column) index pairs instead of
    the pair of index arrays produced by scipy.
    """
    rows_and_cols = np.asarray(linear_sum_assignment(cost_matrix))
    return np.transpose(rows_and_cols)
def iou(bbox_1, bbox_2):
    """Computes intersection over union between two bounding boxes.

    Parameters
    ----------
    bbox_1 : np.ndarray
        First bounding box, of the form (x_min, y_min, x_max, y_max).
    bbox_2 : np.ndarray
        Second bounding box, of the form (x_min, y_min, x_max, y_max).

    Returns
    -------
    float
        Intersection over union value between both bounding boxes.
    """
    # Corners of the intersection rectangle.
    left = np.maximum(bbox_1[0], bbox_2[0])
    top = np.maximum(bbox_1[1], bbox_2[1])
    right = np.minimum(bbox_1[2], bbox_2[2])
    bottom = np.minimum(bbox_1[3], bbox_2[3])

    # Clamp at zero so disjoint boxes yield an empty intersection.
    overlap = (
        np.maximum(0.0, right - left) * np.maximum(0.0, bottom - top)
    )

    area_1 = (bbox_1[2] - bbox_1[0]) * (bbox_1[3] - bbox_1[1])
    area_2 = (bbox_2[2] - bbox_2[0]) * (bbox_2[3] - bbox_2[1])

    # Union is the sum of areas minus the double-counted overlap.
    return overlap / (area_1 + area_2 - overlap)
def corners_to_center(bbox):
    """Changes bounding box from corner-based specification to center-based.

    Parameters
    ---------
    bbox : np.ndarray
        Bounding box of the form (x_min, y_min, x_max, y_max).

    Returns
    -------
    np.ndarray
        Same bounding box, but of the form (x, y, area, ratio), as a
        (4, 1) column vector.
    """
    x_min, y_min, x_max, y_max = bbox[0], bbox[1], bbox[2], bbox[3]
    w = x_max - x_min
    h = y_max - y_min
    # Center point, box area, and width/height aspect ratio.
    center_form = [x_min + w / 2.0, y_min + h / 2.0, w * h, w / h]
    return np.array(center_form).reshape((4, 1))
def center_to_corners(bbox):
    """Changes bounding box from center-based specification to corner-based.

    Inverse of `corners_to_center`. (The docstring previously claimed the
    opposite direction, copy-pasted from `corners_to_center`.)

    Parameters
    ---------
    bbox : np.ndarray
        Bounding box of the form (x, y, area, ratio).

    Returns
    -------
    np.ndarray
        Same bounding box, but of the form (x_min, y_min, x_max, y_max).
    """
    # area = width * height and ratio = width / height, therefore:
    # width = sqrt(area * ratio) and height = area / width.
    width = np.sqrt(bbox[2] * bbox[3])
    height = bbox[2] / width
    return np.concatenate([
        bbox[0] - width / 2.0,
        bbox[1] - height / 2.0,
        bbox[0] + width / 2.0,
        bbox[1] + height / 2.0
    ])
class KalmanTracker:
    """Tracker for individual face.

    Maintains an internal state by way of a Kalman filter applied to the
    face's bounding boxes.

    The Kalman filter used tracks the bounding box's center point, scale and
    ratio, and assumes a constant velocity of the box, and a constant ratio
    for the faces: we don't try to estimate the velocity of the ratio, as
    it's bound to be incorrect in a very short time.
    """

    # Count of all trackers instantiated, to keep ascending names for tracks.
    count = 0

    def __init__(self, face):
        """Initializes a tracker using an initial detected face.

        Parameters
        ----------
        face : dict
            Face to initialize tracker to, as returned by a `Detection`
            instance.
        """
        # 4 measurements plus 3 velocities. We don't keep a velocity for the
        # ratio of the bounding box.
        # State layout: [x, y, area, ratio, vx, vy, v_area].
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        # Transition matrix: constant-velocity model for x, y and area;
        # the ratio (row 3) has no associated velocity term.
        self.kf.F = np.array([
            [1, 0, 0, 0, 1, 0, 0],
            [0, 1, 0, 0, 0, 1, 0],
            [0, 0, 1, 0, 0, 0, 1],
            [0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 1],
        ])
        # Measurement matrix: we only observe the four box components.
        self.kf.H = np.array([
            [1, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0],
        ])
        # Higher measurement noise on area and ratio than on the center.
        self.kf.R[2:, 2:] *= 10.0
        # Give high uncertainty to the unobservable initial velocities.
        self.kf.P[4:, 4:] *= 1000.0
        self.kf.P *= 10.0
        # Low process noise for the velocity components.
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01
        # Seed the observable part of the state with the first detection.
        self.kf.x[:4] = corners_to_center(face['bbox'])
        # Number of successful updates, and frames since the last one.
        self.hits = 0
        self.time_since_update = 0
        # Ascending per-instance identifier used as the track ID.
        self.id = KalmanTracker.count
        KalmanTracker.count += 1

    def update(self, face):
        """Updates the state vector with observed face.

        Parameters
        ----------
        face : dict
            Face to update the tracker with, an observation that's already
            been assigned to the predicted trajectory of the tracker.
        """
        self.time_since_update = 0
        self.hits += 1
        self.kf.update(corners_to_center(face['bbox']))

    def predict(self):
        """Advances the state vector and returns the predicted bounding box
        estimate.

        Returns
        -------
        np.ndarray
            Predicted trajectory of the tracker, in (x_min, y_min, x_max,
            y_max) format.
        """
        # If the size of the bounding box is negative, nullify the velocity.
        if (self.kf.x[6] + self.kf.x[2]) <= 0:
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.time_since_update += 1
        return center_to_corners(self.kf.x)
def associate_detections_to_trackers(faces, trackers, iou_threshold=0.3):
    """Assigns detections to tracked faces.

    Runs the Hungarian algorithm over the pairwise IoU between detections
    and tracker predictions, then demotes assignments whose overlap falls
    below `iou_threshold`.

    Parameters
    ----------
    faces : list
        Observed faces, as returned by a Detection class.
    trackers : np.ndarray of size (T,)
        Positions for the `T` existing trackers.
    iou_threshold : float
        Threshold of IoU value for considering two boxes a match.

    Returns
    -------
    list of np.ndarray
        Matched, unmatched faces and unmatched trackers.
    """
    # With no live trackers, every detection is unmatched.
    if len(trackers) == 0:
        return (
            np.empty((0, 2), dtype=int),
            np.arange(len(faces)),
            np.empty((0, 5), dtype=int),
        )

    # Pairwise IoU between every detection and every tracker prediction.
    similarity = np.zeros(
        (len(faces), len(trackers)),
        dtype=np.float32
    )
    for row, face in enumerate(faces):
        for col, position in enumerate(trackers):
            similarity[row, col] = iou(face['bbox'], position)

    # Hungarian method over the negated IoU yields the highest-overlap
    # assignment as an array of (face_idx, track_idx) pairs.
    assigned_pairs = linear_assignment(-similarity)

    unmatched_faces = [
        idx for idx in range(len(faces))
        if idx not in assigned_pairs[:, 0]
    ]
    unmatched_trackers = [
        idx for idx in range(len(trackers))
        if idx not in assigned_pairs[:, 1]
    ]

    # Demote assignments whose overlap is below the threshold.
    matches = []
    for face_idx, track_idx in assigned_pairs:
        if similarity[face_idx, track_idx] < iou_threshold:
            unmatched_faces.append(face_idx)
            unmatched_trackers.append(track_idx)
        else:
            matches.append(
                np.array([face_idx, track_idx], dtype=int)
            )

    matches = np.stack(matches) if matches else np.empty((0, 2), dtype=int)

    return (
        matches, np.array(unmatched_faces), np.array(unmatched_trackers)
    )
class Sort:
    """SORT tracker for very simple, appearence-agnostic tracking.

    Implements a multiple object tracker based on `Simple Online and Realtime
    Tracking`_ focused on building *face tracks* out of detections (thus
    performing tracking-by-detection).

    The tracking performed by this class has only one objective: attach an
    identity to every detection passed to it, or `None` if no identity was
    found or yet constructed for it.

    This means the class will not:
    * Smooth out the bounding boxes: observations are returned as-is.
    * Interpolate a face whenever there's a missing observation in between.

    If you need to do get the smoothed out observations, your best bet is to
    modify the `self.update()` method to return the tracker predictions
    instead of the observations. We might make it a configurable at a later
    point.

    .. _Simple Online and Realtime Tracking: https://arxiv.org/abs/1602.00763
    """

    def __init__(self, max_age=1, min_hits=3, return_unmatched=False):
        """Initialize SORT instance with its configuration.

        The recommended way of setting the time-based parameters is with
        respect to the framerate of the video.

        Parameters
        ----------
        max_age : int
            Maximum number of steps after which an unmatched tracker is going
            to be destroyed.
        min_hits : int
            Minimum number of steps until a tracker is considered confirmed
            (and thus, its identity returned in faces).
        return_unmatched : bool
            Whether to return faces with no track attached (default: False).
        """
        self.max_age = max_age
        self.min_hits = min_hits
        self.return_unmatched = return_unmatched
        # Live KalmanTracker instances and the number of frames seen so far.
        self.trackers = []
        self.frame_count = 0

    def update(self, faces):
        """Update the tracker with new faces.

        This function should be called on every frame, even if no faces are
        detected.

        Parameters
        ----------
        faces : list
            List of faces dicts, as returned by a `Detection` instance.

        Returns
        -------
        list of dicts
            Same list of face dicts, but with an extra `track` field
            specifying the identity of the face. This field will either be
            `None` if the face wasn't matched to any underlying track, or an
            `int` if it was. If `self.return_unmatched` is `False`, all faces
            will have a `track`, or else they'll get filtered.
        """
        self.frame_count += 1

        # Get predicted locations from existing trackers.
        to_delete = []
        tracks = np.zeros((len(self.trackers), 4))
        for track_idx, track in enumerate(tracks):
            position = self.trackers[track_idx].predict()
            track[:] = position
            # A tracker might have gone to infinity in its extrapolation.
            if np.any(np.isnan(position)):
                to_delete.append(track_idx)

        # Drop rows containing nan/inf from the predictions, and the
        # corresponding trackers (in reverse so indices stay valid).
        tracks = np.ma.compress_rows(
            np.ma.masked_invalid(tracks)
        )
        for t in reversed(to_delete):
            self.trackers.pop(t)

        (
            matched, unmatched_faces, unmatched_tracks
        ) = associate_detections_to_trackers(faces, tracks)

        # Faces to return, augmented with the track ID, whenever available.
        augmented_faces = []

        # Update matched trackers with assigned detections.
        for track_idx, track in enumerate(self.trackers):
            if track_idx not in unmatched_tracks:
                # Look up the face assigned to this tracker.
                face_idx = int(
                    matched[np.where(matched[:, 1] == track_idx)[0], 0]
                )
                track.update(faces[face_idx])

                # Add the track ID to the `track` field only if the track has
                # been confirmed.
                track_id = track.id if (
                    track.hits >= self.min_hits
                    or self.frame_count <= self.min_hits
                ) else None

                augmented_faces.append(
                    {'track': track_id, **faces[face_idx]}
                )

        # Create and initialize new trackers for unmatched detections.
        for face_idx in unmatched_faces:
            track = KalmanTracker(faces[face_idx])
            self.trackers.append(track)

            # Just created: only case we should return it right away is if
            # there are no minimum amount of hits required.
            track_id = track.id if self.min_hits == 0 else None

            augmented_faces.append(
                {'track': track_id, **faces[face_idx]}
            )

        # Filter out the faces without a confirmed tracker attached.
        if not self.return_unmatched:
            augmented_faces = [
                face for face in augmented_faces
                if face['track'] is not None
            ]

        # Finally, clean up dead tracks.
        self.trackers = [
            track for track in self.trackers
            if track.time_since_update <= self.max_age
        ]

        return augmented_faces
class FaceTracking:
    """Object for performing face tracking.

    This object is meant to be used as a substitute to a `Detection` object,
    behaving exactly the same way except for having an extra `track` field in
    the face dictionaries.

    The object will only encapsulate and call the detector and tracker
    objects used, offer a :meth:`__call__`-based interface. That is, it's
    simply a container for the main :class:`Sort` class.
    """

    def __init__(self, detector=None, tracker=None):
        # Detection callable (e.g. a `Detection` instance) and the tracker
        # (e.g. a `Sort` instance) that maintains identity state.
        self.detector = detector
        self.tracker = tracker

    def __call__(self, frames):
        """Performs face tracking on `frames`.

        The face detection itself will be done by the `self.detector` object,
        while the tracking by the `self.tracker` object.

        Parameters
        ----------
        frames : list of numpy.ndarray or numpy.ndarray
            Frames to perform face tracking on.

        Returns
        -------
        list of list of dicts, or list dict
            List of dictionaries containing face data for a single image, or
            a list of these entries thereof.

            Each entry is of the form::

                {
                    'bbox': [x_min, y_min, x_max, y_max],
                    'landmarks': ...,  # Array of shape (5, 2).
                    'track': ...,  # Either an `int` or `None`.
                    'score': ...  # Confidence score.
                }
        """
        expanded = False
        if not isinstance(frames, list) and len(frames.shape) == 3:
            # A single (H, W, C) frame was passed: promote it to a
            # one-element batch. (This used to do `frames = frames[0]`,
            # which sliced off the first *row* of the image instead of
            # wrapping it, feeding the detector a 2-D array and returning
            # results for a single image row.)
            expanded = True
            frames = frames[None, ...]

        faces_per_frame = []
        detections_per_frame = self.detector(frames)
        for frame, detections in zip(frames, detections_per_frame):
            faces_per_frame.append(
                self.tracker.update(detections)
            )

        # Unwrap the single-frame case so the caller gets back the same
        # structure it passed in.
        return faces_per_frame[0] if expanded else faces_per_frame
def face_tracking(
    *, video=None, max_age=None, min_hits=None, detector=None,
    return_unmatched=False,
):
    """Default entry point to face tracking.

    This is a factory for an underlying :class:`FaceTracking` instance,
    which will be tasked with keeping the state of the different identities
    available.

    Once you create it, you can treat the resulting object as if it was an
    instance of the :class:`terran.face.detection.Detection` class, but focused
    on working in same-size batches of frames, and returning an additional
    field on the faces corresponding to the identity, or track.

    The tracking utilities provided focus on filtering observations *only*.
    No smoothing nor interpolation will be performed, so the results you
    obtained can be traced back to detections of the detector passed on to it.
    This is meant as a building block from which to do more detailed face
    recognition over videos.

    Parameters
    ----------
    video : terran.io.reader.Video
        Video to derive `max_age` and `min_hits` from. The first value will be
        one second of video, while the latter will be 1/5th of a second.
        When those values are specified as well, they'll have precedence.
    max_age : int
        Maximum number of frames to keep identities around for after no
        appearance.
    min_hits : int
        Minimum number of observations required for an identity to be returned.

        For instance, if `min_hits` is 6, it means that only after a face is
        detected six times will it be returned on prediction. This is, in
        essence, adding *latency* to the predictions. So consider decreasing
        this value if you care more about latency than any possible noise you
        may get due to short-lived faces.

        You can also get around this latency by specifying `return_unmatched`
        value of `True`, but in that case, returned faces will *not* have an
        identity associated.
    detector : terran.face.detection.Detection
        Face detector to get observations from. Default is using Terran's
        default face detection class.
    return_unmatched : boolean
        Whether to return observations (faces) that don't have a matched
        identity or not.

    Raises
    ------
    ValueError
        If `detector` is given but is not a `Detection` instance.
    """
    # Default values for SORT assume a 30 fps video.
    max_age_ = 30
    min_hits_ = 6

    # If we receive a video, derive the defaults from its framerate instead.
    if video is not None:
        max_age_ = video.framerate
        min_hits_ = video.framerate // 5

    # Explicitly-passed values take precedence over the derived defaults.
    if max_age is None:
        max_age = max_age_
    if min_hits is None:
        min_hits = min_hits_

    # Validate that we received a valid detector, or fall back to the default
    # one if none was specified.
    if detector is None:
        detector = face_detection
    elif not isinstance(detector, Detection):
        raise ValueError(
            '`detector` must be an instance of `terran.face.Detection`.'
        )

    # Bug fix: this previously passed `video.framerate` directly, which both
    # ignored the resolved `max_age`/`min_hits` values above and raised an
    # `AttributeError` whenever `video` was None (the default).
    sort = Sort(
        max_age=max_age,
        min_hits=min_hits,
        return_unmatched=return_unmatched,
    )

    return FaceTracking(detector=detector, tracker=sort)
|
<reponame>chw3k5/WaferScreen<filename>waferscreen/inst_control/inactive/tower_power_supply_gui.py
'''
Created on July 20, 2011
@author: schimaf
Versions:
1.0.2 10/16/2012 Check if the power supplies are powered in the GUI. Remove unused imports.
'''
import sys
from PyQt4.QtCore import SIGNAL, Qt
from PyQt4.QtGui import QWidget, QMainWindow, QVBoxLayout, QPushButton, QHBoxLayout, QApplication, \
QLabel, QIcon, QPixmap
import tower_power_supplies
class MainWindow(QMainWindow):
    """Main window of the tower power supply control GUI.

    Shows the two controlled power supplies, Power ON/OFF buttons, the
    current power state, and the most recent readings from the supplies.
    """

    def __init__(self, app):
        """Build all widgets and wire up the button signals.

        Parameters
        ----------
        app : QApplication
            The running application; kept so the Quit button can stop it.
        """
        super(MainWindow, self).__init__()

        self.app = app
        # Mirror of the displayed power state; the authoritative state is
        # `self.power_supplies.powered`, read in updatePowerOnString().
        self.power_on = False
        self.power_state_string = "Power is OFF"

        pixmap = QPixmap("towerpowericon.png")
        self.setWindowIcon(QIcon(pixmap))

        self.version = "1.0.2"
        self.setWindowTitle("Tower Power Supply GUI %s" % self.version)
        self.setGeometry(100, 100, 360, 100)

        # Hardware abstraction over the two tower power supplies.
        self.power_supplies = tower_power_supplies.TowerPowerSupplies()

        self.central_widget = QWidget(self)
        self.setCentralWidget(self.central_widget)
        # NOTE(review): `layout_widget` is never placed in a layout or used
        # below; kept only to preserve the existing attribute surface.
        self.layout_widget = QWidget(self.central_widget)
        self.layout = QVBoxLayout(self.central_widget)

        self.top_text_label = QLabel("Tower Power Supply Control", self.central_widget)

        psa_text = "Power Supply A: %s %s (pad=%s)" % \
            (self.power_supplies.power_supply_1.manufacturer,
             self.power_supplies.power_supply_1.model_number,
             str(self.power_supplies.power_supply_1.pad))
        self.psa_label = QLabel(psa_text, self.central_widget)

        psb_text = "Power Supply B: %s %s (pad=%s)" % \
            (self.power_supplies.power_supply_2.manufacturer,
             self.power_supplies.power_supply_2.model_number,
             str(self.power_supplies.power_supply_2.pad))
        self.psb_label = QLabel(psb_text, self.central_widget)

        self.power_state_label = QLabel(self.power_state_string, self.central_widget)
        self.power_on_button = QPushButton("Power ON", self.central_widget)
        self.power_off_button = QPushButton("Power OFF", self.central_widget)
        self.quit_button = QPushButton("Quit", self.central_widget)
        self.readingLabel = QLabel("no readings yet", self.central_widget)

        # Horizontal row that holds the three action buttons.
        self.buttons_layout_widget = QWidget(self.central_widget)
        self.buttons_layout = QHBoxLayout(self.buttons_layout_widget)

        self.layout.addWidget(self.top_text_label, 0, Qt.AlignHCenter)
        self.layout.addWidget(self.psa_label)
        self.layout.addWidget(self.psb_label)
        self.layout.addWidget(self.buttons_layout_widget)
        self.layout.addWidget(self.power_state_label, 0, Qt.AlignHCenter)
        self.layout.addWidget(self.readingLabel)
        # self.readingLabel.setPixmap(pixmap) # test that the pixmap was loaded

        self.buttons_layout.addWidget(self.power_on_button)
        self.buttons_layout.addWidget(self.power_off_button)
        self.buttons_layout.addWidget(self.quit_button)

        # Old-style PyQt4 signal/slot connections.
        self.connect(self.power_on_button, SIGNAL("clicked()"), self.power_on_event)
        self.connect(self.power_off_button, SIGNAL("clicked()"), self.power_off_event)
        self.connect(self.quit_button, SIGNAL("clicked()"), self.quit_event)

        # Reflect the actual state of the power supplies at startup.
        self.updatePowerOnString()

    def power_on_event(self):
        """Run the power-on sequence and show the readings it returns."""
        s = self.power_supplies.powerOnSequence()
        self.updatePowerOnString()
        self.readingLabel.setText("most recent readings:\n" + s)

    def power_off_event(self):
        """Power down the supplies and refresh the state label."""
        self.power_supplies.powerOffSupplies()
        self.updatePowerOnString()

    def quit_event(self):
        """Exit the application."""
        self.app.quit()

    def updatePowerOnString(self):
        """Sync the power-state label with the supplies' reported state."""
        # Idiom fix: truthiness test instead of `== True`.
        if self.power_supplies.powered:
            self.power_state_string = "Power is ON"
        else:
            self.power_state_string = "Power is OFF"
        self.power_state_label.setText(self.power_state_string)
def main(args):
    """Create the Qt application and show the power supply window."""
    application = QApplication(args)
    window = MainWindow(application)
    window.show()
    window.setWindowIcon(QIcon("towerpowericon.png"))
    application.exec_()


if __name__ == "__main__":
    main(sys.argv)
|
import unittest
from meraki_cli.__main__ import _object_filter
# Shared fixture: sample objects with string `id` and `name` values, used by
# every test below to exercise `_object_filter`'s regex-based matching.
LISTOFDICTS = [
    {'id': '1', 'name': 'THING1'},
    {'id': '2', 'name': 'THING2'},
    {'id': '100', 'name': 'OTHERTHING'},
    {'id': '200', 'name': 'OTHER200THING'},
    {'id': '300', 'name': 'ELSE'}
]
class TestObjectFilter(unittest.TestCase):
    """Tests for `_object_filter`.

    Filters are `key:regex` strings matched against each dict in a list;
    multiple filters combine with OR by default, or with AND when
    `and_logic=True`. These tests cover matching, logging, and edge cases.
    """

    def testObjectFilterNotListError(self):
        # Should throw a log error since listofdicts input is not a list
        with self.assertLogs(level='ERROR'):
            _object_filter(LISTOFDICTS[0], ['goingToErorAnyways'])

    def testObjectFilterNonFilter(self):
        # Should return nothing if filter list is empty
        output = _object_filter(LISTOFDICTS, [])
        assert output == []

    def testObjectFilterSimple(self):
        # Should return OTHERTHING, but nothing else
        output = _object_filter(LISTOFDICTS, ['name:OTHERTHING'])
        assert output == [LISTOFDICTS[2]]

    def testObjectFilterSimpleRegex(self):
        # Should return THING1 and THING2
        output = _object_filter(LISTOFDICTS, ['name:THING.'])
        assert output == LISTOFDICTS[0:2]

    def testObjectFilterOr(self):
        # Should return THING1 and OTHERTHING
        output = _object_filter(LISTOFDICTS,
                                ['name:THING1', 'name:OTHERTHING'])
        assert output == [LISTOFDICTS[0], LISTOFDICTS[2]]

    def testObjectFilterMultiKeyOr(self):
        # Should return THING1, THING2, and OTHER200THING
        output = _object_filter(LISTOFDICTS,
                                ['name:THING.', 'id:200'])
        assert output == LISTOFDICTS[0:2] + [LISTOFDICTS[3]]

    def testObjectFilterAnd(self):
        # Should return OTHERTHING and OTHER200THING
        output = _object_filter(LISTOFDICTS,
                                ['name:THING', 'id:...'], and_logic=True)
        assert output == LISTOFDICTS[2:4]

    def testObjectFilterEmptyList(self):
        # Filtering an empty list is a harmless no-op.
        output = _object_filter([], ['name:THING'])
        assert output == []

    def testObjectFilterKeyMissingWarning(self):
        # Should throw a warning since this key does not exist in any of
        # the data
        with self.assertLogs(level='WARNING'):
            _object_filter(LISTOFDICTS, ['missing:key'])
        # Test with multiple filter keys, one of which is good. Make sure
        # the warning still fires.
        with self.assertLogs(level='WARNING'):
            _object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'])
        # Test with AND logic and make sure warning fires
        with self.assertLogs(level='WARNING'):
            _object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'],
                           and_logic=True)

    def testObjectFilterKeyMissingReturn(self):
        # Should return an empty list since this key does not exist in any
        # of the data.
        output = _object_filter(LISTOFDICTS, ['missing:key'])
        assert output == []
        # Test with multiple keys. Should return objects (using "OR" logic)
        output = _object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'])
        assert output == LISTOFDICTS[0:2]
        # Test with multiple keys. Should return nothing (using "AND" logic)
        output = _object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'],
                                and_logic=True)
        assert output == []

    def testObjectFilterKeyInconsistentData(self):
        # Create a listofdicts with inconsistent keys
        data = [
            {'k2': 'v2'},
            {'k1': 'v1', 'k2': 'v2'},
            {'k3': 'v3', 'k4': 'v4'},
        ]
        # Should return no warnings
        assert _object_filter(data, ['k1:v.']) == [data[1]]
        assert _object_filter(data, ['k1:v.'], and_logic=True) == []

    def testObjectFilterComplexData(self):
        # Test filtering complex values (dict). Should be flattened to str
        # before filtering happens
        data = [
            {'k': {'test1': 'test1'}},
            {'k': {'test2': 'test2'}},
            {'k': {'test3': 'test3'}},
        ]
        assert _object_filter(data, ['k:test2']) == [data[1]]

    def testObjectFilterMalformedString(self):
        # Test that a malformed filter causes a SystemExit
        with self.assertRaises(SystemExit) as cm:
            # And throws an ERROR log
            with self.assertLogs(level='ERROR'):
                _object_filter(LISTOFDICTS, ['badfilter'])
        # And the exit code is 1 (error)
        self.assertEqual(cm.exception.code, 1)
|
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import _useDvbCssUninstalled # Enable to run when dvbcss not yet installed ... @UnusedImport
from dvbcss.protocol.wc import WCMessage as WCMessage
class Test(unittest.TestCase):
    """Unit tests for `dvbcss.protocol.wc.WCMessage` packing/parsing.

    Modernized to use `assertEqual` throughout: the `assertEquals` alias is
    deprecated and was removed in Python 3.12. Payload byte-strings are
    unchanged.
    """

    def testSmokeTestCreate(self):
        """A freshly-built message exposes its constructor arguments."""
        m = WCMessage(WCMessage.TYPE_REQUEST, 1, 256, 2, 3, 4)
        self.assertEqual(m.msgtype, WCMessage.TYPE_REQUEST)
        self.assertEqual(m.precision, 1)
        self.assertEqual(m.maxFreqError, 256)
        self.assertEqual(m.originateNanos, 2)
        self.assertEqual(m.receiveNanos, 3)
        self.assertEqual(m.transmitNanos, 4)
        self.assertEqual(m.originalOriginate, None)

    def test_simplePayload(self):
        """Packing a response produces the expected wire format."""
        m = WCMessage(WCMessage.TYPE_RESPONSE, 5, 7680, 1000, 2000, 3000)
        payload = m.pack()
        self.assertEqual(payload, "\x00\x01\x05\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x03\xe8\x00\x00\x00\x00\x00\x00\x07\xd0\x00\x00\x00\x00\x00\x00\x0b\xb8")

    def test_simplePayloadOverridingOriginate(self):
        """An explicit originalOriginate pair replaces the originate field."""
        m = WCMessage(WCMessage.TYPE_RESPONSE, 5, 12160, 1000, 2000, 3000, (0xaabbccdd, 0xeeff1122))
        payload = m.pack()
        self.assertEqual(payload, "\x00\x01\x05\x00\x00\x00\x2f\x80\xaa\xbb\xcc\xdd\xee\xff\x11\x22\x00\x00\x00\x00\x00\x00\x07\xd0\x00\x00\x00\x00\x00\x00\x0b\xb8")

    def test_simpleParseWithUnusualOriginate(self):
        """A non-nanosecond originate field is preserved as a (secs, frac) pair."""
        payload = "\x00\x01\x05\x00\x00\x01\xf4\x00\xaa\xbb\xcc\xdd\xee\xff\x11\x22\x00\x00\x00\x00\x00\x00\x07\xd0\x00\x00\x00\x00\x00\x00\x0b\xb8"
        m = WCMessage.unpack(payload)
        self.assertEqual(m.msgtype, WCMessage.TYPE_RESPONSE)
        self.assertEqual(m.precision, 5)
        self.assertEqual(m.maxFreqError, 500*256)
        self.assertEqual(m.originalOriginate, (0xaabbccdd, 0xeeff1122))
        self.assertEqual(m.receiveNanos, 2000)
        self.assertEqual(m.transmitNanos, 3000)

    def test_simpleParse(self):
        """Unpacking converts the originate timevalue to nanoseconds."""
        payload = "\x00\x01\x05\x00\x00\x01\xf4\x00\xaa\xbb\xcc\xdd\x3b\x9a\xc9\xff\x00\x00\x00\x00\x00\x00\x07\xd0\x00\x00\x00\x00\x00\x00\x0b\xb8"
        m = WCMessage.unpack(payload)
        self.assertEqual(m.msgtype, WCMessage.TYPE_RESPONSE)
        self.assertEqual(m.precision, 5)
        self.assertEqual(m.maxFreqError, 500*256)
        self.assertEqual(m.originateNanos, 2864434397999999999)
        self.assertEqual(m.receiveNanos, 2000)
        self.assertEqual(m.transmitNanos, 3000)

    def test_encodePrecision(self):
        """Precision encodes as the exponent of the nearest power of two."""
        self.assertEqual(WCMessage.encodePrecision(2**-128), -128)
        self.assertEqual(WCMessage.encodePrecision(0.00001), -16)
        self.assertEqual(WCMessage.encodePrecision(2**127), 127)
        self.assertEqual(WCMessage.encodePrecision(0.0007), -10)
        self.assertEqual(WCMessage.encodePrecision(0.001), -9)

    def test_encodeMaxFreqError(self):
        """Max frequency error encodes in units of 1/256 ppm."""
        self.assertEqual(WCMessage.encodeMaxFreqError(50), 12800)
        self.assertEqual(WCMessage.encodeMaxFreqError(1900), 486400)
        self.assertEqual(WCMessage.encodeMaxFreqError(0.01), 3)
        self.assertEqual(WCMessage.encodeMaxFreqError(28), 7168)
        self.assertEqual(WCMessage.encodeMaxFreqError(100000), 25600000)
        self.assertEqual(WCMessage.encodeMaxFreqError(0), 0)

    def test_decodePrecision(self):
        """Decoding precision is 2 raised to the encoded exponent."""
        self.assertEqual(WCMessage.decodePrecision(-128), 2**-128)
        self.assertEqual(WCMessage.decodePrecision(-16), 2**-16)
        self.assertEqual(WCMessage.decodePrecision(127), 2**127)
        self.assertEqual(WCMessage.decodePrecision(-10), 2**-10)
        self.assertEqual(WCMessage.decodePrecision(-9), 2**-9)

    def test_decodeMaxFreqError(self):
        """Decoding divides the wire value by 256 (ppm)."""
        self.assertEqual(WCMessage.decodeMaxFreqError(12800), 50)
        self.assertEqual(WCMessage.decodeMaxFreqError(486400), 1900)
        self.assertEqual(WCMessage.decodeMaxFreqError(3), 0.01171875)
        self.assertEqual(WCMessage.decodeMaxFreqError(7168), 28)
        self.assertEqual(WCMessage.decodeMaxFreqError(25600000), 100000)
        self.assertEqual(WCMessage.decodeMaxFreqError(0), 0)
if __name__ == "__main__":
    # Run the whole suite when invoked directly; uncomment the next line to
    # restrict the run to a single test.
    #import sys;sys.argv = ['', 'Test.testSmokeTestCreate']
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.