# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from urllib import quote
import jsonpickle
from cairis.core.Countermeasure import Countermeasure
from cairis.core.Target import Target
from cairis.core.CountermeasureEnvironmentProperties import CountermeasureEnvironmentProperties
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.tools.PseudoClasses import SecurityAttribute, CountermeasureTarget, CountermeasureTaskCharacteristics
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Shamal Faily'
class CountermeasureAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
# region Class fields
self.logger = logging.getLogger(__name__)
self.existing_countermeasure_name = 'Location-based X.509 extension'
self.existing_countermeasure_type = 'Information'
self.existing_countermeasure_description = 'X.509 certificates extended to tie client workstations so NeuroGrid tasks can only be carried out on these.'
self.existing_environment_name = 'Psychosis'
self.existing_requirements = ['User certificate']
self.existing_targets = [CountermeasureTarget('Certificate Ubiquity','High','Discourages certificate sharing')]
self.existing_properties = []
self.existing_rationale = ['None','None','None','None','None','None','None','None']
self.existing_cost='Medium'
self.existing_roles=['Data Consumer','Certificate Authority']
self.existing_personas=[CountermeasureTaskCharacteristics('Upload data','Claire','None','None','None','Low Hindrance'),CountermeasureTaskCharacteristics('Download data','Claire','None','None','None','Low Hindrance')]
countermeasure_class = Countermeasure.__module__+'.'+Countermeasure.__name__
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/countermeasures?session_id=test')
countermeasures = jsonpickle.decode(rv.data)
self.assertIsNotNone(countermeasures, 'No results after deserialization')
self.assertIsInstance(countermeasures, dict, 'The result is not a dictionary as expected')
self.assertGreater(len(countermeasures), 0, 'No countermeasures in the dictionary')
self.logger.info('[%s] Countermeasures found: %d', method, len(countermeasures))
countermeasure = countermeasures.values()[0]
self.logger.info('[%s] First countermeasure: %s [%d]\n', method, countermeasure['theName'], countermeasure['theId'])
def test_get_by_name(self):
method = 'test_get_by_name'
url = '/api/countermeasures/name/%s?session_id=test' % quote(self.existing_countermeasure_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
countermeasure = jsonpickle.decode(rv.data)
self.assertIsNotNone(countermeasure, 'No results after deserialization')
self.logger.info('[%s] Countermeasure: %s [%d]\n', method, countermeasure['theName'], countermeasure['theId'])
def test_delete(self):
method = 'test_delete'
url = '/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name())
new_countermeasure_body = self.prepare_json()
self.app.delete(url)
self.logger.info('[%s] Object to delete: %s', method, new_countermeasure_body)
self.app.post('/api/countermeasures', content_type='application/json', data=new_countermeasure_body)
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.delete(url)
self.logger.info('[%s] Response data: %s', method, rv.data)
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(rv.data)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
def test_post(self):
method = 'test_post'
url = '/api/countermeasures'
self.logger.info('[%s] URL: %s', method, url)
new_countermeasure_body = self.prepare_json()
self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()))
rv = self.app.post(url, content_type='application/json', data=new_countermeasure_body)
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
env_id = json_resp.get('countermeasure_id', None)
self.assertIsNotNone(env_id, 'No countermeasure ID returned')
self.assertGreater(env_id, 0, 'Invalid countermeasure ID returned [%d]' % env_id)
self.logger.info('[%s] Countermeasure ID: %d\n', method, env_id)
rv = self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()))
def test_target_names(self):
method = 'test_countermeasure-targets-by-requirement-get'
url = '/api/countermeasures/targets/environment/Psychosis?requirement=User%20certificate&session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
targetList = jsonpickle.decode(rv.data)
self.assertIsNotNone(targetList, 'No results after deserialization')
self.assertGreater(len(targetList), 0, 'No targets returned')
self.logger.info('[%s] Targets found: %d', method, len(targetList))
self.assertEqual(targetList[0],'Certificate ubiquity')
self.assertEqual(targetList[1],'Social engineering')
def test_task_names(self):
method = 'test_countermeasure-tasks-by-role-get'
url = '/api/countermeasures/tasks/environment/Psychosis?role=Certificate%20Authority&role=Data%20Consumer&role=Researcher&session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
taskList = jsonpickle.decode(rv.data)
self.assertIsNotNone(taskList, 'No results after deserialization')
self.assertEqual(len(taskList),2)
self.assertEqual(taskList[0]['theTask'],'Download data')
self.assertEqual(taskList[0]['thePersona'],'Claire')
self.assertEqual(taskList[1]['theTask'],'Upload data')
self.assertEqual(taskList[1]['thePersona'],'Claire')
def test_put(self):
method = 'test_put'
url = '/api/countermeasures'
self.logger.info('[%s] URL: %s', method, url)
new_countermeasure_body = self.prepare_json()
rv = self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()))
rv = self.app.post(url, content_type='application/json', data=new_countermeasure_body)
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
env_id = json_resp.get('countermeasure_id', None)
self.assertIsNotNone(env_id, 'No countermeasure ID returned')
self.assertGreater(env_id, 0, 'Invalid countermeasure ID returned [%d]' % env_id)
self.logger.info('[%s] Countermeasure ID: %d', method, env_id)
countermeasure_to_update = self.prepare_new_countermeasure()
countermeasure_to_update.theName = 'Edited test countermeasure'
countermeasure_to_update.theId = env_id
upd_env_body = self.prepare_json(countermeasure=countermeasure_to_update)
rv = self.app.put('/api/countermeasures/name/%s?session_id=test' % quote(self.prepare_new_countermeasure().name()), data=upd_env_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertGreater(message.find('successfully updated'), -1, 'The countermeasure was not successfully updated')
rv = self.app.get('/api/countermeasures/name/%s?session_id=test' % quote(countermeasure_to_update.name()))
upd_countermeasure = jsonpickle.decode(rv.data)
self.assertIsNotNone(upd_countermeasure, 'Unable to decode JSON data')
self.logger.debug('[%s] Response data: %s', method, rv.data)
self.logger.info('[%s] Countermeasure: %s [%d]\n', method, upd_countermeasure['theName'], upd_countermeasure['theId'])
rv = self.app.delete('/api/countermeasures/name/%s?session_id=test' % quote(countermeasure_to_update.theName))
def test_generate_asset(self):
method = 'test_generate_asset'
url = '/api/countermeasures/name/' + quote(self.existing_countermeasure_name) + '/generate_asset?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.post(url, content_type='application/json',data=jsonpickle.encode({'session_id':'test'}))
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
json_resp = jsonpickle.decode(rv.data)
self.assertIsNotNone(json_resp, 'No results after deserialization')
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s\n', method, message)
self.assertGreater(message.find('successfully generated'), -1, 'Countermeasure asset not generated')
def prepare_new_countermeasure(self):
new_countermeasure_props = [
CountermeasureEnvironmentProperties(
environmentName=self.existing_environment_name,
requirements=self.existing_requirements,
targets=self.existing_targets,
properties=self.existing_properties,
rationale=self.existing_rationale,
cost=self.existing_cost,
roles=self.existing_roles,
personas=self.existing_personas)
]
new_countermeasure = Countermeasure(
cmId=-1,
cmName='New countermeasure',
cmDesc='New CM description',
cmType='Information',
tags=[],
cProps=[]
)
new_countermeasure.theEnvironmentProperties = new_countermeasure_props
new_countermeasure.theEnvironmentDictionary = {}
delattr(new_countermeasure, 'theEnvironmentDictionary')
return new_countermeasure
def prepare_dict(self, countermeasure=None):
if countermeasure is None:
countermeasure = self.prepare_new_countermeasure()
else:
assert isinstance(countermeasure, Countermeasure)
return {
'session_id': 'test',
'object': countermeasure,
}
def prepare_json(self, data_dict=None, countermeasure=None):
if data_dict is None:
data_dict = self.prepare_dict(countermeasure=countermeasure)
else:
assert isinstance(data_dict, dict)
new_countermeasure_body = jsonpickle.encode(data_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_countermeasure_body)
return new_countermeasure_body
|
nilq/baby-python
|
python
|
import pytest
import pandas
@pytest.fixture(scope="session")
def events():
return pandas.read_pickle("tests/data/events_pickle.pkl")
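# Usage sketch (an assumption: the pickle holds a pandas DataFrame): a test
# receives the session-scoped fixture above simply by naming it as an argument.
def test_events_loaded(events):
    assert not events.empty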
|
nilq/baby-python
|
python
|
## https://weinbe58.github.io/QuSpin/examples/example7.html
## https://weinbe58.github.io/QuSpin/examples/example15.html
## https://weinbe58.github.io/QuSpin/examples/user-basis_example0.html
## https://weinbe58.github.io/QuSpin/user_basis.html
## https://weinbe58.github.io/QuSpin/generated/quspin.basis.spin_basis_1d.html
from __future__ import print_function, division
from quspin.operators import hamiltonian # operators
from quspin.basis import spin_basis_1d # Hilbert space spin basis
import numpy as np # general math functions
#
###### define model parameters ######
Jleg = 1.0 # spin-spin interaction, leg
Jrung = 1.0 # spin-spin interaction, rung
L = 12 # length of chain
N = 2*L # number of sites
###### setting up bases ######
#basis_1d = spin_basis_1d(L=N,Nup=N//2,S="1/2",pauli=0)
basis_1d = spin_basis_1d(L=N,Nup=N//2,S="1/2",pauli=0,a=2,kblock=0,pblock=1,zblock=1)## even L
#basis_1d = spin_basis_1d(L=N,Nup=N//2,S="1/2",pauli=0,a=2,kblock=0,pblock=-1,zblock=-1)## odd L
###### setting up hamiltonian ######
Jzzs = \
[[Jleg,i,(i+2)%N] for i in range(0,N,2)] \
+ [[Jleg,i,(i+2)%N] for i in range(1,N,2)] \
+ [[Jrung,i,i+1] for i in range(0,N,2)]
Jpms = \
[[0.5*Jleg,i,(i+2)%N] for i in range(0,N,2)] \
+ [[0.5*Jleg,i,(i+2)%N] for i in range(1,N,2)] \
+ [[0.5*Jrung,i,i+1] for i in range(0,N,2)]
Jmps = \
[[0.5*Jleg,i,(i+2)%N] for i in range(0,N,2)] \
+ [[0.5*Jleg,i,(i+2)%N] for i in range(1,N,2)] \
+ [[0.5*Jrung,i,i+1] for i in range(0,N,2)]
static = [["zz",Jzzs],["+-",Jpms],["-+",Jmps]]
# build hamiltonian
#H = hamiltonian(static,[],static_fmt="csr",basis=basis_1d,dtype=np.float64)
no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
H = hamiltonian(static,[],static_fmt="csr",basis=basis_1d,dtype=np.float64,**no_checks)
# diagonalise H
#ene,vec = H.eigsh(time=0.0,which="SA",k=2)
ene = H.eigsh(which="SA",k=2,return_eigenvectors=False); ene = np.sort(ene)
print(Jleg,Jrung,N,ene[0]/N)
## 2-leg ladder (L=inf): -0.578043140180 (PhysRevB.89.094424, see also PhysRevB.54.R3714, PhysRevB.47.3196)
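# Optional sanity check (a sketch): compare the finite-size energy per site
# against the L=inf reference value quoted above.
e_ref = -0.578043140180
print("deviation from L=inf reference:", ene[0]/N - e_ref)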
|
nilq/baby-python
|
python
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
    def test_create_user_email_successful(self):
        email = 'hello.com'
        password = '123123'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))
    def test_new_user_normalized(self):
        email = "test@gmail.com"
        user = get_user_model().objects.create_user(
            email, '123123'
        )
        self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None,'test123')
    def test_create_new_superuser(self):
        user = get_user_model().objects.create_superuser(
            'test@gmail.com',
            '123123'
        )
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
nilq/baby-python
|
python
|
""" Itertools examples """
import itertools
import collections
import operator
import os
# itertools.count can provide an infinite counter.
for i in itertools.count(step=1):
    print(i)
    if i == 20: break
# itertools.cycle cycles through an iterator
# Will keep printing 'python'
for i, j in enumerate(itertools.cycle(['python'])):
    print(j)
    if i == 10: break
# itertools.repeat yields the same object over and over.
# Printing the iterator itself only shows its repr.
print(itertools.repeat(range(10)))
# chain returns elements from 'n' iterators until they are exhausted.
# Make a dictionary of counts of letters in a list of strings.
birds = ['parrot', 'crow', 'dove', 'peacock', 'macaw', 'hen']
frequency = collections.defaultdict(int)
for letter in itertools.chain(*birds):
    frequency[letter] += 1
print(frequency)
# takewhile returns elements as long as a predicate (condition) is True.
# Give the list of countries preceding the first unwanted one.
countries = ['U.S', 'U.K', 'India', 'Australia', 'Malaysia', 'Pakistan']
print(list(itertools.takewhile(lambda x: x != 'Pakistan', countries)))
# dropwhile keeps dropping elements while the predicate is True.
# Produce a list of files > a minimum size in the current folder.
files = sorted([(file, os.path.getsize(file)) for file in os.listdir(".")],
               key=operator.itemgetter(1))
print(list(itertools.dropwhile(lambda x: x[1] < 8192, files)))
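# A sketch with itertools.islice: a cleaner way to bound the infinite
# iterators above without an explicit break (prints 0..20).
for i in itertools.islice(itertools.count(step=1), 21):
    print(i)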
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.7 on 2021-09-06 20:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('upload', '0004_auto_20210623_2006'),
]
operations = [
migrations.AlterField(
model_name='thumbnails',
name='large',
field=models.CharField(max_length=20, verbose_name='relative location of large thumbnail'),
),
migrations.AlterField(
model_name='thumbnails',
name='small',
field=models.CharField(max_length=20, verbose_name='relative location of small thumbnail'),
),
]
|
nilq/baby-python
|
python
|
# ------------------------------------------------------------------
# Step 1: import scipy and pyamg packages
# ------------------------------------------------------------------
import numpy as np
import pyamg
import matplotlib.pyplot as plt
# ------------------------------------------------------------------
# Step 2: setup up the system using pyamg.gallery
# ------------------------------------------------------------------
n = 200
X, Y = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
stencil = pyamg.gallery.diffusion_stencil_2d(type='FE', epsilon=0.001, theta=np.pi / 3)
A = pyamg.gallery.stencil_grid(stencil, (n, n), format='csr')
b = np.random.rand(A.shape[0]) # pick a random right hand side
# ------------------------------------------------------------------
# Step 3: setup of the multigrid hierarchy
# ------------------------------------------------------------------
ml = pyamg.smoothed_aggregation_solver(A) # construct the multigrid hierarchy
# ------------------------------------------------------------------
# Step 4: solve the system
# ------------------------------------------------------------------
res1 = []
x = ml.solve(b, tol=1e-12, residuals=res1) # solve Ax=b to a tolerance of 1e-12
# ------------------------------------------------------------------
# Step 5: print details
# ------------------------------------------------------------------
print(ml) # print hierarchy information
print("residual norm is", np.linalg.norm(b - A * x)) # compute norm of residual vector
print("\n\n\n\n\n")
# notice that there are 5 (or maybe 6) levels in the hierarchy
#
# we can look at the data in each of the levels
# e.g. the multigrid components on the finest (0) level
# A: operator on level 0
# P: prolongation operator mapping from level 1 to level 0
# R: restriction operator mapping from level 0 to level 1
# B: near null-space modes for level 0
# presmoother: presmoothing function taking arguments (A,x,b)
# postsmoother: postsmoothing function taking arguments (A,x,b)
print(dir(ml.levels[0]))
# e.g. the multigrid components on the coarsest (4) level
print(dir(ml.levels[-1]))
# there are no interpolation operators (P,R) or smoothers on the coarsest level
# check the size and type of the fine level operators
print('type = ', ml.levels[0].A.format)
print(' A = ', ml.levels[0].A.shape)
print(' P = ', ml.levels[0].P.shape)
print(' R = ', ml.levels[0].R.shape)
print("\n\n\n\n\n")
# ------------------------------------------------------------------
# Step 6: change the hierarchy
# ------------------------------------------------------------------
# we can also change the details of the hierarchy
ml = pyamg.smoothed_aggregation_solver(A, # the matrix
B=X.reshape(n * n, 1), # the representation of the near null space (this is a poor choice)
BH=None, # the representation of the left near null space
symmetry='hermitian', # indicate that the matrix is Hermitian
strength='evolution', # change the strength of connection
aggregate='standard', # use a standard aggregation method
smooth=('jacobi', {'omega': 4.0 / 3.0, 'degree': 2}), # prolongation smoothing
presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),
improve_candidates=[('block_gauss_seidel',
{'sweep': 'symmetric', 'iterations': 4}), None],
max_levels=10, # maximum number of levels
max_coarse=5, # maximum number on a coarse level
keep=False) # keep extra operators around in the hierarchy (memory)
# ------------------------------------------------------------------
# Step 7: print details
# ------------------------------------------------------------------
res2 = [] # keep the residual history in the solve
x = ml.solve(b, tol=1e-12, residuals=res2) # solve Ax=b to a tolerance of 1e-12
print(ml) # print hierarchy information
print("residual norm is", np.linalg.norm(b - A * x)) # compute norm of residual vector
print("\n\n\n\n\n")
# ------------------------------------------------------------------
# Step 8: plot convergence history
# ------------------------------------------------------------------
plt.semilogy(res1)
plt.semilogy(res2)
plt.title('Residual Histories')
plt.legend(['Default Solver', 'Specialized Solver'])
plt.xlabel('Iteration')
plt.ylabel('Relative Residual')
plt.show()
|
nilq/baby-python
|
python
|
import os
import shutil
import wget
import json
import logging
import tempfile
import traceback
import xml.etree.ElementTree as ET
import networkx as nx
from pml import *
#logging.basicConfig(level=logging.INFO)
def get_file(file, delete_on_exit=None):
    # avoid a shared mutable default argument; callers may omit the list
    if delete_on_exit is None:
        delete_on_exit = []
    # due to current limitations of DMC, allow shortened URLs
if "://" not in file:
file = "http://" + file
if file.startswith("file://"):
return file[7:]
else:
# create temp file to store the file contents
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
os.unlink(tmpfile)
# download the file contents
wget.download(file.replace("?dl=0", "?dl=1"), tmpfile)
delete_on_exit.append(tmpfile)
return tmpfile
def write_outputs(file, fields):
with open(file, "w") as f:
for name, value in fields.items():
f.write(str(name) + " = " + str(value))
f.write("\n")
def exit_with_message(message):
write_outputs("output.txt", { "message" : message})
exit(-1)
def read_inputs(file):
inputs = {}
with open(file, "r") as f:
for line in f:
if len(line.strip()) > 0:
tokens = line.split("=")
inputs[tokens[0].strip()] = tokens[1].strip()
return inputs
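# Example input.txt (illustrative values only; the keys come from INPUT_DEFN below):
#   inputFile = http://example.com/design_ebom.xml
#   optimizeWeight = cost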
def validate_inputs(inputs, fields):
for name, (required, type) in fields.items():
if required and name not in inputs:
exit_with_message("missing required input " + str(name))
if name in inputs:
inputs[name] = type(inputs[name])
return inputs
def process(input_file, user_constants=None, weight="cost"):
# Initialize the system
auto_register("library")
# Update system with user-defined constants
if user_constants is not None:
load_constants(user_constants)
# Load structure from iFAB BOM
processGraph = load_ebom(input_file)
# Expand the process graph using the PML models
expand_graph(processGraph)
# Save graph as image
as_png(processGraph, "graph.png")
# Validate the graph by ensuring routings exist
if validate_graph(processGraph):
# Find the routing that optimizes the user-defined weight (e.g., cost or time)
(_, selected_processes) = find_min(processGraph, weight=weight)
minimumGraph = create_subgraph(processGraph, selected_processes)
# Save the minimum routings to a graph
as_png(minimumGraph, "minimumGraph.png")
# Compute the cost and time
total_cost = sum_weight(minimumGraph, weight="cost")
total_time = sum_weight(minimumGraph, weight="time")
# Output the results
write_outputs("output.txt", { "message" : "Design is manufacturable",
"cost" : float(total_cost / dollars),
"time" : float(total_time / days) })
else:
exit_with_message("Unable to manufacture design, no routings exist")
if __name__ == "__main__":
try:
INPUT_DEFN = { "inputFile" : (True, str), "userConstants" : (False, str), "optimizeWeight" : (False, str)}
# read and validate the inputs from DOME
inputs = read_inputs("input.txt")
inputs = validate_inputs(inputs, INPUT_DEFN)
# convert inputs to kwargs, track any temporary files
kwargs = {}
delete_on_exit = []
kwargs["input_file"] = get_file(inputs["inputFile"], delete_on_exit)
if "userConstants" in inputs:
kwargs["user_constants"] = get_file(inputs["userConstants"], delete_on_exit)
if "optimizeWeight" in inputs:
kwargs["weight"] = inputs["optimizeWeight"]
# process the submission
process(**kwargs)
# delete the temporary files
for file in delete_on_exit:
os.unlink(file)
except Exception as e:
traceback.print_exc()
exit_with_message("An error occurred: " + str(e))
|
nilq/baby-python
|
python
|
a= int(input("input an interger:"))
n1=int("%s" % a)
n2=int("%s%s" % (a,a))
n3=int("%s%s%s" % (a,a,a))
print(n1+n2+n3)
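# Worked example: a = 5 -> 5 + 55 + 555 = 615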
|
nilq/baby-python
|
python
|
# PROGRAM TO CALCULATE THE DIMENSIONS OF A PROPERTY-LINE FOOTING
import math
print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-= PROPERTY-LINE FOOTING OPTIMIZATION =-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
print("Always use a POINT (.) NOT a COMMA (,)")
lado_A = float(input("What is the size of side A? "))
lado_B = float(input("What is the size of side B? "))
area = lado_A*lado_B
A = math.sqrt((area/2))
B = A*2
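# Worked example (illustrative): lado_A = 2, lado_B = 4 -> area = 8 m²,
# A = sqrt(8/2) = 2.0 m and B = 4.0 m.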
print("\nO Lado maior A pode ser 2 ou 2.5 vezes maior que B.\n"
"Dessa forma, a sapata otimizada possui as seguintes Dimensões:\n ")
#print("Sua sapata possui uma área de: {} m²" .format(area))
print("O Lado A fica com {} m" .format(A))
print("O Lado B fica com {} m" .format(B))
print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-= OTIMIZAÇÃO DE SAPATA DE DIVISA =-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
|
nilq/baby-python
|
python
|
from math import e
import pandas as pd
from core.mallows import Mallows
from core.patterns import ItemPref, Pattern, PATTERN_SEP
def get_itempref_of_2_succ_3_succ_m(m=10) -> ItemPref:
return ItemPref({i: {i + 1} for i in range(1, m - 1)})
def get_test_case_of_itempref(pid=0):
row = pd.read_csv('data/test_cases_item_prefs.csv').iloc[pid]
pref = ItemPref.from_string(row['pref'])
mallows = Mallows(list(range(row['m'])), row['phi'])
p_exact = e ** row['log_p']
return pref, mallows, p_exact
def get_test_case_of_pattern(pid=0):
row = pd.read_csv('data/test_cases_label_patterns.csv').iloc[pid]
pattern = Pattern.from_string(row['pattern'])
mallows = Mallows(list(range(row['m'])), row['phi'])
p_exact = e ** row['log_p']
return pattern, mallows, p_exact
def get_test_case_of_patterns_from_movielens_2_labels(rid=0):
p_exact = pd.read_csv('data/output_movielens_ramp-vs-amp_2labels_exact.csv').loc[rid, 'p_exact']
row = pd.read_csv('data/input_movielens_ramp-vs-amp_2labels.csv').loc[rid]
center = eval(row['ranking'])
mallows = Mallows(center=center, phi=row['phi'])
patterns = [Pattern.from_string(pattern_str) for pattern_str in row['patterns'].split(PATTERN_SEP)]
return patterns, mallows, p_exact
def get_test_case_of_patterns_from_movielens_linear(rid=0):
row = pd.read_csv('data/input_movielens_ramp-vs-amp.csv').loc[rid]
center = eval(row['ranking'])
mallows = Mallows(center=center, phi=row['phi'])
patterns = [Pattern.from_string(pattern_str) for pattern_str in row['patterns'].split(PATTERN_SEP)]
return patterns, mallows
def get_test_case_of_patterns_from_movielens_5_labels(rid=0):
"""
Hard cases for rAMP are 36, 52, 68, 84, 100, 116, 132, 148
"""
row = pd.read_csv('data/input_movielens_ramp-vs-amp_5_labels.csv').loc[rid]
mallows = Mallows(center=eval(row['ranking']), phi=row['phi'])
patterns = [Pattern.from_string(pattern_str) for pattern_str in row['patterns'].split(' <> ')]
return patterns, mallows
def get_test_case_of_patterns_from_synthetic_4_labels(pid=0):
df_ans = pd.read_csv('data/test_cases_4_labels_sharing_BD_3_subs_convergence_by_ramp_3.csv')
df_ans = df_ans.groupby('rid').first()
p_exact = df_ans.loc[pid, 'p_exact']
row = pd.read_csv('data/test_cases_4_labels_sharing_BD_3_subs.csv').loc[pid]
patterns_str = row['pref(A>C|A>D|B>D)']
patterns = [Pattern.from_string(pattern_str) for pattern_str in patterns_str.split('\n')]
mallows = Mallows(list(range(row['m'])), row['phi'])
return patterns, mallows, p_exact
if __name__ == '__main__':
res = get_test_case_of_patterns_from_movielens_5_labels()
print(res)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Calendar',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('color', models.CharField(max_length=100)),
('privacy', models.IntegerField(default=0)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start', models.DateTimeField(default=django.utils.timezone.now)),
('end', models.DateTimeField(default=django.utils.timezone.now)),
('title', models.CharField(max_length=200)),
('location', models.CharField(max_length=200)),
('description', models.CharField(max_length=600)),
('calendar', models.ForeignKey(to='ourcalendar.Calendar')),
('users', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple script that queries GitHub for all open PRs, then finds the ones without
issue number in title, and the ones where the linked JIRA is already closed
"""
import os
import sys
sys.path.append(os.path.dirname(__file__))
import argparse
import json
import re
from github import Github
from jira import JIRA
from datetime import datetime
from time import strftime
try:
from jinja2 import Environment, BaseLoader
can_do_html = True
except:
can_do_html = False
def read_config():
parser = argparse.ArgumentParser(description='Find open Pull Requests that need attention')
parser.add_argument('--json', action='store_true', default=False, help='Output as json')
parser.add_argument('--html', action='store_true', default=False, help='Output as html')
parser.add_argument('--token', help='Github access token in case you query too often anonymously')
newconf = parser.parse_args()
return newconf
def out(text):
global conf
if not (conf.json or conf.html):
print(text)
def make_html(data):
    if not can_do_html:
        print("ERROR: Cannot generate HTML. Please install jinja2")
sys.exit(1)
global conf
template = Environment(loader=BaseLoader).from_string("""
<h1>Lucene Github PR report</h1>
<p>Number of open Pull Requests: {{ open_count }}</p>
<h2>PRs lacking JIRA reference in title ({{ no_jira_count }})</h2>
<ul>
{% for pr in no_jira %}
<li><a href="https://github.com/apache/lucene/pull/{{ pr.number }}">#{{ pr.number }}: {{ pr.created }} {{ pr.title }}</a> ({{ pr.user }})</li>
{%- endfor %}
</ul>
<h2>Open PRs with a resolved JIRA ({{ closed_jira_count }})</h2>
<ul>
{% for pr in closed_jira %}
<li><a href="https://github.com/apache/lucene/pull/{{ pr.pr_number }}">#{{ pr.pr_number }}</a>: <a href="https://issues.apache.org/jira/browse/{{ pr.issue_key }}">{{ pr.status }} {{ pr.resolution_date }} {{ pr.issue_key}}: {{ pr.issue_summary }}</a> ({{ pr.assignee }})</li>
{%- endfor %}
</ul>
""")
    return template.render(data)
def main():
global conf
conf = read_config()
    token = conf.token  # argparse already defaults this to None
if token:
gh = Github(token)
else:
gh = Github()
jira = JIRA('https://issues.apache.org/jira')
result = {}
repo = gh.get_repo('apache/lucene')
open_prs = repo.get_pulls(state='open')
out("Lucene Github PR report")
out("============================")
out("Number of open Pull Requests: %s" % open_prs.totalCount)
result['open_count'] = open_prs.totalCount
lack_jira = list(filter(lambda x: not re.match(r'.*\b(LUCENE)-\d{3,6}\b', x.title), open_prs))
result['no_jira_count'] = len(lack_jira)
lack_jira_list = []
for pr in lack_jira:
lack_jira_list.append({'title': pr.title, 'number': pr.number, 'user': pr.user.login, 'created': pr.created_at.strftime("%Y-%m-%d")})
result['no_jira'] = lack_jira_list
out("\nPRs lacking JIRA reference in title")
for pr in lack_jira_list:
out(" #%s: %s %s (%s)" % (pr['number'], pr['created'], pr['title'], pr['user'] ))
out("\nOpen PRs with a resolved JIRA")
has_jira = list(filter(lambda x: re.match(r'.*\b(LUCENE)-\d{3,6}\b', x.title), open_prs))
issue_ids = []
issue_to_pr = {}
for pr in has_jira:
jira_issue_str = re.match(r'.*\b((LUCENE)-\d{3,6})\b', pr.title).group(1)
issue_ids.append(jira_issue_str)
issue_to_pr[jira_issue_str] = pr
resolved_jiras = jira.search_issues(jql_str="key in (%s) AND status in ('Closed', 'Resolved')" % ", ".join(issue_ids))
closed_jiras = []
for issue in resolved_jiras:
pr_title = issue_to_pr[issue.key].title
pr_number = issue_to_pr[issue.key].number
assignee = issue.fields.assignee.name if issue.fields.assignee else None
closed_jiras.append({ 'issue_key': issue.key,
'status': issue.fields.status.name,
'resolution': issue.fields.resolution.name,
'resolution_date': issue.fields.resolutiondate[:10],
'pr_number': pr_number,
'pr_title': pr_title,
'issue_summary': issue.fields.summary,
'assignee': assignee})
closed_jiras.sort(key=lambda r: r['pr_number'], reverse=True)
for issue in closed_jiras:
out(" #%s: %s %s %s: %s (%s)" % (issue['pr_number'],
issue['status'],
issue['resolution_date'],
issue['issue_key'],
issue['issue_summary'],
issue['assignee'])
)
result['closed_jira_count'] = len(resolved_jiras)
result['closed_jira'] = closed_jiras
if conf.json:
print(json.dumps(result, indent=4))
if conf.html:
print(make_html(result))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nReceived Ctrl-C, exiting early')
|
nilq/baby-python
|
python
|
currency = [10000, 5000, 2000, 1000, 500, 200, 100, 25, 10, 5, 1]
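# Denominations in hundredths: 100.00, 50.00, 20.00, 10.00, 5.00, 2.00, 1.00,
# 0.25, 0.10, 0.05 and 0.01; the output concatenates the greedy count for
# each denomination in this order.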
for _ in range(int(input())):
money = input()
money = int(money[:-3] + money[-2:])
out = ""
for c in currency:
out += str(money // c)
money %= c
print(out)
|
nilq/baby-python
|
python
|
import tkinter as tk
from tkinter import Frame, Button, PanedWindow, Text
from tkinter import X, Y, BOTH
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib import pyplot as plt
class MatplotlibWindow(object):
def __init__(self,root):
self.root=root
fig,ax=plt.subplots()
xs=np.arange(-np.pi,np.pi,0.001)
ys=np.sin(xs)
ax.plot(xs,ys)
plot_frame=Frame(self.root)
self.root.add(plot_frame)
canvas=FigureCanvasTkAgg(fig,master=plot_frame)
toolbar = NavigationToolbar2TkAgg(canvas, plot_frame)
toolbar.update()
self.canvas=canvas
class MatplotlibWindow2(object):
def __init__(self,root):
self.root=root
fig,ax=plt.subplots()
xs=np.arange(-np.pi,np.pi,0.001)
ys=np.cos(xs)
ax.plot(xs,ys)
plot_frame=Frame(self.root)
self.root.add(plot_frame)
canvas=FigureCanvasTkAgg(fig,master=plot_frame)
toolbar = NavigationToolbar2TkAgg(canvas, plot_frame)
toolbar.update()
self.canvas=canvas
def main():
root=tk.Tk()
main_paned_window = PanedWindow(root)
main_paned_window.pack(fill=BOTH, expand=1)
tone_curve_paned_window=PanedWindow(main_paned_window)
main_paned_window.add(tone_curve_paned_window)
tone_curve_window=PanedWindow(tone_curve_paned_window,relief=tk.GROOVE,bd=3,orient=tk.VERTICAL)
mlp_tone_curve_window=MatplotlibWindow2(tone_curve_window)
mlp_tone_curve_window.canvas.get_tk_widget().pack(fill=tk.BOTH,expand=True)
#text_panel_left = Text(main_paned_window, height=6, width =15,relief=tk.GROOVE,bd=2)
#main_paned_window.add(text_panel_left)
sub_paned_window = PanedWindow(main_paned_window, orient=tk.VERTICAL)
#plot sin curve
plot_paned_window=PanedWindow(sub_paned_window,relief=tk.GROOVE,bd=3,orient=tk.VERTICAL)
mlp_window=MatplotlibWindow(plot_paned_window)
mlp_window.canvas.get_tk_widget().pack(fill=tk.BOTH,expand=True)
main_paned_window.add(sub_paned_window)
bottom_pane_text = Text(sub_paned_window, height=3, width =3, relief=tk.SUNKEN,bd=2)
sub_paned_window.add(plot_paned_window)
sub_paned_window.add(bottom_pane_text)
button=Button(root,text="Hello")
button.pack()
root.mainloop()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import csv
with open(input('Airodump-ng .csv file: ')) as arquivoCsv:
    print('\n Network                  Password')
    try:
        reader = csv.reader(arquivoCsv)
        for linha in reader:
            if not linha:  # Skip empty rows
                pass
            else:
                if linha[0] == 'Station MAC':  # Stop here: the wireless networks section of the .csv ends
                    break
                else:
                    dicio = {'BSSID': linha[0], 'ESSID': linha[13]}  # Dictionary with the network's MAC and name
                    if dicio['BSSID'] == 'BSSID':  # Ignore the header line of the .csv
                        pass
                    else:
                        if 'VIVO-' in dicio['ESSID']:  # Only show VIVO- networks
                            senha = dicio['BSSID'][3:-5].replace(':', '') + dicio['ESSID'][6:]
                            print(dicio['ESSID'], senha)
    finally:
        print('\n')
|
nilq/baby-python
|
python
|
from .node_base import NodeBase
from .exceptions import NodeRegistrationError, NodeNotFoundError
class NodeFactory(object):
def __init__(self) -> None:
super().__init__()
self._nodes = {}
self._names = {}
def registerNode(self, node: NodeBase):
if node is None:
raise ValueError('node param is invalid')
name = node.NODE_NAME
node_path = node.getNodePath()
if name in self._names:
raise NodeRegistrationError(f'Node name "{name}" is already registered')
if self._nodes.get(node_path.lower()):
raise NodeRegistrationError(f'Node "{node_path}" is already registered')
self._nodes[node_path.lower()] = node
self._names[name] = node_path.lower()
def getNodesStructures(self) -> list:
result = []
for identifier, node in self._nodes.items():
result.append(node.getNodeStructure())
return result
def getNodeClass(self, path) -> NodeBase:
if not self.isPathValid(path):
raise ValueError('invalid path')
nodeClass = self._nodes.get(path.lower(), None)
if not nodeClass:
raise NodeNotFoundError(f'Node {path} was not found')
return nodeClass
def isPathValid(self, path: str):
if not path:
return False
return path.find(' ') == -1
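# Hypothetical usage sketch (MyNode is illustrative, not part of this module):
# a concrete node subclasses NodeBase, defines NODE_NAME and getNodePath(),
# then gets registered and resolved through the factory.
#
#   factory = NodeFactory()
#   factory.registerNode(MyNode)
#   node_cls = factory.getNodeClass(MyNode.getNodePath())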
|
nilq/baby-python
|
python
|
import asyncio
import hikari
import tanjun
from hikari.interactions.base_interactions import ResponseType
from hikari.messages import ButtonStyle
from hikari_testing.bot.client import Client
component = tanjun.Component()
@component.with_slash_command
@tanjun.as_slash_command("paginate", "Paginate through a list of options!")
async def command_paginate(ctx: tanjun.abc.Context) -> None:
values = ("Page 1", "Page 2", "Page 3", "Page 4", "Page 5", "Page 6")
index = 0
button_menu = (
ctx.rest.build_action_row()
.add_button(ButtonStyle.SECONDARY, "<<")
.set_label("<<")
.add_to_container()
.add_button(ButtonStyle.PRIMARY, "<")
.set_label("<")
.add_to_container()
.add_button(ButtonStyle.PRIMARY, ">")
.set_label(">")
.add_to_container()
.add_button(ButtonStyle.SECONDARY, ">>")
.set_label(">>")
.add_to_container()
)
await ctx.respond(values[0], component=button_menu)
while True:
try:
event = await ctx.client.events.wait_for(hikari.InteractionCreateEvent, timeout=60)
        except asyncio.TimeoutError:
            await ctx.edit_initial_response("Timed out.", components=[])
            return  # stop paginating once the menu times out
else:
if event.interaction.custom_id == "<<":
index = 0
elif event.interaction.custom_id == "<":
index = (index - 1) % len(values)
elif event.interaction.custom_id == ">":
index = (index + 1) % len(values)
elif event.interaction.custom_id == ">>":
index = len(values) - 1
await ctx.edit_initial_response(values[index])
await event.interaction.create_initial_response(
ResponseType.DEFERRED_MESSAGE_UPDATE,
values[index]
)
@tanjun.as_loader
def load_component(client: Client) -> None:
client.add_component(component.copy())
|
nilq/baby-python
|
python
|
# Holly Zhang sp20-516-233 E.Multipass.2
# testing code
# NOTE: the import below is an assumption about the cloudmesh-multipass
# package layout; adjust it to wherever Provider lives in your checkout.
from cloudmesh.multipass.Provider import Provider
p = Provider()
# TestMultipass.test_provider_run_os
r1 = p.run(command="uname -a", executor="os")
print(r1)
#Linux cloudmesh 4.15.0-74-generic #84-Ubuntu SMP Thu Dec 19 08:06:28 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
# TestMultipass.test_provider_run_live
r2 = p.run(command="uname -a", executor="live")
print(r2)
#
|
nilq/baby-python
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rudra Rugge
from cfgm_common import svc_info
from vnc_api.vnc_api import *
from instance_manager import InstanceManager
class VirtualMachineManager(InstanceManager):
def _create_svc_vm(self, instance_name, image_name, nics,
flavor_name, st_obj, si_obj, avail_zone):
proj_name = si_obj.get_parent_fq_name()[-1]
if flavor_name:
flavor = self._nc.oper('flavors', 'find', proj_name,
name=flavor_name)
else:
flavor = self._nc.oper('flavors', 'find', proj_name, ram=4096)
if not flavor:
return
image = self._nc.oper('images', 'find', proj_name, name=image_name)
if not image:
return
# create port
nics_with_port = []
for nic in nics:
nic_with_port = {}
vmi_obj = self._create_svc_vm_port(nic, instance_name,
st_obj, si_obj)
nic_with_port['port-id'] = vmi_obj.get_uuid()
nics_with_port.append(nic_with_port)
# launch vm
self.logger.log('Launching VM : ' + instance_name)
nova_vm = self._nc.oper('servers', 'create', proj_name,
name=instance_name, image=image,
flavor=flavor, nics=nics_with_port,
availability_zone=avail_zone)
nova_vm.get()
self.logger.log('Created VM : ' + str(nova_vm))
# create vnc VM object and link to SI
try:
proj_obj = self._vnc_lib.project_read(
fq_name=si_obj.get_parent_fq_name())
vm_obj = VirtualMachine(nova_vm.id)
vm_obj.uuid = nova_vm.id
self._vnc_lib.virtual_machine_create(vm_obj)
except RefsExistError:
vm_obj = self._vnc_lib.virtual_machine_read(id=nova_vm.id)
vm_obj.add_service_instance(si_obj)
self._vnc_lib.virtual_machine_update(vm_obj)
self.logger.log("Info: VM %s updated SI %s" %
(vm_obj.get_fq_name_str(), si_obj.get_fq_name_str()))
return nova_vm
def create_service(self, st_obj, si_obj):
si_props = si_obj.get_service_instance_properties()
st_props = st_obj.get_service_template_properties()
if st_props is None:
self.logger.log("Cannot find service template associated to "
"service instance %s" % si_obj.get_fq_name_str())
return
flavor = st_props.get_flavor()
image_name = st_props.get_image_name()
if image_name is None:
self.logger.log("Error: Image name not present in %s" %
(st_obj.name))
return
# populate nic information
nics = self._get_nic_info(si_obj, si_props, st_props)
# get availability zone
avail_zone = None
if st_props.get_availability_zone_enable():
avail_zone = si_props.get_availability_zone()
elif self._args.availability_zone:
avail_zone = self._args.availability_zone
# create and launch vm
vm_back_refs = si_obj.get_virtual_machine_back_refs()
proj_name = si_obj.get_parent_fq_name()[-1]
max_instances = si_props.get_scale_out().get_max_instances()
self.db.service_instance_insert(si_obj.get_fq_name_str(),
{'max-instances': str(max_instances),
'state': 'launching'})
instances = []
for inst_count in range(0, max_instances):
instance_name = self._get_instance_name(si_obj, inst_count)
si_info = self.db.service_instance_get(si_obj.get_fq_name_str())
prefix = self.db.get_vm_db_prefix(inst_count)
if prefix + 'name' not in si_info.keys():
vm = self._create_svc_vm(instance_name, image_name, nics,
flavor, st_obj, si_obj, avail_zone)
if not vm:
continue
vm_uuid = vm.id
state = 'pending'
else:
vm = self._nc.oper('servers', 'find', proj_name,
id=si_info[prefix + 'uuid'])
if not vm:
continue
vm_uuid = si_info[prefix + 'uuid']
state = 'active'
# store vm, instance in db; use for linking when VM is up
vm_db_entry = self._set_vm_db_info(inst_count, instance_name,
vm_uuid, state)
self.db.service_instance_insert(si_obj.get_fq_name_str(),
vm_db_entry)
instances.append({'uuid': vm_uuid})
self.db.service_instance_insert(si_obj.get_fq_name_str(),
{'state': 'active'})
# uve trace
self.logger.uve_svc_instance(si_obj.get_fq_name_str(),
status='CREATE', vms=instances,
st_name=st_obj.get_fq_name_str())
def delete_service(self, si_fq_str, vm_uuid, proj_name=None):
self.db.remove_vm_info(si_fq_str, vm_uuid)
try:
self._vnc_lib.virtual_machine_delete(id=vm_uuid)
except (NoIdError, RefsExistError):
pass
vm = self._nc.oper('servers', 'find', proj_name, id=vm_uuid)
if not vm:
raise KeyError
try:
vm.delete()
except Exception:
pass
def check_service(self, si_obj, proj_name=None):
status = 'ACTIVE'
vm_list = {}
vm_back_refs = si_obj.get_virtual_machine_back_refs()
for vm_back_ref in vm_back_refs or []:
vm = self._nc.oper('servers', 'find', proj_name,
id=vm_back_ref['uuid'])
if vm:
vm_list[vm.name] = vm
else:
try:
self._vnc_lib.virtual_machine_delete(id=vm_back_ref['uuid'])
except (NoIdError, RefsExistError):
pass
# check status of VMs
si_props = si_obj.get_service_instance_properties()
max_instances = si_props.get_scale_out().get_max_instances()
for inst_count in range(0, max_instances):
instance_name = self._get_instance_name(si_obj, inst_count)
if instance_name not in vm_list.keys():
status = 'ERROR'
elif vm_list[instance_name].status == 'ERROR':
try:
self.delete_service(si_obj.get_fq_name_str(),
vm_list[instance_name].id, proj_name)
except KeyError:
pass
status = 'ERROR'
# check change in instance count
if vm_back_refs and (max_instances > len(vm_back_refs)):
status = 'ERROR'
elif vm_back_refs and (max_instances < len(vm_back_refs)):
for vm_back_ref in vm_back_refs:
try:
self.delete_service(si_obj.get_fq_name_str(),
vm_back_ref['uuid'], proj_name)
except KeyError:
pass
status = 'ERROR'
return status
def update_static_routes(self, si_obj):
# get service instance interface list
si_props = si_obj.get_service_instance_properties()
si_if_list = si_props.get_interface_list()
if not si_if_list:
return
st_list = si_obj.get_service_template_refs()
fq_name = st_list[0]['to']
st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
st_props = st_obj.get_service_template_properties()
st_if_list = st_props.get_interface_type()
for idx in range(0, len(si_if_list)):
si_if = si_if_list[idx]
static_routes = si_if.get_static_routes()
if not static_routes:
static_routes = {'route':[]}
# update static routes
try:
rt_fq_name = self._get_if_route_table_name(
st_if_list[idx].get_service_interface_type(),
si_obj)
rt_obj = self._vnc_lib.interface_route_table_read(
fq_name=rt_fq_name)
rt_obj.set_interface_route_table_routes(static_routes)
self._vnc_lib.interface_route_table_update(rt_obj)
except NoIdError:
pass
def delete_iip(self, vm_uuid):
try:
vm_obj = self._vnc_lib.virtual_machine_read(id=vm_uuid)
except NoIdError:
return
vmi_back_refs = vm_obj.get_virtual_machine_interface_back_refs()
for vmi_back_ref in vmi_back_refs or []:
try:
vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_back_ref['uuid'])
except NoIdError:
continue
iip_back_refs = vmi_obj.get_instance_ip_back_refs()
for iip_back_ref in iip_back_refs or []:
try:
self._vnc_lib.instance_ip_delete(id=iip_back_ref['uuid'])
except (NoIdError, RefsExistError):
continue
|
nilq/baby-python
|
python
|
# Date : 03/31/2020
# Author : mcalyer
# Module : scanse_control.py
# Description : Code to explore Scanse LIDAR capabilites
# Python : 2.7
# Version : 0.2
# References :
#
# 1. Scanse User Manual V1.0 , 04/20/2017
# 2. Scanse sweep-ardunio source
# 3. sweep-sdk-1.30_10_27_2017
#
# Hardware : PC , Scanse Hardware version 1.0 , FW version 01 , 2016 KickStarter
# Not available today
#
# Notes:
# 0. Really ?! , Unit appears to wooble during spinning
# 1.
# 2. Power : 5V at 450 - 500 ma
# 3. Motor Speed : if setting is '0''0' motor off but when power cycled resets to 5HZ
# 4. Embedded use : power control scanse : control motor , power usage , fail safe device reset
# 5. There is python using driver example for Linux , see sweepy in SDK
# 6. Need to look at driver source
# 7. Scanse Status LED :
# Blinking Green = Start up OK , no ouput
# Solid Blue = Normal operation
# Solid Red = Internal communication error
# 8. Example Scan Seetings :
# Speed : 5HZ
# Sample rate : 500 - 600 HZ
# Time required : .2 sec (approx)
# Number of samples : 60 (approx) for 1 rev (360) ? , see angle problem
# Angle Delta : Generally 3.XX degrees (approx)
# Angle problem : See in 0 - 120 degreee range , large (10 degres) angle deltas
# Revolution : 1 rev , 360 degrees
# Zero Angle : One near zero reading in samples
# 9. Angular resolution : 1.4 - 7.2 degrees based on rotational speed (Other factors ?)
#
# Acknownledgements : None
#
# Releases:
# 03/28/2020 : First
# 03/31/2020 : Version 0.2
# 1. Fixed DX stop issue: Fixed scanse DX command return number of bytes ,
# added scanse flush routine , class Scanse_Control : scanse_flush()
# 2. Added get scan based on number of samples requested , also does not rely on large serial input buffer ,
# class Scanse_Control : rx_scan_samples().
# Times observed for 60 samples .150 - .23 seconds @ motor speed = 5HZ , LIDAR sample rate = 500 - 600 HZ
# 3. Added scan data to PGM file , helps visualize point cloud
#
###########################################################################
################################### Imports ###############################
import time
import serial
import sys
from scanse_pgm import *
################################## Scanse Serial Port #########################################
class Scanse_Control:
def __init__(self):
self.uart = None
self.port = None
def connect(self, port = None):
if self.uart:
return 0
if port is None : port = self.port
# Open serial port connection
# port is a string based on OS:
# Examples: Windows 'COM12' , Linux: '/dev/ttyACM0'
try:
self.uart = serial.Serial(port, baudrate=115200, timeout=1)
self.port = port
return 0 , None
except:
self.uart = None
self.port = None
return 1 , 'Serial port connection error !'
def disconnect(self):
if self.uart:
self.uart.close()
self.uart = None
    def tx(self, cmd_list):
        try:
            # commands arrive as lists of characters; join them into one string
            self.uart.write(''.join(cmd_list))
            return 0, None
        except serial.SerialException:
            return 1, 'Command: Serial Port Failed'
def rx(self, n, delay = 0):
if delay != 0 : time.sleep(delay)
try:
nb = self.uart.inWaiting()
#print(nb)
if nb == 0: return 1 , 'RxBytes: Zero serial bytes'
if n == '!': n = nb
if n != nb:
self.uart.flush()
return 1 , 'RxBytes: Expected : ' + str(n) + ' Received : ' + str(nb)
data = self.uart.read(n)
return 0 , data
except serial.SerialException:
return 1, 'RxBytes: Serial Port Failed'
def rx_scan(self):
try:
nb = self.uart.inWaiting()
data = self.uart.read(nb)
except:
return None
return bytes(data)
def rx_scan_samples(self, nb):
        data = b''  # accumulate raw sample bytes here
b = []
t = 0
try:
while(nb > 0):
t = t + 1
time.sleep(.001)
n = self.uart.inWaiting()
if n == 0 :
continue
b = self.uart.read(n)
data = data + b
nb = nb - n
except:
return 1 , t , 'rx_scan_sample error'
return 0 , t, data
def scanse_flush(self):
nb = self.uart.inWaiting()
t = 1000
while(nb != 0):
d = self.uart.read(nb)
time.sleep(.001)
nb = self.uart.inWaiting()
t = t - 1
if t == 0:
break;
return t
def flush(self):
self.uart.flush()
scanse_ctrl = Scanse_Control()
################################## Scanse Interface #########################################
class Scanse_IF():
def __init__ (self, IF , cmd , rx_bytes , decode = None):
self.IF = IF
self.cmd = cmd #['I', 'V'] + ['\n']
self.rx_nb = rx_bytes
self.data = None
self._decode = decode
self.delay = .050
def txrx(self, arg = None):
if arg is not None : self.cmd = self.cmd + arg
self.IF.tx(self.cmd + ['\n'])
if 0 == self.rx_nb : return 0, None
time.sleep(self.delay)
result, self.data = self.IF.rx(self.rx_nb)
if result : return 1, self.data
if self.data[0] != self.cmd[0] or self.data[1] != self.cmd[1] : return 1, None
return 0, self.data
def decode(self):
if self._decode is None : return self.data
return self._decode(self.data)
# IV Decode Model , Protocol , FWV , HWV , Serial Number
iv_decode = lambda x : (x[2:7] , x[7:9][::-1] , x[9:11][::-1] , x[11] , x[12:20])
scanse_iv = Scanse_IF(scanse_ctrl,['I' , 'V'] , 21 , iv_decode )
# Set Motor_Speed
# speed 0 - 10 hz , ['0','0'] - ['1','0']
scanse_ms = Scanse_IF(scanse_ctrl,['M' , 'S'] , 9)
# Motor Info
mi_decode = lambda x : (x[2:4])
scanse_mi = Scanse_IF(scanse_ctrl,['M' , 'I'] , 5 , mi_decode)
# Motor Ready
mz_decode = lambda x : (x[2:4])
scanse_mz = Scanse_IF(scanse_ctrl,['M' , 'Z'] , 5 , mz_decode)
# Device Information
di_decode = lambda x : (x[2:8] , x[8] , x[9] , x[10] , x[11:13] , x[13:17])
scanse_di = Scanse_IF(scanse_ctrl,['I' , 'D'] , 18 , di_decode)
# LIDAR Get Sample Rate
lidar_decode = lambda x : (x[2:4])
scanse_lidar_get_sr = Scanse_IF(scanse_ctrl,['L' , 'I'] , 5 , lidar_decode)
# LIDAR , Set Sample Rate
# ['0','1'] = 500 - 600 HZ
# ['0','2'] = 750 - 800 HZ
# ['0','3'] = 1000 - 1075 HZ
lidar_sr_decode = lambda x : (x[5:7])
scanse_lidar_set_sr = Scanse_IF(scanse_ctrl,['L' , 'R'] , 9 , lidar_sr_decode)
# Reset Device
scanse_reset = Scanse_IF(scanse_ctrl,['R' , 'R'] , 0)
# Stop Data Aquisition
scanse_stop_data = Scanse_IF(scanse_ctrl,['D' , 'X'] , 6)
# Start Data Aquisition
scanse_start_data = Scanse_IF(scanse_ctrl,['D' , 'S'] , 7)
############################## Data Acquisition #############################################
def measurement(s):
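    # Each 7-byte sample begins with a sync/error byte; bytes 1-2 carry the
    # azimuth in 1/16-degree fixed point (low byte first) and bytes 3-4 the
    # distance in cm, which is what this helper decodes.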
d = (ord(s[4]) << 8) + ord(s[3])
a_int = (ord(s[2]) << 8) + ord(s[1])
return [d, a_int/16.0]
def get_scan(delay):
scanse_ctrl.flush()
# Send DS Command , start acquisition
scanse_ctrl.tx(['D' , 'S'] + ['\n'])
# Wait for data
time.sleep(delay)
# Get data
scan = scanse_ctrl.rx_scan()
if scan is None or len(scan) < 2 : return 1,0,0, 'No Scan Data'
# Check header bytes
if scan[0] != 'D' or scan[1] != 'S' : return 1, 0, 0, 'No Scan DS header'
# Create List of samples
scan_data = []
l = len(scan)
ns = ((l - 6)/7) - 1
s = scan[6:(l - 6)]
x = 0
z = None
n = ns
for i in range(0,n):
x = i * 7
q = s[x:x+7]
w = ord(q[0])
if w & 0x01 : z = i
if w & 0xFE : return 1, i, w, 'Scan Packet Error'
da = measurement(q)
# Filter out measurements with d == 1 , error
if da[0] == 1:
ns = ns - 1
continue
scan_data.append(da)
# Send DX Command , stop acquisition
scanse_stop_data.txrx()
# Flush scanse uart
scanse_ctrl.scanse_flush()
return 0, ns, z, scan_data
############################### Test ########################################################
def main(sys_argv):
if len(sys_argv) < 2: print("More Args Please !") ; exit(0)
port = sys_argv[1]
# Scanse Connect
result , message = scanse_ctrl.connect(port)
if result: print message ; exit(0)
print "\n"
# Scanse Flush
scanse_ctrl.scanse_flush()
# Get Version Information
scanse_ctrl.flush()
result , info = scanse_iv.txrx()
print(info if result else 'Version :' + str(scanse_iv.decode()))
#Get Device Information
scanse_ctrl.flush()
result , info = scanse_di.txrx()
print(info if result else 'Device Info : ' + str(scanse_di.decode()))
# Set LIDAR sample rate
# Lower sample rate , more light , range measurements more accurate
result , status = scanse_lidar_set_sr.txrx(['0','1'])
print(status if result else 'LIDAR Set Sample Rates Status : ' + str(scanse_lidar_set_sr.decode()))
# Get Motor Speed
result, motor_speed = scanse_mi.txrx()
ms = scanse_mi.decode()
print(motor_speed if result else 'Motor Speed : ' + str(ms))
#Get LIDAR Info
result , info = scanse_lidar_get_sr.txrx()
print(info if result else 'LIDAR Sample Rate : ' + str(scanse_lidar_get_sr.decode()))
# Get 10 Scans
data = []
for i in range(0,10):
r, n, z , data = get_scan(.225)
if r : print(data) ; break
if data != []:
print('Samples : ' + str(n) + ' Zero Index : ' + str(z))
for i in range(0,n):
print(i,data[i])
print('\n')
# Scan sorted by distance
ds = sorted(data,key = lambda data: data[0])
# Scan sorted by angle
ans = sorted(data,key = lambda data: data[1])
print('Distance Min :' + str(ds[0]))
print('Angle Min :' + str(ans[0]))
print('\n')
# PGM File
try:
scan_2_pgm(ds, int(ds[::-1][0][0]))
except:
pass
# Exit
scanse_ctrl.disconnect()
exit(0)
if __name__ == "__main__":
# one argument COM port , Example: Windows 'COM12' , Linux: '/dev/ttyACM0'
main(sys.argv)
|
nilq/baby-python
|
python
|
from re import findall,IGNORECASE
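# Rule: a word is "not easy" when it contains a run of 3 or more consecutive
# consonants (case-insensitive); e.g. 'monster' contains the run 'nst'.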
for _ in range(int(input())):
s=input()
f=sorted(findall(r'[bcdfghjklmnpqrstvwxyz]+',s,IGNORECASE),key=lambda x: len(x),reverse=True)
    print(f'{s} is not easy' if len(f[0]) >= 3 else f'{s} is easy')
|
nilq/baby-python
|
python
|
__author__ = 'Wenju Sun'
import urllib2
"""
This script tries to download given file via http and given the final status summary
"""
MAX_VALUE=10
MIN_VALUE=0
WARN_VALUE=0
CRITICAL_VALUE=0
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
STATUS_TEXT='OK'
STATUS_CODE=STATE_OK
murl="http://dl-3m.svc.mcitech.cn/items/60/185/3F4CBC95EF6DA685D498CC2090DDE6FB.zip"
def download(url):
    # Minimal completion sketch: fetch the URL and record a Nagios-style status
    # (the original function body was truncated to a bare 'urllib2').
    global STATUS_TEXT, STATUS_CODE
    try:
        body = urllib2.urlopen(url).read()
        STATUS_TEXT = 'OK - downloaded %d bytes' % len(body)
        STATUS_CODE = STATE_OK
    except urllib2.URLError as e:
        STATUS_TEXT = 'CRITICAL - %s' % e
        STATUS_CODE = STATE_CRITICAL
    return STATUS_CODE
if __name__ == '__main__':
    download(murl)
    print(STATUS_TEXT)
|
nilq/baby-python
|
python
|
from pathlib import Path
import pytest
from seq2rel.training.callbacks.concatenation_augmentation import ConcatenationAugmentationCallback
class TestConcatenationAugmentationCallback:
def test_aug_frac_value_error(self) -> None:
with pytest.raises(ValueError):
_ = ConcatenationAugmentationCallback(
serialization_dir="", train_data_path="", aug_frac=1.1
)
with pytest.raises(ValueError):
_ = ConcatenationAugmentationCallback(
serialization_dir="", train_data_path="", aug_frac=-0.1
)
def test_on_start(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Ensure that on object instantiation, there are two training examples.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 2
# Ensure that on training start, there are two plus one training examples.
concatenation_augmentation.on_start(trainer="")
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 3
def test_on_epoch(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Ensure that on object instantiation, there are two training examples.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 2
# Ensure that on epoch end, there are two plus one training examples.
concatenation_augmentation.on_epoch(trainer="")
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 3
def test_on_end(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# This is the train data BEFORE any augmentation.
expected = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
# Purposefully modify the training data on disk, and check that `on_end` restores it
Path(concatenation_augmentation._train_data_path).write_text(expected[0].strip())
concatenation_augmentation.on_end(trainer="")
actual = Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
assert actual == expected
def test_format_instance(
self, concatenation_augmentation: ConcatenationAugmentationCallback
) -> None:
first_instance = "I am the first instance"
second_instance = "I am the second instance"
# Test with no sep_token provided
sep_token = " "
expected = first_instance + sep_token + second_instance
actual = concatenation_augmentation._format_instance(first_instance, second_instance)
assert actual == expected
# Test with sep_token provided
concatenation_augmentation._sep_token = "[SEP]"
expected = first_instance + f" {concatenation_augmentation._sep_token} " + second_instance
actual = concatenation_augmentation._format_instance(first_instance, second_instance)
assert actual == expected
def test_augment(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Load the training data and create a concatenated example.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
first_source, first_target = train_data[0].split("\t")
second_source, second_target = train_data[1].split("\t")
concatenated_one = f"{first_source} {second_source}\t{first_target} {second_target}"
concatenated_two = f"{second_source} {first_source}\t{second_target} {first_target}"
        # This works because there are only two possible augmented examples given
        # `concatenation_augmentation._train_data` and `concatenation_augmentation._aug_frac`.
expected_one = train_data + [concatenated_one]
expected_two = train_data + [concatenated_two]
actual = concatenation_augmentation._augment()
assert actual == expected_one or actual == expected_two
|
nilq/baby-python
|
python
|
from fastai import *
from fastai.vision import *
path = Path('../data/')
tfms = get_transforms(flip_vert=True)
np.random.seed(352)
data = ImageDataBunch.from_folder(path, valid_pct=0.2, ds_tfms=tfms, size=224).normalize(imagenet_stats)
data.show_batch(3, figsize=(15, 11))
# create a learner based on a pretrained densenet 121 model
learn = cnn_learner(data, models.densenet121, metrics=error_rate)
# use the learning rate finder to find the optimal learning rate
learn.lr_find()
learn.recorder.plot()
lr = 1e-2 # learning rate chosen based on the result of the learning rate finder
# train for 5 epochs
learn.fit_one_cycle(5, slice(lr))
# save the model
learn.save('stage-1-dn121')
# unfreeze and finetune
learn.load('stage-1-dn121')
learn.unfreeze()
learn.lr_find()
# use the learning rate finder again
learn.recorder.plot()
learn.fit_one_cycle(10, slice(1e-4, lr/10))
learn.save('stage-2-dn121')
# export as pickle file for deployment
learn.export('dn121.pkl')
# model interpretation
interp = ClassificationInterpretation.from_learner(learn)
# plot images where the model did not perform well
interp.plot_top_losses(4)
# plot confusion matrix
interp.plot_confusion_matrix(dpi=130)
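# Hedged deployment sketch (fastai v1 API; the file name matches learn.export above,
# the image path is a placeholder):
# learn_inf = load_learner(path, 'dn121.pkl')
# img = open_image('some_image.jpg')
# pred_class, pred_idx, probs = learn_inf.predict(img)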
|
nilq/baby-python
|
python
|
from click import ClickException, echo
class ProfileBuilderException(ClickException):
"""Base exceptions for all Profile Builder Exceptions"""
class Abort(ProfileBuilderException):
"""Abort the build"""
def show(self, **kwargs):
echo(self.format_message())
class ConfigurationError(ProfileBuilderException):
"""Error in configuration"""
class BuildError(ProfileBuilderException):
"""Error during the build process"""
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
"""
目标:能够使用多线程实现同时接收多个客户端的多条信息
1.TCP服务器端
(1) 实现指定端口监听
(2) 实现服务器端地址重用,避免"Address in use"错误。
(3) 能够支持多个客户端连接.
(4) 能够支持支持不同的客户端同时收发消息(开启子线程)
(5) 服务器端主动关闭服务器,子线程随之结束.
"""
# 1. 该程序可以支持多客户端连接.
# 2. 该程序可以支持多客户端同时发送消息.
# 1. 导入模块
import socket
import threading
def recv_msg(new_client_socket, ip_port):
    # Receive messages from the TCP client in a loop.
    while True:
        # 7. Receive the data sent by the client.
        recv_data = new_client_socket.recv(1024)
        if recv_data:
            # 8. Decode the data and print it.
            recv_text = recv_data.decode()
            print("Received from {i}: {m}".format(i=str(ip_port), m=recv_text))
        else:
            break
    # 9. Close the connection to the current client.
    new_client_socket.close()
# 2. Create the socket
tcp_serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 3. Allow the address to be reused
tcp_serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
# 4. Bind the port.
tcp_serversocket.bind(("", 8888))
# 5. Start listening; the socket switches from active to passive.
tcp_serversocket.listen(128)
while True:
    # 6. Accept a client connection.
    new_client_socket, ip_port = tcp_serversocket.accept()
    print("New client connected:", ip_port)
    # Create a thread per client
    thre_recvmsg = threading.Thread(target=recv_msg, args=(new_client_socket, ip_port))
    # Daemonize so the thread exits with the server
    thre_recvmsg.setDaemon(True)
    # Start the thread
    thre_recvmsg.start()
tcp_serversocket.close()  # unreachable while the accept loop runs; kept for symmetry
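# Hedged test-client sketch (run in a separate process; not part of the server above):
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('127.0.0.1', 8888))
# client.send('hello'.encode())
# client.close()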
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
import datetime
import itertools
from django.test import TestCase
from django.db import IntegrityError
from django.db.models import Prefetch
from modelcluster.models import get_all_child_relations
from modelcluster.queryset import FakeQuerySet
from tests.models import Band, BandMember, Place, Restaurant, SeafoodRestaurant, Review, Album, \
Article, Author, Category, Person, Room, House, Log, Dish, MenuItem, Wine
class ClusterTest(TestCase):
def test_can_create_cluster(self):
beatles = Band(name='The Beatles')
self.assertEqual(0, beatles.members.count())
beatles.members = [
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
]
# we should be able to query this relation using (some) queryset methods
self.assertEqual(2, beatles.members.count())
self.assertEqual('John Lennon', beatles.members.all()[0].name)
self.assertEqual('Paul McCartney', beatles.members.filter(name='Paul McCartney')[0].name)
self.assertEqual('Paul McCartney', beatles.members.filter(name__exact='Paul McCartney')[0].name)
self.assertEqual('Paul McCartney', beatles.members.filter(name__iexact='paul mccartNEY')[0].name)
self.assertEqual(0, beatles.members.filter(name__lt='B').count())
self.assertEqual(1, beatles.members.filter(name__lt='M').count())
self.assertEqual('John Lennon', beatles.members.filter(name__lt='M')[0].name)
self.assertEqual(1, beatles.members.filter(name__lt='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.filter(name__lt='Paul McCartney')[0].name)
self.assertEqual(2, beatles.members.filter(name__lt='Z').count())
self.assertEqual(0, beatles.members.filter(name__lte='B').count())
self.assertEqual(1, beatles.members.filter(name__lte='M').count())
self.assertEqual('John Lennon', beatles.members.filter(name__lte='M')[0].name)
self.assertEqual(2, beatles.members.filter(name__lte='Paul McCartney').count())
self.assertEqual(2, beatles.members.filter(name__lte='Z').count())
self.assertEqual(2, beatles.members.filter(name__gt='B').count())
self.assertEqual(1, beatles.members.filter(name__gt='M').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__gt='M')[0].name)
self.assertEqual(0, beatles.members.filter(name__gt='Paul McCartney').count())
self.assertEqual(2, beatles.members.filter(name__gte='B').count())
self.assertEqual(1, beatles.members.filter(name__gte='M').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__gte='M')[0].name)
self.assertEqual(1, beatles.members.filter(name__gte='Paul McCartney').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__gte='Paul McCartney')[0].name)
self.assertEqual(0, beatles.members.filter(name__gte='Z').count())
self.assertEqual(1, beatles.members.filter(name__contains='Cart').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__contains='Cart')[0].name)
self.assertEqual(1, beatles.members.filter(name__icontains='carT').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__icontains='carT')[0].name)
self.assertEqual(1, beatles.members.filter(name__in=['Paul McCartney', 'Linda McCartney']).count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__in=['Paul McCartney', 'Linda McCartney'])[0].name)
self.assertEqual(1, beatles.members.filter(name__startswith='Paul').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__startswith='Paul')[0].name)
self.assertEqual(1, beatles.members.filter(name__istartswith='pauL').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__istartswith='pauL')[0].name)
self.assertEqual(1, beatles.members.filter(name__endswith='ney').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__endswith='ney')[0].name)
self.assertEqual(1, beatles.members.filter(name__iendswith='Ney').count())
self.assertEqual('Paul McCartney', beatles.members.filter(name__iendswith='Ney')[0].name)
self.assertEqual('Paul McCartney', beatles.members.get(name='Paul McCartney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__exact='Paul McCartney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__iexact='paul mccartNEY').name)
self.assertEqual('John Lennon', beatles.members.get(name__lt='Paul McCartney').name)
self.assertEqual('John Lennon', beatles.members.get(name__lte='M').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__gt='M').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__gte='Paul McCartney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__contains='Cart').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__icontains='carT').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__in=['Paul McCartney', 'Linda McCartney']).name)
self.assertEqual('Paul McCartney', beatles.members.get(name__startswith='Paul').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__istartswith='pauL').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__endswith='ney').name)
self.assertEqual('Paul McCartney', beatles.members.get(name__iendswith='Ney').name)
self.assertEqual('John Lennon', beatles.members.get(name__regex=r'n{2}').name)
self.assertEqual('John Lennon', beatles.members.get(name__iregex=r'N{2}').name)
self.assertRaises(BandMember.DoesNotExist, lambda: beatles.members.get(name='Reginald Dwight'))
self.assertRaises(BandMember.MultipleObjectsReturned, lambda: beatles.members.get())
self.assertEqual([('Paul McCartney',)], beatles.members.filter(name='Paul McCartney').values_list('name'))
self.assertEqual(['Paul McCartney'], beatles.members.filter(name='Paul McCartney').values_list('name', flat=True))
# quick-and-dirty check that we can invoke values_list with empty args list
beatles.members.filter(name='Paul McCartney').values_list()
self.assertTrue(beatles.members.filter(name='Paul McCartney').exists())
self.assertFalse(beatles.members.filter(name='Reginald Dwight').exists())
self.assertEqual('John Lennon', beatles.members.first().name)
self.assertEqual('Paul McCartney', beatles.members.last().name)
        self.assertEqual('John Lennon', beatles.members.order_by('name').first().name)
        self.assertEqual('Paul McCartney', beatles.members.order_by('-name').first().name)
# these should not exist in the database yet
self.assertFalse(Band.objects.filter(name='The Beatles').exists())
self.assertFalse(BandMember.objects.filter(name='John Lennon').exists())
beatles.save()
# this should create database entries
self.assertTrue(Band.objects.filter(name='The Beatles').exists())
self.assertTrue(BandMember.objects.filter(name='John Lennon').exists())
john_lennon = BandMember.objects.get(name='John Lennon')
beatles.members = [john_lennon]
# reassigning should take effect on the in-memory record
self.assertEqual(1, beatles.members.count())
# but not the database
self.assertEqual(2, Band.objects.get(name='The Beatles').members.count())
beatles.save()
# now updated in the database
self.assertEqual(1, Band.objects.get(name='The Beatles').members.count())
self.assertEqual(1, BandMember.objects.filter(name='John Lennon').count())
# removed member should be deleted from the db entirely
self.assertEqual(0, BandMember.objects.filter(name='Paul McCartney').count())
# queries on beatles.members should now revert to SQL
self.assertTrue(beatles.members.extra(where=["tests_bandmember.name='John Lennon'"]).exists())
def test_related_manager_assignment_ops(self):
beatles = Band(name='The Beatles')
john = BandMember(name='John Lennon')
paul = BandMember(name='Paul McCartney')
beatles.members.add(john)
self.assertEqual(1, beatles.members.count())
beatles.members.add(paul)
self.assertEqual(2, beatles.members.count())
# ensure that duplicates are filtered
beatles.members.add(paul)
self.assertEqual(2, beatles.members.count())
beatles.members.remove(john)
self.assertEqual(1, beatles.members.count())
self.assertEqual(paul, beatles.members.all()[0])
george = beatles.members.create(name='George Harrison')
self.assertEqual(2, beatles.members.count())
self.assertEqual('George Harrison', george.name)
beatles.members.set([john])
self.assertEqual(1, beatles.members.count())
self.assertEqual(john, beatles.members.all()[0])
def test_can_pass_child_relations_as_constructor_kwargs(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
self.assertEqual(2, beatles.members.count())
self.assertEqual(beatles, beatles.members.all()[0].band)
def test_can_access_child_relations_of_superclass(self):
fat_duck = Restaurant(name='The Fat Duck', serves_hot_dogs=False, reviews=[
Review(author='Michael Winner', body='Rubbish.')
])
self.assertEqual(1, fat_duck.reviews.count())
self.assertEqual(fat_duck.reviews.first().author, 'Michael Winner')
self.assertEqual(fat_duck, fat_duck.reviews.all()[0].place)
fat_duck.save()
# ensure relations have been saved to the database
fat_duck = Restaurant.objects.get(id=fat_duck.id)
self.assertEqual(1, fat_duck.reviews.count())
self.assertEqual(fat_duck.reviews.first().author, 'Michael Winner')
def test_can_only_commit_on_saved_parent(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
self.assertRaises(IntegrityError, lambda: beatles.members.commit())
beatles.save()
beatles.members.commit()
def test_integrity_error_with_none_pk(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
beatles.save()
beatles.pk = None
self.assertRaises(IntegrityError, lambda: beatles.members.commit())
# this should work fine, as Django will end up cloning this entity
beatles.save()
self.assertEqual(Band.objects.get(pk=beatles.pk).name, 'The Beatles')
def test_model_with_zero_pk(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
])
beatles.save()
beatles.pk = 0
beatles.members.commit()
beatles.save()
self.assertEqual(Band.objects.get(pk=0).name, 'The Beatles')
def test_save_with_update_fields(self):
beatles = Band(name='The Beatles', members=[
BandMember(name='John Lennon'),
BandMember(name='Paul McCartney'),
], albums=[
Album(name='Please Please Me', sort_order=1),
Album(name='With The Beatles', sort_order=2),
Album(name='Abbey Road', sort_order=3),
])
beatles.save()
# modify both relations, but only commit the change to members
beatles.members.clear()
beatles.albums.clear()
beatles.name = 'The Rutles'
beatles.save(update_fields=['name', 'members'])
updated_beatles = Band.objects.get(pk=beatles.pk)
self.assertEqual(updated_beatles.name, 'The Rutles')
self.assertEqual(updated_beatles.members.count(), 0)
self.assertEqual(updated_beatles.albums.count(), 3)
def test_queryset_filtering(self):
beatles = Band(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
self.assertEqual('Paul McCartney', beatles.members.get(id=2).name)
self.assertEqual('Paul McCartney', beatles.members.get(id='2').name)
self.assertEqual(1, beatles.members.filter(name='Paul McCartney').count())
# also need to be able to filter on foreign fields that return a model instance
# rather than a simple python value
self.assertEqual(2, beatles.members.filter(band=beatles).count())
# and ensure that the comparison is not treating all unsaved instances as identical
rutles = Band(name='The Rutles')
self.assertEqual(0, beatles.members.filter(band=rutles).count())
# and the comparison must be on the model instance's ID where available,
# not by reference
beatles.save()
        beatles.members.add(BandMember(id=3, name='George Harrison'))  # modify the relation so that we're no longer dealing with a plain database-backed queryset
also_beatles = Band.objects.get(id=beatles.id)
self.assertEqual(3, beatles.members.filter(band=also_beatles).count())
def test_queryset_filtering_on_models_with_inheritance(self):
strawberry_fields = Restaurant.objects.create(name='Strawberry Fields')
the_yellow_submarine = SeafoodRestaurant.objects.create(name='The Yellow Submarine')
john = BandMember(name='John Lennon', favourite_restaurant=strawberry_fields)
ringo = BandMember(name='Ringo Starr', favourite_restaurant=Restaurant.objects.get(name='The Yellow Submarine'))
beatles = Band(name='The Beatles', members=[john, ringo])
# queried instance is less specific
self.assertEqual(
list(beatles.members.filter(favourite_restaurant=Place.objects.get(name='Strawberry Fields'))),
[john]
)
# queried instance is more specific
self.assertEqual(
list(beatles.members.filter(favourite_restaurant=the_yellow_submarine)),
[ringo]
)
def test_queryset_exclude_filtering(self):
beatles = Band(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
self.assertEqual(1, beatles.members.exclude(name='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__exact='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__exact='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__iexact='paul mccartNEY').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__iexact='paul mccartNEY').first().name)
self.assertEqual(1, beatles.members.exclude(name__lt='M').count())
self.assertEqual('Paul McCartney', beatles.members.exclude(name__lt='M').first().name)
self.assertEqual(1, beatles.members.exclude(name__lt='Paul McCartney').count())
self.assertEqual('Paul McCartney', beatles.members.exclude(name__lt='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__lte='John Lennon').count())
self.assertEqual('Paul McCartney', beatles.members.exclude(name__lte='John Lennon').first().name)
self.assertEqual(1, beatles.members.exclude(name__gt='M').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__gt='M').first().name)
self.assertEqual(1, beatles.members.exclude(name__gte='Paul McCartney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__gte='Paul McCartney').first().name)
self.assertEqual(1, beatles.members.exclude(name__contains='Cart').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__contains='Cart').first().name)
self.assertEqual(1, beatles.members.exclude(name__icontains='carT').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__icontains='carT').first().name)
self.assertEqual(1, beatles.members.exclude(name__in=['Paul McCartney', 'Linda McCartney']).count())
self.assertEqual('John Lennon', beatles.members.exclude(name__in=['Paul McCartney', 'Linda McCartney'])[0].name)
self.assertEqual(1, beatles.members.exclude(name__startswith='Paul').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__startswith='Paul').first().name)
self.assertEqual(1, beatles.members.exclude(name__istartswith='pauL').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__istartswith='pauL').first().name)
self.assertEqual(1, beatles.members.exclude(name__endswith='ney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__endswith='ney').first().name)
self.assertEqual(1, beatles.members.exclude(name__iendswith='Ney').count())
self.assertEqual('John Lennon', beatles.members.exclude(name__iendswith='Ney').first().name)
def test_queryset_filter_with_nulls(self):
tmbg = Band(name="They Might Be Giants", albums=[
Album(name="Flood", release_date=datetime.date(1990, 1, 1)),
Album(name="John Henry", release_date=datetime.date(1994, 7, 21)),
Album(name="Factory Showroom", release_date=datetime.date(1996, 3, 30)),
Album(name="", release_date=None),
Album(name=None, release_date=None),
])
self.assertEqual(tmbg.albums.get(name="Flood").name, "Flood")
self.assertEqual(tmbg.albums.get(name="").name, "")
self.assertEqual(tmbg.albums.get(name=None).name, None)
self.assertEqual(tmbg.albums.get(name__exact="Flood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__exact="").name, "")
self.assertEqual(tmbg.albums.get(name__exact=None).name, None)
self.assertEqual(tmbg.albums.get(name__iexact="flood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__iexact="").name, "")
self.assertEqual(tmbg.albums.get(name__iexact=None).name, None)
self.assertEqual(tmbg.albums.get(name__contains="loo").name, "Flood")
self.assertEqual(tmbg.albums.get(name__icontains="LOO").name, "Flood")
self.assertEqual(tmbg.albums.get(name__startswith="Flo").name, "Flood")
self.assertEqual(tmbg.albums.get(name__istartswith="flO").name, "Flood")
self.assertEqual(tmbg.albums.get(name__endswith="ood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__iendswith="Ood").name, "Flood")
self.assertEqual(tmbg.albums.get(name__lt="A").name, "")
self.assertEqual(tmbg.albums.get(name__lte="A").name, "")
self.assertEqual(tmbg.albums.get(name__gt="J").name, "John Henry")
self.assertEqual(tmbg.albums.get(name__gte="J").name, "John Henry")
self.assertEqual(tmbg.albums.get(name__in=["Flood", "Mink Car"]).name, "Flood")
self.assertEqual(tmbg.albums.get(name__in=["", "Mink Car"]).name, "")
self.assertEqual(tmbg.albums.get(name__in=[None, "Mink Car"]).name, None)
self.assertEqual(tmbg.albums.filter(name__isnull=True).count(), 1)
self.assertEqual(tmbg.albums.filter(name__isnull=False).count(), 4)
self.assertEqual(tmbg.albums.get(name__regex=r'l..d').name, "Flood")
self.assertEqual(tmbg.albums.get(name__iregex=r'f..o').name, "Flood")
def test_date_filters(self):
tmbg = Band(name="They Might Be Giants", albums=[
Album(name="Flood", release_date=datetime.date(1990, 1, 1)),
Album(name="John Henry", release_date=datetime.date(1994, 7, 21)),
Album(name="Factory Showroom", release_date=datetime.date(1996, 3, 30)),
Album(name="The Complete Dial-A-Song", release_date=None),
])
logs = FakeQuerySet(Log, [
Log(time=datetime.datetime(1979, 7, 1, 1, 1, 1), data="nobody died"),
Log(time=datetime.datetime(1980, 2, 2, 2, 2, 2), data="one person died"),
Log(time=None, data="nothing happened")
])
self.assertEqual(
tmbg.albums.get(release_date__range=(datetime.date(1994, 1, 1), datetime.date(1994, 12, 31))).name,
"John Henry"
)
self.assertEqual(
logs.get(time__range=(datetime.datetime(1980, 1, 1, 1, 1, 1), datetime.datetime(1980, 12, 31, 23, 59, 59))).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__date=datetime.date(1994, 7, 21)).name,
"John Henry"
)
self.assertEqual(
logs.get(time__date=datetime.date(1980, 2, 2)).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__year='1994').name,
"John Henry"
)
self.assertEqual(
logs.get(time__year=1980).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__month=7).name,
"John Henry"
)
self.assertEqual(
logs.get(time__month='2').data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__day='21').name,
"John Henry"
)
self.assertEqual(
logs.get(time__day=2).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__week=29).name,
"John Henry"
)
self.assertEqual(
logs.get(time__week='5').data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__week_day=5).name,
"John Henry"
)
self.assertEqual(
logs.get(time__week_day=7).data,
"one person died"
)
self.assertEqual(
tmbg.albums.get(release_date__quarter=3).name,
"John Henry"
)
self.assertEqual(
logs.get(time__quarter=1).data,
"one person died"
)
self.assertEqual(
logs.get(time__time=datetime.time(2, 2, 2)).data,
"one person died"
)
self.assertEqual(
logs.get(time__hour=2).data,
"one person died"
)
self.assertEqual(
logs.get(time__minute='2').data,
"one person died"
)
self.assertEqual(
logs.get(time__second=2).data,
"one person died"
)
def test_prefetch_related(self):
Band.objects.create(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
with self.assertNumQueries(2):
lists = [list(band.members.all()) for band in Band.objects.prefetch_related('members')]
normal_lists = [list(band.members.all()) for band in Band.objects.all()]
self.assertEqual(lists, normal_lists)
def test_prefetch_related_with_custom_queryset(self):
from django.db.models import Prefetch
Band.objects.create(name='The Beatles', members=[
BandMember(id=1, name='John Lennon'),
BandMember(id=2, name='Paul McCartney'),
])
with self.assertNumQueries(2):
lists = [
list(band.members.all())
for band in Band.objects.prefetch_related(
Prefetch('members', queryset=BandMember.objects.filter(name__startswith='Paul'))
)
]
normal_lists = [list(band.members.filter(name__startswith='Paul')) for band in Band.objects.all()]
self.assertEqual(lists, normal_lists)
def test_order_by_with_multiple_fields(self):
beatles = Band(name='The Beatles', albums=[
Album(name='Please Please Me', sort_order=2),
Album(name='With The Beatles', sort_order=1),
Album(name='Abbey Road', sort_order=2),
])
albums = [album.name for album in beatles.albums.order_by('sort_order', 'name')]
self.assertEqual(['With The Beatles', 'Abbey Road', 'Please Please Me'], albums)
albums = [album.name for album in beatles.albums.order_by('sort_order', '-name')]
self.assertEqual(['With The Beatles', 'Please Please Me', 'Abbey Road'], albums)
def test_meta_ordering(self):
beatles = Band(name='The Beatles', albums=[
Album(name='Please Please Me', sort_order=2),
Album(name='With The Beatles', sort_order=1),
Album(name='Abbey Road', sort_order=3),
])
# in the absence of an explicit order_by clause, it should use the ordering as defined
# in Album.Meta, which is 'sort_order'
albums = [album.name for album in beatles.albums.all()]
self.assertEqual(['With The Beatles', 'Please Please Me', 'Abbey Road'], albums)
def test_parental_key_checks_clusterable_model(self):
from django.core import checks
from django.db import models
from modelcluster.fields import ParentalKey
class Instrument(models.Model):
# Oops, BandMember is not a Clusterable model
member = ParentalKey(BandMember, on_delete=models.CASCADE)
class Meta:
# Prevent Django from thinking this is in the database
# This shouldn't affect the test
abstract = True
# Check for error
errors = Instrument.check()
self.assertEqual(1, len(errors))
# Check the error itself
error = errors[0]
self.assertIsInstance(error, checks.Error)
self.assertEqual(error.id, 'modelcluster.E001')
self.assertEqual(error.obj, Instrument.member.field)
self.assertEqual(error.msg, 'ParentalKey must point to a subclass of ClusterableModel.')
self.assertEqual(error.hint, 'Change tests.BandMember into a ClusterableModel or use a ForeignKey instead.')
def test_parental_key_checks_related_name_is_not_plus(self):
from django.core import checks
from django.db import models
from modelcluster.fields import ParentalKey
class Instrument(models.Model):
# Oops, related_name='+' is not allowed
band = ParentalKey(Band, related_name='+', on_delete=models.CASCADE)
class Meta:
# Prevent Django from thinking this is in the database
# This shouldn't affect the test
abstract = True
# Check for error
errors = Instrument.check()
self.assertEqual(1, len(errors))
# Check the error itself
error = errors[0]
self.assertIsInstance(error, checks.Error)
self.assertEqual(error.id, 'modelcluster.E002')
self.assertEqual(error.obj, Instrument.band.field)
self.assertEqual(error.msg, "related_name='+' is not allowed on ParentalKey fields")
self.assertEqual(error.hint, "Either change it to a valid name or remove it")
def test_parental_key_checks_target_is_resolved_as_class(self):
from django.core import checks
from django.db import models
from modelcluster.fields import ParentalKey
class Instrument(models.Model):
banana = ParentalKey('Banana', on_delete=models.CASCADE)
class Meta:
# Prevent Django from thinking this is in the database
# This shouldn't affect the test
abstract = True
# Check for error
errors = Instrument.check()
self.assertEqual(1, len(errors))
# Check the error itself
error = errors[0]
self.assertIsInstance(error, checks.Error)
self.assertEqual(error.id, 'fields.E300')
self.assertEqual(error.obj, Instrument.banana.field)
self.assertEqual(error.msg, "Field defines a relation with model 'Banana', which is either not installed, or is abstract.")
class GetAllChildRelationsTest(TestCase):
def test_get_all_child_relations(self):
self.assertEqual(
set([rel.name for rel in get_all_child_relations(Restaurant)]),
set(['tagged_items', 'reviews', 'menu_items'])
)
class ParentalM2MTest(TestCase):
def setUp(self):
self.article = Article(title="Test Title")
self.author_1 = Author.objects.create(name="Author 1")
self.author_2 = Author.objects.create(name="Author 2")
self.article.authors = [self.author_1, self.author_2]
self.category_1 = Category.objects.create(name="Category 1")
self.category_2 = Category.objects.create(name="Category 2")
self.article.categories = [self.category_1, self.category_2]
def test_uninitialised_m2m_relation(self):
# Reading an m2m relation of a newly created object should return an empty queryset
new_article = Article(title="Test title")
self.assertEqual([], list(new_article.authors.all()))
self.assertEqual(new_article.authors.count(), 0)
# the manager should have a 'model' property pointing to the target model
self.assertEqual(Author, new_article.authors.model)
def test_parentalm2mfield(self):
# Article should not exist in the database yet
self.assertFalse(Article.objects.filter(title='Test Title').exists())
# Test lookup on parental M2M relation
self.assertEqual(
['Author 1', 'Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 2)
# the manager should have a 'model' property pointing to the target model
self.assertEqual(Author, self.article.authors.model)
# Test adding to the relation
author_3 = Author.objects.create(name="Author 3")
self.article.authors.add(author_3)
self.assertEqual(
['Author 1', 'Author 2', 'Author 3'],
[author.name for author in self.article.authors.all().order_by('name')]
)
self.assertEqual(self.article.authors.count(), 3)
# Test removing from the relation
self.article.authors.remove(author_3)
self.assertEqual(
['Author 1', 'Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 2)
# Test clearing the relation
self.article.authors.clear()
self.assertEqual(
[],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 0)
# Test the 'set' operation
self.article.authors.set([self.author_2])
self.assertEqual(self.article.authors.count(), 1)
self.assertEqual(
['Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
# Test saving to / restoring from DB
self.article.authors = [self.author_1, self.author_2]
self.article.save()
self.article = Article.objects.get(title="Test Title")
self.assertEqual(
['Author 1', 'Author 2'],
[author.name for author in self.article.authors.order_by('name')]
)
self.assertEqual(self.article.authors.count(), 2)
def test_constructor(self):
# Test passing values for M2M relations as kwargs to the constructor
article2 = Article(
title="Test article 2",
authors=[self.author_1],
categories=[self.category_2],
)
self.assertEqual(
['Author 1'],
[author.name for author in article2.authors.order_by('name')]
)
self.assertEqual(article2.authors.count(), 1)
def test_ordering(self):
# our fake querysets should respect the ordering defined on the target model
bela_bartok = Author.objects.create(name='Bela Bartok')
graham_greene = Author.objects.create(name='Graham Greene')
janis_joplin = Author.objects.create(name='Janis Joplin')
simon_sharma = Author.objects.create(name='Simon Sharma')
william_wordsworth = Author.objects.create(name='William Wordsworth')
article3 = Article(title="Test article 3")
article3.authors = [
janis_joplin, william_wordsworth, bela_bartok, simon_sharma, graham_greene
]
self.assertEqual(
list(article3.authors.all()),
[bela_bartok, graham_greene, janis_joplin, simon_sharma, william_wordsworth]
)
def test_save_m2m_with_update_fields(self):
self.article.save()
# modify both relations, but only commit the change to authors
self.article.authors.clear()
self.article.categories.clear()
self.article.title = 'Updated title'
self.article.save(update_fields=['title', 'authors'])
self.updated_article = Article.objects.get(pk=self.article.pk)
self.assertEqual(self.updated_article.title, 'Updated title')
self.assertEqual(self.updated_article.authors.count(), 0)
self.assertEqual(self.updated_article.categories.count(), 2)
def test_reverse_m2m_field(self):
# article is unsaved, so should not be returned by the reverse relation on author
self.assertEqual(self.author_1.articles_by_author.count(), 0)
self.article.save()
# should now be able to look up on the reverse relation
self.assertEqual(self.author_1.articles_by_author.count(), 1)
self.assertEqual(self.author_1.articles_by_author.get(), self.article)
article_2 = Article(title="Test Title 2")
article_2.authors = [self.author_1]
article_2.save()
self.assertEqual(self.author_1.articles_by_author.all().count(), 2)
self.assertEqual(
list(self.author_1.articles_by_author.order_by('title').values_list('title', flat=True)),
['Test Title', 'Test Title 2']
)
def test_value_from_object(self):
authors_field = Article._meta.get_field('authors')
self.assertEqual(
set(authors_field.value_from_object(self.article)),
set([self.author_1, self.author_2])
)
self.article.save()
self.assertEqual(
set(authors_field.value_from_object(self.article)),
set([self.author_1, self.author_2])
)
class ParentalManyToManyPrefetchTests(TestCase):
def setUp(self):
# Create 10 articles with 10 authors each.
authors = Author.objects.bulk_create(
Author(id=i, name=str(i)) for i in range(10)
)
authors = Author.objects.all()
for i in range(10):
article = Article(title=str(i))
article.authors = authors
article.save()
def get_author_names(self, articles):
return [
author.name
for article in articles
for author in article.authors.all()
]
def test_prefetch_related(self):
with self.assertNumQueries(11):
names = self.get_author_names(Article.objects.all())
with self.assertNumQueries(2):
prefetched_names = self.get_author_names(
Article.objects.prefetch_related('authors')
)
self.assertEqual(names, prefetched_names)
def test_prefetch_related_with_custom_queryset(self):
from django.db.models import Prefetch
with self.assertNumQueries(2):
names = self.get_author_names(
Article.objects.prefetch_related(
Prefetch('authors', queryset=Author.objects.filter(name__lt='5'))
)
)
self.assertEqual(len(names), 50)
def test_prefetch_from_fake_queryset(self):
article = Article(title='Article with related articles')
article.related_articles = list(Article.objects.all())
with self.assertNumQueries(10):
names = self.get_author_names(article.related_articles.all())
with self.assertNumQueries(1):
prefetched_names = self.get_author_names(
article.related_articles.prefetch_related('authors')
)
self.assertEqual(names, prefetched_names)
class PrefetchRelatedTest(TestCase):
def test_fakequeryset_prefetch_related(self):
person1 = Person.objects.create(name='Joe')
person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house1 = House.objects.create(name='House 1', address='123 Main St', owner=person1)
room1_1 = Room.objects.create(name='Dining room')
room1_2 = Room.objects.create(name='Lounge')
room1_3 = Room.objects.create(name='Kitchen')
house1.main_room = room1_1
house1.save()
house2 = House(name='House 2', address='45 Side St', owner=person1)
room2_1 = Room.objects.create(name='Eating room')
room2_2 = Room.objects.create(name='TV Room')
room2_3 = Room.objects.create(name='Bathroom')
house2.main_room = room2_1
person1.houses = itertools.chain(House.objects.all(), [house2])
houses = person1.houses.all()
with self.assertNumQueries(1):
qs = person1.houses.prefetch_related('main_room')
with self.assertNumQueries(0):
main_rooms = [ house.main_room for house in person1.houses.all() ]
self.assertEqual(len(main_rooms), 2)
def test_prefetch_related_with_lookup(self):
restaurant1 = Restaurant.objects.create(name='The Jolly Beaver')
restaurant2 = Restaurant.objects.create(name='The Prancing Rhino')
dish1 = Dish.objects.create(name='Goodies')
dish2 = Dish.objects.create(name='Baddies')
wine1 = Wine.objects.create(name='Chateau1')
wine2 = Wine.objects.create(name='Chateau2')
menu_item1 = MenuItem.objects.create(restaurant=restaurant1, dish=dish1, recommended_wine=wine1, price=1)
menu_item2 = MenuItem.objects.create(restaurant=restaurant2, dish=dish2, recommended_wine=wine2, price=10)
query = Restaurant.objects.all().prefetch_related(
Prefetch('menu_items', queryset=MenuItem.objects.only('price', 'recommended_wine').select_related('recommended_wine'))
)
res = list(query)
self.assertEqual(query[0].menu_items.all()[0], menu_item1)
self.assertEqual(query[1].menu_items.all()[0], menu_item2)
|
nilq/baby-python
|
python
|
import functools
import tornado.options
def define_options(option_parser):
# Debugging
option_parser.define(
'debug', default=False, type=bool,
help="Turn on autoreload and log to stderr",
callback=functools.partial(enable_debug, option_parser),
group='Debugging')
def config_callback(path):
option_parser.parse_config_file(path, final=False)
option_parser.define(
"config", type=str, help="Path to config file",
callback=config_callback, group='Config file')
# Application
option_parser.define(
'autoreload', type=bool, default=False, group='Application')
option_parser.define('cookie_secret', type=str, group='Application')
option_parser.define('port', default=8888, type=int, help=(
"Server port"), group='Application')
# Startup
option_parser.define('ensure_indexes', default=False, type=bool, help=(
"Ensure collection indexes before starting"), group='Startup')
option_parser.define('rebuild_indexes', default=False, type=bool, help=(
"Drop all indexes and recreate before starting"), group='Startup')
# Identity
option_parser.define('host', default='localhost', type=str, help=(
"Server hostname"), group='Identity')
option_parser.define('blog_name', type=str, help=(
"Display name for the site"), group='Identity')
option_parser.define('base_url', type=str, help=(
"Base url, e.g. 'blog'"), group='Identity')
option_parser.define('author_display_name', type=str, help=(
"Author name to display in posts and titles"), group='Identity')
option_parser.define('author_email', type=str, help=(
"Author email to display in feed"), group='Identity')
option_parser.define('twitter_handle', type=str, help=(
"Author's Twitter handle (no @-sign)"), group='Identity')
option_parser.define('disqus_shortname', type=str, help=(
"Site's Disqus identifier"), group='Identity')
option_parser.define('description', type=str, help=(
"Site description"), group='Identity')
# Integrations
option_parser.define('google_analytics_id', type=str, help=(
"Like 'UA-123456-1'"), group='Integrations')
option_parser.define('google_analytics_rss_id', type=str, help=(
"Like 'UA-123456-1'"), group='Integrations')
# Admin
option_parser.define('user', type=str, group='Admin')
option_parser.define('password', type=str, group='Admin')
# Appearance
    option_parser.define('nav_menu', type=list, default=[], help=(
        "List of url, title, CSS-class triples (define this in your"
        " motor_blog.conf)"), group='Appearance')
option_parser.define('theme', type=str, default='theme', help=(
"Directory name of your theme files"), group='Appearance')
option_parser.define('home_page', type=str, group='Appearance', help=(
"Slug of a static home page (default: recent posts)"))
option_parser.define(
'timezone', type=str, default='America/New_York',
help="Your timezone name", group='Appearance')
option_parser.add_parse_callback(
functools.partial(check_required_options, option_parser))
def check_required_options(option_parser):
for required_option_name in (
'host', 'port', 'blog_name', 'base_url', 'cookie_secret', 'timezone',
):
if not getattr(option_parser, required_option_name, None):
message = (
'%s required. (Did you forget to pass'
' --config=CONFIG_FILE?)' % (
required_option_name))
raise tornado.options.Error(message)
def enable_debug(option_parser, debug):
if debug:
option_parser.log_to_stderr = True
option_parser.autoreload = True
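# Hedged example of a config file passed via --config=CONFIG_FILE (Tornado config
# files are executed as Python; the values below are illustrative assumptions):
# blog_name = "My Motor-Blog"
# base_url = "blog"
# cookie_secret = "generate-a-long-random-string"
# nav_menu = [("/", "Home", ""), ("/blog/about", "About", "")]
# timezone = "America/New_York"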
|
nilq/baby-python
|
python
|
from django.urls import path
from . import books_views
urlpatterns = [
path('books/', books_views.index, name='books'),
]
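# Hedged companion sketch: books_views.index is assumed to be a simple function-based
# view (the Book model and template name here are illustrative, not from the project):
# from django.shortcuts import render
# from .models import Book
#
# def index(request):
#     return render(request, 'books/index.html', {'books': Book.objects.all()})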
|
nilq/baby-python
|
python
|
""" Game API for Pacman """
import random
from collections import defaultdict
from abc import ABC, abstractmethod
import math
import mcts
import copy
import torch as tr
import pacman_net as pn
import pacman_data as pd
import numpy as np
class Node(ABC):
    directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
    # Find all the successors of the state.
    @abstractmethod
    def find_children(self):
        return set()
    # Return a random successor of the state.
    @abstractmethod
    def random_child(self):
        return None
    # Return True if the node has no children.
    @abstractmethod
    def is_leaf(self):
        return True
    # Score of this node.
    @abstractmethod
    def score(self):
        return 0
    # Nodes must be hashable.
    @abstractmethod
    def __hash__(self):
        return 123456
    # Nodes should be comparable.
    @abstractmethod
    def __eq__(node1, node2):
        return True
class MazeGameBoard():
scores_to_win = 100
max_steps = 40
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
def __init__(self, L, ghosts, pos_i, pos_j, score):
self.board = L
self.ghosts = ghosts
self.pac_i = pos_i
self.pac_j = pos_j
self.score = score
self.current_steps = 0
# 0 for pacman 1 for ghost turn
def gameOver(self):
return self.isCaught() or self.isWon() or self.current_steps >= MazeGameBoard.max_steps
def isCaught(self):
for ghost in self.ghosts:
if self.pac_i == ghost.row and self.pac_j == ghost.col:
return True
return False
def isWon(self):
return self.score == MazeGameBoard.scores_to_win
def one_step_more(self):
self.current_steps += 1
class ghost:
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
    initPos = [[2,3],[6,13]]
currentIndex = 0
oldpoint = 0
def __init__(self, L):
# 0: i++(go down), 1: j++ (go right), 2:i--(go up), 3: j-- (go left)
self.dir = random.randint(0,3)
if self.currentIndex >= len(self.initPos):
raise RuntimeError("try to init too many ghosts")
m = self.initPos[self.currentIndex][0]
n = self.initPos[self.currentIndex][1]
L[m][n] = 'X'
self.row = m
self.col = n
ghost.currentIndex += 1
def move(self, go, L):
if self.oldpoint == 'X' :
L[self.row][self.col] = 0
else :
L[self.row][self.col] = self.oldpoint
self.row += self.directionsDic[go][0]
self.col += self.directionsDic[go][1]
self.oldpoint = L[self.row][self.col]
L[self.row][self.col] = 'X'
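# Board encoding used by the mazes below (as rendered by maze()):
#   0 = dot ('.'), 1 = horizontal wall ('-'), 2 = vertical wall ('|'),
#   3 = slippery tile ('*'), 'X' = a ghost, '#' = Pacman's position.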
def smallMaze(ghost_num, slippery_num) :
L= [[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2],
[2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2],
[2,0,0,0,1,1,0,1,0,0,1,0,1,1,0,2,0,2],
[2,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,0,2],
[2,0,0,0,1,1,0,1,0,0,1,0,1,1,0,2,0,2],
[2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2],
[2,0,1,1,0,2,0,1,1,1,1,0,2,0,1,1,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2]]
ghosts = []
ghost.currentIndex = 0
for i in range(ghost_num):
ghosts.append(ghost(L))
count = 0
while count < slippery_num:
m = random.randint(1,len(L)-1)
n = random.randint(1,len(L[0])-1)
if L[m][n] == 0:
L[m][n] = 3
count += 1
return L, ghosts
def bigMaze(ghost_num, slippery_num):
L= [[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,1,0,2,0,2,0,2,0,2,1,1,1,2,0,0,0,2,0,0,0,2],
[2,0,0,0,0,2,0,2,0,0,0,2,0,0,0,0,0,2,0,2,0,2],
[2,0,0,0,0,2,0,2,0,0,0,2,0,0,0,0,0,0,0,2,0,2],
[2,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,2,0,2],
[2,0,0,2,0,2,0,0,0,2,0,0,0,0,0,0,0,2,0,2,0,2],
[2,0,0,2,0,2,1,2,0,2,0,0,0,2,0,2,0,2,0,2,0,2],
[2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,2,0,0,0,2,0,2],
[2,0,1,2,0,2,0,2,0,1,0,0,0,0,0,1,0,2,0,2,0,2],
[2,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,2,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2]]
ghosts = []
ghost.currentIndex = 0
for i in range(ghost_num):
ghosts.append(ghost(L))
count = 0
while count < slippery_num:
m = random.randint(1,len(L)-1)
n = random.randint(1,len(L[0])-1)
if L[m][n] == 0:
L[m][n] = 3
count += 1
return L, ghosts
# Random ghost: keep the current direction while it is valid, otherwise pick a random valid one.
def randomGhostAction(L, ghost):
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
dir = ghost.dir
i = ghost.row
j = ghost.col
nextI = i + directionsDic[dir][0]
nextJ = j + directionsDic[dir][1]
if isValid(L, nextI, nextJ):
return dir
randomList =[]
for a in range(4):
if(a != dir):
randomList.append(a)
random.shuffle(randomList)
for a in range(len(randomList)):
dir = randomList[a]
nextI = i + directionsDic[dir][0]
nextJ = j + directionsDic[dir][1]
if isValid(L, nextI, nextJ):
return dir
    print()
    print("should never reach here; reaching here means a ghost is trapped in a dead corner")
    print()
# Greedy ghost: choose the step that minimizes the Euclidean distance to Pacman.
def eclideanGhostAction(L, ghost, pos_i, pos_j):
i = ghost.row
j = ghost.col
dir = [[1,0], [0,1], [-1,0], [0,-1]]
distance = []
for n in range(4):
a = i + dir[n][0]
b = j + dir[n][1]
if isValid(L, a, b):
dis = ((pos_i - a)**2 + (pos_j - b)**2)**(1/2)
distance.append(dis)
else:
distance.append(float("inf"))
minDis = min(distance)
return distance.index(minDis)
# Greedy ghost: choose the step that minimizes the Manhattan distance to Pacman.
def manhanttanGhostAction(L, ghost, pos_i, pos_j):
i = ghost.row
j = ghost.col
dir = [[1,0], [0,1], [-1,0], [0,-1]]
distance = []
for n in range(4):
a = i + dir[n][0]
b = j + dir[n][1]
if isValid(L, a, b):
dis = abs(pos_i - a) + abs(pos_j - b)
distance.append(dis)
else:
distance.append(float("inf"))
minDis = min(distance)
return distance.index(minDis)
def isValid(L, i, j):
if i<= 0 or j<=0 or i >= len(L) - 1 or j>= len(L[0]) - 1 or L[i][j] == 1 or L[i][j] == 2:
return False
return True
def instruction():
print()
print("""Instructions:
The AI Pacman will take his way to move up, down, left or right to eat more dots and avoid being caught by ghosts.
Wish him good luck!""")
print()
#function when bumping into a wall
def wall():
print()
print("Oops! Ran into a wall! Try again!")
print()
def win_game(score):
print()
print("Good! AI got enough scores and Won!")
print("Total scores:", score)
def lose_game(score):
print()
print("Sorry! AI got caught by ghost and Lost!")
print("Total scores:", score)
#function to show the maze
def maze(L, pos_i, pos_j):
for i in range(0, len(L)):
for j in range(0, len(L[0])):
if i == pos_i and j == pos_j:
print("#", end=' ')
elif L[i][j] == 0 :
print(".", end=' ')
elif L[i][j] == 1 :
print("-", end=' ')
elif L[i][j] == 2:
print("|", end=' ')
elif L[i][j] == 3:
print("*", end=' ')
else:
print(L[i][j], end=' ')
print()
def pacmanMove(action, pos_i, pos_j, score, L):
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
isGameover = False
nextI = pos_i + directionsDic[action][0]
nextJ = pos_j + directionsDic[action][1]
    if not isValid(L, nextI, nextJ):
        wall()
        return isGameover, pos_i, pos_j, score  # stay put after bumping into a wall
elif L[nextI][nextJ] == "X":
isGameover = True
    elif L[nextI][nextJ] == 3:
        n = random.randint(0, 4)  # n == 0 -> a 20% chance the move fails on a slippery tile
        if n == 0:
            # Slipped: stay in place and try again next turn.
            return isGameover, pos_i, pos_j, score
elif L[nextI][nextJ] == 0:
score += 10
# print(L[pos_i][pos_j])
L[pos_i][pos_j] = " "
# L[nextI][nextJ] = "#"
return isGameover, nextI, nextJ, score
def ghostMove(ghosts):
    # Uses the module-level L, pos_i, pos_j set in the __main__ block.
for ghost in ghosts:
if ghosts.index(ghost) % 3 == 0:
bestAction = eclideanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif ghosts.index(ghost) % 3 == 1:
bestAction = manhanttanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif ghosts.index(ghost) % 3 == 2:
bestAction = randomGhostAction(L, ghost)
ghost.move(bestAction, L)
# human player plays the game
def humanPlay(L, pos_i, pos_j):
score = 0
while True:
if L[pos_i][pos_j] == 0:
L[pos_i][pos_j] = " "
if L[pos_i][pos_j] == 3:
L[pos_i][pos_j] = "*"
move = input("Enter an action: ('w'=up, 's'=down, 'a'=left, 'd'=right, 'e'=exit)")
if move.lower() == "e":
print("Are you sure you want to leave the game?")
sure = input("Y/N")
if sure.lower() == "y":
print("Bye!")
break
else:
continue
if move.lower() == "s": action = 0
if move.lower() == "d": action = 1
if move.lower() == "w": action = 2
if move.lower() == "a": action = 3
isGameover, pos_i, pos_j, score = pacmanMove(action, pos_i, pos_j, score, L)
ghostMove(ghosts)
if score >= MazeGameBoard.scores_to_win:
maze(L, pos_i, pos_j)
win_game(score)
break
isOver = False
for ghost in ghosts:
if ghost.row == pos_i and ghost.col == pos_j:
maze(L, pos_i, pos_j)
lose_game(score)
isOver = True
break
if isOver: break
maze(L, pos_i, pos_j)
print("Scores:", score)
print()
# baseline AI which chooses actions uniformly at random
def randomAI(L, pos_i, pos_j):
score = 0
while True:
if L[pos_i][pos_j] == 0:
L[pos_i][pos_j] = " "
if L[pos_i][pos_j] == 3:
L[pos_i][pos_j] = "*"
directionsDic = [[1,0], [0,1], [-1,0], [0,-1]]
action = random.randint(0, 3)
nextI = pos_i + directionsDic[action][0]
nextJ = pos_j + directionsDic[action][1]
while not isValid(L, nextI, nextJ):
action = random.randint(0, 3)
nextI = pos_i + directionsDic[action][0]
nextJ = pos_j + directionsDic[action][1]
if action == 0: nextaction = "down"
elif action == 1: nextaction = "right"
elif action == 2: nextaction = "up"
elif action == 3: nextaction = "left"
print("AI's next action:", nextaction)
input("Press Enter to continue...")
isGameover, pos_i, pos_j, score = pacmanMove(action, pos_i, pos_j, score, L)
ghostMove(ghosts)
if score >= MazeGameBoard.scores_to_win:
maze(L, pos_i, pos_j)
win_game(score)
break
isOver = False
for ghost in ghosts:
if ghost.row == pos_i and ghost.col == pos_j:
maze(L, pos_i, pos_j)
lose_game(score)
isOver = True
break
if isOver: break
maze(L, pos_i, pos_j)
print("Scores:", score)
print()
def retriveInfoFromGameBoard(gameBoard):
return gameBoard.board, gameBoard.pac_i, gameBoard.pac_j, gameBoard.score
# MCTS AI play the game
def mctsAI(gameBoard, tree, enableHandEnter):
boardStateNode = mcts.pacmanNode(gameBoard, 0)
totalNodeCount = 0
while True:
nodesCount = 0
L0, pos_i0, pos_j0, score0 = retriveInfoFromGameBoard(boardStateNode.board)
for i in range(50):
nodesCount += tree.do_rollout(boardStateNode)
if enableHandEnter:
print("Current Turns:", boardStateNode.board.current_steps)
boardStateNode.board.one_step_more()
if boardStateNode.is_terminal():
break
boardStateNode, boardStateScoreForNN = tree.choose(boardStateNode)
L, pos_i, pos_j, score = retriveInfoFromGameBoard(boardStateNode.board)
if (pos_i - pos_i0) == 1: nextaction = "down"
elif (pos_j - pos_j0) == 1: nextaction = "right"
elif (pos_i0 - pos_i) == 1: nextaction = "up"
elif (pos_j0 - pos_j) == 1: nextaction = "left"
if enableHandEnter:
print("AI's next action:", nextaction)
input("Press Enter to continue...")
if L[pos_i][pos_j] != 3:
L[pos_i][pos_j] = " "
        if boardStateNode.is_terminal():
break
ghosts = boardStateNode.board.ghosts
for ghost in ghosts:
if(ghosts.index(ghost) % 3 == 0):
bestAction = eclideanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif(ghosts.index(ghost) % 3 == 1):
bestAction = manhanttanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif (ghosts.index(ghost) % 3 == 2):
bestAction = randomGhostAction(L, ghost)
ghost.move(bestAction, L)
if enableHandEnter:
maze(L, pos_i, pos_j)
print("The number of tree nodes processed:", nodesCount)
print("Scores:", score)
print()
totalNodeCount += nodesCount
# set the depth to 0 for the next round of AI search
boardStateNode = mcts.pacmanNode(boardStateNode.board, 0)
if boardStateNode.board.isWon():
if enableHandEnter:
maze(L, pos_i, pos_j)
win_game(score)
return totalNodeCount, score, True
elif boardStateNode.board.isCaught():
if enableHandEnter:
maze(L, pos_i, pos_j)
lose_game(score)
return totalNodeCount, score, False
else:
if enableHandEnter:
maze(L, pos_i, pos_j)
print("Total scores:", score)
print("The maximum steps pass, AI tied the game")
return totalNodeCount, score, False
def nn_puct(node, L, mode):
net = pn.BlockusNet3(L)
if mode == "big_1_3":
net.load_state_dict(tr.load("model_net3_big_1_3.pth" ))
elif mode == "big_2_3":
net.load_state_dict(tr.load("model_net3_big_2_3.pth" ))
elif mode == "big_2_5":
net.load_state_dict(tr.load("model_net3_big_2_5.pth" ))
elif mode == "small_1_3":
net.load_state_dict(tr.load("model_net3_small_1_3.pth" ))
elif mode == "small_2_5":
net.load_state_dict(tr.load("model_net3_small_2_5.pth" ))
    with tr.no_grad():
        children = list(node.find_children())
        x = tr.stack([pd.encode(child) for child in children])
        y = net(x)
        probs = tr.softmax(y.flatten(), dim=0)
        # Sample a child in proportion to the network's probabilities
        # (index into the same list used to build x, so order stays consistent).
        a = np.random.choice(len(probs), p=probs.detach().numpy())
        return children[a]
def mcts_nnAI(gameBoard, mode, enableHandEnter):
tree = mcts.MCTS(choose_method = nn_puct, mode = mode)
boardStateNode = mcts.pacmanNode(gameBoard, 0)
totalNodeCount = 0
while True:
nodesCount = 0
L0, pos_i0, pos_j0, score0 = retriveInfoFromGameBoard(boardStateNode.board)
for i in range(15):
nodesCount += tree.do_rollout(boardStateNode)
if enableHandEnter:
print("Current Turns:", boardStateNode.board.current_steps)
boardStateNode.board.one_step_more()
if boardStateNode.is_terminal():
break
boardStateNode, boardStateScoreForNN = tree.choose(boardStateNode)
L, pos_i, pos_j, score = retriveInfoFromGameBoard(boardStateNode.board)
if (pos_i - pos_i0) == 1: nextaction = "down"
elif (pos_j - pos_j0) == 1: nextaction = "right"
elif (pos_i0 - pos_i) == 1: nextaction = "up"
elif (pos_j0 - pos_j) == 1: nextaction = "left"
if enableHandEnter:
print("AI's next action:", nextaction)
input("Press Enter to continue...")
if L[pos_i][pos_j] != 3:
L[pos_i][pos_j] = " "
        if boardStateNode.is_terminal():
break
ghosts = boardStateNode.board.ghosts
for ghost in ghosts:
if(ghosts.index(ghost) % 3 == 0):
bestAction = eclideanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif(ghosts.index(ghost) % 3 == 1):
bestAction = manhanttanGhostAction(L, ghost, pos_i, pos_j)
ghost.move(bestAction, L)
elif (ghosts.index(ghost) % 3 == 2):
bestAction = randomGhostAction(L, ghost)
ghost.move(bestAction, L)
if enableHandEnter:
maze(L, pos_i, pos_j)
print("The number of tree nodes processed:", nodesCount)
print("Scores:", score)
print()
totalNodeCount += nodesCount
# set the depth to 0 for the next round of AI search
boardStateNode = mcts.pacmanNode(boardStateNode.board, 0)
if boardStateNode.board.isWon():
if enableHandEnter:
maze(L, pos_i, pos_j)
win_game(score)
return totalNodeCount, score, True
elif boardStateNode.board.isCaught():
if enableHandEnter:
maze(L, pos_i, pos_j)
lose_game(score)
return totalNodeCount, score, False
else:
if enableHandEnter:
maze(L, pos_i, pos_j)
print("Total scores:", score)
print("The maximum steps pass, AI tied the game")
return totalNodeCount, score, False
if __name__ == "__main__":
while True :
load = input("""Please choose the problem size:
1) Enter 1 to choose big maze with 1 ghost and 3 slippery positions
2) Enter 2 to choose small maze with 1 ghost and 3 slippery positions
3) Enter 3 to choose big maze with 2 ghosts and 3 slippery positions
4) Enter 4 to choose small maze with 2 ghosts and 5 slippery positions
5) Enter 5 to choose big maze with 2 ghosts and 5 slippery positions
""")
score = 0
if load == "1":
L, ghosts = bigMaze(1,3)
pos_i, pos_j = 3, 8
mode = "big_1_3"
break
elif load == "2":
L, ghosts = smallMaze(1,3)
pos_i, pos_j = 5, 10
mode = "small_1_3"
break
elif load == "3":
L, ghosts = bigMaze(2,3)
pos_i, pos_j = 3, 8
mode = "big_2_3"
break
elif load == "4":
L, ghosts = smallMaze(2,5)
pos_i, pos_j = 5, 10
mode = "small_2_5"
break
elif load == "5":
L, ghosts = bigMaze(2,5)
pos_i, pos_j = 5, 10
mode = "big_2_5"
break
else:
print("Please enter 1,2,3,4 or 5")
while True:
ai_chosen = input("""Please choose the control strategy:
1) Enter 1 to choose human player
2) Enter 2 to choose baseline AI
3) Enter 3 to choose tree-based AI (Enter 5 to run 100 times)
4) Enter 4 to choose tree+NN-based AI (Enter 6 to run 100 times)
""")
if ai_chosen == "1" :
gameMode = "human player"
break
elif ai_chosen == "2" :
gameMode = "baseline AI"
break
elif ai_chosen == "3" :
gameMode = "tree-based AI"
break
elif ai_chosen == "4" :
gameMode = "tree+NN-based AI"
break
elif ai_chosen == "5":
gameMode = "automatic"
break
elif ai_chosen == "6":
gameMode = "automatic tree+NN-based AI"
break
else:
print("Please enter 1,2,3 4, 5, 6")
instruction()
print("Game mode:", gameMode)
print()
maze(L,pos_i,pos_j)
print()
initBoard = MazeGameBoard(L, ghosts, pos_i, pos_j, 0)
tree = mcts.MCTS()
if ai_chosen == "1":
humanPlay(L, pos_i, pos_j)
elif ai_chosen == "2":
randomAI(L, pos_i, pos_j)
elif ai_chosen == "3":
totalnodescount, finalscore, aiWon = mctsAI(copy.deepcopy(initBoard), tree, True)
print("The total number of tree nodes processed in this game is", totalnodescount)
elif ai_chosen == "4":
totalnodescount, finalscore, aiWon = mcts_nnAI(copy.deepcopy(initBoard), mode, True)
print("The total number of tree nodes processed in this game is", totalnodescount)
elif ai_chosen == "5":
nodes_list = [0]
scores_list = [0]
col = ['white']
for i in range(100):
totalnodescount = 0
totalnodescount, finalscore, aiWon = mctsAI(copy.deepcopy(initBoard), tree, False)
print("Game", i+1, ":", totalnodescount, " Score:", finalscore)
nodes_list.append(totalnodescount)
scores_list.append(finalscore)
if aiWon: col.append('#87CEFA')
else: col.append('#FFA500')
import matplotlib.pyplot as plt
plt.bar(range(len(nodes_list)), nodes_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Number of tree nodes processed")
plt.title("Efficiency")
plt.show()
plt.bar(range(len(scores_list)), scores_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Final scores")
plt.title("Performance")
plt.show()
elif ai_chosen == "6":
nodes_list = [0]
scores_list = [0]
col = ['white']
for i in range(100):
totalnodescount = 0
totalnodescount, finalscore, aiWon = mcts_nnAI(copy.deepcopy(initBoard), mode, False)
# print("Game", i+1, ":", totalnodescount, " Score:", finalscore)
nodes_list.append(totalnodescount)
scores_list.append(finalscore)
if aiWon: col.append('#87CEFA')
else: col.append('#FFA500')
import matplotlib.pyplot as plt
plt.bar(range(len(nodes_list)), nodes_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Number of tree nodes processed")
plt.title("Efficiency")
plt.show()
plt.bar(range(len(scores_list)), scores_list, width=1.0, color=col)
plt.xlabel("Games")
plt.ylabel("Final scores")
plt.title("Performance")
plt.show()
import streamlit as st
import pandas as pd
from tfidf import get_vocab_idf
st.title('Binary Classification')
st.write("This app shows the featurization created from delta tf-idf for binary classification.")
# Sidebar
with st.sidebar.header('1. Upload your CSV data'):
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"],
help="The labels must be 0 or 1 and the column names must be 'text' and 'label'",
)
with st.sidebar.header("Display parameters"):
idf_range = st.sidebar.slider(label="IDF Range", min_value=-7., max_value=7., step=.5, value=(-7., 7.))
with st.sidebar.header("Feature parameters"):
df_range = st.sidebar.slider(label="Document frequency range", min_value=0.,
max_value=1., step=.0001, value=(0., 1.),
help="Vocabulary outside this range will not be considered")
# Main page
st.subheader('1. Dataset')
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
if st.checkbox(label="View dataset"):
st.write(df)
cnt_0 = df.loc[df['label'] == 0].shape[0]
cnt_1 = df.loc[df['label'] == 1].shape[0]
st.write(f"There are {cnt_0} samples from class 0 and {cnt_1} from class 1.")
if cnt_0 > cnt_1:
st.write(f"Class 0 is the majority class with {cnt_0/df.shape[0]*100:.4f}%")
else:
st.write(f"Class 1 is the majority class with {cnt_1 / df.shape[0]*100:.4f}%")
vocab = get_vocab_idf(df, min_df=df_range[0], max_df=df_range[1])
top_n = vocab.loc[vocab['Delta-Idf'].between(idf_range[0], idf_range[1])]\
.sort_values('Delta-Idf', ascending=False).head(10)
bottom_n = vocab.loc[vocab['Delta-Idf'].between(idf_range[0], idf_range[1])]\
.sort_values('Delta-Idf', ascending=True).head(10)
st.subheader("2. Most relevant words")
left_col, right_col = st.columns(2)
left_col.write("Top 10 most relevant words for the negative (0) class")
left_col.dataframe(top_n)
right_col.write("Top 10 most relevant words for the positive (1) class")
right_col.dataframe(bottom_n)
st.subheader("3. Word search")
search_word = st.text_input("Input word to search:", )
word_col, idf_col = st.columns(2)
word_col.markdown("#### Word")
idf_col.markdown("#### Delta-Idf")
word_col.write(search_word)
if vocab['Word'].isin([search_word]).any():
found_idf = vocab.loc[vocab['Word'] == search_word, 'Delta-Idf'].values[0]
idf_col.write(found_idf)
else:
if search_word != '': idf_col.write("Word not found.")
else:
st.write('Awaiting Dataset...')
from country_assignment import assign_countries_by_priority
from data_processing.player_data import PlayerData
from flask import (Flask, redirect, render_template, request, url_for, flash)
import os
from pathlib import Path
import argparse
import uuid
app = Flask(__name__)
app.secret_key = os.urandom(24)
unique_country_tags = ["GB", "FR", "GE", "IT", "AH", "RU", "OE"]
country_names = [
"Great Britain", "France", "German Empire", "Italy", "Austria-Hungary",
"Russia", "Ottoman Empire"
]
@app.route('/result/<id>')
def result(id):
''' The result page is shown only once countries have been assigned.
It tells the players which country has been assigned to them.
'''
with PlayerData(players_file) as player_data:
player_name = player_data.get_players_by_id()[id]["name"]
# check if assignment really over, i.e. all players submitted
all_submitted = all(p["submitted"] for p in player_data.get_players())
if not all_submitted:
return redirect(url_for('country_selection', id=id))
with open(output_file, "r") as file:
for line in file.readlines():
# remove player number, then separate name from tag
player_country = line.split(":")[-1]
p_name, country_tag = player_country.split()
country_ind = unique_country_tags.index(country_tag)
if p_name == player_name:
return render_template("result.html",
player_name=player_name,
country=country_names[country_ind])
return 'ERROR: Unknown player in results'
@app.route("/<id>")
def country_selection(id):
''' Country selection screen, only accessible to each individual player.
Here, they can submit their priorities.
'''
with PlayerData(players_file) as player_data:
# check if player id correct
player = player_data.get_players_by_id().get(id)
if player is None:
return 'ERROR: Unknown player in country selection'
# load priorities
priorities = [player["prio1"], player["prio2"], player["prio3"]]
already_submitted = player["submitted"]
if already_submitted:
# check if assignment already over, i.e. all players submitted
all_submited = all(p["submitted"]
for p in player_data.get_players())
if all_submited:
return redirect(url_for('result', id=id))
return render_template("country_selection.html",
id=id,
player_name=player["name"],
tags=unique_country_tags,
country_names=country_names,
priorities=priorities,
submitted=already_submitted,
submission_count=sum(
p["submitted"]
for p in player_data.get_players()),
zip=zip)
@app.route("/")
def home():
return "Please use your unique access link."
@app.route('/search', methods=['GET'])
def priorities_submitted():
''' Redirection link that processes the country selection and passes to
either the result page or the selection screen.
'''
prio1 = request.args.get('prio1')
prio2 = request.args.get('prio2')
prio3 = request.args.get('prio3')
id = request.args.get('id')
# check for empty or duplicate entries
priorities = [prio1, prio2, prio3]
for p in priorities:
if p == "":
flash(
"No country selected, please choose one country for each priority!"
)
return redirect(url_for('country_selection', id=id))
if priorities.count(p) > 1:
flash(
"Duplicate entries, please select different countries for each priority!"
)
return redirect(url_for('country_selection', id=id))
with PlayerData(players_file) as player_data:
players = player_data.get_players_by_id()
# set status to submitted
players[id]["submitted"] = True
players[id]["prio1"] = prio1
players[id]["prio2"] = prio2
players[id]["prio3"] = prio3
players = list(players.values())
# check if all players have submitted
for p in players:
if not p["submitted"]:
return redirect(url_for('country_selection', id=id))
# country assignment
assign_countries_by_priority(players_file)
print("Countries have been assigned.")
return redirect(url_for('result', id=id))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
'Starts a local webserver for the diplomacy game country selection.')
parser.add_argument(
'--json',
help='Storage json file for the player data (default: %(default)s)',
type=str,
default="player_priorities.json")
parser.add_argument(
'--out',
help='Text file to store the result (default: %(default)s)',
type=str,
default="result.txt")
parser.add_argument('--port',
help='Webserver port (default: %(default)s)',
type=int,
default=5000)
parser.add_argument('--id-gen',
help='Generate new player IDs (default: %(default)s)',
action='store_true',
default=False)
parser.add_argument(
'--reset',
help=
'Delete all player selections, make empty country slots instead (default: %(default)s)',
action='store_true',
default=False)
args = parser.parse_args()
players_file = Path(args.json)
output_file = Path(args.out)
with PlayerData(players_file) as player_data:
# create player ids in json
players = player_data.get_players()
for p in players:
if args.reset:
# reset player choices
p["prio1"] = ""
p["prio2"] = ""
p["prio3"] = ""
p["submitted"] = False
if len(p["id"]) == 0 or args.id_gen:
# generate new player id
p["id"] = str(uuid.uuid4())
print("Starting webserver ...")
app.run(port=args.port, threaded=False, processes=1, host="::")
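# Example launch (script name illustrative):
#   python app.py --json player_priorities.json --out result.txt --port 5000 --id-gen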
from pynterviews.mutants import mutants
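# mutants(dna) looks for runs of four identical letters (horizontal, vertical, or
# diagonal) in the DNA grid; the negative fixture differs from the positive one only
# by breaking its CCCC run (the exact threshold is defined in pynterviews.mutants)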
def test_positive():
dna = ["CTGAGA",
"CTGAGC",
"TATTGT",
"AGAGAG",
"CCCCTA",
"TCACTG"]
result = mutants(dna)
assert result
def test_negative():
dna = ["CTGAGA",
"CTGAGC",
"TATTGT",
"AGAGAG",
"CCCATA",
"TCACTG"]
result = mutants(dna)
assert not result
def test_empty():
dna = []
result = mutants(dna)
assert not result
def test_none():
dna = None
result = mutants(dna)
assert not result
def test_large():
dna = ["CTGAGADSFFGAGACTGAGACTGAGACTGAGACTGAGAGAGAC",
"CTGAGCTGAGACTGAGACTGAGACTGAGACTGAGACTGAGACT",
"CTGAGACTGAGACTGAGCTGAGACTGAGACTGAGCTGAGACTG",
"AGACTGAGACTGAGACTGCTGAGACTGAGACTCTGAGACTGAG",
"CTGAGACTGAGCCCCTGAGACTGAGACTGCTGAGACTGAGACD",
"TCACTGCTGAGACTGAGACTGAGCTGAGACTGAGACTGACTGA",
"CTGAGACTGAGACTGAGACTGAGACTGAGACTGAGACTGAGAC",
"ETGAGCTGAGACTGAGACTGAGACTGAGACTGAGACTGAGACT",
"CTGAGACTGAGACTGAGCTGAGACTGAGACTGAGCTGAGACTG",
"AGACTGAGACTGAGACTGCTGAGACTGAGACTCTGAGACTGAG",
"CTGAGACTGAGACTGCTGAGACTGAGACTGCTGAGACTGAGAC",
"TCACTGCTGAGACTGAGACTGAGCTGAGACTGAGACTGACTGA"]
result = mutants(dna)
# TODO: assert not result
def Bisection_Method(equation, a, b, given_error): # function, boundaries, Es
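"""Approximate a root of `equation` on [a, b] by repeated bisection, stopping once the relative approximate error (as a fraction) drops below given_error; prints an iteration table."""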
li_a = deque() # a
li_b = deque() # b
li_c = deque() # x root -> c
li_fc = deque() # f(xr)
li_fa = deque() # f(a)
li_fb = deque() # f(b)
li_Ea = deque() # estimated error
data = {
'Xl': li_a,
'Xu': li_b,
'Xr': li_c,
'f(Xl)': li_fa,
'f(Xu)': li_fb,
'f(Xr)': li_fc,
'Ea%': li_Ea,
}
def f(x):
return eval(equation) # eval substitutes the current value of x for 'x' in the equation string
# the method requires a sign change across [a, b]
if f(a)*f(b) >= 0:
print('Error', 'Bisection method fails: f(a) and f(b) must have opposite signs')
quit()
# otherwise a root is bracketed inside [a, b]
else:
Estimated_Error = 0
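# the first loop records the initial interval before the main refinement loop below takes over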
while Estimated_Error/100 <= given_error:
c = (a + b) / 2
if Estimated_Error == 0:
li_a.append(a)
li_b.append(b)
li_c.append(c)
li_fa.append(f(a))
li_fb.append(f(b))
li_fc.append(f(c))
li_Ea.append(None)
if f(a)*f(c) < 0:
b = c
c1 = (a + b)/2
Estimated_Error = abs((c1 - c)/c1) * 100 # c is the previous root estimate and c1 the new one: Ea = |(current - previous)/current| * 100
elif f(b)*f(c) < 0:
a = c
c1 = (a + b) / 2
Estimated_Error = abs((c1 - c) / c1) * 100
else:
print('Error', 'something is wrong!')
else:
while Estimated_Error/100 >= given_error:
c = (a + b) / 2
# append data to the lists
li_a.append(a)
li_b.append(b)
li_c.append(c)
li_fa.append(f(a))
li_fb.append(f(b))
li_fc.append(f(c))
li_Ea.append('%.5f' % Estimated_Error+'%')
if f(a) * f(c) < 0:
b = c
c1 = (a + b) / 2
Estimated_Error = abs((c1 - c) / c1) * 100 # c is the previous root estimate and c1 the new one: Ea = |(current - previous)/current| * 100
elif f(b) * f(c) < 0:
a = c
c1 = (a + b) / 2
Estimated_Error = abs((c1 - c) / c1) * 100
else:
print('Error', 'something is wrong!')
else:
c = (b + a)/2
li_a.append(a)
li_b.append(b)
li_c.append(c)
li_fa.append(f(a))
li_fb.append(f(b))
li_fc.append(f(c))
li_Ea.append('%.5f' % Estimated_Error+'%')
print(tabulate(data, headers='keys', tablefmt='fancy_grid', showindex=True))
if __name__ == '__main__':
from tabulate import tabulate
from collections import deque
print('\n The first case👇 \n')
Bisection_Method('(5 * x ** 3) - (5 * x ** 2) + 6 * x - 2', 0, 1, 10/100)
print('\n The second case👇 \n')
Bisection_Method('(5 * x ** 3) - (5 * x ** 2) + 6 * x - 2', 0, 5, 10 / 100)
from django.urls import path
app_name = 'profiles'
urlpatterns = []
import os
if os.getenv('HEROKU') is not None:
from .prod import *
elif os.getenv('TRAVIS') is not None:
from .test import *
else:
from .base import *
"""
1. Верхняя одежда
1.1. #куртки
1.2. #кофты
1.3. #майки
1.4. #футболки
1.5. #рубашки
1.6. #шапки
1.7. #кепки
2. Нижняя одежда
2.1. #брюки
2.2. #шорты
2.3. #ремни
2.4. #болье
2.5. #носки
3. Костюмы
3.1. #спортивные
3.2. #класические
4. Обувь
4.1. #красовки
4.2. #кеды
4.3. #ботинки
4.4. #туфли
5.Аксесуары
5.1. #рюкзаки
5.2. #сумки
5.3. #очки
5.4. #духи
5.5. #зонты
"""
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic system module, executing statements on local node
"""
from subprocess import check_output
from ovs.plugin.provider.configuration import Configuration
class System(object):
"""
Generic helper class
"""
my_machine_id = ''
my_storagerouter_guid = ''
my_storagedriver_id = ''
def __init__(self):
"""
Dummy init method
"""
_ = self
@staticmethod
def get_my_machine_id(client=None):
"""
Returns unique machine id based on mac address
"""
if not System.my_machine_id:
ip_path = Configuration.get('ovs.core.ip.path')
if ip_path is None:
ip_path = "`which ip`"
cmd = """{0} a | grep link/ether | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | sed 's/://g' | sort""".format(ip_path)
if client is None:
output = check_output(cmd, shell=True).strip()
else:
output = client.run(cmd).strip()
for mac in output.split('\n'):
if mac.strip() != '000000000000':
System.my_machine_id = mac.strip()
break
return System.my_machine_id
@staticmethod
def get_my_storagerouter():
"""
Returns unique machine storagerouter id
"""
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.lists.storagerouterlist import StorageRouterList
if not System.my_storagerouter_guid:
for storagerouter in StorageRouterList.get_storagerouters():
if storagerouter.machine_id == System.get_my_machine_id():
System.my_storagerouter_guid = storagerouter.guid
return StorageRouter(System.my_storagerouter_guid)
@staticmethod
def get_my_storagedriver_id(vpool_name):
"""
Returns unique machine storagedriver_id based on vpool_name and machineid
"""
return vpool_name + System.get_my_machine_id()
@staticmethod
def update_hosts_file(hostname, ip):
"""
Update/add entry for hostname ip in /etc/hosts
"""
import re
with open('/etc/hosts', 'r') as hosts_file:
contents = hosts_file.read()
if isinstance(hostname, list):
hostnames = ' '.join(hostname)
else:
hostnames = hostname
result = re.search('^{0}\s.*\n'.format(ip), contents, re.MULTILINE)
if result:
contents = contents.replace(result.group(0), '{0} {1}\n'.format(ip, hostnames))
else:
contents += '{0} {1}\n'.format(ip, hostnames)
with open('/etc/hosts', 'wb') as hosts_file:
hosts_file.write(contents)
@staticmethod
def exec_remote_python(client, script):
"""
Executes a python script on a client
"""
return client.run('python -c """{0}"""'.format(script))
@staticmethod
def read_remote_config(client, key):
"""
Reads remote configuration key
"""
read = """
from ovs.plugin.provider.configuration import Configuration
print Configuration.get('{0}')
""".format(key)
return System.exec_remote_python(client, read)
@staticmethod
def ports_in_use(client=None):
"""
Returns the ports in use
"""
cmd = """netstat -ln4 | sed 1,2d | sed 's/\s\s*/ /g' | cut -d ' ' -f 4 | cut -d ':' -f 2"""
if client is None:
output = check_output(cmd, shell=True).strip()
else:
output = client.run(cmd).strip()
for found_port in output.split('\n'):
yield int(found_port.strip())
import os
import pandas as pd
os.chdir('/Users/forrestbadgley/Documents/DataScience/git/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyPoll/raw_data')
csv_path = "election_data_1.csv"
csv_path2 = "election_data_2.csv"
elect1_df = pd.read_csv(csv_path)
elect2_df = pd.read_csv(csv_path2)
#vertical stack of two dataframes
elect3_df = pd.concat([elect1_df, elect2_df], axis=0)
total_votes_cast = elect3_df['Voter ID'].value_counts(dropna=True)
candidates_list = elect3_df['Candidate'].unique()
elect3_group = elect3_df.groupby(['Candidate']).count()
total_votes_cast2=elect3_group['Voter ID'].sum()
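# percentage share of the vote per candidate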
elect3_group['Decimal']=((elect3_group['Voter ID']/total_votes_cast2)*100).round(2)
print("Election Results")
print("-----------------")
print("Total Votes: " + (str(total_votes_cast2)))
print("-----------------")
print(elect3_group[['Voter ID', 'Decimal']]) # vote counts and percentage share per candidate
import re
from pyhanlp import *
# def Tokenizer(sent, stopwords=None):
# pat = re.compile(r'[0-9!"#$%&\'()*+,-./:;<=>?@—,。:★、¥…【】()《》?“”‘’!\[\\\]^_`{|}~\u3000]+')
# tokens = [t.word for t in HanLP.segment(sent)]
# tokens = [re.sub(pat, r'', t).strip() for t in tokens]
# tokens = [t for t in tokens if t != '']
#
# if stopwords is not None:
# tokens = [t for t in tokens if not (t in stopwords)]
# return tokens
def Tokenizer(sent, stopwords=None):
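# whitespace tokenizer: drop the first token (presumably a label/id field), remove empties, then filter stopwords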
tokens = sent.split()
del tokens[0]
tokens = list(filter(lambda token: token != '', tokens))
#tokens = list(filter(lambda token: len(tokens) > 3, tokens))
if stopwords is not None:
tokens = [t for t in tokens if not (t in stopwords)]
return tokens
# def Tokenizer(sent,stopwords=None):
# # Tokenizer for English.
# pat = re.compile(r'[0-9!"#$%&\'()*+,-./:;<=>?@—,。:★、¥…【】()《》?“”‘’!\[\\\]^_`{|}~\u3000]+')
# tokens = [re.sub(pat,r'',t).strip() for t in sent.split(' ')]
# tokens = [t for t in tokens if t != '']
# from nltk.stem import WordNetLemmatizer
# wnl = WordNetLemmatizer()
# tokens = [wnl.lemmatize(t).lower() for t in tokens]
# if stopwords is not None:
# tokens = [t for t in tokens if not (t in stopwords)]
# return tokens
if __name__ == '__main__':
print(Tokenizer('他拿的是《红楼梦》?!我还以为他是个Foreigner———'))
import numpy as np
from seisflows.tools.array import uniquerows
from seisflows.tools.code import Struct
from seisflows.tools.io import BinaryReader, mychar, mysize
from seisflows.seistools.shared import SeisStruct
from seisflows.seistools.segy.headers import \
SEGY_TAPE_LABEL, SEGY_BINARY_HEADER, SEGY_TRACE_HEADER
NMAX = 100000
FIXEDLENGTH = True
SAVEHEADERS = True
COORDSCALAR = 1.
DEPTHSCALAR = 1.
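# module switches: NMAX caps the trace count, FIXEDLENGTH assumes equal-length traces,
# and nonzero COORDSCALAR/DEPTHSCALAR override the per-file scaling headers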
FIELDS = [
'TraceSequenceLine',
'SourceWaterDepth',
'GroupWaterDepth',
'ElevationOrDepthScalar',
'CoordinateScalar',
'SourceX',
'SourceY',
'GroupX',
'GroupY',
'RecordingDelay_ms',
'NumberSamples',
'SampleInterval_ms']
# cull header fields
_tmp = []
for field in SEGY_TRACE_HEADER:
if field[-1] in FIELDS:
_tmp.append(field)
SEGY_TRACE_HEADER = _tmp
class SeismicReader(BinaryReader):
""" Base class used by both SegyReader and SuReader
"""
def ReadSeismicData(self):
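# a SEG-Y/SU trace is a 240-byte header followed by its samples; the 16-bit
# sample count sits at byte offset 114 of the trace header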
nsamples = int(self.read('int16', 1, self.offset + 114)[0])
nbytes = int(nsamples*self.dsize + 240)
ntraces = int((self.size - self.offset)/nbytes)
# prepare offset pointers
if FIXEDLENGTH:
tracelen = [nsamples]*ntraces
traceptr = [nbytes*i + self.offset for i in range(ntraces)]
else:
ntraces = 1
tracelen = []
traceptr = [self.offset]
while 1:
ntraces += 1
nsamples = int(self.read('int16', 1, traceptr[-1] + 114)[0])
nbytes = nsamples*self.dsize + 240
tracelen.append(nsamples)
traceptr.append(traceptr[-1] + nbytes)
if ntraces > NMAX:
raise Exception('trace count exceeds NMAX')
elif traceptr[-1] >= self.size:
break
traceptr = traceptr[:-1]
tracelen = tracelen[:-1]
# preallocate trace headers
if SAVEHEADERS:
h = [self.scan(SEGY_TRACE_HEADER, traceptr[0], contiguous=False)]
h = h*ntraces
else:
h = []
# preallocate data array
if FIXEDLENGTH:
d = np.zeros((nsamples, ntraces))
else:
d = np.zeros((max(tracelen), len(traceptr)))
# read trace headers and data
for k in range(ntraces):
if SAVEHEADERS:
h[k] = self.scan(SEGY_TRACE_HEADER, traceptr[k],
contiguous=False)
d[:, k] = self.read(self.dtype, nsamples, traceptr[k] + 240)
# store results
self.ntraces = ntraces
self.hdrs = h
self.data = d
def getstruct(self):
nr = self.ntraces
# collect scalars
nt = self.getscalar('NumberSamples')
ts = self.getscalar('RecordingDelay_ms')
dt = self.getscalar('SampleInterval_ms')
# collect arrays
sx = self.getarray('SourceX')
sy = self.getarray('SourceY')
sz = self.getarray('SourceWaterDepth')
rx = self.getarray('GroupX')
ry = self.getarray('GroupY')
rz = self.getarray('GroupWaterDepth')
# apply scaling factors
if COORDSCALAR and DEPTHSCALAR:
c1 = COORDSCALAR
c2 = DEPTHSCALAR
c3 = 1.e-6
else:
c1 = self.getscalar('CoordinateScalar')
c2 = self.getscalar('ElevationOrDepthScalar')
c3 = 1.e-6
sxyz = np.column_stack([sx, sy, sz])
rxyz = np.column_stack([rx, ry, rz])
nsrc = len(uniquerows(sxyz))
nrec = len(uniquerows(rxyz))
return SeisStruct(nr, nt, dt, ts,
c1*sx, c1*sy, c2*sz,
c1*rx, c1*ry, c2*rz,
nsrc, nrec)
def getarray(self, key):
# collect array
values = [hdr[key] for hdr in self.hdrs]
return np.array(values)
def getscalar(self, key):
# collect scalar
array = self.getarray(key)
return array[0]
class SegyReader(SeismicReader):
""" SEGY reader
"""
def __init__(self, fname, endian=None):
SeismicReader.__init__(self, fname, endian)
self.dtype = 'float'
self.dsize = mysize(self.dtype)
self.offset = 0
# check byte order
if endian:
self.endian = endian
else:
raise ValueError("SU Reader should specify the endianness")
def ReadSegyHeaders(self):
# read in tape label header if present
code = self.read('char', 2, 4)
if code == 'SY':
tapelabel = self.scan(SEGY_TAPE_LABEL, self.offset)
self.offset += 128
else:
tapelabel = 'none'
# read textual file header
self.segyTxtHeader = self.read('char', 3200, self.offset)
self.offset += 3200
# read binary file header
self.segyBinHeader = self.scan(SEGY_BINARY_HEADER, self.offset)
self.offset += 400
# read in extended textual headers if present
self.CheckSegyHeaders()
def CheckSegyHeaders(self):
# check revision number
self.segyvers = '1.0'
# check format code
self.segycode = 5
# check trace length
if FIXEDLENGTH:
assert bool(self.segyBinHeader.FixedLengthTraceFlag) == bool(
FIXEDLENGTH)
class SuReader(SeismicReader):
""" Seismic Unix file reader
"""
def __init__(self, fname, endian=None):
SeismicReader.__init__(self, fname, endian)
self.dtype = 'float'
self.dsize = mysize(self.dtype)
self.offset = 0
# check byte order
if endian:
self.endian = endian
else:
raise ValueError("SU Reader should specify the endianness")
def readsegy(filename):
""" SEGY convenience function
"""
obj = SegyReader(filename, endian='>')
obj.ReadSegyHeaders()
obj.ReadSeismicData()
d = obj.data
h = obj.getstruct()
return d, h
def readsu(filename):
""" SU convenience function
"""
obj = SuReader(filename, endian='<')
obj.ReadSeismicData()
d = obj.data
h = obj.getstruct()
return d, h
import sys
import torch
import numpy as np
sys.path.append('../')
from models import networks
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model-in-file',help='file path to generator model to export (.pth file)',required=True)
parser.add_argument('--model-out-file',help='file path to exported model (.pt file)')
parser.add_argument('--model-type',default='mobile_resnet_9blocks',help='model type, e.g. mobile_resnet_9blocks')
parser.add_argument('--img-size',default=256,type=int,help='square image size')
parser.add_argument('--cpu',action='store_true',help='whether to export for CPU')
parser.add_argument('--bw',action='store_true',help='whether input/output is bw')
args = parser.parse_args()
if not args.model_out_file:
model_out_file = args.model_in_file.replace('.pth','.pt')
else:
model_out_file = args.model_out_file
if args.bw:
input_nc = output_nc = 1
else:
input_nc = output_nc = 3
ngf = 64
use_dropout = False
decoder = True
img_size = args.img_size
model = networks.define_G(input_nc,output_nc,ngf,args.model_type,'instance',use_dropout,
decoder=decoder,
img_size=args.img_size,
img_size_dec=args.img_size)
device = 'cpu' if args.cpu else 'cuda'
if not args.cpu:
model = model.cuda()
model.eval()
# map_location lets a checkpoint saved on GPU load on a CPU-only machine
model.load_state_dict(torch.load(args.model_in_file, map_location=device))
dummy_input = torch.randn(1, input_nc, args.img_size, args.img_size, device=device)
jit_model = torch.jit.trace(model, dummy_input)
jit_model.save(model_out_file)
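# Example (script and checkpoint names illustrative):
#   python export_model.py --model-in-file checkpoints/latest_net_G.pth --model-type mobile_resnet_9blocks --img-size 256 --cpu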
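# Parses interaction-log lines whose fields are nested with ',', ';', ':', '_' and parentheses:
# special characters inside values are first escaped to sentinel tokens, every delimiter is
# then normalized to a comma, and the sentinels are unescaped once fields are extracted.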
def parse_line(line, extraction_map):
print("---------parsing line------")
key = get_key_for_line(line)
extraction_guide = extraction_map[key]
obj = get_blank_line_object()
flag = special_line_case(key)
answer_flag = special_answer_case(key)
if flag:
line = escape_underscore(key, line)
if answer_flag or key == "answerQuestion.userClick.NA":
line = escape_parenth(line)
if answer_flag:
semi_final_line = replace_all_delimeters_with_commas_after_field_6_and_answer_field(key, line)
else:
semi_final_line = replace_all_delimeters_with_commas_after_field_6(line)
# get rid of ignored data at end of line so can compare field counts.
almost_final_line = semi_final_line.replace(",false,false,false,false,false,false\n", "")
final_line = almost_final_line.replace(",false,false,false,false,false,false", "")
guide_parts = extraction_guide.split(",")
final_line_parts = final_line.split(",")
if (len(guide_parts) != len(final_line_parts)):
print("ERROR - guide field count {} line field count {}".format(len(guide_parts),len(final_line_parts)))
print("original line : {}".format(line))
print("all commas line : {}".format(final_line))
print("extraction guide : {}".format(extraction_guide))
raise SystemExit
field_count = len(guide_parts)
for i in range(field_count):
col_name = guide_parts[i]
if ("NOTE_PRESENCE" in col_name):
col_name_parts = col_name.split(">")
true_col_name = col_name_parts[1]
obj[true_col_name] = "yes"
elif (col_name == "OMIT"):
# skip this one
continue
else:
unescaped_value = unescape_all(final_line_parts[i])
print("colname {} gets val {}".format(col_name, unescaped_value))
obj[col_name] = unescaped_value
return obj
def replace_all_delimeters_with_commas_after_field_6(line):
fields = line.split(",")
# go through each field, after the first 6
new_string = ""
for i in range(len(fields)):
if (i == 0):
new_string = "{}".format(fields[i])
elif (i < 6):
# copy without changing
new_string = "{},{}".format(new_string, fields[i])
else:
# replace delims
new_string = "{},{}".format(new_string, replace_all_delimeters_with_commas(fields[i]))
return new_string
def replace_all_delimeters_with_commas(line):
no_under_and_left_parens = line.replace("_(", ",")
no_colons = no_under_and_left_parens.replace(":",",")
no_semicolons = no_colons.replace(";", ",")
no_underscores = no_semicolons.replace("_",",")
no_left_parens = no_underscores.replace("(",",")
no_right_parens = no_left_parens.replace(")","")
return no_right_parens
def get_key_for_line(line):
key = "UNKNOWN"
fields = line.split(',')
print("{}".format(line))
if ("userClick" in line):
key = get_key_for_user_click_line(line)
elif ("startMouseOverSaliencyMap" in line):
key = "startMouseOverSaliencyMap"
elif ("endMouseOverSaliencyMap" in line):
key = "endMouseOverSaliencyMap"
elif ("waitForResearcherStart" in line):
key = "waitForResearcherStart"
elif ("waitForResearcherEnd" in line):
key = "waitForResearcherEnd"
else:
# uses primary discriminator as key
field = fields[6]
subfields = field.split(';')
subfield0 = subfields[0]
subsubfields = subfield0.split(':')
key = subsubfields[0]
return key
def get_key_for_user_click_line(line):
key = "UNKNOWN"
if ("answerQuestion" in line):
#need to look into the saved off click
if ("(NA)" in line):
key = "answerQuestion.userClick.NA"
elif ("clickEntity" in line):
key = "answerQuestion.userClick.clickEntity"
elif ("selectedRewardBar" in line):
key = "answerQuestion.userClick.selectedRewardBar"
elif ("clickSaliencyMap" in line):
key = "answerQuestion.userClick.clickSaliencyMap"
else:
# use secondary discriminator as key
fields = line.split(',')
field = fields[6]
subfields = field.split(';')
subfield3 = subfields[3]
subsubfields = subfield3.split(':')
key = subsubfields[0]
if (key == "NA"):
key = "userClick"
return key
def special_line_case(key):
return key in ("clickSaliencyMap", "startMouseOverSaliencyMap", "endMouseOverSaliencyMap")
def special_answer_case(key):
return key in ("answerQuestion.userClick.clickEntity", "answerQuestion.userClick.selectedRewardBar", "answerQuestion.userClick.clickSaliencyMap")
def unescape_all(s):
#with_comma = with_underscore.replace("ESCAPED-COMMA", ",")
#with_newline = with_comma.replace("ESCAPED-NEWLINE", "\n")
with_underscore = s.replace("ESCAPED-UNDERSCORE", "_")
with_colon = with_underscore.replace("ESCAPED-COLON", ":")
with_semicolon = with_colon.replace("ESCAPED-SEMICOLON", ";")
with_left_parenth = with_semicolon.replace("ESCAPED-LEFT-PARENTH", "(")
with_right_parenth = with_left_parenth.replace("ESCAPED-RIGHT-PARENTH", ")")
return with_right_parenth
def escape_underscore(key, line):
if (key == "clickSaliencyMap"):
fields = line.split(',')
field = fields[6]
subfields = field.split(';')
subfield2 = subfields[2]
subsubfields = subfield2.split(':')
target_replace = subsubfields[1]
new_target_replace = target_replace.replace("_", "ESCAPED-UNDERSCORE")
subsubfields[1] = new_target_replace
new_subsubfields = ':'.join([str(i) for i in subsubfields])
subfields[2] = new_subsubfields
new_subfields = ';'.join([str(j) for j in subfields])
fields[6] = new_subfields
new_line = ','.join([str(k) for k in fields])
return new_line
else:
new_line = line.replace("_", "ESCAPED-UNDERSCORE")
return new_line
def escape_parenth (line):
fields = line.split(",")
field = fields[6]
subfields = field.split(';')
subfield3 = subfields[3]
subsubfields = subfield3.split(':')
answer_fields = subsubfields[1]
answer_subfields = answer_fields.split('_')
answer_one = answer_subfields[1]
answer_two = answer_subfields[2]
new_answer_one = answer_one.replace("(", "ESCAPED-LEFT-PARENTH")
new_answer_two = answer_two.replace("(", "ESCAPED-LEFT-PARENTH")
new_new_answer_one = new_answer_one.replace(")", "ESCAPED-RIGHT-PARENTH")
new_new_answer_two = new_answer_two.replace(")", "ESCAPED-RIGHT-PARENTH")
answer_subfields[1] = new_new_answer_one
answer_subfields[2] = new_new_answer_two
new_answer_fields = '_'.join([str(h) for h in answer_subfields])
subsubfields[1] = new_answer_fields
new_subfield3 = ':'.join([str(i) for i in subsubfields])
subfields[3] = new_subfield3
new_field = ';'.join([str(j) for j in subfields])
fields[6] = new_field
new_line = ','.join([str(k) for k in fields])
return new_line
def replace_all_delimeters_with_commas_after_field_6_and_answer_field(key, line):
entries = line.split('_(', 1)
start_of_click_answer_entry = entries[1]
find_end_of_click_answer = start_of_click_answer_entry.split(')')
answer_entry = find_end_of_click_answer[0]
button_save_info = entries[0]
if (key == "answerQuestion.userClick.clickSaliencyMap"):
answer_entry = escape_underscore("clickSaliencyMap", answer_entry)
new_string = replace_all_delimeters_with_commas_after_field_6(button_save_info)
new_answer_string = replace_all_delimeters_with_commas_after_field_6(answer_entry)
new_new_string = new_string + ',' + new_answer_string
return new_new_string
def get_blank_line_object():
obj = {}
obj["fileName"] = "NA"
obj["date"] = "NA"
obj["time"] = "NA"
obj["1970Sec"] = "NA"
obj["decisionPoint"] = "NA"
obj["questionId"] = "NA"
obj["stepIntoDecisionPoint"] = "NA"
obj["showQuestion"] = "NA"
obj["hideEntityTooltips"] = "NA"
obj["showEntityTooltip.entityInfo"] = "NA"
obj["showEntityTooltip.tipQuadrant"] = "NA"
obj["startMouseOverSaliencyMap"] = "NA"
obj["endMouseOverSaliencyMap"] = "NA"
obj["waitForResearcherStart"] = "NA"
obj["waitForResearcherEnd"] = "NA"
obj["userClick"] = "NA"
obj["userClick.coordX"] = "NA"
obj["userClick.coordY"] = "NA"
obj["userClick.region"] = "NA"
obj["userClick.target"] = "NA"
obj["userClick.answerQuestion.clickStep"] = "NA"
obj["userClick.answerQuestion.questionId"] = "NA"
obj["userClick.answerQuestion.answer1"] = "NA"
obj["userClick.answerQuestion.answer2"] = "NA"
obj["userClick.answerQuestion.userClick"] = "NA"
obj["userClick.answerQuestion.userClick.fileName"] = "NA"
obj["userClick.answerQuestion.userClick.date"] = "NA"
obj["userClick.answerQuestion.userClick.time"] = "NA"
obj["userClick.answerQuestion.userClick.1970Sec"] = "NA"
obj["userClick.answerQuestion.userClick.decisionPoint"] = "NA"
obj["userClick.answerQuestion.userClick.questionId"] = "NA"
obj["userClick.answerQuestion.userClick.coordX"] = "NA"
obj["userClick.answerQuestion.userClick.coordY"] = "NA"
obj["userClick.answerQuestion.userClick.region"] = "NA"
obj["userClick.answerQuestion.userClick.target"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.clickGameEntity"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.clickQuadrant"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.coordX"] = "NA"
obj["userClick.answerQuestion.userClick.clickEntity.coordY"] = "NA"
obj["userClick.answerQuestion.userClick.selectedRewardBar"] = "NA"
obj["userClick.answerQuestion.userClick.clickSaliencyMap"] = "NA"
obj["userClick.answerQuestion.userClick.clickSaliencyMap.clickGameEntity"] = "NA"
obj["userClick.answerQuestion.userClick.clickSaliencyMap.clickQuadrant"] = "NA"
obj["userClick.timelineClick"] = "NA"
obj["userClick.jumpToDecisionPoint"] = "NA"
obj["userClick.clickTimeLineBlocker"] = "NA"
obj["userClick.play"] = "NA"
obj["userClick.pause"] = "NA"
obj["userClick.touchStepProgressLabel"] = "NA"
obj["userClick.clickGameQuadrant"] = "NA"
obj["userClick.clickEntity.clickGameEntity"] = "NA"
obj["userClick.clickEntity.clickQuadrant"] = "NA"
obj["userClick.clickEntity.coordX"] = "NA"
obj["userClick.clickEntity.coordY"] = "NA"
obj["userClick.clickActionLabel"] = "NA"
obj["userClick.clickActionLabelDenied"] = "NA"
obj["userClick.selectedRewardBar"] = "NA"
obj["userClick.clickSaliencyMap"] = "NA"
obj["userClick.clickSaliencyMap.clickGameEntity"] = "NA"
obj["userClick.clickSaliencyMap.clickQuadrant"] = "NA"
obj["userClick.touchCumRewardLabel"] = "NA"
obj["userClick.touchCumRewardValueFor"] = "NA"
return obj
from matplotlib.offsetbox import AnchoredText
import numpy as np
import matplotlib.pyplot as plt
from iminuit import Minuit, describe
from iminuit.util import make_func_code
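# Two chi-square cost functions for iminuit: Chi2Reg weights residuals by dy only,
# while EffVarChi2Reg folds dx into an effective variance via the numerical slope df/dx.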
class Chi2Reg: # standard chi2 regression: only dy enters the cost; dx is kept for the error-bar plot
# this part defines the variables the class will use
def __init__(self, model, x, y, dx, dy):
self.model = model # model predicts y value for given x value
self.x = np.array(x) # the x values
self.y = np.array(y) # the y values
self.dx = np.array(dx) # the x-axis uncertainties
self.dy = np.array(dy) # the y-axis uncertainties
self.func_code = make_func_code(describe(self.model)[1:])
# this part defines the calculations when the function is called
def __call__(self, *par): # par are a variable number of model parameters
self.ym = self.model(self.x, *par)
chi2 = sum(((self.y - self.ym) ** 2) / (self.dy ** 2)) # chi2 is now Sum of: f(x)-y)^2/(uncert_y^2)
return chi2
# this part defines a function called "show" which will make a nice plot when invoked
def show(self, optimizer, x_title="X", y_title="Y", goodness_loc=2):
self.par = optimizer.parameters
self.fit_arg = optimizer.fitarg
self.chi2 = optimizer.fval
self.ndof = len(self.x) - len(self.par)
self.chi_ndof = self.chi2 / self.ndof
self.par_values = []
self.par_error = []
text = ""
for name in self.par:
self.par_values.append(self.fit_arg[name])
self.par_error.append(self.fit_arg["error_" + name])
text += "%s = %0.4f \u00B1 %0.4f \n" % (name, self.fit_arg[name], self.fit_arg["error_" + name])
text = text + "\u03C7\u00B2 /ndof = %0.4f(%0.4f/%d)" % (self.chi_ndof, self.chi2, self.ndof)
self.func_x = np.linspace(self.x[0], self.x[-1], 10000) # 10000 linearly spaced numbers
self.y_fit = self.model(self.func_x, *self.par_values)
plt.rc("font", size=16, family="Times New Roman")
fig = plt.figure(figsize=(8, 6))
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(self.func_x, self.y_fit) # plot the function over 10k points covering the x axis
ax.scatter(self.x, self.y, c="red")
# ax.errorbar(self.x, self.y, self.dy, self.dy, fmt='none', ecolor='red', capsize=3)  # likely a typo upstream: dy passed twice instead of dy, dx
ax.errorbar(self.x, self.y, self.dy, self.dx, fmt='none', ecolor='red', capsize=3)
ax.set_xlabel(x_title, fontdict={"size": 21})
ax.set_ylabel(y_title, fontdict={"size": 21})
anchored_text = AnchoredText(text, loc=goodness_loc)
ax.add_artist(anchored_text)
plt.grid(True)
class EffVarChi2Reg: # This class is like Chi2Regression but takes into account dx
# this part defines the variables the class will use
def __init__(self, model, x, y, dx, dy):
self.model = model # model predicts y value for given x value
self.x = np.array(x) # the x values
self.y = np.array(y) # the y values
self.dx = np.array(dx) # the x-axis uncertainties
self.dy = np.array(dy) # the y-axis uncertainties
self.func_code = make_func_code(describe(self.model)[1:])
# step size for the numerical df/dx: (last x - first x) / 10000
self.h = (x[-1] - x[0]) / 10000
# this part defines the calculations when the function is called
def __call__(self, *par): # par are a variable number of model parameters
self.ym = self.model(self.x, *par)
df = (self.model(self.x + self.h, *par) - self.ym) / self.h # the derivative df/dx at point x is taken as [f(x+h)-f(x)]/h
chi2 = sum(((self.y - self.ym) ** 2) / (self.dy ** 2 + (df * self.dx) ** 2)) # chi2 is now Sum of: f(x)-y)^2/(uncert_y^2+(df/dx*uncert_x)^2)
return chi2
# this part defines a function called "show" which will make a nice plot when invoked
def show(self, optimizer, x_title="X", y_title="Y", goodness_loc=2):
self.par = optimizer.parameters
self.fit_arg = optimizer.fitarg
self.chi2 = optimizer.fval
self.ndof = len(self.x) - len(self.par)
self.chi_ndof = self.chi2 / self.ndof
self.par_values = []
self.par_error = []
text = ""
for name in self.par:
self.par_values.append(self.fit_arg[name])
self.par_error.append(self.fit_arg["error_" + name])
text += "%s = %0.4f \u00B1 %0.4f \n" % (name, self.fit_arg[name], self.fit_arg["error_" + name])
text = text + "\u03C7\u00B2 /ndof = %0.4f(%0.4f/%d)" % (self.chi_ndof, self.chi2, self.ndof)
self.func_x = np.linspace(self.x[0], self.x[-1], 10000) # 10000 linearly spaced numbers
self.y_fit = self.model(self.func_x, *self.par_values)
plt.rc("font", size=16, family="Times New Roman")
fig = plt.figure(figsize=(8, 6))
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(self.func_x, self.y_fit) # plot the function over 10k points covering the x axis
ax.scatter(self.x, self.y, c="red")
# ax.errorbar(self.x, self.y, self.dy, self.dy, fmt='none', ecolor='red', capsize=3)  # likely a typo upstream: dy passed twice instead of dy, dx
ax.errorbar(self.x, self.y, self.dy, self.dx, fmt='none', ecolor='red', capsize=3)
ax.set_xlabel(x_title, fontdict={"size": 21})
ax.set_ylabel(y_title, fontdict={"size": 21})
anchored_text = AnchoredText(text, loc=goodness_loc)
ax.add_artist(anchored_text)
plt.grid(True)
if __name__ == "__main__":
np.random.seed(42)
X = np.linspace(1,6,5)
dX = 0.1 * np.ones(len(X))
y = 2*X + np.random.randn(len(X))
dy = abs(np.random.randn(len(X)))
fun = lambda X,a,b: a*X + b
reg = Chi2Reg(fun,X,y,dX,dy)
opt = Minuit(reg)
opt.migrad()
reg.show(opt)
plt.show()
import socket
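# send one raw UDP datagram from local port 33000 to the listener on localhost:9999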
HOST, PORT = "localhost", 9999
msg = b'\x16\x04\x04\x01\xfd 94193A04010020B8'
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("localhost", 33000))
s.sendto(msg, (HOST, PORT))
from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
Inventory,
Actor,
Status
)
inventory_test_data = {}
inventory_tests = [
##----TEST 0----##
# creates 6 actors
# creates 2 statuses
# creates an inventory with 3 of the actors and a status
# gets it
# updates inventory with 3 other actors and the other status
# gets it
# deletes it
# gets it (should error)
[
*[{
'name': name,
'method': POST,
'endpoint': 'actor-list',
'body': random_model_dict(Actor),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST,
}
}
} for name in ['owner0','operator0','lab0','owner1','operator1','lab1']
],
*[{
'name': name,
'method': POST,
'endpoint': 'status-list',
'body': random_model_dict(Status),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST,
}
}
} for name in ['status0','status1']
],
{
'name': 'inventory',
'method': POST,
'endpoint': 'inventory-list',
'body': (request_body := random_model_dict(Inventory,
owner='owner0__url',
operator='operator0__url',
lab='lab0__url',
status='status0__url')),
'args': [],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': POST,
'request_body': request_body,
}
}
},
{
'name': 'inventory_get',
'method': GET,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET,
}
}
},
{
'name': 'inventory_update',
'method': PUT,
'endpoint': 'inventory-detail',
'body': (request_body := random_model_dict(Inventory,
owner='owner1__url',
operator='operator1__url',
lab='lab1__url',
status='status1__url')),
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': PUT,
'request_body': request_body
}
}
},
{
'name': 'inventory_update_get',
'method': GET,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET,
}
}
},
{
'name': 'inventory_update_del',
'method': DELETE,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE,
}
}
},
{
'name': 'inventory_update_del_get',
'method': GET,
'endpoint': 'inventory-detail',
'body': {},
'args': [
'inventory__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': ERROR,
}
}
},
],
]
from utils import utils
day = 18
tD = """
2 * 3 + (4 * 5)
5 + (8 * 3 + 9 + 3 * 4 * 3)
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2
"""
tA1 = 26 + 437 + 12240 + 13632
tA2 = 46 + 1445 + 669060 + 23340
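# Advent of Code 2020, day 18: evaluate expressions with altered operator precedence.
# Part 1 gives + and * equal precedence (single charset "+*"); part 2 evaluates all +
# before any * (charsets ["+", "*"], applied in order).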
class Calculator:
def __init__(self, pattern):
self.pattern = pattern
def findFirstBrackets(self, line):
# return the (open, close) indices of the first innermost pair of brackets
bracketStack = []
for i in range(len(line)):
c = line[i]
if c == "(":
bracketStack.append(i)
elif c == ")":
j = bracketStack.pop()
return (j, i)
return None
def findSum(self, line, charSet):
found = False
startingPointer = 0
for i in range(len(line)):
c = line[i]
# Is this the end?
if not c.isdigit() and found:
return (startingPointer, i)
# Is this the start?
elif c in charSet and not found:
found = True
elif not c.isdigit():
startingPointer = i + 1
# if we found a digit, but reached the end, we still have maths to do
return (startingPointer, len(line)) if found else None
def solve(self, line, charset):
sumRange = self.findSum(line, charset)
while sumRange != None:
result = eval(line[sumRange[0]:sumRange[1]])
line = str(result).join([line[:sumRange[0]], line[sumRange[1]:]])
sumRange = self.findSum(line, charset)
return line
def calculate(self, line, charset):
line = line.strip().replace(" ", "")
brackets = self.findFirstBrackets(line)
while brackets != None:
partialLine = line[brackets[0]+1:brackets[1]]
partial = self.calculateLine(partialLine)
line = str(partial).join([line[:brackets[0]], line[brackets[1]+1:]])
brackets = self.findFirstBrackets(line)
return self.solve(line, charset)
def calculateLine(self, line):
for charset in self.pattern:
line = self.calculate(line, charset)
return int(line)
def sumData(self, data):
return sum(self.calculateLine(l) for l in data)
def test():
assert Calculator(["+*"]).sumData(utils.load_test_data(tD)) == tA1
assert Calculator(["+", "*"]).sumData(utils.load_test_data(tD)) == tA2
return "Pass!"
if __name__ == "__main__":
def process_data(d): return d
def partOne(d): return Calculator(["+*"]).sumData(d)
def partTwo(d): return Calculator(["+", "*"]).sumData(d)
utils.run(day, process_data, test, partOne, partTwo)
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# Core idea
# This problem is a special case of add_two_numbers_II_q445.py, so the approach is simpler.
# Also, nothing says the original list cannot be modified, so you could reverse it first
# (least significant digit first), do the +1 there, then reverse it back.
class Solution(object):
def plusOne(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
pnc = p = dummy
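# pnc tracks the last node whose value is not 9: a carry out of the final digit lands
# there, and every 9 after it rolls over to 0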
while p.next:
if p.val != 9:
pnc = p
p = p.next
val = p.val + 1
if val > 9:
p.val = 0
pnc.val += 1
while pnc.next != p:
pnc.next.val = 0
pnc = pnc.next
else:
p.val = val
return dummy.next if dummy.val == 0 else dummy
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete a job store used by a previous Toil workflow invocation."""
import logging
from toil.common import Toil, parser_with_common_options
from toil.jobStores.abstractJobStore import NoSuchJobStoreException
from toil.statsAndLogging import set_logging_from_options
logger = logging.getLogger(__name__)
def main():
parser = parser_with_common_options(jobstore_option=True)
options = parser.parse_args()
set_logging_from_options(options)
try:
jobstore = Toil.getJobStore(options.jobStore)
jobstore.resume()
jobstore.destroy()
logger.info(f"Successfully deleted the job store: {options.jobStore}")
except NoSuchJobStoreException:
logger.info(f"Failed to delete the job store: {options.jobStore} is non-existent.")
except:
logger.info(f"Failed to delete the job store: {options.jobStore}")
raise
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import time
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.clip import GradientClipByGlobalNorm
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from seq2seq_dygraph_model import BaseModel, AttentionModel
from seq2seq_utils import Seq2SeqModelHyperParams
from seq2seq_utils import get_data_iter
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace(
)
program_translator = ProgramTranslator()
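# these tests run the same seq2seq model twice, with ProgramTranslator disabled (dygraph)
# and enabled (static graph), and assert that the losses and predictions match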
STEP_NUM = 10
PRINT_STEP = 2
def prepare_input(batch):
src_ids, src_mask, tar_ids, tar_mask = batch
src_ids = src_ids.reshape((src_ids.shape[0], src_ids.shape[1]))
in_tar = tar_ids[:, :-1]
label_tar = tar_ids[:, 1:]
in_tar = in_tar.reshape((in_tar.shape[0], in_tar.shape[1]))
label_tar = label_tar.reshape((label_tar.shape[0], label_tar.shape[1], 1))
inputs = [src_ids, in_tar, label_tar, src_mask, tar_mask]
return inputs, np.sum(tar_mask)
def train(args, attn_model=False):
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = 2020
fluid.default_main_program().random_seed = 2020
if attn_model:
model = AttentionModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=args.dropout)
else:
model = BaseModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=args.dropout)
global_norm_clip = GradientClipByGlobalNorm(args.max_grad_norm)
optimizer = fluid.optimizer.SGD(args.learning_rate,
parameter_list=model.parameters(),
grad_clip=global_norm_clip)
model.train()
train_data_iter = get_data_iter(args.batch_size)
batch_times = []
for batch_id, batch in enumerate(train_data_iter):
total_loss = 0
word_count = 0.0
batch_start_time = time.time()
input_data_feed, word_num = prepare_input(batch)
input_data_feed = [
fluid.dygraph.to_variable(np_inp) for np_inp in input_data_feed
]
word_count += word_num
loss = model(input_data_feed)
loss.backward()
optimizer.minimize(loss)
model.clear_gradients()
total_loss += loss * args.batch_size
batch_end_time = time.time()
batch_time = batch_end_time - batch_start_time
batch_times.append(batch_time)
if batch_id % PRINT_STEP == 0:
print(
"Batch:[%d]; Time: %.5f s; loss: %.5f; total_loss: %.5f; word num: %.5f; ppl: %.5f"
% (batch_id, batch_time, loss.numpy(), total_loss.numpy(),
word_count, np.exp(total_loss.numpy() / word_count)))
if attn_model:
# NOTE: see the code of AttentionModel.
# A diff exists when while_loop is called in static-graph mode, so run only 4 batches to pass the test temporarily.
if batch_id + 1 >= 4:
break
else:
if batch_id + 1 >= STEP_NUM:
break
model_path = args.attn_model_path if attn_model else args.base_model_path
model_dir = os.path.join(model_path)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
fluid.save_dygraph(model.state_dict(), model_dir)
return loss.numpy()
def infer(args, attn_model=False):
with fluid.dygraph.guard(place):
if attn_model:
model = AttentionModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
beam_size=args.beam_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=0.0,
mode='beam_search')
else:
model = BaseModel(
args.hidden_size,
args.src_vocab_size,
args.tar_vocab_size,
args.batch_size,
beam_size=args.beam_size,
num_layers=args.num_layers,
init_scale=args.init_scale,
dropout=0.0,
mode='beam_search')
model_path = args.attn_model_path if attn_model else args.base_model_path
state_dict, _ = fluid.dygraph.load_dygraph(model_path)
model.set_dict(state_dict)
model.eval()
train_data_iter = get_data_iter(args.batch_size, mode='infer')
for batch_id, batch in enumerate(train_data_iter):
input_data_feed, word_num = prepare_input(batch)
input_data_feed = [
fluid.dygraph.to_variable(np_inp) for np_inp in input_data_feed
]
outputs = model.beam_search(input_data_feed)
break
return outputs.numpy()
class TestSeq2seq(unittest.TestCase):
def setUp(self):
self.args = Seq2SeqModelHyperParams
self.temp_dir = tempfile.TemporaryDirectory()
self.args.base_model_path = os.path.join(self.temp_dir.name,
self.args.base_model_path)
self.args.attn_model_path = os.path.join(self.temp_dir.name,
self.args.attn_model_path)
self.args.reload_model = os.path.join(self.temp_dir.name,
self.args.reload_model)
def tearDown(self):
self.temp_dir.cleanup()
def run_dygraph(self, mode="train", attn_model=False):
program_translator.enable(False)
if mode == "train":
return train(self.args, attn_model)
else:
return infer(self.args, attn_model)
def run_static(self, mode="train", attn_model=False):
program_translator.enable(True)
if mode == "train":
return train(self.args, attn_model)
else:
return infer(self.args, attn_model)
def _test_train(self, attn_model=False):
dygraph_loss = self.run_dygraph(mode="train", attn_model=attn_model)
static_loss = self.run_static(mode="train", attn_model=attn_model)
result = np.allclose(dygraph_loss, static_loss)
self.assertTrue(
result,
msg="\ndygraph_loss = {} \nstatic_loss = {}".format(dygraph_loss,
static_loss))
def _test_predict(self, attn_model=False):
pred_dygraph = self.run_dygraph(mode="test", attn_model=attn_model)
pred_static = self.run_static(mode="test", attn_model=attn_model)
result = np.allclose(pred_static, pred_dygraph)
self.assertTrue(
result,
msg="\npred_dygraph = {} \npred_static = {}".format(pred_dygraph,
pred_static))
def test_base_model(self):
self._test_train(attn_model=False)
self._test_predict(attn_model=False)
def test_attn_model(self):
self._test_train(attn_model=True)
# TODO(liym27): add predict
# self._test_predict(attn_model=True)
if __name__ == '__main__':
# switch into new eager mode
with fluid.framework._test_eager_guard():
unittest.main()
#! /bin/false
import weblogic
import javax.xml
import java.io.FileInputStream as fis
import java.io.FileOutputStream as fos
import os
import shutil
import java.io.BufferedReader as BR
import java.lang.System.in as Sin
import java.io.InputStreamReader as isr
import java.lang.System.out.print as jprint
import weblogic.security
# Decrypts credentials stored in a WebLogic domain's config.xml using the
# encryption key derived from the domain's SerializedSystemIni
class ConfigStore:
def __init__(self, fileLocation):
factory=javax.xml.parsers.DocumentBuilderFactory.newInstance()
builder=factory.newDocumentBuilder()
input=fis(fileLocation)
self.document=builder.parse(input)
self.DOM=self.document.getDocumentElement()
def write(self, newFileLocation):
xmlFrom=javax.xml.transform.dom.DOMSource(self.document)
xmlTo=javax.xml.transform.stream.StreamResult(fos(newFileLocation))
Transformer=javax.xml.transform.TransformerFactory.newInstance().newTransformer()
Transformer.transform(xmlFrom, xmlTo)
configxml=ConfigStore("/home/andresaquino/Downloads/config/config.xml")
es=weblogic.security.internal.SerializedSystemIni.getEncryptionService("/home/andresaquino/Downloads/security")
ces=weblogic.security.internal.encryption.ClearOrEncryptedService(es)
numServers=configxml.DOM.getElementsByTagName("server").getLength()
domainName=configxml.DOM.getAttribute("name")
print "The domain found: %s has %s servers." % (domainName, numServers)
print '## Servers'
for i in range(configxml.DOM.getElementsByTagName("server").getLength()):
serverNode=configxml.DOM.getElementsByTagName("server").item(i)
name=serverNode.getAttribute("name")
print 'Server: ' + name
print '## Decrypt the JDBC passwords'
for j in range(configxml.DOM.getElementsByTagName("JDBCConnectionPool").getLength()):
poolNode=configxml.DOM.getElementsByTagName("JDBCConnectionPool").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tURL: ' + poolNode.getAttribute("URL")
print '\tDriverName: ' + poolNode.getAttribute("DriverName")
print '\tUser: ' + poolNode.getAttribute("Properties")
print '\tPassword: ' + ces.decrypt(poolNode.getAttribute("PasswordEncrypted"))
print '\tTargets: ' + poolNode.getAttribute("Targets")
print '## Decrypt the EmbeddedLDAP'
for j in range(configxml.DOM.getElementsByTagName("EmbeddedLDAP").getLength()):
poolNode=configxml.DOM.getElementsByTagName("EmbeddedLDAP").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tCredential: ' + ces.decrypt(poolNode.getAttribute("CredentialEncrypted"))
print '## Decrypt the Security Configuration'
for j in range(configxml.DOM.getElementsByTagName("SecurityConfiguration").getLength()):
poolNode=configxml.DOM.getElementsByTagName("SecurityConfiguration").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tCredential: ' + ces.decrypt(poolNode.getAttribute("CredentialEncrypted"))
print '## Decrypt the ServerStart'
for j in range(configxml.DOM.getElementsByTagName("ServerStart").getLength()):
poolNode=configxml.DOM.getElementsByTagName("ServerStart").item(j)
print 'Name: ' + poolNode.getAttribute("Name")
print '\tUserName: ' + poolNode.getAttribute("Username")
print '\tPassword: ' + ces.decrypt(poolNode.getAttribute("PasswordEncrypted"))
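# Usage note (an assumption, not from the original script): as a WLST/Jython
# script this is run with the WebLogic scripting tool rather than CPython,
# e.g. `java weblogic.WLST decrypt_config.py`, where decrypt_config.py is a
# placeholder name for this file. The config.xml and security directory
# paths above are hard-coded and must be adapted to the target domain.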
|
nilq/baby-python
|
python
|
from niaaml.classifiers.classifier import Classifier
from niaaml.utilities import MinMax
from niaaml.utilities import ParameterDefinition
from sklearn.tree import DecisionTreeClassifier as DTC
import numpy as np
import warnings
from sklearn.exceptions import ChangedBehaviorWarning, ConvergenceWarning, DataConversionWarning, DataDimensionalityWarning, EfficiencyWarning, FitFailedWarning, NonBLASDotWarning, UndefinedMetricWarning
__all__ = ['DecisionTree']
class DecisionTree(Classifier):
r"""Implementation of decision tree classifier.
Date:
2020
Author:
Luka Pečnik
License:
MIT
Reference:
L. Breiman, J. Friedman, R. Olshen, and C. Stone, “Classification and Regression Trees”, Wadsworth, Belmont, CA, 1984.
Documentation:
https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier
See Also:
* :class:`niaaml.classifiers.Classifier`
"""
Name = 'Decision Tree Classifier'
def __init__(self, **kwargs):
r"""Initialize DecisionTree instance.
"""
warnings.filterwarnings(action='ignore', category=ChangedBehaviorWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=DataDimensionalityWarning)
warnings.filterwarnings(action='ignore', category=EfficiencyWarning)
warnings.filterwarnings(action='ignore', category=FitFailedWarning)
warnings.filterwarnings(action='ignore', category=NonBLASDotWarning)
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
self._params = dict(
criterion = ParameterDefinition(['gini', 'entropy']),
splitter = ParameterDefinition(['best', 'random'])
)
self.__decision_tree_classifier = DTC()
def set_parameters(self, **kwargs):
r"""Set the parameters/arguments of the algorithm.
"""
self.__decision_tree_classifier.set_params(**kwargs)
def fit(self, x, y, **kwargs):
r"""Fit DecisionTree.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
y (pandas.core.series.Series): n classes of the samples in the x array.
Returns:
None
"""
self.__decision_tree_classifier.fit(x, y)
def predict(self, x, **kwargs):
r"""Predict class for each sample (row) in x.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
Returns:
pandas.core.series.Series: n predicted classes.
"""
return self.__decision_tree_classifier.predict(x)
def to_string(self):
r"""User friendly representation of the object.
Returns:
str: User friendly representation of the object.
"""
return Classifier.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__decision_tree_classifier.get_params()))
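# A minimal usage sketch (illustrative, not part of the module); it assumes a
# pandas DataFrame/Series pair, the input types documented for fit() above:
#   import pandas as pd
#   clf = DecisionTree()
#   clf.set_parameters(criterion='gini', splitter='best')
#   x = pd.DataFrame({'f1': [0, 1, 0, 1], 'f2': [1, 1, 0, 0]})
#   y = pd.Series([0, 1, 0, 1])
#   clf.fit(x, y)
#   print(clf.predict(x))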
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Script implementing the multiplicative rules from the following
# article:
#
# J.-L. Durrieu, G. Richard, B. David and C. Fevotte
# Source/Filter Model for Unsupervised Main Melody
# Extraction From Polyphonic Audio Signals
# IEEE Transactions on Audio, Speech and Language Processing
# Vol. 18, No. 3, March 2010
#
# with more details and new features explained in my PhD thesis:
#
# J.-L. Durrieu,
# Automatic Extraction of the Main Melody from Polyphonic Music Signals,
# EDITE
# Institut TELECOM, TELECOM ParisTech, CNRS LTCI
# copyright (C) 2010 Jean-Louis Durrieu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import time, os
from numpy.random import randn
def db(positiveValue):
"""
db(positiveValue)
Returns the decibel value of the input positiveValue
"""
return 10 * np.log10(np.abs(positiveValue))
def ISDistortion(X,Y):
"""
value = ISDistortion(X, Y)
Returns the value of the Itakura-Saito (IS) divergence between
matrix X and matrix Y. X and Y should be two NumPy arrays with
same dimension.
"""
return np.sum((-np.log(X / Y) + (X / Y) - 1))
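# Quick sanity check (illustrative, not from the original script): the IS
# divergence is 0 when X == Y and grows as the ratio X/Y moves away from 1,
# e.g. ISDistortion(np.array([2.0]), np.array([1.0])) = -log(2) + 2 - 1 ~ 0.307.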
def SIMM(# the data to be fitted to:
SX,
# the basis matrices for the spectral combs
WF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=4, numberOfAccompanimentSpectralShapes=10,
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=None,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=1000, updateRulePower=1.0,
stepNotes=4,
lambdaHF0=0.00,alphaHF0=0.99,
displayEvolution=False, verbose=True, makeMovie=False):
"""
HGAMMA, HPHI, HF0, HM, WM, recoError =
SIMM(SX, WF0, WGAMMA, numberOfFilters=4,
numberOfAccompanimentSpectralShapes=10, HGAMMA0=None, HPHI0=None,
HF00=None, WM0=None, HM0=None, numberOfIterations=1000,
updateRulePower=1.0, stepNotes=4,
lambdaHF0=0.00, alphaHF0=0.99, displayEvolution=False,
verbose=True)
Implementation of the Smooth-filters Instantaneous Mixture Model
(SIMM). This model can be used to estimate the main melody of a
song, and separate the lead voice from the accompaniment, provided
that the basis WF0 is constituted of elements associated to
particular pitches.
Inputs:
SX
the F x N power spectrogram to be approximated.
F is the number of frequency bins, while N is the number of
analysis frames
WF0
the F x NF0 basis matrix containing the NF0 source elements
WGAMMA
the F x P basis matrix of P smooth elementary filters
numberOfFilters
the number of filters K to be considered
numberOfAccompanimentSpectralShapes
the number of spectral shapes R for the accompaniment
HGAMMA0
the P x K decomposition matrix of WPHI on WGAMMA
HPHI0
the K x N amplitude matrix of the filter part of the lead
instrument
HF00
the NF0 x N amplitude matrix for the source part of the lead
instrument
WM0
the F x R matrix of spectral shapes for the
accompaniment
HM0
the R x N amplitude matrix associated with each of the R
accompaniment spectral shapes
numberOfIterations
the number of iterations for the estimation algorithm
updateRulePower
the power to which the multiplicative gradient is elevated to
stepNotes
the number of elements in WF0 per semitone. stepNotes=4 means
that there are 48 elements per octave in WF0.
lambdaHF0
Lagrangian multiplier for the octave control
alphaHF0
parameter that controls how much influence a lower octave
can have on the upper octave's amplitude.
Outputs:
HGAMMA
the estimated P x K decomposition matrix of WPHI on WGAMMA
HPHI
the estimated K x N amplitude matrix of the filter part
HF0
the estimated NF0 x N amplitude matrix for the source part
HM
the estimated R x N amplitude matrix for the accompaniment
WM
the estimated F x R spectral shapes for the accompaniment
recoError
the successive values of the Itakura Saito divergence
between the power spectrogram and the spectrogram
computed thanks to the updated estimations of the matrices.
Please also refer to the following article for more details about
the algorithm within this function, as well as the meaning of the
different matrices that are involved:
J.-L. Durrieu, G. Richard, B. David and C. Fevotte
Source/Filter Model for Unsupervised Main Melody
Extraction From Polyphonic Audio Signals
IEEE Transactions on Audio, Speech and Language Processing
Vol. 18, No. 3, March 2010
"""
eps = 10 ** (-20)
if displayEvolution:
import matplotlib.pyplot as plt
from imageMatlab import imageM
plt.ion()
print "Is the display interactive? ", plt.isinteractive()
# renamed for convenience:
K = numberOfFilters
R = numberOfAccompanimentSpectralShapes
omega = updateRulePower
F, N = SX.shape
Fwf0, NF0 = WF0.shape
Fwgamma, P = WGAMMA.shape
# Checking the sizes of the matrices
if Fwf0 != F:
return False # TO REVIEW!!!
if HGAMMA0 is None:
HGAMMA0 = np.abs(randn(P, K))
else:
if not(isinstance(HGAMMA0,np.ndarray)): # default behaviour
HGAMMA0 = np.array(HGAMMA0)
Phgamma0, Khgamma0 = HGAMMA0.shape
if Phgamma0 != P or Khgamma0 != K:
print "Wrong dimensions for given HGAMMA0, \n"
print "random initialization used instead"
HGAMMA0 = np.abs(randn(P, K))
HGAMMA = np.copy(HGAMMA0)
if HPHI0 is None: # default behaviour
HPHI = np.abs(randn(K, N))
else:
Khphi0, Nhphi0 = np.array(HPHI0).shape
if Khphi0 != K or Nhphi0 != N:
print "Wrong dimensions for given HPHI0, \n"
print "random initialization used instead"
HPHI = np.abs(randn(K, N))
else:
HPHI = np.copy(np.array(HPHI0))
if HF00 is None:
HF00 = np.abs(randn(NF0, N))
else:
if np.array(HF00).shape[0] == NF0 and np.array(HF00).shape[1] == N:
HF00 = np.array(HF00)
else:
print "Wrong dimensions for given HF00, \n"
print "random initialization used instead"
HF00 = np.abs(randn(NF0, N))
HF0 = np.copy(HF00)
if HM0 is None:
HM0 = np.abs(randn(R, N))
else:
if np.array(HM0).shape[0] == R and np.array(HM0).shape[1] == N:
HM0 = np.array(HM0)
else:
print "Wrong dimensions for given HM0, \n"
print "random initialization used instead"
HM0 = np.abs(randn(R, N))
HM = np.copy(HM0)
if WM0 is None:
WM0 = np.abs(randn(F, R))
else:
if np.array(WM0).shape[0] == F and np.array(WM0).shape[1] == R:
WM0 = np.array(WM0)
else:
print "Wrong dimensions for given WM0, \n"
print "random initialization used instead"
WM0 = np.abs(randn(F, R))
WM = np.copy(WM0)
# Iterations to estimate the SIMM parameters:
WPHI = np.dot(WGAMMA, HGAMMA)
SF0 = np.dot(WF0, HF0)
SPHI = np.dot(WPHI, HPHI)
SM = np.dot(WM, HM)
hatSX = SF0 * SPHI + SM
## SX = SX + np.abs(randn(F, N)) ** 2
# should not need this line
# which ensures that data is not
# 0 everywhere.
# temporary matrices
tempNumFbyN = np.zeros([F, N])
tempDenFbyN = np.zeros([F, N])
# Array containing the reconstruction error after the update of each
# of the parameter matrices:
recoError = np.zeros([numberOfIterations * 5 * 2 + NF0 * 2 + 1])
recoError[0] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error at beginning: ", recoError[0]
counterError = 1
if displayEvolution:
h1 = plt.figure(1)
if makeMovie:
dirName = 'tmp%s/' %time.strftime("%Y%m%d%H%M%S")
os.system('mkdir %s' %dirName)
# Main loop for multiplicative updating rules:
for n in np.arange(numberOfIterations):
# order of re-estimation: HF0, HPHI, HM, HGAMMA, WM
if verbose:
print "iteration ", n, " over ", numberOfIterations
if displayEvolution:
h1.clf();imageM(db(HF0));
plt.clim([np.amax(db(HF0))-100, np.amax(db(HF0))]);plt.draw();
## h1.clf();
## imageM(HF0 * np.outer(np.ones([NF0, 1]),
## 1 / (HF0.max(axis=0))));
if makeMovie:
filename = dirName + '%04d' % n + '.png'
plt.savefig(filename, dpi=100)
# updating HF0:
tempNumFbyN = (SPHI * SX) / np.maximum(hatSX ** 2, eps)
tempDenFbyN = SPHI / np.maximum(hatSX, eps)
# This to enable octave control
HF0[np.arange(12 * stepNotes, NF0), :] \
= HF0[np.arange(12 * stepNotes, NF0), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes,
NF0)].T, tempNumFbyN) \
/ np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes, NF0)].T,
tempDenFbyN) \
+ lambdaHF0 * (- (alphaHF0 - 1.0) \
/ np.maximum(HF0[
np.arange(12 * stepNotes, NF0), :], eps) \
+ HF0[
np.arange(NF0 - 12 * stepNotes), :]),
eps)) ** omega
HF0[np.arange(12 * stepNotes), :] \
= HF0[np.arange(12 * stepNotes), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempNumFbyN) /
np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempDenFbyN), eps)) ** omega
## # normal update rules:
## HF0 = HF0 * (np.dot(WF0.T, tempNumFbyN) /
## np.maximum(np.dot(WF0.T, tempDenFbyN), eps)) ** omega
SF0 = np.maximum(np.dot(WF0, HF0),eps)
hatSX = np.maximum(SF0 * SPHI + SM,eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HF0 : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HPHI
tempNumFbyN = (SF0 * SX) / np.maximum(hatSX ** 2, eps)
tempDenFbyN = SF0 / np.maximum(hatSX, eps)
HPHI = HPHI * (np.dot(WPHI.T, tempNumFbyN) / np.maximum(np.dot(WPHI.T, tempDenFbyN), eps)) ** omega
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HPHI : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HM
tempNumFbyN = SX / np.maximum(hatSX ** 2, eps)
tempDenFbyN = 1 / np.maximum(hatSX, eps)
HM = np.maximum(HM * (np.dot(WM.T, tempNumFbyN) / np.maximum(np.dot(WM.T, tempDenFbyN), eps)) ** omega, eps)
SM = np.maximum(np.dot(WM, HM), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HM : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HGAMMA
tempNumFbyN = (SF0 * SX) / np.maximum(hatSX ** 2, eps)
tempDenFbyN = SF0 / np.maximum(hatSX, eps)
HGAMMA = np.maximum(HGAMMA * (np.dot(WGAMMA.T, np.dot(tempNumFbyN, HPHI.T)) / np.maximum(np.dot(WGAMMA.T, np.dot(tempDenFbyN, HPHI.T)), eps)) ** omega, eps)
sumHGAMMA = np.sum(HGAMMA, axis=0)
HGAMMA[:, sumHGAMMA>0] = HGAMMA[:, sumHGAMMA>0] / np.outer(np.ones(P), sumHGAMMA[sumHGAMMA>0])
HPHI = HPHI * np.outer(sumHGAMMA, np.ones(N))
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
WPHI = np.maximum(np.dot(WGAMMA, HGAMMA), eps)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after HGAMMA: ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating WM, after a certain number of iterations (here, after 1 iteration)
if n > -1: # this test can be used such that WM is updated only
# after a certain number of iterations
tempNumFbyN = SX / np.maximum(hatSX ** 2, eps)
tempDenFbyN = 1 / np.maximum(hatSX, eps)
WM = np.maximum(WM * (np.dot(tempNumFbyN, HM.T) /
np.maximum(np.dot(tempDenFbyN, HM.T),
eps)) ** omega, eps)
sumWM = np.sum(WM, axis=0)
WM[:, sumWM>0] = (WM[:, sumWM>0] /
np.outer(np.ones(F),sumWM[sumWM>0]))
HM = HM * np.outer(sumWM, np.ones(N))
SM = np.maximum(np.dot(WM, HM), eps)
hatSX = np.maximum(SF0 * SPHI + SM, eps)
recoError[counterError] = ISDistortion(SX, hatSX)
if verbose:
print "Reconstruction error difference after WM : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
return HGAMMA, HPHI, HF0, HM, WM, recoError
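# Illustrative smoke test (not in the original script); the shapes are the
# only real constraint, so random inputs are enough to exercise the updates:
#   F, N, NF0, P = 513, 100, 200, 30
#   SX = np.abs(randn(F, N)) ** 2
#   WF0 = np.abs(randn(F, NF0))
#   WGAMMA = np.abs(randn(F, P))
#   HGAMMA, HPHI, HF0, HM, WM, recoError = SIMM(
#       SX, WF0, WGAMMA, numberOfIterations=10, verbose=False)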
def Stereo_SIMM(# the data to be fitted to:
SXR, SXL,
# the basis matrices for the spectral combs
WF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=4, numberOfAccompanimentSpectralShapes=10,
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=None,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=1000, updateRulePower=1.0,
stepNotes=4,
lambdaHF0=0.00,alphaHF0=0.99,
displayEvolution=False, verbose=True,
updateHGAMMA=True):
"""
alphaR, alphaL, HGAMMA, HPHI, HF0, betaR, betaL, HM, WM, recoError =
Stereo_SIMM(SXR, SXL, WF0, WGAMMA, numberOfFilters=4,
numberOfAccompanimentSpectralShapes=10, HGAMMA0=None, HPHI0=None,
HF00=None, WM0=None, HM0=None, numberOfIterations=1000,
updateRulePower=1.0, stepNotes=4,
lambdaHF0=0.00, alphaHF0=0.99, displayEvolution=False,
verbose=True)
Implementation of the Smooth-filters Instantaneous Mixture Model
(SIMM). This model can be used to estimate the main melody of a
song, and separate the lead voice from the accompaniment, provided
that the basis WF0 is constituted of elements associated to
particular pitches.
Inputs:
SXR, SXL
the F x N power spectrograms (right and left channels) to be approximated.
F is the number of frequency bins, while N is the number of
analysis frames
WF0
the F x NF0 basis matrix containing the NF0 source elements
WGAMMA
the F x P basis matrix of P smooth elementary filters
numberOfFilters
the number of filters K to be considered
numberOfAccompanimentSpectralShapes
the number of spectral shapes R for the accompaniment
HGAMMA0
the P x K decomposition matrix of WPHI on WGAMMA
HPHI0
the K x N amplitude matrix of the filter part of the lead
instrument
HF00
the NF0 x N amplitude matrix for the source part of the lead
instrument
WM0
the F x R matrix of spectral shapes for the
accompaniment
HM0
the R x N amplitude matrix associated with each of the R
accompaniment spectral shapes
numberOfIterations
the number of iterations for the estimation algorithm
updateRulePower
the power to which the multiplicative gradient is elevated to
stepNotes
the number of elements in WF0 per semitone. stepNotes=4 means
that there are 48 elements per octave in WF0.
lambdaHF0
Lagrangian multiplier for the octave control
alphaHF0
parameter that controls how much influence a lower octave
can have on the upper octave's amplitude.
Outputs:
HGAMMA
the estimated P x K decomposition matrix of WPHI on WGAMMA
HPHI
the estimated K x N amplitude matrix of the filter part
HF0
the estimated NF0 x N amplitude matrix for the source part
HM
the estimated R x N amplitude matrix for the accompaniment
WM
the estimated F x R spectral shapes for the accompaniment
recoError
the successive values of the Itakura Saito divergence
between the power spectrogram and the spectrogram
computed thanks to the updated estimations of the matrices.
Please also refer to the following article for more details about
the algorithm within this function, as well as the meaning of the
different matrices that are involved:
J.-L. Durrieu, G. Richard, B. David and C. Fevotte
Source/Filter Model for Unsupervised Main Melody
Extraction From Polyphonic Audio Signals
IEEE Transactions on Audio, Speech and Language Processing
Vol. 18, No. 3, March 2010
"""
eps = 10 ** (-20)
if displayEvolution:
import matplotlib.pyplot as plt
from imageMatlab import imageM
plt.ion()
print "Is the display interactive? ", plt.isinteractive()
# renamed for convenience:
K = numberOfFilters
R = numberOfAccompanimentSpectralShapes
omega = updateRulePower
F, N = SXR.shape
if (F, N) != SXL.shape:
print "The input STFT matrices do not have the same dimension.\n"
print "Please check what happened..."
raise ValueError("Dimension of STFT matrices must be the same.")
Fwf0, NF0 = WF0.shape
Fwgamma, P = WGAMMA.shape
# Checking the sizes of the matrices
if Fwf0 != F:
return False # TO REVIEW!!!
if HGAMMA0 is None:
HGAMMA0 = np.abs(randn(P, K))
else:
if not(isinstance(HGAMMA0,np.ndarray)): # default behaviour
HGAMMA0 = np.array(HGAMMA0)
Phgamma0, Khgamma0 = HGAMMA0.shape
if Phgamma0 != P or Khgamma0 != K:
print "Wrong dimensions for given HGAMMA0, \n"
print "random initialization used instead"
HGAMMA0 = np.abs(randn(P, K))
HGAMMA = np.copy(HGAMMA0)
if HPHI0 is None: # default behaviour
HPHI = np.abs(randn(K, N))
else:
Khphi0, Nhphi0 = np.array(HPHI0).shape
if Khphi0 != K or Nhphi0 != N:
print "Wrong dimensions for given HPHI0, \n"
print "random initialization used instead"
HPHI = np.abs(randn(K, N))
else:
HPHI = np.copy(np.array(HPHI0))
if HF00 is None:
HF00 = np.abs(randn(NF0, N))
else:
if np.array(HF00).shape[0] == NF0 and np.array(HF00).shape[1] == N:
HF00 = np.array(HF00)
else:
print "Wrong dimensions for given HF00, \n"
print "random initialization used instead"
HF00 = np.abs(randn(NF0, N))
HF0 = np.copy(HF00)
if HM0 is None:
HM0 = np.abs(randn(R, N))
else:
if np.array(HM0).shape[0] == R and np.array(HM0).shape[1] == N:
HM0 = np.array(HM0)
else:
print "Wrong dimensions for given HM0, \n"
print "random initialization used instead"
HM0 = np.abs(randn(R, N))
HM = np.copy(HM0)
if WM0 is None:
WM0 = np.abs(randn(F, R))
else:
if np.array(WM0).shape[0] == F and np.array(WM0).shape[1] == R:
WM0 = np.array(WM0)
else:
print "Wrong dimensions for given WM0, \n"
print "random initialization used instead"
WM0 = np.abs(randn(F, R))
WM = np.copy(WM0)
alphaR = 0.5
alphaL = 0.5
betaR = np.diag(np.random.rand(R))
betaL = np.eye(R) - betaR
# Iterations to estimate the SIMM parameters:
WPHI = np.dot(WGAMMA, HGAMMA)
SF0 = np.dot(WF0, HF0)
SPHI = np.dot(WPHI, HPHI)
# SM = np.dot(WM, HM)
hatSXR = (alphaR**2) * SF0 * SPHI + np.dot(np.dot(WM, betaR**2),HM)
hatSXL = (alphaL**2) * SF0 * SPHI + np.dot(np.dot(WM, betaL**2),HM)
# SX = SX + np.abs(randn(F, N)) ** 2
# should not need this line
# which ensures that data is not
# 0 everywhere.
# temporary matrices
tempNumFbyN = np.zeros([F, N])
tempDenFbyN = np.zeros([F, N])
# Array containing the reconstruction error after the update of each
# of the parameter matrices:
recoError = np.zeros([numberOfIterations * 5 * 2 + NF0 * 2 + 1])
recoError[0] = ISDistortion(SXR, hatSXR) + ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error at beginning: ", recoError[0]
counterError = 1
if displayEvolution:
h1 = plt.figure(1)
# Main loop for multiplicative updating rules:
for n in np.arange(numberOfIterations):
# order of re-estimation: HF0, HPHI, HM, HGAMMA, WM
if verbose:
print "iteration ", n, " over ", numberOfIterations
if displayEvolution:
h1.clf();imageM(db(HF0));
plt.clim([np.amax(db(HF0))-100, np.amax(db(HF0))]);plt.draw();
# h1.clf();
# imageM(HF0 * np.outer(np.ones([NF0, 1]),
# 1 / (HF0.max(axis=0))));
# updating HF0:
tempNumFbyN = ((alphaR**2) * SPHI * SXR) / np.maximum(hatSXR ** 2, eps)\
+ ((alphaL**2) * SPHI * SXL) / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = (alphaR**2) * SPHI / np.maximum(hatSXR, eps)\
+ (alphaL**2) * SPHI / np.maximum(hatSXL, eps)
# This to enable octave control
HF0[np.arange(12 * stepNotes, NF0), :] \
= HF0[np.arange(12 * stepNotes, NF0), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes,
NF0)].T, tempNumFbyN) \
/ np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes, NF0)].T,
tempDenFbyN) \
+ lambdaHF0 * (- (alphaHF0 - 1.0) \
/ np.maximum(HF0[
np.arange(12 * stepNotes, NF0), :], eps) \
+ HF0[
np.arange(NF0 - 12 * stepNotes), :]),
eps)) ** omega
HF0[np.arange(12 * stepNotes), :] \
= HF0[np.arange(12 * stepNotes), :] \
* (np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempNumFbyN) /
np.maximum(
np.dot(WF0[:, np.arange(12 * stepNotes)].T,
tempDenFbyN), eps)) ** omega
## # normal update rules:
## HF0 = HF0 * (np.dot(WF0.T, tempNumFbyN) /
## np.maximum(np.dot(WF0.T, tempDenFbyN), eps)) ** omega
SF0 = np.maximum(np.dot(WF0, HF0), eps)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM),
eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM),
eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HF0 : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HPHI
if updateHGAMMA or True: # TO REVIEW: 'or True' makes the HPHI update unconditional
tempNumFbyN = ((alphaR**2) * SF0 * SXR) / np.maximum(hatSXR ** 2, eps)\
+ ((alphaL**2) * SF0 * SXL) / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = (alphaR**2) * SF0 / np.maximum(hatSXR, eps)\
+ (alphaL**2) * SF0 / np.maximum(hatSXL, eps)
HPHI = HPHI * (np.dot(WPHI.T, tempNumFbyN) / np.maximum(np.dot(WPHI.T, tempDenFbyN), eps)) ** omega
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM),
eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM),
eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HPHI : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HM
# tempNumFbyN = SXR / np.maximum(hatSXR ** 2, eps)\
# + SXL / np.maximum(hatSXL ** 2, eps)
# tempDenFbyN = 1 / np.maximum(hatSXR, eps)\
# + 1 / np.maximum(hatSXL, eps)
# HM = np.maximum(HM * (np.dot(WM.T, tempNumFbyN) / np.maximum(np.dot(WM.T, tempDenFbyN), eps)) ** omega, eps)
HM = HM * \
((np.dot(np.dot((betaR**2), WM.T), SXR /
np.maximum(hatSXR ** 2, eps)) +
np.dot(np.dot((betaL**2), WM.T), SXL /
np.maximum(hatSXL ** 2, eps))
) /
np.maximum(np.dot(np.dot((betaR**2), WM.T), 1 /
np.maximum(hatSXR, eps)) +
np.dot(np.dot((betaL**2), WM.T), 1 /
np.maximum(hatSXL, eps)),
eps)) ** omega
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HM : ", recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating HGAMMA
if updateHGAMMA:
tempNumFbyN = ((alphaR ** 2) * SF0 * SXR) / np.maximum(hatSXR ** 2, eps)\
+ ((alphaL ** 2) * SF0 * SXL) / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = (alphaR ** 2) * SF0 / np.maximum(hatSXR, eps) \
+ (alphaL ** 2) * SF0 / np.maximum(hatSXL, eps)
HGAMMA = np.maximum(HGAMMA * (np.dot(WGAMMA.T, np.dot(tempNumFbyN, HPHI.T)) / np.maximum(np.dot(WGAMMA.T, np.dot(tempDenFbyN, HPHI.T)), eps)) ** omega, eps)
sumHGAMMA = np.sum(HGAMMA, axis=0)
HGAMMA[:, sumHGAMMA>0] = HGAMMA[:, sumHGAMMA>0] / np.outer(np.ones(P), sumHGAMMA[sumHGAMMA>0])
HPHI = HPHI * np.outer(sumHGAMMA, np.ones(N))
sumHPHI = np.sum(HPHI, axis=0)
HPHI[:, sumHPHI>0] = HPHI[:, sumHPHI>0] / np.outer(np.ones(K), sumHPHI[sumHPHI>0])
HF0 = HF0 * np.outer(np.ones(NF0), sumHPHI)
WPHI = np.maximum(np.dot(WGAMMA, HGAMMA), eps)
SF0 = np.maximum(np.dot(WF0, HF0), eps)
SPHI = np.maximum(np.dot(WPHI, HPHI), eps)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HGAMMA: ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating WM, after a certain number of iterations (here, after 1 iteration)
if n > -1: # this test can be used such that WM is updated only
# after a certain number of iterations
## tempNumFbyN = SX / np.maximum(hatSX ** 2, eps)
## tempDenFbyN = 1 / np.maximum(hatSX, eps)
## WM = np.maximum(WM * (np.dot(tempNumFbyN, HM.T) /
## np.maximum(np.dot(tempDenFbyN, HM.T),
## eps)) ** omega, eps)
WM = WM * \
((np.dot(SXR / np.maximum(hatSXR ** 2, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(SXL / np.maximum(hatSXL ** 2, eps),
np.dot(HM.T, betaL ** 2))
) /
(np.dot(1 / np.maximum(hatSXR, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(1 / np.maximum(hatSXL, eps),
np.dot(HM.T, betaL ** 2))
)) ** omega
sumWM = np.sum(WM, axis=0)
WM[:, sumWM>0] = (WM[:, sumWM>0] /
np.outer(np.ones(F),sumWM[sumWM>0]))
HM = HM * np.outer(sumWM, np.ones(N))
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after WM : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating alphaR and alphaL:
tempNumFbyN = SF0 * SPHI * SXR / np.maximum(hatSXR ** 2, eps)
tempDenFbyN = SF0 * SPHI / np.maximum(hatSXR, eps)
alphaR = np.maximum(alphaR *
(np.sum(tempNumFbyN) /
np.sum(tempDenFbyN)) ** (omega*.1), eps)
tempNumFbyN = SF0 * SPHI * SXL / np.maximum(hatSXL ** 2, eps)
tempDenFbyN = SF0 * SPHI / np.maximum(hatSXL, eps)
alphaL = np.maximum(alphaL *
(np.sum(tempNumFbyN) /
np.sum(tempDenFbyN)) ** (omega*.1), eps)
alphaR = alphaR / np.maximum(alphaR + alphaL, .001)
alphaL = np.copy(1 - alphaR)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after ALPHA : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating betaR and betaL
betaR = np.diag(np.diag(np.maximum(betaR *
((np.dot(np.dot(WM.T, SXR / np.maximum(hatSXR ** 2, eps)), HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXR, eps)), HM.T))) ** (omega*.1), eps)))
betaL = np.diag(np.diag(np.maximum(betaL *
((np.dot(np.dot(WM.T, SXL / np.maximum(hatSXL ** 2, eps)), HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXL, eps)), HM.T))) ** (omega*.1), eps)))
betaR = betaR / np.maximum(betaR + betaL, eps)
betaL = np.copy(np.eye(R) - betaR)
hatSXR = np.maximum((alphaR**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum((alphaL**2) * SF0 * SPHI + \
np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after BETA : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
return alphaR, alphaL, HGAMMA, HPHI, HF0, betaR, betaL, HM, WM, recoError
def stereo_NMF(SXR, SXL,
numberOfAccompanimentSpectralShapes,
WM0=None, HM0=None,
numberOfIterations=50, updateRulePower=1.0,
verbose=False, displayEvolution=False):
eps = 10 ** (-20)
if displayEvolution:
import matplotlib.pyplot as plt
from imageMatlab import imageM
plt.ion()
print "Is the display interactive? ", plt.isinteractive()
R = numberOfAccompanimentSpectralShapes
omega = updateRulePower
F, N = SXR.shape
if (F, N) != SXL.shape:
print "The input STFT matrices do not have the same dimension.\n"
print "Please check what happened..."
raise ValueError("Dimension of STFT matrices must be the same.")
if HM0 is None:
HM0 = np.abs(randn(R, N))
else:
if np.array(HM0).shape[0] == R and np.array(HM0).shape[1] == N:
HM0 = np.array(HM0)
else:
print "Wrong dimensions for given HM0, \n"
print "random initialization used instead"
HM0 = np.abs(randn(R, N))
HM = np.copy(HM0)
if WM0 is None:
WM0 = np.abs(randn(F, R))
else:
if np.array(WM0).shape[0] == F and np.array(WM0).shape[1] == R:
WM0 = np.array(WM0)
else:
print "Wrong dimensions for given WM0, \n"
print "random initialization used instead"
WM0 = np.abs(randn(F, R))
WM = np.copy(WM0)
betaR = np.diag(np.random.rand(R))
betaL = np.eye(R) - betaR
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2), HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2), HM), eps)
# temporary matrices
tempNumFbyN = np.zeros([F, N])
tempDenFbyN = np.zeros([F, N])
recoError = np.zeros([numberOfIterations * 3 + 1])
recoError[0] = ISDistortion(SXR, hatSXR) + ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error at beginning: ", recoError[0]
counterError = 1
if displayEvolution:
h1 = plt.figure(1)
for n in np.arange(numberOfIterations):
# order of re-estimation: HF0, HPHI, HM, HGAMMA, WM
if verbose:
print "iteration ", n, " over ", numberOfIterations
if displayEvolution:
h1.clf()
imageM(db(hatSXR))
plt.clim([np.amax(db(hatSXR))-100, np.amax(db(hatSXR))])
plt.draw()
# updating HM
HM = HM * \
((np.dot(np.dot((betaR**2), WM.T), SXR /
np.maximum(hatSXR ** 2, eps)) +
np.dot(np.dot((betaL**2), WM.T), SXL /
np.maximum(hatSXL ** 2, eps))
) /
np.maximum(np.dot(np.dot((betaR**2), WM.T), 1 /
np.maximum(hatSXR, eps)) +
np.dot(np.dot((betaL**2), WM.T), 1 /
np.maximum(hatSXL, eps)),
eps)) ** omega
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2),HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2),HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after HM : ",\
recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating WM
WM = WM * \
((np.dot(SXR / np.maximum(hatSXR ** 2, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(SXL / np.maximum(hatSXL ** 2, eps),
np.dot(HM.T, betaL ** 2))
) /
(np.dot(1 / np.maximum(hatSXR, eps),
np.dot(HM.T, betaR ** 2)) +
np.dot(1 / np.maximum(hatSXL, eps),
np.dot(HM.T, betaL ** 2))
)) ** omega
sumWM = np.sum(WM, axis=0)
WM[:, sumWM>0] = (WM[:, sumWM>0] /
np.outer(np.ones(F),sumWM[sumWM>0]))
HM = HM * np.outer(sumWM, np.ones(N))
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2), HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2), HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after WM : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
# updating betaR and betaL
betaR = np.diag(np.diag(np.maximum(betaR *
((np.dot(np.dot(WM.T, SXR / np.maximum(hatSXR ** 2,
eps)),
HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXR,
eps)),
HM.T))) ** (omega*.1), eps)))
betaL = np.diag(np.diag(np.maximum(betaL *
((np.dot(np.dot(WM.T, SXL / np.maximum(hatSXL ** 2,
eps)),
HM.T)) /
(np.dot(np.dot(WM.T, 1 / np.maximum(hatSXL,
eps)),
HM.T))) ** (omega*.1), eps)))
betaR = betaR / np.maximum(betaR + betaL, eps)
betaL = np.copy(np.eye(R) - betaR)
hatSXR = np.maximum(np.dot(np.dot(WM, betaR**2), HM), eps)
hatSXL = np.maximum(np.dot(np.dot(WM, betaL**2), HM), eps)
recoError[counterError] = ISDistortion(SXR, hatSXR) \
+ ISDistortion(SXL, hatSXL)
if verbose:
print "Reconstruction error difference after BETA : ",
print recoError[counterError] - recoError[counterError - 1]
counterError += 1
return betaR, betaL, HM, WM
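# Illustrative call (not in the original script): stereo_NMF factorises a
# pair of channel spectrograms with shared WM/HM and per-channel panning
# matrices betaR/betaL, e.g.:
#   SXR, SXL = np.abs(randn(513, 100)) ** 2, np.abs(randn(513, 100)) ** 2
#   betaR, betaL, HM, WM = stereo_NMF(SXR, SXL,
#       numberOfAccompanimentSpectralShapes=10, numberOfIterations=5)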
|
nilq/baby-python
|
python
|
import sys
import os
import torch
from helen.modules.python.TextColor import TextColor
from helen.modules.python.models.predict_cpu import predict_cpu
from helen.modules.python.models.predict_gpu import predict_gpu
from helen.modules.python.FileManager import FileManager
from os.path import isfile, join
from os import listdir
"""
The Call Consensus method generates base predictions for images generated through MarginPolish. This script reads
HDF5 files generated by MarginPolish and produces another HDF5 file that holds all predictions. The generated HDF5 file
is given to stitch.py, which stitches the segments using an alignment to produce a polished sequence.
The algorithm is described here:
1) INPUTS:
- directory path to the image files generated by MarginPolish
- model path directing to a trained model
- batch size for mini-batch prediction
- num workers for mini-batch processing threads
- output directory path to where the output hdf5 will be saved
- gpu mode indicating if GPU will be used
2) METHOD:
- Call the predict function, which loads the neural network, generates base predictions and saves them into an HDF5 file
- Loads the model
- Iterates over the input images in minibatches
- For each image, uses a sliding window over the image sequence
- Aggregates the predictions to get a sequence prediction for the entire image sequence
- Saves all the predictions to a file
3) OUTPUT:
- An HDF5 file containing all the base predictions
"""
def get_file_paths_from_directory(directory_path):
"""
Returns all paths of files given a directory path
:param directory_path: Path to the directory
:return: A list of paths of files
"""
file_paths = [os.path.abspath(join(directory_path, file)) for file in listdir(directory_path)
if isfile(join(directory_path, file)) and file[-2:] == 'h5']
return file_paths
def call_consensus(image_dir, model_path, batch_size, num_workers, threads, output_dir, output_prefix, gpu_mode,
device_ids, callers):
"""
This method provides an interface to call the predict method that generates the prediction HDF5 file
:param image_dir: Path to directory where all MarginPolish images are saved
:param model_path: Path to a trained model
:param batch_size: Batch size for minibatch processing
:param num_workers: Number of workers for minibatch processing
:param threads: Number of threads for pytorch
:param output_dir: Path to the output directory
:param output_prefix: Prefix of the output HDF5 file
:param gpu_mode: If true, predict method will use GPU.
:param device_ids: List of CUDA devices to use.
:param callers: Total number of callers.
:return:
"""
# check the model file
if not os.path.isfile(model_path):
sys.stderr.write(TextColor.RED + "ERROR: CAN NOT LOCATE MODEL FILE.\n" + TextColor.END)
exit(1)
# check the input directory
if not os.path.isdir(image_dir):
sys.stderr.write(TextColor.RED + "ERROR: CAN NOT LOCATE IMAGE DIRECTORY.\n" + TextColor.END)
exit(1)
# check batch_size
if batch_size <= 0:
sys.stderr.write(TextColor.RED + "ERROR: batch_size NEEDS TO BE >0.\n" + TextColor.END)
exit(1)
# check num_workers
if num_workers < 0:
sys.stderr.write(TextColor.RED + "ERROR: num_workers NEEDS TO BE >=0.\n" + TextColor.END)
exit(1)
# check number of threads
if threads <= 0:
sys.stderr.write(TextColor.RED + "ERROR: THREAD NEEDS TO BE >=0.\n" + TextColor.END)
exit(1)
output_dir = FileManager.handle_output_directory(output_dir)
# create a filename for the output file
output_filename = os.path.join(output_dir, output_prefix)
# inform the output directory
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "OUTPUT FILE: " + output_filename + "\n")
if gpu_mode:
# Make sure that a CUDA-capable GPU is available
if not torch.cuda.is_available():
sys.stderr.write(TextColor.RED + "ERROR: TORCH IS NOT BUILT WITH CUDA.\n" + TextColor.END)
sys.stderr.write(TextColor.RED + "SEE TORCH CAPABILITY:\n$ python3\n"
">>> import torch \n"
">>> torch.cuda.is_available()\n If true then cuda is avilable"
+ TextColor.END)
exit(1)
# Now see which devices to use
if device_ids is None:
total_gpu_devices = torch.cuda.device_count()
sys.stderr.write(TextColor.GREEN + "INFO: TOTAL GPU AVAILABLE: " + str(total_gpu_devices) + "\n" + TextColor.END)
device_ids = [i for i in range(0, total_gpu_devices)]
callers = total_gpu_devices
else:
device_ids = [int(i) for i in device_ids.split(',')]
for device_id in device_ids:
major_capable, minor_capable = torch.cuda.get_device_capability(device=device_id)
if major_capable < 0: # TO REVIEW: capability majors are never negative, so this branch never fires
sys.stderr.write(TextColor.RED + "ERROR: GPU DEVICE: " + str(device_id) + " IS NOT CUDA CAPABLE.\n" + TextColor.END)
sys.stderr.write(TextColor.GREEN + "Try running: $ python3\n"
">>> import torch \n"
">>> torch.cuda.get_device_capability(device="
+ str(device_id) + ")\n" + TextColor.END)
else:
sys.stderr.write(TextColor.GREEN + "INFO: CAPABILITY OF GPU#" + str(device_id)
+ ":\t" + str(major_capable) + "-" + str(minor_capable) + "\n" + TextColor.END)
callers = len(device_ids)
sys.stderr.write(TextColor.GREEN + "INFO: AVAILABLE GPU DEVICES: " + str(device_ids) + "\n" + TextColor.END)
threads_per_caller = 0
else:
# calculate how many threads each caller can use
threads_per_caller = int(threads / callers)
device_ids = []
# chunk the inputs
input_files = get_file_paths_from_directory(image_dir)
# generate file chunks to process in parallel
file_chunks = [[] for i in range(callers)]
for i in range(0, len(input_files)):
file_chunks[i % callers].append(input_files[i])
# get the file chunks
file_chunks = [file_chunks[i] for i in range(len(file_chunks)) if len(file_chunks[i]) > 0]
callers = len(file_chunks)
if gpu_mode:
# Distributed GPU setup
predict_gpu(file_chunks, output_filename, model_path, batch_size, callers, device_ids, num_workers)
else:
# distributed CPU setup, call the prediction function
predict_cpu(file_chunks, output_filename, model_path, batch_size,
callers, threads_per_caller, num_workers)
# notify the user that process has completed successfully
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "PREDICTION GENERATED SUCCESSFULLY.\n")
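# Illustrative invocation (all paths and the prefix are placeholders, not
# values from the original module):
#   call_consensus(image_dir='images/', model_path='model.pkl',
#                  batch_size=256, num_workers=4, threads=8,
#                  output_dir='output/', output_prefix='helen_predictions',
#                  gpu_mode=False, device_ids=None, callers=8)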
|
nilq/baby-python
|
python
|
from dpconverge.data_set import DataSet
import numpy as np
n_features = 2
points_per_feature = 100
centers = [[2, 2], [4, 4]]
ds = DataSet(parameter_count=2)
n_samples = 500
outer_circ_x = 1.0 + np.cos(np.linspace(0, np.pi, n_samples)) / 2
outer_circ_y = 0.5 + np.sin(np.linspace(0, np.pi, n_samples))
X = np.vstack((outer_circ_x, outer_circ_y)).T
np.random.seed(1)
X[:, 0] += (np.random.rand(500) - 0.5) / 16
X[:, 1] += (np.random.rand(500) - 0.5) / 16
X[:, 0] += (np.random.rand(500) - 0.5) / 16
X[:, 1] += (np.random.rand(500) - 0.5) / 16
ds.add_blob(1, X)
ds.plot_blobs(ds.classifications, x_lim=[0, 6], y_lim=[0, 6])
component_count = 32
ds.cluster(
component_count=component_count,
burn_in=1000,
iteration_count=200,
random_seed=123
)
valid_components = ds.get_valid_components()
print "Recommended component count: ", len(valid_components)
for i in range(component_count):
if i in valid_components:
ds.plot_iteration_traces(i)
for i in range(component_count):
if i not in valid_components:
print "Possible invalid Component"
ds.plot_iteration_traces(i)
ds.plot_animated_trace()
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.1 on 2020-09-16 15:33
from django.db import migrations
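# This is a merge migration: it declares two parent 0091 migrations and no
# operations of its own, so applying it only reconciles the divergent
# migration graph.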
class Migration(migrations.Migration):
dependencies = [
('erp', '0091_auto_20200914_1720'),
('erp', '0091_auto_20200914_1638'),
]
operations = [
]
|
nilq/baby-python
|
python
|
"""Montrer dans le widget
Revision ID: 8b4768bb1336
Revises: dc85620e95c3
Create Date: 2021-04-12 17:24:31.906506
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8b4768bb1336'
down_revision = 'dc85620e95c3'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('recommandation', sa.Column('montrer_dans_le_widget', sa.Boolean(), nullable=True))
def downgrade():
op.drop_column('recommandation', 'montrer_dans_le_widget')
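# For reference, this revision is applied or rolled back with the standard
# Alembic CLI commands:
#   alembic upgrade 8b4768bb1336
#   alembic downgrade dc85620e95c3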
|
nilq/baby-python
|
python
|
def helper(s, k, maxstr, ctr):
# Backtracking search: at position ctr, find the largest digit to its right;
# swap in each rightmost-first occurrence of it, record the candidate in
# maxstr, recurse with one fewer swap available, then undo the swap.
if k == 0 or ctr == len(s):
return
n = len(s)
maxx = s[ctr]
for i in range(ctr+1, n):
if int(maxx) < int(s[i]):
maxx = s[i]
if maxx != s[ctr]:
k -= 1
for j in range(n-1, ctr, -1):
if int(s[j]) == int(maxx):
s[j], s[ctr] = s[ctr], s[j]
if int(maxstr[0]) < int("".join(map(str, s))):
maxstr[0] = "".join(map(str, s))
helper(s, k, maxstr, ctr+1)
s[j], s[ctr] = s[ctr], s[j]
else:
helper(s, k, maxstr, ctr+1)
class Solution:
#Function to find the largest number after k swaps.
def findMaximumNum(self, s, k):
#code here
maxx = [s]
s = list(map(str, s.strip()))
helper(s, k, maxx, 0)
return maxx[0]
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == "__main__":
for _ in range(1):
k = 3
s = "3435335"
ob = Solution()
print(ob.findMaximumNum(s, k))
# } Driver Code Ends
|
nilq/baby-python
|
python
|
# # -*- coding: utf-8 -*-
# import scrapy
#
# import re
#
# class A55Spider(scrapy.Spider):
# name = '55'
# allowed_domains = ['fsx.sxxz.gov.cn']
# start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/xxgkzn/']
#
# def parse(self, response):
# navi_list = response.xpath('//ul[@class="item-nav"]//@href').extract()
# web_domain = "http://fsx.sxxz.gov.cn/fsxzw/zwgk"
# for navi in navi_list:
# complete_url = web_domain + navi[2:]
# yield scrapy.Request(url=complete_url, callback=self.extract_table)
#
# def extract_table(self, response):
# web_url = response.url
# url_rule = re.compile(r'/\d+/t\d+_\d+\.html$')
# if url_rule.match(web_url):
# yield scrapy.Request(url=web_url, callback=self.table_url)
|
nilq/baby-python
|
python
|
# pop() -> removes the element at the specified index.
lista_4 = [10,9,8,7,5,6,4,2,3,1,2,3]
print(lista_4)
lista_4.pop(2)
print(lista_4)
lista_4.pop(-1)
print(lista_4)
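# pop() also returns the removed element, so it can be captured
# (an illustrative addition, not part of the original snippet):
removed = lista_4.pop(0)
print(removed)
print(lista_4)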
|
nilq/baby-python
|
python
|
from typing import List, Union
import numpy as np
def get_test_function_method_min(n: int, a: List[List[float]], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
Closure: generates and returns a test function using the Feldbaum method,
i.e. applying the minimum operator to single-extremum power functions.
:param n: number of extrema, an integer >= 1
:param a: list (length n) of steepness coefficients for the extrema; the higher the values,
the faster the function falls/rises and the narrower the extremum region, List[List[float]]
:param c: list of extremum coordinates, length n, List[List[float]]
:param p: list of smoothness exponents around each extremum;
if 0 < p[i][j] <= 1 the function has a corner at that extremum
:param b: list of function values (length n) at the extrema, List[float], len(b) = n
:return: returns a function that takes a one-dimensional list of point coordinates;
it returns the value of the test function at that point
"""
def func(x):
l = []
for i in range(n):
res = 0
for j in range(len(x)):
res = res + a[i][j] * np.abs(x[j] - c[i][j]) ** p[i][j]
res = res + b[i]
l.append(res)
res = np.array(l)
return np.min(res)
return func
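# Example (illustrative, not part of the module): a two-extremum Feldbaum
# function on the plane with minima of depth 0.0 at (0, 0) and 1.0 at (2, 2):
#   f = get_test_function_method_min(
#       n=2, a=[[1, 1], [1, 1]], c=[[0, 0], [2, 2]],
#       p=[[2, 2], [2, 2]], b=[0.0, 1.0])
#   f([0, 0])  # -> 0.0, the global minimum
#   f([2, 2])  # -> min(|2|^2 + |2|^2 + 0.0, 1.0) = min(8.0, 1.0) = 1.0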
def get_tf_hyperbolic_potential_abs(n: int, a: List[float], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
Closure: generates and returns a test function based on hyperbolic potentials
with additive absolute-value terms in the denominator.
:param n: number of extrema, an integer >= 1
:param a: one-dimensional list (length n) of coefficients controlling the steepness of the function around each extremum
:param c: two-dimensional list of extremum coordinates, length n, List[List[float]]
:param p: two-dimensional list of smoothness exponents around each extremum
:param b: one-dimensional list (length n) of coefficients controlling the function values at the extrema
:return: returns a function that takes a one-dimensional list of point coordinates;
it returns the value of the test function at that point
"""
def func(x):
value = 0
for i in range(n):
res = 0
for j in range(len(x)):
res = res + np.abs(x[j] - c[i][j]) ** p[i][j]
res = a[i] * res + b[i]
res = -(1 / res)
value = value + res
return value
return func
def get_tf_hyperbolic_potential_sqr(n: int, a: List[List[float]], c: List[List[float]], b):
"""
Closure: generates and returns a test function based on hyperbolic potentials
with additive quadratic terms in the denominator.
:param n: number of extrema, an integer >= 1
:param a: two-dimensional list of coefficients scaling each squared term
:param c: two-dimensional list of extremum coordinates, length n,
List[List[float]], shape n * m, where m is the problem dimensionality
:param b: one-dimensional list (length n) of coefficients controlling the function values at the extrema
:return: returns a function that takes a one-dimensional list of point coordinates;
it returns the value of the test function at that point
"""
def func(x):
value = 0
for i in range(n):
res = 0
for j in range(len(x)):
res = res + a[i][j] * (x[j] - c[i][j]) ** 2 # TO REVIEW: is the placement of a correct?
res = res + b[i]
res = -(1 / res)
value = value + res
return value
return func
def get_tf_exponential_potential(n: int, a: List[float], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
Closure: generates and returns a test function based on exponential potentials
with additive absolute-value terms in the exponent.
:param n: number of extrema, an integer >= 1
:param a: one-dimensional list (length n) of coefficients controlling the steepness of the function around each extremum
:param c: two-dimensional list of extremum coordinates, List[List[float]], shape n * m, where m is the problem dimensionality
:param p: two-dimensional list of smoothness exponents around each extremum, List[List[float]], shape n * m
:param b: one-dimensional list (length n) of coefficients controlling the function values at the extrema
:return: returns a function that takes a one-dimensional list of point coordinates;
it returns the value of the test function at that point
"""
def func(x):
value = 0
for i in range(n):
res = 0
for j in range(len(x)):
res = res + np.abs(x[j] - c[i][j]) ** p[i][j]
res = (-b[i]) * np.exp((-a[i]) * res)
value = value + res
return value
return func
def get_test_func(type_func: str, n: int,
a: List[Union[List[float], float]], c: List[List[float]], p: List[List[float]], b: List[float]):
"""Возвращает необходимую функцию в зависимости от переданного типа"""
if type_func == "feldbaum_function":
func = get_test_function_method_min(n, a, c, p, b)
elif type_func == "hyperbolic_potential_abs":
func = get_tf_hyperbolic_potential_abs(n, a, c, p, b)
elif type_func == "exponential_potential":
func = get_tf_exponential_potential(n, a, c, p, b)
else:
func = None
return func
|
nilq/baby-python
|
python
|
from typing import cast, Mapping, Any, List, Tuple
from .models import PortExpenses, Port
def parse_port_expenses(json: Mapping[str, Any]) -> PortExpenses:
return PortExpenses(
cast(int, json.get("PortId")),
cast(int, json.get("PortCanal")),
cast(int, json.get("Towage")),
cast(int, json.get("Berth")),
cast(int, json.get("PortDues")),
cast(int, json.get("Lighthouse")),
cast(int, json.get("Mooring")),
cast(int, json.get("Pilotage")),
cast(int, json.get("Quay")),
cast(int, json.get("Anchorage")),
cast(int, json.get("AgencyFees")),
cast(int, json.get("Other")),
cast(int, json.get("SuezDues")),
cast(int, json.get("TotalCost")),
cast(int, json.get("MiscellaneousDues")),
cast(bool, json.get("IsEstimated")),
cast(int, json.get("CanalDues")),
cast(int, json.get("BerthDues")),
cast(int, json.get("LighthouseDues")),
cast(int, json.get("MooringUnmooring")),
cast(int, json.get("QuayDues")),
cast(int, json.get("AnchorageDues")),
cast(List[int], json.get("PortAgents")),
)
def parse_ports(json: Mapping[str, Any]) -> Tuple[Port, ...]:
ports: List[Port] = []
json_ports = json.get("Ports")
if json_ports is not None and isinstance(json_ports, list):
for port_json in json_ports:
port = Port(
cast(int, port_json.get("PortId")),
cast(str, port_json.get("PortName")),
)
ports.append(port)
return tuple(ports)
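# Illustrative input (the key names follow the fields read above; the values
# are made up):
#   parse_ports({"Ports": [{"PortId": 1, "PortName": "Piraeus"}]})
# returns a one-element tuple containing Port(1, 'Piraeus'), while
# parse_port_expenses consumes a flat mapping with keys such as "PortId",
# "Towage" and "TotalCost".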
|
nilq/baby-python
|
python
|
from typing import Callable
from rx import operators as ops
from rx.core import Observable, pipe
from rx.core.typing import Predicate
def _all(predicate: Predicate) -> Callable[[Observable], Observable]:
filtering = ops.filter(lambda v: not predicate(v))
mapping = ops.map(lambda b: not b)
some = ops.some()
return pipe(
filtering,
some,
mapping
)
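# The pipeline above encodes all(predicate) via De Morgan's law: "every item
# satisfies the predicate" is equivalent to "no item fails it", so failing
# items are filtered in, tested with some(), and the boolean result negated.
# Illustrative use through the public operator (assuming RxPY 3):
#   import rx
#   rx.of(1, 2, 3).pipe(ops.all(lambda x: x > 0)).subscribe(print)  # -> True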
|
nilq/baby-python
|
python
|
import uuid
from datetime import datetime
from os import path
from sqlalchemy.orm.scoping import scoped_session
import factory
import factory.fuzzy
from app.extensions import db
from tests.status_code_gen import *
from app.api.applications.models.application import Application
from app.api.document_manager.models.document_manager import DocumentManager
from app.api.documents.expected.models.mine_expected_document import MineExpectedDocument
from app.api.documents.mines.models.mine_document import MineDocument
from app.api.documents.variances.models.variance import VarianceDocumentXref
from app.api.mines.location.models.mine_location import MineLocation
from app.api.mines.mine.models.mine import Mine
from app.api.mines.mine.models.mine_type import MineType
from app.api.mines.mine.models.mine_type_detail import MineTypeDetail
from app.api.mines.mine.models.mine_verified_status import MineVerifiedStatus
from app.api.mines.incidents.models.mine_incident import MineIncident
from app.api.mines.status.models.mine_status import MineStatus
from app.api.mines.subscription.models.subscription import Subscription
from app.api.mines.tailings.models.tailings import MineTailingsStorageFacility
from app.api.parties.party.models.party import Party
from app.api.parties.party.models.address import Address
from app.api.parties.party_appt.models.mine_party_appt import MinePartyAppointment
from app.api.permits.permit.models.permit import Permit
from app.api.permits.permit_amendment.models.permit_amendment import PermitAmendment
from app.api.permits.permit_amendment.models.permit_amendment_document import PermitAmendmentDocument
from app.api.users.core.models.core_user import CoreUser, IdirUserDetail
from app.api.users.minespace.models.minespace_user import MinespaceUser
from app.api.variances.models.variance import Variance
from app.api.parties.party_appt.models.party_business_role_appt import PartyBusinessRoleAppointment
GUID = factory.LazyFunction(uuid.uuid4)
TODAY = factory.LazyFunction(datetime.now)
FACTORY_LIST = []
class FactoryRegistry:
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
FACTORY_LIST.append(cls)
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory, FactoryRegistry):
class Meta:
abstract = True
sqlalchemy_session = db.session
sqlalchemy_session_persistence = 'flush'
class ApplicationFactory(BaseFactory):
class Meta:
model = Application
class Params:
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
application_guid = GUID
mine_guid = factory.SelfAttribute('mine.mine_guid')
application_no = factory.Sequence(lambda n: f'TX-{n}-TEST')
application_status_code = factory.LazyFunction(RandomApplicationStatusCode)
description = factory.Faker('sentence', nb_words=8, variable_nb_words=True)
received_date = TODAY
class DocumentManagerFactory(BaseFactory):
class Meta:
model = DocumentManager
class Params:
path_root = ''
document_guid = GUID
full_storage_path = factory.LazyAttribute(
lambda o: path.join(o.path_root, 'mine_no/category', o.file_display_name))
upload_started_date = TODAY
upload_completed_date = TODAY
file_display_name = factory.Faker('file_name')
path_display_name = factory.LazyAttribute(
lambda o: path.join(o.path_root, 'mine_name/category', o.file_display_name))
class MineDocumentFactory(BaseFactory):
class Meta:
model = MineDocument
class Params:
document_manager_obj = factory.SubFactory(
DocumentManagerFactory, file_display_name=factory.SelfAttribute('..document_name'))
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
mine_document_guid = GUID
mine_guid = factory.SelfAttribute('mine.mine_guid')
document_manager_guid = factory.SelfAttribute('document_manager_obj.document_guid')
document_name = factory.Faker('file_name')
mine_expected_document = []
class MineExpectedDocumentFactory(BaseFactory):
class Meta:
model = MineExpectedDocument
exp_document_guid = GUID
required_document = factory.LazyFunction(RandomRequiredDocument)
exp_document_status_code = factory.LazyFunction(RandomExpectedDocumentStatusCode)
exp_document_name = factory.SelfAttribute('required_document.req_document_name')
exp_document_description = factory.SelfAttribute('required_document.description')
due_date = TODAY
received_date = TODAY
hsrc_code = factory.SelfAttribute('required_document.hsrc_code')
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
related_documents = []
@factory.post_generation
def related_documents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineDocumentFactory.create_batch(
size=extracted, mine_expected_document=[obj], mine=obj.mine, **kwargs)
class MineLocationFactory(BaseFactory):
class Meta:
model = MineLocation
mine_location_guid = GUID
latitude = factory.Faker('latitude') # or factory.fuzzy.FuzzyFloat(49, 60) for ~ inside BC
longitude = factory.Faker('longitude') # or factory.fuzzy.FuzzyFloat(-132, -114.7) for ~ BC
geom = factory.LazyAttribute(lambda o: 'SRID=3005;POINT(%f %f)' % (o.longitude, o.latitude))
mine_location_description = factory.Faker('sentence', nb_words=8, variable_nb_words=True)
effective_date = TODAY
expiry_date = TODAY
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
class MineStatusFactory(BaseFactory):
class Meta:
model = MineStatus
mine_status_guid = GUID
effective_date = TODAY
mine_status_xref = factory.LazyFunction(RandomMineStatusXref)
class MineTypeDetailFactory(BaseFactory):
class Meta:
model = MineTypeDetail
class Params:
tenure = 'MIN'
commodity = factory.Trait(
mine_commodity_code=factory.LazyAttribute(
lambda o: SampleMineCommodityCodes(o.tenure, 1)[0]))
disturbance = factory.Trait(
mine_disturbance_code=factory.LazyAttribute(
lambda o: SampleMineDisturbanceCodes(o.tenure, 1)[0]))
mine_type_detail_xref_guid = GUID
mine_commodity_code = None
mine_disturbance_code = None
class MineTypeFactory(BaseFactory):
class Meta:
model = MineType
mine_type_guid = GUID
mine_tenure_type_code = factory.LazyFunction(RandomTenureTypeCode)
mine_type_detail = []
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
@factory.post_generation
def mine_type_detail(obj, create, extracted, **kwargs):
if not create:
return
if extracted is None:
extracted = {}
commodities = extracted.get('commodities', 1)
commodities = SampleMineCommodityCodes(obj.mine_tenure_type_code, commodities)
disturbances = extracted.get('disturbances', 1)
disturbances = SampleMineDisturbanceCodes(obj.mine_tenure_type_code, disturbances)
for commodity in commodities:
MineTypeDetailFactory(
mine_type_guid=obj.mine_type_guid,
tenure=obj.mine_tenure_type_code,
mine_commodity_code=commodity,
**kwargs)
for disturbance in disturbances:
MineTypeDetailFactory(
mine_type_guid=obj.mine_type_guid,
tenure=obj.mine_tenure_type_code,
mine_disturbance_code=disturbance,
**kwargs)
class MineTailingsStorageFacilityFactory(BaseFactory):
class Meta:
model = MineTailingsStorageFacility
mine_tailings_storage_facility_guid = GUID
mine_tailings_storage_facility_name = factory.Faker('last_name')
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
class VarianceFactory(BaseFactory):
class Meta:
model = Variance
class Params:
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
inspector = factory.SubFactory('tests.factories.PartyBusinessRoleFactory')
approved = factory.Trait(
variance_application_status_code='APP',
issue_date=TODAY,
expiry_date=TODAY,
inspector_party_guid=factory.SelfAttribute('inspector.party_guid'))
denied = factory.Trait(
variance_application_status_code='DEN',
inspector_party_guid=factory.SelfAttribute('inspector.party_guid'))
not_applicable = factory.Trait(variance_application_status_code='NAP')
variance_guid = GUID
compliance_article_id = factory.LazyFunction(RandomComplianceArticleId)
mine_guid = factory.SelfAttribute('mine.mine_guid')
note = factory.Faker('sentence', nb_words=6, variable_nb_words=True)
parties_notified_ind = factory.Faker('boolean', chance_of_getting_true=50)
received_date = TODAY
documents = []
@factory.post_generation
def documents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
VarianceDocumentFactory.create_batch(
size=extracted, variance=obj, mine_document__mine=None, **kwargs)
class VarianceDocumentFactory(BaseFactory):
class Meta:
model = VarianceDocumentXref
class Params:
mine_document = factory.SubFactory(
'tests.factories.MineDocumentFactory',
mine_guid=factory.SelfAttribute('..variance.mine_guid'))
variance = factory.SubFactory('tests.factories.VarianceFactory')
variance_document_xref_guid = GUID
mine_document_guid = factory.SelfAttribute('mine_document.mine_document_guid')
variance_id = factory.SelfAttribute('variance.variance_id')
variance_document_category_code = factory.LazyFunction(RandomVarianceDocumentCategoryCode)
def RandomPermitNumber():
return random.choice(['C-', 'CX-', 'M-', 'M-', 'P-', 'PX-', 'G-', 'Q-']) + str(
random.randint(1, 9999999))
class PermitFactory(BaseFactory):
class Meta:
model = Permit
permit_guid = GUID
permit_no = factory.LazyFunction(RandomPermitNumber)
permit_status_code = factory.LazyFunction(RandomPermitStatusCode)
permit_amendments = []
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
@factory.post_generation
def permit_amendments(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
for n in range(extracted):
PermitAmendmentFactory(permit=obj, initial_permit=(n == 0), **kwargs)
class PermitAmendmentFactory(BaseFactory):
class Meta:
model = PermitAmendment
class Params:
initial_permit = factory.Trait(
description='Initial permit issued.',
permit_amendment_type_code='OGP',
)
permit = factory.SubFactory(PermitFactory, permit_amendments=0)
permit_amendment_guid = GUID
permit_id = factory.SelfAttribute('permit.permit_id')
received_date = TODAY
issue_date = TODAY
authorization_end_date = factory.Faker('future_datetime', end_date='+30d')
permit_amendment_status_code = 'ACT'
permit_amendment_type_code = 'AMD'
description = factory.Faker('sentence', nb_words=6, variable_nb_words=True)
documents = []
class PermitAmendmentDocumentFactory(BaseFactory):
class Meta:
model = PermitAmendmentDocument
class Params:
document_manager_obj = factory.SubFactory(
DocumentManagerFactory, file_display_name=factory.SelfAttribute('..document_name'))
permit_amendment_document_guid = GUID
permit_amendment_id = factory.SelfAttribute('permit_amendment.permit_amendment_id')
document_name = factory.Faker('file_name')
mine_guid = factory.SelfAttribute('permit_amendment.permit.mine.mine_guid')
document_manager_guid = factory.SelfAttribute('document_manager_obj.document_guid')
permit_amendment = factory.SubFactory(PermitAmendmentFactory)
class MineVerifiedStatusFactory(BaseFactory):
class Meta:
model = MineVerifiedStatus
healthy_ind = factory.Faker('boolean', chance_of_getting_true=50)
verifying_user = factory.Faker('name')
verifying_timestamp = TODAY
update_user = factory.Faker('name')
update_timestamp = TODAY
class MineIncidentFactory(BaseFactory):
class Meta:
model = MineIncident
class Params:
do_subparagraph_count = 2
mine_incident_id_year = 2019
mine_incident_guid = GUID
incident_timestamp = factory.Faker('past_datetime')
incident_description = factory.Faker('sentence', nb_words=20, variable_nb_words=True)
reported_timestamp = factory.Faker('past_datetime')
reported_by = factory.Faker('name')
reported_by_role = factory.Faker('job')
determination_type_code = factory.LazyFunction(RandomIncidentDeterminationTypeCode)
followup_type_code = 'NOA'
followup_inspection_no = factory.Faker('numerify', text='######') #nullable???
closing_report_summary = factory.Faker('sentence', nb_words=20, variable_nb_words=True)
dangerous_occurrence_subparagraphs = factory.LazyAttribute(
lambda o: SampleDangerousOccurrenceSubparagraphs(o.do_subparagraph_count)
if o.determination_type_code == 'DO' else [])
class AddressFactory(BaseFactory):
class Meta:
model = Address
address_line_1 = factory.Faker('street_address')
suite_no = factory.Iterator([None, None, '123', '123'])
address_line_2 = factory.Iterator([None, 'Apt. 123', None, 'Apt. 123'])
city = factory.Faker('city')
sub_division_code = factory.LazyFunction(RandomSubDivisionCode)
post_code = factory.Faker('bothify', text='?#?#?#', letters='ABCDEFGHIJKLMNOPQRSTUVWXYZ')
class PartyFactory(BaseFactory):
class Meta:
model = Party
class Params:
person = factory.Trait(
first_name=factory.Faker('first_name'),
party_name=factory.Faker('last_name'),
email=factory.LazyAttribute(lambda o: f'{o.first_name}.{o.party_name}@example.com'),
party_type_code='PER',
)
company = factory.Trait(
party_name=factory.Faker('company'),
email=factory.Faker('company_email'),
party_type_code='ORG',
)
party_guid = factory.LazyFunction(uuid.uuid4)
first_name = None
party_name = None
phone_no = factory.Faker('numerify', text='###-###-####')
phone_ext = factory.Iterator([None, '123'])
email = None
effective_date = TODAY
expiry_date = None
party_type_code = None
mine_party_appt = []
address = factory.List([factory.SubFactory(AddressFactory) for _ in range(1)])
class PartyBusinessRoleFactory(BaseFactory):
class Meta:
model = PartyBusinessRoleAppointment
party_business_role_code = factory.LazyFunction(RandomPartyBusinessRoleCode)
party = factory.SubFactory(PartyFactory, person=True)
start_date = TODAY
end_date = None
class MinePartyAppointmentFactory(BaseFactory):
class Meta:
model = MinePartyAppointment
mine_party_appt_guid = GUID
mine = factory.SubFactory('tests.factories.MineFactory')
party = factory.SubFactory(PartyFactory, person=True)
mine_party_appt_type_code = factory.LazyFunction(RandomMinePartyAppointmentTypeCode)
start_date = TODAY
end_date = None
processed_by = factory.Faker('first_name')
processed_on = TODAY
mine_tailings_storage_facility_guid = factory.LazyAttribute(
lambda o: o.mine.mine_tailings_storage_facilities[0].mine_tailings_storage_facility_guid if o.mine_party_appt_type_code == 'EOR' else None
)
permit_guid = factory.LazyAttribute(
lambda o: o.mine.mine_permit[0].permit_guid if o.mine_party_appt_type_code == 'PMT' else None
)
class CoreUserFactory(BaseFactory):
class Meta:
model = CoreUser
core_user_guid = GUID
email = factory.Faker('email')
phone_no = factory.Faker('numerify', text='###-###-####')
last_logon = TODAY
idir_user_detail = factory.RelatedFactory('tests.factories.IdirUserDetailFactory', 'core_user')
class IdirUserDetailFactory(BaseFactory):
class Meta:
model = IdirUserDetail
class Params:
core_user = factory.SubFactory(CoreUserFactory)
core_user_id = factory.SelfAttribute('core_user.core_user_id')
bcgov_guid = GUID
username = factory.Faker('first_name')
class MinespaceUserFactory(BaseFactory):
class Meta:
model = MinespaceUser
keycloak_guid = GUID
email = factory.Faker('email')
class SubscriptionFactory(BaseFactory):
class Meta:
model = Subscription
class Params:
mine = factory.SubFactory('tests.factories.MineFactory', minimal=True)
mine_guid = factory.SelfAttribute('mine.mine_guid')
user_name = factory.Faker('last_name')
class MineFactory(BaseFactory):
class Meta:
model = Mine
class Params:
minimal = factory.Trait(
mine_no=None,
mine_note=None,
mine_region='NE',
mine_location=None,
mine_type=None,
verified_status=None,
mine_status=None,
mine_tailings_storage_facilities=0,
mine_permit=0,
mine_expected_documents=0,
mine_incidents=0,
mine_variance=0,
)
mine_guid = GUID
mine_no = factory.Faker('ean', length=8)
mine_name = factory.Faker('company')
mine_note = factory.Faker('sentence', nb_words=6, variable_nb_words=True)
major_mine_ind = factory.Faker('boolean', chance_of_getting_true=50)
mine_region = factory.LazyFunction(RandomMineRegionCode)
ohsc_ind = factory.Faker('boolean', chance_of_getting_true=50)
union_ind = factory.Faker('boolean', chance_of_getting_true=50)
mine_location = factory.RelatedFactory(MineLocationFactory, 'mine')
mine_type = factory.RelatedFactory(MineTypeFactory, 'mine')
verified_status = factory.RelatedFactory(MineVerifiedStatusFactory, 'mine')
mine_status = factory.RelatedFactory(MineStatusFactory, 'mine')
mine_tailings_storage_facilities = []
mine_permit = []
mine_expected_documents = []
mine_incidents = []
mine_variance = []
@factory.post_generation
def mine_tailings_storage_facilities(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineTailingsStorageFacilityFactory.create_batch(size=extracted, mine=obj, **kwargs)
@factory.post_generation
def mine_permit(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
PermitFactory.create_batch(size=extracted, mine=obj, **kwargs)
@factory.post_generation
def mine_expected_documents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineExpectedDocumentFactory.create_batch(size=extracted, mine=obj, **kwargs)
@factory.post_generation
def mine_incidents(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
MineIncidentFactory.create_batch(size=extracted, mine_guid=obj.mine_guid, **kwargs)
@factory.post_generation
def mine_variance(obj, create, extracted, **kwargs):
if not create:
return
if not isinstance(extracted, int):
extracted = 1
VarianceFactory.create_batch(size=extracted, mine=obj, **kwargs)
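# --- Usage sketch (not from the original suite) ---
# A minimal illustration of how these factories compose, assuming an active
# Flask app context with the db.session wired into BaseFactory above; the
# function name and assertions below are hypothetical.
def _example_mine_factory_usage():
    mine = MineFactory()  # post_generation hooks create location, permits, incidents, etc.
    assert len(mine.mine_permit) == 1
    minimal = MineFactory(minimal=True)  # the 'minimal' trait skips all related factories
    assert minimal.mine_location is None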
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, ReLU
class Decoder(Model):
def __init__(self, channels):
super().__init__()
self.conv1 = Conv2D(channels[0], 3, padding='SAME', use_bias=False)
self.bn1 = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.conv2 = Conv2D(channels[1], 3, padding='SAME', use_bias=False)
self.bn2 = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.conv3 = Conv2D(channels[2], 3, padding='SAME', use_bias=False)
self.bn3 = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.conv4 = Conv2D(channels[3], 3, padding='SAME', use_bias=True)
self.relu = ReLU()
def call(self, x, training=None):
x4, x3, x2, x1, x0 = x
x = tf.image.resize(x4, tf.shape(x3)[1:3])
x = tf.concat([x, x3], axis=-1)
x = self.conv1(x, training=training)
x = self.bn1(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, tf.shape(x2)[1:3])
x = tf.concat([x, x2], axis=-1)
x = self.conv2(x, training=training)
x = self.bn2(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, tf.shape(x1)[1:3])
x = tf.concat([x, x1], axis=-1)
x = self.conv3(x, training=training)
x = self.bn3(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, tf.shape(x0)[1:3])
x = tf.concat([x, x0], axis=-1)
x = self.conv4(x, training=training)
return x
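# --- Smoke test sketch (feature-map shapes are illustrative assumptions, not from the original model) ---
# Feeds a coarse-to-fine feature pyramid through the decoder; any five feature
# maps ordered [x4 (coarsest), x3, x2, x1, x0 (finest)] work.
if __name__ == '__main__':
    decoder = Decoder(channels=[256, 128, 64, 1])
    shapes = [(1, 8, 8, 512), (1, 16, 16, 256), (1, 32, 32, 128), (1, 64, 64, 64), (1, 128, 128, 32)]
    feats = [tf.random.normal(s) for s in shapes]
    out = decoder(feats, training=False)
    print(out.shape)  # (1, 128, 128, 1): channels[3] filters at x0's spatial size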
import os
from typing import List, Tuple
from urllib.request import urlopen
from discord.ext import commands
from blurpo.func import database, send_embed, wrap
def basename(path: str) -> Tuple[str, str]:
# Get file's basename from url
# eg. https://website.com/index.html -> (index.html, index)
return (base := path.split('/')[-1]), base.split('.')[0]
def exts_list(chn_id: int) -> None:
with database() as db:
exts = list(db['Github'])
chunks = wrap('\n'.join(exts), code='bash')
send_embed(chn_id, chunks, title='Github Extensions', color=333333)
def ext_load(bot: commands.Bot, path: str) -> None:
base, name = basename(path)
url = 'https://raw.githubusercontent.com/' + path
with open(base, 'w') as f:
f.write(urlopen(url).read().decode('utf-8'))
try: bot.load_extension(name)
except commands.ExtensionAlreadyLoaded: bot.reload_extension(name)
finally: os.remove(base)
def exts_load(bot) -> List[str]:
with database() as db:
exts = db['Github']
loaded = []
for ext in exts.keys():
try:
ext_load(bot, exts[ext])
loaded.append(ext)
except Exception as e: print(e)
return loaded
class Github(commands.Cog):
def __init__(self, bot) -> None:
self.bot = bot
with database() as db:
if 'Github' in db:
exts = exts_load(self.bot)
print(f'{exts} loaded')
else: db['Github'] = {}
@commands.command(
'gload',
brief='Load exts. Path: [owner/repo/branch/filepath]')
async def exts_load(self, ctx, *paths: str) -> None:
with database() as db:
for path in paths:
ext_load(self.bot, path)
_, ext = basename(path)
exts = db['Github']
exts[ext] = path
db['Github'] = exts
exts_list(ctx.channel.id)
@commands.command('gunld', brief='Unload exts')
async def exts_unload(self, ctx, *exts: str) -> None:
with database() as db:
for ext in exts:
es = db['Github']
if ext in es:
del es[ext]
db['Github'] = es
self.bot.unload_extension(ext)
exts_list(ctx.channel.id)
@commands.command('gexts', brief='List exts')
async def exts_list(self, ctx) -> None:
exts_list(ctx.channel.id)
@commands.command('greld', brief='Reload all exts')
async def ghExtsReload(self, ctx) -> None:
exts = exts_load(self.bot)
chunks = wrap('\n'.join(exts), code='bash')
send_embed(ctx.channel.id,
chunks,
title='Extensions Reloaded',
color=333333)
def setup(bot):
bot.add_cog(Github(bot))
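# --- Bootstrap sketch (hypothetical; the module name and token are placeholders) ---
# How a discord.py 1.x bot might load this cog, assuming this file is saved as
# github_cog.py and blurpo.func is importable.
if __name__ == '__main__':
    from discord.ext import commands
    bot = commands.Bot(command_prefix='!')
    bot.load_extension('github_cog')  # assumption: this file is saved as github_cog.py
    bot.run('YOUR_TOKEN_HERE')  # placeholder token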
#!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: Harsh Pandya
# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import numpy as np
from numpy import array
from sympy import symbols, cos, sin, pi, sqrt, atan2
#### Transformation matrix function###
def Transform(q,d,a,alpha,s):
T = Matrix([[cos(q) , -sin(q) , 0 , a ],
[sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
[sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha), cos(alpha) * d ],
[0 , 0 , 0 , 1 ]])
return T.subs(s)
######################################
def handle_calculate_IK(req):
rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
if len(req.poses) < 1:
print "No valid poses received"
return -1
else:
# Create symbols
q1,q2,q3,q4,q5,q6,q7 = symbols('q1:8')
d1,d2,d3,d4,d5,d6,d7 = symbols('d1:8')
        a0,a1,a2,a3,a4,a5,a6 = symbols('a0:7')
alpha0,alpha1,alpha2,alpha3,alpha4,alpha5,alpha6 = symbols('alpha0:7')
################
# Create Modified DH parameters
        # Twist angles and joint offsets are in radians (Transform() uses cos/sin directly)
        s = {alpha0: 0,      a0: 0,      d1: 0.75,
             alpha1: -pi/2., a1: 0.35,   d2: 0,     q2: q2 - pi/2.,
             alpha2: 0,      a2: 1.25,   d3: 0,
             alpha3: -pi/2., a3: -0.054, d4: 1.5,
             alpha4:  pi/2., a4: 0,      d5: 0,
             alpha5: -pi/2., a5: 0,      d6: 0,
             alpha6: 0,      a6: 0,      d7: 0.303, q7: 0}
################################
# Create individual transformation matrices
T0_1=Transform(q1,d1,a0,alpha0,s)
T1_2=Transform(q2,d2,a1,alpha1,s)
T2_3=Transform(q3,d3,a2,alpha2,s)
T3_4=Transform(q4,d4,a3,alpha3,s)
T4_5=Transform(q5,d5,a4,alpha4,s)
T5_6=Transform(q6,d6,a5,alpha5,s)
T6_G=Transform(q7,d7,a6,alpha6,s)
T0_G= T0_1*T1_2*T2_3*T3_4*T4_5*T5_6*T6_G
###########################################
# Creating function for Rotation matrices
R,P,Y = symbols('R P Y')
def Rot(symb,Roll=R,Pitch=P,Yaw=Y):
if symb == 'R':
Rot = Matrix([
[ 1, 0, 0],
[ 0, cos(Roll), -sin(Roll)],
[ 0, sin(Roll), cos(Roll)]])
elif symb == 'P':
Rot = Matrix([
[ cos(Pitch), 0, sin(Pitch)],
[ 0, 1, 0],
[-sin(Pitch), 0, cos(Pitch)]])
elif symb == 'Y':
Rot = Matrix([
[cos(Yaw), -sin(Yaw), 0],
[sin(Yaw), cos(Yaw), 0],
[ 0, 0, 1]])
return Rot
#######################################
# Accounting for Orientation Difference
Rot_x = Rot('R')
Rot_y = Rot('P')
Rot_z = Rot('Y')
        Rot_F = Rot_z.subs(Y, radians(180)) * Rot_y.subs(P, radians(-90))
Rot_E = Rot_z*Rot_y*Rot_x
Rot_EE = Rot_E * Rot_F
#######################################
# Initialize service response
joint_trajectory_list = []
for x in xrange(0, len(req.poses)):
# IK code starts here
joint_trajectory_point = JointTrajectoryPoint()
# Extract end-effector position and orientation from request
# px,py,pz = end-effector position
# roll, pitch, yaw = end-effector orientation
px = req.poses[x].position.x
py = req.poses[x].position.y
pz = req.poses[x].position.z
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
[req.poses[x].orientation.x, req.poses[x].orientation.y,
req.poses[x].orientation.z, req.poses[x].orientation.w])
# Finding the position of WC according to End Effector
            R_EE = Rot_EE.subs({R: roll, P: pitch, Y: yaw})  # numeric EE rotation for this pose
            Pos_EE = Matrix([px, py, pz])
            Pos_WC = Pos_EE - 0.303 * R_EE[:, 2]
WC_x = Pos_WC[0]
WC_y = Pos_WC[1]
WC_z = Pos_WC[2]
# Calculate joint angles using Geometric IK method
La = 1.502
Lc = 1.25
a1 = 0.35
d1 = 0.75
Lxy= sqrt(pow(WC_x, 2.) + pow(WC_y, 2.) ) - a1
Lz = WC_z - d1
Lb = sqrt(pow(Lxy, 2.) + pow(Lz, 2.))
a_ang = acos( ( pow(Lb, 2.) + pow(Lc, 2.) - pow(La, 2.)) / (2. * Lb * Lc) )
b_ang = acos( ( pow(La, 2.) + pow(Lc, 2.) - pow(Lb, 2.)) / (2. * La * Lc) )
c_ang = acos( ( pow(La, 2.) + pow(Lb, 2.) - pow(Lc, 2.)) / (2. * La * Lb) )
### Finding Theta 1,2,3
theta1 = atan2(WC_y , WC_x)
            theta2 = pi/2 - a_ang - atan2(Lz, Lxy)  # atan2 takes two arguments; angles in radians
            theta3 = pi/2 - b_ang - atan2(0.054, 1.5)  # use angle b_ang, not the side length Lb
#######################
# Evaluating Transformation from 0 to 3
            R0_3 = (T0_1 * T1_2 * T2_3).evalf(subs={q1: theta1, q2: theta2, q3: theta3})[0:3, 0:3]
#######################################
# Evaluating Transformation from 3 to 6
R3_6 = R0_3.T * R_EE
theta4 = atan2(R3_6[2,2], -R3_6[0,2])
theta5 = atan2(sqrt(pow(R3_6[0,2], 2) + pow(R3_6[2,2], 2)), R3_6[1,2])
theta6 = atan2(-R3_6[1,1], R3_6[1,0])
#######################################
# Populate response for the IK request
# In the next line replace theta1,theta2...,theta6 by your joint angle variables
joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]
joint_trajectory_list.append(joint_trajectory_point)
rospy.loginfo("length of Joint Trajectory List: %s" % len(joint_trajectory_list))
return CalculateIKResponse(joint_trajectory_list)
def IK_server():
# initialize node and declare calculate_ik service
rospy.init_node('IK_server')
s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
print "Ready to receive an IK request"
rospy.spin()
if __name__ == "__main__":
IK_server()
import codecs
import os
from hacktools import common
import constants
import game
def run(data, copybin=False, analyze=False):
infile = data + "extract/arm9.bin"
outfile = data + "repack/arm9.bin"
fontdata = data + "font_data.bin"
dictionarydata = data + "dictionary.asm"
binfile = data + "bin_input.txt"
datfile = data + "dat_input.txt"
binsize = os.path.getsize(infile)
table, invtable = game.getTable(data)
glyphs, dictionary = game.getGlyphs(data)
if not os.path.isfile(binfile):
common.logError("Input file", binfile, "not found")
return
if not os.path.isfile(datfile):
common.logError("Input file", datfile, "not found")
return
common.logMessage("Repacking BIN from", binfile, "...")
# Read all strings
translations = {}
strings = {}
with codecs.open(binfile, "r", "utf-8") as bin:
section = common.getSection(bin, "")
chartot, transtot = common.getSectionPercentage(section)
for jpstr in section:
if section[jpstr][0] != "":
translations[jpstr] = section[jpstr][0]
if section[jpstr][0] not in strings:
strings[section[jpstr][0]] = -1
elif jpstr not in strings:
strings[jpstr] = 0
if copybin:
common.copyFile(infile, outfile)
if os.path.isfile(data + "bmpcache.txt"):
os.remove(data + "bmpcache.txt")
lastfreepos = 0
with common.Stream(infile, "rb") as fin:
ptrgroups, allptrs = game.getBINPointerGroups(fin)
with common.Stream(outfile, "rb+") as f:
# Write all strings
outofspace = False
outchars = 0
lastgood = 0
f.seek(constants.mainptr["offset"])
for string in common.showProgress(strings):
writestr = string
if strings[string] == -1 and not writestr.startswith(">>") and "<ch1>" not in writestr and "<00>" not in writestr:
writestr = writestr.replace("<0A>", "|")
writestr = common.wordwrap(writestr, glyphs, constants.wordwrap, game.detectTextCode, default=0xa)
if outofspace:
common.logDebug("Skipping string", writestr)
outchars += len(writestr) - writestr.count("<") * 3
strings[string] = lastgood
else:
usedictionary = True
if writestr.startswith(">>"):
usedictionary = False
writestr = game.alignCenter(writestr[2:], glyphs) + "<00>"
common.logDebug("Writing string", writestr, "at", common.toHex(f.tell()))
strings[string] = lastgood = f.tell()
game.writeString(f, writestr, table, usedictionary and dictionary or {}, compress=usedictionary)
if "<ch1>" in writestr:
f.writeByte(0)
if f.tell() >= constants.mainptr["end"]:
outofspace = True
common.logMessage("Ran out of space while writing string", writestr)
common.logDebug("Finished at", common.toHex(f.tell()))
if outofspace:
common.logMessage("Characters left out:", outchars)
else:
lastfreepos = f.tell()
common.logMessage("Room for", common.toHex(constants.mainptr["end"] - lastfreepos), "more bytes")
# Change pointers
for ptrgroup in ptrgroups:
atstr = "@" + common.toHex(ptrgroup)
for ptr in ptrgroups[ptrgroup]:
f.seek(ptr["pos"])
fin.seek(ptr["ptr"])
if ptr["data"]:
jpstr = game.readData(fin, allptrs)
else:
jpstr = game.readString(fin, invtable, allptrs)
if jpstr + atstr in translations:
jpstr = translations[jpstr + atstr]
elif jpstr in translations:
jpstr = translations[jpstr]
if jpstr not in strings:
common.logError("String", jpstr, "not found")
else:
common.logDebug("Writing pointer", common.toHex(strings[jpstr]), "for string", jpstr, "at", common.toHex(f.tell()))
f.writeUInt(0x02000000 + strings[jpstr])
common.logMessage("Done! Translation is at {0:.2f}%".format((100 * transtot) / chartot))
common.logMessage("Text statistics:")
common.logMessage(" Groups printed: {0}".format(game.text_stats_groups))
common.logMessage(" Characters printed: {0}".format(game.text_stats_other))
common.logMessage(" Dictionary saved: {0}-{1} overhead ({2}%)".format(game.text_stats_dict_saved, game.text_stats_dict_overhead, (game.text_stats_dict_overhead * 100) // game.text_stats_dict_saved))
common.logMessage(" Compression saved: {0}".format(game.text_stats_compression_saving))
common.logMessage("Repacking DAT from", datfile, "...")
chartot = transtot = 0
with codecs.open(datfile, "r", "utf-8") as dat:
with common.Stream(infile, "rb") as fin:
with common.Stream(outfile, "rb+") as f:
for datname in constants.datptrs:
if type(constants.datptrs[datname]) is not list and "main" in constants.datptrs[datname]:
continue
section = common.getSection(dat, datname)
if len(section) == 0:
continue
chartot, transtot = common.getSectionPercentage(section, chartot, transtot)
datptrs = []
if type(constants.datptrs[datname]) is list:
for datoffset in constants.datptrs[datname]:
datptrs.append(datoffset)
else:
datptrs.append(constants.datptrs[datname])
# Read all strings first
allstrings = []
for datptr in datptrs:
writegroups = "writegroups" in datptr and datptr["writegroups"]
usedictionary = "dictionary" in datptr and datptr["dictionary"]
redirect = "redirect" in datptr and datptr["redirect"]
wordwrap = "wordwrap" in datptr and datptr["wordwrap"] or 0
aligncenter = "aligncenter" in datptr and datptr["aligncenter"] or 0
fin.seek(datptr["offset"])
if "end" in datptr:
while fin.tell() < datptr["end"]:
strstart = fin.tell()
jpstr = game.readString(fin, invtable)
fin.readZeros(binsize)
allstrings.append({"start": strstart, "end": fin.tell() - 1, "str": jpstr,
"writegroups": writegroups, "dictionary": usedictionary, "wordwrap": wordwrap, "aligncenter": aligncenter, "redirect": redirect})
else:
ptrs = []
for i in range(datptr["count"]):
ptrpos = fin.tell()
ptrs.append({"address": fin.readUInt() - 0x02000000, "pos": ptrpos})
if "skip" in datptr:
fin.seek(datptr["skip"], 1)
for i in range(datptr["count"]):
fin.seek(ptrs[i]["address"])
strstart = fin.tell()
jpstr = game.readString(fin, invtable)
fin.readZeros(binsize)
allstrings.append({"start": strstart, "end": fin.tell() - 1, "str": jpstr,
"ptrpos": ptrs[i]["pos"], "writegroups": writegroups, "dictionary": usedictionary, "wordwrap": wordwrap, "aligncenter": aligncenter, "redirect": redirect})
# Check how much space is used by these strings and update them with the translations
minpos = 0xffffffff
maxpos = 0
for jpstr in allstrings:
if jpstr["start"] < minpos:
minpos = jpstr["start"]
if jpstr["end"] > maxpos:
maxpos = jpstr["end"]
check = jpstr["str"]
if check in section and section[check][0] != "":
jpstr["str"] = section[check].pop()
if len(section[check]) == 0:
del section[check]
if jpstr["wordwrap"] > 0:
jpstr["str"] = common.wordwrap(jpstr["str"], glyphs, jpstr["wordwrap"], game.detectTextCode, default=0xa)
if jpstr["str"].startswith("<<"):
jpstr["str"] = game.alignLeft(jpstr["str"][2:], glyphs)
if jpstr["str"].startswith(">>"):
jpstr["str"] = game.alignCenter(jpstr["str"][2:], glyphs) + "<00>"
if jpstr["aligncenter"] > 0:
jpstr["str"] = game.alignCenterSpace(jpstr["str"], glyphs, jpstr["aligncenter"]) + "<00>"
if analyze:
allspace = []
for i in range(minpos, maxpos + 1):
allspace.append(i)
for jpstr in allstrings:
for i in range(jpstr["start"], jpstr["end"] + 1):
allspace.remove(i)
common.logMessage(datname)
common.logMessage(allspace)
# Start writing them
f.seek(minpos)
writingmain = False
for jpstr in allstrings:
if "ptrpos" in jpstr and datname != "ItemShop":
common.logDebug("Writing pointer string", jpstr["str"], "at", common.toHex(f.tell()))
# Write the string and update the pointer
strpos = f.tell()
stringfits = game.writeString(f, jpstr["str"], table, dictionary if jpstr["dictionary"] else {}, maxlen=maxpos - f.tell(), writegroups=jpstr["writegroups"], checkfit=jpstr["redirect"])
if jpstr["redirect"] and not stringfits and lastfreepos > 0 and not writingmain:
common.logDebug("String", jpstr["str"], "didn't fit, enabling writing to main...")
f.seek(lastfreepos)
maxpos = constants.mainptr["end"]
game.writeString(f, jpstr["str"], table, dictionary if jpstr["dictionary"] else {}, maxlen=maxpos - f.tell(), writegroups=jpstr["writegroups"], checkfit=jpstr["redirect"])
writingmain = True
f.writeUIntAt(jpstr["ptrpos"], strpos + 0x02000000)
else:
# Try to fit the string in the given space
f.seek(jpstr["start"])
common.logDebug("Writing fixed string", jpstr["str"], "at", common.toHex(f.tell()))
game.writeString(f, jpstr["str"], table, dictionary if jpstr["dictionary"] else {}, maxlen=jpstr["end"] - f.tell(), writegroups=jpstr["writegroups"])
while f.tell() < jpstr["end"]:
f.writeByte(0)
if writingmain:
lastfreepos = f.tell()
common.logMessage("Room for", common.toHex(constants.mainptr["end"] - lastfreepos), "more bytes")
common.logMessage("Done! Translation is at {0:.2f}%".format((100 * transtot) / chartot))
# Export font data, dictionary data and apply armips patch
with common.Stream(fontdata, "wb") as f:
for charcode in range(0x9010, 0x908f + 1):
c = invtable[charcode]
f.writeByte(glyphs[c].width)
with codecs.open(dictionarydata, "w", "utf-8") as f:
alldictionary = []
for dictentry in dictionary:
dictname = "DICTIONARY_" + common.toHex(dictionary[dictentry]).lower()
dictvalue = dictname + ":\n" + game.writeDictionaryString(dictentry, table)
f.write(".dw " + dictname + "\n")
alldictionary.append(dictvalue)
f.write("\n")
f.write("\n".join(alldictionary))
f.write("\n")
common.armipsPatch(common.bundledFile("bin_patch.asm"))
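# --- Invocation sketch (hypothetical) ---
# The 'data/' directory is a placeholder; it must be laid out as run() expects
# (extract/arm9.bin, repack/arm9.bin, font_data.bin, bin_input.txt, dat_input.txt).
if __name__ == "__main__":
    run("data/", copybin=True, analyze=False)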
import time
import random
currentBot = 1
from threadly import Threadly
def worker(**kwargs):
botID = kwargs["botID"]
resultsQ = kwargs["resultsQ"]
time.sleep(random.randint(1,15))
resultsQ.put({"botID":botID, "time":time.time()})
def workerKwargs():
global currentBot
tosend = {"botID":"bot {}".format(currentBot)}
currentBot += 1
return tosend
def finish(**kwargs):
greeting = kwargs["greeting"]
resultsQ = kwargs["resultsQ"]
overallTime = kwargs["totalTime"]
print("{}, It took {} seconds".format(greeting, overallTime))
print("bot results")
for i in range(resultsQ.qsize()):
aresult = resultsQ.get()
print("bot {botID} finished at {time}".format(**aresult))
print("Starting..")
mytest = Threadly()
testerkwargs = {"workerFunc":worker,
"workerKwargGenFunc":workerKwargs,
"numberOfWorkers":10,
"numberOfThreads":2,
"finishFunc":finish,
"finishFuncKwargs":{"greeting":"Howdy"},
"delayBetweenThreads":0.1}
testerkwargs2 = {"workerFunc":worker,
"workerKwargGenFunc":workerKwargs,
"lengthOfTest":20,
"numberOfThreads":20,
"finishFunc":finish,
"finishFuncKwargs":{"greeting":"Howdy"},
"delayBetweenThreads":0.1}
random.seed()
mytest.runTest(**testerkwargs2)
print("Done")
"""
Searching for optimal parameters.
"""
from section1_video5_data import get_data
from sklearn import model_selection
from xgboost import XGBClassifier
seed=123
# Load prepared data
X, Y = get_data('../data/video1_diabetes.csv')
# Build our single model
c = XGBClassifier(random_state=seed)
#n_trees = range(500, 1000, 50)
#max_depth = range(1, 3) # 72.44% - {'max_depth': 1, 'n_estimators': 500}
#max_depth = range(3, 5) # 68.70% - {'max_depth': 3, 'n_estimators': 500}
n_trees = range(10, 500, 50)
max_depth = range(3, 5) # - 74.10% {'max_depth': 1, 'n_estimators': 260}
#max_depth = range(1, 3) # - 72.24% {'max_depth': 3, 'n_estimators': 60}
params_to_search = dict(n_estimators=n_trees, max_depth=max_depth)
grid_search = model_selection.GridSearchCV(c, params_to_search, scoring="neg_log_loss", n_jobs=-1, cv=10, iid=False)
grid_result = grid_search.fit(X, Y)
print("Found best params: %s" % (grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
params = grid_result.cv_results_['params']
for m, p in zip(means, params):
print("%f: %r" % (m, p))
# Check accuracy of a classfier once again
c = XGBClassifier(random_state=seed, **grid_result.best_params_)
results = c.fit(X, Y)
# using 10-fold Cross Validation.
results_kfold_model = model_selection.cross_val_score(c, X, Y, cv=10)
print("XGBoost accuracy:\t{:2.2f}%".format(results_kfold_model.mean()*100))
import numpy as np
from pytest import mark
from numpy.testing import assert_allclose
@mark.plots
def test_transition_map(init_plots):
axes_data = init_plots.plot_transition_map(cagr=False, full_frontier=False).lines[0].get_data()
values = np.genfromtxt('data/test_transition_map.csv', delimiter=',')
assert_allclose(axes_data, values, rtol=1e-1, atol=1e-1)
@mark.plots
def test_plot_assets(init_plots):
axes_data = init_plots.plot_assets(tickers='names').collections[0].get_offsets().data
values = np.genfromtxt('data/test_plot_assets.csv', delimiter=',')
assert_allclose(axes_data, values, rtol=1e-1, atol=1e-1)
@mark.plots
def test_plot_pair_ef(init_plots):
axes_data = init_plots.plot_pair_ef(tickers='names').lines[0].get_data()
values = np.genfromtxt('data/test_plot_pair_ef.csv', delimiter=',')
assert_allclose(axes_data, values, rtol=1e-1, atol=1e-1)
import time
from membase.api.rest_client import RestConnection, Bucket
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from basetestcase import BaseTestCase
from mc_bin_client import MemcachedError
from couchbase_helper.documentgenerator import BlobGenerator
from threading import Thread
class StatsCrashRepro(BaseTestCase):
def setUp(self):
        super(StatsCrashRepro, self).setUp()
self.timeout = 120
self.bucket_name = self.input.param("bucket", "default")
self.bucket_size = self.input.param("bucket_size", 100)
self.data_size = self.input.param("data_size", 2048)
self.threads_to_run = self.input.param("threads_to_run", 5)
# self.nodes_in = int(self.input.param("nodes_in", 1))
# self.servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
# rebalance = self.cluster.async_rebalance(self.servers[:1], self.servs_in, [])
# rebalance.result()
bucket_params=self._create_bucket_params(server=self.servers[0], size=self.bucket_size, replicas=self.num_replicas)
self.cluster.create_default_bucket(bucket_params)
self.buckets.append(Bucket(name="default",
num_replicas=self.num_replicas, bucket_size=self.bucket_size))
rest = RestConnection(self.servers[0])
self.nodes_server = rest.get_nodes()
def tearDown(self):
        super(StatsCrashRepro, self).tearDown()
def _load_doc_data_all_buckets(self, op_type='create', start=0, expiry=0):
loaded = False
count = 0
gen_load = BlobGenerator('warmup', 'warmup-', self.data_size, start=start, end=self.num_items)
while not loaded and count < 60:
try :
self._load_all_buckets(self.servers[0], gen_load, op_type, expiry)
loaded = True
except MemcachedError as error:
if error.status == 134:
loaded = False
self.log.error("Memcached error 134, wait for 5 seconds and then try again")
count += 1
time.sleep(5)
def _get_stats(self, stat_str='all'):
# for server in self.nodes_server:
server = self.servers[0]
mc_conn = MemcachedClientHelper.direct_client(server, self.bucket_name, self.timeout)
stat_result = mc_conn.stats(stat_str)
# self.log.info("Getting stats {0} : {1}".format(stat_str, stat_result))
self.log.info("Getting stats {0}".format(stat_str))
mc_conn.close()
def _run_get(self):
server = self.servers[0]
mc_conn = MemcachedClientHelper.direct_client(server, self.bucket_name, self.timeout)
for i in range(self.num_items):
key = "warmup{0}".format(i)
mc_conn.get(key)
def run_test(self):
ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
active_resident_threshold = int(self.input.param("active_resident_threshold", 10))
mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
stats = mc.stats()
threshold = int(self.input.param('threshold', stats[ep_threshold]))
threshold_reached = False
self.num_items = self.input.param("items", 10000)
self._load_doc_data_all_buckets('create')
# load items till reached threshold or mem-ratio is less than resident ratio threshold
while not threshold_reached :
mem_used = int(mc.stats()["mem_used"])
if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
self.log.info("mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s " % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"]))
items = self.num_items
self.num_items += self.input.param("items", 10000)
self._load_doc_data_all_buckets('create', items)
else:
threshold_reached = True
self.log.info("DGM state achieved!!!!")
# wait for draining of data before restart and warm up
for bucket in self.buckets:
RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket, bucket_type=self.bucket_type)
while True:
# read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])
read_data_task = Thread(target=self._run_get)
read_data_task.start()
#5 threads to run stats all and reset asynchronously
start = time.time()
while (time.time() - start) < 300:
stats_all_thread = []
stats_reset_thread = []
for i in range(self.threads_to_run):
stat_str = ''
stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
stats_all_thread[i].start()
stat_str = 'reset'
stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
stats_reset_thread[i].start()
for i in range(self.threads_to_run):
stats_all_thread[i].join()
stats_reset_thread[i].join()
del stats_all_thread
del stats_reset_thread
# read_data_task.result()
read_data_task.join()
# -*- coding: utf-8 -*-
{
'name': "Odoo Cogito Move Mutual",
'summary': "",
'author': "CogitoWEB",
'description': "Odoo Cogito move mutual",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Test',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'view/account_mutual_view.xml',
# 'security/ir.model.access.csv',
# 'security/security.xml'
],
# only loaded in demonstration mode
'demo': [
# 'demo.xml',
],
'installable': True
}
# Generated by Django 2.0.3 on 2018-04-13 22:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0006_auto_20180413_1527'),
]
operations = [
migrations.RenameField(
model_name='reply',
old_name='mediaItem',
new_name='reply_to',
),
migrations.AddField(
model_name='review',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
import numpy as np
from gym.spaces import Box, Dict, Discrete
from database_env.foop import DataBaseEnv_FOOP
from database_env.query_encoding import DataBaseEnv_QueryEncoding
class DataBaseEnv_FOOP_QueryEncoding(DataBaseEnv_FOOP, DataBaseEnv_QueryEncoding):
"""
Database environment with states and actions as in the article (https://arxiv.org/pdf/1911.11689.pdf)
and encoding like NEO (http://www.vldb.org/pvldb/vol12/p1705-marcus.pdf).
Suitable for use with RLlib.
Attributes:
        env_config(dict): Algorithm-specific configuration data; should contain an item describing the DB scheme.
"""
def __init__(self, env_config, is_join_graph_encoding=False):
super().__init__(env_config)
self.is_join_graph_encoding = is_join_graph_encoding
real_obs_shape = self.N_rels * self.N_cols + self.N_cols
if self.is_join_graph_encoding:
real_obs_shape += self.query_encoding_size
real_obs_shape = (real_obs_shape, )
self.observation_space = Dict({
'real_obs': Box(low = 0, high = 1, shape = real_obs_shape, dtype = np.int),
'action_mask': Box(low = 0, high = 1, shape = (len(self.actions), ), dtype = np.int),
})
def get_obs(self):
real_obs = [self.get_foop().flatten()]
if self.is_join_graph_encoding:
real_obs.append(self.join_graph_encoding)
real_obs.append(self.predicate_ohe)
real_obs = np.concatenate(real_obs).astype(np.int)
return {
'real_obs': real_obs.tolist(),
'action_mask': self.valid_actions().astype(np.int).tolist()
}
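# --- Mask-consumption sketch (not part of the original environment) ---
# 'action_mask' follows the usual RLlib parametric-actions pattern: invalid
# actions are masked out before the policy picks one, e.g.:
def masked_argmax(logits, action_mask):
    """Pick the highest-logit action among those the mask marks valid (== 1)."""
    masked = np.where(np.asarray(action_mask) == 1, logits, -np.inf)
    return int(np.argmax(masked))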
def intercala(nomeA, nomeB, nomeS):
    fileA = open(nomeA, 'rt')
    fileB = open(nomeB, 'rt')
    fileS = open(nomeS, 'wt')
    nA = int(fileA.readline())
    nB = int(fileB.readline())
    valsA = [int(fileA.readline()) for _ in range(nA)]
    valsB = [int(fileB.readline()) for _ in range(nB)]
    fileS.write(str(nA + nB) + '\n')
    for valor in sorted(valsA + valsB):  # merge the two (assumed sorted) inputs
        fileS.write(str(valor) + '\n')
    fileA.close()
    fileB.close()
    fileS.close()
def main():
nomeA = input('Nome do primeiro arquivo: ')
nomeB = input('Nome do segundo arquivo: ')
nomeS = input('Nome para o arquivo de saida: ')
intercala(nomeA, nomeB, nomeS)
if __name__ == "__main__":
main()
import RPi.GPIO as GPIO
import time
class Motion:
def __init__(self, ui, pin, timeout=30):
self._ui = ui
self._pin = int(pin)
self._timeout = int(timeout)
self._last_motion = time.time()
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD
GPIO.setup(self._pin, GPIO.IN)
def check(self):
now = time.time()
if GPIO.input(self._pin):
self._last_motion = now
if (now - self._last_motion) <= self._timeout:
self._ui.on()
#if not self._ui.backlight_on:
# print "Turning UI on"
else:
# elif self._ui.backlight_on:
self._ui.off()
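# --- Polling-loop sketch (hypothetical; the stub UI and pin 17 are assumptions) ---
if __name__ == '__main__':
    class StubUI:
        def on(self):
            print('backlight on')
        def off(self):
            print('backlight off')
    motion = Motion(StubUI(), pin=17, timeout=30)
    while True:
        motion.check()
        time.sleep(0.5)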
#
# PySNMP MIB module H3C-DOMAIN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-DOMAIN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:08:30 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
h3cCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cCommon")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
ObjectIdentity, iso, Bits, Integer32, ModuleIdentity, TimeTicks, IpAddress, NotificationType, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, MibIdentifier, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "iso", "Bits", "Integer32", "ModuleIdentity", "TimeTicks", "IpAddress", "NotificationType", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "MibIdentifier", "Counter32", "Unsigned32")
RowStatus, TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "DisplayString", "TextualConvention")
h3cDomain = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46))
if mibBuilder.loadTexts: h3cDomain.setLastUpdated('200908050000Z')
if mibBuilder.loadTexts: h3cDomain.setOrganization('H3C Technologies Co., Ltd.')
class H3cModeOfDomainScheme(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("none", 1), ("local", 2), ("radius", 3), ("tacacs", 4))
class H3cAAATypeDomainScheme(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("accounting", 1), ("authentication", 2), ("authorization", 3), ("none", 4))
class H3cAccessModeofDomainScheme(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
namedValues = NamedValues(("default", 1), ("login", 2), ("lanAccess", 3), ("portal", 4), ("ppp", 5), ("gcm", 6), ("dvpn", 7), ("dhcp", 8), ("voice", 9), ("superauthen", 10), ("command", 11), ("wapi", 12))
h3cDomainControl = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 1))
h3cDomainDefault = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDomainDefault.setStatus('current')
h3cDomainTables = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2))
h3cDomainInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1), )
if mibBuilder.loadTexts: h3cDomainInfoTable.setStatus('current')
h3cDomainInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1), ).setIndexNames((0, "H3C-DOMAIN-MIB", "h3cDomainName"))
if mibBuilder.loadTexts: h3cDomainInfoEntry.setStatus('current')
h3cDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128)))
if mibBuilder.loadTexts: h3cDomainName.setStatus('current')
h3cDomainState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("block", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainState.setStatus('current')
h3cDomainMaxAccessNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMaxAccessNum.setStatus('current')
h3cDomainVlanAssignMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("integer", 1), ("string", 2), ("vlanlist", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainVlanAssignMode.setStatus('current')
h3cDomainIdleCutEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 5), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIdleCutEnable.setStatus('current')
h3cDomainIdleCutMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 120))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIdleCutMaxTime.setStatus('current')
h3cDomainIdleCutMinFlow = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10240000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIdleCutMinFlow.setStatus('current')
h3cDomainMessengerEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 8), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMessengerEnable.setStatus('current')
h3cDomainMessengerLimitTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMessengerLimitTime.setStatus('current')
h3cDomainMessengerSpanTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 60))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainMessengerSpanTime.setStatus('current')
h3cDomainSelfServiceEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 11), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSelfServiceEnable.setStatus('current')
h3cDomainSelfServiceURL = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSelfServiceURL.setStatus('current')
h3cDomainAccFailureAction = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ignore", 1), ("reject", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainAccFailureAction.setStatus('current')
h3cDomainRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 14), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainRowStatus.setStatus('current')
h3cDomainCurrentAccessNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDomainCurrentAccessNum.setStatus('current')
h3cDomainSchemeTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2), )
if mibBuilder.loadTexts: h3cDomainSchemeTable.setStatus('current')
h3cDomainSchemeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1), ).setIndexNames((0, "H3C-DOMAIN-MIB", "h3cDomainName"), (0, "H3C-DOMAIN-MIB", "h3cDomainSchemeIndex"))
if mibBuilder.loadTexts: h3cDomainSchemeEntry.setStatus('current')
h3cDomainSchemeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cDomainSchemeIndex.setStatus('current')
h3cDomainSchemeMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 2), H3cModeOfDomainScheme()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeMode.setStatus('current')
h3cDomainAuthSchemeName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainAuthSchemeName.setStatus('current')
h3cDomainAcctSchemeName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainAcctSchemeName.setStatus('current')
h3cDomainSchemeRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeRowStatus.setStatus('current')
h3cDomainSchemeAAAType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 6), H3cAAATypeDomainScheme()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeAAAType.setStatus('current')
h3cDomainSchemeAAAName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeAAAName.setStatus('current')
h3cDomainSchemeAccessMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 2, 1, 8), H3cAccessModeofDomainScheme()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainSchemeAccessMode.setStatus('current')
h3cDomainIpPoolTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3), )
if mibBuilder.loadTexts: h3cDomainIpPoolTable.setStatus('current')
h3cDomainIpPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1), ).setIndexNames((0, "H3C-DOMAIN-MIB", "h3cDomainName"), (0, "H3C-DOMAIN-MIB", "h3cDomainIpPoolNum"))
if mibBuilder.loadTexts: h3cDomainIpPoolEntry.setStatus('current')
h3cDomainIpPoolNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99)))
if mibBuilder.loadTexts: h3cDomainIpPoolNum.setStatus('current')
h3cDomainIpPoolLowIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 2), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolLowIpAddrType.setStatus('current')
h3cDomainIpPoolLowIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 3), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolLowIpAddr.setStatus('current')
h3cDomainIpPoolLen = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolLen.setStatus('current')
h3cDomainIpPoolRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 46, 2, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cDomainIpPoolRowStatus.setStatus('current')
mibBuilder.exportSymbols("H3C-DOMAIN-MIB", H3cAAATypeDomainScheme=H3cAAATypeDomainScheme, h3cDomainSelfServiceURL=h3cDomainSelfServiceURL, h3cDomainIpPoolEntry=h3cDomainIpPoolEntry, h3cDomainInfoEntry=h3cDomainInfoEntry, h3cDomainMessengerLimitTime=h3cDomainMessengerLimitTime, h3cDomainIdleCutEnable=h3cDomainIdleCutEnable, h3cDomainSchemeRowStatus=h3cDomainSchemeRowStatus, h3cDomainIpPoolLen=h3cDomainIpPoolLen, h3cDomainName=h3cDomainName, h3cDomain=h3cDomain, h3cDomainIdleCutMaxTime=h3cDomainIdleCutMaxTime, H3cAccessModeofDomainScheme=H3cAccessModeofDomainScheme, h3cDomainRowStatus=h3cDomainRowStatus, h3cDomainAcctSchemeName=h3cDomainAcctSchemeName, h3cDomainVlanAssignMode=h3cDomainVlanAssignMode, h3cDomainIdleCutMinFlow=h3cDomainIdleCutMinFlow, h3cDomainSelfServiceEnable=h3cDomainSelfServiceEnable, h3cDomainControl=h3cDomainControl, h3cDomainMessengerEnable=h3cDomainMessengerEnable, h3cDomainSchemeAAAName=h3cDomainSchemeAAAName, h3cDomainIpPoolTable=h3cDomainIpPoolTable, h3cDomainAccFailureAction=h3cDomainAccFailureAction, h3cDomainIpPoolRowStatus=h3cDomainIpPoolRowStatus, h3cDomainIpPoolLowIpAddrType=h3cDomainIpPoolLowIpAddrType, H3cModeOfDomainScheme=H3cModeOfDomainScheme, h3cDomainDefault=h3cDomainDefault, h3cDomainSchemeTable=h3cDomainSchemeTable, h3cDomainMessengerSpanTime=h3cDomainMessengerSpanTime, h3cDomainSchemeEntry=h3cDomainSchemeEntry, h3cDomainSchemeAccessMode=h3cDomainSchemeAccessMode, h3cDomainSchemeMode=h3cDomainSchemeMode, PYSNMP_MODULE_ID=h3cDomain, h3cDomainAuthSchemeName=h3cDomainAuthSchemeName, h3cDomainTables=h3cDomainTables, h3cDomainIpPoolNum=h3cDomainIpPoolNum, h3cDomainInfoTable=h3cDomainInfoTable, h3cDomainCurrentAccessNum=h3cDomainCurrentAccessNum, h3cDomainSchemeAAAType=h3cDomainSchemeAAAType, h3cDomainIpPoolLowIpAddr=h3cDomainIpPoolLowIpAddr, h3cDomainMaxAccessNum=h3cDomainMaxAccessNum, h3cDomainSchemeIndex=h3cDomainSchemeIndex, h3cDomainState=h3cDomainState)
import sys
import numpy as np
from matplotlib import pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
def preprocess(array: np.ndarray):
""" Normalizes the supplied array and reshapes it into the appropriate format """
array = array.astype("float32")/255.0
array = np.reshape(array, (len(array), 28, 28, 1))
print("Final Shape:", array.shape)
return array
def noise(array):
""" Adds random noise to each image in the supplied array """
noise_factor = 0.5
noise_array = array + noise_factor * \
np.random.normal(loc=0.0, scale=1.0, size=array.shape)
return np.clip(noise_array, 0.0, 1.0)
def load_data(path="mnist.npz"):
    """ Loading the data and applying the preprocessing steps """
    with np.load(path, allow_pickle=True) as f:
train_data, test_data = f['x_train'], f['x_test']
train_data = preprocess(train_data)
test_data = preprocess(test_data)
return train_data, test_data
train_data, test_data = load_data()
# create a copy of data with noise
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)
def build_model(input_shape=(28, 28, 1)):
""" Building the autoencoder model for mnist """
input = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, (3, 3), activation='relu',
padding='same', name="Conv1")(input)
x = layers.MaxPooling2D((2, 2), padding='same', name='Pool1')(x)
x = layers.Conv2D(32, (3, 3), activation='relu',
padding='same', name='Conv2')(x)
x = layers.MaxPooling2D((2, 2), padding='same', name='Pool2')(x)
# decoder
x = layers.Conv2DTranspose(
32, (3, 3), strides=2, activation='relu', padding='same', name="Conv1_transpose")(x)
x = layers.Conv2DTranspose(
32, (3, 3), strides=2, activation='relu', padding='same', name='Conv2_transpose')(x)
output = layers.Conv2D(1, (3, 3), activation='sigmoid',
padding='same', name="output_layer")(x)
autoencoder = keras.models.Model(input, output, name='AutoEncoder-Model')
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
return autoencoder
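# Shape walk-through (derived from the layers above): a 28x28x1 input is
# compressed by the two stride-2 max-pool stages to 7x7x32, then the two
# stride-2 Conv2DTranspose layers restore 14x14x32 and 28x28x32 before the
# final 1-channel sigmoid Conv2D maps back to 28x28x1.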
def train_model(checkpoint_dir="tmp", monitor="val_loss"):
autoencoder = build_model()
autoencoder.summary()
early_stopping = keras.callbacks.EarlyStopping(
monitor=monitor,
patience=5,
restore_best_weights=True)
model_checkpoint = keras.callbacks.ModelCheckpoint(
checkpoint_dir,
monitor=monitor,
verbose=0,
save_best_only=True,
save_weights_only=False,
mode="auto",
save_freq="epoch",
options=None)
autoencoder.fit(
x=noisy_train_data,
y=train_data,
epochs=100,
batch_size=128,
shuffle=True,
validation_data=(noisy_test_data, test_data),
callbacks=[early_stopping, model_checkpoint])
autoencoder.save('saved_model')
def display(array1, array2, n=10):
"""
Displays n random images from each one of the supplied arrays.
args:
n: Number of output to show
"""
indices = np.random.randint(len(array1), size=n)
images1 = array1[indices, :]
images2 = array2[indices, :]
plt.figure(figsize=(20, 4))
for i, (image1, image2) in enumerate(zip(images1, images2)):
ax = plt.subplot(2, n, i + 1)
plt.imshow(image1.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(image2.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def show_output():
    """ Loads the trained model and displays its denoised predictions """
    try:
        autoencoder = keras.models.load_model(
            "saved_model")  # load the model saved by train_model
    except Exception:
        print("There is no model. Please train the model first, then use the run command.")
        return
    predictions = autoencoder.predict(noisy_test_data)
    display(noisy_test_data, predictions, n=10)
if __name__ == '__main__':
    try:
        if sys.argv[1] == "train":
            train_model()
        elif sys.argv[1] == "run":
            show_output()
    except Exception:
        print("Please use the 'train' or 'run' argument to run the process. Check the README for more details.")
|
nilq/baby-python
|
python
|
import math
from time import sleep
from timeit import default_timer as timer
LMS8001_C1_STEP=1.2e-12
LMS8001_C2_STEP=10.0e-12
LMS8001_C3_STEP=1.2e-12
LMS8001_C2_FIX=150.0e-12
LMS8001_C3_FIX=5.0e-12
LMS8001_R2_0=24.6e3
LMS8001_R3_0=14.9e3
class PLL_METHODS(object):
def __init__(self, chip, fRef):
self.chip = chip
self.fRef = fRef
def estim_KVCO(self, FIT_KVCO=True, PROFILE=0):
# Check VCO_SEL and VCO_FREQ
reg_pll_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_pll_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
vco_sel=reg_pll_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']
vco_freq=reg_pll_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']
if not (FIT_KVCO):
# Use Average for KVCO in Calculations
if (vco_sel==1):
KVCO_avg=44.404e6
elif (vco_sel==2):
KVCO_avg=33.924e6
elif (vco_sel==3):
KVCO_avg=41.455e6
else:
self.chip.log('Ext. LO selected in PLL_PROFILE.')
return None
else:
# Use Fitted Values for KVCO in Calculations
# Changed on 17.05.2017. with new results
# Following equations fitted for VTUNE=0.7 V
CBANK=vco_freq
if (vco_sel==1):
KVCO_avg=27.296e6 * (2.26895e-10*CBANK**4+4.98467e-9*CBANK**3+9.01884e-6*CBANK**2+3.69804e-3*CBANK**1+1.01283e+00)
elif (vco_sel==2):
KVCO_avg=23.277e6 * (8.38795e-11*CBANK**4+2.20202e-08*CBANK**3+3.68009e-06*CBANK**2+3.22264e-03*CBANK**1+1.01093e+00)
elif (vco_sel==3):
KVCO_avg=29.110e6 * (-1.54988e-11*CBANK**4+4.27489e-08*CBANK**3+5.26971e-06*CBANK**2+2.83453e-03*CBANK**1+9.94192e-01)
else:
self.chip.log('Ext. LO selected in PLL_PROFILE.')
return None
return KVCO_avg
def calc_ideal_LPF(self, fc, PM_deg, Icp, KVCO_HzV, N, gamma=1.045, T31=0.1):
PM_rad=PM_deg*math.pi/180
wc=2*math.pi*fc
Kphase=Icp/(2*math.pi)
Kvco=2*math.pi*KVCO_HzV
# Approx. formula, Dean Banerjee
T1=(1.0/math.cos(PM_rad)-math.tan(PM_rad))/(wc*(1+T31))
T3=T1*T31
T2=gamma/((wc**2)*(T1+T3))
A0=(Kphase*Kvco)/((wc**2)*N)*math.sqrt((1+(wc**2)*(T2**2))/((1+(wc**2)*(T1**2))*(1+(wc**2)*(T3**2))))
A2=A0*T1*T3
A1=A0*(T1+T3)
C1=A2/(T2**2)*(1+math.sqrt(1+T2/A2*(T2*A0-A1)))
C3=(-(T2**2)*(C1**2)+T2*A1*C1-A2*A0)/((T2**2)*C1-A2)
C2=A0-C1-C3
R2=T2/C2
R3=A2/(C1*C3*T2)
LPF_vals=dict()
LPF_vals['C1']=C1
LPF_vals['C2']=C2
LPF_vals['C3']=C3
LPF_vals['R2']=R2
LPF_vals['R3']=R3
return LPF_vals
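# Note on the closed-form inversion above (a sketch of Banerjee's third-order
# passive loop-filter identities; variable names follow the code, not a
# datasheet): the filter has zero time constant T2=R2*C2, total capacitance
# A0=C1+C2+C3, A1=A0*(T1+T3) and A2=A0*T1*T3=C1*C2*C3*R2*R3, which the
# expressions C1, C3, C2=A0-C1-C3, R2=T2/C2 and R3=A2/(C1*C3*T2) invert.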
def calc_real_LPF(self, LPF_IDEAL_VALS):
C1_cond=(LMS8001_C1_STEP<=LPF_IDEAL_VALS['C1']<=15*LMS8001_C1_STEP)
C2_cond=(LMS8001_C2_FIX<=LPF_IDEAL_VALS['C2']<=LMS8001_C2_FIX+15*LMS8001_C2_STEP)
C3_cond=(LMS8001_C3_FIX+LMS8001_C3_STEP<=LPF_IDEAL_VALS['C3']<=LMS8001_C3_FIX+15*LMS8001_C3_STEP)
R2_cond=(LMS8001_R2_0/15.0<=LPF_IDEAL_VALS['R2']<=LMS8001_R2_0)
R3_cond=(LMS8001_R3_0/15.0<=LPF_IDEAL_VALS['R3']<=LMS8001_R3_0)
LPFvals_OK=(C1_cond and C2_cond and C3_cond and R2_cond and R3_cond)
LPF_REAL_VALS=dict()
if (LPFvals_OK):
C1_CODE=int(round(LPF_IDEAL_VALS['C1']/LMS8001_C1_STEP))
C2_CODE=int(round((LPF_IDEAL_VALS['C2']-LMS8001_C2_FIX)/LMS8001_C2_STEP))
C3_CODE=int(round((LPF_IDEAL_VALS['C3']-LMS8001_C3_FIX)/LMS8001_C3_STEP))
C1_CODE=int(min(max(C1_CODE,0),15))
C2_CODE=int(min(max(C2_CODE,0),15))
C3_CODE=int(min(max(C3_CODE,0),15))
R2_CODE=int(round(LMS8001_R2_0/LPF_IDEAL_VALS['R2']))
R3_CODE=int(round(LMS8001_R3_0/LPF_IDEAL_VALS['R3']))
R2_CODE=min(max(R2_CODE,1),15)
R3_CODE=min(max(R3_CODE,1),15)
LPF_REAL_VALS['C1_CODE']=C1_CODE
LPF_REAL_VALS['C2_CODE']=C2_CODE
LPF_REAL_VALS['C3_CODE']=C3_CODE
LPF_REAL_VALS['R2_CODE']=R2_CODE
LPF_REAL_VALS['R3_CODE']=R3_CODE
return (LPFvals_OK, LPF_REAL_VALS)
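# Quantization example (hypothetical values, using the LMS8001_* constants at
# the top of this file): an ideal C1 of 9.8 pF maps to
# C1_CODE=round(9.8e-12/LMS8001_C1_STEP)=8, and an ideal R2 of 6.0 kOhm maps
# to R2_CODE=round(LMS8001_R2_0/6.0e3)=4, i.e. R2 is realized as R2_0/CODE.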
def setSDM(self, DITHER_EN=0, SEL_SDMCLK=0, REV_SDMCLK=0, PROFILE=0):
# Sets Sigma-Delta Modulator Config.
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_PLL_SDM_CFG=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
reg_PLL_SDM_CFG['DITHER_EN_'+str(PROFILE)]=DITHER_EN
reg_PLL_SDM_CFG['SEL_SDMCLK_'+str(PROFILE)]=SEL_SDMCLK
reg_PLL_SDM_CFG['REV_SDMCLK_'+str(PROFILE)]=REV_SDMCLK
self.chip.setImmediateMode(Imd_Mode)
def setVCOBIAS(self, EN=0, BYP_VCOREG=1, CURLIM_VCOREG=1, SPDUP_VCOREG=0, VDIV_VCOREG=32):
"""Sets VCO Bias Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
regVCOBIAS=self.chip.getRegisterByName('PLL_VREG')
regVCOBIAS['EN_VCOBIAS']=EN
regVCOBIAS['BYP_VCOREG']=BYP_VCOREG
regVCOBIAS['CURLIM_VCOREG']=CURLIM_VCOREG
regVCOBIAS['SPDUP_VCOREG']=SPDUP_VCOREG
regVCOBIAS['VDIV_VCOREG<7:0>']=VDIV_VCOREG
self.chip.setImmediateMode(Imd_Mode)
def setSPDUP_VCO(self, SPDUP=0, PROFILE=0):
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_VCO_CFG=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_VCO_CFG['SPDUP_VCO_'+str(PROFILE)]=SPDUP
self.chip.setImmediateMode(Imd_Mode)
def setSPDUP_VCOREG(self, SPDUP=0):
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
regVCOBIAS=self.chip.getRegisterByName('PLL_VREG')
regVCOBIAS['SPDUP_VCOREG']=SPDUP
self.chip.setImmediateMode(Imd_Mode)
def setXBUF(self, EN=0, BYPEN=0, SLFBEN=1):
"""Sets XBUF Configuration"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
regXBUF=self.chip.getRegisterByName('PLL_CFG_XBUF')
regXBUF['PLL_XBUF_EN']=EN
regXBUF['PLL_XBUF_SLFBEN']=SLFBEN
regXBUF['PLL_XBUF_BYPEN']=BYPEN
self.chip.setImmediateMode(Imd_Mode)
def setCP(self, PULSE=4, OFS=0, ICT_CP=16, PROFILE=0):
"""Sets CP Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_CP_CFG0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
reg_CP_CFG0['PULSE_'+str(PROFILE)+'<5:0>']=PULSE
reg_CP_CFG0['OFS_'+str(PROFILE)+'<5:0>']=OFS
reg_CP_CFG1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
reg_CP_CFG1['ICT_CP_'+str(PROFILE)+'<4:0>']=ICT_CP
reg_PLL_ENABLE=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
if (OFS>0):
reg_PLL_ENABLE['PLL_EN_CPOFS_'+str(PROFILE)]=1
else:
reg_PLL_ENABLE['PLL_EN_CPOFS_'+str(PROFILE)]=0
self.chip.setImmediateMode(Imd_Mode)
def getCP(self, PROFILE=0):
"""Returns CP Parameters"""
d=dict()
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_CP_CFG0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
d["PULSE"]=reg_CP_CFG0['PULSE_'+str(PROFILE)+'<5:0>']
d["OFS"]=reg_CP_CFG0['OFS_'+str(PROFILE)+'<5:0>']
reg_CP_CFG1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
d["ICT_CP"]=reg_CP_CFG1['ICT_CP_'+str(PROFILE)+'<4:0>']
reg_PLL_ENABLE=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
d["EN_CPOFS"]=reg_PLL_ENABLE['PLL_EN_CPOFS_'+str(PROFILE)]
self.chip.setImmediateMode(Imd_Mode)
return d
def setCP_FLOCK(self, PULSE=4, OFS=0, PROFILE=0):
reg_pll_flock_cfg2=self.chip.getRegisterByName('PLL_FLOCK_CFG2_'+str(PROFILE))
reg_pll_flock_cfg2['FLOCK_PULSE_'+str(PROFILE)+'<5:0>']=int(PULSE)
reg_pll_flock_cfg2['FLOCK_OFS_'+str(PROFILE)+'<5:0>']=int(OFS)
def setLD(self, LD_VCT=2, PROFILE=0):
"""Sets Lock-Detector's Comparator Threashold"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
reg_pll_enable['PLL_EN_LD_'+str(PROFILE)]=1
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
reg_pll_cp_cfg1['LD_VCT_'+str(PROFILE)+'<1:0>']=LD_VCT
self.chip.setImmediateMode(Imd_Mode)
def setPFD(self, DEL=0, FLIP=0, PROFILE=0):
"""Sets PFD Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_CP_CFG0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
reg_CP_CFG0['FLIP_'+str(PROFILE)]=FLIP
reg_CP_CFG0['DEL_'+str(PROFILE)+'<1:0>']=DEL
self.chip.setImmediateMode(Imd_Mode)
def setVTUNE_VCT(self, VTUNE_VCT, PROFILE=0):
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
def openPLL(self, VTUNE_VCT=2, PROFILE=0, dbgMode=False):
"""Breaks the PLL Loop and sets the fixed VCO tuning voltage"""
VTUNE_VCT=int(VTUNE_VCT)
VTUNE_DICT={0:300, 1:600, 2:750, 3:900}
if (VTUNE_VCT>3):
VTUNE_VCT=3
elif (VTUNE_VCT<0):
VTUNE_VCT=0
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['LPFSW_'+str(PROFILE)]=1
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
if (dbgMode):
self.chip.log("PLL Loop Broken. VTUNE=%.2f mV" %(VTUNE_DICT[VTUNE_VCT]))
def closePLL(self, PROFILE=0):
"""Closes PLL Loop"""
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['LPFSW_'+str(PROFILE)]=0
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=2
def setVCO(self, SEL=3, FREQ=128, AMP=1, VCO_AAC_EN=True, VDIV_SWVDD=2, PROFILE=0):
"""Sets VCO Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_VCO_FREQ=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_VCO_FREQ['VCO_FREQ_'+str(PROFILE)+'<7:0>']=FREQ
reg_VCO_CFG=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
if (VCO_AAC_EN):
reg_VCO_CFG['VCO_AAC_EN_'+str(PROFILE)]=1
else:
reg_VCO_CFG['VCO_AAC_EN_'+str(PROFILE)]=0
reg_VCO_CFG['VCO_SEL_'+str(PROFILE)+'<1:0>']=SEL
reg_VCO_CFG['VCO_AMP_'+str(PROFILE)+'<6:0>']=AMP
reg_VCO_CFG['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']=VDIV_SWVDD
self.chip.setImmediateMode(Imd_Mode)
def setFFDIV(self, FFMOD=0, PROFILE=0):
"""Sets FF-DIV Modulus"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_PLL_ENABLE=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
if (FFMOD>0):
reg_PLL_ENABLE['PLL_EN_FFCORE_'+str(PROFILE)]=1
else:
reg_PLL_ENABLE['PLL_EN_FFCORE_'+str(PROFILE)]=0
reg_FF_CFG=self.chip.getRegisterByName('PLL_FF_CFG_'+str(PROFILE))
if (FFMOD>0):
reg_FF_CFG['FFDIV_SEL_'+str(PROFILE)]=1
else:
reg_FF_CFG['FFDIV_SEL_'+str(PROFILE)]=0
reg_FF_CFG['FFCORE_MOD_'+str(PROFILE)+'<1:0>']=FFMOD
reg_FF_CFG['FF_MOD_'+str(PROFILE)+'<1:0>']=FFMOD
self.chip.setImmediateMode(Imd_Mode)
def setFBDIV(self, N_INT, N_FRAC, IntN_Mode=False, PROFILE=0):
"""Sets FB-DIV Parameters"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_SDM_CFG=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
if (IntN_Mode):
reg_SDM_CFG['INTMOD_EN_'+str(PROFILE)]=1
N_FRAC_H=0
N_FRAC_L=0
else:
reg_SDM_CFG['INTMOD_EN_'+str(PROFILE)]=0
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
#if (DITHER_EN):
# reg_SDM_CFG['DITHER_EN_'+str(PROFILE)]=1
#else:
# reg_SDM_CFG['DITHER_EN_'+str(PROFILE)]=0
reg_SDM_CFG['INTMOD_'+str(PROFILE)+'<9:0>']=N_INT
reg_FRACMODL=self.chip.getRegisterByName('PLL_FRACMODL_'+str(PROFILE))
reg_FRACMODL['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_FRACMODH=self.chip.getRegisterByName('PLL_FRACMODH_'+str(PROFILE))
reg_FRACMODH['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
self.chip.setImmediateMode(Imd_Mode)
def setLPF(self, C1=8, C2=8, R2=1, C3=8, R3=1, PROFILE=0):
"""Sets LPF Element Values"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_PLL_LPF_CFG1=self.chip.getRegisterByName('PLL_LPF_CFG1_'+str(PROFILE))
reg_PLL_LPF_CFG1['R3_'+str(PROFILE)+'<3:0>']=R3
reg_PLL_LPF_CFG1['R2_'+str(PROFILE)+'<3:0>']=R2
reg_PLL_LPF_CFG1['C2_'+str(PROFILE)+'<3:0>']=C2
reg_PLL_LPF_CFG1['C1_'+str(PROFILE)+'<3:0>']=C1
reg_PLL_LPF_CFG2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_PLL_LPF_CFG2['C3_'+str(PROFILE)+'<3:0>']=C3
self.chip.setImmediateMode(Imd_Mode)
def setLPF_FLOCK(self, C1=8, C2=8, R2=1, C3=8, R3=1, PROFILE=0):
reg_pll_flock_cfg1=self.chip.getRegisterByName('PLL_FLOCK_CFG1_'+str(PROFILE))
reg_pll_flock_cfg1['FLOCK_R3_'+str(PROFILE)+'<3:0>']=int(R3)
reg_pll_flock_cfg1['FLOCK_R2_'+str(PROFILE)+'<3:0>']=int(R2)
reg_pll_flock_cfg1['FLOCK_C1_'+str(PROFILE)+'<3:0>']=int(C1)
reg_pll_flock_cfg1['FLOCK_C2_'+str(PROFILE)+'<3:0>']=int(C2)
reg_pll_flock_cfg2=self.chip.getRegisterByName('PLL_FLOCK_CFG2_'+str(PROFILE))
reg_pll_flock_cfg2['FLOCK_C3_'+str(PROFILE)+'<3:0>']=int(C3)
def setLODIST(self, channel, EN=True, EN_FLOCK=False, IQ=True, phase=0, PROFILE=0):
"""Sets LODIST Configuration"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
channel_dict={'A':0, 'B':1, 'C':2, 'D':3}
phase_dict={0:0, 90:1, 180:2, 270:3}
if (channel not in channel_dict.keys()):
self.chip.log("Not valid LO-DIST channel name.")
return None
if (phase not in phase_dict.keys()):
self.chip.log("Not valid LO-DIST phase value.")
return None
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
reg_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
val_old=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']
if (EN):
reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=val_old|int(2**channel_dict[channel])
reg_pll_enable['PLL_LODIST_EN_BIAS_'+str(PROFILE)]=1
else:
reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=val_old&(15-int(2**channel_dict[channel]))
# Disable LO DIST Bias if not needed
if (reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']==0):
reg_pll_enable['PLL_LODIST_EN_BIAS_'+str(PROFILE)]=0
reg_pll_enable['PLL_LODIST_EN_DIV2IQ_'+str(PROFILE)]=0
if (IQ==True):
reg_lodist_cfg['PLL_LODIST_FSP_OUT'+str(channel_dict[channel])+'_'+str(PROFILE)+'<2:0>']=phase_dict[phase]
reg_pll_enable['PLL_LODIST_EN_DIV2IQ_'+str(PROFILE)]=1
else:
reg_lodist_cfg['PLL_LODIST_FSP_OUT'+str(channel_dict[channel])+'_'+str(PROFILE)+'<2:0>']=phase_dict[phase]+4
A_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT0_'+str(PROFILE)+'<2:0>']
A_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&1
B_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT1_'+str(PROFILE)+'<2:0>']
B_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&2
C_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT2_'+str(PROFILE)+'<2:0>']
C_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&4
D_IQ=reg_lodist_cfg['PLL_LODIST_FSP_OUT3_'+str(PROFILE)+'<2:0>']
D_EN=reg_lodist_cfg['PLL_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']&8
# Disable DivBy2 IQ Gen. Core if not needed
if ((A_IQ>=4 or A_EN==0) and (B_IQ>=4 or B_EN==0) and (C_IQ>=4 or C_EN==0) and (D_IQ>=4 or D_EN==0)):
reg_pll_enable['PLL_LODIST_EN_DIV2IQ_'+str(PROFILE)]=0
# Enable Output of desired LO channel during the Fast-Lock Operating Mode of LMS8001-PLL if EN_FLOCK=True
if (EN_FLOCK):
if (channel=='A'):
LO_FLOCK_EN_MASK=1
elif (channel=='B'):
LO_FLOCK_EN_MASK=2
elif (channel=='C'):
LO_FLOCK_EN_MASK=4
else:
LO_FLOCK_EN_MASK=8
else:
LO_FLOCK_EN_MASK=0
reg_pll_flock_cfg3=self.chip.getRegisterByName('PLL_FLOCK_CFG3_'+str(PROFILE))
LO_FLOCK_EN=reg_pll_flock_cfg3['FLOCK_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']
reg_pll_flock_cfg3['FLOCK_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=LO_FLOCK_EN | LO_FLOCK_EN_MASK
# Set Back to initial value of ImmediateMode
self.chip.setImmediateMode(Imd_Mode)
def setFLOCK(self, BWEF, LoopBW=600.0e3, PM=50.0, FLOCK_N=200, Ch_EN=[], METHOD='SIMPLE', FIT_KVCO=True, FLOCK_VCO_SPDUP=1, PROFILE=0, dbgMode=False):
"""
Automatically calculates Fast-Lock Mode parameters from BWEF argument. BWEF-BandWidth Extension Factor
METHOD='SIMPLE'
Clips charge pump current settings in Fast-Lock Operating Mode if ICP_NORMAL*BWEF^2 is greater than (ICP)max.
Only changes the values of LoopFilter resistors during Fast-Lock mode.
Capacitor values are the same as in NORMAL operating mode.
METHOD=='SMART'
Takes the phase-margin argument PM to calculate LoopFilter elements and maximum pulse CP current which will give
the PLL loop bandwidth value of LoopBW with desired phase margin PM.
"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
LO_OUT_EN=0
if ('A' in Ch_EN):
LO_OUT_EN+=1
if ('B' in Ch_EN):
LO_OUT_EN+=2
if ('C' in Ch_EN):
LO_OUT_EN+=4
if ('D' in Ch_EN):
LO_OUT_EN+=8
if (METHOD not in ['SIMPLE', 'SMART']):
self.chip.log("Bad Fast-Lock Mode Optimization Method. METHOD='SIMPLE' or METHOD='SMART'.")
return False
reg_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
PULSE=reg_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
OFS=reg_cp_cfg0['OFS_'+str(PROFILE)+'<5:0>']
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
ICT_CP_INIT=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
reg_pll_flock_cfg3=self.chip.getRegisterByName('PLL_FLOCK_CFG3_'+str(PROFILE))
reg_pll_flock_cfg3['FLOCK_LODIST_EN_OUT_'+str(PROFILE)+'<3:0>']=LO_OUT_EN
reg_pll_flock_cfg3['FLOCK_VCO_SPDUP_'+str(PROFILE)]=0
reg_pll_flock_cfg3['FLOCK_N_'+str(PROFILE)+'<9:0>']=min(FLOCK_N, 1023)
reg_pll_flock_cfg3['FLOCK_VCO_SPDUP_'+str(PROFILE)]=FLOCK_VCO_SPDUP
if (METHOD=='SIMPLE'):
reg_lpf_cfg1=self.chip.getRegisterByName('PLL_LPF_CFG1_'+str(PROFILE))
R3=reg_lpf_cfg1['R3_'+str(PROFILE)+'<3:0>']
R2=reg_lpf_cfg1['R2_'+str(PROFILE)+'<3:0>']
C1=reg_lpf_cfg1['C1_'+str(PROFILE)+'<3:0>']
C2=reg_lpf_cfg1['C2_'+str(PROFILE)+'<3:0>']
reg_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
C3=reg_lpf_cfg2['C3_'+str(PROFILE)+'<3:0>']
R3_FLOCK=min(round(R3*BWEF), 15)
# R3_FLOCK=min(round(R3*math.sqrt(BWEF)), 15)
R2_FLOCK=min(round(R2*BWEF), 15)
PULSE_FLOCK=min(round(PULSE*BWEF**2), 63)
#PULSE_FLOCK=min(round(PULSE*BWEF), 63)
OFS_FLOCK=min(round(OFS*PULSE_FLOCK/PULSE), 63)
#OFS_FLOCK=OFS
self.setLPF_FLOCK(C1=C1, C2=C2, R2=R2_FLOCK, C3=C3, R3=R3_FLOCK, PROFILE=PROFILE)
self.setCP_FLOCK(PULSE=PULSE_FLOCK, OFS=OFS_FLOCK, PROFILE=PROFILE)
else:
fc=LoopBW/1.65
# Sweep CP PULSE values and find the first one that results in implementable LPF values for the desired PLL dynamics in Fast-Lock Mode
cp_pulse_vals=list(range(PULSE,64))
cp_pulse_vals.reverse()
# Estimate the value of KVCO for settings in the PLL Profile PROFILE
KVCO_avg=self.estim_KVCO(FIT_KVCO=FIT_KVCO, PROFILE=PROFILE)
# Read Feedback-Divider Modulus
N=self.getNDIV(PROFILE=PROFILE)
for cp_pulse in cp_pulse_vals:
# Calculate CP Current Value
Icp=ICT_CP_INIT*25.0e-6/16.0*cp_pulse
gamma=1.045
T31=0.1
LPF_IDEAL_VALS=self.calc_ideal_LPF(fc=fc, PM_deg=PM, Icp=Icp, KVCO_HzV=KVCO_avg, N=N, gamma=gamma, T31=T31)
(LPFvals_OK, LPF_REAL_VALS)=self.calc_real_LPF(LPF_IDEAL_VALS)
if (LPFvals_OK):
# Set CP Pulse Current to the optimized value
self.setCP_FLOCK(PULSE=cp_pulse, OFS=min(round(OFS*cp_pulse/PULSE),63), PROFILE=PROFILE)
# self.setCP_FLOCK(PULSE=cp_pulse, OFS=0, PROFILE=PROFILE)
# Set LPF Components to the optimized values
self.setLPF_FLOCK(C1=LPF_REAL_VALS['C1_CODE'], C2=LPF_REAL_VALS['C2_CODE'], R2=LPF_REAL_VALS['R2_CODE'], C3=LPF_REAL_VALS['C3_CODE'], R3=LPF_REAL_VALS['R3_CODE'], PROFILE=PROFILE)
if (dbgMode):
self.chip.log('PLL LoopBW Optimization finished successfully.')
self.chip.log('-'*45)
self.chip.log('\tIcp=%.2f uA' %(Icp/1.0e-6))
self.chip.log('\tUsed Value for KVCO=%.2f MHz/V' %(KVCO_avg/1.0e6))
self.chip.log('\tNDIV=%.2f' % (N))
self.chip.log('-'*45)
self.chip.log('')
self.chip.log('Ideal LPF Values')
self.chip.log('-'*45)
self.chip.log('\tC1= %.2f pF' %(LPF_IDEAL_VALS['C1']/1.0e-12))
self.chip.log('\tC2= %.2f pF' %(LPF_IDEAL_VALS['C2']/1.0e-12))
self.chip.log('\tR2= %.2f kOhm' %(LPF_IDEAL_VALS['R2']/1.0e3))
self.chip.log('\tC3= %.2f pF' %(LPF_IDEAL_VALS['C3']/1.0e-12))
self.chip.log('\tR3= %.2f kOhm' %(LPF_IDEAL_VALS['R3']/1.0e3))
self.chip.log('')
self.chip.setImmediateMode(Imd_Mode)
return True
self.chip.setImmediateMode(Imd_Mode)
return True
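# Hypothetical usage sketch for setFLOCK (pll is a PLL_METHODS instance bound
# to a configured chip object): with METHOD='SIMPLE' the fast-lock profile
# scales R2/R3 by BWEF and the CP pulse current by BWEF**2, as coded above.
#   pll.setFLOCK(BWEF=2.0, Ch_EN=['A'], METHOD='SIMPLE', PROFILE=0)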
def disablePLL(self, PROFILE=0):
"""Disables PLL Blocks, XBUF and VCO Bias"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Disable VCO-BIAS
self.setVCOBIAS(EN=0)
# Disable XBUF
self.setXBUF(EN=0)
# Disable PLL core circuits
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
reg_pll_enable['PLL_EN_VTUNE_COMP_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_LD_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_PFD_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_CP_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_CPOFS_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_VCO_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FFDIV_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FBDIV_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]=0
reg_pll_enable['PLL_SDM_CLK_EN_'+str(PROFILE)]=0
self.chip.setImmediateMode(Imd_Mode)
def enablePLL(self, PDIV2=False, IntN_Mode=False, XBUF_SLFBEN=1, PROFILE=0):
"""Enables VCO Bias, XBUF and PLL Blocks"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Define PLL Config
# Enable VCO Biasing Block
reg_pll_vreg=self.chip.getRegisterByName("PLL_VREG")
reg_pll_vreg['EN_VCOBIAS']=1
# Enable XBUF
# Sets SLFBEN, when TCXO is AC-coupled to LMS8001 IC REFIN
reg_cfg_xbuf=self.chip.getRegisterByName("PLL_CFG_XBUF")
reg_cfg_xbuf['PLL_XBUF_EN']=1
reg_cfg_xbuf['PLL_XBUF_SLFBEN']=XBUF_SLFBEN
# Define Desired PLL Profile
# Enable Blocks
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
reg_pll_enable['PLL_EN_VTUNE_COMP_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_LD_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_PFD_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_CP_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_VCO_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_FFDIV_'+str(PROFILE)]=1
reg_pll_enable['PLL_EN_FBDIV_'+str(PROFILE)]=1
if (PDIV2):
reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]=1
else:
reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]=0
reg_pll_enable['PLL_EN_FBDIV_'+str(PROFILE)]=1
if (IntN_Mode):
reg_pll_enable['PLL_SDM_CLK_EN_'+str(PROFILE)]=0
else:
reg_pll_enable['PLL_SDM_CLK_EN_'+str(PROFILE)]=1
self.chip.setImmediateMode(Imd_Mode)
def calc_fbdiv(self, F_TARGET, IntN_Mode, PDIV2):
"""Calculates Configuration Parameters for FB-DIV for targeted VCO Frequency"""
if (PDIV2):
N_FIX=2.0
else:
N_FIX=1.0
# Integer-N or Fractional-N Mode
if (IntN_Mode):
N_INT=round(F_TARGET/N_FIX/self.fRef)
N_FRAC=0
else:
N_INT=int(math.floor(F_TARGET/N_FIX/self.fRef))
N_FRAC=int(round(2**20*(F_TARGET/N_FIX/self.fRef-N_INT)))
return (N_INT, N_FRAC, N_FIX)
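# Worked example for calc_fbdiv (assumed fRef=40 MHz, PDIV2=False,
# fractional-N): F_TARGET=5.001 GHz gives F_TARGET/fRef=125.025, so
# N_INT=125 and N_FRAC=round(2**20*0.025)=26214, i.e. the SDM frequency
# resolution is fRef/2**20 (about 38 Hz at this reference).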
def vco_auto_ctune(self, F_TARGET, PROFILE=0, XBUF_SLFBEN=1, IntN_Mode=False, PDIV2=False, VTUNE_VCT=1, VCO_SEL_FORCE=0, VCO_SEL_INIT=2, FREQ_INIT_POS=7, FREQ_INIT=0, FREQ_SETTLING_N=4, VTUNE_WAIT_N=128, VCO_SEL_FREQ_MAX=250, VCO_SEL_FREQ_MIN=5, dbgMode=False):
"""Performs VCO Coarse Frequency Tuning Using On-Chip LMS8001 IC Calibration State-Machine"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Store the current PLL Profile Index before proceeding to the new one for configuration
PROFILE_OLD=self.chip.PLL.ACTIVE_PROFILE
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Determine the FB-DIV configuration for targeted VCO frequency and self.fRef reference frequency
(N_INT, N_FRAC, N_FIX)=self.calc_fbdiv(F_TARGET, IntN_Mode, PDIV2)
# The exact value of the targeted VCO frequency that will be used in the automatic coarse-tune algorithm
# If IntN-Mode is chosen, VCO will be locked to the closest integer multiple of reference frequency
FVCO_TARGET=N_FIX*(N_INT+N_FRAC/2.0**20)*self.fRef
# Calculate the fractional division words
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
# Enable PLL
self.enablePLL(PDIV2, IntN_Mode, XBUF_SLFBEN, PROFILE)
# Define VCO
reg_vco_cfg=self.chip.getRegisterByName("PLL_VCO_CFG_"+str(PROFILE))
# Set the VCO tuning voltage value during coarse-tuning
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
# Define SDM & FB-DIV Modulus
reg_sdm_cfg=self.chip.getRegisterByName("PLL_SDM_CFG_"+str(PROFILE))
if (IntN_Mode or N_FRAC==0):
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=1
else:
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=0
reg_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']=int(N_INT)
reg_fracmod_l=self.chip.getRegisterByName("PLL_FRACMODL_"+str(PROFILE))
reg_fracmod_l['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_fracmod_h=self.chip.getRegisterByName("PLL_FRACMODH_"+str(PROFILE))
reg_fracmod_h['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
# Reset PLL, Enable Calibration Mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=1
reg_pll_cfg['CTUNE_RES<1:0>']=3
# Write VCO AUTO-CAL Registers
reg_pll_cal_auto1=self.chip.getRegisterByName('PLL_CAL_AUTO1')
reg_pll_cal_auto1['VCO_SEL_FORCE']=VCO_SEL_FORCE
reg_pll_cal_auto1['VCO_SEL_INIT<1:0>']=VCO_SEL_INIT
reg_pll_cal_auto1['FREQ_INIT_POS<2:0>']=FREQ_INIT_POS
reg_pll_cal_auto1['FREQ_INIT<7:0>']=FREQ_INIT
reg_pll_cal_auto2=self.chip.getRegisterByName('PLL_CAL_AUTO2')
reg_pll_cal_auto2['FREQ_SETTLING_N<3:0>']=FREQ_SETTLING_N
reg_pll_cal_auto2['VTUNE_WAIT_N<7:0>']=VTUNE_WAIT_N
reg_pll_cal_auto3=self.chip.getRegisterByName('PLL_CAL_AUTO3')
reg_pll_cal_auto3['VCO_SEL_FREQ_MAX<7:0>']=VCO_SEL_FREQ_MAX
reg_pll_cal_auto3['VCO_SEL_FREQ_MIN<7:0>']=VCO_SEL_FREQ_MIN
# Start VCO Auto-Tuning Process
reg_pll_cal_auto0=self.chip.getRegisterByName('PLL_CAL_AUTO0')
reg_pll_cal_auto0['FCAL_START']=1
# Wait for VCO Auto-Tuning to Finish
while(True):
reg_pll_cal_auto0=self.chip.getRegisterByName('PLL_CAL_AUTO0')
if (reg_pll_cal_auto0['FCAL_START']==0):
break
# Evaluate Calibration Results
reg_pll_cal_auto0=self.chip.getRegisterByName('PLL_CAL_AUTO0')
if (reg_pll_cal_auto0['VCO_SEL_FINAL_VAL'] and reg_pll_cal_auto0['FREQ_FINAL_VAL']):
VCO_SEL_FINAL=reg_pll_cal_auto0['VCO_SEL_FINAL<1:0>']
VCO_FREQ_FINAL=reg_pll_cal_auto0['FREQ_FINAL<7:0>']
else:
self.chip.log("Calibration Failed!!!!")
return False
# Disable Calibration
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_CALIBRATION_EN']=0
# Write Calibration Results to the Dedicated VCO Registers in the Chosen Profile
reg_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=VCO_FREQ_FINAL
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']=VCO_SEL_FINAL
if (dbgMode):
self.chip.log("Calibration Done!!!")
self.chip.log("Configured PLL Profile=%d" %(PROFILE))
self.chip.log("Target VCO Frequency [MHz]= %.5f" %(FVCO_TARGET/1.0e6))
self.chip.log("Frequency Error [Hz]= %.2e" %(abs(FVCO_TARGET-F_TARGET)))
self.chip.log("VCO_SEL_FINAL= %d" %(VCO_SEL_FINAL))
self.chip.log("VCO_FREQ_FINAL= %d" %(VCO_FREQ_FINAL))
self.chip.log('')
self.chip.log('')
if (dbgMode):
self.chip.PLL.infoLOCK()
# Go back to the initial PLL profile
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE_OLD
self.chip.setImmediateMode(Imd_Mode)
return True
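# Hypothetical usage sketch for the on-chip auto-calibration (pll is a
# PLL_METHODS instance; fRef is whatever was passed to the constructor):
#   pll.vco_auto_ctune(F_TARGET=5.0e9, PROFILE=0, IntN_Mode=True, dbgMode=True)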
def vco_manual_cloop_tune(self, F_TARGET, PROFILE=0, XBUF_SLFBEN=1, IntN_Mode=False, PDIV2=False, dbgMode=False):
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Store the current PLL Profile Index before proceeding to the new one for configuration
PROFILE_OLD=self.chip.PLL.ACTIVE_PROFILE
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Determine the FB-DIV configuration for targeted VCO frequency and self.fRef reference frequency
(N_INT, N_FRAC, N_FIX)=self.calc_fbdiv(F_TARGET, IntN_Mode, PDIV2)
# The exact value of the targeted VCO frequency that will be used in the automatic coarse-tune algorithm
# If IntN-Mode is chosen, VCO will be locked to the closest integer multiple of reference frequency
FVCO_TARGET=N_FIX*(N_INT+N_FRAC/2.0**20)*self.fRef
# Calculate the fractional division words
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
# Enable PLL
self.enablePLL(PDIV2, IntN_Mode, XBUF_SLFBEN, PROFILE)
# Define VCO
reg_vco_cfg=self.chip.getRegisterByName("PLL_VCO_CFG_"+str(PROFILE))
# Define SDM & FB-DIV Modulus
reg_sdm_cfg=self.chip.getRegisterByName("PLL_SDM_CFG_"+str(PROFILE))
if (IntN_Mode or N_FRAC==0):
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=1
else:
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=0
reg_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']=int(N_INT)
reg_fracmod_l=self.chip.getRegisterByName("PLL_FRACMODL_"+str(PROFILE))
reg_fracmod_l['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_fracmod_h=self.chip.getRegisterByName("PLL_FRACMODH_"+str(PROFILE))
reg_fracmod_h['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
# Reset PLL, Enable Manual Calibration Mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=1
reg_pll_cfg['PLL_CALIBRATION_MODE']=1
reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# 1st step is to determine the correct VCO core for targeted frequency
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=2
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=15
sleep(0.01) # wait 10ms for PLL loop to settle
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
if (reg_pll_status['VTUNE_LOW']==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=1
else:
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=240
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
if (reg_pll_status['VTUNE_HIGH']==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=3
# 2nd step is to determine optimal cap bank configuration of selected VCO core for the targeted frequency value
freq_low=0
freq_high=255
freq=int((freq_high+freq_low+1)/2)
iter_num=0
while (freq_low<freq_high and iter_num<=8):
iter_num+=1
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
if (reg_pll_status['VTUNE_HIGH']==1):
freq_low=freq
freq=int((freq_high+freq_low+1)/2.0)
elif (reg_pll_status['VTUNE_LOW']==1):
freq_high=freq
freq=int((freq_high+freq_low+1)/2.0)
else:
if (reg_pll_status['PLL_LOCK']==1):
# Cap. bank configuration for which PLL is locked at the targeted frequency is found
# This is the starting point for the next step
break
else:
self.chip.log("Calibration Failed.")
return False
# Find the 1st cap. bank configuration above the initial one for which VTUNE_LOW=1
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
freq_init=freq
while(reg_pll_status['VTUNE_LOW']==0):
freq=freq+1
if (freq>=255):
break
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
freq_max=freq
# Find the 1st cap. bank configuration below the initial one for which VTUNE_HIGH=1
freq=freq_init
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
while(reg_pll_status['VTUNE_HIGH']==0):
freq=freq-1
if (freq<=1):
break
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
sleep(0.01)
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
# In some VCO_FREQ<7:0> regions, FVCO vs VCO_FREQ<7:0> is not monotonic
# Next line detects that condition and exits the loop to prevent false results
if (reg_pll_status['VTUNE_LOW']==1):
break
freq_min=freq
# Optimal cap. bank configuration is between freq_min and freq_max
# It can be arithmetic or geometric average of boundary values
#freq_opt=int(math.sqrt(freq_min*freq_max))
freq_opt=int((freq_min+freq_max)/2.0)
sel_opt=reg_pll_cal_man['VCO_SEL_MAN<1:0>']
# Exit the manual calibration mode, enter the normal PLL operation mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=0
reg_pll_cfg['PLL_CALIBRATION_MODE']=0
# Write the results of calibration to the dedicated registers inside the chosen PLL profile
reg_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=freq_opt
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']=sel_opt
if (dbgMode):
self.chip.log("")
self.chip.log("Closed-Loop Manual Calibration Done!!!")
self.chip.log("Configured PLL Profile= %d" %(PROFILE))
self.chip.log("Target VCO Frequency [MHz]= %.5f" % (FVCO_TARGET/1.0e6))
self.chip.log("Frequency Error [Hz]= %.2e" %(abs(FVCO_TARGET-F_TARGET)))
self.chip.log("VCO_SEL_FINAL= %d" %(sel_opt))
self.chip.log("VCO_FREQ_FINAL= %d" %(freq_opt))
self.chip.log("VCO_FREQ_INIT= %d" %(freq_init))
self.chip.log("VCO_FREQ_MIN= %d" %(freq_min))
self.chip.log("VCO_FREQ_MAX= %d" %(freq_max))
self.chip.log('')
self.chip.log('')
if (dbgMode):
self.chip.PLL.infoLOCK()
# Go back to the initial PLL profile
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE_OLD
self.chip.setImmediateMode(Imd_Mode)
return True
def vco_manual_ctune(self, F_TARGET, XBUF_SLFBEN=1, PROFILE=0, IntN_Mode=False, PDIV2=False, VTUNE_VCT=2, dbgMode=False):
"""Selects the tuning curve where VCO frequency @ VTUNE_VCT is closest to F_TARGET (greater/equal than targeted frequecy)"""
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Store the current PLL Profile Index before proceeding to the new one for configuration
PROFILE_OLD=self.chip.PLL.ACTIVE_PROFILE
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Determine the FB-DIV configuration for targeted VCO frequency and self.fRef reference frequency
(N_INT, N_FRAC, N_FIX)=self.calc_fbdiv(F_TARGET, IntN_Mode, PDIV2)
# The exact value of the targeted VCO frequency that will be used in the automatic coarse-tune algorithm
# If IntN-Mode is chosen, VCO will be locked to the closest integer multiple of reference frequency
FVCO_TARGET=N_FIX*(N_INT+N_FRAC/2.0**20)*self.fRef
# Calculate the fractional division words
N_FRAC_H=int(math.floor(N_FRAC/2**16))
N_FRAC_L=int(N_FRAC-N_FRAC_H*(2**16))
# Enable PLL
self.enablePLL(PDIV2, IntN_Mode, XBUF_SLFBEN, PROFILE)
# Define VCO
reg_vco_cfg=self.chip.getRegisterByName("PLL_VCO_CFG_"+str(PROFILE))
# Set the VCO tuning voltage value during coarse-tuning
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
# Define SDM & FB-DIV Modulus
reg_sdm_cfg=self.chip.getRegisterByName("PLL_SDM_CFG_"+str(PROFILE))
if (IntN_Mode or N_FRAC==0):
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=1
else:
reg_sdm_cfg['INTMOD_EN_'+str(PROFILE)]=0
reg_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']=int(N_INT)
reg_fracmod_l=self.chip.getRegisterByName("PLL_FRACMODL_"+str(PROFILE))
reg_fracmod_l['FRACMODL_'+str(PROFILE)+'<15:0>']=N_FRAC_L
reg_fracmod_h=self.chip.getRegisterByName("PLL_FRACMODH_"+str(PROFILE))
reg_fracmod_h['FRACMODH_'+str(PROFILE)+'<3:0>']=N_FRAC_H
# Reset PLL, Enable Calibration Mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['CTUNE_RES<1:0>']=3
reg_pll_cfg['PLL_CALIBRATION_EN']=1
reg_pll_cfg['PLL_CALIBRATION_MODE']=1
# Write to PLL_CAL_MAN Register
reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Enable Coarse-Tuning Frequency Comparator
reg_pll_cal_man['CTUNE_EN']=1
# Initial Value for VCO_SEL
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=2
# Find optimal VCO Core
# 24.02.2017. - overlap between VCO cores 2 and 3 is quite large, therefore value 240 for upper boundary can be decreased down to 200
#reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=240
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=200
reg_pll_cal_man['CTUNE_START']=1
# Start the coarse-tuning step
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
if (freq_low==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=3
else:
#reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=15
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=8
# Start the coarse-tuning step
reg_pll_cal_man['CTUNE_START']=1
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
if (freq_high==1):
reg_pll_cal_man['VCO_SEL_MAN<1:0>']=1
# Find the optimal VCO_FREQ value
bit_pos=7
bit_mask=0
freq=0
while (bit_pos>=0):
freq+=2**bit_pos
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']=freq
# Start the coarse-tuning step
reg_pll_cal_man['CTUNE_START']=1
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
bit_mask=(2**bit_pos)*(1-freq_low)
bit_val=(freq&bit_mask)>>bit_pos
if (bit_val==1):
freq-=2**bit_pos
if (bit_pos==0 and freq_low):
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']+=1
# In the last pass, set VTUNE_VCT to minimum value of 300 mV
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=0
# Start the coarse-tuning step
reg_pll_cal_man['CTUNE_START']=1
# Wait for CTUNE_STEP_DONE
#while (reg_pll_cal_man['CTUNE_STEP_DONE']==0):
# reg_pll_cal_man=self.chip.getRegisterByName('PLL_CAL_MAN')
# Read the result of coarse-tuning step
freq_high=reg_pll_cal_man['FREQ_HIGH']
freq_equal=reg_pll_cal_man['FREQ_EQUAL']
freq_low=reg_pll_cal_man['FREQ_LOW']
# Reset the frequency comparator
reg_pll_cal_man['CTUNE_START']=0
# Set-Back the VTUNE_VCT to the initial value
reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']=VTUNE_VCT
if (freq_high==1):
reg_pll_cal_man['VCO_FREQ_MAN<7:0>']-=1
bit_pos-=1
sel_opt=reg_pll_cal_man['VCO_SEL_MAN<1:0>']
freq_opt=reg_pll_cal_man['VCO_FREQ_MAN<7:0>']
# Disable Frequency Comparator
reg_pll_cal_man['CTUNE_EN']=0
# Exit the manual calibration mode, enter the normal PLL operation mode
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
reg_pll_cfg['PLL_CALIBRATION_EN']=0
reg_pll_cfg['PLL_CALIBRATION_MODE']=0
# Write the results of calibration to the dedicated registers inside the chosen PLL profile
reg_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
reg_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=freq_opt
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']=sel_opt
if (dbgMode):
self.chip.log("Open-Loop Manual Calibration Done!!!")
self.chip.log("Configured PLL Profile= %d" %(PROFILE))
self.chip.log("Target VCO Frequency [MHz]= %.5f" %(FVCO_TARGET/1.0e6))
self.chip.log("Frequency Error [Hz]= %.2e" %(abs(FVCO_TARGET-F_TARGET)))
self.chip.log("VCO_SEL_FINAL= %d" %(sel_opt))
self.chip.log("VCO_FREQ_FINAL= %d" %(freq_opt))
self.chip.log('')
self.chip.log('')
if (dbgMode):
self.chip.PLL.infoLOCK()
# Go back to the initial PLL profile
if (PROFILE_OLD!=PROFILE):
self.chip.PLL.ACTIVE_PROFILE=PROFILE_OLD
self.chip.setImmediateMode(Imd_Mode)
return True
def optimLPF(self, PM_deg=49.8, fc=80.0e3, PROFILE=0, dbgMode=False):
PM_rad=PM_deg*math.pi/180
wc=2*math.pi*fc
# Check VCO_SEL
reg_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
vco_sel=reg_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']
# Use Average for KVCO in Calculations
if (vco_sel==1):
KVCO_avg=44.404e6
elif (vco_sel==2):
KVCO_avg=33.924e6
elif (vco_sel==3):
KVCO_avg=41.455e6
else:
self.chip.log('Ext. LO selected in PLL_PROFILE %d.' % (PROFILE))
return None
# Read CP Current Value
reg_pll_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
PULSE=reg_pll_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
ICT_CP=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
Icp=ICT_CP*25.0e-6/16.0*PULSE
# Read Feedback-Divider Modulus
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
reg_pll_sdm_cfg=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
N_INT=reg_pll_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']
INTMOD_EN=reg_pll_sdm_cfg['INTMOD_EN_'+str(PROFILE)]
reg_pll_fracmodl=self.chip.getRegisterByName('PLL_FRACMODL_'+str(PROFILE))
N_FRACL=reg_pll_fracmodl['FRACMODL_'+str(PROFILE)+'<15:0>']
reg_pll_fracmodh=self.chip.getRegisterByName('PLL_FRACMODH_'+str(PROFILE))
N_FRACH=reg_pll_fracmodh['FRACMODH_'+str(PROFILE)+'<3:0>']
N_FRAC=N_FRACH*2**16+N_FRACL
N=N_INT+(1-INTMOD_EN)*N_FRAC*1.0/2.0**20
Kvco=2*math.pi*KVCO_avg
Kphase=Icp/(2*math.pi)
gamma=1.045
T31=0.1
# Approx. formula, Dean Banerjee
T1=(1.0/math.cos(PM_rad)-math.tan(PM_rad))/(wc*(1+T31))
T3=T1*T31
T2=gamma/((wc**2)*(T1+T3))
A0=(Kphase*Kvco)/((wc**2)*N)*math.sqrt((1+(wc**2)*(T2**2))/((1+(wc**2)*(T1**2))*(1+(wc**2)*(T3**2))))
A2=A0*T1*T3
A1=A0*(T1+T3)
C1=A2/(T2**2)*(1+math.sqrt(1+T2/A2*(T2*A0-A1)))
C3=(-(T2**2)*(C1**2)+T2*A1*C1-A2*A0)/((T2**2)*C1-A2)
C2=A0-C1-C3
R2=T2/C2
R3=A2/(C1*C3*T2)
if (dbgMode):
self.chip.log('Loop-Filter Optimization')
self.chip.log('-'*45)
self.chip.log('Input Parameters')
self.chip.log('\tIcp=%.2f uA' %(Icp/1.0e-6))
self.chip.log('\tKVCO=%.2f MHz/V' %(KVCO_avg/1.0e6))
self.chip.log('\tNDIV=%.2f' % (N))
self.chip.log('-'*45)
self.chip.log('Ideal LPF Values')
self.chip.log('\tC1= %.2f pF' %(C1/1.0e-12))
self.chip.log('\tC2= %.2f pF' %(C2/1.0e-12))
self.chip.log('\tR2= %.2f kOhm' %(R2/1.0e3))
self.chip.log('\tC3= %.2f pF' %(C3/1.0e-12))
self.chip.log('\tR3= %.2f kOhm' %(R3/1.0e3))
self.chip.log('')
self.chip.log('')
C1_CODE=int(round(C1/1.2e-12))
C2_CODE=int(round((C2-150.0e-12)/10.0e-12))
C3_CODE=int(round((C3-5.0e-12)/1.2e-12))
C1_CODE=int(min(max(C1_CODE,0),15))
C2_CODE=int(min(max(C2_CODE,0),15))
C3_CODE=int(min(max(C3_CODE,0),15))
R2_CODE=int(round(24.6e3/R2))
R3_CODE=int(round(14.9e3/R3))
R2_CODE=min(max(R2_CODE,1),15)
R3_CODE=min(max(R3_CODE,1),15)
self.setLPF(C1=C1_CODE, C2=C2_CODE, R2=R2_CODE, C3=C3_CODE, R3=R3_CODE, PROFILE=PROFILE)
def getNDIV(self, PROFILE=0):
"""
Returns float that represents PLL feedback division ratio for configuration in PLL profile PROFILE.
"""
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_pll_enable=self.chip.getRegisterByName('PLL_ENABLE_'+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
reg_fracmodl=self.chip.getRegisterByName('PLL_FRACMODL_'+str(PROFILE))
reg_fracmodh=self.chip.getRegisterByName('PLL_FRACMODH_'+str(PROFILE))
reg_pll_sdm_cfg=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
NINT=reg_pll_sdm_cfg['INTMOD_'+str(PROFILE)+'<9:0>']
NFRAC=reg_fracmodh['FRACMODH_'+str(PROFILE)+'<3:0>']*2**16+reg_fracmodl['FRACMODL_'+str(PROFILE)+'<15:0>']
self.chip.setImmediateMode(Imd_Mode)
return 2**PDIV2*1.0*(NINT*1.0+NFRAC*1.0/2**20)
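# Example of the division-ratio formula in the return above (hypothetical
# register contents): PDIV2=1, NINT=62, NFRAC=524288 gives
# N = 2**1 * (62 + 524288/2**20) = 2 * 62.5 = 125.0.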
def getNFFDIV(self, PROFILE=0):
"""
Returns float that represents PLL feedforward division ratio for configuration in PLL profile PROFILE.
"""
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
reg_pll_ff_cfg=self.chip.getRegisterByName('PLL_FF_CFG_'+str(PROFILE))
if (reg_pll_ff_cfg['FFDIV_SEL_'+str(PROFILE)]==0):
    N_FFDIV=1.0
else:
    N_FFDIV=2.0**int(reg_pll_ff_cfg['FF_MOD_'+str(PROFILE)+'<1:0>'])
self.chip.setImmediateMode(Imd_Mode)
return N_FFDIV
def getNIQDIV2(self, channel, PROFILE=0):
"""
Returns float that represents PLL IQ-DivBy2 division ratio for configuration in PLL profile PROFILE for desired LO channel.
"""
if (PROFILE>=8):
self.chip.log('Wrong PLL Profile Number. Valid values 0-7.')
return None
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
if (channel=='A' or channel==0):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT0_"+str(PROFILE)+"<2:0>"]&4)>>2
elif (channel=='B' or channel==1):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT1_"+str(PROFILE)+"<2:0>"]&4)>>2
elif (channel=='C' or channel==2):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT2_"+str(PROFILE)+"<2:0>"]&4)>>2
elif (channel=='D' or channel==3):
reg_pll_lodist_cfg=self.chip.getRegisterByName('PLL_LODIST_CFG_'+str(PROFILE))
IQ_EXP=(reg_pll_lodist_cfg["PLL_LODIST_FSP_OUT3_"+str(PROFILE)+"<2:0>"]&4)>>2
else:
self.chip.log('Wrong LO channel selected. Valid values: "A" or 0, "B" or 1, "C" or 2, "D" or 3.')
return None
self.chip.setImmediateMode(Imd_Mode)
return 2.0**(1.0-IQ_EXP)
def get_LOfreq(self, channel, PROFILE=0):
"""
Returns the exact value of LO frequency at chosen LO channel.
"""
if (PROFILE>=8):
self.chip.log('Wrong PLL Profile Number. Valid values 0-7.')
return None
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Get Feedback-Divider Division Modulus
N_FBDIV=self.getNDIV(PROFILE=PROFILE)
# Get Feedforward-Divider Division Modulus
N_FFDIV=self.getNFFDIV(PROFILE=PROFILE)
# Get IQ-DivBy2 Division Modulus
N_IQDIV2=self.getNIQDIV2(channel, PROFILE)
self.chip.setImmediateMode(Imd_Mode)
return (N_FBDIV)*self.fRef/N_FFDIV/N_IQDIV2
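# The return above encodes F_LO = N_FBDIV*fRef/(N_FFDIV*N_IQDIV2). Worked
# example (hypothetical values): fRef=40 MHz, N_FBDIV=125, N_FFDIV=2 and
# N_IQDIV2=2 give F_LO = 125*40e6/2/2 = 1.25 GHz.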
def centerVTUNE(self, PROFILE=0, dbgMode=False):
"""
This method should be used when the coarse-tuning algorithm converges to a subband at which the PLL locks with VTUNE_HIGH=1 or VTUNE_LOW=1.
If possible, it tweaks different VCO settings in order to get the PLL locked at the desired frequency with VTUNE_HIGH=VTUNE_LOW=0.
"""
# Set Immediate Mode for LMS8001 EVB
Imd_Mode=self.chip.getImmediateMode()
self.chip.setImmediateMode(True)
# Reset PLL
reg_pll_cfg=self.chip.getRegisterByName('PLL_CFG')
reg_pll_cfg['PLL_RSTN']=0
reg_pll_cfg['PLL_RSTN']=1
# Here set active PLL profile to the value given by argument PROFILE
self.chip.PLL.ACTIVE_PROFILE=PROFILE
# Get register with VTUNE_HIGH and VTUNE_LOW Indicators and PLL_LOCK bit
reg_pll_status=self.chip.getRegisterByName('PLL_CFG_STATUS')
# Get register with VCO_FREQ_n<7:0> word
#reg_pll_vco_freq=self.chip.getRegisterByName('PLL_VCO_FREQ_'+str(PROFILE))
# Get register with VDIV_SWVDD_n<1:0> word
reg_pll_vco_cfg=self.chip.getRegisterByName('PLL_VCO_CFG_'+str(PROFILE))
# Get Initial value for VCO_FREQ<1:0> word
#freq_init=reg_pll_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']
# Get Initial value for VDIV_SWVDD<1:0> word
vdiv_swvdd_init=reg_pll_vco_cfg['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']
#sel_init=reg_pll_vco_cfg['VCO_SEL_'+str(PROFILE)+'<1:0>']
# Get Initial Value for VCO_AMP<7:0> and VCO_AAC_EN
amp_init=reg_pll_vco_cfg['VCO_AMP_'+str(PROFILE)+'<6:0>']
aac_en_init=reg_pll_vco_cfg['VCO_AAC_EN_'+str(PROFILE)]
# Get VTUNE_HIGH, VTUNE_LOW, PLL_LOCK bit values
vtune_high=reg_pll_status['VTUNE_HIGH']
vtune_low=reg_pll_status['VTUNE_LOW']
pll_lock=reg_pll_status['PLL_LOCK']
if (vtune_high==0 and vtune_low==0):
if (dbgMode):
self.chip.log('Centering of VTUNE not needed.')
self.chip.setImmediateMode(Imd_Mode)
return True
swvdd_list=list(range(0,4))
swvdd_list.reverse()
amp_list=list(range(0,4))
amp_list.reverse()
# Try to center VTUNE by changing the Bias Voltages of the MOS switches in the Capacitor Bank and the VCO Amp control, and re-running the VCO Auto-Tuning State-Machine
reg_pll_vco_cfg['VCO_AAC_EN_'+str(PROFILE)]=1
for amp in amp_list:
reg_pll_vco_cfg['VCO_AMP_'+str(PROFILE)+'<6:0>']=amp
for vdiv_swvdd in swvdd_list:
if not (amp_init==amp and vdiv_swvdd_init==vdiv_swvdd):
reg_pll_vco_cfg['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']=vdiv_swvdd
# changed FREQ_INIT_POS to 5
# The VCO Auto-Tuning State Machine will not be re-run for each amp and swvdd combination
# The following two commands can be commented
#autotune_status=self.vco_auto_ctune(F_TARGET=F_TARGET, PROFILE=0, XBUF_SLFBEN=1, IntN_Mode=INTMOD_EN, PDIV2=PDIV2_EN, VTUNE_VCT=1, VCO_SEL_FORCE=1, VCO_SEL_INIT=sel_init, FREQ_INIT_POS=5, FREQ_INIT=freq_init, dbgMode=dbgMode)
#sleep(0.001)
vtune_high=reg_pll_status['VTUNE_HIGH']
vtune_low=reg_pll_status['VTUNE_LOW']
pll_lock=reg_pll_status['PLL_LOCK']
if (vtune_high==0 and vtune_low==0):
if (dbgMode):
self.chip.log('VTUNE voltage centered successfully.')
self.chip.log('New VCO control values: VCO_AMP<6:0>= %d, VCO_AAC_EN=1, VDIV_SWVDD<1:0>= %d' %(amp, vdiv_swvdd))
self.chip.log('')
self.chip.PLL.infoLOCK()
self.chip.setImmediateMode(Imd_Mode)
# Set back PLL_CAL_AUTO1 to starting values
# Uncomment these lines bellow if autotuning was invoked for each step of centering VTUNE
#reg_pll_cal_auto1['VCO_SEL_FORCE']=vco_sel_force_init
#reg_pll_cal_auto1['VCO_SEL_INIT<1:0>']=vco_sel_init
#reg_pll_cal_auto1['FREQ_INIT_POS<2:0>']=vco_freq_init_pos
#reg_pll_cal_auto1['FREQ_INIT<7:0>']=vco_freq_init
return True
if (dbgMode):
self.chip.log("Centering VTUNE failed.")
# Set back VDIV_SWVDD<1:0> and FREQ<7:0> to initial values
reg_pll_vco_cfg['VDIV_SWVDD_'+str(PROFILE)+'<1:0>']=vdiv_swvdd_init
#reg_pll_vco_freq['VCO_FREQ_'+str(PROFILE)+'<7:0>']=freq_init
# Set back VCO amplitude controls to initial values
reg_pll_vco_cfg['VCO_AMP_'+str(PROFILE)+'<6:0>']=amp_init
reg_pll_vco_cfg['VCO_AAC_EN_'+str(PROFILE)]=aac_en_init
# Set back the initial value of Immediate mode for LMS8001 EVB
self.chip.setImmediateMode(Imd_Mode)
return False
def setLOFREQ(self, F_LO, XBUF_SLFBEN=1, IQ=False, IntN_Mode=False, CTUNE_METHOD='OPEN-LOOP', PROFILE=0, dbgMode=False):
"""
This methods configures PLL-LODIST subsystems of LMS8001 IC to generate desired LO frequency.
Frequency Range Available with Quadrature Divider By 2 enabled:
260 MHz<=F_LO<=4.55 GHz,
Frequency Range Available with Quadrature Divider By 2 disabled:,
520 MHz<=F_LO<=9.11 GHz.
Frequencies bellow 520 MHz can only be synthesized using IQ generator.
CTUNE_METHOD='OPEN-LOOP' calls the vco_auto_tune method to tune VCO to the desired frequency
CTUNE_METHOD='OPEN-LOOP-MANUAL' calls the vco_manual_ctune method to tune VCO to the desired frequency
CTUNE_METHOD='CLOSE-LOOP' calls the vco_manual_cloop_tune method to tune VCO to the desired frequency
"""
if (IQ):
if not (260.0e6<=F_LO<=4.55e9):
self.chip.log("F_LO should be between 260 MHz and 4.55 GHz, with argument IQ=True. Failed to set LO Freq.")
return False
DIV2IQ=1
else:
if not (260.0e6<=F_LO<=9.11e9):
self.chip.log("F_LO should be between 260 MHz and 9.11 GHz. Failed to set LO Freq.")
return False
if (260e6<=F_LO<=520e6):
self.chip.log("F_LO values between 260 MHz and 520 MHz can only be generated with argument IQ=True. Failed to set LO Freq.")
return False
DIV2IQ=0
FFMOD=0
F_VCO=(2.0**DIV2IQ)*(2.0**FFMOD)*F_LO
while not (4.1e9<=F_VCO<=9.11e9):
FFMOD+=1
F_VCO=(2.0**DIV2IQ)*(2**FFMOD)*F_LO
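# Example of the frequency plan above (hypothetical input): F_LO=1 GHz with
# IQ=False gives DIV2IQ=0, and the loop raises FFMOD to 3 so that
# F_VCO = 2**3 * 1 GHz = 8 GHz falls inside the 4.1-9.11 GHz VCO range.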
if (dbgMode):
self.chip.log('')
self.chip.log('Setting LO Frequency')
self.chip.log('-'*60)
self.chip.log('Required FF-DIV Modulus: %d (%d)' %(2**FFMOD, FFMOD))
self.chip.log('IQ DIV2 Gen: %s' %(str(IQ)))
self.chip.log('Targeted VCO Frequency: %.5f GHz' %(F_VCO/1.0e9))
self.chip.log('IntN-Mode: %s' %(str(IntN_Mode)))
self.chip.log('-'*60)
self.chip.log('')
# Set FF-DIV Control Signals
self.setFFDIV(FFMOD=FFMOD, PROFILE=PROFILE)
if (CTUNE_METHOD=='OPEN-LOOP'):
# Read VCO AUTO-CAL Registers - use user defined values
reg_pll_cal_auto1=self.chip.getRegisterByName('PLL_CAL_AUTO1')
VCO_SEL_FORCE=reg_pll_cal_auto1['VCO_SEL_FORCE']
VCO_SEL_INIT=reg_pll_cal_auto1['VCO_SEL_INIT<1:0>']
FREQ_INIT_POS=reg_pll_cal_auto1['FREQ_INIT_POS<2:0>']
FREQ_INIT=reg_pll_cal_auto1['FREQ_INIT<7:0>']
reg_pll_cal_auto2=self.chip.getRegisterByName('PLL_CAL_AUTO2')
FREQ_SETTLING_N=reg_pll_cal_auto2['FREQ_SETTLING_N<3:0>']
VTUNE_WAIT_N=reg_pll_cal_auto2['VTUNE_WAIT_N<7:0>']
reg_pll_cal_auto3=self.chip.getRegisterByName('PLL_CAL_AUTO3')
VCO_SEL_FREQ_MAX=reg_pll_cal_auto3['VCO_SEL_FREQ_MAX<7:0>']
VCO_SEL_FREQ_MIN=reg_pll_cal_auto3['VCO_SEL_FREQ_MIN<7:0>']
# Read PLL_EN_FB_PDIV2_n value - use user defined values
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
# Read VTUNE_VCT_n value - use user defined values
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
VTUNE_VCT=reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']
ctune_status=self.vco_auto_ctune(F_TARGET=F_VCO, PROFILE=PROFILE, XBUF_SLFBEN=XBUF_SLFBEN, IntN_Mode=IntN_Mode, PDIV2=PDIV2, VTUNE_VCT=VTUNE_VCT, VCO_SEL_FORCE=VCO_SEL_FORCE, VCO_SEL_INIT=VCO_SEL_INIT, FREQ_INIT_POS=FREQ_INIT_POS, FREQ_INIT=FREQ_INIT, FREQ_SETTLING_N=FREQ_SETTLING_N, VTUNE_WAIT_N=VTUNE_WAIT_N, VCO_SEL_FREQ_MAX=VCO_SEL_FREQ_MAX, VCO_SEL_FREQ_MIN=VCO_SEL_FREQ_MIN, dbgMode=dbgMode)
elif (CTUNE_METHOD=='OPEN-LOOP-MANUAL'):
# Read PLL_EN_FB_PDIV2_n value - use user defined values
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
# Read VTUNE_VCT_n value - use user defined values
reg_pll_lpf_cfg2=self.chip.getRegisterByName('PLL_LPF_CFG2_'+str(PROFILE))
VTUNE_VCT=reg_pll_lpf_cfg2['VTUNE_VCT_'+str(PROFILE)+'<1:0>']
ctune_status=self.vco_manual_ctune(F_TARGET=F_VCO, XBUF_SLFBEN=XBUF_SLFBEN, PROFILE=PROFILE, IntN_Mode=IntN_Mode, PDIV2=PDIV2, VTUNE_VCT=VTUNE_VCT, dbgMode=dbgMode)
elif (CTUNE_METHOD=='CLOSE-LOOP'):
# Read PLL_EN_FB_PDIV2_n value - use user defined values
reg_pll_enable=self.chip.getRegisterByName("PLL_ENABLE_"+str(PROFILE))
PDIV2=reg_pll_enable['PLL_EN_FB_PDIV2_'+str(PROFILE)]
ctune_status=self.vco_manual_cloop_tune(F_VCO, PROFILE=PROFILE, XBUF_SLFBEN=XBUF_SLFBEN, IntN_Mode=IntN_Mode, PDIV2=PDIV2, dbgMode=dbgMode)
else:
if (dbgMode):
self.chip.log('Bad CTUNE_METHOD selected. Possible Options: OPEN-LOOP, OPEN-LOOP-MANUAL and CLOSE-LOOP.')
self.chip.log('Setting LO Frequency failed.')
return False
if not (self.chip.PLL.VTUNE_HIGH==0 and self.chip.PLL.VTUNE_LOW==0):
self.centerVTUNE(PROFILE=PROFILE, dbgMode=dbgMode)
if (ctune_status):
if (dbgMode):
self.chip.log('Setting LO Frequency finished successfully.')
return True
else:
self.chip.log('Setting LO Frequency failed.')
return False
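# Hypothetical usage sketch for setLOFREQ (pll is a PLL_METHODS instance):
#   pll.setLOFREQ(F_LO=2.4e9, IQ=True, CTUNE_METHOD='OPEN-LOOP', dbgMode=True)
# With IQ=True the quadrature divide-by-2 is enabled, so the VCO runs at
# 2*F_LO (times the FF-DIV modulus chosen by the loop above).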
def optim_PLL_LoopBW(self, PM_deg=49.8, fc=120.0e3, FIT_KVCO=False, PROFILE=0, dbgMode=False):
"""
This method finds the optimal PLL configuration: CP pulse current and LPF element values.
The optimization finds the maximal CP current which can result in the targeted PLL Loop BW using Loop-Filter elements that can be implemented in the LMS8001 IC.
The result should be the PLL configuration with the best phase-noise performance for the targeted loop bandwidth.
"""
# Get initial CP current settings
reg_pll_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
PULSE_INIT=reg_pll_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
OFS_INIT=reg_pll_cp_cfg0['OFS_'+str(PROFILE)+'<5:0>']
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
ICT_CP_INIT=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
# Pulse control word of CP inside LMS8001 will be swept from 63 to 4.
# First value that gives implementable PLL configuration will be used.
cp_pulse_vals=list(range(4,64))
cp_pulse_vals.reverse()
# Estimate the value of KVCO for settings in the PLL Profile PROFILE
KVCO_avg=self.estim_KVCO(FIT_KVCO=FIT_KVCO, PROFILE=PROFILE)
# Read Feedback-Divider Modulus
N=self.getNDIV(PROFILE=PROFILE)
#Kvco=2*math.pi*KVCO_avg
for cp_pulse in cp_pulse_vals:
# Calculate CP Current Value
Icp=ICT_CP_INIT*25.0e-6/16.0*cp_pulse
gamma=1.045
T31=0.1
LPF_IDEAL_VALS=self.calc_ideal_LPF(fc=fc, PM_deg=PM_deg, Icp=Icp, KVCO_HzV=KVCO_avg, N=N, gamma=gamma, T31=T31)
(LPFvals_OK, LPF_REAL_VALS)=self.calc_real_LPF(LPF_IDEAL_VALS)
if (LPFvals_OK):
# Set CP Pulse Current to the optimized value
self.setCP(PULSE=cp_pulse, OFS=OFS_INIT, ICT_CP=ICT_CP_INIT, PROFILE=PROFILE)
# Set LPF Components to the optimized values
self.setLPF(C1=LPF_REAL_VALS['C1_CODE'], C2=LPF_REAL_VALS['C2_CODE'], R2=LPF_REAL_VALS['R2_CODE'], C3=LPF_REAL_VALS['C3_CODE'], R3=LPF_REAL_VALS['R3_CODE'], PROFILE=PROFILE)
if (dbgMode):
self.chip.log('PLL LoopBW Optimization finished successfully.')
self.chip.log('-'*45)
self.chip.log('\tIcp=%.2f uA' %(Icp/1.0e-6))
self.chip.log('\tUsed Value for KVCO=%.2f MHz/V' %(KVCO_avg/1.0e6))
self.chip.log('\tNDIV=%.2f' % (N))
self.chip.log('-'*45)
self.chip.log('')
self.chip.log('Ideal LPF Values')
self.chip.log('-'*45)
self.chip.log('\tC1= %.2f pF' %(LPF_IDEAL_VALS['C1']/1.0e-12))
self.chip.log('\tC2= %.2f pF' %(LPF_IDEAL_VALS['C2']/1.0e-12))
self.chip.log('\tR2= %.2f kOhm' %(LPF_IDEAL_VALS['R2']/1.0e3))
self.chip.log('\tC3= %.2f pF' %(LPF_IDEAL_VALS['C3']/1.0e-12))
self.chip.log('\tR3= %.2f kOhm' %(LPF_IDEAL_VALS['R3']/1.0e3))
self.chip.log('')
return True
if (dbgMode):
self.chip.log('PLL LoopBW Optimization failed.')
self.chip.log('Some LPF component(s) are out of the implementable range.')
# Set back to initial settings of CP
self.setCP(PULSE=PULSE_INIT, OFS=OFS_INIT, ICT_CP=ICT_CP_INIT, PROFILE=PROFILE)
return False
def optimCPandLD(self, PROFILE=0, dbgMode=False):
"""This method checks if PLL works in fractional-N Mode. If this condition is true, it sets the offset CP current to optimize phase noise performance in FracN operation mode.
When CP offset current is used, it is recommended to set ICP_OFS ~ 1.9% of ICP_PULSE for Frac-N Mode, 1.2% of ICP_PULSE for Int-N Mode"""
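# Worked example of the sizing below (assumed register values; currents in uA):
# with ICT_CP=16 and PULSE=20, Icp=(25.0*16/16.0)*20=500 and Icp_OFS_step=25.0*0.25=6.25, so
#   Frac-N: Icp_OFS=1.9%*500=9.5 -> OFS=max(1, round(9.5/6.25))=2
#   Int-N:  Icp_OFS=1.2%*500=6.0 -> OFS=round(6.0/6.25)=1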
# Check operating mode of LMS8001 PLL
reg_pll_sdm_cfg=self.chip.getRegisterByName('PLL_SDM_CFG_'+str(PROFILE))
INTMOD_EN=reg_pll_sdm_cfg['INTMOD_EN_'+str(PROFILE)]
# Read CP current configuration
reg_pll_cp_cfg0=self.chip.getRegisterByName('PLL_CP_CFG0_'+str(PROFILE))
reg_pll_cp_cfg1=self.chip.getRegisterByName('PLL_CP_CFG1_'+str(PROFILE))
PULSE=reg_pll_cp_cfg0['PULSE_'+str(PROFILE)+'<5:0>']
OFS=reg_pll_cp_cfg0['OFS_'+str(PROFILE)+'<5:0>']
ICT_CP=reg_pll_cp_cfg1['ICT_CP_'+str(PROFILE)+'<4:0>']
# Read Lock Detector Threshold Voltage
LD_VCT=reg_pll_cp_cfg1['LD_VCT_'+str(PROFILE)+'<1:0>']
# Calculate OFS and LD_VCT optimal values
if (INTMOD_EN):
# Set Offset Current and Lock Detector Threshold for IntN Operating Mode
LD_VCT=2
Icp=(25.0*ICT_CP/16.0)*PULSE
# Calculate Target Value for Offset Current, as 1.2% of Pulse current value
Icp_OFS=1.2/100.0*Icp
Icp_OFS_step=(25.0*ICT_CP/16.0)*0.25
OFS=int(round(Icp_OFS/Icp_OFS_step))
else:
# Set Offset Current and Lock Detector Threshold for FracN Operating Mode
LD_VCT=0
Icp=(25.0*ICT_CP/16.0)*PULSE
# Calculate Target Value for Offset Current, as 1.9% of Pulse current value
Icp_OFS=1.9/100.0*Icp
Icp_OFS_step=(25.0*ICT_CP/16.0)*0.25
OFS=int(max(1, round(Icp_OFS/Icp_OFS_step)))
self.setCP(PULSE=PULSE, OFS=OFS, ICT_CP=ICT_CP, PROFILE=PROFILE)
self.setLD(LD_VCT=LD_VCT, PROFILE=PROFILE)
if (dbgMode):
self.chip.log('')
self.chip.log('Optimization of CP-OFS and LD-VCT Settings')
self.chip.log('-'*60)
self.chip.log('OFS=%d' %(OFS))
self.chip.log('LD_VCT=%d' %(LD_VCT))
self.chip.log('-'*60)
self.chip.log('')
return True
def configPLL(self, F_LO, IQ=False, autoConfXBUF=True, autoConfVREG=True, IntN_Mode=False, LoopBW=340.0e3, PM=55.0, FIT_KVCO=True, BWEF=1.0, FLOCK_N=200, SKIP_STEPS=[], CTUNE_METHOD='OPEN-LOOP', FLOCK_METHOD='SIMPLE', FLOCK_VCO_SPDUP=1, PROFILE=0, dbgMode=False):
"""This method does complete configuration of LMS8001 IC PLL in 5 steps:
1. 'VCO_CTUNE' STEP
Runs VCO Coarse Frequency Tuning and Sets FF-DIV Ratios needed for generation of F_LO frequency
CTUNE_METHOD='OPEN-LOOP' calls the vco_auto_ctune method to tune VCO to the desired frequency
CTUNE_METHOD='OPEN-LOOP-MANUAL' calls the vco_manual_ctune method to tune VCO to the desired frequency
CTUNE_METHOD='CLOSE-LOOP' calls the vco_manual_cloop_tune method to tune VCO to the desired frequency
2. 'OPTIM_PLL_LOOPBW' STEP
Optimizes PLL configuration for targeted LoopBW and Phase Margin (PM)
3. 'OPTIM_CP_OFFSET' STEP
Optimizes the CP offset current and Lock-Detector threshold settings depending on the chosen PLL operating mode
4. 'OPTIM_FAST_LOCK' STEP
Sets Fast-Lock Settings for PLL Profile PROFILE
"""
# Calculate Loop-Crossover frequency
fc=LoopBW/1.65
# Set VCO Bias Parameters
if (autoConfVREG):
self.setVCOBIAS(EN=1, BYP_VCOREG=1)
else:
self.chip.PLL.EN_VCOBIAS=1
# Set XBUF_SLFBEN Parameter
if (autoConfXBUF):
XBUF_SLFBEN=1
else:
XBUF_SLFBEN=self.chip.PLL.PLL_XBUF_SLFBEN
# Step 1 - Tune PLL to generate the F_LO frequency at the LODIST outputs, which should be manually enabled outside this method
if not ((1 in SKIP_STEPS) or ('VCO_CTUNE' in SKIP_STEPS)):
# Set VCO Core Parameters
self.setVCO(AMP=3, VDIV_SWVDD=2, PROFILE=PROFILE)
status1=self.setLOFREQ(F_LO, IQ=IQ, XBUF_SLFBEN=XBUF_SLFBEN, IntN_Mode=IntN_Mode, CTUNE_METHOD=CTUNE_METHOD, PROFILE=PROFILE, dbgMode=dbgMode)
if not (status1):
self.chip.log('PLL Tuning to F_LO=%.5f GHz failed.' %(F_LO/1.0e9))
return status1
else:
status1=True
# Step 2 - Optimize PLL settings for targeted LoopBW
if not ((2 in SKIP_STEPS) or ('OPTIM_PLL_LOOPBW' in SKIP_STEPS)):
status2=self.optim_PLL_LoopBW(PM_deg=PM, fc=fc, FIT_KVCO=FIT_KVCO, PROFILE=PROFILE, dbgMode=dbgMode)
if not (status2):
self.chip.log('Optimization of PLL at F_LO=%.5f GHz, LoopBW=%.2f kHz and PM=%.2f deg failed.' %(F_LO/1.0e9, LoopBW/1.0e3, PM))
else:
status2=True
# Step 3 - Optimize CP offset current and Lock Detector Threshold depending on the chosen operating mode (IntN or FracN)
if not ((3 in SKIP_STEPS) or ('OPTIM_CP_OFFSET' in SKIP_STEPS)):
status3=self.optimCPandLD(PROFILE=PROFILE, dbgMode=dbgMode)
if not (status3):
self.chip.log('Optimization of CP-OFS and LD-VCT at F_LO=%.5f GHz failed.' %(F_LO/1.0e9))
else:
status3=True
# Step 4 - Configure Fast-Lock Mode Registers
status4=True
if not ((4 in SKIP_STEPS) or ('OPTIM_FAST_LOCK' in SKIP_STEPS)):
if (BWEF>=1.0):
self.setFLOCK(BWEF, LoopBW=BWEF*LoopBW, PM=PM, FLOCK_N=FLOCK_N, Ch_EN=[], METHOD=FLOCK_METHOD, FIT_KVCO=FIT_KVCO, FLOCK_VCO_SPDUP=FLOCK_VCO_SPDUP, PROFILE=PROFILE)
return (status1 and status2 and status3 and status4)
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.14 on 2019-12-02 11:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('djautotask', '0030_task_phase'),
('djautotask', '0028_task_secondary_resources'),
]
operations = [
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import dataiku
from dataiku.customrecipe import *
import pandas as pd
import networkx as nx
from networkx.algorithms import bipartite
# Read recipe config
input_name = get_input_names_for_role('Input Dataset')[0]
output_name = get_output_names_for_role('Output Dataset')[0]
needs_eig = get_recipe_config()['eigenvector_centrality']
needs_clu = get_recipe_config()['clustering']
needs_tri = get_recipe_config()['triangles']
needs_clo = get_recipe_config()['closeness']
needs_pag = get_recipe_config()['pagerank']
needs_squ = get_recipe_config()['sq_clustering']
node_A=get_recipe_config()['node_A']
node_B=get_recipe_config()['node_B']
print get_recipe_config()
# Recipe input
df = dataiku.Dataset(input_name).get_dataframe()
print "[+] Dataset loaded..."
# Creating the bipartite graph
graph = nx.Graph()
graph.add_edges_from(zip(df[node_A].values.tolist(),df[node_B].values.tolist()))
print "[+] Created bipartite graph..."
# Always run: nodes degree
print "[+] Computing degree..."
deg = pd.Series(nx.degree(graph), name='degree')
stats = pd.DataFrame(list(deg),columns=['node_name','degree'])
if needs_eig:
print "[+] Computing eigenvector centrality..."
eig = pd.Series(nx.eigenvector_centrality_numpy(graph), name='eigenvector_centrality').reset_index()
eig.columns=['node_name','eigenvector_centrality']
if needs_clu:
print "[+] Computing clustering coefficient..."
clu = pd.Series(nx.clustering(graph), name='clustering_coefficient').reset_index()
clu.columns=['node_name','clustering_coefficient']
if needs_tri:
print "[+] Computing number of triangles..."
tri = pd.Series(nx.triangles(graph), name='triangles').reset_index()
tri.columns=['node_name','triangles']
if needs_clo:
print "[+] Computing closeness centrality..."
clo = pd.Series(nx.closeness_centrality(graph), name='closeness_centrality').reset_index()
clo.columns=['node_name','closeness_centrality']
if needs_pag:
print "[+] Computing pagerank..."
pag = pd.Series(nx.pagerank(graph), name='pagerank').reset_index()
pag.columns=['node_name','pagerank']
if needs_squ:
print "[+] Computing square clustering..."
squ = pd.Series(nx.square_clustering(graph), name='square_clustering_coefficient').reset_index()
squ.columns=['node_name','square_clustering_coefficient']
# Always run: connected components
_cco = {}
for i, c in enumerate(nx.connected_components(graph)):
for e in c:
_cco[e] = i
cco = pd.Series(_cco, name='connected_component_id').reset_index()
cco.columns=['node_name','connected_component_id']
# Putting all together
stats = stats.merge(cco,how='left')
if needs_eig:
stats = stats.merge(eig,how='left')
if needs_clu:
stats = stats.merge(clu,how='left')
if needs_tri:
stats = stats.merge(tri,how='left')
if needs_clo:
stats = stats.merge(clo,how='left')
if needs_pag:
stats = stats.merge(pag,how='left')
if needs_squ:
stats = stats.merge(squ,how='left')
_s = stats["connected_component_id"].value_counts().reset_index()
_s.columns = ['connected_component_id', 'connected_component_size']
stats = stats.merge(_s, on="connected_component_id", how="left")
# Recipe outputs
print "[+] Writing output dataset..."
output_dataset = dataiku.Dataset(output_name)
output_dataset.write_with_schema(stats)
|
nilq/baby-python
|
python
|
import json
import gmplot
import os
import random
import collections
# FIX FOR MISSING MARKERS
# 1. Open gmplot.py in Lib/site-packages/gmplot
# 2. Replace line 29 (self.coloricon.....) with the following two lines:
# self.coloricon = os.path.join(os.path.dirname(__file__), 'markers/%s.png')
# self.coloricon = self.coloricon.replace('/', '\\').replace('\\', '\\\\')
def create_range_map(user_json, date, start, end, position_json, show_trips):
nice_colors = collections.deque(['#006699', '#6e4673', '#649e0b', '#f6921e', '#d14343', '#00afaf', '#66bbed', '#95609c', '#a1c964', '#faaf40', '#e56f6f', '#46dbdb'])
start_set = False
gmap = None
# Go through selected trips.
for i in range(start, end + 1):
latt_list = []
long_list = []
transport = int(user_json['TripDocuments'][date]['TripList'][i]['Transport']['$numberInt'])
print(transport)
if transport == 0: # WALK
map_marker = '#000000'
elif transport == 1: # BIKE
map_marker = '#0000FF'
elif transport == 2: # CAR
map_marker = '#0000CD'
else: # TRANSIT
map_marker = '#00BFFF'
# Go through logs in a trip.
for log in user_json['TripDocuments'][date]['TripList'][i]['TripPositions']:
latt_list.append(float(log['Latitude']['$numberDouble']))
long_list.append(float(log['Longitude']['$numberDouble']))
# Set the start of the map at the first trip.
if not start_set:
gmap = gmplot.GoogleMapPlotter(latt_list[0], long_list[0], 13)
gmap.apikey = 'YOUR_GOOGLE_MAPS_API_KEY'  # placeholder; supply your own key rather than hard-coding a real one
start_set = True
for log in user_json['TripDocuments'][date]['TripList'][i]['TripPositions']:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), color=map_marker, title=f"SPEED: {log['Speed']}")
color = None
if len(nice_colors) == 0:
color = "#%06x" % random.randint(0, 0xFFFFFF)
else:
color = nice_colors.popleft()
gmap.plot(latt_list, long_list, color, edge_width=5)
'''# Add markers for trip.
if show_trips:
for idx, log in enumerate(user_json['TripDocuments'][date]['TripList'][i]['TripPositions']):
if idx == 0:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), '#7FFF00', title=f'TRIP: {str(i)} START')
elif idx == len(user_json['TripDocuments'][date]['TripList'][i]['TripPositions']) + 1:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), '#A52A2A', title=f'TRIP: {str(i)} END')
else:
gmap.marker(float(log['Latitude']['$numberDouble']), float(log['Longitude']['$numberDouble']), '#4682B4')
'''
# Add markers for positions.
if not show_trips:
for pos in position_json:
gmap.marker(float(pos['Latitude']['$numberDouble']), float(pos['Longitude']['$numberDouble']), '#FFA500')
gmap.draw(os.path.join(os.getcwd(), 'plots', f'result.html'))
def generate_map_gui():
# Load JSON.
collection = open('raw.json', 'r').readlines()
users = []
for user in collection:
users.append(json.loads(user))
# Select user.
print('\nShowing users:')
for idx, user in enumerate(users):
print(f"[{idx}]: {user['_id']}")
user_select = int(input('Please select a user: '))
while user_select > len(users) - 1:
print('Wrong input!')
user_select = int(input('Please select a user: '))
# Show trip date overview.
print(f"\nShowing dates for user: {users[user_select]['_id']}")
for idx, date in enumerate(users[user_select]['TripDocuments']):
print(f"[{idx}]: {date['_id']}")
# Select date.
date_select = int(input('Please select a date: '))
while date_select > len(users[user_select]['TripDocuments']) - 1:
print('Wrong input!')
date_select = int(input('Please select a date: '))
# Show trip overview for chosen date.
print(f"\nShowing trips for date: {users[user_select]['TripDocuments'][date_select]['_id']}")
for idx, trip in enumerate(users[user_select]['TripDocuments'][date_select]['TripList']):
print(f"[{idx}]: {trip['_id']}")
# Range select
print('\nPlease select a range of trips to map. Give the same number twice to only map one.')
start_range = int(input('Start range: '))
end_range = int(input('End range: '))
pos_json = None
'''# Get positions for user.
pos_collection = open('rawPos.json', 'r').readlines()
pos_json = None
for user_positions in pos_collection:
user_pos_data = json.loads(user_positions)
if user_pos_data['_id'] == users[user_select]['_id']:
# Get pos doc for selected date.
for doc in user_pos_data['Documents']:
if doc['_id'] == users[user_select]['TripDocuments'][date_select]['_id']:
pos_json = doc['PositionList']'''
create_range_map(users[user_select], date_select, start_range, end_range, pos_json, True)
print('\nMap created in plots/result.html')
if __name__ == '__main__':
generate_map_gui()
|
nilq/baby-python
|
python
|
import pytest
from cx_const import Number, StepperDir
from cx_core.stepper import MinMax, Stepper, StepperOutput
class FakeStepper(Stepper):
def __init__(self) -> None:
super().__init__(MinMax(0, 1), 1)
def step(self, value: Number, direction: str) -> StepperOutput:
return StepperOutput(next_value=0, next_direction=None)
@pytest.mark.parametrize(
"direction_input, previous_direction, expected_direction",
[
(StepperDir.UP, StepperDir.UP, StepperDir.UP),
(StepperDir.DOWN, StepperDir.DOWN, StepperDir.DOWN),
(StepperDir.UP, StepperDir.DOWN, StepperDir.UP),
(StepperDir.DOWN, StepperDir.UP, StepperDir.DOWN),
(StepperDir.TOGGLE, StepperDir.UP, StepperDir.DOWN),
(StepperDir.TOGGLE, StepperDir.DOWN, StepperDir.UP),
],
)
def test_get_direction(
direction_input: str, previous_direction: str, expected_direction: str
) -> None:
stepper = FakeStepper()
stepper.previous_direction = previous_direction
direction_output = stepper.get_direction(0, direction_input)
assert direction_output == expected_direction
@pytest.mark.parametrize(
"direction_input, expected_sign",
[
(StepperDir.UP, 1),
(StepperDir.DOWN, -1),
(StepperDir.UP, 1),
(StepperDir.DOWN, -1),
],
)
def test_sign(direction_input: str, expected_sign: int) -> None:
stepper = FakeStepper()
sign_output = stepper.sign(direction_input)
assert sign_output == expected_sign
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.5 on 2019-01-25 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_shoppingcart'),
]
operations = [
migrations.AddField(
model_name='shoppingcart',
name='total_price',
field=models.IntegerField(default=-1),
),
migrations.AddField(
model_name='shoppingcart',
name='user_address',
field=models.CharField(default='unknown', max_length=200),
),
migrations.AddField(
model_name='shoppingcart',
name='user_name',
field=models.CharField(default='unknown', max_length=30),
),
]
|
nilq/baby-python
|
python
|
import numpy as np
def Adam_Opt(X_0, function, gradient_function, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, max_iter=500,
disp=False, tolerance=1e-5, store_steps=False):
"""
To be passed into Scipy Minimize method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize
https://github.com/sagarvegad/Adam-optimizer/blob/master/Adam.py
https://arxiv.org/abs/1412.6980
Args:
function (callable): Stochastic objective function
gradient_function (callable): function to obtain gradient of Stochastic objective
X_0 (np.array): Initial guess
learning_rate (float): Step size
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
epsilon (float): Constant (small) for numerical stability
Attributes:
t (int): Timestep
m_t (float): first moment vector
v_t (float): second moment vector
"""
input_vectors=[]
output_results=[]
# initialization
t=0 # timestep
m_t = 0 #1st moment vector
v_t = 0 #2nd moment vector
X_t = X_0
while(t<max_iter):
if store_steps is True:
input_vectors.append(X_t)
output_results.append(function(X_t))
t+=1
g_t = gradient_function(X_t)
m_t = beta_1*m_t + (1-beta_1)*g_t #updates the moving averages of the gradient (biased first moment estimate)
v_t = beta_2*v_t + (1-beta_2)*(g_t*g_t) #updates the moving averages of the squared gradient (biased 2nd
# raw moment estimate)
m_cap = m_t / (1 - (beta_1 ** t)) # Compute bias-corrected first moment estimate
v_cap = v_t / (1 - (beta_2 ** t)) # Compute bias-corrected second raw moment estimate
X_t_prev = X_t
X_t = X_t_prev - (learning_rate * m_cap) / (np.sqrt(v_cap) + epsilon) # updates the parameters
if disp is True:
output = function(X_t)
print('step: {} input:{} obj_funct: {}'.format(t, X_t, output))
if np.isclose(X_t, X_t_prev, atol=tolerance).all(): # convergence check
break
if store_steps is True:
return X_t, input_vectors, output_results
else:
return X_t
if __name__ == '__main__':
def Function_to_minimise(input_vect, const=2):
# z = x^2 + y^2 + constant
x = input_vect[0]
y = input_vect[1]
z = x ** 2 + y ** 2 + const
return z
def calc_grad(input_vect):
# gradient of z = x^2 + y^2 + const
x = input_vect[0]
y = input_vect[1]
dz_dx = 2 * x
dz_dy = 2 * y
return np.array([dz_dx, dz_dy])
X0 = np.array([1,2])
GG = Adam_Opt(X0, Function_to_minimise, calc_grad,
learning_rate=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
print(Function_to_minimise(GG))
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
x = np.arange(-10, 10, 0.25)
y = np.arange(-10, 10, 0.25)
const = 2
x, y = np.meshgrid(x, y)
z = x ** 2 + y ** 2 + const
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.viridis)
plt.show()
print('Minimum should be:', 2.0)
### for scipy ###
# (fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
# bounds=bounds, constraints=constraints,
# callback=callback, **options)
def fmin_ADAM(f, x0, fprime=None, args=(), gtol=1e-5,
maxiter=500, full_output=0, disp=1, maxfev=500,
retall=0, callback=None, learning_rate = 0.001,
beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8):
"""
Minimize a function using the Adam algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
learning_rate : float
Adam step size.
"""
res = _adam_minimize(f, x0, args=args, jac=fprime, callback=callback,
xtol=gtol, maxiter=maxiter,
disp=disp, maxfev=maxfev, return_all=retall,
learning_rate=learning_rate,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
if full_output:
retlist = (res['x'], res['fun'], #res['jac'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
from scipy.optimize.optimize import OptimizeResult, wrap_function, _status_message, _check_unknown_options
from numpy import squeeze
# _minimize_powell
def _adam_minimize(func, x0, args=(), jac=None, callback=None,
xtol=1e-8, maxiter=None, maxfev=None,
disp=False, return_all=False,
learning_rate = 0.001,
beta_1=0.9, beta_2=0.999, epsilon=1e-8, **unknown_options):
"""
Minimization of a scalar function of one or more variables using the
Adam algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Absolute tolerance on the parameter update used for the convergence check.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for the Adam method')
if maxfev is None:
maxfev = maxiter + 10
_, func = wrap_function(func, args)
retall = return_all
if retall:
allvecs = [x0]
all_jac_vecs=[jac(x0)]
fval = squeeze(func(x0))
# initialization
t=0 # timestep
m_t = 0 # 1st moment vector
v_t = 0 # 2nd moment vector
X_t = x0
fcalls=0
iter = 0
while True:
# ADAM Algorithm
t+=1
g_t = jac(X_t)
m_t = beta_1*m_t + (1-beta_1)*g_t #updates the moving averages of the gradient (biased first moment estimate)
v_t = beta_2*v_t + (1-beta_2)*(g_t*g_t) #updates the moving averages of the squared gradient (biased 2nd
# raw moment estimate)
m_cap = m_t / (1 - (beta_1 ** t)) # Compute bias-corrected first moment estimate
v_cap = v_t / (1 - (beta_2 ** t)) # Compute bias-corrected second raw moment estimate
X_t_prev = X_t
X_t = X_t_prev - (learning_rate * m_cap) / (np.sqrt(v_cap) + epsilon) # updates the parameters
# Adam END
# updates and termination criteria
fcalls+=1
fval = func(X_t)
iter += 1
if callback is not None:
callback(X_t)
if retall:
allvecs.append(X_t)
all_jac_vecs.append(g_t)
if fcalls >= maxfev: # max function evaluation
break
if iter >= maxiter: # max no. of iterations
break
if np.isclose(X_t, X_t_prev, atol=xtol).all(): # convergence check
break
warnflag = 0
if fcalls >= maxfev:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
elif np.isnan(fval) or np.isnan(X_t).any():
warnflag = 3
msg = _status_message['nan']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls)
result = OptimizeResult(fun=fval, nit=iter, nfev=fcalls,
status=warnflag, success=(warnflag == 0),
message=msg, x=X_t)
if retall:
result['allvecs'] = allvecs
result['jac'] = all_jac_vecs
return result
if __name__ == '__main__':
def Function_to_minimise(input_vect, const=2):
# z = x^2 + y^2 + constant
x = input_vect[0]
y = input_vect[1]
z = x ** 2 + y ** 2 + const
return z
def calc_grad(input_vect):
# gradient of z = x^2 + y^2 + const
x = input_vect[0]
y = input_vect[1]
dz_dx = 2 * x
dz_dy = 2 * y
return np.array([dz_dx, dz_dy])
X0 = np.array([1,2])
x = fmin_ADAM(Function_to_minimise, X0, fprime=calc_grad, learning_rate=1, maxiter=800, full_output=1, gtol=1e-5) #retall=1)
print(x)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import options
from sentry.api.bases.project import ProjectEndpoint
from sentry.models import ProjectKey
class ProjectDocsEndpoint(ProjectEndpoint):
def get(self, request, project):
data = options.get('sentry:docs')
project_key = ProjectKey.get_default(project)
context = {
'platforms': data['platforms'],
}
if project_key:
context['dsn'] = project_key.dsn_private
context['dsnPublic'] = project_key.dsn_public
return Response(context)
|
nilq/baby-python
|
python
|
import tensorflow as tf
from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices,\
make_d4_z2_indices, make_d4_p4m_indices, flatten_indices
from groupy.gconv.tensorflow_gconv.transform_filter import transform_filter_2d_nchw, transform_filter_2d_nhwc
def gconv2d(input, filter, strides, padding, gconv_indices, gconv_shape_info,
use_cudnn_on_gpu=None, data_format='NHWC', name=None):
"""
Tensorflow implementation of the group convolution.
This function has the same interface as the standard convolution nn.conv2d, except for two new parameters,
gconv_indices and gconv_shape_info. These can be obtained from gconv2d_util(), and are described below
:param input: a tensor with (batch, height, width, in channels) axes.
:param filter: a tensor with (ksize, ksize, in channels * in transformations, out channels) axes.
The shape for filter can be obtained from gconv2d_util().
:param strides: A list of ints. 1-D of length 4. The stride of the sliding window for each dimension of input.
Must be in the same order as the dimension specified with format.
:param padding: A string from: "SAME", "VALID". The type of padding algorithm to use.
:param gconv_indices: indices used in the filter transformation step of the G-Conv.
Can be obtained from gconv2d_util() or using a command like flatten_indices(make_d4_p4m_indices(ksize=3)).
:param gconv_shape_info: a tuple containing
(num output channels, num output transformations, num input channels, num input transformations, kernel size)
Can be obtained from gconv2d_util()
:param use_cudnn_on_gpu: an optional bool. Defaults to True.
:param data_format: the order of axes. Currently only NCHW is supported
:param name: a name for the operation (optional)
:return: tensor with (batch, out channels, height, width) axes.
"""
if data_format != 'NHWC':
raise NotImplementedError('Currently only NHWC data_format is supported. Got:' + str(data_format))
# Transform the filters
transformed_filter = transform_filter_2d_nhwc(w=filter, flat_indices=gconv_indices, shape_info=gconv_shape_info)
# Convolve input with transformed filters
conv = tf.nn.conv2d(input=input, filter=transformed_filter, strides=strides, padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, name=name)
return conv
def gconv2d_util(h_input, h_output, in_channels, out_channels, ksize):
"""
Convenience function for setting up static data required for the G-Conv.
This function returns:
1) an array of indices used in the filter transformation step of gconv2d
2) shape information required by gconv2d
3) the shape of the filter tensor to be allocated and passed to gconv2d
:param h_input: one of ('Z2', 'C4', 'D4'). Use 'Z2' for the first layer. Use 'C4' or 'D4' for later layers.
:param h_output: one of ('C4', 'D4'). What kind of transformations to use (rotations or roto-reflections).
The choice of h_output of one layer should equal h_input of the next layer.
:param in_channels: the number of input channels. Note: this refers to the number of (3D) channels on the group.
The number of 2D channels will be 1, 4, or 8 times larger, depending the value of h_input.
:param out_channels: the number of output channels. Note: this refers to the number of (3D) channels on the group.
The number of 2D channels will be 1, 4, or 8 times larger, depending on the value of h_output.
:param ksize: the spatial size of the filter kernels (typically 3, 5, or 7).
:return: gconv_indices, gconv_shape_info, w_shape
"""
if h_input == 'Z2' and h_output == 'C4':
gconv_indices = flatten_indices(make_c4_z2_indices(ksize=ksize))
nti = 1
nto = 4
elif h_input == 'C4' and h_output == 'C4':
gconv_indices = flatten_indices(make_c4_p4_indices(ksize=ksize))
nti = 4
nto = 4
elif h_input == 'Z2' and h_output == 'D4':
gconv_indices = flatten_indices(make_d4_z2_indices(ksize=ksize))
nti = 1
nto = 8
elif h_input == 'D4' and h_output == 'D4':
gconv_indices = flatten_indices(make_d4_p4m_indices(ksize=ksize))
nti = 8
nto = 8
else:
raise ValueError('Unknown (h_input, h_output) pair:' + str((h_input, h_output)))
w_shape = (ksize, ksize, in_channels * nti, out_channels)
gconv_shape_info = (out_channels, nto, in_channels, nti, ksize)
return gconv_indices, gconv_shape_info, w_shape
def gconv2d_addbias(input, bias, nti=8):
"""
In a G-CNN, the feature maps are interpreted as functions on a group G instead of functions on the plane Z^2.
Just like how we use a single scalar bias per 2D feature map, in a G-CNN we should use a single scalar bias per
G-feature map. Failing to do this breaks the equivariance and typically hurts performance.
A G-feature map usually consists of a number (e.g. 4 or 8) adjacent channels.
This function will add a single bias vector to a stack of feature maps that has e.g. 4 or 8 times more 2D channels
than G-channels, by replicating the bias across adjacent groups of 2D channels.
:param input: tensor of shape (n, h, w, ni * nti), where n is the batch dimension, (h, w) are the height and width,
ni is the number of input G-channels, and nti is the number of transformations in H.
:param bias: tensor of shape (ni,)
:param nti: number of transformations, e.g. 4 for C4/p4 or 8 for D4/p4m.
:return: input with bias added
"""
# Minimal sketch, assuming static shapes and that the nti transformation
# channels of each G-feature map are adjacent along the channel axis
# (consistent with the docstring above).
_, h, w, c = input.get_shape().as_list()
input = tf.reshape(input, [-1, h, w, c // nti, nti])
input = input + tf.reshape(bias, [1, 1, 1, -1, 1])
return tf.reshape(input, [-1, h, w, c])
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.9 on 2019-12-05 20:27
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Curso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('create_at', models.DateTimeField(auto_now_add=True)),
('start', models.DateTimeField(blank=True, default=datetime.datetime(2019, 12, 5, 20, 27, 55, 729200, tzinfo=utc))),
('end', models.DateTimeField(blank=True, null=True)),
('document', models.FileField(blank=True, upload_to='documents/')),
],
),
migrations.CreateModel(
name='Interfaz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('document', models.FileField(blank=True, null=True, upload_to='documents/')),
('photo', models.ImageField(blank=True, null=True, upload_to='fotos/')),
('curso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interfaz', to='cursos.Curso')),
],
),
]
|
nilq/baby-python
|
python
|
class SeedsNotFound(Exception):
pass
class ZoneNotFound(Exception):
pass
class TooManyZones(Exception):
pass
|
nilq/baby-python
|
python
|
"""
@author: acfromspace
"""
"""
Notes:
Find the most common word from a paragraph that can't be a banned word.
"""
from collections import Counter
class Solution:
def most_common_word(self, paragraph: str, banned: [str]) -> str:
unbanned = []
for character in "!?',;.":
paragraph = paragraph.replace(character, " ")
paragraph_list = paragraph.lower().split()
for word in paragraph_list:
if word not in banned:
unbanned.append(word)
# most_common(1) returns a list containing the single (word, count) pair; take the word.
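# e.g. Counter(['a', 'b', 'a']).most_common(1) == [('a', 2)], so [0][0] picks the word itself.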
return Counter(unbanned).most_common(1)[0][0]
test = Solution()
paragraph = "kraq and jeff are talking about the problems with kraq jeff JEFF KRAQ are"
banned = "jeff kraq"
print("most_common_word():", test.most_common_word(paragraph, banned))
"""
Time complexity: O(p+b). "p" is the size of the `paragraph` and "b" is the size of `banned`.
Space complexity: O(p+b). To store the `paragraph_list` and the `banned` data structures.
"""
|
nilq/baby-python
|
python
|
import itertools
import os
import random
import pytest
from polyswarmd.utils.bloom import BloomFilter
@pytest.fixture
def log_entries():
def _mk_address():
return os.urandom(20)
def _mk_topic():
return os.urandom(32)
return [(_mk_address(), [_mk_topic()
for _ in range(1, random.randint(0, 4))])
for _ in range(1, random.randint(0, 30))]
def check_bloom(bloom, log_entries):
for address, topics in log_entries:
assert address in bloom
for topic in topics:
assert topic in bloom
def test_bloom_filter_add_method(log_entries):
bloom = BloomFilter()
for address, topics in log_entries:
bloom.add(address)
for topic in topics:
bloom.add(topic)
check_bloom(bloom, log_entries)
def test_bloom_filter_extend_method(log_entries):
bloom = BloomFilter()
for address, topics in log_entries:
bloom.extend([address])
bloom.extend(topics)
check_bloom(bloom, log_entries)
def test_bloom_filter_from_iterable_method(log_entries):
bloomables = itertools.chain.from_iterable(
itertools.chain([address], topics) for address, topics in log_entries
)
bloom = BloomFilter.from_iterable(bloomables)
check_bloom(bloom, log_entries)
def test_casting_to_integer():
bloom = BloomFilter()
assert int(bloom) == 0
bloom.add(b'value 1')
bloom.add(b'value 2')
assert int(bloom) == int(
'63119152483043774890037882090529841075600744123634985501563996'
'49538536948165624479433922134690234594539820621615046612478986'
'72305890903532059401028759565544372404512800814146245947429340'
'89705729059810916441565944632818634262808769353435407547341248'
'57159120012171916234314838712163868338766358254974260070831608'
'96074485863379577454706818623806701090478504217358337630954958'
'46332941618897428599499176135798020580888127915804442383594765'
'16518489513817430952759084240442967521334544396984240160630545'
'50638819052173088777264795248455896326763883458932483359201374'
'72931724136975431250270748464358029482656627802817691648'
)
def test_casting_to_binary():
bloom = BloomFilter()
assert bin(bloom) == '0b0'
bloom.add(b'value 1')
bloom.add(b'value 2')
assert bin(bloom) == (
'0b1000000000000000000000000000000000000000001000000100000000000000'
'000000000000000000000000000000000000000000000010000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000001000000'
'000000000000000000000000000000000000000000000000000000000000000010'
'000000000000000000000000000000000000000100000000000000000000001000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000010000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000010000000000001000000000000001000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000001000000000000000000000000000000000000000000000000000100000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000100000000000000000'
'00000000000000000000000000000000000001000000000000000000000000'
)
def test_combining_filters():
b1 = BloomFilter()
b2 = BloomFilter()
b1.add(b'a')
b1.add(b'b')
b1.add(b'c')
b2.add(b'd')
b2.add(b'e')
b2.add(b'f')
b1.add(b'common')
b2.add(b'common')
assert b'a' in b1
assert b'b' in b1
assert b'c' in b1
assert b'a' not in b2
assert b'b' not in b2
assert b'c' not in b2
assert b'd' in b2
assert b'e' in b2
assert b'f' in b2
assert b'd' not in b1
assert b'e' not in b1
assert b'f' not in b1
assert b'common' in b1
assert b'common' in b2
b3 = b1 | b2
assert b'a' in b3
assert b'b' in b3
assert b'c' in b3
assert b'd' in b3
assert b'e' in b3
assert b'f' in b3
assert b'common' in b3
b4 = b1 + b2
assert b'a' in b4
assert b'b' in b4
assert b'c' in b4
assert b'd' in b4
assert b'e' in b4
assert b'f' in b4
assert b'common' in b4
b5 = BloomFilter(int(b1))
b5 |= b2
assert b'a' in b5
assert b'b' in b5
assert b'c' in b5
assert b'd' in b5
assert b'e' in b5
assert b'f' in b5
assert b'common' in b5
b6 = BloomFilter(int(b1))
b6 += b2
assert b'a' in b6
assert b'b' in b6
assert b'c' in b6
assert b'd' in b6
assert b'e' in b6
assert b'f' in b6
assert b'common' in b6
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Unit test package for fv3config."""
|
nilq/baby-python
|
python
|
from SimPy.SimulationRT import Simulation, Process, hold
import numpy as np
import scipy as sp
import scipy.io as spio
import networkx as nx
import matplotlib.pyplot as plt
import ConfigParser
from pylayers.util.project import *
import pylayers.util.pyutil as pyu
from pylayers.network.network import Network, Node, PNetwork
from pylayers.gis.layout import Layout
import copy
import pickle
import pdb
import os
class Save(Process):
"""
Save all variables of a simulnet simulation.
Save process can be setup with the save.ini file from /<project>/ini
Attributes
----------
net : pylayers.network.network()
sim : SimPy.SimulationRT()
savemat : dictionary with all the saved results from a simulation
( obtained after self.export() )
Methods
-------
run ():
save the current simulation every k steps (set up in save.ini)
load():
Load saved results of a simulation. file extension .pck
export(etype) :
export the results into the etype format.
available format :
- 'python'
- 'matlab'
"""
def __init__(self, **args):
defaults = {'L': None,
'net': None,
'sim': None}
## initialize attributes
for key, value in defaults.items():
if key in args:
setattr(self, key, args[key])
else:
setattr(self, key, value)
args[key] = value
self.args = args
Process.__init__(self, name='save', sim=self.args['sim'])
self.C = ConfigParser.ConfigParser()
self.C.read(pyu.getlong('save.ini','ini'))
self.opt = dict(self.C.items('config'))
self.pos = dict(self.C.items('position'))
self.ldp = dict(self.C.items('ldp'))
self.wstd = dict(self.C.items('wstd'))
self.lpos = eval(self.pos['position'])
self.lldp = eval(self.ldp['ldp'])
self.lwstd = eval(self.wstd['wstd'])
self.sim = args['sim']
self.net = args['net']
def load(self,filename=[]):
""" Load a saved trace simulation
Examples
--------
>>> from pylayers.util.save import *
>>> S=Save()
>>> S.load()
"""
if filename == []:
filename = self.filename
out=[0]
infile = open(os.path.join(basename,pstruc['DIRNETSAVE'],filename), 'r')
while 1:
try:
out.append(pickle.load(infile))
except (EOFError, pickle.UnpicklingError):
break
out.pop(0)
infile.close()
dout= dict(out[-1])
return dout
def mat_export(self):
"""
export save simulation to a matlab file
Examples
--------
>>> from pylayers.util.save import *
>>> S=Save()
>>> S.mat_export()
"""
self.save=self.load()
self.savemat=copy.deepcopy(self.save)
nodes=self.save['saveopt']['type'].keys()
for inn,n in enumerate(nodes):
self.savemat['node_'+n]=self.save[n]
for n2 in nodes:
if n2 != n:
try:
self.savemat['node_'+n]['node_'+n2]=self.save[n][n2]
del self.savemat[n][n2]
except:
pass
del self.savemat[n]
for o in self.save['saveopt']:
if o =='subnet' and inn == 0:
for r in self.save['saveopt']['lwstd']:
li=self.save['saveopt'][o][r]
self.savemat['saveopt'][o][r]=['node_'+l for l in li]
else :
try:
self.savemat['saveopt'][o]['node_'+n]=self.save['saveopt'][o][n]
del self.savemat['saveopt'][o][n]
except:
pass
spio.savemat(os.path.join(basename,pstruc['DIRNETSAVE'],self.filename),self.savemat)
self.save=self.load()
def run(self):
"""
Run the save Result process
"""
self.save={}
self.filename = eval(self.opt['filename'])
self.file=open(os.path.join(basename,pstruc['DIRNETSAVE'],self.filename),'w')
self.save['saveopt'] = {}
self.save['saveopt']['lpos'] = self.lpos
self.save['saveopt']['lldp'] = self.lldp
self.save['saveopt']['lwstd'] = self.lwstd
self.save['saveopt']['nbsamples'] = np.ceil(eval(self.sim.sim_opt['duration'])/eval(self.opt['save_update_time']))+1
self.save['saveopt']['duration'] = eval(self.sim.sim_opt['duration'])
self.save['saveopt']['save_update_time'] = eval(self.opt['save_update_time'])
pickle.dump(self.save, self.file)
self.file.close()
self.idx=0
### init save dictionary
self.save['saveopt']['Layout'] = self.L._filename
self.save['saveopt']['type'] = nx.get_node_attributes(self.net,'type')
self.save['saveopt']['epwr'] = nx.get_node_attributes(self.net,'epwr')
self.save['saveopt']['sens'] = nx.get_node_attributes(self.net,'sens')
self.save['saveopt']['subnet']={}
for wstd in self.lwstd:
self.save['saveopt']['subnet'][wstd]=self.net.SubNet[wstd].nodes()
[self.save.update({n:{}}) for n in self.net.nodes()]
# find the size of the save array from the simulation duration and
# the save sample time
nb_sample=np.ceil(eval(self.sim.sim_opt['duration'])/eval(self.opt['save_update_time']))+1
# create empty arrays to be filled with simulation data
for n in self.net.nodes():
for position in self.lpos:
self.save[n][position]=np.zeros((nb_sample,2))*np.nan
for e in self.net.edges():
self.save[e[0]][e[1]]={}
self.save[e[1]][e[0]]={}
for wstd in self.lwstd:
self.save[e[0]][e[1]][wstd]={}
self.save[e[1]][e[0]][wstd]={}
for ldp in self.lldp:
self.save[e[0]][e[1]][wstd][ldp]=np.zeros((nb_sample,2))*np.nan
self.save[e[1]][e[0]][wstd][ldp]=np.zeros((nb_sample,2))*np.nan
while True:
rl={}
for wstd in self.lwstd:
for ldp in self.lldp:
rl[wstd+ldp]=nx.get_edge_attributes(self.net.SubNet[wstd],ldp)
for n in self.net.nodes():
for position in self.lpos:
try:
p = nx.get_node_attributes(self.net,position)
self.save[n][position][self.idx]=p[n]
except:
pass
for e in self.net.edges():
for wstd in self.lwstd:
for ldp in self.lldp:
try:
le=tuple([e[0],e[1],wstd])
self.save[e[0]][e[1]][wstd][ldp][self.idx]=rl[wstd+ldp][le]
self.save[e[1]][e[0]][wstd][ldp][self.idx]=rl[wstd+ldp][le]
except:
pass
self.file=open(os.path.join(basename,pstruc['DIRNETSAVE'],self.filename),'a')
pickle.dump(self.save, self.file)
self.file.close()
self.idx=self.idx+1
yield hold, self, eval(self.opt['save_update_time'])
|
nilq/baby-python
|
python
|
# Import libraries
from bs4 import BeautifulSoup
import requests
import psycopg2
import dateutil.parser as p
from colorama import Fore, Back, Style
# Insert the results to the database
def insert_datatable(numberOfLinks, selected_ticker, filtered_links_with_dates, conn, cur):
if filtered_links_with_dates:
for link in filtered_links_with_dates:
cur.execute("INSERT INTO articles (SYMBOL, LINK, ARTICLE_DATE) VALUES ('{a}', '{b}', '{c}')".format(a=selected_ticker, b=link[0], c=link[1]))
conn.commit()
print(f"{Fore.RED}{numberOfLinks}.{Style.RESET_ALL}\t{Fore.CYAN}{link[1]}{Style.RESET_ALL}\t{Fore.GREEN}{link[0]}{Style.RESET_ALL}")
numberOfLinks += 1
else:
print(f"{Fore.GREEN}No links have been found in the date range given{Style.RESET_ALL}")
print('\n')
# Filter out any irrelevant article based on dates
def extract_date(x, dateToBegin, dateToEnd):
if x[1] >= dateToBegin and x[1] <= dateToEnd:
return x
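# e.g. extract_date((url, datetime.date(2020, 1, 2)), datetime.date(2019, 1, 1), datetime.date(2019, 12, 31))
# returns None, which is why results are wrapped in filter(None, ...) in get_news() below.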
# Scrape the web pages and get the links
def get_news(dateToBegin, dateToEnd, endpoint, port, dbName, usr, masterUserPassword, selected_tickers):
# Get the year, month and day of the ending date in the query
endingDate = dateToEnd.strftime('%Y-%m-%d').split("-")
year = endingDate[0]
month = endingDate[1]
day = endingDate[2]
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
# Open database connection
conn = psycopg2.connect(host=endpoint, port=port, database=dbName, user=usr, password=masterUserPassword)
cur = conn.cursor()
# Scrape article links and dates
for selected_ticker in selected_tickers:
print('\n')
print(f"{Fore.MAGENTA}The following links have been collected and written to the database for{Style.RESET_ALL} {Fore.CYAN}{selected_ticker}: {Style.RESET_ALL}")
print('\n')
# Find the url of the page of the ending date
base_url = "https://www.marketwatch.com/search?q="+selected_ticker+"&m=Ticker&rpp=100&mp=2005&bd=true&bd=false&bdv="+month+"%2F"+day+"%2F"+year+"&rs=true"
page = 1
nav = "Next"
numberOfLinks = 1
# Keep crawling for more pages
while nav == "Next":
if page > 1:
new_page = "&o="+str(page)
else:
new_page = ""
# Scrape the target page
active_url = base_url + new_page
r = requests.get(active_url, headers=headers)
c = r.content
soup = BeautifulSoup(c, "html.parser")
# Find all results with the article links and dates
try:
resultlist = soup.findAll('div', attrs={'class' : 'resultlist'})[0]
except:
break
# Extract the links
search_results = resultlist.findAll('div', attrs={'class' : 'searchresult'})
links = [x.find('a')['href'] for x in search_results]
# Extract the dates
dates_and_times = resultlist.findAll('div', attrs={'class' : 'deemphasized'})
dates_extracted = [x.find('span').text.split("m")[-1].replace(".", "").lstrip() for x in dates_and_times]
article_dates = [p.parse(x).date() for x in dates_extracted]
# Merge links and dates
links_with_dates = list(zip(links, article_dates))
# Filter out any links that the dates are outside the query range
filtered_links_with_dates = list(filter(None, [extract_date(x, dateToBegin, dateToEnd) for x in links_with_dates]))
# Insert the results to the database
insert_datatable(numberOfLinks, selected_ticker, filtered_links_with_dates, conn, cur)
# Check if the next page is relevant
numberOfRelevantArticles = len(filtered_links_with_dates)
if numberOfRelevantArticles == 100:
try:
nav_links = soup.findAll('div', attrs={'class' : 'nextprevlinks'})
for nav_link in nav_links:
if "Next" in nav_link.text:
nav = "Next"
page += 100
numberOfLinks += 100
break
except:
nav = ""
else:
nav = ""
|
nilq/baby-python
|
python
|
import random
import torch.nn as nn
import torch.nn.functional as F
from torch import LongTensor
from torch import from_numpy, ones, zeros
from torch.utils import data
from . import modified_linear
PATH_TO_SAVE_WEIGHTS = 'saved_weights/'
def get_layer_dims(dataname):
# Hidden-layer width divisors per dataset (absolute widths for cifar100/mnist-style datasets).
dims = {'dsads': [1, 2, 2, 4], 'opp': [1, 2, 4],
'hapt': [0.5, 1, 2], 'milan': [0.5, 1, 2], 'pamap': [0.5, 1, 2], 'aruba': [0.5, 1, 2],
'cifar100': [500, 500], 'mnist': [100, 100, 100], 'permuted_mnist': [100, 100, 100]}
return dims.get(dataname, [1, 2, 2])
class Net(nn.Module):
def __init__(self, input_dim, n_classes, dataname, lwf=False, cosine_liner=False):
super(Net, self).__init__()
self.dataname = dataname
layer_nums = get_layer_dims(self.dataname)
self.layer_sizes = layer_nums if self.dataname in ['cifar100', 'mnist'] else\
[int(input_dim / num) for num in layer_nums]
self.fc0 = nn.Linear(input_dim, self.layer_sizes[0])
if len(self.layer_sizes) == 2:
self.fc_penultimate = nn.Linear(self.layer_sizes[0], self.layer_sizes[1])
elif len(self.layer_sizes) == 3:
self.fc1 = nn.Linear(self.layer_sizes[0], self.layer_sizes[1])
self.fc_penultimate = nn.Linear(self.layer_sizes[1], self.layer_sizes[2])
elif (len(self.layer_sizes) == 4):
self.fc1 = nn.Linear(self.layer_sizes[0], self.layer_sizes[1])
self.fc2 = nn.Linear(self.layer_sizes[1], self.layer_sizes[2])
self.fc_penultimate = nn.Linear(self.layer_sizes[2], self.layer_sizes[3])
final_dim = self.fc_penultimate.out_features
self.fc = modified_linear.CosineLinear(final_dim, n_classes) if cosine_liner \
else nn.Linear(final_dim, n_classes, bias=lwf==False) # no biases for LwF
def forward(self, x):
x = F.relu(self.fc0(x))
if len(self.layer_sizes) > 2:
x = F.relu(self.fc1(x))
if len(self.layer_sizes) > 3:
x = F.relu(self.fc2(x))
x = F.relu(self.fc_penultimate(x))
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class Dataset(data.Dataset):
def __init__(self, features, labels):
self.labels = labels
self.features = features
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
X = from_numpy(self.features[idx])
y = self.labels[idx]
y = LongTensor([y])
return X, y
def get_sample(self, sample_size):
return random.sample(self.features, sample_size)
class BiasLayer(nn.Module):
def __init__(self, device):
super(BiasLayer, self).__init__()
self.beta = nn.Parameter(ones(1, requires_grad=True, device=device))
self.gamma = nn.Parameter(zeros(1, requires_grad=True, device=device))
def forward(self, x):
return self.beta * x + self.gamma
def printParam(self, i):
print(i, self.beta.item(), self.gamma.item())
def get_beta(self):
return self.beta
def get_gamma(self):
return self.gamma
def set_beta(self, new_beta):
self.beta = new_beta
def set_gamma(self, new_gamma):
self.gamma = new_gamma
def set_grad(self, bool_value):
self.beta.requires_grad = bool_value
self.gamma.requires_grad = bool_value
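# Hedged usage sketch for BiasLayer (a BiC-style bias correction; names hypothetical):
#   bias_layer = BiasLayer(device='cpu')
#   corrected_logits = bias_layer(new_class_logits)  # applies beta*x + gamma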
|
nilq/baby-python
|
python
|
from core.advbase import *
def module():
return Pia
class Pia(Adv):
conf = {}
conf['slots.a'] = [
'Dragon_and_Tamer',
'Flash_of_Genius',
'Astounding_Trick',
'The_Plaguebringer',
'Dueling_Dancers'
]
conf['slots.d'] = 'Vayu'
conf['acl'] = """
`dragon(c3-s-end), not energy()=5 and s1.check()
`s3, not buff(s3)
`s2
`s4
`s1, buff(s3)
`fs, x=5
"""
conf['coabs'] = ['Blade','Dragonyule_Xainfried','Bow']
conf['share'] = ['Tobias']
if __name__ == '__main__':
import sys
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Benjamin Vial
# License: MIT
"""
The :mod:`pytheas.homogenization.twoscale2D` module implements tools for the
two scale convergence homogenization of 2D metamaterials for TM polarization
"""
from .femmodel import TwoScale2D
|
nilq/baby-python
|
python
|
import os.path as osp
import numpy as np
import numpy.linalg as LA
import random
import open3d as o3
import torch
import common3Dfunc as c3D
from asm_pcd import asm
from ASM_Net import pointnet
"""
Path setter
"""
def set_paths( dataset_root, category ):
paths = {}
paths["trainset_path"] = osp.join(dataset_root,category,"train")
"""
paths["testset_path"] = osp.join(dataset_root,category,"test")
paths["valset_path"] = osp.join(dataset_root,category,"val")
paths["original_path"] = osp.join(dataset_root,category,"original")
paths["sorted_path"] = osp.join(dataset_root,category,"sorted")
paths["trainmodels_path"] = osp.join(dataset_root,category,"train_models")
paths["testmodels_path"] = osp.join(dataset_root,category,"test_models")
paths["valmodels_path"] = osp.join(dataset_root,category,"val_models")
"""
for p in paths.values():
if not osp.exists(p):
print("!!ERROR!! The following path was not found:")
print(p)
return False
return paths
def load_asmds( root, synset_names ):
""" load multiple Active Shape Model Deformations
Args:
root(str): Root directory
synset_names(str): List of class names.
The first element "BG" is ignored.
Return:
dict: A dictionary of ASMDeformation
"""
print("Root dir:", root )
asmds = {}
for s in range(len(synset_names)-1):
paths = set_paths( root, synset_names[s+1] )
trainset_path = paths["trainset_path"]
info = np.load( osp.join(trainset_path,"info.npz"))
asmd = asm.ASMdeformation( info )
asmds[synset_names[s+1]] = asmd
return asmds
def load_models( root, dirname, n_epoch, synset_names, ddim, n_points, device ):
""" Load multiple network weights (for experiments)
Args:
root(str): Path to dataset root
dirname(str): Directory name of weights
n_epoch(int): choose the epoch of weights
synset_names(list of str): List of class names; the first element "BG" is ignored.
ddim(int): # of dimensions used for deformation
n_points(int): # of points fed to the networks
device(str): device("cuda:0" or "cpu")
Return:
A dictionary of weights
"""
print("Root dir:", root )
models = {}
for s in range(len(synset_names)-1):
path = osp.join(root,
synset_names[s+1],
"weights",
dirname,
"model_"+str(n_epoch)+".pth")
print(" loading:", path )
total_dim = ddim+1 # deformation(ddim) + scale(1)
model = pointnet.ASM_Net(k = total_dim, num_points = n_points)
model.load_state_dict( torch.load(path) )
model.to(device)
model.eval()
models[synset_names[s+1]] = model
return models
def load_models_release( root, synset_names, ddim, n_points, device ):
""" Load multiple network weights (for release)
Args:
root(str): Path to model root
synset_names(list of str): List of class names; the first element "BG" is ignored.
ddim(int): # of dimensions used for deformation
n_points(int): # of points fed to the networks
device(str): device("cuda:0" or "cpu")
Return:
A dictionary of weights
"""
print("Root dir:", root )
models = {}
for s in range(len(synset_names)-1):
path = osp.join(root,
synset_names[s+1],
"model.pth")
print(" loading:", path )
total_dim = ddim+1 # deformation(ddim) + scale(1)
model = pointnet.ASM_Net(k = total_dim, num_points = n_points)
model.load_state_dict( torch.load(path) )
model.to(device)
model.eval()
models[synset_names[s+1]] = model
return models
def get_pcd_from_rgbd( im_c, im_d, intrinsic ):
""" generate point cloud from cv2 image
Args:
im_c(ndarray 3ch): RGB image
im_d(ndarray 1ch): Depth image
intrinsic(PinholeCameraIntrinsic): intrinsic parameter
Return:
open3d.geometry.PointCloud: point cloud
"""
color_raw = o3.geometry.Image(im_c)
depth_raw = o3.geometry.Image(im_d)
rgbd_image = o3.geometry.RGBDImage.create_from_color_and_depth( color_raw, depth_raw,
depth_scale=1000.0,
depth_trunc=3.0,
convert_rgb_to_intensity=False )
pcd = o3.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic )
return pcd
def generate_pose():
""" generate pose from hemisphere-distributed viewpoints
"""
# y axis(yr): -pi - pi
# x axis(xr): 0 - 0.5pi
# view_direction(ar): -0.1pi - 0.1pi
yr = (random.random()*2.0*np.pi)-np.pi
xr = (random.random()*0.5*np.pi)
ar = (random.random()*0.2*np.pi)-(0.1*np.pi)
# x,y-axis
y = c3D.RPY2Matrix4x4( 0, yr, 0 )[:3,:3]
x = c3D.RPY2Matrix4x4( xr, 0, 0 )[:3,:3]
rot = np.dot( x, y )
# rotation around view axis
v = np.array([0.,0.,-1.]) #basis vector
rot_v = np.dot(x,v) # prepare axis
q = np.hstack([ar,rot_v]) # generate quaternion
q = q/LA.norm(q) # unit quaternion
pose = c3D.quaternion2rotation(q)
rot = np.dot(pose,rot)
return rot
def get_mask( mask_info, choice="pred" ):
"""
Args:
mask_info(dict): object mask of "GT" and "Mask RCNN used NOCS_CVPR2019)
choice(str): choice of mask.gt(GT) or pred(Mask-RCNN).
Return:
tuple: (mask, class_ids)
"""
key_id = choice+"_class_ids"
key_mask = choice+"_masks"
class_ids = mask_info[key_id]
mask = mask_info[key_mask]
return np.asarray(mask), np.asarray(class_ids)
def get_model_scale( image_path, model_root ):
model_path = None
meta_path = image_path + '_meta.txt'
sizes = []
class_ids = []
pcds = []
with open(meta_path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
words = line[:-1].split(' ')
model_path = osp.join( model_root, words[-1]+".obj")
pcd = o3.io.read_triangle_mesh(model_path)
bb = pcd.get_axis_aligned_bounding_box()
bbox = bb.get_max_bound() - bb.get_min_bound()
size = np.linalg.norm(bbox)
sizes.append(size)
class_ids.append(int(words[1]))
pcds.append(pcd)
return np.asarray(sizes), np.asarray(class_ids), pcds
|
nilq/baby-python
|
python
|