content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from functools import reduce
# Project Euler #8: find the greatest product of `digits` adjacent digits
# in the 1000-digit series below.
num = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
digits = 13


def _window_product(window):
    # Multiply the integer values of every character in the window.
    return reduce(lambda acc, ch: acc * int(ch), window, 1)


# Slide a fixed-width window over the digit string and keep the best product.
max_prod = max(
    _window_product(num[start:start + digits])
    for start in range(len(num) + 1 - digits)
)
print(max_prod)
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
x = np.array([1., 2., 3., 4., 5.])
y = np.array([1., 3., 2., 3., 5.])

# Show the raw data points first.
plt.scatter(x, y)
plt.axis([0, 6, 0, 6])
plt.show()

# Simple least-squares fit: slope a = cov(x, y) / var(x), intercept from means.
x_mean = np.mean(x)
y_mean = np.mean(y)
numerator = 0.0
denominator = 0.0
for xi, yi in zip(x, y):
    numerator += (xi - x_mean) * (yi - y_mean)
    denominator += (xi - x_mean) ** 2
a = numerator / denominator
b = y_mean - a * x_mean

# Overlay the fitted line on the scatter plot.
y_hat = a * x + b
plt.scatter(x, y)
plt.plot(x, y_hat, color='r')
plt.axis([0, 6, 0, 6])
plt.show()

# Extrapolate one step beyond the data.
x_predict = 6
y_predict = a * x_predict + b
print(y_predict)
|
nilq/baby-python
|
python
|
import pika
from collections import deque
class Messaging():
    '''RabbitMQ-based messaging endpoint for one node.

    Declares a fanout exchange ('broadcast') and a topic exchange
    ('direct_message'), binds an exclusive queue for this node's identity,
    and buffers messages addressed to local clients in in-memory deques.
    '''
    def __init__(self, identity):
        # NOTE(review): exchange_declare(type=...) and
        # basic_consume(callback, queue=...) match the pika 0.x API;
        # pika 1.x renamed these arguments — confirm the pinned pika version.
        self._connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
        self._channel = self._connection.channel()
        self._queue = None
        self._identity = identity
        self._create_exchanges()
        self._create_queue(identity)
        self._setup_consumers()
        # Maps routing key -> deque of message bodies awaiting local pickup.
        self._messages = dict()

    def _create_exchanges(self):
        '''Declare the broadcast (fanout) and direct_message (topic) exchanges.'''
        self._channel.exchange_declare(exchange='broadcast', type='fanout')
        self._channel.exchange_declare(exchange='direct_message', type='topic')

    def _create_queue(self, identity):
        '''Create this node's exclusive queue and bind it to both exchanges.

        The topic binding `identity + '.*'` routes any message addressed to
        one of this node's clients here.
        '''
        self._queue = self._channel.queue_declare(exclusive=True)
        self._channel.queue_bind(exchange='broadcast', queue=self._queue.method.queue)
        self._channel.queue_bind(exchange='direct_message',
                                 queue=self._queue.method.queue,
                                 routing_key=identity + '.*')

    def _callback(self, ch, method, properties, body):
        '''Consumer callback: ack broadcasts, queue direct messages locally.'''
        # Ignore broadcast sent by this node
        if method.exchange == 'broadcast' and method.routing_key == self._identity:
            ch.basic_ack(delivery_tag=method.delivery_tag)
            return True
        # Ignore broadcast for now
        if method.exchange == 'broadcast':
            ch.basic_ack(delivery_tag=method.delivery_tag)
            return True
        # Direct message: re-dispatch through send(), which buffers it locally
        # because the routing key starts with our identity.
        self.send(method.routing_key, body)
        ch.basic_ack(delivery_tag=method.delivery_tag)

    def _setup_consumers(self):
        '''Attach _callback as the consumer of this node's queue.'''
        self._channel.basic_consume(self._callback,
                                    queue=self._queue.method.queue)

    def run(self):
        '''Block and process incoming messages until stop() is called.'''
        self._channel.start_consuming()

    def stop(self):
        '''Stop consuming and close the broker connection.'''
        print('Stop messaging process')
        self._channel.stop_consuming()
        self._connection.close()

    def send(self, to, message):
        '''Deliver `message` to routing key `to`.

        If `to` begins with this node's identity the message is buffered
        locally; otherwise it is published to the direct_message exchange.
        '''
        if to.find(self._identity) == 0:
            if to not in self._messages:
                self._messages[to] = deque()
            self._messages[to].append(message)
        else:
            self._channel.basic_publish(exchange='direct_message',
                                        routing_key=to,
                                        body=message)

    def broadcast(self, message):
        '''Publish `message` to every node via the broadcast exchange.'''
        self._channel.basic_publish(exchange='broadcast',
                                    routing_key=self._identity,
                                    body=message)

    def has_message_for(self, to):
        '''Return True when a local buffer exists for routing key `to`.'''
        if to in self._messages:
            return True
        return False

    def messages_for(self, client_id):
        '''Return the buffered deque for this node's `client_id`, or None.'''
        to = '%s.%s' % (self._identity , client_id)
        if not self.has_message_for(to):
            return None
        return self._messages[to]
|
nilq/baby-python
|
python
|
#Desenvolva um gerador de tabuada
# Multiplication-table (tabuada) generator: prints n x 1 through n x 10.
n = int(input("Tabuada de que número? "))
for factor in range(1, 11):
    print("{} X {} = {}".format(n, factor, (n * factor)))
|
nilq/baby-python
|
python
|
# Register your models here.
from django.contrib import admin
from .models import Photo, Metadata, Album
# Expose the photo-gallery models in the Django admin with default options.
admin.site.register(Photo)
admin.site.register(Metadata)
admin.site.register(Album)
|
nilq/baby-python
|
python
|
# Collect five sentences from the user, persist them, then read them back.
t = 5
sentences = []
while t:
    sentences.append(input("Podaj zdanie") + "\n")
    t -= 1

# First pass: write each sentence individually.
f = open("sentence.txt", "w")
for i in sentences:
    f.write(i)
f.close()

# Second pass: append the same sentences again via writelines.
f = open("sentence.txt", "a")
f.writelines(sentences)
f.close()

# Read the file back line by line.
f = open("sentence.txt", "r")
for line in f:
    print(line, end="")
f.close()

# Read just the first line.
f = open("sentence.txt", "r")
x = f.readline()
print(x)
f.close()

# BUG FIX: the file was opened in "r" (read-only) mode and then written to,
# which raises io.UnsupportedOperation; open in append mode so the write works.
with open("sentence.txt", "a") as f:
    f.write("Hello from context")
|
nilq/baby-python
|
python
|
import toml
# Convert a GCP service-account JSON key file into a Streamlit secrets.toml.
output_file = ".streamlit/secrets.toml"

with open("project-327006-2314b3476b3a.json") as json_file:
    json_text = json_file.read()

# Store the whole JSON document as a single TOML string value.
secrets = {"textkey": json_text}
with open(output_file, "w") as target:
    target.write(toml.dumps(secrets))
|
nilq/baby-python
|
python
|
from setuptools import find_packages, setup
# Packaging metadata for the robotframework-historic distribution.
setup(
    name='robotframework-historic',
    version="0.2.9",
    description='Custom report to display robotframework historical execution records',
    long_description='Robotframework Historic is custom report to display historical execution records using MySQL + Flask',
    classifiers=[
        'Framework :: Robot Framework',
        'Programming Language :: Python',
        'Topic :: Software Development :: Testing',
    ],
    keywords='robotframework historical execution report',
    author='Shiva Prasad Adirala',
    author_email='adiralashiva8@gmail.com',
    url='https://github.com/adiralashiva8/robotframework-historic',
    license='MIT',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies: web UI (flask + MySQL) and Robot Framework itself.
    install_requires=[
        'robotframework',
        'config',
        'flask',
        'flask-mysqldb'
    ],
    # CLI entry points: main app plus parser/setup/update utilities.
    entry_points={
        'console_scripts': [
            'rfhistoric=robotframework_historic.app:main',
            'rfhistoricparser=robotframework_historic.parserargs:main',
            'rfhistoricreparser=robotframework_historic.reparserargs:main',
            'rfhistoricsetup=robotframework_historic.setupargs:main',
            'rfhistoricupdate=robotframework_historic.updateargs:main',
        ]
    },
)
|
nilq/baby-python
|
python
|
import numpy as np
import random
import math
import cmath
import itertools
from tqdm import tqdm
from PIL import Image
from matplotlib import cm
def log_density_map(val, max_count):
    """Map a raw hit count to a [0, 1] brightness on a log scale with gamma lift."""
    gamma = 3.2  # higher gamma brightens dim regions (was 7.2 in an earlier run)
    normalized = math.log(val) / math.log(max_count)
    return math.pow(normalized, 1 / gamma)
def flip(t):
    """Return t with its real and imaginary parts swapped."""
    return complex(t.imag, t.real)
def g1(t):
    """Return cos(t) + i*sin(t) for a complex argument t."""
    return cmath.cos(t) + cmath.sin(t) * 1j
def g2(t):
    """Complex tangent, used as the inner perturbation of the popcorn map."""
    value = cmath.tan(t)
    return value
def popcorn(h):
    """Accumulate a popcorn-fractal hit-count histogram on an h x h grid.

    Iterates a Pickover-style 'popcorn' map from every pixel's starting
    point and counts where each orbit lands after a 45-degree rotation
    back onto the pixel grid.

    Args:
        h: image edge length in pixels.
    Returns:
        numpy array of shape (h, h) with per-pixel orbit hit counts.
    """
    sc = 2            # world-coordinate span covered by the image
    iterationsN = 21  # orbit length per starting point
    a = 3             # inner frequency multiplier of the map
    mn = 0.05         # step size of the map
    counts = np.zeros((h, h))

    # BUG FIX: the rasterization step was invalid C-like code
    # (`float angle = 45.*3.14/180.;` plus undefined xmin/deltap/ymax/deltaq).
    # Reconstructed here: rotate each orbit point by 45 degrees and map the
    # world square [-sc/2, sc/2]^2 onto the h x h pixel grid.
    angle = 45.0 * math.pi / 180.0
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    xmin = -sc / 2.0
    ymax = sc / 2.0
    delta = sc / h  # world units per pixel

    for i in tqdm(range(h)):
        for j in range(h):
            z = complex(sc/2 - sc*i/h, sc/2 - sc*j/h)
            for k in range(iterationsN):
                x = z.real
                y = z.imag*1j
                px = x
                x = x - mn*( g1(y + g2(a*y) )).real - mn*(g1(x + g2(a*x))).imag*1j
                y = y - mn*(g1(px + g2(a*px))).real - mn*(g1(y + g2(a*y))).imag*1j
                z = x + flip(y)
                try:
                    # Normalize by the magnitude of the combined point.
                    x = x / abs(z)
                    y = y / abs(z)
                except ZeroDivisionError:
                    # Orbit hit the origin; keep x, y unnormalized.
                    pass
                z = x + flip(y)
                if abs(z) > 2:
                    # Orbit escaped the region of interest.
                    break
                # Rotate the point and convert to integer pixel indices.
                x0 = int((z.real * cos_a - z.imag * sin_a - xmin) / delta)
                y0 = int((ymax - z.real * sin_a - z.imag * cos_a) / delta)
                # Only count points that land inside the image.
                if 0 <= x0 < h and 0 <= y0 < h:
                    counts[x0, y0] += 1
    return counts
def colorize(counts, h):
    """Render the hit-count histogram into an h x h RGB uint8 image array."""
    cmap = cm.get_cmap("copper")
    image = np.zeros((h, h, 3), dtype=np.uint8)
    peak = np.max(counts)
    for row in tqdm(range(h)):
        for col in range(h):
            hits = counts[row, col]
            # Leave zero-hit pixels black.
            if hits > 0:
                r, g, b, _alpha = cmap(log_density_map(hits, peak))
                image[row, col, 0] = int(255 * r)
                image[row, col, 1] = int(255 * g)
                image[row, col, 2] = int(255 * b)
    return image
def run():
    """Generate one popcorn-fractal image and save it under a size-based folder."""
    h = 400
    im_arr = colorize(popcorn(h), h)
    print(f"Saving image...{h}")
    # Random suffix avoids clobbering earlier renders of the same size.
    filename = f"img_{h}_{random.random()}.png"
    folder = 'morethan4000/' if h >= 3000 else 'examples2/'
    Image.fromarray(im_arr).save(folder + filename)


if __name__ == "__main__":
    run()
|
nilq/baby-python
|
python
|
from typing import *
# extmod/modtrezorcrypto/modtrezorcrypto-bip32.h
class HDNode:
    '''
    BIP0032 HD node structure.
    '''
    def __init__(self,
                 depth: int,
                 fingerprint: int,
                 child_num: int,
                 chain_code: bytes,
                 private_key: Optional[bytes] = None,
                 public_key: Optional[bytes] = None,
                 curve_name: Optional[str] = None) -> None:
        '''
        Create an HD node from its raw components; at least one of
        private_key / public_key is expected.
        '''

    def derive(self, index: int, public: bool=False) -> None:
        '''
        Derive a BIP0032 child node in place.
        '''

    def derive_cardano(self, index: int) -> None:
        '''
        Derive a BIP0032 child node in place using Cardano algorithm.
        '''

    def derive_path(self, path: List[int]) -> None:
        '''
        Go through a list of indexes and iteratively derive a child node in place.
        '''

    def serialize_public(self, version: int) -> str:
        '''
        Serialize the public info from HD node to base58 string.
        '''

    def serialize_private(self, version: int) -> str:
        '''
        Serialize the private info HD node to base58 string.
        '''

    def clone(self) -> HDNode:
        '''
        Returns a copy of the HD node.
        '''

    def depth(self) -> int:
        '''
        Returns a depth of the HD node.
        '''

    def fingerprint(self) -> int:
        '''
        Returns a fingerprint of the HD node (hash of the parent public key).
        '''

    def child_num(self) -> int:
        '''
        Returns a child index of the HD node.
        '''

    def chain_code(self) -> bytes:
        '''
        Returns a chain code of the HD node.
        '''

    def private_key(self) -> bytes:
        '''
        Returns a private key of the HD node.
        '''

    def private_key_ext(self) -> bytes:
        '''
        Returns a private key extension of the HD node.
        '''

    def public_key(self) -> bytes:
        '''
        Returns a public key of the HD node.
        '''

    def address(self, version: int) -> str:
        '''
        Compute a base58-encoded address string from the HD node.
        '''

    def nem_address(self, network: int) -> str:
        '''
        Compute a NEM address string from the HD node.
        '''

    def nem_encrypt(self, transfer_public_key: bytes, iv: bytes, salt: bytes, payload: bytes) -> bytes:
        '''
        Encrypts payload using the transfer's public key
        '''

    def ethereum_pubkeyhash(self) -> bytes:
        '''
        Compute an Ethereum pubkeyhash (aka address) from the HD node.
        '''

    # NOTE(review): declared with `self` but returns a freshly constructed
    # node — presumably a factory wrapper in the C module; confirm usage.
    def deserialize(self, value: str, version_public: int, version_private: int) -> HDNode:
        '''
        Construct a BIP0032 HD node from a base58-serialized value.
        '''


def from_seed(seed: bytes, curve_name: str) -> HDNode:
    '''
    Construct a BIP0032 HD node from a BIP0039 seed value.
    '''


def from_mnemonic_cardano(mnemonic: str, passphrase: str) -> bytes:
    '''
    Convert mnemonic to hdnode
    '''
|
nilq/baby-python
|
python
|
import django.core.management.base as djcmb
import anwesende.room.models as arm
import anwesende.users.models as aum
class Command(djcmb.BaseCommand):
    '''Management command ensuring the datenverwalter group and dummy seat exist.'''
    help = "Silently creates group 'datenverwalter'"

    def handle(self, *args, **options):
        aum.User.get_datenverwalter_group()  # so admin has it on first visit
        arm.Seat.get_dummy_seat()  # create now to make it nicely the first one
|
nilq/baby-python
|
python
|
'''
English_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
Below is marathi numbers list
This program will convert the input number into english number
'''
# Devanagari digits indexed by their numeric value (0-9).
# BUG FIX: index 3 previously held '१' (one) instead of '३' (three), so '३'
# was never recognized and index() returned a duplicate result for '१'.
marathi_digits = ['०', '१', '२', '३', '४', '५', '६', '७', '८', '९']
a = input("Enter marathi digit: ")
if a in marathi_digits:
    # Single digit: its list index IS its numeric value.
    print("English Digit: ", marathi_digits.index(a))
# It will go to this condition if marathi number is of more than one digit
else:
    c = 0  # counter is to check input is valid or not
    n1 = ''
    for i in a:
        if i in marathi_digits:
            n1 += str(marathi_digits.index(i))
            c = c + 1
    if c != 0:
        print("English Digit: ", n1)
    else:
        print("Enter marathi number only")
'''
OUTPUT-:Enter marathi digit:६७८
English Digit: 678
Enter marathi digit:०
English Digit: 0
Enter marathi digit: seven
Enter marathi number only
'''
|
nilq/baby-python
|
python
|
# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
storage hosts views
"""
from datatables import ColumnDT, DataTables
from flask import jsonify, redirect, render_template, request, url_for
from sqlalchemy import func, literal_column
from sqlalchemy_filters import apply_filters
from sner.server.auth.core import role_required
from sner.server.extensions import db
from sner.server.forms import ButtonForm
from sner.server.sqlafilter import FILTER_PARSER
from sner.server.storage.core import annotate_model, tag_model_multiid
from sner.server.storage.forms import HostForm
from sner.server.storage.models import Host, Note, Service, Vuln
from sner.server.storage.views import blueprint
from sner.server.utils import relative_referrer, valid_next_url
@blueprint.route('/host/list')
@role_required('operator')
def host_list_route():
    """list hosts"""
    return render_template('storage/host/list.html')


@blueprint.route('/host/list.json', methods=['GET', 'POST'])
@role_required('operator')
def host_list_json_route():
    """list hosts, data endpoint"""
    # Per-host aggregate counts of services/vulns/notes, joined as subqueries.
    query_cnt_services = db.session.query(Service.host_id, func.count(Service.id).label('cnt')).group_by(Service.host_id).subquery()
    query_cnt_vulns = db.session.query(Vuln.host_id, func.count(Vuln.id).label('cnt')).group_by(Vuln.host_id).subquery()
    query_cnt_notes = db.session.query(Note.host_id, func.count(Note.id).label('cnt')).group_by(Note.host_id).subquery()
    columns = [
        ColumnDT(Host.id, mData='id'),
        ColumnDT(Host.address, mData='address'),
        ColumnDT(Host.hostname, mData='hostname'),
        ColumnDT(Host.os, mData='os'),
        # coalesce(..., 0): hosts without children still render a count.
        ColumnDT(func.coalesce(query_cnt_services.c.cnt, 0), mData='cnt_s', global_search=False),
        ColumnDT(func.coalesce(query_cnt_vulns.c.cnt, 0), mData='cnt_v', global_search=False),
        ColumnDT(func.coalesce(query_cnt_notes.c.cnt, 0), mData='cnt_n', global_search=False),
        ColumnDT(Host.tags, mData='tags'),
        ColumnDT(Host.comment, mData='comment'),
        # Placeholder column carrying per-row action buttons in the UI.
        ColumnDT(literal_column('1'), mData='_buttons', search_method='none', global_search=False)
    ]
    query = db.session.query().select_from(Host) \
        .outerjoin(query_cnt_services, Host.id == query_cnt_services.c.host_id) \
        .outerjoin(query_cnt_vulns, Host.id == query_cnt_vulns.c.host_id) \
        .outerjoin(query_cnt_notes, Host.id == query_cnt_notes.c.host_id)

    # Optional server-side filtering via the project's filter mini-language.
    if 'filter' in request.values:
        query = apply_filters(query, FILTER_PARSER.parse(request.values.get('filter')), do_auto_join=False)

    hosts = DataTables(request.values.to_dict(), query, columns).output_result()
    return jsonify(hosts)


@blueprint.route('/host/add', methods=['GET', 'POST'])
@role_required('operator')
def host_add_route():
    """add host"""
    form = HostForm()

    if form.validate_on_submit():
        host = Host()
        form.populate_obj(host)
        db.session.add(host)
        db.session.commit()
        return redirect(url_for('storage.host_view_route', host_id=host.id))

    return render_template('storage/host/addedit.html', form=form)


@blueprint.route('/host/edit/<host_id>', methods=['GET', 'POST'])
@role_required('operator')
def host_edit_route(host_id):
    """edit host"""
    host = Host.query.get(host_id)
    form = HostForm(obj=host, return_url=relative_referrer())

    if form.validate_on_submit():
        form.populate_obj(host)
        db.session.commit()
        # Only follow a return_url that passes the open-redirect check.
        if valid_next_url(form.return_url.data):
            return redirect(form.return_url.data)

    return render_template('storage/host/addedit.html', form=form)


@blueprint.route('/host/delete/<host_id>', methods=['GET', 'POST'])
@role_required('operator')
def host_delete_route(host_id):
    """delete host"""
    # ButtonForm provides CSRF protection for the destructive action.
    form = ButtonForm()

    if form.validate_on_submit():
        db.session.delete(Host.query.get(host_id))
        db.session.commit()
        return redirect(url_for('storage.host_list_route'))

    return render_template('button-delete.html', form=form)


@blueprint.route('/host/annotate/<model_id>', methods=['GET', 'POST'])
@role_required('operator')
def host_annotate_route(model_id):
    """annotate host"""
    return annotate_model(Host, model_id)


@blueprint.route('/host/view/<host_id>')
@role_required('operator')
def host_view_route(host_id):
    """view host"""
    host = Host.query.get(host_id)
    return render_template('storage/host/view.html', host=host, button_form=ButtonForm())


@blueprint.route('/host/tag_multiid', methods=['POST'])
@role_required('operator')
def host_tag_multiid_route():
    """tag multiple route"""
    return tag_model_multiid(Host)
|
nilq/baby-python
|
python
|
import platform
import sys
class AppMapPyVerException(Exception):
    '''Raised when the running Python interpreter is too old for appmap.'''
    pass


# Library code uses these, so provide intermediate
# functions that can be stubbed when testing.
def _get_py_version():
    # Comparable (major, minor, ...) tuple of the running interpreter.
    return sys.version_info


def _get_platform_version():
    # Human-readable interpreter version string, e.g. '3.9.1'.
    return platform.python_version()
def check_py_version():
    """Raise AppMapPyVerException if the interpreter is older than 3.6."""
    minimum = (3, 6)
    if _get_py_version() >= minimum:
        return
    # Report the human-readable version in the error message.
    actual = _get_platform_version()
    raise AppMapPyVerException(
        f'Minimum Python version supported is {minimum[0]}.{minimum[1]}, found {actual}'
    )
|
nilq/baby-python
|
python
|
'''
GaussGammaDistr.py
Joint Gaussian-Gamma distribution: D independent Gaussian-Gamma distributions
Attributes
--------
m : mean for Gaussian, length D
kappa : scalar precision parameter for Gaussian covariance
a : parameter for Gamma, vector length D
b : parameter for Gamma, vector length D
'''
import numpy as np
import scipy.linalg
from bnpy.util import MVgammaln, MVdigamma
from bnpy.util import LOGTWO, LOGPI, LOGTWOPI, EPS
from bnpy.util import gammaln, digamma
from .Distr import Distr
class GaussGammaDistr( Distr ):
    ''' Joint Gaussian-Gamma distribution: D independent Gaussian-Gamma pairs.

    Attributes
    --------
    m : mean for Gaussian, length D
    kappa : scalar precision parameter for Gaussian covariance
    a : parameter for Gamma, vector length D
    b : parameter for Gamma, vector length D
    '''

    ######################################################### Constructor
    #########################################################
    def __init__(self, a=None, b=None, m=None, kappa=None, **kwargs):
        ''' Create new GaussGammaDistr object, with specified parameter values

            Args
            -------
            a : numpy 1D array_like, length D
            b : numpy 1D array_like, length D
            m : numpy 1D array_like, length D
            kappa : float

            Returns
            -------
            D : bnpy GaussGammaDistr object, with provided parameters
        '''
        # Unpack
        self.a = np.squeeze(np.asarray(a))
        self.b = np.squeeze(np.asarray(b))
        self.m = np.squeeze(np.asarray(m))
        self.kappa = float(kappa)
        # Dimension check
        assert self.b.ndim <= 1
        assert self.m.shape == self.b.shape
        assert self.a.shape == self.m.shape
        self.D = self.b.size
        self.Cache = dict()

    @classmethod
    def CreateAsPrior( cls, argDict, Data):
        ''' Creates Gaussian-Gamma prior for params that generate Data.
            Returns GaussGammaDistr object with same dimension as Data.
            Provided argDict specifies prior's expected mean and variance.
        '''
        D = Data.dim
        a0 = argDict['a0']
        b0 = argDict['b0']
        m0 = argDict['m0']
        kappa = argDict['kappa']
        m = m0 * np.ones(D)
        a = a0 * np.ones(D)
        b = b0 * np.ones(D)
        return cls(a=a, b=b, m=m, kappa=kappa)

    ######################################################### Log Cond. Prob.
    ######################################################### E-step
    def E_log_pdf( self, Data ):
        ''' Calculate E[ log p( x_n | theta ) ] for each x_n in Data.X

            Args
            -------
            Data : bnpy XData object
                   with attribute Data.X, numpy 2D array of size nObs x D

            Returns
            -------
            logp : numpy 1D array, length nObs
        '''
        logPDFConst = -0.5 * self.D * LOGTWOPI + 0.5 * np.sum(self.E_logLam())
        logPDFData = -0.5 * self.E_distMahalanobis(Data.X)
        return logPDFConst + logPDFData

    def E_distMahalanobis(self, X):
        ''' Calculate E[ (x_n - \mu)^T diag(\lambda) (x_n - mu) ]
            which has simple form due to diagonal structure.

            Args
            -------
            X : numpy array, nObs x D

            Returns
            -------
            dist : numpy 1D array, length nObs
                   dist[n] = expected mahalanobis distance to observation n
        '''
        Elambda = self.a / self.b
        if X.ndim == 2:
            weighted_SOS = np.sum( Elambda * np.square(X - self.m), axis=1)
        else:
            weighted_SOS = np.sum(Elambda * np.square(X - self.m))
        # Extra D/kappa term accounts for uncertainty in the mean itself.
        weighted_SOS += self.D/self.kappa
        return weighted_SOS

    ######################################################### Param updates
    ######################################################### (M step)
    def get_post_distr( self, SS, k=None, kB=None, **kwargs):
        ''' Create new GaussGammaDistr as posterior given sufficient stats
            for a particular component (or components)

            Args
            ------
            SS : bnpy SuffStatBag, with K components
            k : int specifying component of SS to use.
                Range {0, 1, ... K-1}.
            kB : [optional] int specifying additional component of SS to use
                 if provided, k-th and kB-th entry of SS are *merged* additively
                 Range {0, 1, ... K-1}.

            Returns
            -------
            D : bnpy.distr.GaussGammaDistr, with updated posterior parameters
        '''
        if k is None:
            EN = SS.N
            Ex = SS.x
            Exx = SS.xx
        elif kB is not None:
            # Merge candidate: pool the stats of components k and kB.
            EN = float(SS.N[k] + SS.N[kB])
            Ex = SS.x[k] + SS.x[kB]
            Exx = SS.xx[k] + SS.xx[kB]
        else:
            EN = float(SS.N[k])
            Ex = SS.x[k]
            Exx = SS.xx[k]
        # Standard Normal-Gamma conjugate update equations.
        kappa = self.kappa + EN
        m = (self.kappa * self.m + Ex) / kappa
        a = self.a + 0.5*EN
        b = self.b + 0.5*(Exx + self.kappa*np.square(self.m) - kappa*np.square(m))
        return GaussGammaDistr(a, b, m, kappa)

    def post_update_soVB( self, rho, refDistr, **kwargs):
        ''' In-place update of this GaussGammaDistr's internal parameters,
             via the stochastic online variational algorithm.
            Updates via interpolation between self and reference,
             in the natural-parameter space:
                self = self * (1-rho) + refDistr * rho

            Args
            -----
            rho : float, learning rate to use for the update
            refDistr : bnpy GaussGammaDistr, reference distribution for update

            Returns
            -------
            None.
        '''
        etaCUR = self.get_natural_params()
        etaSTAR = refDistr.get_natural_params()
        etaNEW = list(etaCUR)
        # BUG FIX: `xrange` is Python 2 only; `range` works on both 2 and 3.
        for i in range(len(etaCUR)):
            etaNEW[i] = rho*etaSTAR[i] + (1-rho)*etaCUR[i]
        self.set_natural_params(tuple(etaNEW))

    ######################################################### Required accessors
    #########################################################
    @classmethod
    def calc_log_norm_const(cls, a, b, m, kappa):
        ''' Compute the log normalization constant from raw parameters.

            BUG FIX: `D` was previously undefined here (NameError); it is now
            derived from the size of m. The sign of the log(kappa) term is
            aligned with get_log_norm_const below.
        '''
        D = np.squeeze(np.asarray(m)).size
        logNormConstNormal = 0.5 * D * (LOGTWOPI - np.log(kappa))
        logNormConstGamma = np.sum(gammaln(a)) - np.inner(a, np.log(b))
        return logNormConstNormal + logNormConstGamma

    def get_log_norm_const(self):
        ''' Calculate log normalization constant (aka log partition function)
            for this Gauss-Gamma distribution.

            p(mu,Lam) = NormalGamma( mu, Lam | a, b, m, kappa)
                      = 1/Z f(mu|Lam) g(Lam), where Z is const w.r.t mu,Lam
            Normalization constant = Z = \int f() g() dmu dLam

            Returns
            --------
            logZ : float
        '''
        D = self.D
        a = self.a
        b = self.b
        logNormConstNormal = 0.5 * D * (LOGTWOPI - np.log(self.kappa))
        logNormConstGamma = np.sum(gammaln(a)) - np.inner(a, np.log(b))
        return logNormConstNormal + logNormConstGamma

    def E_log_pdf_Phi(self, Distr, doNormConst=True):
        ''' Evaluate expectation of log PDF for given GaussGammaDistr

            Args
            -------
            Distr : bnpy GaussGammaDistr
            doNormConst : boolean, if True then Distr's log norm const is included

            Returns
            -------
            logPDF : float
        '''
        assert Distr.D == self.D
        selfELam = self.a / self.b
        logPDF = np.inner(Distr.a - 0.5, self.E_logLam()) \
                 - np.inner(Distr.b, selfELam) \
                 - 0.5 * Distr.kappa * self.E_distMahalanobis(Distr.m)
        if doNormConst:
            return logPDF - Distr.get_log_norm_const()
        return logPDF

    def get_entropy(self):
        ''' Calculate entropy of this Gauss-Gamma distribution,
            as the negative expected log pdf under itself.
        '''
        return -1.0 * self.E_log_pdf_Phi(self)

    def get_natural_params(self):
        ''' Return the natural-parameter tuple (t1, t2, t3, t4) of this distr.
        '''
        t1 = self.a
        t2 = self.b + 0.5 * self.kappa * np.square(self.m)
        t3 = self.kappa * self.m
        t4 = self.kappa
        etatuple = t1, t2, t3, t4
        return etatuple

    def set_natural_params(self, etatuple):
        ''' Set a, b, m, kappa from a natural-parameter tuple and clear cache.
        '''
        self.a = etatuple[0]
        self.kappa = etatuple[3]
        self.m = etatuple[2]/self.kappa
        self.b = etatuple[1] - 0.5 * self.kappa * np.square(self.m)
        self.Cache = dict()

    ######################################################### Custom Accessors
    #########################################################
    def E_logLam(self):
        ''' E[ \log \lambda_d ]

            Returns
            -------
            1D array, length D
        '''
        return digamma(self.a) - np.log(self.b)

    def E_sumlogLam(self):
        ''' \sum_d E[ \log \lambda_d ]

            Returns
            -------
            float, scalar
        '''
        return np.sum(digamma(self.a) - np.log(self.b))

    def E_Lam(self):
        ''' E[ \lambda_d ]
            (docstring fixed: previously mislabeled as E[\lambda_d * \mu_d])

            Returns vector, length D
        '''
        return (self.a / self.b)

    def E_LamMu(self):
        ''' E[ \lambda_d * \mu_d ]

            Returns vector, length D
        '''
        return (self.a / self.b) * self.m

    def E_LamMu2(self):
        ''' E[ \lambda_d * \mu_d * \mu_d ]

            Returns vector, length D
        '''
        return (self.a / self.b) * np.square(self.m) + 1./self.kappa

    ############################################################## I/O
    ##############################################################
    def to_dict(self):
        ''' Convert attributes of this GaussGammaDistr into a dict
            useful for long-term storage to disk, pickling, etc.

            Returns
            -------
            Dict with entries for each named parameter: a, b, m, kappa
        '''
        return dict(name=self.__class__.__name__, \
                    m=self.m, kappa=self.kappa, a=self.a, b=self.b)

    def from_dict(self, Dict):
        ''' Internally set this GaussGammaDistr's parameters via provided dict

            Returns
            --------
            None. This Distr's parameters set to new values.
        '''
        self.m = Dict['m']
        self.a = Dict['a']
        self.b = Dict['b']
        self.kappa = Dict['kappa']
        self.D = self.b.shape[0]
        self.Cache = dict()

    def to_string(self, offset="  "):
        ''' Two-dimensional preview of the posterior means, for logging.
        '''
        Elam = self.a[:2] / self.b[:2]
        if self.D > 2:
            sfx = '...\n'
        else:
            sfx = '\n'
        np.set_printoptions(precision=3, suppress=False)
        msg = offset + 'E[ mean \mu ]  ' + str(self.m[:2]) + sfx
        msg += offset + 'E[ precision \lambda ]' + str(Elam) + sfx
        return msg
|
nilq/baby-python
|
python
|
import torch
from .defaults import get_default_config
def update_config(config):
    """Fill in dataset-dependent fields of `config` and pick a compute device.

    Returns the same config object, mutated in place.
    """
    name = config.dataset.name
    if name in ('CIFAR10', 'CIFAR100'):
        config.dataset.dataset_dir = f'~/.torch/datasets/{name}'
        config.dataset.image_size = 32
        config.dataset.n_channels = 3
        # 'CIFAR10' -> 10, 'CIFAR100' -> 100 (digits after 'CIFAR').
        config.dataset.n_classes = int(name[5:])
    elif name in ('MNIST', 'FashionMNIST', 'KMNIST'):
        config.dataset.dataset_dir = '~/.torch/datasets'
        config.dataset.image_size = 28
        config.dataset.n_channels = 1
        config.dataset.n_classes = 10
    # Fall back to CPU when CUDA is not available.
    if not torch.cuda.is_available():
        config.device = 'cpu'
    return config
|
nilq/baby-python
|
python
|
from time import sleep
import logging
import pytest
from common.utils import resize_browser
from common.asserts import assert_customer_logo, assert_customer_testimonial, assert_typography, assert_overflowing
from common.svb_form import assert_required_fields_top, assert_bad_email_top, assert_non_business_email_top, assert_success_form_top, assert_svb_contact_form_required_fields, assert_svb_contact_form_invalid_name, assert_svb_contact_form_invalid_phone, assert_svb_contact_form_invalid_phone_length_min, assert_svb_contact_form_invalid_phone_length_max, assert_svb_contact_form_success
logger = logging.getLogger(__name__)


@pytest.fixture(scope='function')
def browser(module_browser, base_url, request):
    '''Resize the shared browser to the parametrized resolution and open /svb.'''
    resize_browser(browser=module_browser, resolution=request.param)
    module_browser.get(base_url + '/svb')
    # Fixed wait for the page's async content to settle before assertions.
    sleep(4)
    return module_browser


# Each test is parametrized (indirectly, through the fixture) over
# desktop and/or mobile resolutions; the assertion logic lives in common.*.
@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_page_overflow(browser):
    assert_overflowing(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_customer_logo(browser):
    assert_customer_logo(browser)


# Testimonial layout is only asserted on desktop.
@pytest.mark.parametrize('browser', [('desktop_1')], indirect=True)
def test_customer_testimonial(browser):
    assert_customer_testimonial(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_typography(browser):
    assert_typography(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_required_fields_top(browser):
    assert_required_fields_top(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_bad_email_top(browser):
    assert_bad_email_top(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_non_business_email_top(browser):
    assert_non_business_email_top(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_success_form_top(browser):
    assert_success_form_top(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_required_fields(browser):
    assert_svb_contact_form_required_fields(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_name(browser):
    assert_svb_contact_form_invalid_name(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_phone(browser):
    assert_svb_contact_form_invalid_phone(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_phone_length_min(browser):
    assert_svb_contact_form_invalid_phone_length_min(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_invalid_phone_length_max(browser):
    assert_svb_contact_form_invalid_phone_length_max(browser)


@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)
def test_svb_contact_form_success(browser):
    assert_svb_contact_form_success(browser)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import glob
import re
import json
import os
import shutil
from PIL import Image
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
# Target image edge length in pixels (images are resized to this square size).
IMAGE_SIZE = 224
# Number of color channels (RGB).
CHANNEL_SIZE = 3
# Build the class-label dictionary from the image/ directory layout.
def make_label_list():
    """Map each subdirectory of image/ to a label and persist the mapping.

    Returns the label dict; also writes it to ./data/label_dic.json.
    """
    # One subdirectory of image/ per class.
    dir_path_list = glob.glob('image/*')
    label_dic = {}
    for i, dir_path in enumerate(dir_path_list):
        key = re.search(r'image/(.+)', dir_path)
        key = key.group(1)
        # label_dic[key] = i
        # NOTE(review): every class is mapped to label 1 (the per-class
        # variant above is kept commented out) — confirm this is intended.
        label_dic[key] = 1
    # BUG FIX: the dict was json.dumps()-ed and then json.dump()-ed again,
    # which double-encoded it as a JSON *string*; dump the dict directly so
    # json.load() on the file yields a dict again.
    with open('./data/label_dic.json', 'w') as f:
        json.dump(label_dic, f)
    return label_dic
# Convert an image file into normalized numeric array data.
def convert_image(img_path):
    try:
        img = load_img(img_path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
        x = img_to_array(img)
        # Normalize pixel values to [0, 1].
        x = x / 255.0
        return x
    except Exception as e:
        # Unreadable image: quarantine it in noise/ and report None.
        shutil.move(img_path, 'noise')
        x = None
        print('[Error] {0} <{1}>'.format(img_path, e))
        return x
# Look up the label for an image path and return it as a numpy scalar.
def get_label_data(img_path, label_dic):
    """Return the int32 label for img_path, keyed by its top-level class dir."""
    # The class name is the first path component under image/.
    match = re.search(r'image/(.+)/.+/.+/.+/.+', img_path)
    class_name = match.group(1)
    label = label_dic[class_name]
    return np.asarray(label, dtype=np.int32)
# Extract the file stem (name without .jpg) from an image path.
def get_image_name(img_path):
    """Return the base name of the .jpg file at the fifth path level."""
    match = re.search(r'image/.+/.+/.+/.+/(.+).jpg', img_path)
    return match.group(1)
# Create the dataset: convert every image to .npy feature/label files.
def make_dataset(label_dic):
    # List of every person directory (image/<class>/<g1>/<g2>/<person>).
    person_path_list = glob.glob('image/*/*/*/*')
    for person_path in person_path_list:
        # Name of the person pictured.
        person_name = re.search(r'image/.+/.+/.+/(.+)', person_path)
        person_name = person_name.group(1)
        # Name of the directory that holds the images.
        dir_name = re.search(r'image/(.+/.+/.+)/.+', person_path)
        dir_name = dir_name.group(1)
        # Directory where the image/label files will be saved.
        save_dir = './data/' + dir_name + '/' + person_name
        # Create the save directory if it does not exist yet.
        if not os.path.exists(save_dir): os.makedirs(save_dir)
        # All .jpg images inside this person's directory.
        img_path_list = glob.glob(person_path+'/*.jpg')
        if img_path_list == []:
            # No images: quarantine the directory. NOTE(review): execution
            # falls through here (no `continue`), but the loops below are
            # no-ops since img_path_list is empty.
            shutil.move(person_path, 'noise')
            print('[Remove] {0}'.format(person_path))
        # Lists for image and label data.
        # NOTE(review): image_data/label_data are never appended to —
        # arrays are saved per-image below instead; confirm they are vestigial.
        image_data = []
        label_data = []
        for img_path in img_path_list:
            # Convert the image to numeric data.
            x = convert_image(img_path)
            if x is None:
                continue
            # Fetch the label for this image.
            t = get_label_data(img_path, label_dic)
            image_name = get_image_name(img_path)
            # Path for the saved image data.
            save_image_path = save_dir + '/' + image_name + '_image.npy'
            # Path for the saved label data.
            save_label_path = save_dir + '/' + image_name + '_label.npy'
            # Save the image data to file.
            np.save(save_image_path, x)
            # Save the label data to file.
            np.save(save_label_path, t)
        print('[Save] {0}: {1}'.format(person_name, len(img_path_list)))
    print()
    occupation_path_list = glob.glob('image/*')
    for occupation_path in occupation_path_list:
        # Occupation (top-level class) name.
        occupation_name = re.search(r'image/(.+)', occupation_path)
        occupation_name = occupation_name.group(1)
        # All images under this occupation.
        img_path_list = glob.glob(occupation_path + '/*/*/*')
        print('{0}: {1}'.format(occupation_name, len(img_path_list)))
    print()
    # Total number of images across the dataset.
    img_path_list = glob.glob('image/*/*/*/*/*')
    print('total: {0}'.format(len(img_path_list)))
if __name__ == '__main__':
    # Build the class-label mapping.
    label_dic = make_label_list()
    # Convert all images into the .npy dataset.
    make_dataset(label_dic)
|
nilq/baby-python
|
python
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for CustomUser: username, email and the password twice."""
    password1 = forms.CharField(
        label= ("Password"),
        strip=False,
        widget=forms.PasswordInput,
    )
    password2 = forms.CharField(
        label= ("Password confirmation"),
        widget=forms.PasswordInput,
        strip=False,
    )

    # BUG FIX: Meta must extend UserCreationForm.Meta (the options holder),
    # not the UserCreationForm form class itself.
    class Meta(UserCreationForm.Meta):
        model = CustomUser
        fields = ('username', 'email')
class CustomUserChangeForm(UserChangeForm):
    """Admin/profile edit form restricted to CustomUser's username and email."""
    class Meta:
        model = CustomUser
        fields = ('username', 'email')
|
nilq/baby-python
|
python
|
'''
Created on Sep 20, 2013
@author: nshearer
'''
from ConsoleYesNoQuestion import ConsoleYesNoQuestion
class ConsoleActionPrompt(ConsoleYesNoQuestion):
    '''Present an action prompt on the console'''

    def present_question(self):
        # Announce that this question requires the user to take an action,
        # then fall back to the normal yes/no rendering.
        print ""
        print "-- ACTION --"
        super(ConsoleActionPrompt, self).present_question()
        # previous_answer is True when the task was completed in an earlier run.
        if self.question.previous_answer is True:
            print "* This task has already been completed *"
|
nilq/baby-python
|
python
|
import sedate
from datetime import timedelta, time
from itertools import groupby
from sqlalchemy import types
from sqlalchemy.schema import Column
from sqlalchemy.schema import Index
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.orm import object_session
from sqlalchemy.orm.util import has_identity
from libres.modules import utils
from libres.modules.rasterizer import (
rasterize_start,
rasterize_span,
rasterize_end,
iterate_span,
MIN_RASTER
)
from libres.db.models import ORMBase
from libres.db.models.types import UUID, UTCDateTime, JSON
from libres.db.models.other import OtherModels
from libres.db.models.timestamp import TimestampMixin
from sqlalchemy.ext.hybrid import hybrid_property
class Allocation(TimestampMixin, ORMBase, OtherModels):
    """Describes a timespan within which one or many timeslots can be
    reserved.

    There's an important concept to understand before working with allocations.
    The resource uuid of an alloction is not always pointing to the actual
    resource.

    A resource may in fact be a real resource, or an imaginary resource with
    a uuid derived from the real resource. This is a somewhat historical
    artifact.

    If you need to know which allocations belong to a real resource, the
    mirror_of field is what's relevant. The originally created allocation
    with the real_resource is also called the master-allocation and it is
    the one allocation with mirror_of and resource being equal.

    When in doubt look at the managed_* functions of the
    :class:`.scheduler.Scheduler` class.
    """

    __tablename__ = 'allocations'

    #: the id of the allocation, autoincremented
    id = Column(types.Integer(), primary_key=True, autoincrement=True)

    #: the resource uuid of the allocation, may not be an actual resource
    #: see :class:`.models.Allocation` for more information
    resource = Column(UUID(), nullable=False)

    #: the polymorphic type of the allocation
    type = Column(types.Text(), nullable=True)

    #: resource of which this allocation is a mirror. If the mirror_of
    #: attribute equals the resource, this is a real resource
    #: see :class:`.models.Allocation` for more information
    mirror_of = Column(UUID(), nullable=False)

    #: Group uuid to which this allocation belongs to. Every allocation has a
    #: group but some allcations may be the only one in their group.
    group = Column(UUID(), nullable=False)

    #: Number of times this allocation may be reserved
    quota = Column(types.Integer(), default=1)

    #: Maximum number of times this allocation may be reserved with one
    #: single reservation.
    quota_limit = Column(types.Integer(), default=0, nullable=False)

    #: Partly available allocations may be reserved partially. How They may
    #: be partitioned is defined by the allocation's raster.
    partly_available = Column(types.Boolean(), default=False)

    #: True if reservations for this allocation must be approved manually.
    approve_manually = Column(types.Boolean(), default=False)

    #: The timezone this allocation resides in.
    timezone = Column(types.String())

    #: Custom data reserved for the user
    data = Column(
        JSON(),
        nullable=True
    )

    # Internal storage columns; exposed through the rasterizing
    # start/end/raster properties below.
    _start = Column(UTCDateTime(timezone=False), nullable=False)
    _end = Column(UTCDateTime(timezone=False), nullable=False)
    _raster = Column(types.Integer(), nullable=False)

    __table_args__ = (
        Index('mirror_resource_ix', 'mirror_of', 'resource'),
        UniqueConstraint('resource', '_start', name='resource_start_ix')
    )

    __mapper_args__ = {
        'polymorphic_identity': None,
        'polymorphic_on': type
    }

    def __eq__(self, other):
        # NOTE(review): equality is value-based (resource + start) while
        # __hash__ below is identity-based, so two "equal" allocations can
        # hash differently — confirm this is intended before relying on
        # sets/dicts of allocations.
        return self.resource == other.resource and self._start == other._start

    def __hash__(self):
        return id(self)

    def copy(self):
        """ Creates a new copy of this allocation. """
        allocation = self.__class__()
        allocation.resource = self.resource
        allocation.mirror_of = self.mirror_of
        allocation.group = self.group
        allocation.quota = self.quota
        allocation.partly_available = self.partly_available
        allocation.approve_manually = self.approve_manually
        allocation.timezone = self.timezone
        allocation.data = self.data
        allocation._start = self._start
        allocation._end = self._end
        allocation._raster = self._raster
        return allocation

    def get_start(self):
        return self._start

    def set_start(self, start):
        # Naive datetimes are interpreted in the allocation's timezone.
        if not start.tzinfo:
            assert self.timezone
            start = sedate.replace_timezone(start, self.timezone)
        if self.raster is not None:
            self._start = rasterize_start(start, self.raster)
        else:
            # Raster not yet set (see set_raster); use the finest raster
            # for now — set_raster will re-rasterize.
            self._start = rasterize_start(start, MIN_RASTER)

    #: The start of this allocation. Must be timezone aware.
    #: This date is rastered by the allocation's raster.
    start = property(get_start, set_start)

    def get_end(self):
        return self._end

    def set_end(self, end):
        # Naive datetimes are interpreted in the allocation's timezone.
        if not end.tzinfo:
            assert self.timezone
            end = sedate.replace_timezone(end, self.timezone)
        if self.raster is not None:
            self._end = rasterize_end(end, self.raster)
        else:
            self._end = rasterize_end(end, MIN_RASTER)

    #: The end of this allocation. Must be timezone aware.
    #: This date is rastered by the allocation's raster.
    #: The end date is stored with an offset of minues one microsecond
    #: to avoid overlaps with other allocations.
    #: That is to say an allocation that ends at 15:00 really ends at
    #: 14:59:59.999999
    end = property(get_end, set_end)

    def get_raster(self):
        return self._raster

    def set_raster(self, raster):
        # the raster can only be set once!
        assert not self._raster
        self._raster = raster
        # re-rasterize start/end - during initialization it's possible for
        # them not to be setup correctly because that's done using
        # kwargs which has a random order. So it might set start, end, raster
        # in this order one time, then raster, start, end another time.
        #
        # this should of course only happen once - hence the assertion above
        if self._start:
            self._start = rasterize_start(self._start, self.raster)
        if self._end:
            self._end = rasterize_end(self._end, self.raster)

    raster = property(get_raster, set_raster)

    def display_start(self, timezone=None):
        """Returns the start in either the timezone given or the timezone
        on the allocation."""
        return sedate.to_timezone(self.start, timezone or self.timezone)

    def display_end(self, timezone=None):
        """Returns the end plus one microsecond in either the timezone given
        or the timezone on the allocation.
        """
        end = self.end + timedelta(microseconds=1)
        return sedate.to_timezone(end, timezone or self.timezone)

    def _prepare_range(self, start, end):
        # Normalize both ends of a range to timezone-aware datetimes.
        if start:
            start = sedate.standardize_date(start, self.timezone)
        if end:
            end = sedate.standardize_date(end, self.timezone)
        return start, end

    @property
    def whole_day(self):
        """True if the allocation is a whole-day allocation.

        A whole-day allocation is not really special. It's just an allocation
        which starts at 0:00 and ends at 24:00 (or 23:59:59'999). Relative
        to its timezone.

        As such it can actually also span multiple days, only hours and minutes
        count.

        The use of this is to display allocations spanning days differently.
        """
        s, e = self.display_start(), self.display_end()
        assert s != e  # this can never be, except when caused by cosmic rays
        return sedate.is_whole_day(s, e, self.timezone)

    def overlaps(self, start, end):
        """ Returns true if the allocation overlaps with the given dates. """
        start, end = self._prepare_range(start, end)
        start, end = rasterize_span(start, end, self.raster)
        return sedate.overlaps(start, end, self.start, self.end)

    def contains(self, start, end):
        """ Returns true if the the allocation contains the given dates. """
        start, end = self._prepare_range(start, end)
        start, end = rasterize_span(start, end, self.raster)
        return self.start <= start and end <= self.end

    def free_slots(self, start=None, end=None):
        """ Returns the slots which are not yet reserved. """
        reserved = [slot.start for slot in self.reserved_slots]
        slots = []
        for start, end in self.all_slots(start, end):
            if start not in reserved:
                slots.append((start, end))
        return slots

    def align_dates(self, start=None, end=None):
        """ Aligns the given dates to the start and end date of the allocation.
        """
        start, end = self._prepare_range(start, end)
        # Clamp the given range to the allocation's own range.
        start = start or self.start
        start = start < self.start and self.start or start
        end = end or self.end
        end = end > self.end and self.end or end
        return start, end

    def all_slots(self, start=None, end=None):
        """ Returns the slots which exist with this timespan. Reserved or free.
        """
        start, end = self.align_dates(start, end)
        if self.partly_available:
            for start, end in iterate_span(start, end, self.raster):
                yield start, end
        else:
            # A non-partly-available allocation is one indivisible slot.
            yield self.start, self.end

    def count_slots(self, start=None, end=None):
        """ Returns the number of slots which exist with this timespan.
        Reserved or free.
        """
        if not self.partly_available:
            return 1
        start, end = self.align_dates(start, end)
        seconds = (end + timedelta(microseconds=1) - start).total_seconds()
        # NOTE(review): floor division on a float yields a float (e.g. 4.0) —
        # confirm callers are fine with a non-int count.
        return seconds // (self.raster * 60)

    def is_available(self, start=None, end=None):
        """ Returns true if the given daterange is completely available. """
        if not (start and end):
            start, end = self.start, self.end
        assert self.overlaps(start, end)
        reserved = {slot.start for slot in self.reserved_slots}
        for start, end in self.all_slots(start, end):
            if start in reserved:
                return False
        return True

    def limit_timespan(self, start, end, timezone=None):
        """ Takes the given timespan and moves the start/end date to
        the closest reservable slot. So if 10:00 - 11:00 is requested it will

        - on a partly available allocation return 10:00 - 11:00 if the raster
          allows for that

        - on a non-partly available allocation return the start/end date of
          the allocation itself.

        The resulting times are combined with the allocations start/end date
        to form a datetime. (time in, datetime out -> maybe not the best idea)
        """
        timezone = timezone or self.timezone
        if self.partly_available:
            assert isinstance(start, time)
            assert isinstance(end, time)
            s, e = sedate.get_date_range(
                self.display_start(timezone), start, end
            )
            # Clamp to the allocation's own span.
            if self.display_end(timezone) < e:
                e = self.display_end()
            if self.display_start(timezone) > s:
                s = self.display_start()
            s, e = rasterize_span(s, e, self.raster)
            return s, e + timedelta(microseconds=1)
        else:
            return self.display_start(timezone), self.display_end(timezone)

    @property
    def pending_reservations(self):
        """ Returns the pending reservations query for this allocation.
        As the pending reservations target the group and not a specific
        allocation this function returns the same value for masters and
        mirrors.
        """
        assert not self.is_transient, (
            "Don't call if the allocation does not yet exist"
        )
        Reservation = self.models.Reservation
        query = object_session(self).query(Reservation.id)
        query = query.filter(Reservation.target == self.group)
        query = query.filter(Reservation.status == u'pending')
        return query

    @property
    def waitinglist_length(self):
        # Number of pending (unapproved) reservations targeting this group.
        return self.pending_reservations.count()

    @property
    def availability(self):
        """Returns the availability in percent."""
        total = self.count_slots()
        used = len(self.reserved_slots)
        if total == used:
            return 0.0
        if used == 0:
            return 100.0
        return 100.0 - (float(used) / float(total) * 100.0)

    @property
    def in_group(self):
        """True if the event is in any group."""
        # limit(2) is enough: we only need to know whether more than one
        # allocation shares this group.
        query = object_session(self).query(Allocation.id)
        query = query.filter(Allocation.resource == self.resource)
        query = query.filter(Allocation.group == self.group)
        query = query.limit(2)
        return len(query.all()) > 1

    @property
    def quota_left(self):
        # this can be done quickly if this is a master with a quota of 1
        if self.is_master and self.quota == 1:
            return 1 if self.is_available() else 0
        # if not we need to go through the mirrors
        free_quota = 0
        for mirror in self.siblings():
            if mirror.is_available():
                free_quota += 1
        return free_quota

    def find_spot(self, start, end):
        """ Returns the first free allocation spot amongst the master and the
        mirrors. Honors the quota set on the master and will only try the
        master if the quota is set to 1.

        If no spot can be found, None is returned.
        """
        master = self.get_master()
        if master.is_available(start, end):
            return master
        if master.quota == 1:
            return None
        # Try up to quota - 1 mirrors before giving up.
        tries = master.quota - 1
        for mirror in (m for m in self.siblings() if not m.is_master):
            if mirror.is_available(start, end):
                return mirror
            if tries >= 1:
                tries -= 1
            else:
                return None

    @property
    def is_separate(self):
        """True if available separately (as opposed to available only as
        part of a group)."""
        if self.partly_available:
            return True
        if self.in_group:
            return False
        return True

    def availability_partitions(self):
        """Partitions the space between start and end into blocks of either
        free or reserved time. Each block has a percentage representing the
        space the block occupies compared to the size of the whole allocation.

        The blocks are ordered from start to end. Each block is an item with
        two values. The first being the percentage, the second being true if
        the block is reserved.

        So given an allocation that goes from 8 to 9 and a reservation that
        goes from 8:15 until 8:30 we get the following blocks::

            [
                (25%, False),
                (25%, True),
                (50%, False)
            ]

        This is useful to divide an allocation block into different divs on the
        frontend, indicating to the user which parts of an allocation are
        reserved.
        """
        if (len(self.reserved_slots) == 0):
            return [(100.0, False)]
        reserved = {r.start for r in self.reserved_slots}
        # Get the percentage one slot represents
        slots = tuple(s[0] for s in self.all_slots())
        step = 100.0 / float(len(slots))
        # Create an entry for each slot with either True or False
        pieces = tuple(s in reserved for s in slots)
        # Group by the true/false values in the pieces and sum up the
        # percentage
        partitions = []
        total = 0
        for flag, group in groupby(pieces, key=lambda p: p):
            percentage = sum(1 for item in group) * step
            partitions.append([percentage, flag])
            total += percentage
        # Make sure to get rid of floating point rounding errors
        diff = 100.0 - total
        # NOTE(review): partitions[-1:][0] is the same list object as
        # partitions[-1] (slicing copies the outer list, not the row lists),
        # so this does mutate the last partition — partitions[-1][0] would
        # be the clearer spelling.
        partitions[-1:][0][0] -= diff
        return partitions

    @property
    def is_transient(self):
        """True if the allocation does not exist in the database, and is not
        about to be written to the database. If an allocation is transient it
        means that the given instance only exists in memory.

        See:
        http://www.sqlalchemy.org/docs/orm/session.html
        #quickie-intro-to-object-states
        http://stackoverflow.com/questions/3885601/
        sqlalchemy-get-object-instance-state
        """
        return object_session(self) is None and not has_identity(self)

    @hybrid_property
    def is_master(self):
        """True if the allocation is a master allocation."""
        return self.resource == self.mirror_of

    def get_master(self):
        # The master is the allocation with the same start whose resource
        # equals this allocation's mirror_of.
        if self.is_master:
            return self
        else:
            query = object_session(self).query(Allocation)
            query = query.filter(Allocation._start == self._start)
            query = query.filter(Allocation.resource == self.mirror_of)
            return query.one()

    def siblings(self, imaginary=True):
        """Returns the master/mirrors group this allocation is part of.

        If 'imaginary' is true, inexistant mirrors are created on the fly.
        those mirrors are transient (see self.is_transient)
        """
        # this function should always have itself in the result
        if not imaginary and self.is_transient:
            assert False, "the resulting list wouldn't contain this allocation"
        if self.quota == 1:
            assert self.is_master
            return [self]
        query = object_session(self).query(Allocation)
        query = query.filter(Allocation.mirror_of == self.mirror_of)
        query = query.filter(Allocation._start == self._start)
        existing = dict(((e.resource, e) for e in query))
        master = self.is_master and self or existing[self.mirror_of]
        existing[master.resource] = master
        # Mirror uuids are derived deterministically from the master's.
        uuids = utils.generate_uuids(master.resource, master.quota)
        # Number of transient mirrors still to be created on the fly.
        imaginary = imaginary and (master.quota - len(existing)) or 0
        siblings = [master]
        for uuid in uuids:
            if uuid in existing:
                siblings.append(existing[uuid])
            elif imaginary > 0:
                allocation = master.copy()
                allocation.resource = uuid
                siblings.append(allocation)
                imaginary -= 1
        return siblings
|
nilq/baby-python
|
python
|
from collections.abc import Mapping
import shelve
import random
import time


class ConcurrentShelf(Mapping):
    """A shelve-backed mapping usable from multiple processes.

    Opening the shelf locks the underlying dbm file; while another process
    holds the lock, operations retry with a random back-off until
    ``time_out_seconds`` elapses.  By default every read/write opens and
    closes the shelf; call :meth:`lock` to hold it open across several
    operations and :meth:`unlock` to release it.

    NOTE: despite inheriting the read-only ``Mapping`` ABC, the class also
    supports item assignment via ``__setitem__``.
    """

    def __init__(self, file_name, time_out_seconds=60):
        self._file_name = file_name
        self._time_out_seconds = time_out_seconds
        self._locked_shelf = None
        # Create the shelf file up front so later read-only opens succeed.
        shelf = self._open(write=True)
        shelf.close()

    def __del__(self):
        if self._locked_shelf is not None:
            self._locked_shelf.close()

    def _open(self, write=False):
        """Open the shelf, retrying while another process holds the lock.

        Raises RuntimeError once ``time_out_seconds`` have passed without
        gaining access.
        """
        flag = 'c' if write else 'r'
        start = time.time()
        while True:
            if time.time() - start > self._time_out_seconds:
                raise RuntimeError('ConcurrentShelf time out, cannot gain access to shelf on disk')
            try:
                return shelve.open(self._file_name, flag=flag)
            except Exception as e:
                # dbm signals a held lock with EAGAIN; anything else is fatal.
                # (The previous version had a bare `next` statement here — a
                # no-op evaluating the builtin; `continue` was intended.)
                if '[Errno 11] Resource temporarily unavailable' in str(e):
                    time.sleep(random.uniform(0.01, 0.250))
                    continue
                raise

    def lock(self, write=True):
        """Hold the shelf open until unlock() so operations share one handle."""
        self._locked_shelf = self._open(write=write)

    def unlock(self):
        """Close and release a shelf previously held open by lock()."""
        if self._locked_shelf is not None:
            self._locked_shelf.close()
            self._locked_shelf = None

    def __getitem__(self, key):
        if self._locked_shelf is not None:
            return self._locked_shelf[key]
        shelf = self._open()
        try:
            return shelf[key]
        finally:
            # Always release the handle, even when the key is missing
            # (the previous version leaked it on unexpected errors).
            shelf.close()

    def __setitem__(self, key, value):
        if self._locked_shelf is not None:
            self._locked_shelf[key] = value
            return
        shelf = self._open(write=True)
        try:
            shelf[key] = value
        finally:
            shelf.close()

    def __iter__(self):
        if self._locked_shelf is not None:
            for key in self._locked_shelf:
                yield key
        else:
            shelf = self._open()
            try:
                for key in shelf:
                    yield key
            finally:
                shelf.close()

    def __len__(self):
        if self._locked_shelf is not None:
            return len(self._locked_shelf)
        shelf = self._open()
        try:
            return len(shelf)
        finally:
            shelf.close()
|
nilq/baby-python
|
python
|
from flask import Blueprint, request, jsonify
from ortools.sat.python import cp_model
import numpy
# Blueprint mounted by the application factory; the route below registers on it.
bp = Blueprint('optimize', __name__)
@bp.route('/', methods=['POST'])
def recieve_data():
    """POST endpoint: build the CP-SAT boat-assignment model from the request
    payload, solve it, and return the resulting crews as JSON.

    The payload is a JSON array: [juniors, boat_parameters, constraints,
    maxTime] with an optional fifth element carrying hints from a previous
    solve.
    """
    # NOTE(review): doubled assignment ("model = model = ...") is a harmless
    # typo — a single assignment would do.
    model = model = cp_model.CpModel()
    juniors = request.get_json()[0]
    boat_parameters = request.get_json()[1]
    constraints = request.get_json()[2]
    maxTime = int(request.get_json()[3])
    # Normalise boat parameters, substituting defaults for missing values.
    boat_parameters['minCrew'] = int(boat_parameters['minCrew']) if boat_parameters['minCrew'] else 0
    boat_parameters['noBoats'] = int(boat_parameters['noBoats'])
    boat_parameters['maxCrew'] = int(boat_parameters['maxCrew']) if boat_parameters['maxCrew'] else len(juniors)
    boat_parameters['useAllBoats'] = bool(boat_parameters['useAllBoats'])
    # Decision variables: seating (x), pairings (y), boat usage, worst boat score.
    variables = {}
    variables['x'] = create_x_var(model, juniors, boat_parameters['noBoats'])
    variables['y'] = create_y_var(model, juniors, boat_parameters['noBoats'])
    variables['boat_used'] = create_boat_var(model, boat_parameters['noBoats'])
    variables['worst_boat'] = model.NewIntVar(0, 1, 'worst_boat')
    pref_matrix = create_pref_matrix(juniors)
    create_std_constraints(model, variables, boat_parameters, juniors, pref_matrix)
    create_custom_constraints(model, variables, constraints, juniors, boat_parameters)
    # Objective: total fulfilled wishes plus the score of the worst boat.
    sum_exp = sum(pref_matrix[i][j] * variables['y'][i, j, b]
        for i in range(len(juniors)) for j in range(len(juniors)) for b in range(boat_parameters['noBoats']) if j != i)
    model.Maximize(variables['worst_boat'] + sum_exp)
    # Debug - print the whole model!
    # print(solver.ExportModelAsLpFormat(False).replace('\\', '').replace(',_', ','), sep='\n')
    # Check if there are hints, i.e. not first iteration
    if len(request.get_json()) > 4:
        hints = request.get_json()[4]
        create_hints(variables, hints, juniors, boat_parameters['noBoats'], model)
    solver = cp_model.CpSolver()
    # Abort the solver after the requested time limit (seconds).
    solver.parameters.max_time_in_seconds = maxTime
    status = solver.Solve(model)
    print(solver.ResponseStats())
    if status == cp_model.INFEASIBLE:
        return dict(success=False, status='Infeasible')
    if status == cp_model.MODEL_INVALID:
        return dict(success=False, status='Model invalid')
    if status == cp_model.UNKNOWN:
        return dict(success=False, status='Unknown')
    # Feasible or optimal: build the payload, including hints for a re-solve.
    retval = create_retval(variables, juniors, boat_parameters['noBoats'], solver)
    if status == cp_model.OPTIMAL:
        retval['status'] = 'Optimal'
    elif status == cp_model.FEASIBLE:
        retval['status'] = 'Feasible'
    retval['hints'] = get_current_variable_values(variables, juniors, boat_parameters['noBoats'], solver)
    retval['solver_response'] = solver.ResponseStats()
    retval['objective_value'] = solver.ObjectiveValue()
    return jsonify(retval)
# x[i, b] is 1 if junior i sits in boat b, 0 otherwise
def create_x_var(model, juniors, no_boats):
    """Create the binary seating variables x[i, b] on *model*."""
    return {
        (i, b): model.NewIntVar(0, 1, 'x[Junior {}, Boat {}]'.format(i, b))
        for i in range(len(juniors))
        for b in range(no_boats)
    }
# y[i, j, b] is 1 if junior i shares boat b with junior j, 0 otherwise
def create_y_var(model, juniors, no_boats):
    """Create the binary pairing variables y[i, j, b] (only for i != j)."""
    count = len(juniors)
    pairing = {}
    for i in range(count):
        for j in range(count):
            if i == j:
                continue
            for b in range(no_boats):
                pairing[i, j, b] = model.NewIntVar(
                    0, 1, 'y[Jun i {}, Jun j {}, Boat {}]'.format(i, j, b))
    return pairing
# boat_used[b] = 1 if boat b is used, 0 otherwise
def create_boat_var(model, no_boats):
    """Create the binary boat-usage variables boat_used[b]."""
    return {
        b: model.NewIntVar(0, 1, 'boat_used[Boat {}]'.format(b))
        for b in range(no_boats)
    }
# p[i, j] = 1 if junior i wished to sail with junior j
def create_pref_matrix(juniors):
    """Build the wish matrix: p[i][j] == 1 iff junior i listed junior j's name.

    Replaces the previous hand-maintained index counters with enumerate,
    which removes the easy-to-desync manual i/j bookkeeping.
    """
    p = numpy.zeros(shape=(len(juniors), len(juniors)), dtype=int)
    for i, junior_i in enumerate(juniors):
        for j, junior_j in enumerate(juniors):
            if junior_j['name'] in junior_i['wishes']:
                p[i][j] = 1
    return p
def create_std_constraints(model, variables, boat_parameters, juniors, pref_matrix):
    """Add the structural constraints: one boat per junior, crew capacities,
    x/y linking and the worst-boat bound used by the objective."""
    # Each junior sits in exactly one boat
    for i in range(len(juniors)):
        model.Add(sum(variables['x'][i, b] for b in range(boat_parameters['noBoats'])) == 1)
    # Minimum crew per boat
    for b in range(boat_parameters['noBoats']):
        sum_exp = sum(variables['x'][i, b] for i in range(len(juniors)))
        if boat_parameters['useAllBoats']:
            # Every boat must sail, so each needs at least one junior.
            model.Add(sum_exp >= max(boat_parameters['minCrew'], 1))
        else:
            # Only boats flagged as used must satisfy the minimum.
            model.Add(sum_exp >= boat_parameters['minCrew'] * variables['boat_used'][b])
    # Maximum crew per boat
    if boat_parameters['noBoats']:
        for b in range(boat_parameters['noBoats']):
            sum_exp = sum(variables['x'][i, b] for i in range(len(juniors)))
            if boat_parameters['useAllBoats']:
                model.Add(sum_exp <= boat_parameters['maxCrew'])
            else:
                model.Add(sum_exp <= boat_parameters['maxCrew'] * variables['boat_used'][b])
    # Link x to y: y[i, j, b] may be 1 only if juniors i and j both sit in boat b
    for i in range(len(juniors)):
        for j in range(len(juniors)):
            for b in range(boat_parameters['noBoats']):
                if i != j:
                    model.Add(2 * variables['y'][i, j, b] <= variables['x'][i, b] + variables['x'][j, b])
    # Worst boat (feeds the objective): worst_boat <= fulfilled wishes of every boat
    for b in range(boat_parameters['noBoats']):
        sum_exp = sum(pref_matrix[i][j] * variables['y'][i, j, b] for i in range(len(juniors)) for j in range(len(juniors)) if j!= i)
        model.Add(variables['worst_boat'] <= sum_exp)
# Pair constraints forcing two juniors to sail together or apart
def create_custom_constraints(model, variables, constraints, juniors, boat_parameters):
    """Apply the user-defined pair constraints.

    Each constraint names two juniors and a ``mustSail`` flag:
    True  -> the pair must share a boat,
    False -> the pair may never share a boat.
    """
    for c in constraints:
        name1 = c['name1']
        name2 = c['name2']
        # Translate the junior names into list indices.
        i = list(filter(lambda j: juniors[j]['name'] == name1, range(len(juniors))))[0]
        j = list(filter(lambda j: juniors[j]['name'] == name2, range(len(juniors))))[0]
        if c['mustSail']:
            # Must sail together: exactly one boat where y[i, j, b] == 1.
            sum_exp = sum(variables['y'][i, j, b] for b in range(boat_parameters['noBoats']))
            model.Add(sum_exp == 1)
        else:
            # Must NOT sail together: the pair may never share a boat.
            # BUG FIX: the previous code required sum_b(x[i,b] + x[j,b]) == 1,
            # which is always infeasible — every junior sits in exactly one
            # boat, so that sum is identically 2. Constrain per boat instead.
            for b in range(boat_parameters['noBoats']):
                model.Add(variables['x'][i, b] + variables['x'][j, b] <= 1)
def create_hints(variables, hints, juniors, no_boats, model):
    """Seed the solver with a previous solution as hints for x and y.

    The hint payload comes from JSON, so its indices are string keys.
    """
    count = len(juniors)
    # Hints for the seating variables x[i, b]
    for i in range(count):
        for b in range(no_boats):
            model.AddHint(variables['x'][i, b], hints['x'][str(i)][str(b)])
    # Hints for the pairing variables y[i, j, b] (only defined for i != j)
    for i in range(count):
        for j in range(count):
            if i == j:
                continue
            for b in range(no_boats):
                model.AddHint(variables['y'][i, j, b], hints['y'][str(i)][str(j)][str(b)])
def create_retval(variables, juniors, no_boats, solver):
    """Build the response payload: each boat id mapped to its crew's names."""
    boats = {}
    for b in range(no_boats):
        boats[b] = [
            juniors[i]['name']
            for i in range(len(juniors))
            if solver.Value(variables['x'][i, b]) == 1
        ]
    return {'boats': boats, 'success': True}
def get_current_variable_values(variables, juniors, no_boats, solver):
    """Snapshot the solved x and y values, keyed by integer indices.

    The result feeds create_hints on the next solve iteration
    (y[i][i] stays an empty dict since y is undefined for i == j).
    """
    count = len(juniors)
    snapshot = {
        'x': {
            i: {b: solver.Value(variables['x'][i, b]) for b in range(no_boats)}
            for i in range(count)
        },
        'y': {},
    }
    for i in range(count):
        row = {}
        for j in range(count):
            if i == j:
                row[j] = {}
            else:
                row[j] = {
                    b: solver.Value(variables['y'][i, j, b])
                    for b in range(no_boats)
                }
        snapshot['y'][i] = row
    return snapshot
# In Python, you can also set the constraints as follows.
# for i in range(number_of_constraints):
#    constraint_expr = [coeffs[i][j] * x[j] for j in range(data['number_of_variables'])]
#    solver.Add(sum(constraint_expr) <= data['bounds'][i])
# serialize (create a dict of yourself) for every index in the array
# jsonify
|
nilq/baby-python
|
python
|
import os
import re
import datetime
from mod_python import apache
# Upload timestamp captured once at module import; used to name saved files.
# NOTE(review): strftime("%s") is a platform-specific extension (not in the C
# standard) — confirm it works on the deployment OS.
NOW = str(datetime.datetime.utcnow().strftime("%s"))
DUMP_DIR="/var/www/html/dump"
# Ensure the dump directory exists at import time.
if not os.path.exists(DUMP_DIR):
    os.makedirs(DUMP_DIR)
def index(req):
    """mod_python entry point: accept an uploaded file and store it under DUMP_DIR.

    Expects a multipart form with a 'file' field and optional 'ethmac' /
    'password' fields used for weak client identification.
    """
    if not 'file' in req.form or not req.form['file'].filename:
        return "Error: Please upload a file"
    ethmac = "unset"
    password = "unset"
    if 'ethmac' in req.form: ethmac = req.form['ethmac']
    if 'password' in req.form: password = req.form['password']
    user = auth_user(ethmac,password)
    # Record which IP sent this file
    ip = req.get_remote_host(apache.REMOTE_NOLOOKUP)
    return save_file(req.form['file'], ip, user)
def auth_user(mac, password):
    """Return *mac* if it is a well-formed Ethernet MAC address, else "unset".

    NOTE: this does not verify the MAC actually belongs to the client;
    ``password`` is currently unused (kept for interface compatibility).
    """
    # BUG FIX: re.match only anchors the start of the string, so without an
    # end anchor a valid-looking MAC prefix followed by arbitrary text
    # (e.g. "aa:bb:cc:dd:ee:ff/../x") was accepted and flowed into a filename.
    if not re.match(r"([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})\Z", mac):
        return "unset"
    return mac
def save_file(fileitem, ip, user=None):
    """Stream an uploaded file to DUMP_DIR/<ip>/[<user>-]<NOW>.tgz.

    The file name is generated server-side (never taken from the client),
    which avoids directory-traversal via the uploaded filename.
    Returns a human-readable status string.
    """
    filename = NOW+".tgz"
    if user!=None:
        filename = user+"-"+filename
    # build absolute path to files directory
    host_dir = os.path.join(DUMP_DIR, ip)
    ensure_dir(host_dir)
    filepath = os.path.join(host_dir, filename)
    if os.path.exists(filepath):
        return "Error: file already exists"
    # Copy in fixed-size chunks; always release the handle (the previous
    # version never closed the file descriptor).
    fd = open(filepath, 'wb')
    try:
        while 1:
            chunk = fileitem.file.read(100000)
            if not chunk:
                break
            fd.write(chunk)
    finally:
        fd.close()
    return 'The file "%s" was uploaded successfully from %s' % (filepath, ip)
def ensure_dir(path):
    """Create *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
|
nilq/baby-python
|
python
|
# Ryan McCarthy, rbmccart@usc.edu
# ITP 115, Fall 2020
# Assignment 4
# Description:
# Part 1 takes a sentence from the user and counts the number of times a letter or special character appear
# this info is returned to the user
# Part 1: read the sentence to analyse
sentence = input('PART 1 - Character Counter\nPlease enter a sentence:')
# Remove spaces so they are not counted, and lowercase so capital letters
# are not treated as special characters.
sentence = sentence.replace(" ", "").lower()

# Count each letter a-z individually and lump everything else together as
# "special" — replaces the original 26-variable if/elif chain.
letters = "abcdefghijklmnopqrstuvwxyz"
counts = {ch: 0 for ch in letters}
special_count = 0
for ch in sentence:
    if ch in counts:
        counts[ch] += 1
    else:
        special_count += 1


def _distribution_row(label, n):
    # The original output shows "NONE" rather than an empty bar.
    return "{}: {}".format(label, "*" * n if n else "NONE")


rows = [_distribution_row(ch, counts[ch]) for ch in letters]
rows.append(_distribution_row("Special Characters", special_count))

# The original printed each row as a separate print() argument with "\n"
# arguments in between, which puts " \n " between consecutive rows —
# reproduce that exact layout.
print("Here is the character distribution:\n\n " + " \n ".join(rows))
|
nilq/baby-python
|
python
|
import peewee as pw
from core.model.base import BaseModel
from playhouse.shortcuts import model_to_dict
class Activity(BaseModel):
    """Peewee model for an activity, identified by a name and an image URL."""
    name = pw.CharField(null=False)
    url_image = pw.CharField(null=False)

    def to_dict(self, recurse=False, backrefs=False):
        # Serialize the row, dropping the bookkeeping timestamp columns.
        return model_to_dict(self, recurse=recurse, backrefs=backrefs, exclude=[Activity.created_at, Activity.updated_at])

    class Meta:
        db_table = "activities"
|
nilq/baby-python
|
python
|
# Keep asking until the user enters a value starting with M or F
# (case-insensitive, surrounding whitespace ignored).
sexo = str(input('Digite seu sexo: [M/F] ')).strip().upper()[:1]
while sexo not in ('M', 'F'):
    # [:1] instead of [0] so an empty answer re-prompts instead of
    # raising IndexError (the original crashed on empty input).
    sexo = str(input('Dados inválidos. Por favor, informe corretamente: ')).strip().upper()[:1]
print(sexo)
|
nilq/baby-python
|
python
|
import json
import jsonschema
import os
import re
from urllib.request import urlopen, Request
show_descriptions = True # If False, don't include 'name' as the description of 'licenseId'
repo = 'https://github.com/spdx/license-list-data/tree/master/json'
files = ['licenses.json', 'exceptions.json']
outfile = 'spdx-license-enums'
"""
Fetch current SPDX license list from repo
"""
def github_contents(web_url):
    """
    Convert a GitHub repo web page URL to the corresponding API directory URL
    :param web_url: https://github.com/spdx/license-list-data/tree/master/json
    :return: dir_url: https://api.github.com/repos/spdx/license-list-data/contents/json
        (None when the URL does not match the expected tree/master shape)
    """
    match = re.match(r'^(https://)(github.com/)(.*?)/tree/master/(.*)$', web_url)
    if match is None:
        return None
    scheme, host, owner_repo, subdir = match.groups()
    return '{}api.{}repos/{}/contents/{}'.format(scheme, host, owner_repo, subdir)
# Download the license and exception JSON files via the GitHub contents API.
# NOTE(review): requires a personal access token in the GitHubToken
# environment variable — this raises KeyError when it is unset.
data = {}
auth = {'Authorization': 'token ' + os.environ['GitHubToken']}
with urlopen(Request(github_contents(repo), headers=auth)) as d:
    dir = json.loads(d.read().decode())
for n, f in enumerate(dir):
    if f['name'] in files:
        with urlopen(Request(f['download_url'], headers=auth)) as file:
            # Key each dataset by its file stem: 'licenses' / 'exceptions'.
            data[os.path.splitext(f['name'])[0]] = json.loads(file.read().decode())
"""
Validate license list files
"""
llversion = data['licenses']['licenseListVersion']
print(f'License List Version {llversion}, {data["licenses"]["releaseDate"]}')
assert llversion == data['exceptions']['licenseListVersion']
with open('license_list_source.json') as f:
jschema = json.load(f)
jsonschema.Draft7Validator(jschema).validate({'licenselist': data['licenses']})
jsonschema.Draft7Validator(jschema).validate({'exceptionlist': data['exceptions']})
"""
Generate license and exception enumerations
"""
def item(license, le, desc=True):
    """Build one [referenceNumber, id, description] enumeration entry.

    :param license: dict for a single license or exception from the SPDX data
    :param le: 'l' for a license entry, 'e' for an exception entry
    :param desc: include the stripped 'name' as the description when True
    """
    id_field = {'l': 'licenseId', 'e': 'licenseExceptionId'}[le]
    description = license['name'].strip() if desc else ''
    return [int(license['referenceNumber']), license[id_field], description]
# Flatten each dataset into JADN enumeration item triples.
license_items = [item(k, 'l', show_descriptions) for k in data['licenses']['licenses']]
exception_items = [item(k, 'e', show_descriptions) for k in data['exceptions']['exceptions']]
# JADN schema wrapping both enumerations, stamped with the list version.
le_schema = {
    'meta': {
        'module': 'http://spdx.org/license-list/v3.0',
        'patch': llversion,
        'description': f'SPDX License List Enumerations, Version {llversion}, Released {data["licenses"]["releaseDate"]}',
        'exports': ["LicenseList", "ExceptionList"],
        'config': {'$MaxElements': 1000}     # Default is 100, 2020-07-21 license list has 441
    },
    'types': [
        ['LicenseList', 'Enumerated', [], '', license_items],
        ['ExceptionList', 'Enumerated', [], '', exception_items]
    ]
}
print(f'{len(license_items)} licenses, {len(exception_items)} exceptions')
# Write the versioned schema file into the local data/ directory.
fname = os.path.join('data', f'{outfile}-{llversion}.jadn')
with open(fname, 'w') as f:
    json.dump(le_schema, f)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementation for type editor
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import logging
from functools import partial
from Qt.QtCore import *
from Qt.QtWidgets import *
import tpDcc as tp
from tpDcc.libs.qt.core import base
from tpDcc.libs.qt.widgets import grid
import artellapipe
from artellapipe.tools.tagger.widgets import taggereditor
LOGGER = logging.getLogger()
class TypeEditor(taggereditor.TaggerEditor, object):
    """Tagger editor page that toggles tag *types* on the tag data node of
    the current DCC selection. The tag types themselves come from the
    project configuration (artellapipe.TagsMgr)."""

    EDITOR_TYPE = 'Type'

    def __init__(self, project, parent=None):
        super(TypeEditor, self).__init__(project=project, parent=parent)

    def ui(self):
        """Builds the editor UI: a borderless 4-column grid of tag buttons."""
        super(TypeEditor, self).ui()
        self._type_grid = grid.GridWidget()
        self._type_grid.setShowGrid(False)
        self._type_grid.setColumnCount(4)
        self._type_grid.horizontalHeader().hide()
        self._type_grid.verticalHeader().hide()
        self._type_grid.resizeRowsToContents()
        self._type_grid.resizeColumnsToContents()
        self.main_layout.addWidget(self._type_grid)

    def initialize(self):
        """Clears the grid and fills it with one checkable button per tag
        type defined in the current project."""
        self._type_grid.clear()
        tag_types = artellapipe.TagsMgr().get_tag_types()
        if not tag_types:
            LOGGER.warning('No Tag Types defined in current project!')
            return
        for tag_type in tag_types:
            tag_widget = TaggerTypeWidget(type_title=tag_type)
            # Bind the widget's normalized name now so each button updates
            # its own tag when toggled.
            tag_widget._btn.toggled.connect(partial(self.update_data, tag_widget.get_name()))
            self._type_grid.add_widget_first_empty_cell(tag_widget)

    def reset(self):
        """
        Function that resets all editor information
        """
        # NOTE(review): the body only toggles signal blocking and does not
        # clear or uncheck any widget — confirm this no-op is intended.
        try:
            self._type_grid.blockSignals(True)
        finally:
            self._type_grid.blockSignals(False)

    def update_tag_buttons_state(self, sel=None):
        """
        Updates the type tag buttons to reflect the 'types' attribute stored
        on the tag data node of the given (or current) selection
        :param sel: optional selection to resolve the tag data node from
        """
        tag_data_node = artellapipe.TagsMgr().get_tag_data_node_from_current_selection(sel)
        if tag_data_node is None:
            return
        # Start from everything unchecked, then re-check stored types.
        self.set_tag_widgets_state(False)
        attr_exists = tp.Dcc.attribute_exists(node=tag_data_node, attribute_name='types')
        if attr_exists:
            types = tp.Dcc.get_attribute_value(node=tag_data_node, attribute_name='types')
            if types is not None and types != '':
                # 'types' is stored as a whitespace-separated string.
                types = types.split()
                for t in types:
                    for i in range(self._type_grid.columnCount()):
                        for j in range(self._type_grid.rowCount()):
                            container_w = self._type_grid.cellWidget(j, i)
                            if container_w is not None:
                                tag_w = container_w.containedWidget
                                tag_name = tag_w.get_name()
                                if tag_name == t:
                                    # Block signals so re-checking does not
                                    # re-trigger update_data.
                                    tag_w._btn.blockSignals(True)
                                    tag_w._btn.setChecked(True)
                                    tag_w._btn.blockSignals(False)

    def fill_tag_node(self, tag_data_node, *args, **kwargs):
        """
        Fills given tag node with the data managed by this editor. The first
        positional argument is the button's checked state; kwargs['data'] is
        the type name to add (checked) or remove (unchecked).
        :param tag_data_node: str
        """
        attr_exists = tp.Dcc.attribute_exists(node=tag_data_node, attribute_name='types')
        if not attr_exists:
            tp.Dcc.add_string_attribute(node=tag_data_node, attribute_name='types')
        data = kwargs.get('data', None)
        types = tp.Dcc.get_attribute_value(node=tag_data_node, attribute_name='types')
        if args and args[0]:
            # Button checked: append the type if it is not already stored.
            if types is None or types == '':
                types = data
            else:
                types_split = types.split()
                if data in types_split:
                    return
                types_split.append(data)
                # NOTE(review): produces a trailing space in the stored string.
                types = ''.join(str(e) + ' ' for e in types_split)
            tp.Dcc.unlock_attribute(node=tag_data_node, attribute_name='types')
            tp.Dcc.set_string_attribute_value(node=tag_data_node, attribute_name='types', attribute_value=types)
            tp.Dcc.lock_attribute(node=tag_data_node, attribute_name='types')
        else:
            # Button unchecked: remove the type if present.
            if types is None or types == '':
                return
            types_split = types.split()
            if data in types_split:
                types_split.remove(data)
            else:
                return
            types = ''.join(str(e) + ' ' for e in types_split)
            tp.Dcc.unlock_attribute(node=tag_data_node, attribute_name='types')
            tp.Dcc.set_string_attribute_value(node=tag_data_node, attribute_name='types', attribute_value=types)
            tp.Dcc.lock_attribute(node=tag_data_node, attribute_name='types')

    def set_tag_widgets_state(self, state=False):
        """
        Disables/Enables all tag buttons on the grid layout
        :param state: bool
        """
        for i in range(self._type_grid.columnCount()):
            for j in range(self._type_grid.rowCount()):
                container_w = self._type_grid.cellWidget(j, i)
                if container_w is not None:
                    tag_w = container_w.containedWidget
                    # Silence toggled signals while forcing the state.
                    tag_w._btn.blockSignals(True)
                    tag_w._btn.setChecked(state)
                    tag_w._btn.blockSignals(False)
class TaggerTypeWidget(base.BaseWidget, object):
    """Grid cell widget exposing a single checkable button for one tag type."""

    def __init__(self, type_title, parent=None):
        # Human readable title, and the normalized name used as the tag value.
        self._type_title_name = type_title
        self._type_name = type_title.replace(' ', '_').lower()
        super(TaggerTypeWidget, self).__init__(parent=parent)

    def ui(self):
        """Builds the widget UI: one checkable push button with the title."""
        super(TaggerTypeWidget, self).ui()
        self._btn = QPushButton(self._type_title_name)
        self._btn.setCheckable(True)
        self.main_layout.addWidget(self._btn)
        # A QLabel used to be created here but was never added to any layout
        # (its addWidget call was commented out); the dead widget creation
        # has been removed.

    def get_name(self):
        """
        Returns type name of the tagger widget
        :return: str
        """
        return self._type_name
|
nilq/baby-python
|
python
|
import random
def n_list(n):
    """Return the integers 1..n in uniformly random order.

    Fixes the Python-2-only ``xrange`` call (a NameError on Python 3) and
    replaces the hand-rolled pop/append shuffle with ``random.shuffle``,
    which also produces a uniform permutation.

    :param n: number of integers to generate (n <= 0 yields an empty list)
    :return: list of the integers 1..n, shuffled
    """
    nl = list(range(1, n + 1))
    random.shuffle(nl)
    return nl
|
nilq/baby-python
|
python
|
"""
--- Day 18: Operation Order ---
As you look out the window and notice a heavily-forested continent slowly appear over the horizon, you are interrupted by the child sitting next to you. They're curious if you could help them with their math homework.
Unfortunately, it seems like this "math" follows different rules than you remember.
The homework (your puzzle input) consists of a series of expressions that consist of addition (+), multiplication (*), and parentheses ((...)). Just like normal math, parentheses indicate that the expression inside must be evaluated before it can be used by the surrounding expression. Addition still finds the sum of the numbers on both sides of the operator, and multiplication still finds the product.
However, the rules of operator precedence have changed. Rather than evaluating multiplication before addition, the operators have the same precedence, and are evaluated left-to-right regardless of the order in which they appear.
For example, the steps to evaluate the expression 1 + 2 * 3 + 4 * 5 + 6 are as follows:
1 + 2 * 3 + 4 * 5 + 6
3 * 3 + 4 * 5 + 6
9 + 4 * 5 + 6
13 * 5 + 6
65 + 6
71
Parentheses can override this order; for example, here is what happens if parentheses are added to form 1 + (2 * 3) + (4 * (5 + 6)):
1 + (2 * 3) + (4 * (5 + 6))
1 + 6 + (4 * (5 + 6))
7 + (4 * (5 + 6))
7 + (4 * 11 )
7 + 44
51
Here are a few more examples:
2 * 3 + (4 * 5) becomes 26.
5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 437.
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 12240.
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 13632.
Before you can help with the homework, you need to understand it yourself. Evaluate the expression on each line of the homework; what is the sum of the resulting values?
"""
import re
f = open("challenges\data\day18data.txt", "r")
def processData(file):
    """Return the stripped lines of *file* (any iterable of strings).

    Bug fix: the original iterated the module-global ``f`` instead of the
    ``file`` parameter it was given, making the parameter dead.

    :param file: iterable of strings (open file object or list of lines)
    :return: list of lines with surrounding whitespace and newlines removed
    """
    data = []
    for x in file:
        # strip() removes leading/trailing whitespace; replace() also drops
        # any interior newline characters, matching the original behavior.
        x = x.strip().replace('\n', '')
        data.append(x)
    return data
def evalEquationString(equation):
    """Evaluate *equation* where + and * share the same precedence and are
    applied left-to-right; parentheses override as usual (AoC 2020 day 18)."""
    ops = []    # pending operators, including '(' markers
    vals = []   # value stack

    def reduce_once():
        # Apply the top pending operator to the top two values.
        op = ops[-1]
        if op not in ('+', '*'):
            raise Exception(f"Bad state; opstack {ops} stack {vals}")
        ops.pop()
        b = vals.pop()
        a = vals.pop()
        vals.append(a + b if op == '+' else a * b)

    for tok in re.finditer(r'([()])|(\d+)|([+*])', equation):
        paren, number, op = tok.group(1), tok.group(2), tok.group(3)
        if paren == '(':
            ops.append('(')
        elif paren == ')':
            while ops[-1] != '(':
                reduce_once()
            ops.pop()
        elif number:
            vals.append(int(number))
        else:
            # Equal precedence: reduce every pending op before pushing.
            while ops and ops[-1] != '(':
                reduce_once()
            ops.append(op)
    while ops:
        reduce_once()
    assert len(vals) == 1, f"operations {ops} stack {vals}"
    return vals[0]
def sumResultingValues(arr, evalMethod):
    """Return the sum of *evalMethod* applied to every equation string in *arr*."""
    return sum(evalMethod(equation) for equation in arr)
"""
--- Part Two ---
You manage to answer the child's questions and they finish part 1 of their homework, but get stuck when they reach the next section: advanced math.
Now, addition and multiplication have different precedence levels, but they're not the ones you're familiar with. Instead, addition is evaluated before multiplication.
For example, the steps to evaluate the expression 1 + 2 * 3 + 4 * 5 + 6 are now as follows:
1 + 2 * 3 + 4 * 5 + 6
3 * 3 + 4 * 5 + 6
3 * 7 * 5 + 6
3 * 7 * 11
21 * 11
231
Here are the other examples from above:
1 + (2 * 3) + (4 * (5 + 6)) still becomes 51.
2 * 3 + (4 * 5) becomes 46.
5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 1445.
5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 669060.
((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 23340.
What do you get if you add up the results of evaluating the homework problems using these new rules?
"""
def evalAdvEquationString(equation):
    """Evaluate *equation* where + binds tighter than * (AoC 2020 day 18,
    part 2); parentheses override as usual."""
    ops = []    # pending operators, including '(' markers
    vals = []   # value stack

    def reduce_once():
        # Apply the top pending operator to the top two values.
        op = ops[-1]
        if op not in ('+', '*'):
            raise Exception(f"Bad state; opstack {ops} stack {vals}")
        ops.pop()
        b = vals.pop()
        a = vals.pop()
        vals.append(a + b if op == '+' else a * b)

    for tok in re.finditer(r'([()])|(\d+)|([+*])', equation):
        paren, number, op = tok.group(1), tok.group(2), tok.group(3)
        if paren == '(':
            ops.append('(')
        elif paren == ')':
            while ops[-1] != '(':
                reduce_once()
            ops.pop()
        elif number:
            vals.append(int(number))
        else:
            # Addition has higher precedence: only pending '+' ops are
            # reduced before pushing a '*'.
            while ops and op == '*' and ops[-1] == '+':
                reduce_once()
            ops.append(op)
    while ops:
        reduce_once()
    assert len(vals) == 1, f"operations {ops} stack {vals}"
    return vals[0]
# Parse the input once, then answer both parts.
data = processData(f)
print(sumResultingValues(data, evalEquationString))
print(sumResultingValues(data, evalAdvEquationString))
|
nilq/baby-python
|
python
|
"""Exceptions raised by the s3control service."""
from moto.core.exceptions import RESTError
# Jinja template adding the offending access-point name to the error body.
ERROR_WITH_ACCESS_POINT_NAME = """{% extends 'wrapped_single_error' %}
{% block extra %}<AccessPointName>{{ name }}</AccessPointName>{% endblock %}
"""
# NOTE(review): byte-identical to ERROR_WITH_ACCESS_POINT_NAME — the policy
# variant also emits an <AccessPointName> element; confirm that is intended.
ERROR_WITH_ACCESS_POINT_POLICY = """{% extends 'wrapped_single_error' %}
{% block extra %}<AccessPointName>{{ name }}</AccessPointName>{% endblock %}
"""
class S3ControlError(RESTError):
    """Base class for s3control errors; defaults to the ``single_error`` template."""

    def __init__(self, *args, **kwargs):
        # Only fill in the template when the subclass did not pick one.
        if "template" not in kwargs:
            kwargs["template"] = "single_error"
        super().__init__(*args, **kwargs)
class AccessPointNotFound(S3ControlError):
    """404 raised when the requested access point does not exist."""

    code = 404

    def __init__(self, name, **kwargs):
        # Register the template body and select it unless overridden.
        self.templates["ap_not_found"] = ERROR_WITH_ACCESS_POINT_NAME
        if "template" not in kwargs:
            kwargs["template"] = "ap_not_found"
        kwargs["name"] = name
        super().__init__(
            "NoSuchAccessPoint", "The specified accesspoint does not exist", **kwargs
        )
class AccessPointPolicyNotFound(S3ControlError):
    """404 raised when the requested access point has no policy."""

    code = 404

    def __init__(self, name, **kwargs):
        # Register the template body and select it unless overridden.
        self.templates["apf_not_found"] = ERROR_WITH_ACCESS_POINT_POLICY
        if "template" not in kwargs:
            kwargs["template"] = "apf_not_found"
        kwargs["name"] = name
        super().__init__(
            "NoSuchAccessPointPolicy",
            "The specified accesspoint policy does not exist",
            **kwargs
        )
|
nilq/baby-python
|
python
|
# encoding: utf-8
from __future__ import unicode_literals
import os
from django.test import TestCase
from data_importer.core.descriptor import ReadDescriptor
from data_importer.core.descriptor import InvalidDescriptor
from data_importer.core.descriptor import InvalidModel
from data_importer.importers.base import BaseImporter
BASEDIR = os.path.dirname(__file__)
JSON_FILE = os.path.abspath(os.path.join(BASEDIR, 'data/test_json_descriptor.json'))
class ReadDescriptorTestCase(TestCase):
    """Tests for ReadDescriptor against the bundled JSON descriptor file."""

    def setUp(self):
        self.descriptor = ReadDescriptor(file_name=JSON_FILE, model_name='Contact')

    def test_readed_file(self):
        # The descriptor should have parsed the JSON source.
        self.assertTrue(self.descriptor.source)

    def test_get_fields(self):
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(self.descriptor.get_fields(), ["name", "year", "last"])

    def test_invalid_model(self):
        descriptor = ReadDescriptor(file_name=JSON_FILE, model_name='TestInvalidModel')
        # Pass the bound method directly instead of wrapping it in a lambda.
        self.assertRaises(InvalidModel, descriptor.get_model)

    def test_invalid_file(self):
        # assertRaises accepts the callable plus its kwargs directly.
        self.assertRaises(InvalidDescriptor, ReadDescriptor,
                          file_name='invalid_file.er',
                          model_name='TestInvalidModel')
class MyBaseImport(BaseImporter):
    # Importer configured purely through Meta/descriptor; used as a fixture
    # by TestDescriptionUsingBaseImporter below.
    class Meta:
        delimiter = ';'
        ignore_first_line = True
        descriptor = JSON_FILE
        descriptor_model = "Contact"

    def set_reader(self):
        # No reader needed: these tests only exercise descriptor parsing.
        return
class TestDescriptionUsingBaseImporter(TestCase):
    """Checks that BaseImporter derives its field list from the descriptor."""

    def setUp(self):
        self.importer = MyBaseImport(source=None)

    def test_get_fields(self):
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(self.importer.fields, ['name', 'year', 'last'])
|
nilq/baby-python
|
python
|
#---------------------------------------------
# Set up Trick executive parameters.
#---------------------------------------------
#instruments.echo_jobs.echo_jobs_on()
trick.exec_set_trap_sigfpe(True)
#trick.checkpoint_pre_init(1)
trick.checkpoint_post_init(1)
#trick.add_read(0.0 , '''trick.checkpoint('chkpnt_point')''')
# NOTE: You must set this to be the same as the master federate's frame for IMSim freezing
trick.exec_set_software_frame(0.25)
trick.exec_set_stack_trace(False)
# Trick config
trick.exec_set_enable_freeze(False)
trick.exec_set_freeze_command(False)
trick.sim_control_panel_set_enabled(False)
# NOTE(review): duplicate of the exec_set_stack_trace(False) call above.
trick.exec_set_stack_trace(False)
#trick.exec_set_thread_amf_cycle_time( 1 , 0.250 )
#trick.exec_set_thread_process_type( 1 , trick.PROCESS_TYPE_AMF_CHILD )
# Total simulated time; also used for the terminate time at the bottom.
run_duration = 15.0
#---------------------------------------------
# Set up data to record.
#---------------------------------------------
exec(open( "Log_data/log_sine_states.py" ).read())
log_sine_states( 'A', 0.250 )
log_sine_states( 'P', 0.250 )
#---------------------------------------------
# Set up the initial Sine states
#---------------------------------------------
exec(open( "Modified_data/sine_init.py" ).read())
#TODO: setup integration
#P.integ.option = Runge_Kutta_Fehlberg_45
#P.integ.first_step_deriv = Yes
# Example of a 1-dimensional dynamic array.
A.packing.buff_size = 10
A.packing.buff = trick.sim_services.alloc_type( A.packing.buff_size, 'unsigned char' )
P.packing.buff_size = 10
P.packing.buff = trick.sim_services.alloc_type( P.packing.buff_size, 'unsigned char' )
# We are taking advantage of the input file to specify a unique name for the
# sim-data name field for the P-side federate.
A.sim_data.name = 'A.name.P-side'
P.sim_data.name = 'P.name.P-side'
# We are taking advantage of the input file to specify a unique name and
# message for the P-side federate interaction handler.
A.interaction_handler.name = 'P-side: A.interaction_hdlr.name'
P.interaction_handler.name = 'P-side: P.interaction_hdlr.name'
A.interaction_handler.message = 'P-side: A.interaction_hdlr.message'
P.interaction_handler.message = 'P-side: P.interaction_hdlr.message'
# =========================================================================
# Set up HLA interoperability.
# =========================================================================
# Show or hide the TrickHLA debug messages.
# Use Level-3 to show the ownership transfer debug messages.
THLA.federate.debug_level = trick.DEBUG_LEVEL_6_TRACE
# Configure the CRC.
# Pitch specific local settings designator:
THLA.federate.local_settings = 'crcHost = localhost\n crcPort = 8989'
# Mak specific local settings designator, which is anything from the rid.mtl file:
#THLA.federate.local_settings = '(setqb RTI_tcpForwarderAddr \'192.168.15.3\') (setqb RTI_distributedForwarderPort 5000)'
THLA.federate.lookahead_time = 0.250
# Configure the federate.
THLA.federate.name = 'P-side-Federate'
THLA.federate.FOM_modules = 'FOMs/S_FOMfile.xml,FOMs/TrickHLAFreezeInteraction.xml'
THLA.federate.federation_name = 'SineWaveSim'
THLA.federate.time_regulating = True
THLA.federate.time_constrained = True
# Configure ExecutionControl.
# Set the multiphase initialization synchronization points.
THLA.execution_control.multiphase_init_sync_points = 'Phase1, Phase2'
# Set the simulation timeline to be used for time computations.
THLA.execution_control.sim_timeline = THLA_INIT.sim_timeline
# Set the scenario timeline to be used for configuring federation freeze times.
THLA.execution_control.scenario_timeline = THLA_INIT.scenario_timeline
# The list of Federates known to be in our Federation. The simulation will
# wait for all Federates marked as required to join the Federation before
# continuing on.
THLA.federate.enable_known_feds = True
THLA.federate.known_feds_count = 2
THLA.federate.known_feds = trick.sim_services.alloc_type( THLA.federate.known_feds_count, 'TrickHLA::KnownFederate' )
THLA.federate.known_feds[0].name = 'A-side-Federate'
THLA.federate.known_feds[0].required = True
THLA.federate.known_feds[1].name = 'P-side-Federate'
THLA.federate.known_feds[1].required = True
#---------------------------------------------
# Set up for simulation configuration.
#---------------------------------------------
THLA.simple_sim_config.owner = 'P-side-Federate'
THLA.simple_sim_config.run_duration = run_duration
# TrickHLA Interactions and Parameters.
# This federate only subscribes to the Communication interaction; the
# A-side federate publishes it.
THLA.manager.inter_count = 1
THLA.manager.interactions = trick.alloc_type( THLA.manager.inter_count, 'TrickHLA::Interaction' )
THLA.manager.interactions[0].FOM_name = 'Communication'
THLA.manager.interactions[0].publish = False
THLA.manager.interactions[0].subscribe = True
THLA.manager.interactions[0].handler = P.interaction_handler
THLA.manager.interactions[0].param_count = 3
THLA.manager.interactions[0].parameters = trick.sim_services.alloc_type( THLA.manager.interactions[0].param_count, 'TrickHLA::Parameter' )
THLA.manager.interactions[0].parameters[0].FOM_name = 'Message'
THLA.manager.interactions[0].parameters[0].trick_name = 'P.interaction_handler.message'
THLA.manager.interactions[0].parameters[0].rti_encoding = trick.ENCODING_UNICODE_STRING
THLA.manager.interactions[0].parameters[1].FOM_name = 'time'
THLA.manager.interactions[0].parameters[1].trick_name = 'P.interaction_handler.time'
THLA.manager.interactions[0].parameters[1].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.interactions[0].parameters[2].FOM_name = 'year'
THLA.manager.interactions[0].parameters[2].trick_name = 'P.interaction_handler.year'
THLA.manager.interactions[0].parameters[2].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
# The Federate has two objects, it publishes one and subscribes to another.
THLA.manager.obj_count = 2
THLA.manager.objects = trick.sim_services.alloc_type( THLA.manager.obj_count, 'TrickHLA::Object' )
# Configure the object this federate subscribes to but will not create an
# HLA instance for.
THLA.manager.objects[0].FOM_name = 'Test'
THLA.manager.objects[0].name = 'A-side-Federate.Test'
THLA.manager.objects[0].create_HLA_instance = False
THLA.manager.objects[0].packing = A.packing
THLA.manager.objects[0].deleted = A.obj_deleted_callback
THLA.manager.objects[0].attr_count = 8
THLA.manager.objects[0].attributes = trick.sim_services.alloc_type( THLA.manager.objects[0].attr_count, 'TrickHLA::Attribute' )
THLA.manager.objects[0].attributes[0].FOM_name = 'Time'
THLA.manager.objects[0].attributes[0].trick_name = 'A.sim_data.time'
THLA.manager.objects[0].attributes[0].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[0].publish = True
THLA.manager.objects[0].attributes[0].subscribe = True
THLA.manager.objects[0].attributes[0].locally_owned = False
THLA.manager.objects[0].attributes[0].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[1].FOM_name = 'Value'
THLA.manager.objects[0].attributes[1].trick_name = 'A.sim_data.value'
THLA.manager.objects[0].attributes[1].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[1].publish = True
THLA.manager.objects[0].attributes[1].subscribe = True
THLA.manager.objects[0].attributes[1].locally_owned = False
THLA.manager.objects[0].attributes[1].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[2].FOM_name = 'dvdt'
THLA.manager.objects[0].attributes[2].trick_name = 'A.sim_data.dvdt'
THLA.manager.objects[0].attributes[2].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[2].publish = True
THLA.manager.objects[0].attributes[2].subscribe = True
THLA.manager.objects[0].attributes[2].locally_owned = False
THLA.manager.objects[0].attributes[2].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[3].FOM_name = 'Phase'
THLA.manager.objects[0].attributes[3].trick_name = 'A.packing.phase_deg' # using packed data instead of 'A.sim_data.phase'
THLA.manager.objects[0].attributes[3].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[3].publish = True
THLA.manager.objects[0].attributes[3].subscribe = True
THLA.manager.objects[0].attributes[3].locally_owned = False
THLA.manager.objects[0].attributes[3].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[4].FOM_name = 'Frequency'
THLA.manager.objects[0].attributes[4].trick_name = 'A.sim_data.freq'
THLA.manager.objects[0].attributes[4].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[4].publish = True
THLA.manager.objects[0].attributes[4].subscribe = True
THLA.manager.objects[0].attributes[4].locally_owned = False
THLA.manager.objects[0].attributes[4].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[5].FOM_name = 'Amplitude'
THLA.manager.objects[0].attributes[5].trick_name = 'A.sim_data.amp'
THLA.manager.objects[0].attributes[5].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[5].publish = True
THLA.manager.objects[0].attributes[5].subscribe = True
THLA.manager.objects[0].attributes[5].locally_owned = False
THLA.manager.objects[0].attributes[5].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[6].FOM_name = 'Tolerance'
THLA.manager.objects[0].attributes[6].trick_name = 'A.sim_data.tol'
THLA.manager.objects[0].attributes[6].config = trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[6].publish = True
THLA.manager.objects[0].attributes[6].subscribe = True
THLA.manager.objects[0].attributes[6].locally_owned = False
THLA.manager.objects[0].attributes[6].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[0].attributes[7].FOM_name = 'Name'
THLA.manager.objects[0].attributes[7].trick_name = 'A.sim_data.name'
THLA.manager.objects[0].attributes[7].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[0].attributes[7].publish = True
THLA.manager.objects[0].attributes[7].subscribe = True
THLA.manager.objects[0].attributes[7].locally_owned = False
THLA.manager.objects[0].attributes[7].rti_encoding = trick.ENCODING_UNICODE_STRING
# Configure the object this federate will create an HLA instance and
# publish data for.
THLA.manager.objects[1].FOM_name = 'Test'
THLA.manager.objects[1].name = 'P-side-Federate.Test'
THLA.manager.objects[1].create_HLA_instance = True
THLA.manager.objects[1].packing = P.packing
THLA.manager.objects[1].deleted = P.obj_deleted_callback
THLA.manager.objects[1].attr_count = 8
THLA.manager.objects[1].attributes = trick.sim_services.alloc_type( THLA.manager.objects[1].attr_count, 'TrickHLA::Attribute' )
THLA.manager.objects[1].attributes[0].FOM_name = 'Time'
THLA.manager.objects[1].attributes[0].trick_name = 'P.sim_data.time'
THLA.manager.objects[1].attributes[0].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[0].publish = True
THLA.manager.objects[1].attributes[0].locally_owned = True
THLA.manager.objects[1].attributes[0].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[1].FOM_name = 'Value'
THLA.manager.objects[1].attributes[1].trick_name = 'P.sim_data.value'
THLA.manager.objects[1].attributes[1].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[1].publish = True
THLA.manager.objects[1].attributes[1].subscribe = True
THLA.manager.objects[1].attributes[1].locally_owned = True
THLA.manager.objects[1].attributes[1].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[2].FOM_name = 'dvdt'
THLA.manager.objects[1].attributes[2].trick_name = 'P.sim_data.dvdt'
THLA.manager.objects[1].attributes[2].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[2].publish = True
THLA.manager.objects[1].attributes[2].locally_owned = True
THLA.manager.objects[1].attributes[2].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[3].FOM_name = 'Phase'
THLA.manager.objects[1].attributes[3].trick_name = 'P.packing.phase_deg' # using packed data instead of 'P.sim_data.phase'
THLA.manager.objects[1].attributes[3].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[3].publish = True
THLA.manager.objects[1].attributes[3].locally_owned = True
THLA.manager.objects[1].attributes[3].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[4].FOM_name = 'Frequency'
THLA.manager.objects[1].attributes[4].trick_name = 'P.sim_data.freq'
THLA.manager.objects[1].attributes[4].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[4].publish = True
THLA.manager.objects[1].attributes[4].locally_owned = True
THLA.manager.objects[1].attributes[4].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[5].FOM_name = 'Amplitude'
THLA.manager.objects[1].attributes[5].trick_name = 'P.sim_data.amp'
THLA.manager.objects[1].attributes[5].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[5].publish = True
THLA.manager.objects[1].attributes[5].locally_owned = True
THLA.manager.objects[1].attributes[5].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[6].FOM_name = 'Tolerance'
THLA.manager.objects[1].attributes[6].trick_name = 'P.sim_data.tol'
THLA.manager.objects[1].attributes[6].config = trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[6].publish = True
THLA.manager.objects[1].attributes[6].locally_owned = True
THLA.manager.objects[1].attributes[6].rti_encoding = trick.ENCODING_LITTLE_ENDIAN
THLA.manager.objects[1].attributes[7].FOM_name = 'Name'
THLA.manager.objects[1].attributes[7].trick_name = 'P.sim_data.name'
THLA.manager.objects[1].attributes[7].config = trick.CONFIG_INITIALIZE + trick.CONFIG_CYCLIC
THLA.manager.objects[1].attributes[7].publish = True
THLA.manager.objects[1].attributes[7].locally_owned = True
THLA.manager.objects[1].attributes[7].rti_encoding = trick.ENCODING_UNICODE_STRING
#---------------------------------------------
# Set up simulation termination time.
#---------------------------------------------
trick.sim_services.exec_set_terminate_time( run_duration )
|
nilq/baby-python
|
python
|
import hglib
import os
__all__ = ['HGState']
class HGState(object):
    """Inspect the state of the Mercurial repository that contains a path."""

    def __init__(self, path):
        self.client, self.hg_root_path = self.find_hg_root(path)

    def find_hg_root(self, path):
        """Walk up from *path* until hglib can open a repo.

        Returns (client, absolute_root_path); raises ValueError when the
        filesystem root is reached without finding a repository.
        """
        start = path
        while True:
            try:
                return hglib.open(path), os.path.abspath(path)
            except hglib.error.ServerError:
                parent = os.path.abspath(os.path.join(path, os.pardir))
                if parent == path:
                    raise ValueError('No hg repo at %s' % start)
                path = parent

    @property
    def has_addmodr(self):
        # A '+' suffix in the hg id marks uncommitted local changes.
        return '+' in self.client.identify()

    @property
    def has_untracked(self):
        # True when `hg status` reports any entries at all.
        return bool(self.client.status())

    @property
    def is_clean(self):
        return not (self.has_untracked or self.has_addmodr)

    @property
    def id_str(self):
        # e.g. "default-0123abcd" — branch plus the identify tokens.
        parts = [self.client.branch()] + self.client.identify().split()
        return '-'.join(parts)

    def get_state(self):
        """Return a snapshot dict of version, status and branch."""
        return {
            'version': self.client.identify(),
            'status': self.client.status(),
            'branch': self.client.branch(),
        }
|
nilq/baby-python
|
python
|
def isPalindrome(word):
    """Return True when *word* (string or list) reads the same reversed."""
    return word == word[::-1]
# SWEA-style driver: 10 test cases, each an 8x8 character grid; count every
# horizontal and vertical palindromic substring of length N.
for case in range(10):
    N = int(input())
    grid = [input() for _ in range(8)]
    total = 0
    # Horizontal windows of width N.
    for row in range(8):
        for col in range(9 - N):
            if isPalindrome(grid[row][col:col + N]):
                total += 1
    # Vertical windows of height N (column extracted as a list of chars).
    for col in range(8):
        for row in range(9 - N):
            if isPalindrome([line[col] for line in grid[row:row + N]]):
                total += 1
    print(f"#{case + 1} {total}")
|
nilq/baby-python
|
python
|
"""
pyStatic_problem
"""
# =============================================================================
# Imports
# =============================================================================
import warnings
import os
import numpy as np
from collections import OrderedDict
import time
from .base import TACSProblem
import tacs.TACS
class StaticProblem(TACSProblem):
def __init__(self, name, assembler, comm, outputViewer=None, meshLoader=None, options={}):
"""
The main purpose of this class is to represent all relevant
information for a static analysis. This will include
information defining the loading condition as well as various
other pieces of information.
Parameters
----------
name : str
Name of this tacs problem
assembler : assembler
Cython object responsible for creating and setting tacs objects used to solve problem
comm : MPI Intracomm
The comm object on which to create the pyTACS object.
outputViewer : TACSToFH5 object
Cython object used to write out f5 files that can be converted and used for postprocessing.
meshLoader : pyMeshLoader object
pyMeshLoader object used to create the assembler.
options : dict
Dictionary holding problem-specific option parameters.
"""
# python object name
self.objectName = 'StaticProblem'
# Problem name
self.name = name
# Defualt setup for common problem class objects
super().__init__(assembler, comm, outputViewer, meshLoader)
# Default Option List
defOpts = {
'outputdir': [str, './'],
# Solution Options
'KSMSolver': [str, 'GMRES'],
'orderingType': [str, 'ND'],
'PCFillLevel': [int, 1000],
'PCFillRatio': [float, 20.0],
'subSpaceSize': [int, 10],
'nRestarts': [int, 15],
'flexible': [int, 1],
'L2Convergence': [float, 1e-12],
'L2ConvergenceRel': [float, 1e-12],
'useMonitor': [bool, False],
'monitorFrequency': [int, 10],
'resNormUB': [float, 1e20],
# Output Options
'writeSolution': [bool, True],
'numberSolutions': [bool, True],
'printTiming': [bool, False],
'printIterations': [bool, False],
}
# Process the default options which are added to self.options
# under the 'defaults' key. Make sure the key are lower case
self.options = {}
def_keys = defOpts.keys()
self.options['defaults'] = {}
for key in def_keys:
self.options['defaults'][key.lower()] = defOpts[key]
self.options[key.lower()] = defOpts[key]
# Set user-defined options
for key in options:
self.setOption(key, options[key])
# Linear solver factor flag
self._factorOnNext = True
# Create problem-specific variables
self._createVariables()
def _createVariables(self):
    """Create the TACS vectors, matrices, and linear solver required by
    this static problem.

    Setup order matters here: the Jacobian is assembled once before the
    preconditioner and Krylov solver are created from it.
    """
    # Generic residual vector and right-hand-side vector
    self.res = self.assembler.createVec()
    self.rhs = self.assembler.createVec()
    # Dictionaries to hold adjoint/sens vectors for each evalFunc
    self.adjointList = OrderedDict()
    self.dIduList = OrderedDict()
    self.dvSensList = OrderedDict()
    self.xptSensList = OrderedDict()
    # Temporary vectors for the adjoint solve
    self.phi = self.assembler.createVec()
    self.adjRHS = self.assembler.createVec()
    # Fixed load vector, plus a persistent numpy view of its local values
    self.F = self.assembler.createVec()
    self.F_array = self.F.getArray()
    # State variable vector, plus a persistent numpy view of its local values
    self.u = self.assembler.createVec()
    self.u_array = self.u.getArray()
    # Auxiliary element object for applying tractions/pressure
    self.auxElems = tacs.TACS.AuxElements()
    # Counter of solve() calls; used to number written solution files
    self.callCounter = -1
    # Residual norms tracked over a linear solve
    self.initNorm = 0.0
    self.startNorm = 0.0
    self.finalNorm = 0.0
    opt = self.getOption
    # Tangent Stiffness --- process the ordering option here:
    tmp = opt('orderingType').lower()
    if tmp == 'natural':
        ordering = tacs.TACS.NATURAL_ORDER
    elif tmp == 'nd':
        ordering = tacs.TACS.ND_ORDER
    elif tmp == 'rcm':
        ordering = tacs.TACS.RCM_ORDER
    elif tmp == 'tacs_amd':
        ordering = tacs.TACS.TACS_AMD_ORDER
    elif tmp == 'multicolor':
        ordering = tacs.TACS.MULTICOLOR_ORDER
    else:
        raise self.TACSError("Unrecognized 'orderingType' option value: "
                             "'%s'. Valid values are: 'natural', 'nd', 'rcm', "
                             "'tacs_amd', or 'multicolor'." % tmp)
    self.K = self.assembler.createSchurMat(ordering)
    # Additional Vecs for updates
    self.update = self.assembler.createVec()
    # Setup PCScMat and KSM solver. Assemble the Jacobian once so the
    # preconditioner is created with the correct matrix structure.
    self.alpha = 1.0
    self.beta = 0.0
    self.gamma = 0.0
    self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res, self.K)
    reorderSchur = 1
    self.PC = tacs.TACS.Pc(self.K, lev_fill=opt('PCFillLevel'),
                           ratio_fill=opt('PCFillRatio'), reorder=reorderSchur)
    # Operator, fill level, fill ratio, msub, rtol, ataol
    if opt('KSMSolver').upper() == 'GMRES':
        self.KSM = tacs.TACS.KSM(
            self.K, self.PC, opt('subSpaceSize'), opt('nRestarts'),
            opt('flexible'))
    # TODO: Fix this
    # elif opt('KSMSolver').upper() == 'GCROT':
    #     self.KSM = tacs.TACS.GCROT(
    #         self.K, self.PC, opt('subSpaceSize'), opt('subSpaceSize'),
    #         opt('nRestarts'), opt('flexible'))
    else:
        raise self.TACSError("Unknown KSMSolver option. Valid options are "
                             "'GMRES' or 'GCROT'")
    self.KSM.setTolerances(self.getOption('L2ConvergenceRel'),
                           self.getOption('L2Convergence'))
    if opt('useMonitor'):
        self.KSM.setMonitor(tacs.TACS.KSMPrintStdout(
            opt('KSMSolver'), self.comm.rank, opt('monitorFrequency')))
def addFunction(self, funcName, funcHandle, compIDs=None, **kwargs):
    """
    Register a TACS function with this problem.

    The user supplies the actual function handle, so this is
    intentionally generic. Supported handles include: KSFailure,
    KSTemperature, AverageTemperature, Compliance, KSDisplacement,
    StructuralMass, HeatFlux.

    Parameters
    ----------
    funcName : str
        User-supplied name for the function; typically a string that
        is meaningful to the user.
    funcHandle : tacs.functions
        Function handle used for creation. Must come from the
        functions module in tacs.
    compIDs : list
        List of compIDs to select. Alternative to selectCompIDs
        arguments.
    """
    added = super().addFunction(funcName, funcHandle, compIDs, **kwargs)
    if added:
        # Allocate the TACS BVecs this function will need for
        # adjoint-based sensitivity calculations.
        allocations = ((self.adjointList, self.assembler.createVec),
                       (self.dIduList, self.assembler.createVec),
                       (self.dvSensList, self.assembler.createDesignVec),
                       (self.xptSensList, self.assembler.createNodeVec))
        for storage, factory in allocations:
            storage[funcName] = factory()
    return added
def setDesignVars(self, x):
    """
    Update the design variables used by tacs.

    Parameters
    ----------
    x : ndarray
        The variables (typically from the optimizer) to set. It
        looks for variables in the ``self.varName`` attribute.
    """
    super().setDesignVars(x)
    # The stiffness matrix depends on the design variables, so force a
    # Jacobian re-assembly/re-factorization before the next solve.
    self._factorOnNext = True
def setNodes(self, coords):
    """
    Set the mesh coordinates of the structure.

    Parameters
    ----------
    coords : ndarray
        Structural coordinates in an array of size (N * 3) where N is
        the number of structural nodes on this processor.
    """
    super().setNodes(coords)
    # The stiffness matrix depends on the nodal coordinates, so force a
    # Jacobian re-assembly/re-factorization before the next solve.
    self._factorOnNext = True
####### Load adding methods ########
def addLoadToComponents(self, compIDs, F, averageLoad=False):
    """
    The function is used to add a *FIXED TOTAL LOAD* on one or more
    components, defined by COMPIDs. The purpose of this routine is to add loads that
    remain fixed throughout an optimization. An example would be an engine load.
    This routine determines all the unique nodes in the FE model that are part of
    the requested components, then takes the total 'force' specified by F and divides
    it by the number of nodes. This average load is then applied to the nodes.

    Parameters
    ----------
    compIDs : list[int] or int
        The components with added loads. Use pyTACS selectCompIDs method
        to determine this.
    F : Numpy 1d or 2d array length (varsPerNodes) or (numNodeIDs, varsPerNodes)
        Vector(s) of 'force' to apply to each component. If only one force vector is provided,
        force will be copied uniformly across all components.
    averageLoad : bool
        Flag to determine whether load should be split evenly across all components (True)
        or copied and applied individually to each component (False). Defaults to False.

    Notes
    -----
    The units of the entries of the 'force' vector F are not
    necessarily physical forces and their interpretation depends
    on the physics problem being solved and the dofs included
    in the model.

    A couple of examples of force vector components for common problems are listed below:

        In Heat Conduction with varsPerNode = 1
            F = [Qdot] # heat rate
        In Elasticity with varsPerNode = 3,
            F = [fx, fy, fz] # forces
        In Elasticity with varsPerNode = 6,
            F = [fx, fy, fz, mx, my, mz] # forces + moments
        In Thermoelasticity with varsPerNode = 4,
            F = [fx, fy, fz, Qdot] # forces + heat rate
        In Thermoelasticity with varsPerNode = 7,
            F = [fx, fy, fz, mx, my, mz, Qdot] # forces + moments + heat rate
    """
    # Delegate to the shared helper, accumulating into the fixed load
    # vector self.F.
    self._addLoadToComponents(self.F, compIDs, F, averageLoad)
def addLoadToNodes(self, nodeIDs, F, nastranOrdering=False):
    """
    The function is used to add a fixed point load of F to the
    selected node IDs.

    Parameters
    ----------
    nodeIDs : list[int]
        The node IDs with added loads.
    F : Numpy 1d or 2d array length (varsPerNodes) or (numNodeIDs, varsPerNodes)
        Array of force vectors, one for each node. If only one force vector is provided,
        force will be copied uniformly across all nodes.
    nastranOrdering : bool
        Flag signaling whether nodeIDs are in TACS (default)
        or NASTRAN (grid IDs in bdf file) ordering

    Notes
    -----
    The units of the entries of the 'force' vector F are not
    necessarily physical forces and their interpretation depends
    on the physics problem being solved and the dofs included
    in the model.

    A couple of examples of force vector components for common problems are listed below:

        In Heat Conduction with varsPerNode = 1
            F = [Qdot] # heat rate
        In Elasticity with varsPerNode = 3,
            F = [fx, fy, fz] # forces
        In Elasticity with varsPerNode = 6,
            F = [fx, fy, fz, mx, my, mz] # forces + moments
        In Thermoelasticity with varsPerNode = 4,
            F = [fx, fy, fz, Qdot] # forces + heat rate
        In Thermoelasticity with varsPerNode = 7,
            F = [fx, fy, fz, mx, my, mz, Qdot] # forces + moments + heat rate
    """
    # Delegate to the shared helper, accumulating into the fixed load
    # vector self.F.
    self._addLoadToNodes(self.F, nodeIDs, F, nastranOrdering)
def addLoadToRHS(self, Fapplied):
    """
    The function is used to add a *FIXED TOTAL LOAD* directly to the
    right hand side vector given the equation below:

        K*u = f

    Where:
        K : Stiffness matrix for problem
        u : State variables for problem
        f : Right-hand side vector to add loads to

    Parameters
    ----------
    Fapplied : ndarray or BVec
        Distributed array containing loads to apply to the RHS of the problem.
    """
    # Accumulate the applied loads into the fixed load vector self.F,
    # which forms the right-hand side of the linear system in solve().
    self._addLoadToRHS(self.F, Fapplied)
def addTractionToComponents(self, compIDs, tractions,
                            faceIndex=0):
    """
    The function is used to add a *FIXED TOTAL TRACTION* on one or more
    components, defined by COMPIDs. The purpose of this routine is
    to add loads that remain fixed throughout an optimization.

    Parameters
    ----------
    compIDs : list[int] or int
        The components with added loads. Use pyTACS selectCompIDs method
        to determine this.
    tractions : Numpy array length 1 or compIDs
        Array of traction vectors for each component
    faceIndex : int
        Indicates which face (side) of element to apply traction to.
        Note: not required for certain elements (i.e. shells)
    """
    # Tractions are carried by auxiliary elements rather than the load
    # vector; delegate to the shared helper.
    self._addTractionToComponents(self.auxElems, compIDs, tractions, faceIndex)
def addTractionToElements(self, elemIDs, tractions,
                          faceIndex=0, nastranOrdering=False):
    """
    The function is used to add a fixed traction to the
    selected element IDs. Tractions can be specified on an
    element-by-element basis (if tractions is a 2d array) or
    set to a uniform value (if tractions is a 1d array).

    Parameters
    ----------
    elemIDs : list[int]
        The global element ID numbers for which to apply the traction.
    tractions : Numpy 1d or 2d array length varsPerNodes or (elemIDs, varsPerNodes)
        Array of traction vectors for each element
    faceIndex : int
        Indicates which face (side) of element to apply traction to.
        Note: not required for certain elements (i.e. shells)
    nastranOrdering : bool
        Flag signaling whether elemIDs are in TACS (default)
        or NASTRAN ordering
    """
    # Tractions are carried by auxiliary elements rather than the load
    # vector; delegate to the shared helper.
    self._addTractionToElements(self.auxElems, elemIDs, tractions, faceIndex, nastranOrdering)
def addPressureToComponents(self, compIDs, pressures,
                            faceIndex=0):
    """
    The function is used to add a *FIXED TOTAL PRESSURE* on one or more
    components, defined by COMPIDs. The purpose of this routine is
    to add loads that remain fixed throughout an optimization. An example
    would be a fuel load.

    Parameters
    ----------
    compIDs : list[int] or int
        The components with added loads. Use pyTACS selectCompIDs method
        to determine this.
    pressures : Numpy array length 1 or compIDs
        Array of pressure values for each component
    faceIndex : int
        Indicates which face (side) of element to apply pressure to.
        Note: not required for certain elements (i.e. shells)
    """
    # Pressures are carried by auxiliary elements rather than the load
    # vector; delegate to the shared helper.
    self._addPressureToComponents(self.auxElems, compIDs, pressures, faceIndex)
def addPressureToElements(self, elemIDs, pressures,
                          faceIndex=0, nastranOrdering=False):
    """
    The function is used to add a fixed pressure to the
    selected element IDs. Pressures can be specified on an
    element-by-element basis (if pressures is an array) or
    set to a uniform value (if pressures is a scalar).

    Parameters
    ----------
    elemIDs : list[int]
        The global element ID numbers for which to apply the pressure.
    pressures : Numpy array length 1 or elemIDs
        Array of pressure values for each element
    faceIndex : int
        Indicates which face (side) of element to apply pressure to.
        Note: not required for certain elements (i.e. shells)
    nastranOrdering : bool
        Flag signaling whether elemIDs are in TACS (default)
        or NASTRAN ordering
    """
    # Pressures are carried by auxiliary elements rather than the load
    # vector; delegate to the shared helper.
    self._addPressureToElements(self.auxElems, elemIDs, pressures,
                                faceIndex, nastranOrdering)
####### Static solver methods ########
def _updateAssemblerVars(self):
    """
    Make sure that the assembler is using the input variables
    associated with this problem (design vars, nodes, auxiliary
    elements, and state).
    """
    self.assembler.setDesignVars(self.x)
    self.assembler.setNodes(self.Xpts)
    self.assembler.setAuxElements(self.auxElems)
    # Set state variables
    self.assembler.setVariables(self.u)
    # Zero any time derivative terms (static problem: udot = uddot = 0)
    self.assembler.zeroDotVariables()
    self.assembler.zeroDDotVariables()
def _initializeSolve(self):
    """
    Initialize the solution of the structural system for the
    load case: assemble and factor the stiffness matrix.

    The (expensive) assembly/factorization is only performed when
    design variables or nodes have changed since the last factor.
    """
    if self._factorOnNext:
        self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res, self.K)
        self.PC.factor()
        self._factorOnNext = False
def solve(self, Fext=None):
    """
    Solution of the static problem for the current load set. The
    forces must already be set.

    Parameters
    ----------
    Fext : ndarray or BVec, optional
        Distributed array containing additional loads (ex. aerodynamic
        forces for aerostructural coupling) to apply to the RHS of the
        static problem.
    """
    startTime = time.time()
    self.callCounter += 1
    setupProblemTime = time.time()
    # Set problem vars to assembler
    self._updateAssemblerVars()
    # Check if we need to initialize (assemble/factor the Jacobian)
    self._initializeSolve()
    initSolveTime = time.time()
    # Residual of the current state
    self.assembler.assembleRes(self.res)
    # Build the RHS from the fixed loads
    self.rhs.copyValues(self.F)  # Fixed loads
    # Add external loads, if specified
    if Fext is not None:
        if isinstance(Fext, tacs.TACS.Vec):
            self.rhs.axpy(1.0, Fext)
        elif isinstance(Fext, np.ndarray):
            rhsArray = self.rhs.getArray()
            rhsArray[:] = rhsArray[:] + Fext[:]
    # Zero out bc terms in rhs
    self.assembler.applyBCs(self.rhs)
    # Form res = res - rhs
    self.res.axpy(-1.0, self.rhs)
    # Set initNorm as the norm of F
    self.initNorm = np.real(self.F.norm())
    # Starting norm for this computation
    self.startNorm = np.real(self.res.norm())
    initNormTime = time.time()
    # Solve the linear system for the (negated) update
    self.KSM.solve(self.res, self.update)
    self.update.scale(-1.0)
    solveTime = time.time()
    # Update state variables: u <- u + update
    self.assembler.getVariables(self.u)
    self.u.axpy(1.0, self.update)
    self.assembler.setVariables(self.u)
    stateUpdateTime = time.time()
    # Compute final FEA norm.
    # NOTE(review): this compares against the fixed loads self.F only;
    # external Fext loads are not included here -- confirm intended.
    self.assembler.assembleRes(self.res)
    self.res.axpy(-1.0, self.F)  # Add the -F
    self.finalNorm = np.real(self.res.norm())
    finalNormTime = time.time()
    # If timing was requested print it; if the solution is nonlinear
    # print this information automatically if printIterations was requested.
    if self.getOption('printTiming') or self.getOption('printIterations'):
        self.pp('+--------------------------------------------------+')
        self.pp('|')
        self.pp('| TACS Solve Times:')
        self.pp('|')
        self.pp('| %-30s: %10.3f sec' % ('TACS Setup Time', setupProblemTime - startTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS Solve Init Time', initSolveTime - setupProblemTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS Init Norm Time', initNormTime - initSolveTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS Solve Time', solveTime - initNormTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS State Update Time', stateUpdateTime - solveTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS Final Norm Time', finalNormTime - stateUpdateTime))
        self.pp('|')
        self.pp('| %-30s: %10.3f sec' % ('TACS Total Solution Time', finalNormTime - startTime))
        self.pp('+--------------------------------------------------+')
    return
####### Function eval/sensitivity methods ########
def evalFunctions(self, funcs, evalFuncs=None,
                  ignoreMissing=False):
    """
    This is the main routine for returning useful information from
    pytacs. The functions corresponding to the strings in
    evalFuncs are evaluated and updated into the provided
    dictionary.

    Parameters
    ----------
    funcs : dict
        Dictionary into which the functions are saved.
    evalFuncs : iterable object containing strings.
        If not None, use these functions to evaluate.
    ignoreMissing : bool
        Flag to suppress checking for a valid function. Please use
        this option with caution.

    Examples
    --------
    >>> funcs = {}
    >>> staticProblem.solve()
    >>> staticProblem.evalFunctions(funcs, ['mass'])
    >>> funcs
    >>> # Result will look like (if StaticProblem has name of 'c1'):
    >>> # {'c1_mass':12354.10}
    """
    startTime = time.time()
    # Set problem vars to assembler
    self._updateAssemblerVars()
    if evalFuncs is None:
        evalFuncs = sorted(list(self.functionList))
    else:
        evalFuncs = sorted(list(evalFuncs))
    if not ignoreMissing:
        for f in evalFuncs:
            if f not in self.functionList:
                raise Error("Supplied function '%s' has not been added "
                            "using addFunction()." % f)
    setupProblemTime = time.time()
    # Fast parallel function evaluation of structural funcs. Only
    # registered functions are evaluated; unknown names are silently
    # skipped (only reachable when ignoreMissing=True).
    presentFuncs = [f for f in evalFuncs if f in self.functionList]
    handles = [self.functionList[f] for f in presentFuncs]
    funcVals = self.assembler.evalFunctions(handles)
    functionEvalTime = time.time()
    # Assign function values to the output dictionary. funcVals is
    # aligned with presentFuncs, so zip them directly instead of
    # maintaining a manual index counter.
    for f, val in zip(presentFuncs, funcVals):
        key = self.name + '_%s' % f
        funcs[key] = val
    dictAssignTime = time.time()
    if self.getOption('printTiming'):
        self.pp('+--------------------------------------------------+')
        self.pp('|')
        self.pp('| TACS Function Times:')
        self.pp('|')
        self.pp('| %-30s: %10.3f sec' % ('TACS Function Setup Time', setupProblemTime - startTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS Function Eval Time', functionEvalTime - setupProblemTime))
        self.pp('| %-30s: %10.3f sec' % ('TACS Dict Time', dictAssignTime - functionEvalTime))
        self.pp('|')
        self.pp('| %-30s: %10.3f sec' % ('TACS Function Time', dictAssignTime - startTime))
        self.pp('+--------------------------------------------------+')
def evalFunctionsSens(self, funcsSens, evalFuncs=None):
    """
    This is the main routine for returning useful (sensitivity)
    information from the problem. The derivatives of the functions
    corresponding to the strings in evalFuncs are evaluated and
    updated into the provided dictionary.

    Parameters
    ----------
    funcsSens : dict
        Dictionary into which the derivatives are saved.
    evalFuncs : iterable object containing strings
        The functions the user wants returned

    Examples
    --------
    >>> funcsSens = {}
    >>> staticProblem.evalFunctionsSens(funcsSens, ['mass'])
    >>> funcsSens
    >>> # Result will look like (if StaticProblem has name of 'c1'):
    >>> # {'c1_mass':{'struct':[1.234, ..., 7.89]}
    """
    startTime = time.time()
    # Set problem vars to assembler
    self._updateAssemblerVars()
    if evalFuncs is None:
        evalFuncs = sorted(list(self.functionList))
    else:
        evalFuncs = sorted(list(evalFuncs))
    # Check that the functions are all ok,
    # and prepare tacs vecs for the adjoint procedure
    dvSenses = []
    xptSenses = []
    dIdus = []
    adjoints = []
    for f in evalFuncs:
        if f not in self.functionList:
            raise Error("Supplied function has not been added "
                        "using addFunction()")
        else:
            # Populate the lists with the tacs bvecs
            # we'll need for each adjoint/sens calculation
            dvSens = self.dvSensList[f]
            dvSens.zeroEntries()
            dvSenses.append(dvSens)
            xptSens = self.xptSensList[f]
            xptSens.zeroEntries()
            xptSenses.append(xptSens)
            dIdu = self.dIduList[f]
            dIdu.zeroEntries()
            dIdus.append(dIdu)
            adjoint = self.adjointList[f]
            adjoint.zeroEntries()
            adjoints.append(adjoint)
    setupProblemTime = time.time()
    adjointStartTime = {}
    adjointEndTime = {}
    # Next we will solve all the adjoints
    # Set adjoint rhs
    self.addSVSens(evalFuncs, dIdus)
    adjointRHSTime = time.time()
    for i, f in enumerate(evalFuncs):
        adjointStartTime[f] = time.time()
        self.solveAdjoint(dIdus[i], adjoints[i])
        adjointEndTime[f] = time.time()
    adjointFinishedTime = time.time()
    # Evaluate all the adjoint res products at the same time for
    # efficiency:
    self.addDVSens(evalFuncs, dvSenses)
    self.addAdjointResProducts(adjoints, dvSenses)
    self.addXptSens(evalFuncs, xptSenses)
    self.addAdjointResXptSensProducts(adjoints, xptSenses)
    # Recast sensitivities into dict for user
    for i, f in enumerate(evalFuncs):
        key = self.name + '_%s' % f
        # Return sensitivities as array in sens dict
        funcsSens[key] = {self.varName: dvSenses[i].getArray().copy(),
                          self.coordName: xptSenses[i].getArray().copy()}
    totalSensitivityTime = time.time()
    if self.getOption('printTiming'):
        # Use self.pp consistently (the previous implementation mixed
        # in bare print calls, which would emit from every processor).
        self.pp('+--------------------------------------------------+')
        self.pp('|')
        self.pp('| TACS Adjoint Times:')
        self.pp('|')
        self.pp('| %-30s: %10.3f sec' % ('TACS Sens Setup Problem Time', setupProblemTime - startTime))
        self.pp('| %-30s: %10.3f sec' % (
            'TACS Adjoint RHS Time', adjointRHSTime - setupProblemTime))
        for f in evalFuncs:
            self.pp('| %-30s: %10.3f sec' % (
                'TACS Adjoint Solve Time - %s' % (f), adjointEndTime[f] - adjointStartTime[f]))
        self.pp('| %-30s: %10.3f sec' % ('Total Sensitivity Time', totalSensitivityTime - adjointFinishedTime))
        self.pp('|')
        self.pp('| %-30s: %10.3f sec' % ('Complete Sensitivity Time', totalSensitivityTime - startTime))
        self.pp('+--------------------------------------------------+')
def addSVSens(self, evalFuncs, svSensList):
    """
    Add the state-variable partial sensitivity to the adjoint RHS for
    the given evalFuncs.

    Parameters
    ----------
    evalFuncs : list[str]
        The functions the user wants returned
    svSensList : list[BVec] or list[ndarray]
        List of sensitivity vectors to add partial sensitivity to
    """
    # Make sure the assembler reflects this problem's variables
    self._updateAssemblerVars()
    # Collect the TACS function handles for the requested names
    handles = [self.functionList[name] for name in evalFuncs
               if name in self.functionList]
    # If the caller passed numpy arrays, operate on temporary BVec
    # copies; otherwise work on the supplied BVecs in place.
    usesArrays = isinstance(svSensList[0], np.ndarray)
    if usesArrays:
        workVecs = [self._arrayToVec(arr) for arr in svSensList]
    else:
        workVecs = svSensList
    self.assembler.addSVSens(handles, workVecs, self.alpha, self.beta, self.gamma)
    # Copy results back into the caller's arrays when needed
    if usesArrays:
        for arr, vec in zip(svSensList, workVecs):
            arr[:] = vec.getArray()
def addDVSens(self, evalFuncs, dvSensList, scale=1.0):
    """
    Add the partial sensitivity contribution due to design variables
    for the given evalFuncs.

    Parameters
    ----------
    evalFuncs : list[str]
        The functions the user wants returned
    dvSensList : list[BVec] or list[ndarray]
        List of sensitivity vectors to add partial sensitivity to
    scale : float
        Scalar to multiply partial sensitivity by. Defaults to 1.0
    """
    # Make sure the assembler reflects this problem's variables
    self._updateAssemblerVars()
    # Collect the TACS function handles for the requested names
    handles = [self.functionList[name] for name in evalFuncs
               if name in self.functionList]
    # If the caller passed numpy arrays, operate on temporary design
    # BVec copies; otherwise work on the supplied BVecs in place.
    usesArrays = isinstance(dvSensList[0], np.ndarray)
    if usesArrays:
        workVecs = [self._arrayToDesignVec(arr) for arr in dvSensList]
    else:
        workVecs = dvSensList
    self.assembler.addDVSens(handles, workVecs, scale)
    # Finalize the distributed sensitivity values across all procs
    for vec in workVecs:
        vec.beginSetValues()
        vec.endSetValues()
    # Copy results back into the caller's arrays when needed
    if usesArrays:
        for arr, vec in zip(dvSensList, workVecs):
            arr[:] = vec.getArray()
def addAdjointResProducts(self, adjointlist, dvSensList, scale=-1.0):
    """
    Add the adjoint-residual product contribution to the design
    variable sensitivity arrays.

    Parameters
    ----------
    adjointlist : list[BVec] or list[ndarray]
        List of adjoint vectors for residual sensitivity product
    dvSensList : list[BVec] or list[ndarray]
        List of sensitivity vectors to add product to
    scale : float
        Scalar to multiply product by. Defaults to -1.0
    """
    # Set problem vars to assembler
    self._updateAssemblerVars()
    # Create a tacs BVec copy for the operation if the input is a numpy array
    if isinstance(adjointlist[0], np.ndarray):
        adjointBVeclist = [self._arrayToVec(adjointArray) for adjointArray in adjointlist]
    # Otherwise the input is already a BVec and we can do the operation in place
    else:
        adjointBVeclist = adjointlist
    # Make sure BC terms are zeroed out in adjoint.
    # NOTE: when BVecs were passed in, this mutates the caller's vectors.
    for adjoint in adjointBVeclist:
        self.assembler.applyBCs(adjoint)
    # Create a tacs BVec copy for the operation if the output is a numpy array
    if isinstance(dvSensList[0], np.ndarray):
        dvSensBVecList = [self._arrayToDesignVec(dvSensArray) for dvSensArray in dvSensList]
    # Otherwise the input is already a BVec and we can do the operation in place
    else:
        dvSensBVecList = dvSensList
    self.assembler.addAdjointResProducts(adjointBVeclist, dvSensBVecList, scale)
    # Finalize sensitivity arrays across all procs
    for dvSensBVec in dvSensBVecList:
        dvSensBVec.beginSetValues()
        dvSensBVec.endSetValues()
    # Update the BVec values, if the input was a numpy array
    if isinstance(dvSensList[0], np.ndarray):
        for dvSensArray, dvSensBVec in zip(dvSensList, dvSensBVecList):
            # Copy values to numpy array
            dvSensArray[:] = dvSensBVec.getArray()
def addXptSens(self, evalFuncs, xptSensList, scale=1.0):
    """
    Add the partial sensitivity contribution due to nodal coordinates
    for the given evalFuncs.

    Parameters
    ----------
    evalFuncs : list[str]
        The functions the user wants returned
    xptSensList : list[BVec] or list[ndarray]
        List of sensitivity vectors to add partial sensitivity to
    scale : float
        Scalar to multiply partial sensitivity by. Defaults to 1.0
    """
    # Make sure the assembler reflects this problem's variables
    self._updateAssemblerVars()
    # Collect the TACS function handles for the requested names
    handles = [self.functionList[name] for name in evalFuncs
               if name in self.functionList]
    # If the caller passed numpy arrays, operate on temporary node
    # BVec copies; otherwise work on the supplied BVecs in place.
    usesArrays = isinstance(xptSensList[0], np.ndarray)
    if usesArrays:
        workVecs = [self._arrayToNodeVec(arr) for arr in xptSensList]
    else:
        workVecs = xptSensList
    self.assembler.addXptSens(handles, workVecs, scale)
    # Finalize the distributed sensitivity values across all procs
    for vec in workVecs:
        vec.beginSetValues()
        vec.endSetValues()
    # Copy results back into the caller's arrays when needed
    if usesArrays:
        for arr, vec in zip(xptSensList, workVecs):
            arr[:] = vec.getArray()
def addAdjointResXptSensProducts(self, adjointlist, xptSensList, scale=-1.0):
    """
    Add the adjoint-residual product contribution to the nodal
    coordinates sensitivity arrays.

    Parameters
    ----------
    adjointlist : list[BVec] or list[ndarray]
        List of adjoint vectors for residual sensitivity product
    xptSensList : list[BVec] or list[ndarray]
        List of sensitivity vectors to add product to
    scale : float
        Scalar to multiply product by. Defaults to -1.0
    """
    # Set problem vars to assembler
    self._updateAssemblerVars()
    # Create a tacs BVec copy for the operation if the input is a numpy array
    if isinstance(adjointlist[0], np.ndarray):
        adjointBVeclist = [self._arrayToVec(adjointArray) for adjointArray in adjointlist]
    # Otherwise the input is already a BVec and we can do the operation in place
    else:
        adjointBVeclist = adjointlist
    # Make sure BC terms are zeroed out in adjoint.
    # NOTE: when BVecs were passed in, this mutates the caller's vectors.
    for adjoint in adjointBVeclist:
        self.assembler.applyBCs(adjoint)
    # Create a tacs BVec copy for the operation if the output is a numpy array
    if isinstance(xptSensList[0], np.ndarray):
        xptSensBVecList = [self._arrayToNodeVec(xptSensArray) for xptSensArray in xptSensList]
    # Otherwise the input is already a BVec and we can do the operation in place
    else:
        xptSensBVecList = xptSensList
    self.assembler.addAdjointResXptSensProducts(adjointBVeclist, xptSensBVecList, scale)
    # Finalize sensitivity arrays across all procs
    for xptSensBVec in xptSensBVecList:
        xptSensBVec.beginSetValues()
        xptSensBVec.endSetValues()
    # Update the BVec values, if the input was a numpy array
    if isinstance(xptSensList[0], np.ndarray):
        for xptSensArray, xptSensBVec in zip(xptSensList, xptSensBVecList):
            # Copy values to numpy array
            xptSensArray[:] = xptSensBVec.getArray()
def getResidual(self, res, Fext=None):
    """
    Evaluate the structural residual (internal forces minus applied
    loads) for the current state. Only typically used with
    aerostructural analysis.

    Parameters
    ----------
    res : TACS BVec or numpy array
        Container into which the residual is placed.
    Fext : TACS BVec or numpy array, optional
        Distributed array containing additional loads (ex. aerodynamic
        forces for aerostructural coupling) to apply to the RHS of the
        static problem.
    """
    # Make sure assembler variables are up to date
    self._updateAssemblerVars()
    # Assemble the internal-force part of the residual.
    # (The previous implementation assembled the residual twice and
    # subtracted F in between; that first pass was dead code since
    # assembleRes overwrites self.res, so it has been removed.)
    self.assembler.assembleRes(self.res)
    # Build the RHS: fixed loads plus optional external loads
    self.rhs.copyValues(self.F)  # Fixed loads
    if Fext is not None:
        if isinstance(Fext, tacs.TACS.Vec):
            self.rhs.axpy(1.0, Fext)
        elif isinstance(Fext, np.ndarray):
            rhsArray = self.rhs.getArray()
            rhsArray[:] = rhsArray[:] + Fext[:]
    # Zero out bc terms in rhs
    self.assembler.applyBCs(self.rhs)
    # Form res = res - rhs
    self.res.axpy(-1.0, self.rhs)
    # Copy the result into the user-provided container
    if isinstance(res, tacs.TACS.Vec):
        res.copyValues(self.res)
    else:
        res[:] = self.res.getArray()
def addTransposeJacVecProduct(self, phi, prod, scale=1.0):
    """
    Adds product of transpose Jacobian and input vector into output vector
    as shown below:

        prod += scale * J^T . phi

    Parameters
    ----------
    phi : TACS BVec or numpy array
        Input vector to product with the transpose Jacobian.
    prod : TACS BVec or numpy array
        Output vector to add Jacobian product to.
    scale : float
        Scalar used to scale Jacobian product by.

    Notes
    -----
    This routine reuses ``self.update`` as scratch storage for the BC
    bookkeeping, clobbering its previous contents.
    """
    # Create a tacs bvec copy of the adjoint vector
    if isinstance(phi, tacs.TACS.Vec):
        self.phi.copyValues(phi)
    elif isinstance(phi, np.ndarray):
        self.phi.getArray()[:] = phi
    # Tacs doesn't actually transpose the matrix here so keep track of
    # RHS entries that TACS zeros out for BCs.
    # NOTE(review): K.mult is used without a transpose, so this is only a
    # true transpose product when K is symmetric -- confirm.
    bcTerms = self.update
    bcTerms.copyValues(self.phi)
    self.assembler.applyBCs(self.phi)
    bcTerms.axpy(-1.0, self.phi)
    # Set problem vars to assembler
    self._updateAssemblerVars()
    self.K.mult(self.phi, self.res)
    # Add bc terms back in
    self.res.axpy(1.0, bcTerms)
    # Accumulate the scaled product into the output container
    if isinstance(prod, tacs.TACS.Vec):
        prod.axpy(scale, self.res)
    else:
        prod[:] = prod + scale * self.res.getArray()
def zeroVariables(self):
    """
    Zero all the tacs solution b-vecs and push the zeroed state to
    the assembler.
    """
    for vec in (self.res, self.u, self.update):
        vec.zeroEntries()
    self.assembler.setVariables(self.u)
def solveAdjoint(self, rhs, phi):
    """
    Solve the structural adjoint.

    Parameters
    ----------
    rhs : TACS BVec or numpy array
        right hand side vector for adjoint solve
    phi : TACS BVec or numpy array
        BVec or numpy array into which the adjoint is saved

    Notes
    -----
    This routine reuses ``self.update`` as scratch storage for the BC
    bookkeeping, clobbering its previous contents.
    """
    # Set problem vars to assembler
    self._updateAssemblerVars()
    # Check if we need to initialize (assemble/factor the Jacobian)
    self._initializeSolve()
    # Copy the adjoint guess and RHS into internal work vectors
    if isinstance(phi, tacs.TACS.Vec):
        self.phi.copyValues(phi)
    elif isinstance(phi, np.ndarray):
        self.phi.getArray()[:] = phi
    if isinstance(rhs, tacs.TACS.Vec):
        self.adjRHS.copyValues(rhs)
    elif isinstance(rhs, np.ndarray):
        self.adjRHS.getArray()[:] = rhs
    # Tacs doesn't actually transpose the matrix here so keep track of
    # RHS entries that TACS zeros out for BCs.
    bcTerms = self.update
    bcTerms.copyValues(self.adjRHS)
    self.assembler.applyBCs(self.adjRHS)
    bcTerms.axpy(-1.0, self.adjRHS)
    # Solve the linear system
    self.KSM.solve(self.adjRHS, self.phi)
    self.assembler.applyBCs(self.phi)
    # Add bc terms back in
    self.phi.axpy(1.0, bcTerms)
    # Copy output values back to user vectors
    if isinstance(phi, tacs.TACS.Vec):
        phi.copyValues(self.phi)
    elif isinstance(phi, np.ndarray):
        phi[:] = self.phi.getArray()
def getVariables(self, states=None):
    """
    Return the current state values for the problem.

    Parameters
    ----------
    states : TACS BVec or numpy array, optional
        Container to place the current state variables into.

    Returns
    -------
    states : numpy array
        Copy of the current state vector.
    """
    if states is not None:
        # Fill whichever container type the caller provided
        if isinstance(states, tacs.TACS.Vec):
            states.copyValues(self.u)
        elif isinstance(states, np.ndarray):
            states[:] = self.u_array
    return self.u_array.copy()
def setVariables(self, states):
    """
    Set the structural states for the current load case.

    Parameters
    ----------
    states : TACS BVec or ndarray
        Values to set. Must be the size of getNumVariables().
    """
    # Accept either a raw numpy array or a TACS BVec
    if isinstance(states, np.ndarray):
        self.u_array[:] = states[:]
    elif isinstance(states, tacs.TACS.Vec):
        self.u.copyValues(states)
    # Enforce boundary conditions, then push the state to the assembler
    self.assembler.applyBCs(self.u)
    self.assembler.setVariables(self.u)
def writeSolution(self, outputDir=None, baseName=None, number=None):
    """
    Generic shell function that writes the output file(s).

    The user or a calling program can invoke this and pyTACS writes
    whatever files the user has configured. Intended to be used
    together with the associated logical options that control the
    writing procedure.

    Parameters
    ----------
    outputDir : str or None
        Directory to write into; defaults to the 'outputDir' option.
    baseName : str or None
        Base filename; defaults to the problem name. Typically only
        supplied by an external solver.
    number : int or None
        Caller-supplied index for the solution file. Again, typically
        only used from an external solver.
    """
    # Make sure assembler variables are up to date.
    self._updateAssemblerVars()

    # Resolve defaults for the output location and file stem.
    if outputDir is None:
        outputDir = self.getOption('outputDir')
    if baseName is None:
        baseName = self.name

    # Number the solution file: an explicit caller-provided index wins;
    # otherwise fall back to the internal call counter when the
    # 'numberSolutions' option is set.
    if number is not None:
        baseName = '%s_%3.3d' % (baseName, number)
    elif self.getOption('numberSolutions'):
        baseName = '%s_%3.3d' % (baseName, self.callCounter)

    # Only touch the filesystem when writing is enabled.
    if self.getOption('writeSolution'):
        fileName = os.path.join(outputDir, baseName) + '.f5'
        self.outputViewer.writeToFile(fileName)
|
nilq/baby-python
|
python
|
# A collection of functions for loading the esm2m perturbation experiments
import xarray as xr
from gfdl_utils.core import get_pathspp
def get_path(variable=None,
             ppname=None,
             override=False,
             experiments=None,
             timespan=None):
    """Returns a dictionary of paths relevant to the specified
    experiments, variables, and timespans.

    Parameters
    ----------
    variable : str
        Name of variable, or None (for all variables)
    ppname : str
        Name of postprocess directory from
            ocean_bling_tracers
            ocean_bling_ocn_flux
            bling_atm_flux
            ocean_gat_dic
        Note that if variable is specified exactly, ppname is not needed.
        If variable is None or has a wildcard character, ppname is required.
    override : bool
        Get variables from the override experiment.
    experiments : str or list of str
        Perturbation experiments from which to grab data.
        If None, get for all experiments.
    timespan : str
        Specify the time string if a subset of years required.

    Returns
    -------
    path : dict of str
        One expanded path pattern per experiment.

    Raises
    ------
    NameError
        If neither an exact ``variable`` nor a ``ppname`` is given.
    ValueError
        If ``ppname`` is not one of the recognized postprocess names.
    """
    config = 'MOM5_SIS_BLING_CORE2-gat'
    if override:
        config = config + '-override-po4'
    pp = ('/archive/Richard.Slater/Siena/siena_201308_rds-c3-gat-slurm/'
          + config + '/gfdl.ncrc3-intel16-prod-openmp/pp/')
    out = 'ts'
    local = 'monthly/10yr'

    # Unless experiment is specified, set to all.
    if experiments is None:
        experiments = ['', '_gat', '_zero', '_double']
    if isinstance(experiments, str):  # Force into a list
        experiments = [experiments]
    else:
        # Copy so we never mutate the caller's list (entries may be
        # removed below for ocean_gat_dic).
        experiments = list(experiments)

    # Unless variable is specified, set to all.
    if variable is None:
        variable = '*'
    # Unless timespan is specified, set to all.
    if timespan is None:
        timespan = '*'

    # If ppname is not specified, derive it from the variable name.
    if ppname is None:
        d = get_variable_dict()
        if variable in d:
            ppname = d[variable]
        else:
            raise NameError('If ppname is not specified, must give exact variable name.' +
                            ' To specify wildcard variable, specify ppname.')

    # Configure correct ppname (prefix gets the experiment suffix spliced in).
    if ppname == 'ocean_bling_tracers':
        ppname_pre = 'ocean_bling'
        ppname_suf = '_tracers'
    elif ppname in ['ocean_bling_ocn_flux', 'bling_atm_flux', 'ocean_gat_dic']:
        ppname_pre = ppname
        ppname_suf = ''
    else:
        # Previously an unknown ppname fell through and caused a confusing
        # NameError on ppname_pre below; fail fast with a clear message.
        raise ValueError('Unrecognized ppname: {}'.format(ppname))

    # ocean_gat_dic has no non-gat control.
    if (ppname == 'ocean_gat_dic') and ('' in experiments):
        experiments.remove('')

    path = {}
    for e in experiments:
        pathDict = {'pp': pp,
                    'ppname': ppname_pre + e + ppname_suf,
                    'out': out,
                    'local': local,
                    'time': timespan,
                    'add': variable}
        path[e] = get_pathspp(**pathDict)
    return path
def load_exps(variable=None,
              ppname=None,
              override=False,
              experiments=None,
              timespan=None,
              verbose=False):
    """Returns a dictionary of datasets for each of the specified
    experiments, variables, and timespans.

    Parameters
    ----------
    variable : str
        Name of variable, or None (for all variables)
    ppname : str
        Name of postprocess directory from
            ocean_bling_tracers
            ocean_bling_ocn_flux
            bling_atm_flux
            ocean_gat_dic
        Note that if variable is specified exactly, ppname is not needed.
        If variable is None or has a wildcard character, ppname is required.
    override : bool
        Get variables from the override experiment.
    experiments : str or list of str
        Perturbation experiments from which to grab data.
        If None, get for all experiments.
    timespan : str
        Specify the time string if a subset of years required.
    verbose : bool
        Print paths to page.

    Returns
    -------
    dd : dict or xarray.Dataset
        Dictionary of {xarray.Dataset}'s, one entry per experiment.
        If only a single experiment was loaded, that dataset is
        returned directly instead of a one-entry dict.
    """
    paths = get_path(variable=variable, ppname=ppname, override=override,
                     experiments=experiments, timespan=timespan)
    dd = {}
    for p, path in paths.items():
        if verbose:
            print(path)
        dd[p] = xr.open_mfdataset(path)
    if len(dd) == 1:
        # Bug fix: the old code did `dd = dd[experiments]`, which raised a
        # TypeError when `experiments` was passed as a one-element list
        # (lists are unhashable). Return the sole dataset directly instead.
        return next(iter(dd.values()))
    return dd
def load_grid(fromwork=True, z=None, z_i=None):
    """Load the ESM2M model grid, optionally augmented with cell thickness.

    Parameters
    ----------
    fromwork : bool
        If True, read the augmented grid saved to /work
        (see notebook save_grid.ipynp); otherwise read the raw
        static file from the archive.
    z, z_i : xarray.DataArray or None
        Layer centers and interfaces; when `z` is given, `dz` and
        `volume_t` are derived from `z_i` and added to the grid.
    """
    if fromwork:
        gridpath = '/work/gam/projects/bio-pump-timescales/data/esm2m/raw/grid.nc'
    else:
        pp = '/archive/Richard.Slater/Siena/siena_201308_rds-c3-gat-slurm/MOM5_SIS_BLING_CORE2-gat/gfdl.ncrc3-intel16-prod-openmp/pp/'
        gridpath = pp + 'static.nc'
    grid = xr.open_dataset(gridpath)
    if z is not None:
        # Layer thickness from interface depths, re-labelled onto layer centers.
        dz = (z_i.diff('st_edges_ocean')
                 .rename({'st_edges_ocean': 'st_ocean'})
                 .assign_coords({'st_ocean': z}))
        grid['dz'] = dz
        grid['volume_t'] = grid['area_t'] * grid['dz']
    return grid
def calc_anom(dd):
    """Return anomalies of each perturbation relative to the '_gat' control."""
    control = dd['_gat']
    return {
        'zero': dd['_zero'] - control,
        'double': dd['_double'] - control,
        'noneq': dd[''] - control,
    }
def get_variable_dict():
    """Map each known diagnostic variable name to its postprocess directory."""
    # Variables grouped by the postprocess directory they live in;
    # flattened below into a single {variable: ppname} lookup table.
    groups = {
        'ocean_bling_tracers': [
            'alk', 'alpha', 'biomass_p', 'chl', 'co2_alpha', 'co3_ion',
            'delta_csurf', 'delta_pco2', 'dic_area_integral', 'dic',
            'dic_stf', 'dic_volume_integral', 'dop_area_integral', 'dop',
            'dop_volume_integral', 'fed', 'fed_stf', 'htotal',
            'integral_dic', 'integral_dic_stf', 'irr_mem', 'jdic_100',
            'o2', 'pco2_surf', 'po4_area_integral', 'po4',
            'po4_volume_integral',
        ],
        'ocean_bling_ocn_flux': [
            'co2_flux_alpha_ocn', 'co2_flux_cair_ice_ocn',
            'co2_flux_csurf_ocn', 'co2_flux_flux_ice_ocn',
            'co2_flux_kw_ice_ocn', 'co2_flux_schmidt_ocn',
            'o2_flux_alpha_ocn', 'o2_flux_csurf_ocn',
            'o2_flux_flux_ice_ocn', 'o2_flux_schmidt_ocn',
        ],
        'ocean_gat_dic': [
            'atm_gas_flux', 'atm_gas_input', 'atm_mol_wgt',
            'base_mix_ratio', 'gas_mol_wgt', 'mix_ratio',
            'total_atm_mass', 'total_gas_mass',
        ],
    }
    return {var: ppname for ppname, names in groups.items() for var in names}
def disp_variables():
    """Return the list of all variable names known to get_variable_dict()."""
    return [name for name in get_variable_dict()]
def add_override_suffix(directory, override):
    """Append the experiment-specific subdirectory to `directory`.

    `override` selects the po4-override run; otherwise the no-override run.
    """
    return directory + ('override-po4/' if override else 'no-override/')
|
nilq/baby-python
|
python
|
import imports.dataHandler as jdata
import imports.passwordToKey as keys
import imports.randomText as rand_text
import pyperclip as clipboard
import imports.CONSTS as CONSTS
import os
from cryptography.fernet import Fernet
from getpass import getpass
import json
# Record names reserved for internal bookkeeping; never treated as user records.
protected = ["key", "state"]
# UI state-machine values held in passwordManager.state.
MAIN_MENU = 0
RECORDS = 1
# Data-file layout version written by init()/update_ver().
VERSION = "v1.0.1"
class passwordManager:
    """Interactive, terminal-driven password manager.

    Records are stored per named database. The master key is derived from
    username+password; a separate Fernet key (stored in the master file)
    encrypts the individual record blobs held in a companion data file.
    """

    def __init__(self, pathDir):
        # Prompt for credentials until a database under `pathDir` opens,
        # then enter the main menu loop.
        while(True):
            value = input("Enter username : ")
            print("Enter password")
            password = getpass()
            # Key is derived deterministically from username + password.
            key = keys.passwordToKey(value, password)
            self.state = MAIN_MENU
            self.database = ""
            try:
                self.pathName = pathDir + value
                self.data = jdata.dataBase(pathDir + value, key)
                self.key = key
                self.user = value
                self.checkUpdate()
                break
                # print(self.data.json)
                # Do something with the file
            except:
                # NOTE(review): bare except also hides unrelated errors
                # (I/O failures, bugs); presumably intended to catch a
                # decryption failure only -- confirm and narrow.
                print("Wrong username or password, try again!")
        self.main_menu()

    def update_ver(self):
        # Migrate a pre-versioned data file to the VERSION layout:
        # old layout kept records at the top level; the new one nests
        # them under "orecords" and stamps a "version".
        print("Initiating update...")
        orecords = {}
        for record in self.data.json:
            if(not self.isProtected(record, False)):
                orecords[record] = self.data.json[record]
        ndata = {}
        ndata["state"] = self.data.json["state"]
        ndata["key"] = self.data.json["key"]
        ndata["version"] = VERSION
        ndata["orecords"] = orecords
        self.data.json = ndata
        # Preserve the old encrypted-blob file under an "_orecords" name.
        os.rename(self.pathName + "_data" + CONSTS.SED,
                  self.pathName + "_data_orecords" + CONSTS.SED)
        self.data.save()
        print("update done...")

    def loadFile(self, record_name):
        # Open the encrypted-blob file for database `record_name` using
        # the record key stored in the master file.
        key = self.data.json["key"].encode()
        self.kdata = jdata.dataBase(
            self.pathName + "_data_" + record_name, key)

    def checkUpdate(self):
        # First run: confirm the password once more and initialize.
        # Otherwise migrate if the file predates versioning.
        if("state" not in self.data.json):
            self.confirmPass()
            self.init()
        else:
            if "version" not in self.data.json:
                self.update_ver()
        # key = self.data.json["key"].encode()
        # self.kdata = jdata.dataBase(self.pathName + "_data", key)

    def confirm(self) -> bool:
        # Ask y/n, then re-verify the master password. Returns True only
        # when the user agrees AND re-enters the correct password;
        # a wrong password restarts the whole confirmation (recursion).
        value = input("confirm to procced! y/n : ")
        if(value.lower() == "y" or value.lower() == "yes"):
            print("Enter password to confirm :")
            password = getpass()
            key = keys.passwordToKey(self.user, password)
            if(key == self.key):
                return True
            print("wrong password try again!")
            return self.confirm()
        return False

    def isProtected(self, value, log=True) -> bool:
        # True when `value` is a reserved internal name; optionally warn.
        pr = False
        for name in protected:
            if(value == name):
                pr = True
                break
        if(pr and log):
            print("this name is protected, try different name")
        return pr

    def confirmPass(self):
        # Block until the user re-enters the correct master password.
        while(True):
            print("Enter password to confirm :")
            password = getpass()
            key = keys.passwordToKey(self.user, password)
            if(key == self.key):
                return True
            print("wrong password try again!")

    def getHelpRecords(self):
        # Print the command reference for the records menu.
        print("commands : ")
        print("add example_unqiue_name example_username/-n example_password/random/-r")
        print("load unqiue_name or load unique_name -n (copy username then password)")
        print("remove or delete -n")
        print("records : show all records")
        print("back : Go back")
        print("help")

    def main_menu(self):
        # Top-level REPL: dispatches on self.state between the main menu
        # and the per-database records menu.
        while(True):
            if self.state == MAIN_MENU:
                value = input("Enter command : ").split()
                try:
                    if value[0] == "help":
                        print("Main menu commands:")
                        print("load -n, where n is the name of the db, load database.")
                        print("adddb -n, where n is the name of the new database.")
                        print("records - prints all databases.")
                        print("version - print version.")
                        print("quit - exist.")
                        print()
                    if value[0] == "quit":
                        return
                    if value[0] == "records":
                        i = 0
                        for record in self.data.json:
                            if(not self.isProtected(record, False)):
                                i += 1
                                print("(", i, ") : ", record)
                    if value[0] == "version":
                        print("version", self.data.json["version"])
                    if value[0] == "load":
                        if value[1] in self.data.json:
                            self.loadFile(value[1])
                            self.state = RECORDS
                            self.database = value[1]
                    if value[0] == "adddb":
                        if value[1] in self.data.json:
                            print("db already exist!")
                            continue
                        self.data.json[value[1]] = {}
                        self.loadFile(value[1])
                        self.state = RECORDS
                        self.database = value[1]
                except:
                    # NOTE(review): bare except swallows all errors (including
                    # IndexError from empty input) -- consider narrowing.
                    print("soemthing went wrong!")
            elif self.state == RECORDS:
                self.records_menu()

    def records_menu(self):
        # REPL for a loaded database: add/load/remove records, each record
        # encrypted with its own Fernet key stored in the master file.
        run = True
        self.getHelpRecords()
        while(run):
            value = input("Enter command : ").split()
            if(value[0] == "back"):
                run = False
                self.database = ""
                self.state = MAIN_MENU
            elif(value[0] == "add"):
                # add <unique_name> <username|-n> <password|random|-r>
                if(len(value) > 3 and not self.isProtected(value[1])):
                    add = True
                    if(value[1] in self.data.json[self.database]):
                        print(
                            "there already exist a record with the same unique_name!")
                        add = self.confirm()
                    if(add):
                        if(value[3] == "-r" or value[3] == "random"):
                            value[3] = rand_text.randomStringDigits(12)
                        # Per-record key; stored (plain) in the master file,
                        # encrypted blob goes into the companion kdata file.
                        key = Fernet.generate_key()
                        self.data.json[self.database][value[1]] = key.decode()
                        jdata = json.loads("{}")
                        jdata["name"] = value[1]
                        if(value[2] == "-n"):
                            # "-n" means: reuse the record name as the username.
                            jdata["username"] = value[1]
                        else:
                            jdata["username"] = value[2]
                        jdata["pass"] = value[3]
                        jdata = json.dumps(jdata)
                        fkey = Fernet(key)
                        jdata = fkey.encrypt(jdata.encode())
                        self.kdata.json[value[1]] = jdata.decode()
                        clipboard.copy(value[3])
                        self.kdata.save()
                        self.data.save()
                        print("copied the password to clipboard!")
                else:
                    print("invalid syntax : valid syntax example")
                    print(
                        "add example_unqiue_name example_username/-n example_password/random/-r")
            elif(value[0] == "load"):
                # load <unique_name> [-n]  (-n copies username first)
                if(len(value) > 1 and not self.isProtected(value[1])):
                    if(value[1] in self.data.json[self.database]):
                        key = self.data.json[self.database][value[1]].encode()
                        data = self.kdata.json[value[1]]
                        fkey = Fernet(key)
                        data = fkey.decrypt(data.encode())
                        data = json.loads(data.decode())
                        # print(data["pass"])
                        # print(clipboard.paste())
                        if(len(value) > 2 and value[2] == "-n"):
                            clipboard.copy(data["username"])
                            print("copied username to clipboard!")
                            value = input("copy password y/n : ")
                            if(value.lower() == "y" or value.lower() == "yes"):
                                clipboard.copy(data["pass"])
                                print("copied the password to clipboard!")
                        else:
                            clipboard.copy(data["pass"])
                            print("username : ", data["username"])
                            print("copied the password to clipboard!")
                    else:
                        print(value[1], " record doesnt exist!")
                else:
                    print("invalid syntax : valid syntax example")
                    print(
                        "load unqiue_name or load unique_name -n (copy username then password)")
            elif(value[0] == "clear"):
                import os
                os.system('cls' if os.name == 'nt' else 'clear')
            elif(value[0] == "help"):
                self.getHelpRecords()
            elif(value[0] == "remove" or value[0] == "delete"):
                self.delete(value)
            elif(value[0] == "records"):
                i = 0
                for record in self.data.json[self.database]:
                    if(not self.isProtected(record, False)):
                        i += 1
                        print("(", i, ") : ", record)

    def delete(self, user_input):
        # Delete record user_input[1] from the active database after
        # a y/n confirmation.
        if(self.isProtected(user_input[1])):
            # a.k.a values we dont want to delete
            return
        # NOTE(review): membership is tested against the TOP-LEVEL json,
        # but the deletion targets self.data.json[self.database] -- this
        # looks like it should be `user_input[1] in self.data.json[self.database]`.
        # The encrypted blob in self.kdata is also never removed. Confirm intent.
        if(user_input[1] in self.data.json):
            value = input("you sure you want to delete this record y/n : ")
            if(value.lower() == "y" or value.lower() == "yes"):
                del self.data.json[self.database][user_input[1]]
                self.data.save()

    def init(self):
        # First-run initialization: mark the store initialized, create the
        # shared record key, and stamp the current layout version.
        self.data.json["state"] = "initialized"
        key = Fernet.generate_key()
        self.data.json["key"] = key.decode()
        self.data.json["version"] = VERSION
        # self.kdata = jdata.dataBase(self.pathName + "_data", key)
        # self.kdata.save()
        self.data.save()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from doodle.config import CONFIG
from doodle.core.models.article import Article, ArticleHitCount
from doodle.core.models.comment import ArticleComments
from ..base_handler import BaseHandler
class HomeHandler(BaseHandler):
    """Render the blog home page: article list plus hit and comment counts."""

    def get(self):
        articles, next_cursor = Article.get_articles_for_homepage(self.cursor)
        if not articles:
            # Nothing on this page: both lookup tables stay empty.
            hit_counts = replies_dict = {}
        else:
            ids = [entry.id for entry in articles]
            hit_counts = ArticleHitCount.get_by_ids(ids)
            replies_dict = ArticleComments.get_comment_count_of_articles(ids)
        self.set_cache(CONFIG.DEFAULT_CACHE_TIME, is_public=True)
        context = {
            'title': CONFIG.BLOG_TITLE,
            'page': 'home',
            'articles': articles,
            'hit_counts': hit_counts,
            'replies_dict': replies_dict,
            'next_cursor': next_cursor
        }
        self.render('web/home.html', context)
|
nilq/baby-python
|
python
|
# NOTICE
# This software was produced for the U.S. Government under contract FA8702-21-C-0001,
# and is subject to the Rights in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
# ©2021 The MITRE Corporation. All Rights Reserved.
'''
A PropertyConstraints object describes type and cardinality constraints
for a single property of a single ontology class.
This module implements the PropertyConstraints object
'''
from message import OntologyError
from context import Context
class PropertyConstraints:
    '''
    Instances of this class describe the type and cardinality constraints
    for a property of an ontology class.

    This class facilitates collecting the constraints and checking them
    for consistency.

    Attributes:
        onto_class_uri   An rdflib.term.URIRef object for the ontology class or None
        property_uri     An rdflib.term.URIRef object or None
        min_cardinality  An integer or None
        max_cardinality  An integer or None
        value_range      An rdflib.term.URIRef object
        _qualified       A boolean, or None

    Two PropertyConstraint instances are equal if their attributes,
    not including _qualified, are equal.
    '''

    def __init__(self, onto_class_uri=None, property_uri=None):
        '''
        Create and initialize an instance of this class

        The arguments are for error messages and self-description.

        Arguments:
            onto_class_uri   An rdflib.term.URIRef for the ontology class whose
                             property is constrained by these PropertyConstraints
            property_uri     An rdflib.term.URIRef object for the property constrained by these PropertyConstraints
        '''
        self.onto_class_uri = onto_class_uri
        self.property_uri = property_uri
        self.min_cardinality = None
        self.max_cardinality = None
        self.value_range = None
        self._qualified = None     # True/False/None where None means unset

    def add_min_cardinality(self, min_cardinality):
        '''
        "Add" specified min_cardinality value if possible.

        Arguments:
            min_cardinality   The minimum cardinality, an integer
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self._qualified is None:
            self._qualified = False
        elif self._qualified is True:
            error_messages.append(self._get_ontology_error('unqualified min_cardinality specified for a qualified constraint'))
        if self.min_cardinality is None:
            if self.max_cardinality is not None and min_cardinality > self.max_cardinality:
                error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
            else:
                self.min_cardinality = min_cardinality
        elif self.min_cardinality != min_cardinality:
            error_messages.append(self._get_ontology_error('multiple min_cardinality values specified'))
        return error_messages

    def add_max_cardinality(self, max_cardinality):
        '''
        "Add" specified max_cardinality value if possible.

        Arguments:
            max_cardinality   The maximum cardinality, an integer
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self._qualified is None:
            self._qualified = False
        elif self._qualified is True:
            error_messages.append(self._get_ontology_error('unqualified max_cardinality specified for a qualified constraint'))
        if self.max_cardinality is None:
            if self.min_cardinality is not None and max_cardinality < self.min_cardinality:
                # Fixed message: the violation here is min > max, not the reverse.
                error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
            else:
                self.max_cardinality = max_cardinality
        elif self.max_cardinality != max_cardinality:
            error_messages.append(self._get_ontology_error('multiple max_cardinality values specified'))
        return error_messages

    def add_cardinality(self, cardinality):
        '''
        "Add" specified cardinality to self if possible.

        Arguments:
            cardinality   The (minimum and maximum) cardinality, an integer
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self._qualified is None:
            self._qualified = False
        elif self._qualified is True:
            error_messages.append(self._get_ontology_error('unqualified cardinality specified for a qualified constraint'))
        if self.min_cardinality is None and self.max_cardinality is None:
            self.min_cardinality = cardinality
            self.max_cardinality = cardinality
        elif self.min_cardinality != cardinality or self.max_cardinality != cardinality:
            error_messages.append(self._get_ontology_error('multiple cardinality values specified'))
        return error_messages

    def add_qualified_min_cardinality(self, min_cardinality):
        '''
        "Add" specified qualified min_cardinality value if possible.

        Arguments:
            min_cardinality   The minimum qualified cardinality, an integer
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self._qualified is None:
            self._qualified = True
        elif self._qualified is False:
            error_messages.append(self._get_ontology_error('qualified min_cardinality specified for an unqualified constraint'))
        if self.min_cardinality is None:
            if self.max_cardinality is not None and min_cardinality > self.max_cardinality:
                error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
            else:
                self.min_cardinality = min_cardinality
        elif self.min_cardinality != min_cardinality:
            error_messages.append(self._get_ontology_error('multiple min_cardinality values specified'))
        return error_messages

    def add_qualified_max_cardinality(self, max_cardinality):
        '''
        "Add" specified qualified max_cardinality value if possible.

        Arguments:
            max_cardinality   The maximum qualified cardinality, an integer
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self._qualified is None:
            self._qualified = True
        elif self._qualified is False:
            error_messages.append(self._get_ontology_error('qualified max_cardinality specified for an unqualified constraint'))
        if self.max_cardinality is None:
            if self.min_cardinality is not None and max_cardinality < self.min_cardinality:
                # Fixed message: the violation here is min > max, not the reverse.
                error_messages.append(self._get_ontology_error('min_cardinality exceeds max_cardinality'))
            else:
                self.max_cardinality = max_cardinality
        elif self.max_cardinality != max_cardinality:
            error_messages.append(self._get_ontology_error('multiple max_cardinality values specified'))
        return error_messages

    def add_qualified_cardinality(self, cardinality):
        '''
        "Add" specified qualified cardinality to self if possible.

        Arguments:
            cardinality   The (minimum and maximum) qualified cardinality, an integer
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self._qualified is None:
            self._qualified = True
        elif self._qualified is False:
            # Fixed message: this branch fires when the constraint was
            # previously established as UNqualified.
            error_messages.append(self._get_ontology_error('qualified cardinality specified for an unqualified constraint'))
        if self.min_cardinality is None and self.max_cardinality is None:
            self.min_cardinality = cardinality
            self.max_cardinality = cardinality
        elif self.min_cardinality != cardinality or self.max_cardinality != cardinality:
            error_messages.append(self._get_ontology_error('multiple cardinality values specified'))
        return error_messages

    def add_value_range(self, value_range):
        '''
        Add specified value_range (usually xsd) value if possible.

        Arguments:
            value_range   The rdflib.term.URIRef of the range type
        Return:
            List of ErrorMessages on failure, empty list on success.
        '''
        error_messages = []
        if self.value_range is None:
            self.value_range = value_range
        else:
            error_messages.append(self._get_ontology_error('multiple ranges specified'))
        return error_messages

    def merge_parent(self, parent):
        '''
        Arguments:
            parent   Another PropertyConstraints object, presumably belonging to a parent class
        Return:
            A tuple consisting of two items:
                A new PropertyConstraints object containing the result of the merger
                A (hopefully empty) list of ErrorMessage objects
        Side effects:
            None.  Self is not changed.

        Definitions and Algorithm:
            Each PropertyConstraint object has three attributes of interest: min_cardinality, max_cardinality, and range.
            If any of these attributes is None, that means it has not been set; there is no default value.
            Before the merge operation, self's constraints must be equal to or tighter than parent's.
            If the parent's constraint is tighter, the offending attributes are ignored and we get an ErrorMessage.

            The merge operation builds the new merged PropertyConstraints object as follows:
            1. Start with an empty merged PropertyConstraints object and an empty list of ErrorMessages
            2. Copy attributes from self or parent to merged as follows:
               a. If an attribute is unset (None) in self
                      merged attribute is parent's value (which could also be None)
               b. If an attribute has a value in self and is unset (None) in parent,
                      merged attribute is self's value
               c. If an attribute has a value in self and a value in parent.
                      merged attribute is self's value (because self's value should be at least as tight as parent's)
                      if parent's constraint is strictly tighter than self's, add ErrorMessage
            3. If both min_cardinality and max_cardinality have values in merged PropertyConstraints,
                   If min_cardinality > max_cardinality, add ErrorMessage and revert to unmerged values.

            The _qualified attribute in the merged constraints is not used.
        '''
        # Step 1.  Initialize.
        merged = PropertyConstraints(onto_class_uri=self.onto_class_uri, property_uri=self.property_uri)
        error_messages = []

        # Step 2 for min_cardinality.
        merged.min_cardinality = parent.min_cardinality if self.min_cardinality is None else self.min_cardinality
        if parent.min_cardinality is not None and self.min_cardinality is not None:
            if parent.min_cardinality > self.min_cardinality:    # parent has tighter constraint
                error_messages.append(self._get_ontology_error(
                    'cannot merge min_cardinality {} with {} from {}'.format(
                        self.min_cardinality, parent.min_cardinality, parent.onto_class_uri)))

        # Step 2 for max_cardinality.
        merged.max_cardinality = parent.max_cardinality if self.max_cardinality is None else self.max_cardinality
        if parent.max_cardinality is not None and self.max_cardinality is not None:
            if parent.max_cardinality < self.max_cardinality:    # parent has tighter constraint
                error_messages.append(self._get_ontology_error(
                    'cannot merge max_cardinality {} with {} from {}'.format(
                        self.max_cardinality, parent.max_cardinality, parent.onto_class_uri)))

        # Step 2 for range.
        merged.value_range = parent.value_range if self.value_range is None else self.value_range
        if parent.value_range is not None and self.value_range is not None:
            if parent.value_range != self.value_range:   # inconsistent ranges (we don't check for subclass yet)
                error_messages.append(self._get_ontology_error(
                    'cannot merge value_range {} with {} from {}'.format(
                        self.value_range, parent.value_range, parent.onto_class_uri)))

        # Step 3.  Make sure min <= max
        if merged.min_cardinality is not None and merged.max_cardinality is not None:
            if merged.min_cardinality > merged.max_cardinality:
                error_messages.append(self._get_ontology_error(
                    'cannot merge cardinalities from {} because min_cardinality exceeds max_cardinality'.format(parent.onto_class_uri)))
                merged.min_cardinality = self.min_cardinality
                merged.max_cardinality = self.max_cardinality

        # Return merged PropertyConstraints and List of error messages
        return merged, error_messages

    def check_consistency(self):
        '''
        Check this PropertyConstraints object for global inconsistencies
        that could not be determined when adding items one at a time.

        Return:
            List of ErrorMessages if inconsistencies were found, empty list if not.
        '''
        error_messages = []
        if self._qualified is True:
            if self.value_range is None:
                error_messages.append(self._get_ontology_error('qualified constraint has no range'))
        if self._qualified is False:
            if self.value_range is not None:
                error_messages.append(self._get_ontology_error('unqualified constraint has range'))
        # A _qualified of None (no cardinality facts seen) is not an error.
        return error_messages

    def describe(self, context=None):
        '''
        Assemble and return a plain-text description of these PropertyConstraints

        Return:
            A single-line string describing this PropertyConstraints object.
        '''
        value = lambda n: 'value' if n == 1 else 'values'
        phrases = []
        if context is None:
            context = Context()
        if self.onto_class_uri:
            phrases.append('Class {}'.format(context.format(self.onto_class_uri)))
        if self.property_uri:
            phrases.append('Property {}'.format(context.format(self.property_uri)))
        else:
            phrases.append('Property')
        if self.max_cardinality == 0:
            phrases.append('may have no values')
        elif self.min_cardinality in (None, 0):
            if self.max_cardinality is None:
                phrases.append('may have any number of values')
            else:   # self.max_cardinality > 0
                phrases.append('may have at most {} {}'.format(self.max_cardinality, value(self.max_cardinality)))
        else:       # self.min_cardinality > 0
            if self.max_cardinality is None:
                phrases.append('must have at least {} {}'.format(self.min_cardinality, value(self.min_cardinality)))
            elif self.min_cardinality == self.max_cardinality:
                phrases.append('must have exactly {} {}'.format(self.min_cardinality, value(self.min_cardinality)))
            else:   # self.max_cardinality > 0
                phrases.append('must have between {} and {} values'.format(self.min_cardinality, self.max_cardinality))
        if self.value_range:
            phrases.append('of type {}'.format(self.value_range))
        return ' '.join(phrases)

    def _get_ontology_error(self, message):
        '''
        Arguments:
            message   A message string describing some kind of error condition
        Return:
            An OntologyError object using self.onto_class_uri and self.property_uri with the message
        '''
        return OntologyError(
            message='constraint violation: ' + message,
            onto_class_uri=self.onto_class_uri,
            property_uri=self.property_uri)

    def __members(self):
        '''
        Two instances of this class are equal if the __member attributes are equal
        '''
        return (self.onto_class_uri, self.property_uri, self.min_cardinality, self.max_cardinality, self.value_range)

    def __str__(self):
        return '<{} {} [{}-{}] {}>'.format(
            self.onto_class_uri if self.onto_class_uri else None,
            self.property_uri if self.property_uri else 'DATATYPE',
            '?' if self.min_cardinality is None else self.min_cardinality,
            '?' if self.max_cardinality is None else self.max_cardinality,
            self.value_range if self.value_range else '?')

    def __eq__(self, other):
        '''
        Two instances of this class are equal if the __member attributes are equal
        '''
        if type(other) is type(self):
            return self.__members() == other.__members()
        else:
            return False

    def __hash__(self):
        '''
        Two instances of this class are equal if the __member attributes are equal
        '''
        return hash(self.__members())
|
nilq/baby-python
|
python
|
from mock import Mock
from flows.simulacra.youtube_dl.factory import youtube_dl_flow_factory
from flows.simulacra.youtube_dl.post import download_videos
from tests.testcase import TestCase
class TestDownloadVideos(TestCase):
    """Unit tests for flows.simulacra.youtube_dl.post.download_videos."""

    def setUp(self):
        # Patch the module-level `open` so no real file is ever touched.
        self.open = self.set_up_patch(
            'flows.simulacra.youtube_dl.post.open'
        )
        # Hand-wire the context-manager protocol on the mocked file object:
        # __exit__ receives (instance, exc_type, exc_val, tb) and reports
        # nothing handled by returning None.
        self.open.return_value.__exit__ = lambda a, b, c, d: None
        self.file_handle = Mock()
        # readlines() yields the channel names; an iterator mimics a
        # stream that can only be consumed once.
        self.file_handle.readlines.return_value = iter([
            'some_channel1',
            'some_other_channel2'
        ])
        # __enter__ hands back our fake file handle.
        self.open.return_value.__enter__ = lambda x: self.file_handle
        # Patch the job-posting entry point so we can assert on its call.
        self.post_job = self.set_up_patch(
            'flows.simulacra.youtube_dl.post.post_job'
        )

    def test_download_videos_opens_channels_file(self):
        # The channels file path passed in must be the one opened.
        download_videos('/tmp/some_list_of_yt_channels.txt')

        self.open.assert_called_once_with(
            '/tmp/some_list_of_yt_channels.txt'
        )

    def test_download_videos_reads_lines_from_channels_file(self):
        download_videos('/tmp/some_list_of_yt_channels.txt')

        self.file_handle.readlines.assert_called_once_with()

    def test_download_videos_posts_job_to_download_yt_videos(self):
        # Default invocation posts the flow without drawing a hierarchy.
        download_videos('/tmp/some_list_of_yt_channels.txt')

        expected_channels = ['some_channel1', 'some_other_channel2']
        self.post_job.assert_called_once_with(
            youtube_dl_flow_factory,
            hierarchy=False,
            factory_args=[expected_channels]
        )

    def test_download_videos_draws_hierarchy_of_download_yt_videos(self):
        # hierarchy=True must be forwarded to post_job unchanged.
        download_videos('/tmp/some_list_of_yt_channels.txt', hierarchy=True)

        expected_channels = ['some_channel1', 'some_other_channel2']
        self.post_job.assert_called_once_with(
            youtube_dl_flow_factory,
            hierarchy=True,
            factory_args=[expected_channels]
        )
|
nilq/baby-python
|
python
|
# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from mistral import exceptions as exc
from mistral import expressions as expr
from mistral import utils
from mistral.workflow import base
from mistral.workflow import commands
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral.workflow import utils as wf_utils
LOG = logging.getLogger(__name__)
class DirectWorkflowController(base.WorkflowController):
    """'Direct workflow' handler.

    This handler implements the workflow pattern which is based on
    direct transitions between tasks, i.e. after each task completion
    a decision should be made which tasks should run next based on
    result of task execution.

    Note, that tasks can run in parallel. For example, if there's a workflow
    consisting of three tasks 'A', 'B' and 'C' where 'A' starts first then
    'B' and 'C' can start second if certain associated with transition
    'A'->'B' and 'A'->'C' evaluate to true.
    """

    __workflow_type__ = "direct"

    def _get_upstream_task_executions(self, task_spec):
        # Completed executions of the tasks that have a direct
        # transition into 'task_spec'.
        return list(
            filter(
                lambda t_e: self._is_upstream_task_execution(task_spec, t_e),
                wf_utils.find_task_executions_by_specs(
                    self.wf_ex,
                    self.wf_spec.find_inbound_task_specs(task_spec)
                )
            )
        )

    def _is_upstream_task_execution(self, t_spec, t_ex_candidate):
        # Only completed executions can count as upstream.
        if not states.is_completed(t_ex_candidate.state):
            return False

        # For non-join tasks any unprocessed completed execution counts.
        if not t_spec.get_join():
            return not t_ex_candidate.processed

        # For join tasks the candidate must actually trigger the join.
        return self._triggers_join(
            t_spec,
            self.wf_spec.get_tasks()[t_ex_candidate.name]
        )

    def _find_next_commands(self, env=None):
        """Find commands for the next workflow step.

        Emits start commands when nothing has run yet; otherwise derives
        commands from all completed-but-unprocessed task executions.
        """
        cmds = super(DirectWorkflowController, self)._find_next_commands(
            env=env
        )

        if not self.wf_ex.task_executions:
            return self._find_start_commands()

        task_execs = [
            t_ex for t_ex in self.wf_ex.task_executions
            if states.is_completed(t_ex.state) and not t_ex.processed
        ]

        for t_ex in task_execs:
            cmds.extend(self._find_next_commands_for_task(t_ex))

        return cmds

    def _find_start_commands(self):
        # RunTask commands for every task the workflow starts with.
        return [
            commands.RunTask(
                self.wf_ex,
                t_s,
                self._get_task_inbound_context(t_s)
            )
            for t_s in self.wf_spec.find_start_tasks()
        ]

    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """
        cmds = []

        for t_n in self._find_next_task_names(task_ex):
            # t_n may name a task or a reserved engine command.
            t_s = self.wf_spec.get_tasks()[t_n]

            if not (t_s or t_n in commands.RESERVED_CMDS):
                raise exc.WorkflowException("Task '%s' not found." % t_n)
            elif not t_s:
                # Engine command: reuse the current task's spec.
                t_s = self.wf_spec.get_tasks()[task_ex.name]

            cmd = commands.create_command(
                t_n,
                self.wf_ex,
                t_s,
                self._get_task_inbound_context(t_s)
            )

            # NOTE(xylan): Decide whether or not a join task should run
            # immediately.
            if self._is_unsatisfied_join(cmd):
                cmd.wait_flag = True

            cmds.append(cmd)

        # We need to remove all "join" tasks that have already started
        # (or even completed) to prevent running "join" tasks more than
        # once.
        cmds = self._remove_started_joins(cmds)

        LOG.debug("Found commands: %s" % cmds)

        return cmds

    # TODO(rakhmerov): Need to refactor this method to be able to pass tasks
    # whose contexts need to be merged.
    def evaluate_workflow_final_context(self):
        """Merge the outbound contexts of all end tasks into one dict."""
        ctx = {}

        for t_ex in self._find_end_tasks():
            ctx = utils.merge_dicts(
                ctx,
                data_flow.evaluate_task_outbound_context(t_ex)
            )

        return ctx

    def is_error_handled_for(self, task_ex):
        # An error is considered handled if the task spec has an
        # 'on-error' clause.
        return bool(self.wf_spec.get_on_error_clause(task_ex.name))

    def all_errors_handled(self):
        """Return True if every errored task leads to at least one on-error task."""
        for t_ex in wf_utils.find_error_task_executions(self.wf_ex):
            tasks_on_error = self._find_next_task_names_for_clause(
                self.wf_spec.get_on_error_clause(t_ex.name),
                data_flow.evaluate_task_outbound_context(t_ex)
            )

            if not tasks_on_error:
                return False

        return True

    def _find_end_tasks(self):
        # End tasks are successful executions with no outbound tasks.
        return list(
            filter(
                lambda t_ex: not self._has_outbound_tasks(t_ex),
                wf_utils.find_successful_task_executions(self.wf_ex)
            )
        )

    def _has_outbound_tasks(self, task_ex):
        # In order to determine if there are outbound tasks we just need
        # to calculate next task names (based on task outbound context)
        # and remove all engine commands. To do the latter it's enough to
        # check if there's a corresponding task specification for a task name.
        return bool([
            t_name for t_name in self._find_next_task_names(task_ex)
            if self.wf_spec.get_tasks()[t_name]
        ])

    def _find_next_task_names(self, task_ex):
        """Evaluate on-complete/on-error/on-success clauses for a task execution."""
        t_state = task_ex.state
        t_name = task_ex.name

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        t_names = []

        # 'on-complete' applies to any completed state; the state-specific
        # clause below is added on top of it.
        if states.is_completed(t_state):
            t_names += self._find_next_task_names_for_clause(
                self.wf_spec.get_on_complete_clause(t_name),
                ctx
            )

        if t_state == states.ERROR:
            t_names += self._find_next_task_names_for_clause(
                self.wf_spec.get_on_error_clause(t_name),
                ctx
            )

        elif t_state == states.SUCCESS:
            t_names += self._find_next_task_names_for_clause(
                self.wf_spec.get_on_success_clause(t_name),
                ctx
            )

        return t_names

    @staticmethod
    def _find_next_task_names_for_clause(clause, ctx):
        """Finds next tasks names.

        This method finds next task(command) names based on the given
        {name: condition} pairs.

        :param clause: Pairs (task_name, condition) taken from
            'on-complete', 'on-success' or 'on-error' clause.
        :param ctx: Context that clause expressions should be evaluated
            against.
        :return: List of task(command) names.
        """
        if not clause:
            return []

        # A task is selected when it has no condition or its condition
        # evaluates to true against the outbound context.
        return [
            t_name
            for t_name, condition in clause
            if not condition or expr.evaluate(condition, ctx)
        ]

    def _remove_started_joins(self, cmds):
        # Keep only commands whose join task has not started yet.
        return list(
            filter(lambda cmd: not self._is_started_join(cmd), cmds)
        )

    def _is_started_join(self, cmd):
        if not (isinstance(cmd, commands.RunTask) and
                cmd.task_spec.get_join()):
            return False

        # Any execution of this join task that is not WAITING means
        # the join already started.
        return wf_utils.find_task_execution_not_state(
            self.wf_ex,
            cmd.task_spec,
            states.WAITING
        )

    def _is_unsatisfied_join(self, cmd):
        """Return True if 'cmd' is a join task that must keep waiting."""
        if not isinstance(cmd, commands.RunTask):
            return False

        task_spec = cmd.task_spec

        join_expr = task_spec.get_join()

        if not join_expr:
            return False

        in_task_specs = self.wf_spec.find_inbound_task_specs(task_spec)

        if not in_task_specs:
            return False

        # We need to count a number of triggering inbound transitions.
        num = len([1 for in_t_s in in_task_specs
                   if self._triggers_join(task_spec, in_t_s)])

        # If "join" is configured as a number.
        if isinstance(join_expr, int) and num < join_expr:
            return True

        if join_expr == 'all' and len(in_task_specs) > num:
            return True

        if join_expr == 'one' and num == 0:
            return True

        return False

    # TODO(rakhmerov): Method signature is incorrect given that
    # we may have multiple task executions for a task. It should
    # accept inbound task execution rather than a spec.
    def _triggers_join(self, join_task_spec, inbound_task_spec):
        """Return truthy if the inbound task's latest execution triggers the join."""
        in_t_execs = wf_utils.find_task_executions_by_spec(
            self.wf_ex,
            inbound_task_spec
        )

        # TODO(rakhmerov): Temporary hack. See the previous comment.
        in_t_ex = in_t_execs[-1] if in_t_execs else None

        if not in_t_ex or not states.is_completed(in_t_ex.state):
            return False

        return list(
            filter(
                lambda t_name: join_task_spec.get_name() == t_name,
                self._find_next_task_names(in_t_ex)
            )
        )
|
nilq/baby-python
|
python
|
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""module which has all loaders for io layer
this module can depend only on python modules and functionstore,
filemanager, config
"""
import os
import csv
import importlib
import yaml
import click
import pandas as pd
from rumi.io import config
from rumi.io import filemanager
from rumi.io.functionstore import transpose, column, unique, concat, x_in_y
from rumi.io.functionstore import circular, valid_date
import functools
import logging
from rumi.io.logger import init_logger, get_event
from rumi.io.multiprocessutils import execute_in_process_pool
from rumi.io.multiprocessutils import execute_in_thread_pool
logger = logging.getLogger(__name__)
class LoaderError(Exception):
    """Raised when parameter data can not be loaded as specified."""
    pass
def eval_(validation, g=None, l=None):
    """Evaluate the ``code`` expression of *validation*.

    :param validation: dict holding the expression under the key ``code``
    :param g: optional globals mapping for the evaluation
    :param l: optional locals mapping for the evaluation
    :returns: the expression's value, or None when evaluation raised
        (the failure is logged)
    """
    statement = validation['code']
    globals_ = g if g else {}
    locals_ = l if l else {}
    try:
        return eval(statement, globals_, locals_)
    except Exception as e:
        logger.error("Failed to evaluate statement " + statement)
        logger.exception(e)
def load_param(param_name: str):
    """load parameter from file in RAW format

    Parameters
    ----------
    param_name : str
       parameter name

    Returns
    -------
       Parameter data from file (None for a missing optional parameter)
    """
    specs = filemanager.get_specs(param_name)
    nested = specs.get('nested')
    # A nested spec containing '$' is a template resolved elsewhere;
    # only a literal subfolder name is used directly here.
    if nested and '$' not in nested:
        subfolder = specs.get('nested')
        filepath = filemanager.find_filepath(param_name, subfolder)
    else:
        filepath = filemanager.find_filepath(param_name)

    logger.debug(f"Reading {param_name} from file {filepath}")

    # Missing files are tolerated for optional parameters.
    if specs.get("optional") and not os.path.exists(filepath):
        logger.warning(
            f"Unable to find file for optional parameter {param_name}")
        return None

    # Headerless files are read as lists of rows; others as DataFrames.
    if specs.get("noheader"):
        return read_headerless_csv(param_name, filepath)
    else:
        return read_csv(param_name, filepath)
def load_dataframe(param, filepath, specs):
    """Load *filepath* as a DataFrame using the declared column specs.

    Only columns declared in ``specs['columns']`` are kept, in the order
    they appear in the file.  Each cell is converted with the ``type``
    callable named in the specs; on a conversion failure the column's
    ``default`` is used instead and the error is logged.

    :param param: parameter name (used in error messages)
    :param filepath: path of the csv file to read
    :param specs: parameter specification dictionary
    :returns: pd.DataFrame with the declared columns
    :raises LoaderError: if a non-optional declared column is missing
    """
    with open(filepath) as f:
        csvf = csv.DictReader(f)
        filecols = csvf.fieldnames
        # Hoisted out of the row loop: the declared columns present in
        # the file, and their converters, do not change per row.  The
        # original recomputed the dict for every row and re-eval'd the
        # type string for every cell.
        columns = {k: v for k,
                   v in specs['columns'].items() if k in filecols}
        # 'type' strings come from the application's own yaml specs,
        # so eval is trusted here.
        converters = {key: eval(col['type']) for key, col in columns.items()}
        data = {}
        for row in csvf:
            for key, col in columns.items():
                if key not in row:
                    if col.get("optional"):
                        continue
                    else:
                        raise LoaderError(
                            f"column {key} expected in {param}, but not found.")
                else:
                    convert = converters[key]
                    try:
                        data.setdefault(key, []).append(
                            convert(row[key]))
                    except ValueError as v:
                        logger.error(
                            f"In {param}, could not convert {row[key]} for {key}")
                        logger.exception(v)
                        # Fall back to the declared default value.
                        data.setdefault(key, []).append(
                            convert(col['default']))

    # Preserve the file's column order among declared columns.
    cols = [c for c in filecols if c in specs['columns']]
    return pd.DataFrame(data)[cols]
def param_env(data):
    """Build an evaluation namespace mapping each column name to its Series."""
    env = {}
    for col in data.columns:
        env[col] = data[col]
    return env
def validate_each_item(param: str, spec: dict, data):
    """check if min/max boundaries are satisfied

    evaluates whether data is in min/max limits
    in yaml specifications

    Parameters
    ----------
    param : str
       Name of parameter
    spec : dict
       dictionary of specifications for param
    data : pd.DataFrame/[]
       data for the parameters

    Returns
    -------
    True if data is within min/max boundaries
    """
    # Empty data is acceptable only for optional parameters.
    if not isinstance(data, pd.DataFrame) and not data:
        if spec.get('optional'):
            logger.warning(f"No data found for optional {param}")
            return True
        else:
            logger.error(f"No data found for {param}")
            return False

    # Headerless parameters have no per-column specs to check.
    if not spec.get("noheader"):
        for column_, metadata in spec['columns'].items():
            if column_ not in data.columns:
                if metadata.get('optional'):
                    continue
                else:
                    logger.error(
                        f"Expected column {column_} not found in {param}")
                    raise Exception(
                        f"Expected column {column_} not found in {param}")

            c = data[column_]
            if 'min' in spec['columns'].get(column_, {}):
                m = spec['columns'][column_]['min']
                default = spec['columns'][column_].get('default')
                # Values below 'min' are tolerated only when they equal
                # the declared default (used as a sentinel value).
                if (c < m).any() and (c[c < m] != default).any():
                    logger.error(
                        f"for {param}, {column_} should be >= {m}")
                    return False
            if 'max' in spec['columns'].get(column_, {}):
                m = spec['columns'][column_]['max']
                if (c > m).any():
                    logger.error(
                        f"For {param}, {column_} should be <= {m}")
                    return False
    return True
def validate_param(param: str, spec: dict, data, module, **kwargs):
    """validate individual parameter data

    evaluates every condition from validations given
    in yaml specifications

    Parameters
    ----------
    param : str
       Name of parameter
    spec : dict
       dictionary of specifications for param
    data : pd.DataFrame/[]
       data for the parameters
    module: string
       definitions from this module will be available in validation code
    **kwargs:
       any additional item that should be available in validation code
    """
    logger.info(f"Validating {param}")
    # Per-item (min/max/column) checks run first.
    valid = validate_each_item(param, spec, data)

    for validation in spec.get('validation', []):
        # Build the evaluation environment for the validation expression.
        if spec.get('noheader'):
            env = {param: data}
        else:
            env = param_env(data)
            env[param] = data
        # Dependencies, helper modules, kwargs and this module's globals
        # are all visible to the validation code.
        env.update({p: get_parameter(p) for p in spec.get("dependencies", [])})
        load_module("rumi.io.functionstore", env)
        load_module(module, env)
        env.update(kwargs)
        env.update(globals())
        if not eval_(validation, env):
            logger.error(f"Invalid data for {param}")
            logger.error("{} failed".format(validation['code']))
            # print(validation['message'].format(**env))
            # NOTE(review): the message is rendered by eval'ing it as an
            # f-string — a message containing a single quote breaks this.
            message = validation['message']
            print(eval(f"f'{message}'", env))
            logger.error(eval(f"f'{message}'", env))
            valid = False
    return valid
def load_module(module, env):
    """Inject every attribute of the named *module* into the mapping *env*."""
    mod = importlib.import_module(module)
    env.update({name: getattr(mod, name) for name in dir(mod)})
def load_namespace(namespace_defs, env):
    """Evaluate each definition against *env* and bind the result under its key."""
    for name, expression in namespace_defs.items():
        env[name] = eval(expression, env)
def get_params(specs, threaded=False):
    """Load every parameter named in *specs* except 'global_validation'.

    :param specs: mapping of parameter name -> specification
    :param threaded: load parameters in a thread pool when True
    :returns: dict of parameter name -> loaded data
    """
    def get_param(param):
        # Wrap get_parameter so failures are logged before propagating.
        try:
            return get_parameter(param, validation=True)
        except FileNotFoundError as fn:
            logger.exception(fn)
            raise fn
        except filemanager.FolderStructureError as fse:
            logger.exception(fse)
            raise fse
        except TypeError as tpe:
            logger.debug(f"Automatic loading of {param} failed.")
            raise tpe

    param_names = [p for p in specs.keys() if p != 'global_validation']
    if threaded:
        values = execute_in_thread_pool(get_param, param_names)
    else:
        values = [get_param(p) for p in param_names]
    return {p: v for p, v in zip(param_names, values)}
def global_validation(data, global_validation_):
    """Run validations that depend on multiple parameters.

    :param data: mapping of parameter name -> loaded parameter data
    :param global_validation_: the ``global_validation`` section of the specs
    :returns: True only if every cross-parameter validation passes
    """
    results = [validate_(data, global_validation_, validation)
               for validation in global_validation_['validation']]
    return all(results)
def validate_(data, global_validation_, validation):
    """Evaluate one cross-parameter validation in a fully loaded namespace.

    :param data: mapping of parameter name -> loaded parameter data
    :param global_validation_: the ``global_validation`` section of the specs
    :param validation: single validation dict with 'code' and 'message'
    :returns: True if the validation passes, False otherwise
    """
    env = {}
    env.update(data)
    env.update(globals())
    load_module(global_validation_['module'], env)
    load_module("rumi.io.functionstore", env)

    # Parameters and namespaces of the included types are also made
    # available to the validation code.
    include = global_validation_.get("include", [])
    for type_ in include:
        s = filemanager.get_type_specs(type_)
        env.update(get_params(s))
        load_namespace(s['global_validation'].get('namespace', {}), env)

    load_namespace(global_validation_.get('namespace', {}), env)

    if eval_(validation, env):
        return True
    else:
        print(validation['message'])
        logger.error(f"Global validation failed for {validation['code']}")
        logger.error(validation['message'])
        return False
def validate_param_(param,
                    specs,
                    d,
                    module):
    """Validate a single parameter, logging any failure before re-raising.

    Parameters whose data is None (complicated parameters with variable
    nested folders) skip individual validation and count as valid.
    """
    if d is None:
        return True
    try:
        return validate_param(param, specs, d, module)
    except Exception as e:
        print(f"Error occured while validating {param}")
        logger.error(f"Error occured while validating {param}")
        logger.exception(e)
        raise e
def validate_params(param_type):
    """ validate all parameters

    Parameters
    ----------
    param_type: str
       one of Common, Demand, Supply

    Returns
    -------
    returns True if all parameters are valid, else returns False
    """
    logger.info(f"Validating {param_type}")
    print(f"Validating {param_type}")
    allspecs = dict(filemanager.get_type_specs(param_type))
    gvalidation = allspecs['global_validation']
    del allspecs['global_validation']
    data = get_params(allspecs, threaded=True)
    valid = True
    module = gvalidation['module']
    # Individual parameter validations run in a process pool; the
    # cross-parameter (global) validation runs afterwards.
    valid = execute_in_process_pool(validate_param_,
                                    [(p,
                                      allspecs[p],
                                      v,
                                      module) for p, v in data.items()])
    return global_validation(data, gvalidation) and all(valid)
def call_loader(loaderstring, **kwargs):
    """Resolve the dotted path *loaderstring* and call it with **kwargs.

    :param loaderstring: e.g. ``"package.module.function"``
    :returns: whatever the resolved callable returns
    """
    module_path, _, function_name = loaderstring.rpartition(".")
    module = importlib.import_module(module_path)
    loader_function = getattr(module, function_name)
    return loader_function(**kwargs)
def get_config_parameter(param_name):
    """Read a config parameter csv (path resolved via filemanager) as a DataFrame."""
    path = filemanager.get_config_parameter_path(param_name)
    return pd.read_csv(path)
def call_loader_(specs, param_name, **kwargs):
    """Dispatch to the custom loader declared in *specs*.

    Nested specs containing '$' are templates; during validation such
    parameters are skipped (None is returned) because the concrete
    subfolder is not known yet.

    :returns: the loader's result, or None for a missing optional
        parameter or a skipped templated parameter
    :raises FileNotFoundError: when a non-optional parameter's file is
        missing
    """
    try:
        if not specs.get('nested'):
            d = call_loader(specs.get('loader'))
        elif '$' not in specs.get('nested'):
            # Literal subfolder name: pass it straight to the loader.
            d = call_loader(specs['loader'],
                            param_name=param_name,
                            subfolder=specs.get('nested'))
        elif "$" in specs.get('nested'):
            if "validation" in kwargs and kwargs['validation'] == True:
                d = None
            else:
                d = call_loader(specs.get('loader'), **kwargs)
    except FileNotFoundError as fne:
        # Missing files are tolerated only for optional parameters.
        if specs.get('optional'):
            d = None
        else:
            raise fne
    return d
@functools.lru_cache(maxsize=None)
def get_parameter(param_name, **kwargs):
    """ returns data for given parameter. It returns final expanded data.

    except noheader kind of parameter, everything it returns is pandas
    DataFrame.
    for header less parameter, it returns dictionary with first item
    on every row as key and list of rest items as value.

    examples
    --------
    ::

       get_parameter('GDP') -> will return GDP parameter as a DataFrame
       get_parameter('SubGeography1') -> will return SubGeography1 parameter as a list
       get_parameter('SubGeography2') -> will return SubGeography2 parameter as a dictionary, keys are regions and values are list of states
       get_parameter('BaseYearDemand',
                     demand_sector='D_AGRI') -> BaseYearDemand parameter for 'D_AGRI' as DataFrame
       get_parameter('NumInstances',
                     demand_sector='D_RES',
                     energy_service='RES_COOL') -> NumInstances parameter for <'D_RES','RES_COOL'> as DataFrame

    :param: param_name
    :param: `**kwargs` - variable number of named arguments
    :returns: DataFrame or list or dictionary

    NOTE(review): maxsize=None keeps every (param_name, kwargs)
    combination cached for the life of the process.
    """
    #logger.debug("Getting Parameter " + param_name + str(kwargs))
    specs = filemanager.get_specs(param_name)
    # A custom loader declared in the specs takes precedence over the
    # generic file loader.
    if specs.get('loader'):
        d = call_loader_(specs, param_name, **kwargs)
    else:
        d = load_param(param_name)

    if d is None:
        r = d
    elif specs.get("noheader"):
        r = reformat_headerless(param_name, specs, d)
    else:
        r = d
    # Filter queries from the specs (if any) are applied last.
    return filter_param(param_name, r)
def reformat_headerless(param_name, specs, d):
    """Reformat headerless data to list/dictionary/string as required.

    ``map`` specs become a dict keyed on the first column, ``list`` specs
    become the first row, a single one-item row collapses to a scalar,
    and anything else is returned unchanged as a list of rows.
    """
    if specs.get("map"):
        firstcolumn = column(d, 0)
        if not unique(firstcolumn):
            repeating = set(
                [c for c in firstcolumn if firstcolumn.count(c) > 1])
            logger.warning(
                f"First column in {param_name} should not repeat, but repeating rows discovered for {repeating}")
            logger.warning(
                f"For {param_name} last item from repeating rows of {repeating} will be considered")
        # Later rows overwrite earlier ones when keys repeat.
        return {key: row[1:] for row, key in zip(d, column(d, 0))}
    if specs.get("list"):
        if len(d) > 1:
            logger.warning(
                f"Parameter {param_name} expects only one row but found multiple rows. Only first row will be considered")
        return d[0]
    if len(d) == 1 and len(d[0]) == 1:
        return d[0][0]
    return d
def filter_param(param_name, param_data):
    """This functions filters parameter based on scheme given
    in yaml specifications.

    caution: this function creates a circular dependency by
    calling get_parameter again. SO IF SELF REFERENCING DEPENDENCIES ARE GIVEN
    IT MIGHT RESULT IN RECURSION ERROR.
    """
    specs = filemanager.get_specs(param_name)
    # Filtering only applies to DataFrame parameters with filterqueries.
    if specs.get("filterqueries") and isinstance(param_data, pd.DataFrame):
        logger.debug(f"Filtering parameter {param_name}")
        dependencies = specs.get("dependencies")
        dependencies_data = {p: get_parameter(p) for p in dependencies}
        queries = specs.get("filterqueries")
        dependencies_data['param_data'] = param_data
        # All queries are AND-ed together and evaluated via DataFrame.query;
        # the query strings come from the application's own yaml specs.
        queries_ = [f"( {q} )" for q in queries]
        statement = "param_data.query(f\"{0}\")".format(" & ".join(queries_))
        param_data = eval(statement, dependencies_data)  # .copy()
        if len(param_data) == 0:
            logger.warning(
                f"Filtering of {param_name} has resulted in empty data")
    return param_data
def find_cols(filepath, columnsdata):
    """
    find columns common between column names provided in specifications
    and those given in file, in the order they appear in the file.
    """
    with open(filepath) as f:
        header = next(csv.reader(f))
    return [name for name in header if name in columnsdata]
def read_headerless_csv(param_name, filepath):
    """Read a csv file that has no header row.

    :returns: list of rows, each row a list of strings
    """
    try:
        with open(filepath) as f:
            return list(csv.reader(f))
    except ValueError as v:
        logger.error(f"Unable to parse data for {param_name}")
        logger.exception(v)
        raise v
    except FileNotFoundError as fne:
        logger.error(f"Unable to find file for {param_name}")
        logger.exception(fne)
        raise fne
    except Exception as e:
        logger.error(f"Falied to read parameter {param_name}")
        logger.exception(e)
        raise e
def read_csv(param_name, filepath):
    """read dataframe using pandas.read_csv, but with appropriate types
    """
    specs = filemanager.get_specs(param_name)
    columndata = specs['columns']
    # Type names from the yaml specs are resolved to callables; eval is
    # trusted here because the specs ship with the application.
    converters = {c: eval(data['type']) for c, data in columndata.items()}

    try:
        # Read only the declared columns, in file order.
        cols = find_cols(filepath, columndata)
        return pd.read_csv(filepath,
                           usecols=cols,
                           converters=converters,
                           na_values="")
    except ValueError as v:
        logger.error(f"Unable to parse data for {param_name}")
        logger.exception(v)
        raise v
    except FileNotFoundError as fne:
        # Missing files are only a warning for optional parameters,
        # but the exception is re-raised either way.
        if specs.get('optional'):
            logger.warning(
                f"Unable to find file for optional parameter {param_name}")
        else:
            logger.error(f"Unable to find file for {param_name}")
        logger.exception(fne)
        raise fne
    except Exception as e:
        logger.error(f"Falied to read parameter {param_name}")
        logger.exception(e)
        raise e
def sanity_check_cmd_args(param_type: str,
                          model_instance_path: str,
                          scenario: str,
                          logger_level: str,
                          numthreads: int,
                          cmd='rumi_validate'):
    """Check command line arguments, printing a reason for each rejection.

    :param cmd: command name used in the help hint printed on failure
    :returns: True when all arguments are usable, False otherwise
    """
    def check_null(param_value, param_name):
        # Compulsory arguments must be non-empty.
        if not param_value:
            print(f"Command line parameter, {param_name} is compulsory")
            return True
        else:
            return False

    valid = False
    if check_null(param_type, "-p/--param_type") or\
       check_null(model_instance_path, "-m/--model_instance_path") or\
       check_null(scenario, "-s/--scenario"):
        pass
    elif param_type not in ["Common", "Demand", "Supply"]:
        print(f"Invalid param_type '{param_type}'")
        print("param_type can be one of Common, Demand or Supply")
    elif not os.path.exists(model_instance_path) or not os.path.isdir(model_instance_path):
        print(f"Invalid model_instance_path '{model_instance_path}'")
        print("give appropriate folder path")
    elif logger_level not in ["INFO", "WARN", "DEBUG", "ERROR"]:
        print(f"Invalid logger_level '{logger_level}'")
        print("logger_level can be one of INFO,WARN,DEBUG,ERROR.")
    elif numthreads <= 0:
        print(f"Invalid numthreads '{numthreads}'")
        print("numthreads can be positive integer")
    else:
        valid = True

    if not valid:
        print(f"run {cmd} --help for more help")
    return valid
def rumi_validate(param_type: str,
                  model_instance_path: str,
                  scenario: str,
                  logger_level: str,
                  numthreads: int):
    """Function to validate Common or Demand or Supply

    :param param_type: one of Common, Demand, Supply
    :param model_instance_path: folder holding the model instance
    :param scenario: scenario name
    :param logger_level: one of INFO, WARN, DEBUG, ERROR
    :param numthreads: number of threads/processes to use
    """
    global logger
    if not sanity_check_cmd_args(param_type,
                                 model_instance_path,
                                 scenario,
                                 logger_level,
                                 numthreads):
        return
    config.initialize_config(model_instance_path, scenario)
    init_logger(param_type, logger_level)
    config.set_config("numthreads", str(numthreads))
    # Re-bind the module logger after init_logger reconfigures logging.
    logger = logging.getLogger("rumi.io.loaders")

    try:
        if (validate_params(param_type)):
            logger.info(f"{param_type} Validation succeeded")
            print(f"{param_type} Validation succeeded")
        else:
            logger.error(f"{param_type} Validation failed")
            print(f"{param_type} Validation failed")
    finally:
        # NOTE(review): presumably signals the logger's background
        # machinery to shut down — confirm against rumi.io.logger.
        get_event().set()
@click.command()
@click.option("-p", "--param_type",
              help="Parameter type to validate. can be one of Common, Demand or Supply")
@click.option("-m", "--model_instance_path",
              help="Path where model instance is stored")
@click.option("-s", "--scenario",
              help="Name of Scenario")
@click.option("-l", "--logger_level",
              help="Level for logging,one of INFO,WARN,DEBUG,ERROR. (default: INFO)",
              default="INFO")
@click.option("-t", "--numthreads",
              help="Number of threads/processes (default: 2)",
              default=2)
def main(param_type: str,
         model_instance_path: str,
         scenario: str,
         logger_level: str,
         numthreads: int):
    """Command line interface for data validation.
    """
    # Thin CLI wrapper; all work happens in rumi_validate.
    rumi_validate(param_type,
                  model_instance_path,
                  scenario,
                  logger_level,
                  numthreads)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import gym
import numpy as np
from gym.spaces import Discrete
from gym_holdem.holdem import Table, Player, BetRound
from pokereval_cactus import Card
class HoldemEnv(gym.Env):
    """Gym environment wrapping a multi-player Texas hold'em table.

    Action encoding:
        0 -> FOLD
        1 -> CALL || CHECK
        2 -> ALL_IN
        3..(stakes * player_amount + 2) -> bet amount of (action - 2)
    """

    def __init__(self, player_amount=4, small_blind=25, big_blind=50, stakes=1000):
        super().__init__()
        self.player_amount = player_amount
        self.small_blind = small_blind
        self.big_blind = big_blind
        # Starting stakes for every player.
        self.stakes = stakes
        self.table = None
        self.done = True
        # 0 -> FOLD
        # 1 -> CALL || CHECK
        # 2 -> ALL_IN
        # 3..(stakes * player_amount + 2) -> bet_amount + 2
        self.action_space = Discrete(self.stakes_in_game + 3)
        self.players = [Player(stakes, name=str(i)) for i in range(player_amount)]
        # Per-player stakes at the previous step; used to compute rewards.
        self.players_last_stakes = [stakes for _ in range(player_amount)]
        self.debug = {}
        # (action, player) of the most recent step; used by render().
        self.last_action = (-1, None)
        self.reset()

    def step(self, action: int):
        """Apply *action* for the next player and advance the game.

        :returns: (observation, reward, done, debug) where reward is the
            acting player's change in stakes since their last action.
        :raises ValueError: if *action* is not currently valid
        """
        dbg_end_round = False
        dbg_new_round = False
        dbg_winners = []
        dbg_new_bet_round = False

        player = self.table.next_player
        if action not in self.valid_actions:
            raise ValueError(f"Action {action} is not valid in this context")
        self._take_action(action, player)

        # Advance to the next bet round once everyone has called.
        if self.table.all_players_called():
            self.table.start_next_bet_round()
            dbg_new_bet_round = True

        # Resolve showdowns; a new round starts while >= 2 players remain.
        # NOTE(review): assumes end_round()/new_round() move bet_round
        # out of SHOWDOWN, otherwise this loop would not terminate —
        # confirm against gym_holdem.Table.
        while self.table.bet_round == BetRound.SHOWDOWN:
            dbg_end_round = True
            dbg_winners = self.table.end_round()
            if len(self.table.players) >= 2:
                self.table.new_round()
                dbg_new_round = True
                if self.table.all_players_called():
                    self.table.start_next_bet_round()
            else:
                self.done = True

        # Reward = change in the acting player's stakes since their last turn.
        idx = self.players.index(player)
        reward = player.stakes - self.players_last_stakes[idx]
        self.players_last_stakes[idx] = player.stakes

        self.debug = {
            "new_bet_round": dbg_new_bet_round,
            "new_round": dbg_new_round,
            "end_round": dbg_end_round,
            "winners": dbg_winners
        }
        self.last_action = action, player
        return self.observation_space(player), reward, self.done, self.debug

    def reset(self):
        """Start a fresh game: new table, all players restored to full stakes."""
        self.done = False
        self.table = Table(small_blind=self.small_blind, big_blind=self.big_blind)

        for idx, p in enumerate(self.players):
            p.reset(stakes=self.stakes)
            p.table = self.table
            self.players_last_stakes[idx] = self.stakes

        # Copy the list so table-side removals don't mutate self.players.
        self.table.players = self.players[:]
        self.table.new_round()
        return self.observation_space(self.table.next_player)

    @staticmethod
    def _take_action(action, player):
        # Translate the encoded action into a player move.
        if action == 0:
            player.fold()
        elif action == 1:
            player.call_check()
        elif action == 2:
            # All-in: commit everything the player has.
            player.action_from_amount(player.stakes)
        else:
            # Actions >= 3 encode a raise of (action - 2).
            player.raise_bet(action - 2)

    @property
    def valid_actions(self):
        """Array of actions the next player may legally take."""
        player = self.table.next_player
        to_call = player.to_call_amount()
        # A raise must exceed the call amount by at least the last raise delta.
        min_bet_amount = to_call + self.table.last_bet_raise_delta
        max_bet_amount = player.stakes

        # 0 -> FOLD
        # 1 -> CALL || CHECK
        # 2 -> ALL_IN
        actions = [0, 1, 2]

        if min_bet_amount <= max_bet_amount:
            # Shift by +2 to map bet amounts into the action encoding.
            possible_bet_actions = range(min_bet_amount + 2, max_bet_amount + 3)
            actions += possible_bet_actions
        # else:
        #     if player.stakes > to_call:
        #         actions.append(player.stakes)

        return np.array(actions)

    def observation_space(self, player):
        """Build the flat observation vector for *player*.

        Cards, pot and stakes are normalised into (0, 1); activity flags
        are encoded as 0 (folded) / 0.1 (active).
        """
        # Largest encoded card value used for normalisation.
        # NOTE(review): assumed to match the pokereval_cactus card
        # encoding — confirm.
        max_card_value = 268471337
        hand = [card / (max_card_value + 1) for card in player.hand]
        board = [card / (max_card_value + 1) for card in self.table.board]
        # Pad the board to a fixed length of 5 community cards.
        for _ in range(len(self.table.board), 5):
            board.append(0)

        pot = self.table.pot_value() / (self.stakes_in_game + 1)
        player_stakes = player.stakes / (self.stakes_in_game + 1)

        other_players_stakes = []
        for p in self.players:
            if p == player:
                continue
            other_players_stakes.append(p.stakes / (self.stakes_in_game + 1))

        active_false = 0
        active_true = 0.1
        player_active = active_true if player in self.table.active_players else active_false
        other_players_active = []
        for p in self.players:
            if p == player:
                continue
            active = active_true if p in self.table.active_players else active_false
            other_players_active.append(active)

        observation = hand + board + [pot, player_stakes] + other_players_stakes + [
            player_active] + other_players_active
        return np.array(observation)

    @property
    def table_players(self):
        # Players still seated at the table.
        return self.table.players

    @property
    def next_player(self):
        # The player whose turn it is to act.
        return self.table.next_player

    @property
    def stakes_in_game(self):
        # Total chips in play across all players.
        return self.player_amount * self.stakes

    def render(self, mode="human", close=False):
        """Print the last action and, when relevant, round transitions."""
        # for p in self.table.active_players:
        #     print(str(p))
        # print(f"Board: {Card.print_pretty_cards(self.table.board)}")
        # print(f"Bet round: {bet_round_to_str(self.table.bet_round)}")
        if self.last_action[0] == 0:
            print(f"{self.last_action[1].name}: FOLDED")
        elif self.last_action[0] == 1:
            print(f"{self.last_action[1].name}: CALLED")
        elif self.last_action[0] == 2:
            print(f"{self.last_action[1].name}: ALL_IN")
        else:
            print(f"{self.last_action[1].name}: RAISED({self.last_action[0] - 2})")

        if self.debug["new_bet_round"]:
            print("### NEW BET ROUND ###")
            print(f"Community Cards: {Card.print_pretty_cards(self.table.board)}")
        if self.debug["end_round"]:
            print("### END ROUND ###")
            all_winners = [[w.name for w in winners] for winners in self.debug["winners"]]
            print(f"WINNERS: {all_winners}")
        if self.debug["new_round"]:
            print("### NEW ROUND ###")
            for p in self.table.players:
                print(f"Player {p.name}: hand={Card.print_pretty_cards(p.hand)}, stakes={p.stakes}, "
                      f"bet={p.bet}, has_called={p.has_called}, has_folded={p not in self.table.active_players}, "
                      f"dealer={not self.done and self.table.players[self.table.dealer] == p}")
        if self.done:
            print("### GAME ENDED - RESETTING ###")
|
nilq/baby-python
|
python
|
# Generated by Django 4.0.1 on 2022-01-27 07:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ordering to BookInstance plus language/status choice fields."""

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        # Order book instances by their due-back date.
        migrations.AlterModelOptions(
            name='bookinstance',
            options={'ordering': ['due_back']},
        ),
        # NOTE(review): help_text below contains a typo ("teh"); fix it in
        # the model and generate a new migration rather than editing this
        # historical one.
        migrations.AddField(
            model_name='book',
            name='language',
            field=models.CharField(blank=True, choices=[('EN', 'English'), ('FR', 'French'), ('JP', 'Japanese')], default='EN', help_text='Select language of teh book', max_length=2),
        ),
        migrations.AddField(
            model_name='bookinstance',
            name='status',
            field=models.CharField(blank=True, choices=[('m', 'Maintenance'), ('o', 'On loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='Book availability', max_length=1),
        ),
    ]
|
nilq/baby-python
|
python
|
"""
return 0 = Success
return 1 = Login = 'Invalid username or password!', Register = 'User already exists'
return 2 = 'Something went wrong'
"""
from connect_db import megatronDBC
# login sys / ระบบล็อคอิน
def loginSYS(userInput, passInput):
    """Check *userInput*/*passInput* against the users table.

    :returns: ``(0, username, level)`` on success, ``1`` on bad
        credentials, ``2`` when the database operation fails.
    """
    try:
        cursor = megatronDBC.cursor()
        selectDB = "SELECT * FROM users;"
        cursor.execute(selectDB)
        result = cursor.fetchall()
        # NOTE(review): passwords are stored and compared in plain text;
        # they should be salted hashes (e.g. hashlib/passlib).
        for x in result:
            # Row layout used here: (id, username, password, level).
            if userInput == x[1] and passInput == x[2]:
                return 0, x[1], x[3]
        return 1
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the "2 == something went
        # wrong" contract without hiding interpreter exits.
        return 2
# end login sys จบระบบล็อคอิน
# register sys / ระบบสมัครสมาชิก
def registerSYS(userInput, passInput):
    """Create a new user unless the username is already taken.

    :returns: ``0`` on success, ``1`` if the username already exists,
        ``2`` when the database operation fails.
    """
    try:
        cursor = megatronDBC.cursor()
        selectDB = "SELECT * FROM users;"
        cursor.execute(selectDB)
        result = cursor.fetchall()
        for x in result:
            if userInput == x[1]:
                return 1
        # Parameterized query instead of f-string interpolation: the old
        # version was open to SQL injection through userInput/passInput.
        # Uses the DB-API 'format' paramstyle (%s) of MySQL/PostgreSQL
        # drivers; switch placeholders to '?' if connect_db uses sqlite3.
        insertDB = "INSERT INTO users (username, password, level) values (%s, %s, 0);"
        cursor.execute(insertDB, (userInput, passInput))
        megatronDBC.commit()
        return 0
    except Exception:
        # Narrowed from a bare 'except:'; still maps any failure to 2.
        return 2
# end register sys / จบระบบสมัครสมาชิก
|
nilq/baby-python
|
python
|
from datetime import timedelta
from app import hackathon_variables
from django.db import models
from django.utils import timezone
from user.models import User
class ItemType(models.Model):
    """Represents a kind of hardware"""
    # Human readable name
    name = models.CharField(max_length=50, unique=True)
    # Image of the hardware
    image = models.FileField(upload_to='hw_images/')
    # Description of this hardware
    # what is it used for? which items are contained in the package?
    description = models.TextField()

    def get_borrowable_items(self):
        """ Get items not borrowed already """
        availables = Item.objects.filter(item_type=self, available=True)
        borrowings = Borrowing.objects.filter(item__item_type=self, return_time__isnull=True)
        return availables.exclude(id__in=[x.item.id for x in borrowings])

    def get_available_count(self):
        # Available = marked-available items minus open requests and
        # active borrowings.
        ava_count = Item.objects.filter(item_type=self, available=True).count()
        req_count = self.get_requested_count()
        borrowed_count = self.get_borrowed_count()
        return ava_count - req_count - borrowed_count

    def get_requested_count(self):
        # Open (active) requests for this hardware type.
        return Request.objects.get_active_by_item_type(self).count()

    def get_borrowed_count(self):
        # Currently borrowed (not yet returned) items of this type.
        return Borrowing.objects.get_active_by_item_type(self).count()

    def get_unavailable_count(self):
        # Items of this type flagged unavailable.
        return Item.objects.filter(item_type=self, available=False).count()

    def make_request(self, user):
        """Create and persist a hardware request by *user* for this type."""
        req = Request(item_type=self, user=user)
        req.save()

    def __str__(self):
        return self.name
class Item(models.Model):
    """Represents a real world object identified by label"""
    # Hardware model/type
    item_type = models.ForeignKey(ItemType, on_delete=models.CASCADE)
    # Identifies a real world object
    label = models.CharField(max_length=20, unique=True)
    # Is the item available?
    available = models.BooleanField(default=True)
    # Any other relevant information about this item
    comments = models.TextField(blank=True, null=True)
    def can_be_borrowed(self):
        """True when this item has no open (unreturned) borrowing.

        Uses exists() instead of count() == 0 so the database can stop at
        the first matching row instead of counting them all.
        """
        return not Borrowing.objects.filter(return_time__isnull=True, item=self).exists()
    def __str__(self):
        return '{} ({})'.format(self.label, self.item_type.name)
class BorrowingQuerySet(models.QuerySet):
    """Chainable helpers for Borrowing. 'Active' means not yet returned
    (return_time is NULL)."""
    def get_active(self):
        return self.filter(return_time__isnull=True)
    def get_returned(self):
        return self.filter(return_time__isnull=False)
    def get_active_by_item_type(self, item_type):
        return self.filter(return_time__isnull=True, item__item_type=item_type)
    def get_active_by_user(self, user):
        return self.filter(return_time__isnull=True, user=user)
class Borrowing(models.Model):
    """
    The 'item' has been borrowed to the 'user'.
    An open borrowing has return_time == NULL; setting it closes the loan.
    """
    objects = BorrowingQuerySet.as_manager()
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
    item = models.ForeignKey(Item, on_delete=models.DO_NOTHING)
    # Instant of creation
    picked_up_time = models.DateTimeField(auto_now_add=True)
    # If null: item has not been returned yet
    return_time = models.DateTimeField(null=True, blank=True)
    # Borrowing handled by
    borrowing_by = models.ForeignKey(User, related_name='hardware_admin_borrowing', on_delete=models.DO_NOTHING)
    # Return handled by (null until returned)
    return_by = models.ForeignKey(User, related_name='hardware_admin_return', null=True, blank=True,
                                  on_delete=models.SET_NULL)
    def get_picked_up_time_ago(self):
        """Human-readable elapsed time since pickup."""
        return str(timezone.now() - self.picked_up_time)
    def get_return_time_ago(self):
        """Elapsed time since return.
        NOTE(review): raises TypeError when return_time is None (not yet
        returned) -- callers must check is_active() first."""
        return str(timezone.now() - self.return_time)
    def is_active(self):
        """True while the item has not been returned."""
        return self.return_time is None
    def __str__(self):
        return '{} ({})'.format(self.item.item_type.name, self.user)
class RequestQuerySet(models.QuerySet):
    """Chainable helpers for Request.

    A request is 'active' while it is unfulfilled (no borrowing attached)
    and younger than HARDWARE_REQUEST_TIME minutes; older unfulfilled
    requests are 'expired'.
    """
    def _threshold(self):
        """Oldest request_time that still counts as active.

        DRY fix: this delta/threshold pair was previously recomputed inline
        in four separate methods.
        """
        return timezone.now() - timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
    def get_active(self):
        return self.filter(borrowing__isnull=True, request_time__gte=self._threshold())
    def get_borrowed(self):
        return self.filter(borrowing__isnull=False)
    def get_expired(self):
        return self.filter(borrowing__isnull=True, request_time__lt=self._threshold())
    def get_active_by_user(self, user):
        return self.filter(borrowing__isnull=True, request_time__gte=self._threshold(), user=user)
    def get_active_by_item_type(self, item_type):
        return self.filter(borrowing__isnull=True, request_time__gte=self._threshold(), item_type=item_type)
class Request(models.Model):
    """
    Represents reservation of an item of type 'item_type' done by 'user'.
    A request stays valid for HARDWARE_REQUEST_TIME minutes unless a
    Borrowing is attached to it first.
    """
    objects = RequestQuerySet.as_manager()
    # Requested item type
    item_type = models.ForeignKey(ItemType, on_delete=models.CASCADE)
    # Hacker that made the request
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Borrowing derived from this request
    borrowing = models.ForeignKey(Borrowing, null=True, blank=True, on_delete=models.CASCADE)
    # Instant of creation
    request_time = models.DateTimeField(auto_now_add=True)
    def is_active(self):
        """True while unfulfilled and still inside the request time window."""
        delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
        remaining = delta - (timezone.now() - self.request_time)
        return not self.borrowing and remaining.total_seconds() > 0
    def get_remaining_time(self):
        """Remaining window as a string, or 'Borrowed'/'Expired'."""
        delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
        remaining = delta - (timezone.now() - self.request_time)
        if self.borrowing:
            return "Borrowed"
        elif remaining.total_seconds() < 0:
            return "Expired"
        else:
            return str(remaining)
    def __str__(self):
        return '{} ({})'.format(self.item_type, self.user)
|
nilq/baby-python
|
python
|
from __future__ import print_function
from builtins import object
import copy
import numpy as np
class Observer(object):
    """No-op base class for sampler observers: subclasses override update()
    to consume each sample and reset() to clear accumulated state."""
    def __init__(self):
        pass
    def update(self, state):
        # Called once per sample; the base implementation ignores it.
        pass
    def reset(self):
        # Clear accumulated state; nothing to do in the base class.
        pass
class Printer(object):
    """Prints every `skip`-th sample, prefixed with a running counter.

    `msg` is a format string; when falsy it defaults to `elems` '{}'
    placeholders joined by commas.
    """
    def __init__(self, elems=1, msg=None, skip=1):
        self.elems = elems
        # Falsy msg (None/"") falls back to "{}, {}, ..." with `elems` slots.
        self.msg = msg or "{}, " * (self.elems - 1) + "{}"
        self.count = 0
        self.skip = skip
    def update(self, sample):
        if self.count % self.skip == 0:
            # Iterables are spread across the placeholders; scalars fill one.
            if hasattr(sample, '__iter__'):
                text = self.msg.format(*sample)
            else:
                text = self.msg.format(sample)
            print(self.count, ':', text)
        self.count += 1
    def reset(self):
        self.count = 0
class TimeAutoCorrelation(Observer):
    """Accumulates scalar samples and computes their circular
    autocorrelation via the Wiener-Khinchin relation (power spectrum ->
    inverse FFT)."""
    def __init__(self):
        self.arr = []
    def update(self, sample):
        self.arr.append(sample)
    def get_correlation(self):
        """Circular autocorrelation of all samples collected so far."""
        spectrum = np.fft.fftn(np.array(self.arr))
        power = spectrum * spectrum.conj()
        return np.real(np.fft.ifftn(power))
    def reset(self):
        self.arr = []
class MeanObserver(Observer):
    """Streaming (incremental) mean of sample[self.s]."""
    def __init__(self, block=None):
        # Bug fix: `block or np.s_[:]` silently discarded falsy-but-valid
        # selectors (e.g. index 0 or an empty slice). Test against None
        # instead, matching HistogramObserver's convention.
        self.s = block if block is not None else np.s_[:]
        self.dat = None  # running mean; None until the first sample
        self.n = 0       # number of samples folded in
    def update(self, sample):
        if self.dat is None:
            self.n = 1
            self.dat = sample[self.s]
        else:
            self.n += 1
            # Incremental mean: m += (x - m) / n
            self.dat = self.dat + (sample[self.s] - self.dat) / self.n
    def get_mean(self):
        """Current running mean (None before the first sample)."""
        return self.dat
    def reset(self):
        # n is reinitialized by the next update(), so clearing dat suffices.
        self.dat = None
class CovarianceObserver(Observer):
    """Streaming mean and covariance matrix of sample[self.s]."""
    def __init__(self, block=None):
        # Same fix as in MeanObserver: accept falsy-but-valid selectors
        # (e.g. 0) by testing against None rather than truthiness.
        self.s = block if block is not None else np.s_[:]
        self.mean = None
        self.cov = None
        self.n = 0
    def update(self, sample):
        if self.mean is None:
            self.n = 1
            self.mean = sample[self.s]
            self.cov = 0*np.outer(sample[self.s], sample[self.s])
        else:
            self.n += 1
            # NOTE: order matters -- the mean must be updated before the
            # covariance increment below, which uses the new mean.
            self.mean = self.mean + (sample[self.s] - self.mean) / self.n
            self.cov = (self.n-1.0)*self.cov
            self.cov += (self.n-1.0)/self.n*np.outer(sample[self.s]-self.mean,sample[self.s]-self.mean)
            self.cov *= 1.0/self.n
    def get_mean(self):
        """Current running mean (None before the first sample)."""
        return self.mean
    def get_covariance(self):
        """Current covariance estimate (None before the first sample)."""
        return self.cov
    def reset(self):
        # update() reinitializes n and cov when mean is None, so clearing
        # mean is sufficient (matches the original behavior).
        self.mean = None
class HistogramObserver(Observer):
    """Records a copy of sample.state[self.s] at every update;
    get_histogram() stacks the copies into one array (one row per sample)."""
    def __init__(self, block=None):
        self.s = np.s_[:] if block is None else block
        self.dat = []
    def update(self, sample):
        # Shallow copy so later in-place mutation of the state is not
        # reflected in already-recorded rows.
        self.dat.append(copy.copy(sample.state[self.s]))
    def get_histogram(self):
        return np.array(self.dat)
    def reset(self):
        self.dat = []
|
nilq/baby-python
|
python
|
# proxy module
from __future__ import absolute_import
from chaco.abstract_plot_data import *
|
nilq/baby-python
|
python
|
__author__ = "Doug Napoleone"
__version__ = "0.0.1"
__email__ = 'Doug.Napoleone+niche_scraper@gmail.com'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Minimal Flask app demonstrating flask_debugtoolbar integration.
import logging
from flask import Flask
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
# set a 'SECRET_KEY' to enable the Flask session cookies
app.config['SECRET_KEY'] = '<replace with a secret key>'
@app.route("/")
def index():
    # NOTE: Need tab body: "Could not insert debug toolbar. </body> tag not found in response."
    # The toolbar injects itself just before </body>, so the response must
    # contain that closing tag.
    return "<body>Hello World!</body>"
if __name__ == '__main__':
    app.debug = True
    if app.debug:
        logging.basicConfig(level=logging.DEBUG)
    # The toolbar only activates while app.debug is True.
    toolbar = DebugToolbarExtension(app)
    # Localhost
    # port=0 -- random free port
    # app.run(port=0)
    app.run(
        port=5000
    )
    # # Public IP
    # app.run(host='0.0.0.0')
|
nilq/baby-python
|
python
|
from loggers import Actions
from stopping_decision_makers.base_decision_maker import BaseDecisionMaker
class SequentialNonrelDecisionMaker(BaseDecisionMaker):
    """
    Stopping rule: abandon the current SERP once a run of consecutive
    non-relevant snippets reaches `nonrelevant_threshold`.
    """
    def __init__(self, search_context, logger, nonrelevant_threshold=3):
        super(SequentialNonrelDecisionMaker, self).__init__(search_context, logger)
        # Run length of non-relevant snippets that triggers a new query.
        self.__nonrelevant_threshold = nonrelevant_threshold
    def decide(self):
        """
        Return Actions.QUERY once `nonrelevant_threshold` consecutive
        snippets were judged non-relevant; otherwise keep examining
        snippets (Actions.SNIPPET).
        """
        streak = 0
        for snippet in self._search_context.get_examined_snippets():
            if snippet.judgment == 0:
                streak += 1
                if streak == self.__nonrelevant_threshold:
                    return Actions.QUERY
            else:
                # A relevant snippet breaks the run.
                streak = 0
        return Actions.SNIPPET
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import User
from course.models import Course
# Create your models here.
class Answer(models.Model):
    """A free-text answer authored by a user."""
    # NOTE(review): passing name="user" to ForeignKey is redundant with the
    # attribute name -- confirm it was not meant to be related_name.
    user = models.ForeignKey(User, name="user", on_delete=models.CASCADE)
    answer = models.TextField()
    def __str__(self) -> str:
        return self.answer
class Question(models.Model):
    """A question asked within a course, with its author and the answers
    collected for it."""
    question = models.TextField()
    # NOTE(review): name="quser" makes the field/lookup name differ from the
    # attribute name -- confirm this is intentional and not a typo.
    user = models.ForeignKey(User, name="quser", on_delete=models.CASCADE)
    answer = models.ManyToManyField(Answer, name="answer")
    course = models.ForeignKey(Course, name="course", on_delete=models.CASCADE)
    def __str__(self) -> str:
        return self.question
|
nilq/baby-python
|
python
|
# Time: O(k * log(min(n, m, k))), with n x m matrix
# Space: O(min(n, m, k))
from heapq import heappush, heappop
class Solution(object):
    def kthSmallest(self, matrix, k):
        """
        :type matrix: List[List[int]]
        :type k: int
        :rtype: int

        Min-heap walk over a row- and column-sorted matrix. The heap never
        holds more than min(n, m) entries because the shorter dimension is
        used as the "line" axis (coordinates flip when the matrix is taller
        than wide).
        """
        flipped = len(matrix) > len(matrix[0])

        def cell(a, b):
            return matrix[b][a] if flipped else matrix[a][b]

        def in_bounds(a, b):
            if flipped:
                return a < len(matrix[0]) and b < len(matrix)
            return a < len(matrix) and b < len(matrix[0])

        heap = []

        def offer(a, b):
            if in_bounds(a, b):
                heappush(heap, (cell(a, b), a, b))

        offer(0, 0)
        smallest = 0
        while heap and k > 0:
            smallest, a, b = heappop(heap)
            offer(a, b + 1)      # next element along the same line
            if b == 0:
                offer(a + 1, 0)  # lazily seed the start of the next line
            k -= 1
        return smallest
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from . import main_menu, signals, slides, widgets # noqa
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
@author: wj3235@126.com
说明:(1)程序仅供技术学习,严禁用于任何商业用途
(2)对于抓取内容及其分析,请勿乱发布,后果自负
(3)软件可能有bug,如果发现望及时告知
(4)成交数据,需要提供修改账户密码,请查找 admin 或者password 修改
'''
import sqlite3
import os
from ErShouFangDbHelper import GetXiaoquNianDai
from ErShouFangDbHelper import GetCountFromSummary
from ChengJiaoDbHelper import GetMaxRiQi
from AdvancedAnalysisErShouFangMain import GetPriceFromDbList
import time
import datetime
def GetXiaoquData(dbname):
    """Return {estate_name: empty-stats dict} for every distinct xiaoqu
    (estate) found in the chengjiao (closed-deals) table of
    database/<dbname>.db."""
    xiaoqulist={}
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=u'select DISTINCT xiaoqu from chengjiao '
    cursor=conn.execute(sqlstring)
    for row in cursor:
        # Pre-seed per-estate aggregation slots filled in by later passes.
        xiaoqulist[row[0]]={}
        xiaoqulist[row[0]]['qu']=''
        xiaoqulist[row[0]]['zhen']=''
        xiaoqulist[row[0]]['min']=None
        xiaoqulist[row[0]]['max']=None
        xiaoqulist[row[0]]['range']=0.0
    conn.close()
    return xiaoqulist
def Analysis(dbname):
    '''key $ xiaoqu $ fangxing $ mianji $ qu $ zhen $ zongjia $ danjia $ manji $ riqi'''
    # Prints per-town totals and average unit price (total price / total area).
    # NOTE(review): body is identical to TrendShanghaiMonth -- likely a
    # copy/paste; consider deleting one of the two.
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring='select qu,zhen,sum(zongjia),sum(mianji) from chengjiao group by zhen order by qu'
    cursor=conn.execute(sqlstring)
    totalprice=0.0
    totalmianji=0.0
    for row in cursor:
        qu=row[0]
        zhen=row[1]
        totalprice=row[2]
        totalmianji=row[3]
        if(totalmianji!=0):
            # district, town, total price, total area, average unit price
            print qu,zhen,totalprice,totalmianji,totalprice/totalmianji
    conn.close()
def TrendShanghaiMonth(dbname):
    '''key $ xiaoqu $ fangxing $ mianji $ qu $ zhen $ zongjia $ danjia $ manji $ riqi'''
    # Prints per-town totals and average unit price.
    # NOTE(review): despite the name there is no per-month grouping here;
    # the body is identical to Analysis above -- presumably a stub that was
    # never finished.
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring='select qu,zhen,sum(zongjia),sum(mianji) from chengjiao group by zhen order by qu'
    cursor=conn.execute(sqlstring)
    totalprice=0.0
    totalmianji=0.0
    for row in cursor:
        qu=row[0]
        zhen=row[1]
        totalprice=row[2]
        totalmianji=row[3]
        if(totalmianji!=0):
            # district, town, total price, total area, average unit price
            print qu,zhen,totalprice,totalmianji,totalprice/totalmianji
    conn.close()
def TrendZhenMonth(dbname):
    """Per-town (zhen) monthly deal aggregates: echoes a few watched towns
    to stdout and writes min/max/range of the monthly average price per town
    to report\\chenjiao\\chengjiaoTrendZhen.txt ('$'-separated)."""
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=u'select qu,zhen,count(key),sum(zongjia),sum(mianji),strftime(\'%Y%m\',riqi) \
    from chengjiao group by zhen,strftime(\'%Y%m\',riqi) '
    cursor=conn.execute(sqlstring)
    totalprice=0.0
    totalmianji=0.0
    zhenlist=[]
    for row in cursor:
        qu=row[0]
        zhen=row[1]
        count=row[2]
        totalprice=row[3]
        totalmianji=row[4]
        average=totalprice/totalmianji
        riqi=row[5]
        zhenlist.append([qu,zhen,count,totalprice,totalmianji,average,riqi])
    conn.close()
    zhendata={}
    # First pass: initialise one stats slot per town (re-initialised on every
    # occurrence, which is harmless) and echo the watched towns.
    for zhen in zhenlist:
        key=zhen[1]
        zhendata[key]={}
        zhendata[key]['qu']=''
        zhendata[key]['zhen']=''
        zhendata[key]['min']=None
        zhendata[key]['max']=None
        zhendata[key]['range']=0.0
        zhendata[key]['count']=0
        # Towns echoed to stdout (Zhangjiang, Tangzhen, Sanlin, Chuansha,
        # Beicai, Zhuqiao).
        show=[u'张江',u'唐镇',u'三林',u'川沙',u'北蔡',u'祝桥 ']
        if key.strip() in show:
            print zhen[0],zhen[1],zhen[2],zhen[5],zhen[6]
    # Second pass: track min/max/range of the monthly average price per town.
    for zhen in zhenlist:
        key=zhen[1]
        zhendata[key]['qu']=zhen[0];
        zhendata[key]['zhen']=zhen[1]
        junjia=zhen[5]
        if zhendata[key]['min']==None or \
            zhendata[key]['max']==None:
            zhendata[key]['min']=junjia
            zhendata[key]['max']=junjia
        if junjia<zhendata[key]['min']:
            zhendata[key]['min']=junjia
            zhendata[key]['range']=zhendata[key]['max']-zhendata[key]['min']
        elif junjia>zhendata[key]['max']:
            zhendata[key]['max']=junjia
            zhendata[key]['range']=zhendata[key]['max']-zhendata[key]['min']
        zhendata[key]['count']+=zhen[2]
    f=open('report\\chenjiao\\chengjiaoTrendZhen.txt','w')
    f.write('qu$zhen$shuliang$min$max$range\n')
    for key in zhendata:
        txt=('%s $ %s $ %s $ %s $ %s $ %s\n')%(zhendata[key]['qu'],zhendata[key]['zhen'],\
            zhendata[key]['count'],zhendata[key]['min'],zhendata[key]['max'],zhendata[key]['range'])
        f.write(txt.encode('utf-8'))
    f.close()
def TrendXiaoQuMonth(dbname,xiaoqudata):
    """Per-estate (xiaoqu) monthly aggregates: enriches *xiaoqudata* (from
    GetXiaoquData) with construction era and min/max/range of the monthly
    average price, then dumps everything to
    report\\chenjiao\\chengjiaoTrendXiaoqu.txt ('$'-separated)."""
    # Estate -> construction era mapping, snapshot dated 2017-01-07.
    xiaoqulist=GetXiaoquNianDai('20170107')
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=u'select qu,zhen,xiaoqu,count(key),sum(zongjia),sum(mianji),strftime(\'%m\',riqi) \
    from chengjiao group by xiaoqu,strftime(\'%m\',riqi) '
    cursor=conn.execute(sqlstring)
    totalprice=0.0
    totalmianji=0.0
    for row in cursor:
        qu=row[0]
        zhen=row[1]
        xiaoqu=row[2]
        count=row[3]
        totalprice=row[4]
        totalmianji=row[5]
        riqi=row[6]
        niandai=''
        junjia=totalprice/totalmianji
        if xiaoqulist.has_key(xiaoqu):  # Python 2 dict API
            niandai=xiaoqulist[xiaoqu]
            print xiaoqu,niandai
        xiaoqudata[xiaoqu]['niandai']=niandai
        xiaoqudata[xiaoqu]['qu']=qu;
        xiaoqudata[xiaoqu]['zhen']=zhen
        if xiaoqudata[xiaoqu]['min']==None or \
            xiaoqudata[xiaoqu]['max']==None:
            xiaoqudata[xiaoqu]['min']=junjia
            xiaoqudata[xiaoqu]['max']=junjia
        if junjia<xiaoqudata[xiaoqu]['min']:
            xiaoqudata[xiaoqu]['min']=junjia
            xiaoqudata[xiaoqu]['range']=xiaoqudata[xiaoqu]['max']-xiaoqudata[xiaoqu]['min']
        elif junjia>xiaoqudata[xiaoqu]['max']:
            xiaoqudata[xiaoqu]['max']=junjia
            xiaoqudata[xiaoqu]['range']=xiaoqudata[xiaoqu]['max']-xiaoqudata[xiaoqu]['min']
    conn.close()
    f=open('report\\chenjiao\\chengjiaoTrendXiaoqu.txt','w')
    f.write('qu$zhen$xiaoqu$niandai$min$max$range\n')
    for key in xiaoqudata:
        txt=('%s$%s$%s$%s$%s$%s$%s\n')%(xiaoqudata[key]['qu'],xiaoqudata[key]['zhen'],\
            key,xiaoqudata[key]['niandai'],xiaoqudata[key]['min'],\
            xiaoqudata[key]['max'],xiaoqudata[key]['range'])
        f.write(txt.encode('utf-8'))
    f.close()
def ChenJiaoShangHaiPerMonth(dbname='chengjiao'):
    """Print, per calendar month, the citywide average unit price and the
    number of closed deals."""
    print '月份 均价 套数'
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=u'select count(key),sum(zongjia),sum(mianji),strftime(\'%Y%m\',riqi) \
    from chengjiao group by strftime(\'%Y%m\',riqi) '#%d
    cursor=conn.execute(sqlstring)
    totalprice=0.0
    totalmianji=0.0
    count=0
    t=''
    for row in cursor:
        count=row[0]
        totalprice=row[1]
        totalmianji=row[2]
        t=row[3]
        # month, average unit price, deal count
        print ('%s %-11.4f %s')%(t,totalprice/totalmianji,count)
def get_latest_90_count(date,dbname='chengjiao'):
    """Count deals closed in the 90 days up to *date* ('YYYY-MM-DD').
    FIXME(security/robustness): the date is interpolated into the SQL text;
    use a parameterized query ('?' placeholder) instead."""
    date = time.strptime(date,"%Y-%m-%d")
    date=datetime.datetime(date[0],date[1],date[2])
    lastdate= date + datetime.timedelta(days = -90)
    print date,'九十天前日期;',lastdate
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=(u'select count(key) from chengjiao where riqi>=\'%s\'')%(lastdate)
    cursor=conn.execute(sqlstring)
    c=0
    for row in cursor:
        c=row[0]
    conn.close()
    return c
def get_chengjiao_count(where,dbname='chengjiao'):
    """Count chengjiao rows matching the raw SQL *where* clause.
    FIXME(security): the clause is pasted into the query verbatim -- callers
    must never pass untrusted input here."""
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=(u'select count(key) from chengjiao where %s')%(where)
    cursor=conn.execute(sqlstring)
    c=0
    for row in cursor:
        c=row[0]
    conn.close()
    return c
def get_90_count(date, dbname='chengjiao'):
    """Count deals closed on exactly the day 90 days before *date*.

    date: 'YYYY-MM-DD' string.
    dbname: database file stem under database/.
    Returns the matching row count (0 when nothing matches).
    """
    parsed = time.strptime(date, "%Y-%m-%d")
    day = datetime.datetime(parsed[0], parsed[1], parsed[2])
    lastdate = (day + datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
    conn = sqlite3.connect("database/" + dbname + ".db")
    # Parameterized query: the previous version interpolated the date into
    # the SQL string, which breaks on quoting and invites SQL injection.
    cursor = conn.execute(u'select count(key) from chengjiao where riqi=?', (lastdate,))
    c = 0
    for row in cursor:
        c = row[0]
    conn.close()
    return c
def get_latest_15_count():
    """Estimate and print the number of deals closed in the last 90 days,
    combining the site summary count with a correction window."""
    date=time.strftime('%Y-%m-%d',time.localtime())
    # NOTE(review): this local shadows the `datetime` module inside the
    # function (harmless here, but rename it if the function grows).
    datetime=time.strftime('%Y%m%d',time.localtime())
    count= GetCountFromSummary(datetime)
    count90= get_latest_90_count(date)
    print '最近90天成交套数 ,统计到:',GetMaxRiQi(),count90
    delta=count-count90
    # NOTE(review): the range below (riqi >= 2017-02-01 AND riqi <= 2017-01-08)
    # is empty as written -- confirm the intended bounds.
    countbetween=get_chengjiao_count(' riqi>=\'2017-02-01\' and riqi <= \'2017-01-08\'')
    print delta+countbetween
def get_day_count(date=None):
    """Print the number of deals recorded for the day before *date*
    (defaults to today), derived from two consecutive summary snapshots."""
    if date==None:
        date=time.strftime('%Y-%m-%d',time.localtime())
    date = time.strptime(date,"%Y-%m-%d")
    date=datetime.datetime(date[0],date[1],date[2])
    predate= date + datetime.timedelta(days = -1)
    day=date.strftime("%Y%m%d")
    preday=predate.strftime("%Y%m%d")
    preday2=predate.strftime("%Y-%m-%d")
    countday=GetCountFromSummary(day)
    countpreday=GetCountFromSummary(preday)
    coutpreday90=get_90_count(preday2)
    #print countday,countpreday,coutpreday90
    # day count = today's 90-day total minus (yesterday's total without the
    # day that rolled out of the 90-day window)
    print preday2,"chenjiao",countday-(countpreday-coutpreday90)
def GetChengJiaoPreviousData(dbname='chengjiao'):
    """Dump every closed deal (newest first), joined with its historical
    listing-price trail, to report/chengjiaoPreviousJiaGe.txt
    ('$'-separated)."""
    conn = sqlite3.connect("database/"+dbname+".db")
    sqlstring=(u'select * from chengjiao order by riqi desc')
    cursor=conn.execute(sqlstring)
    chengjiao=[]
    count=1
    with open('report/chengjiaoPreviousJiaGe.txt','w+') as chenjiaofile:
        for row in cursor:
            chen=[]
            chen.append(row[0]) #key
            chen.append(row[6]) #zongjia (total price)
            chen.append(row[1]) #xiaoqu (estate)
            chen.append(row[2]) #fang (layout)
            chen.append(row[3]) #mianji (area)
            chen.append(row[4]) #qu (district)
            chen.append(row[5]) #zhen (town)
            # Skip a known-bad town value present in the scraped data.
            if row[5] == u'北3蔡':
                continue
            chen.append(row[7]) #danjia (unit price)
            chen.append(row[8])
            chen.append(row[9]) #riqi (deal date)
            chen.append(GetPriceFromDbList(row[0]))  # listing-price history
            chengjiao.append(chen)
            print chen[0],chen[1],chen[2],chen[3],chen[4],chen[5],chen[6],chen[7],chen[8],chen[9],chen[10]
            count+=1
            lastprice=0
            change=0
            daikan=0
            # When a price history exists, derive last listing price,
            # viewing count (daikan) and final price change.
            if len(chen[10])>0:
                lastprice=int(chen[10][1])
                daikan=int(chen[10][0])
                change=int(chen[1])-lastprice
            txt=('%s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s $ %s\n')%(chen[0],chen[2],chen[1],lastprice,change,daikan,chen[3],\
                chen[4],chen[5],chen[6],chen[7],chen[8],chen[9],chen[10])#chen[10] pricelist latest to oldest
            chenjiaofile.write(txt)
            chenjiaofile.flush()
def GetChenJiaoPerDay():
    """Walk backwards from today, printing the per-day deal count for each
    date until 2017-02-23 is reached.  Note: the displayed data lags by one
    day (original remark)."""
    date=time.strftime('%Y-%m-%d',time.localtime())
    get_day_count(date)
    date = time.strptime(date,"%Y-%m-%d")
    while(True):
        # struct_time indexing: [0]=year, [1]=month, [2]=day
        date=datetime.datetime(date[0],date[1],date[2])
        predate= date + datetime.timedelta(days = -1)
        predate=predate.strftime("%Y-%m-%d")
        date=predate
        date = time.strptime(date,"%Y-%m-%d")
        if(predate=='2017-02-23'):
            break
        get_day_count(predate)
if __name__ == "__main__":
    if 1:
        # Active branch: regenerate the closed-deal price-history report.
        #Analysis('chengjiao')
        #TrendXiaoQuMonth('chengjiao',GetXiaoquData('chengjiao'))
        GetChengJiaoPreviousData()
        pass
    else:
        # Dead branch (if 1 above): kept as a menu of the other reports.
        pass
        GetChengJiaoPreviousData()
        ChenJiaoShangHaiPerMonth()
        get_latest_15_count()
        GetChenJiaoPerDay()
        TrendZhenMonth('chengjiao')
        TrendXiaoQuMonth('chengjiao',GetXiaoquData('chengjiao'))
        TrendShanghaiMonth('chengjiao')
|
nilq/baby-python
|
python
|
from flask import Blueprint, request
from libs.tools import json_response, JsonParser, Argument
from .models import NotifyWay
blueprint = Blueprint(__name__, __name__)
@blueprint.route('/', methods=['GET'])
def get():
    """List notify ways, paginated; page == -1 returns everything.
    Optional notify_query.name_field filters by substring match on name."""
    form, error = JsonParser(Argument('page', type=int, default=1, required=False),
                             Argument('pagesize', type=int, default=10, required=False),
                             Argument('notify_query', type=dict, required=False), ).parse(request.args)
    if error is None:
        notify_data = NotifyWay.query
        if form.page == -1:
            return json_response({'data': [x.to_json() for x in notify_data.all()], 'total': -1})
        # NOTE(review): assumes notify_query defaults to a dict when absent --
        # confirm JsonParser supplies one, otherwise .get() fails on None.
        if form.notify_query.get('name_field'):
            notify_data = notify_data.filter(NotifyWay.name.like('%{}%'.format(form.notify_query['name_field'])))
        result = notify_data.limit(form.pagesize).offset((form.page - 1) * form.pagesize).all()
        return json_response({'data': [x.to_json() for x in result], 'total': notify_data.count()})
    return json_response(message=error)
@blueprint.route('/', methods=['POST'])
def post():
    """Create a notify way; rejects duplicate names."""
    form, error = JsonParser('name', 'value',
                             Argument('desc', nullable=True)).parse()
    if error is None:
        notify_is_exist = NotifyWay.query.filter_by(name=form.name).first()
        if notify_is_exist:
            # Error message: "notification name already exists".
            return json_response(message="通知名称已存在")
        NotifyWay(**form).save()
        return json_response()
    return json_response(message=error)
@blueprint.route('/<int:u_id>', methods=['DELETE'])
def delete(u_id):
    """Delete a notify way by id; 404 when it does not exist."""
    NotifyWay.query.get_or_404(u_id).delete()
    return json_response(), 204
@blueprint.route('/<int:n_id>', methods=['PUT'])
def put(n_id):
    """Update a notify way by id; 404 when it does not exist."""
    form, error = JsonParser('name', 'value',
                             Argument('desc', nullable=True)).parse()
    if error is None:
        notify_info = NotifyWay.query.get_or_404(n_id)
        # NOTE(review): falls back to save() when update() returns falsy --
        # the exact semantics of update()'s return value are inferred; confirm.
        if not notify_info.update(**form):
            notify_info.save()
        return json_response(notify_info)
    return json_response(message=error)
|
nilq/baby-python
|
python
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### We add an option contract using 'QCAlgorithm.AddOptionContract' and place a trade, the underlying
### gets deselected from the universe selection but should still be present since we manually added the option contract.
### Later we call 'QCAlgorithm.RemoveOptionContract' and expect both option and underlying to be removed.
### </summary>
class AddOptionContractFromUniverseRegressionAlgorithm(QCAlgorithm):
    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        self.SetStartDate(2014, 6, 5)
        self.SetEndDate(2014, 6, 9)
        # Shared target expiry for the contracts picked in OnSecuritiesChanged.
        self._expiration = datetime(2014, 6, 21)
        self._securityChanges = None  # accumulated SecurityChanges
        self._option = None           # first manually-added option contract
        self._traded = False          # guard so we only buy once
        self._twx = Symbol.Create("TWX", SecurityType.Equity, Market.USA)
        self._aapl = Symbol.Create("AAPL", SecurityType.Equity, Market.USA)
        self.UniverseSettings.Resolution = Resolution.Minute
        self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.Raw
        self.AddUniverse(self.Selector, self.Selector)
    def Selector(self, fundamental):
        # Universe holds TWX only on the first day, then switches to AAPL,
        # deselecting TWX while its manually-added option is still held.
        if self.Time <= datetime(2014, 6, 5):
            return [ self._twx ]
        return [ self._aapl ]
    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        Arguments:
            data: Slice object keyed by symbol containing the stock data
        '''
        # Buy one contract the first time the option has a nonzero price.
        if self._option != None and self.Securities[self._option].Price != 0 and not self._traded:
            self._traded = True
            self.Buy(self._option, 1)
        if self.Time == datetime(2014, 6, 6, 14, 0, 0):
            # liquidate & remove the option
            self.RemoveOptionContract(self._option)
    def OnSecuritiesChanged(self, changes):
        # keep track of all removed and added securities
        if self._securityChanges == None:
            self._securityChanges = changes
        else:
            # NOTE(review): the result of op_Addition appears to be
            # discarded here -- confirm whether it mutates in place or the
            # combined changes should be re-assigned.
            self._securityChanges.op_Addition(self._securityChanges, changes)
        # Ignore the change events triggered by our own AddOptionContract.
        if any(security.Symbol.SecurityType == SecurityType.Option for security in changes.AddedSecurities):
            return
        for addedSecurity in changes.AddedSecurities:
            # Pick the first American call with the target expiry for each
            # newly-added underlying and subscribe to it manually.
            options = self.OptionChainProvider.GetOptionContractList(addedSecurity.Symbol, self.Time)
            options = sorted(options, key=lambda x: x.ID.Symbol)
            option = next((option for option in options if option.ID.Date == self._expiration and option.ID.OptionRight == OptionRight.Call and option.ID.OptionStyle == OptionStyle.American), None)
            self.AddOptionContract(option)
            # just keep the first we got
            if self._option == None:
                self._option = option
|
nilq/baby-python
|
python
|
import pytest
from django.contrib.auth.models import User
from shrubberies.factories import UserFactory
from shrubberies.models import Profile
from .rules import Is, current_user
@pytest.mark.django_db
def test_is_user_function():
    """An Is rule built from a user-attribute getter matches only the
    requesting user's own profile, for both check() and filter()."""
    alice = UserFactory()
    bob = UserFactory()
    owns_profile = Is(lambda user: user.profile)
    # check(): grants only the user's own profile.
    assert owns_profile.check(alice, alice.profile)
    assert owns_profile.check(bob, bob.profile)
    assert not owns_profile.check(alice, bob.profile)
    assert not owns_profile.check(bob, alice.profile)
    # filter(): narrows the queryset down to exactly the owned profile.
    for owner, other in ((alice, bob), (bob, alice)):
        owned = owns_profile.filter(owner, Profile.objects.all())
        assert owned.count() == 1
        assert owner.profile in owned
        assert other.profile not in owned
@pytest.mark.django_db
def test_is_never_global():
    """Without a target object, an Is rule must deny."""
    someone = UserFactory()
    assert not Is(lambda u: u.profile).check(someone)
@pytest.mark.django_db
def test_current_user():
    """current_user grants access only to the requesting user themself."""
    alice, bob = UserFactory(), UserFactory()
    assert current_user.check(alice, alice)
    assert not current_user.check(alice, bob)
    assert {u for u in current_user.filter(alice, User.objects.all())} == {alice}
|
nilq/baby-python
|
python
|
if __name__ == '__main__':
    from scummer.validator import Validator
    # Sample payload exercising every schema feature declared below.
    t = {
        'a': 'x',
        'b': {
            'b1': 123
        },
        'c': [1,2],
        'd': {
            'x': 1,
            'y': 'aaaa'
        }
    }
    # Schema: an enum, a nested object with an optional field, a typed
    # list, and a map whose values must all be ints.
    # NOTE(review): t['d']['y'] is a string while the map definition says
    # int -- presumably validate() is expected to reject it; confirm.
    v = Validator(schema={
        'a': ('enum',{
            'items': ['x','y']
        }),
        'b': {
            'b1': ['str','int'],
            'b2': ('int',{'required':False})
        },
        'c': 'int[]',
        'd': ('map',{
            'definition': 'int'
        })
    })
    v.validate(t)
|
nilq/baby-python
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
import pdb
def main():
    """Plot mean +/- variance of the squared overlap and of BFGS evaluation
    counts versus circuit depth, one error-bar series per value of m, from
    results_nm.csv; saves the two figures as PNGs."""
    results_df = pd.read_csv("results_nm.csv", delimiter=",")
    fig_0, ax_0 = plt.subplots()
    fig_1, ax_1 = plt.subplots()
    for m in results_df.m.unique():
        m_subset = results_df[results_df.m == m]
        m_means = []
        m_vars = []
        m_evals = []
        m_evals_var = []
        # Aggregate repeated runs per circuit depth (steps).
        for steps in m_subset.steps.unique():
            steps_subset = m_subset[m_subset.steps == steps]
            mean_overlap = steps_subset.squared_overlap.mean()
            var_overlap = steps_subset.squared_overlap.var()
            m_means.append(mean_overlap)
            m_vars.append(var_overlap)
            mean_evals = steps_subset.bfgs_evaluations.mean()
            var_evals = steps_subset.bfgs_evaluations.var()
            m_evals.append(mean_evals)
            m_evals_var.append(var_evals)
        # NOTE(review): yerr is fed the variance, not the standard
        # deviation -- confirm which was intended for the error bars.
        ax_0.errorbar(m_subset.steps.unique(), m_means, yerr=m_vars, label=str(int(m)), marker="o", capsize=2)
        ax_1.errorbar(m_subset.steps.unique(), m_evals, yerr=m_evals_var, label=str(int(m)), marker="o", capsize=2)
    ax_0.set_xlabel("Number of circuit layers")
    ax_0.set_ylabel("Squared overlap")
    ax_0.set_ylim([0, 1.05])
    ax_0.legend()
    ax_1.set_xlabel("Number of circuit layers")
    ax_1.set_ylabel("BFGS function evaluations")
    ax_1.legend()
    fig_0.savefig("squared_overlap")
    fig_1.savefig("bfgs_evaluations")
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import json
import sys
import os
def get_offset(call):
    """Return the hex number following the first '+' in *call*, or None when
    there is no '+' or the text after it is not valid hex."""
    parts = call.split("+")
    try:
        return int(parts[1], 16)
    except (IndexError, ValueError):
        # Narrowed from a bare except: only "no '+'" / "not hex" can occur.
        return None
def get_hashsum(call):
    """Extract the module identifier from one call-stack line: the text
    between '{' and '}' when present, otherwise the text before the first
    '!', otherwise the whole string."""
    try:
        return call.split("{")[1].split("}")[0]
    except IndexError:
        # No '{...}' wrapper; fall back to the 'module!symbol' form.
        # str.split always yields at least one piece, so this cannot fail.
        return call.split("!")[0]
def generate_tag(call, proto):
    """Build a tag dict {offset, tag, feeder, hash} from one call-stack
    line, or return None when the line carries no parsable offset/hash."""
    offset = get_offset(call)
    if offset is None:
        return None
    hashsum = get_hashsum(call)
    if hashsum is None:
        return None
    return {'offset': offset, 'tag': proto, 'feeder': 'TagPcap', 'hash': hashsum}
def export_tags(tag_list, jsonf):
    """Write *tag_list* as JSON to <jsonf>_TagPcap.json and report counts.

    Fix: the file is now opened in text mode ('w'); json.dump writes str,
    so the previous binary mode ('wb') raised TypeError on Python 3.
    """
    out_f = jsonf + "_TagPcap" + ".json"
    with open(out_f, 'w') as f:
        json.dump(tag_list, f)
    print("[TagPcap] Tags created : " + str(len(tag_list)))
    print("[TagPcap] Tags exported : " + out_f)
def usage():
    """Print command-line usage for this script."""
    print("Usage : TagPcap.py <JSON PCAP file>")
def main(argv):
    """Read a tshark-exported JSON PCAP, pull call-stack lines out of each
    packet's frame comment, convert them to tags, and export the result
    alongside the input file."""
    try:
        jsonf = sys.argv[1]
    except:
        # No input file supplied on the command line.
        usage()
        return
    tag_list = []
    with open(jsonf, "r") as f:
        json_data = json.load(f)
    for l0 in json_data:
        try:
            # The packet comment holds one call-stack entry per line.
            callstack = l0["_source"]["layers"]["pkt_comment"]["frame.comment"]
            calls = callstack.splitlines()
            proto = l0["_source"]["layers"]["frame"]["frame.protocols"]
        except:
            # Packet without a comment/protocol layer: skip it.
            continue
        for call in calls:
            tag = generate_tag(call, proto)
            if tag is not None:
                tag_list.append(tag)
    export_tags(tag_list, jsonf)
if __name__ == "__main__":
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
import os
import shutil
# optional: if you get a SSL CERTIFICATE_VERIFY_FAILED exception
import ssl
import sys
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from urllib.request import urlopen, urlretrieve
from zipfile import ZipFile, is_zipfile
import pandas as pd
from tqdm import tqdm
import socceraction.spadl as spadl
import socceraction.spadl.statsbomb as statsbomb
import socceraction.spadl.wyscout as wyscout
# Globally disable TLS certificate verification so the figshare/StatsBomb
# downloads succeed behind broken certificate chains.
# NOTE(review): this weakens security for every HTTPS request made by the
# whole process.
ssl._create_default_https_context = ssl._create_unverified_context
# All data folders live next to this module.
_data_dir = os.path.dirname(__file__)
def read_json_file(filename):
    """Return the file's bytes decoded with 'unicode_escape' (the raw data
    files contain \\uXXXX escapes that must be unescaped before parsing).

    Fix: dropped the pointless BytesIO round-trip -- reading the bytes and
    decoding them directly is equivalent.
    """
    with open(filename, 'rb') as json_file:
        return json_file.read().decode('unicode_escape')
def download_statsbomb_data():
    """Download the StatsBomb open-data archive and unpack its data/ folder
    into <pkg>/statsbomb/raw, replacing whatever was there before."""
    dataset_url = 'https://github.com/statsbomb/open-data/archive/master.zip'
    tmp_datafolder = os.path.join(_data_dir, 'statsbomb', 'tmp')
    raw_datafolder = os.path.join(_data_dir, 'statsbomb', 'raw')
    for datafolder in [tmp_datafolder, raw_datafolder]:
        if not os.path.exists(datafolder):
            os.makedirs(datafolder, exist_ok=True)
    statsbombzip = os.path.join(tmp_datafolder, 'statsbomb-open-data.zip')
    # Stream the (large) archive to disk first, then extract it.
    with urlopen(dataset_url) as dl_file:
        with open(statsbombzip, 'wb') as out_file:
            out_file.write(dl_file.read())
    with ZipFile(statsbombzip, 'r') as zipObj:
        zipObj.extractall(tmp_datafolder)
    # Swap the freshly-extracted data/ folder in place of the old raw one.
    shutil.rmtree(raw_datafolder)
    Path(f'{tmp_datafolder}/open-data-master/data').rename(raw_datafolder)
    shutil.rmtree(tmp_datafolder)
def convert_statsbomb_data():
    """Convert the selected StatsBomb competitions (2018 World Cup) to the
    SPADL representation and store games/teams/players plus per-game actions
    in one HDF5 file per competition+season."""
    seasons = {
        3: '2018',
    }
    leagues = {
        'FIFA World Cup': 'WorldCup',
    }
    spadl_datafolder = os.path.join(_data_dir, 'statsbomb')
    free_open_data_remote = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/'
    SBL = statsbomb.StatsBombLoader(root=free_open_data_remote, getter='remote')
    # View all available competitions
    df_competitions = SBL.competitions()
    df_selected_competitions = df_competitions[
        df_competitions.competition_name.isin(leagues.keys())
    ]
    for competition in df_selected_competitions.itertuples():
        # Get games from all selected competition
        games = SBL.games(competition.competition_id, competition.season_id)
        games_verbose = tqdm(list(games.itertuples()), desc='Loading match data')
        teams, players = [], []
        competition_id = leagues[competition.competition_name]
        season_id = seasons[competition.season_id]
        spadl_h5 = os.path.join(spadl_datafolder, f'spadl-{competition_id}-{season_id}.h5')
        with pd.HDFStore(spadl_h5) as spadlstore:
            # Static SPADL vocabularies first.
            spadlstore.put('actiontypes', spadl.actiontypes_df(), format='table')
            spadlstore.put('results', spadl.results_df(), format='table')
            spadlstore.put('bodyparts', spadl.bodyparts_df(), format='table')
            for game in games_verbose:
                # load data
                teams.append(SBL.teams(game.game_id))
                players.append(SBL.players(game.game_id))
                events = SBL.events(game.game_id)
                # convert data
                spadlstore.put(
                    f'actions/game_{game.game_id}',
                    statsbomb.convert_to_actions(events, game.home_team_id),
                    format='table',
                )
            games.season_id = season_id
            games.competition_id = competition_id
            spadlstore.put('games', games)
            # Teams/players appear once per game: deduplicate before storing.
            spadlstore.put(
                'teams',
                pd.concat(teams).drop_duplicates('team_id').reset_index(drop=True),
            )
            spadlstore.put(
                'players',
                pd.concat(players).drop_duplicates('player_id').reset_index(drop=True),
            )
def download_wyscout_data():
    """Download the public Wyscout soccer match event dataset.

    Fetches the figshare files into ``<_data_dir>/wyscout_public/raw`` and
    extracts any zip archives in place.
    """
    # https://figshare.com/collections/Soccer_match_event_dataset/4415000/5
    dataset_urls = {
        'competitions': 'https://ndownloader.figshare.com/files/15073685',
        'teams': 'https://ndownloader.figshare.com/files/15073697',
        'players': 'https://ndownloader.figshare.com/files/15073721',
        'games': 'https://ndownloader.figshare.com/files/14464622',
        'events': 'https://ndownloader.figshare.com/files/14464685',
    }
    target_dir = os.path.join(_data_dir, 'wyscout_public', 'raw')
    os.makedirs(target_dir, exist_ok=True)
    # Download and unzip the Wyscout open data; figshare redirects, so the
    # final URL is resolved first to get a meaningful file name.
    for dataset_url in tqdm(dataset_urls.values(), desc='Downloading data'):
        resolved_url = urlopen(dataset_url).geturl()
        local_name = os.path.join(target_dir, Path(urlparse(resolved_url).path).name)
        downloaded_file, _ = urlretrieve(resolved_url, local_name)
        if is_zipfile(downloaded_file):
            with ZipFile(downloaded_file) as archive:
                archive.extractall(target_dir)
def convert_wyscout_data():
    """Convert the raw public Wyscout JSON data to SPADL format.

    Reads JSON files from ``<_data_dir>/wyscout_public/raw`` and writes one
    HDF5 file per selected (competition, season) containing per-game SPADL
    action tables plus games/teams/players metadata.
    """
    # Map Wyscout season/competition ids to the names used in file names.
    seasons = {
        10078: '2018',
    }
    leagues = {
        28: 'WorldCup',
    }
    raw_datafolder = os.path.join(_data_dir, 'wyscout_public', 'raw')
    spadl_datafolder = os.path.join(_data_dir, 'wyscout_public')
    # select competitions
    json_competitions = read_json_file(f'{raw_datafolder}/competitions.json')
    df_competitions = pd.read_json(json_competitions)
    # Rename competitions to the names used in the file names
    df_competitions['name'] = df_competitions.apply(
        lambda x: x.area['name'] if x.area['name'] != '' else x['name'], axis=1
    )
    df_selected_competitions = df_competitions[df_competitions.wyId.isin(leagues.keys())]
    json_teams = read_json_file(f'{raw_datafolder}/teams.json')
    df_teams = wyscout.convert_teams(pd.read_json(json_teams))
    json_players = read_json_file(f'{raw_datafolder}/players.json')
    df_players = wyscout.convert_players(pd.read_json(json_players))
    for competition in df_selected_competitions.itertuples():
        json_games = read_json_file(
            f"{raw_datafolder}/matches_{competition.name.replace(' ', '_')}.json"
        )
        df_games = pd.read_json(json_games)
        competition_id = leagues[competition.wyId]
        # All matches of one competition file belong to a single season.
        season_id = seasons[df_games.seasonId.unique()[0]]
        df_games = wyscout.convert_games(df_games)
        df_games['competition_id'] = competition_id
        df_games['season_id'] = season_id
        json_events = read_json_file(
            f"{raw_datafolder}/events_{competition.name.replace(' ', '_')}.json"
        )
        # Group events per match so each game can be converted independently.
        df_events = pd.read_json(json_events).groupby('matchId', as_index=False)
        spadl_h5 = os.path.join(spadl_datafolder, f'spadl-{competition_id}-{season_id}.h5')
        # Store all spadl data in h5-file
        print(f'Converting {competition_id} {season_id}')
        with pd.HDFStore(spadl_h5) as spadlstore:
            spadlstore['actiontypes'] = spadl.actiontypes_df()
            spadlstore['results'] = spadl.results_df()
            spadlstore['bodyparts'] = spadl.bodyparts_df()
            spadlstore['games'] = df_games
            for game in tqdm(list(df_games.itertuples())):
                game_id = game.game_id
                game_events = wyscout.convert_events(df_events.get_group(game_id))
                # convert events to SPADL actions
                home_team = game.home_team_id
                df_actions = wyscout.convert_to_actions(game_events, home_team)
                df_actions['action_id'] = range(len(df_actions))
                spadlstore[f'actions/game_{game_id}'] = df_actions
            spadlstore['players'] = df_players
            # Keep only the teams that actually appear in the selected games.
            spadlstore['teams'] = df_teams[
                df_teams.team_id.isin(df_games.home_team_id)
                | df_teams.team_id.isin(df_games.away_team_id)
            ]
if __name__ == '__main__':
    # With no argument both pipelines run; otherwise argv[1] selects one of
    # 'statsbomb' or 'wyscout'.
    if len(sys.argv) == 1 or sys.argv[1] == 'statsbomb':
        download_statsbomb_data()
        convert_statsbomb_data()
    if len(sys.argv) == 1 or sys.argv[1] == 'wyscout':
        download_wyscout_data()
        convert_wyscout_data()
|
nilq/baby-python
|
python
|
def test_latest():
    """Smoke test: run a short talos Scan over an iris Keras model.

    Exercises the newest feature set (ExperimentLogCallback, 'gamify'
    reduction, round_limit) rather than asserting any results.
    """
    print('\n >>> start Latest Features... \n')
    import talos
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    x, y = talos.templates.datasets.iris()
    # Hyperparameter search space for the scan.
    p = {'activation': ['relu', 'elu'],
         'optimizer': ['Nadam', 'Adam'],
         'losses': ['logcosh'],
         'shapes': ['brick'],
         'first_neuron': [16, 32, 64, 128],
         'hidden_layers': [0, 1, 2, 3],
         'dropout': [.2, .3, .4],
         'batch_size': [20, 30, 40, 50],
         'epochs': [10]}
    def iris_model(x_train, y_train, x_val, y_val, params):
        """Build and fit one candidate model for a single *params* draw."""
        model = Sequential()
        model.add(Dense(params['first_neuron'],
                        input_dim=4,
                        activation=params['activation']))
        talos.utils.hidden_layers(model, params, 3)
        model.add(Dense(3, activation='softmax'))
        model.compile(optimizer=params['optimizer'],
                      loss=params['losses'], metrics=['acc'])
        out = model.fit(x_train,
                        y_train,
                        callbacks=[talos.utils.ExperimentLogCallback('test_latest', params)],
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        validation_data=(x_val, y_val),
                        verbose=0)
        return out, model
    # round_limit keeps the scan short; save_weights=False keeps it light.
    scan_object = talos.Scan(x, y,
                             model=iris_model,
                             params=p,
                             experiment_name='test_latest',
                             round_limit=5,
                             reduction_method='gamify',
                             save_weights=False)
    print('finised Latest Features \n')
|
nilq/baby-python
|
python
|
# Time Complexity: O(n^2); extra space O(1) beyond the output list.
class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets in *nums* that sum to zero.

        Sorts *nums* in place, then for each anchor element scans the
        remaining suffix with two pointers.  Duplicate anchors and duplicate
        pair members are skipped so each triplet appears exactly once.

        Fixes over the original: the debug ``print`` and the blocks of
        commented-out code have been removed; the triple sum is computed once
        per iteration.
        """
        res = []
        nums.sort()
        for cur in range(len(nums)):
            # Sorted input: once the anchor is positive, no triplet can reach 0.
            if nums[cur] > 0:
                break
            # Skip duplicate anchors so the same triplet is not emitted twice.
            if cur > 0 and nums[cur] == nums[cur - 1]:
                continue
            left, right = cur + 1, len(nums) - 1
            while left < right:
                total = nums[cur] + nums[left] + nums[right]
                if total > 0:
                    right -= 1
                elif total < 0:
                    left += 1
                else:
                    res.append([nums[cur], nums[left], nums[right]])
                    # Skip duplicates of both pair members before advancing.
                    while left < right and nums[left + 1] == nums[left]:
                        left += 1
                    while left < right and nums[right - 1] == nums[right]:
                        right -= 1
                    left += 1
                    right -= 1
        return res
|
nilq/baby-python
|
python
|
import pandas as pd
import os
# df = pd.read_csv('./train_annotation_list.csv')
# for i in range (len(df['Image_Path'])):
# dirname = os.path.dirname(df['Image_Path'][i])
# patient_name = os.path.basename(df['Image_Path'][i])
# patient_no = int(patient_name.split('.')[0].split('_')[1])
# folder = 'center_'+str(int(patient_no//20))
# corrected_path = os.path.join(dirname, folder, patient_name)
# # print (corrected_path)
# df['Image_Path'][i] = corrected_path
# Sanity check: scan the annotation CSV and report rows whose mask path is
# the placeholder string 'empty' (i.e. images without a segmentation mask).
df = pd.read_csv('./annotated_train_data.csv')
for i in range (len(df['Image_Path'])):
    image_path = df['Image_Path'][i]  # read but unused; kept as-is
    label_path = df['Mask_Path'][i]
    if label_path == 'empty':
        print (label_path)
|
nilq/baby-python
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
import errno
import os
import numpy as np
from pathlib import Path
import data_helper as dh
# Counts the number of learning agents in one log path
def count_learning_agents_in_logs_path(logs_path):
    """Count the non-random learning agents across all matches in *logs_path*.

    A match directory is any subdirectory that contains a ``win_rates.csv``;
    its name has the form ``<agentA>_vs_<agentB>``.  Agents whose name
    contains ``Random`` are not counted.
    """
    total = 0
    for candidate in logs_path.rglob("*"):
        # Only directories holding a win-rate log are match directories.
        if not candidate.is_dir():
            continue
        if not (candidate / 'win_rates.csv').exists():
            continue
        # Each directory name contributes up to two agents.
        total += sum(
            1 for agent in candidate.name.split('_vs_') if 'Random' not in agent
        )
    return total
if __name__=='__main__':
    # For every experiment under ../experiments, plot at which episode each
    # learning agent first reached a set of win-rate thresholds, and save the
    # bar chart under <experiment>/plots/convergence_speed/.
    # Create path to experiments directory
    experiments_path = Path(Path().cwd().parents[0] / 'experiments')
    # Loop through all training directories
    for train_directory_path in experiments_path.rglob("*"):
        # check if train_directory_path is directory
        if not train_directory_path.is_dir():
            continue
        # Loop through experiment directories
        for experiment_directory_path in train_directory_path.rglob("*"):
            # check if experiment_directory_path is directory
            if not experiment_directory_path.is_dir():
                continue
            # Create path to train/logs
            logs_path = experiment_directory_path / 'train/logs'
            # Check path to train/logs is available
            if not logs_path.exists():
                continue
            # Create new plot for each experiment
            plt.figure()
            # Current agent number
            i = 0
            # Number of learning agents in this experiment
            n = count_learning_agents_in_logs_path(logs_path)
            # loop through win rates directories in logs
            for content_path in sorted(logs_path.rglob("*")):
                # check if content is directory
                if not content_path.is_dir():
                    continue
                # Create file path to win_rates.csv
                win_rates_path = content_path / 'win_rates.csv'
                # Check if win rate csv exists
                # NOTE(review): the counter above silently skips directories
                # without win_rates.csv, while this loop raises — confirm
                # which behaviour is intended.
                if not win_rates_path.exists():
                    raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), win_rates_path)
                # Read dataframe from csv file
                winrate_df = dh.get_winrate_df_from_csv(win_rates_path)
                # Get directory name
                directory_name = content_path.name
                # Split directory name into agent names
                agent_names = directory_name.split('_vs_')
                # Iterate over both agents
                for agent in agent_names:
                    # Plot only winrates for not-random agents
                    if 'Random' in agent:
                        continue
                    # Get side of agent
                    side = dh.WINRATE_A if agent.split('_')[-1] == 'SideA' else dh.WINRATE_B
                    # Create list with winrates to investigate
                    winrate_bars = [0.60, 0.70, 0.80, 0.85, 0.90, 0.95, 0.97, 0.99]
                    # Create empty list for episode marks
                    episodes_for_winrate = []
                    # Get episodes, when the winrates are reached
                    for winrate in winrate_bars:
                        found_episode = False
                        for index, row in winrate_df.iterrows():
                            if row[side] > winrate and not found_episode:
                                episodes_for_winrate.append(row[dh.EPISODE])
                                found_episode = True
                                break
                        # Threshold never reached: plot a zero-height bar.
                        if not found_episode:
                            episodes_for_winrate.append(0)
                    width = 0.175
                    # the label locations
                    label_locations = np.arange(len(winrate_bars))
                    # Plot winrate of agent (bars of the n agents are laid
                    # side by side around each label location)
                    plt.bar(label_locations - ((n - 1)/2 * width) + i * width, episodes_for_winrate, width, label=agent)
                    i = i + 1
            # Set tick labels
            plt.xticks(label_locations, winrate_bars)
            # Set the x and y axis label
            plt.xlabel('win rate')
            plt.ylabel(dh.EPISODE)
            # Show legend
            plt.legend()
            # Create path to plots directory
            plots_path = experiment_directory_path /'plots'
            # Check plot directory is available
            if not plots_path.exists():
                os.mkdir(plots_path)
            # Create path to convergence_speed directory
            convergence_speed_path = Path(plots_path / 'convergence_speed')
            # Check convergence_speed directory is available
            if not convergence_speed_path.exists():
                os.mkdir(convergence_speed_path)
            # Get image name from source directroy
            image_name = 'convergence_speed.png'
            # Create image path
            image_path = convergence_speed_path / image_name
            # Save plot as image
            plt.savefig(image_path)
|
nilq/baby-python
|
python
|
# TODO: change name to queue
from db_works import db_connect, db_tables
import datetime
def get_settings(interval_param_):
    """Fetch the next due download job from the settings queue and lock it.

    *interval_param_* selects the queue:
      * ``'current'``      - live data from the API
      * ``'daily_hist'``   - historical data from daily files
      * ``'monthly_hist'`` - historical data from monthly files

    Returns the job's settings tuple.  Exits the process when the parameter
    is unknown or no job is due.  Side effect: the chosen row's
    ``download_setting_status_id`` is set to 1 ("blocked") so concurrent
    workers skip it.
    """
    db_schema_name, db_table_name, db_settings_table_name = db_tables()
    cursor, cnxn = db_connect()
    # interval parameter: current - API data; daily_hist - data from daily files; monthly_hist - data from monthly files
    if interval_param_ == "current":
        cursor.execute(
            "SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
            "current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
            "FROM " + db_schema_name + "." + db_settings_table_name + " WHERE current_update_from_api = 1 and "
            "download_setting_status_id = 0 and "
            "daily_hist_complete = 1 AND "
            "monthly_hist_complete = 1 AND "
            "coalesce(next_download_ux_timestamp, 0) <= "
            + str(int(datetime.datetime.utcnow().timestamp())) + " order by next_download_ux_timestamp asc limit 1")
    elif interval_param_ == "daily_hist":
        cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
                       "current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
                       "FROM " + db_schema_name + "." + db_settings_table_name + " WHERE daily_update_from_files = 1 and "
                       "download_setting_status_id = 0 and "
                       "daily_hist_complete = 0 AND "
                       "monthly_hist_complete = 1 AND "
                       "coalesce(start_hist_download_ux_timestamp, 0) <= "
                       + str(int(datetime.datetime.utcnow().timestamp())) + " order by start_hist_download_ux_timestamp asc limit 1")
    elif interval_param_ == "monthly_hist":
        cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
                       "current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
                       "FROM " + db_schema_name + "." + db_settings_table_name + " WHERE monthly_update_from_files = 1 and "
                       "download_setting_status_id = 0 and "
                       "monthly_hist_complete = 0 AND "
                       "coalesce(start_hist_download_ux_timestamp, 0) <= "
                       + str(int(datetime.datetime.utcnow().timestamp())) + " order by start_hist_download_ux_timestamp asc limit 1")
    else:
        # Unknown queue name: abort the worker process.
        exit()
    download_setting = cursor.fetchall()
    if len(download_setting) > 0:
        # Unpack the single selected row by column position (see SELECT list).
        download_settings_id = download_setting[0][0]
        market = download_setting[0][1]
        tick_interval = download_setting[0][2]
        data_granulation = download_setting[0][3]
        stock_type = download_setting[0][4]
        stock_exchange = download_setting[0][5]
        range_to_download = download_setting[0][6]
        download_api_interval_sec = download_setting[0][7]
        daily_update_from_files = download_setting[0][8]
        monthly_update_from_files = download_setting[0][9]
        start_hist_download_ux_timestamp = download_setting[0][10]
    else:
        print("no data to download")
        exit()
    # block current setting changing its status
    cursor.execute("UPDATE " + db_schema_name + "." + db_settings_table_name + " SET download_setting_status_id = %s where download_settings_id = %s", (1, download_settings_id))
    cnxn.commit()
    print("settings blocked")
    return download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, range_to_download, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp
# Stray module-level no-op print; runs once at import time.
print()
|
nilq/baby-python
|
python
|
from tests.utils import TEST_DATA_DIR
from dexy.doc import Doc
from tests.utils import wrap
import os
markdown_file = os.path.join(TEST_DATA_DIR, "markdown-test.md")
def run_kramdown(ext):
    """Run the markdown test document through the kramdown filter.

    *ext* selects the output format (e.g. ``.html`` or ``.tex``).  Returns
    the cached output data of the dexy node.
    """
    with open(markdown_file, 'r') as f:
        example_markdown = f.read()
    with wrap() as wrapper:
        node = Doc("markdown.md|kramdown",
                wrapper,
                [],
                kramdown = { 'ext' : ext },
                contents = example_markdown
                )
        wrapper.run_docs(node)
        assert node.output_data().is_cached()
        return node.output_data()
def test_kramdown_html():
    """Kramdown HTML output should contain the generated section anchor."""
    html = str(run_kramdown(".html"))
    assert """<h2 id="download">""" in html
def test_kramdown_tex():
    """Kramdown .tex output should contain a LaTeX subsection command."""
    tex = str(run_kramdown(".tex"))
    # Raw string fix: "\s" in a normal literal is an invalid escape sequence
    # (DeprecationWarning on modern Python); the value is unchanged.
    assert r"\subsection" in tex
|
nilq/baby-python
|
python
|
from flask import Blueprint
from flask import redirect
from flask import render_template
from flask import abort, jsonify, request
from flask_login import current_user
from flask_login import login_required
from app.models import User, load_user
from app.extensions import db
import os
import stripe
# Configure Stripe from the environment; a missing key fails fast at import.
stripe.api_key = os.environ['STRIPE_SECRET_KEY']
stripe_bp = Blueprint('stripe', __name__)
stripe_prefix = '/stripe'
# Product catalogue. Prices are in cents (Stripe's smallest currency unit).
products = {
    'private_model': {
        'name': 'your own private fine-tuned model',
        'price': 5000,
        'per': 'month',
        'adjustable_quantity': {
            'enabled': True,
            'minimum': 1,
            'maximum': 3,
        },
    },
}
@stripe_bp.route('/')
def index():
    """Product listing page; shows the user's current allowance when logged in."""
    if current_user.is_authenticated:
        user = load_user(current_user.get_id())
        allowed = user.models_allowed
        return render_template('stripe.html', username=str(user), allowed=allowed, products=products,
                           stripe_prefix=stripe_prefix)
    else:
        return render_template('stripe.html', products=products,stripe_prefix=stripe_prefix)
@stripe_bp.route('/order/<product_id>', methods=['POST'])
@login_required
def order(product_id):
    """Create a Stripe Checkout session for *product_id* and redirect to it.

    The current user's id is attached as session metadata so the webhook can
    credit the right account after payment completes.
    """
    if product_id not in products:
        abort(404)
    checkout_session = stripe.checkout.Session.create(
        line_items=[
            {
                'price_data': {
                    'product_data': {
                        'name': products[product_id]['name'],
                    },
                    'unit_amount': products[product_id]['price'],
                    'currency': 'usd',
                },
                'quantity': 1,
                # Products may allow the buyer to adjust the quantity.
                'adjustable_quantity': products[product_id].get(
                    'adjustable_quantity', {'enabled': False}),
            },
        ],
        payment_method_types=['card'],
        mode='payment',
        success_url=request.host_url + stripe_prefix.replace('/', '') + '/order/success',
        cancel_url=request.host_url + stripe_prefix.replace('/', '') + '/order/cancel',
        metadata={'userId': current_user.get_id()},
    )
    return redirect(checkout_session.url)
@stripe_bp.route('/order/success')
@login_required
def success():
    """Landing page after a completed Stripe Checkout session."""
    return render_template('success.html')
@stripe_bp.route('/order/cancel')
@login_required
def cancel():
    """Landing page after a cancelled Stripe Checkout session."""
    return render_template('cancel.html')
@stripe_bp.route('/webhook', methods=['POST'])
def webhook():
    """Stripe webhook endpoint.

    Verifies the payload signature, then on ``checkout.session.completed``
    increments the purchasing user's model allowance.  Other event types are
    acknowledged but not handled.
    """
    event = None
    payload = request.data
    sig_header = request.headers['STRIPE_SIGNATURE']
    try:
        event = stripe.Webhook.construct_event(
            payload, sig_header, os.environ['STRIPE_WEBHOOK_SECRET']
        )
    except ValueError as e:
        # Invalid payload
        raise e
    except stripe.error.SignatureVerificationError as e:
        # Invalid signature
        raise e
    # Handle the event
    if event['type'] == 'checkout.session.async_payment_failed':
        session = event['data']['object']
    elif event['type'] == 'checkout.session.async_payment_succeeded':
        session = event['data']['object']
    elif event['type'] == 'checkout.session.completed':
        session = event['data']['object']
        print('🔔 Payment succeeded!')
        # The user id was stored in the session metadata by order().
        user_id = session['metadata']['userId']
        user = load_user(user_id)
        user.inc_models_allowed()
        db.session.commit()
        # session = stripe.checkout.Session.retrieve(
        #     event['data']['object'].id, expand=['line_items'])
        # print(f'Sale to {session.customer_details.email}:')
        # for item in session.line_items.data:
        #     print(f'  - {item.quantity} {item.description} '
        #           f'${item.amount_total/100:.02f} {item.currency.upper()}')
    # ... handle other event types
    else:
        print('Unhandled event type {}'.format(event['type']))
    return jsonify(success=True)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404
from django.db import models, migrations
def encrypt_secrets(apps, schema_editor):
    """Re-save every Secret row so stored values pass through encryption.

    Uses the historical model via ``apps.get_model`` as data migrations
    require.  NOTE(review): historical models do not carry custom ``save()``
    methods — presumably the encryption happens at field level; confirm.
    """
    Secret = apps.get_model("server", "Secret")
    for secret in Secret.objects.all():
        secret.save()
class Migration(migrations.Migration):
    """Data migration: re-save all Secret rows (see encrypt_secrets)."""
    dependencies = [
        ('server', '0006_auto_20150714_0821'),
    ]
    operations = [
        # No reverse function is given, so this migration is irreversible.
        migrations.RunPython(encrypt_secrets),
    ]
|
nilq/baby-python
|
python
|
import pytest
from tartiflette_middleware.examples.standalone import\
StandaloneMiddleware
from tartiflette_middleware.exceptions import\
RequestDataNotStoredException
class TestStandaloneMiddleware:
    """Tests for the standalone middleware example."""
    def test_standalone_example_init(self):
        # Construction alone must not raise.
        service = StandaloneMiddleware()
    @pytest.mark.asyncio
    async def test_standalone_example_call_data_not_set(self):
        # Calling before the context manager stored any data must raise.
        service = StandaloneMiddleware()
        service.request = {'fake': 'data'}
        with pytest.raises(RequestDataNotStoredException):
            await service()
    @pytest.mark.asyncio
    async def test_standalone_example_call_data_set(self):
        # Entering the async context stores the example data; the call then
        # returns the stored value ('foo').
        service = StandaloneMiddleware()
        service.request = {'fake': 'data'}
        async with service:
            pass
        assert await service() == 'foo'
|
nilq/baby-python
|
python
|
import pyrebase
# Firebase project configuration for pyrebase.
# Bug fix: the project id key was written as " projectId" (leading space),
# so pyrebase could never look up the project id from this config.
config = {
    "apiKey": "AIzaSyDYt-fmafI1kkMZSIphL829C6QgdlE1Tro",
    "authDomain": "cp19-12.firebaseapp.com",
    "databaseURL": "https://cp19-12.firebaseio.com",
    "projectId": "cp19-12",
    "storageBucket": "cp19-12.appspot.com",
    "messagingSenderId": "681358965828",
    "appId": "1:681358965828:web:3e31fb7429aed930"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
storage = firebase.storage()
import flask
from flask import Flask , render_template
app = Flask(__name__)
@app.route("/")
def hello():
    # Fetch the 'names' node from the Firebase realtime database.
    new_post=db.child("names").get()
    user=new_post.val()
    # NOTE(review): .val() normally yields a dict, which has no .key()
    # method (only .keys()) — confirm this route works against real data.
    l=user.key()
    return l
@app.route("/Taha")
def Taha():
    """Static greeting route."""
    return "Hello taha!"
@app.route("/Daniyal")
def Daniyal():
    """Static greeting route."""
    return "Hello Daniyal"
@app.route("/fahad")
def fahad():
    """Static greeting route."""
    return "Hello fahad"
# Run the development server (debug mode; not for production use).
app.run(debug=True)
|
nilq/baby-python
|
python
|
# 代码仅供学习交流,不得用于商业/非法使用
# 作者:Charles
# 公众号:Charles的皮卡丘
# 视频下载器-Demo版
# 目前支持的平台:
# 网易云课堂: wangyiyun.wangyiyun()
# 音悦台: yinyuetai.yinyuetai()
# B站: bilibili.bilibili()
import os
import threading
from platforms import *
from utils.utils import *
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from PIL import Image, ImageTk
# Downloader worker thread.
class Download_Thread(threading.Thread):
    """Background worker that downloads one video per resume() request.

    The GUI sets ``engine`` and ``url``, then calls ``resume()``; the worker
    downloads, reports via tkinter message boxes, and pauses itself until the
    next request.  ``stop()`` ends the worker loop.
    """

    def __init__(self, *args, **kwargs):
        super(Download_Thread, self).__init__(*args, **kwargs)
        # Cleared -> worker blocks in wait(); set -> worker proceeds.
        self.__pause = threading.Event()
        self.__pause.clear()
        # Set -> keep looping; cleared -> run() exits.
        self.__running = threading.Event()
        self.__running.set()
        # True once start() has been called; the GUI checks this so the
        # thread is started only once.
        self.flag = False
        # Engine codes:
        #   NetEase Cloud Classroom (网易云课堂) -> '1'
        #   YinYueTai (音悦台)                   -> '2'
        #   Bilibili (B站)                       -> '3'
        self.engine = None
        self.url = None
        self.savepath = './videos'

    def run(self):
        # Engine code -> downloader class. All three share the same
        # .get(url, savepath=..., app='demo') interface, so the three
        # duplicated branches of the original are collapsed into one.
        downloaders = {
            '1': wangyiyun.wangyiyun,
            '2': yinyuetai.yinyuetai,
            '3': bilibili.bilibili,
        }
        # Fix: Event.is_set() replaces the deprecated camelCase isSet() alias.
        while self.__running.is_set():
            self.__pause.wait()
            self.flag = True
            downloader_cls = downloaders.get(self.engine)
            if downloader_cls is None:
                title = '解析失败'
                msg = '平台选项参数解析失败!'
                messagebox.showerror(title, msg)
            else:
                self.show_start_info()
                try:
                    res = downloader_cls().get(self.url, savepath=self.savepath, app='demo')
                    if res != 200:
                        raise RuntimeError('url request error...')
                except:
                    # Any failure (network, parse) is reported via a dialog;
                    # the worker then still shows the "finished" dialog, as
                    # the original code did.
                    self.show_parse_error()
                self.show_end_info(savepath=self.savepath)
            # Wait for the next request.
            self.pause()

    def pause(self):
        """Block the worker before its next iteration."""
        self.__pause.clear()

    def resume(self):
        """Let the worker run one download iteration."""
        self.__pause.set()

    def stop(self):
        """Ask the worker loop to exit."""
        self.__running.clear()

    def show_start_info(self):
        """Dialog: download started."""
        title = '开始下载'
        msg = '搜索平台: {}\n已开始下载{},请耐心等待。'.format(self.engine, self.url)
        messagebox.showinfo(title, msg)

    def show_end_info(self, savepath='./videos'):
        """Dialog: download finished (savepath kept for interface compatibility, unused)."""
        title = '下载成功'
        msg = '{}下载成功。'.format(self.url)
        messagebox.showinfo(title, msg)

    def show_parse_error(self):
        """Dialog: video link could not be parsed/downloaded."""
        title = '解析失败'
        msg = '视频链接解析失败!'
        messagebox.showerror(title, msg)

# Shared singleton worker used by the GUI callbacks.
t_download = Download_Thread()
# Converter worker thread (the original header comment mislabelled this as
# the "downloader class").
class Transfer_Thread(threading.Thread):
    """Background worker that converts one video file per resume() request."""

    def __init__(self, *args, **kwargs):
        super(Transfer_Thread, self).__init__(*args, **kwargs)
        # Cleared -> worker blocks in wait(); set -> worker proceeds.
        self.__pause = threading.Event()
        self.__pause.clear()
        # Set -> keep looping; cleared -> run() exits.
        self.__running = threading.Event()
        self.__running.set()
        # True once start() has been called (the GUI starts it only once).
        self.flag = False
        self.origin_file = None
        self.target_format = None
        self.savepath = 'results'

    def run(self):
        # Fix: Event.is_set() replaces the deprecated camelCase isSet() alias.
        while self.__running.is_set():
            self.__pause.wait()
            self.flag = True
            title = '开始转换'
            msg = '已开始转换视频,请耐心等待。'
            messagebox.showinfo(title, msg)
            try:
                result = transfer(origin_file=self.origin_file, target_format=self.target_format, savepath=self.savepath)
                if result is False:
                    raise RuntimeError('origin_file unsupported...')
            except:
                # Unsupported input or conversion failure -> error dialog.
                title = '转换失败'
                msg = '视频转换失败!'
                messagebox.showerror(title, msg)
            # Wait for the next request.
            self.pause()

    def pause(self):
        """Block the worker before its next iteration."""
        self.__pause.clear()

    def resume(self):
        """Let the worker run one conversion iteration."""
        self.__pause.set()

    def stop(self):
        """Ask the worker loop to exit."""
        self.__running.clear()

# Shared singleton worker used by the GUI callbacks.
t_transfer = Transfer_Thread()
# Download entry point (wired to the GUI button).
def downloader(options, op_engine_var, en_videourl_var):
    """Configure the shared download thread from the GUI widgets and resume it."""
    # The engine code is the 1-based index of the selected option label.
    engine = str(options.index(str(op_engine_var.get())) + 1)
    t_download.engine = engine
    t_download.url = str(en_videourl_var.get())
    # The thread is started lazily on first use, then merely resumed.
    if t_download.flag is False:
        t_download.start()
    t_download.resume()
# Conversion entry point (wired to the GUI button).
def converter(en_originvideo_var, op_tarformat_var):
    """Configure the shared conversion thread from the GUI widgets and resume it."""
    t_transfer.origin_file = str(en_originvideo_var.get())
    t_transfer.target_format = str(op_tarformat_var.get())
    # The thread is started lazily on first use, then merely resumed.
    if t_transfer.flag is False:
        t_transfer.start()
    t_transfer.resume()
# Pick a video file.
def ChoiceVideo(en_originvideo):
    """Open a file dialog and put the chosen path into the entry widget."""
    filepath = filedialog.askopenfilename()
    en_originvideo.delete(0, END)
    en_originvideo.insert(INSERT, filepath)
# About the author.
def ShowAuthor():
    """Show an "about the author" message box."""
    title = '关于作者'
    msg = '作者: Charles\n公众号: Charles的皮卡丘\nGithub: https://github.com/CharlesPikachu/Video-Downloader'
    messagebox.showinfo(title, msg)
# Quit the application.
def stopDemo(root):
    """Stop both worker threads and tear down the Tk main loop."""
    t_download.stop()
    t_transfer.stop()
    root.destroy()
    root.quit()
# Main window.
def Demo(options):
    """Build and run the Tk main window for downloading and converting videos.

    *options* holds the platform labels for the drop-down; the 1-based index
    of the selected label becomes the engine code (see downloader()).
    """
    assert len(options) > 0
    # Initialisation
    root = Tk()
    root.title('视频下载器V1.0——公众号:Charles的皮卡丘')
    root.resizable(False, False)
    root.geometry('600x375+400+120')
    image_path = './bgimgs/bg1_demo.jpg'
    bgimg = Image.open(image_path)
    bgimg = ImageTk.PhotoImage(bgimg)
    lb_bgimg = Label(root, image=bgimg)
    lb_bgimg.grid()
    # Menu
    menubar = Menu(root)
    filemenu = Menu(menubar, tearoff=False)
    filemenu.add_command(label='退出', command=lambda: stopDemo(root), font=('楷体', 10))
    menubar.add_cascade(label='文件', menu=filemenu)
    filemenu = Menu(menubar, tearoff=False)
    filemenu.add_command(label='关于作者', command=ShowAuthor, font=('楷体', 10))
    menubar.add_cascade(label='更多', menu=filemenu)
    root.config(menu=menubar)
    # Label widgets (section titles)
    lb_title1 = Label(root, text='视频下载', font=('楷体', 15), bg='white')
    lb_title1.place(relx=0.5, rely=0.05, anchor=CENTER)
    lb_title2 = Label(root, text='视频转换', font=('楷体', 15), bg='white')
    lb_title2.place(relx=0.5, rely=0.55, anchor=CENTER)
    # Video download section:
    # Label+Entry widgets (video URL)
    lb_videourl = Label(root, text='视频地址:', font=('楷体', 10), bg='white')
    lb_videourl.place(relx=0.1, rely=0.15, anchor=CENTER)
    en_videourl_var = StringVar()
    en_videourl = Entry(root, textvariable=en_videourl_var, width=55, fg='gray', relief=GROOVE, bd=3)
    en_videourl.insert(0, 'http://study.163.com/course/courseMain.htm?courseId=1003842018')
    en_videourl.place(relx=0.49, rely=0.15, anchor=CENTER)
    # Label+OptionMenu widgets (platform selection)
    lb_engine = Label(root, text='搜索平台:', font=('楷体', 10), bg='white')
    lb_engine.place(relx=0.1, rely=0.25, anchor=CENTER)
    op_engine_var = StringVar()
    op_engine_var.set(options[0])
    op_engine = OptionMenu(root, op_engine_var, *options)
    op_engine.place(relx=0.26, rely=0.25, anchor=CENTER)
    # Button widgets (download and quit)
    bt_download = Button(root, text='下载视频', bd=2, width=15, height=2, command=lambda: downloader(options, op_engine_var, en_videourl_var), font=('楷体', 10))
    bt_download.place(relx=0.26, rely=0.38, anchor=CENTER)
    bt_quit = Button(root, text='退出程序', bd=2, width=15, height=2, command=lambda: stopDemo(root), font=('楷体', 10))
    bt_quit.place(relx=0.52, rely=0.38, anchor=CENTER)
    # Video conversion section:
    # Label+Entry+Button widgets (video path)
    lb_originvideo = Label(root, text='视频路径:', font=('楷体', 10), bg='white')
    lb_originvideo.place(relx=0.1, rely=0.65, anchor=CENTER)
    en_originvideo_var = StringVar()
    en_originvideo = Entry(root, textvariable=en_originvideo_var, width=55, fg='gray', relief=GROOVE, bd=3)
    en_originvideo.insert(0, '请输入/选择视频路径')
    en_originvideo.place(relx=0.49, rely=0.65, anchor=CENTER)
    bt_choice = Button(root, text='打开', bd=1, width=5, height=1, command=lambda: ChoiceVideo(en_originvideo), font=('楷体', 10))
    bt_choice.place(relx=0.86, rely=0.65, anchor=CENTER)
    # Label+OptionMenu widgets (target format)
    lb_tarformat = Label(root, text='目标格式:', font=('楷体', 10), bg='white')
    lb_tarformat.place(relx=0.1, rely=0.75, anchor=CENTER)
    options_format = ['mp4', 'flv', 'avi']
    op_tarformat_var = StringVar()
    op_tarformat_var.set(options_format[0])
    op_tarformat = OptionMenu(root, op_tarformat_var, *options_format)
    op_tarformat.place(relx=0.22, rely=0.75, anchor=CENTER)
    # Button widget (start conversion)
    bt_transfer = Button(root, text='开始转换', bd=2, width=15, height=2, command=lambda: converter(en_originvideo_var, op_tarformat_var), font=('楷体', 10))
    bt_transfer.place(relx=0.52, rely=0.75, anchor=CENTER)
    root.mainloop()
if __name__ == '__main__':
    # Platform labels for the drop-down; their 1-based index is the engine code.
    options = ["1.网易云课堂", "2.音悦台", "3.B站"]
    Demo(options)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import urllib2
import json
import re,datetime
import sys
import csv
class L():
    "Anonymous container"
    # NOTE(review): this file is Python 2 (urllib2, dict.iteritems); the
    # code is documented as-is, not ported.
    def __init__(i,**fields) :
        i.override(fields)
    # Merge dict *d* into the instance namespace; returns self (fluent).
    def override(i,d): i.__dict__.update(d); return i
    def __repr__(i):
        # NOTE(review): calls pretty(), which is not defined in this file —
        # confirm it exists at runtime.
        d = i.__dict__
        name = i.__class__.__name__
        return name+'{'+' '.join([':%s %s' % (k,pretty(d[k]))
                                  for k in i.show()])+ '}'
    # Render all non-None attributes as "key : value" strings.
    def show(i):
        lst = [str(k)+" : "+str(v) for k,v in i.__dict__.iteritems() if v != None]
        return ',\t'.join(map(str,lst))
def secs(d0):
    """Convert a timestamp string such as ``2020-01-01T00:00:00Z`` to
    seconds since the Unix epoch.

    The string is split on every non-digit character; the resulting fields
    (year, month, day, hour, minute, second) build a naive UTC datetime.
    """
    fields = re.split(r'[^\d]', d0)[:-1]
    moment = datetime.datetime(*map(int, fields))
    epoch = datetime.datetime(1970, 1, 1)
    return (moment - epoch).total_seconds()
def dump1(u,commits,csvwriter):
    """Fetch one page of commits from the GitHub API URL *u* and append
    (author login, commit epoch seconds) rows to *csvwriter*.

    Authenticates with the token stored in ../token.info.  Returns False
    when the page is empty (pagination finished), True otherwise.  The
    *commits* argument is currently unused.
    """
    f = open("../token.info","r")
    token = f.readline().strip('\n')
    f.close()
    request = urllib2.Request(u, headers={"Authorization" : "token "+token})
    v = urllib2.urlopen(request).read()
    w = json.loads(v)
    if not w: return False
    for event in w:
        user = event['author']['login']
        print(user)
        commit_at = event['commit']['committer']['date']
        commit_at = secs(commit_at)
        print(commit_at)
        csvwriter.writerows([[user,commit_at]])
    return True
def dump(u, commits,csvwriter):
    """Best-effort wrapper around dump1; any failure stops pagination."""
    try:
        return dump1(u, commits,csvwriter)
    except Exception as e:
        # Deliberate catch-all: a bad page (rate limit, commit without an
        # author, network error) ends the dump instead of crashing.
        print(e)
        print("Contact TA")
        return False
def launchDump(pro_name,csvwriter):
    """Walk the commit pages of GitHub repository *pro_name* (owner/repo)
    until an empty page or an error stops the dump."""
    page = 1
    commits = dict()
    while(True):
        doNext = dump('https://api.github.com/repos/' + pro_name + '/commits?page=' + str(page), commits,csvwriter)
        #print("page "+ str(page))
        page += 1
        if not doNext : break
# Dump commit rows for four projects listed (one "owner/repo" per line) in
# ../proname.info, writing projN.csv for N in 0..3.
# Refactor: the original repeated the open/truncate/dump/close sequence four
# times verbatim; the loop below is behaviourally identical.
pf = open("../proname.info","r")
for proj_index in range(4):
    out_file = open('proj%d.csv' % proj_index, 'wb')
    out_file.truncate()
    writer = csv.writer(out_file)
    launchDump(pf.readline().strip('\n'), writer)
    print("===============END OF THE PROJECT%d========================" % proj_index)
    out_file.close()
pf.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Small demo of Python's arithmetic operators and len().
print(2+4)
print(5**2) # exponentiation: 5 to the power of 2
print(5*7 - 9*1)
print(4/2)  # true division always yields a float
print(5/2)
print(5//2) # floor division: drops the fractional part
print(7 % 3) # modulo: remainder of 7 divided by 3
print(1+1 +(2*5))
print(len("Linux")) # number of characters in the string
nilq/baby-python
|
python
|
# SPDX-FileCopyrightText: 2020 Jeff Epler for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import os
# First, just write the file 'hello.txt' to the card
with open("/sd/hello.txt", "w") as f:
    print("hello world", file=f)
print()
print("SD card I/O benchmarks")
# Test read and write speed in several scenarios:
#  * 512 or 4096 bytes at a time
#  * Writing 1 time or 16 times
# First write the content to the SD card, then read it back, reporting the
# time taken.
for sz in 512, 4096:
    # Fill the buffer with a fixed pattern so writes are deterministic.
    b = bytearray(sz)
    for i in range(sz):
        b[i] = 0xaa
    for n in (1, 16):
        with open("/sd/hello.bin", "wb") as f:
            t0 = time.monotonic_ns()
            for i in range(n):
                f.write(b)
            t1 = time.monotonic_ns()
            dt = (t1-t0) / 1e9
            print(f"write {len(b)} x {n} in {dt}s {n * len(b) / dt / 1000:.1f}Kb/s")
        with open("/sd/hello.bin", "rb") as f:
            t0 = time.monotonic_ns()
            for i in range(n):
                # readinto reuses the buffer instead of allocating per read.
                f.readinto(b)
            t1 = time.monotonic_ns()
            dt = (t1-t0) / 1e9
            print(f"read {len(b)} x {n} in {dt}s {n * len(b) / dt / 1000:.1f}Kb/s")
print()
# Test "logging" speed and report the time to write each line.
# Note that in this test the file is not closed or flushed after each
# line, so in the event of power loss some lines would be lost.  However,
# it allows much more frequent logging overall.
#
# If keeping data integrity is your highest concern, follow the logging
# example, not this logging benchmark!
print("logging test")
with open("/sd/log.txt", "wt") as logfile:
    t0 = time.monotonic_ns()
    for i in range(10000):
        t1 = time.monotonic_ns()
        dt = (t1-t0) / 1e9
        print(f"Line {i}, {dt:2f}s elapsed", file=logfile)
t1 = time.monotonic_ns()
dt = (t1-t0) / 1e9
print(f"Logged 10000 lines in {dt} seconds, {dt*100:.0f}us/line")
# stat()[6] is st_size: total bytes written to the log file.
sz = os.stat('/sd/log.txt')[6]
print(f"{sz} bytes written, {sz/dt/1000:.1f}Kb/s")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
# Key -> [angular.z, linear.x] velocity pairs: w/x drive forward/back,
# a/d turn, s stops.
key_mapping = {'w': [0, 1], 'x': [0, -1],
               'a': [-1, 0], 'd': [1, 0],
               's': [0, 0]}
g_last_twist = None  # most recent Twist; republished continuously by the main loop
def keys_cb(msg, twist_pub):
    """Translate a one-character key message into the shared Twist and publish it."""
    global g_last_twist
    if len(msg.data) == 0 or not msg.data[0] in key_mapping:
        return # unknown key
    vels = key_mapping[msg.data[0]]
    g_last_twist.angular.z = vels[0]
    g_last_twist.linear.x = vels[1]
    twist_pub.publish(g_last_twist)
if __name__ == '__main__':
rospy.init_node('keys_to_twist')
twist_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
rospy.Subscriber('keys', String, keys_cb, twist_pub)
rate = rospy.Rate(10)
g_last_twist = Twist() # initializes to zero
while not rospy.is_shutdown():
twist_pub.publish(g_last_twist)
rate.sleep()
|
nilq/baby-python
|
python
|
import abc
import json
import logging
import os
import numpy as np
import tensorflow as tf
def zip_weights(model, ckpt, variables_mapping, self_weight_names, **kwargs):
    """Pair the model's trainable weights with their values from a checkpoint.

    Args:
        model: a Keras model whose trainable weights should be filled.
        ckpt: path/prefix of the TensorFlow checkpoint to read.
        variables_mapping: dict mapping model weight name -> checkpoint variable name.
        self_weight_names: collection of model weight names eligible for loading.
        **kwargs: `verbose` (default True) logs each mapped weight.

    Returns:
        A zip iterator of (weight, value) pairs. Weights with no entry in
        `variables_mapping` are skipped with a warning.
    """
    weights, values = [], []
    used_weights = [w for w in model.trainable_weights if w.name in self_weight_names]
    for w in used_weights:
        var = variables_mapping.get(w.name, None)
        if var is None:
            logging.warning("Model weight: %s not collected in weights mapping.", w.name)
            continue
        v = tf.train.load_variable(ckpt, var)
        # Transpose for this one kernel — presumably the checkpoint stores it
        # with swapped dimensions. TODO(review): confirm against the checkpoint.
        if w.name == "bert/nsp/dense/kernel:0":
            v = v.T
        weights.append(w)
        values.append(v)
        if kwargs.get("verbose", True):
            logging.info("Load weight: {:60s} <-- {}".format(w.name, variables_mapping[w.name]))
    mapped_values = zip(weights, values)
    return mapped_values
def parse_pretrained_model_files(pretrained_model_dir):
    """Locate the config, checkpoint and vocab files of a pretrained model.

    Scans `pretrained_model_dir` for a ``*config*.json`` file, a file whose
    name contains ``vocab``, and a file whose name contains ``ckpt`` (the
    checkpoint prefix is the file name with its last extension stripped).

    Returns:
        A ``(config_file, ckpt, vocab)`` tuple of absolute paths; entries are
        ``None`` when not found or when the directory does not exist.
    """
    config_file = ckpt = vocab = None
    pretrained_model_dir = os.path.abspath(pretrained_model_dir)
    if not os.path.exists(pretrained_model_dir):
        logging.info("pretrain model dir: {} is not exists.".format(pretrained_model_dir))
        return config_file, ckpt, vocab
    for entry in os.listdir(pretrained_model_dir):
        name = str(entry)
        if "config" in name and name.endswith(".json"):
            config_file = os.path.join(pretrained_model_dir, entry)
        if "vocab" in name:
            vocab = os.path.join(pretrained_model_dir, entry)
        if "ckpt" in name:
            # checkpoint prefix: drop the trailing extension (.index, .meta, ...)
            stem = ".".join(name.split(".")[:-1])
            ckpt = os.path.join(pretrained_model_dir, stem)
    return config_file, ckpt, vocab
class AbstractAdapter(abc.ABC):
    """Interface for model-weights adapters.

    Concrete adapters translate a pretrained checkpoint's configuration and
    variables into the format expected by a local model.
    """

    @abc.abstractmethod
    def adapte_config(self, model_path, **kwargs):
        """Parse the pretrained model's config into a model-config dict."""
        raise NotImplementedError()

    @abc.abstractmethod
    def adapte_weights(self, model, model_config, model_path, **kwargs):
        """Load pretrained weights from ``model_path`` into ``model``."""
        raise NotImplementedError()
class BaseAdapter(AbstractAdapter):
    """Base adapter for pretrained models.

    Implements the generic loading flow: collect a
    {model weight name -> checkpoint variable name} mapping from subclass
    hooks, optionally skip selected embedding/pooler weights, batch-assign
    the checkpoint values, then verify the assignment.
    """

    def __init__(
        self,
        use_functional_api=True,
        with_mlm=False,
        with_nsp=False,
        with_sop=False,
        skip_token_embedding=False,
        skip_position_embedding=False,
        skip_segment_embedding=False,
        skip_embedding_layernorm=False,
        skip_pooler=False,
        check_weights=True,
        verbose=True,
        **kwargs
    ):
        # use_functional_api changes how weight-name prefixes are built
        # (see get_backbone_prefix in subclasses).
        self.use_functional_api = use_functional_api
        # which auxiliary heads (MLM / NSP / SOP) also get weights loaded
        self.with_mlm = with_mlm
        self.with_nsp = with_nsp
        self.with_sop = with_sop
        self.check_weights = check_weights
        self.verbose = verbose
        self.model_files = None
        self._pretrained_weights_map = {}
        self.weights_to_skip = set()
        # skip weights
        self.skip_token_embedding = skip_token_embedding
        self.skip_position_embedding = skip_position_embedding
        self.skip_segment_embedding = skip_segment_embedding
        self.skip_embedding_layernorm = skip_embedding_layernorm
        self.skip_pooler = skip_pooler
        logging.info(
            "Adapter skipping config: %s",
            json.dumps(
                {
                    "skip_token_embedding": self.skip_token_embedding,
                    "skip_position_embedding": self.skip_position_embedding,
                    "skip_segment_embedding": self.skip_segment_embedding,
                    "skip_embedding_layernorm": self.skip_embedding_layernorm,
                    "skip_pooler": self.skip_pooler,
                }
            ),
        )

    def _parse_files(self, model_path, **kwargs):
        """Resolve the config/checkpoint/vocab file paths under `model_path`."""
        config_file, ckpt, vocab = parse_pretrained_model_files(model_path)
        return {
            "config_file": config_file,
            "ckpt": ckpt,
            "vocab_file": vocab,
        }

    def _read_pretrained_weights(self, model_path, **kwargs):
        """Load every variable in the checkpoint into a {name: ndarray} dict."""
        if self.model_files is None:
            self.model_files = self._parse_files(model_path, **kwargs)
        ckpt = self.model_files["ckpt"]
        ckpt_weight_names = [w for (w, _) in tf.train.list_variables(ckpt)]
        ckpt_weights_map = {w: tf.train.load_variable(ckpt, w) for w in ckpt_weight_names}
        return ckpt_weights_map

    def adapte_weights(self, model, model_config, model_path, **kwargs):
        """Load all requested checkpoint weights into `model` in one batch."""
        self._pretrained_weights_map = self._read_pretrained_weights(model_path, **kwargs)
        weights_mapping = {}
        bert_weights_mapping = self._adapte_backbone_weights(model, model_config, **kwargs)
        weights_mapping.update(bert_weights_mapping)
        if self.with_mlm:
            mlm_weights = self._adapte_mlm_weights(model, model_config, **kwargs)
            weights_mapping.update(mlm_weights)
        if self.with_nsp:
            nsp_weights = self._adapte_nsp_weights(model, model_config, **kwargs)
            weights_mapping.update(nsp_weights)
        if self.with_sop:
            sop_weights = self._adapte_sop_weights(model, model_config, **kwargs)
            weights_mapping.update(sop_weights)
        # skip weights
        self._skipping_weights(model, **kwargs)
        # report checkpoint variables that were never mapped (informational)
        take_values = set(weights_mapping.values())
        for k in self._pretrained_weights_map.keys():
            if k not in take_values:
                logging.info("pretrained weight: {} not used.".format(k))
        zipping_weights, zipping_values = self._zip_weights(model, model_config, weights_mapping, **kwargs)
        tf.keras.backend.batch_set_value(zip(zipping_weights, zipping_values))
        # check weights
        self._check_weights(model, zipping_weights, zipping_values, weights_mapping, **kwargs)

    @abc.abstractmethod
    def _adapte_backbone_weights(self, model, model_config, **kwargs):
        """Return the name mapping for the backbone (encoder) weights."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _adapte_mlm_weights(self, model, model_config, **kwargs):
        """Return the name mapping for the MLM head weights."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _adapte_nsp_weights(self, model, model_config, **kwargs):
        """Return the name mapping for the NSP head weights."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _adapte_sop_weights(self, model, model_config, **kwargs):
        """Return the name mapping for the SOP head weights."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _zip_weights(self, model, model_config, weights_mapping, **kwargs):
        """Return (weights, values) lists ready for batch assignment."""
        raise NotImplementedError()

    @abc.abstractmethod
    def get_backbone_prefix(self, model):
        """Return the variable-name prefix of the model's backbone."""
        raise NotImplementedError()

    def _skipping_weights(self, model, **kwargs):
        """Record the weight names that must NOT be loaded, per the skip flags."""
        backbone_prefix = self.get_backbone_prefix(model)

        def _skip(w):
            self.weights_to_skip.add(w)
            logging.info("Weights will be skipped to load: %s", w)

        if self.skip_token_embedding:
            _skip("{}/embeddings/word_embeddings:0".format(backbone_prefix))
        if self.skip_position_embedding:
            _skip("{}/embeddings/position_embeddings:0".format(backbone_prefix))
        if self.skip_segment_embedding:
            _skip("{}/embeddings/token_type_embeddings:0".format(backbone_prefix))
        if self.skip_embedding_layernorm:
            _skip("{}/embeddings/LayerNorm/gamma:0".format(backbone_prefix))
            _skip("{}/embeddings/LayerNorm/beta:0".format(backbone_prefix))
        if self.skip_pooler:
            _skip("{}/pooler/dense/kernel:0".format(backbone_prefix))
            _skip("{}/pooler/dense/bias:0".format(backbone_prefix))

    def _check_weights(self, model, zipping_weights, zipping_values, weights_mapping, **kwargs):
        """Verify each assigned weight still matches its checkpoint value."""
        if not self.check_weights:
            logging.info("Skipped to check weights due to option `check_weights` set to `False`")
            return
        for k, v in zip(zipping_weights, zipping_values):
            vv = self._pretrained_weights_map[weights_mapping[k.name]]
            try:
                assert np.allclose(v, vv)
            except Exception as e:
                # Log the mismatch but keep checking the remaining weights.
                logging.warning("{} & {} not close!".format(k, weights_mapping[k.name]))
                logging.warning("{} -> \n {}".format(k, v))
                logging.warning("{} -> \n {}".format(weights_mapping[k.name], vv))
                logging.warning(e)
                logging.warning("=" * 80)
class AbstractBertAdapter(BaseAdapter):
    """Abstract adapter for BERT-style checkpoints."""

    def get_backbone_prefix(self, model):
        """Return the variable-name prefix of the BERT backbone."""
        if self.use_functional_api:
            return model.bert_model.name
        return model.name + "/" + model.bert_model.name

    def adapte_config(self, model_path, **kwargs):
        """Translate the checkpoint's JSON config into our model-config dict."""
        if self.model_files is None:
            self.model_files = self._parse_files(model_path, **kwargs)
        with open(self.model_files["config_file"], mode="rt", encoding="utf8") as fin:
            config = json.load(fin)
        # checkpoint-config key -> model-config key (order preserved)
        key_map = {
            "vocab_size": "vocab_size",
            "hidden_act": "activation",
            "max_position_embeddings": "max_positions",
            "hidden_size": "hidden_size",
            "type_vocab_size": "type_vocab_size",
            "intermediate_size": "intermediate_size",
            "hidden_dropout_prob": "hidden_dropout_rate",
            "attention_probs_dropout_prob": "attention_dropout_rate",
            "initializer_range": "initializer_range",
            "num_hidden_layers": "num_layers",
            "num_attention_heads": "num_attention_heads",
        }
        return {ours: config[theirs] for theirs, ours in key_map.items()}
class AbstractAlbertAdapter(BaseAdapter):
    """Abstract adapter for ALBERT-style checkpoints."""

    def get_backbone_prefix(self, model):
        """Return the variable-name prefix of the ALBERT backbone."""
        if self.use_functional_api:
            return model.albert_model.name
        return model.name + "/" + model.albert_model.name

    def adapte_config(self, model_path, **kwargs):
        """Translate the checkpoint's JSON config into our model-config dict."""
        if self.model_files is None:
            self.model_files = self._parse_files(model_path, **kwargs)
        with open(self.model_files["config_file"], mode="rt", encoding="utf8") as fin:
            config = json.load(fin)
        # checkpoint-config key -> model-config key (order preserved)
        key_map = {
            "vocab_size": "vocab_size",
            "max_position_embeddings": "max_positions",
            "embedding_size": "embedding_size",
            "type_vocab_size": "type_vocab_size",
            "num_hidden_layers": "num_layers",
            "num_hidden_groups": "num_groups",
            "inner_group_num": "num_layers_each_group",
            "hidden_size": "hidden_size",
            "num_attention_heads": "num_attention_heads",
            "intermediate_size": "intermediate_size",
            "hidden_act": "activation",
            "hidden_dropout_prob": "hidden_dropout_rate",
            "attention_probs_dropout_prob": "attention_dropout_rate",
            "initializer_range": "initializer_range",
        }
        return {ours: config[theirs] for theirs, ours in key_map.items()}
|
nilq/baby-python
|
python
|
import tensorflow as tf
import value_fns
class ValueFnTests(tf.test.TestCase):
    """Tests for value_fns (TF1-style graph tests)."""

    def test_label_attention_fn(self):
        # With scores this peaked (+/-10), the attention over label embeddings
        # effectively selects the argmax label's embedding — or the mean of
        # several embeddings when multiple labels tie (third token, batch 0:
        # mean of 0.1/0.3/0.5 rows = 0.3). Presumably softmax-weighted;
        # see value_fns.label_attention for the exact semantics.
        with self.test_session():
            mode = tf.estimator.ModeKeys.TRAIN
            # num_labels x label_embedding_dim
            label_embeddings = tf.constant([[0.1, 0.1, 0.1, 0.1],
                                            [0.3, 0.3, 0.3, 0.3],
                                            [0.5, 0.5, 0.5, 0.5],
                                            [10, 10, 10, 10],
                                            [1.0, 1.0, 1.0, 1.0]])
            # batch_size x batch_seq_len x num_labels
            label_scores = tf.constant([[[-10., 10., -10., -10., -10.],
                                         [-10., -10., -10., 10., -10.],
                                         [10., 10., 10., -10., -10.]],
                                        [[-10., -10., 10., -10., -10.],
                                         [10., -10., -10., -10., -10.],
                                         [-10., 10., -10., -10., -10.]]])
            # batch_size x batch_seq_len x label_embedding_dim
            expected = tf.constant([[[0.3, 0.3, 0.3, 0.3],
                                     [10., 10., 10., 10.],
                                     [0.3, 0.3, 0.3, 0.3]],
                                    [[0.5, 0.5, 0.5, 0.5],
                                     [0.1, 0.1, 0.1, 0.1],
                                     [0.3, 0.3, 0.3, 0.3]]])
            result = value_fns.label_attention(mode, label_scores, label_scores, label_embeddings)
            self.assertAllCloseAccordingToType(result.eval(), expected)


if __name__ == '__main__':
    tf.test.main()
|
nilq/baby-python
|
python
|
"""
fabfile module containing application-specific tasks.
"""
from fabric.api import task
from fabric.colors import cyan
from fabfile.utils import do
from fabfile.virtualenv import venv_path
@task
def build():
    """
    Run application build tasks.
    """
    # Generate static assets. Note that we always build assets with the
    # production config because dev will never point to compiled files.
    print(cyan('\nBuilding static assets...'))
    do('mkdir -p app/static/assets/css')
    do('mkdir -p app/static/assets/js')
    do('export FLASK_CONFIG=config/production.py && %s/bin/python manage.py build' % venv_path)
# NOTE(review): unlike build(), none of the functions below carry the @task
# decorator — confirm whether they are meant to be fabric-invocable.

def run():
    """Start app in debug mode (for development)."""
    do('export FLASK_CONFIG=config/dev.py && %s/bin/python manage.py runserver' % venv_path)


def test():
    """Run unit tests"""
    print(cyan('\nRunning tests...'))
    do('FLASK_CONFIG=config/test.py %s/bin/nosetests --exclude-dir-file=\'.noseexclude\' --with-yanc --with-spec --spec-color -q' % venv_path)


def coverage():
    """Generate test coverage report"""
    do('FLASK_CONFIG=config/test.py %s/bin/nosetests --exclude-dir-file=\'.noseexclude\' --with-cov --cov=app --cov-report=html' % venv_path)


def start():
    """Start app using init."""
    do('sudo start gunicorn')


def stop():
    """Stop app using init."""
    do('sudo stop gunicorn')


def restart():
    """Restart app using init."""
    do('sudo restart gunicorn')
|
nilq/baby-python
|
python
|
"""
OpenOpt SOCP example
for the problem http://openopt.org/images/2/28/SOCP.png
"""
from numpy import *
from openopt import SOCP
f = array([-2, 1, 5])
C0 = mat('-13 3 5; -12 12 -6')
d0 = [-3, -2]
q0 = array([-12, -6, 5])
s0 = -12
C1 = mat('-3 6 2; 1 9 2; -1 -19 3')
d1 = [0, 3, -42]
q1 = array([-3, 6, -10])
s1 = 27
p = SOCP(f, C=[C0, C1], d=[d0, d1], q=[q0, q1], s=[s0, s1])
# you could add lb <= x <= ub, Ax <= b, Aeq x = beq constraints
# via p = SOCP(f, ..., A=A, b=b, Aeq=Aeq, beq=beq,lb=lb, ub=ub)
r = p.solve('cvxopt_socp')
x_opt, f_opt = r.xf, r.ff
print(' f_opt: %f x_opt: %s' % (f_opt, x_opt))
# f_opt: -38.346368 x_opt: [-5.01428121 -5.76680444 -8.52162517]
|
nilq/baby-python
|
python
|
from splinter import Browser

b = Browser()
b.visit('http://selenium.dunossauro.live/aula_09_a.html')
# NOTE(review): is_text_not_present presumably returns True while the text is
# absent, so the click only happens before loading finishes — confirm intent.
if b.is_text_not_present('Carregamento concluído'):
    b.find_by_text('Barrinha top').click()
# Wait up to 10s for the text to (re)appear and print the result.
print(b.is_text_present('Barrinha top', wait_time=10))
|
nilq/baby-python
|
python
|
""" Shows how to combine surface terrain with surface properties
"""
from __future__ import print_function
from __future__ import division
import matplotlib as mpl
mpl.interactive(False)
import sys
import numpy as np
import matplotlib.pyplot as plt
from plotting import make_test_data
from plotting import draw
from hillshade import hill_shade, no_blending, INTENSITY_CMAP
from intensity import DEF_AZIMUTH, DEF_ELEVATION
def main():
    """Demonstrate combining hill-shaded terrain with surface properties.

    Renders a 2x2 grid: terrain height, shaded terrain, raw surface data,
    and surface data drawn over the shaded terrain. Pass ``--autoscale`` on
    the command line to auto-scale the data legend instead of clipping it
    to [0, 10].
    """
    fig, ax = plt.subplots(2, 2, figsize=(10, 10))
    fig.tight_layout()

    data = make_test_data('circles')
    terrain = 10 * make_test_data('hills', noise_factor=0.05)
    assert terrain.shape == data.shape, "{} != {}".format(terrain.shape, data.shape)

    print("min data: {}".format(np.min(data)))
    print("max data: {}".format(np.max(data)))

    # Some color maps to try.
    #cmap = plt.cm.get_cmap('bwr')
    #cmap = plt.cm.get_cmap('CMRmap')
    #cmap = plt.cm.get_cmap('rainbow')
    #cmap = plt.cm.get_cmap('cool_r')
    cmap = plt.cm.get_cmap('Set1')

    # Optionally set the over and under flow colors.
    #cmap.set_bad('yellow')
    #cmap.set_over('cyan')
    #cmap.set_under('magenta')

    # BUG FIX: the original tested `['--autoscale'] in sys.argv`, which
    # compares a *list* against each argv string and is therefore always
    # False; the flag could never take effect.
    if '--autoscale' in sys.argv:
        print("Auto scale legend")
        dnorm = mpl.colors.Normalize()
    else:
        dmin = 0
        dmax = 10
        print("clip legend at ({}, {})".format(dmin, dmax))
        dnorm = mpl.colors.Normalize(vmin=dmin, vmax=dmax)

    # Don't auto scale the intensities, it gives the wrong impression
    inorm = mpl.colors.Normalize(vmin=0.0, vmax=1.0)

    azimuth = DEF_AZIMUTH
    elevation = DEF_ELEVATION

    draw(ax[0, 0], cmap=plt.cm.gist_earth, title='Terrain height',
         image_data=terrain)
    draw(ax[0, 1], cmap=INTENSITY_CMAP, norm=inorm,
         title='Shaded terrain (azim = {}, elev = {})'.format(azimuth, elevation),
         image_data=hill_shade(terrain, blend_function=no_blending,
                               azimuth=azimuth, elevation=elevation))
    draw(ax[1, 0], cmap=cmap, norm=dnorm, title='Surface properties',
         image_data=data)
    draw(ax[1, 1], cmap=cmap, norm=dnorm, title='Shaded terrain with surface properties',
         image_data=hill_shade(data, terrain=terrain,
                               azimuth=azimuth, elevation=elevation,
                               cmap=cmap, norm=dnorm))

    plt.show()


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Now make a simple example using the custom projection.
import pdb
import sys
import os
import pkg_resources
pkg_resources.require('matplotlib==1.4.0')
import datetime
from dateutil.relativedelta import relativedelta
import re
import math
from matplotlib.ticker import ScalarFormatter, MultipleLocator
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from StringIO import StringIO
import numpy as np
from numpy import load
# Exception handling, with line number and stuff
import linecache
import sys
def PrintException():
    """Print file, line number, source line and message of the active exception."""
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)  # refresh cache in case the file changed on disk
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
import imp
imp.load_source('SoundingRoutines', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Sounding_Routines.py')
imp.load_source('TephigramPlot', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Tephigram_Functions.py')
from TephigramPlot import *
from SoundingRoutines import *
imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py')
from GeogFuncs import *
pmin=200.  # top of the plotted sounding (hPa); used below in set_ylim
# WMO station numbers for the cross-section
station_list_cs=[42182, 43003, 43014, 42867, 43371, 43353, 43285, 43192, 43150, 42339, 40990, 40948]
#station_list_cs=[43003]
# climatology period and binning interval
date_min=datetime.datetime(1960,5,1,0,0,0)
date_max=datetime.datetime(2014,10,1,0,0,0)
delta = relativedelta(weeks=+1)
# variable name -> index in the loaded data array (axis 1)
variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5,
               'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11,
               'theta_e':12, 'theta_e_sat':13, 'theta_e_minus_theta_e_sat':14}
# variable name -> index for the single-level ("line") variables
variable_list_line={'lcl_temp': 0, 'lcl_vpt':1, 'pbl_pressure':2, 'surface_pressure':3, 'T_eq_0':4}
def variable_name_index_match(variable, variable_list):
    """Return the array index registered for `variable` in `variable_list`.

    The startswith/endswith pair effectively requires an exact key match.
    NOTE(review): if no key matches, `arr_index_var` is unbound and the
    return raises NameError — confirm the intended behaviour.
    """
    for key, value in variable_list.iteritems(): # iter on both keys and values
        if key.startswith('%s' % variable) and key.endswith('%s' % variable):
            arr_index_var=value
    return arr_index_var
# Parse the data.
# For each station: load the weekly-binned climatological sounding averages,
# then draw one skew-T/tephigram per weekly bin. Failures for individual bins
# are caught and logged (via PrintException) so the remaining bins still plot.
for stat in station_list_cs:
    station_name,la,lo, st_height=StationInfoSearch(stat)
    load_file = load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Cross_Section_'
                     'IND_SOUNDING_INTERP_MEAN_Climat_%s_%s_%s_%s.npz'
                     % (date_min.strftime('%Y%m%d'), date_max.strftime('%Y%m%d'), delta, stat))
    data=load_file['date_bin_mean_all_dates_one_station']
    dates=load_file['dates_for_plotting']
    for bin in range(data.shape[0]):
        try:
            # pressure Pa -> hPa; temperature K -> deg C; dewpoint from depression
            p=data[bin,0,:]/100
            T=data[bin,1,:]-273.15
            Td=T-data[bin,2,:]
            h=data[bin,15,:]
            da=dates[bin]
            #u_wind,v_wind = u_v_winds(data[bin,3,:], data[bin,4,:])
            u_wind,v_wind = data[bin,-2,:], data[bin,-1,:]
            # Create a new figure. The dimensions here give a good aspect ratio
            fig = plt.figure(figsize=(10, 8), frameon=False)
            #fig.patch.set_visible(False)
            tephigram_plot_height=0.85
            tephigram_plot_bottom=.085
            ax = fig.add_axes([.085,tephigram_plot_bottom,.65,tephigram_plot_height], projection='skewx', frameon=False, axisbg='w')
            ax.set_yscale('log')
            plt.grid(True)
            # round the plot limits to tidy values
            tmax=math.ceil(nanmax(T)/10)*10
            tmin=math.floor(nanmin(Td[p>400])/10)*10
            pmax=math.ceil(nanmax(p)/50)*50
            P=linspace(pmax,pmin,37)
            # mixing-ratio isopleths, dry and moist adiabats for the background
            w = array([0.0001,0.0004,0.001, 0.002, 0.004, 0.007, 0.01, 0.016, 0.024, 0.032, 0.064, 0.128])
            ax.add_mixratio_isopleths(w,linspace(pmax, 700., 37),color='m',ls='-',alpha=.5,lw=0.5)
            ax.add_dry_adiabats(linspace(-40,40,9),P,color='k',ls='-',alpha=.5,lw=0.8)
            ax.add_moist_adiabats(linspace(-40,40,18),P,color='k',ls='--',alpha=.5,lw=0.8, do_labels=False)
            ax.other_housekeeping(pmax, pmin, 40,-40)
            # side axes: wind barbs and (currently unused) text box
            wbax = fig.add_axes([0.75,tephigram_plot_bottom,0.12,tephigram_plot_height],frameon=False, sharey=ax, label='barbs')
            ax_text_box = fig.add_axes([0.85,0.085,.12,tephigram_plot_height], frameon=False, axisbg='w')
            # Plot the data using normal plotting functions, in this case using semilogy
            ax.semilogy(T, p, 'r', linewidth=2)
            ax.semilogy(Td, p, 'r',linewidth=2)
            # (disabled) sounding-indices table: row_labels/table_vals built from
            # ShowalterIndex, LiftedIndex, KIndex, CrossTotalsIndex,
            # VerticalTotalsIndex, TotalTotalsIndex and CapeCinPBLInput
            # (with pbl_pressure looked up via variable_name_index_match);
            # the rendering loop further below is disabled as well.
            # Wind barbs
            # NOTE(review): max(len(u_wind)) applies max() to a scalar — probably
            # len(u_wind) alone was intended; confirm before re-enabling edits here.
            barbs_idx=np.logspace(np.log10(10),np.log10(max(len(u_wind))),num=32).astype(int)
            wbax.set_yscale('log')
            wbax.xaxis.set_ticks([],[])
            wbax.yaxis.grid(True,ls='-',color='y',lw=0.5)
            wbax.set_xlim(-1.5,1.5)
            wbax.get_yaxis().set_visible(False)
            wbax.set_ylim(pmax+100,pmin)
            wbax.barbs((zeros(p.shape))[barbs_idx-1],p[barbs_idx-1], u_wind[barbs_idx-1], v_wind[barbs_idx-1])
            # Disables the log-formatting that comes with semilogy
            ax.yaxis.set_major_formatter(ScalarFormatter())
            ax.set_yticks(linspace(100,1000,10))
            ax.set_ylim(pmax,pmin)
            ax.set_xlim(-40.,40.)
            ax.xaxis.set_ticks([],[])
            ax_text_box.xaxis.set_visible(False)
            ax_text_box.yaxis.set_visible(False)
            for tick in wbax.yaxis.get_major_ticks():
                # tick.label1On = False
                pass
            #wbax.get_yaxis().set_tick_params(size=0,color='y')
            # (disabled) right-justified rendering of the row_labels/table_vals
            # pairs into ax_text_box, stepping y_loc down by 0.04 per row.
            fig.text(.02,0.965, '%s %s' %(stat, station_name), size=12, horizontalalignment='left')
            fig.text(.02,0.035, 'Climatology - Week beg. %s ' %(da.strftime('%m-%d')), size=12, horizontalalignment='left')
            #plt.show()
            plt.savefig('/nfs/a90/eepdw/Figures/Radiosonde/Tephigrams/Weekly_Climatology/Weekly_Climatology_%s_%s_%s_Skew_T.png' % (station_name.replace('/','_').replace(' ', '_'), stat, da.strftime('%Y%m%d')))
            plt.close()
        except Exception:
            print PrintException()
|
nilq/baby-python
|
python
|
"""
Stores all the view logic for deckr.
"""
# pylint can't detect the constructor for a Django
# form. So we disable the no-value-for-parameter here.
# pylint: disable=no-value-for-parameter
from os.path import join as pjoin
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.template import Template
from deckr.forms import (CreateGameRoomForm, PlayerForm,
UploadGameDefinitionForm)
from deckr.models import GameDefinition, GameRoom, Player
from deckr.sockets import ChatNamespace # pylint: disable=unused-import
from deckr.utils import process_uploaded_file
from engine import game_runner
from zipfile import BadZipfile, LargeZipFile
def index(request):
    """Render the landing page; no extra template context is needed."""
    return render(request, "deckr/index.html", {})
def game_room_staging_area(request, game_room_id):
    """
    Present the staging page for a game room and handle player sign-up.

    On a valid POST, register the player with the engine and redirect into
    the game room; engine failures are surfaced as form errors.
    """
    room = get_object_or_404(GameRoom, pk=game_room_id)
    form = PlayerForm(request.POST) if request.method == "POST" else PlayerForm()
    if request.method == "POST" and form.is_valid():
        player = form.save(commit=False)
        player.game_room = room
        try:
            player.player_id = game_runner.add_player(room.room_id)
            player.save()
            url = (reverse("deckr.game_room", args=(game_room_id,)) +
                   "?player_id=" + str(player.pk))
            return redirect(url)
        except ValueError as exception:
            # Engine rejected the player; report it on the nickname field.
            form.add_error('nickname', exception.args[0])
    return render(request,
                  "deckr/game_room_staging_area.html",
                  {'form': form,
                   'game_room': room})
def game_room(request, game_room_id):
    """
    Present the actual game room page for a given game id.

    Reads the game definition's layout/JS/CSS from disk and injects them
    into the game room template. `player_id` may be passed as a query
    parameter to identify the requesting player.
    """
    # Get Player info
    player_id = request.GET.get('player_id')
    if player_id is None:
        player = None
    else:
        player = get_object_or_404(Player, pk=player_id)
    game = get_object_or_404(GameRoom, pk=game_room_id)
    # Get GameDefinition info. Use context managers so the file handles are
    # closed promptly instead of leaking until garbage collection.
    definition_path = game.game_definition.path
    with open(pjoin(definition_path, 'layout.html')) as layout_file:
        layout = layout_file.read()
    with open(pjoin(definition_path, 'game.js')) as js_handle:
        js_file = js_handle.read()
    with open(pjoin(definition_path, 'game.css')) as css_handle:
        css_file = css_handle.read()
    sub_template = Template(layout)
    return render(request, "deckr/game_room.html",
                  {'sub_template': sub_template,
                   'game': game,
                   'game_js': js_file,
                   'game_css': css_file,
                   'player': player})
def upload_game_definition(request):
    """
    Present the upload form and, on a valid POST, register a new
    GameDefinition from the uploaded archive.

    Duplicate names and bad uploads (invalid content, corrupt or oversized
    zip files) are reported as form errors instead of propagating.
    """
    if request.method == "POST":
        form = UploadGameDefinitionForm(request.POST, request.FILES)
        if form.is_valid():
            game_name = form.cleaned_data['game_name']
            try:
                GameDefinition.objects.get(name=game_name)
                form.add_error('game_name', "Game Definition already exists")
            except GameDefinition.DoesNotExist:
                try:
                    path = process_uploaded_file(game_name,
                                                 request.FILES['file'])
                    # Create a new GameDefinition
                    GameDefinition.objects.create(name=game_name,
                                                  path=path)
                    # Return to the index
                    return redirect(reverse('deckr.index'))
                except (ValueError, BadZipfile, LargeZipFile) as exception:
                    # Bad upload: surface the message as a form error on the
                    # file field (previously three identical except blocks).
                    form.add_error('file', exception.args[0])
    else:
        form = UploadGameDefinitionForm()
    return render(request, "deckr/upload_game_definition.html", {'form': form})
def create_game_room(request):
    """
    Present a CreateGameRoomForm and, on a valid POST, create the engine
    game plus its web-side GameRoom, then send the user to staging.
    """
    if request.method != "POST":
        form = CreateGameRoomForm()
    else:
        form = CreateGameRoomForm(request.POST)
        if form.is_valid():
            game_def = form.cleaned_data['game_id']
            # Create the game inside the engine first ...
            engine_id = game_runner.create_game(game_def.path)
            # ... then create the matching GameRoom in the webapp.
            room = GameRoom.objects.create(room_id=engine_id,
                                           game_definition=game_def)
            # Redirect to the staging area for the room
            return redirect(
                reverse("deckr.game_room_staging_area", args=(room.pk,)))
    return render(request, "deckr/create_game_room.html",
                  {'form': form})
|
nilq/baby-python
|
python
|
from org.transcrypt.stubs.browser import *
import random
array = []
array_str = ""
def gen_random_int(number, seed):
    """Return a deterministic shuffle of the integers 0..number-1.

    Seeding the RNG first makes the permutation reproducible for a
    given (number, seed) pair.
    """
    random.seed(seed)
    values = list(range(number))
    random.shuffle(values)
    return values
def generate():
    """Generate the random array and display it in the "generate" div.

    Stores the shuffled integers in the global `array` and their string
    form (comma-separated, full-stop terminated) in `array_str`.
    """
    global array
    global array_str
    number = 10
    seed = 200
    # call gen_random_int() with the given number and seed
    array = gen_random_int(number, seed)
    # BUG FIX: the items are ints, so they must be str()-converted before
    # joining — ",".join(array) raises TypeError under CPython (it only
    # appeared to work via Transcrypt's JS coercion).
    array_str = ",".join(str(item) for item in array) + "."
    # Place the string into the HTML under the div with id "generate".
    document.getElementById("generate").innerHTML = array_str
def bubble_sort(array_sort):
    """Sort `array_sort` in place (ascending) with optimized bubble sort.

    After each pass, everything beyond the last swap position is already
    in its final place, so the scan range shrinks to that position.
    """
    upper = len(array_sort)
    while True:
        last_swap = 0
        for idx in range(1, upper):
            if array_sort[idx - 1] > array_sort[idx]:
                array_sort[idx - 1], array_sort[idx] = array_sort[idx], array_sort[idx - 1]
                last_swap = idx
        if last_swap == 0:
            break  # no swaps this pass: the list is sorted
        upper = last_swap
def sortnumber1():
    """Bubble-sort the module-level `array` in place and display it.

    Writes the comma-separated (full-stop terminated) result into the
    HTML element with id "sorted".
    """
    bubble_sort(array)
    # BUG FIX: items are ints, so str() them before joining — the original
    # ",".join(array) raises TypeError under CPython.
    array_str = ",".join(str(item) for item in array) + "."
    console.log("run", array)
    document.getElementById("sorted").innerHTML = array_str
def sortnumber2():
    ''' This function is used in Exercise 2.
    The function is called when the sort button is clicked.

    Reads the comma-separated numbers from the "string" text input,
    converts them to integers, sorts them with bubble_sort, and writes
    the result into the element with id "sorted".
    '''
    # The following line gets the value of the text input called "numbers".
    value = document.getElementsByName("string")[0].value
    if value == "":
        # Throw alert and stop if nothing is in the text input.
        # BUG FIX: the original fell through and referenced the unassigned
        # array_str, erroring out; return early instead.
        window.alert("Your textbox is empty")
        return
    # BUG FIX: convert the pieces to ints so the sort is numeric — the
    # original sorted strings lexicographically ("10" < "2"), contradicting
    # the docstring's "convert them to a list of numbers".
    numbers = [int(item) for item in value.split(",")]
    bubble_sort(numbers)
    array_str = ",".join(str(item) for item in numbers) + "."
    document.getElementById("sorted").innerHTML = array_str
|
nilq/baby-python
|
python
|
"""Mathematical utility functions (intended for internal purposes).
A lot of this is experimental and has a high probability of changing in the future.
"""
import functools
import itertools
import math
import operator
import numpy as np
__all__ = [
"argmax",
"chain_dot",
"clamp",
"dot",
"dotvecmat",
"matmul2d",
"minkowski_distance",
"norm",
"outer",
"prod",
"sigmoid",
"sign",
"sherman_morrison",
"softmax",
]
def sherman_morrison(A_inv: dict, u: dict, v: dict) -> dict:
    """Sherman–Morrison formula.

    Applies a rank-one update to the sparse inverse matrix `A_inv`,
    modifying it in place and returning it.

    Parameters
    ----------
    A_inv
        Sparse inverse matrix stored as a ``{(i, j): value}`` dict.
    u
    v
        Sparse vectors stored as ``{index: value}`` dicts.

    Examples
    --------

    >>> import pprint
    >>> from river import utils

    >>> A_inv = {
    ...     (0, 0): 0.2,
    ...     (1, 1): 1,
    ...     (2, 2): 1
    ... }
    >>> u = {0: 1, 1: 2, 2: 3}
    >>> v = {0: 4}

    >>> inv = sherman_morrison(A_inv, u, v)
    >>> pprint.pprint(inv)
    {(0, 0): 0.111111,
        (1, 0): -0.888888,
        (1, 1): 1,
        (2, 0): -1.333333,
        (2, 2): 1}

    References
    ----------
    [^1]: [Wikipedia article on the Sherman-Morrison formula](https://www.wikiwand.com/en/Sherman%E2%80%93Morrison_formula)s

    """
    den = 1 + dot(dotvecmat(u, A_inv), v)
    update = matmul2d(matmul2d(A_inv, outer(u, v)), A_inv)
    # BUG FIX (clarity): the loop variable used to shadow the parameter `v`;
    # renamed to `delta` so the parameter stays readable throughout.
    for key, delta in update.items():
        A_inv[key] = A_inv.get(key, 0) - delta / den
    return A_inv
def dotvecmat(x, A):
    """Vector-matrix product from the left, i.e. transpose(x)A.

    Both operands are sparse: `x` maps index -> value and `A` maps
    (row, col) -> value. Returns a dict mapping col -> value.

    >>> dotvecmat({0: 4, 1: 5}, {(0, 0): 0, (0, 1): 1, (1, 0): 2, (1, 1): 3})
    {0: 10.0, 1: 19.0}
    """
    result = {}
    for (row, col), coeff in A.items():
        if row not in x:
            continue  # missing vector entries are implicit zeros
        result[col] = result.get(col, 0.0) + x[row] * coeff
    return result
def matmul2d(A, B):
    """Multiply two sparse 2D matrices stored as ``{(i, j): value}`` dicts.

    An output cell (i, j) is produced whenever A holds (i, k) and B holds
    (k, j) for some shared inner index k; missing cells are implicit zeros.
    """
    C = {}
    for (i, inner_a), a_val in A.items():
        for (inner_b, j), b_val in B.items():
            if inner_a == inner_b:
                C[i, j] = C.get((i, j), 0.0) + a_val * b_val
    return C
def outer(u: dict, v: dict) -> dict:
    """Outer product of two sparse vectors.

    Returns a ``{(i, j): u[i] * v[j]}`` dict covering every key pair.
    """
    result = {}
    for ki, ui in u.items():
        for kj, vj in v.items():
            result[ki, kj] = ui * vj
    return result
def minkowski_distance(a: dict, b: dict, p: int):
    """Minkowski distance between two dict-encoded sparse vectors.

    Missing keys are treated as 0. Note that the final ``1/p`` root is *not*
    applied: with ``p=2`` this is the squared Euclidean distance. The result
    is still monotone in the true distance, which is all that
    nearest-neighbour ranking requires.

    Parameters
    ----------
    a
    b
    p
        Exponent; ``p=1`` gives the Manhattan distance.
    """
    all_keys = set([*a.keys(), *b.keys()])
    total = 0
    for key in all_keys:
        total += abs(a.get(key, 0.0) - b.get(key, 0.0)) ** p
    return total
def softmax(y_pred: dict):
    """Normalize a dict of scores into probabilities, in place.

    The maximum score is subtracted before exponentiation for numerical
    stability. The input dict is mutated and also returned.

    Parameters
    ----------
    y_pred
        ``{class: score}``; may be empty, in which case it is returned as-is.
    """
    if not y_pred:
        return y_pred
    highest = max(y_pred.values())
    exp_scores = {label: math.exp(score - highest) for label, score in y_pred.items()}
    normalizer = sum(exp_scores.values())
    for label in y_pred:
        y_pred[label] = exp_scores[label] / normalizer
    return y_pred
def prod(iterable):
    """Product of the elements of *iterable*; 1 for an empty iterable.

    Uses the C-implemented :func:`math.prod` (Python 3.8+) instead of the
    previous hand-rolled ``functools.reduce(operator.mul, iterable, 1)`` —
    same semantics (left fold with start value 1), less code, faster.

    Parameters
    ----------
    iterable
        Any iterable of multipliable values.
    """
    return math.prod(iterable)
def dot(x: dict, y: dict):
    """Dot product of two dict-encoded sparse vectors.

    Iterates over the smaller dict and looks each key up in the larger one,
    so the cost is O(min(len(x), len(y))). Keys absent from either vector
    contribute nothing.

    Examples
    --------
    >>> dot({'x0': 1, 'x1': 2}, {'x1': 21, 'x2': 3})
    42
    """
    smaller, larger = (x, y) if len(x) < len(y) else (y, x)
    return sum(value * larger[key] for key, value in smaller.items() if key in larger)
def chain_dot(*xs):
    """Dot product generalized to several dict-encoded vectors.

    For every key of the shortest vector, the corresponding entries of all
    vectors are multiplied together (missing keys count as 0) and the
    products are summed.

    Examples
    --------
    >>> x = {'x0': 1, 'x1': 2, 'x2': 1}
    >>> y = {'x1': 21, 'x2': 3}
    >>> z = {'x1': 2, 'x2': 1 / 3}
    >>> chain_dot(x, y, z)
    85.0
    """
    shortest = min(xs, key=len)
    total = 0
    for key in shortest:
        # Left-fold with start value 1, matching reduce(mul, ..., 1).
        term = 1
        for vec in xs:
            term *= vec.get(key, 0)
        total += term
    return total
def sigmoid(x: float):
    """Logistic sigmoid, ``1 / (1 + exp(-x))``.

    Inputs with ``|x| > 30`` are clamped to the asymptote to avoid computing
    huge exponentials. Fix: the clamped branches previously returned the
    ints ``0``/``1`` while the main branch returns a float; they now return
    ``0.0``/``1.0`` so the return type is consistently ``float``.

    Parameters
    ----------
    x
        Input value.
    """
    if x < -30:
        return 0.0
    if x > 30:
        return 1.0
    return 1 / (1 + math.exp(-x))
def clamp(x: float, minimum=0.0, maximum=1.0):
    """Clip *x* into ``[minimum, maximum]`` (a synonym of clipping).

    Evaluates exactly like ``max(min(x, maximum), minimum)``: the upper
    bound is applied first, then the lower bound.

    Parameters
    ----------
    x
    minimum
    maximum
    """
    upper_clipped = maximum if maximum < x else x
    return minimum if minimum > upper_clipped else upper_clipped
def norm(x: dict, order=None):
    """Norm of a dict's values, computed via ``numpy.linalg.norm``.

    Parameters
    ----------
    x
        ``{key: value}`` vector; only the values are used.
    order
        Forwarded as numpy's ``ord`` argument (``None`` means the L2 norm
        for vectors).
    """
    values = list(x.values())
    return np.linalg.norm(values, ord=order)
def sign(x: float):
    """Sign of *x*: ``-1`` if negative, ``1`` if positive, else ``0``.

    Parameters
    ----------
    x
    """
    if x < 0:
        return -1
    if x > 0:
        return 1
    return 0
def argmax(lst: list):
    """Index of the largest element of *lst* (first one wins on ties).

    Parameters
    ----------
    lst
        Non-empty list; raises ``ValueError`` when empty, like ``max``.
    """
    return max(enumerate(lst), key=lambda pair: pair[1])[0]
|
nilq/baby-python
|
python
|
from pymocap.color_terminal import ColorTerminal
from pymocap.event import Event
import struct, os
from datetime import datetime
class NatnetFile:
    """Reader/writer for binary NatNet frame recordings.

    On-disk layout, repeated once per frame:
      * 4 bytes -- struct ``'i'`` (native int): payload size in bytes
      * 4 bytes -- struct ``'f'`` (native float): frame timestamp in seconds
      * <size> bytes -- raw binary frame data

    Fixes applied: the two bare ``except:`` clauses around ``open()`` (which
    also swallowed KeyboardInterrupt/SystemExit) are narrowed to file-system
    errors, and ``== None`` checks are replaced with the idiomatic ``is None``.
    """

    def __init__(self, path=None, loop=True):
        # Target file path; a default is filled in lazily by startReading /
        # startWriting when falsy.
        self.path = path
        # When True, the reader wraps back to the start at end-of-file.
        self.loop = loop
        # file handles
        self.read_file = None
        self.write_file = None
        # last read frame info
        self.currentFrame = None
        self.currentFrameTime = None
        self.currentFrameIndex = -1
        # events; fired each time the reader wraps around at EOF
        self.loopEvent = Event()

    def __del__(self):
        self.stop()

    def startReading(self):
        """Open ``self.path`` for binary reading (bundled sample file if unset)."""
        self.stopReading()
        try:
            if not self.path:
                self.path = 'walk-198frames.binary.recording'
            self.read_file = open(self.path, 'rb')
            ColorTerminal().success("NatnetFile opened: %s" % self.path)
        except (IOError, OSError):
            # Narrowed from a bare `except:`; only file-system errors are
            # expected from open().
            ColorTerminal().fail("NatnetFile couldn't be opened: %s" % self.path)
            self.read_file = None

    def stopReading(self):
        """Close the read handle if it is open."""
        if self.read_file:
            self.read_file.close()
            self.read_file = None
            ColorTerminal().blue('NatnetFile closed')

    def startWriting(self):
        """Open ``self.path`` for binary writing (timestamped /tmp file if unset)."""
        self.stopWriting()
        try:
            if not self.path:
                self.path = '/tmp/natnet_' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.binary'
            self.write_file = open(self.path, 'wb')
            ColorTerminal().success("NatnetFile opened for writing: %s" % self.path)
        except (IOError, OSError):
            # See startReading: narrowed from a bare `except:`.
            ColorTerminal().fail("NatnetFile couldn't be opened for writing: %s" % self.path)
            self.write_file = None

    def stopWriting(self):
        """Close the write handle if it is open."""
        if self.write_file:
            self.write_file.close()
            self.write_file = None
            ColorTerminal().blue('NatnetFile closed')

    def stop(self):
        """Close both handles."""
        self.stopReading()
        self.stopWriting()

    def setLoop(self, loop):
        self.loop = loop

    def nextFrame(self):
        """Read the next frame; returns its raw bytes, or None at non-looping EOF."""
        bytecount = self._readFrameSize()  # int: bytes
        self.currentFrameTime = self._readFrameTime()  # float: seconds
        if bytecount is None or self.currentFrameTime is None:
            return None
        self.currentFrame = self.read_file.read(bytecount)
        self.currentFrameIndex += 1
        return self.currentFrame

    def _readFrameSize(self):
        """Read the 4-byte frame-size header; rewinds (if looping) or returns None at EOF."""
        # int is 4 bytes
        value = self.read_file.read(4)
        # end-of-file?
        if not value:
            if not self.loop:
                return None
            # reset file handle and frame bookkeeping
            self.read_file.seek(0)
            self.currentFrame = None
            self.currentFrameTime = None
            self.currentFrameIndex = -1
            # notify listeners that we wrapped around
            self.loopEvent(self)
            # try again from the start of the file
            return self._readFrameSize()
        # 'unpack' 4 binary bytes into an integer
        return struct.unpack('i', value)[0]

    def _readFrameTime(self):
        """Read the 4-byte timestamp header; returns None at EOF."""
        # float of 4 bytes
        value = self.read_file.read(4)
        # end-of-file?
        if not value:
            # TODO; raise format error?
            return None
        # 'unpack' 4 binary bytes into a float
        return struct.unpack('f', value)[0]

    def writeFrame(self, frameData, time=0.0):
        """Append one frame (size header, timestamp, payload) to the write file."""
        # frame format;
        # 4-bytes binary integer indicating the size of the (binary) frame data
        # 4-byte binary float indicating timestamp (in seconds) of the frame
        # followed by the binary frame data
        # [next frame]
        # write 4-byte binary integer; size of the frame data
        self.write_file.write(struct.pack('i', len(frameData)))
        # write 4-byte binary float; timestamp in seconds
        self.write_file.write(struct.pack('f', time))
        # write binary frame data
        self.write_file.write(frameData)
|
nilq/baby-python
|
python
|
import io
import re
from math import ceil
from . import *
from config import Config
from userbot import CMD_LIST, CMD_HELP
from telethon import custom, events
# --- PM-permit / inline-help configuration pulled from the user's Config ---

# Banner image for the PM-permit card (user override, else project default).
Andencento_pic = Config.PMPERMIT_PIC or "https://telegra.ph/file/ac32724650ef92663fbd1.png"
cstm_pmp = Config.CUSTOM_PMPERMIT  # optional user-supplied PM-permit text
ALV_PIC = Config.ALIVE_PIC  # image for the alive card (read elsewhere)
# Warning text: the user's custom message if set, otherwise the stock one.
mssge = (
    str(cstm_pmp)
    if cstm_pmp
    else "**You Have Trespassed To My Master's PM!\nThis Is Illegal And Regarded As Crime.**"
)
# Final message sent right before blocking a flooding user.
USER_BOT_WARN_ZERO = "Enough Of Your Flooding In My Master's PM!! \n\n**🚫 Blocked and Reported**"
# First automated PM reply; fully formatted here with the owner mention and text.
ANDENCENTO_FIRST = (
    "**🔥 Andencento ULTRA Private Security 🔥**\n\nThis is to inform you that "
    "{} is currently unavailable.\nThis is an automated message.\n\n"
    "{}\n\n**Please Choose Why You Are Here!!**".format(Andencento_mention, mssge))
cmd = "commands"  # NOTE(review): appears unused in this chunk — confirm before removing
andencento = Config.YOUR_NAME  # display name shown in the inline help card
# Inline-bot functionality is only wired up when an assistant bot is configured.
if Config.BOT_USERNAME is not None and tgbot is not None:

    @tgbot.on(events.InlineQuery)  # pylint:disable=E0602
    async def inline_handler(event):
        """Answer inline queries: help keyboard, PM-permit card, repo card,
        upload report, or a default promo card."""
        builder = event.builder
        result = None
        query = event.text
        # Flat list of every registered command (for the counter in the help card).
        apn = []
        for x in CMD_LIST.values():
            for y in x:
                apn.append(y)
        if event.query.user_id == bot.uid and query.startswith("Userbot"):
            buttons = paginate_help(0, CMD_LIST, "helpme")
            result = builder.article(
                "© Andencento-UserBot Help",
                text=f"Andencento[🤖](https://telegra.ph/file/ac32724650ef92663fbd1.png)\n🔰 **{andencento}**\n\n📜 __No.of Plugins__ : `{len(CMD_LIST)}` \n🗂️ __Commands__ : `{len(apn)}`",
                buttons=buttons,
                link_preview=False,
            )
        elif event.query.user_id == bot.uid and query == "pm_warn":
            # NOTE(review): ANDENCENTO_FIRST is already fully formatted at module
            # level; this second .format() is a no-op kept for safety.
            hel_l = ANDENCENTO_FIRST.format(Andencento_mention, mssge)
            result = builder.photo(
                file=Andencento_pic,
                text=hel_l,
                buttons=[
                    [
                        custom.Button.inline("📝 Request 📝", data="req"),
                        custom.Button.inline("💬 Chat 💬", data="chat"),
                    ],
                    [custom.Button.inline("🚫 Spam 🚫", data="heheboi")],
                    [custom.Button.inline("Curious ❓", data="pmclick")],
                ],
            )
        elif event.query.user_id == bot.uid and query == "repo":
            result = builder.article(
                title="Repository",
                text="**⚡ ɛɢɛռɖαʀʏ ᴀғ Andencento Userbot ⚡**",
                buttons=[
                    [Button.url("📑 Repo 📑", "https://t.me/AndencentoSupport")],
                    [
                        Button.url(
                            "🚀 Deploy 🚀",
                            "https://heroku.com/deploy?template=https://github.com/Andencento/Deploy-Andencento",
                        )
                    ],
                ],
            )
        elif query.startswith("http"):
            # Query format produced by the uploader: "<url> <seconds> <site>".
            part = query.split(" ")
            result = builder.article(
                "File uploaded",
                text=f"**File uploaded successfully to {part[2]} site.\n\nUpload Time : {part[1][:3]} second\n[ ]({part[0]})",
                buttons=[[custom.Button.url("URL", part[0])]],
                link_preview=True,
            )
        else:
            result = builder.article(
                "@TheEiva",
                text="""**Hey! This is [Andencento](https://t.me/Andencento) \nYou can know more about me from the links given below 👇**""",
                buttons=[
                    [
                        custom.Button.url("🔥 CHANNEL 🔥", "https://t.me/Andencento"),
                        custom.Button.url("⚡ GROUP ⚡", "https://t.me/AndencentoSupport"),
                    ],
                    [
                        custom.Button.url("✨ REPO ✨", "https://github.com/Andencento/Andencento"),
                        custom.Button.url("🔰 TUTORIAL 🔰", "https://www.youtube.com/watch?v=9WxN6aq5wsQ"),
                    ],
                ],
                link_preview=False,
            )
        await event.answer([result] if result else None)

    # NOTE: the next four handlers intentionally reuse the name `on_pm_click`;
    # the decorator registers each one, so the rebinding is harmless.

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            # Bug fix: the old pattern b"pmclick\\((.+?)\\)" required
            # parentheses, but the button above sends the bare payload
            # "pmclick", so this handler never fired.
            data=re.compile(rb"pmclick")
        )
    )
    async def on_pm_click(event):
        """Explain the PM-permit card to curious visitors."""
        if event.query.user_id == bot.uid:
            reply_pop_up_alert = "This is for Other Users..."
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
        else:
            await event.edit(
                f"🔰 This is Andencento PM Security for {Eiva_mention} to keep away unwanted retards from spamming PM..."
            )

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            # Bug fix: pattern was b"reg\\((.+?)\\)" but the button sends "req" —
            # a typo plus mandatory parentheses meant this never matched.
            data=re.compile(rb"req")
        )
    )
    async def on_pm_click(event):
        """Register a PM 'request' and notify the owner's log group."""
        if event.query.user_id == bot.uid:
            reply_pop_up_alert = "This is for other users!"
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
        else:
            await event.edit(
                f"✅ **Request Registered** \n\n{Eiva_mention} will now decide to look for your request or not.\n😐 Till then wait patiently and don't spam else block!!"
            )
            target = await event.client(GetFullUserRequest(event.query.user_id))
            first_name = html.escape(target.user.first_name)
            ok = event.query.user_id
            if first_name is not None:
                first_name = first_name.replace("\u2060", "")
            tosend = f"**👀 Hey {Eiva_mention} !!** \n\n⚜️ You Got A Request From [{first_name}](tg://user?id={ok}) In PM!!"
            await bot.send_message(LOG_GP, tosend)

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            # Bug fix: was b"chat\\((.+?)\\)"; the button sends plain "chat".
            data=re.compile(rb"chat")
        )
    )
    async def on_pm_click(event):
        """Acknowledge a visitor who wants to chat and ping the log group."""
        if event.query.user_id == bot.uid:
            reply_pop_up_alert = "This is for other users!"
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
        else:
            await event.edit(
                f"Ahh!! You here to do chit-chat!!\n\nPlease wait for {Eiva_mention} to come. Till then keep patience and don't spam."
            )
            target = await event.client(GetFullUserRequest(event.query.user_id))
            ok = event.query.user_id
            first_name = html.escape(target.user.first_name)
            if first_name is not None:
                first_name = first_name.replace("\u2060", "")
            tosend = f"**👀 Hey {Eiva_mention} !!** \n\n⚜️ You Got A PM from [{first_name}](tg://user?id={ok}) for random chats!!"
            await bot.send_message(LOG_GP, tosend)

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            # Bug fix: was b"heheboi\\((.+?)\\)"; the button sends "heheboi".
            data=re.compile(rb"heheboi")
        )
    )
    async def on_pm_click(event):
        """Block and report a user who pressed the Spam button."""
        if event.query.user_id == bot.uid:
            reply_pop_up_alert = "This is for other users!"
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
        else:
            await event.edit(
                "🥴 **Go away from here\nYou Are Blocked Now**"
            )
            await bot(functions.contacts.BlockRequest(event.query.user_id))
            target = await event.client(GetFullUserRequest(event.query.user_id))
            ok = event.query.user_id
            first_name = html.escape(target.user.first_name)
            if first_name is not None:
                first_name = first_name.replace("\u2060", "")
            # (a duplicate re-escape of first_name here used to clobber the
            # \u2060 strip above; removed)
            await bot.send_message(
                LOG_GP,
                f"**Blocked** [{first_name}](tg://user?id={ok}) \n\nReason:- Spam",
            )

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            data=re.compile(rb"helpme_next\((.+?)\)")
        )
    )
    async def on_plug_in_callback_query_handler(event):
        """Advance the help keyboard one page (owner/sudo only)."""
        if (
            event.query.user_id == bot.uid or event.query.user_id in Config.SUDO_USERS
        ):  # pylint:disable=E0602
            current_page_number = int(event.data_match.group(1).decode("UTF-8"))
            buttons = paginate_help(current_page_number + 1, CMD_LIST, "helpme")
            # https://t.me/TelethonChat/115200
            await event.edit(buttons=buttons)
        else:
            reply_pop_up_alert = (
                "Check Pinned Message in\n@ANDENCENTO And\nGet Your Own Userbot"
            )
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            data=re.compile(rb"helpme_prev\((.+?)\)")
        )
    )
    async def on_plug_in_callback_query_handler(event):
        """Go back one help-keyboard page (owner/sudo only)."""
        if (
            event.query.user_id == bot.uid or event.query.user_id in Config.SUDO_USERS
        ):  # pylint:disable=E0602
            current_page_number = int(event.data_match.group(1).decode("UTF-8"))
            buttons = paginate_help(
                current_page_number - 1, CMD_LIST, "helpme"  # pylint:disable=E0602
            )
            # https://t.me/TelethonChat/115200
            await event.edit(buttons=buttons)
        else:
            reply_pop_up_alert = (
                "Check Pinned Message in\n@ANDENCENTO And\nGet Your Own Userbot"
            )
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)

    @tgbot.on(
        events.callbackquery.CallbackQuery(  # pylint:disable=E0602
            data=re.compile(rb"us_plugin_(.*)")
        )
    )
    async def on_plug_in_callback_query_handler(event):
        """Show one plugin's command list in a pop-up, or as a file if too long."""
        if event.query.user_id == bot.uid or event.query.user_id in Config.SUDO_USERS:
            plugin_name = event.data_match.group(1).decode("UTF-8")
            help_string = ""
            try:
                for i in CMD_LIST[plugin_name]:
                    help_string += i
                    help_string += "\n"
            except Exception:
                # unknown plugin name -> fall through with an empty help string
                # (narrowed from a bare `except:`)
                pass
            if not help_string:
                # was `help_string is ""`: identity comparison on a str literal
                # only worked by interning accident
                reply_pop_up_alert = "{} is useless".format(plugin_name)
            else:
                reply_pop_up_alert = help_string
                reply_pop_up_alert += "\n Use .unload {} to remove this plugin\n© ANDENCENTo".format(
                    plugin_name
                )
            try:
                await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
            except Exception:
                # alert too long for a pop-up: send it as a text file instead
                with io.BytesIO(str.encode(reply_pop_up_alert)) as out_file:
                    out_file.name = "{}.txt".format(plugin_name)
                    await event.client.send_file(
                        event.chat_id,
                        out_file,
                        force_document=True,
                        allow_cache=False,
                        caption=plugin_name,
                    )
        else:
            reply_pop_up_alert = (
                "Check Pinned Message in\n@ANDENCENTO And\nGet Your Own Userbot"
            )
            await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
def paginate_help(page_number, loaded_plugins, prefix):
    """Build one page of the inline help keyboard.

    Parameters
    ----------
    page_number : int
        Requested page; wraps around modulo the number of pages.
    loaded_plugins : iterable
        Plugin names; names starting with "_" are hidden.
    prefix : str
        Callback-data prefix for the Previous/Next buttons
        (e.g. "helpme" -> "helpme_prev(0)" / "helpme_next(0)").

    Returns
    -------
    list of tuples of buttons (one tuple per keyboard row).
    """
    number_of_rows = 8
    number_of_cols = 3
    helpable_plugins = sorted(p for p in loaded_plugins if not p.startswith("_"))
    modules = [
        custom.Button.inline("{} {}".format(" ", x), data="us_plugin_{}".format(x))
        for x in helpable_plugins
    ]
    # Bug fix: the old zip(modules[::3], modules[1::3]) pairing silently
    # dropped every third plugin button; chunk into full rows instead.
    pairs = [
        tuple(modules[i:i + number_of_cols])
        for i in range(0, len(modules), number_of_cols)
    ]
    if not pairs:
        # Guard: previously `page_number % 0` raised ZeroDivisionError when
        # no plugins were loaded.
        return []
    max_num_pages = ceil(len(pairs) / number_of_rows)
    modulo_page = page_number % max_num_pages
    if len(pairs) > number_of_rows:
        pairs = pairs[
            modulo_page * number_of_rows : number_of_rows * (modulo_page + 1)
        ] + [
            (
                custom.Button.inline(
                    "«« Previous", data="{}_prev({})".format(prefix, modulo_page)
                ),
                custom.Button.inline(
                    "Next »»", data="{}_next({})".format(prefix, modulo_page)
                ),
            )
        ]
    return pairs
|
nilq/baby-python
|
python
|
import numpy as np
import sys
def get_randn(n=10):
    """Return *n* random integers from ``[0, 100)`` joined as zero-padded
    two-digit strings separated by colons, e.g. ``"07:42:99"``."""
    draws = np.random.randint(100, size=n)
    return ":".join("%02d" % value for value in draws)
|
nilq/baby-python
|
python
|
'''
hms_client
'''
from hubsync.http_client import client
class Repo(object):
    '''Metadata record for an image repository, mirroring the HMS server-side
    schema: identity (namespace, name, origin), descriptive fields (logoUrl,
    summary, description, labels, codeSource) and visibility flags (isPub,
    isCertified).
    '''

    def __init__(self, namespace, repo_name, origin, logo_url="",
                 labels=None, is_pub=True, is_certified=False,
                 code_source=None, summary=None, description=None):
        # identity
        self.namespace = namespace
        self.name = repo_name
        self.origin = origin
        # descriptive fields
        self.logoUrl = logo_url
        self.summary = summary
        self.description = description
        self.labels = labels
        self.codeSource = code_source
        # visibility flags
        self.isPub = is_pub
        self.isCertified = is_certified

    @staticmethod
    def from_repo(namespace, repo):
        '''Build a Repo from a repository dict as returned by DockerHub.

        Reads the full description and logo (large preferred over small)
        from the optional "meta" sub-dict.
        '''
        meta = repo.get("meta")
        full_description = ""
        logo_link = ""
        if meta:
            full_description = repo["meta"]["full_description"]
            logo_info = meta.get("logo_url")
            if logo_info:
                logo_link = logo_info.get("large") or logo_info.get("small")
        return Repo(namespace=namespace,
                    repo_name=repo["name"],
                    origin="docker",
                    labels=[],
                    logo_url=logo_link,
                    summary=repo["description"],
                    description=full_description)
class Image(object):
    '''Metadata record for a single image tag (namespace/repoName:tag)
    together with its content hash and size in bytes.
    '''

    def __init__(self, namespace, repo_name, tag, hash_str, size):
        self.namespace = namespace
        self.repoName = repo_name
        self.tag = tag
        self.hash = hash_str
        self.size = size

    @staticmethod
    def from_image(namespace, repo_name, tag, image):
        '''Build an Image from a docker-inspect style dict (may be None).

        The "Id" value's first 7 characters (the algorithm prefix, e.g.
        "sha256:") are stripped to obtain the bare digest.
        '''
        digest = ""
        byte_size = 0
        if image:
            digest = image["Id"][7:]
            byte_size = image["Size"]
        return Image(namespace=namespace,
                     repo_name=repo_name,
                     tag=tag,
                     hash_str=digest,
                     size=byte_size)
class HmsClient(object):
    '''Thin HTTP client that pushes repo/image metadata to an HMS server.

    Each method is best-effort: failures are printed and swallowed so a bulk
    sync can continue. Fixes applied: the Python-2-only ``except Exception, e``
    form is normalized to ``as e`` (consistent with the other methods and
    valid on Python 3), and ``print`` statements are parenthesized — the
    output is unchanged on Python 2 and the code also parses on Python 3.
    '''
    def __init__(self, hms_server):
        # host[:port] of the HMS service; used to build request URLs
        self.hms_server = hms_server

    def add_repo(self, repo):
        '''Create repo metadata on hms (PUT).'''
        print("Add repo %s" % repo.name)
        url1 = "http://%s/v1/hms/namespaces/%s/repos" % (self.hms_server, repo.namespace)
        print(url1)
        try:
            client.do_put_with_json(url1, repo.__dict__)
        except Exception as e:
            # best-effort: report and continue
            print(e)

    def update_repo(self, repo):
        '''Update repo metadata on hms (POST).'''
        print("Update repo %s" % repo.name)
        url1 = "http://%s/v1/hms/namespaces/%s/repos/%s" % (self.hms_server, repo.namespace, repo.name)
        print(url1)
        try:
            client.do_post_with_json(url1, repo.__dict__)
        except Exception as e:
            print(e)

    def add_image(self, image):
        '''Create image (tag) metadata on hms (PUT).'''
        print("Add image %s/%s:%s" % (image.namespace, image.repoName, image.tag))
        url1 = "http://%s/v1/hms/namespaces/%s/repos/%s/tags" % (self.hms_server, image.namespace, image.repoName)
        try:
            client.do_put_with_json(url1, image.__dict__)
        except Exception as e:
            print(e)

    def update_image(self, image):
        '''Update image (tag) metadata on hms (POST).'''
        print("Update image %s/%s:%s" % (image.namespace, image.repoName, image.tag))
        url1 = "http://%s/v1/hms/namespaces/%s/repos/%s/tags/%s" % (self.hms_server, image.namespace, image.repoName, image.tag)
        try:
            client.do_post_with_json(url1, image.__dict__)
        except Exception as e:
            print(e)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from peyotl.nexson_validation.helper import _NEXEL, errorReturn
from peyotl.nexson_validation.err_generator import (gen_InvalidKeyWarning,
gen_MissingCrucialContentWarning,
gen_MissingMandatoryKeyWarning,
gen_MultipleRootsWarning,
gen_NodeWithMultipleParents,
gen_NoRootWarning,
gen_ReferencedIDNotFoundWarning,
gen_RepeatedOTUWarning,
gen_TreeCycleWarning,
gen_WrongValueTypeWarning)
from peyotl.nexson_syntax.helper import add_literal_meta, \
BADGER_FISH_NEXSON_VERSION, \
delete_first_literal_meta, \
find_val_for_first_bf_l_meta
from peyotl.nexson_validation._validation_base import NexsonValidationAdaptor
from peyotl.utility import get_logger
_LOG = get_logger(__name__)
class BadgerFishValidationAdaptor(NexsonValidationAdaptor):
    """Validation adaptor for NexSON documents in the BadgerFish syntax.

    In BadgerFish serialization a singleton child element appears as a dict
    rather than a one-element list, so every hook below normalizes that
    dict/list ambiguity before delegating to the generic checks inherited
    from NexsonValidationAdaptor. Each ``_post_key_check_validate_*`` method
    returns a truthy value on success and a ``errorReturn(...)`` value after
    recording an error event.
    """

    def __init__(self, obj, logger, **kwargs):
        # A subclass may have set _syntax_version already; default to BadgerFish.
        if not hasattr(self, '_syntax_version'):
            self._syntax_version = BADGER_FISH_NEXSON_VERSION
        self._find_first_literal_meta = find_val_for_first_bf_l_meta
        NexsonValidationAdaptor.__init__(self, obj, logger, **kwargs)

    def _post_key_check_validate_otus_obj(self, og_nex_id, otus_group, vc):
        """Validate one otus group; caches its id->otu dict on success."""
        otu_dict = {}
        otu_list = otus_group.get('otu', [])
        # BadgerFish serializes a single child as a dict, not a 1-element list.
        if isinstance(otu_list, dict):
            otu_list = [otu_list]
        if not otu_list:
            return
        vc.push_context(_NEXEL.OTU, (otus_group, og_nex_id))
        try:
            without_id = []
            otu_tuple_list = []
            for otu in otu_list:
                oid = otu.get('@id')
                if oid is None:
                    without_id.append(otu)
                else:
                    otu_tuple_list.append((oid, otu))
                    otu_dict[oid] = otu
            if without_id:
                self._error_event(_NEXEL.NEXML,
                                  obj=without_id,
                                  err_type=gen_MissingCrucialContentWarning,
                                  anc=vc.anc_list,
                                  obj_nex_id=None,
                                  key_list=['@id'])
                return errorReturn('lack of "@id" in "otu"')
            r = self._validate_otu_list(otu_tuple_list, vc)
            if r:
                self._otu_by_otug[og_nex_id] = otu_dict
            return r
        except:
            # NOTE(review): this looks like it should be `finally:` — as
            # written, pop_context() is skipped on the success path and any
            # exception is silently swallowed (returning None). Confirm
            # against upstream peyotl before changing.
            vc.pop_context()

    def _post_key_check_validate_tree_group(self, tg_nex_id, trees_group, vc):
        """Validate one trees group: its @otus reference and each contained tree."""
        otus_el = trees_group.get('@otus')
        if otus_el not in self._otu_group_by_id:
            kl = ['@otus="{oe}"'.format(oe=otus_el)]
            self._error_event(_NEXEL.TREES,
                              obj=trees_group,
                              err_type=gen_ReferencedIDNotFoundWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tg_nex_id,
                              key_list=kl)
            return errorReturn('bad "@otus" in trees group')
        tree_list = trees_group.get('tree', [])
        # Normalize the BadgerFish singleton-as-dict form.
        if isinstance(tree_list, dict):
            tree_list = [tree_list]
        elif not isinstance(tree_list, list):
            self._error_event(_NEXEL.TREES,
                              obj=trees_group,
                              err_type=gen_WrongValueTypeWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tg_nex_id,
                              key_list=['tree'])
            return errorReturn('lack of "tree" in trees group')
        for tree_obj in tree_list:
            t_nex_id = tree_obj.get('@id')
            vc.push_context(_NEXEL.TREE, (trees_group, tg_nex_id))
            try:
                if t_nex_id is None:
                    self._error_event(_NEXEL.TREE,
                                      obj=tree_obj,
                                      err_type=gen_MissingCrucialContentWarning,
                                      anc=vc.anc_list,
                                      obj_nex_id=tg_nex_id,
                                      key_list=['@id'])
                    return errorReturn('no "@id" in tree')
                if not self._validate_tree(t_nex_id,
                                           tree_obj,
                                           vc,
                                           otus_group_id=otus_el):
                    return False
            finally:
                vc.pop_context()
        return True

    # pylint: disable=R0915
    def _post_key_check_validate_tree(self,
                                      tree_nex_id,
                                      tree_obj,
                                      vc,
                                      otus_group_id=None):
        """Structural validation of one tree: nodes, edges, leaf flags, rooting.

        Checks (in order): node/edge presence and types, edge endpoints that
        reference known nodes, multiple-parent nodes, cycles, leaf/@otu
        consistency, ot:isLeaf flags, multi-labelled leaves, and that exactly
        one node is flagged with "@root" and is the walk-derived root.
        """
        node_list = tree_obj.get('node')
        if isinstance(node_list, dict):
            node_list = [node_list]
        elif (not node_list) or (not isinstance(node_list, list)):
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_MissingCrucialContentWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              key_list=['node', ])
            return errorReturn('no "node" in "trees"')
        edge_list = tree_obj.get('edge')
        if isinstance(edge_list, dict):
            edge_list = [edge_list]
        elif (not edge_list) or (not isinstance(edge_list, list)):
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_MissingCrucialContentWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              key_list=['edge', ])
            return errorReturn('no "edge" in tree')
        edge_id_list = [(i.get('@id'), i) for i in edge_list]
        vc.push_context(_NEXEL.EDGE, (tree_obj, tree_nex_id))
        try:
            valid = self._validate_edge_list(edge_id_list, vc)
            if not valid:
                return False
        finally:
            vc.pop_context()
        node_id_obj_list = [(i.get('@id'), i) for i in node_list]
        vc.push_context(_NEXEL.NODE, (tree_obj, tree_nex_id))
        try:
            valid = self._validate_node_list(node_id_obj_list, vc)
            if not valid:
                return False
        finally:
            vc.pop_context()
        # Map node id -> node object for endpoint checks below.
        node_dict = {}
        for i in node_id_obj_list:
            nid, nd = i
            node_dict[nid] = nd
        # Every edge endpoint must name a known node.
        missing_src = []
        missing_target = []
        for el in edge_id_list:
            e = el[1]
            sid = e.get('@source')
            tid = e.get('@target')
            if sid not in node_dict:
                missing_src.append(sid)
            if tid not in node_dict:
                missing_target.append(tid)
        if missing_src:
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_ReferencedIDNotFoundWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              key_list=missing_src)
            return errorReturn('no "@source" in edge')
        if missing_target:
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_ReferencedIDNotFoundWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              key_list=missing_target)
            return errorReturn('no "@target" in edge')
        if otus_group_id is None:
            # Recover the owning trees group's @otus from the validation context.
            tree_group = vc.anc_list[-1][1]
            otus_group_id = tree_group.get('@otus')
        lowest_nodeid_set = set()
        encountered_nodes = set()
        edge_by_target = {}
        edge_by_source = {}
        multi_parent_node = []
        # Index edges by endpoint; a repeated @target means >1 parent.
        for e in edge_list:
            t = e.get('@target')
            if t in edge_by_target:
                multi_parent_node.append(t)
            else:
                edge_by_target[t] = e
            # _LOG.debug('e=' + str(e))
            sid = e['@source']
            edge_by_source.setdefault(sid, []).append(e)
        if multi_parent_node:
            # Warning only: validation continues after this event.
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_NodeWithMultipleParents,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              node_id_list=multi_parent_node)
        otuid2leaf = {}
        unflagged_leaves = []
        nonleaves_with_leaf_flags = []
        with_at_root_prop = {}
        first_lowest_node = None
        for nd in node_list:
            nid = nd.get('@id')
            # Walk toward the root; detects cycles and records the walk's end.
            cycle_node, path_to_root = construct_path_to_root(nd, encountered_nodes, edge_by_target)
            if cycle_node:
                self._error_event(_NEXEL.TREE,
                                  obj=tree_obj,
                                  err_type=gen_TreeCycleWarning,
                                  anc=vc.anc_list,
                                  obj_nex_id=tree_nex_id,
                                  cycle_node=cycle_node)
                return errorReturn('"@id" in node')
            if path_to_root:
                lowest_nodeid_set.add(path_to_root[-1])
                if first_lowest_node is None:
                    first_lowest_node = path_to_root[-1]
            is_flagged_as_leaf = self._find_first_literal_meta(nd, 'ot:isLeaf')
            ch_list = edge_by_source.get(nid)
            if ch_list is None:
                # No outgoing edges: this node is a leaf and must carry an @otu.
                otu_id = nd.get('@otu')
                if otu_id is None:
                    vc.push_context(_NEXEL.NODE, (tree_obj, tree_nex_id))
                    try:
                        self._error_event(_NEXEL.NODE,
                                          obj=nd,
                                          err_type=gen_MissingCrucialContentWarning,
                                          anc=vc.anc_list,
                                          obj_nex_id=nid,
                                          key_list=['@otu', ])
                        return errorReturn('"@otu" in leaf')
                    finally:
                        vc.pop_context()
                else:
                    if otu_id in otuid2leaf:
                        vc.push_context(_NEXEL.NODE, (tree_obj, tree_nex_id))
                        try:
                            self._error_event(_NEXEL.NODE,
                                              obj=nd,
                                              err_type=gen_RepeatedOTUWarning,
                                              anc=vc.anc_list,
                                              obj_nex_id=nid,
                                              key_list=[otu_id])
                            return errorReturn('repeated "@otu" in leaves')
                        finally:
                            vc.pop_context()
                    otuid2leaf[otu_id] = nd
                if not is_flagged_as_leaf:
                    # Leaves should carry ot:isLeaf; add it silently unless the
                    # logger asks to retain (and report) the deprecated form.
                    if not self._logger.retain_deprecated:
                        add_literal_meta(nd, 'ot:isLeaf', True, self._syntax_version)
                    else:
                        unflagged_leaves.append(nid)
            elif is_flagged_as_leaf:
                # Internal node wrongly flagged as a leaf.
                if not self._logger.retain_deprecated:
                    delete_first_literal_meta(nd, 'ot:isLeaf', self._syntax_version)
                else:
                    nonleaves_with_leaf_flags.append(nid)
            if nd.get('@root'):
                with_at_root_prop[nid] = nd
        if unflagged_leaves:
            vc.push_context(_NEXEL.NODE, (tree_obj, tree_nex_id))
            try:
                # _LOG.debug('unflagged_leaves="{f}"'.format(f=unflagged_leaves))
                self._error_event(_NEXEL.NODE,
                                  obj=tree_obj,
                                  err_type=gen_MissingMandatoryKeyWarning,
                                  anc=vc.anc_list,
                                  obj_nex_id=unflagged_leaves,
                                  key_list=['ot:isLeaf'])
            finally:
                vc.pop_context()
        if nonleaves_with_leaf_flags:
            vc.push_context(_NEXEL.NODE, (tree_obj, tree_nex_id))
            try:
                self._error_event(_NEXEL.NODE,
                                  obj=tree_obj,
                                  err_type=gen_InvalidKeyWarning,
                                  anc=vc.anc_list,
                                  obj_nex_id=nonleaves_with_leaf_flags,
                                  key_list=['ot:isLeaf'])
                return errorReturn('"ot:isLeaf" for internal')
            finally:
                vc.pop_context()
        self._detect_multilabelled_tree(otus_group_id,
                                        tree_nex_id,
                                        otuid2leaf)
        if len(lowest_nodeid_set) > 1:
            # More than one walk endpoint => disconnected tree / several roots.
            lowest_nodeid_set = list(lowest_nodeid_set)
            lowest_nodeid_set.sort()
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_MultipleRootsWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              node_id_list=lowest_nodeid_set)
            return errorReturn('multiple roots in a tree')
        root_node_id = first_lowest_node
        if root_node_id not in with_at_root_prop:
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_MultipleRootsWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              node_id_list=list(with_at_root_prop.keys()) + [root_node_id])
            return errorReturn('root without "@root"')
        elif len(with_at_root_prop) > 1:
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_MultipleRootsWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id,
                              node_id_list=list(with_at_root_prop.keys()))
            return errorReturn('Multiple nodes with "@root"')
        elif len(with_at_root_prop) == 0:
            # NOTE(review): appears unreachable — an empty with_at_root_prop
            # would already have triggered the 'root without "@root"' branch.
            self._error_event(_NEXEL.TREE,
                              obj=tree_obj,
                              err_type=gen_NoRootWarning,
                              anc=vc.anc_list,
                              obj_nex_id=tree_nex_id)
            return errorReturn('no node with "@root"')
        og = set([i['@id'] for i in self._otu_group_by_id[otus_group_id]['otu']])
        nli = [(i['@id'], i) for i in node_list]
        return self._validate_otu_key_if_present(nli, og, vc)

    def _post_key_check_validate_nexml_obj(self, nex_obj, obj_nex_id, vc):
        """Validate the top-level nexml object: otus groups, then trees groups,
        then synthesis-candidate warnings."""
        otus_group_list = nex_obj.get('otus', [])
        if otus_group_list and isinstance(otus_group_list, dict):
            otus_group_list = [otus_group_list]
        if not isinstance(otus_group_list, list):
            self._error_event(_NEXEL.NEXML,
                              obj=nex_obj,
                              err_type=gen_MissingCrucialContentWarning,
                              anc=vc.anc_list,
                              obj_nex_id=obj_nex_id,
                              key_list=['otus'])
            return errorReturn('no "otus" in nexml')
        vc.push_context(_NEXEL.OTUS, (nex_obj, obj_nex_id))
        try:
            without_id = []
            og_tuple_list = []
            for og in otus_group_list:
                ogid = og.get('@id')
                if ogid is None:
                    without_id.append(None)
                else:
                    og_tuple_list.append((ogid, og))
            if without_id:
                self._error_event(_NEXEL.OTUS,
                                  obj=nex_obj,
                                  err_type=gen_MissingCrucialContentWarning,
                                  anc=vc.anc_list,
                                  obj_nex_id=None,
                                  key_list=['otus[*]/@id'])
                return errorReturn('otu without "@id"')
            if not self._validate_otus_group_list(og_tuple_list, vc):
                return False
        finally:
            vc.pop_context()
        # and now the trees...
        trees_group_list = nex_obj.get('trees', [])
        if trees_group_list and isinstance(trees_group_list, dict):
            trees_group_list = [trees_group_list]
        if not isinstance(trees_group_list, list):
            self._error_event(_NEXEL.NEXML,
                              obj=nex_obj,
                              err_type=gen_WrongValueTypeWarning,
                              anc=vc.anc_list,
                              obj_nex_id=obj_nex_id,
                              key_list=['trees'])
            return errorReturn('No "trees" in nexml')
        vc.push_context(_NEXEL.TREES, (nex_obj, obj_nex_id))
        try:
            without_id = []
            tg_tuple_list = []
            for tg in trees_group_list:
                tgid = tg.get('@id')
                if tgid is None:
                    without_id.append(tg)
                else:
                    tg_tuple_list.append((tgid, tg))
            if without_id:
                self._error_event(_NEXEL.TREES,
                                  obj=without_id,
                                  err_type=gen_MissingCrucialContentWarning,
                                  anc=vc.anc_list,
                                  obj_nex_id=None,
                                  key_list=['@id'])
                return errorReturn('No "@id" in trees group"')
            if not self._validate_trees_group_list(tg_tuple_list, vc):
                return False
        finally:
            vc.pop_context()
        ogid2og = {}
        for og in otus_group_list:
            ogid = og.get('@id')
            ogid2og[ogid] = og
        # Unless the study is flagged as not intended for synthesis, collect
        # the candidate tree ids (explicit list, or all trees) and generate
        # OTT-related warnings for them.
        if not self._find_first_literal_meta(nex_obj, 'ot:notIntendedForSynthesis'):
            cs = self._find_first_literal_meta(nex_obj, 'ot:candidateTreeForSynthesis')
            if cs:
                if not isinstance(cs, list):
                    tree_list = [cs]
                else:
                    tree_list = cs
            else:
                tree_list = []
                for tg in trees_group_list:
                    stree_list = tg.get('tree')
                    if not isinstance(stree_list, list):
                        stree_list = [stree_list]
                    tree_list.extend([i.get('@id') for i in stree_list])
            self._generate_ott_warnings(ogid2og, tree_list, (nex_obj, obj_nex_id), vc)
        return True
def construct_path_to_root(node, encountered_nodes, edge_by_target):
    """Walk ``@source`` links from *node* toward the root.

    Returns a ``(cycle_node_id, path)`` pair:
      * ``(node_id, partial_path)`` when the walk revisits a node seen in
        this same walk (a cycle);
      * ``(None, [])`` when the walk reaches a node already present in
        *encountered_nodes* (a previously explored region);
      * ``(None, path)`` otherwise, with ``path[-1]`` being the walk's
        endpoint (the root candidate).

    *encountered_nodes* is updated in place with every node visited.
    """
    current = node.get('@id')
    path = []
    seen_this_walk = set()
    while current:
        # _LOG.debug-style tracing removed; the walk is purely id-driven.
        if current in seen_this_walk:
            return current, path
        if current in encountered_nodes:
            return None, []
        path.append(current)
        seen_this_walk.add(current)
        encountered_nodes.add(current)
        edge = edge_by_target.get(current)
        parent = None
        if edge:
            parent = edge.get('@source')
        if parent:
            current = parent
        else:
            break
    return None, path
|
nilq/baby-python
|
python
|
# Enable beancount's importer regression-test pytest plugin for this package.
pytest_plugins = "beancount.ingest.regression_pytest"
|
nilq/baby-python
|
python
|
import sys
sys.path.append('../../')
import open3d
import numpy as np
import time
import os
from ThreeDMatch.Test.tools import get_pcd, get_ETH_keypts, get_desc, loadlog
from sklearn.neighbors import KDTree
import glob
def calculate_M(source_desc, target_desc):
    """
    Find the mutually closest point pairs in feature space.

    ``source_desc`` and ``target_desc`` are descriptor matrices for the key
    points of two point clouds (e.g. shape [5000, 512]). A pair is kept only
    when each point is the other's nearest neighbour.

    Returns an ``np.array`` of ``[source_index, target_index]`` rows.
    """
    _, source_to_target = KDTree(target_desc).query(source_desc, 1)
    _, target_to_source = KDTree(source_desc).query(target_desc, 1)
    mutual_pairs = []
    for src_idx in range(len(source_to_target)):
        tgt_idx = source_to_target[src_idx]
        if target_to_source[tgt_idx] == src_idx:
            mutual_pairs.append([src_idx, tgt_idx[0]])
    return np.array(mutual_pairs)
def register2Fragments(id1, id2, keyptspath, descpath, resultpath, desc_name='ppf'):
    """Register one ETH fragment pair and record its match statistics.

    Loads keypoints and learned descriptors for fragments ``Hokuyo_{id1}``
    and ``Hokuyo_{id2}``, counts mutually-nearest descriptor matches that lie
    within 10 cm under the ground-truth alignment, and runs RANSAC feature
    matching whose transform is appended to a ``.log`` file for Registration
    Recall evaluation.

    NOTE(review): besides its parameters this function reads the module
    globals ``pcdpath``, ``gtLog``, ``logpath`` and ``timestr`` that are set
    by the ``__main__`` driver loop.

    Args:
        id1, id2: integer fragment indices within the current scene.
        keyptspath: directory holding the precomputed keypoint files.
        descpath: directory holding the ``desc_name`` descriptor files.
        resultpath: directory where the per-pair ``*.rt.txt`` summary is cached.
        desc_name: descriptor identifier used in the descriptor filenames.

    Returns:
        Tuple ``(num_inliers, inlier_ratio, gt_flag)``.  Returns ``(0, 0, 0)``
        immediately when a cached result file exists (the cached values are
        re-read later by ``read_register_result``).
    """
    cloud_bin_s = f'Hokuyo_{id1}'
    cloud_bin_t = f'Hokuyo_{id2}'
    write_file = f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'
    if os.path.exists(os.path.join(resultpath, write_file)):
        # print(f"{write_file} already exists.")
        return 0, 0, 0
    # Load both fragments and their keypoints / descriptors.
    pcd_s = get_pcd(pcdpath, cloud_bin_s)
    source_keypts = get_ETH_keypts(pcd_s, keyptspath, cloud_bin_s)
    pcd_t = get_pcd(pcdpath, cloud_bin_t)
    target_keypts = get_ETH_keypts(pcd_t, keyptspath, cloud_bin_t)
    # print(source_keypts.shape)
    source_desc = get_desc(descpath, cloud_bin_s, desc_name=desc_name)
    target_desc = get_desc(descpath, cloud_bin_t, desc_name=desc_name)
    # Guard against NaNs coming out of the descriptor pipeline.
    source_desc = np.nan_to_num(source_desc)
    target_desc = np.nan_to_num(target_desc)
    # Ground-truth log keys are '<idx1>_<idx2>'; pairs without an entry have
    # no ground truth and contribute zero statistics (gt_flag = 0).
    key = f'{cloud_bin_s.split("_")[-1]}_{cloud_bin_t.split("_")[-1]}'
    if key not in gtLog.keys():
        num_inliers = 0
        inlier_ratio = 0
        gt_flag = 0
    else:
        # find mutually cloest point.
        corr = calculate_M(source_desc, target_desc)
        gtTrans = gtLog[key]
        frag1 = source_keypts[corr[:, 0]]
        frag2_pc = open3d.geometry.PointCloud()
        frag2_pc.points = open3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
        # Apply the ground-truth transform to the matched target keypoints;
        # a match closer than 0.1 m to its source point counts as an inlier.
        frag2_pc.transform(gtTrans)
        frag2 = np.asarray(frag2_pc.points)
        distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
        num_inliers = np.sum(distance < 0.1)
        inlier_ratio = num_inliers / len(distance)
        gt_flag = 1
    # calculate the transformation matrix using RANSAC, this is for Registration Recall.
    source_pcd = open3d.geometry.PointCloud()
    source_pcd.points = open3d.utility.Vector3dVector(source_keypts)
    target_pcd = open3d.geometry.PointCloud()
    target_pcd.points = open3d.utility.Vector3dVector(target_keypts)
    s_desc = open3d.pipelines.registration.Feature()
    s_desc.data = source_desc.T
    t_desc = open3d.pipelines.registration.Feature()
    t_desc.data = target_desc.T
    result = open3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        source_pcd, target_pcd, s_desc, t_desc,
        0.05,
        open3d.pipelines.registration.TransformationEstimationPointToPoint(False), 3,
        [open3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         open3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(0.05)],
        open3d.pipelines.registration.RANSACConvergenceCriteria(50000, 1000))
    # write the transformation matrix into .log file for evaluation.
    with open(os.path.join(logpath, f'{desc_name}_{timestr}.log'), 'a+') as f:
        # NOTE(review): the estimated transform is inverted before logging —
        # presumably to match the evaluation .log convention; confirm.
        trans = result.transformation
        trans = np.linalg.inv(trans)
        s1 = f'{id1}\t {id2}\t 37\n'
        f.write(s1)
        f.write(f"{trans[0, 0]}\t {trans[0, 1]}\t {trans[0, 2]}\t {trans[0, 3]}\t \n")
        f.write(f"{trans[1, 0]}\t {trans[1, 1]}\t {trans[1, 2]}\t {trans[1, 3]}\t \n")
        f.write(f"{trans[2, 0]}\t {trans[2, 1]}\t {trans[2, 2]}\t {trans[2, 3]}\t \n")
        f.write(f"{trans[3, 0]}\t {trans[3, 1]}\t {trans[3, 2]}\t {trans[3, 3]}\t \n")
    # Cache this pair's statistics so a rerun can skip the computation above.
    s = f"{cloud_bin_s}\t{cloud_bin_t}\t{num_inliers}\t{inlier_ratio:.8f}\t{gt_flag}"
    with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'w+') as f:
        f.write(s)
    return num_inliers, inlier_ratio, gt_flag
def read_register_result(id1, id2, result_dir=None):
    """Read the cached registration statistics for one fragment pair.

    Args:
        id1, id2: fragment indices; the file read is
            ``Hokuyo_{id1}_Hokuyo_{id2}.rt.txt``.
        result_dir: directory containing the ``.rt.txt`` files.  Defaults to
            the module-level ``resultpath`` (set by the ``__main__`` driver)
            for backward compatibility with existing callers.

    Returns:
        List of three strings ``[num_inliers, inlier_ratio, gt_flag]`` —
        columns 2..4 of the tab-separated first line.
    """
    if result_dir is None:
        result_dir = resultpath  # legacy global set by the __main__ driver
    cloud_bin_s = f'Hokuyo_{id1}'
    cloud_bin_t = f'Hokuyo_{id2}'
    with open(os.path.join(result_dir, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'r') as f:
        content = f.readlines()
    # Line format: src \t tgt \t num_inliers \t inlier_ratio \t gt_flag
    nums = content[0].replace("\n", "").split("\t")[2:5]
    return nums
if __name__ == '__main__':
    # Evaluate SpinNet descriptors on outdoor ETH scenes: register every
    # fragment pair per scene, then report feature-match recall and the
    # average number of inlier correspondences.
    scene_list = [
        'gazebo_summer',
        'gazebo_winter',
        'wood_autmn',
        'wood_summer',
    ]
    desc_name = 'SpinNet'
    # Timestamp tag (argv[1]) selecting which generated descriptor run to evaluate.
    timestr = sys.argv[1]
    inliers_list = []
    recall_list = []
    for scene in scene_list:
        # Per-scene paths.  NOTE(review): pcdpath, gtLog, logpath and timestr
        # are read as module globals inside register2Fragments, and
        # resultpath inside read_register_result.
        pcdpath = f"../../data/ETH/{scene}/"
        interpath = f"../../data/ETH/{scene}/01_Keypoints/"
        gtpath = f'../../data/ETH/{scene}/'
        keyptspath = interpath  # os.path.join(interpath, "keypoints/")
        descpath = os.path.join(".", f"{desc_name}_desc_{timestr}/{scene}")
        logpath = f"log_result/{scene}-evaluation"
        gtLog = loadlog(gtpath)
        resultpath = os.path.join(".", f"pred_result/{scene}/{desc_name}_result_{timestr}")
        if not os.path.exists(resultpath):
            os.makedirs(resultpath)
        if not os.path.exists(logpath):
            os.makedirs(logpath)
        # register each pair
        fragments = glob.glob(pcdpath + '*.ply')
        num_frag = len(fragments)
        print(f"Start Evaluate Descriptor {desc_name} for {scene}")
        start_time = time.time()
        for id1 in range(num_frag):
            for id2 in range(id1 + 1, num_frag):
                num_inliers, inlier_ratio, gt_flag = register2Fragments(id1, id2, keyptspath, descpath, resultpath,
                                                                        desc_name)
        print(f"Finish Evaluation, time: {time.time() - start_time:.2f}s")
        # evaluate
        # Re-read the cached per-pair statistics written by register2Fragments.
        result = []
        for id1 in range(num_frag):
            for id2 in range(id1 + 1, num_frag):
                line = read_register_result(id1, id2)
                result.append([int(line[0]), float(line[1]), int(line[2])])
        result = np.array(result)
        # Recall = fraction of ground-truth pairs whose inlier ratio > 5 %.
        # (Pairs without ground truth have ratio 0, so the > 0.05 filter
        # excludes them from the numerator automatically.)
        indices_results = np.sum(result[:, 2] == 1)
        correct_match = np.sum(result[:, 1] > 0.05)
        recall = float(correct_match / indices_results) * 100
        print(f"Correct Match {correct_match}, ground truth Match {indices_results}")
        print(f"Recall {recall}%")
        # Average inlier count over the correctly matched pairs only.
        ave_num_inliers = np.sum(np.where(result[:, 1] > 0.05, result[:, 0], np.zeros(result.shape[0]))) / correct_match
        print(f"Average Num Inliners: {ave_num_inliers}")
        recall_list.append(recall)
        inliers_list.append(ave_num_inliers)
    print(recall_list)
    # NOTE(review): the messages below say "All 8 scene" but scene_list holds
    # only 4 scenes — likely copied from the 3DMatch (8-scene) variant.
    average_recall = sum(recall_list) / len(recall_list)
    print(f"All 8 scene, average recall: {average_recall}%")
    average_inliers = sum(inliers_list) / len(inliers_list)
    print(f"All 8 scene, average num inliers: {average_inliers}")
|
nilq/baby-python
|
python
|
class Space(object):
    """A single cell of a grid map.

    Attributes (documented with comments below):
        col      -- column index of the cell.
        row      -- row index of the cell.
        terrain  -- single-character terrain code; '.' (open) by default.
        occupied -- True once something stands on this space.
    """

    def __init__(self, col, row, terrain='.'):
        self.col = col            # column position
        self.row = row            # row position
        self.terrain = terrain    # terrain character, defaults to open ground
        self.occupied = False     # a freshly created space is always empty

    def __repr__(self):
        # Added for debuggability (best practice); no caller relies on repr.
        return (f"Space(col={self.col}, row={self.row}, "
                f"terrain={self.terrain!r}, occupied={self.occupied})")
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import AcademicNotice
# Register your models here.
class MyModelAdmin(admin.ModelAdmin):
    """Admin options for AcademicNotice.

    The change form exposes only title and body; the author field is
    stamped automatically from the signed-in admin user on save.
    """

    # 'author' is intentionally absent from the form: it is never hand-edited.
    fields = ['title', 'body']

    def save_model(self, request, obj, form, change):
        # Record who is saving this notice, then let Django persist it.
        obj.author = request.user
        super().save_model(request, obj, form, change)


admin.site.register(AcademicNotice, MyModelAdmin)
|
nilq/baby-python
|
python
|
# Minimal demo: build a small dense classifier and compile it with an MSE
# loss, showing the equivalent ways of specifying the loss function.
import tensorflow as tf
from tensorflow import keras
# from tensorflow.
model = tf.keras.models.Sequential([
    keras.layers.Dense(512, activation='relu', input_shape=(784,)),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10)
])
#1: loss given by its registered string name.
# model.compile(loss='mean_squared_error', optimizer='sgd')
#2: loss given as a function object.  Fixed: the original did
# `from keras import losses`, which mixes the standalone Keras package with
# a tf.keras model — under TF2 that combination raises errors.  Use the
# losses module that belongs to the same Keras implementation as the model.
from tensorflow.keras import losses
model.compile(loss=losses.mean_squared_error, optimizer='sgd')
# Equivalent spelling via tf.losses; this second compile simply replaces
# the configuration set by the first one.
model.compile(loss=tf.losses.mean_squared_error, optimizer='sgd')
|
nilq/baby-python
|
python
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def constructMaximumBinaryTree(self, nums):
        """
        :type nums: List[int]
        :rtype: TreeNode

        Build the maximum binary tree: the root holds the maximum element,
        and the left/right subtrees are built recursively from the elements
        before/after it.  Improvement over the slicing version: recursing on
        index bounds avoids creating an O(n) list copy at every node
        (the old nums[:max_pos] / nums[max_pos+1:] slices).
        """
        def build(lo, hi):
            # Build the subtree for nums[lo:hi); an empty range has no node.
            if lo >= hi:
                return None
            # First occurrence of the maximum, matching list.index semantics.
            max_pos = lo
            for i in range(lo + 1, hi):
                if nums[i] > nums[max_pos]:
                    max_pos = i
            node = TreeNode(nums[max_pos])
            node.left = build(lo, max_pos)
            node.right = build(max_pos + 1, hi)
            return node

        return build(0, len(nums))
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.