max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
reamber/quaver/QuaNoteMeta.py | Eve-ning/reamber_base_py | 10 | 6618351 | <filename>reamber/quaver/QuaNoteMeta.py<gh_stars>1-10
from reamber.base import item_props
@item_props()
class QuaNoteMeta:
_props = dict(keysounds=['object', []])
| <filename>reamber/quaver/QuaNoteMeta.py<gh_stars>1-10
from reamber.base import item_props
@item_props()
class QuaNoteMeta:
_props = dict(keysounds=['object', []])
| none | 1 | 1.582667 | 2 | |
v_2.0/lib/util/DataTransformers.py | benalcazardiego/RFCV | 0 | 6618352 | <gh_stars>0
#Matrix_transformations_and_capacitance_obtaining
#Description: Calculation of the 'X,Y' matrixs since 'S' matrix
from __future__ import division
from numpy.linalg import inv
import math
def single_z_from_s(data):
    """Convert one set of S-parameters to Z-parameters (Z0 = 50 ohm).

    data: [s11, s12, s21, s22]; returns [z11, z12, z21, z22].
    """
    s11, s12, s21, s22 = data[0], data[1], data[2], data[3]
    z0 = 50  # reference (characteristic) impedance in ohms
    # Common denominator of the standard S-to-Z conversion formulas.
    denom = (1 - s11) * (1 - s22) - s12 * s21
    z11 = (((1 + s11) * (1 - s22) + s12 * s21) / denom) * z0
    z12 = (2 * s12 * z0) / denom
    z21 = (2 * s21 * z0) / denom
    z22 = (((1 - s11) * (1 + s22) + s12 * s21) / denom) * z0
    return [z11, z12, z21, z22]
def single_y_from_s(data):
    """Convert one set of S-parameters to Y-parameters (Z0 = 50 ohm).

    data: [s11, s12, s21, s22]; returns the row-major flattened Y matrix
    [y11, y12, y21, y22], computed as the inverse of the Z matrix.
    """
    z11, z12, z21, z22 = single_z_from_s(data)
    inverse = inv([[z11, z12], [z21, z22]]).tolist()
    # Callers expect a flat 4-element list, not a nested matrix.
    return inverse[0] + inverse[1]
def y_from_s(sdata):
    """Convert a full S-parameter sweep to Y-parameters.

    sdata is [s11_list, s12_list, s21_list, s22_list]; the result has the
    same layout: [y11_list, y12_list, y21_list, y22_list].
    """
    y11_list, y12_list, y21_list, y22_list = [], [], [], []
    for idx in range(len(sdata[0])):
        point = single_y_from_s([sdata[0][idx], sdata[1][idx],
                                 sdata[2][idx], sdata[3][idx]])
        y11_list.append(point[0])
        y12_list.append(point[1])
        y21_list.append(point[2])
        y22_list.append(point[3])
    return [y11_list, y12_list, y21_list, y22_list]
def z_from_s(sdata):
    """Convert a full S-parameter sweep to Z-parameters.

    sdata is [s11_list, s12_list, s21_list, s22_list]; the result has the
    same layout: [z11_list, z12_list, z21_list, z22_list].
    """
    z11_list, z12_list, z21_list, z22_list = [], [], [], []
    for idx in range(len(sdata[0])):
        point = single_z_from_s([sdata[0][idx], sdata[1][idx],
                                 sdata[2][idx], sdata[3][idx]])
        z11_list.append(point[0])
        z12_list.append(point[1])
        z21_list.append(point[2])
        z22_list.append(point[3])
    return [z11_list, z12_list, z21_list, z22_list]
def cga_from_s(freq_data, sdata):
    """Cga (gate-all capacitance) per frequency point, from S-parameters.

    Converts sdata to Y-parameters and applies C = Im(y11) / (2*pi*f)
    pairwise over freq_data and the y11 sweep.
    """
    y11_sweep = y_from_s(sdata)[0]
    return [y.imag / (f * 2 * math.pi) for f, y in zip(freq_data, y11_sweep)]
def cgs_from_s(freq_data, sdata):
    """Cgs (gate-source capacitance) per frequency point, from S-parameters.

    Converts sdata to Y-parameters and applies C = -Im(y) / (2*pi*f)
    pairwise over freq_data and the third sweep (ydata[2]).
    """
    y21_sweep = y_from_s(sdata)[2]
    return [-y.imag / (f * 2 * math.pi) for f, y in zip(freq_data, y21_sweep)]
def cga_from_y(freq_data, ydata):
    """Cga per frequency point, from already-computed Y-parameter sweeps.

    Applies C = Im(y) / (2*pi*f) pairwise over freq_data and ydata[0].
    """
    return [y.imag / (f * 2 * math.pi) for f, y in zip(freq_data, ydata[0])]
def cgs_from_y(freq_data, ydata):
    # Cgs (gate-source capacitance) per frequency point, from precomputed
    # Y-parameter sweeps: applies C = -Im(y) / (2*pi*f) to ydata[2].
    # NOTE(review): the old header comments said "cgs_from_s(freq_data,
    # sdata)" -- the real arguments are freq_data and ydata.
    cgs = []
    for freq, ydatum in zip(freq_data, ydata[2]):
        cgs.append(-ydatum.imag/(freq*2*math.pi))
return cgs | #Matrix_transformations_and_capacitance_obtaining
#Description: Calculation of the 'X,Y' matrixs since 'S' matrix
from __future__ import division
from numpy.linalg import inv
import math
def single_z_from_s(data):
# function single_z_from_s(data) from module DataTransformers.py
# Arguments: data
##Returns a 4 elements list with the z-matrix parameters got from the s-parameters listed in "data"
#with a characteristic impedance value of 50 ohms
s11 = data[0]
s12 = data[1]
s21 = data[2]
s22 = data[3]
delta_s = (1-s11)*(1-s22) - s12*s21
#print delta_s
z0 = 50 # characteristing impendance of 50 ohms
#Elements of the 'Z' matrix
z11 = (((1+s11)*(1-s22)+s12*s21)/delta_s)*z0
z12 = (2*s12*z0)/delta_s
z21 = (2*s21*z0)/delta_s
z22 = (((1-s11)*(1+s22)+s12*s21)/delta_s)*z0
return [z11, z12, z21, z22]
def single_y_from_s(data):
# function single_y_from_s(data) from module DataTransformers.py
# Arguments: data
# #Returns a 4 elements list with the y-matrix parameters got from the s-parameters listed in "data" with a
# characteristic impedance value of 50 ohms
# Put data in matrix form for inversion
s = [data[0], data[1], data[2], data[3]]
z = single_z_from_s(s)
matrix_z = [[z[0], z[1]],[z[2], z[3]]]
matrix_y = inv(matrix_z).tolist()
# Other parts of the program expect a flat list, not a matrix
y = [item for sublist in matrix_y for item in sublist] # list flattening magic
return y
def y_from_s(sdata):
# function y_from_s(sdata) from module DataTransformers.py
# Arguments: sdata
# #Returns a list with the form "ydata = [y11_list, y12_list, y21_list, y22_list]" with the y-matrix parameters got from the sdata matrix(list of lists)
y11_list = []
y12_list = []
y21_list = []
y22_list = []
for idx, d in enumerate(sdata[0]):
s11 = sdata[0][idx]
s12 = sdata[1][idx]
s21 = sdata[2][idx]
s22 = sdata[3][idx]
Y = single_y_from_s([s11, s12, s21, s22])
y11_list.append(Y[0])
y12_list.append(Y[1])
y21_list.append(Y[2])
y22_list.append(Y[3])
ydata = [y11_list, y12_list, y21_list, y22_list]
return ydata
def z_from_s(sdata):
# function z_from_s(sdata) from module DataTransformers.py
# Arguments: sdata
# #Returns a list with the form "zdata = [z11_list, z12_list, z21_list, z22_list]" with the y-matrix parameters got from the sdata matrix(list of lists)
z11_list = []
z12_list = []
z21_list = []
z22_list = []
for idx, d in enumerate(sdata[0]):
s11 = sdata[0][idx]
s12 = sdata[1][idx]
s21 = sdata[2][idx]
s22 = sdata[3][idx]
Z = single_z_from_s([s11, s12, s21, s22])
z11_list.append(Z[0])
z12_list.append(Z[1])
z21_list.append(Z[2])
z22_list.append(Z[3])
zdata = [z11_list, z12_list, z21_list, z22_list]
return zdata
def cga_from_s(freq_data, sdata):
# function cga_from_s(freq_data, sdata) from module DataTransformers.py
# Arguments: freq_data, sdata
# #Returns the list of Cga (gate-all) values got from the sdata matrix for a frecuencya freq
ydata = y_from_s(sdata)
cga = []
for freq, ydatum in zip(freq_data, ydata[0]):
cga.append(ydatum.imag/(freq*2*math.pi))
return cga
def cgs_from_s(freq_data, sdata):
# function cgs_from_s(freq_data, sdata) from module DataTransformers.py
# Arguments: freq_data, sdata
# #Returns the list of Cgs (gate-source) values got from the sdata matrix for a frecuencya freq
ydata = y_from_s(sdata)
cgs = []
for freq, ydatum in zip(freq_data, ydata[2]):
cgs.append(-ydatum.imag/(freq*2*math.pi))
return cgs
def cga_from_y(freq_data, ydata):
# function cga_from_s(freq_data, sdata) from module DataTransformers.py
# Arguments: freq_data, sdata
# #Returns the list of Cga (gate-all) values got from the sdata matrix for a frecuencya freq
cga = []
for freq, ydatum in zip(freq_data, ydata[0]):
cga.append(ydatum.imag/(freq*2*math.pi))
return cga
def cgs_from_y(freq_data, ydata):
# function cgs_from_s(freq_data, sdata) from module DataTransformers.py
# Arguments: freq_data, sdata
# #Returns the list of Cgs (gate-source) values got from the sdata matrix for a frecuencya freq
cgs = []
for freq, ydatum in zip(freq_data, ydata[2]):
cgs.append(-ydatum.imag/(freq*2*math.pi))
return cgs | en | 0.492593 | #Matrix_transformations_and_capacitance_obtaining #Description: Calculation of the 'X,Y' matrixs since 'S' matrix # function single_z_from_s(data) from module DataTransformers.py # Arguments: data ##Returns a 4 elements list with the z-matrix parameters got from the s-parameters listed in "data" #with a characteristic impedance value of 50 ohms #print delta_s # characteristing impendance of 50 ohms #Elements of the 'Z' matrix # function single_y_from_s(data) from module DataTransformers.py # Arguments: data # #Returns a 4 elements list with the y-matrix parameters got from the s-parameters listed in "data" with a # characteristic impedance value of 50 ohms # Put data in matrix form for inversion # Other parts of the program expect a flat list, not a matrix # list flattening magic # function y_from_s(sdata) from module DataTransformers.py # Arguments: sdata # #Returns a list with the form "ydata = [y11_list, y12_list, y21_list, y22_list]" with the y-matrix parameters got from the sdata matrix(list of lists) # function z_from_s(sdata) from module DataTransformers.py # Arguments: sdata # #Returns a list with the form "zdata = [z11_list, z12_list, z21_list, z22_list]" with the y-matrix parameters got from the sdata matrix(list of lists) # function cga_from_s(freq_data, sdata) from module DataTransformers.py # Arguments: freq_data, sdata # #Returns the list of Cga (gate-all) values got from the sdata matrix for a frecuencya freq # function cgs_from_s(freq_data, sdata) from module DataTransformers.py # Arguments: freq_data, sdata # #Returns the list of Cgs (gate-source) values got from the sdata matrix for a frecuencya freq # function cga_from_s(freq_data, sdata) from module DataTransformers.py # Arguments: freq_data, sdata # #Returns the list of Cga (gate-all) values got from the sdata matrix for a frecuencya freq # function cgs_from_s(freq_data, sdata) from module DataTransformers.py # Arguments: freq_data, sdata # #Returns the list of Cgs 
(gate-source) values got from the sdata matrix for a frecuencya freq | 2.798279 | 3 |
setup.py | pqlx/pws-crypto | 1 | 6618353 | from setuptools import setup, find_packages
requirements = [
"hexdump"
]
setup(name="pws",
version="1.0",
install_requires=requirements,
packages=find_packages())
| from setuptools import setup, find_packages
requirements = [
"hexdump"
]
setup(name="pws",
version="1.0",
install_requires=requirements,
packages=find_packages())
| none | 1 | 1.432428 | 1 | |
ncclient/ncclient_api.py | melihteke/ebook_study | 0 | 6618354 | #!/usr/bin/env python3
from ncclient import manager
conn = manager.connect(
host='192.168.178.142',
port=22,
username='admin',
password='<PASSWORD>',
hostkey_verify=False,
device_params={'name': 'nexus'},
look_for_keys=False
)
for value in conn.server_capabilities:
print(value)
conn.close_session()
#Here is the output
#root@server:/home/python_examples/ncclient# python3 first_ncc_example.py
#urn:ietf:params:netconf:capability:writable-running:1.0
#urn:ietf:params:netconf:capability:url:1.0?scheme=file
#urn:ietf:params:netconf:capability:candidate:1.0
#urn:ietf:params:netconf:capability:rollback-on-error:1.0
#urn:ietf:params:netconf:capability:confirmed-commit:1.0
#urn:ietf:params:netconf:base:1.0
#urn:ietf:params:netconf:capability:validate:1.0
#urn:ietf:params:xml:ns:netconf:base:1.0
#root@server:/home/python_examples/ncclient#
#INSTALL ncclient:
#(venv) $ git clone https://github.com/ncclient/ncclient
#(venv) $ cd ncclient/
#(venv) $ python setup.py install
| #!/usr/bin/env python3
from ncclient import manager
conn = manager.connect(
host='192.168.178.142',
port=22,
username='admin',
password='<PASSWORD>',
hostkey_verify=False,
device_params={'name': 'nexus'},
look_for_keys=False
)
for value in conn.server_capabilities:
print(value)
conn.close_session()
#Here is the output
#root@server:/home/python_examples/ncclient# python3 first_ncc_example.py
#urn:ietf:params:netconf:capability:writable-running:1.0
#urn:ietf:params:netconf:capability:url:1.0?scheme=file
#urn:ietf:params:netconf:capability:candidate:1.0
#urn:ietf:params:netconf:capability:rollback-on-error:1.0
#urn:ietf:params:netconf:capability:confirmed-commit:1.0
#urn:ietf:params:netconf:base:1.0
#urn:ietf:params:netconf:capability:validate:1.0
#urn:ietf:params:xml:ns:netconf:base:1.0
#root@server:/home/python_examples/ncclient#
#INSTALL ncclient:
#(venv) $ git clone https://github.com/ncclient/ncclient
#(venv) $ cd ncclient/
#(venv) $ python setup.py install
| en | 0.239888 | #!/usr/bin/env python3 #Here is the output #root@server:/home/python_examples/ncclient# python3 first_ncc_example.py #urn:ietf:params:netconf:capability:writable-running:1.0 #urn:ietf:params:netconf:capability:url:1.0?scheme=file #urn:ietf:params:netconf:capability:candidate:1.0 #urn:ietf:params:netconf:capability:rollback-on-error:1.0 #urn:ietf:params:netconf:capability:confirmed-commit:1.0 #urn:ietf:params:netconf:base:1.0 #urn:ietf:params:netconf:capability:validate:1.0 #urn:ietf:params:xml:ns:netconf:base:1.0 #root@server:/home/python_examples/ncclient# #INSTALL ncclient: #(venv) $ git clone https://github.com/ncclient/ncclient #(venv) $ cd ncclient/ #(venv) $ python setup.py install | 2.156395 | 2 |
learning_sets/cpp_and_c/py_to_so/setup.py | KangChou/deepcv_project_demo | 0 | 6618355 | <reponame>KangChou/deepcv_project_demo<filename>learning_sets/cpp_and_c/py_to_so/setup.py
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize(["hello.py"]))
#setup(ext_modules = cythonize(["hello1.py", "hello2.py","hello3.py"]))
#python setup.py build_ext
#python test_so.py
| from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize(["hello.py"]))
#setup(ext_modules = cythonize(["hello1.py", "hello2.py","hello3.py"]))
#python setup.py build_ext
#python test_so.py | en | 0.269805 | #setup(ext_modules = cythonize(["hello1.py", "hello2.py","hello3.py"])) #python setup.py build_ext #python test_so.py | 1.614134 | 2 |
python/functions/createNN.py | spatialaudio/geq-design | 1 | 6618356 | <filename>python/functions/createNN.py
"""
Function for creating a neural network
Parameters
----------
modelName : string
name of the created model
Returns
-------
returns keras sequential model
Notes
-----
saves the created model with it's name
"""
import tensorflow as tf
modelName = "ModelSmall123"
def create_model():
    """Build the 31-62-62-31 dense network (tanh hidden layers, linear out)."""
    layers = [
        tf.keras.layers.Dense(62, input_shape=(31,), activation='tanh'),
        tf.keras.layers.Dense(62, activation="tanh"),
        tf.keras.layers.Dense(31, activation="linear"),
    ]
    return tf.keras.models.Sequential(layers)
model = create_model()
model.summary()
model.save("Models/new/"+modelName) | <filename>python/functions/createNN.py
"""
Function for creating a neural network
Parameters
----------
modelName : string
name of the created model
Returns
-------
returns keras sequential model
Notes
-----
saves the created model with it's name
"""
import tensorflow as tf
modelName = "ModelSmall123"
def create_model() :
return tf.keras.models.Sequential([
tf.keras.layers.Dense(62,input_shape = (31,),activation='tanh'),
tf.keras.layers.Dense(62,activation="tanh"),
tf.keras.layers.Dense(31,activation="linear"),
])
model = create_model()
model.summary()
model.save("Models/new/"+modelName) | en | 0.749564 | Function for creating a neural network Parameters ---------- modelName : string name of the created model Returns ------- returns keras sequential model Notes ----- saves the created model with it's name | 3.671535 | 4 |
getTextFromPdf/__init__.py | riccardodl/hackaton | 0 | 6618357 | <reponame>riccardodl/hackaton
import logging
#from ..shared_code import database
import azure.functions as func
import csv
import os
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Functions HTTP entry point: look up a barcode in the CSV store.

    Expects a 'barcode' query parameter; responds 400 when it is missing,
    otherwise returns the stored entry for that barcode.
    """
    logging.info('Python HTTP trigger function processed a request.')
    barcode = req.params.get('barcode')
    if not barcode:
        return func.HttpResponse("Missing param",status_code=400)
    logging.info(f"{barcode}")
    db_handler = Database()
    data = db_handler.get_entry(barcode)
    # NOTE(review): Database.get_entry returns the truthy string
    # 'Entry not found' on a miss, so this branch is taken even for an
    # unknown barcode and the else-branch below looks unreachable.
    if data:
        return func.HttpResponse(f"{data}")
    else:
        return func.HttpResponse(
            "This HTTP triggered function executed successfully. Pass a barcode.",
            status_code=200
        )
class Database(object):
    """Tiny CSV-backed store mapping drug names to their JSON payloads.

    Rows have the layout [barcode_id, drug_name, barcode_type, json].
    """

    def __init__(self, filepath='database.csv'):
        # Backing CSV file; parameterized (with the old default) to ease testing.
        self.filepath = filepath

    def get_next_id(self):
        """Return the id for the next row (== current number of rows)."""
        if not os.path.exists(self.filepath):
            return 0
        with open(self.filepath, newline='') as file:
            return sum(1 for _ in csv.reader(file))

    def get_entry(self, drug_name):
        """Return the JSON column of the first row matching drug_name.

        Returns the string 'Entry not found' when there is no match or the
        store file does not exist yet (sentinel kept for backward
        compatibility with existing callers).
        """
        if not os.path.exists(self.filepath):
            return 'Entry not found'
        with open(self.filepath, newline='') as file:
            for row in csv.reader(file):
                if row[1] == drug_name:
                    return row[3]
        return 'Entry not found'

    def put_entry(self, barcode_id, drug_name, barcode_type, json):
        """Append one row to the store.

        Bug fix: the previous version unlinked the whole file before every
        write, so the database could never hold more than one entry and
        get_next_id could only ever return 0 or 1.
        """
        with open(self.filepath, 'a+', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([barcode_id, drug_name, barcode_type, json])
        return None
| import logging
#from ..shared_code import database
import azure.functions as func
import csv
import os
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
barcode = req.params.get('barcode')
if not barcode:
return func.HttpResponse("Missing param",status_code=400)
logging.info(f"{barcode}")
db_handler = Database()
data = db_handler.get_entry(barcode)
if data:
return func.HttpResponse(f"{data}")
else:
return func.HttpResponse(
"This HTTP triggered function executed successfully. Pass a barcode.",
status_code=200
)
class Database(object):
def __init__(self):
self.filepath = 'database.csv'
def get_next_id(self):
if not os.path.exists(self.filepath):
return 0
with open(self.filepath, newline='') as file:
data = csv.reader(file)
count = len(list(data))
return count
def get_entry(self, drug_name):
with open(self.filepath, newline='') as file:
data = csv.reader(file)
for row in data:
if row[1] == drug_name:
return row[3]
return 'Entry not found'
def put_entry(self, barcode_id, drug_name, barcode_type, json):
if os.path.exists(self.filepath):
os.unlink(self.filepath)
with open(self.filepath, 'a+', newline='') as file:
writer = csv.writer(file)
writer.writerow([barcode_id,drug_name,barcode_type,json])
return None | en | 0.217445 | #from ..shared_code import database | 2.560604 | 3 |
zouqi/__init__.py | enhuiz/zouqi | 0 | 6618358 | <reponame>enhuiz/zouqi<filename>zouqi/__init__.py<gh_stars>0
from .core import command, start
| from .core import command, start | none | 1 | 1.092078 | 1 | |
setup.py | eman/tempodb-archive | 0 | 6618359 | <reponame>eman/tempodb-archive<gh_stars>0
# setup.py for tempodb-archive: builds the PyPI long description from the
# README and LICENSE files, then declares the package metadata.
import os
from setuptools import setup

# Directory containing this setup.py; used to locate the doc files.
project_dir = os.path.abspath(os.path.dirname(__file__))
# Concatenate README and LICENSE into one long-description string.
long_descriptions = []
for rst in ('README.rst', 'LICENSE.rst'):
    with open(os.path.join(project_dir, rst), 'r') as f:
        long_descriptions.append(f.read())
setup(name='tempodb-archive',
      version='1.0.0',
      description='Archive TempoDB Datapoints',
      long_description='\n\n'.join(long_descriptions),
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/eman/tempodb-archive',
      license='BSD',
      py_modules=['tempodb_archive'],
      install_requires=['tempodb'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Topic :: Utilities',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2.7',
      ],
      keywords='tempodb archive',
      entry_points={
          'console_scripts': ['tempodb-archive=tempodb_archive:main'],
      })
| import os
from setuptools import setup
project_dir = os.path.abspath(os.path.dirname(__file__))
long_descriptions = []
for rst in ('README.rst', 'LICENSE.rst'):
with open(os.path.join(project_dir, rst), 'r') as f:
long_descriptions.append(f.read())
setup(name='tempodb-archive',
version='1.0.0',
description='Archive TempoDB Datapoints',
long_description='\n\n'.join(long_descriptions),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/eman/tempodb-archive',
license='BSD',
py_modules=['tempodb_archive'],
install_requires=['tempodb'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
keywords='tempodb archive',
entry_points={
'console_scripts': ['tempodb-archive=tempodb_archive:main'],
}) | none | 1 | 1.414923 | 1 | |
code/abc160_b_02.py | KoyanagiHitoshi/AtCoder | 3 | 6618360 | <reponame>KoyanagiHitoshi/AtCoder
# Read X, then score 1000 per full 500 in X plus 5 per full 5 of the
# remainder (X mod 500). Integer division drops any leftover.
X = int(input())
print(X//500*1000+X % 500//5*5)
print(X//500*1000+X % 500//5*5) | none | 1 | 3.326185 | 3 | |
Dashboard/urls.py | m-motawea/PyFaaS | 2 | 6618361 | from handlers.login_handler import LoginHandler
from handlers.logout_handler import LogoutHandler
from handlers.register_handler import RegisterHandler
from handlers.upload_handler import UploadHandler
from handlers.dashboard_handler import DashboardHandler
from handlers.execution_handler import ExecutionHandler
def buildUrls(services_conf=None):
    """Assemble the Tornado URL spec list for the PyFaaS dashboard.

    services_conf must provide 'database' and 'broker' entries; each route
    receives the database config plus whatever extras its handler needs.
    NOTE(review): the default services_conf=None would fail on subscripting;
    callers are expected to always pass a populated dict.
    """
    db = services_conf['database']
    broker = services_conf['broker']
    session_kwargs = {'config': db, 'session': True}
    return [
        (r"/pyfaas/upload", UploadHandler,
         {'config': db, 'session': True, 'broker': broker}),
        (r"/pyfaas/function/(.*)", ExecutionHandler,
         {'config': db, 'broker': broker}),
        (r"/pyfaas/login", LoginHandler, dict(session_kwargs)),
        (r"/pyfaas/logout", LogoutHandler, dict(session_kwargs)),
        (r"/pyfaas/register", RegisterHandler, dict(session_kwargs)),
        (r"/pyfaas/functions", DashboardHandler, dict(session_kwargs)),
    ]
return url_patterns | from handlers.login_handler import LoginHandler
from handlers.logout_handler import LogoutHandler
from handlers.register_handler import RegisterHandler
from handlers.upload_handler import UploadHandler
from handlers.dashboard_handler import DashboardHandler
from handlers.execution_handler import ExecutionHandler
def buildUrls(services_conf=None):
url_patterns = [
(r"/pyfaas/upload", UploadHandler, {'config': services_conf['database'], 'session': True, 'broker': services_conf['broker']}),
(r"/pyfaas/function/(.*)", ExecutionHandler, {'config': services_conf['database'], 'broker': services_conf['broker']}),
(r"/pyfaas/login", LoginHandler, {'config': services_conf['database'], 'session': True}),
(r"/pyfaas/logout", LogoutHandler, {'config': services_conf['database'], 'session': True}),
(r"/pyfaas/register", RegisterHandler, {'config': services_conf['database'], 'session': True}),
(r"/pyfaas/functions", DashboardHandler, {'config': services_conf['database'], 'session': True})
]
return url_patterns | none | 1 | 2.052776 | 2 | |
src/utils.py | akshaykurmi/stylized-image-captioning | 0 | 6618362 | <filename>src/utils.py
import logging
import os
import random
import numpy as np
import tensorflow as tf
logger = logging.getLogger(__name__)
def set_seed(seed):
    """Seed Python, NumPy, and TensorFlow RNGs for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
def init_logging(log_dir):
    """Route INFO-level logging to stdout and <log_dir>/run.log.

    Also silences TensorFlow's own logger down to ERROR.
    """
    os.makedirs(log_dir, exist_ok=True)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handlers = [
        logging.StreamHandler(),
        logging.FileHandler(os.path.join(log_dir, "run.log"), mode='w'),
    ]
    for handler in handlers:
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
    logging.basicConfig(level=logging.INFO, handlers=handlers)
    tf.get_logger().setLevel(logging.ERROR)
class MultiCheckpointManager:
    """Bundle several named tf.train.CheckpointManagers behind one interface.

    config maps a checkpoint name to the dict of trackable objects saved
    under that name; each name gets its own subdirectory of checkpoints_dir.
    """
    def __init__(self, checkpoints_dir, config):
        self.checkpoints = {}
        self.checkpoint_managers = {}
        for checkpoint_name, objects_to_save in config.items():
            checkpoint = tf.train.Checkpoint(**objects_to_save)
            manager = tf.train.CheckpointManager(checkpoint, os.path.join(checkpoints_dir, checkpoint_name),
                                                 max_to_keep=1, keep_checkpoint_every_n_hours=0.5,
                                                 checkpoint_name=checkpoint_name)
            self.checkpoints[checkpoint_name] = checkpoint
            self.checkpoint_managers[checkpoint_name] = manager
    def restore(self, checkpoint_names_and_numbers):
        """Restore specific numbered checkpoints, e.g. {'generator': 12}."""
        for checkpoint_name, checkpoint_number in checkpoint_names_and_numbers.items():
            # Rebuild the prefix path the manager used for that save number.
            # NOTE(review): relies on the private _checkpoint_prefix attribute.
            checkpoint_path = f"{self.checkpoint_managers[checkpoint_name]._checkpoint_prefix}-{checkpoint_number}"
            self.checkpoints[checkpoint_name].restore(checkpoint_path)
    def restore_latest(self):
        """Restore every checkpoint from its manager's latest saved state."""
        for checkpoint_name in self.checkpoints.keys():
            self.checkpoints[checkpoint_name].restore(self.checkpoint_managers[checkpoint_name].latest_checkpoint)
    def save(self, checkpoint_names):
        """Save only the checkpoints listed in checkpoint_names."""
        for checkpoint_name in checkpoint_names:
            self.checkpoint_managers[checkpoint_name].save()
| <filename>src/utils.py
import logging
import os
import random
import numpy as np
import tensorflow as tf
logger = logging.getLogger(__name__)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def init_logging(log_dir):
os.makedirs(log_dir, exist_ok=True)
ch = logging.StreamHandler()
fh = logging.FileHandler(os.path.join(log_dir, "run.log"), mode='w')
ch.setLevel(logging.INFO)
fh.setLevel(logging.INFO)
log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(log_format)
fh.setFormatter(log_format)
logging.basicConfig(level=logging.INFO, handlers=[ch, fh])
tf.get_logger().setLevel(logging.ERROR)
class MultiCheckpointManager:
def __init__(self, checkpoints_dir, config):
self.checkpoints = {}
self.checkpoint_managers = {}
for checkpoint_name, objects_to_save in config.items():
checkpoint = tf.train.Checkpoint(**objects_to_save)
manager = tf.train.CheckpointManager(checkpoint, os.path.join(checkpoints_dir, checkpoint_name),
max_to_keep=1, keep_checkpoint_every_n_hours=0.5,
checkpoint_name=checkpoint_name)
self.checkpoints[checkpoint_name] = checkpoint
self.checkpoint_managers[checkpoint_name] = manager
def restore(self, checkpoint_names_and_numbers):
for checkpoint_name, checkpoint_number in checkpoint_names_and_numbers.items():
checkpoint_path = f"{self.checkpoint_managers[checkpoint_name]._checkpoint_prefix}-{checkpoint_number}"
self.checkpoints[checkpoint_name].restore(checkpoint_path)
def restore_latest(self):
for checkpoint_name in self.checkpoints.keys():
self.checkpoints[checkpoint_name].restore(self.checkpoint_managers[checkpoint_name].latest_checkpoint)
def save(self, checkpoint_names):
for checkpoint_name in checkpoint_names:
self.checkpoint_managers[checkpoint_name].save()
| none | 1 | 2.294872 | 2 | |
isimip_data/search/models.py | ISI-MIP/isimip-data | 3 | 6618363 | from django.db import models
class Facet(models.Model):
    """A search facet backed by a dataset attribute, with a manual sort order."""
    # Human-readable label shown in the UI.
    title = models.CharField(max_length=32)
    # Name of the attribute this facet filters on.
    attribute = models.CharField(max_length=32)
    # Manual sort position; lower values are listed first (see Meta.ordering).
    order = models.PositiveIntegerField(default=0)

    class Meta:
        ordering = ['order']

    def __str__(self):
        return self.title
| from django.db import models
class Facet(models.Model):
title = models.CharField(max_length=32)
attribute = models.CharField(max_length=32)
order = models.PositiveIntegerField(default=0)
class Meta:
ordering = ['order']
def __str__(self):
return self.title
| none | 1 | 2.224015 | 2 | |
tornado/server.py | sizxy3462g5829bz/mantistable4-modified | 0 | 6618364 | from tornado.ioloop import IOLoop
from tornado.options import define, parse_command_line, options
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler, WebSocketClosedError
from tornado.httpserver import HTTPServer
from collections import defaultdict
from urllib.parse import urlparse
import logging
import signal
import time
import json
define('debug', default=False, type=bool, help='Run in debug mode')
define('restport', default=5000, type=int, help='Server rest port')
define('wsport', default=5001, type=int, help='Server websocket port')
define('allowed_hosts', default="localhost:5001", multiple=True,
help='Allowed hosts for cross domain connections')
ws_app = None
class ClientHandler(WebSocketHandler):
    """Per-connection WebSocket handler that subscribes a peer to a channel."""
    def __init__(self, *args):
        super().__init__(*args)
        # Channel this connection is subscribed to; set in open().
        self.channel = None
    # TODO: Check this
    def check_origin(self, origin):
        # NOTE(review): accepts every origin, bypassing the allow-list logic
        # kept in the string literal below -- confirm this is intended
        # outside local development.
        return True
    """
    allowed = super().check_origin(origin)
    parsed = urlparse(origin.lower())
    matched = any(parsed.netloc == host for host in options.allowed_hosts)
    print(allowed, parsed, matched)
    return options.debug or allowed or matched
    """
    def open(self, channel):
        # Register this connection under the channel taken from the URL path.
        self.application.add_subscriber(self, channel)
        self.channel = channel
    def on_message(self, message):
        # Incoming client messages are ignored (the broadcast path below is
        # commented out); this server only pushes.
        pass
        #print("Received", message)
        #self.application.broadcast(self.channel, message)
    def on_close(self):
        self.application.remove_subscriber(self)
class FrontSyncApplication(Application):
    """WebSocket application that fans messages out to per-channel subscribers."""

    def __init__(self, **kwargs):
        super().__init__([
            (r'/ws/(?P<channel>[a-z0-9]+)', ClientHandler),
        ], **kwargs)
        # Maps each connected handler to the channel it subscribed to.
        self.subscriptions = {}

    def broadcast(self, channel, message):
        """Send message to every live subscriber of channel, pruning dead peers."""
        print("broadcast this!")
        for peer in self.get_subscribers(channel):
            try:
                peer.write_message(message)
            except WebSocketClosedError:
                # The connection went away without a clean close; drop it.
                self.remove_subscriber(peer)

    def add_subscriber(self, subscriber, channel):
        self.subscriptions[subscriber] = channel

    def remove_subscriber(self, subscriber):
        # pop with default: removing an unknown subscriber is a no-op.
        self.subscriptions.pop(subscriber, None)

    def get_subscribers(self, channel):
        return [peer for peer, chan in self.subscriptions.items() if chan == channel]
class CommandHandler(RequestHandler):
    """REST endpoint: POST {"channel": ..., "payload": ...} to broadcast."""
    def post(self):
        raw = self.request.body.decode("utf-8")
        if len(raw) == 0:
            return self.write({
                'status': 'bad request',
                'request': raw
            })
        data = json.loads(raw)
        if "channel" in data and "payload" in data:
            channel = data["channel"]
            payload = data["payload"]
            # Relay the payload to every WebSocket subscriber of the channel
            # via the module-level ws_app application.
            ws_app.broadcast(channel, json.dumps(payload))
            self.write({
                'status': 'ok',
                'request': data
            })
        else:
            self.write({
                'status': 'bad request',
                'request': data
            })
def shutdown(server):
    """Stop the HTTP server, then stop the IOLoop after a 1.5 s grace period."""
    ioloop = IOLoop.instance()
    logging.info('Stopping server.')
    server.stop()
    def finalize():
        # Runs on the IOLoop after the grace period expires.
        ioloop.stop()
        logging.info('Stopped.')
    ioloop.add_timeout(time.time() + 1.5, finalize)
if __name__ == "__main__":
parse_command_line()
ws_app = FrontSyncApplication(debug=options.debug)
ws_server = HTTPServer(ws_app)
ws_server.listen(options.wsport)
app = Application([("/", CommandHandler)], debug=True)
app.listen(options.restport)
#signal.signal(signal.SIGINT, lambda sig, frame: shutdown(ws_server))
logging.info('Starting rest server on localhost:{}'.format(options.restport))
logging.info('Starting ws server on localhost:{}'.format(options.wsport))
IOLoop.instance().start()
| from tornado.ioloop import IOLoop
from tornado.options import define, parse_command_line, options
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler, WebSocketClosedError
from tornado.httpserver import HTTPServer
from collections import defaultdict
from urllib.parse import urlparse
import logging
import signal
import time
import json
define('debug', default=False, type=bool, help='Run in debug mode')
define('restport', default=5000, type=int, help='Server rest port')
define('wsport', default=5001, type=int, help='Server websocket port')
define('allowed_hosts', default="localhost:5001", multiple=True,
help='Allowed hosts for cross domain connections')
ws_app = None
class ClientHandler(WebSocketHandler):
    """Websocket connection for a single subscriber on one channel."""

    def __init__(self, *args):
        super().__init__(*args)
        self.channel = None  # set in open()

    # TODO: Check this
    def check_origin(self, origin):
        # NOTE(review): accepts every origin; the stricter host check below
        # is disabled -- confirm this is intended outside of development.
        return True
    """
    allowed = super().check_origin(origin)
    parsed = urlparse(origin.lower())
    matched = any(parsed.netloc == host for host in options.allowed_hosts)
    print(allowed, parsed, matched)
    return options.debug or allowed or matched
    """

    def open(self, channel):
        # Register this connection as a subscriber of *channel*.
        self.application.add_subscriber(self, channel)
        self.channel = channel

    def on_message(self, message):
        # Incoming client messages are ignored; traffic is one-way
        # (REST command -> broadcast -> clients).
        pass
        #print("Received", message)
        #self.application.broadcast(self.channel, message)

    def on_close(self):
        self.application.remove_subscriber(self)
class FrontSyncApplication(Application):
    """Tornado app that tracks websocket subscribers per channel and
    fans broadcast messages out to them."""

    def __init__(self, **kwargs):
        routes = [
            (r'/ws/(?P<channel>[a-z0-9]+)', ClientHandler),
        ]
        super().__init__(routes, **kwargs)
        # Maps subscriber (ClientHandler) -> channel name.
        self.subscriptions = {}

    def broadcast(self, channel, message):
        """Send *message* to every live subscriber of *channel*, dropping
        peers whose connection has closed."""
        # BUG FIX: leftover debug print replaced with proper logging.
        logging.debug('broadcasting on channel %s', channel)
        peers = self.get_subscribers(channel)
        for peer in peers:
            try:
                peer.write_message(message)
            except WebSocketClosedError:
                # Remove dead peer
                self.remove_subscriber(peer)

    def add_subscriber(self, subscriber, channel):
        self.subscriptions[subscriber] = channel

    def remove_subscriber(self, subscriber):
        if subscriber in self.subscriptions:
            self.subscriptions.pop(subscriber)

    def get_subscribers(self, channel):
        """Subscribers of *channel*, returned as a new list so the registry
        can be mutated while the caller iterates."""
        return [sub for sub, chan in self.subscriptions.items() if chan == channel]
class CommandHandler(RequestHandler):
    """REST endpoint: POST a JSON body ``{"channel": ..., "payload": ...}``
    to broadcast the payload to every websocket subscriber of the channel."""

    def post(self):
        raw = self.request.body.decode("utf-8")
        if not raw:
            return self.write({
                'status': 'bad request',
                'request': raw
            })
        try:
            data = json.loads(raw)
        except ValueError:
            # BUG FIX: malformed JSON used to raise an uncaught ValueError
            # (HTTP 500); report it as a bad request instead.
            return self.write({
                'status': 'bad request',
                'request': raw
            })
        if "channel" in data and "payload" in data:
            # Re-serialize only the payload for the websocket clients.
            ws_app.broadcast(data["channel"], json.dumps(data["payload"]))
            self.write({
                'status': 'ok',
                'request': data
            })
        else:
            self.write({
                'status': 'bad request',
                'request': data
            })
def shutdown(server):
    """Stop *server*, then halt the IOLoop roughly 1.5 s later so
    in-flight work can drain."""
    loop = IOLoop.instance()
    logging.info('Stopping server.')
    server.stop()

    def _finalize():
        loop.stop()
        logging.info('Stopped.')

    loop.add_timeout(time.time() + 1.5, _finalize)
if __name__ == "__main__":
parse_command_line()
ws_app = FrontSyncApplication(debug=options.debug)
ws_server = HTTPServer(ws_app)
ws_server.listen(options.wsport)
app = Application([("/", CommandHandler)], debug=True)
app.listen(options.restport)
#signal.signal(signal.SIGINT, lambda sig, frame: shutdown(ws_server))
logging.info('Starting rest server on localhost:{}'.format(options.restport))
logging.info('Starting ws server on localhost:{}'.format(options.wsport))
IOLoop.instance().start()
| en | 0.499697 | # TODO: Check this allowed = super().check_origin(origin) parsed = urlparse(origin.lower()) matched = any(parsed.netloc == host for host in options.allowed_hosts) print(allowed, parsed, matched) return options.debug or allowed or matched #print("Received", message) #self.application.broadcast(self.channel, message) # Remove dead peer #signal.signal(signal.SIGINT, lambda sig, frame: shutdown(ws_server)) | 2.158677 | 2 |
project/__init__.py | bharathjinka09/flask-pytest-registration | 0 | 6618365 | <reponame>bharathjinka09/flask-pytest-registration<filename>project/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
#######################
#### Configuration ####
#######################
# Create the instances of the Flask extensions (flask-sqlalchemy, flask-login, etc.) in
# the global scope, but without any arguments passed in. These instances are not attached
# to the application at this point.
db = SQLAlchemy()
login = LoginManager()
login.login_view = "users.login"
######################################
#### Application Factory Function ####
######################################
def create_app(config_filename=None):
    """Application factory: build, configure and return the Flask app.

    Args:
        config_filename: name of a config file inside the instance folder.
            When None, no file-based configuration is loaded (previously
            this crashed inside ``from_pyfile(None)``).
    """
    app = Flask(__name__, instance_relative_config=True)
    if config_filename is not None:
        app.config.from_pyfile(config_filename)
    initialize_extensions(app)
    register_blueprints(app)
    return app
##########################
#### Helper Functions ####
##########################
def initialize_extensions(app):
    """Bind the module-level Flask extensions (SQLAlchemy, LoginManager)
    to the freshly created application instance."""
    # Since the application instance is now created, pass it to each Flask
    # extension instance to bind it to the Flask application instance (app)
    db.init_app(app)
    login.init_app(app)

    # Flask-Login configuration
    from project.models import User

    @login.user_loader
    def load_user(user_id):
        # Flask-Login stores the user id as a string; cast back to int
        # before querying.
        return User.query.filter(User.id == int(user_id)).first()
def register_blueprints(app):
    """Attach each of the application's blueprints to *app*."""
    # Blueprints are imported here, after the application instance exists.
    from project.recipes import recipes_blueprint
    from project.users import users_blueprint

    for blueprint in (recipes_blueprint, users_blueprint):
        app.register_blueprint(blueprint)
| from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
#######################
#### Configuration ####
#######################
# Create the instances of the Flask extensions (flask-sqlalchemy, flask-login, etc.) in
# the global scope, but without any arguments passed in. These instances are not attached
# to the application at this point.
db = SQLAlchemy()
login = LoginManager()
login.login_view = "users.login"
######################################
#### Application Factory Function ####
######################################
def create_app(config_filename=None):
    """Application factory: build, configure and return the Flask app.

    Args:
        config_filename: name of a config file inside the instance folder.
            When None, no file-based configuration is loaded (previously
            this crashed inside ``from_pyfile(None)``).
    """
    app = Flask(__name__, instance_relative_config=True)
    if config_filename is not None:
        app.config.from_pyfile(config_filename)
    initialize_extensions(app)
    register_blueprints(app)
    return app
##########################
#### Helper Functions ####
##########################
def initialize_extensions(app):
    """Bind the module-level Flask extensions (SQLAlchemy, LoginManager)
    to the freshly created application instance."""
    # Since the application instance is now created, pass it to each Flask
    # extension instance to bind it to the Flask application instance (app)
    db.init_app(app)
    login.init_app(app)

    # Flask-Login configuration
    from project.models import User

    @login.user_loader
    def load_user(user_id):
        # Flask-Login stores the user id as a string; cast back to int
        # before querying.
        return User.query.filter(User.id == int(user_id)).first()
def register_blueprints(app):
    """Attach each of the application's blueprints to *app*."""
    # Blueprints are imported here, after the application instance exists.
    from project.recipes import recipes_blueprint
    from project.users import users_blueprint

    for blueprint in (recipes_blueprint, users_blueprint):
        app.register_blueprint(blueprint)
mrigtlbridge/mr_igtl_bridge_window.py | ProstateBRP/mr_igtl_bridge | 1 | 6618366 | <filename>mrigtlbridge/mr_igtl_bridge_window.py
import os, time, json, sys
from datetime import datetime
from PyQt5 import QtCore, QtGui, QtWidgets
from . import widget_base
# ------------------------------------MAIN WINDOW------------------------------------
class MainWindow(QtWidgets.QWidget):
    """Top-level widget hosting an OpenIGTLink panel on the left and a
    scanner panel on the right, separated by a vertical line."""

    def __init__(self, *args):
        super().__init__(*args)
        self.leftWidget = None
        self.rightWidget = None
        self.title = "MRI OpenIGTLink Bridge"

    def setTitle(self, title):
        """Store the window title used later by setup()."""
        self.title = title

    def setLeftWidget(self, widget):
        """Store the builder for the left (OpenIGTLink) panel."""
        self.leftWidget = widget

    def setRightWidget(self, widget):
        """Store the builder for the right (scanner) panel."""
        self.rightWidget = widget

    def setup(self):
        """Build the window: left panel | separator | right panel."""
        self.setWindowTitle(self.title)

        layout = QtWidgets.QHBoxLayout()
        self.setLayout(layout)

        # Left panel (OpenIGTLink).
        left_container = QtWidgets.QWidget()
        layout.addWidget(left_container)
        self.leftWidget.buildGUI(left_container)

        # Vertical separator between the two panels.
        separator = QtWidgets.QFrame()
        separator.setFrameShape(QtWidgets.QFrame.VLine)
        separator.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(separator)

        # Right panel (scanner).
        right_container = QtWidgets.QWidget()
        layout.addWidget(right_container)
        self.rightWidget.buildGUI(right_container)
self.rightWidget.buildGUI(rightWidget)
| <filename>mrigtlbridge/mr_igtl_bridge_window.py
import os, time, json, sys
from datetime import datetime
from PyQt5 import QtCore, QtGui, QtWidgets
from . import widget_base
# ------------------------------------MAIN WINDOW------------------------------------
class MainWindow(QtWidgets.QWidget):
    """Top-level widget hosting an OpenIGTLink panel on the left and a
    scanner panel on the right, separated by a vertical line."""

    def __init__(self, *args):
        super().__init__(*args)
        self.leftWidget = None
        self.rightWidget = None
        self.title = "MRI OpenIGTLink Bridge"

    def setTitle(self, title):
        """Store the window title used later by setup()."""
        self.title = title

    def setLeftWidget(self, widget):
        """Store the builder for the left (OpenIGTLink) panel."""
        self.leftWidget = widget

    def setRightWidget(self, widget):
        """Store the builder for the right (scanner) panel."""
        self.rightWidget = widget

    def setup(self):
        """Build the window: left panel | separator | right panel."""
        self.setWindowTitle(self.title)

        layout = QtWidgets.QHBoxLayout()
        self.setLayout(layout)

        # Left panel (OpenIGTLink).
        left_container = QtWidgets.QWidget()
        layout.addWidget(left_container)
        self.leftWidget.buildGUI(left_container)

        # Vertical separator between the two panels.
        separator = QtWidgets.QFrame()
        separator.setFrameShape(QtWidgets.QFrame.VLine)
        separator.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(separator)

        # Right panel (scanner).
        right_container = QtWidgets.QWidget()
        layout.addWidget(right_container)
        self.rightWidget.buildGUI(right_container)
| en | 0.543198 | # ------------------------------------MAIN WINDOW------------------------------------ # --- Left Layout (OpenIGTLink) --- # Separator # --- Right Layout (Scanner) --- | 2.395803 | 2 |
prediction/__init__.py | idaholab/Deep-Lynx-ML-Adapter | 0 | 6618367 | <reponame>idaholab/Deep-Lynx-ML-Adapter
# Copyright 2021, Battelle Energy Alliance, LLC
from .ml_prediction import ML_Prediction
| # Copyright 2021, Battelle Energy Alliance, LLC
from .ml_prediction import ML_Prediction | en | 0.693537 | # Copyright 2021, Battelle Energy Alliance, LLC | 0.877541 | 1 |
code/examples/03-pwm/L298N_two_pin.py | yuanyanhui/intro-upy-esp32 | 0 | 6618368 | <gh_stars>0
from machine import Pin, PWM
import time
# MicroPython script: drive a motor via two PWM inputs of an H-bridge
# driver (L298N-style, per the filename -- TODO confirm wiring).
IN1 = PWM(Pin(22), freq = 1000)
IN2 = PWM(Pin(21), freq = 1000)

while True:
    # Full speed forward (duty 1023 = 100%)
    IN1.duty(1023)
    IN2.duty(0)
    time.sleep(2)
    # Stop
    IN1.duty(0)
    IN2.duty(0)
    time.sleep(1)
    # Half speed reverse (duty 512 = 50%)
    IN1.duty(0)
    IN2.duty(512)
    time.sleep(2)
    # Stop
    IN1.duty(0)
    IN2.duty(0)
    time.sleep(1)
| from machine import Pin, PWM
import time
# MicroPython script: drive a motor via two PWM inputs of an H-bridge
# driver (L298N-style, per the filename -- TODO confirm wiring).
IN1 = PWM(Pin(22), freq = 1000)
IN2 = PWM(Pin(21), freq = 1000)

while True:
    # Full speed forward (duty 1023 = 100%)
    IN1.duty(1023)
    IN2.duty(0)
    time.sleep(2)
    # Stop
    IN1.duty(0)
    IN2.duty(0)
    time.sleep(1)
    # Half speed reverse (duty 512 = 50%)
    IN1.duty(0)
    IN2.duty(512)
    time.sleep(2)
    # Stop
    IN1.duty(0)
    IN2.duty(0)
    time.sleep(1)
resonances/config.py | apetrov/resonances | 4 | 6618369 | <gh_stars>1-10
import json
from pathlib import Path
def static_init(cls):
    """Class decorator: call ``cls.static_init()`` once at class creation
    time, if the class defines such a hook, then return the class."""
    initializer = getattr(cls, "static_init", None)
    if initializer:
        initializer()
    return cls
@static_init
class config:
    """Process-wide configuration loaded once, at import time, from the
    ``config.json`` file that lives next to this module."""

    config = None  # populated by static_init()

    @classmethod
    def get(cls, key):
        """Return the value stored under *key*; unknown keys raise with a
        dump of the full configuration."""
        try:
            return cls.config[key]
        except KeyError:
            raise Exception('There is no config with key = {}. The full config: {}'.format(key, json.dumps(cls.config)))

    @classmethod
    def has(cls, key):
        """True when *key* exists in the loaded configuration."""
        return key in cls.config

    @classmethod
    def set(cls, key, value):
        """Overwrite an existing key; keys not present are rejected."""
        if not cls.has(key):
            raise Exception('There is no config with key = {}. The full config: {}'.format(key, json.dumps(cls.config)))
        cls.config[key] = value

    @classmethod
    def static_init(cls):
        """Load config.json from this package's directory (invoked by the
        @static_init decorator at import time)."""
        config_file_dir = Path(__file__).parent.resolve()
        config_file_path = '{}/config.json'.format(str(config_file_dir))
        if not Path(config_file_path).exists():
            raise Exception('No config.json presented. Looking at {} Cannot continue working.'.format(config_file_path))
        with open(config_file_path, "r") as read_file:
            cls.config = json.load(read_file)
| import json
from pathlib import Path
def static_init(cls):
    """Class decorator: call ``cls.static_init()`` once at class creation
    time, if the class defines such a hook, then return the class."""
    initializer = getattr(cls, "static_init", None)
    if initializer:
        initializer()
    return cls
@static_init
class config:
    """Process-wide configuration loaded once, at import time, from the
    ``config.json`` file that lives next to this module."""

    config = None  # populated by static_init()

    @classmethod
    def get(cls, key):
        """Return the value stored under *key*; unknown keys raise with a
        dump of the full configuration."""
        try:
            return cls.config[key]
        except KeyError:
            raise Exception('There is no config with key = {}. The full config: {}'.format(key, json.dumps(cls.config)))

    @classmethod
    def has(cls, key):
        """True when *key* exists in the loaded configuration."""
        return key in cls.config

    @classmethod
    def set(cls, key, value):
        """Overwrite an existing key; keys not present are rejected."""
        if not cls.has(key):
            raise Exception('There is no config with key = {}. The full config: {}'.format(key, json.dumps(cls.config)))
        cls.config[key] = value

    @classmethod
    def static_init(cls):
        """Load config.json from this package's directory (invoked by the
        @static_init decorator at import time)."""
        config_file_dir = Path(__file__).parent.resolve()
        config_file_path = '{}/config.json'.format(str(config_file_dir))
        if not Path(config_file_path).exists():
            raise Exception('No config.json presented. Looking at {} Cannot continue working.'.format(config_file_path))
        with open(config_file_path, "r") as read_file:
            cls.config = json.load(read_file)
tasks/task2/task_2_1.py | eweilow/si1336-simulation-and-modeling | 0 | 6618370 | <reponame>eweilow/si1336-simulation-and-modeling
import matplotlib.pyplot as plt
def run(r, x0, doRound=False, n_steps=200):
    """Iterate the logistic map x -> 4*r*x*(1-x) and record the trajectory.

    Args:
        r: growth parameter of the map.
        x0: initial value.
        doRound: if True, round x to 6 decimals after every update
            (models finite precision).
        n_steps: number of iterations recorded; the default 200 keeps
            the original hard-coded behavior.

    Returns:
        (steps, xvals): the step range and the trajectory, where
        xvals[i] is the value *before* step i's update.
    """
    steps = range(n_steps)
    xvals = []
    x = x0
    for _ in steps:
        xvals.append(x)
        x = 4 * r * x * (1 - x)
        if doRound:
            # emulate limited floating-point precision
            x = round(x, 6)
    return steps, xvals
| import matplotlib.pyplot as plt
def run(r, x0, doRound=False, n_steps=200):
    """Iterate the logistic map x -> 4*r*x*(1-x) and record the trajectory.

    Args:
        r: growth parameter of the map.
        x0: initial value.
        doRound: if True, round x to 6 decimals after every update
            (models finite precision).
        n_steps: number of iterations recorded; the default 200 keeps
            the original hard-coded behavior.

    Returns:
        (steps, xvals): the step range and the trajectory, where
        xvals[i] is the value *before* step i's update.
    """
    steps = range(n_steps)
    xvals = []
    x = x0
    for _ in steps:
        xvals.append(x)
        x = 4 * r * x * (1 - x)
        if doRound:
            # emulate limited floating-point precision
            x = round(x, 6)
    return steps, xvals
server/board/migrations/0002_alter_store_biz_num.py | Park-Young-Hun/Courageous-Developer-Server | 29 | 6618371 | <reponame>Park-Young-Hun/Courageous-Developer-Server
# Generated by Django 3.2.6 on 2021-09-06 07:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Store.biz_num to an optional
    # CharField(max_length=45) (blank and null allowed).

    dependencies = [
        ('board', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='store',
            name='biz_num',
            field=models.CharField(blank=True, db_column='biz_num', max_length=45, null=True),
        ),
    ]
| # Generated by Django 3.2.6 on 2021-09-06 07:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Store.biz_num to an optional
    # CharField(max_length=45) (blank and null allowed).

    dependencies = [
        ('board', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='store',
            name='biz_num',
            field=models.CharField(blank=True, db_column='biz_num', max_length=45, null=True),
        ),
    ]
iolink/interfaces/iqlink/iqlink.py | trinamic/iolink | 8 | 6618372 | <filename>iolink/interfaces/iqlink/iqlink.py
from iolink.port import PortABC, IsduError
import ctypes
import ctypes.util
import os
import re
import sys
# Maximum length of the device serial number field in the C struct below.
MST_DEV_SER_NUM_MAX_LEN = 16

_iqcomm_lib = None
# Locate and load iqcomm.dll (Windows only). Search order:
# 1) the directory of the main script, 2) the directory of this module,
# 3) the system search path via ctypes.util.find_library.
if sys.platform == 'win32':
    if hasattr(sys.modules['__main__'], '__file__'):
        # there might be no main file, e.g. in interactive interpreter mode
        main_file_directory = os.path.dirname(os.path.abspath(sys.modules['__main__'].__file__))
        if os.path.isfile(os.path.join(main_file_directory, 'iqcomm.dll')):
            _iqcomm_lib = ctypes.windll.LoadLibrary(os.path.join(main_file_directory, 'iqcomm.dll'))
    if _iqcomm_lib is None:
        this_files_directory = os.path.dirname(__file__)
        if os.path.isfile(os.path.join(this_files_directory, 'iqcomm.dll')):
            _iqcomm_lib = ctypes.windll.LoadLibrary(os.path.join(this_files_directory, 'iqcomm.dll'))
        elif ctypes.util.find_library('iqcomm.dll') is not None:
            _iqcomm_lib = ctypes.windll.LoadLibrary(ctypes.util.find_library('iqcomm.dll'))
class MstConfigT(ctypes.Structure):
    # ctypes mirror of the iqcomm master-configuration struct.
    # Field order and widths must match the C definition exactly.
    _fields_ = [
        ('stackVersion', ctypes.c_uint16),
        ('cycleTimeOperate', ctypes.c_uint16),
        ('restOfCycleTimeOperate', ctypes.c_uint16),
        ('revisionID', ctypes.c_int),  # enum
        ('inspectionLevel', ctypes.c_int),  # enum
        ('deviceVendorID', ctypes.c_uint32),
        ('deviceID', ctypes.c_uint32),
        ('deviceFunctionID', ctypes.c_uint16),
        ('deviceSerialNumber', ctypes.c_uint8 * (MST_DEV_SER_NUM_MAX_LEN + 1)),
        ('deviceSerialNumberLen', ctypes.c_uint8),
        ('realBaudrate', ctypes.c_int),  # enum
        ('dsActivState', ctypes.c_int),  # enum
        ('dsUploadEnable', ctypes.c_bool),
        ('dsDownloadEnable', ctypes.c_bool),
    ]
class IqLinkPort(PortABC):
    """IO-Link master port backed by the iqLink ``iqcomm.dll`` library
    (Windows only).

    Library calls return a signed int16; a negative value signals an
    error whose text is written into ``self._error_msg_buffer``.
    """

    # mst_OperModeT -- operating-mode values requested from the C API.
    op_modes = {
        'INACTIVE': 0,
        'AUTO': 3,
        'PREOPERATE': 4,
        'OPERATE': 5,
    }

    # mst_StateT -- actual state values reported by the C API.
    op_states = {
        'INACTIVE': 0,
        'CHK_FAULT': 3,
        'PREOPERATE': 4,
        'OPERATE': 5,
    }

    def __init__(self, **kwargs):
        """Verify platform/library availability and connect to the master.

        Raises:
            FileNotFoundError: iqcomm.dll could not be located.
            NotImplementedError: not running on Windows.
        """
        if sys.platform == 'win32':
            if _iqcomm_lib is None:
                raise FileNotFoundError('iqcomm.dll')
        else:
            raise NotImplementedError('The iqLink support is only available for windows')

        self._port = None
        self._error_msg_buffer = ctypes.create_string_buffer(256)

        self._check_iqcomm_lib_version()
        self._connect()

    def power_on(self):
        """Switch the device power supply on (mst_PowerControl)."""
        self._switch_power('on')

    def power_off(self):
        """Switch the device power supply off (mst_PowerControl)."""
        self._switch_power('off')

    def change_device_state_to(self, target_state):
        """Drive the device into *target_state*: one of 'Inactive',
        'PreOperate' or 'Operate'."""
        self._check_port()
        target_state_to_op_modes_str = {
            'Inactive': 'INACTIVE',
            'PreOperate': 'PREOPERATE',
            'Operate': 'OPERATE',
        }
        op_modes_str = target_state_to_op_modes_str[target_state]
        self._go_to_state(op_modes_str)

    def get_device_pd_input_and_status(self):
        """Read process-data input and the raw port status byte.

        Returns:
            tuple: (pd_input_bytes, ctypes.c_uint8 status).

        Raises:
            ConnectionError: the library call failed.
        """
        self._check_port()
        status = ctypes.c_uint8()
        pd_data_buffer = ctypes.create_string_buffer(64)
        ret = _iqcomm_lib.mst_GetStatus(self._port,
                                        ctypes.byref(status),
                                        pd_data_buffer,
                                        ctypes.c_uint16(len(pd_data_buffer)),
                                        self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        # A non-negative return value is the number of valid PD bytes.
        return pd_data_buffer[:ret.value], status

    def set_device_pd_output(self, data: bytes):
        """Write the process-data output and mark it valid.

        Raises:
            ValueError: *data* is not a bytes object.
            ConnectionError: a library call failed.
        """
        self._check_port()
        if not isinstance(data, bytes):
            # BUG FIX: the original did `return ValueError`, which returned
            # the exception class instead of signalling the error.
            raise ValueError('data must be of type bytes')
        ret = _iqcomm_lib.mst_SetPDValue(self._port,
                                         data,
                                         ctypes.c_uint16(len(data)),
                                         self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        ret = _iqcomm_lib.mst_SetPDValidity(self._port,
                                            1,
                                            self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))

    def read_device_isdu(self, index, subindex):
        """Read an ISDU (on-request data object) from the device.

        Returns:
            bytes: the ISDU payload.

        Raises:
            TimeoutError: no response from the device.
            IsduError: the device rejected the request.
        """
        self._check_port()
        # NOTE(review): the return value of mst_StartReadOD is not checked
        # (same as the original); errors surface via mst_WaitODRsp below.
        _iqcomm_lib.mst_StartReadOD(self._port, index, subindex, self._error_msg_buffer)
        ret = _iqcomm_lib.mst_WaitODRsp(self._port, index, subindex, self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise TimeoutError(self._error_msg_buffer.value.decode('utf8'))
        isdu_data_buffer = ctypes.create_string_buffer(1024)
        isdu_error = ctypes.c_uint16(0)
        ret = _iqcomm_lib.mst_GetReadODRsp(self._port,
                                           isdu_data_buffer,
                                           len(isdu_data_buffer),
                                           ctypes.byref(isdu_error),
                                           self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise IsduError(isdu_error.value)
        return isdu_data_buffer[:ret.value]

    def write_device_isdu(self, index, subindex, data):
        """Write an ISDU (on-request data object) to the device.

        Raises:
            TimeoutError: no response from the device.
            IsduError: the device rejected the request.
        """
        self._check_port()
        _iqcomm_lib.mst_StartWriteOD(self._port, index, subindex, data, len(data), self._error_msg_buffer)
        ret = _iqcomm_lib.mst_WaitODRsp(self._port, index, subindex, self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise TimeoutError(self._error_msg_buffer.value.decode('utf8'))
        isdu_error = ctypes.c_uint16(0)
        ret = _iqcomm_lib.mst_GetWriteODRsp(self._port,
                                            ctypes.byref(isdu_error),
                                            self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise IsduError(isdu_error.value)

    def shut_down(self):
        """Disconnect from the master if a connection is open."""
        if self._port:
            ret = _iqcomm_lib.mst_Disconnect(self._port, self._error_msg_buffer)
            ret = ctypes.c_int16(ret)
            if ret.value < 0:
                raise ConnectionError(self._error_msg_buffer.value)

    def _switch_power(self, to):
        """Toggle device power; *to* is 'on' or 'off' (case-insensitive)."""
        assert to.upper() in ['ON', 'OFF']
        self._check_port()
        if to.upper() == 'ON':
            ret = _iqcomm_lib.mst_PowerControl(self._port, ctypes.c_uint8(1), self._error_msg_buffer)
        else:
            ret = _iqcomm_lib.mst_PowerControl(self._port, ctypes.c_uint8(0), self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))

    def _check_iqcomm_lib_version(self):
        """Reject iqcomm library versions older than 2.0."""
        version_major = ctypes.c_uint16()
        version_minor = ctypes.c_uint16()
        _iqcomm_lib.mst_GetVersion(ctypes.byref(version_major),
                                   ctypes.byref(version_minor),
                                   self._error_msg_buffer)
        # make sure that iqcomm lib version is at least 2.0
        if version_major.value < 2:
            raise Exception('This version of the iqcomm lib is not supported')

    def _check_port(self):
        """Guard: a connection must have been established first."""
        if not self._port:
            # NOTE(review): UnboundLocalError is an unusual choice, but it
            # is kept because callers may already depend on it.
            raise UnboundLocalError

    def _connect(self, com_port=None):
        """Connect to a specific COM port, or scan ports 0-255 when None."""
        if com_port is not None:
            com_port_num = self._com_port_str_to_int(com_port)
            ret = _iqcomm_lib.mst_Connect(ctypes.c_uint8(com_port_num),
                                          ctypes.c_uint8(com_port_num),
                                          self._error_msg_buffer)
        else:
            ret = _iqcomm_lib.mst_Connect(ctypes.c_uint8(0), ctypes.c_uint8(255), self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        self._port = ret

    def _go_to_state(self, mode):
        """Set the master operating mode and verify the resulting state
        (no verification in 'AUTO' mode)."""
        set_state = ctypes.c_uint8(self.op_modes[mode])
        if mode != 'AUTO':
            expected_state = ctypes.c_uint8(self.op_states[mode])
        else:
            expected_state = ctypes.c_uint8()
        actual_state = ctypes.c_uint8()
        ret = _iqcomm_lib.mst_SetOperatingMode(self._port,
                                               set_state,
                                               expected_state,
                                               ctypes.byref(actual_state),
                                               self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        if mode != 'AUTO':
            if actual_state.value != expected_state.value:
                raise ConnectionRefusedError

    @classmethod
    def _com_port_str_to_int(cls, comport):
        """Extract the numeric part of a 'COM<n>' port name.

        info for the regex expression:
        https://stackoverflow.com/questions/16519744/python-regex-to-match-space-character-or-end-of-string
        """
        m = re.match(r'COM([0-9]+)(?:\s+|$)', comport)
        if not m:
            raise NameError
        return int(m.groups()[0])
| <filename>iolink/interfaces/iqlink/iqlink.py
from iolink.port import PortABC, IsduError
import ctypes
import ctypes.util
import os
import re
import sys
# Maximum length of the device serial number field in the C struct below.
MST_DEV_SER_NUM_MAX_LEN = 16

_iqcomm_lib = None
# Locate and load iqcomm.dll (Windows only). Search order:
# 1) the directory of the main script, 2) the directory of this module,
# 3) the system search path via ctypes.util.find_library.
if sys.platform == 'win32':
    if hasattr(sys.modules['__main__'], '__file__'):
        # there might be no main file, e.g. in interactive interpreter mode
        main_file_directory = os.path.dirname(os.path.abspath(sys.modules['__main__'].__file__))
        if os.path.isfile(os.path.join(main_file_directory, 'iqcomm.dll')):
            _iqcomm_lib = ctypes.windll.LoadLibrary(os.path.join(main_file_directory, 'iqcomm.dll'))
    if _iqcomm_lib is None:
        this_files_directory = os.path.dirname(__file__)
        if os.path.isfile(os.path.join(this_files_directory, 'iqcomm.dll')):
            _iqcomm_lib = ctypes.windll.LoadLibrary(os.path.join(this_files_directory, 'iqcomm.dll'))
        elif ctypes.util.find_library('iqcomm.dll') is not None:
            _iqcomm_lib = ctypes.windll.LoadLibrary(ctypes.util.find_library('iqcomm.dll'))
class MstConfigT(ctypes.Structure):
    # ctypes mirror of the iqcomm master-configuration struct.
    # Field order and widths must match the C definition exactly.
    _fields_ = [
        ('stackVersion', ctypes.c_uint16),
        ('cycleTimeOperate', ctypes.c_uint16),
        ('restOfCycleTimeOperate', ctypes.c_uint16),
        ('revisionID', ctypes.c_int),  # enum
        ('inspectionLevel', ctypes.c_int),  # enum
        ('deviceVendorID', ctypes.c_uint32),
        ('deviceID', ctypes.c_uint32),
        ('deviceFunctionID', ctypes.c_uint16),
        ('deviceSerialNumber', ctypes.c_uint8 * (MST_DEV_SER_NUM_MAX_LEN + 1)),
        ('deviceSerialNumberLen', ctypes.c_uint8),
        ('realBaudrate', ctypes.c_int),  # enum
        ('dsActivState', ctypes.c_int),  # enum
        ('dsUploadEnable', ctypes.c_bool),
        ('dsDownloadEnable', ctypes.c_bool),
    ]
class IqLinkPort(PortABC):
    """IO-Link master port backed by the iqLink ``iqcomm.dll`` library
    (Windows only).

    Library calls return a signed int16; a negative value signals an
    error whose text is written into ``self._error_msg_buffer``.
    """

    # mst_OperModeT -- operating-mode values requested from the C API.
    op_modes = {
        'INACTIVE': 0,
        'AUTO': 3,
        'PREOPERATE': 4,
        'OPERATE': 5,
    }

    # mst_StateT -- actual state values reported by the C API.
    op_states = {
        'INACTIVE': 0,
        'CHK_FAULT': 3,
        'PREOPERATE': 4,
        'OPERATE': 5,
    }

    def __init__(self, **kwargs):
        """Verify platform/library availability and connect to the master.

        Raises:
            FileNotFoundError: iqcomm.dll could not be located.
            NotImplementedError: not running on Windows.
        """
        if sys.platform == 'win32':
            if _iqcomm_lib is None:
                raise FileNotFoundError('iqcomm.dll')
        else:
            raise NotImplementedError('The iqLink support is only available for windows')

        self._port = None
        self._error_msg_buffer = ctypes.create_string_buffer(256)

        self._check_iqcomm_lib_version()
        self._connect()

    def power_on(self):
        """Switch the device power supply on (mst_PowerControl)."""
        self._switch_power('on')

    def power_off(self):
        """Switch the device power supply off (mst_PowerControl)."""
        self._switch_power('off')

    def change_device_state_to(self, target_state):
        """Drive the device into *target_state*: one of 'Inactive',
        'PreOperate' or 'Operate'."""
        self._check_port()
        target_state_to_op_modes_str = {
            'Inactive': 'INACTIVE',
            'PreOperate': 'PREOPERATE',
            'Operate': 'OPERATE',
        }
        op_modes_str = target_state_to_op_modes_str[target_state]
        self._go_to_state(op_modes_str)

    def get_device_pd_input_and_status(self):
        """Read process-data input and the raw port status byte.

        Returns:
            tuple: (pd_input_bytes, ctypes.c_uint8 status).

        Raises:
            ConnectionError: the library call failed.
        """
        self._check_port()
        status = ctypes.c_uint8()
        pd_data_buffer = ctypes.create_string_buffer(64)
        ret = _iqcomm_lib.mst_GetStatus(self._port,
                                        ctypes.byref(status),
                                        pd_data_buffer,
                                        ctypes.c_uint16(len(pd_data_buffer)),
                                        self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        # A non-negative return value is the number of valid PD bytes.
        return pd_data_buffer[:ret.value], status

    def set_device_pd_output(self, data: bytes):
        """Write the process-data output and mark it valid.

        Raises:
            ValueError: *data* is not a bytes object.
            ConnectionError: a library call failed.
        """
        self._check_port()
        if not isinstance(data, bytes):
            # BUG FIX: the original did `return ValueError`, which returned
            # the exception class instead of signalling the error.
            raise ValueError('data must be of type bytes')
        ret = _iqcomm_lib.mst_SetPDValue(self._port,
                                         data,
                                         ctypes.c_uint16(len(data)),
                                         self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        ret = _iqcomm_lib.mst_SetPDValidity(self._port,
                                            1,
                                            self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))

    def read_device_isdu(self, index, subindex):
        """Read an ISDU (on-request data object) from the device.

        Returns:
            bytes: the ISDU payload.

        Raises:
            TimeoutError: no response from the device.
            IsduError: the device rejected the request.
        """
        self._check_port()
        # NOTE(review): the return value of mst_StartReadOD is not checked
        # (same as the original); errors surface via mst_WaitODRsp below.
        _iqcomm_lib.mst_StartReadOD(self._port, index, subindex, self._error_msg_buffer)
        ret = _iqcomm_lib.mst_WaitODRsp(self._port, index, subindex, self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise TimeoutError(self._error_msg_buffer.value.decode('utf8'))
        isdu_data_buffer = ctypes.create_string_buffer(1024)
        isdu_error = ctypes.c_uint16(0)
        ret = _iqcomm_lib.mst_GetReadODRsp(self._port,
                                           isdu_data_buffer,
                                           len(isdu_data_buffer),
                                           ctypes.byref(isdu_error),
                                           self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise IsduError(isdu_error.value)
        return isdu_data_buffer[:ret.value]

    def write_device_isdu(self, index, subindex, data):
        """Write an ISDU (on-request data object) to the device.

        Raises:
            TimeoutError: no response from the device.
            IsduError: the device rejected the request.
        """
        self._check_port()
        _iqcomm_lib.mst_StartWriteOD(self._port, index, subindex, data, len(data), self._error_msg_buffer)
        ret = _iqcomm_lib.mst_WaitODRsp(self._port, index, subindex, self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise TimeoutError(self._error_msg_buffer.value.decode('utf8'))
        isdu_error = ctypes.c_uint16(0)
        ret = _iqcomm_lib.mst_GetWriteODRsp(self._port,
                                            ctypes.byref(isdu_error),
                                            self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise IsduError(isdu_error.value)

    def shut_down(self):
        """Disconnect from the master if a connection is open."""
        if self._port:
            ret = _iqcomm_lib.mst_Disconnect(self._port, self._error_msg_buffer)
            ret = ctypes.c_int16(ret)
            if ret.value < 0:
                raise ConnectionError(self._error_msg_buffer.value)

    def _switch_power(self, to):
        """Toggle device power; *to* is 'on' or 'off' (case-insensitive)."""
        assert to.upper() in ['ON', 'OFF']
        self._check_port()
        if to.upper() == 'ON':
            ret = _iqcomm_lib.mst_PowerControl(self._port, ctypes.c_uint8(1), self._error_msg_buffer)
        else:
            ret = _iqcomm_lib.mst_PowerControl(self._port, ctypes.c_uint8(0), self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))

    def _check_iqcomm_lib_version(self):
        """Reject iqcomm library versions older than 2.0."""
        version_major = ctypes.c_uint16()
        version_minor = ctypes.c_uint16()
        _iqcomm_lib.mst_GetVersion(ctypes.byref(version_major),
                                   ctypes.byref(version_minor),
                                   self._error_msg_buffer)
        # make sure that iqcomm lib version is at least 2.0
        if version_major.value < 2:
            raise Exception('This version of the iqcomm lib is not supported')

    def _check_port(self):
        """Guard: a connection must have been established first."""
        if not self._port:
            # NOTE(review): UnboundLocalError is an unusual choice, but it
            # is kept because callers may already depend on it.
            raise UnboundLocalError

    def _connect(self, com_port=None):
        """Connect to a specific COM port, or scan ports 0-255 when None."""
        if com_port is not None:
            com_port_num = self._com_port_str_to_int(com_port)
            ret = _iqcomm_lib.mst_Connect(ctypes.c_uint8(com_port_num),
                                          ctypes.c_uint8(com_port_num),
                                          self._error_msg_buffer)
        else:
            ret = _iqcomm_lib.mst_Connect(ctypes.c_uint8(0), ctypes.c_uint8(255), self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        self._port = ret

    def _go_to_state(self, mode):
        """Set the master operating mode and verify the resulting state
        (no verification in 'AUTO' mode)."""
        set_state = ctypes.c_uint8(self.op_modes[mode])
        if mode != 'AUTO':
            expected_state = ctypes.c_uint8(self.op_states[mode])
        else:
            expected_state = ctypes.c_uint8()
        actual_state = ctypes.c_uint8()
        ret = _iqcomm_lib.mst_SetOperatingMode(self._port,
                                               set_state,
                                               expected_state,
                                               ctypes.byref(actual_state),
                                               self._error_msg_buffer)
        ret = ctypes.c_int16(ret)
        if ret.value < 0:
            raise ConnectionError(self._error_msg_buffer.value.decode('utf8'))
        if mode != 'AUTO':
            if actual_state.value != expected_state.value:
                raise ConnectionRefusedError

    @classmethod
    def _com_port_str_to_int(cls, comport):
        """Extract the numeric part of a 'COM<n>' port name.

        info for the regex expression:
        https://stackoverflow.com/questions/16519744/python-regex-to-match-space-character-or-end-of-string
        """
        m = re.match(r'COM([0-9]+)(?:\s+|$)', comport)
        if not m:
            raise NameError
        return int(m.groups()[0])
| en | 0.75537 | # there might be no main file, e.g. in interactive interpreter mode # enum # enum # enum # enum # mst_OperModeT # mst_StateT # make sure that iqcomm lib version is at least 2.0 info for the regex expression: https://stackoverflow.com/questions/16519744/python-regex-to-match-space-character-or-end-of-string | 1.973329 | 2 |
test/utils_test.py | Yomguithereal/ebbe | 5 | 6618373 | # =============================================================================
# Ebbe Utilities Unit Tests
# =============================================================================
import pytest
from collections import OrderedDict
from itertools import chain
from ebbe import (
get,
getter,
getpath,
pathgetter,
sorted_uniq,
indexed,
grouped,
partitioned,
grouped_items,
partitioned_items
)
class Container(object):
    """Test fixture exposing attributes (``value``, ``numbers``) plus a
    single level of nesting via ``recursion`` for attribute-path tests."""

    def __init__(self, value, recurse=True):
        self.value = value
        self.numbers = [4, 5, 6]
        if recurse:
            # Nest exactly one level: the inner Container does not recurse.
            self.recursion = Container(value, recurse=False)
# Deeply nested fixture mixing dicts, lists and Container objects;
# exercised by the getpath/pathgetter tests below.
NESTED_OBJECT = {
    'a': {
        'b': [{'c': 4}, 45, {'f': [1, 2, 3]}],
        'd': {
            'e': 5,
            'g': Container(45)
        }
    },
    't': 32
}
class TestUtils(object):
    """Regression tests for ebbe's getter/grouping utilities.

    Expected values are pinned against the module-level NESTED_OBJECT
    fixture; the asserts themselves document the library's contract.
    """
    def test_get(self):
        # get() works on mappings and sequences, with an optional default.
        assert get(NESTED_OBJECT, 't') == 32
        assert get(NESTED_OBJECT, 'l') is None
        assert get(NESTED_OBJECT, 'l', 27) == 27
        assert get([0, 2, 4], 1) == 2
        assert get([0, 2, 4], 7) is None
    def test_getter(self):
        # getter() curries get(); a call-time default overrides the
        # factory-time default (28 wins over 27 below).
        assert getter('t')(NESTED_OBJECT) == 32
        assert getter('l')(NESTED_OBJECT) is None
        assert getter('l', 27)(NESTED_OBJECT) == 27
        assert getter('l')(NESTED_OBJECT, 28) == 28
        assert getter('l', 27)(NESTED_OBJECT, 28) == 28
    def test_getpath(self):
        # getpath() walks a list of keys/indices; attributes=True also
        # follows object attributes, and split_char/parse_indices enable
        # string-form paths. A bare string path raises TypeError.
        with pytest.raises(TypeError):
            getpath(NESTED_OBJECT, 'test')
        assert getpath(NESTED_OBJECT, ['a', 'd', 'e']) == 5
        assert getpath(NESTED_OBJECT, ['a', 'd', 'e'], items=None) is None
        assert getpath(NESTED_OBJECT, ['a', 'c']) is None
        assert getpath(NESTED_OBJECT, ['a', 'c'], 67) == 67
        assert getpath(NESTED_OBJECT, ['a', 'b', 1]) == 45
        assert getpath(NESTED_OBJECT, ['a', 'b', -1, 'f', -1]) == 3
        assert getpath(NESTED_OBJECT, ['a', 'b', 0, 'c']) == 4
        assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 'numbers', 1]) is None
        assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 'numbers', 1], attributes=True) == 5
        assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 3], attributes=True) is None
        assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 'recursion', 'numbers'], attributes=True) == [4, 5, 6]
        assert getpath(NESTED_OBJECT, 'a.d.e', split_char='.') == 5
        assert getpath(NESTED_OBJECT, 'a§d§e', split_char='§') == 5
        assert getpath(NESTED_OBJECT, 'a.b.1', split_char='.', parse_indices=True) == 45
        assert getpath(NESTED_OBJECT, 'a.b.-1.f.-1', split_char='.', parse_indices=True) == 3
        assert getpath([[1, 2]], [3, 4, 17]) is None
    def test_pathgetter(self):
        # pathgetter() is the curried form of getpath(); passing several
        # paths yields a getter returning a tuple of results.
        with pytest.raises(TypeError):
            pathgetter()
        assert pathgetter(['a', 'd', 'e'])(NESTED_OBJECT) == 5
        assert pathgetter(['a', 'd', 'e'], items=None)(NESTED_OBJECT) is None
        assert pathgetter(['a', 'c'])(NESTED_OBJECT) is None
        assert pathgetter(['a', 'c'])(NESTED_OBJECT, 67) == 67
        assert pathgetter(['a', 'b', 1])(NESTED_OBJECT) == 45
        assert pathgetter(['a', 'b', -1, 'f', -1])(NESTED_OBJECT) == 3
        assert pathgetter(['a', 'b', 0, 'c'])(NESTED_OBJECT) == 4
        assert pathgetter(['a', 'd', 'g', 'numbers', 1])(NESTED_OBJECT) is None
        assert pathgetter(['a', 'd', 'g', 'numbers', 1], attributes=True)(NESTED_OBJECT) == 5
        assert pathgetter(['a', 'd', 'g', 3], attributes=True)(NESTED_OBJECT) is None
        assert pathgetter(['a', 'd', 'g', 'recursion', 'numbers'], attributes=True)(NESTED_OBJECT) == [4, 5, 6]
        assert pathgetter('a.d.e', split_char='.')(NESTED_OBJECT) == 5
        assert pathgetter('a§d§e', split_char='§')(NESTED_OBJECT) == 5
        assert pathgetter('a.b.1', split_char='.', parse_indices=True)(NESTED_OBJECT) == 45
        assert pathgetter('a.b.-1.f.-1', split_char='.', parse_indices=True)(NESTED_OBJECT) == 3
        tuple_getter = pathgetter(
            ['a', 'd', 'e'],
            ['a', 'c'],
            ['a', 'b', 1]
        )
        assert tuple_getter(NESTED_OBJECT) == (5, None, 45)
        default_getter = pathgetter(['a', 'd', 'e'], default=1337)
        assert default_getter(NESTED_OBJECT) == 5
        assert default_getter({}) == 1337
    def test_sorted_uniq(self):
        # sorted_uniq() deduplicates then sorts; key/reverse mirror sorted().
        numbers = [3, 17, 3, 4, 1, 4, 5, 5, 1, -1, 5]
        assert sorted_uniq(numbers) == [-1, 1, 3, 4, 5, 17]
        assert sorted_uniq(numbers, reverse=True) == [17, 5, 4, 3, 1, -1]
        tuples = [(11, 23), (1, 2), (2, 2), (3, 2), (1, 5), (1, 6)]
        assert sorted_uniq(tuples, key=getter(1)) == [(1, 2), (1, 5), (1, 6), (11, 23)]
    def test_indexed(self):
        # indexed() builds a key -> item mapping; the optional second
        # positional argument selects the mapping container type.
        with pytest.raises(TypeError):
            indexed(None)
        with pytest.raises(TypeError):
            indexed([], None)
        with pytest.raises(TypeError):
            indexed([], key='test')
        assert indexed(range(3)) == {0: 0, 1: 1, 2: 2}
        assert indexed(range(3), key=lambda x: x * 10) == {0: 0, 10: 1, 20: 2}
        ordered = indexed(range(3), OrderedDict, key=lambda x: x * 10)
        assert isinstance(ordered, OrderedDict)
        assert ordered == OrderedDict([(0, 0), (10, 1), (20, 2)])
        assert indexed(range(3), key=lambda x: x * 10) == {x * 10: x for x in range(3)}
    def test_grouped(self):
        # grouped() buckets items by key; value= maps items before
        # insertion, container= picks the bucket type, and
        # grouped_items() consumes ready-made (key, value) pairs.
        with pytest.raises(TypeError):
            grouped(None)
        with pytest.raises(TypeError):
            grouped([], None)
        with pytest.raises(TypeError):
            grouped([], key='test')
        assert grouped(chain(range(2), range(3), range(4))) == {
            0: [0, 0, 0],
            1: [1, 1, 1],
            2: [2, 2],
            3: [3]
        }
        def key(x):
            return 'ok' if x in [2, 3] else 'not-ok'
        def value(x):
            return x * 10
        assert grouped(range(5), key=key) == {
            'ok': [2, 3],
            'not-ok': [0, 1, 4]
        }
        assert grouped(range(5), key=key, value=value) == {
            'ok': [20, 30],
            'not-ok': [0, 10, 40]
        }
        assert grouped_items((key(x), x * 10) for x in range(5)) == {
            'ok': [20, 30],
            'not-ok': [0, 10, 40]
        }
        assert grouped(chain(range(5), range(5)), container=set, key=key) == {
            'ok': {2, 3},
            'not-ok': {0, 1, 4}
        }
    def test_partitioned(self):
        # partitioned() is grouped() with a list-of-buckets result
        # (ordered by first appearance of each key) instead of a dict.
        with pytest.raises(TypeError):
            partitioned(None)
        with pytest.raises(TypeError):
            partitioned([], None)
        with pytest.raises(TypeError):
            partitioned([], key='test')
        assert partitioned(chain(range(2), range(3), range(4))) == [
            [0, 0, 0],
            [1, 1, 1],
            [2, 2],
            [3]
        ]
        def key(x):
            return 'ok' if x in [2, 3] else 'not-ok'
        def value(x):
            return x * 10
        assert partitioned(range(5), key=key) == [
            [0, 1, 4],
            [2, 3]
        ]
        assert partitioned(range(5), key=key, value=value) == [
            [0, 10, 40],
            [20, 30]
        ]
        assert partitioned_items((key(x), x * 10) for x in range(5)) == [
            [0, 10, 40],
            [20, 30]
        ]
        assert partitioned(chain(range(5), range(5)), container=set, key=key) == [
            {0, 1, 4},
            {2, 3}
        ]
| # =============================================================================
# Ebbe Utilities Unit Tests
# =============================================================================
import pytest
from collections import OrderedDict
from itertools import chain
from ebbe import (
get,
getter,
getpath,
pathgetter,
sorted_uniq,
indexed,
grouped,
partitioned,
grouped_items,
partitioned_items
)
class Container(object):
def __init__(self, value, recurse=True):
self.value = value
self.numbers = [4, 5, 6]
if recurse:
self.recursion = Container(self.value, recurse=False)
NESTED_OBJECT = {
'a': {
'b': [{'c': 4}, 45, {'f': [1, 2, 3]}],
'd': {
'e': 5,
'g': Container(45)
}
},
't': 32
}
class TestUtils(object):
def test_get(self):
assert get(NESTED_OBJECT, 't') == 32
assert get(NESTED_OBJECT, 'l') is None
assert get(NESTED_OBJECT, 'l', 27) == 27
assert get([0, 2, 4], 1) == 2
assert get([0, 2, 4], 7) is None
def test_getter(self):
assert getter('t')(NESTED_OBJECT) == 32
assert getter('l')(NESTED_OBJECT) is None
assert getter('l', 27)(NESTED_OBJECT) == 27
assert getter('l')(NESTED_OBJECT, 28) == 28
assert getter('l', 27)(NESTED_OBJECT, 28) == 28
def test_getpath(self):
with pytest.raises(TypeError):
getpath(NESTED_OBJECT, 'test')
assert getpath(NESTED_OBJECT, ['a', 'd', 'e']) == 5
assert getpath(NESTED_OBJECT, ['a', 'd', 'e'], items=None) is None
assert getpath(NESTED_OBJECT, ['a', 'c']) is None
assert getpath(NESTED_OBJECT, ['a', 'c'], 67) == 67
assert getpath(NESTED_OBJECT, ['a', 'b', 1]) == 45
assert getpath(NESTED_OBJECT, ['a', 'b', -1, 'f', -1]) == 3
assert getpath(NESTED_OBJECT, ['a', 'b', 0, 'c']) == 4
assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 'numbers', 1]) is None
assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 'numbers', 1], attributes=True) == 5
assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 3], attributes=True) is None
assert getpath(NESTED_OBJECT, ['a', 'd', 'g', 'recursion', 'numbers'], attributes=True) == [4, 5, 6]
assert getpath(NESTED_OBJECT, 'a.d.e', split_char='.') == 5
assert getpath(NESTED_OBJECT, 'a§d§e', split_char='§') == 5
assert getpath(NESTED_OBJECT, 'a.b.1', split_char='.', parse_indices=True) == 45
assert getpath(NESTED_OBJECT, 'a.b.-1.f.-1', split_char='.', parse_indices=True) == 3
assert getpath([[1, 2]], [3, 4, 17]) is None
def test_pathgetter(self):
with pytest.raises(TypeError):
pathgetter()
assert pathgetter(['a', 'd', 'e'])(NESTED_OBJECT) == 5
assert pathgetter(['a', 'd', 'e'], items=None)(NESTED_OBJECT) is None
assert pathgetter(['a', 'c'])(NESTED_OBJECT) is None
assert pathgetter(['a', 'c'])(NESTED_OBJECT, 67) == 67
assert pathgetter(['a', 'b', 1])(NESTED_OBJECT) == 45
assert pathgetter(['a', 'b', -1, 'f', -1])(NESTED_OBJECT) == 3
assert pathgetter(['a', 'b', 0, 'c'])(NESTED_OBJECT) == 4
assert pathgetter(['a', 'd', 'g', 'numbers', 1])(NESTED_OBJECT) is None
assert pathgetter(['a', 'd', 'g', 'numbers', 1], attributes=True)(NESTED_OBJECT) == 5
assert pathgetter(['a', 'd', 'g', 3], attributes=True)(NESTED_OBJECT) is None
assert pathgetter(['a', 'd', 'g', 'recursion', 'numbers'], attributes=True)(NESTED_OBJECT) == [4, 5, 6]
assert pathgetter('a.d.e', split_char='.')(NESTED_OBJECT) == 5
assert pathgetter('a§d§e', split_char='§')(NESTED_OBJECT) == 5
assert pathgetter('a.b.1', split_char='.', parse_indices=True)(NESTED_OBJECT) == 45
assert pathgetter('a.b.-1.f.-1', split_char='.', parse_indices=True)(NESTED_OBJECT) == 3
tuple_getter = pathgetter(
['a', 'd', 'e'],
['a', 'c'],
['a', 'b', 1]
)
assert tuple_getter(NESTED_OBJECT) == (5, None, 45)
default_getter = pathgetter(['a', 'd', 'e'], default=1337)
assert default_getter(NESTED_OBJECT) == 5
assert default_getter({}) == 1337
def test_sorted_uniq(self):
numbers = [3, 17, 3, 4, 1, 4, 5, 5, 1, -1, 5]
assert sorted_uniq(numbers) == [-1, 1, 3, 4, 5, 17]
assert sorted_uniq(numbers, reverse=True) == [17, 5, 4, 3, 1, -1]
tuples = [(11, 23), (1, 2), (2, 2), (3, 2), (1, 5), (1, 6)]
assert sorted_uniq(tuples, key=getter(1)) == [(1, 2), (1, 5), (1, 6), (11, 23)]
def test_indexed(self):
with pytest.raises(TypeError):
indexed(None)
with pytest.raises(TypeError):
indexed([], None)
with pytest.raises(TypeError):
indexed([], key='test')
assert indexed(range(3)) == {0: 0, 1: 1, 2: 2}
assert indexed(range(3), key=lambda x: x * 10) == {0: 0, 10: 1, 20: 2}
ordered = indexed(range(3), OrderedDict, key=lambda x: x * 10)
assert isinstance(ordered, OrderedDict)
assert ordered == OrderedDict([(0, 0), (10, 1), (20, 2)])
assert indexed(range(3), key=lambda x: x * 10) == {x * 10: x for x in range(3)}
def test_grouped(self):
with pytest.raises(TypeError):
grouped(None)
with pytest.raises(TypeError):
grouped([], None)
with pytest.raises(TypeError):
grouped([], key='test')
assert grouped(chain(range(2), range(3), range(4))) == {
0: [0, 0, 0],
1: [1, 1, 1],
2: [2, 2],
3: [3]
}
def key(x):
return 'ok' if x in [2, 3] else 'not-ok'
def value(x):
return x * 10
assert grouped(range(5), key=key) == {
'ok': [2, 3],
'not-ok': [0, 1, 4]
}
assert grouped(range(5), key=key, value=value) == {
'ok': [20, 30],
'not-ok': [0, 10, 40]
}
assert grouped_items((key(x), x * 10) for x in range(5)) == {
'ok': [20, 30],
'not-ok': [0, 10, 40]
}
assert grouped(chain(range(5), range(5)), container=set, key=key) == {
'ok': {2, 3},
'not-ok': {0, 1, 4}
}
def test_partitioned(self):
with pytest.raises(TypeError):
partitioned(None)
with pytest.raises(TypeError):
partitioned([], None)
with pytest.raises(TypeError):
partitioned([], key='test')
assert partitioned(chain(range(2), range(3), range(4))) == [
[0, 0, 0],
[1, 1, 1],
[2, 2],
[3]
]
def key(x):
return 'ok' if x in [2, 3] else 'not-ok'
def value(x):
return x * 10
assert partitioned(range(5), key=key) == [
[0, 1, 4],
[2, 3]
]
assert partitioned(range(5), key=key, value=value) == [
[0, 10, 40],
[20, 30]
]
assert partitioned_items((key(x), x * 10) for x in range(5)) == [
[0, 10, 40],
[20, 30]
]
assert partitioned(chain(range(5), range(5)), container=set, key=key) == [
{0, 1, 4},
{2, 3}
]
| en | 0.408079 | # ============================================================================= # Ebbe Utilities Unit Tests # ============================================================================= | 2.670124 | 3 |
commands/intake.py | lnstempunks/MarvinTheParanoidAndroid | 2 | 6618374 | from wpilib.command import Command
import subsystems
import wpilib
class Intake(Command):
    """
    Command that runs the intake until the stopper reports a game piece.

    NOTE(review): the original docstring said "based on the triggers",
    but execute() drives the intake at a fixed 0.6 - confirm intent.
    """
    def __init__(self):
        super().__init__("Intake")
        # Latched stopper state; once True the command finishes.
        self.toggle = False
    def initialize(self):
        pass
    def execute(self):
        # Sample the stopper sensor, then keep the intake running at 0.6.
        self.toggle = subsystems.mechanisms.get_stopper()
        subsystems.mechanisms.set_intake(0.6)
    def isFinished(self):
        # When the stopper has tripped, keep running briefly (0.1 s) so the
        # piece seats before the command ends.
        if self.toggle:
            wpilib.Timer.delay(0.1)
        return self.toggle
    def end(self):
        # Always stop the intake motor when the command ends.
        subsystems.mechanisms.set_intake(0)
| from wpilib.command import Command
import subsystems
import wpilib
class Intake(Command):
"""
Command that runs the intake based on the triggers.
"""
def __init__(self):
super().__init__("Intake")
self.toggle = False
def initialize(self):
pass
def execute(self):
self.toggle = subsystems.mechanisms.get_stopper()
subsystems.mechanisms.set_intake(0.6)
def isFinished(self):
if self.toggle:
wpilib.Timer.delay(0.1)
return self.toggle
def end(self):
subsystems.mechanisms.set_intake(0)
| en | 0.978802 | Command that runs the intake based on the triggers. | 2.833713 | 3 |
src/ErrorPage.py | Waltrus/Guard-Bot | 5 | 6618375 | <reponame>Waltrus/Guard-Bot
import datetime

# Timestamp of when the error page was rendered; referenced in the output.
now = datetime.datetime.now()

# Messages are Turkish: "Error received at:" / "Please notify the staff!"
for _line in ("Hata Alım Saati :",
              now.strftime("%H:%M:%S %d-%m-%Y"),
              "Lütfen Yetkililere Bildirin!"):
    print(_line)
| import datetime
now = datetime.datetime.now()
print ("Hata Alım Saati :")
print (now.strftime("%H:%M:%S %d-%m-%Y"))
print ("Lütfen Yetkililere Bildirin!") | none | 1 | 3.376964 | 3 | |
catkin_ws/src/duckietown_microMVP/micromvp_test/include/micromvp_test/utils.py | eric565648/duckietown-pi2 | 0 | 6618376 | <filename>catkin_ws/src/duckietown_microMVP/micromvp_test/include/micromvp_test/utils.py
'''
All components of this library are licensed under the BSD 3-Clause
License.
Copyright (c) 2015-, Algorithmic Robotics and Control Group @Rutgers
(http://arc.cs.rutgers.edu). All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
Rutgers University nor the names of the contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
"""
microMVP useful functions.
"""
# (Arduino ID, AprilTag id) pair per car; at most 10 cars supported.
carInfo = [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6),(7, 7),(8, 8)]
# IP address of the computer running position_.exe (local: zmqPublisherIP = "localhost")
zmqPublisherIP = "localhost"
# Port of xBee sender, can be found in Device Manager, Ports
xBeeSocket = 'COM3'
# Simulation Speed, lower value gives a more precise simulation
simSpeed = 5.0
# Sidelength of tag / Vehicle wheelbase
tagRatio = 0.90
"""
-------------------------------------------------------
Dont make changes under this line
-------------------------------------------------------
"""
# Part 0: configs
zmqPublisherPort = "5556"
wheelBase = 30.0
# NOTE(review): duplicate assignment - tagRatio was already set to 0.90 above.
tagRatio = 0.90
container_width = 1420
container_height = 780
painter_width = 1280
painter_height = 720
spacer = 8
gridCopy = []
# Part 1: Colors (RGB triples)
RGB_WHITE = (255, 255, 255)
RGB_BLACK = (0, 0, 0)
RGB_RED = (255, 0, 0)
RGB_GREEN = (0, 255, 0)
RGB_BLUE = (0, 0, 255)
RGB_PINK = (255, 0, 255)
RGB_YELLOW = (255, 255, 0)
RGB_GREY = (128, 128, 128)
# NOTE(review): RGB_DEEPBLUE has the same value as RGB_BLUE.
RGB_DEEPBLUE = (0, 0, 255)
RGB_PURPLE = (127, 0, 255)
# High contrast colors, from http://godsnotwheregodsnot.blogspot.ru/2012/09/color-distribution-methodology.html
RGB_PATH_COLORS = [(0, 0, 0),(1, 0, 103),(213, 255, 0),(255, 0, 86),(158, 0, 142),(14, 76, 161),\
                   (255, 229, 2),(0, 95, 57),(0, 255, 0),(149, 0, 58),(255, 147, 126),(164, 36, 0),\
                   (0, 21, 68),(145, 208, 203),(98, 14, 0),(107, 104, 130),(0, 0, 255),(0, 125, 181),\
                   (106, 130, 108),(0, 174, 126),(194, 140, 159),(190, 153, 112),(0, 143, 156),(95, 173, 78),\
                   (255, 0, 0),(255, 0, 246),(255, 2, 157),(104, 61, 59),(255, 116, 163),(150, 138, 232),\
                   (152, 255, 82),(167, 87, 64),(1, 255, 254),(255, 238, 232),(254, 137, 0),(189, 198, 255),\
                   (1, 208, 255),(187, 136, 0),(117, 68, 177),(165, 255, 210),(255, 166, 254),(119, 77, 0),\
                   (122, 71, 130),(38, 52, 0),(0, 71, 84),(67, 0, 44),(181, 0, 255),(255, 177, 103),\
                   (255, 219, 102),(144, 251, 146),(126, 45, 210),(189, 211, 147),(229, 111, 254),(222, 255, 116),\
                   (0, 255, 120),(0, 155, 255),(0, 100, 1),(0, 118, 255),(133, 169, 0),(0, 185, 23),\
                   (120, 130, 49),(0, 255, 198),(255, 110, 65),(232, 94, 190)]
# Part n: Class for a car
class UnitCar:
    """Mutable state record for one vehicle.

    Holds identity (tag / Arduino ID), planar pose (x, y, theta),
    differential-drive wheel speeds and the list of assigned way-points.
    """
    def __init__(self, tag = "0", ID = "0"):
        self.tag = tag
        self.ID = ID
        # Pose and wheel speeds all start at rest.
        self.x = self.y = self.theta = 0.0
        self.lSpeed = self.rSpeed = 0.0
        # Way-points assigned to this car (empty until planned).
        self.path = []
class Boundary:
    """Workspace rectangle inset two wheel-bases from every painter edge.

    Attributes u/d/l/r are the up/down/left/right pixel coordinates;
    width/height are the resulting inner dimensions. Reads the
    module-level wheelBase, painter_width and painter_height constants.
    """
    def __init__(self):
        margin = 2 * wheelBase
        self.u = margin
        self.d = painter_height - margin
        self.l = margin
        self.r = painter_width - margin
        self.width = self.r - self.l
        self.height = self.d - self.u
# Part 1: Dialogs in GUI
from pgu import gui
class AboutDialog(gui.Dialog):
    """Modal "About microMVP" dialog showing version text in a scroll area."""
    def __init__(self, **params):
        title = gui.Label("About microMVP")
        width = 400
        height = 200
        doc = gui.Document(width = width)
        # Pixel size of a single space in the title font; used both as
        # inter-word spacing and (its height) as line-break size.
        space = title.style.font.size(" ")
        doc.block(align=0)
        for word in """microMVP v2.0""".split(" "):
            doc.add(gui.Label(word))
            doc.space(space)
        doc.br(space[1])
        doc.block(align=-1)
        # NOTE(review): this second paragraph repeats the exact same text
        # as the first ("microMVP v2.0") - possibly placeholder content.
        for word in """microMVP v2.0""".split(" "):
            doc.add(gui.Label(word))
            doc.space(space)
        doc.br(space[1])
        gui.Dialog.__init__(self,title,gui.ScrollArea(doc,width,height))
class HelpDialog(gui.Dialog):
    """Modal dialog pointing users at the project's online documentation."""
    def __init__(self, **params):
        title = gui.Label("Help")
        doc = gui.Document(width=400)
        # Pixel size of a space in the title font (word gap / line break).
        space = title.style.font.size(" ")
        doc.br(space[1])
        doc.block(align=-1)
        for word in """Please refer to http://arc.cs.rutgers.edu/mvp/""".split(" "):
            doc.add(gui.Label(word))
            doc.space(space)
        gui.Dialog.__init__(self,title,doc)
class QuitDialog(gui.Dialog):
    """Quit confirmation: "Okay" posts gui.QUIT, "Cancel" closes the dialog."""
    def __init__(self, **params):
        title = gui.Label("Quit")
        t = gui.Table()
        t.tr()
        t.add(gui.Label("Are you sure you want to quit?"),colspan=2)
        t.tr()
        # Confirm button - sends the application-level QUIT event.
        e = gui.Button("Okay")
        e.connect(gui.CLICK,self.send,gui.QUIT)
        t.td(e)
        # Cancel button - just dismisses this dialog.
        e = gui.Button("Cancel")
        e.connect(gui.CLICK,self.close,None)
        t.td(e)
        gui.Dialog.__init__(self,title,t)
import math
def CheckCollosion(thresh, x1, y1, x2, y2):
    """Return True when (x1, y1) and (x2, y2) are within *thresh* of each
    other (Euclidean distance, inclusive), else False.

    NOTE(review): the name is a typo for "CheckCollision"; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # math.hypot is the robust, concise form of sqrt(dx**2 + dy**2).
    return math.hypot(x1 - x2, y1 - y2) <= thresh
| <filename>catkin_ws/src/duckietown_microMVP/micromvp_test/include/micromvp_test/utils.py
'''
All components of this library are licensed under the BSD 3-Clause
License.
Copyright (c) 2015-, Algorithmic Robotics and Control Group @Rutgers
(http://arc.cs.rutgers.edu). All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
Rutgers University nor the names of the contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
"""
microMVP useful functions.
"""
# The arduino ID and the respect tag of car (ID, tag), max 10
carInfo = [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6),(7, 7),(8, 8)]
# IP address of the computer running position_.exe (local: zmqPublisherIP = "localhost")
zmqPublisherIP = "localhost"
# Port of xBee sender, can be found in Device Manager, Ports
xBeeSocket = 'COM3'
# Simulation Speed, lower value gives a more precise simulation
simSpeed = 5.0
# Sidelength of tag / Vehicle wheelbase
tagRatio = 0.90
"""
-------------------------------------------------------
Dont make changes under this line
-------------------------------------------------------
"""
# Part 0: configs
zmqPublisherPort = "5556"
wheelBase = 30.0
tagRatio = 0.90
container_width = 1420
container_height = 780
painter_width = 1280
painter_height = 720
spacer = 8
gridCopy = []
# Part 1: Colors
RGB_WHITE = (255, 255, 255)
RGB_BLACK = (0, 0, 0)
RGB_RED = (255, 0, 0)
RGB_GREEN = (0, 255, 0)
RGB_BLUE = (0, 0, 255)
RGB_PINK = (255, 0, 255)
RGB_YELLOW = (255, 255, 0)
RGB_GREY = (128, 128, 128)
RGB_DEEPBLUE = (0, 0, 255)
RGB_PURPLE = (127, 0, 255)
# High contrast colors, from http://godsnotwheregodsnot.blogspot.ru/2012/09/color-distribution-methodology.html
RGB_PATH_COLORS = [(0, 0, 0),(1, 0, 103),(213, 255, 0),(255, 0, 86),(158, 0, 142),(14, 76, 161),\
(255, 229, 2),(0, 95, 57),(0, 255, 0),(149, 0, 58),(255, 147, 126),(164, 36, 0),\
(0, 21, 68),(145, 208, 203),(98, 14, 0),(107, 104, 130),(0, 0, 255),(0, 125, 181),\
(106, 130, 108),(0, 174, 126),(194, 140, 159),(190, 153, 112),(0, 143, 156),(95, 173, 78),\
(255, 0, 0),(255, 0, 246),(255, 2, 157),(104, 61, 59),(255, 116, 163),(150, 138, 232),\
(152, 255, 82),(167, 87, 64),(1, 255, 254),(255, 238, 232),(254, 137, 0),(189, 198, 255),\
(1, 208, 255),(187, 136, 0),(117, 68, 177),(165, 255, 210),(255, 166, 254),(119, 77, 0),\
(122, 71, 130),(38, 52, 0),(0, 71, 84),(67, 0, 44),(181, 0, 255),(255, 177, 103),\
(255, 219, 102),(144, 251, 146),(126, 45, 210),(189, 211, 147),(229, 111, 254),(222, 255, 116),\
(0, 255, 120),(0, 155, 255),(0, 100, 1),(0, 118, 255),(133, 169, 0),(0, 185, 23),\
(120, 130, 49),(0, 255, 198),(255, 110, 65),(232, 94, 190)]
# Part n: Class for a car
class UnitCar:
def __init__(self, tag = "0", ID = "0"):
self.tag = tag
self.ID = ID
self.x = 0.0
self.y = 0.0
self.theta = 0.0
self.lSpeed = 0.0
self.rSpeed = 0.0
self.path = list()
class Boundary:
def __init__(self):
self.u = 2 * wheelBase
self.d = painter_height - 2 * wheelBase
self.l = 2 * wheelBase
self.r = painter_width - 2 * wheelBase
self.width = self.r - self.l
self.height = self.d - self.u
# Part 1: Dialogs in GUI
from pgu import gui
class AboutDialog(gui.Dialog):
def __init__(self, **params):
title = gui.Label("About microMVP")
width = 400
height = 200
doc = gui.Document(width = width)
space = title.style.font.size(" ")
doc.block(align=0)
for word in """microMVP v2.0""".split(" "):
doc.add(gui.Label(word))
doc.space(space)
doc.br(space[1])
doc.block(align=-1)
for word in """microMVP v2.0""".split(" "):
doc.add(gui.Label(word))
doc.space(space)
doc.br(space[1])
gui.Dialog.__init__(self,title,gui.ScrollArea(doc,width,height))
class HelpDialog(gui.Dialog):
def __init__(self, **params):
title = gui.Label("Help")
doc = gui.Document(width=400)
space = title.style.font.size(" ")
doc.br(space[1])
doc.block(align=-1)
for word in """Please refer to http://arc.cs.rutgers.edu/mvp/""".split(" "):
doc.add(gui.Label(word))
doc.space(space)
gui.Dialog.__init__(self,title,doc)
class QuitDialog(gui.Dialog):
def __init__(self, **params):
title = gui.Label("Quit")
t = gui.Table()
t.tr()
t.add(gui.Label("Are you sure you want to quit?"),colspan=2)
t.tr()
e = gui.Button("Okay")
e.connect(gui.CLICK,self.send,gui.QUIT)
t.td(e)
e = gui.Button("Cancel")
e.connect(gui.CLICK,self.close,None)
t.td(e)
gui.Dialog.__init__(self,title,t)
import math
def CheckCollosion(thresh, x1, y1, x2, y2):
if math.sqrt(math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2)) <= thresh:
return True
return False
| en | 0.701976 | All components of this library are licensed under the BSD 3-Clause License. Copyright (c) 2015-, Algorithmic Robotics and Control Group @Rutgers (http://arc.cs.rutgers.edu). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of Rutgers University nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. microMVP useful functions. 
# The arduino ID and the respect tag of car (ID, tag), max 10 # IP address of the computer running position_.exe (local: zmqPublisherIP = "localhost") # Port of xBee sender, can be found in Device Manager, Ports # Simulation Speed, lower value gives a more precise simulation # Sidelength of tag / Vehicle wheelbase ------------------------------------------------------- Dont make changes under this line ------------------------------------------------------- # Part 0: configs # Part 1: Colors # High contrast colors, from http://godsnotwheregodsnot.blogspot.ru/2012/09/color-distribution-methodology.html # Part n: Class for a car # Part 1: Dialogs in GUI microMVP v2.0 microMVP v2.0 Please refer to http://arc.cs.rutgers.edu/mvp/ | 1.096364 | 1 |
searx/engines/bing_videos.py | xu1991/open | 1 | 6618377 | """
Bing (Videos)
@website https://www.bing.com/videos
@provide-api yes (http://datamarket.azure.com/dataset/bing/search)
@using-api no
@results HTML
@stable no
@parse url, title, content, thumbnail
"""
from json import loads
from lxml import html
from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
from searx.utils import match_language
# engine capabilities advertised to the searx core
categories = ['videos']
paging = True
safesearch = True
time_range_support = True
number_of_results = 28
language_support = True
base_url = 'https://www.bing.com/'
search_string = 'videos/search'\
    '?{query}'\
    '&count={count}'\
    '&first={first}'\
    '&scope=video'\
    '&FORM=QBLH'
# recency filter appended to the URL; interval is expressed in minutes
time_range_string = '&qft=+filterui:videoage-lt{interval}'
time_range_dict = {'day': '1440',
                   'week': '10080',
                   'month': '43200',
                   'year': '525600'}
# safesearch definitions (value of the ADLT field in the SRCHHPGUSR cookie)
safesearch_types = {2: 'STRICT',
                    1: 'DEMOTE',
                    0: 'OFF'}
# do search-request
def request(query, params):
    """Build the Bing Videos search request: URL, paging cookies, filters."""
    # 1-based index of the first result on the requested page.
    first_result = (params['pageno'] - 1) * number_of_results + 1

    path = search_string.format(
        query=urlencode({'q': query}),
        count=number_of_results,
        first=first_result,
    )

    cookies = params['cookies']
    # Safe-search level travels in the SRCHHPGUSR cookie.
    cookies['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
    # Market / language preference travels in the _EDGE_S cookie.
    market = match_language(params['language'], supported_languages, language_aliases).lower()
    cookies['_EDGE_S'] = 'mkt=' + market + '&F=1'

    params['url'] = base_url + path

    # Optional recency filter appended as a qft URL parameter.
    interval = time_range_dict.get(params['time_range'])
    if interval is not None:
        params['url'] += time_range_string.format(interval=interval)

    return params
# get response from search-request
def response(resp):
    """Parse the Bing Videos result page into searx 'videos.html' results.

    Each hit embeds its metadata (murl, vt, du, thid) as JSON in the
    @vrhm attribute of a div.vrhdata element.
    """
    results = []

    dom = html.fromstring(resp.text)

    for result in dom.xpath('//div[@class="dg_u"]'):
        # Skip individual results whose markup or embedded JSON does not
        # match the expected shape, instead of aborting the whole page.
        try:
            metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0])
            info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip()
            content = '{0} - {1}'.format(metadata['du'], info)
            thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid'])
            results.append({'url': metadata['murl'],
                            'thumbnail': thumbnail,
                            'title': metadata.get('vt', ''),
                            'content': content,
                            'template': 'videos.html'})
        except (KeyError, IndexError, TypeError, ValueError):
            # Was a bare 'except:': narrowed to the parsing failures this
            # body can raise, so KeyboardInterrupt/SystemExit and genuine
            # bugs are no longer silently swallowed.
            continue

    return results
| """
Bing (Videos)
@website https://www.bing.com/videos
@provide-api yes (http://datamarket.azure.com/dataset/bing/search)
@using-api no
@results HTML
@stable no
@parse url, title, content, thumbnail
"""
from json import loads
from lxml import html
from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
from searx.utils import match_language
categories = ['videos']
paging = True
safesearch = True
time_range_support = True
number_of_results = 28
language_support = True
base_url = 'https://www.bing.com/'
search_string = 'videos/search'\
'?{query}'\
'&count={count}'\
'&first={first}'\
'&scope=video'\
'&FORM=QBLH'
time_range_string = '&qft=+filterui:videoage-lt{interval}'
time_range_dict = {'day': '1440',
'week': '10080',
'month': '43200',
'year': '525600'}
# safesearch definitions
safesearch_types = {2: 'STRICT',
1: 'DEMOTE',
0: 'OFF'}
# do search-request
def request(query, params):
offset = ((params['pageno'] - 1) * number_of_results) + 1
search_path = search_string.format(
query=urlencode({'q': query}),
count=number_of_results,
first=offset)
# safesearch cookie
params['cookies']['SRCHHPGUSR'] = \
'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
# language cookie
language = match_language(params['language'], supported_languages, language_aliases).lower()
params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1'
# query and paging
params['url'] = base_url + search_path
# time range
if params['time_range'] in time_range_dict:
params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@class="dg_u"]'):
try:
metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0])
info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip()
content = '{0} - {1}'.format(metadata['du'], info)
thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid'])
results.append({'url': metadata['murl'],
'thumbnail': thumbnail,
'title': metadata.get('vt', ''),
'content': content,
'template': 'videos.html'})
except:
continue
return results
| en | 0.533875 | Bing (Videos) @website https://www.bing.com/videos @provide-api yes (http://datamarket.azure.com/dataset/bing/search) @using-api no @results HTML @stable no @parse url, title, content, thumbnail # safesearch definitions # do search-request # safesearch cookie # language cookie # query and paging # time range # get response from search-request | 2.445328 | 2 |
services/web/server/tests/unit/with_dbs/03/meta_modeling/test_meta_modeling_results.py | elisabettai/osparc-simcore | 0 | 6618378 | <gh_stars>0
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import json
from typing import Any, Dict, Type
import pytest
from pydantic import BaseModel
from simcore_service_webserver.meta_modeling_results import (
ExtractedResults,
extract_project_results,
)
@pytest.fixture
def fake_workbench() -> Dict[str, Any]:
    """Hand-written project workbench (node-id -> node dict) covering the
    node kinds the extractor must handle: a data iterator, a finished
    and an unstarted computational node, two int probes and a boolean
    parameter node.
    """
    return {
        "0f1e38c9-dcb7-443c-a745-91b97ac28ccc": {
            "key": "simcore/services/frontend/data-iterator/funky-range",
            "version": "1.0.0",
            "label": "Integer iterator",
            "inputs": {"linspace_start": 0, "linspace_stop": 2, "linspace_step": 1},
            "inputNodes": [],
            # some funky output of iterator/param,
            "outputs": {"out_1": 1, "out_2": [3, 4]},
        },
        "e33c6880-1b1d-4419-82d7-270197738aa9": {
            "key": "simcore/services/comp/itis/sleeper",
            "version": "2.0.0",
            "label": "sleeper",
            "inputs": {
                "input_2": {
                    "nodeUuid": "0f1e38c9-dcb7-443c-a745-91b97ac28ccc",
                    "output": "out_1",
                },
                "input_3": False,
            },
            "inputNodes": ["0f1e38c9-dcb7-443c-a745-91b97ac28ccc"],
            "state": {
                "currentStatus": "SUCCESS",
                "modified": False,
                "dependencies": [],
            },
            "progress": 100,
            "outputs": {
                "output_1": {
                    "store": "0",
                    "path": "30359da5-ca4d-3288-a553-5f426a204fe6/e33c6880-1b1d-4419-82d7-270197738aa9/single_number.txt",
                    "eTag": "a87ff679a2f3e71d9181a67b7542122c",
                },
                "output_2": 7,
            },
            "runHash": "f92d1836aa1b6b1b031f9e1b982e631814708675c74ba5f02161e0f256382b2b",
        },
        # Same sleeper service, but never started: no outputs/progress.
        "4c08265a-427b-4ac3-9eab-1d11c822ada4": {
            "key": "simcore/services/comp/itis/sleeper",
            "version": "2.0.0",
            "label": "sleeper",
            "inputNodes": [],
        },
        "2d0ce8b9-c9c3-43ce-ad2f-ad493898de37": {
            "key": "simcore/services/frontend/iterator-consumer/probe/int",
            "version": "1.0.0",
            "label": "Probe Sensor - Integer",
            "inputs": {
                "in_1": {
                    "nodeUuid": "e33c6880-1b1d-4419-82d7-270197738aa9",
                    "output": "output_2",
                }
            },
            "inputNodes": ["e33c6880-1b1d-4419-82d7-270197738aa9"],
        },
        "445b44d1-59b3-425c-ac48-7c13e0f2ea5b": {
            "key": "simcore/services/frontend/iterator-consumer/probe/int",
            "version": "1.0.0",
            "label": "Probe Sensor - Integer_2",
            "inputs": {
                "in_1": {
                    "nodeUuid": "0f1e38c9-dcb7-443c-a745-91b97ac28ccc",
                    "output": "out_1",
                }
            },
            "inputNodes": ["0f1e38c9-dcb7-443c-a745-91b97ac28ccc"],
        },
        "d76fca06-f050-4790-88a8-0aac10c87b39": {
            "key": "simcore/services/frontend/parameter/boolean",
            "version": "1.0.0",
            "label": "Boolean Parameter",
            "inputs": {},
            "inputNodes": [],
            "outputs": {"out_1": True},
        },
    }
def test_extract_project_results(fake_workbench: Dict[str, Any]):
    """Check that the canned workbench splits into progress/label/value maps."""
    results = extract_project_results(fake_workbench)
    # Dump every section so failures are easy to diagnose from the log.
    for section in (results.progress, results.labels, results.values):
        print(json.dumps(section, indent=1))
    # Progress must be flat enough to land in a table column.
    expected_progress = {
        "4c08265a-427b-4ac3-9eab-1d11c822ada4": 0,
        "e33c6880-1b1d-4419-82d7-270197738aa9": 100,
    }
    assert results.progress == expected_progress
    # Labels are not unique, hence the node-id -> label map.
    expected_labels = {
        "0f1e38c9-dcb7-443c-a745-91b97ac28ccc": "Integer iterator",
        "2d0ce8b9-c9c3-43ce-ad2f-ad493898de37": "Probe Sensor - Integer",
        "445b44d1-59b3-425c-ac48-7c13e0f2ea5b": "Probe Sensor - Integer_2",
        "d76fca06-f050-4790-88a8-0aac10c87b39": "Boolean Parameter",
    }
    assert results.labels == expected_labels
    # Values form a small tree that defines the table columns.
    expected_values = {
        "0f1e38c9-dcb7-443c-a745-91b97ac28ccc": {"out_1": 1, "out_2": [3, 4]},
        "2d0ce8b9-c9c3-43ce-ad2f-ad493898de37": {"in_1": 7},
        "445b44d1-59b3-425c-ac48-7c13e0f2ea5b": {"in_1": 1},
        "d76fca06-f050-4790-88a8-0aac10c87b39": {"out_1": True},
    }
    assert results.values == expected_values
@pytest.mark.parametrize(
    "model_cls",
    (ExtractedResults,),
)
def test_models_examples(
    model_cls: Type[BaseModel], model_cls_examples: Dict[str, Any]
):
    """Every (name, example) pair supplied by the ``model_cls_examples``
    fixture must validate against ``model_cls`` without raising."""
    for name, example in model_cls_examples.items():
        # Print the example so a failing parametrization is identifiable.
        print(name, ":", json.dumps(example, indent=1))
        model_instance = model_cls(**example)
        assert model_instance, f"Failed with {name}"
| # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import json
from typing import Any, Dict, Type
import pytest
from pydantic import BaseModel
from simcore_service_webserver.meta_modeling_results import (
ExtractedResults,
extract_project_results,
)
@pytest.fixture
def fake_workbench() -> Dict[str, Any]:
    """Canned project workbench used to exercise result extraction.

    Contains an iterator node, a completed sleeper, a never-run sleeper,
    two integer probes and a boolean parameter node.
    """
    return {
        # Data iterator: its out_1 feeds the first sleeper below.
        "0f1e38c9-dcb7-443c-a745-91b97ac28ccc": {
            "key": "simcore/services/frontend/data-iterator/funky-range",
            "version": "1.0.0",
            "label": "Integer iterator",
            "inputs": {"linspace_start": 0, "linspace_stop": 2, "linspace_step": 1},
            "inputNodes": [],
            # some funky output of iterator/param,
            "outputs": {"out_1": 1, "out_2": [3, 4]},
        },
        # Completed computational service with a file output and an integer output.
        "e33c6880-1b1d-4419-82d7-270197738aa9": {
            "key": "simcore/services/comp/itis/sleeper",
            "version": "2.0.0",
            "label": "sleeper",
            "inputs": {
                "input_2": {
                    "nodeUuid": "0f1e38c9-dcb7-443c-a745-91b97ac28ccc",
                    "output": "out_1",
                },
                "input_3": False,
            },
            "inputNodes": ["0f1e38c9-dcb7-443c-a745-91b97ac28ccc"],
            "state": {
                "currentStatus": "SUCCESS",
                "modified": False,
                "dependencies": [],
            },
            "progress": 100,
            "outputs": {
                "output_1": {
                    "store": "0",
                    "path": "30359da5-ca4d-3288-a553-5f426a204fe6/e33c6880-1b1d-4419-82d7-270197738aa9/single_number.txt",
                    "eTag": "a87ff679a2f3e71d9181a67b7542122c",
                },
                "output_2": 7,
            },
            "runHash": "f92d1836aa1b6b1b031f9e1b982e631814708675c74ba5f02161e0f256382b2b",
        },
        # Second sleeper: never ran (no state, progress or outputs).
        "4c08265a-427b-4ac3-9eab-1d11c822ada4": {
            "key": "simcore/services/comp/itis/sleeper",
            "version": "2.0.0",
            "label": "sleeper",
            "inputNodes": [],
        },
        # Probe wired to the completed sleeper's integer output.
        "2d0ce8b9-c9c3-43ce-ad2f-ad493898de37": {
            "key": "simcore/services/frontend/iterator-consumer/probe/int",
            "version": "1.0.0",
            "label": "Probe Sensor - Integer",
            "inputs": {
                "in_1": {
                    "nodeUuid": "e33c6880-1b1d-4419-82d7-270197738aa9",
                    "output": "output_2",
                }
            },
            "inputNodes": ["e33c6880-1b1d-4419-82d7-270197738aa9"],
        },
        # Probe wired directly to the iterator output.
        "445b44d1-59b3-425c-ac48-7c13e0f2ea5b": {
            "key": "simcore/services/frontend/iterator-consumer/probe/int",
            "version": "1.0.0",
            "label": "Probe Sensor - Integer_2",
            "inputs": {
                "in_1": {
                    "nodeUuid": "0f1e38c9-dcb7-443c-a745-91b97ac28ccc",
                    "output": "out_1",
                }
            },
            "inputNodes": ["0f1e38c9-dcb7-443c-a745-91b97ac28ccc"],
        },
        # Parameter node exposing a constant boolean as out_1.
        "d76fca06-f050-4790-88a8-0aac10c87b39": {
            "key": "simcore/services/frontend/parameter/boolean",
            "version": "1.0.0",
            "label": "Boolean Parameter",
            "inputs": {},
            "inputNodes": [],
            "outputs": {"out_1": True},
        },
    }
def test_extract_project_results(fake_workbench: Dict[str, Any]):
    """extract_project_results() must split the workbench into progress,
    label and value maps suitable for tabular display."""
    results = extract_project_results(fake_workbench)
    # Dump each section to aid debugging when an assertion fails.
    print(json.dumps(results.progress, indent=1))
    print(json.dumps(results.labels, indent=1))
    print(json.dumps(results.values, indent=1))
    # this has to be something that shall be deployable in a table
    assert results.progress == {
        "4c08265a-427b-4ac3-9eab-1d11c822ada4": 0,
        "e33c6880-1b1d-4419-82d7-270197738aa9": 100,
    }
    # labels are not unique, so there is a map to nodeids
    assert results.labels == {
        "0f1e38c9-dcb7-443c-a745-91b97ac28ccc": "Integer iterator",
        "2d0ce8b9-c9c3-43ce-ad2f-ad493898de37": "Probe Sensor - Integer",
        "445b44d1-59b3-425c-ac48-7c13e0f2ea5b": "Probe Sensor - Integer_2",
        "d76fca06-f050-4790-88a8-0aac10c87b39": "Boolean Parameter",
    }
    # this is basically a tree that defines columns
    assert results.values == {
        "0f1e38c9-dcb7-443c-a745-91b97ac28ccc": {"out_1": 1, "out_2": [3, 4]},
        "2d0ce8b9-c9c3-43ce-ad2f-ad493898de37": {"in_1": 7},
        "445b44d1-59b3-425c-ac48-7c13e0f2ea5b": {"in_1": 1},
        "d76fca06-f050-4790-88a8-0aac10c87b39": {"out_1": True},
    }
@pytest.mark.parametrize(
    "model_cls",
    (ExtractedResults,),
)
def test_models_examples(
    model_cls: Type[BaseModel], model_cls_examples: Dict[str, Any]
):
    """Each example from the ``model_cls_examples`` fixture must build a
    valid ``model_cls`` instance."""
    for name, example in model_cls_examples.items():
        print(name, ":", json.dumps(example, indent=1))
        built = model_cls(**example)
        assert built, f"Failed with {name}"
main.py | alberdotpy/tinder_location_changer | 0 | 6618379 | ## Python 3.8
## by alber.py
import sys, os
import wx
from db import insert_into_table
from session import TinderBot
baseFolder = os.path.dirname(os.path.abspath('__file__'))
class Robot(wx.Frame):
    """Top-level application window hosting the location-changer notebook."""
    def __init__(self, parent, title):
        # Fixed 700x450 frame; the title argument is overridden right below.
        super(Robot, self).__init__(parent, title=title, size=(700, 450))
        self.init_ui()
        self.Centre()
        self.SetTitle("Tinder Location Changer")
        self.Maximize(False)
        try:
            # NOTE(review): wx.EmptyIcon() is a deprecated classic-wx alias in
            # wxPython 4 -- confirm the target wx version; wx.Icon() is preferred.
            icon = wx.EmptyIcon()
            icon.CopyFromBitmap(wx.Bitmap("img\\logo.ico", wx.BITMAP_TYPE_ANY))
            self.SetIcon(icon)
        except Exception as e:
            # NOTE(review): the message mentions icon.png but the code loads
            # img\logo.ico -- confirm which file name is intended.
            print("The favicon was not found, please save the favicon in the img directory as icon.png")
    def init_ui(self):
        """Build the notebook with the single 'Location Changer' page."""
        nb = wx.Notebook(self)
        nb.AddPage(Panel1(nb), "Location Changer")
        self.Show(True)
class Panel1(wx.Panel):
    """Single notebook page: add locations, pick one, and launch the bot.

    All console output is redirected into the on-page log box (see the
    ``sys.stdout`` assignment at the end of ``__init__``).
    """
    def __init__(self, parent):
        super(Panel1, self).__init__(parent)
        sizer = wx.GridBagSizer(5, 5)
        # Header: application logo, scaled to 80x80 and centred on top.
        try:
            imageFile = "img\\logo.png"
            png = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
            png = scale_bitmap(png, 80, 80)
            logo = wx.StaticBitmap(self, -1, png, (10, 5), (png.GetWidth(), png.GetHeight()))
            sizer.Add(logo, pos=(0, 0), span=(3, 6), flag=wx.BOTTOM | wx.ALIGN_CENTER | wx.TOP, border=10)
        except Exception as e:
            print("The logo file was not found, please save the logo file in the img directory as logo.png")
            print(e)
        # "Add location" row: city name plus latitude/longitude text inputs.
        lbl_city = wx.StaticText(self, label="City", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_city, pos=(3, 0), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        lbl_lat = wx.StaticText(self, label="Latitude", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_lat, pos=(3, 1), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        lbl_lon = wx.StaticText(self, label="Longitude", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_lon, pos=(3, 2), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        btn_add = wx.Button(self, label="Add Location")
        sizer.Add(btn_add, pos=(4, 3), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.Bind(wx.EVT_BUTTON, self.onAdd, btn_add)
        self.city_to_add = wx.TextCtrl(self, value="Paris")
        sizer.Add(self.city_to_add, pos=(4, 0), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.lat_to_add = wx.TextCtrl(self, value="48.8874318")
        sizer.Add(self.lat_to_add, pos=(4, 1), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.lon_to_add = wx.TextCtrl(self, value="2.2875198")
        sizer.Add(self.lon_to_add, pos=(4, 2), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        line = wx.StaticLine(self)
        sizer.Add(line, pos=(5, 0), span=(1, 6), flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, border=10)
        # "Launch" row: selected-location field plus the start button.
        lbl_select_loc = wx.StaticText(self, label="Selected location : ", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_select_loc, pos=(6, 0), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        self.selected_location = wx.TextCtrl(self, value="Buenos Aires")
        sizer.Add(self.selected_location, pos=(6, 1), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        btn_gen = wx.Button(self, label="Launch")
        sizer.Add(btn_gen, pos=(6, 2), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.Bind(wx.EVT_BUTTON, self.onLaunch, btn_gen)
        # Result Box: read-only log area fed by the stdout redirection below.
        lbl_rbox = wx.StaticText(self, label="Logs :")
        sizer.Add(lbl_rbox, pos=(7, 0), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        self.ResultBox = wx.TextCtrl(self, style=wx.TE_READONLY | wx.TE_MULTILINE)
        sizer.Add(self.ResultBox, pos=(8, 0), span=(3, 6), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        # Footer
        titre = wx.StaticText(self, label="© 2022 - alber.py")
        font = wx.Font(7, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)
        titre.SetFont(font)
        sizer.Add(titre, pos=(12, 0), span=(1, 6), flag=wx.BOTTOM | wx.ALIGN_CENTER | wx.TOP, border=5)
        # Sizer: let the last column/row absorb any extra space.
        sizer.AddGrowableCol(5, 0)
        sizer.AddGrowableRow(9, 0)
        self.SetSizer(sizer)
        sizer.Fit(self)
        # Route print() output into the log box (wx.TextCtrl exposes a
        # file-like write()).  NOTE(review): this hijacks stdout process-wide.
        sys.stdout = self.ResultBox
    def onAdd(self, event):
        """Persist the city/lat/lon fields into the 'locations' table."""
        try:
            insert_into_table(table="locations", dict_location={"location": self.city_to_add.GetValue(),
                                                                "longitude": self.lon_to_add.GetValue(),
                                                                "latitude": self.lat_to_add.GetValue()}
                              )
        except Exception as e:
            print("[-] An error has occurred :(")
            print("Please, see the details below: ")
            print(e)
            raise
        # NOTE(review): message is missing a verb -- likely "added successfully".
        print(f"[+] Location {self.city_to_add.GetValue()} successfully.")
        print()
    def onLaunch(self, event):
        """Start a TinderBot session for the currently selected location."""
        print(f"[+] Starting session...")
        try:
            tindersession = TinderBot(location=self.selected_location.GetValue())
            tindersession.start()
        except Exception as e:
            print("[-] An error has occurred :(")
            print("Please, see the details below: ")
            print(e)
            raise
        print(f"[+] Sleeping 99 999 seconds...")
def main():
    """Create the wx application, show the main frame, and run the event loop."""
    app = wx.App()
    frame = Robot(None, 'Robot')
    frame.Show()
    app.MainLoop()
def scale_bitmap(bitmap, width, height):
    """Return a copy of *bitmap* rescaled to ``width`` x ``height`` pixels.

    Fixed to use the wxPython 4 (Phoenix) API: the classic-wx helpers
    ``wx.ImageFromBitmap`` and ``wx.BitmapFromImage`` used by the original
    were removed in Phoenix, so this function crashed on the wxPython
    releases this Python 3.8 app actually runs on.
    """
    image = bitmap.ConvertToImage()
    image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)
    return wx.Bitmap(image)
if __name__ == '__main__':
main() | ## Python 3.8
## by alber.py
import sys, os
import wx
from db import insert_into_table
from session import TinderBot
baseFolder = os.path.dirname(os.path.abspath('__file__'))
class Robot(wx.Frame):
    """Top-level application window hosting the location-changer notebook."""
    def __init__(self, parent, title):
        # Fixed 700x450 frame; the title argument is overridden right below.
        super(Robot, self).__init__(parent, title=title, size=(700, 450))
        self.init_ui()
        self.Centre()
        self.SetTitle("Tinder Location Changer")
        self.Maximize(False)
        try:
            # NOTE(review): wx.EmptyIcon() is a deprecated classic-wx alias in
            # wxPython 4 -- confirm the target wx version; wx.Icon() is preferred.
            icon = wx.EmptyIcon()
            icon.CopyFromBitmap(wx.Bitmap("img\\logo.ico", wx.BITMAP_TYPE_ANY))
            self.SetIcon(icon)
        except Exception as e:
            # NOTE(review): the message mentions icon.png but the code loads
            # img\logo.ico -- confirm which file name is intended.
            print("The favicon was not found, please save the favicon in the img directory as icon.png")
    def init_ui(self):
        """Build the notebook with the single 'Location Changer' page."""
        nb = wx.Notebook(self)
        nb.AddPage(Panel1(nb), "Location Changer")
        self.Show(True)
class Panel1(wx.Panel):
    """Single notebook page: add locations, pick one, and launch the bot.

    All console output is redirected into the on-page log box (see the
    ``sys.stdout`` assignment at the end of ``__init__``).
    """
    def __init__(self, parent):
        super(Panel1, self).__init__(parent)
        sizer = wx.GridBagSizer(5, 5)
        # Header: application logo, scaled to 80x80 and centred on top.
        try:
            imageFile = "img\\logo.png"
            png = wx.Image(imageFile, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
            png = scale_bitmap(png, 80, 80)
            logo = wx.StaticBitmap(self, -1, png, (10, 5), (png.GetWidth(), png.GetHeight()))
            sizer.Add(logo, pos=(0, 0), span=(3, 6), flag=wx.BOTTOM | wx.ALIGN_CENTER | wx.TOP, border=10)
        except Exception as e:
            print("The logo file was not found, please save the logo file in the img directory as logo.png")
            print(e)
        # "Add location" row: city name plus latitude/longitude text inputs.
        lbl_city = wx.StaticText(self, label="City", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_city, pos=(3, 0), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        lbl_lat = wx.StaticText(self, label="Latitude", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_lat, pos=(3, 1), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        lbl_lon = wx.StaticText(self, label="Longitude", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_lon, pos=(3, 2), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        btn_add = wx.Button(self, label="Add Location")
        sizer.Add(btn_add, pos=(4, 3), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.Bind(wx.EVT_BUTTON, self.onAdd, btn_add)
        self.city_to_add = wx.TextCtrl(self, value="Paris")
        sizer.Add(self.city_to_add, pos=(4, 0), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.lat_to_add = wx.TextCtrl(self, value="48.8874318")
        sizer.Add(self.lat_to_add, pos=(4, 1), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.lon_to_add = wx.TextCtrl(self, value="2.2875198")
        sizer.Add(self.lon_to_add, pos=(4, 2), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        line = wx.StaticLine(self)
        sizer.Add(line, pos=(5, 0), span=(1, 6), flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP | wx.BOTTOM, border=10)
        # "Launch" row: selected-location field plus the start button.
        lbl_select_loc = wx.StaticText(self, label="Selected location : ", style=wx.ALIGN_LEFT)
        sizer.Add(lbl_select_loc, pos=(6, 0), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        self.selected_location = wx.TextCtrl(self, value="Buenos Aires")
        sizer.Add(self.selected_location, pos=(6, 1), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        btn_gen = wx.Button(self, label="Launch")
        sizer.Add(btn_gen, pos=(6, 2), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        self.Bind(wx.EVT_BUTTON, self.onLaunch, btn_gen)
        # Result Box: read-only log area fed by the stdout redirection below.
        lbl_rbox = wx.StaticText(self, label="Logs :")
        sizer.Add(lbl_rbox, pos=(7, 0), flag=wx.LEFT | wx.ALIGN_LEFT, border=15)
        self.ResultBox = wx.TextCtrl(self, style=wx.TE_READONLY | wx.TE_MULTILINE)
        sizer.Add(self.ResultBox, pos=(8, 0), span=(3, 6), flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=15)
        # Footer
        titre = wx.StaticText(self, label="© 2022 - alber.py")
        font = wx.Font(7, wx.DECORATIVE, wx.NORMAL, wx.NORMAL)
        titre.SetFont(font)
        sizer.Add(titre, pos=(12, 0), span=(1, 6), flag=wx.BOTTOM | wx.ALIGN_CENTER | wx.TOP, border=5)
        # Sizer: let the last column/row absorb any extra space.
        sizer.AddGrowableCol(5, 0)
        sizer.AddGrowableRow(9, 0)
        self.SetSizer(sizer)
        sizer.Fit(self)
        # Route print() output into the log box (wx.TextCtrl exposes a
        # file-like write()).  NOTE(review): this hijacks stdout process-wide.
        sys.stdout = self.ResultBox
    def onAdd(self, event):
        """Persist the city/lat/lon fields into the 'locations' table."""
        try:
            insert_into_table(table="locations", dict_location={"location": self.city_to_add.GetValue(),
                                                                "longitude": self.lon_to_add.GetValue(),
                                                                "latitude": self.lat_to_add.GetValue()}
                              )
        except Exception as e:
            print("[-] An error has occurred :(")
            print("Please, see the details below: ")
            print(e)
            raise
        # NOTE(review): message is missing a verb -- likely "added successfully".
        print(f"[+] Location {self.city_to_add.GetValue()} successfully.")
        print()
    def onLaunch(self, event):
        """Start a TinderBot session for the currently selected location."""
        print(f"[+] Starting session...")
        try:
            tindersession = TinderBot(location=self.selected_location.GetValue())
            tindersession.start()
        except Exception as e:
            print("[-] An error has occurred :(")
            print("Please, see the details below: ")
            print(e)
            raise
        print(f"[+] Sleeping 99 999 seconds...")
def main():
    """Create the wx application, show the main frame, and run the event loop."""
    app = wx.App()
    frame = Robot(None, 'Robot')
    frame.Show()
    app.MainLoop()
def scale_bitmap(bitmap, width, height):
    """Return a copy of *bitmap* rescaled to ``width`` x ``height`` pixels.

    Fixed to use the wxPython 4 (Phoenix) API: the classic-wx helpers
    ``wx.ImageFromBitmap`` and ``wx.BitmapFromImage`` used by the original
    were removed in Phoenix, so this function crashed on the wxPython
    releases this Python 3.8 app actually runs on.
    """
    image = bitmap.ConvertToImage()
    image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)
    return wx.Bitmap(image)
if __name__ == '__main__':
main() | en | 0.55474 | ## Python 3.8 ## by alber.py # Header # Logo # Prices & Promotions # Result Box # Footer # Sizer | 2.307483 | 2 |
scraper/__init__.py | benjaminLevinson/wemissyou | 3 | 6618380 | <gh_stars>1-10
from . import scraper

# __all__ entries must be *strings* naming public attributes; listing the
# module object itself makes ``from package import *`` raise
# TypeError ("Item in __all__ must be str").
__all__ = ["scraper"]
from . import scraper

# __all__ entries must be *strings* naming public attributes; listing the
# module object itself makes ``from package import *`` raise
# TypeError ("Item in __all__ must be str").
__all__ = ["scraper"]
testtoonapilib.py | JohnvandeVrugt/toonapilib4domoticz | 13 | 6618381 | <filename>testtoonapilib.py
# This python test can be used to check toonapilib functionality along with your credentials
# As domoticz is running python 3.x one should test this script with python3.x.
# Usage: python3.x testtoonapilib.py
from toonapilib import Toon

# Please fill in your toonapilib token below before running.
token = ''

print("Starting toonapilib test")
print("On success the room temperature will be presented")
try:
    print("Trying to create a toon object")
    toon = Toon(token)
    print("Room temperature: ", toon.temperature)
except Exception as exc:
    # Report the underlying error instead of swallowing it, so that bad
    # credentials vs. connectivity problems can be told apart.
    print("An error occurred creating the Toon object")
    print(exc)
| <filename>testtoonapilib.py
# This python test can be used to check toonapilib functionality along with your credentials
# As domoticz is running python 3.x one should test this script with python3.x.
# Usage: python3.x testtoonapilib.py
from toonapilib import Toon

# Please fill in your toonapilib token below before running.
token = ''

print("Starting toonapilib test")
print("On success the room temperature will be presented")
try:
    print("Trying to create a toon object")
    toon = Toon(token)
    print("Room temperature: ", toon.temperature)
except Exception as exc:
    # Report the underlying error instead of swallowing it, so that bad
    # credentials vs. connectivity problems can be told apart.
    print("An error occurred creating the Toon object")
    print(exc)
| en | 0.81209 | # This python test can be used to check toonapilib functionality along with your credentials # As domoticz is running python 3.x one should test this script with python3.x. # Usage: python3.x testtoonapilib.py # please fill in your credentials and api key and secret | 2.371003 | 2 |
ib/ext/AnyWrapper.py | gkatsQT/ibpy | 1 | 6618382 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Translated source for AnyWrapper.
##
# Source file: AnyWrapper.java
# Target file: AnyWrapper.py
#
# Original file copyright original author(s).
# This file copyright <NAME>, <EMAIL>.
#
# WARNING: all changes to this file will be lost.
from ib.lib.overloading import overloaded
class AnyWrapper(object):
    """ generated source for AnyWrapper

    Abstract callback interface translated from the Java ``AnyWrapper``
    interface.  Subclasses must override every method; the ``@overloaded``
    decorator dispatches the three ``error`` signatures by argument types,
    mirroring Java method overloading.
    """
    @overloaded
    def error(self, e):
        # error(Throwable) in the Java source: called with an exception object.
        raise NotImplementedError()
    @error.register(object, str)
    def error_0(self, strval):
        # error(String): called with a plain error-message string.
        raise NotImplementedError()
    @error.register(object, int, int, str)
    def error_1(self, id, errorCode, errorMsg):
        # error(int, int, String): an id, a numeric error code and a message.
        raise NotImplementedError()
    def connectionClosed(self):
        # Invoked when the API connection is dropped.
        raise NotImplementedError()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Translated source for AnyWrapper.
##
# Source file: AnyWrapper.java
# Target file: AnyWrapper.py
#
# Original file copyright original author(s).
# This file copyright <NAME>, <EMAIL>.
#
# WARNING: all changes to this file will be lost.
from ib.lib.overloading import overloaded
class AnyWrapper(object):
    """ generated source for AnyWrapper

    Abstract callback interface translated from the Java ``AnyWrapper``
    interface.  Subclasses must override every method; the ``@overloaded``
    decorator dispatches the three ``error`` signatures by argument types,
    mirroring Java method overloading.
    """
    @overloaded
    def error(self, e):
        # error(Throwable) in the Java source: called with an exception object.
        raise NotImplementedError()
    @error.register(object, str)
    def error_0(self, strval):
        # error(String): called with a plain error-message string.
        raise NotImplementedError()
    @error.register(object, int, int, str)
    def error_1(self, id, errorCode, errorMsg):
        # error(int, int, String): an id, a numeric error code and a message.
        raise NotImplementedError()
    def connectionClosed(self):
        # Invoked when the API connection is dropped.
        raise NotImplementedError()
qrcode_styled/svg/figures/corner.py | AdamBrianBright/qrcode_styled | 3 | 6618383 | <gh_stars>1-10
from lxml import etree as d
from qrcode_styled.types import num
from .base import Figure
__all__ = [
'Corner',
'ExtraRoundedCornerSquare',
'ExtraRoundedCornerDot',
]
class Corner(Figure):
    """Abstract base for the QR position-marker ("eye") corner figures."""
    def draw(self, x: num, y: num, size: num, rotation: num = 0):
        # Subclasses return an SVG element for the corner at (x, y).
        raise NotImplementedError
class ExtraRoundedCornerSquare(Corner):
    """Corner "eye" frame drawn as a rounded-square ring.

    The path traces the outer rounded square, then an inner rounded square;
    with ``clip-rule: evenodd`` the inner contour punches a hole, leaving a
    ring one module (size / 7) thick.
    """
    def draw(self, x: num, y: num, size: num, rotation: num = 0):
        dot_size = size / 7  # one module of the 7x7 finder pattern
        b = 2.5 * dot_size   # outer corner-arc radius
        a = 2 * dot_size     # straight edge length between arcs
        c = 1.5 * dot_size   # inner corner-arc radius
        el = d.Element('path', attrib={
            'clip-rule': 'evenodd',
            'd': f'M {x} {y + b}'
                 f'v {a}'
                 f'a {b} {b}, 0, 0, 0, {b} {b}'
                 f'h {a}'
                 f'a {b} {b}, 0, 0, 0, {b} {-b}'
                 f'v {-a}'
                 f'a {b} {b}, 0, 0, 0, {-b} {-b}'
                 f'h {-a}'
                 f'a {b} {b}, 0, 0, 0, {-b} {b}'
                 f'M {x + b} {y + dot_size}'
                 f'h {a}'
                 f'a {c} {c}, 0, 0, 1, {c} {c}'
                 f'v {a}'
                 f'a {c} {c}, 0, 0, 1, {-c} {c}'
                 f'h {-a}'
                 f'a {c} {c}, 0, 0, 1, {-c} {-c}'
                 f'v {-a}'
                 f'a {c} {c}, 0, 0, 1, {c} {-c}'
        })
        return self._rotate_figure(el, x, y, size, rotation)
class ExtraRoundedCornerDot(Figure):
    """Corner "eye" centre rendered as a plain filled circle."""
    def draw(self, x: num, y: num, size: num, rotation: num = 0):
        radius = size / 2
        circle = d.Element(
            'circle',
            cx=str(x + radius),
            cy=str(y + radius),
            r=str(radius),
        )
        return self._rotate_figure(circle, x, y, size, rotation)
| from lxml import etree as d
from qrcode_styled.types import num
from .base import Figure
__all__ = [
'Corner',
'ExtraRoundedCornerSquare',
'ExtraRoundedCornerDot',
]
class Corner(Figure):
    """Abstract base for the QR position-marker ("eye") corner figures."""
    def draw(self, x: num, y: num, size: num, rotation: num = 0):
        # Subclasses return an SVG element for the corner at (x, y).
        raise NotImplementedError
class ExtraRoundedCornerSquare(Corner):
    """Corner "eye" frame drawn as a rounded-square ring.

    The path traces the outer rounded square, then an inner rounded square;
    with ``clip-rule: evenodd`` the inner contour punches a hole, leaving a
    ring one module (size / 7) thick.
    """
    def draw(self, x: num, y: num, size: num, rotation: num = 0):
        dot_size = size / 7  # one module of the 7x7 finder pattern
        b = 2.5 * dot_size   # outer corner-arc radius
        a = 2 * dot_size     # straight edge length between arcs
        c = 1.5 * dot_size   # inner corner-arc radius
        el = d.Element('path', attrib={
            'clip-rule': 'evenodd',
            'd': f'M {x} {y + b}'
                 f'v {a}'
                 f'a {b} {b}, 0, 0, 0, {b} {b}'
                 f'h {a}'
                 f'a {b} {b}, 0, 0, 0, {b} {-b}'
                 f'v {-a}'
                 f'a {b} {b}, 0, 0, 0, {-b} {-b}'
                 f'h {-a}'
                 f'a {b} {b}, 0, 0, 0, {-b} {b}'
                 f'M {x + b} {y + dot_size}'
                 f'h {a}'
                 f'a {c} {c}, 0, 0, 1, {c} {c}'
                 f'v {a}'
                 f'a {c} {c}, 0, 0, 1, {-c} {c}'
                 f'h {-a}'
                 f'a {c} {c}, 0, 0, 1, {-c} {-c}'
                 f'v {-a}'
                 f'a {c} {c}, 0, 0, 1, {c} {-c}'
        })
        return self._rotate_figure(el, x, y, size, rotation)
class ExtraRoundedCornerDot(Figure):
    """Corner "eye" centre rendered as a plain filled circle."""
    def draw(self, x: num, y: num, size: num, rotation: num = 0):
        radius = size / 2
        circle = d.Element(
            'circle',
            cx=str(x + radius),
            cy=str(y + radius),
            r=str(radius),
        )
        return self._rotate_figure(circle, x, y, size, rotation)
libs/multihost.py | dedwards-tech/fio-tools | 0 | 6618384 | #! /usr/bin/python
# I incorporated moussa's threading script into remote_exec.py, and created this "similar" script
# multihost.py. It requires PyYaml to be installed on the proxy VM though!
# ~ sudo yum install PyYaml
#
# The command line looks like the following:
# ./multihost.py -y <yaml_input_file> -o ./out -u root -p 'pass!<PASSWORD>'
#
# Here is an example YAML config file for the script for using Alex tools. Kind of a pain not
# knowing the output file name but it works. It will create a remote folder /scratch/dave,
# Run alex tools with output to /scratch/dave, then tar up specific file output to a known
# file name, then delete the /scratch/dave folder.
#
# NOTE: this will leave the .tgz file on the host, and hopefully replace it each time it is run.
#
# hosts:
# dedwood-03.micron.com:
# command: "mkdir /scratch/dave ; /scratch/blktbl-capture-vmware/dumpreg /scratch/dave ; tar -zcf regdump.tgz /scratch/dave/*_REG.txt ; rm -rf /scratch/dave"
# file: "regdump.tgz"
# dedwood-04.micron.com:
# command: "mkdir /scratch/dave ; /scratch/blktbl-capture-vmware/dumpreg /scratch/dave ; tar -zcf regdump.tgz /scratch/dave/*_REG.txt ; rm -rf /scratch/dave"
# file: "regdump.tgz"
#
# You can also override the command line -u and -p for user name or password, or specify a
# unique one per host in the yaml as below.
#
# hosts:
# l-cheddar1:
# username: jenkins
# password: "<PASSWORD>"
# command: "sudo fio fio/sysbench_4k_w.fio > 4k_w.out"
# file: "4k_w.out"
# l-cheddar2:
# username: jenkins
# password: "<PASSWORD>"
# command: "sudo fio fio/sysbench_4k_r.fio > 4k_r.out"
# file: "4k_r.out"
# l-cheddar3:
# username: jenkins
# password: "<PASSWORD>"
# command: "sudo fio fio/sysbench_4k_w.fio > 4k_w.out"
# file: "4k_w.out"
# l-cheddar4:
# username: jenkins
# password: "<PASSWORD>"
# file: "4k_r.out"
# command: "sudo fio fio/sysbench_4k_r.fio > 4k_r.out"
#
"""
Utility for execution commands on mutliple hosts simultaneously via SSH and YAML input.
"""
import sys
sys.path.append('../libs/');
from remote_exec import SvrRemoteControl, SvrRemoteThread
import argparse
import yaml
import string
import os
# Defaults can be overridden via the command line.
CFG_DEF_TARGET_USER = "root";
CFG_DEF_TARGET_PWD = "<PASSWORD>";
def AddArgs(parser_obj):
    """Register the multihost command-line options on *parser_obj*."""
    parser_obj.add_argument(
        '-y', '--yaml',
        dest='CfgYamlIn', action='store', required=True,
        type=argparse.FileType('r'),
        help='Input file containing YAML host + command config.')
    parser_obj.add_argument(
        '-o', '--out',
        dest='CfgOut', action='store', required=True,
        help='Folder to place output files.')
    parser_obj.add_argument(
        '-u', '--user',
        dest='CfgUserName', action='store', required=False,
        default=CFG_DEF_TARGET_USER,
        help='ESXi host (SSH) user name (root).')
    parser_obj.add_argument(
        '-p', '--pwd',
        dest='CfgUserPwd', action='store', required=False,
        default=CFG_DEF_TARGET_PWD,
        help='ESXi (SSH) user password (root).')
def GetArgs():
    """Build the top-level parser and return the parsed command-line args."""
    parser = argparse.ArgumentParser(
        description='Remote execution library - input arguments.')
    AddArgs(parser)
    return parser.parse_args()
#############################################
# SvrMultiHost - works of a yaml spec object with input format below. This
# class provides a mechanism to launch SSH commands on multiple hosts simultaneously.
# Main input is command, and response is expected in the form of a file to copy back
# to the current host.
#
# user_name and user_pass fields of the yaml input is optional, default will be to
# use specified user_name and user_pass given during __init__().
#
# hosts:
# host_name1:
# username: "jenkins" (optional - default = 'root')
# password: "<PASSWORD>" (optional - default = '<PASSWORD>')
# command: "sudo fio fio/sysbench_4k_w.fio > fio_w_4k.out"
# file: "fio_w_4k.out"
# host_name2:
# command: "sudo fio fio/sysbench_4k_w.fio > fio_w_4k.out"
# file: "fio_w_4k.out"
#
# NOTE: upon thread completion, the Wait method will copy the expected output file
# from the remote target to the local host. Upon copy back to the local host
# the file name will be prepended with the host name.
#
class SvrMultiHost:
def __init__(self, yaml_obj, user_name='root', user_pass='<PASSWORD>'):
self.ThreadList = list();
self.YamlObj = yaml_obj;
self.DefaultUser = user_name;
self.DefaultPwd = <PASSWORD>_pass;
def __threxec_cb(self, rc):
host_name = rc.HostName;
cmd_str = self.YamlObj['hosts'][host_name]['command']
print "Executing '%s' on host %s" % (cmd_str, host_name);
e_code, out_str = rc.rexec(cmd_str);
return [ e_code, out_str ];
def Start(self):
print "Starting multi-host threads...";
for host in self.YamlObj['hosts']:
user_name = self.YamlObj['hosts'][host].get('username', self.DefaultUser);
user_pass = self.YamlObj['hosts'][host].get('password', self.DefaultPwd);
th = SvrRemoteThread(host, user_name, user_pass, self.__threxec_cb)
if (th.RC.is_connected()):
self.ThreadList.append(th);
th.start();
def Wait(self):
print "\nWaiting for threads to exit...";
for th in self.ThreadList:
th.join();
print "Thread execution complete.";
# Copy files from the specified remote out_folder, and copy to the "cwd",
# once copy is complete, disconnect the remote host; command execution is complete.
def GetFiles(self, out_folder='.'):
print "Retrieving files from remote hosts...";
for th in self.ThreadList:
host_name = th.RC.HostName;
rem_file = self.YamlObj['hosts'][host_name]['file'];
loc_file = "%s/%s_%s" % (out_folder, host_name, rem_file);
print " Retrieving %s from %s" % (rem_file, host_name)
th.RC.get_file(rem_file, loc_file)
# close the remote connection, no more commands allowed
th.disconnect();
print "File retrieval complete.";
def Go(self, out_folder='.', in_folder='.'):
self.Start();
self.Wait();
self.GetFiles(out_folder);
# SvrMultiHostCustom - Base clase for providing callback based factory to extend for your
# own purposes.
#
class SvrMultiHostCustom:
    """Base class for running caller-supplied remote-execution threads.

    Unlike SvrMultiHost there is no YAML spec and no file copy-back:
    callers add their own thread objects (presumably SvrRemoteThread
    instances -- verify against callers) via AddThread().
    """
    def __init__(self):
        self.ThreadList = list();
    def AddThread(self, host_thread):
        # Queue a thread object to be started by Start().
        self.ThreadList.append(host_thread);
    def Start(self):
        print "Starting multi-host threads...";
        for th in self.ThreadList:
            th.start();
    def Wait(self):
        # Join every thread, then drop the remote connections.
        for th in self.ThreadList:
            th.join();
        print "Threads exited, disconnecting..."
        for th in self.ThreadList:
            th.disconnect();
        print "Thread execution complete.";
    def Go(self):
        # Convenience wrapper: start all threads and wait for completion.
        self.Start();
        self.Wait();
#############################################
# Determine how we were instantiated (command line, or included)
CFG_FROM_CMD_LINE = False;
if (sys.argv[0] == __file__):
    CFG_FROM_CMD_LINE = True;
if (CFG_FROM_CMD_LINE):
    # We were launched from the command line so execute a test workload, only on the first
    # host in the list; this could easily be adapted to work on each host in the list but is
    # not necessary for the "unit test" purpose of this basic functionality.
    args = GetArgs();
    yaml_obj = None;
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input (deprecated in PyYAML >= 5.1); consider
        # yaml.safe_load if the config files are not fully trusted.
        yaml_obj = yaml.load(args.CfgYamlIn);
    except yaml.YAMLError, exc:
        # (Python 2 'except X, e' syntax -- this module is Python 2 only.)
        print "Error in configuration file: %s" % (exc);
    if (yaml_obj is None):
        print "ERR: failed to load YAML config file from %s" % (args.CfgYamlIn.name);
        raise SystemExit(1);
    # Run every host's command, then copy the result files into CfgOut.
    rem_exec = SvrMultiHost(yaml_obj, user_name=args.CfgUserName, user_pass=args.CfgUserPwd);
    rem_exec.Go(args.CfgOut);
    raise SystemExit(0);
| #! /usr/bin/python
# I incorporated moussa's threading script into remote_exec.py, and created this "similar" script
# multihost.py. It requires PyYaml to be installed on the proxy VM though!
# ~ sudo yum install PyYaml
#
# The command line looks like the following:
# ./multihost.py -y <yaml_input_file> -o ./out -u root -p 'pass!<PASSWORD>'
#
# Here is an example YAML config file for the script for using Alex tools. Kind of a pain not
# knowing the output file name but it works. It will create a remote folder /scratch/dave,
# Run alex tools with output to /scratch/dave, then tar up specific file output to a known
# file name, then delete the /scratch/dave folder.
#
# NOTE: this will leave the .tgz file on the host, and hopefully replace it each time it is run.
#
# hosts:
# dedwood-03.micron.com:
# command: "mkdir /scratch/dave ; /scratch/blktbl-capture-vmware/dumpreg /scratch/dave ; tar -zcf regdump.tgz /scratch/dave/*_REG.txt ; rm -rf /scratch/dave"
# file: "regdump.tgz"
# dedwood-04.micron.com:
# command: "mkdir /scratch/dave ; /scratch/blktbl-capture-vmware/dumpreg /scratch/dave ; tar -zcf regdump.tgz /scratch/dave/*_REG.txt ; rm -rf /scratch/dave"
# file: "regdump.tgz"
#
# You can also override the command line -u and -p for user name or password, or specify a
# unique one per host in the yaml as below.
#
# hosts:
# l-cheddar1:
# username: jenkins
# password: "<PASSWORD>"
# command: "sudo fio fio/sysbench_4k_w.fio > 4k_w.out"
# file: "4k_w.out"
# l-cheddar2:
# username: jenkins
# password: "<PASSWORD>"
# command: "sudo fio fio/sysbench_4k_r.fio > 4k_r.out"
# file: "4k_r.out"
# l-cheddar3:
# username: jenkins
# password: "<PASSWORD>"
# command: "sudo fio fio/sysbench_4k_w.fio > 4k_w.out"
# file: "4k_w.out"
# l-cheddar4:
# username: jenkins
# password: "<PASSWORD>"
# file: "4k_r.out"
# command: "sudo fio fio/sysbench_4k_r.fio > 4k_r.out"
#
"""
Utility for execution commands on mutliple hosts simultaneously via SSH and YAML input.
"""
import sys
sys.path.append('../libs/');
from remote_exec import SvrRemoteControl, SvrRemoteThread
import argparse
import yaml
import string
import os
# Defaults can be overridden via the command line.
CFG_DEF_TARGET_USER = "root";
CFG_DEF_TARGET_PWD = "<PASSWORD>";
def AddArgs(parser_obj):
    """Register the multihost command-line options on *parser_obj*."""
    parser_obj.add_argument('-y', '--yaml', dest='CfgYamlIn', action='store', required=True, type=argparse.FileType('r'), help='Input file containing YAML host + command config.');
    parser_obj.add_argument('-o', '--out', dest='CfgOut', action='store', required=True, help='Folder to place output files.');
    parser_obj.add_argument('-u', '--user', dest='CfgUserName', action='store', required=False, default=CFG_DEF_TARGET_USER, help='ESXi host (SSH) user name (root).');
    parser_obj.add_argument('-p', '--pwd', dest='CfgUserPwd', action='store', required=False, default=CFG_DEF_TARGET_PWD, help='ESXi (SSH) user password (root).');
def GetArgs():
    """Build the top-level parser, attach the shared options, and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Remote execution library - input arguments.')
    AddArgs(arg_parser)
    # Hand the parsed namespace straight back to the caller.
    return arg_parser.parse_args()
#############################################
# SvrMultiHost - works of a yaml spec object with input format below. This
# class provides a mechanism to launch SSH commands on multiple hosts simultaneously.
# Main input is command, and response is expected in the form of a file to copy back
# to the current host.
#
# user_name and user_pass fields of the yaml input is optional, default will be to
# use specified user_name and user_pass given during __init__().
#
# hosts:
# host_name1:
# username: "jenkins" (optional - default = 'root')
# password: "<PASSWORD>" (optional - default = '<PASSWORD>')
# command: "sudo fio fio/sysbench_4k_w.fio > fio_w_4k.out"
# file: "fio_w_4k.out"
# host_name2:
# command: "sudo fio fio/sysbench_4k_w.fio > fio_w_4k.out"
# file: "fio_w_4k.out"
#
# NOTE: upon thread completion, the Wait method will copy the expected output file
# from the remote target to the local host. Upon copy back to the local host
# the file name will be prepended with the host name.
#
class SvrMultiHost:
    """Run one SSH command per host, in parallel, from a YAML spec.

    The YAML object must contain a top-level ``hosts`` mapping of
    host name -> {command, file, [username], [password]}; per-host
    credentials override the defaults given at construction time.
    """

    def __init__(self, yaml_obj, user_name='root', user_pass='<PASSWORD>'):
        self.ThreadList = list()
        self.YamlObj = yaml_obj
        self.DefaultUser = user_name
        # BUGFIX: this assignment was corrupted to "<PASSWORD>_pass" (an
        # anonymization artifact, not valid Python); restore the parameter.
        self.DefaultPwd = user_pass

    def __threxec_cb(self, rc):
        """Per-thread callback: run this host's configured command over SSH."""
        host_name = rc.HostName
        cmd_str = self.YamlObj['hosts'][host_name]['command']
        print("Executing '%s' on host %s" % (cmd_str, host_name))
        e_code, out_str = rc.rexec(cmd_str)
        return [e_code, out_str]

    def Start(self):
        """Spawn one SvrRemoteThread per host that connects successfully."""
        print("Starting multi-host threads...")
        for host in self.YamlObj['hosts']:
            # Per-host username/password in the YAML override the defaults.
            user_name = self.YamlObj['hosts'][host].get('username', self.DefaultUser)
            user_pass = self.YamlObj['hosts'][host].get('password', self.DefaultPwd)
            th = SvrRemoteThread(host, user_name, user_pass, self.__threxec_cb)
            if th.RC.is_connected():
                self.ThreadList.append(th)
                th.start()

    def Wait(self):
        """Block until every launched thread has finished."""
        print("\nWaiting for threads to exit...")
        for th in self.ThreadList:
            th.join()
        print("Thread execution complete.")

    def GetFiles(self, out_folder='.'):
        """Copy each host's output file back locally (name prefixed with the
        host name), then disconnect that host; no further commands allowed."""
        print("Retrieving files from remote hosts...")
        for th in self.ThreadList:
            host_name = th.RC.HostName
            rem_file = self.YamlObj['hosts'][host_name]['file']
            loc_file = "%s/%s_%s" % (out_folder, host_name, rem_file)
            print(" Retrieving %s from %s" % (rem_file, host_name))
            th.RC.get_file(rem_file, loc_file)
            # close the remote connection, no more commands allowed
            th.disconnect()
        print("File retrieval complete.")

    def Go(self, out_folder='.', in_folder='.'):
        """Convenience wrapper: Start, Wait, GetFiles.
        (*in_folder* is unused; kept for backward compatibility.)"""
        self.Start()
        self.Wait()
        self.GetFiles(out_folder)
# SvrMultiHostCustom - Base clase for providing callback based factory to extend for your
# own purposes.
#
class SvrMultiHostCustom:
    """Base class for custom multi-host runs: the caller supplies pre-built
    host threads via AddThread() instead of a YAML spec."""

    def __init__(self):
        self.ThreadList = list()

    def AddThread(self, host_thread):
        """Register an already-constructed host thread for later execution."""
        self.ThreadList.append(host_thread)

    def Start(self):
        """Start every registered thread."""
        print("Starting multi-host threads...")
        for th in self.ThreadList:
            th.start()

    def Wait(self):
        """Join all threads, then disconnect each one."""
        for th in self.ThreadList:
            th.join()
        print("Threads exited, disconnecting...")
        for th in self.ThreadList:
            th.disconnect()
        print("Thread execution complete.")

    def Go(self):
        """Convenience wrapper: Start then Wait."""
        self.Start()
        self.Wait()
#############################################
# Determine how we were instantiated (command line, or included)
CFG_FROM_CMD_LINE = False
if sys.argv[0] == __file__:
    CFG_FROM_CMD_LINE = True

if CFG_FROM_CMD_LINE:
    # We were launched from the command line, so execute the multi-host
    # workload described by the YAML config.
    args = GetArgs()
    yaml_obj = None
    try:
        # SECURITY NOTE: yaml.load without an explicit Loader can construct
        # arbitrary objects; only feed it trusted config files (consider
        # yaml.safe_load if the config never needs custom tags).
        yaml_obj = yaml.load(args.CfgYamlIn)
    except yaml.YAMLError as exc:
        # `except E as exc` is valid on Python 2.6+ and required on Python 3;
        # the old `except E, exc` form was Python-2-only.
        print("Error in configuration file: %s" % (exc))
    if yaml_obj is None:
        print("ERR: failed to load YAML config file from %s" % (args.CfgYamlIn.name))
        raise SystemExit(1)
    rem_exec = SvrMultiHost(yaml_obj, user_name=args.CfgUserName, user_pass=args.CfgUserPwd)
    rem_exec.Go(args.CfgOut)
    raise SystemExit(0)
| en | 0.72784 | #! /usr/bin/python # I incorporated moussa's threading script into remote_exec.py, and created this "similar" script # multihost.py. It requires PyYaml to be installed on the proxy VM though! # ~ sudo yum install PyYaml # # The command line looks like the following: # ./multihost.py -y <yaml_input_file> -o ./out -u root -p 'pass!<PASSWORD>' # # Here iss an example YAML config file for the script for using Alex tools. Kind of a pain not # knowing the output file name but it works. It will create a remote folder /scratch/dave, # Run alex tools with output to /scratch/dave, then tar up specific file output to a known # file name, then delete the /scratch/dave folder. # # NOTE: this will leave the .tgz file on the host, and hopefully replace it each time it is run. # # hosts: # dedwood-03.micron.com: # command: "mkdir /scratch/dave ; /scratch/blktbl-capture-vmware/dumpreg /scratch/dave ; tar -zcf regdump.tgz /scratch/dave/*_REG.txt ; rm -rf /scratch/dave" # file: "regdump.tgz" # dedwood-04.micron.com: # command: "mkdir /scratch/dave ; /scratch/blktbl-capture-vmware/dumpreg /scratch/dave ; tar -zcf regdump.tgz /scratch/dave/*_REG.txt ; rm -rf /scratch/dave" # file: "regdump.tgz" # # You can also override the command line -u and -p for user name or password, or specify a # unique one per host in the yaml as below. # # hosts: # l-cheddar1: # username: jenkins # password: "<PASSWORD>" # command: "sudo fio fio/sysbench_4k_w.fio > 4k_w.out" # file: "4k_w.out" # l-cheddar2: # username: jenkins # password: "<PASSWORD>" # command: "sudo fio fio/sysbench_4k_r.fio > 4k_r.out" # file: "4k_r.out" # l-cheddar3: # username: jenkins # password: "<PASSWORD>" # command: "sudo fio fio/sysbench_4k_w.fio > 4k_w.out" # file: "4k_w.out" # l-cheddar4: # username: jenkins # password: "<PASSWORD>" # file: "4k_r.out" # command: "sudo fio fio/sysbench_4k_r.fio > 4k_r.out" # Utility for execution commands on mutliple hosts simultaneously via SSH and YAML input. 
# Defaults can be overridden via the command line. ; Supports the command-line arguments listed below.; # create the top-level parser # parse the args and call whatever function was selected ############################################# # SvrMultiHost - works of a yaml spec object with input format below. This # class provides a mechanism to launch SSH commands on multiple hosts simultaneously. # Main input is command, and response is expected in the form of a file to copy back # to the current host. # # user_name and user_pass fields of the yaml input is optional, default will be to # use specified user_name and user_pass given during __init__(). # # hosts: # host_name1: # username: "jenkins" (optional - default = 'root') # password: "<PASSWORD>" (optional - default = '<PASSWORD>') # command: "sudo fio fio/sysbench_4k_w.fio > fio_w_4k.out" # file: "fio_w_4k.out" # host_name2: # command: "sudo fio fio/sysbench_4k_w.fio > fio_w_4k.out" # file: "fio_w_4k.out" # # NOTE: upon thread completion, the Wait method will copy the expected output file # from the remote target to the local host. Upon copy back to the local host # the file name will be prepended with the host name. # # Copy files from the specified remote out_folder, and copy to the "cwd", # once copy is complete, disconnect the remote host; command execution is complete. # close the remote connection, no more commands allowed # SvrMultiHostCustom - Base clase for providing callback based factory to extend for your # own purposes. # ############################################# # Determine how we were instantiated (command line, or included) # We were launched from the command line so execute a test workload, only on the first # host in the list; this could easily be adapted to work on each host in the list but is # not necessary for the "unit test" purpose of this basic functionality. | 1.964825 | 2 |
python/saveToDB.py | YudieZhang/SampleProject | 1 | 6618385 | <reponame>YudieZhang/SampleProject
import csv
import mysql.connector
import random
def createID(length):
    """Return a uniformly random alphanumeric identifier.

    Args:
        length: number of characters to generate.

    Returns:
        A string of ``length`` characters drawn from [0-9A-Za-z].
    """
    # Alphanumeric ASCII code points: digits 48-57, uppercase 65-90,
    # lowercase 97-122 -- the same 62-symbol set the original rejection
    # sampling produced.  (Parameter renamed from `len`, which shadowed
    # the builtin.)
    alphabet = (
        [chr(c) for c in range(48, 58)]
        + [chr(c) for c in range(65, 91)]
        + [chr(c) for c in range(97, 123)]
    )
    # random.choices draws uniformly with replacement, matching the
    # original distribution without the rejection loop.
    return "".join(random.choices(alphabet, k=length))
class dbInsert():
    """Load the scraped CSV files (colors/sizes/reviews/tags) into MySQL."""

    def __init__(self):
        self.mycursor = None

    def setup(self):
        """Open a connection to the local ``fashiondb`` schema and keep a cursor."""
        connection = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="",
            database="fashiondb"
        )
        self.mycursor = connection.cursor()

    def execute(self):
        """Read each CSV file and insert its rows, committing after every row."""
        # Parameterised INSERT statements for every target table.  The
        # products and images loads were disabled (commented out) in the
        # original source; their statements are kept for reference.
        sql_products = "INSERT INTO products (productID,productName,categories,price,ori_price,img_first,img_after,description,p_details) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        sql_images = "INSERT INTO images (productID,imUrl) VALUES (%s,%s)"
        sql_colors = "INSERT INTO colors (productID,color_title,color_option,color_style) VALUES (%s,%s,%s,%s)"
        sql_sizes = "INSERT INTO sizes (productID,size) VALUES (%s,%s)"
        sql_reviews = "INSERT INTO reviews (reviewID,productID,reviewerID,nickname,avatar,rate,reviewText,reviewPics,time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        sql_tags = "INSERT INTO tags (productID,tags) VALUES (%s,%s)"

        mydb = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="",
            database="fashiondb"
        )
        mycursor = mydb.cursor()

        # ---- colors ----
        with open('colors.csv') as f_color:
            for row in csv.reader(f_color):
                if len(row) == 4:
                    product_id, title, option, style = row
                    mycursor.execute(sql_colors, (product_id, title, option, style))
                    mydb.commit()
                else:
                    print(str(row))
                    print("Color Error: " + str(len(row)))

        # ---- sizes ----
        with open('sizes.csv') as f_size:
            for row in csv.reader(f_size):
                if len(row) == 2:
                    product_id, size = row
                    mycursor.execute(sql_sizes, (product_id, size))
                    mydb.commit()
                else:
                    print("Size Error: " + str(len(row)))

        # ---- reviews ----
        with open('reviews.csv') as f_review:
            for row in csv.reader(f_review):
                if len(row) != 8:
                    print("Review Error: " + str(len(row)))
                    continue
                review_id, product_id = row[0], row[1]
                reviewer_id = createID(20)
                # Strip the "b'" / "'" artefacts left by str(bytes) dumps.
                # replace() is a no-op when the marker is absent, so these
                # unconditional calls match the original conditional logic.
                nickname = row[2].replace("b'", "").replace("'", "")
                review_text = row[5].replace("b'", "").replace("'", "")
                values = (review_id, product_id, reviewer_id, nickname,
                          row[3], row[4], review_text, row[6], row[7])
                mycursor.execute(sql_reviews, values)
                mydb.commit()

        # ---- tags ----
        with open('tags.csv') as f_tags:
            for row in csv.reader(f_tags):
                if len(row) == 2:
                    product_id, tag = row
                    mycursor.execute(sql_tags, (product_id, tag))
                    mydb.commit()
                else:
                    print("Tag Error: " + str(len(row)))
if __name__ == "__main__":
db = dbInsert()
db.setup()
db.execute()
| import csv
import mysql.connector
import random
def createID(len):
    """Build a random ID string of `len` characters drawn from [0-9A-Za-z].

    NOTE: the parameter name shadows the built-in len() inside this body.
    """
    # ASCII punctuation gaps inside 48..122 that are not alphanumeric.
    gap_a = range(58, 65)  # between '9' and 'A'
    gap_b = range(91, 97)  # between 'Z' and 'a'
    out = ""
    count = 0
    while count < len:
        code = random.randint(48, 122)
        # Rejection sampling: skip codes that fall in either gap.
        if code in gap_a or code in gap_b:
            continue
        out += chr(code)
        count += 1
    return out
class dbInsert():
    """Load the scraped CSV files (colors/sizes/reviews/tags) into MySQL."""

    def __init__(self):
        self.mycursor = None

    def setup(self):
        """Open a connection to the local ``fashiondb`` schema and keep a cursor."""
        connection = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="",
            database="fashiondb"
        )
        self.mycursor = connection.cursor()

    def execute(self):
        """Read each CSV file and insert its rows, committing after every row."""
        # Parameterised INSERT statements for every target table.  The
        # products and images loads were disabled (commented out) in the
        # original source; their statements are kept for reference.
        sql_products = "INSERT INTO products (productID,productName,categories,price,ori_price,img_first,img_after,description,p_details) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        sql_images = "INSERT INTO images (productID,imUrl) VALUES (%s,%s)"
        sql_colors = "INSERT INTO colors (productID,color_title,color_option,color_style) VALUES (%s,%s,%s,%s)"
        sql_sizes = "INSERT INTO sizes (productID,size) VALUES (%s,%s)"
        sql_reviews = "INSERT INTO reviews (reviewID,productID,reviewerID,nickname,avatar,rate,reviewText,reviewPics,time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        sql_tags = "INSERT INTO tags (productID,tags) VALUES (%s,%s)"

        mydb = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="",
            database="fashiondb"
        )
        mycursor = mydb.cursor()

        # ---- colors ----
        with open('colors.csv') as f_color:
            for row in csv.reader(f_color):
                if len(row) == 4:
                    product_id, title, option, style = row
                    mycursor.execute(sql_colors, (product_id, title, option, style))
                    mydb.commit()
                else:
                    print(str(row))
                    print("Color Error: " + str(len(row)))

        # ---- sizes ----
        with open('sizes.csv') as f_size:
            for row in csv.reader(f_size):
                if len(row) == 2:
                    product_id, size = row
                    mycursor.execute(sql_sizes, (product_id, size))
                    mydb.commit()
                else:
                    print("Size Error: " + str(len(row)))

        # ---- reviews ----
        with open('reviews.csv') as f_review:
            for row in csv.reader(f_review):
                if len(row) != 8:
                    print("Review Error: " + str(len(row)))
                    continue
                review_id, product_id = row[0], row[1]
                reviewer_id = createID(20)
                # Strip the "b'" / "'" artefacts left by str(bytes) dumps.
                # replace() is a no-op when the marker is absent, so these
                # unconditional calls match the original conditional logic.
                nickname = row[2].replace("b'", "").replace("'", "")
                review_text = row[5].replace("b'", "").replace("'", "")
                values = (review_id, product_id, reviewer_id, nickname,
                          row[3], row[4], review_text, row[6], row[7])
                mycursor.execute(sql_reviews, values)
                mydb.commit()

        # ---- tags ----
        with open('tags.csv') as f_tags:
            for row in csv.reader(f_tags):
                if len(row) == 2:
                    product_id, tag = row
                    mycursor.execute(sql_tags, (product_id, tag))
                    mydb.commit()
                else:
                    print("Tag Error: " + str(len(row)))
if __name__ == "__main__":
db = dbInsert()
db.setup()
db.execute() | en | 0.284144 | # between 0~9 and A~Z # between A~Z and a~z #mycursor = self.mycursor # Save to table products. ## with open('item_info.csv')as f_info: ## reader_info = csv.reader(f_info) ## for parts in reader_info: ## if len(parts) == 9: ## num = parts[0] ## ID = parts[1] ## url = parts[2] ## catg = parts[3] ## p_name = parts[4] ## ## if "SD" in parts[5]: ## price = parts[5].replace('SD','') ## elif "CAD" in parts[5]: ## price = parts[5].replace('CAD','') ## elif "GBP" in parts[5]: ## price = parts[5].replace('GBP','') ## elif "D" in parts[5]: ## price = parts[5].replace('D','') ## else: ## price = parts[5] ## ## if "CA$" in parts[6]: ## ori_price = parts[6].replace('CA$','') ## elif "$" in parts[6]: ## ori_price = parts[6].replace('$','') ## else: ## ori_price = parts[6] ## desc = parts[7] ## p_details = parts[8] ## else: ## print("Info Error: "+str(len(parts))) ## ## with open('categories.csv')as f_catg: ## reader_catg = csv.reader(f_catg) ## for items in reader_catg: ## if len(items) == 4: ## link = items[0] ## img_1 = items[2] ## img_2 = items[3] ## else: ## print("Image Error") ## if link == url: ## #print(str(ID+", "+p_name+", "+catg+", "+price+", "+ori_price+", "+img_1+", "+img_2+", "+desc+", "+p_details+"\n")) ## if ori_price != "": ## val = (ID,p_name,catg,float(price),float(ori_price),img_1,img_2,desc,p_details) ## else: ## val = (ID,p_name,catg,float(price),"",img_1,img_2,desc,p_details) ## ## mycursor.execute(sql_products, val) ## mydb.commit() ## break # Save to table images. ## with open('images.csv')as f_img: ## reader_img = csv.reader(f_img) ## for parts in reader_img: ## if len(parts) == 2: ## ID = parts[0] ## imUrl = parts[1] ## val = (ID,imUrl) ## mycursor.execute(sql_images, val) ## mydb.commit() ## else: ## print("Image Error: "+str(len(parts))) # Save to table colors. # Save to table sizes. # Save to table reviews. # Save to table tags. | 2.785584 | 3 |
app/forms.py | n6151h/roborank | 0 | 6618386 | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SelectField, BooleanField
from wtforms.validators import DataRequired, NumberRange, InputRequired
from wtforms.validators import ValidationError, StopValidation
from app import db
from flask import session
from app import app
import os
class CompetitionForm(FlaskForm):
    """Form for creating a competition; the name must be unique because it
    becomes the name of the competition's .db file in COMPETITION_DIR."""

    name = StringField('Competition Name', validators=[InputRequired(message='Please provide a unique competition name')])

    def validate_name(form, field):
        """Reject names whose backing database file already exists.

        (Fixed the user-facing typo "compeition" -> "competition" in both
        validation messages.)
        """
        if os.path.exists(os.path.join(app.config['COMPETITION_DIR'], field.data + '.db')):
            raise StopValidation('Competition named "{}" already exists. Please choose another name.'.format(field.data))
class TeamForm(FlaskForm):
    """Form for registering a team by its numeric team ID."""

    teamId = IntegerField('Team ID', validators=[DataRequired()])
    name = StringField('Team Name (optional)')

    def validate_teamId(form, field):
        """
        Make sure the teamId specified is unique.

        BUGFIX: WTForms only auto-invokes inline validators named
        ``validate_<fieldname>``; the original method was named
        ``validate_id`` and therefore never ran for the ``teamId`` field.
        """
        if db.query_db('select count(*) from teams where teamId=?', [field.data])[0]['count(*)'] > 0:
            raise StopValidation('Team "{}" already exists. Please specify a unique team name.'.format(field.data))
class DataEntryForm(FlaskForm):
    """Per-round score entry for a single team (ball counts plus bonus flags)."""
    round = IntegerField('Round', validators=[InputRequired(message='Round must be a positive integer.'), NumberRange(min=1, max=20)])
    # choices=[] here; presumably populated by the view before rendering — TODO confirm.
    teamId = SelectField('Team ID', validators=[DataRequired()], choices=[], coerce=int)
    high_balls = IntegerField('High Balls', validators=[NumberRange(min=0)])
    low_balls= IntegerField('Low Balls', validators=[NumberRange(min=0)])
    autonomous = BooleanField('Autonomous')
    climb = BooleanField('Climbing')
    spin_by_colour = BooleanField('Spin (Colour)')
    spin_by_rotate = BooleanField('Spin (Rotate)')
    def validate_teamId(form, teamId):
        """
        Make sure teamId already exists in the *teams* table.

        Invoked automatically by WTForms because the name matches
        ``validate_<fieldname>``.
        """
        # -1 (or its string form) is the "nothing selected" placeholder entry.
        if teamId.data in [-1, '-1']:
            raise StopValidation('Please select a team from the Team ID drop-down.')
        if db.query_db('select count(*) from teams where teamId=?', [teamId.data])[0]['count(*)'] <= 0:
            raise StopValidation('Team "{}" has not yet been defined.'.format(teamId.data))
class ParameterForm(FlaskForm):
    """
    The ranking algorithm has several parameters that can be modified from their defaults to
    alter the resulting ranks. For example, high or low ball scores that are zero can (and probably should) be
    penalized, so we can apply a handicap (``zero-balls``) to those values.
    """
    # Handicap applied when a ball count is zero; must be non-negative.
    zero_balls = IntegerField('Zero-Balls', validators=[NumberRange(min=0, message="Must be non-negative integer." )])
    # Point values credited for each bonus action; all must be positive.
    autonomous_points = IntegerField('Autonomous Value', validators=[NumberRange(min=1, message='Must be > 0')])
    climb_points = IntegerField('Climb Value', validators=[NumberRange(min=1, message='Must be > 0')])
    spin_rot_points = IntegerField('Spin-by-Rotation Value', validators=[NumberRange(min=1, message='Must be > 0')])
    spin_col_points = IntegerField('Spin-by-Colour Value', validators=[NumberRange(min=1, message='Must be > 0')])
| from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SelectField, BooleanField
from wtforms.validators import DataRequired, NumberRange, InputRequired
from wtforms.validators import ValidationError, StopValidation
from app import db
from flask import session
from app import app
import os
class CompetitionForm(FlaskForm):
    """Form for creating a competition; the name must be unique because it
    becomes the name of the competition's .db file in COMPETITION_DIR."""

    name = StringField('Competition Name', validators=[InputRequired(message='Please provide a unique competition name')])

    def validate_name(form, field):
        """Reject names whose backing database file already exists.

        (Fixed the user-facing typo "compeition" -> "competition" in both
        validation messages.)
        """
        if os.path.exists(os.path.join(app.config['COMPETITION_DIR'], field.data + '.db')):
            raise StopValidation('Competition named "{}" already exists. Please choose another name.'.format(field.data))
class TeamForm(FlaskForm):
    """Form for registering a team by its numeric team ID."""

    teamId = IntegerField('Team ID', validators=[DataRequired()])
    name = StringField('Team Name (optional)')

    def validate_teamId(form, field):
        """
        Make sure the teamId specified is unique.

        BUGFIX: WTForms only auto-invokes inline validators named
        ``validate_<fieldname>``; the original method was named
        ``validate_id`` and therefore never ran for the ``teamId`` field.
        """
        if db.query_db('select count(*) from teams where teamId=?', [field.data])[0]['count(*)'] > 0:
            raise StopValidation('Team "{}" already exists. Please specify a unique team name.'.format(field.data))
class DataEntryForm(FlaskForm):
    """Per-round score entry for a single team (ball counts plus bonus flags)."""
    round = IntegerField('Round', validators=[InputRequired(message='Round must be a positive integer.'), NumberRange(min=1, max=20)])
    # choices=[] here; presumably populated by the view before rendering — TODO confirm.
    teamId = SelectField('Team ID', validators=[DataRequired()], choices=[], coerce=int)
    high_balls = IntegerField('High Balls', validators=[NumberRange(min=0)])
    low_balls= IntegerField('Low Balls', validators=[NumberRange(min=0)])
    autonomous = BooleanField('Autonomous')
    climb = BooleanField('Climbing')
    spin_by_colour = BooleanField('Spin (Colour)')
    spin_by_rotate = BooleanField('Spin (Rotate)')
    def validate_teamId(form, teamId):
        """
        Make sure teamId already exists in the *teams* table.

        Invoked automatically by WTForms because the name matches
        ``validate_<fieldname>``.
        """
        # -1 (or its string form) is the "nothing selected" placeholder entry.
        if teamId.data in [-1, '-1']:
            raise StopValidation('Please select a team from the Team ID drop-down.')
        if db.query_db('select count(*) from teams where teamId=?', [teamId.data])[0]['count(*)'] <= 0:
            raise StopValidation('Team "{}" has not yet been defined.'.format(teamId.data))
class ParameterForm(FlaskForm):
    """
    The ranking algorithm has several parameters that can be modified from their defaults to
    alter the resulting ranks. For example, high or low ball scores that are zero can (and probably should) be
    penalized, so we can apply a handicap (``zero-balls``) to those values.
    """
    # Handicap applied when a ball count is zero; must be non-negative.
    zero_balls = IntegerField('Zero-Balls', validators=[NumberRange(min=0, message="Must be non-negative integer." )])
    # Point values credited for each bonus action; all must be positive.
    autonomous_points = IntegerField('Autonomous Value', validators=[NumberRange(min=1, message='Must be > 0')])
    climb_points = IntegerField('Climb Value', validators=[NumberRange(min=1, message='Must be > 0')])
    spin_rot_points = IntegerField('Spin-by-Rotation Value', validators=[NumberRange(min=1, message='Must be > 0')])
    spin_col_points = IntegerField('Spin-by-Colour Value', validators=[NumberRange(min=1, message='Must be > 0')])
| en | 0.853996 | Make sure teamId specified is unique. Make sure teamId already exists in *teames* table. The ranking algorithm has several parameters that can be modified from their defaults to alter the resulting ranks. For example, high or low ball scores that are zero can (and probably should) be penalized, so we can apply a handicap (``zero-balls``) to those values. | 3.030608 | 3 |
Python/kurosc/setup.py | chriswilly/kuramoto-osc | 1 | 6618387 | from setuptools import setup
__version__ = None
exec(open('kurosc/version.py').read())
setup(name='kurosc', # 'kuramotoNeighbor'
version=__version__,
description='AMATH 575 project on weakly coupled phase synchonous oscillators\
with distance delay and decay and second order interrupting interactions',
url='https://github.com/chriswilly/kuramoto-osc',
author=['<NAME>',
'Group Contributors:',
'<NAME>',
'<NAME>',
'<NAME>'],
author_email=['<EMAIL>','add yours here'],
license='MIT',
packages=['kurosc'],
zip_safe=False
)
from setuptools import setup

__version__ = None
# Read __version__ from kurosc/version.py without importing the package.
# A context manager closes the file promptly, unlike the original
# exec(open(...).read()) which leaked the file handle.
with open('kurosc/version.py') as version_file:
    exec(version_file.read())

setup(name='kurosc', # 'kuramotoNeighbor'
      version=__version__,
      description='AMATH 575 project on weakly coupled phase synchonous oscillators\
 with distance delay and decay and second order interrupting interactions',
      url='https://github.com/chriswilly/kuramoto-osc',
      author=['<NAME>',
              'Group Contributors:',
              '<NAME>',
              '<NAME>',
              '<NAME>'],
      author_email=['<EMAIL>','add yours here'],
      license='MIT',
      packages=['kurosc'],
      zip_safe=False
      )
| en | 0.190611 | # 'kuramotoNeighbor' | 1.20916 | 1 |
src/cardinal/db/migrations/versions/25d0f68d0698_role_id_should_be_unique.py | FallenWarrior2k/cardinal.py | 1 | 6618388 | <gh_stars>1-10
"""Role ID should be unique
Revision ID: 25d0f68d0698
Revises: 313ce21eb461
Create Date: 2018-12-10 17:04:30.274466
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Restored from the module docstring ("Revision ID: 25d0f68d0698"); the
# checked-in value was an anonymization placeholder ("<KEY>"), which would
# break Alembic's revision graph.
revision = "25d0f68d0698"
down_revision = "313ce21eb461"
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a UNIQUE constraint on ``mute_guilds.role_id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint(
        op.f("uq_mute_guilds_role_id"), "mute_guilds", ["role_id"]
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the UNIQUE constraint added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(op.f("uq_mute_guilds_role_id"), "mute_guilds", type_="unique")
    # ### end Alembic commands ###
| """Role ID should be unique
Revision ID: 25d0f68d0698
Revises: 313ce21eb461
Create Date: 2018-12-10 17:04:30.274466
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Restored from the module docstring ("Revision ID: 25d0f68d0698"); the
# checked-in value was an anonymization placeholder ("<KEY>"), which would
# break Alembic's revision graph.
revision = "25d0f68d0698"
down_revision = "313ce21eb461"
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a UNIQUE constraint on ``mute_guilds.role_id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint(
        op.f("uq_mute_guilds_role_id"), "mute_guilds", ["role_id"]
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the UNIQUE constraint added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(op.f("uq_mute_guilds_role_id"), "mute_guilds", type_="unique")
    # ### end Alembic commands ###
touchic/linear_gauge.py | prosenjit-mdhslab/touchic | 0 | 6618389 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on May 19 2021
@author: Prosenjit
Custom Qt Widget to show a linear gauge with min-max. The following modifiable
attributes are exposed.
"""
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal
from typing import Union
from .display_config import ICDisplayConfig
from .base_widget import ICBaseWidget, ICWidgetState, ICWidgetPosition
from .linear_axis import ICLinearAxis, ICLinearAxisContainer, ICLinearContainerType
class ICGaugeBar(ICBaseWidget):
    """
    Class for a custom widget to draw a colored bar.
    Length of the colored bar is proportional to the value.
    """
    # bar state has changed signal; emitted with the newly assigned gauge
    # value (see the gauge_value setter)
    changed = pyqtSignal(float)
    def __init__(self, min_val: float, max_val: float, curr_val: float, position: ICWidgetPosition = ICWidgetPosition.Bottom,
                 widget_id: int = 0, *args, **kwargs):
        """Initialize gauge state and appearance defaults.

        Args:
            min_val: lower end of the displayed range.
            max_val: upper end of the displayed range (also the initial upper alarm level).
            curr_val: starting gauge value; also seeds cycle min/max and target.
            position: where the gauge is placed (default: ICWidgetPosition.Bottom).
            widget_id: id forwarded to ICBaseWidget along with *args/**kwargs.
        """
        super(ICGaugeBar, self).__init__(widget_id, *args, **kwargs)
        # minimum and maximum value of the gauge bar
        self._gauge_range_min: float = min_val
        self._gauge_range_max: float = max_val
        # current value of the gauge (NOTE: not validated against the range here;
        # only the gauge_value setter clamps)
        self._gauge_val: float = curr_val
        # has the current value lead to an alarm
        self.alarm_activated = False
        # upper alarm level for the gauge (inactive until _alarm_upper_level_set)
        self._alarm_upper_level: float = max_val
        self._alarm_upper_level_text: str = "UL"
        self._alarm_upper_level_set: bool = False
        # lower alarm level for the gauge (inactive until _alarm_lower_level_set)
        self._alarm_lower_level: float = min_val
        self._alarm_lower_level_text: str = "LL"
        self._alarm_lower_level_set: bool = False
        # max level tracking (updated by the gauge_value setter when enabled)
        self._cycle_max: float = curr_val
        self._cycle_max_tracking: bool = False
        # min level tracking (updated by the gauge_value setter when enabled)
        self._cycle_min: float = curr_val
        self._cycle_min_tracking: bool = False
        # target tracking
        self._target_value: float = curr_val
        self._target_tracking: bool = False
        # gauge width
        self._gauge_width: int = ICDisplayConfig.LinearGaugeWidth
        # background colors
        self._back_color_light: QtGui.QColor = ICDisplayConfig.LinearGaugeBoxColorLight
        self._back_color_dark: QtGui.QColor = ICDisplayConfig.LinearGaugeBoxColorDark
        # gauge colors normal
        self._gauge_color_normal_light: QtGui.QColor = ICDisplayConfig.LinearGaugeNormalLight
        self._gauge_color_normal_dark: QtGui.QColor = ICDisplayConfig.LinearGaugeNormalDark
        # gauge colors alarmed
        self._gauge_color_alarm_light: QtGui.QColor = ICDisplayConfig.LinearGaugeErrorLight
        self._gauge_color_alarm_dark: QtGui.QColor = ICDisplayConfig.LinearGaugeErrorDark
        # alarm level text size and color
        self._alarm_text_size: int = ICDisplayConfig.LabelTextSize
        self._alarm_text_color: QtGui.QColor = ICDisplayConfig.LinearGaugeLimitsColor
        # min max line color
        self._min_max_color: QtGui.QColor = ICDisplayConfig.LinearGaugeMinMaxColor
        # target color
        self._target_color: QtGui.QColor = ICDisplayConfig.LinearGaugeTargetColor
        # sets the click-ability and focus-ability of the button
        self.clickable = True
        self.focusable = False
        # set the position of the gauge.
        self.position = position
        # override the base Size policy
        self.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
    ########################################################
    # properties
    ########################################################
    # get the minimum limit of the gauge bar
    @property
    def gauge_range_min(self) -> float:
        """Lower end of the gauge's displayed range."""
        return self._gauge_range_min
    # set the minimum limit of the gauge bar (triggers a repaint)
    @gauge_range_min.setter
    def gauge_range_min(self, min_val: float) -> None:
        self._gauge_range_min = min_val
        self.update()
    # get the maximum limit of the gauge bar
    @property
    def gauge_range_max(self) -> float:
        """Upper end of the gauge's displayed range."""
        return self._gauge_range_max
    # set the maximum limit of the gauge bar (triggers a repaint)
    @gauge_range_max.setter
    def gauge_range_max(self, max_val: float) -> None:
        self._gauge_range_max = max_val
        self.update()
# get the current value
@property
def gauge_value(self) -> float:
return self._gauge_val
# set the current value
@gauge_value.setter
def gauge_value(self, val: float) -> None:
if self._gauge_val != val:
# limit gauge value to the min and max range
if val < self._gauge_range_min:
self._gauge_val = self._gauge_range_min
elif val > self._gauge_range_max:
self._gauge_val = self._gauge_range_max
else:
self._gauge_val = val
# update the min value
if self._cycle_min_tracking:
if val < self._cycle_min:
self._cycle_min = val
# update the max value
if self._cycle_max_tracking:
if val > self._cycle_max:
self._cycle_max = val
# reset the alarm before testing
self.alarm_activated = False
# check for too low alarm
if self._alarm_lower_level_set:
if val < self._alarm_lower_level:
self.alarm_activated = True
# check for too high alarm
if self._alarm_upper_level_set:
if val > self._alarm_upper_level:
self.alarm_activated = True
self.changed.emit(val)
self.update()
# get the upper level alarm
# tuple of (name, value)
@property
def upper_alarm(self) -> Union[tuple[str, float], tuple[None, None]]:
if self._alarm_upper_level_set:
return self._alarm_upper_level_text, self._alarm_upper_level
else:
return None, None
# set the upper level alarm
@upper_alarm.setter
def upper_alarm(self, alarm: tuple[str, float]) -> None:
# check if upper alarm level is greater than the lower alarm level
if self._alarm_lower_level_set:
if alarm[1] < self._alarm_lower_level:
return
# check if the limit value is in between the max and min values
if self._gauge_range_min <= alarm[1] <= self._gauge_range_max:
self._alarm_upper_level_set = True
self._alarm_upper_level_text = alarm[0]
self._alarm_upper_level = alarm[1]
# check for alarm level
if self._gauge_val > self._alarm_upper_level:
self.alarm_activated = True
self.changed.emit(self._gauge_val)
self.update()
# get the lower level alarm
# tuple of (name, value)
@property
def lower_alarm(self) -> Union[tuple[str, float], tuple[None, None]]:
if self._alarm_lower_level_set:
return self._alarm_lower_level_text, self._alarm_lower_level
else:
return None, None
# set the upper level alarm
@lower_alarm.setter
def lower_alarm(self, alarm: tuple[str, float]) -> None:
# check if lower alarm level is less the upper alarm level
if self._alarm_upper_level_set:
if alarm[1] > self._alarm_upper_level:
return
# check if the limit value is in between the max and min values
if self._gauge_range_min <= alarm[1] <= self._gauge_range_max:
self._alarm_lower_level_set = True
self._alarm_lower_level_text = alarm[0]
self._alarm_lower_level = alarm[1]
# check if alarm is active
if self._gauge_val < self._alarm_lower_level:
self.alarm_activated = True
self.changed.emit(self._gauge_val)
self.update()
@property
def target_value(self) -> Union[float, None]:
if self._target_tracking:
return self._target_value
return None
@target_value.setter
def target_value(self, val: float) -> None:
self._target_tracking = True
self._target_value = val
self.update()
# gauge width
@property
def gauge_width(self) -> int:
return self._gauge_width
@gauge_width.setter
def gauge_width(self, wd: int) -> None:
self._gauge_width = wd
self.update()
# get the background container color of the bar
@property
def container_colors(self) -> tuple[QtGui.QColor, QtGui.QColor]:
return self._back_color_light, self._back_color_dark
# set the background color of the bar
@container_colors.setter
def container_colors(self, clrs: tuple[QtGui.QColor, QtGui.QColor]) -> None:
self._back_color_light = clrs[0]
self._back_color_dark = clrs[1]
self.update()
# get the normal gauge color
@property
def gauge_color_normal(self) -> tuple[QtGui.QColor, QtGui.QColor]:
return self._gauge_color_normal_light, self._gauge_color_normal_dark
# set the normal gauge color
@gauge_color_normal.setter
def gauge_color_normal(self, clr: tuple[QtGui.QColor, QtGui.QColor]) -> None:
self._gauge_color_normal_light = clr[0]
self._gauge_color_normal_dark = clr[1]
self.update()
# get the alarm gauge color
@property
def gauge_color_alarm(self) -> tuple[QtGui.QColor, QtGui.QColor]:
return self._gauge_color_alarm_light, self._gauge_color_alarm_dark
# set the normal gauge color
@gauge_color_alarm.setter
def gauge_color_alarm(self, clr: tuple[QtGui.QColor, QtGui.QColor]) -> None:
self._gauge_color_alarm_light = clr[0]
self._gauge_color_alarm_dark = clr[1]
self.update()
# get the alarm level text size
@property
def alarm_level_text_size(self) -> int:
return self._alarm_text_size
# set the alarm level text size
@alarm_level_text_size.setter
def alarm_level_text_size(self, sz: int) -> None:
self._alarm_text_size = sz
# get the alarm level text color
@property
def alarm_level_text_color(self) -> QtGui.QColor:
return self._alarm_text_color
# set the alarm level text color
@alarm_level_text_color.setter
def alarm_level_text_color(self, clr: QtGui.QColor) -> None:
self._alarm_text_color = clr
# min max color
@property
def min_max_color(self) -> QtGui.QColor:
return self._min_max_color
@min_max_color.setter
def min_max_color(self, clr: QtGui.QColor) -> None:
self._min_max_color = clr
self.update()
# target color
@property
def target_color(self) -> QtGui.QColor:
return self._target_color
@target_color.setter
def target_color(self, clr: QtGui.QColor) -> None:
self._target_color = clr
self.update()
########################################################
# functions
########################################################
    # start the cycle max tracking
    def start_max_tracking(self) -> None:
        """Begin tracking the cycle maximum, seeded with the current value."""
        self._cycle_max_tracking = True
        self._cycle_max = self._gauge_val

    # reset the cycle for max tracking
    def reset_max_tracking(self) -> None:
        """Restart the cycle maximum from the current value (tracking state unchanged)."""
        self._cycle_max = self._gauge_val

    # stop the cycle max tracking
    def stop_max_tracking(self) -> None:
        """Stop tracking the cycle maximum; the last recorded value is kept."""
        self._cycle_max_tracking = False

    # start the cycle min tracking
    def start_min_tracking(self) -> None:
        """Begin tracking the cycle minimum, seeded with the current value."""
        self._cycle_min_tracking = True
        self._cycle_min = self._gauge_val

    # reset the cycle for min tracking
    def reset_min_tracking(self) -> None:
        """Restart the cycle minimum from the current value (tracking state unchanged)."""
        self._cycle_min = self._gauge_val

    # stop the cycle min tracking
    def stop_min_tracking(self) -> None:
        """Stop tracking the cycle minimum; the last recorded value is kept."""
        self._cycle_min_tracking = False
# estimate max width
def estimate_max_gauge_width(self) -> int:
# max width is dependent on the orientation of the widget
if self.position.is_horizontal():
return self._gauge_width + 15 + self._alarm_text_size
else:
# setup the font
painter = QtGui.QPainter(self)
fnt = painter.font()
fnt.setPixelSize(self._alarm_text_size)
fnt.setBold(True)
# create the font matrices
font_matrices = QtGui.QFontMetrics(fnt)
width_lower = font_matrices.horizontalAdvance(self._alarm_lower_level_text)
width_upper = font_matrices.horizontalAdvance(self._alarm_upper_level_text)
text_width = width_upper if width_upper > width_lower else width_lower
return self._gauge_width + 10 + text_width
########################################################
# base class event overrides
########################################################
    # TODO: mouse click plots the history
    def on_mouse_released(self, event: QtGui.QMouseEvent) -> None:
        """Placeholder click handler; history plotting is not implemented yet."""
        pass
#######################################################
# overrides and event handlers
########################################################
# override the default paint event
def paintEvent(self, e):
# if hidden or transparent then nothing else to do
if self.state in (ICWidgetState.Hidden, ICWidgetState.Transparent):
return
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
# get the size of the containing widget
bar_width = painter.device().width()
bar_height = painter.device().height()
##########################################
# calculate dimensions
##########################################
if self.position.is_horizontal():
##################################################
# horizontal configurations
##################################################
gauge_start_x = 0
gauge_size_x = bar_width
gauge_size_y = self._gauge_width
# bar position
bar_start_x = 2
bar_size_x = (gauge_size_x - 4) * (self._gauge_val - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
bar_size_y = gauge_size_y - 4
# alarm levels
if self._alarm_lower_level_set:
lower_alarm_pos_x = (gauge_size_x - 4) * (self._alarm_lower_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
# calculate the text position
text_width = bar_width / 3
lower_alarm_text_start_x = lower_alarm_pos_x - bar_width / 6
lower_alarm_text_align = Qt.AlignCenter
if self._alarm_upper_level_set:
upper_alarm_pos_x = (gauge_size_x - 4) * (self._alarm_upper_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
# calculate the text position
text_width = bar_width / 3
upper_alarm_text_start_x = upper_alarm_pos_x - bar_width / 6
upper_alarm_text_align = Qt.AlignCenter
##################################################
# top & bottom specific calculations
##################################################
if self.position == ICWidgetPosition.Top:
##################################################
# Top
##################################################
gauge_start_y = bar_height - gauge_size_y
bar_start_y = gauge_start_y + 2
# min tracking
if self._cycle_min_tracking:
min_pos_x = (gauge_size_x - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
min_point_one = QtCore.QPointF(min_pos_x, gauge_start_y + gauge_size_y)
min_point_two = QtCore.QPointF(min_pos_x, gauge_start_y - 5)
# max tracking
if self._cycle_max_tracking:
max_pos_x = (gauge_size_x - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
max_point_one = QtCore.QPointF(max_pos_x, gauge_start_y + gauge_size_y)
max_point_two = QtCore.QPointF(max_pos_x, gauge_start_y - 5)
# target tracking
if self._target_tracking:
target_pos_x = (gauge_size_x - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
target_point_one = QtCore.QPointF(target_pos_x, gauge_start_y + gauge_size_y)
target_point_two = QtCore.QPointF(target_pos_x, gauge_start_y - 5)
# lower alarm level
if self._alarm_lower_level_set:
lower_alarm_point_one = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y + gauge_size_y)
lower_alarm_point_two = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y - 5)
lower_alarm_text_rect = QtCore.QRectF(lower_alarm_text_start_x, gauge_start_y - 15 - self._alarm_text_size,
text_width, self._alarm_text_size + 5)
# upper alarm level
if self._alarm_upper_level_set:
upper_alarm_point_one = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y + gauge_size_y)
upper_alarm_point_two = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y - 5)
upper_alarm_text_rect = QtCore.QRectF(upper_alarm_text_start_x, gauge_start_y - 15 - self._alarm_text_size,
text_width, self._alarm_text_size + 5)
else:
##################################################
# Bottom
##################################################
gauge_start_y = 0
bar_start_y = 2
# min tracking
if self._cycle_min_tracking:
min_pos_x = (gauge_size_x - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
min_point_one = QtCore.QPointF(min_pos_x, gauge_start_y + gauge_size_y + 5)
min_point_two = QtCore.QPointF(min_pos_x, gauge_start_y)
# max tracking
if self._cycle_max_tracking:
max_pos_x = (gauge_size_x - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
max_point_one = QtCore.QPointF(max_pos_x, gauge_start_y + gauge_size_y + 5)
max_point_two = QtCore.QPointF(max_pos_x, gauge_start_y)
# target tracking
if self._target_tracking:
target_pos_x = (gauge_size_x - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
target_point_one = QtCore.QPointF(target_pos_x, gauge_start_y + gauge_size_y + 5)
target_point_two = QtCore.QPointF(target_pos_x, gauge_start_y)
# lower alarm level
if self._alarm_lower_level_set:
lower_alarm_point_one = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y + gauge_size_y + 5)
lower_alarm_point_two = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y)
lower_alarm_text_rect = QtCore.QRectF(lower_alarm_text_start_x, gauge_start_y + gauge_size_y + 10,
text_width, self._alarm_text_size + 5)
# upper alarm level
if self._alarm_upper_level_set:
upper_alarm_point_one = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y + gauge_size_y + 5)
upper_alarm_point_two = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y)
upper_alarm_text_rect = QtCore.QRectF(upper_alarm_text_start_x, gauge_start_y + gauge_size_y + 10,
text_width, self._alarm_text_size + 5)
else:
##################################################
# Vertical configurations
##################################################
gauge_start_y = 0
gauge_size_y = bar_height
gauge_size_x = self._gauge_width
# bar position
bar_size_y = (gauge_size_y - 4) * (self._gauge_val - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
bar_start_y = (gauge_size_y - 2) - bar_size_y
bar_size_x = gauge_size_x - 4
# alarm levels
if self._alarm_lower_level_set:
# calculate the position
lower_alarm_pos_y = (gauge_size_y - 4) * (self._alarm_lower_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
lower_alarm_pos_y = (gauge_size_y - 2) - lower_alarm_pos_y
# calculate where to write the text
lower_alarm_text_pos_y = lower_alarm_pos_y - 0.5 * self._alarm_text_size
if lower_alarm_text_pos_y < 0:
lower_alarm_text_pos_y = 0
if lower_alarm_text_pos_y + self._alarm_text_size + 5 > bar_height:
lower_alarm_text_pos_y = bar_height - self._alarm_text_size - 5
if self._alarm_upper_level_set:
upper_alarm_pos_y = (gauge_size_y - 4) * (self._alarm_upper_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
upper_alarm_pos_y = (gauge_size_y - 2) - lower_alarm_pos_y
# calculate where to write the text
upper_alarm_text_pos_y = upper_alarm_pos_y - 0.5 * self._alarm_text_size
if upper_alarm_text_pos_y < 0:
upper_alarm_text_pos_y = 0
if upper_alarm_text_pos_y + self._alarm_text_size + 5 > bar_height:
upper_alarm_text_pos_y = bar_height - self._alarm_text_size - 5
##################################################
# left and right specific calculations
##################################################
if self.position == ICWidgetPosition.Left:
##################################################
# Left
##################################################
gauge_start_x = bar_width - gauge_size_x
bar_start_x = gauge_start_x + 2
# min max positions
if self._cycle_min_tracking:
min_pos_y = (gauge_size_y - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
min_pos_y = (gauge_size_y - 2) - min_pos_y
min_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, min_pos_y)
min_point_two = QtCore.QPointF(gauge_start_x - 5, min_pos_y)
if self._cycle_max_tracking:
max_pos_y = (gauge_size_y - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
max_pos_y = (gauge_size_y - 2) - max_pos_y
max_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, max_pos_y)
max_point_two = QtCore.QPointF(gauge_start_x - 5, max_pos_y)
# target position
if self._target_tracking:
target_pos_y = (gauge_size_y - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
target_pos_y = (gauge_size_y - 2) - target_pos_y
target_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, target_pos_y)
target_point_two = QtCore.QPointF(gauge_start_x - 5, target_pos_y)
# setup the alarm levels
if self._alarm_lower_level_set:
lower_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, lower_alarm_pos_y)
lower_alarm_point_two = QtCore.QPointF(gauge_start_x - 5, lower_alarm_pos_y)
lower_alarm_text_rect = QtCore.QRectF(0, lower_alarm_text_pos_y, gauge_start_x - 10, self._alarm_text_size + 5)
lower_alarm_text_align = Qt.AlignRight
if self._alarm_upper_level_set:
upper_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, upper_alarm_pos_y)
upper_alarm_point_two = QtCore.QPointF(gauge_start_x - 5, upper_alarm_pos_y)
upper_alarm_text_rect = QtCore.QRectF(0, upper_alarm_text_pos_y, gauge_start_x - 10, self._alarm_text_size + 5)
upper_alarm_text_align = Qt.AlignRight
else:
##################################################
# Right
##################################################
gauge_start_x = 0
bar_start_x = 2
# min max positions
if self._cycle_min_tracking:
min_pos_y = (gauge_size_y - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
min_pos_y = (gauge_size_y - 2) - min_pos_y
min_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, min_pos_y)
min_point_two = QtCore.QPointF(gauge_start_x, min_pos_y)
if self._cycle_max_tracking:
max_pos_y = (gauge_size_y - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
max_pos_y = (gauge_size_y - 2) - max_pos_y
max_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, max_pos_y)
max_point_two = QtCore.QPointF(gauge_start_x, max_pos_y)
# target position
if self._target_tracking:
target_pos_y = (gauge_size_y - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
target_pos_y = (gauge_size_y - 2) - target_pos_y
target_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, target_pos_y)
target_point_two = QtCore.QPointF(gauge_start_x, target_pos_y)
# setup the alarm levels
if self._alarm_lower_level_set:
lower_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, lower_alarm_pos_y)
lower_alarm_point_two = QtCore.QPointF(gauge_start_x, lower_alarm_pos_y)
lower_alarm_text_rect = QtCore.QRectF(gauge_start_x + gauge_size_x + 10, lower_alarm_text_pos_y,
bar_width - gauge_size_x - 10, self._alarm_text_size + 5)
lower_alarm_text_align = Qt.AlignLeft
if self._alarm_upper_level_set:
upper_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, upper_alarm_pos_y)
upper_alarm_point_two = QtCore.QPointF(gauge_start_x, upper_alarm_pos_y)
upper_alarm_text_rect = QtCore.QRectF(gauge_start_x + gauge_size_x + 10, upper_alarm_text_pos_y,
bar_width - gauge_size_x - 10, self._alarm_text_size + 5)
upper_alarm_text_align = Qt.AlignLeft
##################################################
# paint the main rectangle
##################################################
rect = QtCore.QRectF(gauge_start_x, gauge_start_y, gauge_size_x, gauge_size_y)
if self.position.is_horizontal():
brush = QtGui.QLinearGradient(rect.topRight(), rect.topLeft())
else:
brush = QtGui.QLinearGradient(rect.bottomLeft(), rect.topLeft())
# define the filling brush
brush.setColorAt(0, self._back_color_light)
brush.setColorAt(1, self._back_color_dark)
painter.setBrush(brush)
# define the pen
pen = QtGui.QPen(ICDisplayConfig.LinearSlideBoxColorLight)
pen.setWidth(1)
pen.setCapStyle(Qt.RoundCap)
pen.setJoinStyle(Qt.RoundJoin)
painter.setPen(pen)
# define the path and draw
path = QtGui.QPainterPath()
path.setFillRule(Qt.WindingFill)
path.addRoundedRect(rect, 10, 10)
painter.drawPath(path)
# leave here for frame only
if self.state == ICWidgetState.FrameOnly:
return
##################################################
# draw the gauge bar
##################################################
rect = QtCore.QRectF(bar_start_x, bar_start_y, bar_size_x, bar_size_y)
brush = QtGui.QLinearGradient(rect.topRight(), rect.bottomLeft())
# set the default color
brush.setColorAt(0, self._gauge_color_normal_light)
brush.setColorAt(1, self._gauge_color_normal_dark)
# check if the current value is below the minimum limit
if self._alarm_lower_level_set or self._alarm_upper_level_set:
if self.alarm_activated:
brush.setColorAt(0, self._gauge_color_alarm_light)
brush.setColorAt(1, self._gauge_color_alarm_dark)
# paint the gauge bar
path = QtGui.QPainterPath()
path.setFillRule(Qt.WindingFill)
path.addRoundedRect(rect, 9, 9)
painter.setBrush(brush)
pen.setWidth(1)
pen.setBrush(brush)
painter.setPen(pen)
painter.drawPath(path)
##################################################
# draw min max tracking
##################################################
pen = painter.pen()
pen.setColor(self._min_max_color)
pen.setWidth(4)
painter.setPen(pen)
if self._cycle_min_tracking:
painter.drawLine(min_point_one, min_point_two)
if self._cycle_max_tracking:
painter.drawLine(max_point_one, max_point_two)
##################################################
# draw target tracking
##################################################
pen = painter.pen()
pen.setColor(self._target_color)
pen.setWidth(4)
painter.setPen(pen)
if self._target_tracking:
painter.drawLine(target_point_one, target_point_two)
##################################################
# draw the limits.
##################################################
# setup the font and pen
fnt = painter.font()
fnt.setBold(True)
fnt.setPixelSize(self._alarm_text_size)
painter.setFont(fnt)
# set up the pen
pen = painter.pen()
pen.setColor(self._alarm_text_color)
pen.setWidth(4)
painter.setPen(pen)
# draw the lower level set point
if self._alarm_lower_level_set:
# draw the alarm level
painter.drawLine(lower_alarm_point_one, lower_alarm_point_two)
# setup the pen for writing the alarm text
pen.setWidth(1)
painter.setPen(pen)
# draw the alarm text
painter.drawText(lower_alarm_text_rect, lower_alarm_text_align, self._alarm_lower_level_text)
# draw the upper level set point
pen.setWidth(4)
painter.setPen(pen)
if self._alarm_upper_level_set:
# draw the alarm level
painter.drawLine(upper_alarm_point_one, upper_alarm_point_two)
# setup the pen for writing the alarm text
pen.setWidth(1)
painter.setPen(pen)
# draw the alarm text
painter.drawText(upper_alarm_text_rect, upper_alarm_text_align, self._alarm_upper_level_text)
class ICLinearGauge(ICLinearAxisContainer):
    """
    Compound widget combining a gauge bar, a tick scale, and optional
    title/value labels for displaying the plotted value.
    """

    def __init__(self, name: str, unit: str, min_val: float = 0, max_val: float = 100, display_steps: int = 5, show_title: bool = True, show_value: bool = True,
                 position: ICWidgetPosition = ICWidgetPosition.Left, widget_id: int = 0, *args, **kwargs):
        # choose the container layout from the requested title/value labels.
        # BUG FIX: the original tested "not show_value" twice, so the
        # BAR_NO_TITLE_NO_VALUE layout was unreachable.
        if (not show_title) and (not show_value):
            cont_type = ICLinearContainerType.BAR_NO_TITLE_NO_VALUE
        elif not show_value:
            cont_type = ICLinearContainerType.BAR_NO_VALUE
        elif not show_title:
            cont_type = ICLinearContainerType.BAR_NO_TITLE
        else:
            cont_type = ICLinearContainerType.BAR
        super(ICLinearGauge, self).__init__(cont_type, widget_id=widget_id, *args, **kwargs)
        curr_value = 0.5 * (min_val + max_val)
        # create the gauge bar and listen for value/alarm changes
        self.gauge_bar = ICGaugeBar(min_val, max_val, curr_value, position, widget_id)
        self.gauge_bar.changed[float].connect(self.value_changed)
        self.add_central_widget(self.gauge_bar)
        # initialise the displayed title, value and unit
        self.title = name
        self.value = curr_value
        self.unit = unit
        # number of steps for drawing ticks in the gauge bar
        self._display_steps: int = display_steps
        # selected values and displayed values for the scale
        self._scale_values: list[float] = []
        self._scale_displayed_values: list[str] = []
        # create the display lists
        self._scale_values, self._scale_displayed_values = ICLinearAxis.create_ticks(max_val, min_val, display_steps, "{0:.0f}")
        # add the scale bar on the side opposite the gauge
        self.add_first_scale_bar(name, self._scale_values, self._scale_displayed_values, ICWidgetPosition.opposite(position))
        self.vertical_gauge_width = ICDisplayConfig.LinearGaugeVerticalMaxWidth
        self.horizontal_gauge_height = ICDisplayConfig.LinearGaugeHorizontalMaxHeight
        # override the base Size policy
        self.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        # call layout update to specify size
        self.on_layout_update()

    ########################################################
    # functions
    ########################################################
    # override the default show event
    def showEvent(self, e):
        """Recompute the layout when the widget is shown."""
        self.on_layout_update()

    ########################################################
    # slots
    ########################################################
    # @pyqtSlot(float)
    def update_upper_alarm_level(self, new_level: float) -> None:
        """Move the gauge bar's upper alarm to *new_level*, keeping its label."""
        nm, old_level = self.gauge_bar.upper_alarm
        self.gauge_bar.upper_alarm = (nm, new_level)

    # @pyqtSlot(float)
    def update_lower_alarm_level(self, new_level: float) -> None:
        """Move the gauge bar's lower alarm to *new_level*, keeping its label."""
        nm, old_level = self.gauge_bar.lower_alarm
        self.gauge_bar.lower_alarm = (nm, new_level)

    ########################################################
    # base class event overrides
    ########################################################
    # change layout based on the orientation
    def on_layout_update(self) -> None:
        """Assign size hints to the container, gauge bar and scale bar."""
        gauge_width = self.gauge_bar.estimate_max_gauge_width()
        if self.scale_bar_one is not None:
            scale_width = self.scale_bar_one.estimate_max_scale_width()
        if self.position.is_horizontal():
            self.size_hint = (ICDisplayConfig.LinearGaugeHorizontalWidth, ICDisplayConfig.LinearGaugeHorizontalMaxHeight)
            self.gauge_bar.size_hint = (ICDisplayConfig.LinearGaugeHorizontalWidth, gauge_width)
            if self.scale_bar_one is not None:
                self.scale_bar_one.size_hint = (ICDisplayConfig.LinearGaugeHorizontalWidth, scale_width)
        else:
            self.size_hint = (ICDisplayConfig.LinearGaugeVerticalMaxWidth, ICDisplayConfig.LinearGaugeVerticalHeight)
            self.gauge_bar.size_hint = (gauge_width, ICDisplayConfig.LinearGaugeVerticalHeight)
            if self.scale_bar_one is not None:
                self.scale_bar_one.size_hint = (scale_width, ICDisplayConfig.LinearGaugeVerticalHeight)

    def on_value_update(self, value: float) -> None:
        """Push a new value to the gauge bar (clamping/alarms handled there)."""
        self.gauge_bar.gauge_value = value
# -*- coding: utf-8 -*-
"""
Created on May 19 2021
@author: Prosenjit
Custom Qt Widget to show a linear gauge with min-max. The following modifiable
attributes are exposed.
"""
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal
from typing import Union
from .display_config import ICDisplayConfig
from .base_widget import ICBaseWidget, ICWidgetState, ICWidgetPosition
from .linear_axis import ICLinearAxis, ICLinearAxisContainer, ICLinearContainerType
class ICGaugeBar(ICBaseWidget):
"""
Class for a custom widget to draw a colored bar.
Length of the colored bar is proportional to the value.
"""
# bar state has changed signal. it can be current value or alarm status
changed = pyqtSignal(float)
    def __init__(self, min_val: float, max_val: float, curr_val: float, position: ICWidgetPosition = ICWidgetPosition.Bottom,
                 widget_id: int = 0, *args, **kwargs):
        """Create a gauge bar.

        Args:
            min_val: lower bound of the displayed range.
            max_val: upper bound of the displayed range.
            curr_val: initial value shown by the gauge.
            position: edge the bar is attached to (Top/Bottom draw a
                horizontal bar, Left/Right a vertical one).
            widget_id: identifier forwarded to the ICBaseWidget base class.
        """
        super(ICGaugeBar, self).__init__(widget_id, *args, **kwargs)
        # minimum and maximum value of the gauge bar
        self._gauge_range_min: float = min_val
        self._gauge_range_max: float = max_val
        # current value of the gauge
        self._gauge_val: float = curr_val
        # has the current value lead to an alarm
        self.alarm_activated = False
        # upper alarm level for the gauge (inactive until upper_alarm is set)
        self._alarm_upper_level: float = max_val
        self._alarm_upper_level_text: str = "UL"
        self._alarm_upper_level_set: bool = False
        # lower alarm level for the gauge (inactive until lower_alarm is set)
        self._alarm_lower_level: float = min_val
        self._alarm_lower_level_text: str = "LL"
        self._alarm_lower_level_set: bool = False
        # max level tracking (seeded with the current value)
        self._cycle_max: float = curr_val
        self._cycle_max_tracking: bool = False
        # min level tracking (seeded with the current value)
        self._cycle_min: float = curr_val
        self._cycle_min_tracking: bool = False
        # target tracking (seeded with the current value)
        self._target_value: float = curr_val
        self._target_tracking: bool = False
        # gauge width
        self._gauge_width: int = ICDisplayConfig.LinearGaugeWidth
        # background colors
        self._back_color_light: QtGui.QColor = ICDisplayConfig.LinearGaugeBoxColorLight
        self._back_color_dark: QtGui.QColor = ICDisplayConfig.LinearGaugeBoxColorDark
        # gauge colors normal
        self._gauge_color_normal_light: QtGui.QColor = ICDisplayConfig.LinearGaugeNormalLight
        self._gauge_color_normal_dark: QtGui.QColor = ICDisplayConfig.LinearGaugeNormalDark
        # gauge colors alarmed
        self._gauge_color_alarm_light: QtGui.QColor = ICDisplayConfig.LinearGaugeErrorLight
        self._gauge_color_alarm_dark: QtGui.QColor = ICDisplayConfig.LinearGaugeErrorDark
        # alarm level text size and color
        self._alarm_text_size: int = ICDisplayConfig.LabelTextSize
        self._alarm_text_color: QtGui.QColor = ICDisplayConfig.LinearGaugeLimitsColor
        # min max line color
        self._min_max_color: QtGui.QColor = ICDisplayConfig.LinearGaugeMinMaxColor
        # target color
        self._target_color: QtGui.QColor = ICDisplayConfig.LinearGaugeTargetColor
        # sets the click-ability and focus-ability of the button
        self.clickable = True
        self.focusable = False
        # set the position of the gauge.
        # NOTE(review): this is a base-class property assignment; it appears
        # intentionally placed after all attributes are initialised — confirm
        # before reordering.
        self.position = position
        # override the base Size policy
        self.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
########################################################
# properties
########################################################
# get the minimum limit of the gauge bar
@property
def gauge_range_min(self) -> float:
return self._gauge_range_min
# set the minimum limit of the gauge bar
@gauge_range_min.setter
def gauge_range_min(self, min_val: float) -> None:
self._gauge_range_min = min_val
self.update()
# get the maximum limit of the gauge bar
@property
def gauge_range_max(self) -> float:
return self._gauge_range_max
# set the minimum limit of the gauge bar
@gauge_range_max.setter
def gauge_range_max(self, max_val: float) -> None:
self._gauge_range_max = max_val
self.update()
# get the current value
@property
def gauge_value(self) -> float:
return self._gauge_val
# set the current value
@gauge_value.setter
def gauge_value(self, val: float) -> None:
if self._gauge_val != val:
# limit gauge value to the min and max range
if val < self._gauge_range_min:
self._gauge_val = self._gauge_range_min
elif val > self._gauge_range_max:
self._gauge_val = self._gauge_range_max
else:
self._gauge_val = val
# update the min value
if self._cycle_min_tracking:
if val < self._cycle_min:
self._cycle_min = val
# update the max value
if self._cycle_max_tracking:
if val > self._cycle_max:
self._cycle_max = val
# reset the alarm before testing
self.alarm_activated = False
# check for too low alarm
if self._alarm_lower_level_set:
if val < self._alarm_lower_level:
self.alarm_activated = True
# check for too high alarm
if self._alarm_upper_level_set:
if val > self._alarm_upper_level:
self.alarm_activated = True
self.changed.emit(val)
self.update()
# get the upper level alarm
# tuple of (name, value)
@property
def upper_alarm(self) -> Union[tuple[str, float], tuple[None, None]]:
if self._alarm_upper_level_set:
return self._alarm_upper_level_text, self._alarm_upper_level
else:
return None, None
# set the upper level alarm
@upper_alarm.setter
def upper_alarm(self, alarm: tuple[str, float]) -> None:
# check if upper alarm level is greater than the lower alarm level
if self._alarm_lower_level_set:
if alarm[1] < self._alarm_lower_level:
return
# check if the limit value is in between the max and min values
if self._gauge_range_min <= alarm[1] <= self._gauge_range_max:
self._alarm_upper_level_set = True
self._alarm_upper_level_text = alarm[0]
self._alarm_upper_level = alarm[1]
# check for alarm level
if self._gauge_val > self._alarm_upper_level:
self.alarm_activated = True
self.changed.emit(self._gauge_val)
self.update()
# get the lower level alarm
# tuple of (name, value)
@property
def lower_alarm(self) -> Union[tuple[str, float], tuple[None, None]]:
if self._alarm_lower_level_set:
return self._alarm_lower_level_text, self._alarm_lower_level
else:
return None, None
# set the lower level alarm from a (name, value) tuple
@lower_alarm.setter
def lower_alarm(self, alarm: tuple[str, float]) -> None:
    """Install the lower alarm; silently ignored if the level is invalid."""
    label, level = alarm
    # the lower alarm level must not exceed a configured upper alarm
    if self._alarm_upper_level_set and level > self._alarm_upper_level:
        return
    # the level must lie within the gauge range
    if not (self._gauge_range_min <= level <= self._gauge_range_max):
        return
    self._alarm_lower_level_set = True
    self._alarm_lower_level_text = label
    self._alarm_lower_level = level
    # the current value may already violate the new limit
    if self._gauge_val < self._alarm_lower_level:
        self.alarm_activated = True
        self.changed.emit(self._gauge_val)
    self.update()
@property
def target_value(self) -> Union[float, None]:
    """Target marker value, or None when target tracking is off."""
    return self._target_value if self._target_tracking else None

@target_value.setter
def target_value(self, val: float) -> None:
    # assigning a target implicitly switches target tracking on
    self._target_tracking = True
    self._target_value = val
    self.update()
# thickness of the gauge bar in pixels
@property
def gauge_width(self) -> int:
    """Current gauge bar thickness."""
    return self._gauge_width

@gauge_width.setter
def gauge_width(self, width: int) -> None:
    self._gauge_width = width
    self.update()
# light/dark gradient colors of the background container
@property
def container_colors(self) -> tuple[QtGui.QColor, QtGui.QColor]:
    """Return the (light, dark) background colors of the bar container."""
    return self._back_color_light, self._back_color_dark

@container_colors.setter
def container_colors(self, clrs: tuple[QtGui.QColor, QtGui.QColor]) -> None:
    self._back_color_light, self._back_color_dark = clrs
    self.update()
# light/dark gradient colors of the gauge bar in the normal state
@property
def gauge_color_normal(self) -> tuple[QtGui.QColor, QtGui.QColor]:
    """Return the (light, dark) colors used when no alarm is active."""
    return self._gauge_color_normal_light, self._gauge_color_normal_dark

@gauge_color_normal.setter
def gauge_color_normal(self, clr: tuple[QtGui.QColor, QtGui.QColor]) -> None:
    self._gauge_color_normal_light, self._gauge_color_normal_dark = clr
    self.update()
# light/dark gradient colors of the gauge bar in the alarm state
@property
def gauge_color_alarm(self) -> tuple[QtGui.QColor, QtGui.QColor]:
    """Return the (light, dark) colors used while an alarm is active."""
    return self._gauge_color_alarm_light, self._gauge_color_alarm_dark

@gauge_color_alarm.setter
def gauge_color_alarm(self, clr: tuple[QtGui.QColor, QtGui.QColor]) -> None:
    self._gauge_color_alarm_light, self._gauge_color_alarm_dark = clr
    self.update()
# get the alarm level text size
@property
def alarm_level_text_size(self) -> int:
    """Pixel size of the alarm label text."""
    return self._alarm_text_size

# set the alarm level text size
@alarm_level_text_size.setter
def alarm_level_text_size(self, sz: int) -> None:
    self._alarm_text_size = sz
    # bug fix: repaint so the new text size takes effect immediately,
    # consistent with every other appearance setter in this class
    self.update()
# get the alarm level text color
@property
def alarm_level_text_color(self) -> QtGui.QColor:
    """Color of the alarm label text."""
    return self._alarm_text_color

# set the alarm level text color
@alarm_level_text_color.setter
def alarm_level_text_color(self, clr: QtGui.QColor) -> None:
    self._alarm_text_color = clr
    # bug fix: repaint so the new text color takes effect immediately,
    # consistent with every other appearance setter in this class
    self.update()
# color of the cycle min/max marker lines
@property
def min_max_color(self) -> QtGui.QColor:
    """Color used to draw the cycle minimum/maximum markers."""
    return self._min_max_color

@min_max_color.setter
def min_max_color(self, color: QtGui.QColor) -> None:
    self._min_max_color = color
    self.update()
# color of the target marker line
@property
def target_color(self) -> QtGui.QColor:
    """Color used to draw the target marker."""
    return self._target_color

@target_color.setter
def target_color(self, color: QtGui.QColor) -> None:
    self._target_color = color
    self.update()
########################################################
# functions
########################################################
# start the cycle max tracking; the tracker is seeded with the current value
def start_max_tracking(self) -> None:
    self._cycle_max_tracking = True
    self._cycle_max = self._gauge_val

# reset the cycle for max tracking back to the current value
def reset_max_tracking(self) -> None:
    self._cycle_max = self._gauge_val

# stop the cycle max tracking
def stop_max_tracking(self) -> None:
    self._cycle_max_tracking = False

# start the cycle min tracking; the tracker is seeded with the current value
def start_min_tracking(self) -> None:
    self._cycle_min_tracking = True
    self._cycle_min = self._gauge_val

# reset the cycle for min tracking back to the current value
def reset_min_tracking(self) -> None:
    self._cycle_min = self._gauge_val

# stop the cycle min tracking
def stop_min_tracking(self) -> None:
    self._cycle_min_tracking = False
# estimate max width
def estimate_max_gauge_width(self) -> int:
    """Estimate the widget thickness needed for the gauge and alarm labels.

    For horizontal gauges the thickness is the bar width plus room for the
    alarm text above/below; for vertical gauges the widest alarm label is
    measured with the widget font.
    """
    # max width is dependent on the orientation of the widget
    if self.position.is_horizontal():
        return self._gauge_width + 15 + self._alarm_text_size
    # bug fix: a QPainter must not be opened on a widget outside paintEvent;
    # font metrics can be computed from the widget font directly
    fnt = QtGui.QFont(self.font())
    fnt.setPixelSize(self._alarm_text_size)
    fnt.setBold(True)
    # measure both alarm labels and keep the wider one
    font_matrices = QtGui.QFontMetrics(fnt)
    width_lower = font_matrices.horizontalAdvance(self._alarm_lower_level_text)
    width_upper = font_matrices.horizontalAdvance(self._alarm_upper_level_text)
    text_width = width_upper if width_upper > width_lower else width_lower
    return self._gauge_width + 10 + text_width
########################################################
# base class event overrides
########################################################
# TODO: mouse click plots the history
def on_mouse_released(self, event: QtGui.QMouseEvent) -> None:
    # placeholder: history plotting on click is not implemented yet
    pass
#######################################################
# overrides and event handlers
########################################################
# override the default paint event
def paintEvent(self, e):
    """Render the gauge: container, value bar, min/max/target markers and alarm levels.

    Geometry is computed per orientation (Top/Bottom = horizontal,
    Left/Right = vertical) before any drawing takes place.
    """
    # if hidden or transparent then nothing else to do
    if self.state in (ICWidgetState.Hidden, ICWidgetState.Transparent):
        return
    painter = QtGui.QPainter(self)
    painter.setRenderHint(QtGui.QPainter.Antialiasing)
    # get the size of the containing widget
    bar_width = painter.device().width()
    bar_height = painter.device().height()
    ##########################################
    # calculate dimensions
    ##########################################
    if self.position.is_horizontal():
        ##################################################
        # horizontal configurations
        ##################################################
        gauge_start_x = 0
        gauge_size_x = bar_width
        gauge_size_y = self._gauge_width
        # bar position
        bar_start_x = 2
        bar_size_x = (gauge_size_x - 4) * (self._gauge_val - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
        bar_size_y = gauge_size_y - 4
        # alarm levels
        if self._alarm_lower_level_set:
            lower_alarm_pos_x = (gauge_size_x - 4) * (self._alarm_lower_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
            # calculate the text position
            text_width = bar_width / 3
            lower_alarm_text_start_x = lower_alarm_pos_x - bar_width / 6
            lower_alarm_text_align = Qt.AlignCenter
        if self._alarm_upper_level_set:
            upper_alarm_pos_x = (gauge_size_x - 4) * (self._alarm_upper_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
            # calculate the text position
            text_width = bar_width / 3
            upper_alarm_text_start_x = upper_alarm_pos_x - bar_width / 6
            upper_alarm_text_align = Qt.AlignCenter
        ##################################################
        # top & bottom specific calculations
        ##################################################
        if self.position == ICWidgetPosition.Top:
            ##################################################
            # Top
            ##################################################
            gauge_start_y = bar_height - gauge_size_y
            bar_start_y = gauge_start_y + 2
            # min tracking
            if self._cycle_min_tracking:
                min_pos_x = (gauge_size_x - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                min_point_one = QtCore.QPointF(min_pos_x, gauge_start_y + gauge_size_y)
                min_point_two = QtCore.QPointF(min_pos_x, gauge_start_y - 5)
            # max tracking
            if self._cycle_max_tracking:
                max_pos_x = (gauge_size_x - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                max_point_one = QtCore.QPointF(max_pos_x, gauge_start_y + gauge_size_y)
                max_point_two = QtCore.QPointF(max_pos_x, gauge_start_y - 5)
            # target tracking
            if self._target_tracking:
                target_pos_x = (gauge_size_x - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                target_point_one = QtCore.QPointF(target_pos_x, gauge_start_y + gauge_size_y)
                target_point_two = QtCore.QPointF(target_pos_x, gauge_start_y - 5)
            # lower alarm level
            if self._alarm_lower_level_set:
                lower_alarm_point_one = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y + gauge_size_y)
                lower_alarm_point_two = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y - 5)
                lower_alarm_text_rect = QtCore.QRectF(lower_alarm_text_start_x, gauge_start_y - 15 - self._alarm_text_size,
                                                      text_width, self._alarm_text_size + 5)
            # upper alarm level
            if self._alarm_upper_level_set:
                upper_alarm_point_one = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y + gauge_size_y)
                upper_alarm_point_two = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y - 5)
                upper_alarm_text_rect = QtCore.QRectF(upper_alarm_text_start_x, gauge_start_y - 15 - self._alarm_text_size,
                                                      text_width, self._alarm_text_size + 5)
        else:
            ##################################################
            # Bottom
            ##################################################
            gauge_start_y = 0
            bar_start_y = 2
            # min tracking
            if self._cycle_min_tracking:
                min_pos_x = (gauge_size_x - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                min_point_one = QtCore.QPointF(min_pos_x, gauge_start_y + gauge_size_y + 5)
                min_point_two = QtCore.QPointF(min_pos_x, gauge_start_y)
            # max tracking
            if self._cycle_max_tracking:
                max_pos_x = (gauge_size_x - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                max_point_one = QtCore.QPointF(max_pos_x, gauge_start_y + gauge_size_y + 5)
                max_point_two = QtCore.QPointF(max_pos_x, gauge_start_y)
            # target tracking
            if self._target_tracking:
                target_pos_x = (gauge_size_x - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                target_point_one = QtCore.QPointF(target_pos_x, gauge_start_y + gauge_size_y + 5)
                target_point_two = QtCore.QPointF(target_pos_x, gauge_start_y)
            # lower alarm level
            if self._alarm_lower_level_set:
                lower_alarm_point_one = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y + gauge_size_y + 5)
                lower_alarm_point_two = QtCore.QPointF(lower_alarm_pos_x, gauge_start_y)
                lower_alarm_text_rect = QtCore.QRectF(lower_alarm_text_start_x, gauge_start_y + gauge_size_y + 10,
                                                      text_width, self._alarm_text_size + 5)
            # upper alarm level
            if self._alarm_upper_level_set:
                upper_alarm_point_one = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y + gauge_size_y + 5)
                upper_alarm_point_two = QtCore.QPointF(upper_alarm_pos_x, gauge_start_y)
                upper_alarm_text_rect = QtCore.QRectF(upper_alarm_text_start_x, gauge_start_y + gauge_size_y + 10,
                                                      text_width, self._alarm_text_size + 5)
    else:
        ##################################################
        # Vertical configurations
        ##################################################
        gauge_start_y = 0
        gauge_size_y = bar_height
        gauge_size_x = self._gauge_width
        # bar position
        bar_size_y = (gauge_size_y - 4) * (self._gauge_val - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
        bar_start_y = (gauge_size_y - 2) - bar_size_y
        bar_size_x = gauge_size_x - 4
        # alarm levels
        if self._alarm_lower_level_set:
            # calculate the position
            lower_alarm_pos_y = (gauge_size_y - 4) * (self._alarm_lower_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
            lower_alarm_pos_y = (gauge_size_y - 2) - lower_alarm_pos_y
            # calculate where to write the text
            lower_alarm_text_pos_y = lower_alarm_pos_y - 0.5 * self._alarm_text_size
            if lower_alarm_text_pos_y < 0:
                lower_alarm_text_pos_y = 0
            if lower_alarm_text_pos_y + self._alarm_text_size + 5 > bar_height:
                lower_alarm_text_pos_y = bar_height - self._alarm_text_size - 5
        if self._alarm_upper_level_set:
            upper_alarm_pos_y = (gauge_size_y - 4) * (self._alarm_upper_level - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
            # bug fix: the flipped coordinate must use the *upper* alarm
            # position; the original subtracted lower_alarm_pos_y here
            upper_alarm_pos_y = (gauge_size_y - 2) - upper_alarm_pos_y
            # calculate where to write the text
            upper_alarm_text_pos_y = upper_alarm_pos_y - 0.5 * self._alarm_text_size
            if upper_alarm_text_pos_y < 0:
                upper_alarm_text_pos_y = 0
            if upper_alarm_text_pos_y + self._alarm_text_size + 5 > bar_height:
                upper_alarm_text_pos_y = bar_height - self._alarm_text_size - 5
        ##################################################
        # left and right specific calculations
        ##################################################
        if self.position == ICWidgetPosition.Left:
            ##################################################
            # Left
            ##################################################
            gauge_start_x = bar_width - gauge_size_x
            bar_start_x = gauge_start_x + 2
            # min max positions
            if self._cycle_min_tracking:
                min_pos_y = (gauge_size_y - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                min_pos_y = (gauge_size_y - 2) - min_pos_y
                min_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, min_pos_y)
                min_point_two = QtCore.QPointF(gauge_start_x - 5, min_pos_y)
            if self._cycle_max_tracking:
                max_pos_y = (gauge_size_y - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                max_pos_y = (gauge_size_y - 2) - max_pos_y
                max_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, max_pos_y)
                max_point_two = QtCore.QPointF(gauge_start_x - 5, max_pos_y)
            # target position
            if self._target_tracking:
                target_pos_y = (gauge_size_y - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                target_pos_y = (gauge_size_y - 2) - target_pos_y
                target_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, target_pos_y)
                target_point_two = QtCore.QPointF(gauge_start_x - 5, target_pos_y)
            # setup the alarm levels
            if self._alarm_lower_level_set:
                lower_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, lower_alarm_pos_y)
                lower_alarm_point_two = QtCore.QPointF(gauge_start_x - 5, lower_alarm_pos_y)
                lower_alarm_text_rect = QtCore.QRectF(0, lower_alarm_text_pos_y, gauge_start_x - 10, self._alarm_text_size + 5)
                lower_alarm_text_align = Qt.AlignRight
            if self._alarm_upper_level_set:
                upper_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x, upper_alarm_pos_y)
                upper_alarm_point_two = QtCore.QPointF(gauge_start_x - 5, upper_alarm_pos_y)
                upper_alarm_text_rect = QtCore.QRectF(0, upper_alarm_text_pos_y, gauge_start_x - 10, self._alarm_text_size + 5)
                upper_alarm_text_align = Qt.AlignRight
        else:
            ##################################################
            # Right
            ##################################################
            gauge_start_x = 0
            bar_start_x = 2
            # min max positions
            if self._cycle_min_tracking:
                min_pos_y = (gauge_size_y - 4) * (self._cycle_min - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                min_pos_y = (gauge_size_y - 2) - min_pos_y
                min_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, min_pos_y)
                min_point_two = QtCore.QPointF(gauge_start_x, min_pos_y)
            if self._cycle_max_tracking:
                max_pos_y = (gauge_size_y - 4) * (self._cycle_max - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                max_pos_y = (gauge_size_y - 2) - max_pos_y
                max_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, max_pos_y)
                max_point_two = QtCore.QPointF(gauge_start_x, max_pos_y)
            # target position
            if self._target_tracking:
                target_pos_y = (gauge_size_y - 4) * (self._target_value - self._gauge_range_min) / (self._gauge_range_max - self._gauge_range_min)
                target_pos_y = (gauge_size_y - 2) - target_pos_y
                target_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, target_pos_y)
                target_point_two = QtCore.QPointF(gauge_start_x, target_pos_y)
            # setup the alarm levels
            if self._alarm_lower_level_set:
                lower_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, lower_alarm_pos_y)
                lower_alarm_point_two = QtCore.QPointF(gauge_start_x, lower_alarm_pos_y)
                lower_alarm_text_rect = QtCore.QRectF(gauge_start_x + gauge_size_x + 10, lower_alarm_text_pos_y,
                                                      bar_width - gauge_size_x - 10, self._alarm_text_size + 5)
                lower_alarm_text_align = Qt.AlignLeft
            if self._alarm_upper_level_set:
                upper_alarm_point_one = QtCore.QPointF(gauge_start_x + gauge_size_x + 5, upper_alarm_pos_y)
                upper_alarm_point_two = QtCore.QPointF(gauge_start_x, upper_alarm_pos_y)
                upper_alarm_text_rect = QtCore.QRectF(gauge_start_x + gauge_size_x + 10, upper_alarm_text_pos_y,
                                                      bar_width - gauge_size_x - 10, self._alarm_text_size + 5)
                upper_alarm_text_align = Qt.AlignLeft
    ##################################################
    # paint the main rectangle
    ##################################################
    rect = QtCore.QRectF(gauge_start_x, gauge_start_y, gauge_size_x, gauge_size_y)
    if self.position.is_horizontal():
        brush = QtGui.QLinearGradient(rect.topRight(), rect.topLeft())
    else:
        brush = QtGui.QLinearGradient(rect.bottomLeft(), rect.topLeft())
    # define the filling brush
    brush.setColorAt(0, self._back_color_light)
    brush.setColorAt(1, self._back_color_dark)
    painter.setBrush(brush)
    # define the pen
    pen = QtGui.QPen(ICDisplayConfig.LinearSlideBoxColorLight)
    pen.setWidth(1)
    pen.setCapStyle(Qt.RoundCap)
    pen.setJoinStyle(Qt.RoundJoin)
    painter.setPen(pen)
    # define the path and draw
    path = QtGui.QPainterPath()
    path.setFillRule(Qt.WindingFill)
    path.addRoundedRect(rect, 10, 10)
    painter.drawPath(path)
    # leave here for frame only
    if self.state == ICWidgetState.FrameOnly:
        return
    ##################################################
    # draw the gauge bar
    ##################################################
    rect = QtCore.QRectF(bar_start_x, bar_start_y, bar_size_x, bar_size_y)
    brush = QtGui.QLinearGradient(rect.topRight(), rect.bottomLeft())
    # set the default color
    brush.setColorAt(0, self._gauge_color_normal_light)
    brush.setColorAt(1, self._gauge_color_normal_dark)
    # switch to the alarm colors when an alarm is active
    if self._alarm_lower_level_set or self._alarm_upper_level_set:
        if self.alarm_activated:
            brush.setColorAt(0, self._gauge_color_alarm_light)
            brush.setColorAt(1, self._gauge_color_alarm_dark)
    # paint the gauge bar
    path = QtGui.QPainterPath()
    path.setFillRule(Qt.WindingFill)
    path.addRoundedRect(rect, 9, 9)
    painter.setBrush(brush)
    pen.setWidth(1)
    pen.setBrush(brush)
    painter.setPen(pen)
    painter.drawPath(path)
    ##################################################
    # draw min max tracking
    ##################################################
    pen = painter.pen()
    pen.setColor(self._min_max_color)
    pen.setWidth(4)
    painter.setPen(pen)
    if self._cycle_min_tracking:
        painter.drawLine(min_point_one, min_point_two)
    if self._cycle_max_tracking:
        painter.drawLine(max_point_one, max_point_two)
    ##################################################
    # draw target tracking
    ##################################################
    pen = painter.pen()
    pen.setColor(self._target_color)
    pen.setWidth(4)
    painter.setPen(pen)
    if self._target_tracking:
        painter.drawLine(target_point_one, target_point_two)
    ##################################################
    # draw the limits.
    ##################################################
    # setup the font and pen
    fnt = painter.font()
    fnt.setBold(True)
    fnt.setPixelSize(self._alarm_text_size)
    painter.setFont(fnt)
    # set up the pen
    pen = painter.pen()
    pen.setColor(self._alarm_text_color)
    pen.setWidth(4)
    painter.setPen(pen)
    # draw the lower level set point
    if self._alarm_lower_level_set:
        # draw the alarm level
        painter.drawLine(lower_alarm_point_one, lower_alarm_point_two)
        # setup the pen for writing the alarm text
        pen.setWidth(1)
        painter.setPen(pen)
        # draw the alarm text
        painter.drawText(lower_alarm_text_rect, lower_alarm_text_align, self._alarm_lower_level_text)
    # draw the upper level set point
    pen.setWidth(4)
    painter.setPen(pen)
    if self._alarm_upper_level_set:
        # draw the alarm level
        painter.drawLine(upper_alarm_point_one, upper_alarm_point_two)
        # setup the pen for writing the alarm text
        pen.setWidth(1)
        painter.setPen(pen)
        # draw the alarm text
        painter.drawText(upper_alarm_text_rect, upper_alarm_text_align, self._alarm_upper_level_text)
class ICLinearGauge(ICLinearAxisContainer):
    """
    Compound widget with a Gauge Bar and label for displaying the plotted value.
    """

    def __init__(self, name: str, unit: str, min_val: float = 0, max_val: float = 100, display_steps: int = 5, show_title: bool = True, show_value: bool = True,
                 position: ICWidgetPosition = ICWidgetPosition.Left, widget_id: int = 0, *args, **kwargs):
        # select the container layout from the title/value visibility flags.
        # bug fix: the original tested `show_value` twice, so the
        # NO_TITLE_NO_VALUE layout could never be selected
        if (not show_title) and (not show_value):
            cont_type = ICLinearContainerType.BAR_NO_TITLE_NO_VALUE
        elif not show_value:
            cont_type = ICLinearContainerType.BAR_NO_VALUE
        elif not show_title:
            cont_type = ICLinearContainerType.BAR_NO_TITLE
        else:
            cont_type = ICLinearContainerType.BAR
        super(ICLinearGauge, self).__init__(cont_type, widget_id=widget_id, *args, **kwargs)
        # start the gauge at the mid point of the range
        curr_value = 0.5 * (min_val + max_val)
        # create the gauge Bar
        self.gauge_bar = ICGaugeBar(min_val, max_val, curr_value, position, widget_id)
        self.gauge_bar.changed[float].connect(self.value_changed)
        self.add_central_widget(self.gauge_bar)
        # initialise the local variables
        self.title = name
        self.value = curr_value
        self.unit = unit
        # number of steps for drawing ticks in the gauge bar
        self._display_steps: int = display_steps
        # selected values and displayed values for the scale
        self._scale_values: list[float] = []
        self._scale_displayed_values: list[str] = []
        # create the display lists
        self._scale_values, self._scale_displayed_values = ICLinearAxis.create_ticks(max_val, min_val, display_steps, "{0:.0f}")
        # add the scale bar on the side opposite to the gauge
        self.add_first_scale_bar(name, self._scale_values, self._scale_displayed_values, ICWidgetPosition.opposite(position))
        self.vertical_gauge_width = ICDisplayConfig.LinearGaugeVerticalMaxWidth
        self.horizontal_gauge_height = ICDisplayConfig.LinearGaugeHorizontalMaxHeight
        # override the base Size policy
        self.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        # call layout update to specify size
        self.on_layout_update()

    ########################################################
    # properties
    ########################################################

    ########################################################
    # functions
    ########################################################
    # override the default show event so sizes are recomputed when shown
    def showEvent(self, e):
        self.on_layout_update()

    ########################################################
    # slots
    ########################################################
    # handles the signal for value update
    # @pyqtSlot(float)
    def update_upper_alarm_level(self, new_level: float) -> None:
        # keep the existing label, replace only the level
        nm, old_level = self.gauge_bar.upper_alarm
        self.gauge_bar.upper_alarm = (nm, new_level)

    # @pyqtSlot(float)
    def update_lower_alarm_level(self, new_level: float) -> None:
        # keep the existing label, replace only the level
        nm, old_level = self.gauge_bar.lower_alarm
        self.gauge_bar.lower_alarm = (nm, new_level)

    ########################################################
    # base class event overrides
    ########################################################
    # change layout based on the orientation
    def on_layout_update(self) -> None:
        gauge_width = self.gauge_bar.estimate_max_gauge_width()
        if self.scale_bar_one is not None:
            scale_width = self.scale_bar_one.estimate_max_scale_width()
        if self.position.is_horizontal():
            self.size_hint = (ICDisplayConfig.LinearGaugeHorizontalWidth, ICDisplayConfig.LinearGaugeHorizontalMaxHeight)
            self.gauge_bar.size_hint = (ICDisplayConfig.LinearGaugeHorizontalWidth, gauge_width)
            if self.scale_bar_one is not None:
                self.scale_bar_one.size_hint = (ICDisplayConfig.LinearGaugeHorizontalWidth, scale_width)
        else:
            self.size_hint = (ICDisplayConfig.LinearGaugeVerticalMaxWidth, ICDisplayConfig.LinearGaugeVerticalHeight)
            self.gauge_bar.size_hint = (gauge_width, ICDisplayConfig.LinearGaugeVerticalHeight)
            if self.scale_bar_one is not None:
                self.scale_bar_one.size_hint = (scale_width, ICDisplayConfig.LinearGaugeVerticalHeight)

    # push an externally supplied value into the gauge bar
    def on_value_update(self, value: float) -> None:
        self.gauge_bar.gauge_value = value
@author: Prosenjit
Custom Qt Widget to show a linear gauge with min-max. The following modifiable
attributes are exposed. Class for a custom widget to draw a colored bar.
Length of the colored bar is proportional to the value. # bar state has changed signal. it can be current value or alarm status # minimum and maximum value of the gauge bar # current value of the gauge # has the current value lead to an alarm # upper alarm level for the gauge # lower alarm level for the gauge # max level tracking # min level tracking # target tracking # gauge width # background colors # gauge colors normal # gauge colors alarmed # alarm level text size and color # min max line color # target color # sets the click-ability and focus-ability of the button # set the position of the gauge. # override the base Size policy ######################################################## # properties ######################################################## # get the minimum limit of the gauge bar # set the minimum limit of the gauge bar # get the maximum limit of the gauge bar # set the minimum limit of the gauge bar # get the current value # set the current value # limit gauge value to the min and max range # update the min value # update the max value # reset the alarm before testing # check for too low alarm # check for too high alarm # get the upper level alarm # tuple of (name, value) # set the upper level alarm # check if upper alarm level is greater than the lower alarm level # check if the limit value is in between the max and min values # check for alarm level # get the lower level alarm # tuple of (name, value) # set the upper level alarm # check if lower alarm level is less the upper alarm level # check if the limit value is in between the max and min values # check if alarm is active # gauge width # get the background container color of the bar # set the background color of the bar # get the normal gauge color # set the normal gauge color # get the alarm gauge color # set the normal gauge color # get the alarm level text size # set the alarm level text size # get the alarm level text color # set the alarm level text color # min max color # target 
color ######################################################## # functions ######################################################## # start the cycle max tracking # reset the cycle for max tracking # stop the cycle max tracking # start the cycle max tracking # reset the cycle for max tracking # stop the cycle max tracking # estimate max width # max width is dependent on the orientation of the widget # setup the font # create the font matrices ######################################################## # base class event overrides ######################################################## # TODO: mouse click plots the history ####################################################### # overrides and event handlers ######################################################## # override the default paint event # if hidden or transparent then nothing else to do # get the size of the containing widget ########################################## # calculate dimensions ########################################## ################################################## # horizontal configurations ################################################## # bar position # alarm levels # calculate the text position # calculate the text position ################################################## # top & bottom specific calculations ################################################## ################################################## # Top ################################################## # min tracking # max tracking # target tracking # lower alarm level # upper alarm level ################################################## # Bottom ################################################## # min tracking # max tracking # target tracking # lower alarm level # upper alarm level ################################################## # Vertical configurations ################################################## # bar position # alarm levels # calculate the position # calculate where to write the text # calculate where 
to write the text ################################################## # left and right specific calculations ################################################## ################################################## # Left ################################################## # min max positions # target position # setup the alarm levels ################################################## # Right ################################################## # min max positions # target position # setup the alarm levels ################################################## # paint the main rectangle ################################################## # define the filling brush # define the pen # define the path and draw # leave here for frame only ################################################## # draw the gauge bar ################################################## # set the default color # check if the current value is below the minimum limit # paint the gauge bar ################################################## # draw min max tracking ################################################## ################################################## # draw target tracking ################################################## ################################################## # draw the limits. 
################################################## # setup the font and pen # set up the pen # draw the lower level set point # draw the alarm level # setup the pen for writing the alarm text # draw the alarm text # draw the upper level set point # draw the alarm level # setup the pen for writing the alarm text # draw the alarm text Compound widget with a Gauge Bar and label for displaying the plotted value # create the gauge Bar # initialise the local variables # number of steps for drawing ticks in the gauge bar # selected values and displayed values for the scale # create the display lists # add the scale bar # override the base Size policy # call layout update to specify size ######################################################## # properties ######################################################## ######################################################## # functions ######################################################## # override the default show event ######################################################## # slots ######################################################## # handles the signal for value update # @pyqtSlot(float) # @pyqtSlot(float) ######################################################## # base class event overrides ######################################################## # change layout based on the orientation | 3.018632 | 3 |
app/test/test_models.py | ttphan/BeerPython | 0 | 6618390 | import pytest
from sqlalchemy import create_engine
import db
from model import model
from model.model import *
from db import Session, sessionScope
from sqlalchemy.exc import IntegrityError
import bcrypt
import pdb
import inspect, os
# bug fix: pytest.yield_fixture is deprecated and removed in modern pytest;
# a plain fixture supports yield-style setup/teardown
@pytest.fixture(scope="module")
def connection():
    """Module-scoped in-memory SQLite engine; binds the app Session to it."""
    # in-memory sqlite database
    engine = create_engine('sqlite://')
    # Create tables
    model.Base.metadata.create_all(engine)
    # Establish connection, reconfigure session to use the test db
    connection = engine.connect()
    db.Session.configure(bind=connection)
    model.Base.metadata.bind = engine
    yield connection
    # Teardown
    model.Base.metadata.drop_all()
# bug fix: pytest.yield_fixture is deprecated and removed in modern pytest
@pytest.fixture
def db_session(connection):
    """Per-test session wrapped in a transaction that is rolled back after the test."""
    transaction = connection.begin()
    with sessionScope() as session:
        # Add default tally type 'beer'
        session.add(TallyType(label='Beer'))
        session.commit()
        yield session
    # Teardown: undo everything the test did
    transaction.rollback()
    session.close()
def test_db_sanity_check(db_session):
    """A row added through the session is visible within the transaction."""
    member = Member(name='foobar')
    db_session.add(member)
    assert 1 == db_session.query(Member).count()
def test_db_sanity_check_rollback(db_session):
    """The insert from the previous test was rolled back between tests."""
    remaining = db_session.query(Member).count()
    assert remaining == 0
class TestTally:
    """Tests for tally creation and per-type tally accounting."""

    def test_add_tally(self, db_session):
        # a list must exist for tallies to attach to
        testList = List()
        testMember1, testMember2 = Member(name = 'foo'), Member(name = 'bar')
        db_session.add(testList)
        db_session.commit()
        # Add tally for the first member only
        testMember1.addTally(db_session)
        assert testMember1.getTotalTallies(db_session) == 1
        assert testMember2.getTotalTallies(db_session) == 0
        assert db_session.query(Tally).count() == 1
        # the tally is attached to the (only) list
        assert db_session.query(Tally).get(1).listId == 1
        # Add some more; counts are tracked per member
        testMember1.addTally(db_session)
        testMember2.addTally(db_session)
        assert testMember1.getTotalTallies(db_session) == 2
        assert testMember2.getTotalTallies(db_session) == 1
        assert db_session.query(Tally).count() == 3

    def test_add_different_tally_types(self, db_session):
        # second tally type in addition to the default 'Beer'
        db_session.add(TallyType(label = 'Cola'))
        db_session.add(List())
        testMember = Member(name = 'foo')
        db_session.commit()
        assert db_session.query(TallyType).count() == 2
        # Tally beer (default is beer)
        testMember.addTally(db_session)
        db_session.commit()
        assert db_session.query(Tally).get(1).tallyType.label == 'Beer'
        # Tally cola and beer; each tally keeps its own type
        testMember.addTally(db_session, 1, 'Cola')
        testMember.addTally(db_session)
        db_session.commit()
        assert db_session.query(Tally).get(2).tallyType.label == 'Cola'
        assert db_session.query(Tally).get(1).tallyType.label == 'Beer'
        # Raise IntegrityError, non-existant tally type
        testMember.addTally(db_session, 1, 'Chips')
        with pytest.raises(IntegrityError):
            db_session.commit()
        # leave the session usable for the fixture teardown
        db_session.rollback()
class TestList:
    """Tests for the relationship between tallies and lists."""

    def test_tally_list_dependency_exception(self, db_session):
        """A tally cannot be recorded when no list exists yet."""
        Member(name = 'foo').addTally(db_session)
        with pytest.raises(IntegrityError):
            db_session.commit()
        db_session.rollback()

    def test_add_tally_to_list(self, db_session):
        """New tallies always attach to the most recently created list."""
        testList = List()
        testMember = Member(name = 'foo')
        # Add list to active session, sanity check
        db_session.add(testList)
        db_session.commit()
        assert db_session.query(List).count() == 1
        # Add tallies to newest list
        testMember.addTally(db_session, 2)
        assert len(db_session.query(List).get(1).tallies) == 2
        # Create new list, should add new tallies to new list
        newList = List()
        db_session.add(newList)
        db_session.commit()
        assert db_session.query(List).count() == 2
        testMember.addTally(db_session, 3)
        assert testMember.getTotalTallies(db_session, 1) == 2
        assert testMember.getTotalTallies(db_session) == 3
class TestRoom:
    """Tests for the one-to-one member/room relationship."""

    def test_member_room(self, db_session):
        """Members can move in, switch rooms and move out from either side."""
        testRoom1, testRoom2, testRoom3 = Room(), Room(), Room()
        testMember1, testMember2 = Member(name = 'foo'), Member(name = 'bar')
        db_session.add(testRoom1)
        db_session.add(testRoom2)
        db_session.add(testRoom3)
        db_session.add(testMember1)
        db_session.add(testMember2)
        db_session.commit()
        # Rooms are empty, members do not have a room, sanity check
        assert testRoom1.member == None
        assert testRoom2.member == None
        assert testRoom3.member == None
        assert testMember1.room == None
        assert testMember2.room == None
        assert db_session.query(Member).filter(Member.room != None).count() == 0
        # Populate the rooms (the relationship is settable from either side)
        testRoom1.member = testMember1
        testMember2.room = testRoom3
        assert testRoom1.member == testMember1
        assert testRoom3.member == testMember2
        assert db_session.query(Member).filter(Member.room != None).count() == 2
        # One of the members moves to a different room
        testMember1.room = testRoom2
        assert testRoom1.member == None
        assert testRoom2.member == testMember1
        # One of the members leaves the dormitory
        testMember2.room = None
        assert testMember2.room == None
        assert testRoom3.member == None
        assert db_session.query(Member).filter(Member.room != None).count() == 1
class testPassword:
    # NOTE(review): pytest only collects classes named "Test*" by default,
    # so this lowercase class is most likely never collected — confirm and
    # consider renaming it to TestPassword.
    def test_password_verification(self, db_session):
        """A stored bcrypt hash verifies the right plaintext and no other."""
        # '<PASSWORD>' looks like an anonymisation artifact; presumably the
        # original plaintext was 'foo' to match the assertion below — verify.
        db_session.add(Password('<PASSWORD>'))
        db_session.commit()
        hashed = db_session.query(Password).one().password
        assert bcrypt.hashpw('foo', hashed) == hashed
        assert bcrypt.hashpw('bar', hashed) != hashed
| import pytest
from sqlalchemy import create_engine
import db
from model import model
from model.model import *
from db import Session, sessionScope
from sqlalchemy.exc import IntegrityError
import bcrypt
import pdb
import inspect, os
@pytest.yield_fixture(scope="module")
def connection():
# in-memory sqlite database
engine = create_engine('sqlite://')
# Create tables
model.Base.metadata.create_all(engine)
# Establish connection, reconfigure session to use the test db
connection = engine.connect()
db.Session.configure(bind = connection)
model.Base.metadata.bind = engine
yield connection
# Teardown
model.Base.metadata.drop_all()
@pytest.yield_fixture
def db_session(connection):
transaction = connection.begin()
with sessionScope() as session:
# Add default tally type 'beer'
session.add(TallyType(label = 'Beer'))
session.commit()
yield session
# Teardown
transaction.rollback()
session.close()
def test_db_sanity_check(db_session):
db_session.add(Member(name = 'foobar'))
assert db_session.query(Member).count() == 1
def test_db_sanity_check_rollback(db_session):
assert db_session.query(Member).count() == 0
class TestTally:
def test_add_tally(self, db_session):
testList = List()
testMember1, testMember2 = Member(name = 'foo'), Member(name = 'bar')
db_session.add(testList)
db_session.commit()
# Add tally
testMember1.addTally(db_session)
assert testMember1.getTotalTallies(db_session) == 1
assert testMember2.getTotalTallies(db_session) == 0
assert db_session.query(Tally).count() == 1
assert db_session.query(Tally).get(1).listId == 1
# Add some more
testMember1.addTally(db_session)
testMember2.addTally(db_session)
assert testMember1.getTotalTallies(db_session) == 2
assert testMember2.getTotalTallies(db_session) == 1
assert db_session.query(Tally).count() == 3
def test_add_different_tally_types(self, db_session):
db_session.add(TallyType(label = 'Cola'))
db_session.add(List())
testMember = Member(name = 'foo')
db_session.commit()
assert db_session.query(TallyType).count() == 2
# Tally beer (default is beer)
testMember.addTally(db_session)
db_session.commit()
assert db_session.query(Tally).get(1).tallyType.label == 'Beer'
# Tally cola and beer
testMember.addTally(db_session, 1, 'Cola')
testMember.addTally(db_session)
db_session.commit()
assert db_session.query(Tally).get(2).tallyType.label == 'Cola'
assert db_session.query(Tally).get(1).tallyType.label == 'Beer'
# Raise IntegrityError, non-existant tally type
testMember.addTally(db_session, 1, 'Chips')
with pytest.raises(IntegrityError):
db_session.commit()
db_session.rollback()
class TestList:
def test_tally_list_dependency_exception(self, db_session):
Member(name = 'foo').addTally(db_session)
with pytest.raises(IntegrityError):
db_session.commit()
db_session.rollback()
def test_add_tally_to_list(self, db_session):
testList = List()
testMember = Member(name = 'foo')
# Add list to active session, sanity check
db_session.add(testList)
db_session.commit()
assert db_session.query(List).count() == 1
# Add tallies to newest list
testMember.addTally(db_session, 2)
assert len(db_session.query(List).get(1).tallies) == 2
# Create new list, should add new tallies to new list
newList = List()
db_session.add(newList)
db_session.commit()
assert db_session.query(List).count() == 2
testMember.addTally(db_session, 3)
assert testMember.getTotalTallies(db_session, 1) == 2
assert testMember.getTotalTallies(db_session) == 3
class TestRoom:
def test_member_room(self, db_session):
testRoom1, testRoom2, testRoom3 = Room(), Room(), Room()
testMember1, testMember2 = Member(name = 'foo'), Member(name = 'bar')
db_session.add(testRoom1)
db_session.add(testRoom2)
db_session.add(testRoom3)
db_session.add(testMember1)
db_session.add(testMember2)
db_session.commit()
# Rooms are empty, members do not have a room, sanity check
assert testRoom1.member == None
assert testRoom2.member == None
assert testRoom3.member == None
assert testMember1.room == None
assert testMember2.room == None
assert db_session.query(Member).filter(Member.room != None).count() == 0
# Populate the rooms
testRoom1.member = testMember1
testMember2.room = testRoom3
assert testRoom1.member == testMember1
assert testRoom3.member == testMember2
assert db_session.query(Member).filter(Member.room != None).count() == 2
# One of the members moves to a different room
testMember1.room = testRoom2
assert testRoom1.member == None
assert testRoom2.member == testMember1
# One of the members leaves the dormitory
testMember2.room = None
assert testMember2.room == None
assert testRoom3.member == None
assert db_session.query(Member).filter(Member.room != None).count() == 1
class testPassword:
def test_password_verification(self, db_session):
db_session.add(Password('<PASSWORD>'))
db_session.commit()
hashed = db_session.query(Password).one().password
assert bcrypt.hashpw('foo', hashed) == hashed
assert bcrypt.hashpw('bar', hashed) != hashed
| en | 0.794347 | # in-memory sqlite database # Create tables # Establish connection, reconfigure session to use the test db # Teardown # Add default tally type 'beer' # Teardown # Add tally # Add some more # Tally beer (default is beer) # Tally cola and beer # Raise IntegrityError, non-existant tally type # Add list to active session, sanity check # Add tallies to newest list # Create new list, should add new tallies to new list # Rooms are empty, members do not have a room, sanity check # Populate the rooms # One of the members moves to a different room # One of the members leaves the dormitory | 2.337734 | 2 |
vw/to_data_format.py | rjawor/tagging | 0 | 6618391 | <reponame>rjawor/tagging
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, re
# <ref>...</ref> holds the verse reference; <dev>...</dev> holds the verse
# text up to the trailing '|'.
text_pattern = re.compile(r'<ref>(.*?)<\/ref>\s*<dev>(.*?)\|<\/dev>')
# Optional third CLI argument 'verse' makes the output keep the reference.
printVerse = len(sys.argv) > 3 and sys.argv[3] == 'verse'
# argv[1] = input file, argv[2] = output file
with open(sys.argv[1]) as input_file, open(sys.argv[2], 'w') as output_file:
    for line in input_file:
        match = text_pattern.search(line)
        if match:
            verse = match.group(1)
            sentence_text = match.group(2)
            # in_cvb tracks whether we are inside a <cvb>...</cvb> span;
            # every word is emitted as 'word_0' or 'word_1' accordingly.
            in_cvb = 0
            words = []
            for word in sentence_text.split():
                if word.startswith('<cvb>'):
                    word = word[5:]
                    in_cvb = 1
                # Snapshot the state after a possible opening tag so the
                # tagged word itself is labelled 1 even when the span
                # closes on the same word.
                curr_cvb = in_cvb
                if word.endswith('</cvb>'):
                    word = word[:-6]
                    in_cvb = 0
                words.append(word+'_'+str(curr_cvb))
            if printVerse:
                output_file.write(verse+' ')
            output_file.write(' '.join(words)+'\n')
        else:
            sys.stderr.write('Non matching line: '+line)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, re
text_pattern = re.compile(r'<ref>(.*?)<\/ref>\s*<dev>(.*?)\|<\/dev>')
printVerse = len(sys.argv) > 3 and sys.argv[3] == 'verse'
with open(sys.argv[1]) as input_file, open(sys.argv[2], 'w') as output_file:
for line in input_file:
match = text_pattern.search(line)
if match:
verse = match.group(1)
sentence_text = match.group(2)
in_cvb = 0
words = []
for word in sentence_text.split():
if word.startswith('<cvb>'):
word = word[5:]
in_cvb = 1
curr_cvb = in_cvb
if word.endswith('</cvb>'):
word = word[:-6]
in_cvb = 0
words.append(word+'_'+str(curr_cvb))
if printVerse:
output_file.write(verse+' ')
output_file.write(' '.join(words)+'\n')
else:
sys.stderr.write('Non matching line: '+line) | en | 0.44423 | #!/usr/bin/python # -*- coding: utf-8 -*- | 3.226442 | 3 |
florana/extract.py | josiest/Flora-Data-Extraction | 0 | 6618392 | <reponame>josiest/Flora-Data-Extraction<filename>florana/extract.py<gh_stars>0
import textract
import re
import json
import argparse
import os
import textwrap
import itertools
from pathlib import Path
from collections import OrderedDict
file_dir = Path(__file__).parent.absolute()
cwd = Path()
# TODO: Recursively iterate through a directory
# Data to extract:
# species name | states and provinces it appears in | classifier
def main():
    """Command-line entry point for the extractor.

    Parses the command line, runs the extraction on every requested genus
    treatment pdf and writes the results either as one pair of csv files
    per treatment or, when -o is given, as a single combined pair.  On
    failure a summary is printed and details are written to error.log.

    Raises ValueError when no input files are specified.
    """
    # Build the command line argument parser
    description = '''
    Extract data from genus treatment pdfs of "Flora of North America
    The csv ouptut files should have the following format:
    <genus name>, <locations appeared in>, <classifier>
    Example usage:
    python -m florana.extract -A -o data.csv
    '''
    prog='python -m florana.extract'
    fmt_class = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=fmt_class,
                                     description=textwrap.dedent(description),
                                     prog=prog)
    parser.add_argument('-A', action='store_true',
                        help='parse all pdf files in the current directory')
    parser.add_argument('filenames', metavar='F', nargs='*',
                        help='the treatment files to extract from')
    parser.add_argument('-o', action='store',
                        help='specify a single output file (csv)')
    success = True
    args = parser.parse_args()
    treatments = []
    # The user specified to parse all pdf files in the directory
    if args.A and not args.filenames:
        treatments = [fn for fn in os.listdir() if '.pdf' in fn]
    # The user specified the files manually
    elif args.filenames:
        treatments = args.filenames
    else:
        message = 'Please either specify filenames manually or use the '\
                  '"parse all" flag (-A).'
        raise ValueError(message)
    locations = ''
    classifiers = ''
    sep = ''
    error = ''      # Brief error message for program ouput to console
    log_error = ''  # Verbose error message for error.log
    for treatment in treatments:
        # name the csv file after the pdf input
        match = re.match(r'([\w\.]+)\.pdf', treatment)
        if not match:
            print(f'"{treatment}" is not a pdf file!')
            success = False
            continue
        fn = match[1]
        # If the extracting algorithm couldn't find locations, keep track of
        # the error messages
        results = extract_from(treatment)
        if results['error']:
            success = False
            error += sep+results['error']
            log_error += sep+results['verbose-error']
        # If the user specified a single output file, compile all the
        # lines into a single string and write to a file later
        if args.o:
            locations += sep+results['locations']
            classifiers += sep+results['classifiers']
        # If the user didn't specify a single output file write the files
        # for each treatment as we go
        else:
            with open(fn+'.csv', 'w') as f:
                f.write(results['locations'])
            with open(fn+'-classifiers.csv', 'w') as f:
                f.write(results['classifiers'])
        sep = '\n'
    # if the user specified a single output file, now is when we write it
    if args.o:
        # locations file
        fn = args.o
        # classifiers file
        idfn = ''
        # The user may have alread include the file extension
        try:
            i = fn.index('.csv')
            idfn = fn[:i]+'-classifiers'+fn[i:]
        # If the user didn't include the file extension, add it
        except ValueError:
            # BUG FIX: derive the classifiers name from the bare stem
            # *before* appending '.csv'; the previous order produced
            # '<name>.csv-classifiers.csv' instead of
            # '<name>-classifiers.csv'.
            idfn = fn+'-classifiers.csv'
            fn += '.csv'
        with open(fn, 'w') as f:
            f.write(locations)
        with open(idfn, 'w') as f:
            f.write(classifiers)
    if success:
        print('Data was extracted successfully')
    else:
        print(error)
        with open('error.log', 'wb') as f:
            f.write(log_error.encode('utf8'))
        print('An error occured when extracting the flora data. See ' \
              'error.log for more details.')
def extract_from(treatment):
    """Extract the location and classifier data from one genus treatment.

    Parameters:
    treatment - a pdf file name of the genus treatment.

    Returns a dict of results with the following format
    "locations" - csv lines "<name>, <location>" for each place a species
                  appears in
    "classifiers" - csv lines "<name>, <classifiers>" per (sub)species
    "error" - a brief error message stating which species the algorithm
              couldn't find locations for (empty on full success)
    "verbose-error" - an error message stating which species the algorithm
                      couldn't find locations for as well as the block of
                      text that the algorithm searched in for the locations

    Raises a ValueError if the genus isn't found in the treatment.
    """
    text = load_treatment(treatment)
    genus = genus_in(text)
    if not genus:
        raise ValueError("No genus was found!")
    data = {'locations': '', 'classifiers': '',
            'error': '', 'verbose-error': ''}
    # Separators start empty so the first line of each field is not preceded
    # by a newline; each flips to '\n' after its first write.
    locsep = ''
    errsep = ''
    idsep = ''
    for block, name in partition(text, genus):
        # one classifier line per (sub)species
        ids = ids_in(block)
        data['classifiers'] += f'{idsep}{name}, {ids}'
        idsep = '\n'
        # one location line per (species, location) pair
        locs = '\n'.join(f'{name}, {loc}' for loc in locs_in(block))
        if not locs:
            data['error'] += f"{errsep}Couldn't find locations for {name}"
            data['verbose-error'] += f"{errsep}Couldn't find locations for " \
                                     f"{name} in:\n\n{block}\n"
            errsep = '\n'
        else:
            data['locations'] += locsep+locs
            locsep = '\n'
    return data
def load_treatment(fn, encoding='utf-8'):
    """Read the text of a treatment pdf via textract.

    fn       - the file name of the treatment, relative to the working dir
    encoding - text encoding used to decode textract's byte output
    """
    full_path = Path.cwd() / fn
    raw = textract.process(str(full_path), encoding=encoding)
    return raw.decode(encoding)
# regex patterns
# --- Genus pattern ---
#
# A genus header line has the shape
#
#   n. GENUS
#
# where n is a natural number (optionally with a letter suffix) and GENUS is
# written in all caps; the genus name does not have to end the line.
genus_pattern = re.compile(r'^[ ]*\d+[a-z]*\.[ ]*([A-Z]+)\s+',
                           flags=re.MULTILINE)
def genus_in(treatment):
    """Return the genus name found in the given treatment text.

    The all-caps genus from the first "n. GENUS" header is returned in
    title case; an empty string is returned when no header is present.
    """
    found = genus_pattern.search(treatment)
    if found is None:
        # No "n. GENUS" header anywhere in the text
        return ""
    # De-"caps-lock" the all-caps match: first letter upper, rest lower.
    return found[1].capitalize()
def partition(treatment, genus):
    """Yield the (block, name) pair for each species* in the treatment.

    *Note that this includes subspecies: when a species block contains
    subspecies introductions, one pair per subspecies is yielded instead
    of the enclosing species block.

    treatment - the treatment text (a string)
    genus     - the genus name of the treatment
    """
    # Find all the species names in the treatment and reorder them in the order
    # they appear in the text (each key entry starts with its number, so we
    # can sort on the integer prefix before the first '.')
    name_gens = [keys_in(subgroup, genus) for subgroup in subgroups(treatment)]
    names = sorted(itertools.chain(*name_gens),
                   key=lambda s: int(s.split('.')[0]))
    # We want to remove the number before each name and also remove any
    # duplicates while preserving order. OrderedDict can acheive this
    names = (' '.join(name.split(' ')[1:3]).strip() for name in names)
    names = OrderedDict.fromkeys(names).keys()
    for block, name in species_blocks(treatment, names):
        # each species block may have subspecies
        has_subspecies = False
        for sub_block, sub_name in subspecies_blocks(block, name):
            has_subspecies = True
            yield sub_block, sub_name
        if not has_subspecies:
            yield block, name
def subgroups(treatment):
    """Generate each subgroup block of the treatment, in order.

    A subgroup is the text between two genus headers ("n. GENUS").  When
    the treatment has more than one header, the first one introduces the
    whole treatment and is skipped.  Raises ValueError when no header is
    found at all.
    """
    # Find all occurences of genus headers
    headers = list(genus_pattern.finditer(treatment))
    i, j = 0, 0
    # If there are subgroups, the first header is for the entire treatement and
    # there's no species key before the header for the first subgroup, so take
    # the first header out of the list
    if len(headers) > 1:
        headers = headers[1:]
    for next_header in headers:
        # Update j to the start of the current header: we're really yielding
        # the previous match
        j = next_header.start()
        # If the block starts at index 0, then we haven't even reached the first
        # subgroup block, so don't yield yet
        if i > 0:
            yield treatment[i:j]
        # Update i to the start of the current header: on the next iteration
        # it will become the start of the previous header and j will be the
        # start of the current header.
        i = j
    # Once this is encountered, all info is irrelevant for this program
    # NOTE(review): the bare except also swallows e.g. KeyboardInterrupt;
    # `except ValueError:` (as used in species_blocks) looks intended.
    # NOTE(review): when "other reference" is absent, k = -1 makes the final
    # slice silently drop the last character of the treatment — confirm.
    try:
        k = treatment.lower().index("other reference")
    except:
        k = -1
    if i > 0:
        yield treatment[j:k]
    # If there were no matches, then a genus couldn't be found
    else:
        raise ValueError("No genus was found!")
def keys_in(subgroup, genus):
    """Generate all species names from the species key in a subgroup block.

    subgroup - the subgroup block containing the species
    genus - of the species

    Each yielded name keeps its key number, e.g. "3. Carex alba", so the
    caller can sort names into textual order.  Raises ValueError when the
    subgroup contains no species at all.
    """
    key_pattern = build_key_pattern(genus)
    has_species_key = False
    for match in key_pattern.finditer(subgroup):
        has_species_key = True
        yield match[0]
    # it's possible that the text has no species key - this happens when
    # there's only one species
    if not has_species_key:
        # Compile the intro pattern without knowing what the species is. Since
        # there's only one species this is fine.
        intro_pattern = build_intro_pattern(genus)
        intro = intro_pattern.search(subgroup)
        if not intro:
            raise ValueError('No species found!')
        else:
            # Fabricate a key entry so the caller can still sort by number
            yield '1. '+' '.join(intro.groups())
def species_blocks(treatment, names):
    """Generate all species blocks* and names in treatment.

    *Note that this includes all subspecies if any: a species block runs
    from its introduction up to the next species' introduction.

    treatment - the treatment text
    names - an ordered list of all species names ("<genus> <species>")
            in the order they appear in the treatment

    Raises ValueError (after yielding every block it could) when a name is
    malformed, an introduction is missing, or blocks appear out of order.
    """
    error=''
    i, j = 0, 0
    # Split the whole text into blocks based on the introduction to each subsp.
    for next_name in names:
        # split the name up into its individual parts in order to pass once
        # again into the intro_pattern builder, this time compiling to look
        # for a specific species.
        if len(next_name.split(' ')) > 2:
            if error:
                error += '\n'
            error += f'"{next_name}" is too long: expected 2 words!'
            continue
        genus, species = next_name.split(' ')
        intro_pattern = build_intro_pattern(genus, species=species)
        intro = intro_pattern.search(treatment)
        # Produce error message if species introduction couldn't be found
        if not intro:
            if error:
                error += '\n'
            error += f'Could not find species introduction for "{next_name}"'
            continue
        j = intro.start()
        # If i > j, then something went wrong when we reordered the search
        # results.
        if i > j:
            if error:
                error += '\n'
            error += f'When searching in {next_name}: Indices ({i}, {j}) are '\
                     'out of order!'
        # If the block starts at index 0, then we haven't even reached the first
        # species block, so don't yield yet
        elif i > 0:
            yield treatment[i:j], name
        name = next_name
        i = j
    # Finally yield the "current" match (the last match).
    # NOTE(review): when "OTHER REFERENCES" is absent, k = -1 makes this
    # slice drop the final character of the treatment — confirm intended.
    try:
        k = treatment.index("OTHER REFERENCES")
    except ValueError:
        k = -1
    if i > 0:
        yield treatment[j:k], name
    if error:
        error += "\nErrors occured while partitioning species blocks!"
        raise ValueError(error)
def subspecies_blocks(block, species):
    """Generate all subspecies blocks in a species block, if any.

    block - the species block to look in
    species - the species name of the form "<genus> <species>"

    Yields (sub_block, full_name) pairs where full_name includes the
    subsp./var. epithet.  Yields nothing when the block has no subspecies.
    Raises ValueError when blocks appear out of order.
    """
    if len(species.split(' ')) > 2:
        raise ValueError(f'"{species}" is too long: expected 2 words!')
    genus, species = species.split(' ')
    # Build the intro pattern to specifically look for subspecies
    intro_pattern = build_intro_pattern(genus, species=species,
                                        subspecies=r'[a-z]+')
    error = ''
    i, j = 0, 0
    name = ''
    # go through each subspecies introduction match
    for intro in intro_pattern.finditer(block):
        # Start of the current introduction; the *previous* block ends here
        j = intro.start()
        # Only yield the previous match when we've actually found it
        if i > 0:
            if i > j:
                if error:
                    error += '\n'
                error += f'When searching in "{name}" block: Indices ({i}, {j}'\
                         ') are out of order!'
            yield block[i:j], name
        # The name should include the entire species, including the subspecies
        # The intro pattern should have matched all of these.
        name = ' '.join(intro.groups())
        i = j
    # It's possible that there are no subspecies. The intro pattern wouldn't
    # have found anything and i would have never been incremented. If this is
    # the case we don't want to yield anything, otherwise yield the rest of
    # subspecies block until the end of the species block
    # NOTE(review): block[j:-1] drops the species block's final character —
    # confirm this is intended (cf. the k = -1 slices elsewhere).
    if i > 0:
        yield block[j:-1], name
    if error:
        error += "\nErrors occured when partitioning the treatment"
        raise ValueError(error)
def build_key_pattern(genus):
    """Compile the regex that matches species-key index lines for *genus*.

    An index line has the form

        n. <genus> <species> [(in part)]

    and ends the line.  The compiled pattern captures two groups:
    1. the number n that orders how the species appears in the text
    2. the genus-plus-species name
    """
    # The species epithet is lowercase (optionally carrying the hybrid
    # marker "x\") and "(in part)" may or may not follow the name.
    expr = (rf'(\d+)\.[ ]*({genus} (?:x\\)?[a-z\-]+)'
            rf'(?: \(in part\))?\s*\n')
    return re.compile(expr, flags=re.MULTILINE)
def build_intro_pattern(genus, species=r'(?:x\\)?[a-z\-]+', subspecies=''):
    """Compile the regex matching a species (or subspecies) introduction.

    An introduction has the shape

        n[a]*. Genus species {text} [(subsp|var). name] {text}

    where n is an arbitrary natural number.

    genus      - genus name to match (captured as group 1)
    species    - specific epithet to match; defaults to any epithet
                 (captured as group 2)
    subspecies - subspecies pattern; when non-empty, a third group captures
                 the subsp./var. epithet and the numbering must carry a
                 letter suffix (e.g. "3a.")
    """
    # A literal "x\" in a concrete species name must be doubled so the regex
    # engine sees an escaped backslash; character classes are left alone.
    if 'x\\' in species and '[a-z' not in species:
        species = species.replace('x\\', 'x\\\\')
    parts = [r'^\d+']
    if subspecies:
        # Subspecies introductions are lettered, e.g. "3a."
        parts.append(r'[a-z]+')
    parts.append(r'\.[ ]*('+genus+') ('+species+')')
    if subspecies:
        # Skip ahead (DOTALL) to the "subsp."/"var." marker and capture it.
        parts.append(r'.*?(?:subsp|var)\.\s*('+subspecies+')')
    return re.compile(''.join(parts), flags=re.MULTILINE|re.DOTALL)
# --- Finding classifiers ---
#
# Classifier letters (C, E, F, I, W) always terminate the line and are set
# off by spaces rather than punctuation.  A common name or a
# "(parenthetical statement)" may sit between the date and the classifiers,
# and some species have no classifiers at all.
id_pattern = re.compile(r'([CEFIW ]+)\s*$', re.MULTILINE)
def ids_in(block):
    """Find the classifier letters for a species.

    Parameters:
    block - a block of text (a string) with its scope limited to a single
            species or subspecies

    Returns the classifiers (e.g. "C E") from the first line that ends with
    them, or an empty string if there are none.
    """
    # (dead locals `error`/`sep` from the original removed — never used)
    for line in block.split('\n'):
        matches = id_pattern.findall(line)
        # The pattern is anchored at the end of the line, so take the last
        # (right-most) match on the first line that has one.
        if matches:
            return matches[-1].strip()
    # if no matches found, there are no classifiers; return an empty string
    return ''
# --- Finding provinces ---
#
# abbreviations and full state names are listed in geography.txt and
# locations.txt so grab each of them
# I could just use a string, but I want to '|'.join(loc_names) so it'll be
# easier to '|' the two to gether
loc_names = []
for fn in ('geography.txt', 'locations.txt'):
    path = Path.joinpath(file_dir, fn)
    with open(path) as f:
        s = f.read()
        # these are special regex charaters, so escape them wherever they
        # appear
        for r in ('.', '(', ')'):
            s = s.replace(r, '\\'+r)
        # I want to '|' each province name, but since they have non-alphabetic
        # characters I need to group each name w/o capturing, hence the (?:)
        #
        # Also cut off the last blank line
        loc_names.extend('(?:'+m+')' for m in s.split('\n')[:-1])
# If one string is a substring of another, regex will match with whatever
# comes first in the pattern. We want to match the longest substrings possible
# so sort the location names by length
#
# Also replace spaces in each name with arbitrary whitespace (names may be
# broken across lines in the pdf text)
loc_names = sorted((loc.replace(' ', r'\s*') for loc in loc_names),
                   key=len, reverse=True)
# A location name is delimited by ';' or ',' (or line start/end) on either
# side:
#
# {<beginning of line>, <;> or <,>} {location name (may include newlines)}{<;>,
# <,> or <end of line>}
loc_pattern_str = r'[^;,]\s*('+'|'.join(loc_names)+r')(?:[;,]|\s*?$|\s*?\n)'
loc_pattern = re.compile(loc_pattern_str, re.MULTILINE)
# --- Location Paragraph Pattern ---
#
# Assumes That locations that a species appears in meets the following format:
#
# 0{arbitrary white space}m; {locations on an abitrary number of lines where
# countries are separated by ';' and states/provinces are separated by ','}.\n
#
# The line doesn't necessarily begin at 0, but a line does end at '.\n'
# (the negative lookbehind keeps abbreviations like "Nfld."/"Labr." from
# terminating the paragraph early)
loc_text_pattern = re.compile(r'0[\)\]]?\s+?m;.*?(?<!Nfld|Labr|..St)'+
                              r'\.\s*?(?:\n|$)', re.DOTALL|re.MULTILINE)
# Fallback paragraph shape for species described without an elevation figure
loc_exception_pattern = re.compile(r'(?:Flowering.*?;|introduced;)' \
                                   r'.*?\.\s*?(?:\n|$)', re.DOTALL|re.MULTILINE)
# load the key which maps full state and province names to their abbreviations
key_fn = 'key.json'
key_path = Path.joinpath(file_dir, key_fn)
key = {}
with open(key_path) as f:
    key = json.load(f)
def locs_in(block):
    """Generate the locations (state/province abbreviations) for a species.

    Parameters:
    block - a block of text (a string) with its scope limited to a single
            species or subspecies

    Yields nothing when no locations paragraph can be found in the block.
    """
    # First find the locations paragraph
    loc_match = loc_text_pattern.search(block)
    if not loc_match:
        # fall back to the "Flowering ...;"/"introduced;" paragraph shape
        loc_match = loc_exception_pattern.search(block)
    loc_text = ""
    if loc_match:
        loc_text = loc_match[0]
    # find all states and provinces in the paragraph; "Baja California" is
    # stripped first so it cannot be mistaken for "California"
    # NOTE(review): the substitution pattern is a non-raw string containing
    # \s — it works, but should be a raw string to avoid escape warnings.
    locs = loc_pattern.findall(re.sub('[Bb]aja\s*[Cc]alifornia', '', loc_text))
    for loc in locs:
        # collapse all whitespace runs (names may span lines) to one space
        loc = ' '.join(loc.split())
        # convert full state and province names to their abbreviations
        if loc in key:
            loc = key[loc]
        # Handle Nfld/Labr differentiation
        # if specified, yield the relevant one
        if '(Labr.)' in loc:
            yield 'Labr.'
        elif '(Nfld.)' in loc:
            yield 'Nfld.'
        # otherwise yield both if both
        elif 'Nfld' in loc and 'Labr' in loc:
            yield 'Nfld.'
            yield 'Labr.'
        # now that these cases have been handled, yield as usual
        elif loc:
            yield loc
# Script entry point: allow `python extract.py` as well as
# `python -m florana.extract`
if __name__ == '__main__':
    main()
| import textract
import re
import json
import argparse
import os
import textwrap
import itertools
from pathlib import Path
from collections import OrderedDict
file_dir = Path(__file__).parent.absolute()
cwd = Path()
# TODO: Recursively iterate through a directory
# Data to extract:
# species name | states and provinces it appears in | classifier
def main():
# Build the command line argument parser
description = '''
Extract data from genus treatment pdfs of "Flora of North America
The csv ouptut files should have the following format:
<genus name>, <locations appeared in>, <classifier>
Example usage:
python -m florana.extract -A -o data.csv
'''
prog='python -m florana.extract'
fmt_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=fmt_class,
description=textwrap.dedent(description),
prog=prog)
parser.add_argument('-A', action='store_true',
help='parse all pdf files in the current directory')
parser.add_argument('filenames', metavar='F', nargs='*',
help='the treatment files to extract from')
parser.add_argument('-o', action='store',
help='specify a single output file (csv)')
success = True
args = parser.parse_args()
treatments = []
# The user specified to parse all pdf files in the directory
if args.A and not args.filenames:
treatments = [fn for fn in os.listdir() if '.pdf' in fn]
# The user specified the files manually
elif args.filenames:
treatments = args.filenames
else:
message = 'Please either specify filenames manually or use the '\
'"parse all" flag (-A).'
raise ValueError(message)
locations = ''
classifiers = ''
sep = ''
error = '' # Brief error message for program ouput to console
log_error = '' # Verbose error message for error.log
for treatment in treatments:
# name the csv file after the pdf input
match = re.match(r'([\w\.]+)\.pdf', treatment)
if not match:
print(f'"{treatment}" is not a pdf file!')
success = False
continue
fn = match[1]
# If the extracting algorithm couldn't find locations, keep track of
# the error messages
results = extract_from(treatment)
if results['error']:
success = False
error += sep+results['error']
log_error += sep+results['verbose-error']
# If the user specified a single output file, compile all the
# lines into a single string and write to a file later
if args.o:
locations += sep+results['locations']
classifiers += sep+results['classifiers']
# If the user didn't specify a single output file write the files
# for each treatment as we go
else:
with open(fn+'.csv', 'w') as f:
f.write(results['locations'])
with open(fn+'-classifiers.csv', 'w') as f:
f.write(results['classifiers'])
sep = '\n'
# if the user specified a single output file, now is when we write it
if args.o:
# locations file
fn = args.o
# classifiers file
idfn = ''
# The user may have alread include the file extension
try:
i = fn.index('.csv')
idfn = fn[:i]+'-classifiers'+fn[i:]
# If the user didn't include the file extension, add it
except ValueError:
fn += '.csv'
idfn = fn+'-classifiers.csv'
with open(fn, 'w') as f:
f.write(locations)
with open(idfn, 'w') as f:
f.write(classifiers)
if success:
print('Data was extracted successfully')
else:
print(error)
with open('error.log', 'wb') as f:
f.write(log_error.encode('utf8'))
print('An error occured when extracting the flora data. See ' \
'error.log for more details.')
def extract_from(treatment):
    """Extract the data from a genus treatment pdf.

    Parameters:
        treatment - a pdf file name of the genus treatment.

    Returns a dict of results with the following format
        "locations"     - species names and the locations they appear in
        "classifiers"   - species names and their classifiers
        "error"         - a brief message naming each species the algorithm
                          couldn't find locations for
        "verbose-error" - the same, but including the block of text that was
                          searched for each failing species

    Raises a ValueError if the genus isn't found in the treatment.
    """
    text = load_treatment(treatment)
    genus = genus_in(text)
    if not genus:
        raise ValueError("No genus was found!")
    # Accumulate each category as a list of lines; join once at the end.
    classifier_lines = []
    location_lines = []
    errors = []
    verbose_errors = []
    for block, name in partition(text, genus):
        classifier_lines.append(f'{name}, {ids_in(block)}')
        locs = '\n'.join(f'{name}, {loc}' for loc in locs_in(block))
        if locs:
            location_lines.append(locs)
        else:
            errors.append(f"Couldn't find locations for {name}")
            verbose_errors.append(f"Couldn't find locations for "
                                  f"{name} in:\n\n{block}\n")
    return {'locations': '\n'.join(location_lines),
            'classifiers': '\n'.join(classifier_lines),
            'error': '\n'.join(errors),
            'verbose-error': '\n'.join(verbose_errors)}
def load_treatment(fn, encoding='utf-8'):
    """Load a treatment pdf with textract and return its text.

    Parameters:
        fn       - the file name of the treatment
        encoding - the encoding to decode the extracted bytes with
                   (defaults to utf-8)
    """
    full_path = Path.cwd() / fn
    raw = textract.process(str(full_path), encoding=encoding)
    return raw.decode(encoding)
# regex patterns
# --- Genus pattern ---
#
# Assumes that the file contains a genus header in the following format:
#
#     n. GENUS
#
# where n is an arbitrary natural (optionally suffixed with letters) and
# GENUS is written in all-caps.  GENUS doesn't necessarily end the line.
genus_pattern = re.compile(r'^[ ]*\d+[a-z]*\.[ ]*([A-Z]+)\s+',
                           flags=re.MULTILINE)

def genus_in(treatment):
    """Return the genus name found in *treatment*.

    If no genus header can be found, an empty string is returned.
    """
    match = genus_pattern.search(treatment)
    if match is None:
        return ""
    # The header is all-caps; the match group is guaranteed [A-Z]+, so
    # capitalize() yields the conventional "Titlecase" genus name.
    return match[1].capitalize()
def partition(treatment, genus):
    """Yield a (block, name) pair for each species* in the treatment.

    *Note that this includes subspecies: when a species block contains
    subspecies blocks, one pair is yielded per subspecies instead of a
    single pair for the whole species.

    treatment - the treatment text (a string)
    genus     - the genus name (a string)
    """
    # Find all the species names in the treatment and reorder them in the
    # order they appear in the text.  Key entries are prefixed "n. ", so
    # sorting on int(prefix) recovers the textual order.
    name_gens = [keys_in(subgroup, genus) for subgroup in subgroups(treatment)]
    names = sorted(itertools.chain(*name_gens),
                   key=lambda s: int(s.split('.')[0]))
    # We want to remove the number before each name and also remove any
    # duplicates while preserving order. OrderedDict can acheive this
    # (keeps only "<genus> <species>" -- words [1:3] of each key entry).
    names = (' '.join(name.split(' ')[1:3]).strip() for name in names)
    names = OrderedDict.fromkeys(names).keys()
    for block, name in species_blocks(treatment, names):
        # each species block may have subspecies; prefer yielding those
        has_subspecies = False
        for sub_block, sub_name in subspecies_blocks(block, name):
            has_subspecies = True
            yield sub_block, sub_name
        if not has_subspecies:
            yield block, name
def subgroups(treatment):
    """Generate each subgroup block of the treatment, in order.

    A subgroup is the span of text between two genus headers (see
    genus_pattern).  Raises ValueError if no header was ever passed,
    i.e. the treatment contains no recognizable genus.
    """
    # Find all occurences of genus headers
    headers = list(genus_pattern.finditer(treatment))
    i, j = 0, 0
    # If there are subgroups, the first header is for the entire treatement and
    # there's no species key before the header for the first subgroup, so take
    # the first header out of the list
    if len(headers) > 1:
        headers = headers[1:]
    for next_header in headers:
        # Update j to the start of the current header: we're really yielding
        # the previous match
        j = next_header.start()
        # If the block starts at index 0, then we haven't even reached the first
        # subgroup block, so don't yield yet
        if i > 0:
            yield treatment[i:j]
        # Update i to the start of the current header: on the next iteration
        # it will become the start of the previous header and j will be the
        # start of the current header.
        i = j
    # Once this is encountered, all info is irrelevant for this program
    try:
        k = treatment.lower().index("other reference")
    # str.index raises ValueError when the marker is absent; previously a
    # bare except: also hid unrelated errors (e.g. KeyboardInterrupt).
    except ValueError:
        k = -1
    if i > 0:
        yield treatment[j:k]
    # If there were no matches, then a genus couldn't be found
    else:
        raise ValueError("No genus was found!")
def keys_in(subgroup, genus):
    """Generate all species names from the species key in a subgroup block.

    subgroup - the subgroup block containing the species
    genus    - the genus of the species
    """
    found_any = False
    for match in build_key_pattern(genus).finditer(subgroup):
        found_any = True
        yield match[0]
    if found_any:
        return
    # It's possible that the text has no species key - this happens when
    # there's only one species.  Fall back to the species introduction and
    # synthesize a key entry for it.
    intro = build_intro_pattern(genus).search(subgroup)
    if intro is None:
        raise ValueError('No species found!')
    yield '1. ' + ' '.join(intro.groups())
def species_blocks(treatment, names):
    """Generate all species blocks* and names in treatment.

    *Note that this includes all subspecies if any.

    treatment - the treatment text
    names     - an ordered iterable of "<genus> <species>" names in the
                order they appear in the treatment

    Raises ValueError at the end if any name was malformed, missing, or
    found out of order; valid blocks found before that are still yielded.
    """
    error = ''
    i, j = 0, 0
    # Split the whole text into blocks based on the introduction to each subsp.
    for next_name in names:
        # split the name up into its individual parts in order to pass once
        # again into the intro_pattern builder, this time compiling to look
        # for a specific species.
        if len(next_name.split(' ')) > 2:
            if error:
                error += '\n'
            error += f'"{next_name}" is too long: expected 2 words!'
            continue
        genus, species = next_name.split(' ')
        intro_pattern = build_intro_pattern(genus, species=species)
        intro = intro_pattern.search(treatment)
        # Produce error message if species introduction couldn't be found
        if not intro:
            if error:
                error += '\n'
            error += f'Could not find species introduction for "{next_name}"'
            continue
        j = intro.start()
        # If i > j, then something went wrong when we reordered the search
        # results.
        if i > j:
            if error:
                error += '\n'
            error += f'When searching in {next_name}: Indices ({i}, {j}) are '\
                      'out of order!'
        # If the block starts at index 0, then we haven't even reached the first
        # species block, so don't yield yet.
        # NOTE(review): `name` is only bound after the first successful
        # iteration; i > 0 implies at least one prior success, so this
        # appears safe -- but confirm for treatments whose first
        # introduction starts at offset 0.
        elif i > 0:
            yield treatment[i:j], name
        name = next_name
        i = j
    # Finally yield the "current" match (the last match).
    # NOTE(review): uses "OTHER REFERENCES" while subgroups() matches the
    # lowercase "other reference" -- presumably both markers occur; verify.
    try:
        k = treatment.index("OTHER REFERENCES")
    except ValueError:
        k = -1
    if i > 0:
        yield treatment[j:k], name
    if error:
        error += "\nErrors occured while partitioning species blocks!"
        raise ValueError(error)
def subspecies_blocks(block, species):
    """Generate all subspecies blocks in a species block, if any.

    block   - the species block to look in
    species - the species name of the form "<genus> <species>"

    Yields nothing when the block has no subspecies introductions.
    Raises ValueError at the end if any indices came out of order.
    """
    if len(species.split(' ')) > 2:
        raise ValueError(f'"{species}" is too long: expected 2 words!')
    genus, species = species.split(' ')
    # Build the intro pattern to specifically look for subspecies
    intro_pattern = build_intro_pattern(genus, species=species,
                                        subspecies=r'[a-z]+')
    error = ''
    i, j = 0, 0
    name = ''
    # go through each subspecies introduction match
    for intro in intro_pattern.finditer(block):
        # Start of the current introduction
        j = intro.start()
        # Only yield the previous match when we've actually found it
        if i > 0:
            if i > j:
                if error:
                    error += '\n'
                error += f'When searching in "{name}" block: Indices ({i}, {j}'\
                          ') are out of order!'
            yield block[i:j], name
        # The name should include the entire species, including the subspecies
        # The intro pattern should have matched all of these.
        name = ' '.join(intro.groups())
        i = j
    # It's possible that there are no subspecies. The intro pattern wouldn't
    # have found anything and i would have never been incremented. If this is
    # the case we don't want to yield anything, otherwise yield the rest of
    # subspecies block until the end of the species block.
    # NOTE(review): block[j:-1] drops the final character of the block --
    # looks intentional (trailing newline?) but confirm.
    if i > 0:
        yield block[j:-1], name
    if error:
        error += "\nErrors occured when partitioning the treatment"
        raise ValueError(error)
def build_key_pattern(genus):
    """Compile the regex that matches species-key index lines for *genus*.

    Index lines are assumed to look like (terminated by a newline):

        n. <genus> <species> [(in part)]

    where n is an arbitrary natural, genus is the given name, species is a
    lowercase word and "(in part)" doesn't necessarily appear.

    The compiled pattern captures two groups:
      1. the number that orders how the species appears in the text
      2. the genus and species name
    """
    number_part = r'(\d+)\.[ ]*'
    name_part = r'(' + genus + r' (?:x\\)?[a-z\-]+)'
    tail_part = r'(?: \(in part\))?\s*\n'
    return re.compile(number_part + name_part + tail_part,
                      flags=re.MULTILINE)
def build_intro_pattern(genus, species=r'(?:x\\)?[a-z\-]+', subspecies=''):
    """Compile the regex matching a species introduction.

    Parameters:
        genus      - of the species
        species    - specific species to look for (defaults to any)
        subspecies - the subspecies to look for (defaults to empty string)

    The compiled pattern captures up to three groups:
        1 - the genus name
        2 - the species name
        3 - the subspecies name (only if *subspecies* was given)

    An introduction is assumed to be formatted as:

        n[a]*. Species name {text} [(subsp|var). name] {text}

    where n is an arbitrary natural and a an alphabetical character.
    """
    parts = [r'^\d+']
    # If the subspecies was specified, the number carries an alphabetical
    # suffix (e.g. "12a.")
    if subspecies:
        parts.append(r'[a-z]+')
    # A concrete hybrid species like "x\foo" needs its backslash doubled
    # before being embedded in the pattern (the default is pre-escaped,
    # which the '[a-z' check detects).
    if 'x\\' in species and '[a-z' not in species:
        species = species.replace('x\\', 'x\\\\')
    # 'n[a]*. Genus species' part of the introduction
    parts.append(r'\.[ ]*(' + genus + r') (' + species + r')')
    # '{text} (subsp|var). name' part, only when a subspecies is wanted
    if subspecies:
        parts.append(r'.*?(?:subsp|var)\.\s*(' + subspecies + r')')
    return re.compile(''.join(parts), flags=re.MULTILINE | re.DOTALL)
# --- Finding classifiers ---
#
# Classifier letters (C, E, F, I, W):
#   - always terminate the line
#   - are always set off by spaces (never punctuation - before or after)
#   - may be preceded by a common name ("* Common name") or, rarely, a
#     "(parenthetical statement)" between the date and the classifiers
#   - may be absent entirely
id_pattern = re.compile(r'([CEFIW ]+)\s*$', re.MULTILINE)

def ids_in(block):
    """Return the classifier letters for a species.

    Parameters:
        block - a block of text (a string) with its scope limited to a
                single species or subspecies

    Returns the stripped classifiers from the first line that has any,
    or an empty string if there are no classifiers for this species.
    """
    for line in block.split('\n'):
        matches = id_pattern.findall(line)
        # The pattern anchors at the end of the line; return the last
        # match on the first line that produced one.
        if matches:
            return matches[-1].strip()
    # No line ended in classifier letters
    return ''
# --- Finding provinces ---
#
# abbreviations and full state names are listed in geography.txt and
# locations.txt so grab each of them
# I could just use a string, but I want to '|'.join(loc_names) so it'll be
# easier to '|' the two to gether
loc_names = []
for fn in ('geography.txt', 'locations.txt'):
path = Path.joinpath(file_dir, fn)
with open(path) as f:
s = f.read()
# these are special regex charaters, so escape them wherever they
# appear
for r in ('.', '(', ')'):
s = s.replace(r, '\\'+r)
# I want to '|' each province name, but since they have non-alphabetic
# characters I need to group each name w/o capturing, hence the (?:)
#
# Also cut off the last blank line
loc_names.extend('(?:'+m+')' for m in s.split('\n')[:-1])
# If one string is a substring of another, regex will match with whatever
# comes first in the pattern. We want to match the longest substrings possible
# so sort the location names by length
#
# Also replace spaces in each name with arbitrary whitespace
loc_names = sorted((loc.replace(' ', r'\s*') for loc in loc_names),
key=len, reverse=True)
# Assumes locations have the following format:
#
# {<beginning of line>, <;> or <,>} {location name (may include newlines)}{<;>,
# <,> or <end of line>}
loc_pattern_str = r'[^;,]\s*('+'|'.join(loc_names)+r')(?:[;,]|\s*?$|\s*?\n)'
loc_pattern = re.compile(loc_pattern_str, re.MULTILINE)
# --- Location Paragraph Pattern ---
#
# Assumes That locations that a species appears in meets the following format:
#
# 0{arbitrary white space}m; {locations on an abitrary number of lines where
# countries are separated by ';' and states/provinces are separated by ','}.\n
#
# The line doesn't necessarily begin at 0, but a line does end at '.\n'
loc_text_pattern = re.compile(r'0[\)\]]?\s+?m;.*?(?<!Nfld|Labr|..St)'+
r'\.\s*?(?:\n|$)', re.DOTALL|re.MULTILINE)
loc_exception_pattern = re.compile(r'(?:Flowering.*?;|introduced;)' \
r'.*?\.\s*?(?:\n|$)', re.DOTALL|re.MULTILINE)
# load the key which maps full state and province names to their abbreviations
key_fn = 'key.json'
key_path = Path.joinpath(file_dir, key_fn)
key = {}
with open(key_path) as f:
key = json.load(f)
def locs_in(block):
    """Generate the locations (state/province names) a species appears in.

    Parameters:
        block - a block of text (a string) with its scope limited to a
                single species or subspecies

    Full names are converted to their abbreviations via the module-level
    `key` mapping; combined "Nfld. and Labr." entries are split.
    """
    # First find the locations paragraph
    loc_match = loc_text_pattern.search(block)
    if not loc_match:
        loc_match = loc_exception_pattern.search(block)
    loc_text = loc_match[0] if loc_match else ""
    # Find all states and provinces in the paragraph.  "Baja California"
    # is stripped first so it can't be mistaken for "California".
    # (raw string: '\s' in a non-raw literal is a deprecated escape)
    locs = loc_pattern.findall(
        re.sub(r'[Bb]aja\s*[Cc]alifornia', '', loc_text))
    for loc in locs:
        # collapse all internal whitespace runs to single spaces
        loc = ' '.join(loc.split())
        # convert full state and province names to their abbreviations
        if loc in key:
            loc = key[loc]
        # Handle Nfld/Labr differentiation:
        # if one is specified explicitly, yield just that one
        if '(Labr.)' in loc:
            yield 'Labr.'
        elif '(Nfld.)' in loc:
            yield 'Nfld.'
        # otherwise yield both if both appear in a combined entry
        elif 'Nfld' in loc and 'Labr' in loc:
            yield 'Nfld.'
            yield 'Labr.'
        # now that these cases have been handled, yield as usual
        elif loc:
            yield loc
if __name__ == '__main__':
main() | en | 0.872845 | # TODO: Recursively iterate through a directory # Data to extract: # species name | states and provinces it appears in | classifier # Build the command line argument parser Extract data from genus treatment pdfs of "Flora of North America The csv ouptut files should have the following format: <genus name>, <locations appeared in>, <classifier> Example usage: python -m florana.extract -A -o data.csv # The user specified to parse all pdf files in the directory # The user specified the files manually # Brief error message for program ouput to console # Verbose error message for error.log # name the csv file after the pdf input # If the extracting algorithm couldn't find locations, keep track of # the error messages # If the user specified a single output file, compile all the # lines into a single string and write to a file later # If the user didn't specify a single output file write the files # for each treatment as we go # if the user specified a single output file, now is when we write it # locations file # classifiers file # The user may have alread include the file extension # If the user didn't include the file extension, add it Extract the data from the genus treatment. Parameters: treatment - a pdf file name of the genus treatment. data_type - "locations" or "classifiers" Returns a dict of results with the following format "locations" - a string of species names and locations they appear in "classifiers" - a string of species names and their classifiers "error" - a brief error message stating which species the algorithm couldn't find locations for "verbose-error" - an error message stating which species the algorithm couldn't find locations for as well as the block of text that the algorithm searched in for the locations Raises a Value error if the genus isn't found in the treatment. 
Load the treatment using textract Parameters: fn - the file name of the treatment encoding - the encoding of the file (defaults to utf-8) # regex patterns # --- Genus pattern --- # # Assumes that the file contains the genus name in the following format: # # n. GENUS # # Where n is an arbitrary natural and GENUS is all-caps. GENUS doesn't # necessarily end the line Return the genus name in the given treatment string. If the genus couldn't be found, an empty string is returned. # If the genus name couldn't be found, return an empty string # Else, get the first match and de-"caps-lock" it Yield the block and name in treatment associated with each species*. *Note that this includes subspecies. treatment - the treatment text (a string) species - a list of species names # Find all the species names in the treatment and reorder them in the order # they appear in the text # We want to remove the number before each name and also remove any # duplicates while preserving order. OrderedDict can acheive this # each species block may have subspecies Generate each subgroup block in order. # Find all occurences of genus headers # If there are subgroups, the first header is for the entire treatement and # there's no species key before the header for the first subgroup, so take # the first header out of the list # Update j to the start of the current header: we're really yielding # the previous match # If the block starts at index 0, then we haven't even reached the first # subgroup block, so don't yield yet # Update i to the start of the current header: on the next iteration # it will become the start of the previous header and j will be the # start of the current header. # Once this is encountered, all info is irrelevant for this program # If there were no matches, then a genus couldn't be found Generate all species names from the species key in a subgroup block. 
subgroup - the subgroup block containing the species genus - of the species # it's possible that the text has no species key - this happens when # there's only one species # Compile the intro pattern without knowing what the species is. Since # there's only one species this is fine. Generate all species blocks* and names in treatment. *Note that this includes all subspecies if any. treatment - the treatment text names - an ordered list of all species names that appear in the treatment # Split the whole text into blocks based on the introduction to each subsp. # split the name up into its individual parts in order to pass once # again into the intro_pattern builder, this time compiling to look # for a specific species. # Produce error message if species introduction couldn't be found # If i > j, then something went wrong when we reordered the search # results. # If the block starts at index 0, then we haven't even reached the first # species block, so don't yield yet # Finally yield the "current" match (the last match). Generate all subspecies blocks in a species block, if any. block - the species block to look in species - the species name of the form "<genus> <species>" # Build the intro pattern to specifically look for subspecies # go through each subspecies introduction match # Start # Only yield the previous match when we've actually found it # The name should include the entire species, including the subspecies # The intro pattern should have matched all of these. # It's possible that there are no subspecies. The intro pattern wouldn't # have found anything and i would have never been incremented. 
If this is # the case we don't want to yield anything, otherwise yield the rest of # subspecies block until the end of the species block Build a regex pattern for the genus key Parameters: genus - the genus of the file (a string) The pattern has one subgroup: the genus and species name # --- Species name from index line --- # # Relies on the assumption that index lines have the following format # # n. <genus> <species> [(in part)]\n # # Where n is an arbitrary natural, genus is specified, species is a # lowercase word and "(in part)" doesn't necessarily appear. # # The key pattern matches two subgroups: # 1. The number that orders how the species appears in the text # 2. The genus and species name Build a regex pattern for a species introduction. Paramters: genus - of the species species - specific species to look for (defaults to any) subspecies - the subspecies to look for (defaults to empty string) The regex pattern has three potenital subgroups. 1 - the genus name 2 - the species name 3 - the subspecies name (if specified) # --- Species Introduction --- # # Relies on the assumption that a species introduction is formatted as: # # n[a]*. Species name {arbitrary text} [(subsp|var). name] {arbitrary text} # # Where n is an arbitrary natural and a is an arbitrary alphabetical # character. # This will match the "n[a]*" part of the inroduction # if the subspecies was specified, we know there must be alphabetical # numbering on them # This will now match the 'n[a]*. Species name' part of the introduction # if the subspecies was specified, we know there must be some descriptor # followed by 'subsp.' and the subspecies name # # i.e. the '{arbitrary text} [(subsp|var). 
name] {arbitrary text}' part of # the introduction is now matched # --- Finding classifiers --- # # Always terminates the line # Always set off by spaces (never punctuation - before or after) # If a common name (of the form "* Common name") appears, there will be # text between the date and classifiers # Otherwise it's possible to have a "(parenthetical statement)" between # the date and the classifier, but usually not # It's possible that there are no classifiers Finds the classifiers for a species. Parameters: block - a block of text (a string) with its scope limited to a single species or subspecies Returns an empty string if there are no classifiers for this species. # If matches were found return the last match (the pattern is meant to # be searched from the end of the line) # if no matches found, there are no classifiers; return an empty string # --- Finding provinces --- # # abbreviations and full state names are listed in geography.txt and # locations.txt so grab each of them # I could just use a string, but I want to '|'.join(loc_names) so it'll be # easier to '|' the two to gether # these are special regex charaters, so escape them wherever they # appear # I want to '|' each province name, but since they have non-alphabetic # characters I need to group each name w/o capturing, hence the (?:) # # Also cut off the last blank line # If one string is a substring of another, regex will match with whatever # comes first in the pattern. 
We want to match the longest substrings possible # so sort the location names by length # # Also replace spaces in each name with arbitrary whitespace # Assumes locations have the following format: # # {<beginning of line>, <;> or <,>} {location name (may include newlines)}{<;>, # <,> or <end of line>} # --- Location Paragraph Pattern --- # # Assumes That locations that a species appears in meets the following format: # # 0{arbitrary white space}m; {locations on an abitrary number of lines where # countries are separated by ';' and states/provinces are separated by ','}.\n # # The line doesn't necessarily begin at 0, but a line does end at '.\n' # load the key which maps full state and province names to their abbreviations Generates the locations that a species appears in. Parameters: block - a block of text (a string) with its scope limited to a single species or subspecies # First find the locations paragraph # find all states and provinces in the paragraph # remove duplicates #locs = {key[loc] if loc in key else loc for loc in matches} # in replace all whitespace with a single space # convert full state and province names to their abbreviations # Handle Nfld/Labr differentiation # if specified, yield the relevant one # otherwise yield both if both # now that these cases have been handled, yield as usual | 3.086779 | 3 |
libscampi/contrib/cms/communism/storage.py | azpm/django-scampi-cms | 2 | 6618393 | <filename>libscampi/contrib/cms/communism/storage.py
from django.core.files.storage import Storage
class URLStorage(Storage):
    """A read-only Django storage backend whose stored names are URLs.

    url(name) simply echoes the name back; every operation that would
    touch an actual filesystem is unsupported and raises
    NotImplementedError.
    """

    def delete(self, name):
        # Remote URLs cannot be deleted through this backend
        raise NotImplementedError()

    def exists(self, name):
        # Every URL is assumed to exist; no remote check is performed
        return True

    def listdir(self, path):
        # There is no directory structure behind a bare URL
        raise NotImplementedError()

    def size(self, name):
        # Sizes are unknown for remote URLs; report zero
        return 0

    def url(self, name):
        # The stored name is already the URL
        return name

    def _open(self, name, mode):
        raise NotImplementedError()

    def _save(self, name, content):
        raise NotImplementedError()

    def get_available_name(self, name):
        raise NotImplementedError()

    def get_valid_name(self, name):
        raise NotImplementedError()
| <filename>libscampi/contrib/cms/communism/storage.py
from django.core.files.storage import Storage
class URLStorage(Storage):
def delete(self, name):
raise NotImplementedError()
def exists(self, name):
return True
def listdir(self, path):
raise NotImplementedError()
def size(self, name):
return 0
def url(self, name):
return name
def _open(self, name, mode):
raise NotImplementedError()
def _save(self, name, content):
raise NotImplementedError()
def get_available_name(self, name):
raise NotImplementedError()
def get_valid_name(self, name):
raise NotImplementedError()
| none | 1 | 2.394227 | 2 | |
dht22.py | nicklela/cloud4rpi | 0 | 6618394 | # -*- coding: utf-8 -*-
import logging
import Adafruit_DHT
import time
DHT_PIN = 4
RETRY_COUNT=5
HUMIDITY_MAX = 100
TEMPERATURE_MAX = 100
class DHT22(object):
    """Wrapper around an Adafruit DHT22 humidity/temperature sensor.

    Readings are retried up to RETRY_COUNT times; out-of-range values are
    discarded and the last known-good reading is returned instead.
    """

    def __init__(self, dht_pin=DHT_PIN):
        """Create a sensor bound to the given data pin.

        dht_pin - GPIO pin number the sensor's data line is wired to
                  (defaults to DHT_PIN).
        """
        self.sensor = Adafruit_DHT.DHT22
        # Bug fix: the pin argument was previously ignored in favour of the
        # module constant DHT_PIN, making the parameter useless.
        self.pin = dht_pin
        self.humidity = 0
        self.temperature = 0

    def read(self):
        """Read the sensor, retrying on failure.

        Returns a (humidity, temperature) tuple.  After RETRY_COUNT failed
        attempts both values fall back to 0; readings above the *_MAX
        limits are logged and discarded, returning the previous values.
        """
        humidity, temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)
        retry_count = 0
        while(humidity is None or temperature is None):
            retry_count += 1
            if(retry_count > RETRY_COUNT):
                humidity = 0
                temperature = 0
                logging.error("No response from DHT22")
                break
            time.sleep(1)
            # logging.warn() is a deprecated alias of logging.warning()
            logging.warning("Failed to read DHT22, retry in {0}/{1}".format(retry_count, RETRY_COUNT))
            humidity, temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)
        if humidity > HUMIDITY_MAX:
            logging.error("Humidity is invalid:{0}".format(humidity))
        else:
            self.humidity = humidity
        if temperature > TEMPERATURE_MAX:
            logging.error("Temperature is invalid:{0}".format(temperature))
        else:
            self.temperature = temperature
        return self.humidity, self.temperature

    def read_temperature(self):
        """Return the current temperature, rounded to 2 decimal places."""
        humidity, temperature = self.read()
        return round(temperature, 2)

    def read_humidity(self):
        """Return the current relative humidity, rounded to 2 decimals."""
        humidity, temperature = self.read()
        return round(humidity, 2)
| # -*- coding: utf-8 -*-
import logging
import Adafruit_DHT
import time
DHT_PIN = 4
RETRY_COUNT=5
HUMIDITY_MAX = 100
TEMPERATURE_MAX = 100
class DHT22(object):
def __init__(self, dht_pin = DHT_PIN):
self.sensor = Adafruit_DHT.DHT22
self.pin = DHT_PIN
self.humidity = 0
self.temperature = 0
def read(self):
humidity, temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)
retry_count = 0
while(humidity is None or temperature is None):
retry_count += 1
if(retry_count > RETRY_COUNT):
humidity = 0
temperature = 0
logging.error("No response from DHT22")
break
time.sleep(1)
logging.warn("Failed to read DHT22, retry in {0}/{1}".format(retry_count, RETRY_COUNT))
humidity, temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)
if humidity > HUMIDITY_MAX:
logging.error("Humidity is invalid:{0}".format(humidity))
else:
self.humidity = humidity
if temperature > TEMPERATURE_MAX:
logging.error("Temperature is invalid:{0}".format(temperature))
else:
self.temperature = temperature
return self.humidity, self.temperature
def read_temperature(self):
humidity, temperature = self.read()
return round(temperature, 2)
def read_humidity(self):
humidity, temperature = self.read()
return round(humidity, 2)
| en | 0.769321 | # -*- coding: utf-8 -*- | 3.106101 | 3 |
alivedb_integrity.py | techcoderx/AvalonStream | 1 | 6618395 | <filename>alivedb_integrity.py
# TODO Versioning for supported releases only

# SHA-256 digests of the bundled AliveDB source files, keyed by their
# repo-relative path.  Used to verify the files have not been tampered with.
integrity = {
    'package-lock.json': '9e951eb20a8c36bb38a226d6c5d696b44e5283b9998100477e6b77143765d702',
    'package.json': '34667abf424c7d5d5fa1e992dc6a8310752d40206ddede4d989413c7ee70a5d4',
    'src/alivedb.js': 'fc303a65eaa37411601c96ff54e082bca6f2c2924884678a916b7b6cbc324fd4',
    'src/config.js': 'e24c519311e7b5a63deb1ce2f3fdcc96e69df686c5da2f382e7ee58aa1a2a669',
    'src/cryptography.js': 'b32e80dd1c4b67fed8d452b0749bf6ab8a21fb43dcff9fa51ca9fa94f668010d',
    'src/helper.js': 'd4bbb9972a9e717ec7ddabad1c1496aa81be02ea528ea48a380b4391bec771a9',
    'src/index.js': '056f301cee1afa1375249f083100c82c28a84eb3e4ae44ee3185e6eb5cf5867f',
    'src/middleware.js': 'fee086814c64b60acc7503ffc11cad20d7e667fe4bcbd14314ddc4b4d18b049b',
    'src/server.js': 'a1e0d54df6c5074cf1c76eb738f2207985028498a508878560218be4929cbcfe',
    'src/validator.js': '842460b872fc287983661ad537aecd1974d3c44febda9f9687a5c221676a38b8'
}

# AliveDB release this integrity manifest corresponds to
version = '1.0.0a'
# TODO Versioning for supported releases only
integrity = {
'package-lock.json': '9e951eb20a8c36bb38a226d6c5d696b44e5283b9998100477e6b77143765d702',
'package.json': '34667abf424c7d5d5fa1e992dc6a8310752d40206ddede4d989413c7ee70a5d4',
'src/alivedb.js': 'fc303a65eaa37411601c96ff54e082bca6f2c2924884678a916b7b6cbc324fd4',
'src/config.js': 'e24c519311e7b5a63deb1ce2f3fdcc96e69df686c5da2f382e7ee58aa1a2a669',
'src/cryptography.js': 'b32e80dd1c4b67fed8d452b0749bf6ab8a21fb43dcff9fa51ca9fa94f668010d',
'src/helper.js': 'd4bbb9972a9e717ec7ddabad1c1496aa81be02ea528ea48a380b4391bec771a9',
'src/index.js': '056f301cee1afa1375249f083100c82c28a84eb3e4ae44ee3185e6eb5cf5867f',
'src/middleware.js': 'fee086814c64b60acc7503ffc11cad20d7e667fe4bcbd14314ddc4b4d18b049b',
'src/server.js': 'a1e0d54df6c5074cf1c76eb738f2207985028498a508878560218be4929cbcfe',
'src/validator.js': '842460b872fc287983661ad537aecd1974d3c44febda9f9687a5c221676a38b8'
}
version = '1.0.0a' | en | 0.7745 | # TODO Versioning for supported releases only | 1.492116 | 1 |
raysect/core/math/function/float/function3d/interpolate/tests/__init__.py | raysect/source | 71 | 6618396 | # from .test_interpolator_3d import *
| # from .test_interpolator_3d import *
| en | 0.281907 | # from .test_interpolator_3d import * | 1.027811 | 1 |
labs/lab2/expFitTest.py | MITLLRacecar/racecar-daniel-chuang | 0 | 6618397 | <filename>labs/lab2/expFitTest.py
# Fit a decaying-exponential model to measured calibration data and plot
# the data alongside the fitted curve.
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt  # explicit import instead of `from pylab import *`

# Measured calibration points.
# NOTE(review): presumably raw sensor readings vs. distance -- confirm units.
x_data = np.array([17848, 4984.0, 2276, 1299, 822])
y_data = np.array([40, 80, 120, 160, 200])

def func(x, a, b, c, d):
    """Decaying exponential model: a * exp(-c * (x - b)) + d."""
    return a * np.exp(-c * (x - b)) + d

# Fit with an explicit initial guess (p0) to help convergence
popt, pcov = curve_fit(func, x_data, y_data, p0=[100, 400, 0.001, 0])
print(popt)

plt.plot(x_data, y_data)
# Use a separate name for the fitted-curve abscissa; the original reused
# (shadowed) `x`, the measured data array.
x_fit = np.linspace(400, 6000, 10000)
plt.plot(x_fit, func(x_fit, *popt))
plt.show()
from pylab import *
from scipy.optimize import curve_fit
x = np.array([17848, 4984.0, 2276, 1299, 822])
y = np.array([40, 80, 120, 160, 200])
def func(x, a, b, c, d):
return a*np.exp(-c*(x-b))+d
popt, pcov = curve_fit(func, x, y, [100,400,0.001,0])
print(popt)
plot(x,y)
x=linspace(400,6000,10000)
plot(x,func(x,*popt))
show() | none | 1 | 2.568237 | 3 | |
app.py | dhavalocked/alchemy-backend | 0 | 6618398 | # IMPORTS
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
import os
import numpy as np
import flask
import io
import cv2
import numpy as np
from base64 import b64encode
from os import makedirs
from os.path import join, basename
import os
from sys import argv
import json
from flask import Flask, render_template, request
from flask_uploads import UploadSet, configure_uploads, IMAGES
from scannable_paper import getResponseFromImage, evaluateOmrQuestion
from werkzeug.utils import secure_filename
import os
# CONFIG
photos = UploadSet('photos', IMAGES)
# CONFIG
app = Flask(__name__, instance_relative_config=True)
#from waitress import serve
# Uploaded photos land under static/ so they are directly servable.
app.config['UPLOADED_PHOTOS_DEST'] = 'static/'
configure_uploads(app, photos)
# APP_SETTINGS names the config object to load (Flask from_object semantics).
app.config.from_object(os.environ['APP_SETTINGS'])
# NOTE(review): these imports sit after app configuration -- presumably
# tools/ocr read app settings at import time; confirm before moving them up.
from tools import upload_file_to_s3,upload_filename_to_s3
from ocr import ocr_prediction
ALLOWED_EXTENSIONS = app.config["ALLOWED_EXTENSIONS"]
# ROUTES
def allowed_file(filename, allowed=None):
    """Return True if *filename* has an extension contained in *allowed*.

    ``allowed`` defaults to the app-level ALLOWED_EXTENSIONS set, so the
    existing one-argument call sites behave exactly as before; passing an
    explicit collection makes the check reusable and testable.
    """
    if allowed is None:
        allowed = ALLOWED_EXTENSIONS
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in allowed
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'POST':
# There is no file selected to upload
if "user_file" not in request.files:
return "No user_file key in request.files"
file = request.files["user_file"]
# There is no file selected to upload
if file.filename == "":
return "Please select a file"
# File is selected, upload to S3 and show S3 URL
if file and allowed_file(file.filename):
file.filename = secure_filename(file.filename)
output = upload_file_to_s3(file, app.config["S3_BUCKET"])
return str(output)
else:
return render_template("index.html")
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
responses = []
q_types = ["ocr","ocr", "ocr", "omr","omr"]
idx_char_omr = { 1 : "A", 2 : "B", 3 : "C", 4: "D"}
print(request.files)
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST" and 'photo' in request.files:
filename = photos.save(request.files['photo'])
success , answers, question_array_names= getResponseFromImage(filename)
if success == True:
for i in range(len(answers)):
q_img = "answers"+str(i+1)+".png"
if q_types[i] == "omr":
img = cv2.imread(os.path.join('./answers',q_img))
detected_omr_ans = evaluateOmrQuestion(img);
print("detected",detected_omr_ans)
responses.append(idx_char_omr[detected_omr_ans[0] ] )
if q_types[i] =="ocr":
img = cv2.imread(os.path.join('./answers',q_img))
responses.append(ocr_prediction(img))
questions = []
for i in range(len(question_array_names)):
q_img = question_array_names[i]
file = os.path.join('questions',q_img)
output = upload_filename_to_s3(file,q_img, app.config["S3_BUCKET"])
questions.append(str(output))
data["predictions"] = responses
data["questions"] = questions
# indicate that the request was a success
data["success"] = True
#print(question_array_names)
else :
data["message"] = "Not able to detect regions properly"
print(data)
# return the data dictionary as a JSON response
return flask.jsonify(data) | # IMPORTS
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
import os
import numpy as np
import flask
import io
import cv2
import numpy as np
from base64 import b64encode
from os import makedirs
from os.path import join, basename
import os
from sys import argv
import json
from flask import Flask, render_template, request
from flask_uploads import UploadSet, configure_uploads, IMAGES
from scannable_paper import getResponseFromImage, evaluateOmrQuestion
from werkzeug.utils import secure_filename
import os
# CONFIG
photos = UploadSet('photos', IMAGES)
# CONFIG
app = Flask(__name__, instance_relative_config=True)
#from waitress import serve
# Uploaded photos land under static/ so they are directly servable.
app.config['UPLOADED_PHOTOS_DEST'] = 'static/'
configure_uploads(app, photos)
# APP_SETTINGS names the config object to load (Flask from_object semantics).
app.config.from_object(os.environ['APP_SETTINGS'])
# NOTE(review): these imports sit after app configuration -- presumably
# tools/ocr read app settings at import time; confirm before moving them up.
from tools import upload_file_to_s3,upload_filename_to_s3
from ocr import ocr_prediction
ALLOWED_EXTENSIONS = app.config["ALLOWED_EXTENSIONS"]
# ROUTES
def allowed_file(filename):
    """True when *filename* carries an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'POST':
# There is no file selected to upload
if "user_file" not in request.files:
return "No user_file key in request.files"
file = request.files["user_file"]
# There is no file selected to upload
if file.filename == "":
return "Please select a file"
# File is selected, upload to S3 and show S3 URL
if file and allowed_file(file.filename):
file.filename = secure_filename(file.filename)
output = upload_file_to_s3(file, app.config["S3_BUCKET"])
return str(output)
else:
return render_template("index.html")
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
responses = []
q_types = ["ocr","ocr", "ocr", "omr","omr"]
idx_char_omr = { 1 : "A", 2 : "B", 3 : "C", 4: "D"}
print(request.files)
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST" and 'photo' in request.files:
filename = photos.save(request.files['photo'])
success , answers, question_array_names= getResponseFromImage(filename)
if success == True:
for i in range(len(answers)):
q_img = "answers"+str(i+1)+".png"
if q_types[i] == "omr":
img = cv2.imread(os.path.join('./answers',q_img))
detected_omr_ans = evaluateOmrQuestion(img);
print("detected",detected_omr_ans)
responses.append(idx_char_omr[detected_omr_ans[0] ] )
if q_types[i] =="ocr":
img = cv2.imread(os.path.join('./answers',q_img))
responses.append(ocr_prediction(img))
questions = []
for i in range(len(question_array_names)):
q_img = question_array_names[i]
file = os.path.join('questions',q_img)
output = upload_filename_to_s3(file,q_img, app.config["S3_BUCKET"])
questions.append(str(output))
data["predictions"] = responses
data["questions"] = questions
# indicate that the request was a success
data["success"] = True
#print(question_array_names)
else :
data["message"] = "Not able to detect regions properly"
print(data)
# return the data dictionary as a JSON response
return flask.jsonify(data) | en | 0.934864 | # IMPORTS # CONFIG # CONFIG #from waitress import serve # ROUTES # There is no file selected to upload # There is no file selected to upload # File is selected, upload to S3 and show S3 URL # initialize the data dictionary that will be returned from the # view # ensure an image was properly uploaded to our endpoint # indicate that the request was a success #print(question_array_names) # return the data dictionary as a JSON response | 2.343621 | 2 |
puq/montecarlo.py | zoidy/puq | 0 | 6618399 | """
Basic Monte Carlo Method
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
import numpy as np
from puq.util import process_data
from puq.psweep import PSweep
from logging import info, debug, exception, warning, critical
from puq.response import SampledFunc
from puq.jpickle import pickle
from puq.pdf import UniformPDF, ExperimentalPDF
class MonteCarlo(PSweep):
    """
    Class implementing Monte Carlo sampling.
    Args:
      params: Input list of :class:`Parameter`\s.
      num: Number of samples to use.
      response(boolean): Generate a response surface using the sample
        points.
      iteration_cb(function): A function to call after completion.
    """
    def __init__(self, params, num, response=True, iteration_cb=None):
        PSweep.__init__(self, iteration_cb)
        self.params = params
        num = int(num)
        self.num = num
        self.response = response
        self._start_at = 0
        if self.response:
            # To generate a complete response surface, use Uniform
            # distributions with the same range as the original distributions.
            for p in self.params:
                # BUG FIX: this check previously sat before the loop, where
                # 'p' was not yet bound, raising NameError when response=True.
                if hasattr(p, 'use_samples_val') and p.use_samples_val:
                    # When constructing a response surface, ignore
                    # use_samples_val: the surface must cover the entire
                    # range of the input parameters.
                    print("Warning: ignoring option 'use_samples_val' for {}".format(p.name))
                p.values = UniformPDF(*p.pdf.range).random(num)
        else:
            for p in self.params:
                # Only generate new samples if use_samples is false
                # (see CustomParameter in parameter.py).
                if hasattr(p, 'use_samples_val') and p.use_samples_val:
                    if np.size(p.values) < num:
                        # If the parameter carries fewer samples than the
                        # number of desired MC runs, raise. If it carries
                        # more, only the first num samples are used.
                        raise Exception("Expected {} samples for parameter {}, found {}".format(num, p.name, np.size(p.values)))
                else:
                    # If p is a ConstantParameter, p.values will all be the
                    # same number, which is the behavior we want.
                    p.values = p.pdf.random(num)
    # Yields one realization of the parameters per sample as a list of
    # (name, value, description) tuples, e.g. [('t', 1.0, ...), ...].
    def get_args(self):
        for i in xrange(self._start_at, self.num):
            yield [(p.name, p.values[i], p.description) for p in self.params]
    def _do_pdf(self, hf, data):
        """Summarize the sweep output *data*; returns (name, value) tuples."""
        if self.response:
            # The response surface was built using Uniform distributions.
            # We want the mean and deviation of the data that would have
            # been produced using the real PDFs, so compute a weighted
            # mean and deviation.
            weights = np.prod([p.pdf.pdf(p.values) for p in self.params], 0)
            tweight = np.sum(weights)
            mean = np.average(data, weights=weights)
            dev = np.sqrt(np.dot(weights, (data - mean)**2) / tweight)
            rsd = np.vstack(([p.values for p in self.params], data))
            rs = pickle(SampledFunc(*rsd, params=self.params))
            # Single-argument print() is valid on both Python 2 and 3;
            # this file previously mixed both print forms.
            print("Mean = %s" % mean)
            print("StdDev = %s" % dev)
            return [('response', rs), ('mean', mean), ('dev', dev)]
        else:
            pdf = ExperimentalPDF(data, fit=0)
            mean = np.mean(data)
            dev = np.std(data)
            print("Mean = %s" % mean)
            print("StdDev = %s" % dev)
            return [('pdf', pickle(pdf)), ('samples', data), ('mean', mean), ('dev', dev)]
    def analyze(self, hf):
        """Post-process the results referenced by *hf* via process_data."""
        debug('')
        process_data(hf, 'montecarlo', self._do_pdf)
    def extend(self, num):
        """Add *num* additional samples to an existing sweep."""
        if num <= 0:
            print("Monte Carlo extend requires a valid num argument.")
            raise ValueError
        for p in self.params:
            if self.response:
                p.values = np.concatenate((p.values, UniformPDF(*p.pdf.range).random(num)))
            else:
                if hasattr(p, 'use_samples_val') and p.use_samples_val:
                    if np.size(p.values) < self.num + num:
                        # May need to allow passing in additional custom
                        # samples; otherwise extending a run whose values
                        # were supplied manually raises here.
                        raise Exception("Not enough samples for param {}. Expected {} found {}".format(p.name,
                            self.num + num, np.size(p.values)))
                p.values = np.concatenate((p.values, p.pdf.random(num)))
        self._start_at = self.num
        self.num += num
| """
Basic Monte Carlo Method
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
import numpy as np
from puq.util import process_data
from puq.psweep import PSweep
from logging import info, debug, exception, warning, critical
from puq.response import SampledFunc
from puq.jpickle import pickle
from puq.pdf import UniformPDF, ExperimentalPDF
class MonteCarlo(PSweep):
    """
    Class implementing Monte Carlo sampling.
    Args:
      params: Input list of :class:`Parameter`\s.
      num: Number of samples to use.
      response(boolean): Generate a response surface using the sample
        points.
      iteration_cb(function): A function to call after completion.
    """
    def __init__(self, params, num, response=True, iteration_cb=None):
        PSweep.__init__(self, iteration_cb)
        self.params = params
        num = int(num)
        self.num = num
        self.response = response
        self._start_at = 0
        if self.response:
            # To generate a complete response surface, use Uniform
            # distributions with the same range as the original distributions.
            for p in self.params:
                # BUG FIX: this check previously sat before the loop, where
                # 'p' was not yet bound, raising NameError when response=True.
                if hasattr(p, 'use_samples_val') and p.use_samples_val:
                    # When constructing a response surface, ignore
                    # use_samples_val: the surface must cover the entire
                    # range of the input parameters.
                    print("Warning: ignoring option 'use_samples_val' for {}".format(p.name))
                p.values = UniformPDF(*p.pdf.range).random(num)
        else:
            for p in self.params:
                # Only generate new samples if use_samples is false
                # (see CustomParameter in parameter.py).
                if hasattr(p, 'use_samples_val') and p.use_samples_val:
                    if np.size(p.values) < num:
                        # If the parameter carries fewer samples than the
                        # number of desired MC runs, raise. If it carries
                        # more, only the first num samples are used.
                        raise Exception("Expected {} samples for parameter {}, found {}".format(num, p.name, np.size(p.values)))
                else:
                    # If p is a ConstantParameter, p.values will all be the
                    # same number, which is the behavior we want.
                    p.values = p.pdf.random(num)
    # Yields one realization of the parameters per sample as a list of
    # (name, value, description) tuples, e.g. [('t', 1.0, ...), ...].
    def get_args(self):
        for i in xrange(self._start_at, self.num):
            yield [(p.name, p.values[i], p.description) for p in self.params]
    def _do_pdf(self, hf, data):
        """Summarize the sweep output *data*; returns (name, value) tuples."""
        if self.response:
            # The response surface was built using Uniform distributions.
            # We want the mean and deviation of the data that would have
            # been produced using the real PDFs, so compute a weighted
            # mean and deviation.
            weights = np.prod([p.pdf.pdf(p.values) for p in self.params], 0)
            tweight = np.sum(weights)
            mean = np.average(data, weights=weights)
            dev = np.sqrt(np.dot(weights, (data - mean)**2) / tweight)
            rsd = np.vstack(([p.values for p in self.params], data))
            rs = pickle(SampledFunc(*rsd, params=self.params))
            # Single-argument print() is valid on both Python 2 and 3;
            # this file previously mixed both print forms.
            print("Mean = %s" % mean)
            print("StdDev = %s" % dev)
            return [('response', rs), ('mean', mean), ('dev', dev)]
        else:
            pdf = ExperimentalPDF(data, fit=0)
            mean = np.mean(data)
            dev = np.std(data)
            print("Mean = %s" % mean)
            print("StdDev = %s" % dev)
            return [('pdf', pickle(pdf)), ('samples', data), ('mean', mean), ('dev', dev)]
    def analyze(self, hf):
        """Post-process the results referenced by *hf* via process_data."""
        debug('')
        process_data(hf, 'montecarlo', self._do_pdf)
    def extend(self, num):
        """Add *num* additional samples to an existing sweep."""
        if num <= 0:
            print("Monte Carlo extend requires a valid num argument.")
            raise ValueError
        for p in self.params:
            if self.response:
                p.values = np.concatenate((p.values, UniformPDF(*p.pdf.range).random(num)))
            else:
                if hasattr(p, 'use_samples_val') and p.use_samples_val:
                    if np.size(p.values) < self.num + num:
                        # May need to allow passing in additional custom
                        # samples; otherwise extending a run whose values
                        # were supplied manually raises here.
                        raise Exception("Not enough samples for param {}. Expected {} found {}".format(p.name,
                            self.num + num, np.size(p.values)))
                p.values = np.concatenate((p.values, p.pdf.random(num)))
        self._start_at = self.num
        self.num += num
| en | 0.79905 | Basic Monte Carlo Method This file is part of PUQ Copyright (c) 2013 PUQ Authors See LICENSE file for terms. Class implementing Monte Carlo sampling. Args: params: Input list of :class:`Parameter`\s. num: Number of samples to use. response(boolean): Generate a response surface using the sample points. iteration_cb(function): A function to call after completion. #when constructing a response surface, ignore use_samples_val #since the surface must be constructed so as to cover the entire #rangem of the input parameters. # To generate a complete response surface, use Uniform distributions # with the same range as the original distributions. #only generate new samples if use_samples is false #see CustomParameter in parameter.py #if the number of samples in the parameter is less than the number #of desired MC runs, raise exception. If the number is greater, only #the first num samples are used #if p is a ConstantParameter, p.values will all be the same number, which #is the behavior we want. # Returns a list of name,value tuples # For example, [('t', 1.0), ('freq', 133862.0)] # This is one realization of the parameters. Since this function # returns a generator which can be iterated like a list, # yielding an iterable list of lists of tuples. # The response surface was built using Uniform distributions. # We are interested in the mean and deviation of the data # that would have been produced using the real PDFs. For this, # we need to compute a weighted mean and deviation #may need to allow passing in additional custom samples so #the user can add more if needed. Else exception will be thrown when #trying to extend run where user defined the parameter values manually | 3.123354 | 3 |
calculate_difflib.py | kamujun/calculate_word_similarity | 0 | 6618400 | import difflib
# Candidate package/product names to score against (duplicates kept as found
# in the source inventory; dict assignment in calculate() deduplicates them).
software = ['wget', 'tzdata', 'trousers', 'teamd', 'teamd', 'tar', 'systemd-sysv',
            'systemd-sysv', 'sudo', 'snappy', 'rpm-python27', 'rpm-build-libs', 'rootfiles',
            'rdma', 'quota', 'pyxattr', 'python27-virtualenv', 'python27-simplejson', 'python-iniparse',
            'python-IPy', 'python', 'pygpgme', 'pygobject3-base', 'procmail', 'pm-utils', 'pinentry',
            'perl-threads', 'perl-libs', 'parted', 'os-prober', 'openssh-clients', 'oci-register-machine',
            'nss-softokn-freebl', 'nss-softokn-freebl', 'npm', 'net-tools', 'mesa-libGL', 'mariadb-libs',
            'lua', 'libuser', 'libselinux-utils', 'libproxy', 'libpipeline', 'libpciaccess', 'libpciaccess',
            'libpath_utils', 'libnl3', 'libini_config', 'libidn', 'libgssglue', 'libgomp', 'libgcrypt', 'libgcrypt',
            'libgcc', 'libedit', 'libdb', 'libcom_err', 'libcom_err', 'libcap-ng', 'libXfixes', 'libSM',
            'kernel-headers', 'kbd', 'java-1.8.0-openjdk', 'initscripts', 'hardlink', 'hardlink', 'hardlink',
            'gssproxy', 'groff-base', 'grep', 'gobject-introspection', 'freetype', 'fipscheck', 'ethtool',
            'elfutils-libelf', 'diffutils', 'device-mapper-event', 'device-mapper-event', 'dejavu-sans-fonts',
            'curl', 'crontabs', 'cronie', 'container-selinux', 'container-selinux', 'chkconfig', 'chkconfig',
            'checkpolicy', 'bzip2', 'bind-license', 'aws-cfn-bootstrap', 'aws-cfn-bootstrap', 'aws-cfn-bootstrap',
            'audit-libs', 'NetworkManager-team', 'Amazon SSM Agent', 'Amazon SSM Agent', 'AWS Tools for Windows',
            'httpd']
def calculate(input_string, candidates=None):
    """Print and return difflib similarity ratios for *input_string*.

    Args:
        input_string: name to compare against the candidate list.
        candidates: iterable of names to score; defaults to the
            module-level ``software`` list, preserving the old behavior.

    Returns:
        dict mapping candidate name -> similarity ratio in [0, 1]
        (the original returned None; existing callers ignore the return).
    """
    if candidates is None:
        candidates = software
    match_result = {}
    for software_name in candidates:
        # Compute the ratio once; the original rebuilt the SequenceMatcher
        # a second time just to print the same value.
        ratio = difflib.SequenceMatcher(None, software_name, input_string).ratio()
        match_result[software_name] = ratio
        print(ratio)
    # Print the ranking in ascending similarity (best match last).
    for k, v in sorted(match_result.items(), key=lambda x: x[1]):
        print(str(k) + ": " + str(v))
    return match_result
if __name__ == '__main__':
    # Demo: score the name 'http_server' against the package list.
    product = 'http_server'
    calculate(product)
| import difflib
# Candidate package/product names to score against (duplicates kept as found
# in the source inventory; dict assignment in calculate() deduplicates them).
software = ['wget', 'tzdata', 'trousers', 'teamd', 'teamd', 'tar', 'systemd-sysv',
            'systemd-sysv', 'sudo', 'snappy', 'rpm-python27', 'rpm-build-libs', 'rootfiles',
            'rdma', 'quota', 'pyxattr', 'python27-virtualenv', 'python27-simplejson', 'python-iniparse',
            'python-IPy', 'python', 'pygpgme', 'pygobject3-base', 'procmail', 'pm-utils', 'pinentry',
            'perl-threads', 'perl-libs', 'parted', 'os-prober', 'openssh-clients', 'oci-register-machine',
            'nss-softokn-freebl', 'nss-softokn-freebl', 'npm', 'net-tools', 'mesa-libGL', 'mariadb-libs',
            'lua', 'libuser', 'libselinux-utils', 'libproxy', 'libpipeline', 'libpciaccess', 'libpciaccess',
            'libpath_utils', 'libnl3', 'libini_config', 'libidn', 'libgssglue', 'libgomp', 'libgcrypt', 'libgcrypt',
            'libgcc', 'libedit', 'libdb', 'libcom_err', 'libcom_err', 'libcap-ng', 'libXfixes', 'libSM',
            'kernel-headers', 'kbd', 'java-1.8.0-openjdk', 'initscripts', 'hardlink', 'hardlink', 'hardlink',
            'gssproxy', 'groff-base', 'grep', 'gobject-introspection', 'freetype', 'fipscheck', 'ethtool',
            'elfutils-libelf', 'diffutils', 'device-mapper-event', 'device-mapper-event', 'dejavu-sans-fonts',
            'curl', 'crontabs', 'cronie', 'container-selinux', 'container-selinux', 'chkconfig', 'chkconfig',
            'checkpolicy', 'bzip2', 'bind-license', 'aws-cfn-bootstrap', 'aws-cfn-bootstrap', 'aws-cfn-bootstrap',
            'audit-libs', 'NetworkManager-team', 'Amazon SSM Agent', 'Amazon SSM Agent', 'AWS Tools for Windows',
            'httpd']
def calculate(input_string):
    """Score *input_string* against every name in the module-level
    ``software`` list with difflib, echoing each ratio and then the
    full ranking in ascending similarity."""
    match_result = {}
    for candidate in software:
        score = difflib.SequenceMatcher(None, candidate, input_string).ratio()
        match_result[candidate] = score
        print(score)
    ranked = sorted(match_result.items(), key=lambda pair: pair[1])
    for name, score in ranked:
        print(str(name) + ": " + str(score))
if __name__ == '__main__':
    # Demo: score the name 'http_server' against the package list.
    product = 'http_server'
    calculate(product)
| none | 1 | 1.256803 | 1 | |
src/heartex/add_sentiment_chart.py | heartexlabs/brand-sentiment-analysis | 21 | 6618401 | <reponame>heartexlabs/brand-sentiment-analysis<filename>src/heartex/add_sentiment_chart.py<gh_stars>10-100
# python3 script
import csv
import json
import optparse
import pandas as pd
import heartex
import time
def resample_by_time(times, values, period):
    """Bucket *values* by timestamp and average them per *period*.

    :param times: UNIX timestamps (seconds)
    :param values: +1 and -1 or other float values
    :param period: pandas offset alias, e.g. 1T (minute), 1H, 1D, 1M, 1Y
    :return: x - bucket labels as strings, y - per-bucket means
             (empty buckets filled with 0)
    """
    index = pd.to_datetime(times, unit='s')
    frame = pd.DataFrame({'time': index, 'values': values}).set_index('time')
    averaged = frame.astype('float').resample(period).mean().fillna(0)
    axis = averaged.index.astype(str).tolist()
    series = averaged.values[:, 0].tolist()
    return axis, series
def run(options):
    """Rebuild the sentiment chart:
    1 Read CSV with news
    2 Recognize sentiment using Heartex
    3 Collect positives and negatives
    4 Resample
    5 Save output json for chart
    """
    # read csv (tab-separated; needs 'text' and 'timestamp' columns)
    data = []
    with open(options.input, newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            data.append({'text': row['text'], 'time': int(row['timestamp'])})
    # heartex predict
    predictions = heartex.run_predict(**vars(options), data=data)
    # collect score values (positives & negatives); predictions are assumed
    # to come back in the same order as `data` -- TODO confirm
    for i, p in enumerate(predictions.json()):
        data[i]['value'] = 0
        if p['score'] > options.score:
            for row in p['result']:
                if 'Positive' in row['value']['choices']:
                    data[i]['value'] = +1
                if 'Negative' in row['value']['choices']:
                    data[i]['value'] = -1
    # resample the sentiment values onto a regular time grid
    times = [d['time'] for d in data]
    values = [d['value'] for d in data]
    x, y = resample_by_time(times, values, options.period)
    # save output consumed by the web chart
    output = {'news': data, 'chart': {'x': x, 'y': y}}
    with open(options.output, 'w') as f:
        json.dump(output, f)
if __name__ == '__main__':
    # Command-line options for the chart-rebuild loop.
    parser = optparse.OptionParser()
    parser.add_option('-t', '--token', dest='token', help='heartex token')
    parser.add_option('-p', '--project', type=int, dest='project', help='project id')
    parser.add_option('-i', '--input', dest='input', default='news.csv', help='input file name')
    parser.add_option('-s', '--score', type=float, dest='score', default=0.50, help='score used to filter')
    parser.add_option('-d', '--period', dest='period', default='1D', help='pandas period: 1T (minute), 1H, 1D, 1M, 1Y')
    parser.add_option('-o', '--output', dest='output', default='web/data/output.json', help='output filename for charts')
    parser.add_option('-l', '--loop', action='store_true', dest='loop', default=False, help='run in loop')
    options, args = parser.parse_args()
    # rebuild news charts every 5 seconds
    while True:
        print(f'Run {options.input} => {options.output}')
        run(options)
        if not options.loop:
            break
        time.sleep(5)
| # python3 script
import csv
import json
import optparse
import pandas as pd
import heartex
import time
def resample_by_time(times, values, period):
    """Average *values* into fixed *period* buckets along the time axis.

    :param times: UNIX timestamps (seconds)
    :param values: +1 and -1 or other float values
    :param period: pandas offset alias: 1T (minute), 1H, 1D, 1M, 1Y
    :return: x - bucket labels as strings, y - per-bucket means
             (empty buckets filled with 0)
    """
    stamps = pd.to_datetime(times, unit='s')
    resampled = (
        pd.DataFrame({'time': stamps, 'values': values})
        .set_index('time')
        .astype('float')
        .resample(period)
        .mean()
        .fillna(0)
    )
    return resampled.index.astype(str).tolist(), resampled.values[:, 0].tolist()
def run(options):
    """Rebuild the sentiment chart:
    1 Read CSV with news
    2 Recognize sentiment using Heartex
    3 Collect positives and negatives
    4 Resample
    5 Save output json for chart
    """
    # read csv (tab-separated; needs 'text' and 'timestamp' columns)
    data = []
    with open(options.input, newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            data.append({'text': row['text'], 'time': int(row['timestamp'])})
    # heartex predict
    predictions = heartex.run_predict(**vars(options), data=data)
    # collect score values (positives & negatives); predictions are assumed
    # to come back in the same order as `data` -- TODO confirm
    for i, p in enumerate(predictions.json()):
        data[i]['value'] = 0
        if p['score'] > options.score:
            for row in p['result']:
                if 'Positive' in row['value']['choices']:
                    data[i]['value'] = +1
                if 'Negative' in row['value']['choices']:
                    data[i]['value'] = -1
    # resample the sentiment values onto a regular time grid
    times = [d['time'] for d in data]
    values = [d['value'] for d in data]
    x, y = resample_by_time(times, values, options.period)
    # save output consumed by the web chart
    output = {'news': data, 'chart': {'x': x, 'y': y}}
    with open(options.output, 'w') as f:
        json.dump(output, f)
if __name__ == '__main__':
    # Command-line options for the chart-rebuild loop.
    parser = optparse.OptionParser()
    parser.add_option('-t', '--token', dest='token', help='heartex token')
    parser.add_option('-p', '--project', type=int, dest='project', help='project id')
    parser.add_option('-i', '--input', dest='input', default='news.csv', help='input file name')
    parser.add_option('-s', '--score', type=float, dest='score', default=0.50, help='score used to filter')
    parser.add_option('-d', '--period', dest='period', default='1D', help='pandas period: 1T (minute), 1H, 1D, 1M, 1Y')
    parser.add_option('-o', '--output', dest='output', default='web/data/output.json', help='output filename for charts')
    parser.add_option('-l', '--loop', action='store_true', dest='loop', default=False, help='run in loop')
    options, args = parser.parse_args()
    # rebuild news charts every 5 seconds
    while True:
        print(f'Run {options.input} => {options.output}')
        run(options)
        if not options.loop:
            break
time.sleep(5) | en | 0.489442 | # python3 script Resample values by time :param times: timestamps :param values: +1 and -1 or other float values :param period: 1T (minute), 1H, 1D, 1M, 1Y :return: x - time axis, y - values 1 Read CSV with news 2 Recognize sentiment using Haertex 3 Collect positives and negatives 4 Resample 5 Save output json for chart # read csv # heartex predict # collect score values (positives & negatives) # resample # save output # rebuild news charts every 5 seconds | 3.211735 | 3 |
ecom/api/views.py | RandyARk79/Ecom1 | 0 | 6618402 | <gh_stars>0
from django.shortcuts import render
from django.http import JsonResponse
# Create your views here.
def home(request):
    """Health-check style view returning a fixed JSON payload."""
    payload = {'info': 'Django react course', 'name': '<NAME>'}
    return JsonResponse(payload)
| from django.shortcuts import render
from django.http import JsonResponse
# Create your views here.
def home(request):
return JsonResponse({'info': 'Django react course', 'name': '<NAME>'}) | en | 0.968116 | # Create your views here. | 1.863223 | 2 |
baiducloudengine.py | root79-glit/BaiduCloudHelper | 51 | 6618403 | <gh_stars>10-100
# coding=utf-8
import utils
import errmsg
import re
import io
import os
import traceback
import base64
import json
import requests
import random
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
# Crypto代替rsa参考:https://git.wageningenur.nl/aflit001/ibrowser/commit/1b2437fe81af9a8511bf847c1ada69a9de8df893?view=parallel&w=1
# 兼容2.7和3.x
try:
import cookielib
import urllib
except ImportError:
import http.cookiejar as cookielib
import urllib.parse as urllib
'''
Baidu Cloud (Baidu Pan) engine module.

Implemented so far:
    login
    logout
    automatic (cookie-based) login
Planned:
    list files and directories
    fetch download links
    fetch file sizes
'''
# Passport/Pan endpoints used by the engine.
home_url = 'https://pan.baidu.com'
passport_url = 'https://passport.baidu.com/v2/api/?'
logout_url = 'https://passport.baidu.com/?logout&u=https%3A%2F%2Fpan.baidu.com%2F'
# Captcha image endpoint (the codestring is appended as a query argument).
captcha_url = 'https://passport.baidu.com/cgi-bin/genimage?'
pan_api_url = 'http://pan.baidu.com/api/'
disk_home_url = 'https://pan.baidu.com/disk/home'
pcs_rest_url = 'http://d.pcs.baidu.com/rest/2.0/pcs/file'
get_publickey_url = 'https://passport.baidu.com/v2/getpublickey?token='
# Maximum login retry attempts (consumed by login()).
max_retry_times = 10
class BaiduCloudEngine():
def __init__(self, webserver=False, user_agent='netdisk;172.16.17.32;PC;PC-Windows;10.0.10240;WindowsBaiduYunGuanJia'):
'''
初始化百度云引擎
私有变量self.window为当前的WindowEngine句柄
私有变量self.cj为cookie
私有变量self.opener为urllib2的opener
私有变量self.headers为自定义user-agent
:param window:当前WindowEngine句柄,默认为None
:param user_agent: 默认为win10 chrome
'''
self.webserver = webserver
self.session = requests.session()
self.headers = { 'Accept':'*/*',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8',
'User-Agent': user_agent
}
self.file_list = {}
# 用于网页模式
self.verifycode = ''
self.verifycode_img_url = ''
self.mail_verify_mode = False
# 读取cookie
cookie = self.get_cookies()
if cookie is not None and cookie is not False:
self.session.cookies = cookie
self.get_token()
self.logined = True
else:
self.logined = False
    def get_session(self):
        # Expose the underlying requests.Session (shared cookies/connection pool).
        return self.session
    def get_headers(self):
        # Expose the default request headers dict (mutable; callers share it).
        return self.headers
def get_response(self, url, post_data=None, html=True, headers=None):
'''
获取http返回内容
:param url: 地址
:param post_data: post数据,默认为None
:param html: 是否请求的是html数据
:param headers: 可以自定义请求头
:returns: http返回内容,错误返回''
'''
req_headers = self.headers
if headers is not None:
for header in headers:
req_headers[header] = headers[header]
tryed_time = 0
while tryed_time <= 3:
try:
if post_data is None:
response = self.session.get(url, headers=req_headers)
else:
response = self.session.post(url, data=post_data, headers=req_headers)
break
except Exception:
tryed_time += 1
utils.show_msg('Get url %s timeout, tryedtime=%d' % (url, tryed_time))
if tryed_time > 3:
utils.show_msg(traceback.print_exc())
if post_data is not None:
utils.show_msg('错误:Post url %s failed.' % url)
else:
utils.show_msg('错误:Open url %s failed.' % url)
return ''
return response.content
    def check_login(self, username):
        '''
        Check login info and fetch the token and codestring.
        NOTE: this function is no longer used.
        :param username: account user name
        :returns: the codeString on success; False on failure or error
        '''
        response = self.get_response(home_url)
        if response == '':
            return False
        else:
            # Extract the hidden dv input from the home page.
            try:
                tmp = re.findall('id=\"dv_Input\" type=\"hidden\" value=\"(.*?)\"', response)
            except Exception:
                utils.show_msg(traceback.print_exc())
                utils.show_msg('错误:Can\'t get dv_Input.')
                return False
        codestring = None
        if not self.get_token():
            return False
        # logincheck: ask passport whether a captcha (codeString) is required.
        passport_logincheck_url = passport_url + 'logincheck&&token=%s' % self.token
        passport_logincheck_url += '&tpl=pp&apiver=v3&tt=%s' % utils.get_time()
        passport_logincheck_url += '&username=%s' % urllib.quote(username)
        passport_logincheck_url += '&isphone=false&callback=bd__cbs__' + utils.get_callback_function()
        passport_logincheck_response = self.get_response(passport_logincheck_url)
        json = utils.get_json_from_response(passport_logincheck_response)
        try:
            # NOTE(review): eval() on a network response is unsafe; also
            # traceback.print_exc() below returns None (see get_response).
            json = eval(json[0])
            codeString = json['data']['codeString']
        except Exception:
            utils.show_msg(traceback.print_exc())
            utils.show_msg('错误:Can\'t get passport logincheck\'s response json.')
            return False
        return codeString
def get_token(self):
'''
通过getapi获取token
:returns: False为发生错误
'''
passport_getapi_url = passport_url + 'getapi&tpl=pp&apiver=v3&tt=%s' % utils.get_time()
passport_getapi_url += '&class=login&logintype=basicLogin&callback=bd__cbs__' + utils.get_callback_function()
passport_getapi_response = self.get_response(passport_getapi_url)
json = utils.get_json_from_response(passport_getapi_response)
try:
json = eval(json[0])
self.token = json['data']['token']
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t get passport getapi\'s response json.')
return False
return True
def get_publickey(self, ):
'''
参考自https://github.com/ly0/baidupcsapi/blob/master/baidupcsapi/api.py
根据项目部分修改
'''
url = get_publickey_url + self.token
content = self.get_response(url)
jdata = json.loads(content.replace(b'\'', b'"').decode('utf-8'))
return jdata['pubkey'], jdata['key']
    def get_verifycode(self):
        '''
        In command-line mode, show the captcha image and read the code the
        user types; in web-server mode, expose the captcha image URL instead.
        :returns: False when running as a web server (the caller reads
                  self.verifycode later); otherwise the code entered by
                  the user, or '' when no captcha is required
        '''
        from PIL import Image
        if self.codestring != '':
            # A captcha is required for this login attempt.
            verifycode_img_url = captcha_url + self.codestring
            if self.webserver:
                self.verifycode_img_url = verifycode_img_url
                self.verifycode = ''
                return False
            else:
                verifycode_img_response = self.get_response(verifycode_img_url, html=False)
                verifycode_img_bytes = io.BytesIO(verifycode_img_response)
                verifycode_img = Image.open(verifycode_img_bytes)
                verifycode_img.show()
                # Python 2/3 compatibility: raw_input only exists on 2.x.
                try:
                    captch = raw_input("Enter verifycode:")
                except NameError:
                    captch = input("Enter verifycode:")
                verifycode_img.close()
        else:
            captch = ''
        return captch
def login(self, username, password, verify='', mail_verify=''):
    '''
    Perform the login; may require the user to answer a captcha.
    :param username: user name
    :param password: password
    :param verify: captcha answer, empty by default
    :param mail_verify: e-mail verification code, empty by default
    :returns: -1 error, 1 success, 2 captcha required, 3 e-mail code required
    '''
    # E-mail verification mode: only the code remains to be checked.
    if self.mail_verify_mode:
        if self.check_mail_verify_code(mail_verify):
            self.login_success()
            return 1
        else:
            return -1
    retry = 0
    while retry <= max_retry_times:
        if self.verifycode != '':
            # A captcha answer was supplied (web mode); consume it once.
            captch = self.verifycode
            self.verifycode_img_url = ''
            self.verifycode = ''
        else:
            '''
            self.codestring = self.check_login(username)
            if self.codestring == 0:
                return False
            if self.codestring is None:
                utils.show_msg('错误:codestring is None.')
                return False
            '''
            if not self.get_token():
                return -1
            self.codestring = ''
            captch = self.get_verifycode()
            # Web mode: hand control back so the front end can show the captcha.
            if captch is False:
                return 2
        # RSA-encrypt the password; adapted from
        # https://github.com/ly0/baidupcsapi/blob/master/baidupcsapi/api.py
        pubkey, rsakey = self.get_publickey()
        key = RSA.importKey(pubkey)
        password_rsaed = base64.b64encode(PKCS1_v1_5.new(key).encrypt(password.encode('utf-8')))
        # (names/functions slightly adapted for this project)
        # Build the gid: a UUID-shaped string of random hex digits (see the
        # original JavaScript reproduced in the string literal below).
        gid = "xxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"
        for i in xrange(29):
            gid = re.sub('x', utils.get_gid_char(0), gid, 1)
        gid = re.sub('y', utils.get_gid_char(8), gid, 1)
        post_data = {"staticpage": "http://www.baidu.com/cache/user/html/v3Jump.html",
                     "charset": "utf-8",
                     "token": self.token,
                     "tpl": "pp",
                     "subpro": "",
                     "apiver": "v3",
                     "tt": utils.get_time(),
                     "codestring": self.codestring,
                     "safeflg": "0",
                     "u": "https://passport.baidu.com/",
                     "isPhone": "false",
                     "quick_user": "0",
                     "logintype": "basicLogin",
                     "logLoginType": "pc_loginBasic",
                     #"idc": "",
                     "loginmerge": "true",
                     "username": username,
                     "password": <PASSWORD>,
                     "verifycode": captch,  # captcha answer
                     "mem_pass": "on",
                     "rsakey": str(rsakey),
                     "crypttype": "12",
                     "ppui_logintime": random.randint(10000, 99999),
                     "callback": "parent.bd__pcbs__" + utils.get_callback_function(),
                     "detect": 1,
                     "foreignusername": "",
                     "gid": gid
                     }
        '''
        gid 原始代码:
        return "xxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, function(e) {
            var t = 16 * Math.random() | 0
            , n = "x" == e ? t : 3 & t | 8;
            return n.toString(16)
        }).toUpperCase()
        分析后得出此代码为:x填充16进制随机数,y填充大于等于8的16进制随机数
        '''
        passport_logincheck_response = self.get_response(passport_url + 'login', post_data)
        # Scrape the redirect ("jump") URL out of the response HTML.
        try:
            tmp = re.findall('decodeURIComponent\(\"(.*?)\"\)', passport_logincheck_response)
            jump_url = tmp[0]
            jump_url = jump_url.replace('\\', '')
            tmp = re.findall('accounts\s*?=\s*?\'(.*?)\'', passport_logincheck_response)
            account = tmp[0]
            jump_url += '?'
            tmp = re.findall('href\s*?\+=\s*?"(.*?)"', passport_logincheck_response)
            jump_url += tmp[0]
            jump_url += account
        except Exception:
            utils.show_msg(traceback.print_exc())
            utils.show_msg('错误:Can\'t go to jump page.')
            return -1
        # Error handling: the jump URL carries Baidu's numeric status code.
        try:
            tmp = re.findall('err_no=([-]?\d*)', jump_url)
            errno = tmp[0]
        except Exception:
            utils.show_msg(traceback.print_exc())
            utils.show_msg('错误:Can\'t get check login error number.')
            return -1
        if errno == '3' or errno == '6' or errno == '200010':
            # Wrong captcha - fall through to retry with a fresh one.
            pass
        elif errno == '257':
            # No captcha was entered, or a first login requires one.
            tmp = re.findall('&codeString=([A-Za-z0-9]*?)&userName', passport_logincheck_response)
            if len(tmp) > 0:
                self.codestring = tmp[0]
            else:
                utils.show_msg('错误:无法获取codeString信息.')
                return -1
            # Fetch the current captcha.
            result = self.get_verifycode()
            if result is False:
                # Web mode: report "captcha required" to the caller.
                utils.show_msg('错误:登陆错误,请重新尝试,错误代码:' + errno + ',错误信息:' + errmsg.get_login_errmsg(errno))
                return 2
            else:
                # CLI mode: we already have the answer; retry immediately.
                self.verifycode = result
                continue
        elif errno == '0' or errno == '18' or errno == '400032' or errno == '400034' or errno == '400037' or errno == '400401':
            self.login_success()
            return 1
        elif errno == '120019':
            utils.show_msg('错误:%s,短时间密码错误次数过多, 请先通过 passport.baidu.com 解除锁定' % errno)
            return -1
        elif errno == '120021':
            # External (e-mail) verification is required for this account.
            self.auth_token = re.findall('authtoken=([^&]+)', passport_logincheck_response)[0]
            self.loginproxy_url = re.findall('loginproxy=([^&]+)', passport_logincheck_response)[0]
            responese = self.get_response('https://passport.baidu.com/v2/sapi/authwidgetverify' +
                                          '?authtoken=' + self.auth_token +
                                          '&type=email' +
                                          '&apiver=v3' +
                                          '&action=send' +
                                          '&vcode=' +
                                          '&questionAndAnswer=' +
                                          '&needsid=' +
                                          '&rsakey=' +
                                          '&countrycode=' +
                                          '&subpro=' +
                                          '&callback=' +
                                          '&tpl=mn' +
                                          '&u=https://www.baidu.com/'
                                          )
            responese = json.loads(responese)
            if responese['errmsg'] is None:
                if self.webserver:
                    # Web mode: report "e-mail code required" to the caller.
                    self.mail_verify_mode = True
                    return 3
                else:
                    self.check_mail_verify_code(input('请输入邮箱验证码:'))
            else:
                utils.show_msg('错误:发送安全验证请求失败')
                return -1
        utils.show_msg('错误:登陆错误,请重新尝试,错误代码:' + errno + ',错误信息:' + errmsg.get_login_errmsg(errno))
        retry += 1
    utils.show_msg('错误:超出最大重试次数:' + str(max_retry_times))
    return -1
def check_mail_verify_code(self, vcode):
    """
    Submit the e-mail verification code to the passport auth widget.
    :param vcode: verification code received by e-mail
    :return: True on success, False on failure or error
    """
    response = self.get_response('https://passport.baidu.com/v2/sapi/authwidgetverify' +
                                 '?authtoken=' + self.auth_token +
                                 '&type=email' +
                                 '&apiver=v3' +
                                 '&action=check' +
                                 '&vcode=' + str(vcode) +
                                 '&questionAndAnswer=' +
                                 '&needsid=' +
                                 '&rsakey=' +
                                 '&countrycode=' +
                                 '&subpro=' +
                                 '&callback=')
    result = json.loads(response)
    if result['errno'] == 110000:
        # Finish the login through the proxy URL Baidu handed back earlier.
        self.get_response(urllib.unquote(self.loginproxy_url.decode()))
        self.mail_verify_mode = False
        return True
    # errno is an integer in the JSON payload; convert it before string
    # concatenation (the previous code raised TypeError on this path).
    utils.show_msg('错误:邮箱验证码错误' + str(result['errno']))
    return False
def login_success(self):
    '''
    Runs after a successful login: request the disk home page (so Baidu sets
    the remaining session cookies), persist the cookies, and mark the engine
    as logged in.
    '''
    self.get_response(disk_home_url)
    self.save_cookies()
    self.logined = True
def logout(self):
    """
    Log out of Baidu and drop the local session state.
    :returns: True on success, False on failure or error
    """
    # Start from an empty cookie jar so the logout request is clean.
    self.session.cookies = cookielib.CookieJar()
    response = self.get_response(logout_url)
    # The login form marker ('login-main') only appears when logged out.
    if re.findall('login-main', response):
        self.logined = False
        self.remove_cookies()
        return True
    return False
def save_cookies(self):
    """
    Serialize the current session cookies to ``cookie.list`` as JSON.
    :returns: True on success, False on failure or error
    """
    try:
        cookie_list = requests.utils.dict_from_cookiejar(self.session.cookies)
        # Open in text mode: json.dumps returns str, which cannot be written
        # to a binary ('wb') handle on Python 3.  'w' also truncates, so the
        # old explicit remove() of a stale file is no longer needed, and the
        # context manager guarantees the handle is closed on every path.
        with open('cookie.list', 'w') as fd:
            fd.write(json.dumps(cookie_list))
        return True
    except Exception:
        utils.show_msg(traceback.print_exc())
        utils.show_msg('错误:Can\'t save cookie list file.')
        return False
def get_cookies(self):
    """
    Load cookies previously written by :meth:`save_cookies`.
    :returns: a cookie jar suitable for assignment to ``requests.session().cookies``,
              None when no (or an empty) cookie file exists, False on error
    """
    # Warm-up request; also a basic connectivity check.
    response = self.get_response(home_url)
    if not response:
        # get_response() signals failure with '' (never False), so test
        # truthiness here - the old identity check against False was dead code.
        return False
    try:
        if not os.path.isfile('cookie.list'):
            return None
        # Text mode so the empty-file check below compares str with str on
        # Python 3; the context manager closes the handle even on errors
        # (the old code leaked the descriptor when read()/loads() raised).
        with open('cookie.list', 'r') as fd:
            cookie = fd.read()
        if cookie == '':
            return None
        return requests.utils.cookiejar_from_dict(json.loads(cookie))
    except Exception:
        utils.show_msg(traceback.print_exc())
        utils.show_msg('错误:Can\'t get cookie list file.')
        return False
def remove_cookies(self):
    """Delete the cached cookie file if it exists."""
    cookie_path = 'cookie.list'
    if os.path.exists(cookie_path):
        os.remove(cookie_path)
def do_pan_api(self, api, args):
    """
    Call a Baidu netdisk API endpoint.
    :param api: api name to invoke, e.g. 'list'
    :param args: dict of extra query-string parameters (all strings)
    :returns: the response's 'list' payload on success, False on any error
    """
    api_url = pan_api_url + api + '?'
    api_url += 'channel=chunlei&clienttype=0&web=1&t=%s' % utils.get_time()
    api_url += '&bdstoken=' + self.token
    for arg in args:
        api_url += '&%s=%s' % (arg, args[arg])
    pan_api_response = self.get_response(api_url)
    try:
        # Parse with json.loads instead of eval(): the response is untrusted
        # network data, and the previous eval() also shadowed the json module
        # and could not handle JSON literals (true/false/null).
        payload = json.loads(pan_api_response)
        errno = str(payload['errno'])
        if errno == '0':
            return payload['list']
    except Exception:
        utils.show_msg(traceback.print_exc())
        utils.show_msg("错误:Can't get pan api:" + api + " response json.")
        return False
    # Error handling
    if errno == '-6':
        # Session cookies have expired.
        self.logined = False
        self.remove_cookies()
        utils.show_msg('错误:cookies已失效,请刷新页面重新登陆')
    else:
        utils.show_msg('错误:执行百度云api:' + api + '时出错,错误代码:' + errno + ',错误信息:' + errmsg.get_errmsg_by_errno(errno))
    return False
def get_list(self, dir, page=None, page_size=None, order='name', desc='1'):
    '''
    Fetch a directory listing; by default everything is returned.
    Note: all inputs must be strings.
    :param dir: directory path
    :param page: page number
    :param page_size: records per page, 20 by default
    :param order: sort field - one of
                  time (modification time), name (file name),
                  size (note: directories have no size)
    :param desc: '1' descending, '0' ascending; descending by default
    :returns: dict-style file info (server_filename and path are unicode),
              False on error
    '''
    args = {
        "_": utils.get_time(),
        "dir": urllib.quote(dir),
        "order": order,
        "desc": desc,
    }
    if page is not None:
        args['page'] = page
    if page_size is not None:
        args['num'] = page_size
    result = self.do_pan_api('list', args)
    if result != False:
        for file in result:
            # The eval() below interprets \uXXXX escape sequences in the
            # server-supplied names (py2-era unicode-literal trick).
            # SECURITY NOTE(review): eval() on server-controlled strings is a
            # code-injection hazard - a name containing a quote breaks out of
            # the literal.  Consider codecs.decode(..., 'unicode_escape');
            # TODO confirm the payload's escaping before changing behavior.
            file['server_filename'] = eval('u\'' + file['server_filename'] + '\'')
            file['path'] = eval('u\'' + file['path'] + '\'.replace(\'\\\\\',\'\')')
        self.file_list[dir] = result
    return result
def get_download_url(self, dir, link):
    """
    Build the download URL for *dir*.
    :param dir: remote file path
    :param link: '0' selects the accelerated client-style link; anything
                 else yields the plain PCS REST link
    :returns: download URL as a string
    """
    if link == '0':
        return self.get_baiducloudclient_url(dir)
    return pcs_rest_url + '?method=%s&app_id=%s&path=%s' % (
        'download', '250528', urllib.quote(dir))
def get_file_size(self, url):
    """
    Query a remote file's size via an HTTP Range request.
    :param url: file URL
    :return: file size in bytes, False on error
    """
    headers = {
        'Range': 'bytes=0-4'
    }
    try:
        response = self.session.get(url, headers=headers)
        # Parse inside the try-block as well: a server that ignores the
        # Range header sends no Content-Range, and the old code let the
        # resulting KeyError escape instead of returning False as documented.
        content_range = response.headers['content-range']
        size = int(re.match(r'bytes 0-4/(\d+)', content_range).group(1))
    except Exception:
        utils.show_msg(traceback.print_exc())
        utils.show_msg('错误:Get file size failed.url %s.' % url)
        return False
    return size
def check_file(self, dir, file_name):
    """
    Look up *file_name* in the cached listing for *dir*.
    :param dir: directory path (no file name, no trailing slash)
    :param file_name: file name to look for
    :return: the cached file-info dict when found, False when the file is
             absent or the directory has not been listed yet
    """
    try:
        for file in self.file_list[dir]:
            if file['server_filename'] == file_name:
                return file
    except Exception:
        utils.show_msg('错误:Check file failed.')
        return False
    # Previously the function fell off the end and returned None here;
    # return False explicitly so "not found" matches the documented contract.
    return False
def get_baiducloudclient_url(self, dir):
    """
    Get a (preferably speed-boosted) direct download URL for *dir* by
    impersonating the official Windows client.
    :param dir: remote file path
    :returns: the first download URL on success, False on error
    """
    headers = {
        'User-Agent': 'netdisk;4.6.2.0;PC;PC-Windows;10.0.10240;WindowsBaiduYunGuanJia'
    }
    # Request the speed boost; if it fails, the link fetched below is
    # simply an ordinary one.  ('method=get' queries availability -
    # freq_cnt=1 means usable - while 'method=consume' claims the boost.)
    url = 'https://pan.baidu.com/rest/2.0/membership/speeds/freqctrl'
    postdata = {
        'method': 'consume'
    }
    try:
        self.get_response(url, post_data=postdata, headers=headers)
    except Exception:
        utils.show_msg(traceback.print_exc())
        # Message fixed: this step requests the speed boost, it does not
        # fetch a file size.
        utils.show_msg('错误:Request speed-up failed.url %s.' % url)
        return False
    # Fetch the located download link.
    url = 'https://d.pcs.baidu.com/rest/2.0/pcs/file?time=' + utils.get_time() + '&clienttype=21&version=2.1.0&vip=0&method=locatedownload&app_id=250528&esl=1&ver=4.0&dtype=1&ehps=1&check_blue=1&path=' + dir + '&err_ver=1.0'
    try:
        response = self.get_response(url, headers=headers)
    except Exception:
        utils.show_msg(traceback.print_exc())
        # Message fixed: copy-pasted "Get file size failed" was misleading.
        utils.show_msg('错误:Get download url failed.url %s.' % url)
        return False
    # Return the first candidate URL.
    url_info = json.loads(response)
    return url_info['urls'][0]['url']
import utils
import errmsg
import re
import io
import os
import traceback
import base64
import json
import requests
import random
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
# Crypto代替rsa参考:https://git.wageningenur.nl/aflit001/ibrowser/commit/1b2437fe81af9a8511bf847c1ada69a9de8df893?view=parallel&w=1
# 兼容2.7和3.x
try:
import cookielib
import urllib
except ImportError:
import http.cookiejar as cookielib
import urllib.parse as urllib
'''
这是一个百度云引擎模块
目前已经实现功能:
登陆
退出登陆
自动登录
目标功能:
获取文件目录
获取下载链接
获取文件大小
'''
home_url = 'https://pan.baidu.com'
passport_url = 'https://passport.baidu.com/v2/api/?'
logout_url = 'https://passport.baidu.com/?logout&u=https%3A%2F%2Fpan.baidu.com%2F'
# 验证码
captcha_url = 'https://passport.baidu.com/cgi-bin/genimage?'
pan_api_url = 'http://pan.baidu.com/api/'
disk_home_url = 'https://pan.baidu.com/disk/home'
pcs_rest_url = 'http://d.pcs.baidu.com/rest/2.0/pcs/file'
get_publickey_url = 'https://passport.baidu.com/v2/getpublickey?token='
max_retry_times = 10
class BaiduCloudEngine():
def __init__(self, webserver=False, user_agent='netdisk;172.16.17.32;PC;PC-Windows;10.0.10240;WindowsBaiduYunGuanJia'):
'''
初始化百度云引擎
私有变量self.window为当前的WindowEngine句柄
私有变量self.cj为cookie
私有变量self.opener为urllib2的opener
私有变量self.headers为自定义user-agent
:param window:当前WindowEngine句柄,默认为None
:param user_agent: 默认为win10 chrome
'''
self.webserver = webserver
self.session = requests.session()
self.headers = { 'Accept':'*/*',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8',
'User-Agent': user_agent
}
self.file_list = {}
# 用于网页模式
self.verifycode = ''
self.verifycode_img_url = ''
self.mail_verify_mode = False
# 读取cookie
cookie = self.get_cookies()
if cookie is not None and cookie is not False:
self.session.cookies = cookie
self.get_token()
self.logined = True
else:
self.logined = False
def get_session(self):
return self.session
def get_headers(self):
return self.headers
def get_response(self, url, post_data=None, html=True, headers=None):
'''
获取http返回内容
:param url: 地址
:param post_data: post数据,默认为None
:param html: 是否请求的是html数据
:param headers: 可以自定义请求头
:returns: http返回内容,错误返回''
'''
req_headers = self.headers
if headers is not None:
for header in headers:
req_headers[header] = headers[header]
tryed_time = 0
while tryed_time <= 3:
try:
if post_data is None:
response = self.session.get(url, headers=req_headers)
else:
response = self.session.post(url, data=post_data, headers=req_headers)
break
except Exception:
tryed_time += 1
utils.show_msg('Get url %s timeout, tryedtime=%d' % (url, tryed_time))
if tryed_time > 3:
utils.show_msg(traceback.print_exc())
if post_data is not None:
utils.show_msg('错误:Post url %s failed.' % url)
else:
utils.show_msg('错误:Open url %s failed.' % url)
return ''
return response.content
def check_login(self, username):
'''
检查登陆信息,获取token和codestring
此函数不再使用
:param username: 用户名
:returns: 正常返回值为string格式值为codestring
0为失败,None为发生错误
'''
response = self.get_response(home_url)
if response == '':
return False
else:
# 获取dv
try:
tmp = re.findall('id=\"dv_Input\" type=\"hidden\" value=\"(.*?)\"', response)
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t get dv_Input.')
return False
codestring = None
if not self.get_token():
return False
# logincheck
passport_logincheck_url = passport_url + 'logincheck&&token=%s' % self.token
passport_logincheck_url += '&tpl=pp&apiver=v3&tt=%s' % utils.get_time()
passport_logincheck_url += '&username=%s' % urllib.quote(username)
passport_logincheck_url += '&isphone=false&callback=bd__cbs__' + utils.get_callback_function()
passport_logincheck_response = self.get_response(passport_logincheck_url)
json = utils.get_json_from_response(passport_logincheck_response)
try:
json = eval(json[0])
codeString = json['data']['codeString']
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t get passport logincheck\'s response json.')
return False
return codeString
def get_token(self):
'''
通过getapi获取token
:returns: False为发生错误
'''
passport_getapi_url = passport_url + 'getapi&tpl=pp&apiver=v3&tt=%s' % utils.get_time()
passport_getapi_url += '&class=login&logintype=basicLogin&callback=bd__cbs__' + utils.get_callback_function()
passport_getapi_response = self.get_response(passport_getapi_url)
json = utils.get_json_from_response(passport_getapi_response)
try:
json = eval(json[0])
self.token = json['data']['token']
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t get passport getapi\'s response json.')
return False
return True
def get_publickey(self, ):
'''
参考自https://github.com/ly0/baidupcsapi/blob/master/baidupcsapi/api.py
根据项目部分修改
'''
url = get_publickey_url + self.token
content = self.get_response(url)
jdata = json.loads(content.replace(b'\'', b'"').decode('utf-8'))
return jdata['pubkey'], jdata['key']
def get_verifycode(self):
'''
命令行模式获取用户输入的验证码,网页服务器模式获取验证码地址
:returns: 若开启了网页服务器返回值为False,否则返回用户输入的验证码
'''
from PIL import Image
if self.codestring != '':
# 验证码
verifycode_img_url = captcha_url + self.codestring
if self.webserver:
self.verifycode_img_url = verifycode_img_url
self.verifycode = ''
return False
else:
verifycode_img_response = self.get_response(verifycode_img_url, html=False)
verifycode_img_bytes = io.BytesIO(verifycode_img_response)
verifycode_img = Image.open(verifycode_img_bytes)
verifycode_img.show()
# 兼容3.x
try:
captch = raw_input("Enter verifycode:")
except NameError:
captch = input("Enter verifycode:")
verifycode_img.close()
else:
captch = ''
return captch
def login(self, username, password, verify='', mail_verify=''):
'''
进行登陆
可能会弹出窗口输入验证码
:param username: 用户名
:param password:密码
:param verify: 验证码,默认为空
:returns: -1为错误,1为成功,2为需要验证码,3为需要邮箱验证码
'''
# 如果是验证邮箱mode
if self.mail_verify_mode:
if self.check_mail_verify_code(mail_verify):
self.login_success()
return 1
else:
return -1
retry = 0
while retry <= max_retry_times:
if self.verifycode != '':
captch = self.verifycode
self.verifycode_img_url = ''
self.verifycode = ''
else:
'''
self.codestring = self.check_login(username)
if self.codestring == 0:
return False
if self.codestring is None:
utils.show_msg('错误:codestring is None.')
return False
'''
if not self.get_token():
return -1
self.codestring = ''
captch = self.get_verifycode()
# 如果为网页模式则返回2
if captch is False:
return 2
# 此处参考自https://github.com/ly0/baidupcsapi/blob/master/baidupcsapi/api.py
pubkey, rsakey = self.get_publickey()
key = RSA.importKey(pubkey)
password_rsaed = base64.b64encode(PKCS1_v1_5.new(key).encrypt(password.encode('utf-8')))
# 以上为参考,变量、函数名、使用函数根据项目需求略微修改
#计算gid
gid = "xxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"
for i in xrange(29):
gid = re.sub('x', utils.get_gid_char(0), gid,1)
gid = re.sub('y', utils.get_gid_char(8), gid,1)
post_data = {"staticpage": "http://www.baidu.com/cache/user/html/v3Jump.html",
"charset": "utf-8",
"token": self.token,
"tpl": "pp",
"subpro": "",
"apiver": "v3",
"tt": utils.get_time(),
"codestring": self.codestring,
"safeflg": "0",
"u": "https://passport.baidu.com/",
"isPhone": "false",
"quick_user": "0",
"logintype": "basicLogin",
"logLoginType": "pc_loginBasic",
#"idc": "",
"loginmerge": "true",
"username": username,
"password": <PASSWORD>,
"verifycode": captch, # 验证码
"mem_pass": "on",
"rsakey": str(rsakey),
"crypttype": "12",
"ppui_logintime": random.randint(10000,99999),
"callback": "parent.bd__pcbs__" + utils.get_callback_function(),
"detect": 1,
"foreignusername": "",
"gid": gid
}
'''
gid 原始代码:
return "xxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, function(e) {
var t = 16 * Math.random() | 0
, n = "x" == e ? t : 3 & t | 8;
return n.toString(16)
}).toUpperCase()
分析后得出此代码为:x填充16进制随机数,y填充大于等于8的16进制随机数
'''
passport_logincheck_response = self.get_response(passport_url + 'login', post_data)
try:
tmp = re.findall('decodeURIComponent\(\"(.*?)\"\)', passport_logincheck_response)
jump_url = tmp[0]
jump_url = jump_url.replace('\\', '')
tmp = re.findall('accounts\s*?=\s*?\'(.*?)\'', passport_logincheck_response)
account = tmp[0]
jump_url += '?'
tmp = re.findall('href\s*?\+=\s*?"(.*?)"', passport_logincheck_response)
jump_url += tmp[0]
jump_url += account
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t go to jump page.')
return -1
# 错误处理
try:
tmp = re.findall('err_no=([-]?\d*)', jump_url)
errno = tmp[0]
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t get check login error number.')
return -1
if errno == '3' or errno == '6' or errno == '200010':
# 验证码错误,需要重新输入
pass
elif errno == '257':
# 未输入验证码或者首次登陆需要获取验证码
tmp = re.findall('&codeString=([A-Za-z0-9]*?)&userName', passport_logincheck_response)
if len(tmp) > 0:
self.codestring = tmp[0]
else:
utils.show_msg('错误:无法获取codeString信息.')
return -1
# 获取当前验证码
result = self.get_verifycode()
if result is False:
# 如果为网页模式则返回False
utils.show_msg('错误:登陆错误,请重新尝试,错误代码:' + errno + ',错误信息:' + errmsg.get_login_errmsg(errno))
return 2
else:
# 如果为命令行模式则直接进入下一次循环
self.verifycode = result
continue
elif errno == '0' or errno == '18' or errno == '400032' or errno == '400034' or errno == '400037' or errno == '400401':
self.login_success()
return 1
elif errno == '120019':
utils.show_msg('错误:%s,短时间密码错误次数过多, 请先通过 passport.baidu.com 解除锁定' % errno)
return -1
elif errno == '120021':
# 用户需要外部认证(邮箱)
self.auth_token = re.findall('authtoken=([^&]+)', passport_logincheck_response)[0]
self.loginproxy_url = re.findall('loginproxy=([^&]+)', passport_logincheck_response)[0]
responese = self.get_response('https://passport.baidu.com/v2/sapi/authwidgetverify' +
'?authtoken=' + self.auth_token +
'&type=email' +
'&apiver=v3' +
'&action=send' +
'&vcode=' +
'&questionAndAnswer=' +
'&needsid=' +
'&rsakey=' +
'&countrycode=' +
'&subpro=' +
'&callback=' +
'&tpl=mn' +
'&u=https://www.baidu.com/'
)
responese = json.loads(responese)
if responese['errmsg'] is None:
if self.webserver:
# 如果是网页模式,直接返回3
self.mail_verify_mode = True
return 3
else:
self.check_mail_verify_code(input('请输入邮箱验证码:'))
else:
utils.show_msg('错误:发送安全验证请求失败')
return -1
utils.show_msg('错误:登陆错误,请重新尝试,错误代码:' + errno + ',错误信息:' + errmsg.get_login_errmsg(errno))
retry += 1
utils.show_msg('错误:超出最大重试次数:' + str(max_retry_times))
return -1
def check_mail_verify_code(self, vcode):
'''
验证邮箱验证码
:param vcode: 邮箱验证码
:return: True为成功,False为失败或发生错误
'''
responese = self.get_response('https://passport.baidu.com/v2/sapi/authwidgetverify' +
'?authtoken=' + self.auth_token +
'&type=email' +
'&apiver=v3' +
'&action=check' +
'&vcode=' + str(vcode) +
'&questionAndAnswer=' +
'&needsid=' +
'&rsakey=' +
'&countrycode=' +
'&subpro=' +
'&callback=')
responese = json.loads(responese)
if responese['errno'] == 110000:
loginproxy_resp = self.get_response(urllib.unquote(self.loginproxy_url.decode()))
self.mail_verify_mode = False
return True
else:
utils.show_msg('错误:邮箱验证码错误' + responese['errno'])
return False
def login_success(self):
'''
登陆成功后执行,并设置logined状态
'''
self.get_response(disk_home_url)
self.save_cookies()
self.logined = True
def logout(self):
'''
退出登陆
:returns: True为成功,False为失败或发生错误
'''
#passport_logout_response = self.get_response(logout_url)
self.session.cookies = cookielib.CookieJar()
response = self.get_response(logout_url)
check_logout = re.findall('login-main', response)
if len(check_logout) > 0:
self.logined = False
self.remove_cookies()
return True
else:
return False
def save_cookies(self):
'''
保存cookie
:returns: True为成功,False为失败或发生错误
'''
try:
if os.path.isfile('cookie.list'):
os.remove('cookie.list')
fd = open('cookie.list', "wb+")
cookie_list = requests.utils.dict_from_cookiejar(self.session.cookies)
fd.write(json.dumps(cookie_list))
fd.close()
return True
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t save cookie list file.')
return False
def get_cookies(self):
'''
读取cookie
:returns: 返回值直接赋值给requests.session(),None为无cookie
'''
response = self.get_response(home_url)
if response is False:
return False
try:
if os.path.isfile('cookie.list'):
fd = open('cookie.list', "rb+")
else:
return None
cookie = fd.read()
fd.close()
if cookie == '':
return None
else:
return requests.utils.cookiejar_from_dict(json.loads(cookie))
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Can\'t get cookie list file.')
return False
def remove_cookies(self):
'''
删除cookie
'''
if os.path.exists('cookie.list'):
os.remove('cookie.list')
def do_pan_api(self, api, args):
'''
执行百度网盘api
:param api: 需要执行的api
:param args: string格式参数
:returns: 结果True or False
'''
api_url = pan_api_url + api + '?'
api_url += 'channel=chunlei&clienttype=0&web=1&t=%s' % utils.get_time()
api_url += '&bdstoken=' + self.token
for arg in args:
api_url += '&%s=%s' % (arg, args[arg])
pan_api_response = self.get_response(api_url)
json = pan_api_response
try:
json = eval(json)
errno = str(json['errno'])
if errno == '0':
return json['list']
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg("错误:Can't get pan api:" + api + " response json.")
return False
# 错误处理
if errno == '-6':
# cookie失效
self.logined = False
self.remove_cookies()
utils.show_msg('错误:cookies已失效,请刷新页面重新登陆')
else:
utils.show_msg('错误:执行百度云api:' + api + '时出错,错误代码:' + errno + ',错误信息:' + errmsg.get_errmsg_by_errno(errno))
return False
def get_list(self, dir, page=None, page_size=None, order='name', desc='1'):
'''
获取目录列表,默认全部获取
注意输入必须全是string格式
:param dir:目录路径
:param page:第几页
:param page_size:每页几条记录,默认20
:param order: 排序字段
可选:time 修改时间
name 文件名
size 大小,注意目录无大小
:param desc:1为降序,0为升序,默认为降序
:returns: dict格式文件信息,server_filename和path为unicode编码,错误返回False
'''
args = {
"_": utils.get_time(),
"dir": urllib.quote(dir),
"order": order,
"desc" : desc,
}
if page is not None:
args['page'] = page
if page_size is not None:
args['num'] = page_size
result = self.do_pan_api('list', args)
if result != False:
for file in result:
file['server_filename'] = eval('u\'' + file['server_filename'] + '\'')
file['path'] = eval('u\'' + file['path'] + '\'.replace(\'\\\\\',\'\')')
self.file_list[dir] = result
return result
def get_download_url(self, dir, link):
'''
获取下载链接
:param dir: 目录
:returns: string格式下载链接
'''
if link == '0':
url = self.get_baiducloudclient_url(dir)
else:
url = pcs_rest_url
url += '?method=%s&app_id=%s&path=%s' % ('download', '250528', urllib.quote(dir))
return url
def get_file_size(self, url):
'''
获取文件大小
:param url: 文件链接
:return: 文件大小,错误返回False
'''
headers = {
'Range': 'bytes=0-4'
}
try:
response = self.session.get(url, headers=headers)
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Get file size failed.url %s.' % url)
return False
content_range = response.headers['content-range']
size = int(re.match('bytes 0-4/(\d+)', content_range).group(1))
return size
def check_file(self, dir, file_name):
'''
检查在已缓存文件list中是否存在文件
:param dir: 路径,不包含文件名,结尾无/
:param file_name: 文件名
:return: json格式文件信息,server_filename和path为unicode编码,错误返回False
'''
try:
for file in self.file_list[dir]:
if file['server_filename'] == file_name:
return file
except Exception:
utils.show_msg('错误:Check file failed.')
return False
def get_baiducloudclient_url(self, dir):
headers = {
'User-Agent': 'netdisk;4.6.2.0;PC;PC-Windows;10.0.10240;WindowsBaiduYunGuanJia'
}
# 申请加速,若失败自动获取的是普通链接
url = 'https://pan.baidu.com/rest/2.0/membership/speeds/freqctrl'
postdata = {
'method': 'consume'
}
'''
get为获取是否可以使用状态,freq_cnt=1时可以使用
consume为获取链接
'''
try:
responese = self.get_response(url, post_data=postdata, headers=headers)
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Get file size failed.url %s.' % url)
return False
# 获取链接
url = 'https://d.pcs.baidu.com/rest/2.0/pcs/file?time=' + utils.get_time() + '&clienttype=21&version=2.1.0&vip=0&method=locatedownload&app_id=250528&esl=1&ver=4.0&dtype=1&ehps=1&check_blue=1&path=' + dir + '&err_ver=1.0'
try:
response = self.get_response(url, headers=headers)
except Exception:
utils.show_msg(traceback.print_exc())
utils.show_msg('错误:Get file size failed.url %s.' % url)
return False
# 获取第一个url
url_info = json.loads(response)
return url_info['urls'][0]['url'] | zh | 0.753908 | # coding=utf-8 # Crypto代替rsa参考:https://git.wageningenur.nl/aflit001/ibrowser/commit/1b2437fe81af9a8511bf847c1ada69a9de8df893?view=parallel&w=1 # 兼容2.7和3.x 这是一个百度云引擎模块 目前已经实现功能: 登陆 退出登陆 自动登录 目标功能: 获取文件目录 获取下载链接 获取文件大小 # 验证码 初始化百度云引擎 私有变量self.window为当前的WindowEngine句柄 私有变量self.cj为cookie 私有变量self.opener为urllib2的opener 私有变量self.headers为自定义user-agent :param window:当前WindowEngine句柄,默认为None :param user_agent: 默认为win10 chrome # 用于网页模式 # 读取cookie 获取http返回内容 :param url: 地址 :param post_data: post数据,默认为None :param html: 是否请求的是html数据 :param headers: 可以自定义请求头 :returns: http返回内容,错误返回'' 检查登陆信息,获取token和codestring 此函数不再使用 :param username: 用户名 :returns: 正常返回值为string格式值为codestring 0为失败,None为发生错误 # 获取dv # logincheck 通过getapi获取token :returns: False为发生错误 参考自https://github.com/ly0/baidupcsapi/blob/master/baidupcsapi/api.py 根据项目部分修改 命令行模式获取用户输入的验证码,网页服务器模式获取验证码地址 :returns: 若开启了网页服务器返回值为False,否则返回用户输入的验证码 # 验证码 # 兼容3.x 进行登陆 可能会弹出窗口输入验证码 :param username: 用户名 :param password:密码 :param verify: 验证码,默认为空 :returns: -1为错误,1为成功,2为需要验证码,3为需要邮箱验证码 # 如果是验证邮箱mode self.codestring = self.check_login(username) if self.codestring == 0: return False if self.codestring is None: utils.show_msg('错误:codestring is None.') return False # 如果为网页模式则返回2 # 此处参考自https://github.com/ly0/baidupcsapi/blob/master/baidupcsapi/api.py # 以上为参考,变量、函数名、使用函数根据项目需求略微修改 #计算gid #"idc": "", # 验证码 gid 原始代码: return "xxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, function(e) { var t = 16 * Math.random() | 0 , n = "x" == e ? 
t : 3 & t | 8; return n.toString(16) }).toUpperCase() 分析后得出此代码为:x填充16进制随机数,y填充大于等于8的16进制随机数 # 错误处理 # 验证码错误,需要重新输入 # 未输入验证码或者首次登陆需要获取验证码 # 获取当前验证码 # 如果为网页模式则返回False # 如果为命令行模式则直接进入下一次循环 # 用户需要外部认证(邮箱) # 如果是网页模式,直接返回3 验证邮箱验证码 :param vcode: 邮箱验证码 :return: True为成功,False为失败或发生错误 登陆成功后执行,并设置logined状态 退出登陆 :returns: True为成功,False为失败或发生错误 #passport_logout_response = self.get_response(logout_url) 保存cookie :returns: True为成功,False为失败或发生错误 读取cookie :returns: 返回值直接赋值给requests.session(),None为无cookie 删除cookie 执行百度网盘api :param api: 需要执行的api :param args: string格式参数 :returns: 结果True or False # 错误处理 # cookie失效 获取目录列表,默认全部获取 注意输入必须全是string格式 :param dir:目录路径 :param page:第几页 :param page_size:每页几条记录,默认20 :param order: 排序字段 可选:time 修改时间 name 文件名 size 大小,注意目录无大小 :param desc:1为降序,0为升序,默认为降序 :returns: dict格式文件信息,server_filename和path为unicode编码,错误返回False 获取下载链接 :param dir: 目录 :returns: string格式下载链接 获取文件大小 :param url: 文件链接 :return: 文件大小,错误返回False 检查在已缓存文件list中是否存在文件 :param dir: 路径,不包含文件名,结尾无/ :param file_name: 文件名 :return: json格式文件信息,server_filename和path为unicode编码,错误返回False # 申请加速,若失败自动获取的是普通链接 get为获取是否可以使用状态,freq_cnt=1时可以使用 consume为获取链接 # 获取链接 # 获取第一个url | 2.14076 | 2 |
nsd1802/python/day23/mysite/polls/views.py | MrWangwf/nsd1806 | 0 | 6618404 | from django.shortcuts import render, HttpResponse, get_object_or_404, redirect
from .models import Question
def index(request):
    """Render the polls front page with the five most recently published questions."""
    # Template lives under polls/templates/polls/index.html.
    latest_questions = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'polls/index.html', {'qlist': latest_questions})
def hello(request):
    """Render the static polls hello page."""
    return render(request, 'polls/hello.html')
def detail(request, question_id):
    """Render the detail page for one question, or 404 when it does not exist."""
    q = get_object_or_404(Question, pk=question_id)
    # Context keys become template variable names; values are what the
    # template renders.
    return render(request, 'polls/detail.html', {'q': q})
def result(request, question_id):
    """Render the results page for one question, or 404 when it does not exist."""
    # get_object_or_404 keeps this consistent with detail(): a missing pk
    # yields a 404 instead of an unhandled DoesNotExist (500).
    q = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/result.html', {'q': q})
def vote(request, question_id):
    """Record a vote for the choice posted as request.POST['choice'], then
    redirect to the question's results page."""
    q = Question.objects.get(id=question_id)
    c = q.choice_set.get(id=request.POST['choice'])
    # NOTE(review): read-modify-write is racy under concurrent votes; an
    # atomic update via django.db.models.F('votes') + 1 would be safer -
    # confirm before changing.
    c.votes += 1
    c.save()
    # Redirect to the URL registered as name='result' in urls.py.
    return redirect('result', question_id=question_id)
# Login page.
def home(request):
    """Render the login form page."""
    return render(request, 'polls/home.html')
# Check whether the submitted credentials are valid.
def login(request):
    """Validate the posted username/password; on success mark the session as
    logged in and redirect to the index, otherwise back to the login page."""
    username = request.POST.get('username')
    password = request.POST.get('pwd')
    # SECURITY NOTE(review): credentials are hard-coded and compared in
    # plain text - demo-only; use django.contrib.auth for anything real.
    if username == 'bob' and password == '<PASSWORD>':
        request.session['IS_LOGIN'] = True
        return redirect('index')
    return redirect('home')
# Only logged-in users may view this; anonymous visitors go to the login page.
def protected(request):
    """Serve protected content, redirecting visitors who are not logged in."""
    if request.session.get('IS_LOGIN', False):
        return HttpResponse('OK')
    return redirect('home')
def mytest(request):
    """Render the template-feature demo page with sample context data."""
    context = {
        'i': 10,
        'alist': ['bob', 'alice'],
        'adict': {'name': 'tom', 'age': 25},
    }
    return render(request, 'polls/mytest.html', context)
| from django.shortcuts import render, HttpResponse, get_object_or_404, redirect
from .models import Question
def index(request):
# 把polls/index.html网页发送给用户,存在polls/templates下
qlist = Question.objects.order_by('-pub_date')[:5]
context = {'qlist': qlist}
return render(request, 'polls/index.html', context)
def hello(request):
return render(request, 'polls/hello.html')
def detail(request, question_id):
# return HttpResponse('你正在查看第%s个问题' % question_id)
# q = Question.objects.get(id=question_id)
q = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'q': q})
# 字典的key相当于变量名,value相当于是变量值,传递给模板文件
def result(request, question_id):
# return HttpResponse('你正在查看第%s个问题的结果' % question_id)
q = Question.objects.get(id=question_id)
return render(request, 'polls/result.html', {'q': q})
def vote(request, question_id):
# return HttpResponse('你正在为第%s个问题投票' % question_id)
q = Question.objects.get(id=question_id)
c = q.choice_set.get(id=request.POST['choice'])
c.votes += 1
c.save()
# 重定向到urls.py中定义的name='result'那个网址
return redirect('result', question_id=question_id)
# 登陆面面
def home(request):
    """Render the home (login) page."""
    return render(request, 'polls/home.html')
# Verify whether the user's login succeeded
def login(request):
    """Check the submitted credentials and mark the session as logged in."""
    username = request.POST.get('username')
    password = request.POST.get('pwd')
    # NOTE(review): hard-coded plaintext credential check -- demo only;
    # production code should use django.contrib.auth instead.
    if username == 'bob' and password == '<PASSWORD>':
        request.session['IS_LOGIN'] = True
        return redirect('index')
    return redirect('home')
# Accessible to logged-in users; otherwise redirect to the login page
def protected(request):
    """Serve 'OK' to logged-in users; bounce everyone else to the login page."""
    if not request.session.get('IS_LOGIN', False):
        return redirect('home')
    return HttpResponse('OK')
def mytest(request):
    """Render the template demo page with sample int/list/dict context."""
    context = {
        'i': 10,
        'alist': ['bob', 'alice'],
        'adict': {'name': 'tom', 'age': 25},
    }
    return render(request, 'polls/mytest.html', context)
| zh | 0.634899 | # 把polls/index.html网页发送给用户,存在polls/templates下 # return HttpResponse('你正在查看第%s个问题' % question_id) # q = Question.objects.get(id=question_id) # 字典的key相当于变量名,value相当于是变量值,传递给模板文件 # return HttpResponse('你正在查看第%s个问题的结果' % question_id) # return HttpResponse('你正在为第%s个问题投票' % question_id) # 重定向到urls.py中定义的name='result'那个网址 # 登陆面面 # 验证用户是否登陆成功 # 已登陆用户可以访问,如果没有登陆,重定向到登陆页面 | 2.186841 | 2 |
Day-103/function-6.py | arvimal/100DaysofCode-Python | 1 | 6618405 | <reponame>arvimal/100DaysofCode-Python
#!/usr/bin/env python3
# Return your age in dog years :)
# Assuming a human year is equivalent to seven dog years.
def dog_years(name, age):
    """Return a sentence giving *age* converted to dog years (factor 7)."""
    dog_age = age * 7
    return "{}, you are {} years old in dog years".format(name, dog_age)
print(dog_years("Lola", 16))
print(dog_years("Baby", 0))
| #!/usr/bin/env python3
# Return your age in dog years :)
# Assuming a human year is equivalent to seven dog years.
def dog_years(name, age):
return "{}, you are {} years old in dog years".format(name, age * 7)
print(dog_years("Lola", 16))
print(dog_years("Baby", 0)) | en | 0.795519 | #!/usr/bin/env python3 # Return your age in dog years :) # Assuming a human year is equivalent to seven dog years. | 4.046825 | 4 |
src/backend.py | zacharyjhumphrey/algorithms-sorting-project | 0 | 6618406 | <gh_stars>0
from typing import Callable
# random.sample(range(10, 30), 5)
MIN_RANDOM_VALUE_RANGE = -1
MAX_RANDOM_VALUE_RANGE = -1
DEFAULT_ARRAY_SIZE = -1
def pancake_sort(arr: list[int]) -> list[int]:
    """Sort ``arr`` in place using pancake sort and return it.

    For each shrinking prefix boundary, the current maximum is flipped
    to the front of the list and then flipped into its final slot.

    Args:
        arr (list[int]): list to sort (mutated in place).

    Returns:
        list[int]: the same list, sorted ascending.
    """
    if len(arr) <= 1:
        return arr
    boundary = len(arr)
    while boundary > 1:
        top = max(range(boundary), key=arr.__getitem__)
        if top + 1 != boundary:
            # Bring the maximum to the front (skip if already there) ...
            if top != 0:
                arr[:top + 1] = arr[top::-1]
            # ... then flip it to the end of the unsorted prefix.
            arr[:boundary] = arr[boundary - 1::-1]
        boundary -= 1
    return arr
# https://rosettacode.org/wiki/Sorting_algorithms/Pancake_sort#Python
def bubble_sort(arr: list[int]) -> list[int]:
    """Sort ``arr`` in place with bubble sort and return it.

    Repeatedly sweeps the list, swapping adjacent out-of-order pairs,
    until a full sweep makes no swap.

    Args:
        arr (list[int]): list to sort (mutated in place).

    Returns:
        list[int]: the same list, sorted ascending.
    """
    changed = True
    while changed:
        changed = False
        # Bug fix: the original used Python 2's ``xrange``, which is a
        # NameError on Python 3 (the file's list[int] hints require 3.9+).
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                changed = True
    return arr
# https://rosettacode.org/wiki/Sorting_algorithms/Bubble_sort#Python
def quick_sort(arr: list[int]) -> list[int]:
    """Return a new ascending-sorted copy of ``arr`` using quicksort.

    The first element is the pivot; strictly smaller values go to the
    left partition, the rest to the right, both sorted recursively.
    The input list itself is not modified.
    """
    if len(arr) <= 1:
        return arr
    pivot = arr[0]
    rest = arr[1:]
    below = [v for v in rest if v < pivot]
    above = [v for v in rest if v >= pivot]
    return quick_sort(below) + [pivot] + quick_sort(above)
# https://rosettacode.org/wiki/Sorting_algorithms/Quicksort#Python
def merge(left, right):
    """Merge two ascending-sorted lists into a new ascending-sorted list.

    Ties are resolved in favour of ``left`` (the merge is stable).
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # Flip this comparison to reverse the sort direction.
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these slices is non-empty.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def merge_sort(arr: list[int]) -> list[int]:
    """Return a new ascending-sorted copy of ``arr`` via merge sort.

    Splits at the midpoint, sorts both halves recursively and combines
    them with the module-level ``merge`` helper.
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    return list(merge(merge_sort(arr[:mid]), merge_sort(arr[mid:])))
# https://rosettacode.org/wiki/Sorting_algorithms/Merge_sort#Python
def time_sort(sorting_fn: Callable[[list[int]], list[int]], arr: list[int]) -> int:
    """Time a single run of ``sorting_fn`` on ``arr``.

    Replaces the previous ``return -1`` placeholder with a real
    implementation.

    Args:
        sorting_fn (Callable[[list[int]], list[int]]): sorting function to use
        arr (list[int]): array to sort (may be mutated by ``sorting_fn``)

    Returns:
        int: elapsed wall-clock time in nanoseconds
    """
    import time  # local import keeps the module's dependency block untouched

    start = time.perf_counter_ns()
    sorting_fn(arr)
    # perf_counter_ns is monotonic, so the difference is a valid duration.
    return time.perf_counter_ns() - start
def create_sorted_array_with_one_mistake(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
    """
    Make an array that has exactly one value out of place.

    example: [1, 2, 7, 3, 4] -- here 7 is out of place, but moving it to
    the end of the array easily sorts the array.

    TODO Make methods similar to this but with smaller elements

    Args:
        size (int, optional): size of the array. Defaults to DEFAULT_ARRAY_SIZE.

    Returns:
        list[int]: the generated array (placeholder: not implemented yet)
    """
    return []


def create_reversed_array(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
    """
    Create a list of numbers sorted in descending order.

    Args:
        size (int): size of the array

    Returns:
        list[int]: the generated array (placeholder: not implemented yet)
    """
    return []


# NOTE(review): the three generators below are unimplemented placeholders
# as well -- they always return an empty list.
def create_sorted_array(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
    return []


def create_array_of_one_value(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
    return []


def create_random_array(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
    return []
# TODO Think of more cases that we should look for when sorting arrays | from typing import Callable
# random.sample(range(10, 30), 5)
MIN_RANDOM_VALUE_RANGE = -1
MAX_RANDOM_VALUE_RANGE = -1
DEFAULT_ARRAY_SIZE = -1
def pancake_sort(arr: list[int]) -> list[int]:
"""
pancake_sort _summary_
Args:
arr (list[int]): array to sort
Returns:
list[int]: sorted array
"""
if len(arr) <= 1:
return arr
for size in range(len(arr), 1, -1):
maxindex = max(range(size), key=arr.__getitem__)
if maxindex+1 != size:
# This indexed max needs moving
if maxindex != 0:
# Flip the max item to the left
arr[:maxindex+1] = reversed(arr[:maxindex+1])
# Flip it into its final position
arr[:size] = reversed(arr[:size])
return arr
# https://rosettacode.org/wiki/Sorting_algorithms/Pancake_sort#Python
def bubble_sort(arr: list[int]) -> list[int]:
"""
bubble_sort _summary_
Args:
arr (list[int]): _description_
Returns:
list[int]: _description_
"""
changed = True
while changed:
changed = False
for i in xrange(len(arr) - 1):
if arr[i] > arr[i+1]:
arr[i], arr[i+1] = arr[i+1], arr[i]
changed = True
return arr
# https://rosettacode.org/wiki/Sorting_algorithms/Bubble_sort#Python
def quick_sort(arr: list[int]) -> list[int]:
"""
quick_sort _summary_
Args:
arr (list[int]): array to sort
Returns:
list[int]: sorted array
"""
return (quick_sort([y for y in arr[1:] if y < arr[0]]) +
arr[:1] +
quick_sort([y for y in arr[1:] if y >= arr[0]])) if len(arr) > 1 else arr
# https://rosettacode.org/wiki/Sorting_algorithms/Quicksort#Python
def merge(left, right):
result = []
left_idx, right_idx = 0, 0
while left_idx < len(left) and right_idx < len(right):
# change the direction of this comparison to change the direction of the sort
if left[left_idx] <= right[right_idx]:
result.append(left[left_idx])
left_idx += 1
else:
result.append(right[right_idx])
right_idx += 1
if left_idx < len(left):
result.extend(left[left_idx:])
if right_idx < len(right):
result.extend(right[right_idx:])
return result
def merge_sort(arr: list[int]) -> list[int]:
"""
merge_sort _summary_
Args:
arr (list[int]): array to sort
Returns:
list[int]: sorted array
"""
if len(arr) <= 1:
return arr
middle = len(arr) // 2
left = arr[:middle]
right = arr[middle:]
left = merge_sort(left)
right = merge_sort(right)
return list(merge(left, right))
# https://rosettacode.org/wiki/Sorting_algorithms/Merge_sort#Python
def time_sort(sorting_fn: Callable[[list[int]], list[int]], arr: list[int]) -> int:
"""
time_sort takes a function and an array and returns how long it took
for that array to be sorted
Args:
sorting_fn (Callable[[list[int]], list[int]]): sorting function to use
arr (list[int]): array to sort
Returns:
int: amount of time it took to sort
"""
return -1
def create_sorted_array_with_one_mistake(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
"""
create_sorted_array_with_one_swap makes an array that only has one value out of place
example: [1, 2, 7, 3, 4]
in this case, 7 is out of place, but moving it to the end of the array will
easily sort the array
TODO Make methods similar to this but they have smaller elements or something
Args:
size (int, optional): size of the array . Defaults to DEFAULT_ARRAY_SIZE.
Returns:
list[int]: _description_
"""
return []
def create_reversed_array(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
"""
create_reversed_array creates a list of numbers that is sorted in descending order
Args:
size (int): size of the array
Returns:
list[int]: sorted array
"""
return []
def create_sorted_array(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
return []
def create_array_of_one_value(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
return []
def create_random_array(size: int = DEFAULT_ARRAY_SIZE) -> list[int]:
return []
# TODO Think of more cases that we should look for when sorting arrays | en | 0.624554 | # random.sample(range(10, 30), 5) pancake_sort _summary_ Args: arr (list[int]): array to sort Returns: list[int]: sorted array # This indexed max needs moving # Flip the max item to the left # Flip it into its final position # https://rosettacode.org/wiki/Sorting_algorithms/Pancake_sort#Python bubble_sort _summary_ Args: arr (list[int]): _description_ Returns: list[int]: _description_ # https://rosettacode.org/wiki/Sorting_algorithms/Bubble_sort#Python quick_sort _summary_ Args: arr (list[int]): array to sort Returns: list[int]: sorted array # https://rosettacode.org/wiki/Sorting_algorithms/Quicksort#Python # change the direction of this comparison to change the direction of the sort merge_sort _summary_ Args: arr (list[int]): array to sort Returns: list[int]: sorted array # https://rosettacode.org/wiki/Sorting_algorithms/Merge_sort#Python time_sort takes a function and an array and returns how long it took for that array to be sorted Args: sorting_fn (Callable[[list[int]], list[int]]): sorting function to use arr (list[int]): array to sort Returns: int: amount of time it took to sort create_sorted_array_with_one_swap makes an array that only has one value out of place example: [1, 2, 7, 3, 4] in this case, 7 is out of place, but moving it to the end of the array will easily sort the array TODO Make methods similar to this but they have smaller elements or something Args: size (int, optional): size of the array . Defaults to DEFAULT_ARRAY_SIZE. Returns: list[int]: _description_ create_reversed_array creates a list of numbers that is sorted in descending order Args: size (int): size of the array Returns: list[int]: sorted array # TODO Think of more cases that we should look for when sorting arrays | 4.050944 | 4 |
lib/root_lib.py | Almas-Ali/Almas | 0 | 6618407 | import getpass
import os
from src.defpsd import *
from lib.STD_lib import *
from lib.ali_lib import *
if __name__ == '__main__':
print('Use interpreter or get strick.')
exit()
psd = ''
def root_mode(pmt):
    """Handle a command line ``pmt`` that requires root ('sudo') rights.

    Prompts for the password on first use and caches it in the module
    global ``psd``; the command after 'sudo' is dispatched to STD_lib
    first and to ali_lib as a fallback.
    """
    global psd
    try:
        if psd == defpsd:
            # Password already entered earlier in this session.
            if pmt[0:4] == 'sudo':
                pmt = pmt[5:].strip()
                if pmt == '':
                    return None
                else:
                    # Try the standard command library first, fall back
                    # to the custom one.
                    # NOTE(review): bare except hides real errors here.
                    try:
                        STD_lib(pmt)
                    except:
                        ali_lib(pmt)
            else:
                print('almas: sudo not found.')
        else:
            # Not authenticated yet: ask for the password once.
            psd = getpass.getpass(prompt='Password: ', stream=None)
            if psd == defpsd:
                if pmt[0:4] == 'sudo':
                    pmt = pmt[5:].strip()
                    # Same dispatch as above, but failures are swallowed
                    # entirely in this branch.
                    try:
                        STD_lib(pmt)
                    except:
                        try:
                            ali_lib(pmt)
                        except:
                            pass
                else:
                    print('almas: sudo not found.')
            else:
                print('Wrong Password !')
    except Exception as e:
        print(e)
| import getpass
import os
from src.defpsd import *
from lib.STD_lib import *
from lib.ali_lib import *
if __name__ == '__main__':
print('Use interpreter or get strick.')
exit()
psd = ''
def root_mode(pmt):
global psd
try:
if psd == defpsd:
if pmt[0:4] == 'sudo':
pmt = pmt[5:].strip()
if pmt == '':
return None
else:
try:
STD_lib(pmt)
except:
ali_lib(pmt)
else:
print('almas: sudo not found.')
else:
psd = getpass.getpass(prompt='Password: ', stream=None)
if psd == defpsd:
if pmt[0:4] == 'sudo':
pmt = pmt[5:].strip()
try:
STD_lib(pmt)
except:
try:
ali_lib(pmt)
except:
pass
else:
print('almas: sudo not found.')
else:
print('Wrong Password !')
except Exception as e:
print(e)
| none | 1 | 2.412795 | 2 | |
bdg_distractor_generation.py | FawziElNaggar/Question-MCQ-_Answer_Generation | 0 | 6618408 | <reponame>FawziElNaggar/Question-MCQ-_Answer_Generation
# -*- coding: utf-8 -*-
"""BDG(Distractor Generation).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kHLsn3aUtsTJ_n4EgK_et2A1_Z0Q8dpQ
"""
# Notebook export: set up BLEU evaluation, download the BDG checkpoints
# and load the three distractor-generation models plus the MCQ ranker.
from nlgeval import NLGEval
# BLEU-only evaluation; every other metric is switched off.
# NOTE(review): 'EmbeddingAverageCosineSimilairty' is nlg-eval's own
# (misspelled) metric name -- do not "fix" the spelling.
nlgeval = NLGEval(
    metrics_to_omit=['METEOR', 'EmbeddingAverageCosineSimilairty', 'SkipThoughtCS', 'VectorExtremaCosineSimilarity',
                     'GreedyMatchingScore', 'CIDEr'])
# The '!' lines are IPython/Colab shell magics (not valid plain Python);
# they fetch the pretrained BDG model checkpoints.
!wget https://github.com/voidful/BDG/releases/download/v2.0/BDG.pt
!wget https://github.com/voidful/BDG/releases/download/v2.0/BDG_ANPM.pt
!wget https://github.com/voidful/BDG/releases/download/v2.0/BDG_PM.pt
from transformers import RobertaTokenizer
from transformers import RobertaForMultipleChoice
import torch
from torch.distributions import Categorical
import itertools as it
import nlp2go
# RACE-finetuned RoBERTa used to score how "confusing" an option set is.
tokenizer = RobertaTokenizer.from_pretrained("LIAMF-USP/roberta-large-finetuned-race")
model = RobertaForMultipleChoice.from_pretrained("LIAMF-USP/roberta-large-finetuned-race")
model.eval()
model.to("cuda")
# Three distractor generators: base, PM and ANPM variants.
dg_model = nlp2go.Model('./BDG.pt')
dg_model_pm = nlp2go.Model('./BDG_PM.pt')
dg_model_both = nlp2go.Model('./BDG_ANPM.pt')
def get_all_Options(context, question, answer):
    """Collect distractor candidates from all three BDG model variants.

    Returns the three beam-search results of each model (nine strings
    total), ordered: base model, PM model, ANPM model.
    """
    d_input = context + '</s>' + question + '</s>' + answer
    candidates = []
    for generator in (dg_model, dg_model_pm, dg_model_both):
        candidates.extend(generator.predict(d_input, decodenum=3)['result'])
    return candidates
def selection(context, question, answer, all_options):
    """Pick the best 3 distractors out of ``all_options``.

    Every 3-candidate combination is first filtered for diversity
    (pairwise BLEU-1 must stay <= 0.5 after punctuation spacing), then
    scored by the entropy of the MCQ RoBERTa model over the resulting
    option set; the highest-entropy (most confusing) combination wins.
    """
    # [best entropy so far, corresponding options incl. the answer]
    max_combin = [0, []]
    for combin in set(it.combinations(all_options, 3)):
        options = list(combin) + [answer]
        keep = True
        for i in set(it.combinations(options, 2)):
            # Surround non-alphabetic chars with spaces so BLEU
            # tokenizes punctuation separately.
            a = "".join([char if char.isalpha() or char == " " else " " + char + " " for char in i[0]])
            b = "".join([char if char.isalpha() or char == " " else " " + char + " " for char in i[1]])
            metrics_dict = nlgeval.compute_individual_metrics([a], b)
            # Reject combinations containing two near-identical options.
            if metrics_dict['Bleu_1'] > 0.5:
                keep = False
                break
        if keep:
            prompt = context + tokenizer.sep_token + question
            encoding_input = []
            for choice in options:
                encoding_input.append([prompt, choice])
            # NOTE(review): the answer is appended a second time here
            # (options already ends with it) -- confirm this is intended.
            encoding_input.append([prompt, answer])
            labels = torch.tensor(len(options) - 1).unsqueeze(0)
            encoding = tokenizer(encoding_input, return_tensors='pt', padding=True, truncation='only_first')
            outputs = model(**{k: v.unsqueeze(0).to('cuda') for k, v in encoding.items()},
                            labels=labels.to('cuda'))  # batch size is 1
            # Higher entropy == the MCQ model is less certain == harder item.
            entropy = Categorical(probs=torch.softmax(outputs.logits, -1)).entropy().tolist()[0]
            if entropy >= max_combin[0]:
                max_combin = [entropy, options]
    # Drop the trailing answer; return only the three distractors.
    return max_combin[1][:-1]
| # -*- coding: utf-8 -*-
"""BDG(Distractor Generation).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kHLsn3aUtsTJ_n4EgK_et2A1_Z0Q8dpQ
"""
from nlgeval import NLGEval
nlgeval = NLGEval(
metrics_to_omit=['METEOR', 'EmbeddingAverageCosineSimilairty', 'SkipThoughtCS', 'VectorExtremaCosineSimilarity',
'GreedyMatchingScore', 'CIDEr'])
!wget https://github.com/voidful/BDG/releases/download/v2.0/BDG.pt
!wget https://github.com/voidful/BDG/releases/download/v2.0/BDG_ANPM.pt
!wget https://github.com/voidful/BDG/releases/download/v2.0/BDG_PM.pt
from transformers import RobertaTokenizer
from transformers import RobertaForMultipleChoice
import torch
from torch.distributions import Categorical
import itertools as it
import nlp2go
tokenizer = RobertaTokenizer.from_pretrained("LIAMF-USP/roberta-large-finetuned-race")
model = RobertaForMultipleChoice.from_pretrained("LIAMF-USP/roberta-large-finetuned-race")
model.eval()
model.to("cuda")
dg_model = nlp2go.Model('./BDG.pt')
dg_model_pm = nlp2go.Model('./BDG_PM.pt')
dg_model_both = nlp2go.Model('./BDG_ANPM.pt')
def get_all_Options(context, question, answer):
d_input = context + '</s>' + question + '</s>' + answer
choices = dg_model.predict(d_input, decodenum=3)['result']
choices_pm = dg_model_pm.predict(d_input, decodenum=3)['result']
choices_both = dg_model_both.predict(d_input, decodenum=3)['result']
all_options = choices + choices_pm + choices_both
return all_options
def selection(context, question, answer, all_options):
max_combin = [0, []]
for combin in set(it.combinations(all_options, 3)):
options = list(combin) + [answer]
keep = True
for i in set(it.combinations(options, 2)):
a = "".join([char if char.isalpha() or char == " " else " " + char + " " for char in i[0]])
b = "".join([char if char.isalpha() or char == " " else " " + char + " " for char in i[1]])
metrics_dict = nlgeval.compute_individual_metrics([a], b)
if metrics_dict['Bleu_1'] > 0.5:
keep = False
break
if keep:
prompt = context + tokenizer.sep_token + question
encoding_input = []
for choice in options:
encoding_input.append([prompt, choice])
encoding_input.append([prompt, answer])
labels = torch.tensor(len(options) - 1).unsqueeze(0)
encoding = tokenizer(encoding_input, return_tensors='pt', padding=True, truncation='only_first')
outputs = model(**{k: v.unsqueeze(0).to('cuda') for k, v in encoding.items()},
labels=labels.to('cuda')) # batch size is 1
entropy = Categorical(probs=torch.softmax(outputs.logits, -1)).entropy().tolist()[0]
if entropy >= max_combin[0]:
max_combin = [entropy, options]
return max_combin[1][:-1] | en | 0.89169 | # -*- coding: utf-8 -*- BDG(Distractor Generation).ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1kHLsn3aUtsTJ_n4EgK_et2A1_Z0Q8dpQ # batch size is 1 | 1.816216 | 2 |
link_to_the_past/timespec.py | zsquareplusc/lttp-backup | 0 | 6618409 | <filename>link_to_the_past/timespec.py
#!/usr/bin/env python3
# encoding: utf-8
#
# (C) 2012-2016 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Link To The Past - a backup tool
Parse time specification for locating old backups.
"""
import datetime
from .error import BackupException
def get_limit(timespec, now=None):
    """\
    Parse a string describing a time difference from now.

    Returns a point in time right after the desired one, intended for
    comparisons like ``if some_time < limit: ...`` ("it is older").
    The ``now`` parameter exists only for testing.

    >>> today = datetime.datetime(2012, 4, 1, 16, 55)
    >>> get_limit('1 hour ago', today)
    datetime.datetime(2012, 4, 1, 15, 55)
    >>> get_limit('yesterday', today)
    datetime.datetime(2012, 4, 1, 0, 0)
    >>> get_limit('2 days ago', today)
    datetime.datetime(2012, 3, 31, 0, 0)
    >>> get_limit('2 weeks ago', today)
    datetime.datetime(2012, 3, 19, 0, 0)
    >>> get_limit('1 month ago', today)  # XXX not yet accurate
    datetime.datetime(2012, 3, 2, 0, 0)
    >>> get_limit('1 year ago', today)   # XXX not yet accurate
    datetime.datetime(2011, 4, 3, 0, 0)
    """
    if now is None:
        now = datetime.datetime.now()
    midnight = datetime.datetime(now.year, now.month, now.day)
    if timespec == 'yesterday':
        return midnight
    if not timespec.endswith('ago'):
        raise ValueError('do not recognize time specification: {!r}'.format(timespec))
    amount, unit, _ = timespec.split()
    if unit in ('hour', 'hours'):
        # Hours keep the time of day; all other units snap to midnight.
        return now - datetime.timedelta(seconds=3600 * int(amount))
    days_per_unit = {'day': 1, 'days': 1,
                     'week': 7, 'weeks': 7,
                     'month': 31, 'months': 31,   # XXX not exact months
                     'year': 365, 'years': 365}   # XXX not exact years
    factor = days_per_unit.get(unit)
    if factor is None:
        raise ValueError('do not recognize unit (2nd word) in: {!r}'.format(timespec))
    return midnight - datetime.timedelta(days=factor * int(amount) - 1)
def get_by_timespec(backups, timespec):
    """\
    Pick one backup name out of ``backups``.

    backups is a list of names of backups (strings representing dates,
    formatted %Y-%m-%d_%H%M%S)
    timespec is a string describing a date, time difference or order
    """
    backups.sort()
    # by order
    if timespec is None or timespec == 'last':
        return backups[-1]
    elif timespec == 'previous':
        return backups[-2]
    elif timespec == 'first':
        return backups[0]
    elif timespec.startswith('-'):
        # negative index counted back from the newest backup
        # NOTE(review): n == -len(backups) (the oldest entry) is excluded
        # by the strict '<'; out-of-range values fall through to the
        # final BackupException -- confirm both are intended.
        n = int(timespec)
        if -len(backups) < n < 0:
            return backups[n]
    else:
        # by absolute date, just compare strings
        latest_match = None
        for backup in backups:
            if backup.startswith(timespec):
                latest_match = backup
        if latest_match is not None:
            return latest_match
        # by time delta description (see get_limit)
        limit = get_limit(timespec)
        for backup in backups:
            t = datetime.datetime.strptime(backup, '%Y-%m-%d_%H%M%S')
            if t < limit:
                latest_match = backup
        if latest_match is not None:
            return latest_match
    raise BackupException('No backup found matching {!r}'.format(timespec))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import doctest
doctest.testmod()
| <filename>link_to_the_past/timespec.py
#!/usr/bin/env python3
# encoding: utf-8
#
# (C) 2012-2016 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Link To The Past - a backup tool
Parse time specification for locating old backups.
"""
import datetime
from .error import BackupException
def get_limit(timespec, now=None):
"""\
Parse a string describing a time difference from now.
It returns a time right after the desired point. The idea is to use it this
way:
if some_time < limit:
print "It is older"
The ``now`` parameter is there only for testing purposes.
>>> today = datetime.datetime(2012, 4, 1, 16, 55)
>>> get_limit('1 hour ago', today)
datetime.datetime(2012, 4, 1, 15, 55)
>>> get_limit('yesterday', today)
datetime.datetime(2012, 4, 1, 0, 0)
>>> get_limit('1 day ago', today)
datetime.datetime(2012, 4, 1, 0, 0)
>>> get_limit('2 days ago', today)
datetime.datetime(2012, 3, 31, 0, 0)
>>> get_limit('2 weeks ago', today)
datetime.datetime(2012, 3, 19, 0, 0)
>>> get_limit('1 month ago', today) # XXX not yet accurate
datetime.datetime(2012, 3, 2, 0, 0)
>>> get_limit('1 year ago', today) # XXX not yet accurate
datetime.datetime(2011, 4, 3, 0, 0)
"""
if now is None:
now = datetime.datetime.now()
if timespec.endswith('ago'):
amount, unit, ago = timespec.split()
if unit in ('hour', 'hours'):
delta = datetime.timedelta(seconds=3600 * int(amount))
return now - delta
elif unit in ('day', 'days'):
delta = datetime.timedelta(days=int(amount) - 1)
elif unit in ('week', 'weeks'):
delta = datetime.timedelta(days=7 * int(amount) - 1)
elif unit in ('month', 'months'):
delta = datetime.timedelta(days=31 * int(amount) - 1) # XXX not exact months
elif unit in ('year', 'years'):
delta = datetime.timedelta(days=365 * int(amount) - 1) # XXX not exact years
else:
raise ValueError('do not recognize unit (2nd word) in: {!r}'.format(timespec))
limit = datetime.datetime(now.year, now.month, now.day) - delta
elif timespec == 'yesterday':
limit = datetime.datetime(now.year, now.month, now.day)
else:
raise ValueError('do not recognize time specification: {!r}'.format(timespec))
return limit
def get_by_timespec(backups, timespec):
"""\
backups is a list of names of backups (strings representing dates)
timespec is a string describing a date, time difference or order
"""
backups.sort()
# by order
if timespec is None or timespec == 'last':
return backups[-1]
elif timespec == 'previous':
return backups[-2]
elif timespec == 'first':
return backups[0]
elif timespec.startswith('-'):
n = int(timespec)
if -len(backups) < n < 0:
return backups[n]
else:
# by absolute date, just compare strings
latest_match = None
for backup in backups:
if backup.startswith(timespec):
latest_match = backup
if latest_match is not None:
return latest_match
# by time delta description
limit = get_limit(timespec)
for backup in backups:
t = datetime.datetime.strptime(backup, '%Y-%m-%d_%H%M%S')
if t < limit:
latest_match = backup
if latest_match is not None:
return latest_match
raise BackupException('No backup found matching {!r}'.format(timespec))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import doctest
doctest.testmod()
| en | 0.48423 | #!/usr/bin/env python3 # encoding: utf-8 # # (C) 2012-2016 <NAME> <<EMAIL>> # # SPDX-License-Identifier: BSD-3-Clause \ Link To The Past - a backup tool Parse time specification for locating old backups. \ Parse a string describing a time difference from now. It returns a time right after the desired point. The idea is to use it this way: if some_time < limit: print "It is older" The ``now`` parameter is there only for testing purposes. >>> today = datetime.datetime(2012, 4, 1, 16, 55) >>> get_limit('1 hour ago', today) datetime.datetime(2012, 4, 1, 15, 55) >>> get_limit('yesterday', today) datetime.datetime(2012, 4, 1, 0, 0) >>> get_limit('1 day ago', today) datetime.datetime(2012, 4, 1, 0, 0) >>> get_limit('2 days ago', today) datetime.datetime(2012, 3, 31, 0, 0) >>> get_limit('2 weeks ago', today) datetime.datetime(2012, 3, 19, 0, 0) >>> get_limit('1 month ago', today) # XXX not yet accurate datetime.datetime(2012, 3, 2, 0, 0) >>> get_limit('1 year ago', today) # XXX not yet accurate datetime.datetime(2011, 4, 3, 0, 0) # XXX not exact months # XXX not exact years \ backups is a list of names of backups (strings representing dates) timespec is a string describing a date, time difference or order # by order # by absolute date, just compare strings # by time delta description # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - | 3.340028 | 3 |
sieve.py | msghera/Study-of-Goldbach-Conjecture | 0 | 6618410 | class sieve :
def __init__(self, __limit = 1000002):
self.__limit = __limit
self.prime = [2]
self.bs = [1]*__limit
self.bs[0]=0
self.bs[1]=0
for i in range(4, __limit, 2) : self.bs[i] = 0
for i in range(3, __limit, 2) :
if self.bs[i] == 1 :
self.prime.append(i)
for j in range(i*i, __limit, i) : self.bs[j] = 0
def get_limit (self) :
return self.__limit
def __len__ (self) :
return len(self.prime)
def get_prime(self, n):
try:
return self.prime[n-1]
except:
print('Range out of bound.')
def is_prime(self, num):
if num <= self.__limit :
return True if self.bs[num] == 1 else False
else :
for _prime in prime :
if num%prime == 0 :
return False
return True
if __name__ == '__main__':
s = sieve()
print(s.bs[:10])
class sieve :
    """Sieve of Eratosthenes.

    Precomputes primality flags (``bs``) and an ordered prime list
    (``prime``) for all numbers below ``__limit`` and answers
    primality / n-th-prime queries.
    """

    def __init__(self, __limit = 1000002):
        # ``__limit`` is an exclusive upper bound for the sieve table.
        self.__limit = __limit
        self.prime = [2]            # ordered primes below the limit
        self.bs = [1] * __limit     # bs[i] == 1  <=>  i is prime
        self.bs[0] = 0
        self.bs[1] = 0
        for i in range(4, __limit, 2):
            self.bs[i] = 0          # even numbers > 2 are composite
        for i in range(3, __limit, 2):
            if self.bs[i] == 1:
                self.prime.append(i)
                # Mark multiples from i*i: smaller ones were already hit.
                for j in range(i * i, __limit, i):
                    self.bs[j] = 0

    def get_limit(self):
        """Return the (exclusive) sieve limit."""
        return self.__limit

    def __len__(self):
        """Number of primes found below the limit."""
        return len(self.prime)

    def get_prime(self, n):
        """Return the n-th prime (1-based); print an error if out of range."""
        try:
            return self.prime[n - 1]
        except IndexError:
            print('Range out of bound.')

    def is_prime(self, num):
        """Primality test.

        Exact for num below the sieve limit; above it, trial division by
        the sieved primes is used, which is only guaranteed correct for
        num up to roughly limit**2.
        """
        # Bug fix: the original compared with ``num <= self.__limit`` and
        # then indexed bs[num], which is out of range for num == limit.
        if num < self.__limit:
            return self.bs[num] == 1
        # Bug fix: the original referenced an undefined global ``prime``
        # and tested ``num % prime`` instead of the loop variable.
        for p in self.prime:
            if p * p > num:
                break
            if num % p == 0:
                return False
        return True
if __name__ == '__main__':
    # Smoke test: show the primality flags for 0..9.
    s = sieve()
    print(s.bs[:10])
| none | 1 | 3.511884 | 4 | |
get_ids.py | kitanata/ackarma-data-spike | 0 | 6618411 | import requests
# Scrape the RealGM NBA player index and each player's game logs, then
# emit PostgreSQL DDL + INSERT statements via ddlgenerator.
import time
from tqdm import tqdm
from ddlgenerator.ddlgenerator import Table
from bs4 import BeautifulSoup

# ---- player index ---------------------------------------------------
all_players_url = 'http://basketball.realgm.com/nba/players'
response = requests.get(all_players_url)
soup = BeautifulSoup(response.content, 'html.parser')
table = soup.find('tbody')
players = []
for tr in table.find_all('tr'):
    curr_player = {}
    for td in tr.find_all('td'):
        # The '#' column holds the jersey number; every other column is
        # keyed by its header name (the 'data-th' attribute).
        if td['data-th'] == '#':
            curr_player['number'] = td['rel']
        else:
            curr_player[td['data-th']] = td['rel']
        if td['data-th'] == 'Player':
            # The trailing segment of the profile URL is the site's
            # numeric player id; use it as the primary key.
            link = td.a.get('href')
            link_parts = link.split('/')
            curr_player['pk'] = int(link_parts[-1])
    players.append(curr_player)
players_table_sql = Table(
    players,
    table_name='Players',
    pk_name='pk',
    force_pk=True
).sql(dialect='postgresql', inserts=True)
print(players_table_sql)

# ---- per-player game logs -------------------------------------------
player_games = []
for player in tqdm(players):
    time.sleep(0.5)  # be polite: throttle to one request per half second
    url = 'http://basketball.realgm.com/player/x/GameLogs/{}'.format(
        player['pk']
    )
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    thead = soup.find('thead')
    header_lookup = []
    try:
        for tr in thead.find_all('tr'):
            for th in tr.find_all('th'):
                header_lookup.append(th.string)
    except AttributeError:
        # No stats table on the page (thead is None): skip this player.
        continue
    table = soup.find('tbody')
    for tr in table.find_all('tr'):
        curr_game = {
            'player_id': player['pk']
        }
        for idx, td in enumerate(tr.find_all('td')):
            curr_game[header_lookup[idx]] = td['rel']
        player_games.append(curr_game)
game_sql = Table(
    player_games,
    table_name='GameStats'
).sql(dialect='postgresql', inserts=True)
print(game_sql)
| import requests
import time
from tqdm import tqdm
from ddlgenerator.ddlgenerator import Table
from bs4 import BeautifulSoup
all_players_url = 'http://basketball.realgm.com/nba/players'
response = requests.get(all_players_url)
soup = BeautifulSoup(response.content, 'html.parser')
table = soup.find('tbody')
players = []
for tr in table.find_all('tr'):
curr_player = {}
for td in tr.find_all('td'):
if td['data-th'] == '#':
curr_player['number'] = td['rel']
else:
curr_player[td['data-th']] = td['rel']
if td['data-th'] == 'Player':
link = td.a.get('href')
link_parts = link.split('/')
curr_player['pk'] = int(link_parts[-1])
players.append(curr_player)
players_table_sql = Table(
players,
table_name='Players',
pk_name='pk',
force_pk=True
).sql(dialect='postgresql', inserts=True)
print(players_table_sql)
player_games = []
for player in tqdm(players):
time.sleep(0.5)
url = 'http://basketball.realgm.com/player/x/GameLogs/{}'.format(
player['pk']
)
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
thead = soup.find('thead')
header_lookup = []
try:
for tr in thead.find_all('tr'):
for th in tr.find_all('th'):
header_lookup.append(th.string)
except AttributeError:
continue
table = soup.find('tbody')
for tr in table.find_all('tr'):
curr_game = {
'player_id': player['pk']
}
for idx, td in enumerate(tr.find_all('td')):
curr_game[header_lookup[idx]] = td['rel']
player_games.append(curr_game)
game_sql = Table(
player_games,
table_name='GameStats'
).sql(dialect='postgresql', inserts=True)
print(game_sql)
| none | 1 | 2.695761 | 3 | |
DUST/plot/animate.py | Ovewh/DUST | 1 | 6618412 | import matplotlib.pyplot as plt
from collections import namedtuple
from functools import partial

import matplotlib as mpl
import matplotlib.animation as animation
import pandas as pd
def make_animation(data ,map_func, title,
                   figsize=(16,8),
                   fps = 20,
                   extent =[70,120, 25, 50],
                   intervall = 150,
                   vmin = None,
                   vmax = None,
                   **kwargs):
    """
    DESCRIPTION
    ===========
    Create an animation from a 3D xarray.DataArray with one temporal and
    two spatial dimensions (lon and lat).

    Parameters
    ----------
    data : DataArray to animate -- assumed (time, lat, lon); TODO confirm
    map_func : callable drawing the map background onto an axes
    title : figure title
    figsize, fps, extent, intervall : figure / playback options
    vmin, vmax : optional color-scale bounds; data min/max when omitted
    **kwargs : overrides for the video metadata ('title', 'comment')

    Returns
    -------
    matplotlib.animation.FuncAnimation
    """
    # Color-scale bounds default to the data range.  (Idiom fix: the
    # original if/elif ladder compared against None with '=='.)
    dat_min = data.min() if vmin is None else vmin
    dat_max = data.max() if vmax is None else vmax

    default_options = dict(title='DUST animation',
                           comment='Movie for sequence of images')
    default_options.update(kwargs)
    # NOTE(review): the FFMpeg writer is built but never used below (the
    # caller must save the returned animation itself); kept because the
    # writers['ffmpeg'] lookup also verifies that ffmpeg is available.
    FFMpegWriter = animation.writers['ffmpeg']
    metadata = dict(title=default_options['title'], artist='DUST',
                    comment=default_options['comment'])
    writer = FFMpegWriter(fps=fps, metadata=metadata)

    fig, ax = plt.subplots(figsize=figsize,
                           subplot_kw={'projection': ccrs.PlateCarree()})
    Artists = namedtuple("Artists", ("mesh", "time"))

    cmap = _gen_flexpart_colormap()
    levels = _gen_log_clevs(dat_min, dat_max)
    norm = mpl.colors.LogNorm(vmin=levels[0], vmax=levels[-1])
    artists = Artists(
        ax.pcolormesh(data.lon.values, data.lat.values, data[0].values,
                      animated=True,
                      transform=ccrs.PlateCarree(),
                      norm=norm,
                      cmap=cmap),
        ax.text(1, 1, "", fontsize=20, transform=ax.transAxes,
                horizontalalignment='right', verticalalignment='bottom'),)

    # Colorbar in its own axes to the right of the map.
    cax = fig.add_axes([ax.get_position().x1 + 0.01, ax.get_position().y0,
                        0.02, ax.get_position().height])
    clabels = list(levels[::10])  # every 10th contour level
    clabels.append(levels[-1])    # always label the last level
    cb = plt.colorbar(artists.mesh, cax=cax, label=data.units, extend='max')
    cb.set_ticks(clabels)
    cb.set_ticklabels(['%3.2g' % cl for cl in clabels])

    frames = [d_i for d_i in data]
    init = partial(_init_fig, ax=ax, fig=fig, artists=artists, extent=extent,
                   map_func=map_func, title=title, data=data[0])
    update = partial(_update_artist, artists=artists)

    ani = animation.FuncAnimation(fig=fig, func=update, frames=frames,
                                  init_func=init, interval=intervall,
                                  repeat_delay=5000)
    return ani
def _init_fig(fig, ax, artists, extent, map_func , title, data):
    """Init callback for FuncAnimation: set the title, draw the base map via
    *map_func* and clear the mesh.  ``extent`` and ``fig`` are currently
    unused.  Returns the artists tuple, as FuncAnimation expects."""
    ax.set_title(title, fontsize=22)
    ax = map_func(ax)
    # Mark the point given by the lon0/lat0 attributes (presumably the
    # release/receptor location -- TODO confirm against the data producer).
    if 'lon0' in data.attrs:
        ax.scatter(data.lon0, data.lat0, marker = '*', s=40, transform = ccrs.PlateCarree(), color ='black')
    # Start with an empty mesh; frames fill it in _update_artist.
    artists.mesh.set_array([])
    return artists
def _update_artist(frame, artists):
    """Per-frame callback: push the frame's field values into the mesh and
    refresh the timestamp text from the frame's time coordinate."""
    # print(frame.values.ravel)
    artists.mesh.set_array(frame.values.ravel())
    date = pd.to_datetime(str(frame.time.values))
    artists.time.set_text(date.strftime('%Y%m%d %H%M'))
def _animate(d_i, ax, extent):
    """Draw one frame for time slice *d_i*: timestamp as the axes title,
    then the base map plot.  ``extent`` is currently unused here."""
    date = pd.to_datetime(str(d_i.time.values))
    ax.set_title(date.strftime('%Y%m%d %H%M'))
fig, ax = mpl_base_map_plot(d_i, ax=ax,colorbar=False, mark_receptor=True) | import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.animation as animation
def make_animation(data, map_func, title,
                   figsize=(16, 8),
                   fps=20,
                   extent=[70, 120, 25, 50],
                   intervall=150,
                   vmin=None,
                   vmax=None,
                   **kwargs):
    """
    Create an animation from a 3D data array.

    Parameters
    ----------
    data : xarray.DataArray with one temporal dimension and two spatial
        dimensions (lon and lat).
    map_func : callable applied to the axes to draw the base map.
    title : figure title.
    figsize, fps, extent, intervall : figure size, writer fps, map extent
        (extent is forwarded to the init callback) and frame interval (ms).
    vmin, vmax : optional color-scale bounds; data min/max are used when None.
    kwargs : may override the writer metadata ('title', 'comment').

    Returns
    -------
    matplotlib.animation.FuncAnimation
        NOTE(review): the docstring previously claimed the animation is saved
        as mp4, but this function only returns it -- saving is the caller's job.
    """
    # Bug fix: use identity checks (`is None`) instead of `== None`.  Equality
    # against None is un-idiomatic and xarray overloads `==` element-wise,
    # which makes such checks unreliable.
    dat_min = data.min() if vmin is None else vmin
    dat_max = data.max() if vmax is None else vmax

    default_options = dict(title='DUST animation',
                           comment='Movie for sequence of images')
    default_options.update(kwargs)
    # NOTE(review): the writer object is created but never used below; the
    # lookup is kept because it fails early when ffmpeg is unavailable.
    FFMpegWriter = animation.writers['ffmpeg']
    metadata = dict(title=default_options['title'], artist='DUST',
                    comment=default_options['comment'])
    writer = FFMpegWriter(fps=fps, metadata=metadata)

    fig, ax = plt.subplots(figsize=figsize,
                           subplot_kw={'projection': ccrs.PlateCarree()})
    Artists = namedtuple("Artists", ("mesh", "time"))
    cmap = _gen_flexpart_colormap()
    levels = _gen_log_clevs(dat_min, dat_max)
    norm = mpl.colors.LogNorm(vmin=levels[0], vmax=levels[-1])
    artists = Artists(
        ax.pcolormesh(data.lon.values, data.lat.values, data[0].values,
                      animated=True, transform=ccrs.PlateCarree(),
                      norm=norm, cmap=cmap),
        ax.text(1, 1, "", fontsize=20, transform=ax.transAxes,
                horizontalalignment='right', verticalalignment='bottom'),
    )
    # Colorbar in its own axes to the right of the map.
    cax = fig.add_axes([ax.get_position().x1 + 0.01, ax.get_position().y0,
                        0.02, ax.get_position().height])
    clabels = list(levels[::10])   # label every 10th contour level
    clabels.append(levels[-1])     # always label the last level
    cb = plt.colorbar(artists.mesh, cax=cax, label=data.units, extend='max')
    cb.set_ticks(clabels)
    cb.set_ticklabels(['%3.2g' % cl for cl in clabels])

    frames = list(data)
    init = partial(_init_fig, ax=ax, fig=fig, artists=artists, extent=extent,
                   map_func=map_func, title=title, data=data[0])
    update = partial(_update_artist, artists=artists)
    ani = animation.FuncAnimation(fig=fig, func=update, frames=frames,
                                  init_func=init, interval=intervall,
                                  repeat_delay=5000)
    return ani
def _init_fig(fig, ax, artists, extent, map_func, title, data):
    """Init callback for FuncAnimation: title, base map, empty mesh.

    ``fig`` and ``extent`` are accepted for signature compatibility but are
    not used here.  Returns the artists tuple as FuncAnimation expects.
    """
    ax.set_title(title, fontsize=22)
    ax = map_func(ax)
    # Mark the lon0/lat0 point when the dataset provides those attributes
    # (presumably the release/receptor location -- TODO confirm).
    if 'lon0' in data.attrs:
        ax.scatter(data.lon0, data.lat0,
                   marker='*', s=40, color='black',
                   transform=ccrs.PlateCarree())
    artists.mesh.set_array([])
    return artists
def _update_artist(frame, artists):
    """Per-frame callback: new field values into the mesh, new timestamp text."""
    artists.mesh.set_array(frame.values.ravel())
    timestamp = pd.to_datetime(str(frame.time.values))
    artists.time.set_text(timestamp.strftime('%Y%m%d %H%M'))
def _animate(d_i, ax, extent):
    """Draw one frame for time slice *d_i*: timestamp as the axes title,
    then the base map plot.  ``extent`` is currently unused here."""
    date = pd.to_datetime(str(d_i.time.values))
    ax.set_title(date.strftime('%Y%m%d %H%M'))
fig, ax = mpl_base_map_plot(d_i, ax=ax,colorbar=False, mark_receptor=True) | en | 0.765853 | DESCRIPTION =========== Create animation of image sequence made from the a 3D data array data is xarray.dataarray, with one temporal dimension and two spatial dimmension (lon and lat). Saves animation as an mp4 video file. # #clevs, by 10 steps # add the last label # print(frame.values.ravel) | 3.10825 | 3 |
jarviscli/plugins/systemOptions.py | jay4563/Jarvis | 0 | 6618413 | <reponame>jay4563/Jarvis<filename>jarviscli/plugins/systemOptions.py
import os
from platform import architecture, dist, release, mac_ver
from platform import system as sys
from colorama import Fore, Style
from plugin import LINUX, MACOS, PYTHON2, PYTHON3, plugin
@plugin(plattform=MACOS, native="pmset")
def screen_off__MAC(jarvis, s):
    """Turn off screen instantly"""
    # macOS: ask the power manager to put the display to sleep immediately.
    os.system('pmset displaysleepnow')
@plugin(plattform=LINUX, native="xset")
def screen_off__LINUX(jarvis, s):
    """Turn off screen instantly"""
    # Linux: force the display off through the X DPMS extension.
    os.system('xset dpms force off')
@plugin(plattform=MACOS)
def Os__MAC(jarvis, s):
    """Displays information about your operating system"""
    jarvis.say(Style.BRIGHT + '[!] Operating System Information' + Style.RESET_ALL, Fore.BLUE)
    jarvis.say('[*] Kernel: ' + sys(), Fore.GREEN)
    jarvis.say('[*] Kernel Release Version: ' + release(), Fore.GREEN)
    jarvis.say('[*] macOS System version: ' + mac_ver()[0], Fore.GREEN)
    for _ in architecture():
        # Bug fix: `is not ''` tests object identity, which depends on string
        # interning and raises SyntaxWarning on Python >= 3.8.  Compare by value.
        if _ != '':
            jarvis.say('[*] ' + _, Fore.GREEN)
@plugin(plattform=LINUX)
def Os__LINUX(jarvis, s):
    """Displays information about your operating system"""
    # Print kernel name, kernel release and distribution name, then the
    # architecture tuple (bits, linkage) from platform.architecture().
    jarvis.say('[!] Operating System Information', Fore.BLUE)
    jarvis.say('[*] ' + sys(), Fore.GREEN)
    jarvis.say('[*] ' + release(), Fore.GREEN)
    jarvis.say('[*] ' + dist()[0], Fore.GREEN)
    for _ in architecture():
        jarvis.say('[*] ' + _, Fore.GREEN)
@plugin(python=PYTHON3, plattform=LINUX)
def systeminfo__PY3_LINUX(jarvis, s):
    """Display system information with distribution logo"""
    # Lazy import: `archey` is a third-party package only needed (and
    # expected to be installed) on Linux with Python 3.
    from archey import archey
    archey.main()
@plugin(python=PYTHON3, plattform=MACOS, native="screenfetch")
def systeminfo__PY3_MAC(jarvis, s):
    """Display system information with distribution logo"""
    # Delegates to the native `screenfetch` command-line tool.
    os.system("screenfetch")
@plugin(python=PYTHON2, native="screenfetch")
def systeminfo__PY2(jarvis, s):
    """Display system information with distribution logo"""
    # Delegates to the native `screenfetch` command-line tool.
    os.system("screenfetch")
| import os
from platform import architecture, dist, release, mac_ver
from platform import system as sys
from colorama import Fore, Style
from plugin import LINUX, MACOS, PYTHON2, PYTHON3, plugin
@plugin(plattform=MACOS, native="pmset")
def screen_off__MAC(jarvis, s):
    """Turn off screen instantly"""
    # macOS: ask the power manager to put the display to sleep immediately.
    os.system('pmset displaysleepnow')
@plugin(plattform=LINUX, native="xset")
def screen_off__LINUX(jarvis, s):
    """Turn off screen instantly"""
    # Linux: force the display off through the X DPMS extension.
    os.system('xset dpms force off')
@plugin(plattform=MACOS)
def Os__MAC(jarvis, s):
    """Displays information about your operating system"""
    jarvis.say(Style.BRIGHT + '[!] Operating System Information' + Style.RESET_ALL, Fore.BLUE)
    jarvis.say('[*] Kernel: ' + sys(), Fore.GREEN)
    jarvis.say('[*] Kernel Release Version: ' + release(), Fore.GREEN)
    jarvis.say('[*] macOS System version: ' + mac_ver()[0], Fore.GREEN)
    for _ in architecture():
        # Bug fix: `is not ''` tests object identity, which depends on string
        # interning and raises SyntaxWarning on Python >= 3.8.  Compare by value.
        if _ != '':
            jarvis.say('[*] ' + _, Fore.GREEN)
@plugin(plattform=LINUX)
def Os__LINUX(jarvis, s):
    """Displays information about your operating system"""
    # Kernel name, kernel release and distribution, each on its own line.
    jarvis.say('[!] Operating System Information', Fore.BLUE)
    for detail in (sys(), release(), dist()[0]):
        jarvis.say('[*] ' + detail, Fore.GREEN)
    # Architecture tuple (bits, linkage) from platform.architecture().
    for part in architecture():
        jarvis.say('[*] ' + part, Fore.GREEN)
@plugin(python=PYTHON3, plattform=LINUX)
def systeminfo__PY3_LINUX(jarvis, s):
    """Display system information with distribution logo"""
    # Lazy import: `archey` is a third-party package only needed (and
    # expected to be installed) on Linux with Python 3.
    from archey import archey
    archey.main()
@plugin(python=PYTHON3, plattform=MACOS, native="screenfetch")
def systeminfo__PY3_MAC(jarvis, s):
    """Display system information with distribution logo"""
    # Delegates to the native `screenfetch` command-line tool.
    os.system("screenfetch")
@plugin(python=PYTHON2, native="screenfetch")
def systeminfo__PY2(jarvis, s):
    """Display system information with distribution logo"""
os.system("screenfetch") | en | 0.878811 | Turn of screen instantly Turn of screen instantly Displays information about your operating system Displays information about your operating system Display system information with distribution logo Display system information with distribution logo Display system information with distribution logo | 2.423581 | 2 |
management/commands/ncbi_update.py | Allcor/Django-app-collecties | 0 | 6618414 | <gh_stars>0
#!/usr/bin/python3
'''
Script to read the names.dmp file from NCBI taxonemy ftp.
ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump_readme.txt
ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
as of now just populates the database, should be modified so it can be used to update.
6-6-2017:
made a file handeler in collectie/utils
now a function to add the data to the website database
https://docs.djangoproject.com/en/dev/howto/custom-management-commands/
7-6-2017:
decided to play it safe and add nodes only when parent node is alreaddy present,
Use Django models to check if id exists and add/change if nessesary
8-6-2017:
had to make tax_id 1 in the shell.
names are also added with the .create() function on the node.
'''
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from naktdata.settings import BASE_DIR
from collectie.utils.ncbi_taxdump import Dump_files
from collectie.models import NCBI_nodes, NCBI_names
#TODO show last update on site
#TODO what if a node is deleted? how to detect.
#TODO how often is a node remade? should the local specific names be moved.
#TODO how often is a parent_node changed? should be improved.
#TODO make a logger instance specific for NCBI changes found wile checking
class Command(BaseCommand):
    """Synchronise the local NCBI taxonomy tables (nodes and names) with the
    latest NCBI taxdump."""

    help = 'Update the local NCBI taxonomy tables from the latest NCBI taxdump.'

    def _check_nodes(self):
        """Compare the NCBI taxonomy dump with the local node table.

        New nodes are created through ``ncbi_nodes_set.create()`` on their
        parent so the foreign-key relation stays consistent.  Nodes whose
        parent has not been seen yet are parked in ``parent_dict`` and
        created recursively once the parent appears.
        """
        parent_dict = {}  # {parent_id: [(tax_id, rank), ...]} orphans awaiting their parent

        def node_set_create(parent_node, new_id, new_rank):
            # Recursively create a node, then any children parked for it.
            new_node = parent_node.ncbi_nodes_set.create(tax_id=new_id, rank=new_rank)
            if new_id in parent_dict:
                parent_list = parent_dict.pop(new_id)
                for next_id, next_rank in parent_list:
                    node_set_create(new_node, next_id, next_rank)

        # Iterate over the NCBI taxonomy node ids; create or update as needed.
        for node_lib in self.dmp.fetch_nodes():
            # TODO not all available variables are yielded, only those shown here.
            check_tax_id = node_lib['node_id']
            check_parent_tax_id = node_lib['parent_node_id']
            check_rank = node_lib['node_rank']
            check_hidden = node_lib['hidden']  # currently unused; kept for future use
            if NCBI_nodes.objects.filter(tax_id=check_tax_id).exists():
                # Existing node: apply changes only where something differs.
                n = NCBI_nodes.objects.get(tax_id=check_tax_id)
                if n.rank != check_rank:
                    n.rank = check_rank
                    n.save()
                if n.parent_tax_id_id != check_parent_tax_id:
                    # TODO the new parent may not exist locally yet; see
                    # https://docs.djangoproject.com/en/1.11/topics/db/queries/#additional-methods-to-handle-related-objects
                    n.parent_tax_id_id = check_parent_tax_id
                    n.save()
            else:
                # New node: attach to its parent if present, otherwise park it.
                if NCBI_nodes.objects.filter(tax_id=check_parent_tax_id).exists():
                    p = NCBI_nodes.objects.get(tax_id=check_parent_tax_id)
                    node_set_create(p, check_tax_id, check_rank)
                else:
                    parent_dict.setdefault(check_parent_tax_id, []).append(
                        (check_tax_id, check_rank))

    def _check_names(self):
        """Compare the NCBI taxonomy dump with the local name table and
        create or update name entries as needed."""
        for name_lib in self.dmp.fetch_names():
            # The 'unique_name' column of the dump is not yielded.
            check_tax_id = name_lib['node_id']
            check_name_txt = name_lib['synonym']
            check_name_class = name_lib['label']
            # tax_id is not re-checked: nodes were created in _check_nodes().
            n = NCBI_nodes.objects.get(tax_id=check_tax_id)
            if check_name_class == 'scientific name' and n.ncbi_names_set.filter(name_class=check_name_class).exists():
                name = n.ncbi_names_set.get(name_class='scientific name')
                # Bug fix: the original compared the model instance itself to
                # the string (`name != check_name_txt`), which is never equal,
                # so every scientific name was rewritten on each run.
                if name.name_txt != check_name_txt:
                    name.name_txt = check_name_txt
                    name.save()
                    n.scientific_name = check_name_txt
                    n.save()
            elif not n.ncbi_names_set.filter(name_txt=check_name_txt, name_class=check_name_class).exists():
                # Create a new name entry for this node.
                n.ncbi_names_set.create(name_txt=check_name_txt, name_class=check_name_class)

    def handle(self, *args, **options):
        """Entry point: fetch the newest taxdump, then update nodes and names,
        each inside its own transaction."""
        logging.basicConfig(filename=BASE_DIR + '/logs/ncbi_update.log',
                            level=logging.DEBUG, filemode='a+')
        # Fetch the newest dump.
        self.dmp = Dump_files()
        # Update nodes first so names can rely on the nodes existing.
        with transaction.atomic():
            self._check_nodes()
        # Update names.
        with transaction.atomic():
            self._check_names()
| #!/usr/bin/python3
'''
Script to read the names.dmp file from NCBI taxonemy ftp.
ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump_readme.txt
ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
as of now just populates the database, should be modified so it can be used to update.
6-6-2017:
made a file handeler in collectie/utils
now a function to add the data to the website database
https://docs.djangoproject.com/en/dev/howto/custom-management-commands/
7-6-2017:
decided to play it safe and add nodes only when parent node is alreaddy present,
Use Django models to check if id exists and add/change if nessesary
8-6-2017:
had to make tax_id 1 in the shell.
names are also added with the .create() function on the node.
'''
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from naktdata.settings import BASE_DIR
from collectie.utils.ncbi_taxdump import Dump_files
from collectie.models import NCBI_nodes, NCBI_names
#TODO show last update on site
#TODO what if a node is deleted? how to detect.
#TODO how often is a node remade? should the local specific names be moved.
#TODO how often is a parent_node changed? should be improved.
#TODO make a logger instance specific for NCBI changes found wile checking
class Command(BaseCommand):
help = 'help string here'
def _check_nodes(self):
#for comparing the NCBI_taxonemy and local taxonemy node table.
# adding the parent_node_id derectly to 'parent_tax_id_id' should work,
# using ncbi_nodes_set.create() should be a safer way to populate
# Django creates a set in the parent of the foreighnkey relations
# if parent tax_id is not present, the node is saved so the connection can be made later
parent_dict = {} # {parent_id:[(tax_id,rank)]}
def node_set_create(parent_node, new_id, new_rank):
# recursive inner function to create entries
new_node = parent_node.ncbi_nodes_set.create(tax_id=new_id, rank=new_rank)
if new_id in parent_dict:
parent_list = parent_dict.pop(new_id)
for next_id,next_rank in parent_list:
node_set_create(new_node,next_id,next_rank)
# itterates over NCBI_taxonemy node id's
# checks if node exists and makes changes when needed.
for node_lib in self.dmp.fetch_nodes():
#TODO not all availeble variables are yielded, only those shown here.
check_tax_id = node_lib['node_id']
check_parent_tax_id = node_lib['parent_node_id']
check_rank = node_lib['node_rank']
check_hidden = node_lib['hidden']
#check if the id exists
if NCBI_nodes.objects.filter(tax_id=check_tax_id).exists():
#check if changes are required
n = NCBI_nodes.objects.get(tax_id=check_tax_id)
if n.rank != check_rank:
n.rank = check_rank
n.save()
if n.parent_tax_id_id != check_parent_tax_id:
#TODO not sure how to handel this yet, how to add something to a ncbi_nodes_set of a parent does not exist?
# could add to the parent_dict? and remove current instance?
# https://docs.djangoproject.com/en/1.11/topics/db/queries/#additional-methods-to-handle-related-objects
n.parent_tax_id_id = check_parent_tax_id
n.save()
else:
#add new id to database
if NCBI_nodes.objects.filter(tax_id=check_parent_tax_id).exists():
p = NCBI_nodes.objects.get(tax_id=check_parent_tax_id)
node_set_create(p,check_tax_id,check_rank)
else:
if check_parent_tax_id in parent_dict:
parent_dict[check_parent_tax_id].append((check_tax_id,check_rank))
else:
parent_dict[check_parent_tax_id] = [(check_tax_id,check_rank)]
def _check_names(self):
#itterates on NCBI_taxonemy names
#checks if name exists and makes changes if needed.
for name_lib in self.dmp.fetch_names():
#the 'unique_name' variable is not yielded,
check_tax_id = name_lib['node_id']
check_name_txt = name_lib['synonym']
check_name_class = name_lib['label']
#the tax_id is not checked as the nodes are created beforehand
n = NCBI_nodes.objects.get(tax_id=check_tax_id)
# TODO not sure what needs to be checked, original has a id,name,class combined key
if check_name_class == 'scientific name' and n.ncbi_names_set.filter(name_class=check_name_class).exists():
name = n.ncbi_names_set.get(name_class='scientific name')
if name != check_name_txt:
name.name_txt = check_name_txt
name.save()
n.scientific_name = check_name_txt
n.save()
elif not n.ncbi_names_set.filter(name_txt=check_name_txt, name_class=check_name_class).exists():
#create new name entry
n.ncbi_names_set.create(name_txt=check_name_txt, name_class=check_name_class)
def handle(self, *args, **options):
logging.basicConfig(filename=BASE_DIR+'/logs/ncbi_update.log',level=logging.DEBUG, filemode='a+')
#fetching the newest dump
self.dmp = Dump_files()
#update nodes
with transaction.atomic():
self._check_nodes()
#update names
with transaction.atomic():
self._check_names() | en | 0.762075 | #!/usr/bin/python3 Script to read the names.dmp file from NCBI taxonemy ftp. ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump_readme.txt ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz as of now just populates the database, should be modified so it can be used to update. 6-6-2017: made a file handeler in collectie/utils now a function to add the data to the website database https://docs.djangoproject.com/en/dev/howto/custom-management-commands/ 7-6-2017: decided to play it safe and add nodes only when parent node is alreaddy present, Use Django models to check if id exists and add/change if nessesary 8-6-2017: had to make tax_id 1 in the shell. names are also added with the .create() function on the node. #TODO show last update on site #TODO what if a node is deleted? how to detect. #TODO how often is a node remade? should the local specific names be moved. #TODO how often is a parent_node changed? should be improved. #TODO make a logger instance specific for NCBI changes found wile checking #for comparing the NCBI_taxonemy and local taxonemy node table. # adding the parent_node_id derectly to 'parent_tax_id_id' should work, # using ncbi_nodes_set.create() should be a safer way to populate # Django creates a set in the parent of the foreighnkey relations # if parent tax_id is not present, the node is saved so the connection can be made later # {parent_id:[(tax_id,rank)]} # recursive inner function to create entries # itterates over NCBI_taxonemy node id's # checks if node exists and makes changes when needed. #TODO not all availeble variables are yielded, only those shown here. #check if the id exists #check if changes are required #TODO not sure how to handel this yet, how to add something to a ncbi_nodes_set of a parent does not exist? # could add to the parent_dict? and remove current instance? 
# https://docs.djangoproject.com/en/1.11/topics/db/queries/#additional-methods-to-handle-related-objects #add new id to database #itterates on NCBI_taxonemy names #checks if name exists and makes changes if needed. #the 'unique_name' variable is not yielded, #the tax_id is not checked as the nodes are created beforehand # TODO not sure what needs to be checked, original has a id,name,class combined key #create new name entry #fetching the newest dump #update nodes #update names | 2.318192 | 2 |
hpimdm/packet/PacketHPIMIamUpstream.py | pedrofran12/hpim_dm | 1 | 6618415 | <reponame>pedrofran12/hpim_dm<gh_stars>1-10
import struct
import socket
import ipaddress
###########################################################################################################
# JSON FORMAT
###########################################################################################################
class PacketHPIMUpstreamJson():
    """IamUpstream control message in its JSON (dict) representation."""
    PIM_TYPE = "I_AM_UPSTREAM"

    def __init__(self, source, group, metric_preference, metric, sequence_number):
        self.source = source
        self.group = group
        self.metric = metric
        self.metric_preference = metric_preference
        self.sequence_number = sequence_number

    def bytes(self) -> dict:
        """
        Obtain Protocol IamUpstream Packet in a format to be transmitted (JSON).

        Bug fix: the original annotation claimed ``-> bytes`` but the method
        returns a dict (the JSON payload); the annotation now matches.
        """
        return {"SOURCE": self.source,
                "GROUP": self.group,
                "METRIC": self.metric,
                "METRIC_PREFERENCE": self.metric_preference,
                "SN": self.sequence_number,
                }

    @classmethod
    def parse_bytes(cls, data: dict):
        """
        Parse a received Protocol IamUpstream Packet in JSON format and
        convert it into a PacketHPIMUpstreamJson object.  ``data`` is the
        already-decoded dict, not raw bytes (annotation fixed accordingly).
        """
        return cls(data["SOURCE"], data["GROUP"],
                   data["METRIC_PREFERENCE"], data["METRIC"], data["SN"])
###########################################################################################################
# BINARY FORMAT
###########################################################################################################
'''
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tree Source IP |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tree Group IP |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Metric Preference |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Metric |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
'''
class PacketHPIMUpstream:
    """IamUpstream control message in its binary wire representation (IPv4)."""
    PIM_TYPE = 2
    PIM_HDR_INSTALL = "! 4s 4s L L L"
    PIM_HDR_INSTALL_LEN = struct.calcsize(PIM_HDR_INSTALL)
    FAMILY = socket.AF_INET

    def __init__(self, source_ip, group_ip, metric_preference, metric, sequence_number):
        # Addresses may arrive as text or as packed binary; normalise to text.
        if type(source_ip) not in (str, bytes) or type(group_ip) not in (str, bytes):
            raise Exception
        self.source = source_ip if type(source_ip) is not bytes else socket.inet_ntop(self.FAMILY, source_ip)
        self.group = group_ip if type(group_ip) is not bytes else socket.inet_ntop(self.FAMILY, group_ip)
        self.metric = metric
        self.metric_preference = metric_preference
        self.sequence_number = sequence_number

    def bytes(self) -> bytes:
        """Serialise this IamUpstream packet into its binary wire format."""
        packed_source = socket.inet_pton(self.FAMILY, self.source)
        packed_group = socket.inet_pton(self.FAMILY, self.group)
        return struct.pack(self.PIM_HDR_INSTALL, packed_source, packed_group,
                           self.sequence_number, self.metric_preference, self.metric)

    def __len__(self):
        return len(self.bytes())

    @classmethod
    def parse_bytes(cls, data: bytes):
        """Deserialise a binary IamUpstream packet into an object."""
        header = data[:cls.PIM_HDR_INSTALL_LEN]
        tree_source, tree_group, sn, preference, cost = struct.unpack(cls.PIM_HDR_INSTALL, header)
        return cls(tree_source, tree_group, preference, cost, sn)
class PacketHPIMUpstream_v6(PacketHPIMUpstream):
    # IPv6 variant: 16-byte packed addresses and AF_INET6; otherwise the
    # behaviour is identical to the IPv4 parent class.
    PIM_HDR_INSTALL = "! 16s 16s L L L"
    PIM_HDR_INSTALL_LEN = struct.calcsize(PIM_HDR_INSTALL)
    FAMILY = socket.AF_INET6
    def __init__(self, source_ip, group_ip, metric_preference, metric, sequence_number):
        super().__init__(source_ip, group_ip, metric_preference, metric, sequence_number)
| import struct
import socket
import ipaddress
###########################################################################################################
# JSON FORMAT
###########################################################################################################
class PacketHPIMUpstreamJson():
    """IamUpstream control message in its JSON (dict) representation."""
    PIM_TYPE = "I_AM_UPSTREAM"

    def __init__(self, source, group, metric_preference, metric, sequence_number):
        self.source = source
        self.group = group
        self.metric = metric
        self.metric_preference = metric_preference
        self.sequence_number = sequence_number

    def bytes(self) -> dict:
        """
        Obtain Protocol IamUpstream Packet in a format to be transmitted (JSON).

        Bug fix: the original annotation claimed ``-> bytes`` but the method
        returns a dict (the JSON payload); the annotation now matches.
        """
        return {"SOURCE": self.source,
                "GROUP": self.group,
                "METRIC": self.metric,
                "METRIC_PREFERENCE": self.metric_preference,
                "SN": self.sequence_number,
                }

    @classmethod
    def parse_bytes(cls, data: dict):
        """
        Parse a received Protocol IamUpstream Packet in JSON format and
        convert it into a PacketHPIMUpstreamJson object.  ``data`` is the
        already-decoded dict, not raw bytes (annotation fixed accordingly).
        """
        return cls(data["SOURCE"], data["GROUP"],
                   data["METRIC_PREFERENCE"], data["METRIC"], data["SN"])
###########################################################################################################
# BINARY FORMAT
###########################################################################################################
'''
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tree Source IP |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tree Group IP |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Metric Preference |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Metric |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
'''
class PacketHPIMUpstream:
    """IamUpstream control message in its binary wire representation (IPv4)."""
    PIM_TYPE = 2
    PIM_HDR_INSTALL = "! 4s 4s L L L"
    PIM_HDR_INSTALL_LEN = struct.calcsize(PIM_HDR_INSTALL)
    FAMILY = socket.AF_INET

    def __init__(self, source_ip, group_ip, metric_preference, metric, sequence_number):
        # Addresses may arrive as text or as packed binary; normalise to text.
        if type(source_ip) not in (str, bytes) or type(group_ip) not in (str, bytes):
            raise Exception
        self.source = source_ip if type(source_ip) is not bytes else socket.inet_ntop(self.FAMILY, source_ip)
        self.group = group_ip if type(group_ip) is not bytes else socket.inet_ntop(self.FAMILY, group_ip)
        self.metric = metric
        self.metric_preference = metric_preference
        self.sequence_number = sequence_number

    def bytes(self) -> bytes:
        """Serialise this IamUpstream packet into its binary wire format."""
        packed_source = socket.inet_pton(self.FAMILY, self.source)
        packed_group = socket.inet_pton(self.FAMILY, self.group)
        return struct.pack(self.PIM_HDR_INSTALL, packed_source, packed_group,
                           self.sequence_number, self.metric_preference, self.metric)

    def __len__(self):
        return len(self.bytes())

    @classmethod
    def parse_bytes(cls, data: bytes):
        """Deserialise a binary IamUpstream packet into an object."""
        header = data[:cls.PIM_HDR_INSTALL_LEN]
        tree_source, tree_group, sn, preference, cost = struct.unpack(cls.PIM_HDR_INSTALL, header)
        return cls(tree_source, tree_group, preference, cost, sn)
class PacketHPIMUpstream_v6(PacketHPIMUpstream):
    # IPv6 variant: 16-byte packed addresses and AF_INET6; otherwise the
    # behaviour is identical to the IPv4 parent class.
    PIM_HDR_INSTALL = "! 16s 16s L L L"
    PIM_HDR_INSTALL_LEN = struct.calcsize(PIM_HDR_INSTALL)
    FAMILY = socket.AF_INET6
    def __init__(self, source_ip, group_ip, metric_preference, metric, sequence_number):
super().__init__(source_ip, group_ip, metric_preference, metric, sequence_number) | es | 0.609044 | ########################################################################################################### # JSON FORMAT ########################################################################################################### Obtain Protocol IamUpstream Packet in a format to be transmitted (JSON) Parse received Protocol IamUpstream Packet from JSON format and convert it into ProtocolUpstream object ########################################################################################################### # BINARY FORMAT ########################################################################################################### 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Tree Source IP | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Tree Group IP | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Sequence Number | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Metric Preference | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Metric | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Obtain Protocol IamUpstream Packet in a format to be transmitted (binary) Parse received Protocol IamUpstream Packet from binary format and convert it into ProtocolUpstream object | 2.865905 | 3 |
Applied-Text-Mining-in-Python/youtube video downloader.py | rahul263-stack/PROJECT-Dump | 1 | 6618416 | from pytube import YouTube
link = input("Enter the link: ")
video = YouTube(link)
# Bug fix: `audio` was never defined (NameError at runtime); the stream
# collection belongs to the `video` object created above.
stream = video.streams.get_highest_resolution()
stream.download() | from pytube import YouTube
link = input("Enter the link: ")
video = YouTube(link)
# Bug fix: `audio` was never defined (NameError at runtime); the stream
# collection belongs to the `video` object created above.
stream = video.streams.get_highest_resolution()
stream.download() | none | 1 | 2.72786 | 3 | |
suricate/ui/rest_app.py | engjoy/suricate | 0 | 6618417 | # coding=utf-8
"""
RESTful API implementation.
"""
__author__ = 'tmetsch'
import bottle
class RestApi(object):
    """
    RESTful API.
    WSGI app can be retrieved by calling 'get_wsgi_app'.
    """
    def __init__(self):
        """
        Initialize the RESTful API.
        """
        # Bottle application instance that backs this API.
        self.app = bottle.Bottle()
        self._setup_routing()
    def _setup_routing(self):
        """
        Setup routing.
        """
        # No routes are registered yet; extensions/subclasses add them here.
        pass
    def get_wsgi_app(self):
        """
        Return the WSGI app.
        """
        return self.app
| # coding=utf-8
"""
RESTful API implementation.
"""
__author__ = 'tmetsch'
import bottle
class RestApi(object):
    """
    RESTful API.
    WSGI app can be retrieved by calling 'get_wsgi_app'.
    """
    def __init__(self):
        """
        Initialize the RESTful API.
        """
        # Bottle application instance that backs this API.
        self.app = bottle.Bottle()
        self._setup_routing()
    def _setup_routing(self):
        """
        Setup routing.
        """
        # No routes are registered yet; extensions/subclasses add them here.
        pass
    def get_wsgi_app(self):
        """
        Return the WSGI app.
        """
        return self.app
| en | 0.712856 | # coding=utf-8 RESTful API implementation. RESTful API. WSGI app can be retrieved by calling 'get_wsgi_app'. Initialize the RESTful API. Setup routing. Return the WSGI app. | 2.551396 | 3 |
src/export_depth_maps_dialog.py | memento42429/metashape-scripts | 40 | 6618418 | <reponame>memento42429/metashape-scripts
# Exports depth map of each camera.
#
# This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts
import Metashape
from PySide2 import QtGui, QtCore, QtWidgets
try:
import numpy as np
except ImportError:
print("Please ensure that you installed numpy via 'pip install numpy' - see https://agisoft.freshdesk.com/support/solutions/articles/31000136860-how-to-install-external-python-module-to-metashape-professional-package")
raise
class ExportDepthDlg(QtWidgets.QDialog):
    def __init__ (self, parent):
        """Build the export dialog: camera-selection radio buttons, export
        format combo box, Export/Close buttons and a progress bar, then run
        the dialog modally via ``self.exec()``."""
        QtWidgets.QDialog.__init__(self, parent)
        self.setWindowTitle("Export depth maps")
        self.btnQuit = QtWidgets.QPushButton("&Close")
        self.btnP1 = QtWidgets.QPushButton("&Export")
        self.pBar = QtWidgets.QProgressBar()
        self.pBar.setTextVisible(False)
        # self.selTxt =QtWidgets.QLabel()
        # self.selTxt.setText("Apply to:")
        # Mutually exclusive scope: all cameras vs. only the selected ones.
        self.radioBtn_all = QtWidgets.QRadioButton("Apply to all cameras")
        self.radioBtn_sel = QtWidgets.QRadioButton("Apply to selected")
        self.radioBtn_all.setChecked(True)
        self.radioBtn_sel.setChecked(False)
        self.formTxt = QtWidgets.QLabel()
        self.formTxt.setText("Export format:")
        # Output formats understood by export_depth().
        self.formCmb = QtWidgets.QComboBox()
        self.formCmb.addItem("1-band F32")
        self.formCmb.addItem("Grayscale 8-bit")
        self.formCmb.addItem("Grayscale 16-bit")
        # creating layout
        layout = QtWidgets.QGridLayout()
        layout.setSpacing(10)
        layout.addWidget(self.radioBtn_all, 0, 0)
        layout.addWidget(self.radioBtn_sel, 1, 0)
        layout.addWidget(self.formTxt, 0, 1)
        layout.addWidget(self.formCmb, 1, 1)
        layout.addWidget(self.btnP1, 2, 0)
        layout.addWidget(self.btnQuit, 2, 1)
        layout.addWidget(self.pBar, 3, 0, 1, 2)
        self.setLayout(layout)
        # Old-style Qt signal wiring (PySide2 compatibility).
        QtCore.QObject.connect(self.btnP1, QtCore.SIGNAL("clicked()"), self.export_depth)
        QtCore.QObject.connect(self.btnQuit, QtCore.SIGNAL("clicked()"), self, QtCore.SLOT("reject()"))
        self.exec()
def export_depth(self):
app = QtWidgets.QApplication.instance()
global doc
doc = Metashape.app.document
# active chunk
chunk = doc.chunk
if self.formCmb.currentText() == "1-band F32":
F32 = True
elif self.formCmb.currentText() == "Grayscale 8-bit":
F32 = False
elif self.formCmb.currentText() == "Grayscale 16-bit":
F32 = False
else:
print("Script aborted: unexpected error.")
return 0
selected = False
camera_list = list()
if self.radioBtn_sel.isChecked():
selected = True
for camera in chunk.cameras:
if camera.selected and camera.transform and (camera.type == Metashape.Camera.Type.Regular):
camera_list.append(camera)
elif self.radioBtn_all.isChecked():
selected = False
camera_list = [camera for camera in chunk.cameras if (camera.transform and camera.type == Metashape.Camera.Type.Regular)]
if not len(camera_list):
print("Script aborted: nothing to export.")
return 0
output_folder = Metashape.app.getExistingDirectory("Specify the export folder:")
if not output_folder:
print("Script aborted: invalid output folder.")
return 0
print("Script started...")
app.processEvents()
if chunk.transform.scale:
scale = chunk.transform.scale
else:
scale = 1
count = 0
for camera in camera_list:
if camera in chunk.depth_maps.keys():
depth = chunk.depth_maps[camera].image()
if not F32:
img = np.frombuffer(depth.tostring(), dtype=np.float32)
depth_range = img.max() - img.min()
img = depth - img.min()
img = img * (1. / depth_range)
if self.formCmb.currentText() == "Grayscale 8-bit":
img = img.convert("RGB", "U8")
img = 255 - img
img = img - 255 * (img * (1 / 255)) # normalized
img = img.convert("RGB", "U8")
elif self.formCmb.currentText() == "Grayscale 16-bit":
img = img.convert("RGB", "U16")
img = 65535 - img
img = img - 65535 * (img * (1 / 65535)) # normalized
img = img.convert("RGB", "U16")
else:
img = depth * scale
img.save(output_folder + "/" + camera.label + ".tif")
print("Processed depth for " + camera.label)
count += 1
self.pBar.setValue(int(count / len(camera_list) * 100))
app.processEvents()
self.pBar.setValue(100)
print("Script finished. Total cameras processed: " + str(count))
print("Depth maps exported to:\n " + output_folder)
return 1
def export_depth_maps():
    """Create and run the Export Depth Maps dialog over the main window."""
    application = QtWidgets.QApplication.instance()
    main_window = application.activeWindow()
    ExportDepthDlg(main_window)
# Module-level registration: runs once when Metashape imports this script and
# adds the dialog under the "Scripts" menu.
label = "Scripts/Export Depth Maps"
Metashape.app.addMenuItem(label, export_depth_maps)
print("To execute this script press {}".format(label))
| # Exports depth map of each camera.
#
# This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts
import Metashape
from PySide2 import QtGui, QtCore, QtWidgets
try:
import numpy as np
except ImportError:
print("Please ensure that you installed numpy via 'pip install numpy' - see https://agisoft.freshdesk.com/support/solutions/articles/31000136860-how-to-install-external-python-module-to-metashape-professional-package")
raise
class ExportDepthDlg(QtWidgets.QDialog):
    """Modal dialog that exports per-camera depth maps of the active chunk.

    The user chooses which cameras to process (all vs. selected) and the
    output pixel format (raw 32-bit float, 8-bit or 16-bit grayscale).
    The dialog runs itself immediately: ``self.exec()`` is called at the
    end of ``__init__``.
    """

    def __init__ (self, parent):
        """Build the widget layout, wire the buttons and show the dialog."""
        QtWidgets.QDialog.__init__(self, parent)
        self.setWindowTitle("Export depth maps")
        self.btnQuit = QtWidgets.QPushButton("&Close")
        self.btnP1 = QtWidgets.QPushButton("&Export")
        self.pBar = QtWidgets.QProgressBar()
        self.pBar.setTextVisible(False)
        # self.selTxt =QtWidgets.QLabel()
        # self.selTxt.setText("Apply to:")
        # Camera-selection scope: exactly one of the two radio buttons is active.
        self.radioBtn_all = QtWidgets.QRadioButton("Apply to all cameras")
        self.radioBtn_sel = QtWidgets.QRadioButton("Apply to selected")
        self.radioBtn_all.setChecked(True)
        self.radioBtn_sel.setChecked(False)
        self.formTxt = QtWidgets.QLabel()
        self.formTxt.setText("Export format:")
        # The combo-box item text is matched verbatim in export_depth().
        self.formCmb = QtWidgets.QComboBox()
        self.formCmb.addItem("1-band F32")
        self.formCmb.addItem("Grayscale 8-bit")
        self.formCmb.addItem("Grayscale 16-bit")
        # creating layout
        layout = QtWidgets.QGridLayout()
        layout.setSpacing(10)
        layout.addWidget(self.radioBtn_all, 0, 0)
        layout.addWidget(self.radioBtn_sel, 1, 0)
        layout.addWidget(self.formTxt, 0, 1)
        layout.addWidget(self.formCmb, 1, 1)
        layout.addWidget(self.btnP1, 2, 0)
        layout.addWidget(self.btnQuit, 2, 1)
        layout.addWidget(self.pBar, 3, 0, 1, 2)
        self.setLayout(layout)
        # Old-style PySide2 signal wiring: Export runs the worker, Close rejects.
        QtCore.QObject.connect(self.btnP1, QtCore.SIGNAL("clicked()"), self.export_depth)
        QtCore.QObject.connect(self.btnQuit, QtCore.SIGNAL("clicked()"), self, QtCore.SLOT("reject()"))
        self.exec()

    def export_depth(self):
        """Export one ``<camera label>.tif`` depth map per eligible camera.

        Returns 1 on success, 0 when aborted (no cameras / no output folder /
        unexpected combo-box state).
        """
        app = QtWidgets.QApplication.instance()
        # Kept at module scope so the document reference outlives this call.
        global doc
        doc = Metashape.app.document
        # active chunk
        chunk = doc.chunk
        if self.formCmb.currentText() == "1-band F32":
            F32 = True
        elif self.formCmb.currentText() == "Grayscale 8-bit":
            F32 = False
        elif self.formCmb.currentText() == "Grayscale 16-bit":
            F32 = False
        else:
            print("Script aborted: unexpected error.")
            return 0
        # NOTE(review): 'selected' is assigned below but never read afterwards.
        selected = False
        camera_list = list()
        if self.radioBtn_sel.isChecked():
            selected = True
            # Only aligned, regular cameras qualify for export.
            for camera in chunk.cameras:
                if camera.selected and camera.transform and (camera.type == Metashape.Camera.Type.Regular):
                    camera_list.append(camera)
        elif self.radioBtn_all.isChecked():
            selected = False
            camera_list = [camera for camera in chunk.cameras if (camera.transform and camera.type == Metashape.Camera.Type.Regular)]
        if not len(camera_list):
            print("Script aborted: nothing to export.")
            return 0
        output_folder = Metashape.app.getExistingDirectory("Specify the export folder:")
        if not output_folder:
            print("Script aborted: invalid output folder.")
            return 0
        print("Script started...")
        app.processEvents()
        # Chunk scale converts relative depth to world units; falls back to 1
        # for an unscaled (unreferenced) chunk.
        if chunk.transform.scale:
            scale = chunk.transform.scale
        else:
            scale = 1
        count = 0
        for camera in camera_list:
            # Cameras without a computed depth map are silently skipped.
            if camera in chunk.depth_maps.keys():
                depth = chunk.depth_maps[camera].image()
                if not F32:
                    # Min/max are taken from a numpy view of the raw F32 pixels...
                    img = np.frombuffer(depth.tostring(), dtype=np.float32)
                    depth_range = img.max() - img.min()
                    # ...but from here on 'img' is a Metashape Image again
                    # (depth minus scalar), normalised to [0, 1].
                    img = depth - img.min()
                    img = img * (1. / depth_range)
                    if self.formCmb.currentText() == "Grayscale 8-bit":
                        img = img.convert("RGB", "U8")
                        img = 255 - img
                        # NOTE(review): x - 255*(x*(1/255)) is arithmetically zero;
                        # presumably this relies on Metashape Image integer
                        # truncation semantics — verify against the API.
                        img = img - 255 * (img * (1 / 255)) # normalized
                        img = img.convert("RGB", "U8")
                    elif self.formCmb.currentText() == "Grayscale 16-bit":
                        img = img.convert("RGB", "U16")
                        img = 65535 - img
                        # NOTE(review): same concern as the 8-bit branch above.
                        img = img - 65535 * (img * (1 / 65535)) # normalized
                        img = img.convert("RGB", "U16")
                else:
                    # Raw float export: scale depth into world units.
                    img = depth * scale
                img.save(output_folder + "/" + camera.label + ".tif")
                print("Processed depth for " + camera.label)
                count += 1
                self.pBar.setValue(int(count / len(camera_list) * 100))
                app.processEvents()
        self.pBar.setValue(100)
        print("Script finished. Total cameras processed: " + str(count))
        print("Depth maps exported to:\n " + output_folder)
        return 1
def export_depth_maps():
    """Create and run the Export Depth Maps dialog over the main window."""
    application = QtWidgets.QApplication.instance()
    main_window = application.activeWindow()
    ExportDepthDlg(main_window)
# Module-level registration: runs once when Metashape imports this script and
# adds the dialog under the "Scripts" menu.
label = "Scripts/Export Depth Maps"
Metashape.app.addMenuItem(label, export_depth_maps)
print("To execute this script press {}".format(label)) | en | 0.595341 | # Exports depth map of each camera. # # This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts # self.selTxt =QtWidgets.QLabel() # self.selTxt.setText("Apply to:") # creating layout # active chunk # normalized # normalized | 1.905027 | 2 |
ESTACIO/EX ESTACIO 14.py | gnabaes/Exe-Python | 0 | 6618419 | ''' formatação de string utilizando f-string, como a definição de largura de uma string, formatação de float e de datas.'''
from datetime import datetime
# Demonstrates f-string formatting: field widths, float precision and dates.
frutas = ['jabuticaba', 'laranja', 'Uva', 'Banana']
for fruta in frutas:
    # width 12 for the name column, width 3 for the letter count
    linha = f'nome: {fruta:12} - numero de letras: {len(fruta):3}'
    print(linha)
print()

pi = 3.1415
meu_numero = f'O numero PI é : {pi:.1f}'
meu_numero_maispreciso = f' o numero PI masi preciso é: {pi:.4f}'
# total width 6, one decimal place
meu_numero_deslocadao = f' o meu numero deslocadao é {pi:6.1f}'
print(meu_numero)
print(meu_numero_maispreciso)
print(meu_numero_deslocadao)
print()

data = datetime.now()
minha_data = f'a data de hoje é {data}'
# renders the timestamp as day/month/year
minha_dataformatada = f'a data formatada é {data:%d/%m/%y}'
print(minha_data)
print(minha_dataformatada)
| ''' formatação de string utilizando f-string, como a definição de largura de uma string, formatação de float e de datas.'''
from datetime import datetime
# Demonstrates f-string formatting: field widths, float precision and dates.
frutas = ['jabuticaba', 'laranja', 'Uva', 'Banana']
for fruta in frutas:
    # width 12 for the name column, width 3 for the letter count
    linha = f'nome: {fruta:12} - numero de letras: {len(fruta):3}'
    print(linha)
print()

pi = 3.1415
meu_numero = f'O numero PI é : {pi:.1f}'
meu_numero_maispreciso = f' o numero PI masi preciso é: {pi:.4f}'
# total width 6, one decimal place
meu_numero_deslocadao = f' o meu numero deslocadao é {pi:6.1f}'
print(meu_numero)
print(meu_numero_maispreciso)
print(meu_numero_deslocadao)
print()

data = datetime.now()
minha_data = f'a data de hoje é {data}'
# renders the timestamp as day/month/year
minha_dataformatada = f'a data formatada é {data:%d/%m/%y}'
print(minha_data)
print(minha_dataformatada)
| pt | 0.995439 | formatação de string utilizando f-string, como a definição de largura de uma string, formatação de float e de datas. # 12 e o 3 são numeros de espaço # o numero 6 é largura e o numero depois do ponto é a presição #ue indica que desejamos exibir a data no formato “dia/mês/ano” | 4.164292 | 4 |
useraccount/models.py | JunbeomGwak/2020-Fido-Hackathon | 0 | 6618420 | <filename>useraccount/models.py<gh_stars>0
# -*- coding: utf-8 -*-
from django.contrib.auth.models import(BaseUserManager, AbstractBaseUser, PermissionsMixin)
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.db.models import ImageField
from django.utils.translation import ugettext_lazy as _
from urllib.parse import urlparse
from django.core.files import File
from .common import file_upload_path
class UserManager(BaseUserManager):
    """Manager for the custom ``User`` model (keyed by ``username``).

    BUGFIX: the checked-in source contained anonymised ``<PASSWORD>``
    placeholders (invalid Python syntax) where the password parameter and
    ``set_password`` argument belonged.  They are restored here as a
    ``password`` keyword defaulting to ``None``, matching Django's stock
    ``BaseUserManager`` convention (``set_password(None)`` marks the
    password unusable).
    """
    use_in_migrations = True

    def create_user(self, username, user_id, company, companycode, password=None):
        """Create and persist a regular user.

        Raises:
            ValueError: if ``username`` is empty.
        """
        if not username:
            raise ValueError(_('Users must have a name!'))
        user = self.model(
            username=username,
            user_id=user_id,
            company=company,
            companycode=companycode,
        )
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, username, user_id, company, companycode, password):
        """Create a user and promote it to an active superuser."""
        user = self.create_user(
            username=username,
            user_id=user_id,
            company=company,
            companycode=companycode,
            password=password,
        )
        # NOTE: create_user already hashed the password; re-setting it is
        # harmless and kept for parity with the original flow.
        user.set_password(password)
        user.is_superuser = True
        user.is_active = True
        user.save()
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model: login by ``username``, extra company fields.

    ``verbose_name`` strings are Korean admin labels (name, id, company,
    company code) and are intentionally left untranslated — they are
    user-facing runtime text.
    """
    username = models.CharField(max_length=50, unique=True, verbose_name="이름")
    user_id = models.CharField(max_length=40, unique=True, verbose_name="아이디")
    company = models.CharField(max_length=100, verbose_name="회사")
    companycode = models.CharField(max_length=100, verbose_name="회사코드")
    # NOTE(review): is_admin is never read here — is_staff below is derived
    # from is_superuser instead; confirm whether is_admin is still needed.
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)

    class Meta:
        db_table = 'django_user'

    objects = UserManager()

    # Field used as the unique login identifier.
    USERNAME_FIELD = 'username'
    # Extra fields prompted for by createsuperuser.
    REQUIRED_FIELDS = ['user_id', 'company', 'companycode']

    def __str__(self):
        return self.username

    def get_full_name(self):
        return self.username

    def get_short_name(self):
        return self.username

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All superusers are staff
        return self.is_superuser

    get_full_name.short_description = _('Full name')
class UploadFileModel(models.Model):
    """Simple container for a user-uploaded file with a free-text title."""
    title = models.TextField(default='')
    file = models.FileField(null=True)
| <filename>useraccount/models.py<gh_stars>0
# -*- coding: utf-8 -*-
from django.contrib.auth.models import(BaseUserManager, AbstractBaseUser, PermissionsMixin)
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.db.models import ImageField
from django.utils.translation import ugettext_lazy as _
from urllib.parse import urlparse
from django.core.files import File
from .common import file_upload_path
class UserManager(BaseUserManager):
    """Manager for the custom ``User`` model (keyed by ``username``).

    BUGFIX: the checked-in source contained anonymised ``<PASSWORD>``
    placeholders (invalid Python syntax) where the password parameter and
    ``set_password`` argument belonged.  They are restored here as a
    ``password`` keyword defaulting to ``None``, matching Django's stock
    ``BaseUserManager`` convention (``set_password(None)`` marks the
    password unusable).
    """
    use_in_migrations = True

    def create_user(self, username, user_id, company, companycode, password=None):
        """Create and persist a regular user.

        Raises:
            ValueError: if ``username`` is empty.
        """
        if not username:
            raise ValueError(_('Users must have a name!'))
        user = self.model(
            username=username,
            user_id=user_id,
            company=company,
            companycode=companycode,
        )
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, username, user_id, company, companycode, password):
        """Create a user and promote it to an active superuser."""
        user = self.create_user(
            username=username,
            user_id=user_id,
            company=company,
            companycode=companycode,
            password=password,
        )
        # NOTE: create_user already hashed the password; re-setting it is
        # harmless and kept for parity with the original flow.
        user.set_password(password)
        user.is_superuser = True
        user.is_active = True
        user.save()
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model: login by ``username``, extra company fields.

    ``verbose_name`` strings are Korean admin labels (name, id, company,
    company code) and are intentionally left untranslated — they are
    user-facing runtime text.
    """
    username = models.CharField(max_length=50, unique=True, verbose_name="이름")
    user_id = models.CharField(max_length=40, unique=True, verbose_name="아이디")
    company = models.CharField(max_length=100, verbose_name="회사")
    companycode = models.CharField(max_length=100, verbose_name="회사코드")
    # NOTE(review): is_admin is never read here — is_staff below is derived
    # from is_superuser instead; confirm whether is_admin is still needed.
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)

    class Meta:
        db_table = 'django_user'

    objects = UserManager()

    # Field used as the unique login identifier.
    USERNAME_FIELD = 'username'
    # Extra fields prompted for by createsuperuser.
    REQUIRED_FIELDS = ['user_id', 'company', 'companycode']

    def __str__(self):
        return self.username

    def get_full_name(self):
        return self.username

    def get_short_name(self):
        return self.username

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All superusers are staff
        return self.is_superuser

    get_full_name.short_description = _('Full name')
class UploadFileModel(models.Model):
    """Simple container for a user-uploaded file with a free-text title."""
    title = models.TextField(default='')
    file = models.FileField(null=True)
| en | 0.766259 | # -*- coding: utf-8 -*- # Simplest possible answer: All superusers are staff | 2.193494 | 2 |
tests/processors/test_audio.py | ankitshah009/dcase_util | 122 | 6618421 | <gh_stars>100-1000
import nose.tools
import dcase_util
def test_AudioReadingProcessor():
    """AudioReadingProcessor: plain, mono, focus-segment and focus-channel reads.

    The bundled example file is 44.1 kHz, 441001 samples (~10 s), stereo.
    """
    # Simple reading
    processor = dcase_util.processors.AudioReadingProcessor()
    audio = processor.process(filename=dcase_util.utils.Example.audio_filename())
    nose.tools.eq_(audio.fs, 44100)
    # Stereo read keeps a 2-D (channels x samples) array.
    nose.tools.eq_(len(audio.data.shape), 2)
    nose.tools.eq_(audio.length, 441001)
    # Mono reading
    processor = dcase_util.processors.AudioReadingProcessor(mono=True)
    audio = processor.process(filename=dcase_util.utils.Example.audio_filename())
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 441001)
    # Focus segment
    # freeze() materialises the focus segment into the container data.
    processor = dcase_util.processors.AudioReadingProcessor(mono=True)
    audio = processor.process(
        filename=dcase_util.utils.Example.audio_filename(),
        focus_start_seconds=1.0,
        focus_duration_seconds=2.0
    ).freeze()
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 44100*2.0)
    # Focus channel
    # 'mixdown' collapses the stereo channels to a single one.
    processor = dcase_util.processors.AudioReadingProcessor()
    audio = processor.process(
        filename=dcase_util.utils.Example.audio_filename(),
        focus_channel='mixdown',
        focus_start_seconds=1.0,
        focus_duration_seconds=2.0
    ).freeze()
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 44100*2.0)
def test_MonoAudioReadingProcessor():
    """MonoAudioReadingProcessor collapses the example file to one channel."""
    # Simple reading
    processor = dcase_util.processors.MonoAudioReadingProcessor()
    audio = processor.process(filename=dcase_util.utils.Example.audio_filename())
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 441001)
| import nose.tools
import dcase_util
def test_AudioReadingProcessor():
    """AudioReadingProcessor: plain, mono, focus-segment and focus-channel reads.

    The bundled example file is 44.1 kHz, 441001 samples (~10 s), stereo.
    """
    # Simple reading
    processor = dcase_util.processors.AudioReadingProcessor()
    audio = processor.process(filename=dcase_util.utils.Example.audio_filename())
    nose.tools.eq_(audio.fs, 44100)
    # Stereo read keeps a 2-D (channels x samples) array.
    nose.tools.eq_(len(audio.data.shape), 2)
    nose.tools.eq_(audio.length, 441001)
    # Mono reading
    processor = dcase_util.processors.AudioReadingProcessor(mono=True)
    audio = processor.process(filename=dcase_util.utils.Example.audio_filename())
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 441001)
    # Focus segment
    # freeze() materialises the focus segment into the container data.
    processor = dcase_util.processors.AudioReadingProcessor(mono=True)
    audio = processor.process(
        filename=dcase_util.utils.Example.audio_filename(),
        focus_start_seconds=1.0,
        focus_duration_seconds=2.0
    ).freeze()
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 44100*2.0)
    # Focus channel
    # 'mixdown' collapses the stereo channels to a single one.
    processor = dcase_util.processors.AudioReadingProcessor()
    audio = processor.process(
        filename=dcase_util.utils.Example.audio_filename(),
        focus_channel='mixdown',
        focus_start_seconds=1.0,
        focus_duration_seconds=2.0
    ).freeze()
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 44100*2.0)
def test_MonoAudioReadingProcessor():
    """MonoAudioReadingProcessor collapses the example file to one channel."""
    # Simple reading
    processor = dcase_util.processors.MonoAudioReadingProcessor()
    audio = processor.process(filename=dcase_util.utils.Example.audio_filename())
    nose.tools.eq_(audio.fs, 44100)
    nose.tools.eq_(len(audio.data.shape), 1)
    nose.tools.eq_(audio.length, 441001)
projeto_morse/app_principal/models.py | Danilo-Xaxa/morse | 0 | 6618422 | <filename>projeto_morse/app_principal/models.py<gh_stars>0
from django.db import models
# Create your models here.
class TraducaoMorse(models.Model):
    """One Morse-to-text translation record (input Morse, decoded text)."""
    # NOTE(review): max_length on TextField is only enforced by form widgets,
    # not at the database level.
    morse = models.TextField(max_length=1000)
    texto = models.TextField(max_length=500)
    def __str__(self):
        return self.morse + ' -> ' + self.texto
class TraducaoTexto(models.Model):
    """One text-to-Morse translation record (input text, encoded Morse)."""
    texto = models.TextField(max_length=1000)
    morse = models.TextField(max_length=2500)
    def __str__(self):
        return self.texto + ' -> ' + self.morse
| <filename>projeto_morse/app_principal/models.py<gh_stars>0
from django.db import models
# Create your models here.
class TraducaoMorse(models.Model):
    """One Morse-to-text translation record (input Morse, decoded text)."""
    # NOTE(review): max_length on TextField is only enforced by form widgets,
    # not at the database level.
    morse = models.TextField(max_length=1000)
    texto = models.TextField(max_length=500)
    def __str__(self):
        return self.morse + ' -> ' + self.texto
class TraducaoTexto(models.Model):
    """One text-to-Morse translation record (input text, encoded Morse)."""
    texto = models.TextField(max_length=1000)
    morse = models.TextField(max_length=2500)
    def __str__(self):
        return self.texto + ' -> ' + self.morse
| en | 0.963489 | # Create your models here. | 2.307631 | 2 |
src/Twelfth Chapter/Example3.py | matthijskrul/ThinkPython | 0 | 6618423 | <filename>src/Twelfth Chapter/Example3.py
import random
def make_random_ints_no_dups(num, lower_bound, upper_bound):
    """
    Generate a list containing num random ints between
    lower_bound and upper_bound. upper_bound is an open bound.
    The result list cannot contain duplicates.

    Improvements over the original:
    * duplicate checking uses a set (O(1)) instead of scanning the
      result list (O(n) per candidate, O(n^2) overall);
    * raises ValueError when num exceeds the number of distinct values
      in [lower_bound, upper_bound) — the original looped forever.
    """
    if num > upper_bound - lower_bound:
        raise ValueError(
            "num exceeds the number of distinct values in the range")
    result = []
    seen = set()  # mirrors result; used only for fast membership tests
    rng = random.Random()
    while len(result) < num:
        candidate = rng.randrange(lower_bound, upper_bound)
        if candidate not in seen:
            seen.add(candidate)
            result.append(candidate)
    return result
# Demo: draw five distinct random ints from [1, 10000000) and print them.
xs = make_random_ints_no_dups(5, 1, 10000000)
print(xs)
| <filename>src/Twelfth Chapter/Example3.py
import random
def make_random_ints_no_dups(num, lower_bound, upper_bound):
    """
    Generate a list containing num random ints between
    lower_bound and upper_bound. upper_bound is an open bound.
    The result list cannot contain duplicates.

    Improvements over the original:
    * duplicate checking uses a set (O(1)) instead of scanning the
      result list (O(n) per candidate, O(n^2) overall);
    * raises ValueError when num exceeds the number of distinct values
      in [lower_bound, upper_bound) — the original looped forever.
    """
    if num > upper_bound - lower_bound:
        raise ValueError(
            "num exceeds the number of distinct values in the range")
    result = []
    seen = set()  # mirrors result; used only for fast membership tests
    rng = random.Random()
    while len(result) < num:
        candidate = rng.randrange(lower_bound, upper_bound)
        if candidate not in seen:
            seen.add(candidate)
            result.append(candidate)
    return result
# Demo: draw five distinct random ints from [1, 10000000) and print them.
xs = make_random_ints_no_dups(5, 1, 10000000)
print(xs)
| en | 0.709293 | Generate a list containing num random ints between lower_bound and upper_bound. upper_bound is an open bound. The result list cannot contain duplicates. | 3.853682 | 4 |
debug/stories/fastest_game_story.py | plastr/extrasolar-game | 0 | 6618424 | <gh_stars>0
# Copyright (c) 2010-2012 Lazy 8 Studios, LLC.
# All rights reserved.
from front import Constants, debug
from front.tools import replay_game
class StoryBeats(object):
    """Declarative beat list for the fastest possible story playthrough.

    Each nested class is one replay "beat" consumed in declaration order by
    the replay_game tool.  The class attributes are directives understood by
    ReplayGameBeat: ID_SPECIES (species to identify at that target),
    MESSAGES_UNLOCK / MESSAGES_FORWARD (mail actions), CREATE_NEXT_TARGETS,
    BEAT_ARRIVAL_DELTA (seconds to delay arrival), CLIENT_PROGRESS and
    RENDER_ADHOC_TARGET.  before_beat_run_beat/after_beat_run_beat chain
    extra beats around a declared one.
    """
    # An adhoc beat that is run before AT_LANDER to emulate completing the tutorials before
    # creating any targets.
    class COMPLETE_TUTORIALS(replay_game.ReplayGameBeat):
        CLIENT_PROGRESS = Constants.SIMULATOR_PROGRESS_KEYS
    class AT_LANDER(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_LANDER01']
    AT_LANDER.before_beat_run_beat(COMPLETE_TUTORIALS)
    class ID_5_SPECIES(replay_game.ReplayGameBeat):
        ID_SPECIES = debug.rects.PLANTS[0:5]
    class AT_ARTIFACT01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ARTIFACT01']
    class AT_ARTIFACT01_CLOSEUP(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ARTIFACT01']
    class JUST_INSIDE_SANDBOX(replay_game.ReplayGameBeat):
        CREATE_NEXT_TARGETS = 2
    class ID_10_SPECIES(replay_game.ReplayGameBeat):
        ID_SPECIES = debug.rects.PLANTS[5:11]
    class AT_STUCK_ROVER(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ROVER_DISASSEMBLED']
    class AT_AUDIO_TUTORIAL01_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL001']
    class AT_AUDIO_MYSTERY01_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02']
    class AT_GPS(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_MANMADE005']
        MESSAGES_UNLOCK = ['MSG_ENCRYPTION01']
    class AT_CENTRAL_MONUMENT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN08']
    class AT_OBELISK02_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB01']
    class AT_RUINS01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN09']
    class AT_RUINS02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN09']
    class AT_RUINS03(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN09']
    class AT_RUINS_SIGNAL(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN10']
    class AT_OBELISK03_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB02']
    class AT_OBELISK04_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB03']
    class AT_CODED_LOC(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_MANMADE006']
    class TOWARD_TURING_ROVER_02(replay_game.ReplayGameBeat):
        MESSAGES_UNLOCK = ['MSG_ENCRYPTION02']
    class TOWARD_TURING_ROVER_04(replay_game.ReplayGameBeat):
        MESSAGES_FORWARD = [('MSG_ENCRYPTION02', 'ENKI')]
    class AT_TURING_ROVER(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_MANMADE007']
        MESSAGES_UNLOCK = ['MSG_ENKI02d']
    class TOWARD_OBELISK05_02(replay_game.ReplayGameBeat):
        MESSAGES_UNLOCK = ['MSG_BACKb']
    class AT_OBELISK05_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB04']
    class AT_OBELISK06_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB05']
    class TOWARD_CENTRAL_MONUMENT_PLAYBACK02(replay_game.ReplayGameBeat):
        MESSAGES_UNLOCK = ['MSG_OBELISK06b']
    class TOWARD_MISSING_ROVER03(replay_game.ReplayGameBeat):
        CREATE_NEXT_TARGETS = 2
    class AFTER_MISSING_ROVER01(replay_game.ReplayGameBeat):
        pass
    # The AFTER_MISSING_ROVER beats are chained: each runs after the previous
    # via after_beat_run_beat, with arrival delays (in seconds).
    class AFTER_MISSING_ROVER02(replay_game.ReplayGameBeat):
        BEAT_ARRIVAL_DELTA = 90000
        RENDER_ADHOC_TARGET = True
    AFTER_MISSING_ROVER01.after_beat_run_beat(AFTER_MISSING_ROVER02)
    class AFTER_MISSING_ROVER03(replay_game.ReplayGameBeat):
        BEAT_ARRIVAL_DELTA = 600
        MESSAGES_UNLOCK = ['MSG_LASTTHINGa']
    AFTER_MISSING_ROVER02.after_beat_run_beat(AFTER_MISSING_ROVER03)
    class AFTER_MISSING_ROVER04(replay_game.ReplayGameBeat):
        BEAT_ARRIVAL_DELTA = 691200
        ID_SPECIES = ['SPC_PLANT65535']
    AFTER_MISSING_ROVER03.after_beat_run_beat(AFTER_MISSING_ROVER04)
    class SCI_FIND_COMMON_FIRST_TAGS(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT021_SUB04', 'SPC_PLANT024_SUB04', 'SPC_PLANT65535']
    class SCI_FIND_COMMON_SECOND_TAGS(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT021_SUB04', 'SPC_PLANT024_SUB04']
    class SCI_FIND_COMMONb(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT024_SUB04']
    class SCI_FIND_COMMONa(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT021_SUB04']
    class ID_15_SPECIES(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT012']
    class ID_GORDY_TREE_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032']
    class ID_GORDY_TREE_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032']
    class ID_GORDY_TREE_03(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032']
    class ID_GORDY_TREE_YOUNG(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032_SUB01']
    class ID_GORDY_TREE_DEAD(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032_SUB02']
    class ID_BRISTLETONGUE_VARIANT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL006']
    class ID_THIRD_CNIDERIA(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT034']
    class ID_STARSPORE_OPEN_CLOSED_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT028', 'SPC_PLANT028_SUB03']
    class ID_STARSPORE_OPEN_CLOSED_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT028', 'SPC_PLANT028_SUB03']
    class ID_BIOLUMINESCENCE_DAY(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT015', 'SPC_PLANT022', 'SPC_PLANT031']
    class ID_BIOLUMINESCENCE_NIGHT_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT015_SUB05', 'SPC_PLANT022_SUB05', 'SPC_PLANT031_SUB05']
    class ID_BIOLUMINESCENCE_NIGHT_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT015_SUB05', 'SPC_PLANT022_SUB05', 'SPC_PLANT031_SUB05']
    class ID_SAIL_FLYER_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL004']
    class ID_SAIL_FLYER_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL004']
    class ID_SAIL_FLYER_03(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL004']
    class OUTSIDE_AUDIO_MYSTERY07_ZONE(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT014', 'SPC_PLANT014_SUB04']
    class AT_AUDIO_MYSTERY07(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT014_SUB04']
def routes():
    """Return the route structs for the three story rovers, in rover order."""
    rover_routes = (
        debug.routes.FASTEST_STORY_ROVER1,
        debug.routes.FASTEST_STORY_ROVER2,
        debug.routes.FASTEST_STORY_ROVER3,
    )
    return [debug.routes.struct(route) for route in rover_routes]
def beats():
    # Single beat-container class consumed by the replay tool for this story.
    return [StoryBeats]
| # Copyright (c) 2010-2012 Lazy 8 Studios, LLC.
# All rights reserved.
from front import Constants, debug
from front.tools import replay_game
class StoryBeats(object):
    """Declarative beat list for the fastest possible story playthrough.

    Each nested class is one replay "beat" consumed in declaration order by
    the replay_game tool.  The class attributes are directives understood by
    ReplayGameBeat: ID_SPECIES (species to identify at that target),
    MESSAGES_UNLOCK / MESSAGES_FORWARD (mail actions), CREATE_NEXT_TARGETS,
    BEAT_ARRIVAL_DELTA (seconds to delay arrival), CLIENT_PROGRESS and
    RENDER_ADHOC_TARGET.  before_beat_run_beat/after_beat_run_beat chain
    extra beats around a declared one.
    """
    # An adhoc beat that is run before AT_LANDER to emulate completing the tutorials before
    # creating any targets.
    class COMPLETE_TUTORIALS(replay_game.ReplayGameBeat):
        CLIENT_PROGRESS = Constants.SIMULATOR_PROGRESS_KEYS
    class AT_LANDER(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_LANDER01']
    AT_LANDER.before_beat_run_beat(COMPLETE_TUTORIALS)
    class ID_5_SPECIES(replay_game.ReplayGameBeat):
        ID_SPECIES = debug.rects.PLANTS[0:5]
    class AT_ARTIFACT01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ARTIFACT01']
    class AT_ARTIFACT01_CLOSEUP(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ARTIFACT01']
    class JUST_INSIDE_SANDBOX(replay_game.ReplayGameBeat):
        CREATE_NEXT_TARGETS = 2
    class ID_10_SPECIES(replay_game.ReplayGameBeat):
        ID_SPECIES = debug.rects.PLANTS[5:11]
    class AT_STUCK_ROVER(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ROVER_DISASSEMBLED']
    class AT_AUDIO_TUTORIAL01_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL001']
    class AT_AUDIO_MYSTERY01_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02']
    class AT_GPS(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_MANMADE005']
        MESSAGES_UNLOCK = ['MSG_ENCRYPTION01']
    class AT_CENTRAL_MONUMENT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN08']
    class AT_OBELISK02_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB01']
    class AT_RUINS01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN09']
    class AT_RUINS02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN09']
    class AT_RUINS03(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN09']
    class AT_RUINS_SIGNAL(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN10']
    class AT_OBELISK03_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB02']
    class AT_OBELISK04_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB03']
    class AT_CODED_LOC(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_MANMADE006']
    class TOWARD_TURING_ROVER_02(replay_game.ReplayGameBeat):
        MESSAGES_UNLOCK = ['MSG_ENCRYPTION02']
    class TOWARD_TURING_ROVER_04(replay_game.ReplayGameBeat):
        MESSAGES_FORWARD = [('MSG_ENCRYPTION02', 'ENKI')]
    class AT_TURING_ROVER(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_MANMADE007']
        MESSAGES_UNLOCK = ['MSG_ENKI02d']
    class TOWARD_OBELISK05_02(replay_game.ReplayGameBeat):
        MESSAGES_UNLOCK = ['MSG_BACKb']
    class AT_OBELISK05_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB04']
    class AT_OBELISK06_PINPOINT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_UNKNOWN_ORIGIN02_SUB05']
    class TOWARD_CENTRAL_MONUMENT_PLAYBACK02(replay_game.ReplayGameBeat):
        MESSAGES_UNLOCK = ['MSG_OBELISK06b']
    class TOWARD_MISSING_ROVER03(replay_game.ReplayGameBeat):
        CREATE_NEXT_TARGETS = 2
    class AFTER_MISSING_ROVER01(replay_game.ReplayGameBeat):
        pass
    # The AFTER_MISSING_ROVER beats are chained: each runs after the previous
    # via after_beat_run_beat, with arrival delays (in seconds).
    class AFTER_MISSING_ROVER02(replay_game.ReplayGameBeat):
        BEAT_ARRIVAL_DELTA = 90000
        RENDER_ADHOC_TARGET = True
    AFTER_MISSING_ROVER01.after_beat_run_beat(AFTER_MISSING_ROVER02)
    class AFTER_MISSING_ROVER03(replay_game.ReplayGameBeat):
        BEAT_ARRIVAL_DELTA = 600
        MESSAGES_UNLOCK = ['MSG_LASTTHINGa']
    AFTER_MISSING_ROVER02.after_beat_run_beat(AFTER_MISSING_ROVER03)
    class AFTER_MISSING_ROVER04(replay_game.ReplayGameBeat):
        BEAT_ARRIVAL_DELTA = 691200
        ID_SPECIES = ['SPC_PLANT65535']
    AFTER_MISSING_ROVER03.after_beat_run_beat(AFTER_MISSING_ROVER04)
    class SCI_FIND_COMMON_FIRST_TAGS(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT021_SUB04', 'SPC_PLANT024_SUB04', 'SPC_PLANT65535']
    class SCI_FIND_COMMON_SECOND_TAGS(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT021_SUB04', 'SPC_PLANT024_SUB04']
    class SCI_FIND_COMMONb(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT024_SUB04']
    class SCI_FIND_COMMONa(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT021_SUB04']
    class ID_15_SPECIES(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT012']
    class ID_GORDY_TREE_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032']
    class ID_GORDY_TREE_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032']
    class ID_GORDY_TREE_03(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032']
    class ID_GORDY_TREE_YOUNG(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032_SUB01']
    class ID_GORDY_TREE_DEAD(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT032_SUB02']
    class ID_BRISTLETONGUE_VARIANT(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL006']
    class ID_THIRD_CNIDERIA(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT034']
    class ID_STARSPORE_OPEN_CLOSED_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT028', 'SPC_PLANT028_SUB03']
    class ID_STARSPORE_OPEN_CLOSED_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT028', 'SPC_PLANT028_SUB03']
    class ID_BIOLUMINESCENCE_DAY(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT015', 'SPC_PLANT022', 'SPC_PLANT031']
    class ID_BIOLUMINESCENCE_NIGHT_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT015_SUB05', 'SPC_PLANT022_SUB05', 'SPC_PLANT031_SUB05']
    class ID_BIOLUMINESCENCE_NIGHT_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT015_SUB05', 'SPC_PLANT022_SUB05', 'SPC_PLANT031_SUB05']
    class ID_SAIL_FLYER_01(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL004']
    class ID_SAIL_FLYER_02(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL004']
    class ID_SAIL_FLYER_03(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_ANIMAL004']
    class OUTSIDE_AUDIO_MYSTERY07_ZONE(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT014', 'SPC_PLANT014_SUB04']
    class AT_AUDIO_MYSTERY07(replay_game.ReplayGameBeat):
        ID_SPECIES = ['SPC_PLANT014_SUB04']
def routes():
    """Return the route structs for the three story rovers, in rover order."""
    rover_routes = (
        debug.routes.FASTEST_STORY_ROVER1,
        debug.routes.FASTEST_STORY_ROVER2,
        debug.routes.FASTEST_STORY_ROVER3,
    )
    return [debug.routes.struct(route) for route in rover_routes]
def beats():
    # Single beat-container class consumed by the replay tool for this story.
    return [StoryBeats]
robot_status.py | osm3000/PyRobo2D | 0 | 6618425 | <filename>robot_status.py
class RobotStatus:
    """Accumulates per-step robot state (position, rotation, sensor readings,
    collisions, ball pickups) and builds a flat status vector from the most
    recent entry of each enabled field.
    """

    def __init__(self, log_robot_pos=True, log_robot_sensors=True, properties=None):
        """Create an empty status log.

        Args:
            log_robot_pos: flag stored for callers; not used internally here.
            log_robot_sensors: flag stored for callers; not used internally here.
            properties: dict selecting which fields get_robot_status() emits
                ('position', 'rotation', 'sensors', 'collision', 'ball_flag').
                When None or empty, a default selection is applied.

        BUGFIX: the original signature used a mutable default (properties={}),
        so every instance constructed without an explicit dict shared — and
        mutated — the same dictionary object.  A None sentinel gives each
        instance its own dict.
        """
        self.log_robot_pos = log_robot_pos
        self.log_robot_sensors = log_robot_sensors
        self.robot_position = []
        self.robot_rotation = []
        self.robot_sensors_readings = []
        self.collisions = []
        self.ball_collect = []
        self.game_over = False
        self.game_score = 0
        self.properties = {} if properties is None else properties
        if len(self.properties.keys()) == 0:
            # Default field selection: everything except collisions.
            self.properties['position'] = True
            self.properties['rotation'] = True
            self.properties['sensors'] = True
            self.properties['collision'] = False
            self.properties['ball_flag'] = True

    def __str__(self):
        # NOTE: requires at least one logged step in every list; indexing [-1]
        # on an empty history raises IndexError (original behavior, kept).
        final_string = "History length: " + str(len(self.robot_rotation)) + "\n"
        final_string += "Robot position: " + str(self.robot_position[-1]) + "\n"
        final_string += "Robot rotation: " + str(self.robot_rotation[-1]) + "\n"
        final_string += "Sensors: " + str(self.robot_sensors_readings[-1]) + "\n"
        final_string += "Collisions: " + str(self.collisions[-1]) + "\n"
        final_string += "Ball Falg: " + str(self.ball_collect[-1]) + "\n"
        return final_string

    def get_robot_status(self):
        """Return the latest state as a flat list, honoring self.properties.

        Order: position components, rotation, sensor readings, ball flag (as
        int).  Fields whose property flag is falsy are omitted.
        """
        robot_status_vector = []
        if self.properties['position']:
            robot_status_vector += self.robot_position[-1]
        if self.properties['rotation']:
            robot_status_vector += [self.robot_rotation[-1]]
        if self.properties['sensors']:
            robot_status_vector += self.robot_sensors_readings[-1]
        if self.properties['ball_flag']:
            robot_status_vector += [int(self.ball_collect[-1])]
        return robot_status_vector
| <filename>robot_status.py
class RobotStatus:
def __init__(self, log_robot_pos=True, log_robot_sensors=True, properties={}):
self.log_robot_pos = log_robot_pos
self.log_robot_sensors = log_robot_sensors
self.robot_position = []
self.robot_rotation = []
self.robot_sensors_readings = []
self.collisions = []
self.ball_collect = []
self.game_over = False
self.game_score = 0
self.properties = properties
if len(self.properties.keys()) == 0:
self.properties['position'] = True
self.properties['rotation'] = True
self.properties['sensors'] = True
self.properties['collision'] = False
self.properties['ball_flag'] = True
def __str__(self):
final_string = "History length: " + str(len(self.robot_rotation)) + "\n"
final_string += "Robot position: " + str(self.robot_position[-1]) + "\n"
final_string += "Robot rotation: " + str(self.robot_rotation[-1]) + "\n"
final_string += "Sensors: " + str(self.robot_sensors_readings[-1]) + "\n"
final_string += "Collisions: " + str(self.collisions[-1]) + "\n"
final_string += "Ball Falg: " + str(self.ball_collect[-1]) + "\n"
return final_string
def get_robot_status(self):
robot_status_vector = []
# print ("self.ball_collect: ", self.ball_collect[-1])
if self.properties['position']:
robot_status_vector += self.robot_position[-1]
if self.properties['rotation']:
robot_status_vector += [self.robot_rotation[-1]]
if self.properties['sensors']:
robot_status_vector += self.robot_sensors_readings[-1]
if self.properties['ball_flag']:
# robot_status_vector += [self.ball_collect[-1]]
robot_status_vector += [int(self.ball_collect[-1])]
return robot_status_vector
| en | 0.458583 | # print ("self.ball_collect: ", self.ball_collect[-1]) # robot_status_vector += [self.ball_collect[-1]] | 3.252209 | 3 |
Coding/Competitive_Coding/CodeForces/0 - 1300/atest.py | Phantom586/My_Codes | 0 | 6618426 | n = int(input())
# Find the (1-based) town with the strictly smallest temperature; if the
# minimum is shared, the answer is ambiguous ("Still Rozdil").
lst = list(map(int, input().split()))
mn = min(lst)                  # smallest value in the list
index = lst.index(mn) + 1      # 1-based position of its first occurrence
c = lst.count(mn) - 1          # how many other entries tie the minimum
if c > 0:
    print("Still Rozdil")
else:
print(index) | n = int(input())
lst = list(map(int, input().split()))
mn = lst[0]
c = 0
index = 1
for i in range(1, len(lst)):
if lst[i] < mn:
c = 0
mn = lst[i]
index = i+1
elif lst[i] == mn:
c += 1
if c > 0:
print("Still Rozdil")
else:
print(index) | none | 1 | 3.249556 | 3 | |
trees/nodes_in_a_subtree.py | elenaborisova/LeetCode-Solutions | 0 | 6618427 | from collections import deque
class Node:
    """N-ary tree node holding an integer label (``val``) and a child list."""

    def __init__(self, data):
        self.val = data
        self.children = []


def count_of_nodes(root, queries, s):
    """Answer subtree letter-count queries.

    ``s`` assigns a letter to each node by its 1-based label.  For every
    query ``(node_val, letter)``, count the nodes in the subtree rooted at
    the node labelled ``node_val`` whose assigned letter equals ``letter``.
    Returns one count per query, in order.
    """
    letters_map = {i: c for i, c in enumerate(s, start=1)}
    result = []

    for node_val, letter in queries:
        queue = deque([root])
        in_subtree = False
        count = 0
        while queue:
            current = queue.popleft()
            if not in_subtree and current.val == node_val:
                # Found the subtree root: restart the BFS from it alone,
                # discarding everything else that was queued (keeps the
                # traversal inside the requested subtree).
                in_subtree = True
                queue = deque([current])
                continue
            for child in current.children:
                queue.append(child)
            # BUG FIX: the original compared the integer node label to the
            # query letter (``current.val == letter``), which is never true,
            # so every query answered 0 (the module's own testcases printed
            # False).  Compare the node's mapped letter instead.
            if in_subtree and letters_map[current.val] == letter:
                count += 1
        result.append(count)

    return result
# Testcase 1
# Tree: 1 -> (2, 3); s assigns letters by 1-based node label: 1->'a', 2->'b', 3->'a'.
s_1 = 'aba'
root_1 = Node(1)
root_1.children.append(Node(2))
root_1.children.append(Node(3))
queries_1 = [(1, 'a')]
expected_1 = [2]  # nodes 1 and 3 carry 'a' within the subtree of 1
print(count_of_nodes(root_1, queries_1, s_1) == expected_1)  # True when count_of_nodes is correct
# Testcase 2
# Tree: 1 -> (2, 3, 7); 2 -> (4, 5); 3 -> (6)
s_2 = 'abaacab'
root_2 = Node(1)
root_2.children.append(Node(2))
root_2.children.append(Node(3))
root_2.children.append(Node(7))
root_2.children[0].children.append(Node(4))
root_2.children[0].children.append(Node(5))
root_2.children[1].children.append(Node(6))
queries_2 = [(1, 'a'), (2, 'b'), (3, 'a')]
expected_2 = [4, 1, 2]
print(count_of_nodes(root_2, queries_2, s_2) == expected_2)  # True when count_of_nodes is correct
| from collections import deque
class Node:
def __init__(self, data):
self.val = data
self.children = []
def count_of_nodes(root, queries, s):
letters_map = {i: c for i, c in enumerate(s, start=1)}
result = []
for query in queries:
node_val, letter = query
queue = deque([root])
visited = []
new_root_is_found = False
while queue:
current = queue.popleft()
if not new_root_is_found and current.val == node_val:
new_root_is_found = True
queue = deque([current])
continue
for child in current.children:
queue.append(child)
if new_root_is_found and current.val == letter:
visited.append(current)
count = len([n for n in visited if letters_map[n.val] == letter])
result.append(count)
return result
# Testcase 1
s_1 = 'aba'
root_1 = Node(1)
root_1.children.append(Node(2))
root_1.children.append(Node(3))
queries_1 = [(1, 'a')]
expected_1 = [2]
print(count_of_nodes(root_1, queries_1, s_1) == expected_1)
# Testcase 2
s_2 = 'abaacab'
root_2 = Node(1)
root_2.children.append(Node(2))
root_2.children.append(Node(3))
root_2.children.append(Node(7))
root_2.children[0].children.append(Node(4))
root_2.children[0].children.append(Node(5))
root_2.children[1].children.append(Node(6))
queries_2 = [(1, 'a'), (2, 'b'), (3, 'a')]
expected_2 = [4, 1, 2]
print(count_of_nodes(root_2, queries_2, s_2) == expected_2)
| uk | 0.111278 | # Testcase 1 # Testcase 2 | 3.68187 | 4 |
uvio/streamio/buffered_stream.py | srossross/uvio | 3 | 6618428 | <gh_stars>1-10
from .stream_wrapper import StreamWrapper
from .buffer_utils import StreamRead, StreamReadline
class BufferedStream(StreamWrapper):
    """Stream wrapper that buffers incoming ``data`` events so callers can
    issue ``read(n)`` / ``readline()`` requests against the buffered bytes.

    Pending readers are kept in FIFO order and are completed strictly in
    order as data (or EOF) arrives.
    """

    def __init__(self, stream):
        super().__init__(stream)
        self.buffering = False       # flips to True on the first read/readline
        self._read_buffer = b''
        self._eof = False
        self._readers = []           # pending readers, oldest first
        self.stream.data(self._notify_reader_data)
        self.stream.end(self._notify_reader_end)

    def unshift(self, buf):
        '''unshift(buf)

        Not implemented:
        unshift a buffer
        '''
        # BUG FIX: the original had an unreachable statement after this raise
        # (``self._read_buffer = buf + self._read_buffer``); removed.
        raise NotImplementedError("this stream is not buffering input")

    def read(self, n):
        '''read(n)

        read at most n bytes from the stream
        '''
        self.buffering = True
        reader = StreamRead(self, n)
        if not reader.done():
            self._readers.append(reader)
        return reader

    def readline(self, max_size=None, end=b'\n'):
        '''readline(max_size=None, end=b'\\n')

        read and return bytes until `end` is encountered
        '''
        self.buffering = True
        reader = StreamReadline(self, max_size, end)
        if not reader.done():
            self._readers.append(reader)
        return reader

    def _drain_readers(self):
        """Complete pending readers in FIFO order while the oldest one is
        satisfiable.

        BUG FIX: the original looped ``while self._readers:`` and only popped
        when the head reader was done, so it spun forever whenever the oldest
        reader could not yet be satisfied.  (Assumes ``done()`` reflects
        whether the reader can complete from the current buffer/EOF state --
        TODO confirm against buffer_utils.)
        """
        while self._readers and self._readers[0].done():
            self._readers.pop(0).notify()

    def _notify_reader_data(self, buf):
        # Callback wired to the underlying stream's data events.
        if self.buffering:
            self._read_buffer += buf
        self._drain_readers()

    def _notify_reader_end(self):
        # Callback wired to the underlying stream's end-of-stream event.
        self._eof = True
        self._drain_readers()
| from .stream_wrapper import StreamWrapper
from .buffer_utils import StreamRead, StreamReadline
class BufferedStream(StreamWrapper):
def __init__(self, stream):
super().__init__(stream)
self.buffering = False
self._read_buffer = b''
self._eof = False
self._readers = []
self.stream.data(self._notify_reader_data)
self.stream.end(self._notify_reader_end)
def unshift(self, buf):
'''unshift(buf)
Not implemented:
unshift a buffer
'''
raise NotImplementedError("this stream is not buffering input")
self._read_buffer = buf + self._read_buffer
def read(self, n):
'''read(n)
read at most n bytes from the stream
'''
self.buffering = True
reader = StreamRead(self, n)
if not reader.done():
self._readers.append(reader)
return reader
def readline(self, max_size=None, end=b'\n'):
'''readline(max_size=None, end=b'\\n')
read and return bytes until `end` is encountered
'''
self.buffering = True
reader = StreamReadline(self, max_size, end)
if not reader.done():
self._readers.append(reader)
return reader
def _notify_reader_data(self, buf):
if self.buffering:
self._read_buffer += buf
while self._readers:
if self._readers[0].done():
reader = self._readers.pop(0)
reader.notify()
def _notify_reader_end(self):
self._eof = True
while self._readers:
if self._readers[0].done():
reader = self._readers.pop(0)
reader.notify() | en | 0.792129 | unshift(buf) Not implemented: unshift a buffer read(n) read at most n bytes from the stream readline(max_size=None, end=b'\\n') read and return bytes until `end` is encountered | 3.106164 | 3 |
apps/core/models.py | tayyabRazzaq/leaderboard | 0 | 6618429 | """
core app database models
"""
from django.db import models
from django.contrib.auth.models import User
from apps.core.constants import GENDER_CHOICES
# Create your models here.
class Person(models.Model):
    """
    Profile model extending Django's built-in ``User`` with extra fields.

    Linked one-to-one to ``django.contrib.auth.models.User``.
    """
    # NOTE(review): OneToOneField without on_delete implies Django < 2.0
    # (on_delete became a required argument in 2.0) -- confirm the project's
    # Django version before upgrading.
    user = models.OneToOneField(User)
    fathers_name = models.CharField(max_length=30, null=True, blank=True)
    date_of_birth = models.DateField(null=True, blank=True)
    # Single-character code; valid values come from GENDER_CHOICES.
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
    cellphone_number = models.CharField(null=True, blank=True, max_length=20)
    # External chat identifier -- presumably for a messaging integration; confirm.
    chat_id = models.CharField(blank=True, max_length=150, verbose_name='Chat ID')
    # Indexed to support fast lookups by name.
    full_name = models.CharField(max_length=100, blank=True, null=True, db_index=True)
| """
core app database models
"""
from django.db import models
from django.contrib.auth.models import User
from apps.core.constants import GENDER_CHOICES
# Create your models here.
class Person(models.Model):
"""
person model to add custom fields to user model
"""
user = models.OneToOneField(User)
fathers_name = models.CharField(max_length=30, null=True, blank=True)
date_of_birth = models.DateField(null=True, blank=True)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
cellphone_number = models.CharField(null=True, blank=True, max_length=20)
chat_id = models.CharField(blank=True, max_length=150, verbose_name='Chat ID')
full_name = models.CharField(max_length=100, blank=True, null=True, db_index=True)
| en | 0.904818 | core app database models # Create your models here. person model to add custom fields to user model | 2.764351 | 3 |
daedalus/token.py | nsetzer/daedalus | 1 | 6618430 |
import ast as pyast
class TokenError(Exception):
    """Error raised during lexing/parsing, annotated with the offending token."""

    def __init__(self, token, message):
        # Keep the raw message; the exception text carries the token context.
        self.original_message = message
        self.token = token
        detailed = (
            "type: %s line: %d column: %d (%r) %s"
            % (token.type, token.line, token.index, token.value, message)
        )
        super().__init__(detailed)
def ast2json_obj(ast):
    """Convert a T_OBJECT token into a plain dict (string keys only)."""
    obj = {}
    for pair in ast.children:
        # Every child must be a key:value binary node.
        if pair.type != Token.T_BINARY or pair.value != ':':
            raise ValueError("%s:%s" % (pair.type, pair.value))
        lhs, rhs = pair.children
        if lhs.type != Token.T_STRING:
            raise ValueError("%s:%s" % (lhs.type, lhs.value))
        obj[pyast.literal_eval(lhs.value)] = ast2json(rhs)
    return obj
def ast2json_seq(ast):
    """Convert a T_LIST token into a plain Python list."""
    return [ast2json(child) for child in ast.children]
def ast2json(ast):
    """Convert a parsed expression tree into plain Python values.

    Only the JSON-compatible subset is supported (objects, lists, string and
    number literals, unary minus on numbers, and the keywords true/false/
    null/undefined); anything else raises ValueError.
    """
    if ast.type == Token.T_OBJECT:
        return ast2json_obj(ast)
    elif ast.type == Token.T_LIST:
        return ast2json_seq(ast)
    elif ast.type == Token.T_STRING:
        # literal_eval safely evaluates the quoted source literal.
        return pyast.literal_eval(ast.value)
    elif ast.type == Token.T_NUMBER:
        return pyast.literal_eval(ast.value)
    elif ast.type == Token.T_PREFIX:
        # Only unary minus applied directly to a number literal is allowed.
        if ast.value == "-" and ast.children and ast.children[0].type == Token.T_NUMBER:
            val = ast2json(ast.children[0])
            return -val
        else:
            raise ValueError("%s:%s" % (ast.type, ast.value))
    elif ast.type == Token.T_KEYWORD:
        if ast.value == 'true':
            return True
        elif ast.value == 'false':
            return False
        elif ast.value == 'null':
            return None
        elif ast.value == 'undefined':
            # 'undefined' has no JSON equivalent; mapped to None as well.
            return None
        else:
            raise ValueError("%s:%s" % (ast.type, ast.value))
    else:
        raise ValueError("%s:%s" % (ast.type, ast.value))
class Token(object):
    """A node in the lexer/parser tree.

    The ``T_*`` class attributes enumerate every token/node type.  An
    instance carries a type, source position (``line``/``index``), the
    source text ``value``, and a list of child tokens.
    """

    # tokens produced by the lexer
    T_TEXT = "T_TEXT"
    T_KEYWORD = "T_KEYWORD"
    T_NUMBER = "T_NUMBER"
    T_STRING = "T_STRING"
    T_TEMPLATE_STRING = "T_TEMPLATE_STRING"
    T_DOCUMENTATION = "T_DOCUMENTATION"
    T_SPECIAL = "T_SPECIAL"
    T_NEWLINE = "T_NEWLINE"
    T_REGEX = "T_REGEX"

    # tokens created by the parser
    T_MODULE = "T_MODULE"
    T_ATTR = "T_ATTR"
    T_GROUPING = "T_GROUPING"
    T_ARGLIST = "T_ARGLIST"
    T_FUNCTIONCALL = "T_FUNCTIONCALL"
    T_SUBSCR = "T_SUBSCR"
    T_BLOCK = "T_BLOCK"
    T_BLOCK_LABEL = "T_BLOCK_LABEL"
    T_LIST = "T_LIST"
    T_OBJECT = "T_OBJECT"
    T_TUPLE = "T_TUPLE"  # immutable list
    T_RECORD = "T_RECORD"  # immutable object
    T_PREFIX = "T_PREFIX"
    T_POSTFIX = "T_POSTFIX"
    T_BINARY = "T_BINARY"
    T_TERNARY = "T_TERNARY"
    T_COMMA = "T_COMMA"
    T_ASSIGN = "T_ASSIGN"

    # tokens created by the parser (processed keywords)
    T_GET_ATTR = "T_GET_ATTR"
    T_BREAK = "T_BREAK"
    T_BRANCH = "T_BRANCH"
    T_CASE = "T_CASE"
    T_CATCH = "T_CATCH"
    T_CLASS = "T_CLASS"
    T_CLASS_BLOCK = "T_CLASS_BLOCK"
    T_CONTINUE = "T_CONTINUE"
    T_DEFAULT = "T_DEFAULT"
    T_DOWHILE = "T_DOWHILE"
    T_EXPORT = "T_EXPORT"
    T_EXPORT_DEFAULT = "T_EXPORT_DEFAULT"
    T_IMPORT = "T_IMPORT"
    T_IMPORT_JS_MODULE = "T_IMPORT_JS_MODULE"
    T_IMPORT_JS_MODULE_AS = "T_IMPORT_JS_MODULE_AS"
    T_IMPORT_MODULE = "T_IMPORT_MODULE"
    T_PYIMPORT = "T_PYIMPORT"
    T_INCLUDE = "T_INCLUDE"
    T_FINALLY = "T_FINALLY"
    T_FOR = "T_FOR"
    T_FOR_OF = "T_FOR_OF"
    T_FOR_AWAIT_OF = "T_FOR_AWAIT_OF"
    T_FOR_IN = "T_FOR_IN"
    T_NEW = "T_NEW"
    T_RETURN = "T_RETURN"
    T_SWITCH = "T_SWITCH"
    T_THROW = "T_THROW"
    T_TRY = "T_TRY"
    T_VAR = "T_VAR"
    T_WHILE = "T_WHILE"
    T_OPTIONAL_CHAINING = "T_OPTIONAL_CHAINING"
    T_UNPACK_SEQUENCE = "T_UNPACK_SEQUENCE"
    T_UNPACK_OBJECT = "T_UNPACK_OBJECT"
    T_LOGICAL_AND = "T_LOGICAL_AND"
    T_LOGICAL_OR = "T_LOGICAL_OR"
    T_INSTANCE_OF = "T_INSTANCE_OF"
    T_SPREAD = "T_SPREAD"
    T_STATIC_PROPERTY = "T_STATIC_PROPERTY"
    T_YIELD = "T_YIELD"
    T_YIELD_FROM = "T_YIELD_FROM"
    T_INTERFACE = "T_INTERFACE"

    # function types
    T_FUNCTION = "T_FUNCTION"
    T_ASYNC_FUNCTION = "T_ASYNC_FUNCTION"
    T_GENERATOR = "T_GENERATOR"
    T_ASYNC_GENERATOR = "T_ASYNC_GENERATOR"
    T_ANONYMOUS_FUNCTION = "T_ANONYMOUS_FUNCTION"
    T_ASYNC_ANONYMOUS_FUNCTION = "T_ASYNC_ANONYMOUS_FUNCTION"
    T_ANONYMOUS_GENERATOR = "T_ANONYMOUS_GENERATOR"
    T_ASYNC_ANONYMOUS_GENERATOR = "T_ASYNC_ANONYMOUS_GENERATOR"
    T_METHOD = "T_METHOD"
    T_LAMBDA = "T_LAMBDA"  # arrow function

    # these variables are assigned by the transform engine for variable scopes
    T_GLOBAL_VAR = 'T_GLOBAL_VAR'
    T_LOCAL_VAR = 'T_LOCAL_VAR'
    T_DELETE_VAR = 'T_DELETE_VAR'
    T_CLOSURE = 'T_CLOSURE'
    T_CELL_VAR = 'T_CELL_VAR'
    T_FREE_VAR = 'T_FREE_VAR'

    T_TEMPLATE_EXPRESSION = "T_TEMPLATE_EXPRESSION"
    T_TAGGED_TEMPLATE = "T_TAGGED_TEMPLATE"

    # a token which stands for no token
    T_EMPTY_TOKEN = "T_EMPTY_TOKEN"

    T_TYPE = "T_TYPE"

    # tokens created by the compiler
    T_BLOCK_PUSH = "T_BLOCK_PUSH"
    T_BLOCK_POP = "T_BLOCK_POP"

    def __init__(self, type, line=0, index=0, value="", children=None):
        super(Token, self).__init__()
        self.type = type
        self.line = line
        self.index = index
        self.value = value
        # Copy the child list so callers' lists are never aliased.
        self.children = list(children) if children is not None else []
        self.file = None
        self.original_value = None
        self.ref = None
        self.ref_attr = 0  # 1: define, 2: store, 4: load

    def __str__(self):
        return self.toString(False, 0)

    def __repr__(self):
        return "Token(Token.%s, %r, %r, %r)" % (
            self.type, self.line, self.index, self.value)

    def toString(self, pretty=True, depth=0, pad="  "):
        """Render the token (sub)tree as text.

        ``pretty`` selects one of four formats: 3 -> indented tree with
        custom pad, 2 -> TOKEN(...) nesting, truthy -> indented tree with
        positions, falsy -> compact single-line form.
        """
        if pretty == 3:
            s = "%s<%r>" % (self.type, self.value)
            parts = ["%s%s\n" % (pad * depth, s)]
            for child in self.children:
                try:
                    parts.append(child.toString(pretty, depth + 1, pad))
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                except Exception:
                    print(child)  # debugging fallback for malformed children
            return ''.join(parts)
        elif pretty == 2:
            if len(self.children) == 0:
                s = "\n%sTOKEN(%r, %r)" % ("  " * depth, self.type, self.value)
                return s
            else:
                s = "\n%sTOKEN(%r, %r, " % ("  " * depth, self.type, self.value)
                c = [child.toString(pretty, depth + 1) for child in self.children]
                return s + ', '.join(c) + ")"
        elif pretty:
            s = "%s<%s,%s,%r>" % (self.type, self.line, self.index, self.value)
            parts = ["%s%s\n" % (pad * depth, s)]
            for child in self.children:
                try:
                    parts.append(child.toString(pretty, depth + 1))
                # BUG FIX: narrowed bare ``except:`` (see above).
                except Exception:
                    print(child)  # debugging fallback for malformed children
            return ''.join(parts)
        elif self.children:
            s = "%s<%r>" % (self.type, self.value)
            t = ','.join(child.toString(False) for child in self.children)
            return "%s{%s}" % (s, t)
        else:
            return "%s<%r>" % (self.type, self.value)

    def flatten(self, depth=0):
        """Return the tree as a pre-order list of (depth, token) pairs."""
        items = [(depth, self)]
        for child in self.children:
            items.extend(child.flatten(depth + 1))
        return items

    @staticmethod
    def basicType(token):
        """True if ``token`` is a simple value or a closing bracket/paren."""
        return token and (token.type in (Token.T_TEXT, Token.T_NUMBER, Token.T_STRING) or
               token.type == Token.T_SPECIAL and token.value in "])")

    def clone(self, **keys):
        """Deep-copy this subtree, preserving ref/ref_attr; ``keys``
        override attributes on the returned root."""
        tok = Token(self.type, self.line, self.index, self.value)
        tok.file = self.file
        tok.original_value = self.original_value
        tok.children = [c.clone() for c in self.children]
        tok.ref = self.ref
        tok.ref_attr = self.ref_attr
        tok.__dict__.update(keys)
        return tok

    @staticmethod
    def deepCopy(token):
        """Iteratively deep-copy a subtree.

        Unlike ``clone`` this does not carry over ``ref``/``ref_attr``
        (copies get the defaults from ``__init__``).
        """
        queue = []
        root = Token(token.type, token.line, token.index, token.value)
        root.file = token.file
        root.original_value = token.original_value
        for child in reversed(token.children):
            queue.append((child, root))
        while queue:
            tok, parent = queue.pop()
            new_tok = Token(tok.type, tok.line, tok.index, tok.value)
            new_tok.file = tok.file
            new_tok.original_value = tok.original_value
            parent.children.append(new_tok)
            for child in reversed(tok.children):
                queue.append((child, new_tok))
        return root

    def toJson(self):
        """Serialize this (JSON-subset) tree to plain Python values."""
        return ast2json(self)
|
import ast as pyast
class TokenError(Exception):
def __init__(self, token, message):
self.original_message = message
message = "type: %s line: %d column: %d (%r) %s" % (token.type, token.line, token.index, token.value, message)
super(TokenError, self).__init__(message)
self.token = token
def ast2json_obj(ast):
obj = {}
for pair in ast.children:
if pair.type == Token.T_BINARY and pair.value == ':':
lhs, rhs = pair.children
if lhs.type == Token.T_STRING:
key = pyast.literal_eval(lhs.value)
else:
raise ValueError("%s:%s" % (lhs.type, lhs.value))
val = ast2json(rhs)
obj[key] = val
else:
raise ValueError("%s:%s" % (pair.type, pair.value))
return obj
def ast2json_seq(ast):
seq = []
for val in ast.children:
seq.append(ast2json(val))
return seq
def ast2json(ast):
if ast.type == Token.T_OBJECT:
return ast2json_obj(ast)
elif ast.type == Token.T_LIST:
return ast2json_seq(ast)
elif ast.type == Token.T_STRING:
return pyast.literal_eval(ast.value)
elif ast.type == Token.T_NUMBER:
return pyast.literal_eval(ast.value)
elif ast.type == Token.T_PREFIX:
if ast.value == "-" and ast.children and ast.children[0].type == Token.T_NUMBER:
val = ast2json(ast.children[0])
return -val
else:
raise ValueError("%s:%s" % (ast.type, ast.value))
elif ast.type == Token.T_KEYWORD:
if ast.value == 'true':
return True
elif ast.value == 'false':
return False
elif ast.value == 'null':
return None
elif ast.value == 'undefined':
return None
else:
raise ValueError("%s:%s" % (ast.type, ast.value))
else:
raise ValueError("%s:%s" % (ast.type, ast.value))
class Token(object):
# tokens produced by the lexer
T_TEXT = "T_TEXT"
T_KEYWORD = "T_KEYWORD"
T_NUMBER = "T_NUMBER"
T_STRING = "T_STRING"
T_TEMPLATE_STRING = "T_TEMPLATE_STRING"
T_DOCUMENTATION = "T_DOCUMENTATION"
T_SPECIAL = "T_SPECIAL"
T_NEWLINE = "T_NEWLINE"
T_REGEX = "T_REGEX"
# tokens created by the parser
T_MODULE = "T_MODULE"
T_ATTR = "T_ATTR"
T_GROUPING = "T_GROUPING"
T_ARGLIST = "T_ARGLIST"
T_FUNCTIONCALL = "T_FUNCTIONCALL"
T_SUBSCR = "T_SUBSCR"
T_BLOCK = "T_BLOCK"
T_BLOCK_LABEL = "T_BLOCK_LABEL"
T_LIST = "T_LIST"
T_OBJECT = "T_OBJECT"
T_TUPLE = "T_TUPLE" # immutable list
T_RECORD = "T_RECORD" # immutable object
T_PREFIX = "T_PREFIX"
T_POSTFIX = "T_POSTFIX"
T_BINARY = "T_BINARY"
T_TERNARY = "T_TERNARY"
T_COMMA = "T_COMMA"
T_ASSIGN = "T_ASSIGN"
# tokens created by the parser (processed keywords)
T_GET_ATTR = "T_GET_ATTR"
T_BREAK = "T_BREAK"
T_BRANCH = "T_BRANCH"
T_CASE = "T_CASE"
T_CATCH = "T_CATCH"
T_CLASS = "T_CLASS"
T_CLASS_BLOCK = "T_CLASS_BLOCK"
T_CONTINUE = "T_CONTINUE"
T_DEFAULT = "T_DEFAULT"
T_DOWHILE = "T_DOWHILE"
T_EXPORT = "T_EXPORT"
T_EXPORT_DEFAULT = "T_EXPORT_DEFAULT"
T_IMPORT = "T_IMPORT"
T_IMPORT_JS_MODULE = "T_IMPORT_JS_MODULE"
T_IMPORT_JS_MODULE_AS = "T_IMPORT_JS_MODULE_AS"
T_IMPORT_MODULE = "T_IMPORT_MODULE"
T_PYIMPORT = "T_PYIMPORT"
T_INCLUDE = "T_INCLUDE"
T_FINALLY = "T_FINALLY"
T_FOR = "T_FOR"
T_FOR_OF = "T_FOR_OF"
T_FOR_AWAIT_OF = "T_FOR_AWAIT_OF"
T_FOR_IN = "T_FOR_IN"
T_NEW = "T_NEW"
T_RETURN = "T_RETURN"
T_SWITCH = "T_SWITCH"
T_THROW = "T_THROW"
T_TRY = "T_TRY"
T_VAR = "T_VAR"
T_WHILE = "T_WHILE"
T_OPTIONAL_CHAINING = "T_OPTIONAL_CHAINING"
T_UNPACK_SEQUENCE = "T_UNPACK_SEQUENCE"
T_UNPACK_OBJECT = "T_UNPACK_OBJECT"
T_LOGICAL_AND = "T_LOGICAL_AND"
T_LOGICAL_OR = "T_LOGICAL_OR"
T_INSTANCE_OF = "T_INSTANCE_OF"
T_SPREAD = "T_SPREAD"
T_STATIC_PROPERTY = "T_STATIC_PROPERTY"
T_YIELD = "T_YIELD"
T_YIELD_FROM = "T_YIELD_FROM"
T_INTERFACE = "T_INTERFACE"
# function types
T_FUNCTION = "T_FUNCTION"
T_ASYNC_FUNCTION = "T_ASYNC_FUNCTION"
T_GENERATOR = "T_GENERATOR"
T_ASYNC_GENERATOR = "T_ASYNC_GENERATOR"
T_ANONYMOUS_FUNCTION = "T_ANONYMOUS_FUNCTION"
T_ASYNC_ANONYMOUS_FUNCTION = "T_ASYNC_ANONYMOUS_FUNCTION"
T_ANONYMOUS_GENERATOR = "T_ANONYMOUS_GENERATOR"
T_ASYNC_ANONYMOUS_GENERATOR = "T_ASYNC_ANONYMOUS_GENERATOR"
T_METHOD = "T_METHOD"
T_LAMBDA = "T_LAMBDA" # arrow function
# these variables are assigned by the transform engine for variable scopes
T_GLOBAL_VAR = 'T_GLOBAL_VAR'
T_LOCAL_VAR = 'T_LOCAL_VAR'
T_DELETE_VAR = 'T_DELETE_VAR'
T_CLOSURE = 'T_CLOSURE'
T_CELL_VAR = 'T_CELL_VAR'
T_FREE_VAR = 'T_FREE_VAR'
T_TEMPLATE_EXPRESSION = "T_TEMPLATE_EXPRESSION"
T_TAGGED_TEMPLATE = "T_TAGGED_TEMPLATE"
# a token which stands for no token
T_EMPTY_TOKEN = "T_EMPTY_TOKEN"
T_TYPE = "T_TYPE"
# tokens created by the compiler
T_BLOCK_PUSH = "T_BLOCK_PUSH"
T_BLOCK_POP = "T_BLOCK_POP"
def __init__(self, type, line=0, index=0, value="", children=None):
super(Token, self).__init__()
self.type = type
self.line = line
self.index = index
self.value = value
self.children = list(children) if children is not None else []
self.file = None
self.original_value = None
self.ref = None
self.ref_attr = 0 # 1: define, 2: store, 4: load
def __str__(self):
return self.toString(False, 0)
def __repr__(self):
return "Token(Token.%s, %r, %r, %r)" % (
self.type, self.line, self.index, self.value)
def toString(self, pretty=True, depth=0, pad=" "):
if pretty == 3:
s = "%s<%r>" % (self.type, self.value)
parts = ["%s%s\n" % (pad * depth, s)]
for child in self.children:
try:
parts.append(child.toString(pretty, depth + 1, pad))
except:
print(child)
return ''.join(parts)
elif pretty == 2:
if len(self.children) == 0:
s = "\n%sTOKEN(%r, %r)" % (" " * depth, self.type, self.value)
return s
else:
s = "\n%sTOKEN(%r, %r, " % (" " * depth, self.type, self.value)
c = [child.toString(pretty, depth + 1) for child in self.children]
return s + ', '.join(c) + ")"
elif pretty:
s = "%s<%s,%s,%r>" % (self.type, self.line, self.index, self.value)
parts = ["%s%s\n" % (pad * depth, s)]
for child in self.children:
try:
parts.append(child.toString(pretty, depth + 1))
except:
print(child)
return ''.join(parts)
elif self.children:
s = "%s<%r>" % (self.type, self.value)
t = ','.join(child.toString(False) for child in self.children)
return "%s{%s}" % (s, t)
else:
return "%s<%r>" % (self.type, self.value)
def flatten(self, depth=0):
items = [(depth, self)]
for child in self.children:
items.extend(child.flatten(depth + 1))
return items
@staticmethod
def basicType(token):
return token and (token.type in (Token.T_TEXT, Token.T_NUMBER, Token.T_STRING) or
token.type == Token.T_SPECIAL and token.value in "])")
def clone(self, **keys):
tok = Token(self.type, self.line, self.index, self.value)
tok.file = self.file
tok.original_value = self.original_value
tok.children = [c.clone() for c in self.children]
tok.ref = self.ref
tok.ref_attr = self.ref_attr
tok.__dict__.update(keys)
return tok
@staticmethod
def deepCopy(token):
queue = []
root = Token(token.type, token.line, token.index, token.value)
root.file = token.file
root.original_value = token.original_value
for child in reversed(token.children):
queue.append((child, root))
while queue:
tok, parent = queue.pop()
new_tok = Token(tok.type, tok.line, tok.index, tok.value)
new_tok.file = tok.file
new_tok.original_value = tok.original_value
parent.children.append(new_tok)
for child in reversed(tok.children):
queue.append((child, new_tok))
return root
def toJson(self):
return ast2json(self)
| en | 0.864368 | # tokens produced by the lexer # tokens created by the parser # immutable list # immutable object # tokens created by the parser (processed keywords) # function types # arrow function # these variables are assigned by the transform engine for variable scopes # a token which stands for no token # tokens created by the compiler # 1: define, 2: store, 4: load | 2.650456 | 3 |
main.py | jerryyip/respeaker_adapter | 2 | 6618431 |
import os
import signal
from microphone import Microphone
from bing_base import *
from bing_recognizer import *
from bing_tts import *
from player import Player
import pyaudio
import sys
#from relay import Relay
try:
from creds import BING_KEY
except ImportError:
print('Get a key from https://www.microsoft.com/cognitive-services/en-us/speech-api and create creds.py with the key')
sys.exit(-1)
ACCESS_TOKEN = "<KEY>"
#import time
from relay import Relay
# --- setup: audio assets, Bing speech services, audio devices, relay client ---
script_dir = os.path.dirname(os.path.realpath(__file__))
hi = os.path.join(script_dir, 'audio/hi.wav')  # wake-acknowledgement sound
bing = BingBase(BING_KEY)
recognizer = BingVoiceRecognizer(bing)
tts = BingTTS(bing)
mission_completed = False  # set True by handle_int to end the main loop
awake = False              # True after the wake word is detected
pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)
#mic.player = player
relay1 = Relay(ACCESS_TOKEN)
relay1.set_tts(tts)
relay1.set_player(player)
def handle_int(sig, frame):
    # SIGINT handler: stop the loop and release all audio resources.
    # NOTE(review): ``print "..."`` is Python 2 syntax -- this script
    # targets Python 2.
    global mission_completed
    print "Terminating..."
    mission_completed = True
    mic.close()
    player.close()
    # worker.stop()
    pa.terminate()
signal.signal(signal.SIGINT, handle_int)
#worker.start()
# Main loop: wait for the wake word, then stream utterances to Bing and
# forward recognized text to the relay until "bye bye"/"shut down".
while not mission_completed:
    if not awake:
        #print('test1')
        if mic.detect():
            awake = True
            player.play(hi)
        continue
    data = mic.listen()
    if not data:
        # Nothing captured: drop back to wake-word mode.
        awake = False
        print ('no data')
        continue
    # recognize speech using Microsoft Bing Voice Recognition
    try:
        text = recognizer.recognize(data, language='en-US')
        print('Bing:' + text.encode('utf-8'))
        relay1.text = text
        relay1.run()
        #print('test2')
        if text.find('bye bye') > -1:
            awake = False
        elif text.find('shut down') > -1:
            handle_int(0,0)
    except UnknownValueError:
        print("Microsoft Bing Voice Recognition could not understand audio")
    except RequestError as e:
        print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
        awake = False
    #time.sleep(2)
|
import os
import signal
from microphone import Microphone
from bing_base import *
from bing_recognizer import *
from bing_tts import *
from player import Player
import pyaudio
import sys
#from relay import Relay
try:
from creds import BING_KEY
except ImportError:
print('Get a key from https://www.microsoft.com/cognitive-services/en-us/speech-api and create creds.py with the key')
sys.exit(-1)
ACCESS_TOKEN = "<KEY>"
#import time
from relay import Relay
script_dir = os.path.dirname(os.path.realpath(__file__))
hi = os.path.join(script_dir, 'audio/hi.wav')
bing = BingBase(BING_KEY)
recognizer = BingVoiceRecognizer(bing)
tts = BingTTS(bing)
mission_completed = False
awake = False
pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)
#mic.player = player
relay1 = Relay(ACCESS_TOKEN)
relay1.set_tts(tts)
relay1.set_player(player)
def handle_int(sig, frame):
global mission_completed
print "Terminating..."
mission_completed = True
mic.close()
player.close()
# worker.stop()
pa.terminate()
signal.signal(signal.SIGINT, handle_int)
#worker.start()
while not mission_completed:
if not awake:
#print('test1')
if mic.detect():
awake = True
player.play(hi)
continue
data = mic.listen()
if not data:
awake = False
print ('no data')
continue
# recognize speech using Microsoft Bing Voice Recognition
try:
text = recognizer.recognize(data, language='en-US')
print('Bing:' + text.encode('utf-8'))
relay1.text = text
relay1.run()
#print('test2')
if text.find('bye bye') > -1:
awake = False
elif text.find('shut down') > -1:
handle_int(0,0)
except UnknownValueError:
print("Microsoft Bing Voice Recognition could not understand audio")
except RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
awake = False
#time.sleep(2)
| en | 0.46102 | #from relay import Relay #import time #mic.player = player # worker.stop() #worker.start() #print('test1') # recognize speech using Microsoft Bing Voice Recognition #print('test2') #time.sleep(2) | 2.604029 | 3 |
examples/nested_dags.py | raztud/dagger | 9 | 6618432 | """
# Nested DAGs.
This example showcases how multiple dags can be composed together.
In a real setting, you can use this feature to modularize your workflow definitions and keep each unit simple, testable and reusable.
## Behavior
Say we want to model a workflow that composes, records and publishes a music album.
The first step will be to brainstorm a few themes, and come up with a mapping of tracks -> themes.
Next, each of the tracks will go through a long process that involves composing the track, arranging it and recording it. Since this is a fairly complex process in and of itself, and we will want to repeat it multiple times during the recording of the album, we abstract it into a separate DAG.
Finally, when all tracks have been independently recorded, we want to put them together and publish the album.
## Implementation
Notice how we used a Python function to generate a `compose_and_record_song` DAG. We want to supply the themes we brainstormed before as inputs to the DAG. We will only know what those inputs are once there is a brainstorming session, so we pass them as arguments to the function.
Now, we can invoke the `compose_and_record_song` function multiple times in the parent DAG: one time for each track we want to record. There, we link the inputs of the inner DAG to the outputs of the brainstorming task.
Finally, we connect the "publish-album" task to the outputs of the inner DAGs.
"""
from typing import Mapping
from dagger import DAG, Task
from dagger.input import FromNodeOutput, FromParam
from dagger.output import FromKey, FromReturnValue
def brainstorm_themes() -> Mapping[str, str]:  # noqa
    """Come up with the fixed track -> theme mapping for the album."""
    themes = dict(first="love", second="loss")
    print(f"Themes: {themes}")
    return themes
def compose_song(theme: str, style: str) -> str:  # noqa
    """Describe a composition combining the given style and theme."""
    composition = " ".join([style, "song about", theme])
    print(f"Composition: {composition}")
    return composition
def record_song(composition: str) -> str:  # noqa
    """Produce a recording description for a previously composed song."""
    recording = "recording of (" + composition + ")"
    print(f"Recording: {recording}")
    return recording
def publish_album(album_name: str, first_song: str, second_song: str) -> dict:  # noqa
    """Bundle the two recorded tracks under the album name."""
    album = dict(name=album_name, tracks=[first_song, second_song])
    print(f"Album: {album}")
    return album
def compose_and_record_song(theme, style): # noqa
return DAG(
{
"compose": Task(
compose_song,
inputs={
"theme": FromParam(),
"style": FromParam(),
},
outputs={
"composition": FromReturnValue(),
},
),
"record": Task(
record_song,
inputs={
"composition": FromNodeOutput("compose", "composition"),
},
outputs={
"recording": FromReturnValue(),
},
),
},
inputs={
"theme": theme,
"style": style,
},
outputs={"song": FromNodeOutput("record", "recording")},
)
dag = DAG(
nodes={
"brainstorm-themes": Task(
brainstorm_themes,
outputs={
"first_theme": FromKey("first"),
"second_theme": FromKey("second"),
},
),
"record-first-song": compose_and_record_song(
theme=FromNodeOutput("brainstorm-themes", "first_theme"),
style=FromParam(),
),
"record-second-song": compose_and_record_song(
theme=FromNodeOutput("brainstorm-themes", "second_theme"),
style=FromParam(),
),
"publish-album": Task(
publish_album,
inputs={
"album_name": FromParam(),
"first_song": FromNodeOutput("record-first-song", "song"),
"second_song": FromNodeOutput("record-second-song", "song"),
},
outputs={
"album": FromReturnValue(),
},
),
},
inputs={
"album_name": FromParam(),
"style": FromParam(),
},
outputs={
"album": FromNodeOutput("publish-album", "album"),
},
)
if __name__ == "__main__":
"""Define a command-line interface for this DAG, using the CLI runtime. Check the documentation to understand why this is relevant or necessary."""
from dagger.runtime.cli import invoke
invoke(dag)
| """
# Nested DAGs.
This example showcases how multiple dags can be composed together.
In a real setting, you can use this feature to modularize your workflow definitions and keep each unit simple, testable and reusable.
## Behavior
Say we want to model a workflow that composes, records and publishes a music album.
The first step will be to brainstorm a few themes, and come up with a mapping of tracks -> themes.
Next, each of the tracks will go through a long process that involves composing the track, arranging it and recording it. Since this is a fairly complex process in and of itself, and we will want to repeat it multiple times during the recording of the album, we abstract it into a separate DAG.
Finally, when all tracks have been independently recorded, we want to put them together and publish the album.
## Implementation
Notice how we used a Python function to generate a `compose_and_record_song` DAG. We want to supply the themes we brainstormed before as inputs to the DAG. We will only know what those inputs are once there is a brainstorming session, so we pass them as arguments to the function.
Now, we can invoke the `compose_and_record_song` function multiple times in the parent DAG: one time for each track we want to record. There, we link the inputs of the inner DAG to the outputs of the brainstorming task.
Finally, we connect the "publish-album" task to the outputs of the inner DAGs.
"""
from typing import Mapping
from dagger import DAG, Task
from dagger.input import FromNodeOutput, FromParam
from dagger.output import FromKey, FromReturnValue
def brainstorm_themes() -> Mapping[str, str]: # noqa
themes = {
"first": "love",
"second": "loss",
}
print(f"Themes: {themes}")
return themes
def compose_song(theme: str, style: str) -> str: # noqa
composition = f"{style} song about {theme}"
print(f"Composition: {composition}")
return composition
def record_song(composition: str) -> str: # noqa
recording = f"recording of ({composition})"
print(f"Recording: {recording}")
return recording
def publish_album(album_name: str, first_song: str, second_song: str) -> dict: # noqa
album = {
"name": album_name,
"tracks": [first_song, second_song],
}
print(f"Album: {album}")
return album
def compose_and_record_song(theme, style): # noqa
return DAG(
{
"compose": Task(
compose_song,
inputs={
"theme": FromParam(),
"style": FromParam(),
},
outputs={
"composition": FromReturnValue(),
},
),
"record": Task(
record_song,
inputs={
"composition": FromNodeOutput("compose", "composition"),
},
outputs={
"recording": FromReturnValue(),
},
),
},
inputs={
"theme": theme,
"style": style,
},
outputs={"song": FromNodeOutput("record", "recording")},
)
dag = DAG(
nodes={
"brainstorm-themes": Task(
brainstorm_themes,
outputs={
"first_theme": FromKey("first"),
"second_theme": FromKey("second"),
},
),
"record-first-song": compose_and_record_song(
theme=FromNodeOutput("brainstorm-themes", "first_theme"),
style=FromParam(),
),
"record-second-song": compose_and_record_song(
theme=FromNodeOutput("brainstorm-themes", "second_theme"),
style=FromParam(),
),
"publish-album": Task(
publish_album,
inputs={
"album_name": FromParam(),
"first_song": FromNodeOutput("record-first-song", "song"),
"second_song": FromNodeOutput("record-second-song", "song"),
},
outputs={
"album": FromReturnValue(),
},
),
},
inputs={
"album_name": FromParam(),
"style": FromParam(),
},
outputs={
"album": FromNodeOutput("publish-album", "album"),
},
)
if __name__ == "__main__":
"""Define a command-line interface for this DAG, using the CLI runtime. Check the documentation to understand why this is relevant or necessary."""
from dagger.runtime.cli import invoke
invoke(dag)
| en | 0.908258 | # Nested DAGs. This example showcases how multiple dags can be composed together. In a real setting, you can use this feature to modularize your workflow definitions and keep each unit simple, testable and reusable. ## Behavior Say we want to model a workflow that composes, records and publishes a music album. The first step will be to brainstorm a few themes, and come up with a mapping of tracks -> themes. Next, each of the tracks will go through a long process that involves composing the track, arranging it and recording it. Since this is a fairly complex process in and of itself, and we will want to repeat it multiple times during the recording of the album, we abstract it into a separate DAG. Finally, when all tracks have been independently recorded, we want to put them together and publish the album. ## Implementation Notice how we used a Python function to generate a `compose_and_record_song` DAG. We want to supply the themes we brainstormed before as inputs to the DAG. We will only know what those inputs are once there is a brainstorming session, so we pass them as arguments to the function. Now, we can invoke the `compose_and_record_song` function multiple times in the parent DAG: one time for each track we want to record. There, we link the inputs of the inner DAG to the outputs of the brainstorming task. Finally, we connect the "publish-album" task to the outputs of the inner DAGs. # noqa # noqa # noqa # noqa # noqa Define a command-line interface for this DAG, using the CLI runtime. Check the documentation to understand why this is relevant or necessary. | 3.428912 | 3 |
backend/route/init_api.py | WestonLu/chinese-ocr | 1 | 6618433 | from flask_restplus import Api
from .ocr_route import ocr_app_api
api = Api(
title='简单的中文 OCR API',
version='1.0',
description='识别并返回图片(支持jpg, jpeg, png格式)中的文本',
)
api.add_namespace(ocr_app_api)
| from flask_restplus import Api
from .ocr_route import ocr_app_api
api = Api(
title='简单的中文 OCR API',
version='1.0',
description='识别并返回图片(支持jpg, jpeg, png格式)中的文本',
)
api.add_namespace(ocr_app_api)
| none | 1 | 1.860869 | 2 | |
bootstrapper/lib/db_models.py | mgl-ld/panos-bootstrapper | 8 | 6618434 | <reponame>mgl-ld/panos-bootstrapper<gh_stars>1-10
from sqlalchemy import Column, Integer, String
from .db import Base
class Template(Base):
__tablename__ = 'templates'
id = Column(Integer, primary_key=True)
# simple name of the template
name = Column(String(50), unique=True)
# type of the template - bootstrap or init-cfg-static.txt
type = Column(String(32), unique=False)
# simple description of this template
description = Column(String(120), unique=False)
# actual text of the jinja template
template = Column(String(), unique=False)
def __init__(self, name=None, description=None, type='bootstrap', template=""):
self.name = name
self.description = description
self.type = type
self.template = template
def __repr__(self):
return '<Template %r>' % (self.name) | from sqlalchemy import Column, Integer, String
from .db import Base
class Template(Base):
__tablename__ = 'templates'
id = Column(Integer, primary_key=True)
# simple name of the template
name = Column(String(50), unique=True)
# type of the template - bootstrap or init-cfg-static.txt
type = Column(String(32), unique=False)
# simple description of this template
description = Column(String(120), unique=False)
# actual text of the jinja template
template = Column(String(), unique=False)
def __init__(self, name=None, description=None, type='bootstrap', template=""):
self.name = name
self.description = description
self.type = type
self.template = template
def __repr__(self):
return '<Template %r>' % (self.name) | en | 0.229978 | # simple name of the template # type of the template - bootstrap or init-cfg-static.txt # simple description of this template # actual text of the jinja template | 2.90834 | 3 |
pixie/utils/checks.py | Discord4Life/ToukaMusic | 158 | 6618435 | <reponame>Discord4Life/ToukaMusic
from . import setup_file
def is_owner(ctx):
return ctx.message.author.id == setup_file["discord"]['owner_id']
def pixie_admin(ctx):
if is_owner(ctx):
return True
return ctx.message.author.id in setup_file["discord"]["pixie_admins"]
def server_owner(ctx):
if is_owner(ctx):
return True
if pixie_admin(ctx):
return True
return ctx.message.author == ctx.message.server.owner
def server_admin(ctx):
if is_owner(ctx):
return True
if pixie_admin(ctx):
return True
if server_owner(ctx):
return True
return ctx.message.author.server_permissions.administrator
def server_moderator(ctx):
if is_owner(ctx):
return True
if pixie_admin(ctx):
return True
if server_owner(ctx):
return True
if server_admin(ctx):
return True
return "pixie" in [role.name.lower() for role in ctx.message.author.roles]
| from . import setup_file
def is_owner(ctx):
return ctx.message.author.id == setup_file["discord"]['owner_id']
def pixie_admin(ctx):
if is_owner(ctx):
return True
return ctx.message.author.id in setup_file["discord"]["pixie_admins"]
def server_owner(ctx):
if is_owner(ctx):
return True
if pixie_admin(ctx):
return True
return ctx.message.author == ctx.message.server.owner
def server_admin(ctx):
if is_owner(ctx):
return True
if pixie_admin(ctx):
return True
if server_owner(ctx):
return True
return ctx.message.author.server_permissions.administrator
def server_moderator(ctx):
if is_owner(ctx):
return True
if pixie_admin(ctx):
return True
if server_owner(ctx):
return True
if server_admin(ctx):
return True
return "pixie" in [role.name.lower() for role in ctx.message.author.roles] | none | 1 | 2.536554 | 3 | |
bufscript/bsl_codegen.py | thefifthmatt/bufscript | 1 | 6618436 | """Generates BSL code from a program in IR."""
import collections
from bufscript import bsl
from bufscript.intermediate import *
from bufscript.optypes import *
def output_bsl_codes(program):
all_bsl = collections.OrderedDict()
for function in program:
code = bsl.BslCode()
all_bsl[function.name] = code
# Generate code for each block
for block in function.cfg.blocks:
code.append_label(block.get_label())
for instr in block.instrs:
if isinstance(instr, MoveInstr):
code.append_move(instr.dest, instr.src)
elif isinstance(instr, OpInstr):
# TODO: Consider adding IR phase where all a+b=c ops are transformed to a+=b.
bsl.generators[instr.op].generate(code, instr.dest, instr.srcs)
elif isinstance(instr, CondInstr):
# Conditional Jump: first if "true", second if "false" -
# but this means that the jump goes to the second target
# (and if the following block is not the immediate successor,
# unconditional jump to it)
if len(block.succ) != 2:
raise RuntimeError('Block %s used with a CondInstr' % block)
true_label = block.succ[0].dest.get_label()
# Negation should probably happen at the IR level, rather than the BSL level.
bsl.branchGenerators[instr.op].generate(code, instr.srcs, true_label, negate=False)
elif isinstance(instr, FuncInstr):
if instr.name == 'rng':
code.append('rng.%s.%s.%s;', instr.srcs[0], instr.srcs[1], instr.dest)
elif instr.name == 'fpt':
code.append('fpt.%s.%s.%s.%s.%s;', instr.dest, instr.srcs[0], instr.srcs[1], instr.srcs[2], instr.srcs[3])
elif instr.name == 'slp':
code.append('slp.%s;', instr.srcs[0])
elif instr.name == 'tme':
code.append('tme.%s;', instr.dest)
else:
raise CompInternalError(instr)
elif isinstance(instr, LiteralInstr):
if instr.op == LiteralOp.MSG:
code.append('msg.%s;', instr.src)
elif instr.op == LiteralOp.CALL:
code.append('cal.%s;', instr.src)
else:
raise CompInternalError(instr)
elif isinstance(instr, NoOpInstr):
code.append('nop;')
else:
raise CompInternalError(instr)
# We're done with instructions; Now we might need to add an unconditional jump to the
# next block, if the index isn't ours + 1.
if len(block.succ):
# For unconditional, go to only next branch. For conditional, go to false branch.
if block.succ[-1].dest.index != (block.index + 1):
code.append('jmp.%s;', block.succ[-1].dest.get_label())
return all_bsl
| """Generates BSL code from a program in IR."""
import collections
from bufscript import bsl
from bufscript.intermediate import *
from bufscript.optypes import *
def output_bsl_codes(program):
all_bsl = collections.OrderedDict()
for function in program:
code = bsl.BslCode()
all_bsl[function.name] = code
# Generate code for each block
for block in function.cfg.blocks:
code.append_label(block.get_label())
for instr in block.instrs:
if isinstance(instr, MoveInstr):
code.append_move(instr.dest, instr.src)
elif isinstance(instr, OpInstr):
# TODO: Consider adding IR phase where all a+b=c ops are transformed to a+=b.
bsl.generators[instr.op].generate(code, instr.dest, instr.srcs)
elif isinstance(instr, CondInstr):
# Conditional Jump: first if "true", second if "false" -
# but this means that the jump goes to the second target
# (and if the following block is not the immediate successor,
# unconditional jump to it)
if len(block.succ) != 2:
raise RuntimeError('Block %s used with a CondInstr' % block)
true_label = block.succ[0].dest.get_label()
# Negation should probably happen at the IR level, rather than the BSL level.
bsl.branchGenerators[instr.op].generate(code, instr.srcs, true_label, negate=False)
elif isinstance(instr, FuncInstr):
if instr.name == 'rng':
code.append('rng.%s.%s.%s;', instr.srcs[0], instr.srcs[1], instr.dest)
elif instr.name == 'fpt':
code.append('fpt.%s.%s.%s.%s.%s;', instr.dest, instr.srcs[0], instr.srcs[1], instr.srcs[2], instr.srcs[3])
elif instr.name == 'slp':
code.append('slp.%s;', instr.srcs[0])
elif instr.name == 'tme':
code.append('tme.%s;', instr.dest)
else:
raise CompInternalError(instr)
elif isinstance(instr, LiteralInstr):
if instr.op == LiteralOp.MSG:
code.append('msg.%s;', instr.src)
elif instr.op == LiteralOp.CALL:
code.append('cal.%s;', instr.src)
else:
raise CompInternalError(instr)
elif isinstance(instr, NoOpInstr):
code.append('nop;')
else:
raise CompInternalError(instr)
# We're done with instructions; Now we might need to add an unconditional jump to the
# next block, if the index isn't ours + 1.
if len(block.succ):
# For unconditional, go to only next branch. For conditional, go to false branch.
if block.succ[-1].dest.index != (block.index + 1):
code.append('jmp.%s;', block.succ[-1].dest.get_label())
return all_bsl
| en | 0.897354 | Generates BSL code from a program in IR. # Generate code for each block # TODO: Consider adding IR phase where all a+b=c ops are transformed to a+=b. # Conditional Jump: first if "true", second if "false" - # but this means that the jump goes to the second target # (and if the following block is not the immediate successor, # unconditional jump to it) # Negation should probably happen at the IR level, rather than the BSL level. # We're done with instructions; Now we might need to add an unconditional jump to the # next block, if the index isn't ours + 1. # For unconditional, go to only next branch. For conditional, go to false branch. | 2.868957 | 3 |
Tests/autolog.py | MarcPartensky/Python-2020 | 1 | 6618437 | <reponame>MarcPartensky/Python-2020
# Written by <NAME>, <EMAIL>, www.anyall.org
# * Originally written Aug. 2005
# * Posted to gist.github.com/16173 on Oct. 2008
# Copyright (c) 2003-2006 Open Source Applications Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re, sys, types
"""
Have all your function & method calls automatically logged, in indented outline
form - unlike the stack snapshots in an interactive debugger, it tracks call
structure & stack depths across time!
It hooks into all function calls that you specify, and logs each time they're
called. I find it especially useful when I don't know what's getting called
when, or need to continuously test for state changes. (by hacking this file)
Originally inspired from the python cookbook:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/198078
Currently you can
- tag functions or individual methods to be autologged
- tag an entire class's methods to be autologged
- tag an entire module's classes and functions to be autologged
TODO:
- allow tagging of ALL modules in the program on startup?
CAVEATS:
- certain classes barf when you logclass() them -- most notably,
SWIG-generated wrappers, and perhaps others.
USAGE: see examples on the bottom of this file.
Viewing tips
============
If your terminal can't keep up, try xterm or putty, they seem to be highest
performance. xterm is available for all platforms through X11...
Also try: (RunChandler > log &); tail -f log
Also, you can "less -R log" afterward and get the colors correct.
If you have long lines, less -RS kills wrapping, enhancing readability. Also
can chop at formatAllArgs().
If you want long lines to be chopped realtime, try piping through less::
RunChandler | less -RS
but then you have to hit 'space' lots to prevent chandler from freezing.
less's 'F' command is supposed to do this correctly but doesn't work for me.
"""
#@@@ should use the standard python logging system?
# Stream that all autolog output is written to; reassign to redirect logging.
log = sys.stdout
# Globally incremented across function calls, so tracks stack depth
indent = 0
# One copy of this string is emitted per level of call-stack depth.
indStr = ' '
# ANSI escape codes for terminals.
# X11 xterm: always works, all platforms
# cygwin dosbox: run through |cat and then colors work
# linux: works on console & gnome-terminal
# mac: untested
BLACK = "\033[0;30m"
BLUE = "\033[0;34m"
GREEN = "\033[0;32m"
CYAN = "\033[0;36m"
RED = "\033[0;31m"
PURPLE = "\033[0;35m"
BROWN = "\033[0;33m"
GRAY = "\033[0;37m"
BOLDGRAY = "\033[1;30m"
BOLDBLUE = "\033[1;34m"
BOLDGREEN = "\033[1;32m"
BOLDCYAN = "\033[1;36m"
BOLDRED = "\033[1;31m"
BOLDPURPLE = "\033[1;35m"
BOLDYELLOW = "\033[1;33m"
WHITE = "\033[1;37m"
NORMAL = "\033[0m"
def indentlog(message):
    """Write *message* to the module log stream, prefixed with the
    indentation for the current call-stack depth, then flush."""
    global log, indStr, indent
    log.write("%s%s\n" % (indStr * indent, message))
    log.flush()
def shortstr(obj):
    """
    Where to put gritty heuristics to make an object appear in most useful
    form. Defaults to __str__; wx objects get a compact "<Class info>" form.
    """
    klass = obj.__class__
    # Non-wx objects: plain string conversion.
    if "wx." not in str(klass) and not klass.__name__.startswith("wx"):
        return str(obj)
    # wx objects: prefer the Chandler block name when present, else the id.
    if hasattr(obj, "blockItem") and hasattr(obj.blockItem, "blockName"):
        detail = "block:'%s'" % obj.blockItem.blockName
    else:
        detail = "at %d" % id(obj)
    return "<%s %s>" % (klass.__name__, detail)
def formatAllArgs(args, kwds):
    """
    makes a nice string representation of all the arguments
    (positional first, then key=value pairs), truncated past 150 chars.
    """
    pieces = ["%s" % shortstr(a) for a in args]
    pieces.extend("%s=%s" % (key, shortstr(val)) for key, val in kwds.items())
    joined = ", ".join(pieces)
    if len(joined) > 150:
        return joined[:146] + " ..."
    return joined
def logmodules(listOfModules):
    """Apply logmodule() to every module in *listOfModules*.

    Bug fix: the original body called ``bindmodule``, a name that is not
    defined anywhere in this file, so this function always raised
    ``NameError``.  ``logmodule`` is the function that does the binding.
    """
    for m in listOfModules:
        logmodule(m)
def logmodule(module, logMatch=".*", logNotMatch="nomatchasfdasdf"):
    """
    Wrap every class (via logclass) and every plain function (via
    logfunction) in *module* so calls into them are logged.

    WARNING: this seems to break if you import SWIG wrapper classes
    directly into the module namespace ... logclass() creates weirdness when
    used on them, for some reason.
    @param module: could be either an actual module object, or the string
                   you can import (which seems to be the same thing as its
                   __name__). So you can say logmodule(__name__) at the end
                   of a module definition, to log all of it.
    @param logMatch: regex; only names matching it are instrumented.
    @param logNotMatch: regex; names matching it are excluded.
    """
    # A name is instrumented only if it matches logMatch and not logNotMatch.
    allow = lambda s: re.match(logMatch, s) and not re.match(logNotMatch, s)
    if isinstance(module, str):
        # Given a module *name*: import it into a scratch namespace, then
        # fetch the real module object out of sys.modules.
        d = {}
        exec("import %s" % module, d)
        import sys
        module = sys.modules[module]
    # Snapshot the names first: the setattr calls below mutate the module.
    names = list(module.__dict__.keys())
    for name in names:
        if not allow(name): continue
        value = getattr(module, name)
        if isinstance(value, type):
            # A class: wrap all of its methods.
            setattr(module, name, logclass(value))
            print("autolog.logmodule(): bound %s" %name, file=log)
        elif isinstance(value, types.FunctionType):
            # A plain function: wrap it directly.
            setattr(module, name, logfunction(value))
            print("autolog.logmodule(): bound %s" %name, file=log)
def logfunction(theFunction, displayName=None):
    """A decorator: log each call to *theFunction* (name plus formatted
    arguments) indented by the current call-stack depth.

    @param theFunction: the callable to wrap.
    @param displayName: optional label shown in the log instead of
                        ``theFunction.__name__``.
    @return: the wrapping function.
    """
    if not displayName: displayName = theFunction.__name__
    def _wrapper(*args, **kwds):
        global indent
        argstr = formatAllArgs(args, kwds)
        # Log the entry into the function
        indentlog("%s%s%s (%s) " % (BOLDRED,displayName,NORMAL, argstr))
        log.flush()
        indent += 1
        # Bug fix: restore the indent level even when the wrapped call
        # raises; previously an exception left every subsequent log line
        # permanently over-indented.
        try:
            return theFunction(*args, **kwds)
        finally:
            indent -= 1
        # Return values are intentionally not logged (see original:
        # ##indentlog("return: %s" % shortstr(returnval))).
    return _wrapper
def logmethod(theMethod, displayName=None):
    """use this for class or instance methods, it formats with the object out front.

    @param theMethod: the unbound method/function to wrap.
    @param displayName: optional label used instead of the method name.
    @return: the wrapping function (suitable as a method).
    """
    if not displayName: displayName = theMethod.__name__
    def _methodWrapper(self, *args, **kwds):
        "Use this one for instance or class methods"
        global indent
        argstr = formatAllArgs(args, kwds)
        selfstr = shortstr(self)
        indentlog("%s.%s%s%s (%s) " % (selfstr, BOLDRED,theMethod.__name__,NORMAL, argstr))
        log.flush()
        indent += 1
        if theMethod.__name__ == 'OnSize':
            # Special case for wx OnSize handlers: also log the geometry.
            # NOTE(review): assumes self has GetPosition/GetSize - wx only.
            indentlog("position, size = %s%s %s%s" %(BOLDBLUE, self.GetPosition(), self.GetSize(), NORMAL))
        # Bug fix: decrement the indent in a finally block so an exception
        # in the wrapped method doesn't leave the log permanently indented.
        try:
            return theMethod(self, *args, **kwds)
        finally:
            indent -= 1
    return _methodWrapper
def logclass(cls, methodsAsFunctions=False,
             logMatch=".*", logNotMatch="asdfnomatch"):
    """
    A class "decorator". But python doesn't support decorator syntax for
    classes, so do it manually::
        class C(object):
           ...
        C = logclass(C)
    @param methodsAsFunctions: set to True if you always want methodname first
       in the display. Probably breaks if you're using class/staticmethods?
    @param logMatch: regex; only attribute names matching it are wrapped.
    @param logNotMatch: regex; matching names are excluded.
    @return: *cls*, with matching methods replaced by logging wrappers.
    """
    allow = lambda s: re.match(logMatch, s) and not re.match(logNotMatch, s) and \
          s not in ('__str__','__repr__')
    for name in list(cls.__dict__.keys()):
        if not allow(name): continue
        # Bug fix (Python 3 port): dispatch on the *raw* class-dict entry.
        # In Python 3 there are no unbound methods - getattr(cls, name)
        # returns a plain function for ordinary methods, so the original
        # code mistook instance methods for static methods and wrapped
        # them as staticmethod(logfunction(...)), dropping `self` handling.
        raw = cls.__dict__[name]
        value = getattr(cls, name)
        if methodsAsFunctions and callable(value):
            setattr(cls, name, logfunction(value))
        elif isinstance(raw, types.FunctionType):
            # An ordinary instance method.
            setattr(cls, name, logmethod(raw))
        elif isinstance(raw, classmethod):
            # A class method: wrap the underlying function, re-wrap as
            # classmethod so `cls` binding still happens.
            w = logmethod(raw.__func__,
                          displayName="%s.%s" % (cls.__name__, name))
            setattr(cls, name, classmethod(w))
        elif isinstance(raw, staticmethod):
            # A static method: no self/cls, so wrap like a plain function.
            w = logfunction(raw.__func__,
                            displayName="%s.%s" % (cls.__name__, name))
            setattr(cls, name, staticmethod(w))
        # Non-callable class attributes fall through untouched.
    return cls
class LogMetaClass(type):
    """
    Alternative to logclass(), you set this as a class's __metaclass__
    (in Python 3: ``class C(metaclass=LogMetaClass)``).
    It will not work if the metaclass has already been overridden (e.g.
    schema.Item or zope.interface (used in Twisted)
    Also, it should fail for class/staticmethods, that hasnt been added here
    yet.
    Classes may opt names in/out via optional ``logMatch`` / ``logNotMatch``
    regex string class attributes.
    """
    def __new__(cls,classname,bases,classdict):
        # Per-class include/exclude patterns, with permissive defaults
        # (match everything, exclude nothing).
        logmatch = re.compile(classdict.get('logMatch','.*'))
        lognotmatch = re.compile(classdict.get('logNotMatch', 'nevermatchthisstringasdfasdf'))
        for attr,item in list(classdict.items()):
            if callable(item) and logmatch.match(attr) and not lognotmatch.match(attr):
                classdict['_H_%s'%attr] = item # rebind the method (original stays reachable)
                classdict[attr] = logmethod(item) # replace method by wrapper
        return type.__new__(cls,classname,bases,classdict)
# ---------------------------- Tests and examples ----------------------------
if __name__=='__main__':
    # Self-demonstration: exercises each way of hooking autolog in.
    # Demo 1: decorate a free-standing function.
    print(); print("------------------- single function logging ---------------")
    @logfunction
    def test():
        return 42
    test()
    # Demo 2: decorate individual methods; note @logfunction on a method
    # doesn't know that `self` is special.
    print(); print("------------------- single method logging -----------------")
    class Test1(object):
        def __init__(self):
            self.a = 10
        @logmethod
        def add(self,a,b): return a+b
        @logmethod
        def fac(self,val):
            if val == 1:
                return 1
            else:
                return val * self.fac(val-1)
        @logfunction
        def fac2(self, val):
            if val == 1:
                return 1
            else:
                return val * self.fac2(val-1)
    t = Test1()
    t.add(5,6)
    t.fac(4)
    print("--- tagged as @logfunction, doesn't understand 'self' is special:")
    t.fac2(4)
    # Demo 3: wrap a whole class after definition, filtering by name.
    print(); print("""-------------------- class "decorator" usage ------------------""")
    class Test2(object):
        #will be ignored
        def __init__(self):
            self.a = 10
        def ignoreThis(self): pass
        def add(self,a,b):return a+b
        def fac(self,val):
            if val == 1:
                return 1
            else:
                return val * self.fac(val-1)
    Test2 = logclass(Test2, logMatch='fac|add')
    t2 = Test2()
    t2.add(5,6)
    t2.fac(4)
    t2.ignoreThis()
    # Demo 4: opt in via metaclass; logNotMatch excludes ignoreThis.
    print(); print("-------------------- metaclass usage ------------------")
    class Test3(object, metaclass=LogMetaClass):
        logNotMatch = 'ignoreThis'
        def __init__(self): pass
        def fac(self,val):
            if val == 1:
                return 1
            else:
                return val * self.fac(val-1)
        def ignoreThis(self): pass
    t3 = Test3()
    t3.fac(4)
    t3.ignoreThis()
    # Demo 5: logclass() on a class mixing class/static/instance methods.
    print(); print("-------------- testing static & classmethods --------------")
    class Test4(object):
        @classmethod
        def cm(cls, a, b):
            print(cls)
            return a+b
        def im(self, a, b):
            print(self)
            return a+b
        @staticmethod
        def sm(a,b): return a+b
    Test4 = logclass(Test4)
    Test4.cm(4,3)
    Test4.sm(4,3)
    t4 = Test4()
    t4.im(4,3)
    t4.sm(4,3)
    t4.cm(4,3)
    #print; print "-------------- static & classmethods: where to put decorators? --------------"
    #class Test5(object):
    #@classmethod
    #@logmethod
    #def cm(cls, a, b):
    #print cls
    #return a+b
    #@logmethod
    #def im(self, a, b):
    #print self
    #return a+b
    #@staticmethod
    #@logfunction
    #def sm(a,b): return a+b
    #Test5.cm(4,3)
    #Test5.sm(4,3)
    #t5 = Test5()
    #t5.im(4,3)
    #t5.sm(4,3)
    #t5.cm(4,3)
| # Written by <NAME>, <EMAIL>, www.anyall.org
# * Originally written Aug. 2005
# * Posted to gist.github.com/16173 on Oct. 2008
# Copyright (c) 2003-2006 Open Source Applications Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re, sys, types
"""
Have all your function & method calls automatically logged, in indented outline
form - unlike the stack snapshots in an interactive debugger, it tracks call
structure & stack depths across time!
It hooks into all function calls that you specify, and logs each time they're
called. I find it especially useful when I don't know what's getting called
when, or need to continuously test for state changes. (by hacking this file)
Originally inspired from the python cookbook:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/198078
Currently you can
- tag functions or individual methods to be autologged
- tag an entire class's methods to be autologged
- tag an entire module's classes and functions to be autologged
TODO:
- allow tagging of ALL modules in the program on startup?
CAVEATS:
- certain classes barf when you logclass() them -- most notably,
SWIG-generated wrappers, and perhaps others.
USAGE: see examples on the bottom of this file.
Viewing tips
============
If your terminal can't keep up, try xterm or putty, they seem to be highest
performance. xterm is available for all platforms through X11...
Also try: (RunChandler > log &); tail -f log
Also, you can "less -R log" afterward and get the colors correct.
If you have long lines, less -RS kills wrapping, enhancing readability. Also
can chop at formatAllArgs().
If you want long lines to be chopped realtime, try piping through less::
RunChandler | less -RS
but then you have to hit 'space' lots to prevent chandler from freezing.
less's 'F' command is supposed to do this correctly but doesn't work for me.
"""
#@@@ should use the standard python logging system?
# Stream that all autolog output is written to; reassign to redirect logging.
log = sys.stdout
# Globally incremented across function calls, so tracks stack depth
indent = 0
# One copy of this string is emitted per level of call-stack depth.
indStr = ' '
# ANSI escape codes for terminals.
# X11 xterm: always works, all platforms
# cygwin dosbox: run through |cat and then colors work
# linux: works on console & gnome-terminal
# mac: untested
BLACK = "\033[0;30m"
BLUE = "\033[0;34m"
GREEN = "\033[0;32m"
CYAN = "\033[0;36m"
RED = "\033[0;31m"
PURPLE = "\033[0;35m"
BROWN = "\033[0;33m"
GRAY = "\033[0;37m"
BOLDGRAY = "\033[1;30m"
BOLDBLUE = "\033[1;34m"
BOLDGREEN = "\033[1;32m"
BOLDCYAN = "\033[1;36m"
BOLDRED = "\033[1;31m"
BOLDPURPLE = "\033[1;35m"
BOLDYELLOW = "\033[1;33m"
WHITE = "\033[1;37m"
NORMAL = "\033[0m"
def indentlog(message):
    """Write *message* to the module log stream, prefixed with the
    indentation for the current call-stack depth, then flush."""
    global log, indStr, indent
    log.write("%s%s\n" % (indStr * indent, message))
    log.flush()
def shortstr(obj):
    """
    Where to put gritty heuristics to make an object appear in most useful
    form. Defaults to __str__; wx objects get a compact "<Class info>" form.
    """
    klass = obj.__class__
    # Non-wx objects: plain string conversion.
    if "wx." not in str(klass) and not klass.__name__.startswith("wx"):
        return str(obj)
    # wx objects: prefer the Chandler block name when present, else the id.
    if hasattr(obj, "blockItem") and hasattr(obj.blockItem, "blockName"):
        detail = "block:'%s'" % obj.blockItem.blockName
    else:
        detail = "at %d" % id(obj)
    return "<%s %s>" % (klass.__name__, detail)
def formatAllArgs(args, kwds):
    """
    makes a nice string representation of all the arguments
    (positional first, then key=value pairs), truncated past 150 chars.
    """
    pieces = ["%s" % shortstr(a) for a in args]
    pieces.extend("%s=%s" % (key, shortstr(val)) for key, val in kwds.items())
    joined = ", ".join(pieces)
    if len(joined) > 150:
        return joined[:146] + " ..."
    return joined
def logmodules(listOfModules):
    """Apply logmodule() to every module in *listOfModules*.

    Bug fix: the original body called ``bindmodule``, a name that is not
    defined anywhere in this file, so this function always raised
    ``NameError``.  ``logmodule`` is the function that does the binding.
    """
    for m in listOfModules:
        logmodule(m)
def logmodule(module, logMatch=".*", logNotMatch="nomatchasfdasdf"):
    """
    Wrap every class (via logclass) and every plain function (via
    logfunction) in *module* so calls into them are logged.

    WARNING: this seems to break if you import SWIG wrapper classes
    directly into the module namespace ... logclass() creates weirdness when
    used on them, for some reason.
    @param module: could be either an actual module object, or the string
                   you can import (which seems to be the same thing as its
                   __name__). So you can say logmodule(__name__) at the end
                   of a module definition, to log all of it.
    @param logMatch: regex; only names matching it are instrumented.
    @param logNotMatch: regex; names matching it are excluded.
    """
    # A name is instrumented only if it matches logMatch and not logNotMatch.
    allow = lambda s: re.match(logMatch, s) and not re.match(logNotMatch, s)
    if isinstance(module, str):
        # Given a module *name*: import it into a scratch namespace, then
        # fetch the real module object out of sys.modules.
        d = {}
        exec("import %s" % module, d)
        import sys
        module = sys.modules[module]
    # Snapshot the names first: the setattr calls below mutate the module.
    names = list(module.__dict__.keys())
    for name in names:
        if not allow(name): continue
        value = getattr(module, name)
        if isinstance(value, type):
            # A class: wrap all of its methods.
            setattr(module, name, logclass(value))
            print("autolog.logmodule(): bound %s" %name, file=log)
        elif isinstance(value, types.FunctionType):
            # A plain function: wrap it directly.
            setattr(module, name, logfunction(value))
            print("autolog.logmodule(): bound %s" %name, file=log)
def logfunction(theFunction, displayName=None):
    """A decorator: log each call to *theFunction* (name plus formatted
    arguments) indented by the current call-stack depth.

    @param theFunction: the callable to wrap.
    @param displayName: optional label shown in the log instead of
                        ``theFunction.__name__``.
    @return: the wrapping function.
    """
    if not displayName: displayName = theFunction.__name__
    def _wrapper(*args, **kwds):
        global indent
        argstr = formatAllArgs(args, kwds)
        # Log the entry into the function
        indentlog("%s%s%s (%s) " % (BOLDRED,displayName,NORMAL, argstr))
        log.flush()
        indent += 1
        # Bug fix: restore the indent level even when the wrapped call
        # raises; previously an exception left every subsequent log line
        # permanently over-indented.
        try:
            return theFunction(*args, **kwds)
        finally:
            indent -= 1
        # Return values are intentionally not logged (see original:
        # ##indentlog("return: %s" % shortstr(returnval))).
    return _wrapper
def logmethod(theMethod, displayName=None):
    """use this for class or instance methods, it formats with the object out front."""
    # NOTE(review): displayName is computed here but the wrapper below
    # prints theMethod.__name__ instead -- presumably an oversight; confirm.
    if not displayName: displayName = theMethod.__name__
    def _methodWrapper(self, *args, **kwds):
        "Log '<obj>.method (args)', indent, run the method, then un-indent."
        global indent
        argstr = formatAllArgs(args, kwds)
        selfstr = shortstr(self)
        #print >> log,"%s%s. %s (%s) " % (indStr*indent,selfstr,methodname,argstr)
        indentlog("%s.%s%s%s (%s) " % (selfstr, BOLDRED,theMethod.__name__,NORMAL, argstr))
        log.flush()
        indent += 1
        # Special case for wx size events: also log the widget geometry.
        # (Assumes self is a wx widget exposing GetPosition()/GetSize() --
        # confirm when reusing outside a wx app.)
        if theMethod.__name__ == 'OnSize':
            indentlog("position, size = %s%s %s%s" %(BOLDBLUE, self.GetPosition(), self.GetSize(), NORMAL))
        returnval = theMethod(self, *args,**kwds)
        indent -= 1
        return returnval
    return _methodWrapper
def logclass(cls, methodsAsFunctions=False,
             logMatch=".*", logNotMatch="asdfnomatch"):
    """
    A class "decorator". But python doesn't support decorator syntax for
    classes, so do it manually::
        class C(object):
            ...
        C = logclass(C)
    @param methodsAsFunctions: set to True if you always want methodname first
       in the display. Probably breaks if you're using class/staticmethods?
    """
    # __str__/__repr__ are always excluded: the wrappers themselves
    # stringify the instance, so wrapping those would recurse.
    allow = lambda s: re.match(logMatch, s) and not re.match(logNotMatch, s) and \
           s not in ('__str__','__repr__')
    namesToCheck = list(cls.__dict__.keys())
    for name in namesToCheck:
        if not allow(name): continue
        # unbound methods show up as mere functions in the values of
        # cls.__dict__,so we have to go through getattr
        value = getattr(cls, name)
        if methodsAsFunctions and callable(value):
            setattr(cls, name, logfunction(value))
        elif isinstance(value, types.MethodType):
            # NOTE(review): this branch reflects Python 2 semantics, where
            # getattr(cls, name) yields unbound/bound method objects.  In
            # Python 3 plain methods come back as functions and fall through
            # to the FunctionType branch below (wrapped as staticmethod);
            # confirm which Python version this file targets.
            #a normal instance method
            if value.__self__ == None:
                setattr(cls, name, logmethod(value))
            #class & static method are more complex.
            #a class method
            elif value.__self__ == cls:
                w = logmethod(value.__func__,
                              displayName="%s.%s" %(cls.__name__, value.__name__))
                setattr(cls, name, classmethod(w))
            else: assert False
        #a static method
        elif isinstance(value, types.FunctionType):
            w = logfunction(value,
                            displayName="%s.%s" %(cls.__name__, value.__name__))
            setattr(cls, name, staticmethod(w))
    return cls
class LogMetaClass(type):
    """
    Alternative to logclass(), you set this as a class's __metaclass__.
    It will not work if the metaclass has already been overridden (e.g.
    schema.Item or zope.interface (used in Twisted)
    Also, it should fail for class/staticmethods, that hasnt been added here
    yet.
    """
    def __new__(cls,classname,bases,classdict):
        # Per-class opt-in/opt-out patterns, read from the class body;
        # defaults: log everything, exclude nothing.
        logmatch = re.compile(classdict.get('logMatch','.*'))
        lognotmatch = re.compile(classdict.get('logNotMatch', 'nevermatchthisstringasdfasdf'))
        for attr,item in list(classdict.items()):
            if callable(item) and logmatch.match(attr) and not lognotmatch.match(attr):
                classdict['_H_%s'%attr] = item # rebind the method
                classdict[attr] = logmethod(item) # replace method by wrapper
        return type.__new__(cls,classname,bases,classdict)
# ---------------------------- Tests and examples ----------------------------
if __name__=='__main__':
print(); print("------------------- single function logging ---------------")
@logfunction
def test():
return 42
test()
print(); print("------------------- single method logging -----------------")
class Test1(object):
def __init__(self):
self.a = 10
@logmethod
def add(self,a,b): return a+b
@logmethod
def fac(self,val):
if val == 1:
return 1
else:
return val * self.fac(val-1)
@logfunction
def fac2(self, val):
if val == 1:
return 1
else:
return val * self.fac2(val-1)
t = Test1()
t.add(5,6)
t.fac(4)
print("--- tagged as @logfunction, doesn't understand 'self' is special:")
t.fac2(4)
print(); print("""-------------------- class "decorator" usage ------------------""")
class Test2(object):
#will be ignored
def __init__(self):
self.a = 10
def ignoreThis(self): pass
def add(self,a,b):return a+b
def fac(self,val):
if val == 1:
return 1
else:
return val * self.fac(val-1)
Test2 = logclass(Test2, logMatch='fac|add')
t2 = Test2()
t2.add(5,6)
t2.fac(4)
t2.ignoreThis()
print(); print("-------------------- metaclass usage ------------------")
class Test3(object, metaclass=LogMetaClass):
logNotMatch = 'ignoreThis'
def __init__(self): pass
def fac(self,val):
if val == 1:
return 1
else:
return val * self.fac(val-1)
def ignoreThis(self): pass
t3 = Test3()
t3.fac(4)
t3.ignoreThis()
print(); print("-------------- testing static & classmethods --------------")
class Test4(object):
@classmethod
def cm(cls, a, b):
print(cls)
return a+b
def im(self, a, b):
print(self)
return a+b
@staticmethod
def sm(a,b): return a+b
Test4 = logclass(Test4)
Test4.cm(4,3)
Test4.sm(4,3)
t4 = Test4()
t4.im(4,3)
t4.sm(4,3)
t4.cm(4,3)
#print; print "-------------- static & classmethods: where to put decorators? --------------"
#class Test5(object):
#@classmethod
#@logmethod
#def cm(cls, a, b):
#print cls
#return a+b
#@logmethod
#def im(self, a, b):
#print self
#return a+b
#@staticmethod
#@logfunction
#def sm(a,b): return a+b
#Test5.cm(4,3)
#Test5.sm(4,3)
#t5 = Test5()
#t5.im(4,3)
#t5.sm(4,3)
#t5.cm(4,3) | en | 0.790474 | # Written by <NAME>, <EMAIL>, www.anyall.org # * Originally written Aug. 2005 # * Posted to gist.github.com/16173 on Oct. 2008 # Copyright (c) 2003-2006 Open Source Applications Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Have all your function & method calls automatically logged, in indented outline form - unlike the stack snapshots in an interactive debugger, it tracks call structure & stack depths across time! It hooks into all function calls that you specify, and logs each time they're called. I find it especially useful when I don't know what's getting called when, or need to continuously test for state changes. (by hacking this file) Originally inspired from the python cookbook: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/198078 Currently you can - tag functions or individual methods to be autologged - tag an entire class's methods to be autologged - tag an entire module's classes and functions to be autologged TODO: - allow tagging of ALL modules in the program on startup? CAVEATS: - certain classes barf when you logclass() them -- most notably, SWIG-generated wrappers, and perhaps others. USAGE: see examples on the bottom of this file. Viewing tips ============ If your terminal can't keep up, try xterm or putty, they seem to be highest performance. xterm is available for all platforms through X11... Also try: (RunChandler > log &); tail -f log Also, you can "less -R log" afterward and get the colors correct. 
If you have long lines, less -RS kills wrapping, enhancing readability. Also can chop at formatAllArgs(). If you want long lines to be chopped realtime, try piping through less:: RunChandler | less -RS but then you have to hit 'space' lots to prevent chandler from freezing. less's 'F' command is supposed to do this correctly but doesn't work for me. #@@@ should use the standard python logging system? # Globally incremented across function calls, so tracks stack depth # ANSI escape codes for terminals. # X11 xterm: always works, all platforms # cygwin dosbox: run through |cat and then colors work # linux: works on console & gnome-terminal # mac: untested Where to put gritty heuristics to make an object appear in most useful form. defaults to __str__. ##shortclassname = str(obj.__class__).split('.')[-1] makes a nice string representation of all the arguments WARNING: this seems to break if you import SWIG wrapper classes directly into the module namespace ... logclass() creates weirdness when used on them, for some reason. @param module: could be either an actual module object, or the string you can import (which seems to be the same thing as its __name__). So you can say logmodule(__name__) at the end of a module definition, to log all of it. a decorator. # Log the entry into the function # Log return ##indentlog("return: %s"% shortstr(returnval) use this for class or instance methods, it formats with the object out front. #print >> log,"%s%s. %s (%s) " % (indStr*indent,selfstr,methodname,argstr) A class "decorator". But python doesn't support decorator syntax for classes, so do it manually:: class C(object): ... C = logclass(C) @param methodsAsFunctions: set to True if you always want methodname first in the display. Probably breaks if you're using class/staticmethods? # unbound methods show up as mere functions in the values of # cls.__dict__,so we have to go through getattr #a normal instance method #class & static method are more complex. 
#a class method #a static method Alternative to logclass(), you set this as a class's __metaclass__. It will not work if the metaclass has already been overridden (e.g. schema.Item or zope.interface (used in Twisted) Also, it should fail for class/staticmethods, that hasnt been added here yet. # rebind the method # replace method by wrapper # ---------------------------- Tests and examples ---------------------------- -------------------- class "decorator" usage ------------------ #will be ignored #print; print "-------------- static & classmethods: where to put decorators? --------------" #class Test5(object): #@classmethod #@logmethod #def cm(cls, a, b): #print cls #return a+b #@logmethod #def im(self, a, b): #print self #return a+b #@staticmethod #@logfunction #def sm(a,b): return a+b #Test5.cm(4,3) #Test5.sm(4,3) #t5 = Test5() #t5.im(4,3) #t5.sm(4,3) #t5.cm(4,3) | 2.042945 | 2 |
book/models.py | arkarhtethan/django-library-management-system | 1 | 6618438 | <gh_stars>1-10
from django.db import models
from django.utils.timezone import datetime
def image_upload_path(instance, filename):
    """Build a timestamp-based upload name, keeping the original extension.

    Used as ``upload_to`` for the ImageFields in this module: the uploaded
    file is renamed to the current time so concurrent uploads cannot
    collide on the client-chosen name.

    Args:
        instance: model instance being saved (unused; required by Django's
            ``upload_to`` callable signature).
        filename: the client-supplied file name.

    Returns:
        ``"<timestamp>.<ext>"``, or just ``"<timestamp>"`` when the
        original name carries no extension.
    """
    # rsplit('.', 1) keeps only the last suffix, so multi-dot names such as
    # "photo.tar.gz" or extension-less names no longer raise ValueError
    # (the old ``_, extension = filename.split('.')`` required exactly one
    # dot in the name).
    parts = filename.rsplit('.', 1)
    current_time = str(datetime.now())
    # NOTE(review): str(datetime.now()) contains spaces and colons; Django
    # storage backends sanitize names, but confirm this is acceptable.
    if len(parts) == 2 and parts[1]:
        return f'{current_time}.{parts[1]}'
    return current_time
class CreatedUpdatedMixin(models.Model):
    """Abstract base adding self-maintaining created/updated timestamps."""
    # Set once when the row is first inserted.
    created_at = models.DateTimeField(auto_now_add=True)
    # Refreshed automatically on every save().
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
class Shelf(CreatedUpdatedMixin):
    """Model definition for Shelf (a physical shelf holding books)."""
    name = models.CharField(max_length=100, verbose_name="Shelf name")
    # Soft on/off switch; inactive shelves remain in the database.
    active = models.BooleanField(default=True)
    class Meta:
        """Meta definition for Shelf."""
        # Newest records first.
        ordering = ('-created_at', '-updated_at')
        verbose_name = 'Shelf'
        # NOTE(review): grammatically "Shelves"; changing it would alter the
        # admin-facing string, so it is documented rather than changed here.
        verbose_name_plural = 'Shelfs'
    def __str__(self):
        return self.name
class Category(CreatedUpdatedMixin):
    """Model definition for Category (a book genre/section)."""
    name = models.CharField(max_length=50)
    # Nullable: nothing in this model auto-populates the slug on save.
    slug = models.SlugField(null=True, blank=True)
    class Meta:
        """Meta definition for Category."""
        # Newest records first.
        ordering = ('-created_at', '-updated_at')
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
    def __str__(self):
        """Unicode representation of Category."""
        return self.name
class Book(CreatedUpdatedMixin):
    """Model definition for Book."""
    name = models.CharField(max_length=50)
    # A book may have several authors and an author several books.
    author = models.ManyToManyField(to="Author", related_name='books')
    # SET_NULL: deleting a category keeps its books, uncategorised.
    category = models.ForeignKey(
        "Category", on_delete=models.SET_NULL, null=True, blank=True, related_name='books')
    # NOTE(review): presumably the number of copies held -- confirm.
    amount = models.PositiveIntegerField()
    price = models.DecimalField(
        max_digits=20, decimal_places=2, verbose_name="Unit Price")
    slug = models.SlugField(null=True, blank=True)
    available = models.BooleanField(default=True)
    description = models.TextField()
    # Uploaded covers are renamed to a timestamp by image_upload_path().
    image = models.ImageField(
        upload_to=image_upload_path, null=True, blank=True)
    # SET_NULL: deleting a shelf keeps its books, unshelved.
    shelf = models.ForeignKey(
        to="Shelf", on_delete=models.SET_NULL, null=True, blank=True, related_name='books')
    class Meta:
        """Meta definition for Book."""
        # Newest records first.
        ordering = ('-created_at', '-updated_at')
        verbose_name = 'Book'
        verbose_name_plural = 'Books'
    def __str__(self):
        """Unicode representation of Book."""
        return self.name
class Author(CreatedUpdatedMixin):
    """Model definition for Author."""
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    slug = models.SlugField(null=True, blank=True)
    # NOTE(review): full DateTimeFields where DateFields would suffice --
    # kept as-is since changing the type would require a migration.
    born = models.DateTimeField()
    # Nullable: living authors have no death date.
    died = models.DateTimeField(null=True, blank=True)
    image = models.ImageField(
        upload_to=image_upload_path, null=True, blank=True)
    class Meta:
        """Meta definition for Author."""
        # Newest records first.
        ordering = ('-created_at', '-updated_at')
        verbose_name = 'Author'
        verbose_name_plural = 'Authors'
    def __str__(self):
        """Unicode representation of Author."""
        return f'{self.first_name} {self.last_name}'
| from django.db import models
from django.utils.timezone import datetime
def image_upload_path(instance, filename):
_, extension = filename.split('.')
current_time = str(datetime.now())
filename = f'{current_time}.{extension}'
return filename
class CreatedUpdatedMixin(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Shelf(CreatedUpdatedMixin):
"""Model definition for Shelf."""
name = models.CharField(max_length=100, verbose_name="Shelf name")
active = models.BooleanField(default=True)
class Meta:
"""Meta definition for Category."""
ordering = ('-created_at', '-updated_at')
verbose_name = 'Shelf'
verbose_name_plural = 'Shelfs'
def __str__(self):
return self.name
class Category(CreatedUpdatedMixin):
"""Model definition for Category."""
name = models.CharField(max_length=50)
slug = models.SlugField(null=True, blank=True)
class Meta:
"""Meta definition for Category."""
ordering = ('-created_at', '-updated_at')
verbose_name = 'Category'
verbose_name_plural = 'Categories'
def __str__(self):
"""Unicode representation of Category."""
return self.name
class Book(CreatedUpdatedMixin):
"""Model definition for Book."""
name = models.CharField(max_length=50)
author = models.ManyToManyField(to="Author", related_name='books')
category = models.ForeignKey(
"Category", on_delete=models.SET_NULL, null=True, blank=True, related_name='books')
amount = models.PositiveIntegerField()
price = models.DecimalField(
max_digits=20, decimal_places=2, verbose_name="Unit Price")
slug = models.SlugField(null=True, blank=True)
available = models.BooleanField(default=True)
description = models.TextField()
image = models.ImageField(
upload_to=image_upload_path, null=True, blank=True)
shelf = models.ForeignKey(
to="Shelf", on_delete=models.SET_NULL, null=True, blank=True, related_name='books')
class Meta:
"""Meta definition for Book."""
ordering = ('-created_at', '-updated_at')
verbose_name = 'Book'
verbose_name_plural = 'Books'
def __str__(self):
"""Unicode representation of Book."""
return self.name
class Author(CreatedUpdatedMixin):
"""Model definition for Author."""
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
slug = models.SlugField(null=True, blank=True)
born = models.DateTimeField()
died = models.DateTimeField(null=True, blank=True)
image = models.ImageField(
upload_to=image_upload_path, null=True, blank=True)
class Meta:
"""Meta definition for Author."""
ordering = ('-created_at', '-updated_at')
verbose_name = 'Author'
verbose_name_plural = 'Authors'
def __str__(self):
"""Unicode representation of Author."""
return f'{self.first_name} {self.last_name}' | en | 0.813205 | Model definition for Shelf. Meta definition for Category. Model definition for Category. Meta definition for Category. Unicode representation of Category. Model definition for Book. Meta definition for Book. Unicode representation of Book. Model definition for Author. Meta definition for Author. Unicode representation of Author. | 2.345704 | 2 |
cvbapp/migrations/0010_auto_20170718_1234.py | OnlineS3/3.1.-Collaborative-vision-building | 0 | 6618439 | <reponame>OnlineS3/3.1.-Collaborative-vision-building
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-18 11:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ScheduledMeetings -> ScheduledMeeting (singular model name)."""
    # Must run after the migration that introduced the model.
    dependencies = [
        ('cvbapp', '0009_scheduledmeetings'),
    ]
    operations = [
        # RenameModel also renames the underlying database table.
        migrations.RenameModel(
            old_name='ScheduledMeetings',
            new_name='ScheduledMeeting',
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-18 11:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cvbapp', '0009_scheduledmeetings'),
]
operations = [
migrations.RenameModel(
old_name='ScheduledMeetings',
new_name='ScheduledMeeting',
),
] | en | 0.771851 | # -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-07-18 11:34 | 1.527779 | 2 |
tools/nd2json.py | jccardonar/trigger | 380 | 6618440 | #!/usr/bin/env python
# nd2json.py - Converts netdevices.xml to netdevices.json and reports
# performance stuff
from xml.etree.cElementTree import ElementTree, parse
try:
import simplejson as json
except ImportError:
import json
import sys
import time
if len(sys.argv) < 2:
sys.exit("usage: %s </path/to/netdevices.xml>" % sys.argv[0])
else:
ndfile = sys.argv[1]
print # Parse XML
print 'Parsing XML', ndfile
start = time.time()
nodes = parse(ndfile).findall('device')
print 'Done:', time.time() - start, 'seconds.'
devices = []
print # Convert to Python structure
print 'Converting to Python structure.'
start = time.time()
for node in nodes:
dev = {}
for e in node.getchildren():
dev[e.tag] = e.text
devices.append(dev)
print 'Done:', time.time() - start, 'seconds.'
print # Convert to JSON
'''
print 'Dumping to JSON...'
start = time.time()
jsondata = json.dumps(devices)
print 'Done:', time.time() - start, 'seconds.'
'''
print # Writing to file
outfile = 'netdevices.json'
with open(outfile, 'wb') as f:
print 'Writing to disk...'
start = time.time()
json.dump(devices, f, ensure_ascii=False, check_circular=False, indent=4)
#json.dump(devices, f, ensure_ascii=False, check_circular=False)
#f.write(jsondata)
print 'Done:', time.time() - start, 'seconds.'
#print 'Wrote {0} bytes to {1}'.format(len(jsondata), outfile)
print # Reading from file
with open(outfile, 'rb') as g:
print 'Reading from disk...'
start = time.time()
jsondata = json.load(g)
print 'Done:', time.time() - start, 'seconds.'
| #!/usr/bin/env python
# nd2json.py - Converts netdevices.xml to netdevices.json and reports
# performance stuff
from xml.etree.cElementTree import ElementTree, parse
try:
import simplejson as json
except ImportError:
import json
import sys
import time
if len(sys.argv) < 2:
sys.exit("usage: %s </path/to/netdevices.xml>" % sys.argv[0])
else:
ndfile = sys.argv[1]
print # Parse XML
print 'Parsing XML', ndfile
start = time.time()
nodes = parse(ndfile).findall('device')
print 'Done:', time.time() - start, 'seconds.'
devices = []
print # Convert to Python structure
print 'Converting to Python structure.'
start = time.time()
for node in nodes:
dev = {}
for e in node.getchildren():
dev[e.tag] = e.text
devices.append(dev)
print 'Done:', time.time() - start, 'seconds.'
print # Convert to JSON
'''
print 'Dumping to JSON...'
start = time.time()
jsondata = json.dumps(devices)
print 'Done:', time.time() - start, 'seconds.'
'''
print # Writing to file
outfile = 'netdevices.json'
with open(outfile, 'wb') as f:
print 'Writing to disk...'
start = time.time()
json.dump(devices, f, ensure_ascii=False, check_circular=False, indent=4)
#json.dump(devices, f, ensure_ascii=False, check_circular=False)
#f.write(jsondata)
print 'Done:', time.time() - start, 'seconds.'
#print 'Wrote {0} bytes to {1}'.format(len(jsondata), outfile)
print # Reading from file
with open(outfile, 'rb') as g:
print 'Reading from disk...'
start = time.time()
jsondata = json.load(g)
print 'Done:', time.time() - start, 'seconds.'
| en | 0.468527 | #!/usr/bin/env python # nd2json.py - Converts netdevices.xml to netdevices.json and reports # performance stuff # Parse XML # Convert to Python structure # Convert to JSON print 'Dumping to JSON...' start = time.time() jsondata = json.dumps(devices) print 'Done:', time.time() - start, 'seconds.' # Writing to file #json.dump(devices, f, ensure_ascii=False, check_circular=False) #f.write(jsondata) #print 'Wrote {0} bytes to {1}'.format(len(jsondata), outfile) # Reading from file | 2.83954 | 3 |
experiments/transfer/yolo_air.py | e2crawfo/auto_yolo | 54 | 6618441 | <gh_stars>10-100
import argparse
from auto_yolo import envs
from dps.tf.updater import DummyUpdater
readme = "yolo_air transfer experiment"
distributions = [
dict(min_chars=1, max_chars=5),
dict(min_chars=6, max_chars=10),
dict(min_chars=11, max_chars=15),
]
durations = dict(
long=dict(
max_hosts=1, ppn=12, cpp=2, gpu_set="0,1,2,3", wall_time="48hours",
project="rpp-bengioy", cleanup_time="20mins",
slack_time="5mins", n_repeats=8, step_time_limit="48hours"),
build=dict(
max_hosts=1, ppn=1, cpp=2, gpu_set="0", wall_time="3hours",
project="rpp-bengioy", cleanup_time="2mins",
slack_time="2mins", n_repeats=1, step_time_limit="3hours", n_param_settings=1,
config=dict(
get_updater=DummyUpdater,
render_hook=None,
load_path=None,
do_train=False,
curriculum=[
dict(min_chars=1, max_chars=5, postprocessing="random"),
dict(min_chars=6, max_chars=10, postprocessing="random"),
dict(min_chars=11, max_chars=15, postprocessing="random")] + [
dict(min_chars=n, max_chars=n, n_train=32) for n in range(1, 21)])
),
short=dict(
max_hosts=1, ppn=2, cpp=2, gpu_set="0", wall_time="20mins",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=1, n_param_settings=4),
small_oak=dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0", wall_time="30mins",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=2, kind="parallel", host_pool=":"),
build_oak=dict(
max_hosts=1, ppn=1, cpp=2, gpu_set="0", wall_time="1year",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=1, kind="parallel", host_pool=":",
config=dict(do_train=False)),
oak=dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0", wall_time="1year",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=6, kind="parallel", host_pool=":",
step_time_limit="1year"),
)
config = dict(max_steps=int(2e5), min_chars=11, max_chars=15)
config["background_cfg:mode"] = "learn_solid"
parser = argparse.ArgumentParser()
parser.add_argument("--no-lookback", action="store_true")
args, _ = parser.parse_known_args()
if args.no_lookback:
config["n_lookback"] = 0
envs.run_experiment(
"yolo_air_transfer", config, readme,
alg="yolo_air_transfer", task="scatter", durations=durations,
distributions=distributions
)
| import argparse
from auto_yolo import envs
from dps.tf.updater import DummyUpdater
readme = "yolo_air transfer experiment"
distributions = [
dict(min_chars=1, max_chars=5),
dict(min_chars=6, max_chars=10),
dict(min_chars=11, max_chars=15),
]
durations = dict(
long=dict(
max_hosts=1, ppn=12, cpp=2, gpu_set="0,1,2,3", wall_time="48hours",
project="rpp-bengioy", cleanup_time="20mins",
slack_time="5mins", n_repeats=8, step_time_limit="48hours"),
build=dict(
max_hosts=1, ppn=1, cpp=2, gpu_set="0", wall_time="3hours",
project="rpp-bengioy", cleanup_time="2mins",
slack_time="2mins", n_repeats=1, step_time_limit="3hours", n_param_settings=1,
config=dict(
get_updater=DummyUpdater,
render_hook=None,
load_path=None,
do_train=False,
curriculum=[
dict(min_chars=1, max_chars=5, postprocessing="random"),
dict(min_chars=6, max_chars=10, postprocessing="random"),
dict(min_chars=11, max_chars=15, postprocessing="random")] + [
dict(min_chars=n, max_chars=n, n_train=32) for n in range(1, 21)])
),
short=dict(
max_hosts=1, ppn=2, cpp=2, gpu_set="0", wall_time="20mins",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=1, n_param_settings=4),
small_oak=dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0", wall_time="30mins",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=2, kind="parallel", host_pool=":"),
build_oak=dict(
max_hosts=1, ppn=1, cpp=2, gpu_set="0", wall_time="1year",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=1, kind="parallel", host_pool=":",
config=dict(do_train=False)),
oak=dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0", wall_time="1year",
project="rpp-bengioy", cleanup_time="1mins",
slack_time="1mins", n_repeats=6, kind="parallel", host_pool=":",
step_time_limit="1year"),
)
config = dict(max_steps=int(2e5), min_chars=11, max_chars=15)
config["background_cfg:mode"] = "learn_solid"
parser = argparse.ArgumentParser()
parser.add_argument("--no-lookback", action="store_true")
args, _ = parser.parse_known_args()
if args.no_lookback:
config["n_lookback"] = 0
envs.run_experiment(
"yolo_air_transfer", config, readme,
alg="yolo_air_transfer", task="scatter", durations=durations,
distributions=distributions
) | none | 1 | 1.90892 | 2 | |
OSINT-Reconnaissance/inquisitor/inquisitor/__init__.py | FOGSEC/TID3xploits | 5 | 6618442 | import inquisitor.assets.block
import inquisitor.assets.email
import inquisitor.assets.host
import inquisitor.assets.linkedin
import inquisitor.assets.registrant
import sys
import unqlite
ASSET_MODULES = [
inquisitor.assets.registrant,
inquisitor.assets.block,
inquisitor.assets.host,
inquisitor.assets.email,
inquisitor.assets.linkedin,
]
class IntelligenceRepository:
    """UnQLite-backed store of OSINT assets (registrants, blocks, hosts,
    emails, LinkedIn profiles).

    One document collection is created per module in ASSET_MODULES; each
    record is stored as ``{'data': asset.__dict__}`` and addressed by the
    value of the attribute named by that module's OBJECT_ID.
    """
    def __init__(self, path):
        """Open (or create) the UnQLite database at *path* and ensure one
        collection per asset type exists."""
        self.database = unqlite.UnQLite(path)
        self.repositories = dict()
        for asset_module in ASSET_MODULES:
            identifier = asset_module.REPOSITORY
            repository = self.database.collection(identifier)
            repository.create()
            self.repositories[identifier] = repository
    def get_asset_data(self, asset):
        """Return the raw stored record whose identifier equals *asset*'s,
        or None when nothing has been stored yet.

        Note: repository.filter() scans the whole collection, so lookup is
        O(n) in the number of stored assets of that type.
        """
        module = sys.modules[asset.__class__.__module__]
        repository = self.repositories[module.REPOSITORY]
        identifier = module.OBJECT_ID
        query = getattr(asset, identifier)
        results = repository.filter(lambda a: a['data'][identifier] == query)
        return results[0] if results else None
    def get_asset_object(self, asset, create=False, store=False):
        """Rehydrate the stored asset equal to *asset*.

        Returns a ``(record_id, asset_object)`` pair when a record exists
        (``record_id`` is UnQLite's ``__id``).  When no record exists and
        ``create`` is true, a fresh asset is constructed (and persisted too
        when ``store`` is true, in which case ``record_id`` is its new id,
        otherwise None).  Returns None when unknown and ``create`` is false.
        """
        result = self.get_asset_data(asset)
        asset_type = asset.__class__
        if result:
            record_id = result['__id']
            data = result['data']
            # Bypass __init__ (which may trigger enrichment/lookups) and
            # restore the persisted attribute dict directly.
            obj = asset_type.__new__(asset_type)
            for name, value in data.iteritems():
                setattr(obj, name, value)
            obj.transforms = dict(obj.transforms)
            return (record_id, obj)
        elif create:
            asset_module = sys.modules[asset_type.__module__]
            asset = asset_type(getattr(asset, asset_module.OBJECT_ID))
            # BUG FIX: the previous code built the tuple first and then ran
            # ``result[0] = self.put_asset_object(asset)``, which raises
            # TypeError because tuples are immutable.  Compute the id first
            # and build the tuple once.
            record_id = self.put_asset_object(asset) if store else None
            return (record_id, asset)
        return None
    def get_asset_string(
        self,
        asset_type,
        identifier,
        create=False,
        store=False
    ):
        """Like get_asset_object(), but builds the probe asset from its
        type and string identifier without running the asset's __init__."""
        query = asset_type.__new__(asset_type)
        module = sys.modules[asset_type.__module__]
        setattr(query, module.OBJECT_ID, identifier)
        return self.get_asset_object(query, create=create, store=store)
    def get_assets(self, include, limit=None):
        """Return the set of stored assets for which ``include(obj, data)``
        is true, scanning at most *limit* records per asset type (all
        records when *limit* is None or 0)."""
        results = set()
        for asset_module in ASSET_MODULES:
            asset_class = asset_module.ASSET_CLASS
            repository = self.repositories[asset_module.REPOSITORY]
            index = 0
            for data in repository.all():
                data = data['data']
                obj = asset_class.__new__(asset_class)
                for name, value in data.iteritems():
                    setattr(obj, name, value)
                obj.transforms = dict(obj.transforms)
                if include(obj, data):
                    results.add(obj)
                index += 1
                if limit and index >= limit:
                    break
        return results
    def put_asset_object(self, asset, overwrite=False):
        """Persist *asset*.

        Returns the record id when a write happened, otherwise None.
        Existing records are rewritten only when ``overwrite`` is true.
        Whenever a write happens, the asset's related() assets are stored
        too (never overwriting existing ones).
        """
        result = None
        module = sys.modules[asset.__class__.__module__]
        repository = self.repositories[module.REPOSITORY]
        exists = self.get_asset_data(asset)
        if not exists:
            result = repository.store({'data': asset.__dict__})
        elif overwrite:
            repository.update(exists['__id'], {'data': asset.__dict__})
            result = exists['__id']
        if not exists or overwrite:
            for related in asset.related(self):
                self.put_asset_object(related, overwrite=False)
        return result
    def put_asset_string(
        self,
        asset_type,
        identifier,
        owned=None,
        overwrite=False
    ):
        """Construct ``asset_type(identifier, owned=owned)`` and persist it
        (overwriting an existing record only when ``overwrite`` is true)."""
        asset = asset_type(identifier, owned=owned)
        self.put_asset_object(asset, overwrite=overwrite)
| import inquisitor.assets.block
import inquisitor.assets.email
import inquisitor.assets.host
import inquisitor.assets.linkedin
import inquisitor.assets.registrant
import sys
import unqlite
ASSET_MODULES = [
inquisitor.assets.registrant,
inquisitor.assets.block,
inquisitor.assets.host,
inquisitor.assets.email,
inquisitor.assets.linkedin,
]
class IntelligenceRepository:
def __init__(self, path):
self.database = unqlite.UnQLite(path)
self.repositories = dict()
for asset_module in ASSET_MODULES:
identifier = asset_module.REPOSITORY
repository = self.database.collection(identifier)
repository.create()
self.repositories[identifier] = repository
def get_asset_data(self, asset):
module = sys.modules[asset.__class__.__module__]
repository = self.repositories[module.REPOSITORY]
identifier = module.OBJECT_ID
query = getattr(asset, identifier)
results = repository.filter(lambda a: a['data'][identifier] == query)
return results[0] if results else None
def get_asset_object(self, asset, create=False, store=False):
result = self.get_asset_data(asset)
asset_type = asset.__class__
if result:
__id = result['__id']
data = result['data']
obj = asset_type.__new__(asset_type)
for name, value in data.iteritems():
setattr(obj, name, value)
obj.transforms = dict(obj.transforms)
return (__id, obj)
elif create:
asset_module = sys.modules[asset_type.__module__]
asset = asset_type(getattr(asset, asset_module.OBJECT_ID))
result = (None, asset)
if store:
result[0] = self.put_asset_object(asset)
return result
return None
def get_asset_string(
self,
asset_type,
identifier,
create=False,
store=False
):
query = asset_type.__new__(asset_type)
module = sys.modules[asset_type.__module__]
setattr(query, module.OBJECT_ID, identifier)
return self.get_asset_object(query, create=create, store=store)
def get_assets(self, include, limit=None):
results = set()
for asset_module in ASSET_MODULES:
asset_class = asset_module.ASSET_CLASS
repository = self.repositories[asset_module.REPOSITORY]
index = 0
for data in repository.all():
data = data['data']
obj = asset_class.__new__(asset_class)
for name, value in data.iteritems():
setattr(obj, name, value)
obj.transforms = dict(obj.transforms)
if include(obj, data):
results.add(obj)
index += 1
if limit and index >= limit:
break
return results
def put_asset_object(self, asset, overwrite=False):
result = None
module = sys.modules[asset.__class__.__module__]
repository = self.repositories[module.REPOSITORY]
exists = self.get_asset_data(asset)
if not exists:
result = repository.store({'data': asset.__dict__})
elif overwrite:
repository.update(exists['__id'], {'data': asset.__dict__})
result = exists['__id']
if not exists or overwrite:
for related in asset.related(self):
self.put_asset_object(related, overwrite=False)
return result
def put_asset_string(
self,
asset_type,
identifier,
owned=None,
overwrite=False
):
asset = asset_type(identifier, owned=owned)
self.put_asset_object(asset, overwrite=overwrite)
| none | 1 | 2.17342 | 2 | |
Dirichlet_Process_and_Gibbs_Sampling.py | MostafaBouziane/projet_recherche_mostafa | 0 | 6618443 | <filename>Dirichlet_Process_and_Gibbs_Sampling.py
import numpy as np
import math
from random import random
from math import exp
#------------------------------- fonctions nécessaires pour l'échantillonage de Gibbs et le calcul du logarithme de vraisemblance---------------#
def tirage_multinomiale(x):
    """Draw one index from the categorical distribution with weights *x*.

    A uniform draw on [0, 1) is compared against the running cumulative
    sum of the weights; the first index whose cumulative weight exceeds
    the draw is returned.  When the weights sum to less than the draw,
    the last index is returned as a fallback.
    """
    u = random()
    cumulative = 0.0
    for index, weight in enumerate(x):
        cumulative += weight
        if u < cumulative:
            return index
    return index
def LikeHood_Log(p, data):
    """Gaussian log-likelihood of *data*, constant term dropped.

    *p* is a flat list ``[m_0, s_0, m_1, s_1, ...]`` giving, per
    dimension, the mean and spread parameter of an independent normal;
    the ``-0.5*log(2*pi)`` terms are omitted since they cancel when
    comparing clusters.
    """
    total = 0
    for i, value in enumerate(data):
        mean, spread = p[2 * i], p[2 * i + 1]
        total -= 0.5 * math.log(spread) + (value - mean) ** 2 / (2 * spread)
    return total
def maj_of_Params(donne):
"""
Input:
- donne : N'xD matrix, with D the dataset size, and N' the number
of observations of the current cluster
Output:
- p is a list of D lists, where D is the ambiant dimension, and
each of the D element is a two components list composed of
the mean of the dataset and its standard deviation.
$[[\mu1, \sigma1], [\mu2, \sigma2], \dots]$
"""
p=[]
for a in range (donne.shape[1]):
m = np.mean(donne[:,a])
v = np.std(donne[:,a])
if v==0:
v=1
p = p + [m, v]
return p
#------------------------------------L'algorithme Principale: échantillonage de Gibbs et processus de Dirichlet------------------------------#
def Dirichlet_vs_Gibbs(data,alpha,iterations):
"""
data should be a NxD matrix, where N is the number of observations and D is the dataset dimension.
"""
for e in range (iterations):
clusters=len(data[:,0])*[0]
nbrp=[]
to=[]
eta=[]
ab=max(clusters)
if ab==0:
ab=1
for b in range (ab+1):
nbrp.append(clusters.count(b))
MaxLikeHood=-float("inf")
for c in range (ab):
for d in range (len(clusters)):
if clusters[d]==c:
to.append(data[d])
to=np.array(to)
eta.append(maj_of_Params(to))
for f in range (len(np.array(data)[:,0])):
ge=[]
be=[]
proba=[]
nbrp=[]
ab=max(clusters)
if ab==0:
nbrp=[150]
else :
for b in range (ab+1):
nbrp.append(clusters.count(b))
nbrp[clusters[f]]=nbrp[clusters[f]]-1
if nbrp[clusters[f]]==0:
del eta[f]
del clusters[f]
for g in range (len(clusters)-f):
clusters[g+f]=clusters[g+f]-1
else:
for k in range (len(clusters)):
if clusters[k]==clusters[f]:
be.append(data[clusters[k]])
be=np.array(be)
eta[clusters[f]] = maj_of_Params(be)
for l in range (len(nbrp)) :
proba.append(math.log10(nbrp[l]))
proba.append(math.log10(alpha))
for m in range (len(nbrp)):
proba[m]=proba[m]+LikeHood_Log(eta[m],data[m])
for n in range (len(proba)):
proba[n]=exp(proba[n]-max(proba))
probas=np.array(proba)
probas=probas/sum(probas)
clusters[f]=tirage_multinomiale(probas)
nbrp[0]=clusters.count(0)
ab=max(clusters)
for b in range (ab,ab+1):
nbrp.append(clusters.count(b))
if max(clusters)<clusters[f]:
nbrp[clusters[f]]=0
else:
nbrp[clusters[f]]=nbrp[clusters[f]]+1
for q in range(len(clusters)):
if clusters[q]==clusters[f]:
ge.append(data[clusters[q]])
ge=np.array(ge)
eta.append(maj_of_Params(ge))
if iterations>1:
v=0
for r in range(len(np.array(data)[:,0])):
v=v+LikeHood_Log(eta[clusters[r]],data[r])
if v>MaxLikeHood:
MaxLikeHood=v
LikeHoodClusters=clusters
else:
LikeHoodClusters =clusters
return LikeHoodClusters
| <filename>Dirichlet_Process_and_Gibbs_Sampling.py
import numpy as np
import math
from random import random
from math import exp
#------------------------------- fonctions nécessaires pour l'échantillonage de Gibbs et le calcul du logarithme de vraisemblance---------------#
def tirage_multinomiale(x):
a=random()
b=0
for i in range (len(x)):
b=b+x[i]
if a<b :
break
return i
def LikeHood_Log(p,data):
logv=0
for i in range (len(data)):
m=p[2*i]
s=p[2*i+1]
logv=logv- 0.5 * math.log(s) - (data[i] - m)**2/(2*s)
return logv
def maj_of_Params(donne):
"""
Input:
- donne : N'xD matrix, with D the dataset size, and N' the number
of observations of the current cluster
Output:
- p is a list of D lists, where D is the ambiant dimension, and
each of the D element is a two components list composed of
the mean of the dataset and its standard deviation.
$[[\mu1, \sigma1], [\mu2, \sigma2], \dots]$
"""
p=[]
for a in range (donne.shape[1]):
m = np.mean(donne[:,a])
v = np.std(donne[:,a])
if v==0:
v=1
p = p + [m, v]
return p
#------------------------------------L'algorithme Principale: échantillonage de Gibbs et processus de Dirichlet------------------------------#
def Dirichlet_vs_Gibbs(data,alpha,iterations):
"""
data should be a NxD matrix, where N is the number of observations and D is the dataset dimension.
"""
for e in range (iterations):
clusters=len(data[:,0])*[0]
nbrp=[]
to=[]
eta=[]
ab=max(clusters)
if ab==0:
ab=1
for b in range (ab+1):
nbrp.append(clusters.count(b))
MaxLikeHood=-float("inf")
for c in range (ab):
for d in range (len(clusters)):
if clusters[d]==c:
to.append(data[d])
to=np.array(to)
eta.append(maj_of_Params(to))
for f in range (len(np.array(data)[:,0])):
ge=[]
be=[]
proba=[]
nbrp=[]
ab=max(clusters)
if ab==0:
nbrp=[150]
else :
for b in range (ab+1):
nbrp.append(clusters.count(b))
nbrp[clusters[f]]=nbrp[clusters[f]]-1
if nbrp[clusters[f]]==0:
del eta[f]
del clusters[f]
for g in range (len(clusters)-f):
clusters[g+f]=clusters[g+f]-1
else:
for k in range (len(clusters)):
if clusters[k]==clusters[f]:
be.append(data[clusters[k]])
be=np.array(be)
eta[clusters[f]] = maj_of_Params(be)
for l in range (len(nbrp)) :
proba.append(math.log10(nbrp[l]))
proba.append(math.log10(alpha))
for m in range (len(nbrp)):
proba[m]=proba[m]+LikeHood_Log(eta[m],data[m])
for n in range (len(proba)):
proba[n]=exp(proba[n]-max(proba))
probas=np.array(proba)
probas=probas/sum(probas)
clusters[f]=tirage_multinomiale(probas)
nbrp[0]=clusters.count(0)
ab=max(clusters)
for b in range (ab,ab+1):
nbrp.append(clusters.count(b))
if max(clusters)<clusters[f]:
nbrp[clusters[f]]=0
else:
nbrp[clusters[f]]=nbrp[clusters[f]]+1
for q in range(len(clusters)):
if clusters[q]==clusters[f]:
ge.append(data[clusters[q]])
ge=np.array(ge)
eta.append(maj_of_Params(ge))
if iterations>1:
v=0
for r in range(len(np.array(data)[:,0])):
v=v+LikeHood_Log(eta[clusters[r]],data[r])
if v>MaxLikeHood:
MaxLikeHood=v
LikeHoodClusters=clusters
else:
LikeHoodClusters =clusters
return LikeHoodClusters
| en | 0.460308 | #------------------------------- fonctions nécessaires pour l'échantillonage de Gibbs et le calcul du logarithme de vraisemblance---------------# Input: - donne : N'xD matrix, with D the dataset size, and N' the number of observations of the current cluster Output: - p is a list of D lists, where D is the ambiant dimension, and each of the D element is a two components list composed of the mean of the dataset and its standard deviation. $[[\mu1, \sigma1], [\mu2, \sigma2], \dots]$ #------------------------------------L'algorithme Principale: échantillonage de Gibbs et processus de Dirichlet------------------------------# data should be a NxD matrix, where N is the number of observations and D is the dataset dimension. | 2.920334 | 3 |
packs/bitbucket/actions/delete_issues.py | userlocalhost2000/st2contrib | 164 | 6618444 | <reponame>userlocalhost2000/st2contrib
from lib.action import BitBucketAction
class DeleteIssuesAction(BitBucketAction):
def run(self, repo, ids):
"""
Delete an issue
"""
bb = self._get_client(repo=repo)
for i in ids:
success, result = bb.issue.delete(issue_id=i)
return result
| from lib.action import BitBucketAction
class DeleteIssuesAction(BitBucketAction):
def run(self, repo, ids):
"""
Delete an issue
"""
bb = self._get_client(repo=repo)
for i in ids:
success, result = bb.issue.delete(issue_id=i)
return result | en | 0.989759 | Delete an issue | 2.384696 | 2 |
personal_finance/personal_finance/api/mf_api/mf_api.py | fderyckel/personal_finance | 4 | 6618445 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import requests
import frappe
def get_all_mf_codes():
link = get_mf_link() + "mf"
results = requests.get(link)
return results.json()
def get_mf_quote(scheme_code):
link = get_mf_link() + "mf/" + str(scheme_code)
results = requests.get(link)
results = results.json()
if results.get("status", "FAIL") == "SUCCESS":
results = frappe._dict(results)
return results
else:
print(f"No Results found for {scheme_code}")
frappe.msgprint(f"No Results found for {scheme_code}")
def get_mf_link():
return "https://api.mfapi.in/"
| # -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import requests
import frappe
def get_all_mf_codes():
link = get_mf_link() + "mf"
results = requests.get(link)
return results.json()
def get_mf_quote(scheme_code):
link = get_mf_link() + "mf/" + str(scheme_code)
results = requests.get(link)
results = results.json()
if results.get("status", "FAIL") == "SUCCESS":
results = frappe._dict(results)
return results
else:
print(f"No Results found for {scheme_code}")
frappe.msgprint(f"No Results found for {scheme_code}")
def get_mf_link():
return "https://api.mfapi.in/"
| en | 0.773797 | # -*- coding: utf-8 -*- # Copyright (c) 2020, <NAME> and contributors # For license information, please see license.txt | 2.421474 | 2 |
ausdex/__init__.py | rbturnbull/ausdex | 0 | 6618446 | from .inflation import calc_inflation, Location
| from .inflation import calc_inflation, Location
| none | 1 | 1.14547 | 1 | |
enrich.py | i40-Tools/OntEnrich | 1 | 6618447 | """Module for enlarging existing ontology based on knowledge from DBpedia.
"""
from landscape import Ontology, DBpedia
import json
import sys
def main(options_path):
"""Main function.
Describes abstract algorithm of the ontology enriching.
"""
options = json.load(open(options_path))
ont = Ontology(options["input_file"])
ont_query = """
PREFIX sto: <https://w3id.org/i40/sto#>
SELECT ?sub ?dbPediaResource WHERE {
?sub sto:hasDBpediaResource ?dbPediaResource .
}
"""
print('...starting enrichment process')
trip_num = 0
subj_num = 0
for row in ont.query(ont_query):
subject = row[0]
resource = get_resource(row[1])
whitelist = ', '.join(options["whitelist"])
dbpedia_query = 'SELECT ?pred ?obj WHERE {' + \
'<http://dbpedia.org/resource/' + resource + '> ?pred ?obj . '
if whitelist:
dbpedia_query += 'FILTER(?pred IN (' + whitelist + '))'
dbpedia_query += '}'
dbpedia_result = DBpedia().query(dbpedia_query)
print('/', sep=' ', end='', flush=True)
ont = set_blacklist(ont, options["blacklist"])
logs = ont.enrich(subject, dbpedia_result)
trip_num += len(logs["trip"])
subj_num += 1
ont = set_prefixes(ont, options["prefixes"])
filename = get_filename(options["input_file"])
ont.export(filename + '(enriched).ttl')
print('') # for moving to the next line in the command line
print('...saving file as "' + filename + '(enriched).ttl"')
print('Enriched ' + str(subj_num) + ' subjects with ' + str(trip_num) + ' triples.')
def get_filename(path):
"""Getter of file name from path.
"""
full_file_name = path.split('/')[-1]
file_name = full_file_name.split('.')[0]
return file_name
def get_resource(row):
"""Getter of resource from STO query result row.
"""
resource_split_list = row.split('/')
resource = '/'.join(resource_split_list[4:])
return resource
def set_blacklist(ont, blacklist):
"""Setter of ontology black list.
Here all predicates that should be excluded while fetching data from DPpedia are specified.
"""
for url in blacklist:
ont.blacklist.add(url)
return ont
def set_prefixes(ont, prefixes):
"""Setter of ontology prefixes.
Here all custom prefixes are specified.
"""
for prefix in prefixes:
ont.set_prefix(prefix["prfx"], prefix["uri"])
return ont
if __name__ == "__main__":
if len(sys.argv) == 2:
main(sys.argv[1])
else:
print('ERROR: wrong number of arguments.')
| """Module for enlarging existing ontology based on knowledge from DBpedia.
"""
from landscape import Ontology, DBpedia
import json
import sys
def main(options_path):
"""Main function.
Describes abstract algorithm of the ontology enriching.
"""
options = json.load(open(options_path))
ont = Ontology(options["input_file"])
ont_query = """
PREFIX sto: <https://w3id.org/i40/sto#>
SELECT ?sub ?dbPediaResource WHERE {
?sub sto:hasDBpediaResource ?dbPediaResource .
}
"""
print('...starting enrichment process')
trip_num = 0
subj_num = 0
for row in ont.query(ont_query):
subject = row[0]
resource = get_resource(row[1])
whitelist = ', '.join(options["whitelist"])
dbpedia_query = 'SELECT ?pred ?obj WHERE {' + \
'<http://dbpedia.org/resource/' + resource + '> ?pred ?obj . '
if whitelist:
dbpedia_query += 'FILTER(?pred IN (' + whitelist + '))'
dbpedia_query += '}'
dbpedia_result = DBpedia().query(dbpedia_query)
print('/', sep=' ', end='', flush=True)
ont = set_blacklist(ont, options["blacklist"])
logs = ont.enrich(subject, dbpedia_result)
trip_num += len(logs["trip"])
subj_num += 1
ont = set_prefixes(ont, options["prefixes"])
filename = get_filename(options["input_file"])
ont.export(filename + '(enriched).ttl')
print('') # for moving to the next line in the command line
print('...saving file as "' + filename + '(enriched).ttl"')
print('Enriched ' + str(subj_num) + ' subjects with ' + str(trip_num) + ' triples.')
def get_filename(path):
"""Getter of file name from path.
"""
full_file_name = path.split('/')[-1]
file_name = full_file_name.split('.')[0]
return file_name
def get_resource(row):
"""Getter of resource from STO query result row.
"""
resource_split_list = row.split('/')
resource = '/'.join(resource_split_list[4:])
return resource
def set_blacklist(ont, blacklist):
"""Setter of ontology black list.
Here all predicates that should be excluded while fetching data from DPpedia are specified.
"""
for url in blacklist:
ont.blacklist.add(url)
return ont
def set_prefixes(ont, prefixes):
"""Setter of ontology prefixes.
Here all custom prefixes are specified.
"""
for prefix in prefixes:
ont.set_prefix(prefix["prfx"], prefix["uri"])
return ont
if __name__ == "__main__":
if len(sys.argv) == 2:
main(sys.argv[1])
else:
print('ERROR: wrong number of arguments.')
| en | 0.76299 | Module for enlarging existing ontology based on knowledge from DBpedia. Main function.
Describes abstract algorithm of the ontology enriching. PREFIX sto: <https://w3id.org/i40/sto#>
SELECT ?sub ?dbPediaResource WHERE {
?sub sto:hasDBpediaResource ?dbPediaResource .
} # for moving to the next line in the command line Getter of file name from path. Getter of resource from STO query result row. Setter of ontology black list.
Here all predicates that should be excluded while fetching data from DPpedia are specified. Setter of ontology prefixes.
Here all custom prefixes are specified. | 3.101795 | 3 |
tests/test_calibration.py | OleMartinChristensen/MATS-L1-processsing | 0 | 6618448 | import pytest
from mats_l1_processing.read_and_calibrate_all_files import main
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"
def test_calibrate():
main("testdata/RacFiles_out/", "tests/calibration_data_test.toml")
def test_plot():
main(
"testdata/RacFiles_out/",
"tests/calibration_data_test.toml",
calibrate=False,
plot=True,
)
def test_calibrate_and_plot():
main(
"testdata/RacFiles_out/",
"tests/calibration_data_test.toml",
calibrate=True,
plot=True,
)
def test_readfunctions():
from mats_l1_processing.read_in_functions import readprotocol, read_all_files_in_root_directory
from mats_l1_processing.LindasCalibrationFunctions import read_all_files_in_protocol
directory='testdata/210215OHBLimbImage/'
protocol='protocol_dark_bright_100um_incl_IR3.txt'
read_from="rac"
df_protocol=readprotocol(directory+protocol)
df_bright=df_protocol[df_protocol.DarkBright=='B']
CCDitems=read_all_files_in_protocol(df_bright, read_from,directory)
CCDitems=read_all_files_in_root_directory(read_from,directory)
read_from="imgview"
CCDitems=read_all_files_in_root_directory(read_from,directory)
| import pytest
from mats_l1_processing.read_and_calibrate_all_files import main
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"
def test_calibrate():
main("testdata/RacFiles_out/", "tests/calibration_data_test.toml")
def test_plot():
main(
"testdata/RacFiles_out/",
"tests/calibration_data_test.toml",
calibrate=False,
plot=True,
)
def test_calibrate_and_plot():
main(
"testdata/RacFiles_out/",
"tests/calibration_data_test.toml",
calibrate=True,
plot=True,
)
def test_readfunctions():
from mats_l1_processing.read_in_functions import readprotocol, read_all_files_in_root_directory
from mats_l1_processing.LindasCalibrationFunctions import read_all_files_in_protocol
directory='testdata/210215OHBLimbImage/'
protocol='protocol_dark_bright_100um_incl_IR3.txt'
read_from="rac"
df_protocol=readprotocol(directory+protocol)
df_bright=df_protocol[df_protocol.DarkBright=='B']
CCDitems=read_all_files_in_protocol(df_bright, read_from,directory)
CCDitems=read_all_files_in_root_directory(read_from,directory)
read_from="imgview"
CCDitems=read_all_files_in_root_directory(read_from,directory)
| none | 1 | 1.959078 | 2 | |
cosa/config.py | zsisco/CoSA | 52 | 6618449 | <filename>cosa/config.py
# Copyright 2018 Stanford University
#
# Licensed under the modified BSD (3-clause BSD) License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import defaultdict, namedtuple
import configparser
import itertools
from pathlib import Path
from typing import Callable, Dict, Sequence, NamedTuple
from cosa.analyzers.mcsolver import VerificationStrategy
from cosa.problem import ProblemsManager, VerificationType
# Option/section category names. GENERAL, DEFAULT and PROBLEM mirror the
# section headers used in configparser-based problem files (DEFAULT holds
# per-problem defaults); BUILTIN marks plain argparse options that do not
# appear in problem files.
GENERAL = "GENERAL"
DEFAULT = "DEFAULT"
PROBLEM = "PROBLEM"
BUILTIN = "BUILTIN"
class CosaArgGroup(argparse._ArgumentGroup):
    '''
    Argument group that records CoSA option metadata under a category.

    The group owns no bookkeeping state of its own: the structures tracking
    config-file options, declared defaults and declared types are shared with
    the containing CosaArgParser, so registering an option here is visible
    parser-wide.
    '''

    def __init__(self, container, category, group, *args, **kwargs):
        self._category = category
        # share the parser-wide bookkeeping structures
        self._config_files = container._config_files
        self._defaults = container._defaults
        self._types = container._types
        self._add_long_option = container._add_long_option
        argparse._ArgumentGroup.__init__(self, container,
                                         '.'.join((category, group)),
                                         *args, **kwargs)

    def add_argument(self, *args, default=None, action=None,
                     dest:str=None, is_config_file:bool=False,
                     type:Callable=str, **kwargs):
        '''
        Register an option under this group's category.

        The declared default and type are recorded in the shared tables (only
        on first registration), while argparse itself always sees
        default=None so unset arguments can be recognized later.
        '''
        opt = self._add_long_option(self._category, args, dest)
        if is_config_file:
            self._config_files.add(opt)
        dest = opt if dest is None else dest
        # record the declared default/type only if not already registered
        self._defaults.setdefault(opt, default)
        self._types.setdefault(opt, bool if action == 'store_true' else type)
        # argparse's default stays None so that unset arguments are identifiable
        super().add_argument(*args, default=None, dest=dest, action=action, **kwargs)

    def add_mutually_exclusive_group(self, **kwargs):
        '''Create a mutually exclusive group inheriting this group's category.'''
        mux = CosaMutuallyExclusiveGroup(self, self._category, **kwargs)
        self._mutually_exclusive_groups.append(mux)
        return mux
class CosaMutuallyExclusiveGroup(argparse._MutuallyExclusiveGroup):
    '''
    Mutually exclusive argument group that records CoSA option metadata.

    Like CosaArgGroup, it shares the option bookkeeping structures
    (config-file option set, defaults dict, types dict) with its container,
    so options registered here are visible parser-wide.
    '''
    def __init__(self, container, category, **kwargs):
        self._category = category
        # share the container's bookkeeping structures
        self._config_files = container._config_files
        # fixed: this previously aliased container._config_files (a set),
        # which would break default bookkeeping (sets do not support item
        # assignment) if argparse's _ArgumentGroup.__init__ did not happen
        # to re-share container._defaults afterwards
        self._defaults = container._defaults
        self._types = container._types
        self._add_long_option = container._add_long_option
        argparse._MutuallyExclusiveGroup.__init__(self, container, **kwargs)
        # placement of this line important -- need to override the None title
        if hasattr(container, 'title'):
            self.title = container.title

    def add_argument(self, *args, default=None, action=None,
                     dest:str=None, is_config_file:bool=False,
                     type:Callable=str, **kwargs):
        '''
        Register an option under this group's category.

        The declared default and type are recorded in the shared tables,
        while argparse itself always receives default=None so unset
        arguments can be recognized later.
        '''
        option_name = self._add_long_option(self._category, args, dest)
        if is_config_file:
            self._config_files.add(option_name)
        if dest is None:
            dest = option_name
        # save the default (if not already set)
        if option_name not in self._defaults:
            self._defaults[option_name] = default
        if option_name not in self._types:
            if action == 'store_true':
                self._types[option_name] = bool
            else:
                self._types[option_name] = type
        # always set argparse's default to None so that we can identify
        # unset arguments
        super().add_argument(*args, default=None, dest=dest, action=action, **kwargs)
class CosaArgParser(argparse.ArgumentParser):
'''
The CosaArgParser extends the library class argparse.ArgumentParser to allow
nested namespaces using a '.' syntax. This is especially useful for unifying
the command line interface and the problem file syntax.
'''
def __init__(self, *args, **kwargs):
# a set of namespaced options for problem files
# expecting case-insensitive namespaces 'GENERAL' and 'PROBLEM'
# problem files also support a DEFAULT section which is not
# represented in this structure
self._config_files = set()
self._defaults = dict()
self._types = dict()
self._problem_options = defaultdict(set)
self._problem_type = None
argparse.ArgumentParser.__init__(self, *args, **kwargs)
def add_argument(self, *args, default=None, action=None,
dest:str=None, is_config_file:bool=False,
type:Callable=str, **kwargs):
# adding option with no category group, results in the DEFAULT group
option_name = self._add_long_option(BUILTIN, args, dest)
if dest is None:
dest = option_name
if is_config_file:
self._config_files.add(option_name)
# save the default (if not already set)
if option_name not in self._defaults:
self._defaults[option_name] = default
if option_name not in self._types:
if action == 'store_true':
self._types[option_name] = bool
else:
self._types[option_name] = type
# always set argparse's default to None so that we can identify
# unset arguments
super().add_argument(*args, default=None, dest=dest, action=action, **kwargs)
def add_argument_group(self, group_str:str, *args, **kwargs)->CosaArgGroup:
# no specific category results in BUILTIN
group = CosaArgGroup(self, BUILTIN, group_str, *args, **kwargs)
self._action_groups.append(group)
return group
def add_general_group(self, group_str:str, *args, **kwargs)->CosaArgGroup:
group = CosaArgGroup(self, GENERAL, group_str, *args, **kwargs)
self._action_groups.append(group)
return group
def add_problem_group(self, group_str:str, *args, **kwargs)->CosaArgGroup:
group = CosaArgGroup(self, PROBLEM, group_str, *args, **kwargs)
self._action_groups.append(group)
return group
def _add_long_option(self, namespace:str, options:Sequence[str], dest:str)->str:
'''
Identify the long version of the option
'''
assert len(options) >= 1, "Expecting at least one option"
if dest is not None:
option = dest
else:
long_options = []
for o in options:
if len(o) > 1 and o[:2] == '--':
long_options.append(o[2:].replace('-', '_'))
assert len(long_options) <= 1, "Expecting at most one long option"
option = long_options[0] if long_options else next(iter(options))
if namespace and namespace != BUILTIN:
assert option not in itertools.chain(*self._problem_options.values())
self._problem_options[namespace].add(option)
return option
def set_defaults(self, **kwargs):
for k, v in kwargs.items():
self._defaults[k] = v
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
current_title = ''
action_groups = self._action_groups[1:]
action_groups.sort(key=lambda x: x.title)
for action_group in itertools.chain([self._action_groups[0]], action_groups):
if '.' in action_group.title:
title, subtitle = action_group.title.split('.')
if current_title != title:
if current_title:
formatter.end_section()
formatter.start_section(title.upper())
current_title = title
formatter.start_section(subtitle)
else:
formatter.start_section(action_group.title.upper())
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def add_mutually_exclusive_group(self, **kwargs)->CosaMutuallyExclusiveGroup:
group = CosaMutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def get_default_problem_manager(self, **kwargs)->ProblemsManager:
'''
Returns the problem manager with default general options, which can be overriden
with the keyword arguments.
See the options.py file for the possible keywords
where dashes in long option names are replaced by underscores
(and leading dashes are removed)
e.g. --trace-prefix is trace_prefix
'''
unknown_gen_options = kwargs.keys() - self._problem_options[GENERAL]
if unknown_gen_options:
raise RuntimeError("Expecting only general options in section"
"but got {}.\nGeneral options include:\n"
"{}".format(unknown_gen_options,
'\n\t'.join(self._problem_options[GENERAL])))
general_options = dict()
for option in self._problem_options[GENERAL]:
if option in kwargs:
general_options[option] = kwargs[option]
else:
general_options[option] = self._defaults[option]
problem_defaults = {o:self._defaults[o] for o in self._problem_options[PROBLEM]}
# convert defaults to expected type
for k, v in problem_defaults.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
problem_defaults[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
return ProblemsManager(Path("./"), general_options, problem_defaults)
def parse_args(self)->ProblemsManager:
command_line_args = vars(super().parse_args())
config_files = []
for config_file in self._config_files:
if command_line_args[config_file] is not None:
config_files.append(command_line_args[config_file])
if config_files:
assert len(config_files) == 1, "Expecting only a single configuration file"
problems_manager = self.read_problem_file(command_line_args[config_file], _command_line_args=command_line_args)
else:
# get general options
general_options = dict()
for option in self._problem_options[GENERAL]:
if command_line_args[option] is not None:
general_options[option] = command_line_args[option]
else:
general_options[option] = self._defaults[option]
# convert options to expected type
for k, v in general_options.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
general_options[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
# create default options for only problem fields
problem_defaults = {o:self._defaults[o] for o in self._problem_options[PROBLEM]}
# convert defaults to expected type
for k, v in problem_defaults.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
problem_defaults[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
problems_manager = ProblemsManager(Path("./"), general_options, problem_defaults)
# generate a single problem
single_problem_options = dict()
for option in self._problem_options[PROBLEM]:
if command_line_args[option] is not None:
single_problem_options[option] = command_line_args[option]
else:
single_problem_options[option] = self._defaults[option]
for k, v in single_problem_options.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
single_problem_options[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
# calling with frozen=False keeps the problem mutable for now (might not to override options)
problems_manager.add_problem(**single_problem_options, frozen=False)
# run any manual option handling
# modifies the problems_manager in-place
self._option_handling(problems_manager)
# freeze the problems
# now all existing problems are (immutable) namedtuples
# note, you can still add new problems, but they must be frozen (e.g. immutable)
problems_manager.freeze()
return problems_manager
def parse_config(self, config_file:Path)->configparser.ConfigParser:
parser = configparser.ConfigParser()
parser.optionxform=str
with config_file.open("r") as f:
parser.read_string(f.read())
return parser
def read_problem_file(self, config_file:str,
_command_line_args:Dict[str, str]=dict(),
**kwargs)->ProblemsManager:
'''
Reads a problem file and then overrides defaults with command line options
if any were provided.
Users should not pass _command_line_args directly, that is for internal use only.
Instead, pass options through keyword arguments.
'''
config_filepath = Path(config_file)
config_args = self.parse_config(config_filepath)
general_options = dict(config_args[GENERAL])
# populate command line arguments with keyword arguments if provided
if kwargs:
# check that all options are valid
unknown_kwargs = (kwargs.keys() - self._problem_options[GENERAL]) - \
self._problem_options[PROBLEM]
if unknown_kwargs:
raise RuntimeError("Expected only valid CoSA options as "
"keyword arguments but got {}.\nPlease select "
"from:\n\t{}\n\nValid options can be also be viewed "
"with --help".format(unknown_kwargs,
'\n\t'.join(
sorted(itertools.chain(
self._problem_options[GENERAL],
self._problem_options[PROBLEM])))))
# command line arguments should contain everything or nothing
# populate with none if we need to override with keyword arguments
if not _command_line_args:
for option in itertools.chain(self._problem_options[GENERAL],
self._problem_options[PROBLEM]):
_command_line_args[option] = None
for option, v in kwargs.items():
_command_line_args[option] = v
# remove default options
# -- configparser automatically populates defaults
# in every section, which we don't want
for option in config_args[DEFAULT]:
general_options.pop(option, None)
unknown_gen_options = general_options.keys() - self._problem_options[GENERAL]
if unknown_gen_options:
raise RuntimeError("Expecting only general options in section"
" [GENERAL] but got {} in {}".format(unknown_gen_options, config_file))
# populate with general defaults
# as an optimization, don't even check _command_line_args if it's empty
if _command_line_args:
for option in self._problem_options[GENERAL]:
if option not in general_options or general_options[option] is None:
if _command_line_args[option] is not None:
general_options[option] = _command_line_args[option]
else:
general_options[option] = self._defaults[option]
else:
for option in self._problem_options[GENERAL]:
if option not in general_options or general_options[option] is None:
general_options[option] = self._defaults[option]
problem_defaults = {o:self._defaults[o] for o in self._problem_options[PROBLEM]}
default_options = dict(config_args[DEFAULT])
unknown_default_options = default_options.keys() - self._problem_options[PROBLEM]
if unknown_default_options:
raise RuntimeError("Expecting only problem options in section"
" [DEFAULT] but got {} in {}".format(unknown_default_options, config_file))
for option, value in default_options.items():
# override the defaults with problem defaults
problem_defaults[option] = value
# convert defaults to expected type
for k, v in problem_defaults.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
problem_defaults[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
# convert options to expected type
for k, v in general_options.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
# handle the 'False' case, note that bool('False') evaluates to True
if self._types[k] == bool and isinstance(v, str):
if v == 'True':
general_options[k] = True
elif v == 'False':
general_options[k] = False
else:
raise RuntimeError("Expecting True or False as an option for {} but got {}".format(k, v))
else:
general_options[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
# Generate the problems_manager and populate it
problems_manager = ProblemsManager(config_filepath.parent, general_options, problem_defaults)
# Recall priority order
# command line > problem option > problem defaults > defaults
for section in config_args:
if section == DEFAULT or section == GENERAL:
continue
problem_file_options = dict(config_args[section])
unknown_problem_file_options = problem_file_options.keys() - self._problem_options[PROBLEM]
if unknown_problem_file_options:
raise RuntimeError("Expecting only problem options "
"in problem section but got {} in {}".format(unknown_problem_file_options, config_file))
# The [HEADER] style sections become problem names
problem_file_options['name'] = section
if _command_line_args:
for arg in self._problem_options[PROBLEM]:
if _command_line_args[arg] is not None:
# overwrite config file with command line arguments
problem_file_options[arg] = _command_line_args[arg]
# if the option has still not been set, find a default
# problem defaults were already given priority
if arg not in problem_file_options:
problem_file_options[arg] = problem_defaults[arg]
else:
# set defaults if not explicitly set in this particular problem
for arg in self._problem_options[PROBLEM]:
if arg not in problem_file_options:
problem_file_options[arg] = problem_defaults[arg]
for k, v in problem_file_options.items():
if v is not None:
assert k in self._types, "Expecting to have (at least default) type info for every option"
try:
# handle the 'False' case, note that bool('False') evaluates to True
if self._types[k] == bool and isinstance(v, str):
if v == 'True':
problem_file_options[k] = True
elif v == 'False':
problem_file_options[k] = False
else:
raise RuntimeError("Expecting True or False as an option for {} but got {}".format(k, v))
else:
problem_file_options[k] = self._types[k](v)
except ValueError as e:
raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
try:
# using frozen=False keeps the problems mutable for now
problems_manager.add_problem(**problem_file_options, frozen=False)
except TypeError as e:
if len(e.args) > 0:
message = e.args[0]
if "unexpected keyword argument" in message:
unknown_option = message[message.find("argument ")+9:]
raise RuntimeError("Unknown option in problem file: {}".format(unknown_option))
else:
raise e
return problems_manager
def _option_handling(self, problems_manager: ProblemsManager) -> ProblemsManager:
    '''
    Do any necessary manual option handling.

    Some options implicitly set other options (e.g. --synchronize implies
    an abstracted clock), which cannot be expressed through argparse alone.
    This method should be (carefully) modified whenever a new option is
    added that is not completely independent of other options.

    Mutates problems_manager in place and returns it for convenience.
    (The return annotation previously said ``None`` although the method
    has always returned the manager; the annotation now matches.)
    '''
    general_config = problems_manager.general_config

    # Handle the case where no properties are given, i.e. embedded
    # assertions are expected in the model file. command_line is True
    # when no problem file was used (no argument for --problems).
    command_line = general_config.problems is None
    if command_line and len(problems_manager.problems) == 1:
        pbm = problems_manager.problems[0]
        if pbm.properties is None and \
           pbm.verification not in {VerificationType.EQUIVALENCE, VerificationType.SIMULATION}:
            # use the provided (command line) options as defaults
            problems_manager.set_defaults(pbm)
            # remove the placeholder problem
            problems_manager._problems = []
            problems_manager._problems_status = dict()

    # synchronizing clock automatically abstracts it
    if general_config.synchronize:
        general_config.abstract_clock = True

    # iterate through problems and fix options
    for problem in problems_manager.problems:
        # parametric model checking uses strategy BWD; the strategy must
        # be set so that traces are interpreted correctly
        if problem.verification == VerificationType.PARAMETRIC:
            problem.strategy = VerificationStrategy.BWD

    return problems_manager
| <filename>cosa/config.py
# Copyright 2018 Stanford University
#
# Licensed under the modified BSD (3-clause BSD) License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import defaultdict, namedtuple
import configparser
import itertools
from pathlib import Path
from typing import Callable, Dict, Sequence, NamedTuple
from cosa.analyzers.mcsolver import VerificationStrategy
from cosa.problem import ProblemsManager, VerificationType
# Section names used in problem files, doubling as option namespaces.
GENERAL = "GENERAL"  # [GENERAL] section: run-wide options
DEFAULT = "DEFAULT"  # [DEFAULT] section: per-problem option defaults
PROBLEM = "PROBLEM"  # namespace for per-problem options
BUILTIN = "BUILTIN"  # namespace for options registered without a category
class CosaArgGroup(argparse._ArgumentGroup):
    '''
    An argparse argument group that tags every option with a category
    (GENERAL/PROBLEM/BUILTIN) and records the option's default and type
    on the owning CosaArgParser, so options can be mirrored in problem files.
    '''
    def __init__(self, container, category, group, *args, **kwargs):
        self._category = category
        # share the parser-level bookkeeping structures (aliases, not copies)
        self._config_files = container._config_files
        self._defaults = container._defaults
        self._types = container._types
        self._add_long_option = container._add_long_option
        # the group title encodes "<category>.<group>" for format_help
        argparse._ArgumentGroup.__init__(self, container, '%s.%s'%(category, group), *args, **kwargs)

    def add_argument(self, *args, default=None, action=None,
                     dest:str=None, is_config_file:bool=False,
                     type:Callable=str, **kwargs):
        '''
        Register the option under its long name, then defer to argparse.

        The real default is stored on the parser and argparse's default is
        forced to None so that unset arguments can be recognized later.
        '''
        option_name = self._add_long_option(self._category, args, dest)
        if is_config_file:
            # this option names a problem/config file (e.g. --problems)
            self._config_files.add(option_name)
        if dest is None:
            dest = option_name
        # save the default (if not already set)
        if option_name not in self._defaults:
            self._defaults[option_name] = default
        if option_name not in self._types:
            # store_true flags are booleans regardless of the type callable
            if action == 'store_true':
                self._types[option_name] = bool
            else:
                self._types[option_name] = type
        # always set argparse's default to None so that we can identify
        # unset arguments
        super().add_argument(*args, default=None, dest=dest, action=action, **kwargs)

    def add_mutually_exclusive_group(self, **kwargs):
        # propagate this group's category into the nested exclusive group
        group = CosaMutuallyExclusiveGroup(self, self._category, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group
class CosaMutuallyExclusiveGroup(argparse._MutuallyExclusiveGroup):
    '''
    A mutually exclusive argparse group that mirrors CosaArgGroup's
    bookkeeping: every added option is registered (with its category,
    default and type) on the owning CosaArgParser.
    '''
    def __init__(self, container, category, **kwargs):
        self._category = category
        # share the parser-level bookkeeping structures (aliases, not copies)
        self._config_files = container._config_files
        # BUG FIX: this previously aliased container._config_files, so any
        # default registered through a mutually exclusive group was written
        # into the config-file *set* (a TypeError at first add_argument)
        # instead of the defaults dict.
        self._defaults = container._defaults
        self._types = container._types
        self._add_long_option = container._add_long_option
        argparse._MutuallyExclusiveGroup.__init__(self, container, **kwargs)
        # placement of this line important -- need to override the None title
        if hasattr(container, 'title'):
            self.title = container.title

    def add_argument(self, *args, default=None, action=None,
                     dest:str=None, is_config_file:bool=False,
                     type:Callable=str, **kwargs):
        '''
        Register the option under its long name, then defer to argparse.

        The real default is stored on the parser and argparse's default is
        forced to None so that unset arguments can be recognized later.
        '''
        option_name = self._add_long_option(self._category, args, dest)
        if is_config_file:
            self._config_files.add(option_name)
        if dest is None:
            dest = option_name
        # save the default (if not already set)
        if option_name not in self._defaults:
            self._defaults[option_name] = default
        if option_name not in self._types:
            # store_true flags are booleans regardless of the type callable
            if action == 'store_true':
                self._types[option_name] = bool
            else:
                self._types[option_name] = type
        # argparse's default stays None so unset arguments are identifiable
        super().add_argument(*args, default=None, dest=dest, action=action, **kwargs)
class CosaArgParser(argparse.ArgumentParser):
    '''
    The CosaArgParser extends the library class argparse.ArgumentParser to allow
    nested namespaces using a '.' syntax. This is especially useful for unifying
    the command line interface and the problem file syntax.
    '''
    def __init__(self, *args, **kwargs):
        # a set of namespaced options for problem files
        # expecting case-insensitive namespaces 'GENERAL' and 'PROBLEM'
        # problem files also support a DEFAULT section which is not
        # represented in this structure
        self._config_files = set()    # names of options that point at a problem file
        self._defaults = dict()       # option name -> default value
        self._types = dict()          # option name -> conversion callable
        self._problem_options = defaultdict(set)  # category -> set of option names
        self._problem_type = None
        argparse.ArgumentParser.__init__(self, *args, **kwargs)
def add_argument(self, *args, default=None, action=None,
                 dest:str=None, is_config_file:bool=False,
                 type:Callable=str, **kwargs):
    '''
    Register an option directly on the parser (BUILTIN namespace).

    The real default is recorded in self._defaults and the conversion
    callable in self._types; argparse itself is handed default=None so
    that options the user did not set can be recognized later.
    '''
    # adding option with no category group, results in the DEFAULT group
    option_name = self._add_long_option(BUILTIN, args, dest)
    if dest is None:
        dest = option_name
    if is_config_file:
        # this option names a problem/config file (e.g. --problems)
        self._config_files.add(option_name)
    # save the default (if not already set)
    if option_name not in self._defaults:
        self._defaults[option_name] = default
    if option_name not in self._types:
        # store_true flags are booleans regardless of the type callable
        if action == 'store_true':
            self._types[option_name] = bool
        else:
            self._types[option_name] = type
    # always set argparse's default to None so that we can identify
    # unset arguments
    super().add_argument(*args, default=None, dest=dest, action=action, **kwargs)
def add_argument_group(self, group_str:str, *args, **kwargs)->CosaArgGroup:
    """Create an uncategorized (BUILTIN) option group and register it."""
    return self._new_group(BUILTIN, group_str, *args, **kwargs)

def add_general_group(self, group_str:str, *args, **kwargs)->CosaArgGroup:
    """Create an option group in the GENERAL (run-wide) category."""
    return self._new_group(GENERAL, group_str, *args, **kwargs)

def add_problem_group(self, group_str:str, *args, **kwargs)->CosaArgGroup:
    """Create an option group in the per-PROBLEM category."""
    return self._new_group(PROBLEM, group_str, *args, **kwargs)

def _new_group(self, category, group_str, *args, **kwargs)->CosaArgGroup:
    """Shared factory: build a CosaArgGroup and track it for help output."""
    new_group = CosaArgGroup(self, category, group_str, *args, **kwargs)
    self._action_groups.append(new_group)
    return new_group
def _add_long_option(self, namespace:str, options:Sequence[str], dest:str)->str:
    '''
    Determine the canonical (long) name of an option and, unless it is a
    BUILTIN option, register it under the given namespace.
    '''
    assert len(options) >= 1, "Expecting at least one option"
    if dest is not None:
        option = dest
    else:
        # strip the leading '--' and normalize dashes to underscores
        longs = [o[2:].replace('-', '_') for o in options
                 if len(o) > 1 and o[:2] == '--']
        assert len(longs) <= 1, "Expecting at most one long option"
        option = longs[0] if longs else next(iter(options))
    if namespace and namespace != BUILTIN:
        # an option name may belong to at most one namespace
        assert option not in itertools.chain(*self._problem_options.values())
        self._problem_options[namespace].add(option)
    return option
def set_defaults(self, **kwargs):
    """Record (or overwrite) default values for the given option names."""
    self._defaults.update(kwargs)
def format_help(self):
    '''
    Build the help message with option groups organized by category.

    Group titles encode "<category>.<subtitle>"; sorting the groups by
    title clusters each category's groups together so they render as an
    upper-cased section containing nested subsections.
    '''
    formatter = self._get_formatter()
    # usage
    formatter.add_usage(self.usage, self._actions,
                        self._mutually_exclusive_groups)
    # description
    formatter.add_text(self.description)
    # positionals, optionals and user-defined groups
    current_title = ''
    # keep the first (positional) group in place, sort the rest by title
    action_groups = self._action_groups[1:]
    action_groups.sort(key=lambda x: x.title)
    for action_group in itertools.chain([self._action_groups[0]], action_groups):
        if '.' in action_group.title:
            title, subtitle = action_group.title.split('.')
            if current_title != title:
                # close the previous category section before opening a new one
                if current_title:
                    formatter.end_section()
                formatter.start_section(title.upper())
                current_title = title
            formatter.start_section(subtitle)
        else:
            formatter.start_section(action_group.title.upper())
        formatter.add_text(action_group.description)
        formatter.add_arguments(action_group._group_actions)
        formatter.end_section()
    # NOTE(review): the last category section opened above is never
    # explicitly closed -- argparse's formatter appears to tolerate this;
    # confirm help indentation looks right for the final section.
    # epilog
    formatter.add_text(self.epilog)
    # determine help from format above
    return formatter.format_help()
def add_mutually_exclusive_group(self, **kwargs)->CosaMutuallyExclusiveGroup:
    """Create a mutually exclusive group that records option metadata."""
    mx_group = CosaMutuallyExclusiveGroup(self, **kwargs)
    self._mutually_exclusive_groups.append(mx_group)
    return mx_group
def get_default_problem_manager(self, **kwargs)->ProblemsManager:
    '''
    Returns the problem manager with default general options, which can be overriden
    with the keyword arguments.

    See the options.py file for the possible keywords
    where dashes in long option names are replaced by underscores
    (and leading dashes are removed)
    e.g. --trace-prefix is trace_prefix
    '''
    unknown_gen_options = kwargs.keys() - self._problem_options[GENERAL]
    if unknown_gen_options:
        # BUG FIX: the two adjacent string literals previously concatenated
        # to "...options in sectionbut got..."; the message now reads correctly.
        raise RuntimeError("Expecting only general options "
                           "but got {}.\nGeneral options include:\n"
                           "{}".format(unknown_gen_options,
                                       '\n\t'.join(self._problem_options[GENERAL])))
    # explicit keyword arguments win over recorded defaults
    general_options = dict()
    for option in self._problem_options[GENERAL]:
        if option in kwargs:
            general_options[option] = kwargs[option]
        else:
            general_options[option] = self._defaults[option]
    problem_defaults = {o:self._defaults[o] for o in self._problem_options[PROBLEM]}
    # convert defaults to expected type
    for k, v in problem_defaults.items():
        if v is not None:
            assert k in self._types, "Expecting to have (at least default) type info for every option"
            try:
                problem_defaults[k] = self._types[k](v)
            except ValueError:
                raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
    return ProblemsManager(Path("./"), general_options, problem_defaults)
def parse_args(self)->ProblemsManager:
    '''
    Parse command-line arguments (and, if supplied, a problem file) into a
    frozen ProblemsManager.

    Priority order: command line > problem option > problem defaults > defaults.
    '''
    command_line_args = vars(super().parse_args())
    # collect the values of any registered config-file options that were set
    config_files = []
    for config_file in self._config_files:
        if command_line_args[config_file] is not None:
            config_files.append(command_line_args[config_file])
    if config_files:
        assert len(config_files) == 1, "Expecting only a single configuration file"
        # BUG FIX: this previously indexed command_line_args with the loop
        # variable left over from the loop above, which is only correct when
        # the *last* iterated config-file option happens to be the one set.
        problems_manager = self.read_problem_file(config_files[0],
                                                  _command_line_args=command_line_args)
    else:
        # no problem file: build general options from the command line
        general_options = dict()
        for option in self._problem_options[GENERAL]:
            if command_line_args[option] is not None:
                general_options[option] = command_line_args[option]
            else:
                general_options[option] = self._defaults[option]
        self._convert_types(general_options)
        # create default options for only problem fields
        problem_defaults = {o:self._defaults[o] for o in self._problem_options[PROBLEM]}
        self._convert_types(problem_defaults)
        problems_manager = ProblemsManager(Path("./"), general_options, problem_defaults)
        # generate a single problem from the command-line options
        single_problem_options = dict()
        for option in self._problem_options[PROBLEM]:
            if command_line_args[option] is not None:
                single_problem_options[option] = command_line_args[option]
            else:
                single_problem_options[option] = self._defaults[option]
        self._convert_types(single_problem_options)
        # calling with frozen=False keeps the problem mutable for now
        # (option handling below may still override options)
        problems_manager.add_problem(**single_problem_options, frozen=False)
    # run any manual option handling; modifies the problems_manager in-place
    self._option_handling(problems_manager)
    # freeze the problems: all existing problems become (immutable) namedtuples.
    # note, you can still add new problems, but they must be frozen
    problems_manager.freeze()
    return problems_manager

def _convert_types(self, options:Dict[str, str])->None:
    '''
    Convert each non-None option value in-place using its registered type
    callable. Raises ValueError with a descriptive message on failure.
    '''
    for k, v in options.items():
        if v is not None:
            assert k in self._types, "Expecting to have (at least default) type info for every option"
            try:
                options[k] = self._types[k](v)
            except ValueError:
                raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
def parse_config(self, config_file:Path)->configparser.ConfigParser:
    """Read an INI-style problem file, preserving option-name case."""
    ini_parser = configparser.ConfigParser()
    # keep option names case-sensitive (configparser lower-cases by default)
    ini_parser.optionxform = str
    ini_parser.read_string(config_file.read_text())
    return ini_parser
def read_problem_file(self, config_file:str,
                      _command_line_args:Dict[str, str]=None,
                      **kwargs)->ProblemsManager:
    '''
    Reads a problem file and then overrides defaults with command line options
    if any were provided.

    Users should not pass _command_line_args directly, that is for internal use only.
    Instead, pass options through keyword arguments.

    Priority order: command line > problem option > problem defaults > defaults.
    '''
    # BUG FIX: _command_line_args previously defaulted to a *shared* dict()
    # which was mutated below whenever kwargs were supplied, leaking option
    # state across calls. A None sentinel gives each call a fresh dict.
    if _command_line_args is None:
        _command_line_args = dict()
    config_filepath = Path(config_file)
    config_args = self.parse_config(config_filepath)
    # raw [GENERAL] section from the problem file
    general_options = dict(config_args[GENERAL])
    # populate command line arguments with keyword arguments if provided
    if kwargs:
        # check that all options are valid
        unknown_kwargs = (kwargs.keys() - self._problem_options[GENERAL]) - \
                         self._problem_options[PROBLEM]
        if unknown_kwargs:
            raise RuntimeError("Expected only valid CoSA options as "
                               "keyword arguments but got {}.\nPlease select "
                               "from:\n\t{}\n\nValid options can be also be viewed "
                               "with --help".format(unknown_kwargs,
                                                    '\n\t'.join(
                                                        sorted(itertools.chain(
                                                            self._problem_options[GENERAL],
                                                            self._problem_options[PROBLEM])))))
        # command line arguments should contain everything or nothing
        # populate with none if we need to override with keyword arguments
        if not _command_line_args:
            for option in itertools.chain(self._problem_options[GENERAL],
                                          self._problem_options[PROBLEM]):
                _command_line_args[option] = None
        for option, v in kwargs.items():
            _command_line_args[option] = v
    # remove default options
    # -- configparser automatically populates defaults
    #    in every section, which we don't want
    for option in config_args[DEFAULT]:
        general_options.pop(option, None)
    unknown_gen_options = general_options.keys() - self._problem_options[GENERAL]
    if unknown_gen_options:
        raise RuntimeError("Expecting only general options in section"
                           " [GENERAL] but got {} in {}".format(unknown_gen_options, config_file))
    # populate with general defaults
    # as an optimization, don't even check _command_line_args if it's empty
    if _command_line_args:
        for option in self._problem_options[GENERAL]:
            if option not in general_options or general_options[option] is None:
                if _command_line_args[option] is not None:
                    general_options[option] = _command_line_args[option]
                else:
                    general_options[option] = self._defaults[option]
    else:
        for option in self._problem_options[GENERAL]:
            if option not in general_options or general_options[option] is None:
                general_options[option] = self._defaults[option]
    # per-problem defaults: built-in defaults overridden by the [DEFAULT] section
    problem_defaults = {o:self._defaults[o] for o in self._problem_options[PROBLEM]}
    default_options = dict(config_args[DEFAULT])
    unknown_default_options = default_options.keys() - self._problem_options[PROBLEM]
    if unknown_default_options:
        raise RuntimeError("Expecting only problem options in section"
                           " [DEFAULT] but got {} in {}".format(unknown_default_options, config_file))
    for option, value in default_options.items():
        # override the defaults with problem defaults
        problem_defaults[option] = value
    # convert defaults to expected type
    for k, v in problem_defaults.items():
        if v is not None:
            assert k in self._types, "Expecting to have (at least default) type info for every option"
            try:
                problem_defaults[k] = self._types[k](v)
            except ValueError:
                raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
    # convert options to expected type
    for k, v in general_options.items():
        if v is not None:
            assert k in self._types, "Expecting to have (at least default) type info for every option"
            try:
                # handle the 'False' case, note that bool('False') evaluates to True
                if self._types[k] == bool and isinstance(v, str):
                    if v == 'True':
                        general_options[k] = True
                    elif v == 'False':
                        general_options[k] = False
                    else:
                        raise RuntimeError("Expecting True or False as an option for {} but got {}".format(k, v))
                else:
                    general_options[k] = self._types[k](v)
            except ValueError:
                raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
    # Generate the problems_manager and populate it
    problems_manager = ProblemsManager(config_filepath.parent, general_options, problem_defaults)
    # Recall priority order
    # command line > problem option > problem defaults > defaults
    for section in config_args:
        # [DEFAULT] and [GENERAL] are not problems
        if section == DEFAULT or section == GENERAL:
            continue
        problem_file_options = dict(config_args[section])
        unknown_problem_file_options = problem_file_options.keys() - self._problem_options[PROBLEM]
        if unknown_problem_file_options:
            raise RuntimeError("Expecting only problem options "
                               "in problem section but got {} in {}".format(unknown_problem_file_options, config_file))
        # The [HEADER] style sections become problem names
        problem_file_options['name'] = section
        if _command_line_args:
            for arg in self._problem_options[PROBLEM]:
                if _command_line_args[arg] is not None:
                    # overwrite config file with command line arguments
                    problem_file_options[arg] = _command_line_args[arg]
                # if the option has still not been set, find a default
                # problem defaults were already given priority
                if arg not in problem_file_options:
                    problem_file_options[arg] = problem_defaults[arg]
        else:
            # set defaults if not explicitly set in this particular problem
            for arg in self._problem_options[PROBLEM]:
                if arg not in problem_file_options:
                    problem_file_options[arg] = problem_defaults[arg]
        for k, v in problem_file_options.items():
            if v is not None:
                assert k in self._types, "Expecting to have (at least default) type info for every option"
                try:
                    # handle the 'False' case, note that bool('False') evaluates to True
                    if self._types[k] == bool and isinstance(v, str):
                        if v == 'True':
                            problem_file_options[k] = True
                        elif v == 'False':
                            problem_file_options[k] = False
                        else:
                            raise RuntimeError("Expecting True or False as an option for {} but got {}".format(k, v))
                    else:
                        problem_file_options[k] = self._types[k](v)
                except ValueError:
                    raise ValueError("Cannot convert '{}' to expected type {}".format(v, self._types[k]))
        try:
            # using frozen=False keeps the problems mutable for now
            problems_manager.add_problem(**problem_file_options, frozen=False)
        except TypeError as e:
            if len(e.args) > 0:
                message = e.args[0]
                if "unexpected keyword argument" in message:
                    # extract the offending option name from the TypeError message
                    unknown_option = message[message.find("argument ")+9:]
                    raise RuntimeError("Unknown option in problem file: {}".format(unknown_option))
                else:
                    # NOTE(review): indentation reconstructed from a source
                    # with stripped whitespace; this else re-raises TypeErrors
                    # whose message we cannot parse -- confirm against upstream.
                    raise e
    return problems_manager
def _option_handling(self, problems_manager: ProblemsManager) -> ProblemsManager:
    '''
    Do any necessary manual option handling.

    Some options implicitly set other options (e.g. --synchronize implies
    an abstracted clock), which cannot be expressed through argparse alone.
    This method should be (carefully) modified whenever a new option is
    added that is not completely independent of other options.

    Mutates problems_manager in place and returns it for convenience.
    (The return annotation previously said ``None`` although the method
    has always returned the manager; the annotation now matches.)
    '''
    general_config = problems_manager.general_config

    # Handle the case where no properties are given, i.e. embedded
    # assertions are expected in the model file. command_line is True
    # when no problem file was used (no argument for --problems).
    command_line = general_config.problems is None
    if command_line and len(problems_manager.problems) == 1:
        pbm = problems_manager.problems[0]
        if pbm.properties is None and \
           pbm.verification not in {VerificationType.EQUIVALENCE, VerificationType.SIMULATION}:
            # use the provided (command line) options as defaults
            problems_manager.set_defaults(pbm)
            # remove the placeholder problem
            problems_manager._problems = []
            problems_manager._problems_status = dict()

    # synchronizing clock automatically abstracts it
    if general_config.synchronize:
        general_config.abstract_clock = True

    # iterate through problems and fix options
    for problem in problems_manager.problems:
        # parametric model checking uses strategy BWD; the strategy must
        # be set so that traces are interpreted correctly
        if problem.verification == VerificationType.PARAMETRIC:
            problem.strategy = VerificationStrategy.BWD

    return problems_manager
| en | 0.75471 | # Copyright 2018 Stanford University # # Licensed under the modified BSD (3-clause BSD) License. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # save the default (if not already set) # always set argparse's default to None so that we can identify # unset arguments # placement of this line important -- need to override the None title # save the default (if not already set) The CosaArgParser extends the library class argparse.ArgumentParser to allow nested namespaces using a '.' syntax. This is especially useful for unifying the command line interface and the problem file syntax. # a set of namespaced options for problem files # expecting case-insensitive namespaces 'GENERAL' and 'PROBLEM' # problem files also support a DEFAULT section which is not # represented in this structure # adding option with no category group, results in the DEFAULT group # save the default (if not already set) # always set argparse's default to None so that we can identify # unset arguments # no specific category results in BUILTIN Identify the long version of the option # usage # description # positionals, optionals and user-defined groups # epilog # determine help from format above Returns the problem manager with default general options, which can be overriden with the keyword arguments. See the options.py file for the possible keywords where dashes in long option names are replaced by underscores (and leading dashes are removed) e.g. 
--trace-prefix is trace_prefix # convert defaults to expected type # get general options # convert options to expected type # create default options for only problem fields # convert defaults to expected type # generate a single problem # calling with frozen=False keeps the problem mutable for now (might not to override options) # run any manual option handling # modifies the problems_manager in-place # freeze the problems # now all existing problems are (immutable) namedtuples # note, you can still add new problems, but they must be frozen (e.g. immutable) Reads a problem file and then overrides defaults with command line options if any were provided. Users should not pass _command_line_args directly, that is for internal use only. Instead, pass options through keyword arguments. # populate command line arguments with keyword arguments if provided # check that all options are valid # command line arguments should contain everything or nothing # populate with none if we need to override with keyword arguments # remove default options # -- configparser automatically populates defaults # in every section, which we don't want # populate with general defaults # as an optimization, don't even check _command_line_args if it's empty # override the defaults with problem defaults # convert defaults to expected type # convert options to expected type # handle the 'False' case, note that bool('False') evaluates to True # Generate the problems_manager and populate it # Recall priority order # command line > problem option > problem defaults > defaults # The [HEADER] style sections become problem names # overwrite config file with command line arguments # if the option has still not been set, find a default # problem defaults were already given priority # set defaults if not explicitly set in this particular problem # handle the 'False' case, note that bool('False') evaluates to True # using frozen=False keeps the problems mutable for now Do any necessary manual option 
handling. E.g. if some options implicitly set other options, this needs to happen here This method should be (carefully) modified whenever a new option is added that is not completely independent of other options (e.g. might affect how other options need to be set). # handle case where no properties are given # i.e. expecting embedded assertions in the model file # command_line is True when no problem file was used (e.g. not argument for --problems) # use the provided (command line) options as defaults # remove the problem ################## synchronizing clock automatically abstracts ################### # iterate through problems and fix options ########################### parametric model checking ############################ # parametric model checking uses strategy BWD # need to set the strategy for interpreting traces correctly | 2.254318 | 2 |
setup.py | MasashiSode/MOBO | 30 | 6618450 | from setuptools import setup, find_packages
# Package metadata for the MOBO (multi-objective Bayesian optimization) project.
setup(
    name='mobo',
    version='0.0.2',
    description='Multi-objective bayesian optimization package',
    # long_description=readme,
    author='<NAME>',         # placeholder left by dataset anonymization
    author_email='<EMAIL>',  # placeholder left by dataset anonymization
    url='https://github.com/MasashiSode',
    # license=license,
    packages=find_packages(exclude=['tests', 'docs']),  # ship only library code
)
| from setuptools import setup, find_packages
# Package metadata for the MOBO (multi-objective Bayesian optimization) project.
setup(
    name='mobo',
    version='0.0.2',
    description='Multi-objective bayesian optimization package',
    # long_description=readme,
    author='<NAME>',         # placeholder left by dataset anonymization
    author_email='<EMAIL>',  # placeholder left by dataset anonymization
    url='https://github.com/MasashiSode',
    # license=license,
    packages=find_packages(exclude=['tests', 'docs']),  # ship only library code
)
| en | 0.184306 | # long_description=readme, # license=license, | 0.958156 | 1 |