id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
110570 | # -*- coding: utf-8 -*-
# vim: set ft=python ts=4 sw=4 expandtab:
import datetime
from unittest.mock import MagicMock, patch
import pytest
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from busypie import SECOND, wait
from tzlocal import get_localzone
from vplan.engine.config import DailyJobConfig, SchedulerConfig
from vplan.engine.exception import EngineError
from vplan.engine.scheduler import (
schedule_daily_job,
schedule_immediate_job,
scheduler,
shutdown_scheduler,
start_scheduler,
unschedule_daily_job,
)
from vplan.util import now
# Global flag set by job_function; tests poll it to observe that a scheduled job ran.
INDICATOR = None
# Maximum seconds a test waits for a job to fire.
JOB_DELAY_SEC = 2  # if the job is intermittently unreliable, increase this value slightly


def job_function(message):
    """Job function to be scheduled; it sets INDICATOR so we can tell that the job worked."""
    global INDICATOR  # pylint: disable=global-statement
    INDICATOR = message
def in_future(seconds, tz):
    """Return the wall-clock time ``seconds`` from now in timezone ``tz``, truncated to whole seconds."""
    moment = now(tz=tz) + datetime.timedelta(seconds=seconds)
    # Truncate microseconds so the value is a clean trigger time.
    return moment.time().replace(microsecond=0)
def assert_no_jobs():
    """Assert that there are no jobs"""
    assert len(scheduler().get_jobs()) == 0


def assert_daily_job_definition(job_id, kwargs):
    """Assert that the job definition matches expectations."""
    jobs = scheduler().get_jobs()
    assert len(jobs) > 0  # there should be at least one job
    job = jobs[0]
    assert job.id == job_id
    assert job.func is job_function
    assert job.kwargs == kwargs
    # misfire_grace_time/jitter match the DailyJobConfig(jitter_sec=0, misfire_grace_sec=1)
    # that the lifecycle test installs via the mocked config
    assert job.misfire_grace_time == 1
    assert isinstance(job.trigger, CronTrigger)  # daily jobs are scheduled on a cron trigger
    assert job.trigger.jitter == 0
    # we can't confirm much about the schedule, but the actual execution behavior should prove that's ok
def assert_immediate_job_definition(job_id, kwargs):
    """Assert that the job definition matches expectations."""
    jobs = scheduler().get_jobs()
    assert len(jobs) > 0  # there should be at least one job
    job = jobs[0]
    assert job.id == job_id
    assert job.func is job_function
    assert job.kwargs == kwargs
    assert isinstance(job.trigger, DateTrigger)  # immediate jobs are scheduled on a one-shot date trigger
    # we can't confirm much about the schedule, but the actual execution behavior should prove that's ok
class TestLifecycle:
    """End-to-end exercise of scheduler start/stop plus daily and immediate jobs."""

    @patch("vplan.engine.scheduler.config")
    def test_scheduler_lifecycle(self, config, tmpdir):
        """Start the scheduler, run/update/remove a daily job, run an immediate job, shut down."""
        # Clear any scheduler left over from a previous test
        shutdown_scheduler()
        # this sets things up so the daily job is scheduled a few seconds from now, so we can check that it runs
        tz = get_localzone()
        time1 = in_future(seconds=JOB_DELAY_SEC, tz=tz)
        time2 = in_future(seconds=JOB_DELAY_SEC * 2, tz=tz)
        # Job store lives in a throwaway SQLite file under tmpdir
        database_url = "sqlite+pysqlite:///%s" % tmpdir.join("jobs.sqlite").realpath()
        daily = DailyJobConfig(jitter_sec=0, misfire_grace_sec=1)
        scheduler_config = SchedulerConfig(database_url=database_url, daily_job=daily)
        config.return_value = MagicMock(scheduler=scheduler_config)
        try:
            start_scheduler()
            assert scheduler() is not None
            # Create a daily job and make sure it executes
            schedule_daily_job(
                job_id="test_job", trigger_time=time1, func=job_function, kwargs={"message": "job #1"}, time_zone="%s" % tz
            )
            assert_daily_job_definition("test_job", {"message": "job #1"})
            wait().at_most(JOB_DELAY_SEC, SECOND).until(lambda: INDICATOR == "job #1")
            # Recreate the daily job and make sure updates are reflected
            schedule_daily_job(
                job_id="test_job", trigger_time=time2, func=job_function, kwargs={"message": "job #2"}, time_zone="%s" % tz
            )
            assert_daily_job_definition("test_job", {"message": "job #2"})
            wait().at_most(JOB_DELAY_SEC, SECOND).until(lambda: INDICATOR == "job #2")
            # Remove the daily job and make sure the change takes effect
            unschedule_daily_job(job_id="test_job")
            assert_no_jobs()
            # Confirm that we don't get an error removing a nonexistent job
            unschedule_daily_job(job_id="test_job")
            # Schedule an immediate job, make sure it runs immediately, and make sure it goes away when done
            schedule_immediate_job(job_id="immediate_job", func=job_function, kwargs={"message": "job #3"})
            assert_immediate_job_definition("immediate_job", {"message": "job #3"})
            wait().at_most(JOB_DELAY_SEC, SECOND).until(lambda: INDICATOR == "job #3")
            assert_no_jobs()
        finally:
            shutdown_scheduler()
        # Once shut down, the scheduler accessor must raise rather than return a dead scheduler
        with pytest.raises(EngineError, match=r"Scheduler is not available"):
            scheduler()
| StarcoderdataPython |
# Demonstrate three equivalent ways of transforming a list.
listOriginal = list(range(1, 11))

# 1) map/filter with lambdas: square the even numbers
result = list(map(lambda x: x**2, filter(lambda y: y % 2 == 0, listOriginal)))
print(result)

# 2) the same computation as nested list comprehensions
print([x**2 for x in [y for y in listOriginal if y % 2 == 0]])

# 3) a comprehension over the cartesian product of three ranges
print([x + y + z for x in range(1, 3) for y in range(11, 13) for z in range(101, 103)])
print([x+y+z for x in range(1, 3) for y in range(11, 13) for z in range(101, 103)]) | StarcoderdataPython |
"""Problem 1002 from URI Judge Online: compute the area of a circle."""
# pylint: disable-msg=C0103
# Original code was Python 2 (print statement, eval-style input()); ported to
# Python 3: read the radius explicitly as a float and use the print function.
r = float(input())
PI = 3.14159  # constant mandated by the problem statement
area = pow(r, 2) * PI
print("A={0:.4f}".format(area))
| StarcoderdataPython |
3354126 | """Regression tests from real-world examples"""
import pytest
import pymergevcd.io_manager
@pytest.mark.manual
def test_regression_two_files(record_property):
    """Merge two real-world VCD files into one output file.

    The input files are not publicly available, hence this is a manual
    regression check rather than an automated assertion.
    """
    record_property('req', 'SW-AS-nnn-deadbeef')
    input_files = [
        'tests/Test_Datenpfad2_LE0_A_1_a_expected.vcd',
        'tests/Test_Datenpfad2_LE0_A_1_a_given.vcd',
    ]
    output_file = 'test_regression_two_files.vcd'
    manager = pymergevcd.io_manager.InputOutputManager()
    manager.merge_files(input_files, output_file)
    assert True
| StarcoderdataPython |
3333122 | """
Some useful functions
"""
from __future__ import division
import numpy as np
# A series of variables and dimension names that Salem will understand
valid_names = dict()
valid_names['x_dim'] = ['west_east', 'lon', 'longitude', 'longitudes', 'lons',
                        'xlong', 'xlong_m', 'dimlon', 'x', 'lon_3', 'long',
                        'phony_dim_0', 'eastings', 'easting', 'nlon', 'nlong',
                        'grid_longitude_t']
valid_names['y_dim'] = ['south_north', 'lat', 'latitude', 'latitudes', 'lats',
                        'xlat', 'xlat_m', 'dimlat', 'y','lat_3', 'phony_dim_1',
                        'northings', 'northing', 'nlat', 'grid_latitude_t']
valid_names['z_dim'] = ['levelist','level', 'pressure', 'press', 'zlevel', 'z',
                        'bottom_top']
valid_names['t_dim'] = ['time', 'times', 'xtime']
valid_names['lon_var'] = ['lon', 'longitude', 'longitudes', 'lons', 'long']
valid_names['lat_var'] = ['lat', 'latitude', 'latitudes', 'lats']
valid_names['time_var'] = ['time', 'times']

# Column names for a dataset-description table -- presumably consumed by code
# outside this module; confirm against the caller.
ds_cols = ['feature', 'parameter', 'frequency_interval', 'aggregation_statistic', 'units', 'wrf_standard_name', 'cf_standard_name', 'scale_factor']

# Map each derived parameter name to the raw variables needed to compute it
# (names like T2/RAINNC look like WRF output variables -- TODO confirm).
param_func_mappings = {'temp_at_2': ['T2'],
                       'precip_at_0': ['RAINNC'],
                       'snow_at_0': ['SNOWNC'],
                       'runoff_at_0': ['SFROFF'],
                       'recharge_at_0': ['UDROFF'],
                       'pressure_at_0': ['PSFC'],
                       'shortwave_rad_at_0': ['SWDOWN'],
                       'longwave_rad_at_0': ['GLW'],
                       'heat_flux_at_0': ['GRDFLX'],
                       'relative_humidity_at_2': ['T2', 'Q2', 'PSFC'],
                       'wind_speed_at_2': ['U10', 'V10'],
                       'wind_speed_at_10': ['U10', 'V10'],
                       # 'reference_et_at_0': ['T2', 'Q2', 'U10', 'V10', 'SWDOWN', 'GLW', 'GRDFLX', 'PSFC', 'ALBEDO']
                       }
# param_file_mappings = {'temp_at_2': ['2m_temperature_*.nc'],
# 'precip_at_0': ['total_precipitation_*.nc'],
# 'snow_at_0': ['snowfall_*.nc'],
# 'runoff_at_0': ['surface_runoff_*.nc'],
# 'recharge_at_0': ['sub_surface_runoff_*.nc'],
# 'pressure_at_0': ['surface_pressure_*.nc'],
# 'shortwave_rad_at_0': ['surface_net_solar_radiation_*.nc'],
# 'longwave_rad_at_0': ['surface_net_thermal_radiation_*.nc'],
# 'heat_flux_at_0': ['surface_latent_heat_flux_*.nc'],
# 'relative_humidity_at_2': ['2m_temperature_*.nc', '2m_dewpoint_temperature_*.nc'],
# 'wind_speed_at_2': ['10m_u_component_of_wind_*.nc', '10m_v_component_of_wind_*.nc'],
# 'wind_speed_at_10': ['10m_u_component_of_wind_*.nc', '10m_v_component_of_wind_*.nc'],
# 'reference_et_at_0': ['2m_temperature_*.nc', '2m_dewpoint_temperature_*.nc', '10m_u_component_of_wind_*.nc', '10m_v_component_of_wind_*.nc', 'surface_net_solar_radiation_*.nc', 'surface_net_thermal_radiation_*.nc', 'surface_latent_heat_flux_*.nc', 'surface_pressure_*.nc'],
# 'pet_at_0': ['potential_evaporation_*.nc'],
# 'evaporation_at_0': ['total_evaporation_*.nc']
# }
# param_height_mappings = {'t2m': 2,
# 'd2m': 2,
# 'tp': 0,
# 'sf': 0,
# 'sro': 0,
# 'ssro': 0,
# 'sp': 0,
# 'ssr': 0,
# 'str': 0,
# 'slhf': 0,
# 'u10': 10,
# 'v10': 10,
# 'pev': 0,
# 'e': 0
# }
# raw_param_encodings = {'T2': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'RAINNC': {'scale_factor': 0.1, 'dtype': 'int16', '_FillValue': -9999},
# 'SNOWNC': {'scale_factor': 0.1, 'dtype': 'int16', '_FillValue': -9999},
# 'SFROFF': {'scale_factor': 0.1, 'dtype': 'int16', '_FillValue': -9999},
# 'UDROFF': {'scale_factor': 0.1, 'dtype': 'int16', '_FillValue': -9999},
# 'PSFC': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'SWDOWN': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'GLW': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'GRDFLX': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'Q2': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'U10': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# 'V10': {'scale_factor': 0.01, 'dtype': 'int16', '_FillValue': -9999},
# # 'reference_et_at_0': ['T2', 'Q2', 'U10', 'V10', 'SWDOWN', 'GLW', 'GRDFLX', 'PSFC', 'ALBEDO']
# }
def str_in_list(l1, l2):
    """Return the elements of ``l1`` whose lowercased form appears in ``l2``.

    More than one element may match, hence a list is returned.

    Examples
    --------
    >>> print(str_in_list(['time', 'lon'], ['temp','time','prcp']))
    ['time']
    >>> print(str_in_list(['time', 'lon'], ['temp','time','prcp','lon']))
    ['time', 'lon']
    """
    matches = []
    for candidate in l1:
        if candidate.lower() in l2:
            matches.append(candidate)
    return matches
def nice_scale(mapextent, maxlen=0.15):
    """Returns a nice number for a legend scale of a map.

    Parameters
    ----------
    mapextent : float
        the total extent of the map
    maxlen : float
        from 0 to 1, the maximum relative length allowed for the scale

    Examples
    --------
    >>> print(nice_scale(140))
    20.0
    >>> print(nice_scale(140, maxlen=0.5))
    50.0
    """
    # Candidate scales are 1, 2, 5 times powers of ten from 1e-5 to 1e6.
    mantissas = np.array([1, 2, 5])
    exponents = 10.0 ** np.arange(-5, 7)
    candidates = (exponents[:, None] * mantissas[None, :]).ravel()
    # Pick the largest candidate not exceeding the allowed relative length.
    return np.max(candidates[candidates / mapextent <= maxlen])
def reduce(arr, factor=1, how=np.mean):
    """Reduces an array's size by a given factor.

    The reduction is applied over the two last dimensions, collapsing each
    ``factor`` x ``factor`` block with the given reduction function
    (default is mean).

    Parameters
    ----------
    arr : ndarray
        an array of at least 2 dimensions (the reduction is done on the two
        last dimensions).
    factor : int
        the factor to apply for reduction (must be a divider of the original
        axis dimension!).
    how : func
        the reduction function

    Returns
    -------
    the reduced array
    """
    arr = np.asarray(arr)
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    new_rows = np.round(n_rows / factor).astype(int)
    new_cols = np.round(n_cols / factor).astype(int)
    # Split each of the two trailing axes into (block, within-block) pairs...
    blocked = arr.reshape(list(arr.shape[:-2]) + [new_rows, factor, new_cols, factor])
    # ...then collapse the two within-block axes: the row one sits third from
    # the end, and after it is reduced the column one is the last axis.
    return how(how(blocked, axis=-3), axis=-1)
| StarcoderdataPython |
52657 | <filename>tools/cp.py<gh_stars>100-1000
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copy a file.
This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""
import shutil
import sys
import os
def Main(src, dst):
    """Copy |src| to |dst|, creating dst's parent directory if needed.

    A directory |src| is copied recursively (replacing any existing |dst|);
    a file |src| is copied with shutil.copy so the executable bit survives.
    Returns whatever shutil.copy/copytree return.
    """
    # Use copy instead of copyfile to ensure the executable bit is copied.
    path = os.path.dirname(dst)
    # Guard the empty case: when dst has no directory component,
    # os.path.dirname() returns '' and os.makedirs('') raises.
    if path and not os.path.exists(path):
        os.makedirs(path)
    if os.path.isdir(src):
        if os.path.exists(dst):
            # copytree requires that the destination does not exist yet.
            shutil.rmtree(dst)
        return shutil.copytree(src, dst)
    else:
        return shutil.copy(src, dst)
if __name__ == '__main__':
    # Usage: cp.py <src> <dst>.
    # NOTE(review): under Python 3, shutil.copy/copytree return the destination
    # path, so sys.exit() receives a truthy string (printed, exit status 1);
    # under Python 2 they returned None (exit status 0).  Confirm which
    # interpreter this tool is expected to run under.
    sys.exit(Main(sys.argv[1], sys.argv[2]))
| StarcoderdataPython |
161703 | """
This module has simple examples of multicore programs.
The first few examples are the same as those in
IoTPy/IoTPy/tests/multicore_test.py
"""
import sys
import os
import threading
import random
import multiprocessing
import numpy as np
sys.path.append(os.path.abspath("../multiprocessing"))
sys.path.append(os.path.abspath("../core"))
sys.path.append(os.path.abspath("../agent_types"))
sys.path.append(os.path.abspath("../helper_functions"))
sys.path.append(os.path.abspath("../../examples/timing"))
"""
This module contains tests:
* offset_estimation_test()
which tests code from multicore.py in multiprocessing.
"""
import sys
import os
import threading
import random
import multiprocessing
import numpy as np
import time
sys.path.append(os.path.abspath("../../IoTPy/multiprocessing"))
sys.path.append(os.path.abspath("../../IoTPy/core"))
sys.path.append(os.path.abspath("../../IoTPy/agent_types"))
sys.path.append(os.path.abspath("../../IoTPy/helper_functions"))
# multicore is in "../../IoTPy/multiprocessing"
from multicore import multicore, copy_data_to_stream
# basics, run, print_stream are in "../../IoTPy/helper_functions"
from basics import map_e, map_l, map_w, merge_e
from run import run
from print_stream import print_stream
# stream is in ../../IoTPy/core
from stream import Stream
# --- Simple stream agents built from the IoTPy decorators -------------------
# @map_e wraps an element-wise function, @map_l a list-at-a-time function,
# @map_w a window function, and @merge_e a function merging several streams.

@map_e
def double(v): return 2*v

@map_e
def increment(v): return v+1

@map_e
def square(v): return v**2

@map_e
def identity(v): return v

@map_e
def multiply(v, multiplicand): return v*multiplicand

@map_e
def add(v, addend): return v+addend

@map_e
def multiply_and_add(element, multiplicand, addend):
    return element*multiplicand + addend

@map_l
def filter_then_square(sequence, filter_threshold):
    # Keep elements below the threshold, then square them.
    return [element**2 for element in sequence
            if element < filter_threshold]

@map_w
def sum_window(window):
    return sum(window)

@merge_e
def sum_numbers(numbers):
    return sum(numbers)

def copy_in_to_out(in_streams, out_streams):
    """Compute function: copy the single input stream to the single output stream."""
    identity(in_streams[0], out_streams[0])

def print_input_stream(in_streams, out_streams):
    """Compute function: print every element of the single input stream."""
    print_stream(in_streams[0], in_streams[0].name)
# Target of source thread.
def source_thread_target(proc, stream_name):
    """Thread target: feed the named stream 5 batches of 4 consecutive ints (0..19)."""
    num_steps=5
    step_size=4
    for i in range(num_steps):
        data = list(range(i*step_size, (i+1)*step_size))
        copy_data_to_stream(data, proc, stream_name)
        time.sleep(0)  # yield so other threads can make progress
    return
# Target of source thread reading from a file
def read_file_thread_target(proc, stream_name):
    """Thread target: read floats (one per line) from 'test.dat' and feed them
    to the named stream in windows of window_size elements."""
    filename = 'test.dat'
    window_size = 2
    with open(filename) as the_file:
        data = list(map(float, the_file))
    # Walk the data in steps of window_size.  The original code had the range()
    # arguments transposed (range(0, window_size, len(data))) and passed the
    # whole data list instead of the window it had just computed.
    for i in range(0, len(data), window_size):
        window = data[i:i+window_size]
        copy_data_to_stream(window, proc, stream_name)
        time.sleep(0)  # yield so other threads can make progress
    return
def pass_data_from_one_process_to_another():
    """Two-process example: 'source_process' copies its 'sequence' source
    stream to 'output_process', which prints it.  Streams are typed 'i' (int)."""
    # Specify processes and connections.
    processes = \
        {
          'source_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': copy_in_to_out,
              'sources':
                {'sequence':
                   {'type': 'i',
                    'func': source_thread_target
                   },
                },
              'actuators': {}
             },
          'output_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [],
              'compute_func': print_input_stream,
              'sources': {},
              'actuators': {}
             }
        }
    # Wire source_process's 'out' to output_process's 'in'; the 'sequence'
    # source thread feeds source_process's own 'in'.
    connections = \
        {
          'source_process' :
            {
              'out' : [('output_process', 'in')],
              'sequence' : [('source_process', 'in')]
            },
          'output_process':{}
        }
    multicore(processes, connections)

def pass_data_from_one_process_to_another_v2():
    """Same two-process topology, but with float ('f') streams and a source
    thread that reads the data from a file."""
    # Example that uses floats and shows a source
    # thread that reads a file.
    # Specify processes and connections.
    processes = \
        {
          'source_process':
             {'in_stream_names_types': [('in', 'f')],
              'out_stream_names_types': [('out', 'f')],
              'compute_func': copy_in_to_out,
              'sources':
                {'sequence':
                   {'type': 'f',
                    'func': read_file_thread_target
                   },
                },
              'actuators': {}
             },
          'output_process':
             {'in_stream_names_types': [('in', 'f')],
              'out_stream_names_types': [],
              'compute_func': print_input_stream,
              'sources': {},
              'actuators': {}
             }
        }
    connections = \
        {
          'source_process' :
            {
              'out' : [('output_process', 'in')],
              'sequence' : [('source_process', 'in')]
            },
          'output_process':{}
        }
    multicore(processes, connections)
def test_multicore_with_single_process():
    """Degenerate example: one process whose only input is its own
    file-reading source thread; the compute function just prints the stream."""
    processes = \
        {
          'process':
             {'in_stream_names_types': [('in', 'f')],
              'out_stream_names_types': [],
              'compute_func': print_input_stream,
              'sources':
                {'sequence':
                   {'type': 'f',
                    'func': read_file_thread_target
                   },
                },
              'actuators': {}
             }
        }
    connections = \
        {
          'process' :
            {
              'sequence' : [('process', 'in')]
            }
        }
    multicore(processes, connections)
def test_1_single_process():
    """
    This is a single process example which is converted into a
    multicore example in test_1(), see below.
    The partitioning to obtain multiple cores and threads is
    done as follows.
    (1) put_data_in_stream() is converted to a function which
    is the target of a thread. In test_1() this function is
    source_thread_target(proc, stream_name)
    (2) double(x,y) is put in a separate process. The compute
    function of this process is f(). Since the parameters
    of compute_func are in_streams and out_streams, we get
    f from double in the following way:
    def f(in_streams, out_streams):
    double(in_stream=in_streams[0], out_stream=out_streams[0])
    (3) increment() and print_stream() are in a separate process.
    The compute function of this process is g().
    Run both test_1_single_process() and test_1() and look at
    their identical outputs.
    """
    # ********************************************************
    # We will put this function in its own thread in test_1()
    def put_data_in_stream(stream):
        num_steps=5
        step_size=4
        for i in range(num_steps):
            data = list(range(i*step_size, (i+1)*step_size))
            stream.extend(data)
            run()  # drive the agent network after each extension
        return
    # ********************************************************
    # We will put these lines in a separate process in test_1()
    x = Stream('x')
    y = Stream('y')
    double(x, y)
    # *********************************************************
    # We will put these lines in a separate process in test_1().
    s = Stream(name='s')
    increment(y, s)
    print_stream(s, name=s.name)
    # *********************************************************
    # This function is executed in a separate thread in test_1().
    put_data_in_stream(x)
#--------------------------------------------------------------------
def test_1():
    """
    Example with two processes:
    source process feeds aggregate process.
    """
    # Functions wrapped by agents
    def f(in_streams, out_streams):
        # Source process compute function: double the input stream.
        double(in_streams[0], out_streams[0])
    def g(in_streams, out_streams):
        # Aggregate process compute function: increment and print.
        s = Stream(name='s')
        increment(in_streams[0], s)
        print_stream(s, name=s.name)
    # Specify processes and connections.
    processes = \
        {
          'source_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': f,
              'sources':
                {'acceleration':
                   {'type': 'i',
                    'func': source_thread_target
                   },
                },
              'actuators': {}
             },
          'aggregate_and_output_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [],
              'compute_func': g,
              'sources': {},
              'actuators': {}
             }
        }
    connections = \
        {
          'source_process' :
            {
              'out' : [('aggregate_and_output_process', 'in')],
              'acceleration' : [('source_process', 'in')]
            },
          'aggregate_and_output_process':
            {}
        }
    multicore(processes, connections)
#--------------------------------------------------------------------
def test_2():
    """
    Example with three processes connected linearly.
    source process feeds filter and square process which feeds
    aggregate and output process.
    """
    # Functions wrapped by agents
    def f(in_streams, out_streams):
        # Source stage: out = in*2 + 1
        multiply_and_add(in_streams[0], out_streams[0],
                         multiplicand=2, addend=1)
    def g(in_streams, out_streams):
        # Middle stage: keep values below 20, then square them.
        filter_then_square(in_streams[0], out_streams[0],
                           filter_threshold=20)
    def h(in_streams, out_streams):
        # Final stage: sum non-overlapping windows of 3 and print.
        s = Stream('s')
        sum_window(in_streams[0], s, window_size=3, step_size=3)
        print_stream(s, name=s.name)
    # Specify processes and connections.
    processes = \
        {
          'source_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': f,
              'sources':
                {'acceleration':
                   {'type': 'i',
                    'func': source_thread_target
                   },
                },
              'actuators': {}
             },
          'filter_and_square_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('filtered', 'i')],
              'compute_func': g,
              'sources': {},
              'actuators': {}
             },
          'aggregate_and_output_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [],
              'compute_func': h,
              'sources': {},
              'actuators': {}
             }
        }
    connections = \
        {
          'source_process' :
            {
              'out' : [('filter_and_square_process', 'in')],
              'acceleration' : [('source_process', 'in')]
            },
          'filter_and_square_process' :
            {
              'filtered' : [('aggregate_and_output_process', 'in')],
            },
          'aggregate_and_output_process':
            {}
        }
    multicore(processes, connections)
#--------------------------------------------------------------------
def test_3():
    """
    Example with three processes connected as a star.
    source process feeds both process_1 and process_2.
    """
    # Functions wrapped by agents
    def f(in_streams, out_streams):
        # Source stage: out = in*2 + 1
        multiply_and_add(in_streams[0], out_streams[0],
                         multiplicand=2, addend=1)
    def g(in_streams, out_streams):
        # Branch 1: filter below 20, square, print.
        t = Stream('t')
        filter_then_square(in_streams[0], t,
                           filter_threshold=20)
        print_stream(t, name='p1')
    def sums(in_streams, out_streams):
        # Branch 2: sum non-overlapping windows of 3, print.
        s = Stream('s')
        sum_window(in_streams[0], s, window_size=3, step_size=3)
        print_stream(s, name='   p2')
    processes = \
        {
          'source_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': f,
              'sources':
                {'acceleration':
                   {'type': 'i',
                    'func': source_thread_target
                   },
                }
             },
          'process_1':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [],
              'compute_func': g,
              'sources': {}
             },
          'process_2':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [],
              'compute_func': sums,
              'sources': {}
             }
        }
    connections = \
        {
          'source_process' :
            {
              'out' : [('process_1', 'in'), ('process_2', 'in')],
              'acceleration' : [('source_process', 'in')]
            },
          'process_1':
            {
            },
          'process_2':
            {
            }
        }
    multicore(processes, connections)
#--------------------------------------------------------------------
def test_4():
    """
    Example with four processes connected as a diamond.
    source process feeds both multiply_process and square_process,
    both of which feed merge_process.
    """
    # Functions wrapped by agents
    def f(in_streams, out_streams):
        # Source stage: pass the input through unchanged.
        identity(in_streams[0], out_streams[0])
    def g(in_streams, out_streams):
        # Left branch: multiply by 2.
        multiply(in_streams[0], out_streams[0],
                 multiplicand=2)
    def h(in_streams, out_streams):
        # Right branch: square.
        square(in_streams[0], out_streams[0])
    def m(in_streams, out_streams):
        # Merge stage: element-wise sum of both branches, printed.
        s = Stream('s')
        sum_numbers(in_streams, s)
        print_stream(s, name='s')
    processes = \
        {
          'source_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': f,
              'sources':
                {'acceleration':
                   {'type': 'i',
                    'func': source_thread_target
                   },
                }
             },
          'multiply_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': g,
              'sources': {}
             },
          'square_process':
             {'in_stream_names_types': [('in', 'i')],
              'out_stream_names_types': [('out', 'i')],
              'compute_func': h,
              'sources': {}
             },
          'merge_process':
             {'in_stream_names_types': [('in_multiply', 'i'),
                                        ('in_square', 'i')],
              'out_stream_names_types': [],
              'compute_func': m,
              'sources': {}
             }
        }
    connections = \
        {
          'source_process' :
            {
              'out' : [('multiply_process', 'in'), ('square_process', 'in')],
              'acceleration' : [('source_process', 'in')]
            },
          'multiply_process':
            {
              'out' : [('merge_process', 'in_multiply')]
            },
          'square_process':
            {
              'out' : [('merge_process', 'in_square')]
            },
          'merge_process':
            {
            }
        }
    multicore(processes, connections)
if __name__ == '__main__':
    # Run every example in turn.  The banner prints describe the expected
    # output so a human can eyeball-check the interleaved process output.
    test_multicore_with_single_process()
    print ('starting pass data from one process to another')
    print ('in[j] = j')
    print ('')
    pass_data_from_one_process_to_another()
    print ('')
    print ('')
    print ('--------------------------------')
    print ('')
    print ('')
    print ('starting pass data from one process to another')
    print ('in[j] = j')
    print ('')
    pass_data_from_one_process_to_another_v2()
    print ('')
    print ('')
    print ('--------------------------------')
    print ('')
    print ('')
    print ('starting test_1')
    print ('s[j] = 2*j + 1')
    print ('')
    test_1()
    print ('')
    print ('')
    print ('--------------------------------')
    print ('')
    print ('')
    print ('start test_1_single_process')
    print ('Output of test_1_single process() is identical:')
    print ('to output of test_1()')
    print ('[1, 3, 5, ..., 39]')
    print ('')
    test_1_single_process()
    print ('')
    print ('')
    print ('--------------------------------')
    print ('')
    print ('')
    print ('starting test_2')
    print ('Output of source_process is: ')
    print ('[1, 3, 5, 7, 9, 11, .... ,39 ]')
    print ('')
    print ('Output of filter_and_square_process is:')
    print ('[1, 9, 25, 49, 81, 121, 169, 225, 289, 361]')
    print ('')
    print('Output of aggregate_and_output_process is:')
    print('[1+9+25, 49+81+121, 169+225+289] which is:')
    print ('[35, 251, 683]')
    print ('')
    test_2()
    print ('')
    print ('')
    print ('--------------------------------')
    print ('')
    print ('')
    print ('starting test_3')
    print ('')
    print ('p1 is [1, 3, 5,...., 39]')
    print ('')
    print ('p2 is [1+3+5, 7+9+11, 13+15+17, ..]')
    print ('')
    test_3()
    print ('')
    print ('')
    print ('--------------------------------')
    print ('')
    print ('')
    print ('starting test_4')
    print ('')
    print ('Output of source process is:')
    print ('[0, 1, 2, 3, ...., 19]')
    print ('')
    print ('Output of multiply process is source*2:')
    print ('[0, 2, 4, 6, .... 38]')
    print ('')
    print ('Output of square process is source**2:')
    print ('[0, 1, 4, 9, ... 361]')
    print ('')
    print ('Output of aggregate process is:')
    print ('[0+0, 2+1, 4+4, 6+9, ..., 38+361]')
    print ('')
    test_4()
| StarcoderdataPython |
180003 | import arpy
# from subprocess import Popen
# Auto-push to git: stage everything, commit with a fixed message, and push
# to origin/master.  Presumably arpy.task() registers these shell commands as
# a watched task over "." while ignoring the .git directory -- TODO confirm
# arpy.task semantics against the arpy documentation.
arpy.task("push", ["git add .", "git commit -m 'updates'", "git push origin master"], ".", ignorelist=[".git"])
| StarcoderdataPython |
1674888 | <reponame>tcchrist/renku-python
# -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON-LD dataset migrations."""
import itertools
import json
import os
import uuid
from pathlib import Path
import pyld
from renku.core.models.jsonld import read_yaml, write_yaml
from renku.core.utils.migrate import get_pre_0_3_4_datasets_metadata
def migrate(client):
    """Migration function: migrate the project metadata first, then all
    dataset metadata files."""
    _migrate_project_metadata(client)
    _migrate_datasets_metadata(client)
def _migrate_project_metadata(client):
    """Apply all initial JSON-LD migrations to project."""
    # Old project metadata used FOAF terms; translate them to schema.org IRIs.
    jsonld_translate = {
        "http://schema.org/name": "http://xmlns.com/foaf/0.1/name",
        "http://schema.org/Project": "http://xmlns.com/foaf/0.1/Project",
    }
    _apply_on_the_fly_jsonld_migrations(
        path=client.renku_metadata_path,
        jsonld_context=_INITIAL_JSONLD_PROJECT_CONTEXT,
        fields=_PROJECT_FIELDS,
        jsonld_translate=jsonld_translate,
    )

def _migrate_datasets_metadata(client):
    """Apply all initial JSON-LD migrations to datasets."""
    # Migration functions to run, keyed by the dataset's JSON-LD @type.
    jsonld_migrations = {
        "dctypes:Dataset": [_migrate_dataset_schema, _migrate_absolute_paths],
        "schema:Dataset": [
            _migrate_absolute_paths,
            _migrate_doi_identifier,
            _migrate_same_as_structure,
            _migrate_dataset_file_id,
        ],
    }
    # Handle both the pre-0.3.4 metadata layout and the current one.
    old_metadata_paths = get_pre_0_3_4_datasets_metadata(client)
    new_metadata_paths = client.renku_datasets_path.rglob(client.METADATA)
    for path in itertools.chain(old_metadata_paths, new_metadata_paths):
        _apply_on_the_fly_jsonld_migrations(
            path=path,
            jsonld_context=_INITIAL_JSONLD_DATASET_CONTEXT,
            fields=_DATASET_FIELDS,
            client=client,
            jsonld_migrations=jsonld_migrations,
        )
def _apply_on_the_fly_jsonld_migrations(
    path, jsonld_context, fields, client=None, jsonld_migrations=None, jsonld_translate=None
):
    """Read a YAML metadata file, apply JSON-LD translation/migrations, and
    write it back with only the keys listed in ``fields`` retained."""
    data = read_yaml(path)
    if not isinstance(data, dict) and not isinstance(data, list):
        # NOTE: metadata file is probably not an actual renku file
        return
    if jsonld_translate:
        # perform the translation
        data = pyld.jsonld.expand(data)
        data_str = json.dumps(data)
        # Textual IRI replacement on the expanded JSON-LD document.
        for k, v in jsonld_translate.items():
            data_str = data_str.replace(v, k)
        data = json.loads(data_str)
        data = pyld.jsonld.compact(data, jsonld_context)
    data.setdefault("@context", jsonld_context)
    _migrate_types(data)
    if jsonld_migrations:
        # Collect the migrations registered for this document's @type(s).
        schema_type = data.get("@type")
        migrations = []
        if isinstance(schema_type, list):
            for schema in schema_type:
                migrations += jsonld_migrations.get(schema, [])
        elif isinstance(schema_type, str):
            migrations += jsonld_migrations.get(schema_type, [])
        # NOTE(review): set() deduplicates but discards the declared order --
        # presumably the migrations are order-independent; confirm.
        for migration in set(migrations):
            data = migration(data, client)
    if data["@context"] != jsonld_context:
        # merge new context into old context to prevent properties
        # getting lost in jsonld expansion
        if isinstance(data["@context"], str):
            data["@context"] = {"@base": data["@context"]}
        data["@context"].update(jsonld_context)
        try:
            compacted = pyld.jsonld.compact(data, jsonld_context)
        except Exception:
            # Best-effort: fall back to the un-compacted document.
            compacted = data
    else:
        compacted = data
    data = {}
    for k, v in compacted.items():
        if k in fields:  # drop any key not in the known field set
            no_value_context = isinstance(v, dict) and "@context" not in v
            has_nested_context = k in compacted["@context"] and "@context" in compacted["@context"][k]
            if no_value_context and has_nested_context:
                # Propagate down context
                v["@context"] = compacted["@context"][k]["@context"]
            data[k] = v
    data["@context"] = jsonld_context
    _migrate_types(data)
    write_yaml(path, data)
def _migrate_dataset_schema(data, client):
"""Migrate from old dataset formats."""
if "authors" not in data:
return
data["@context"]["creator"] = data["@context"].pop("authors", {"@container": "list"})
data["creator"] = data.pop("authors", {})
files = data.get("files", [])
if isinstance(files, dict):
files = files.values()
for file_ in files:
file_["creator"] = file_.pop("authors", {})
return data
def _migrate_absolute_paths(data, client):
"""Migrate dataset paths to use relative path."""
raw_path = data.get("path", ".")
path = Path(raw_path)
if path.is_absolute():
try:
data["path"] = str(path.relative_to(os.getcwd()))
except ValueError:
elements = raw_path.split("/")
index = elements.index(".renku")
data["path"] = str(Path("/".join(elements[index:])))
files = data.get("files", [])
if isinstance(files, dict):
files = list(files.values())
for file_ in files:
path = Path(file_.get("path"), ".")
if path.is_absolute():
file_["path"] = str(path.relative_to((os.getcwd())))
data["files"] = files
return data
def _migrate_doi_identifier(data, client):
    """If the dataset _id is a DOI, make it a UUID, keeping the DOI reachable
    through ``same_as``.  Returns the metadata dict."""
    from renku.core.utils.doi import is_doi
    from renku.core.utils.uuid import is_uuid
    _id = data.get("_id", "")
    identifier = data.get("identifier", "")
    if not is_uuid(_id):
        if not is_uuid(identifier):
            # Neither field is a UUID: mint a fresh identifier.
            data["identifier"] = str(uuid.uuid4())
        if is_doi(data.get("_id", "")):
            # Preserve the DOI as a schema:URL object under same_as.
            data["same_as"] = {"@type": ["schema:URL"], "url": data["_id"]}
            if data.get("@context"):
                data["@context"].setdefault(
                    "same_as",
                    {
                        "@id": "schema:sameAs",
                        "@type": "schema:URL",
                        "@context": {"@version": "1.1", "url": "schema:url", "schema": "http://schema.org/"},
                    },
                )
        # Adopt the (possibly freshly minted) identifier as the _id.
        data["_id"] = data["identifier"]
    return data
def _migrate_same_as_structure(data, client):
"""Changes sameAs string to schema:URL object."""
same_as = data.get("same_as")
if same_as and isinstance(same_as, str):
data["same_as"] = {"@type": ["schema:URL"], "url": same_as}
if data.get("@context"):
data["@context"].setdefault(
"same_as",
{
"@id": "schema:sameAs",
"@type": "schema:URL",
"@context": {"@version": "1.1", "url": "schema:url", "schema": "http://schema.org/"},
},
)
return data
def _migrate_dataset_file_id(data, client):
"""Ensure dataset files have a fully qualified url as id."""
host = "localhost"
if client:
host = client.remote.get("host") or host
host = os.environ.get("RENKU_DOMAIN") or host
files = data.get("files", [])
for file_ in files:
if not file_["_id"].startswith("http"):
file_["_id"] = "https://{host}/{id}".format(host=host, id=file_["_id"])
return data
def _migrate_types(data):
    """Fix types."""
    # Delegates to the shared helper so the type-fixing rules live in one place.
    from renku.core.utils.migrate import migrate_types
    migrate_types(data)
# Attribute names recognized on an old-style project record.
_PROJECT_FIELDS = {"_id", "created", "creator", "name", "updated", "version"}

# Attribute names recognized on an old-style dataset record.
_DATASET_FIELDS = {
    "_id",
    "_label",
    "_project",
    "based_on",
    "created",
    "creator",
    "date_published",
    "description",
    "files",
    "identifier",
    "in_language",
    "keywords",
    "license",
    "name",
    "path",
    "same_as",
    "short_name",
    "tags",
    "url",
    "version",
}

# JSON-LD @context historically emitted for project metadata; used when a
# record carries no @context of its own.
_INITIAL_JSONLD_PROJECT_CONTEXT = {
    "schema": "http://schema.org/",
    "prov": "http://www.w3.org/ns/prov#",
    "@version": 1.1,
    "name": "schema:name",
    "created": "schema:dateCreated",
    "updated": "schema:dateUpdated",
    "version": "schema:schemaVersion",
    "creator": {
        "@id": "schema:creator",
        "@context": {
            "schema": "http://schema.org/",
            "prov": "http://www.w3.org/ns/prov#",
            "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
            "@version": 1.1,
            "name": "schema:name",
            "email": "schema:email",
            "label": "rdfs:label",
            "affiliation": "schema:affiliation",
            "alternate_name": "schema:alternateName",
            "_id": "@id",
        },
    },
    "_id": "@id",
}

# JSON-LD @context historically emitted for dataset metadata, including the
# nested contexts for the embedded project, creator, files and tags objects.
_INITIAL_JSONLD_DATASET_CONTEXT = {
    "schema": "http://schema.org/",
    "@version": 1.1,
    "prov": "http://www.w3.org/ns/prov#",
    "wfprov": "http://purl.org/wf4ever/wfprov#",
    "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
    "path": "prov:atLocation",
    "_id": "@id",
    "_project": {
        "@id": "schema:isPartOf",
        "@context": {
            "schema": "http://schema.org/",
            "prov": "http://www.w3.org/ns/prov#",
            "@version": 1.1,
            "name": "schema:name",
            "created": "schema:dateCreated",
            "updated": "schema:dateUpdated",
            "version": "schema:schemaVersion",
            "creator": {
                "@id": "schema:creator",
                "@context": {
                    "schema": "http://schema.org/",
                    "prov": "http://www.w3.org/ns/prov#",
                    "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                    "@version": 1.1,
                    "name": "schema:name",
                    "email": "schema:email",
                    "label": "rdfs:label",
                    "affiliation": "schema:affiliation",
                    "alternate_name": "schema:alternateName",
                    "_id": "@id",
                },
            },
            "_id": "@id",
        },
    },
    "creator": {
        "@id": "schema:creator",
        "@context": {
            "schema": "http://schema.org/",
            "prov": "http://www.w3.org/ns/prov#",
            "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
            "@version": 1.1,
            "name": "schema:name",
            "email": "schema:email",
            "label": "rdfs:label",
            "affiliation": "schema:affiliation",
            "alternate_name": "schema:alternateName",
            "_id": "@id",
        },
    },
    "date_published": "schema:datePublished",
    "description": "schema:description",
    "identifier": "schema:identifier",
    "in_language": {
        "@id": "schema:inLanguage",
        "@context": {
            "schema": "http://schema.org/",
            "@version": 1.1,
            "alternate_name": "schema:alternateName",
            "name": "schema:name",
        },
    },
    "keywords": "schema:keywords",
    "based_on": "schema:isBasedOn",
    "license": "schema:license",
    "name": "schema:name",
    "url": "schema:url",
    "version": "schema:version",
    "created": "schema:dateCreated",
    "files": {
        "@id": "schema:hasPart",
        "@context": {
            "schema": "http://schema.org/",
            "@version": 1.1,
            "prov": "http://www.w3.org/ns/prov#",
            "wfprov": "http://purl.org/wf4ever/wfprov#",
            "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
            "path": "prov:atLocation",
            "_id": "@id",
            "_label": "rdfs:label",
            "_project": {
                "@id": "schema:isPartOf",
                "@context": {
                    "schema": "http://schema.org/",
                    "prov": "http://www.w3.org/ns/prov#",
                    "@version": 1.1,
                    "name": "schema:name",
                    "created": "schema:dateCreated",
                    "updated": "schema:dateUpdated",
                    "version": "schema:schemaVersion",
                    "creator": {
                        "@id": "schema:creator",
                        "@context": {
                            "schema": "http://schema.org/",
                            "prov": "http://www.w3.org/ns/prov#",
                            "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                            "@version": 1.1,
                            "name": "schema:name",
                            "email": "schema:email",
                            "label": "rdfs:label",
                            "affiliation": "schema:affiliation",
                            "alternate_name": "schema:alternateName",
                            "_id": "@id",
                        },
                    },
                    "_id": "@id",
                },
            },
            "creator": {
                "@id": "schema:creator",
                "@context": {
                    "schema": "http://schema.org/",
                    "prov": "http://www.w3.org/ns/prov#",
                    "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                    "@version": 1.1,
                    "name": "schema:name",
                    "email": "schema:email",
                    "label": "rdfs:label",
                    "affiliation": "schema:affiliation",
                    "alternate_name": "schema:alternateName",
                    "_id": "@id",
                },
            },
            "added": "schema:dateCreated",
            "name": "schema:name",
            "url": "schema:url",
            "external": "renku:external",
            "based_on": "schema:isBasedOn",
            "renku": "https://swissdatasciencecenter.github.io/renku-ontology#",
        },
    },
    "tags": {
        "@id": "schema:subjectOf",
        "@context": {
            "schema": "http://schema.org/",
            "@version": 1.1,
            "name": "schema:name",
            "description": "schema:description",
            "commit": "schema:location",
            "created": "schema:startDate",
            "dataset": "schema:about",
            "_id": "@id",
        },
    },
    "same_as": {
        "@id": "schema:sameAs",
        "@context": {"schema": "http://schema.org/", "@version": 1.1, "url": "schema:url", "_id": "@id"},
    },
    "short_name": "schema:alternateName",
}
| StarcoderdataPython |
1676006 | import json
import logging
import os
import shutil
import socket
from os.path import join
from hacksport.operations import execute
from shell_manager.bundle import get_bundle, get_bundle_root
from shell_manager.util import (BUNDLE_ROOT, DEPLOYED_ROOT, get_problem,
get_problem_root, HACKSPORTS_ROOT, PROBLEM_ROOT,
STAGING_ROOT)
logger = logging.getLogger(__name__)
def get_all_problems():
    """Return a {name: problem object} mapping for all installed problems.

    Entries under PROBLEM_ROOT whose metadata file is missing are silently
    skipped (deliberate best-effort), matching the original behavior.
    """
    problems = {}
    if os.path.isdir(PROBLEM_ROOT):
        for name in os.listdir(PROBLEM_ROOT):
            try:
                problems[name] = get_problem(get_problem_root(name, absolute=True))
            except FileNotFoundError:
                # Fixed: dropped the unused exception binding.
                pass
    return problems
def get_all_bundles():
    """Return a {name: bundle object} mapping for all installed bundles.

    Entries under BUNDLE_ROOT whose metadata file is missing are silently
    skipped (deliberate best-effort), matching the original behavior.
    """
    bundles = {}
    if os.path.isdir(BUNDLE_ROOT):
        for name in os.listdir(BUNDLE_ROOT):
            try:
                bundles[name] = get_bundle(get_bundle_root(name, absolute=True))
            except FileNotFoundError:
                # Fixed: dropped the unused exception binding.
                pass
    return bundles
def get_all_problem_instances(problem_path):
    """Return the list of deployed instance dicts for a given problem.

    Reads every ``*.json`` file under the problem's deployment directory.
    Unreadable or malformed files are skipped (deliberate best-effort).
    """
    instances = []
    instances_dir = join(DEPLOYED_ROOT, problem_path)
    if os.path.isdir(instances_dir):
        for name in os.listdir(instances_dir):
            if not name.endswith(".json"):
                continue
            try:
                # Fixed: use a context manager so the file handle is closed
                # even when parsing fails (the original leaked it), and parse
                # with json.load instead of read()+loads.
                with open(join(instances_dir, name)) as fh:
                    instances.append(json.load(fh))
            except Exception:
                # Broad on purpose: any unreadable/corrupt file is skipped.
                continue
    return instances
def publish(args, config):
    """ Main entrypoint for publish: dump all problems and bundles as JSON """
    problems = get_all_problems()
    bundles = get_all_bundles()

    # Attach deployment details to every problem before exporting.
    problem_list = []
    for path, problem in problems.items():
        problem["instances"] = get_all_problem_instances(path)
        problem["sanitized_name"] = path
        problem_list.append(problem)

    output = {"problems": problem_list, "bundles": list(bundles.values())}
    print(json.dumps(output, indent=2))
def clean(args, config):
    """ Main entrypoint for clean """
    # Drop any staging directories left behind by interrupted deploys.
    if os.path.isdir(STAGING_ROOT):
        logger.info("Removing the staging directories")
        shutil.rmtree(STAGING_ROOT)

    # Drop a stale deploy lock, if one survived a crashed run.
    stale_lock = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(stale_lock):
        logger.info("Removing the stale lock file")
        os.remove(stale_lock)
    # TODO: potentially perform more cleaning
def status(args, config):
    """ Main entrypoint for status.

    Reports on deployed problems and bundles. Depending on args it covers one
    problem, one bundle, or everything, formatted as JSON or as text, and
    exits with a non-zero code when any problem instance appears broken."""
    bundles = get_all_bundles()
    problems = get_all_problems()

    def get_instance_status(instance):
        # Probe one deployed instance: TCP reachability plus systemd state.
        status = {
            "instance_number": instance["instance_number"],
            "port": instance["port"] if "port" in instance else None,
            "flag": instance["flag"]
        }
        status["connection"] = False
        if "port" in instance:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect(("localhost", instance["port"]))
                s.close()
                status["connection"] = True
            except ConnectionRefusedError as e:
                pass
        if instance["service"]:
            result = execute(["systemctl", "is-failed", instance["service"]], allow_error=True)
        else:
            result = execute(["systemctl", "is-failed"], allow_error=True)
        # NOTE(review): `systemctl is-failed` exits 0 when a unit *is* failed,
        # so return_code == 1 is treated as "healthy" here -- confirm that
        # this also holds for the no-unit invocation above.
        status["service"] = result.return_code == 1
        # A service that should listen on a port but refuses connections is
        # considered broken regardless of its systemd state.
        if status["port"] is not None and not status["connection"]:
            status["service"] = False
        return status

    def get_problem_status(path, problem):
        # Collect the status of every deployed instance of one problem.
        problem_status = {"name": problem["name"]}
        instances = get_all_problem_instances(path)
        instance_statuses = []
        for instance in instances:
            instance_statuses.append(get_instance_status(instance))
        problem_status["instances"] = instance_statuses
        return problem_status

    def print_problem_status(problem, path, prefix=""):
        # Human-readable problem summary; with --all, per-instance details.
        def pprint(string):
            print("{}{}".format(prefix, string))
        pprint("* [{}] {} ({})".format(
            len(problem["instances"]), problem['name'], path))
        if args.all:
            for instance in problem["instances"]:
                pprint(" - Instance {}".format(instance["instance_number"]))
                pprint(" flag: {}".format(instance["flag"]))
                pprint(" port: {}".format(instance["port"]))
                pprint(" service: {}".format("active" if instance[
                    "service"] else "failed"))
                pprint(" connection: {}".format("online" if instance[
                    "connection"] else "offline"))

    def print_bundle(bundle, path, prefix=""):
        # Human-readable bundle summary listing its member problems.
        def pprint(string):
            print("{}{}".format(prefix, string))
        pprint("[{} ({})]".format(bundle["name"], path))
        for problem_path in bundle["problems"]:
            problem = problems.get(problem_path, None)
            if problem is None:
                pprint(" ! Invalid problem '{}' !".format(problem_path))
                continue
            pprint(" {} ({})".format(problem['name'], problem_path))

    def get_bundle_status(bundle):
        # Replace the bundle's problem paths with full problem-status dicts.
        problem_statuses = []
        for problem_path in bundle["problems"]:
            problem = problems.get(problem_path)
            problem_statuses.append(get_problem_status(problem_path, problem))
        bundle["problems"] = problem_statuses
        return bundle

    if args.problem is not None:
        # Report on a single problem.
        problem = problems.get(args.problem, None)
        if problem is None:
            print("Could not find problem \"{}\"".format(args.problem))
            return
        problem_status = get_problem_status(args.problem, problem)
        if args.json:
            print(json.dumps(problem_status, indent=4))
        else:
            print_problem_status(problem_status, args.problem, prefix="")
    elif args.bundle is not None:
        # Report on a single bundle.
        bundle = bundles.get(args.bundle, None)
        if bundle is None:
            print("Could not find bundle \"{}\"".format(args.bundle))
            return
        if args.json:
            print(json.dumps(get_bundle_status(bundle), indent=4))
        else:
            print_bundle(bundle, args.bundle, prefix="")
    else:
        # Report on everything that is installed.
        return_code = 0
        if args.json:
            result = {
                "bundles":
                bundles,
                "problems":
                list(
                    map(lambda tup: get_problem_status(*tup), problems.items()))
            }
            print(json.dumps(result, indent=4))
        elif args.errors_only:
            for path, problem in problems.items():
                problem_status = get_problem_status(path, problem)
                # Determine if any problem instance is offline
                for instance_status in problem_status["instances"]:
                    if not instance_status["service"]:
                        print_problem_status(problem_status, path, prefix=" ")
                        return_code = 1
        else:
            print("** Installed Bundles [{}] **".format(len(bundles)))
            # NOTE(review): shown_problems is assigned but never used.
            shown_problems = []
            for path, bundle in bundles.items():
                print_bundle(bundle, path, prefix=" ")
            print("** Installed Problems [{}] **".format(len(problems)))
            for path, problem in problems.items():
                problem_status = get_problem_status(path, problem)
                # Determine if any problem instance is offline
                for instance_status in problem_status["instances"]:
                    if not instance_status["service"]:
                        return_code = 1
                print_problem_status(problem_status, path, prefix=" ")
        # A non-zero exit signals that at least one instance looks broken.
        if return_code != 0:
            exit(return_code)
| StarcoderdataPython |
1737569 | <reponame>kagemeka/atcoder-submissions<filename>jp.atcoder/typical90/typical90_s/26013390.py
import sys
import typing
import numba as nb
import numpy as np
@nb.njit((nb.i8[:], ), cache=True)
def solve(a: np.ndarray) -> typing.NoReturn:
    """Interval DP: print the minimum total cost of pairing up all elements,
    where pairing a[l] with a[r] costs |a[l] - a[r]|.

    dp[l, r] = cheapest way to completely pair the slice a[l..r]; intervals
    with an odd element count keep the inf sentinel. O(n^3) time.
    """
    n = len(a)
    inf = 1 << 60  # sentinel for "not pairable"
    dp = np.full((n, n), inf, np.int64)
    # Base case: two adjacent elements paired with each other.
    for i in range(n - 1):
        dp[i, i + 1] = np.abs(a[i] - a[i + 1])
    for d in range(2, n):  # d = interval length - 1
        for l in range(n - d):
            r = l + d
            # Either pair the two endpoints around a solved interior ...
            dp[l, r] = dp[l + 1, r - 1] + np.abs(a[l] - a[r])
            # ... or split into two independently solved sub-intervals.
            # (m == r - 1 is safely omitted: the right part would be a
            # single element, whose dp value is inf.)
            for m in range(l + 1, r - 1):
                dp[l, r] = min(
                    dp[l, r],
                    dp[l, m] + dp[m + 1, r],
                )
    print(dp[0, -1])
def main() -> typing.NoReturn:
    """Read the input (count line, then the array line) and run the solver."""
    # The count on the first line is consumed but not otherwise needed.
    n = int(input())
    values = np.array(sys.stdin.readline().split(), dtype=np.int64)
    solve(values)


main()
| StarcoderdataPython |
1742936 | """evaluate.py
Script to create a system response for a given gold standard and then compare
the system response to that gold standard.
USAGE:
$ python evaluate.py --run --gold DIR1 --system DIR2 [OPTIONS]
$ python evaluate.py --comp --gold DIR1 --system DIR2 [OPTIONS]
$ python evaluate.py --diff --gold DIR1 --system DIR2 --out DIR3 [OPTIONS]
In the first invocation, the script takes the gold standard files in DIR1 and
for each file creates a system file in DIR2 that does not have the gold standard
tags but the tags generated by the system. In the second invocation, the script
compares the system results to the gold standard and writes precision, recall
and f-score results to the standard output. In the third invocation, html files
showing the difference between files will be written to DIR3.
All files in the gold standard are expected to be TTK files. See the code in
utilities.convert for how to convert to the TTK format.
OPTIONS:
--limit INT
Caps the number of files processed from the directory. If no limit is
given all files will be processed.
--display=CHOICE1,CHOICE2,...
This determines what entities pairs are displayed. By default all entity
pairs from the gold and system tags are displayed: matches, partial
matches, false positives and false negatives. But if the --display option
is used then only the ones listed are displayed. Available choices are:
EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP and NO_MATCH_FN. This option is
only relevant for the third invocation above. Example:
--display=PARTIAL_MATCH,NO_MATCH_FN
With this value only partial matches and false negatives are displayed.
"""
from __future__ import absolute_import
# BUG FIX: ``from __future__ import division`` previously appeared further
# down, *after* regular imports and statements -- that is a SyntaxError,
# since __future__ imports must be the first statements in a module.
from __future__ import division
from __future__ import print_function

import copy
import getopt
import os
import shutil
import sys
from io import StringIO

from six.moves import range

sys.path.insert(0, '..')
sys.path.insert(0, '.')

import tarsqi
from library.main import LIBRARY

# Keep the directory this script was called from for later use (Tarsqi will
# change current directories while processing), also keep the directory of this
# script around.
EXEC_DIR = os.getcwd()
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Tag-name constants resolved from the TimeML library specification.
EVENT = LIBRARY.timeml.EVENT
TIMEX = LIBRARY.timeml.TIMEX
ALINK = LIBRARY.timeml.ALINK
SLINK = LIBRARY.timeml.SLINK
TLINK = LIBRARY.timeml.TLINK

LINK_TAGS = (ALINK, SLINK, TLINK)
TIMEML_TAGS = (EVENT, TIMEX, ALINK, SLINK, TLINK)

# Attribute-name constants for ids and link source/target references.
TID = LIBRARY.timeml.TID
EIID = LIBRARY.timeml.EIID
RELTYPE = LIBRARY.timeml.RELTYPE
TIME_ID = LIBRARY.timeml.TIME_ID
EVENT_INSTANCE_ID = LIBRARY.timeml.EVENT_INSTANCE_ID
RELATED_TO_TIME = LIBRARY.timeml.RELATED_TO_TIME
RELATED_TO_EVENT_INSTANCE = LIBRARY.timeml.RELATED_TO_EVENT_INSTANCE
SUBORDINATED_EVENT_INSTANCE = LIBRARY.timeml.SUBORDINATED_EVENT_INSTANCE

# the four kinds of aligned entities
EXACT_MATCH = 'EXACT_MATCH'
PARTIAL_MATCH = 'PARTIAL_MATCH'
NO_MATCH_FP = 'NO_MATCH_FP'
NO_MATCH_FN = 'NO_MATCH_FN'
DISPLAY_CHOICES = [EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP, NO_MATCH_FN]

# style file used for the html display of differences
# BUG FIX: the last rule previously ended in a stray '")' left over from an
# old print statement, which was emitted verbatim into the generated HTML.
CSS = """
<style>
div { display: block }
p { margin: 5pt; margin-bottom: 15pt; padding: 5pt; }
table { margin-bottom: 25px; width: 100%; }
table.scores { margin: 10px; margin-bottom: 25px; width: auto; }
.bordered { border: thin dotted black; }
sup.s { color: darkred; font-weight: bold; }
sup.chunk { color: darkblue; font-weight: bold; }
sup.pos { color: darkblue; font-weight: bold; }
sup.lex { color: darkgreen; font-weight: bold; font-size: 60%; }
.bracket { color: darkblue; font-weight: bold; }
.sbracket { color: darkred; font-weight: bold; }
entity { color: darkred; text-decoration: underline; }
</style>
"""
def create_system_files_from_gold_standard(gold_dir, system_dir, limit):
    """Take the TTK files in gold_dir and create TTK files in system_dir that have
    the same text and docelement tags, do not have the other tarsqi tags from
    the gold standard and have tags as added by the current system."""
    print(system_dir)
    if os.path.exists(system_dir):
        exit("Error: directory %s already exists" % system_dir)
    os.makedirs(system_dir)
    # Components may change the working directory, so fix paths up front.
    gold_dir = os.path.abspath(gold_dir)
    system_dir = os.path.abspath(system_dir)
    for count, fname in enumerate(os.listdir(gold_dir), start=1):
        if count > limit:
            break
        print(fname)
        create_system_file_from_gold_standard(os.path.join(gold_dir, fname),
                                              os.path.join(system_dir, fname))
def create_system_file_from_gold_standard(gold_file, system_file):
    """Take gold_file, a TTK file, and create the TTK file system_file that has
    the same text and docelement tags, does not have the other tarsqi tags from
    the gold standard and has tags as added by the current system."""
    # TODO: need to deal with the fact that with THYME we have a ttk version and
    # we use source=ttk, but there really needs to be a metadata parser that
    # does works for THYME documents. One option is to have the conversion find
    # the DCT.
    tarsqi_inst, tarsqidoc = tarsqi.load_ttk_document(gold_file)
    # before you reset, keep the docelement tags so that we do not have to rerun
    # the document parser
    docelement_tags = [t for t in tarsqidoc.tags.all_tags() if t.name == 'docelement']
    tarsqidoc.tags.reset()
    for tag in docelement_tags:
        tarsqidoc.tags.append(tag)
    tarsqidoc.tags.index()
    # Run every configured pipeline component to (re)generate system tags.
    for (name, wrapper) in tarsqi_inst.pipeline:
        tarsqi_inst._apply_component(name, wrapper, tarsqidoc)
    tarsqidoc.print_all(system_file)
def compare_dirs(gold_dir, system_dir, limit=sys.maxsize):
    """Generate the precision, recall and f-score numbers for the directories."""
    file_stats = []
    for fname in _collect_files(gold_dir, system_dir, limit):
        print(fname)
        gold_path = os.path.join(gold_dir, fname)
        system_path = os.path.join(system_dir, fname)
        file_stats.append(FileStatistics(gold_path, system_path))
    DirectoryStatistics(system_dir, file_stats).pp()
def view_differences(gold_dir, system_dir, display_dir, display_choices,
                     limit=sys.maxsize):
    """Create HTML files that view the differences."""
    target_dir = _create_display_dir(display_dir)
    for fname in _collect_files(gold_dir, system_dir, limit):
        print(fname)
        # FileStatistics with a display dir writes the HTML as a side effect.
        FileStatistics(os.path.join(gold_dir, fname),
                       os.path.join(system_dir, fname),
                       target_dir, display_choices)
def _collect_files(gold_dir, system_dir, limit):
"""Return the list of files to run the comparison on."""
gold_files = os.listdir(gold_dir)
system_files = os.listdir(system_dir)
# don't assume the directory content is the same, take the intersection
fnames = sorted(list(set(gold_files).intersection(set(system_files))))
# TODO: includes a hack to avoid a file, get rid of it
fnames = [f for f in fnames[:limit] if not f.endswith('wsj_0907.tml')]
return fnames
def _create_display_dir(display_dir):
    """Create the display directory and seed it with the status icons.
    Returns the (absolute) directory path, or None when none was given."""
    if display_dir is None:
        return None
    if not os.path.isabs(display_dir):
        # Resolve relative to where the script was invoked, not the cwd that
        # Tarsqi may have switched to.
        display_dir = os.path.abspath(os.path.join(EXEC_DIR, display_dir))
    if os.path.exists(display_dir):
        exit("ERROR: directory '%s' already exists" % display_dir)
    # setup the output directory
    icons_dir = os.path.join(display_dir, 'icons')
    os.makedirs(display_dir)
    os.makedirs(icons_dir)
    for icon in ('check-green.png', 'check-orange.png', 'cross-red.png'):
        shutil.copyfile(os.path.join(SCRIPT_DIR, 'icons', icon),
                        os.path.join(icons_dir, icon))
    return display_dir
def _get_annotations(tag_repository):
    """Return a dictionary of the TimeML annotations in the tag repository."""
    # TODO: is there solid motivation to use this instead of TagRepository
    # itself?
    tag_names = (EVENT, TIMEX, ALINK, SLINK, TLINK)
    annotations = {}
    for tagname in tag_names:
        annotations[tagname] = {}
    # First pass: index events and times by id so links can be resolved.
    event_idx = {}
    timex_idx = {}
    for tag in tag_repository.all_tags():
        if tag.name == EVENT:
            event_idx[tag.attrs[EIID]] = tag
        elif tag.name == TIMEX:
            timex_idx[tag.attrs[TID]] = tag
    # Second pass: key every TimeML tag on its (resolved) text offsets.
    for tag in tag_repository.all_tags():
        if tag.name not in tag_names:
            continue
        offsets = _get_offsets(tag, event_idx, timex_idx)
        if offsets is not None:
            annotations[tag.name][offsets] = tag.attrs
    return annotations
def _get_offsets(tag, event_idx, timex_idx):
    """Get begin and end offsets for the tag. For an event or time, this is a pair
    of offsets, for example (13,16). For a link, this is pair of the offsets of
    the source and target of the link, for example ((13,16),(24,29)).

    Returns None (after printing a warning) for a link whose source or target
    cannot be resolved in the indexes."""
    if tag.name not in LINK_TAGS:
        return (tag.begin, tag.end)
    # Resolve the link's source: a time id, else an event instance id.
    # Fixed: removed a dead assignment to an unused 'saved' variable here.
    id1, id1_type = tag.attrs.get(TIME_ID), TIMEX
    if id1 is None:
        id1, id1_type = tag.attrs.get(EVENT_INSTANCE_ID), EVENT
    # Resolve the link's target: time, event instance or subordinated event.
    id2, id2_type = tag.attrs.get(RELATED_TO_TIME), TIMEX
    if id2 is None:
        id2, id2_type = tag.attrs.get(RELATED_TO_EVENT_INSTANCE), EVENT
    if id2 is None:
        id2, id2_type = tag.attrs.get(SUBORDINATED_EVENT_INSTANCE), EVENT
    offsets = [_retrieve_from_index(id1, id1_type, event_idx, timex_idx),
               _retrieve_from_index(id2, id2_type, event_idx, timex_idx)]
    # Fixed: dropped a dead 'len(offsets) != 2' check -- offsets is always a
    # two-element list literal.
    if offsets[0][0] is None or offsets[1][0] is None:
        _offset_warning("cannot find source and/or target", tag, offsets)
        return None
    return tuple(offsets)
def _retrieve_from_index(identifier, tagtype, event_idx, timex_idx):
    """Look up identifier in the index for tagtype and return its
    (begin, end) offsets, or (None, None) when the identifier is unknown."""
    index = event_idx if tagtype == EVENT else timex_idx
    tag = index.get(identifier)
    if tag is None:
        return (None, None)
    return (tag.begin, tag.end)
def precision(tp, fp):
    """Precision = tp / (tp + fp); None when there are no positives."""
    retrieved = tp + fp
    return None if retrieved == 0 else tp / retrieved
def recall(tp, fn):
    """Recall = tp / (tp + fn); None when there is nothing to recall."""
    relevant = tp + fn
    return None if relevant == 0 else tp / relevant
def fscore(tp, fp, fn):
    """Harmonic mean of precision and recall; None when undefined."""
    p = precision(tp, fp)
    r = recall(tp, fn)
    if p is None or r is None or p + r == 0:
        return None
    return (2 * p * r) / (p + r)
def _as_float_string(f):
"""Takes a floating point number and returns it as a formatted string"""
return "%.2f" % f if f is not None else 'nil'
def _offset_warning(message, tag, offsets):
    """Print a warning about a link whose offsets could not be resolved."""
    for line in ("WARNING: %s" % message,
                 " %s" % offsets,
                 " %s" % tag.as_ttk_tag()):
        print(line)
def print_annotations(annotations, tag=None):
    """Print annotations to stdout, optionally restricted to one tag name."""
    for tagname in sorted(annotations):
        if tag is not None and tag != tagname:
            continue
        print("\n", tagname)
        tag_annotations = annotations[tagname]
        for offsets in sorted(tag_annotations):
            pairs = tag_annotations[offsets].items()
            rendered = ' '.join("%s=%s" % (attr, value) for attr, value in pairs)
            print(" %s %s" % (offsets, rendered))
class FileStatistics(object):
    """Gold and system annotations for one gold/system file pair, plus the
    per-tag entity and link statistics derived from them."""

    def __init__(self, gold_file, system_file,
                 display_dir=None, display_choices=None):
        # Load both documents through the Tarsqi toolkit.
        tarsqi_instance, tarsqi_doc = tarsqi.load_ttk_document(gold_file)
        self.tarsqidoc_gold = tarsqi_doc
        tarsqi_instance, tarsqi_doc = tarsqi.load_ttk_document(system_file)
        self.tarsqidoc_system = tarsqi_doc
        # Statistics are reported under the system file's name.
        self.filename = system_file
        self.gold = _get_annotations(self.tarsqidoc_gold.tags)
        self.system = _get_annotations(self.tarsqidoc_system.tags)
        # When display_dir is given, the EntityStatistics also write HTML.
        self.events = EntityStatistics(self, EVENT, display_dir, display_choices)
        self.timexes = EntityStatistics(self, TIMEX, display_dir, display_choices)
        self.alinks = LinkStatistics(self.filename, ALINK, self.gold, self.system)
        self.slinks = LinkStatistics(self.filename, SLINK, self.gold, self.system)
        self.tlinks = LinkStatistics(self.filename, TLINK, self.gold, self.system)

    def __str__(self):
        return "%s\n%s\n%s\n%s\n%s" % (self.events, self.timexes,
                                       self.alinks, self.slinks, self.tlinks)
class DirectoryStatistics(FileStatistics):
    """Aggregated statistics over all files in a directory.

    NOTE(review): inherits from FileStatistics for its interface but never
    calls its __init__; all aggregate fields are assigned directly."""

    def __init__(self, directory, statslist):
        self.filename = directory
        self.statistics = statslist
        self.events = AggregateEntityStatistics(directory, [s.events for s in statslist])
        self.timexes = AggregateEntityStatistics(directory, [s.timexes for s in statslist])
        self.alinks = AggregateLinkStatistics(directory, [s.alinks for s in statslist])
        self.slinks = AggregateLinkStatistics(directory, [s.slinks for s in statslist])
        self.tlinks = AggregateLinkStatistics(directory, [s.tlinks for s in statslist])

    def __str__(self):
        return "%s\n%s\n%s\n%s\n%s" % (
            self.events, self.timexes, self.alinks, self.slinks, self.tlinks)

    def pp(self):
        # Pretty-print the aggregate statistics to stdout.
        print("\n%s\n" % self)
class EntityStatistics(object):
    """True/false positive and false negative counts for one entity tag
    (EVENT or TIMEX) of one file, using strict exact-offset matching."""

    def __init__(self, file_statistics, tagname, display_dir, display_choices):
        """Pull the gold and system tags for tagname out of the
        FileStatistics instance and collect the tp/fp/fn counts."""
        self.filename = file_statistics.filename
        self.tagname = tagname
        self.tarsqidoc_gold = file_statistics.tarsqidoc_gold
        self.tarsqidoc_system = file_statistics.tarsqidoc_system
        self.gold_tags = file_statistics.gold[self.tagname]
        self.system_tags = file_statistics.system[self.tagname]
        self.tp = 0
        self.fp = 0
        self.fn = 0
        self._collect_counts()
        # the following code presents the differences between the gold and the
        # system, the underlying counting should probably be used for the P&R as
        # well (allowing strict versus relaxed matching, whereas the above only
        # has strict matching).
        if display_dir is not None:
            Viewer(self, display_dir, display_choices)

    def __str__(self):
        return "<Statistics %s %s tp:%s fp:%s fn:%s precision=%s recall=%s f-score=%s>" % \
            (self.tagname, self.filename, self.tp, self.fp, self.fn,
             _as_float_string(self.precision()),
             _as_float_string(self.recall()),
             _as_float_string(self.fscore()))

    def precision(self):
        return precision(self.tp, self.fp)

    def recall(self):
        return recall(self.tp, self.fn)

    def fscore(self):
        return fscore(self.tp, self.fp, self.fn)

    def _collect_counts(self):
        """Collect the counts for true positives, false positives and false
        negatives."""
        # TODO. This does not take the full-range into account and therefore
        # gives much lower numbers for cases where multi-token events were
        # imported. It also does not allow for relaxed matching.
        # Idiom fix: iterate the dicts directly instead of over .keys().
        for offsets in self.system_tags:
            if offsets in self.gold_tags:
                self.tp += 1
            else:
                self.fp += 1
        self.fn = sum(1 for offsets in self.gold_tags
                      if offsets not in self.system_tags)
class LinkStatistics(object):
    """Relation-type accuracy over the links found in both gold and system."""

    def __init__(self, filename, tagname, gold_annotations, system_annotations):
        self.filename = filename
        self.tagname = tagname
        self.gold_tags = gold_annotations[tagname]
        self.system_tags = system_annotations[tagname]
        self.overlap = self._overlap(self.gold_tags, self.system_tags)
        self.correct = 0
        self.incorrect = 0
        # A shared link is correct when both sides agree on the relation type.
        for offset in self.overlap:
            gold_rel = self.gold_tags[offset][RELTYPE]
            system_rel = self.system_tags[offset][RELTYPE]
            if gold_rel == system_rel:
                self.correct += 1
            else:
                self.incorrect += 1

    def __str__(self):
        accuracy = self.accuracy()
        astring = "nil" if accuracy is None else "%.2f" % accuracy
        return "<Statistics %s %s correct:%s incorrect:%s accuracy:%s>" % \
            (self.tagname, self.filename, self.correct, self.incorrect, astring)

    @staticmethod
    def _overlap(annotations1, annotations2):
        """Now just gets the keys that both have in common, should include links where
        source and target are reversed."""
        return [offsets for offsets in annotations1 if offsets in annotations2]

    def accuracy(self):
        """Fraction of shared links with the right relation type, or None."""
        judged = self.correct + self.incorrect
        if judged == 0:
            return None
        return self.correct / judged
class AggregateEntityStatistics(EntityStatistics):
    """Entity statistics summed over a list of per-file statistics.

    Does not call EntityStatistics.__init__; only the count fields and the
    inherited precision/recall/fscore methods are used."""

    def __init__(self, directory, statistics_list):
        self.tagname = statistics_list[0].tagname
        self.filename = directory
        self.statistics = statistics_list
        self.tp = sum(stats.tp for stats in statistics_list)
        self.fp = sum(stats.fp for stats in statistics_list)
        self.fn = sum(stats.fn for stats in statistics_list)
class AggregateLinkStatistics(LinkStatistics):
    """Link statistics summed over a list of per-file statistics.

    Does not call LinkStatistics.__init__; only the count fields and the
    inherited accuracy method are used."""

    def __init__(self, directory, statistics_list):
        self.tagname = statistics_list[0].tagname
        self.filename = directory
        self.statistics = statistics_list
        self.correct = sum(stats.correct for stats in statistics_list)
        self.incorrect = sum(stats.incorrect for stats in statistics_list)
class Viewer(object):
"""Creates the HTML files that show the differences between the entities in
two files."""
def __init__(self, entity_statistics, display_dir, display_choices):
"""Take the data from the EntityStatistics instance (which got most of those
from the FileStatistics instance)."""
self.entity_stats = entity_statistics
self.filename = entity_statistics.filename
self.tagname = entity_statistics.tagname
self.tarsqidoc_gold = entity_statistics.tarsqidoc_gold
self.tarsqidoc_system = entity_statistics.tarsqidoc_system
self.gold_tags = entity_statistics.gold_tags
self.system_tags = entity_statistics.system_tags
self.display_dir = display_dir
self.display_choices = display_choices
self._build_idxs()
self._align_tags()
self._display_aligned_tags()
def _build_idxs(self):
"""Builds indexes that store the begin and end offset of s, ng and vg
tags. In addition, it stores the end offset of a lex tag and the lex
tag's associated pos."""
self.open_idx = { 's': set(), 'ng': set(), 'vg': set() }
self.close_idx = { 's': set(), 'ng': set(), 'vg': set(), 'lex': {} }
s_tags = self.tarsqidoc_system.tags.find_tags('s')
vg_tags = self.tarsqidoc_system.tags.find_tags('vg')
ng_tags = self.tarsqidoc_system.tags.find_tags('ng')
lex_tags = self.tarsqidoc_system.tags.find_tags('lex')
open_idx = { 's': set(), 'ng': set(), 'vg': set() }
close_idx = { 's': set(), 'ng': set(), 'vg': set(), 'lex': {} }
self._update_idxs(s_tags, 's')
self._update_idxs(ng_tags, 'ng')
self._update_idxs(vg_tags, 'vg')
for lex in lex_tags:
self.close_idx['lex'][lex.end] = lex.attrs['pos']
def _update_idxs(self, tags, tagname):
for t in tags:
self.open_idx[tagname].add(t.begin)
self.close_idx[tagname].add(t.end)
def _align_tags(self):
"""Takes two lists of annotations ordered on text position and returns
them as lists of paired up annotations. Annotations will only pair up if
they overlap, if a gold or system annotation does not overlap with a
counterpart on the other side then it will be in a pair with None."""
gold = [EntityAnnotation(k, v) for k, v in self.gold_tags.items()]
system = [EntityAnnotation(k, v) for k, v in self.system_tags.items()]
# Removing duplicates also sorts the annotations
gold = self._remove_duplicates(gold)
system = self._remove_duplicates(system)
self.alignments = []
while gold or system:
if not gold:
self.alignments.append(Alignment(self, None, system.pop(0)))
elif not system:
self.alignments.append(Alignment(self, gold.pop(0), None))
elif gold[0].overlaps_with(system[0]):
self.alignments.append(Alignment(self, gold.pop(0), system.pop(0)))
elif gold[0].end < system[0].begin:
self.alignments.append(Alignment(self, gold.pop(0), None))
elif gold[0].begin > system[0].end:
self.alignments.append(Alignment(self, None, system.pop(0)))
else:
exit("ERROR: no option available, infinite loop starting...")
@staticmethod
def _remove_duplicates(annotations):
"""This is to remove duplicates from the annotations. The reason why
this was put in is that with tag import there are cases when an imported
tag spans two chunks and it will be imported into each chunk. This needs
to be fixed in the tag import of course, but in th emean time we do not
want it dilute results here. The result is sorted on text position."""
tmp = {}
for annotation in sorted(annotations):
tmp[annotation.offsets()] = annotation
return sorted(tmp.values())
def _display_aligned_tags(self):
# NOTE: when we run this we are in the ttk directory, even though we
# started in the testing subdirectory, adjust paths as needed
fname = os.path.join(self.display_dir, os.path.basename(self.filename))
fh = open("%s.%s.html" % (fname, self.tagname), 'w')
fh.write("<html>\n<head>%s</head>\n\n" % CSS)
fh.write("<body class=scores>\n\n")
fh.write("<h2>Precision and recall on this file</h2>\n\n")
self._display_p_and_r(fh)
fh.write("<h2>Aligning the key and response %s tags</h2>\n\n" % self.tagname)
self._display_legend(fh)
for alignment in self.alignments:
if self.display_choices[alignment.status]:
alignment.html(fh)
fh.write("</body>\n</html>\n")
def _display_p_and_r(self, fh):
stats = self.entity_stats
# P&R as calculated on the EntityStatistics
p1, r1, f1 = stats.precision(), stats.recall(), stats.fscore()
# P&R as calculated here, which uses the alignments array which takes
# into account the full-range attribute, so it gets much higher results
# for cases when we impoerted tags.
tp, fp, fn = self._count_matches(strict=True)
p2, r2, f2 = precision(tp, fp), recall(tp, fn), fscore(tp, fp, fn)
tp, fp, fn = self._count_matches(strict=False)
p3, r3, f3 = precision(tp, fp), recall(tp, fn), fscore(tp, fp, fn)
self._p_and_r_table(fh, ('strict', 'relaxed'), (p2, p3), (r2, r3), (f2, f3))
def _count_matches(self, strict=True):
    """Return a (tp, fp, fn) triple counted over self.alignments.

    An exact match is always a true positive; a partial match counts as a
    true positive in relaxed mode, and as both a false positive and a
    false negative in strict mode.
    """
    tp = fp = fn = 0
    for alignment in self.alignments:
        status = alignment.status
        if status == EXACT_MATCH:
            tp += 1
        elif status == PARTIAL_MATCH:
            if strict:
                fp += 1
                fn += 1
            else:
                tp += 1
        elif status == NO_MATCH_FP:
            fp += 1
        elif status == NO_MATCH_FN:
            fn += 1
    return (tp, fp, fn)
def _p_and_r_table(self, fh, headers, p_scores, r_scores, f_scores):
    """Write an HTML table to fh with one column per header and one row
    each for precision, recall and f-score."""
    fh.write("<table class=scores cellpadding=8 cellspacing=0 border=1>\n")
    # header row starts with a non-breaking space placeholder cell
    HTML.row(fh, ['&nbsp;'] + list(headers))
    for label, scores in (('precision', p_scores),
                          ('recall', r_scores),
                          ('f-score', f_scores)):
        HTML.row(fh, [label] + [_as_float_string(score) for score in scores])
    fh.write("</table>\n\n")
def _display_legend(self, fh):
    """Write the legend table (icon, description and count per alignment
    category) followed by a line showing which categories are shown."""
    def img(src): return '<img src="icons/%s.png" height=20>' % src
    fh.write("<table class=scores cellpadding=8 cellspacing=0 border=1>\n")
    # Count alignments per category in a single pass.
    counts = {EXACT_MATCH: 0, PARTIAL_MATCH: 0, NO_MATCH_FP: 0, NO_MATCH_FN: 0}
    for alignment in self.alignments:
        if alignment.status in counts:
            counts[alignment.status] += 1
    HTML.row(fh, [img("check-green"), 'exact match', counts[EXACT_MATCH]])
    HTML.row(fh, [img("check-orange"), 'partial match', counts[PARTIAL_MATCH]])
    HTML.row(fh, [img('cross-red') + 'p',
                  'mismatch, false positive (precision error)', counts[NO_MATCH_FP]])
    HTML.row(fh, [img('cross-red') + 'r',
                  'mismatch, false negative (recall error)', counts[NO_MATCH_FN]])
    fh.write("</table>\n")
    icons = {EXACT_MATCH: img('check-green'),
             PARTIAL_MATCH: img('check-orange'),
             NO_MATCH_FP: img('cross-red') + 'p',
             NO_MATCH_FN: img('cross-red') + 'r'}
    showing = [icons[choice]
               for choice in DISPLAY_CHOICES
               if self.display_choices[choice] is True]
    fh.write("<p class=bordered>Showing: %s</p>\n"
             % '&nbsp;'.join(showing))
class EntityAnnotation(object):
    """Simple interface for an entity annotation.

    begin/end default to the given offsets (the head span). When the
    attributes carry a 'full-range' value of the form "<begin>-<end>",
    begin/end are widened to that range while begin_head/end_head keep the
    original head offsets, which are used as a sorting tie-breaker.
    """

    def __init__(self, offsets, attrs):
        self.begin = offsets[0]
        self.end = offsets[1]
        # we keep these around so we can use them for sorting
        self.begin_head = self.begin
        self.end_head = self.end
        self.attrs = attrs
        full_range = self.attrs.get('full-range')
        if full_range is not None:
            begin, end = full_range.split('-')
            self.begin = int(begin)
            self.end = int(end)
        self.tarsqidoc = None  # filled in later by the Alignment instance

    def __str__(self):
        return "<EntityAnnotation %s:%s %s>" % (self.begin, self.end, self.attrs)

    def _key(self):
        # Sort order: begin, then end, then head begin as the tie-breaker.
        # All six rich comparisons and _compare derive from this one tuple,
        # replacing the old hand-rolled cmp-style boilerplate.
        return (self.begin, self.end, self.begin_head)

    def __eq__(self, other):
        return self._key() == other._key()

    def __ne__(self, other):
        return self._key() != other._key()

    def __lt__(self, other):
        return self._key() < other._key()

    def __le__(self, other):
        return self._key() <= other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self._key() >= other._key()

    def _compare(self, other):
        """Return -1/0/1 following the old cmp() convention (kept for any
        callers of the previous implementation)."""
        return (self._key() > other._key()) - (self._key() < other._key())

    def overlaps_with(self, other):
        """True if the two annotations share at least one character."""
        return not (self.end <= other.begin or other.end <= self.begin)

    def has_same_span(self, other):
        """True if begin and end offsets are identical."""
        return self.begin == other.begin and self.end == other.end

    def offsets(self):
        """Return the (begin, end) pair, full range if one was given."""
        return (self.begin, self.end)
class Alignment(object):
    """Pairs a gold annotation with a system annotation (either may be None)
    and classifies the pair: EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP
    (precision error) or NO_MATCH_FN (recall error). Also renders the pair
    as an HTML fragment for the visual diff pages."""

    def __init__(self, entitystats, gold_annotation, system_annotation):
        # Source documents are kept so text spans can be pulled out later.
        self.tarsqidoc_gold = entitystats.tarsqidoc_gold
        self.tarsqidoc_system = entitystats.tarsqidoc_system
        self.gold_annotation = gold_annotation
        self.system_annotation = system_annotation
        # Tag open/close position indexes; used by _get_tagged_fragment.
        # NOTE(review): assumed to map tag names ('s', 'ng', 'vg', 'lex') to
        # offset collections -- confirm against EntityStatistics.
        self.open_idx = entitystats.open_idx
        self.close_idx = entitystats.close_idx
        if gold_annotation is not None:
            self.gold_annotation.tarsqidoc = self.tarsqidoc_gold
        if system_annotation is not None:
            self.system_annotation.tarsqidoc = self.tarsqidoc_system
        # Classification: missing gold means the system over-generated
        # (false positive); missing system means a miss (false negative);
        # identical spans are exact, anything else counts as partial.
        if self.gold_annotation is None:
            self.status = NO_MATCH_FP
        elif self.system_annotation is None:
            self.status = NO_MATCH_FN
        elif self.gold_annotation.has_same_span(self.system_annotation):
            self.status = EXACT_MATCH
        else:
            self.status = PARTIAL_MATCH

    def html(self, fh):
        """Write this alignment to fh as an HTML table with the status icon,
        the gold and system spans, the raw text context and a fragment with
        chunk/sentence tags inlined."""
        def oneliner(text):
            # collapse all whitespace runs into single spaces
            return ' '.join(text.strip().split())
        image = self._get_status_image()
        p1, p2, text_span = self._get_span()
        span1 = self._get_span_with_entity(p1, text_span, self.gold_annotation)
        span2 = self._get_span_with_entity(p1, text_span, self.system_annotation)
        text = text_span.replace("\n", "<br/>")
        tagged_fragment = self._get_tagged_fragment(p1, p2, text_span)
        fh.write("<table cellpadding=5 cellspacing=4>\n\n")
        fh.write("<tr>\n")
        fh.write("  <td valign=top width=40>%s</td>\n" % image)
        fh.write("  <td class=bordered>\n")
        fh.write("    <span class=entity_span><i>%s:%s</i></span><br/>\n" % (p1, p2))
        fh.write("    <span class=entity_span>%s</span><br/>\n" % oneliner(span1))
        fh.write("    <span class=entity_span>%s</span>\n" % oneliner(span2))
        fh.write("  </td>\n")
        fh.write("</tr>\n\n")
        fh.write("<tr>\n")
        fh.write("  <td valign=top>&nbsp;</td>\n")
        fh.write("  <td class=bordered>%s</td>\n" % text)
        fh.write("</tr>\n\n")
        fh.write("<tr>\n")
        fh.write("  <td valign=top>&nbsp;</td>\n")
        fh.write("  <td class=bordered>%s</td>\n" % tagged_fragment)
        fh.write("</tr>\n\n")
        fh.write("</table>\n\n")

    def _get_status_image(self):
        # Returns the icon markup for this alignment's category.
        # NOTE(review): falls through to an implicit None for any other
        # status value -- confirm all statuses are covered.
        if self.status == EXACT_MATCH:
            return '<img src="icons/check-green.png" height=20>'
        elif self.status == PARTIAL_MATCH:
            return '<img src="icons/check-orange.png" height=20>'
        elif self.status == NO_MATCH_FP:
            return '<img src="icons/cross-red.png" height=20>p'
        elif self.status == NO_MATCH_FN:
            return '<img src="icons/cross-red.png" height=20>r'

    def _get_span(self):
        """Return (begin, end, text) covering both annotations plus ~50
        characters of context on each side, clipped to document bounds."""
        offsets = []
        for annotation in self.gold_annotation, self.system_annotation:
            if annotation is not None:
                offsets.extend([annotation.begin, annotation.end])
        offsets.sort()
        span_begin = offsets[0] - 50
        span_end = offsets[-1] + 50
        if span_begin < 0:
            span_begin = 0
        if span_end > len(self.tarsqidoc_gold.sourcedoc.text):
            span_end = len(self.tarsqidoc_gold.sourcedoc.text) -1
        return (span_begin, span_end,
                self.tarsqidoc_gold.sourcedoc[span_begin:span_end])

    def _get_span_with_entity(self, p1, text_span, annotation):
        """Return text_span with <entity> markers wrapped around the part
        covered by annotation; p1 is the document offset of text_span[0]."""
        if annotation is None:
            return text_span
        else:
            # translate document offsets into span-relative offsets
            a1 = annotation.begin - p1
            a2 = annotation.end - p1
            return "%s<entity>%s</entity>%s" \
                % (text_span[:a1], text_span[a1:a2], text_span[a2:])

    def _get_tagged_fragment(self, p1, p2, text):
        """Return the span text with sentence (s), chunk (ng/vg) and lex tag
        markers inlined, using the precomputed open/close offset indexes."""
        def tag(cl, text): return "<sup class=%s>%s</sup>" % (cl, text)
        def brc(cl, bracket): return "<span class=%s>%s</span>" % (cl, bracket)
        output = StringIO()
        for i in range(0, p2-p1):
            # i is span-relative, i_adjusted is the document offset
            i_adjusted = i + p1
            if i_adjusted in self.open_idx['s']:
                output.write('%s%s' % (tag('s', 's'), brc('sbracket', '[')))
            if i_adjusted in self.open_idx['ng']:
                output.write('%s%s' % (tag('chunk', 'ng'), brc('bracket', '[')))
            if i_adjusted in self.open_idx['vg']:
                output.write('%s%s' % (tag('chunk', 'vg'), brc('bracket', '[')))
            output.write(text[i])
            # closing markers fire on the offset just past the character
            if i_adjusted + 1 in self.close_idx['lex']:
                output.write(tag('lex', self.close_idx['lex'][i_adjusted + 1]))
            if i_adjusted + 1 in self.close_idx['ng']:
                output.write('%s%s' % (brc('bracket', ']'), tag('chunk', 'ng')))
            if i_adjusted + 1 in self.close_idx['vg']:
                output.write('%s%s' % (brc('bracket', ']'), tag('chunk', 'vg')))
            if i_adjusted + 1 in self.close_idx['s']:
                output.write('%s%s' % (brc('sbracket', ']'), tag('s', 's')))
        return output.getvalue()
class HTML(object):
    """Utility class for printing HTML to a file handle."""

    @classmethod
    def row(cls, fh, elements):
        """Write *elements* as one table row to fh; integer cells are
        right-aligned. (The first parameter of this classmethod was
        misleadingly named `self`; renamed to `cls`.)"""
        fh.write("<tr>\n")
        for e in elements:
            align = ' align=right' if isinstance(e, int) else ''
            fh.write("  <td%s>%s\n" % (align, e))
        fh.write("</tr>\n")
if __name__ == '__main__':
    # Long getopt options only: 'run', 'comp' and 'diff' select the action,
    # the '='-suffixed options take a value.
    options = ['run', 'comp' , 'diff',
               'gold=', 'system=', 'out=', 'display=', 'limit=']
    (opts, args) = getopt.getopt(sys.argv[1:], '', options)
    opts = { k:v for k,v in opts }
    # NOTE(review): --gold and --system are effectively required;
    # os.path.abspath(None) raises TypeError when either is missing.
    gold = os.path.abspath(opts.get('--gold'))
    system = os.path.abspath(opts.get('--system'))
    # --limit caps how many files are processed; default is "no limit".
    limit = int(opts.get('--limit', sys.maxsize))
    out = opts.get('--out')
    display = opts.get('--display')
    # --display takes a comma-separated subset of category names; when it is
    # absent all four alignment categories are shown.
    display_categories = [EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP, NO_MATCH_FN]
    if display is None:
        display_choices = { c:True for c in display_categories }
    else:
        display_choices = { c:False for c in display_categories }
        for choice in display.split(','):
            display_choices[choice] = True
    if '--run' in opts:
        create_system_files_from_gold_standard(gold, system, limit)
    elif '--comp' in opts:
        compare_dirs(gold, system, limit)
    elif '--diff' in opts:
        view_differences(gold, system, out, display_choices, limit)
| StarcoderdataPython |
3215456 | <filename>lib/ansible/modules/cloud/alicloud/_ali_eni_facts.py
#!/usr/bin/python
# Copyright (c) 2017 Alibaba Group Holding Limited. <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
# Ensure new-style classes when run under Python 2 (no-op on Python 3).
__metaclass__ = type

# Standard Ansible module metadata consumed by ansible-doc and build tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_eni_facts
short_description: Gather facts about ENI interfaces in Alibaba Cloud
description:
- Gather facts about ENI interfaces in Alibaba Cloud
version_added: "2.8.0"
options:
eni_ids:
description:
- A list of ENI IDs that exist in your account.
aliases: ['ids']
name_prefix:
description:
- Use a name prefix to filter network interfaces.
tags:
description:
- A hash/dictionaries of network interface tags. C({"key":"value"})
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/58512.htm) for parameter details.
Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dashes ("-") to
connect different words in one parameter. 'Tag.n.Key' and 'Tag.n.Value' should be a dict and using 'tags' instead.
'NetworkInterfaceId.N' will be appended to I(eni_ids) automatically.
author:
- "<NAME> (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark >= 1.8.0"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
# Gather facts about all ENIs
- ali_eni_facts:
# Gather facts about a particular ENI
- ali_eni_facts:
eni_ids:
- eni-xxxxxxx
- eni-yyyyyyy
filters:
type: Secondary
# Gather facts about a particular ENI
- ali_eni_facts:
filters:
network_interface_name: my-test-eni
type: Secondary
# Gather facts based on vpc and name_prefix
- ali_eni_facts:
name_prefix: foo
filters:
vswitch_id: vpc-dsfh2ef2
'''
RETURN = '''
interfaces:
description: List of matching elastic network interfaces
returned: always
type: complex
contains:
associated_public_ip:
description: The public IP address associated with the ENI.
type: string
sample: 172.16.58.3
zone_id:
description: The availability zone of the ENI is in.
returned: always
type: string
sample: cn-beijing-a
name:
description: interface name
type: string
sample: my-eni
creation_time:
description: The time the eni was created.
returned: always
type: string
sample: "2018-06-25T04:08Z"
description:
description: interface description
type: string
sample: My new network interface
security_groups:
description: list of security group ids
type: list
sample: [ "sg-f8a8a9da", "sg-xxxxxx" ]
network_interface_id:
description: network interface id
type: string
sample: "eni-123456"
id:
description: network interface id (alias for network_interface_id)
type: string
sample: "eni-123456"
instance_id:
description: Attached instance id
type: string
sample: "i-123456"
mac_address:
description: interface's physical address
type: string
sample: "00:00:5E:00:53:23"
private_ip_address:
description: primary ip address of this interface
type: string
sample: 10.20.30.40
private_ip_addresses:
description: list of all private ip addresses associated to this interface
type: list of dictionaries
sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
state:
description: network interface status
type: string
sample: "pending"
vswitch_id:
description: which vswitch the interface is bound
type: string
sample: vsw-b33i43f3
vpc_id:
description: which vpc this network interface is bound
type: string
sample: vpc-cj3ht4ogn
type:
description: type of the ENI
type: string
sample: Secondary
tags:
description: Any tags assigned to the ENI.
returned: always
type: dict
sample: {}
ids:
description: List of elastic network interface IDs
returned: always
type: list
sample: [eni-12345er, eni-3245fs]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
# footmark is the Alibaba Cloud SDK this module relies on. Its absence is
# recorded here rather than crashing at import time, so main() can report a
# proper structured error via fail_json().
HAS_FOOTMARK = False

try:
    from footmark.exception import ECSResponseError
    HAS_FOOTMARK = True
except ImportError:
    HAS_FOOTMARK = False
def main():
    """Gather ENI facts from Alibaba Cloud and exit via Ansible's JSON
    protocol (exit_json on success, fail_json on any error)."""
    argument_spec = ecs_argument_spec()
    argument_spec.update(dict(
        eni_ids=dict(type='list', aliases=['ids']),
        name_prefix=dict(),
        tags=dict(type='dict'),
        filters=dict(type='dict'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_FOOTMARK:
        module.fail_json(msg='footmark required for the module ali_eni_facts')

    # Both parameters may come in as None; normalize to empty containers.
    filters = module.params["filters"] or {}
    eni_ids = module.params["eni_ids"] or []

    # Fold NetworkInterfaceId.N-style filter keys into eni_ids so callers can
    # use either mechanism interchangeably; iterate over a copy because the
    # dict may be mutated below.
    for key, value in list(filters.items()):
        if str(key).startswith(("NetworkInterfaceId",
                                "network_interface_id",
                                "network-interface-id")):
            if value not in eni_ids:
                eni_ids.append(value)
    if eni_ids:
        filters['network_interface_ids'] = eni_ids

    name_prefix = module.params["name_prefix"]
    if module.params['tags']:
        filters['tags'] = module.params['tags']

    interfaces = []
    ids = []
    try:
        for eni in ecs_connect(module).describe_network_interfaces(**filters):
            # name_prefix is applied client-side since the API offers no
            # prefix filter.
            if name_prefix and not str(eni.name).startswith(name_prefix):
                continue
            interfaces.append(eni.read())
            ids.append(eni.id)
        module.exit_json(changed=False, ids=ids, interfaces=interfaces)
    except Exception as e:
        # Broad catch is deliberate: any SDK failure must surface through
        # fail_json so Ansible reports a structured error.
        module.fail_json(msg="Unable to get network interfaces, error:{0}".format(e))
# Entry point when Ansible executes this module as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3291124 | <filename>small/GCD.py
#calcolare il massimo comune divisore alcuni numeri
import sys
def Split(str, a):
    """Split *str* on every separator in *a* (a sequence of strings).

    Works by successively rewriting each separator into the next one and
    finally splitting on the last separator, which is equivalent to the
    original recursive join/split formulation.
    """
    text = str
    for current_sep, next_sep in zip(a, a[1:]):
        text = next_sep.join(text.split(current_sep))
    return text.split(a[-1])
def MCD(primo_numero, secondo_numero):
    """Return the greatest common divisor of the two numbers using an
    iterative version of Euclid's algorithm."""
    a, b = primo_numero, secondo_numero
    while a % b != 0:
        a, b = b, a % b
    return b
stringa_numeri = input('Enter the numbers (let a space or a comma between the numbers): ')
# Convert every token to int in a single pass. The old code removed and
# re-added elements while iterating the set, which is invalid in Python 3
# (the iterator can raise RuntimeError or skip/revisit elements after a
# rehash, possibly leaving unconverted strings in the set). Empty tokens
# (e.g. from a double space) are skipped instead of crashing int('').
numeri = {int(token) for token in Split(stringa_numeri, (' ', ',')) if token}
# gcd(0, x) == x, so zeros never affect the result; drop them up front.
numeri.discard(0)
if not numeri:
    sys.exit('The GCD is 0')
cnumeri = numeri.copy()
primo_numero = cnumeri.pop()
# Fold the GCD over the remaining numbers. (For a single input number the
# loop body never runs and that number is printed, matching the old
# behaviour, whose special-case branch was unreachable anyway.)
while cnumeri:
    primo_numero = MCD(primo_numero, cnumeri.pop())
print('The Greatest Common Divisor is', primo_numero)
| StarcoderdataPython |
3284157 | import model
import dataclasses
import asyncio
import struct
import typing
import ujson
import time
@dataclasses.dataclass
class Response:
"""The format of a Minecraft server response packet."""
version_name: str
version_protocol: int
player_max: int
players_online: int
sample: list[dict[str, str]] | None
motd: str
favicon: str
# Forge-specific?
modded_type: str | None
mods: list[dict[str, str]] | None
@classmethod
def transform(cls, data: dict) -> "Response":
"""Transform a raw response into an instance of `Response`."""
description = data["description"]
if isinstance(description, dict):
motd = description.get("text", "")
motd += "".join(e["text"] for e in description.get("extra", []))
else:
motd = description
player_metadata = data["players"]
sample = player_metadata.get("sample", None)
favicon = data.get("favicon", None)
mod_metadata = data.get("modinfo", None)
modded = mod_metadata["type"] if mod_metadata is not None else None
mods = mod_metadata["modList"] if mod_metadata is not None else None
return cls(
version_name=data["version"]["name"],
version_protocol=data["version"]["protocol"],
player_max=data["players"]["max"],
players_online=data["players"]["online"],
sample=sample,
motd=motd,
favicon=favicon,
modded_type=modded,
mods=mods
)
class Packet:
    """Builders for the client-side packets used by Server List Ping."""

    @classmethod
    def string(cls, characters: str) -> bytes:
        """Encode a protocol String: VarInt byte-length prefix + UTF-8 bytes.

        BUG FIX: the length prefix must count UTF-8 *bytes*, not characters;
        the old code used len(characters), which under-counts for any
        non-ASCII text (e.g. internationalized hostnames).
        """
        encoded = characters.encode("utf-8")
        return VarInt.encode(len(encoded)) + encoded

    @classmethod
    def encapsulate(cls, packet_id: bytes, data: bytes) -> bytes:
        """Frame `packet_id` + `data` with a VarInt total-length prefix."""
        combined = packet_id + data
        length = VarInt.encode(len(combined))
        return length + combined

    @classmethod
    def handshake(cls, address: str, port: int, protocol: int = 757) -> bytes:
        """Create a handshake packet requesting the status state.

        `protocol` is now a parameter (default 757, as hard-coded before)
        so other client versions can be advertised without changing callers.
        """
        protocol_version = VarInt.encode(protocol)
        server_address = cls.string(address)
        server_port = struct.pack("!H", port)  # unsigned big-endian port
        next_state = VarInt.encode(1)          # 1 = status state

        data = protocol_version + server_address + server_port + next_state
        return cls.encapsulate(b"\x00", data)

    @classmethod
    def request(cls) -> bytes:
        """Create an (empty-payload) status request packet."""
        return cls.encapsulate(b"\x00", b"")

    @classmethod
    def ping(cls) -> bytes:
        """Create a ping packet carrying the current UNIX time in ms."""
        unix_time = time.time_ns() // 1000000
        converted = struct.pack("!Q", unix_time)
        return cls.encapsulate(b"\x01", converted)
class VarInt:
    """Minimal codec for the Minecraft protocol's variable-length integers
    (7 data bits per byte, high bit set on all but the final byte)."""

    @classmethod
    def encode(cls, value: int) -> bytes:
        """Create a variable-length encoded integer from a Python integer."""
        encoded = bytearray()
        remaining = value
        # Emit 7 bits at a time with the continuation bit set, until the
        # rest of the value fits in a single 7-bit byte.
        while (remaining & ~0x7F) != 0:
            encoded.append((remaining & 0x7F) | 0x80)
            remaining >>= 7
        encoded.append(remaining)
        return bytes(encoded)

    @classmethod
    def decode(cls, bytestream: bytes, *, count: bool = False) -> typing.Union[tuple[int, int], int]:
        """Create a Python integer from a variable-length encoded integer.

        With count=True the return value is (value, bytes_consumed);
        bytes_consumed stays 0 when the stream ends before a terminating
        byte (one without the continuation bit) is seen.
        """
        value = 0
        bytecount = 0
        position = 0
        for byte in bytestream:
            value |= (byte & 0x7F) << (position * 7)
            position += 1
            if (byte & 0x80) != 0x80:
                bytecount = position
                break
        return (value, bytecount) if count else value
class Backend:
    @classmethod
    async def ping(cls, address: str, port: int) -> Response:
        """Use Minecraft's Server List Ping feature to query `address` and
        return the parsed status `Response`.

        The connection is always closed, even when a write or read raises
        (the old code leaked the writer on any failure).
        """
        reader, writer = await asyncio.open_connection(address, port)
        try:
            # Send a Handshake packet (next state = status).
            writer.write(Packet.handshake(address, port))
            await writer.drain()

            # Send a Request packet (was an inlined duplicate of
            # Packet.request(); use the helper for consistency).
            writer.write(Packet.request())
            await writer.drain()

            # Send a Ping packet so the server returns its response faster.
            # The server seems to just wait for a timeout otherwise.
            writer.write(Packet.ping())
            await writer.drain()

            # Await the server's response.
            response = await reader.read()
        finally:
            writer.close()
            await writer.wait_closed()

        # Packet layout: VarInt total length, VarInt packet id,
        # VarInt JSON length, then the JSON payload itself.
        _response_len, offset_one = VarInt.decode(response, count=True)
        _response_id, offset_two = VarInt.decode(response[offset_one:], count=True)
        json_len, offset_three = VarInt.decode(response[offset_one + offset_two:], count=True)

        total_offset = offset_one + offset_two + offset_three
        json_data = response[total_offset:total_offset + json_len]
        json_data = ujson.loads(json_data)
        return Response.transform(json_data)
def setup(bot: model.Bakerbot) -> None:
    """Extension entry point; this module has nothing to register."""
    pass
| StarcoderdataPython |
1624627 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-05-06 09:25
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Recreates Photo.tag as a ForeignKey to rango.Tag.

    The remove/add pair is how Django's autodetector expresses a field
    being replaced in place -- presumably the field's type changed; confirm
    against migration 0012 and the model history.
    """

    dependencies = [
        ('rango', '0012_remove_photo_location'),
    ]

    operations = [
        # Drop the previous 'tag' field first ...
        migrations.RemoveField(
            model_name='photo',
            name='tag',
        ),
        # ... then add it back as a non-null FK. preserve_default=False
        # means any one-off default supplied during migration is not kept
        # in the schema.
        migrations.AddField(
            model_name='photo',
            name='tag',
            field=models.ForeignKey(to='rango.Tag',
                on_delete=django.db.models.deletion.CASCADE),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
4822243 | # Django settings for expression_data project.
# Machine-specific settings (e.g. SECRET_KEY, DATABASES, DEBUG) live in an
# optional localsettings module; its absence is tolerated so this file still
# imports, but the required settings must then be provided elsewhere.
try:
    from localsettings import *
except ImportError:
    pass
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'expression_data.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'expression_data.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    'data/templates',
    'experiments/templates',
    'genes/templates',
    # BUG FIX: was 'reserachers/templates' -- the app (see INSTALLED_APPS)
    # is 'researchers', so the misspelled directory could never match.
    # NOTE(review): confirm the on-disk directory is not itself misspelled.
    'researchers/templates',
    'expression_data/templates',
)
# Profile model for User.get_profile(). NOTE(review): this setting was
# deprecated in Django 1.5 and removed in 1.7 -- fine for the old Django
# this project targets (it still uses 'south'), but it will be ignored on
# newer versions.
AUTH_PROFILE_MODULE = 'researchers.Researcher'

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    'django.contrib.admindocs',
    'braces',
    'south',  # schema migrations for pre-1.7 Django
    # project apps
    'data',
    'experiments',
    'genes',
    'researchers'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| StarcoderdataPython |
4836233 | #!/usr/bin/python3
import re
from app.views import debug, PARSER_DEBUG
# NOTE(review): this deliberately (?) overrides the PARSER_DEBUG just
# imported from app.views, disabling parser-local debug output by default.
PARSER_DEBUG = False

# Strips everything up to and including the "Ports:" label in an -oG line.
NMAP_PORTS = re.compile(".*Ports:\s")
# Field separator between the sections of a greppable output line.
NMAP_TAB = re.compile("\t")
# Captures the IPv4 address (group 1) and reverse-DNS name (group 2) from
# "Host: <ip> (<name>)".
NMAP_HOST = re.compile("Host:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+\((.*?)\)")
# Parses nmap greppable format in an object
# Parses one service entry from nmap's greppable (-oG) "Ports:" field.
class NMService():
    """One nmap service record in the raw form
    port/state/protocol/owner/name/rpc-info/version."""

    _FIELDS = ('port', 'state', 'protocol', 'owner', 'name',
               'rpc_info', 'version')

    def __init__(self, line = None):
        # start out with every field unset
        for field in self._FIELDS:
            setattr(self, field, None)
        if line is not None:
            parts = line.split("/")
            if len(parts) > 6:
                (self.port, self.state, self.protocol, self.owner,
                 self.name, self.rpc_info, self.version) = parts[:7]
            else:
                debug("Error parsing services in nmap Format, received:"
                      + str(len(parts)) + "/7 for line: " + str(line) + "\n")

    def __str__(self):
        return ":".join(part.lower()
                        for part in (self.port, self.protocol, self.name))

    def getList(self):
        """Return the record as a plain dict (rpc_info is exposed as 'info')."""
        return {'port': self.port, 'state': self.state,
                'protocol': self.protocol, 'owner': self.owner,
                'name': self.name, 'info': self.rpc_info,
                'version': self.version}

    def match(self, Other):
        """True when every field of Other equals the corresponding field here."""
        return all(getattr(self, field) == getattr(Other, field)
                   for field in self._FIELDS)
# Similar to NMService, but represents a whole host line and aggregates
# multiple NMService objects.
class NMHost:
    """One host line from nmap's greppable (-oG) output.

    Accepts NMHost(line) or NMHost(line, name). Parses the host's IPv4
    address, reverse-DNS name and the "Ports:" service list into NMService
    objects stored in self.services.
    """

    def __init__(self, *args):
        self.line = None
        self.name = None
        if (len(args)>=1):
            self.line = args[0]
        if (len(args)>=2):
            self.name = args[1]
        self.services = None
        self.info = ""
        self.ipv4 = ""
        self.nname = ""
        self.full_ports = "" #same as info
        if (self.line is not None):
            #First TAB contains the Hostname and IP address
            Finding = NMAP_TAB.split(self.line)
            debug(Finding)
            # NOTE(review): NMAP_HOST.match() returns None for a line that
            # does not look like "Host: <ip> (<name>)", which would raise
            # AttributeError on the next line -- confirm callers pre-filter.
            Host = NMAP_HOST.match(Finding[0])
            self.nname = Host.group(2).strip()
            self.ipv4 = Host.group(1)
            # when no explicit name was given, prefer the DNS name and fall
            # back to the IP address
            if self.name is None:
                if self.nname != "":
                    self.name = self.nname
                else:
                    self.name = self.ipv4
            Finding = NMAP_PORTS.split(Finding[1])
            #Ignored States have another TAB
            Finding = NMAP_TAB.split(Finding[1])
            RPorts = Finding[0].rstrip('\n')
            if len(RPorts)>1:
                self.info = RPorts
                self.full_ports = RPorts
                # individual services are comma-space separated
                temp_array = self.info.split(", ")
                self.services = []
                debug("Data in temp_array:"+str(temp_array)+"\n")
                for service in temp_array:
                    SOBJ = NMService(service)
                    # NMService leaves name as None when parsing failed
                    if SOBJ.name is not None:
                        self.services.append(SOBJ)
                    else:
                        debug("Parsing the service line returns an empty object, skipping:"+service+"\n")
            else:
                debug("In Parsing the host line, we got an empty line, skipping:"+str(self.line)+":"+str(len(args))+"\n")

    def __str__(self):
        return self.name+":"+self.info

    def getList(self):
        """Return a plain-dict summary (services as the raw ports string)."""
        return {'name':self.name, 'nname':self.nname, 'ipv4':self.ipv4, 'services':self.info}
| StarcoderdataPython |
n = int(input())
# Alternating sum of 1..n: odd numbers are added, even numbers subtracted.
# The original kept separate even/odd sums and special-cased an even sum of
# zero, but that branch printed sum_odd, which equals sum_odd - 0 -- i.e.
# the same value as the general branch -- so one running total suffices.
total = 0
for i in range(1, n + 1):
    if i % 2 == 0:
        total -= i
    else:
        total += i
print(total)
| StarcoderdataPython |
5302 | <reponame>waschag-tvk/pywaschedv
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import (
User,
)
from wasch.models import (
Appointment,
WashUser,
WashParameters,
# not models:
AppointmentError,
StatusRights,
)
from wasch import tvkutils, payment
class WashUserTestCase(TestCase):
    """Tests for the special `god` WashUser account."""

    def test_god(self):
        """The god user is activated, is staff, is a superuser and belongs
        to every group granted by status level 9."""
        god, _ = WashUser.objects.get_or_create_god()
        self.assertTrue(god.isActivated)
        self.assertTrue(god.user.is_staff)
        self.assertTrue(god.user.is_superuser)
        # BUG FIX: group_names was a generator expression; each assertIn
        # consumed part of it, so every iteration after the first checked
        # membership against a partially-exhausted iterator and could fail
        # for group names that are actually present. A list is re-iterable.
        group_names = [group.name for group in god.user.groups.all()]
        for expected_group in StatusRights(9).groups:
            self.assertIn(expected_group, group_names)
class AppointmentTestCase(TestCase):
exampleUserName = 'waschexample'
examplePoorUserName = 'poor'
exampleTime = Appointment.manager.scheduled_appointment_times()[-1]
exampleTooOldTime = timezone.make_aware(datetime.datetime(1991, 12, 25))
exampleTooOldReference = 4481037
exampleMachine, exampleBrokenMachine, lastMachine = \
tvkutils.get_or_create_machines()[0]
def setUp(self):
tvkutils.setup()
self.exampleMachine.isAvailable = True # though this is default
self.exampleMachine.save()
self.exampleBrokenMachine.isAvailable = False
self.exampleMachine.save()
WashUser.objects.create_enduser(self.exampleUserName, isActivated=True)
WashUser.objects.create_enduser(
self.examplePoorUserName, isActivated=False)
def _createExample(self):
user = User.objects.get(username=self.exampleUserName)
return Appointment.objects.create(
time=self.exampleTime, machine=self.exampleMachine, user=user,
wasUsed=False)
def test_create(self):
result = self._createExample()
self.assertEqual(result.time, self.exampleTime)
self.assertEqual(result.machine, self.exampleMachine)
self.assertEqual(result.user.username, self.exampleUserName)
self.assertTrue(Appointment.manager.appointment_exists(
result.time, result.machine))
self.assertFalse(Appointment.manager.bookable(
result.time, result.machine, result.user))
self.assertEqual(
Appointment.manager.why_not_bookable(
result.time, result.machine, result.user),
41, # Appointment taken
)
result.cancel()
self.assertTrue(Appointment.manager.bookable(
result.time, result.machine, result.user))
def test_bookable(self):
user = User.objects.get(username=self.exampleUserName)
poorUser = User.objects.get(username=self.examplePoorUserName)
god, _ = WashUser.objects.get_or_create_god()
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTime, self.exampleMachine, poorUser),
31, # User not active
)
self.assertTrue(Appointment.manager.bookable(
self.exampleTime, self.exampleMachine, user))
self.assertTrue(Appointment.manager.bookable(
self.exampleTime, self.exampleMachine, god.user))
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTooOldTime, self.exampleMachine, user),
11, # Unsupported time
)
unsavedTooOldAppointment = Appointment.from_reference(
self.exampleTooOldReference, user)
self.assertEqual(self.exampleTooOldReference, Appointment(
time=self.exampleTooOldTime, machine=self.exampleMachine,
user=user).reference)
self.assertEqual(unsavedTooOldAppointment.time, self.exampleTooOldTime)
self.assertEqual(unsavedTooOldAppointment.machine, self.exampleMachine)
self.assertEqual(
unsavedTooOldAppointment.user.username, self.exampleUserName)
self.assertEqual(
unsavedTooOldAppointment.reference, self.exampleTooOldReference)
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTime, self.exampleBrokenMachine, user),
21, # Machine out of service
)
def test_make_appointment(self):
user = User.objects.get(username=self.exampleUserName)
god, _ = WashUser.objects.get_or_create_god()
appointment = Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
reference = appointment.reference
self.assertEqual(
Appointment.manager.why_not_bookable(
self.exampleTime, self.exampleMachine, god.user),
41, # Appointment taken
)
with self.assertRaises(AppointmentError) as ae:
Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
self.assertEqual(ae.exception.reason, 41)
appointment.cancel()
self.assertEqual(
appointment,
Appointment.manager.filter_for_reference(reference).get())
WashParameters.objects.update_value('bonus-method', 'empty')
self.assertTrue(Appointment.manager.bookable(
self.exampleTime, self.exampleMachine, user))
with self.assertRaises(payment.PaymentError):
Appointment.manager.make_appointment(
self.exampleTime, self.exampleMachine, user)
    def test_use(self):
        """A used appointment can be neither used again, rebooked nor
        cancelled, and reports wasUsed."""
        user = User.objects.get(username=self.exampleUserName)
        appointment = Appointment.manager.make_appointment(
            self.exampleTime, self.exampleMachine, user)
        appointment.use()
        with self.assertRaises(AppointmentError) as ae:
            appointment.use()
        self.assertEqual(ae.exception.reason, 61)  # Appointment already used
        with self.assertRaises(AppointmentError) as ae:
            appointment.rebook()
        self.assertEqual(ae.exception.reason, 41)  # Appointment taken
        with self.assertRaises(AppointmentError) as ae:
            appointment.cancel()
        self.assertEqual(ae.exception.reason, 61)  # Appointment already used
        self.assertTrue(appointment.wasUsed)
| StarcoderdataPython |
1714941 | <reponame>lyric-com/idol<gh_stars>0
# DO NOT EDIT
# This file was generated by idol_py, any changes will be lost when idol_py is rerun again
from typing import MutableMapping
from .test_atleast_one import (
    TestsBasicTestAtleastOne as CodegenTestsBasicTestAtleastOne,
)
from ...__idol__ import Map

# Static alias: type checkers see TestsBasicTestMap as str -> AtleastOne mapping.
TestsBasicTestMap = MutableMapping[str, CodegenTestsBasicTestAtleastOne]
# At runtime the name is rebound to the idol Map wrapper.
# NOTE(review): the extra locals() assignment duplicates the line above; it
# appears to be a quirk of the idol_py generator -- per the header, do not
# hand-edit; confirm against the generator before simplifying.
TestsBasicTestMap = Map.of(CodegenTestsBasicTestAtleastOne, dict(atleast_one=False))
locals()["TestsBasicTestMap"] = Map.of(
    CodegenTestsBasicTestAtleastOne, dict(atleast_one=False)
)
| StarcoderdataPython |
4825333 | <gh_stars>0
from datetime import datetime
from typing import Optional
from bson import ObjectId
from pydantic import BaseModel, EmailStr, Field
from .pyobject_id import PyObjectId
class UserModel(BaseModel):  # pylint: disable=too-few-public-methods
    """
    User model for database documents.

    Field names map directly to MongoDB document keys; ``id`` is serialized
    under the ``_id`` alias as an ObjectId.
    """
    id: PyObjectId = Field(default_factory=PyObjectId, alias="_id")
    username: str = Field(...)
    email: EmailStr = Field(...)
    password: str = Field(...)
    # BUG FIX: ``default=datetime.now()`` is evaluated once at import time, so
    # every document created by the process would share the interpreter
    # start-up timestamp.  ``default_factory`` is called per instance instead.
    created_at: datetime = Field(default_factory=datetime.now)
    last_login: Optional[datetime] = Field(default_factory=datetime.now)
    is_admin: bool = Field(default=False)

    class Config:  # pylint: disable=too-few-public-methods
        # Allow constructing with field names even where an alias exists (_id).
        allow_population_by_field_name = True
        # Needed because PyObjectId is not a pydantic-native type.
        arbitrary_types_allowed = True
        json_encoders = {ObjectId: str}
        schema_extra = {
            'example': {
                'username': 'MyUsername',
                'email': '<EMAIL>',
                'created_at': '1996-02-01T00:00:00',
                'last_login': '1996-02-01T00:00:00',
                'is_admin': 'false'
            }
        }
class UserSignUpModel(BaseModel):
    """
    Validation model for sign-up request data.
    """
    # Length constraints are enforced by pydantic when the payload is parsed.
    username: str = Field(..., min_length=3, max_length=100)
    email: EmailStr = Field(...)
    password: str = Field(..., min_length=6)
class User(BaseModel):
    """Slim user model carrying only username, email and the admin flag."""
    username: str
    email: EmailStr
    is_admin: bool
| StarcoderdataPython |
197721 | <gh_stars>100-1000
"""
An example using both tensorflow and numpy implementations of viterbi
replicating example on wikipedia
"""
from __future__ import print_function
__author__ = '<NAME> <<EMAIL>>'
import tensorflow as tf
import numpy as np
from tensorflow_hmm import HMMNumpy, HMMTensorflow
def dptable(V, pathScores, states):
    """Print a Viterbi dynamic-programming table.

    The header row shows one column index per time step (rows of *V*); each
    state then contributes a label line followed by its scores over time
    (columns of *pathScores*).
    """
    header = " ".join("%10d" % step for step in range(V.shape[0]))
    print(header)
    for state_idx, score_row in enumerate(pathScores.T):
        print("%.7s: " % states[state_idx])
        print(" ".join("%.7s" % ("%f" % score) for score in score_row))
def main():
    """Run the Wikipedia HMM example with both backends.

    Decodes the most likely hidden-state sequence for the observations
    [normal, cold, dizzy] using the TensorFlow implementation, then the
    NumPy implementation, printing the Viterbi tables for each.
    """
    p0 = np.array([0.6, 0.4])  # initial state distribution (Healthy, Fever)
    # emission probabilities: rows are observations, columns are states
    emi = np.array([[0.5, 0.1],
                    [0.4, 0.3],
                    [0.1, 0.6]])
    # state transition matrix
    trans = np.array([[0.7, 0.3],
                      [0.4, 0.6]])
    states = {0: 'Healthy', 1: 'Fever'}
    obs = {0: 'normal', 1: 'cold', 2: 'dizzy'}
    obs_seq = np.array([0, 1, 2])
    print()
    print("TensorFlow Example: ")
    tf_model = HMMTensorflow(trans, p0)
    y = emi[obs_seq]  # per-step emission likelihoods for the observed sequence
    tf_s_graph, tf_scores_graph = tf_model.viterbi_decode(y)
    # NOTE(review): TF1-style Session API; each run() opens a fresh session.
    tf_s = tf.Session().run(tf_s_graph)
    print("Most likely States: ", [obs[s] for s in tf_s])
    tf_scores = tf.Session().run(tf_scores_graph)
    # scores are log-domain; exponentiate for display
    pathScores = np.array(np.exp(tf_scores))
    dptable(pathScores, pathScores, states)
    print()
    print("numpy Example: ")
    np_model = HMMNumpy(trans, p0)
    y = emi[obs_seq]
    np_states, np_scores = np_model.viterbi_decode(y)
    print("Most likely States: ", [obs[s] for s in np_states])
    pathScores = np.array(np.exp(np_scores))
    dptable(pathScores, pathScores, states)
if __name__ == "__main__":
main()
| StarcoderdataPython |
75358 | <filename>tests/unit/injector/test_injector.py
from dataclasses import dataclass, field
import pytest
from predico.field_types import injected
from predico.injector import inject, InvalidInjectable
@dataclass
class Shoe:
    # Test fixture: simple injectable with a recognizable default size.
    size: int = 77
@dataclass
class Athlete:
    # NOTE(review): the default is a single Shoe instance created at
    # class-definition time and shared by all Athletes that fall back to it;
    # the tests below rely on props/injectables overriding this default.
    shoe: Shoe = Shoe()
def test_injector_props():
    """The injector fills fields from explicitly passed-in props."""
    small_shoe = Shoe(size=55)
    athlete = inject(dict(shoe=small_shoe), dict(), Athlete)
    assert athlete.shoe.size == 55
def test_injector_injected():
    """The injector fills fields from the registered injectables."""
    registered_shoe = Shoe(size=66)
    registry = {Shoe.__name__: registered_shoe}
    athlete = inject(dict(), registry, Athlete)
    assert athlete.shoe.size == 66
def test_injector_injected_double():
    """The injector can hand over a single attribute of another injectable."""

    @dataclass
    class InjectedAthlete:
        shoe_size: int = injected(Shoe, attr='size')

    registry = {Shoe.__name__: Shoe(size=88)}
    athlete = inject(dict(), registry, InjectedAthlete)
    assert athlete.shoe_size == 88
def test_injector_injected_callable():
    """The injector hands over the call result of a callable injectable."""

    @dataclass
    class CallableShoe:
        size: int

        def __call__(self):
            return self.size + 5

    @dataclass
    class InjectedAthlete:
        shoe_size: int = injected(CallableShoe)

    registry = {CallableShoe.__name__: CallableShoe(size=70)}
    athlete = inject(dict(), registry, InjectedAthlete)
    assert athlete.shoe_size == 75
def test_injector_injectedattr_missing_class():
    """Requesting a class that was never registered raises InvalidInjectable."""

    class Jersey:
        pass

    @dataclass
    class InjectedAthlete:
        shoe_size: int = injected(Jersey, attr='size')

    registry = {Shoe.__name__: Shoe(size=88)}
    with pytest.raises(InvalidInjectable) as exc:
        inject(dict(), registry, InjectedAthlete)
    assert str(exc.value) == 'Invalid injected type Jersey requested from type'
def test_injector_fielddefault():
    """With neither props nor injectables, the dataclass default applies."""
    athlete = inject(dict(), dict(), Athlete)
    assert athlete.shoe.size == 77
def test_injector_precedence():
    """When props and injectables both provide a value, props win."""
    props = dict(shoe=Shoe(size=55))
    registry = {Shoe.__name__: Shoe(size=66)}
    athlete = inject(props, registry, Athlete)
    assert athlete.shoe.size == 55
def test_injector_defaultvalue():
    """A field's default value is used instead of injection."""
    preset_shoe = Shoe(size=34523)

    @dataclass
    class DefaultValueAthlete:
        shoe: Shoe = preset_shoe

    athlete = inject(dict(), dict(), DefaultValueAthlete)
    assert athlete.shoe.size == 34523
def test_injector_defaultfactory():
    """A field's default_factory is used instead of injection."""

    @dataclass
    class DefaultValueAthlete:
        shoe: Shoe = field(default_factory=Shoe)

    athlete = inject(dict(), dict(), DefaultValueAthlete)
    assert athlete.shoe.size == 77
def test_injector_failure():
    """A required field missing from both props and injectables raises."""

    @dataclass
    class AgeAthlete:
        age: int  # required: no default anywhere

    with pytest.raises(TypeError):
        inject(dict(), dict(), AgeAthlete)
| StarcoderdataPython |
1793732 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Create config file to run evaluation"""
__author__ = '<NAME>, <NAME>, <NAME>, <NAME>, <NAME> '
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>'
import os
import argparse
import xml.etree.ElementTree as ET
from xml.dom import minidom
import copy
def parse_args():
    """Parse the positional CLI arguments: system output directory, human
    (model) summaries directory, and the path for the ROUGE config file."""
    arg_parser = argparse.ArgumentParser()
    for name, description in (
            ('out_dir', 'system output directory'),
            ('model_dir', 'human summaries directory'),
            ('rouge_config_file', 'ROUGE configuration file')):
        arg_parser.add_argument(name, help=description)
    return arg_parser.parse_args()
###### Template is of this format ######
# <EVAL ID="D1001-A.M.100.A">
# <PEER-ROOT>/dropbox/18-19/573/Data/mydata</PEER-ROOT>
# <MODEL-ROOT>/dropbox/18-19/573/Data/models/devtest/</MODEL-ROOT>
# <INPUT-FORMAT TYPE="SPL"/>
# <PEERS>
# <P ID="1">D1001-A.M.100.A.1</P>
# </PEERS>
# <MODELS>
# <M ID="A">D1001-A.M.100.A.A</M>
# <M ID="B">D1001-A.M.100.A.B</M>
# <M ID="F">D1001-A.M.100.A.F</M>
# <M ID="H">D1001-A.M.100.A.H</M>
# </MODELS>
# </EVAL>
def create_elem_template(out_dir, model_dir):
    """Build the <EVAL> element skeleton shared by every entry.

    The skeleton carries the peer/model root directories, the SPL
    input-format marker, and empty <PEERS>/<MODELS> containers that
    create_xml_tree fills in per topic.
    """
    template = ET.Element('EVAL')
    for tag, text in (('PEER-ROOT', out_dir), ('MODEL-ROOT', model_dir)):
        child = ET.Element(tag)
        child.text = text
        template.append(child)
    template.append(ET.Element('INPUT-FORMAT', {'TYPE': 'SPL'}))
    template.append(ET.Element('PEERS'))
    template.append(ET.Element('MODELS'))
    return template
def create_xml_tree(out_dir, model_dir):
    """Assemble the <ROUGE_EVAL> tree pairing each system summary with its
    human (model) summaries.

    Entries with no matching model summary are omitted, since ROUGE would
    have nothing to score them against.
    """
    template = create_elem_template(out_dir, model_dir)

    # Group model summaries by eval id (filename minus the final suffix).
    models_by_eval = {}
    for model_sum_name in os.listdir(model_dir):
        eval_id = model_sum_name.rsplit('.', 1)[0]
        models_by_eval.setdefault(eval_id, []).append(model_sum_name)

    root = ET.Element('ROUGE_EVAL', {'version': '1.5.5'})
    for sys_sum_name in sorted(os.listdir(out_dir)):
        eval_id, peer_id = sys_sum_name.rsplit('.', 1)
        eval_elem = copy.deepcopy(template)
        eval_elem.set('ID', eval_id)

        peer = ET.Element('P', {'ID': peer_id})
        peer.text = sys_sum_name
        eval_elem.find('PEERS').append(peer)

        models = eval_elem.find('MODELS')
        for model_sum_name in sorted(models_by_eval.get(eval_id, [])):
            model = ET.Element('M', {'ID': model_sum_name.rsplit('.', 1)[1]})
            model.text = model_sum_name
            models.append(model)

        if len(models) > 0:  # we have gold examples to compare against!
            root.append(eval_elem)
    return root
def create_config_file(out_dir, model_dir, config_file):
    """Build the ROUGE settings XML and write it, pretty-printed, to
    *config_file* (without the XML declaration line)."""
    root = create_xml_tree(out_dir, model_dir)
    xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml()
    # BUG FIX: the original sliced off a hard-coded 23 characters to drop the
    # XML declaration; that silently corrupts the output if minidom's
    # declaration ever differs in length.  Strip the declaration line instead.
    if xmlstr.startswith('<?xml'):
        xmlstr = xmlstr.split('\n', 1)[1]
    with open(config_file, 'w') as f:
        f.write(xmlstr)
        f.write('\n')
def main():
    """Command-line entry point: parse arguments and emit the config file."""
    opts = parse_args()
    create_config_file(opts.out_dir, opts.model_dir, opts.rouge_config_file)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4819375 | from core.protocol import TopicProtocol
from core.map_report import MapReport
import argparse
import os
def find_topics():
    """Map topic module names under the map/ package to their dotted import
    paths, skipping dunder files such as __init__.py."""
    topic_dir = os.path.join(os.path.dirname(__file__), 'map')
    topics = {}
    for entry in os.listdir(topic_dir):
        if entry[0:2] == '__':
            continue
        if entry[-3:] == '.py':
            stem = entry[0:-3]
            topics[stem] = 'map.' + stem
    return topics
def main():
    """Parse CLI arguments and write a map report for the chosen topics."""
    arg_parser = argparse.ArgumentParser(
        description='Create a report for a StarCraft 2 map')
    # Positional arguments: order matters for the CLI.
    arg_parser.add_argument(
        'input_type',
        choices=['map', 'replay'],
        help='the input file type')
    arg_parser.add_argument(
        'input_file',
        type=str,
        help='the input file path')
    arg_parser.add_argument(
        'output_dir',
        type=str,
        help='the output directory path')
    available_topics = find_topics()
    arg_parser.add_argument(
        '--topic',
        choices=available_topics.keys(),
        action='append',
        help='a topic to include in the report')
    opts = arg_parser.parse_args()
    selected = [available_topics[name] for name in opts.topic]
    report = MapReport(opts, selected)
    report.write()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3284423 | <reponame>grammatek/regina_normalizer
from regina_normalizer import unicode_maps as um
from regina_normalizer import dict_data
"""
Handles Unicode cleaning and Unicode normalizing of text. To simplify further processing, text normalizeing and
grapheme-to-phoneme conversion, we clean the text of most unicode characters not contained in the Icelandic
alphabet, and also delete or substitue a number of punctuation characters and special symbols.
"""
# the Icelandic alphabet
CHAR_SET = ['a', 'á', 'b', 'd', 'ð', 'e', 'é', 'f', 'g', 'h', 'i', 'í', 'j', 'k', 'l', 'm', 'n', 'o', 'ó', 'p', 'r',
's', 't', 'u', 'ú', 'v', 'y', 'ý', 'þ', 'æ', 'ö', 'x']
PRON_DICT = dict_data.PronDict.get_lexicon()
def normalize_encoding(text):
    """Normalize the unicode encoding of *text*.

    Characters with a known substitute (see unicode_maps) are replaced;
    characters on the deletion list are removed outright.  Characters are
    visited in their original text order, so replacements that introduce
    mapped characters behave as before.
    """
    cleaned = text
    for ch in text:
        substitute = get_replacement(ch)
        if substitute is not None:
            cleaned = cleaned.replace(ch, substitute)
        if should_delete(ch):
            cleaned = cleaned.replace(ch, '')
    return cleaned
def get_replacement(char):
    """Return the substitute for *char* from unicode_maps, or None if there
    is no mapping."""
    return um.unified_dictionary.get(char)
def should_delete(char):
    """Return True if *char* is on the deletion list in unicode_maps."""
    return char in um.delete_chars_map
def get_ice_alpha_replacement(char):
    """Return the Icelandic-alphabet substitute for *char*, or the empty
    string when no substitute is defined."""
    return um.post_dict_lookup.get(char, '')
def normalize_alphabet(sentences):
    """
    Last step in the normalization process: ensure no non-valid characters
    are delivered to the g2p system.

    Tokens present in the pronunciation lexicon are kept untouched, since
    words with non-Icelandic characters might be stored there even if
    automatic g2p would fail.  For other tokens, each character outside the
    Icelandic alphabet is substituted where possible, mapped to a comma for
    parentheses/quotes (so the TTS voice still pauses), or dropped, while
    sentence punctuation (. , : ! ?) is preserved.

    TODO: this needs more careful handling and a "contract" with the g2p
    module: which characters should be allowed?

    :param sentences: a sentence string or a list of sentence strings
    :return: list of lower-cased, cleaned sentences
    """
    if isinstance(sentences, list):
        sentence_list = sentences
    else:
        sentence_list = [sentences]
    normalized_sentences = []
    for sent in sentence_list:
        # BUG FIX: the accumulator must be reset for every sentence; it was
        # previously initialized once before the loop, so each output
        # sentence contained all preceding sentences as a prefix.
        norm_sent = ''
        for token in sent.split():
            if token not in PRON_DICT:
                # Iteration runs over the token's original characters even
                # though ``token`` is rebound by the replacements below.
                for char in token:
                    # is it an Icelandic character?
                    if char.lower() not in CHAR_SET:
                        replacement = get_ice_alpha_replacement(char)
                        if replacement:
                            token = token.replace(char, replacement)
                        elif char in ('(', ')', '"'):
                            # sounds odd if parentheses/quotes are ignored and
                            # don't cause the tts voice to pause; use a comma
                            token = token.replace(char, ",")
                        elif char not in ['.', ',', ':', '!', '?']:
                            # unknown character with no mapping: drop it,
                            # keeping ordinary punctuation marks
                            token = token.replace(char, "")
            # restore the string with valid words / characters only
            norm_sent += token + ' '
        normalized_sentences.append(norm_sent.lower().strip())
    return normalized_sentences
def normalize_alphabet_from_tuples(normalized_tuples):
    """
    Run alphabet normalization over (original, normalized) token pairs,
    preserving the original tokens.

    :param normalized_tuples: pairs of original tokens with their normalization
    :return: the same pairing, with each normalized token passed through
        normalize_alphabet
    """
    result = []
    for original_token, normalized_token in normalized_tuples:
        final_norm = normalize_alphabet(normalized_token)
        result.append((original_token, final_norm[0]))
    return result
def main():
    """Demonstrate encoding normalization on a sample string containing a
    soft hyphen and a Greek capital delta."""
    sample = 'norma\u00adlize this and \u0394'
    cleaned = normalize_encoding(sample)
    print(sample)
    print(cleaned)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3227178 | <reponame>tingiskhan/pyfilter
import torch
from torch.distributions import Distribution
from torch.nn import Module
from abc import ABC
class BaseApproximation(Module, ABC):
    """
    Abstract base class for constructing variational approximations.

    Sub-classes implement :meth:`initialize` to set up the approximation's
    attributes for a given shape and :meth:`get_approximation` to expose the
    resulting distribution.
    """

    def __init__(self):
        super().__init__()

    def initialize(self, shape: torch.Size):
        """
        Method to be overridden by derived classes. Initializes the required attributes of the approximation given the
        shape and the model.

        Args:
            shape: The shape of the resulting approximation.
        """
        raise NotImplementedError()

    def get_approximation(self) -> Distribution:
        """
        Method to be overridden by derived classes. Returns the distribution of the variational approximation.
        """
        raise NotImplementedError()
| StarcoderdataPython |
93855 | <filename>super_laser_gui.py
import sys, os, serial, datetime, time
import numpy as np
from configparser import ConfigParser
import scipy
from scipy.interpolate import interp1d
import fileinput
from simple_pid import PID
from wlm import *
# shouldn't need this -> from Fiber import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class App(QWidget):
    """Main window: builds one Laser panel per configured laser and polls the
    wavelength meter on a timer, feeding locked lasers' PID outputs to their
    Arduinos."""

    def __init__(self):
        super().__init__()
        self.title = 'Super Laser Locker'
        self.left = 0
        self.top = 0
        self.width = 1500
        self.height = 400
        self.set_point = 0 # THz
        self.update_interval = 1 # ms (QTimer period)
        self.no_of_points = 100
        self.no_of_arduinos = 1
        self.wlm = WavelengthMeter()
        self.initUI()
        # Poll the wavemeter and service the locks on every timer tick.
        self.timer = QTimer()
        self.timer.timeout.connect(self.tick)
        self.timer.start(self.update_interval)

    def tick(self):
        """Read all channels once, update each laser's display and, if its
        lock is enabled, push the PID output to its Arduino."""
        all_freq_reads = self.wlm.frequencies
        for laser in self.laser_objs.keys():
            # Config channels are 1-based; the reading list is 0-based.
            freq_read = all_freq_reads[int(self.laser_objs[laser].chan) - 1]
            if freq_read >= 0:
                freq_mess = freq_read
                if self.laser_objs[laser].lockable and self.laser_objs[laser].lock_check.isChecked():
                    control = self.laser_objs[laser].my_pid(freq_mess)
                    # Map the PID output (limited to +/-10) onto 0..4095;
                    # presumably a 12-bit DAC word -- TODO confirm.
                    ard_num = int(4095.0/20 * control + 4095.0/2.0)
                    mystr = '{:04d}'.format(ard_num).encode('utf-8')
                    self.laser_objs[laser].my_ard.ser.write(mystr)
            # Negative sentinel values from the wavemeter map to status text.
            elif freq_read == -3.0:
                freq_mess = 'UNDER'
            elif freq_read == -4.0:
                freq_mess = 'OVER'
            else:
                freq_mess = 'ERROR'
            self.laser_objs[laser].update_frequency(freq_mess)

    def initUI(self):
        """Create one Laser panel per entry in the laser config file."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.main_layout = QHBoxLayout()
        self.laser_objs = {}
        self.lasers = read_config()
        for las in self.lasers.keys():
            newlas = Laser()
            newlas.update_name(las)
            newlas.update_lockable(self.lasers[las]['lockable'])
            newlas.update_channel(self.lasers[las]['chan'])
            newlas.filename = self.lasers[las]['setfile']
            # if newlas.lockable:
            #     newlas.my_ard = Arduino(self.lasers[las]['com_port'])
            # else:
            #     print('arduino not created')
            newlas.get_setpoint()
            self.main_layout.addWidget(newlas)
            self.laser_objs[las] = newlas
        self.setLayout(self.main_layout)
        self.show()
class Laser(QWidget):
    """Panel for a single laser: shows the measured frequency and, for
    lockable lasers, exposes setpoint/scan/PID controls for a wavemeter lock.

    ``App.tick`` reads ``chan``, ``lockable``, ``lock_check``, ``my_pid`` and
    ``my_ard`` from this object, so those attribute names are part of the
    interface.
    """

    def __init__(self):
        super().__init__()
        self.name = ''
        self.type = ''
        self.chan = 0            # wavelength-meter channel (as configured)
        self.lockable = True
        self.locking = False
        self.frequency = 0       # last displayed frequency (THz)
        self.setpoint = 0        # lock target (THz)
        self.p = 100
        self.i = 1000
        self.d = 0
        self.ard_mess = 2048     # mid-scale control word
        self.offset = 0          # base setpoint (THz)
        self.fstep = 50          # scan step size (MHz)
        self.filename = ''
        self.basefolder = 'z:\\'
        self.my_pid = PID(self.p, self.i, self.d, self.setpoint,
                          sample_time=0.01, output_limits=[-10, 10])
        self.my_ard = ''
        self.layout = QGridLayout()
        self.name_label = QLabel(self.name)
        name_font = QFont("Times", 30, QFont.Bold)
        self.name_label.setFont(name_font)
        self.set_label = QLabel('Setpoint (THz):')
        self.ard_label = QLabel('Control Value:')
        self.off_label = QLabel('Set Offset (THz):')
        self.step_label = QLabel('Step Size (MHz):')
        self.p_label = QLabel('P:')
        self.i_label = QLabel('I:')
        self.d_label = QLabel('D:')
        self.freq_value = QLabel("{0:.6f}".format(self.frequency))
        self.set_value = QLineEdit("{0:.6f}".format(self.setpoint))
        self.ard_value = QLabel(str(self.ard_mess))
        self.off_value = QLineEdit("{0:.6f}".format(self.offset))
        self.step_value = QLineEdit(str(self.fstep))
        self.p_value = QLineEdit(str(self.p))
        self.i_value = QLineEdit(str(self.i))
        self.d_value = QLineEdit(str(self.d))
        self.scan_label = QLabel('Frequency Shift (MHz):')
        self.laser_scan = QSpinBox()
        self.laser_scan.setMinimum(-5000)
        self.laser_scan.setMaximum(5000)
        # BUG FIX: np.int/np.float were removed in NumPy >= 1.24 and raise
        # AttributeError there; the plain builtins are equivalent here (fixed
        # throughout this class).
        self.laser_scan.setSingleStep(int(self.fstep))
        self.laser_scan.valueChanged.connect(self.set_setpoint)
        self.laser_scan.valueChanged.connect(self.set_fstep)
        self.p_value.returnPressed.connect(self.update_p)
        self.i_value.returnPressed.connect(self.update_i)
        self.d_value.returnPressed.connect(self.update_d)
        self.pid_label = QLabel('PID Values')
        self.lock_check = QCheckBox('Lock')
        self.layout.addWidget(self.name_label, 0, 0)
        self.layout.addWidget(QLabel('THz'), 0, 2)
        self.layout.addWidget(self.freq_value, 0, 1)
        self.layout.addWidget(self.lock_check, 1, 0)
        self.layout.addWidget(self.ard_label, 2, 0)
        self.layout.addWidget(self.ard_value, 2, 1)
        self.layout.addWidget(self.set_label, 3, 0)
        self.layout.addWidget(self.set_value, 3, 1)
        self.layout.addWidget(self.scan_label, 4, 0)
        self.layout.addWidget(self.laser_scan, 4, 1)
        self.layout.addWidget(self.off_label, 5, 0)
        self.layout.addWidget(self.off_value, 5, 1)
        self.layout.addWidget(self.step_label, 6, 0)
        self.layout.addWidget(self.step_value, 6, 1)
        self.layout.addWidget(self.pid_label, 7, 0)
        self.layout.addWidget(self.p_label, 8, 0)
        self.layout.addWidget(self.p_value, 8, 1)
        self.layout.addWidget(self.i_label, 9, 0)
        self.layout.addWidget(self.i_value, 9, 1)
        self.layout.addWidget(self.d_label, 10, 0)
        self.layout.addWidget(self.d_value, 10, 1)
        self.setLayout(self.layout)

    def update_frequency(self, new_freq):
        """Display *new_freq*: a float in THz, or a status string such as
        'OVER'/'UNDER'/'ERROR' (which fails float formatting)."""
        self.frequency = new_freq
        try:
            self.freq_value.setText("{0:.6f}".format(new_freq))
        except Exception:
            self.freq_value.setText(new_freq)

    def update_name(self, new_name):
        """Set the laser's display name."""
        self.name = str(new_name)
        self.name_label.setText(self.name)

    def update_channel(self, new_chan):
        """Record the wavelength-meter channel.

        NOTE(review): the original computed int(new_chan) into an unused
        local; the stored value stays as configured (App casts it on use).
        """
        self.chan = new_chan

    def update_p(self):
        """Push the edited P gain into the PID controller."""
        self.p = float(self.p_value.text())
        self.my_pid.Kp = self.p

    def update_i(self):
        """Push the edited I gain into the PID controller."""
        self.i = float(self.i_value.text())
        self.my_pid.Ki = self.i

    def update_d(self):
        """Push the edited D gain into the PID controller."""
        self.d = float(self.d_value.text())
        self.my_pid.Kd = self.d

    def update_lockable(self, new_lockable):
        """Enable/disable the lock UI from the config string 'True'/'False';
        display-only lasers hide every lock-related control."""
        self.lockable = new_lockable == 'True'
        if not self.lockable:
            for widget in (self.ard_label, self.ard_value, self.scan_label,
                           self.laser_scan, self.off_label, self.off_value,
                           self.step_label, self.step_value, self.pid_label,
                           self.p_label, self.p_value, self.i_label,
                           self.i_value, self.d_value, self.d_label,
                           self.lock_check):
                widget.hide()

    def set_fstep(self):
        """Apply the edited scan step size (MHz) to the scan spin box."""
        new_fstep = int(self.step_value.text())
        self.laser_scan.setSingleStep(new_fstep)
        self.fstep = new_fstep

    def set_setpoint(self):
        """Recompute the lock setpoint, persist it to the setfile and hand it
        to the PID controller.

        NOTE(review): the new setpoint is offset + fstep (MHz -> THz); the
        scan spin box's current value is not read here -- confirm this is the
        intended scan behaviour.
        """
        ns = float(self.offset) + float(self.fstep) * 1e-6
        filepath = self.basefolder + self.filename
        with open(filepath, 'w') as file:
            file.write(str(ns))
        self.setpoint = ns
        self.set_value.setText("{0:.6f}".format(ns))
        self.my_pid.setpoint = self.setpoint

    def get_setpoint(self):
        """Load the persisted setpoint and show it as both setpoint and
        offset."""
        filepath = self.basefolder + self.filename
        with open(filepath, 'r') as file:
            new_set = float(file.readline())
        self.setpoint = new_set
        self.set_value.setText("{0:.6f}".format(new_set))
        self.off_value.setText("{0:.6f}".format(new_set))

    def update_ardmess(self, new_mess):
        """Record the latest control word.

        NOTE(review): this writes to ard_label (the caption), not ard_value
        (the displayed number) -- looks like a bug, preserved as-is.
        """
        self.ard_mess = new_mess
        self.ard_label.setText(new_mess)
class Arduino():
    """Serial connection to a lock-output Arduino."""

    def __init__(self, com_port):
        """Open *com_port* at 9600 baud, 7 data bits, odd parity, 1 stop bit.

        If the first open fails, try to close a stale handle and open once
        more.
        """
        serial_port = com_port
        baud_rate = 9600
        try:
            self.ser = serial.Serial(serial_port, baud_rate,
                                     bytesize=serial.SEVENBITS,
                                     parity=serial.PARITY_ODD,
                                     stopbits=serial.STOPBITS_ONE,
                                     timeout=1)
        except Exception:
            # BUG FIX: the retry path referenced an undefined local ``ser``,
            # which always raised NameError instead of closing anything.
            try:
                self.ser.close()
            except Exception:
                print("Serial port already closed")
            self.ser = serial.Serial(serial_port, baud_rate,
                                     bytesize=serial.SEVENBITS,
                                     parity=serial.PARITY_ODD,
                                     stopbits=serial.STOPBITS_ONE,
                                     timeout=1)
def read_config(filename='laser_config.ini'):
    """Parse the laser INI file into a plain dict of dicts:
    {section_name: {option: value}} with all values as strings."""
    config = ConfigParser()
    config.read(filename)
    return {
        section: {option: config.get(section, option)
                  for option in config.options(section)}
        for section in config.sections()
    }
def set_dark(app):
    """Apply a dark color palette and a matching tooltip stylesheet to the
    Qt application *app*."""
    dark_palette = QPalette()
    role_colors = (
        (QPalette.Window, QColor(53, 53, 53)),
        (QPalette.WindowText, Qt.white),
        (QPalette.Base, QColor(25, 25, 25)),
        (QPalette.AlternateBase, QColor(53, 53, 53)),
        (QPalette.ToolTipBase, Qt.white),
        (QPalette.ToolTipText, Qt.white),
        (QPalette.Text, Qt.white),
        (QPalette.Button, QColor(53, 53, 53)),
        (QPalette.ButtonText, Qt.white),
        (QPalette.BrightText, Qt.red),
        (QPalette.Link, QColor(42, 130, 218)),
        (QPalette.Highlight, QColor(42, 130, 218)),
        (QPalette.HighlightedText, Qt.black),
    )
    for role, color in role_colors:
        dark_palette.setColor(role, color)
    app.setPalette(dark_palette)
    app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }")
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle('Fusion')
set_dark(app)
ex = App()
sys.exit(app.exec_()) | StarcoderdataPython |
class InvalidateEventArgs(EventArgs):
    """
    Provides data for the System.Windows.Forms.Control.Invalidated event.

    InvalidateEventArgs(invalidRect: Rectangle)
    """
    # Auto-generated IronPython/.NET stub: bodies are placeholders for IDE
    # support; the real implementation lives in System.Windows.Forms.

    def Instance(self):
        """ This function has been arbitrarily put into the stubs"""
        return InvalidateEventArgs()

    @staticmethod
    def __new__(self,invalidRect):
        """ __new__(cls: type,invalidRect: Rectangle) """
        pass

    InvalidRect=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the System.Drawing.Rectangle that contains the invalidated window area.

    Get: InvalidRect(self: InvalidateEventArgs) -> Rectangle
    """
| StarcoderdataPython |
4841065 | <filename>ppcls/arch/backbone/model_zoo/repvgg.py
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Code was based on https://github.com/DingXiaoH/RepVGG
# reference: https://arxiv.org/abs/2101.03697
import paddle.nn as nn
import paddle
import numpy as np
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
# Download locations for the officially released pretrained weights,
# keyed by architecture name.
MODEL_URLS = {
    "RepVGG_A0":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A0_pretrained.pdparams",
    "RepVGG_A1":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A1_pretrained.pdparams",
    "RepVGG_A2":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_A2_pretrained.pdparams",
    "RepVGG_B0":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B0_pretrained.pdparams",
    "RepVGG_B1":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1_pretrained.pdparams",
    "RepVGG_B2":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2_pretrained.pdparams",
    "RepVGG_B1g2":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1g2_pretrained.pdparams",
    "RepVGG_B1g4":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B1g4_pretrained.pdparams",
    "RepVGG_B2g4":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g4_pretrained.pdparams",
    "RepVGG_B3g4":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g4_pretrained.pdparams",
}

# The module's public API mirrors the available architectures.
__all__ = list(MODEL_URLS.keys())

# Grouped 3x3 convolutions are applied only on these layer indices in the
# g2/g4 variants (see the RepVGG paper referenced above); the maps give
# {layer_index: group_count}.
optional_groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26]
g2_map = {l: 2 for l in optional_groupwise_layers}
g4_map = {l: 4 for l in optional_groupwise_layers}
class ConvBN(nn.Layer):
    """A bias-free Conv2D followed by BatchNorm2D (no activation).

    The sub-layer attribute names ``conv`` and ``bn`` are relied upon by
    RepVGGBlock._fuse_bn_tensor and must not change.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1):
        super().__init__()
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias_attr=False)
        self.bn = nn.BatchNorm2D(num_features=out_channels)

    def forward(self, x):
        return self.bn(self.conv(x))
class RepVGGBlock(nn.Layer):
    """RepVGG basic block: a 3x3 conv+BN branch, a 1x1 conv+BN branch and
    (when shapes allow) an identity BN branch, summed before a ReLU.

    After training, :meth:`rep` fuses the three branches into a single 3x3
    convolution (structural re-parameterization per the RepVGG paper
    referenced at the top of this file)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros'):
        super(RepVGGBlock, self).__init__()
        # True once the branches have been fused into self.rbr_reparam.
        self.is_repped = False

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode

        # The fusion math below assumes a 3x3 kernel with padding 1.
        assert kernel_size == 3
        assert padding == 1
        # Padding for the 1x1 branch so both conv branches keep the same
        # spatial output size.
        padding_11 = padding - kernel_size // 2

        self.nonlinearity = nn.ReLU()

        # The identity branch only exists when input and output shapes match.
        self.rbr_identity = nn.BatchNorm2D(
            num_features=in_channels
        ) if out_channels == in_channels and stride == 1 else None
        self.rbr_dense = ConvBN(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups)
        self.rbr_1x1 = ConvBN(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=stride,
            padding=padding_11,
            groups=groups)

    def forward(self, inputs):
        """Sum the branch outputs (or apply the fused conv) and ReLU."""
        if self.is_repped:
            return self.nonlinearity(self.rbr_reparam(inputs))

        if self.rbr_identity is None:
            id_out = 0
        else:
            id_out = self.rbr_identity(inputs)
        return self.nonlinearity(
            self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)

    def rep(self):
        """Fuse the three branches into one 3x3 conv for inference."""
        if not hasattr(self, 'rbr_reparam'):
            self.rbr_reparam = nn.Conv2D(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=self.kernel_size,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
                padding_mode=self.padding_mode)
        kernel, bias = self.get_equivalent_kernel_bias()
        self.rbr_reparam.weight.set_value(kernel)
        self.rbr_reparam.bias.set_value(bias)
        self.is_repped = True

    def get_equivalent_kernel_bias(self):
        """Return the fused (kernel, bias) equivalent to the branch sum."""
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(
            kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        """Zero-pad a 1x1 kernel to 3x3 so the branch kernels can be summed."""
        if kernel1x1 is None:
            return 0
        else:
            return nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """Fold a branch's BatchNorm statistics into an equivalent
        (kernel, bias) pair; for the identity branch, a one-hot 3x3 identity
        kernel is synthesized first."""
        if branch is None:
            return 0, 0
        if isinstance(branch, ConvBN):
            kernel = branch.conv.weight
            running_mean = branch.bn._mean
            running_var = branch.bn._variance
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn._epsilon
        else:
            assert isinstance(branch, nn.BatchNorm2D)
            if not hasattr(self, 'id_tensor'):
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros(
                    (self.in_channels, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.in_channels):
                    # 1 at the kernel center of each channel's own slot.
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = paddle.to_tensor(kernel_value)
            kernel = self.id_tensor
            running_mean = branch._mean
            running_var = branch._variance
            gamma = branch.weight
            beta = branch.bias
            eps = branch._epsilon
        # Standard BN folding: w' = w * gamma/std, b' = beta - mean*gamma/std.
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape((-1, 1, 1, 1))
        return kernel * t, beta - running_mean * gamma / std
class RepVGG(nn.Layer):
    """RepVGG backbone: a stem block, four stages of RepVGGBlocks, global
    average pooling and a linear classifier head."""

    def __init__(self,
                 num_blocks,
                 width_multiplier=None,
                 override_groups_map=None,
                 class_num=1000):
        super(RepVGG, self).__init__()
        assert len(width_multiplier) == 4
        # {layer_index: group_count}; layers not listed use groups=1.
        self.override_groups_map = override_groups_map or dict()
        # Layer index 0 is the stem, which is never grouped.
        assert 0 not in self.override_groups_map

        self.in_planes = min(64, int(64 * width_multiplier[0]))

        self.stage0 = RepVGGBlock(
            in_channels=3,
            out_channels=self.in_planes,
            kernel_size=3,
            stride=2,
            padding=1)
        # Running layer counter used to look up per-layer group overrides.
        self.cur_layer_idx = 1
        self.stage1 = self._make_stage(
            int(64 * width_multiplier[0]), num_blocks[0], stride=2)
        self.stage2 = self._make_stage(
            int(128 * width_multiplier[1]), num_blocks[1], stride=2)
        self.stage3 = self._make_stage(
            int(256 * width_multiplier[2]), num_blocks[2], stride=2)
        self.stage4 = self._make_stage(
            int(512 * width_multiplier[3]), num_blocks[3], stride=2)
        self.gap = nn.AdaptiveAvgPool2D(output_size=1)
        self.linear = nn.Linear(int(512 * width_multiplier[3]), class_num)

    def _make_stage(self, planes, num_blocks, stride):
        """Build one stage: the first block downsamples, the rest keep size."""
        strides = [stride] + [1] * (num_blocks - 1)
        blocks = []
        for stride in strides:
            cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)
            blocks.append(
                RepVGGBlock(
                    in_channels=self.in_planes,
                    out_channels=planes,
                    kernel_size=3,
                    stride=stride,
                    padding=1,
                    groups=cur_groups))
            self.in_planes = planes
            self.cur_layer_idx += 1
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Run the backbone and return classifier logits."""
        out = self.stage0(x)
        out = self.stage1(out)
        out = self.stage2(out)
        out = self.stage3(out)
        out = self.stage4(out)
        out = self.gap(out)
        out = paddle.flatten(out, start_axis=1)
        out = self.linear(out)
        return out
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def RepVGG_A0(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-A0: stage depths [2, 4, 14, 1], widths x[0.75, 0.75, 0.75, 2.5]."""
    depths = [2, 4, 14, 1]
    widths = [0.75, 0.75, 0.75, 2.5]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=None, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_A0"],
                     use_ssld=use_ssld)
    return model
def RepVGG_A1(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-A1: stage depths [2, 4, 14, 1], widths x[1, 1, 1, 2.5]."""
    depths = [2, 4, 14, 1]
    widths = [1, 1, 1, 2.5]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=None, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_A1"],
                     use_ssld=use_ssld)
    return model
def RepVGG_A2(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-A2: stage depths [2, 4, 14, 1], widths x[1.5, 1.5, 1.5, 2.75]."""
    depths = [2, 4, 14, 1]
    widths = [1.5, 1.5, 1.5, 2.75]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=None, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_A2"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B0(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B0: stage depths [4, 6, 16, 1], widths x[1, 1, 1, 2.5]."""
    depths = [4, 6, 16, 1]
    widths = [1, 1, 1, 2.5]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=None, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B0"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B1(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B1: stage depths [4, 6, 16, 1], widths x[2, 2, 2, 4]."""
    depths = [4, 6, 16, 1]
    widths = [2, 2, 2, 4]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=None, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B1"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B1g2(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B1g2: B1 widths/depths with the g2 grouped-conv layer map."""
    depths = [4, 6, 16, 1]
    widths = [2, 2, 2, 4]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=g2_map, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B1g2"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B1g4(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B1g4: B1 widths/depths with the g4 grouped-conv layer map."""
    depths = [4, 6, 16, 1]
    widths = [2, 2, 2, 4]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=g4_map, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B1g4"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B2(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B2: stage depths [4, 6, 16, 1], widths x[2.5, 2.5, 2.5, 5]."""
    depths = [4, 6, 16, 1]
    widths = [2.5, 2.5, 2.5, 5]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=None, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B2"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B2g4(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B2g4: B2 widths/depths with the g4 grouped-conv layer map."""
    depths = [4, 6, 16, 1]
    widths = [2.5, 2.5, 2.5, 5]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=g4_map, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B2g4"],
                     use_ssld=use_ssld)
    return model
def RepVGG_B3g4(pretrained=False, use_ssld=False, **kwargs):
    """RepVGG-B3g4: stage depths [4, 6, 16, 1], widths x[3, 3, 3, 5], g4 groups."""
    depths = [4, 6, 16, 1]
    widths = [3, 3, 3, 5]
    model = RepVGG(num_blocks=depths, width_multiplier=widths,
                   override_groups_map=g4_map, **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["RepVGG_B3g4"],
                     use_ssld=use_ssld)
    return model
| StarcoderdataPython |
3378573 | <reponame>michaelgundlach/7billionhumans
# Generated from SBHasm.g4 by ANTLR 4.7.1
# encoding: utf-8
import sys
from io import StringIO
from antlr4 import *
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN for the SBHasm grammar.

    The opaque escaped string below is emitted by ANTLR 4.7.1 (see the
    file header) and is decoded by ATNDeserializer in SBHasmParser.
    Do not edit it by hand: the parser methods' hard-coded state numbers
    must match this payload exactly.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3)")
        buf.write("\u0102\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
        buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
        buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
        buf.write("\4\37\t\37\4 \t \4!\t!\3\2\6\2D\n\2\r\2\16\2E\3\3\3\3")
        buf.write("\3\3\3\3\3\3\5\3M\n\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4")
        buf.write("\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4a\n\4\3\5")
        buf.write("\3\5\3\5\5\5f\n\5\3\6\3\6\3\6\5\6k\n\6\3\7\3\7\3\7\5\7")
        buf.write("p\n\7\3\b\3\b\3\t\3\t\3\t\3\t\3\t\6\ty\n\t\r\t\16\tz\3")
        buf.write("\t\3\t\3\t\6\t\u0080\n\t\r\t\16\t\u0081\5\t\u0084\n\t")
        buf.write("\3\t\3\t\3\n\3\n\3\n\3\n\7\n\u008c\n\n\f\n\16\n\u008f")
        buf.write("\13\n\3\13\3\13\3\13\5\13\u0094\n\13\3\13\3\13\3\13\3")
        buf.write("\13\5\13\u009a\n\13\3\f\3\f\3\r\3\r\3\r\3\r\5\r\u00a2")
        buf.write("\n\r\3\16\3\16\3\17\3\17\3\17\3\17\5\17\u00aa\n\17\3\20")
        buf.write("\3\20\3\20\3\20\3\20\3\20\3\20\5\20\u00b3\n\20\3\20\3")
        buf.write("\20\3\20\3\20\3\20\5\20\u00ba\n\20\3\21\3\21\3\21\3\21")
        buf.write("\3\21\3\21\3\21\3\21\5\21\u00c4\n\21\3\22\3\22\3\22\5")
        buf.write("\22\u00c9\n\22\3\23\3\23\3\23\5\23\u00ce\n\23\3\24\3\24")
        buf.write("\3\24\3\24\3\24\3\25\3\25\3\26\3\26\3\27\3\27\3\27\3\30")
        buf.write("\3\30\3\30\3\31\3\31\3\31\3\32\3\32\3\32\5\32\u00e5\n")
        buf.write("\32\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36")
        buf.write("\3\37\3\37\3 \3 \3!\3!\3!\3!\3!\3!\3!\6!\u00fc\n!\r!\16")
        buf.write("!\u00fd\3!\3!\3!\2\2\"\2\4\6\b\n\f\16\20\22\24\26\30\32")
        buf.write("\34\36 \"$&(*,.\60\62\64\668:<>@\2\3\3\2 !\2\u0115\2C")
        buf.write("\3\2\2\2\4L\3\2\2\2\6`\3\2\2\2\bb\3\2\2\2\ng\3\2\2\2\f")
        buf.write("l\3\2\2\2\16q\3\2\2\2\20s\3\2\2\2\22\u0087\3\2\2\2\24")
        buf.write("\u0093\3\2\2\2\26\u009b\3\2\2\2\30\u00a1\3\2\2\2\32\u00a3")
        buf.write("\3\2\2\2\34\u00a5\3\2\2\2\36\u00ab\3\2\2\2 \u00bb\3\2")
        buf.write("\2\2\"\u00c5\3\2\2\2$\u00ca\3\2\2\2&\u00cf\3\2\2\2(\u00d4")
        buf.write("\3\2\2\2*\u00d6\3\2\2\2,\u00d8\3\2\2\2.\u00db\3\2\2\2")
        buf.write("\60\u00de\3\2\2\2\62\u00e1\3\2\2\2\64\u00e8\3\2\2\2\66")
        buf.write("\u00ea\3\2\2\28\u00ec\3\2\2\2:\u00ee\3\2\2\2<\u00f0\3")
        buf.write("\2\2\2>\u00f2\3\2\2\2@\u00f4\3\2\2\2BD\5\4\3\2CB\3\2\2")
        buf.write("\2DE\3\2\2\2EC\3\2\2\2EF\3\2\2\2F\3\3\2\2\2GM\5\6\4\2")
        buf.write("HM\5\26\f\2IM\5\16\b\2JM\5,\27\2KM\5.\30\2LG\3\2\2\2L")
        buf.write("H\3\2\2\2LI\3\2\2\2LJ\3\2\2\2LK\3\2\2\2LM\3\2\2\2MN\3")
        buf.write("\2\2\2NO\7\34\2\2O\5\3\2\2\2Pa\7\5\2\2Qa\5\n\6\2Ra\5\b")
        buf.write("\5\2Sa\5\20\t\2Ta\5\34\17\2Ua\5(\25\2Va\5\36\20\2Wa\5")
        buf.write(" \21\2Xa\5\"\22\2Ya\5$\23\2Za\5&\24\2[a\7\23\2\2\\a\5")
        buf.write("\b\5\2]a\5\60\31\2^a\5\62\32\2_a\5@!\2`P\3\2\2\2`Q\3\2")
        buf.write("\2\2`R\3\2\2\2`S\3\2\2\2`T\3\2\2\2`U\3\2\2\2`V\3\2\2\2")
        buf.write("`W\3\2\2\2`X\3\2\2\2`Y\3\2\2\2`Z\3\2\2\2`[\3\2\2\2`\\")
        buf.write("\3\2\2\2`]\3\2\2\2`^\3\2\2\2`_\3\2\2\2a\7\3\2\2\2be\7")
        buf.write("\7\2\2cf\5\64\33\2df\5\66\34\2ec\3\2\2\2ed\3\2\2\2ef\3")
        buf.write("\2\2\2f\t\3\2\2\2gj\7\6\2\2hk\5\f\7\2ik\5\66\34\2jh\3")
        buf.write("\2\2\2ji\3\2\2\2k\13\3\2\2\2lo\5\64\33\2mn\7\36\2\2np")
        buf.write("\5\f\7\2om\3\2\2\2op\3\2\2\2p\r\3\2\2\2qr\7\33\2\2r\17")
        buf.write("\3\2\2\2st\7\b\2\2tu\5\22\n\2uv\7\35\2\2vx\7\34\2\2wy")
        buf.write("\5\4\3\2xw\3\2\2\2yz\3\2\2\2zx\3\2\2\2z{\3\2\2\2{\u0083")
        buf.write("\3\2\2\2|}\5*\26\2}\177\7\34\2\2~\u0080\5\4\3\2\177~\3")
        buf.write("\2\2\2\u0080\u0081\3\2\2\2\u0081\177\3\2\2\2\u0081\u0082")
        buf.write("\3\2\2\2\u0082\u0084\3\2\2\2\u0083|\3\2\2\2\u0083\u0084")
        buf.write("\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u0086\7\n\2\2\u0086")
        buf.write("\21\3\2\2\2\u0087\u008d\5\24\13\2\u0088\u0089\t\2\2\2")
        buf.write("\u0089\u008a\7\34\2\2\u008a\u008c\5\24\13\2\u008b\u0088")
        buf.write("\3\2\2\2\u008c\u008f\3\2\2\2\u008d\u008b\3\2\2\2\u008d")
        buf.write("\u008e\3\2\2\2\u008e\23\3\2\2\2\u008f\u008d\3\2\2\2\u0090")
        buf.write("\u0094\5\64\33\2\u0091\u0094\5\30\r\2\u0092\u0094\58\35")
        buf.write("\2\u0093\u0090\3\2\2\2\u0093\u0091\3\2\2\2\u0093\u0092")
        buf.write("\3\2\2\2\u0094\u0095\3\2\2\2\u0095\u0099\7\27\2\2\u0096")
        buf.write("\u009a\5\64\33\2\u0097\u009a\5\30\r\2\u0098\u009a\58\35")
        buf.write("\2\u0099\u0096\3\2\2\2\u0099\u0097\3\2\2\2\u0099\u0098")
        buf.write("\3\2\2\2\u009a\25\3\2\2\2\u009b\u009c\7\3\2\2\u009c\27")
        buf.write("\3\2\2\2\u009d\u00a2\5\32\16\2\u009e\u00a2\5\66\34\2\u009f")
        buf.write("\u00a2\7\22\2\2\u00a0\u00a2\7\24\2\2\u00a1\u009d\3\2\2")
        buf.write("\2\u00a1\u009e\3\2\2\2\u00a1\u009f\3\2\2\2\u00a1\u00a0")
        buf.write("\3\2\2\2\u00a2\31\3\2\2\2\u00a3\u00a4\7\25\2\2\u00a4\33")
        buf.write("\3\2\2\2\u00a5\u00a9\7\f\2\2\u00a6\u00aa\58\35\2\u00a7")
        buf.write("\u00aa\5\64\33\2\u00a8\u00aa\5\66\34\2\u00a9\u00a6\3\2")
        buf.write("\2\2\u00a9\u00a7\3\2\2\2\u00a9\u00a8\3\2\2\2\u00aa\35")
        buf.write("\3\2\2\2\u00ab\u00ac\5\66\34\2\u00ac\u00ad\7\37\2\2\u00ad")
        buf.write("\u00b2\7\r\2\2\u00ae\u00b3\5\64\33\2\u00af\u00b3\5\66")
        buf.write("\34\2\u00b0\u00b3\7\22\2\2\u00b1\u00b3\58\35\2\u00b2\u00ae")
        buf.write("\3\2\2\2\u00b2\u00af\3\2\2\2\u00b2\u00b0\3\2\2\2\u00b2")
        buf.write("\u00b1\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b9\5:\36\2")
        buf.write("\u00b5\u00ba\5\64\33\2\u00b6\u00ba\5\66\34\2\u00b7\u00ba")
        buf.write("\7\22\2\2\u00b8\u00ba\58\35\2\u00b9\u00b5\3\2\2\2\u00b9")
        buf.write("\u00b6\3\2\2\2\u00b9\u00b7\3\2\2\2\u00b9\u00b8\3\2\2\2")
        buf.write("\u00ba\37\3\2\2\2\u00bb\u00bc\5\66\34\2\u00bc\u00bd\7")
        buf.write("\37\2\2\u00bd\u00c3\7\16\2\2\u00be\u00c4\5\64\33\2\u00bf")
        buf.write("\u00c4\5\66\34\2\u00c0\u00c4\7\22\2\2\u00c1\u00c4\58\35")
        buf.write("\2\u00c2\u00c4\7\24\2\2\u00c3\u00be\3\2\2\2\u00c3\u00bf")
        buf.write("\3\2\2\2\u00c3\u00c0\3\2\2\2\u00c3\u00c1\3\2\2\2\u00c3")
        buf.write("\u00c2\3\2\2\2\u00c4!\3\2\2\2\u00c5\u00c8\7\17\2\2\u00c6")
        buf.write("\u00c9\5\64\33\2\u00c7\u00c9\5\66\34\2\u00c8\u00c6\3\2")
        buf.write("\2\2\u00c8\u00c7\3\2\2\2\u00c9#\3\2\2\2\u00ca\u00cd\7")
        buf.write("\20\2\2\u00cb\u00ce\5\64\33\2\u00cc\u00ce\5\66\34\2\u00cd")
        buf.write("\u00cb\3\2\2\2\u00cd\u00cc\3\2\2\2\u00ce%\3\2\2\2\u00cf")
        buf.write("\u00d0\5\66\34\2\u00d0\u00d1\7\37\2\2\u00d1\u00d2\7\21")
        buf.write("\2\2\u00d2\u00d3\5\32\16\2\u00d3\'\3\2\2\2\u00d4\u00d5")
        buf.write("\7\13\2\2\u00d5)\3\2\2\2\u00d6\u00d7\7\t\2\2\u00d7+\3")
        buf.write("\2\2\2\u00d8\u00d9\7(\2\2\u00d9\u00da\58\35\2\u00da-\3")
        buf.write("\2\2\2\u00db\u00dc\7)\2\2\u00dc\u00dd\58\35\2\u00dd/\3")
        buf.write("\2\2\2\u00de\u00df\7\"\2\2\u00df\u00e0\5<\37\2\u00e0\61")
        buf.write("\3\2\2\2\u00e1\u00e4\7#\2\2\u00e2\u00e5\5> \2\u00e3\u00e5")
        buf.write("\5\64\33\2\u00e4\u00e2\3\2\2\2\u00e4\u00e3\3\2\2\2\u00e5")
        buf.write("\u00e6\3\2\2\2\u00e6\u00e7\5<\37\2\u00e7\63\3\2\2\2\u00e8")
        buf.write("\u00e9\7\32\2\2\u00e9\65\3\2\2\2\u00ea\u00eb\7\26\2\2")
        buf.write("\u00eb\67\3\2\2\2\u00ec\u00ed\7\31\2\2\u00ed9\3\2\2\2")
        buf.write("\u00ee\u00ef\7\30\2\2\u00ef;\3\2\2\2\u00f0\u00f1\7%\2")
        buf.write("\2\u00f1=\3\2\2\2\u00f2\u00f3\7$\2\2\u00f3?\3\2\2\2\u00f4")
        buf.write("\u00f5\5\66\34\2\u00f5\u00f6\7\37\2\2\u00f6\u00f7\7&\2")
        buf.write("\2\u00f7\u00f8\5\f\7\2\u00f8\u00f9\7\35\2\2\u00f9\u00fb")
        buf.write("\7\34\2\2\u00fa\u00fc\5\4\3\2\u00fb\u00fa\3\2\2\2\u00fc")
        buf.write("\u00fd\3\2\2\2\u00fd\u00fb\3\2\2\2\u00fd\u00fe\3\2\2\2")
        buf.write("\u00fe\u00ff\3\2\2\2\u00ff\u0100\7\'\2\2\u0100A\3\2\2")
        buf.write("\2\27EL`ejoz\u0081\u0083\u008d\u0093\u0099\u00a1\u00a9")
        buf.write("\u00b2\u00b9\u00c3\u00c8\u00cd\u00e4\u00fd")
        return buf.getvalue()
class SBHasmParser ( Parser ):
    # Recursive-descent parser generated by ANTLR 4.7.1 from SBHasm.g4
    # (see file header). The tables below are machine-generated and must
    # stay in sync with serializedATN(); do not edit by hand.

    grammarFileName = "SBHasm.g4"

    # Deserialize the ATN once at class-definition time; all instances
    # share the resulting automaton and DFA/prediction caches.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    # Display strings for tokens that have a fixed literal in the grammar.
    literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "'step'", "'pickup'", "'if'", "'else:'", "'endif'",
                     "'drop'", "'write'", "'calc'", "'set'", "'takefrom'",
                     "'giveto'", "'nearest'", "'myitem'", "'end'", "'nothing'",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "':'", "','", "'='", "'and'", "'or'", "'listenfor'",
                     "'tell'", "'everyone'", "<INVALID>", "'foreachdir'",
                     "'endfor'", "'comment'", "'DEFINE COMMENT'" ]

    # Symbolic token names, indexed by token type (see the constants below).
    symbolicNames = [ "<INVALID>", "COMMENT", "WHITESPACE", "JUMP", "STEP",
                      "PICKUP", "IF", "ELSE", "ENDIF", "DROP", "WRITE",
                      "CALC", "SET", "TAKE", "GIVE", "NEAREST", "MYITEM",
                      "END", "NOTHING", "ITEM", "MEM", "COMPARE", "CALC_OP",
                      "NUMBER", "DIRECTION", "LABEL", "EOL", "COLON", "COMMA",
                      "EQUAL", "AND", "OR", "LISTEN", "TELL", "EVERYONE",
                      "MESSAGE", "FOREACHDIR", "ENDFOR", "GAMECOMMENT",
                      "GAMECOMMENTDEF" ]

    # Rule indices, in grammar order; used by the rule methods below.
    RULE_asm = 0

    RULE_line = 1

    RULE_cmd = 2

    RULE_pickup = 3

    RULE_step = 4

    RULE_directions = 5

    RULE_label = 6

    RULE_cond = 7

    RULE_expressions = 8

    RULE_expression = 9

    RULE_comment = 10

    RULE_items = 11

    RULE_item = 12

    RULE_write = 13

    RULE_calc = 14

    RULE_setval = 15

    RULE_take = 16

    RULE_give = 17

    RULE_nearest = 18

    RULE_drop = 19

    RULE_sonst = 20

    RULE_sbhcomment = 21

    RULE_sbhcommentd = 22

    RULE_listen = 23

    RULE_tell = 24

    RULE_direction = 25

    RULE_mem = 26

    RULE_number = 27

    RULE_calcop = 28

    RULE_message = 29

    RULE_everyone = 30

    RULE_foreachdir = 31

    ruleNames =  [ "asm", "line", "cmd", "pickup", "step", "directions", 
                   "label", "cond", "expressions", "expression", "comment", 
                   "items", "item", "write", "calc", "setval", "take", "give", 
                   "nearest", "drop", "sonst", "sbhcomment", "sbhcommentd", 
                   "listen", "tell", "direction", "mem", "number", "calcop", 
                   "message", "everyone", "foreachdir" ]

    EOF = Token.EOF

    # Token type constants, matching symbolicNames above.
    COMMENT=1

    WHITESPACE=2

    JUMP=3

    STEP=4

    PICKUP=5

    IF=6

    ELSE=7

    ENDIF=8

    DROP=9

    WRITE=10

    CALC=11

    SET=12

    TAKE=13

    GIVE=14

    NEAREST=15

    MYITEM=16

    END=17

    NOTHING=18

    ITEM=19

    MEM=20

    COMPARE=21

    CALC_OP=22

    NUMBER=23

    DIRECTION=24

    LABEL=25

    EOL=26

    COLON=27

    COMMA=28

    EQUAL=29

    AND=30

    OR=31

    LISTEN=32

    TELL=33

    EVERYONE=34

    MESSAGE=35

    FOREACHDIR=36

    ENDFOR=37

    GAMECOMMENT=38

    GAMECOMMENTDEF=39

    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Create a parser over `input`, wiring up the shared ATN simulator."""
        super().__init__(input, output)
        self.checkVersion("4.7.1")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class AsmContext(ParserRuleContext):
        # Parse-tree node for rule 'asm' (one or more 'line' children).

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def line(self, i:int=None):
            # i=None returns all line children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.LineContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.LineContext,i)

        def getRuleIndex(self):
            return SBHasmParser.RULE_asm

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterAsm" ):
                listener.enterAsm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitAsm" ):
                listener.exitAsm(self)

    def asm(self):
        """Parse rule 'asm': one or more 'line' rules (generated code)."""

        localctx = SBHasmParser.AsmContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_asm)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 65 
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Keep consuming lines while the lookahead can start another line.
            while True:
                self.state = 64
                self.line()
                self.state = 67 
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SBHasmParser.COMMENT) | (1 << SBHasmParser.JUMP) | (1 << SBHasmParser.STEP) | (1 << SBHasmParser.PICKUP) | (1 << SBHasmParser.IF) | (1 << SBHasmParser.DROP) | (1 << SBHasmParser.WRITE) | (1 << SBHasmParser.TAKE) | (1 << SBHasmParser.GIVE) | (1 << SBHasmParser.END) | (1 << SBHasmParser.MEM) | (1 << SBHasmParser.LABEL) | (1 << SBHasmParser.EOL) | (1 << SBHasmParser.LISTEN) | (1 << SBHasmParser.TELL) | (1 << SBHasmParser.GAMECOMMENT) | (1 << SBHasmParser.GAMECOMMENTDEF))) != 0)):
                    break

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LineContext(ParserRuleContext):
        # Parse-tree node for rule 'line': optional content then an EOL.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def EOL(self):
            return self.getToken(SBHasmParser.EOL, 0)

        def cmd(self):
            return self.getTypedRuleContext(SBHasmParser.CmdContext,0)

        def comment(self):
            return self.getTypedRuleContext(SBHasmParser.CommentContext,0)

        def label(self):
            return self.getTypedRuleContext(SBHasmParser.LabelContext,0)

        def sbhcomment(self):
            return self.getTypedRuleContext(SBHasmParser.SbhcommentContext,0)

        def sbhcommentd(self):
            return self.getTypedRuleContext(SBHasmParser.SbhcommentdContext,0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_line

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLine" ):
                listener.enterLine(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLine" ):
                listener.exitLine(self)

    def line(self):
        """Parse rule 'line': (cmd|comment|label|sbhcomment|sbhcommentd)? EOL."""

        localctx = SBHasmParser.LineContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_line)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 74
            self._errHandler.sync(self)
            # Dispatch on the lookahead token; a bare EOL is an empty line.
            token = self._input.LA(1)
            if token in [SBHasmParser.JUMP, SBHasmParser.STEP, SBHasmParser.PICKUP, SBHasmParser.IF, SBHasmParser.DROP, SBHasmParser.WRITE, SBHasmParser.TAKE, SBHasmParser.GIVE, SBHasmParser.END, SBHasmParser.MEM, SBHasmParser.LISTEN, SBHasmParser.TELL]:
                self.state = 69
                self.cmd()
                pass
            elif token in [SBHasmParser.COMMENT]:
                self.state = 70
                self.comment()
                pass
            elif token in [SBHasmParser.LABEL]:
                self.state = 71
                self.label()
                pass
            elif token in [SBHasmParser.GAMECOMMENT]:
                self.state = 72
                self.sbhcomment()
                pass
            elif token in [SBHasmParser.GAMECOMMENTDEF]:
                self.state = 73
                self.sbhcommentd()
                pass
            elif token in [SBHasmParser.EOL]:
                pass
            else:
                pass
            self.state = 76
            self.match(SBHasmParser.EOL)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CmdContext(ParserRuleContext):
        # Parse-tree node for rule 'cmd': one of 16 command alternatives.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def JUMP(self):
            return self.getToken(SBHasmParser.JUMP, 0)

        def step(self):
            return self.getTypedRuleContext(SBHasmParser.StepContext,0)

        def pickup(self):
            return self.getTypedRuleContext(SBHasmParser.PickupContext,0)

        def cond(self):
            return self.getTypedRuleContext(SBHasmParser.CondContext,0)

        def write(self):
            return self.getTypedRuleContext(SBHasmParser.WriteContext,0)

        def drop(self):
            return self.getTypedRuleContext(SBHasmParser.DropContext,0)

        def calc(self):
            return self.getTypedRuleContext(SBHasmParser.CalcContext,0)

        def setval(self):
            return self.getTypedRuleContext(SBHasmParser.SetvalContext,0)

        def take(self):
            return self.getTypedRuleContext(SBHasmParser.TakeContext,0)

        def give(self):
            return self.getTypedRuleContext(SBHasmParser.GiveContext,0)

        def nearest(self):
            return self.getTypedRuleContext(SBHasmParser.NearestContext,0)

        def END(self):
            return self.getToken(SBHasmParser.END, 0)

        def listen(self):
            return self.getTypedRuleContext(SBHasmParser.ListenContext,0)

        def tell(self):
            return self.getTypedRuleContext(SBHasmParser.TellContext,0)

        def foreachdir(self):
            return self.getTypedRuleContext(SBHasmParser.ForeachdirContext,0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_cmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCmd" ):
                listener.enterCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCmd" ):
                listener.exitCmd(self)

    def cmd(self):
        """Parse rule 'cmd' by adaptive prediction over its 16 alternatives.

        NOTE(review): alternatives 3 and 13 both invoke pickup() — this
        mirrors the serialized ATN; verify against SBHasm.g4 if regenerating.
        """

        localctx = SBHasmParser.CmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_cmd)
        try:
            self.state = 94
            self._errHandler.sync(self)
            # Let the ATN simulator pick the alternative (1-16).
            la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 78
                self.match(SBHasmParser.JUMP)
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 79
                self.step()
                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 80
                self.pickup()
                pass

            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 81
                self.cond()
                pass

            elif la_ == 5:
                self.enterOuterAlt(localctx, 5)
                self.state = 82
                self.write()
                pass

            elif la_ == 6:
                self.enterOuterAlt(localctx, 6)
                self.state = 83
                self.drop()
                pass

            elif la_ == 7:
                self.enterOuterAlt(localctx, 7)
                self.state = 84
                self.calc()
                pass

            elif la_ == 8:
                self.enterOuterAlt(localctx, 8)
                self.state = 85
                self.setval()
                pass

            elif la_ == 9:
                self.enterOuterAlt(localctx, 9)
                self.state = 86
                self.take()
                pass

            elif la_ == 10:
                self.enterOuterAlt(localctx, 10)
                self.state = 87
                self.give()
                pass

            elif la_ == 11:
                self.enterOuterAlt(localctx, 11)
                self.state = 88
                self.nearest()
                pass

            elif la_ == 12:
                self.enterOuterAlt(localctx, 12)
                self.state = 89
                self.match(SBHasmParser.END)
                pass

            elif la_ == 13:
                self.enterOuterAlt(localctx, 13)
                self.state = 90
                self.pickup()
                pass

            elif la_ == 14:
                self.enterOuterAlt(localctx, 14)
                self.state = 91
                self.listen()
                pass

            elif la_ == 15:
                self.enterOuterAlt(localctx, 15)
                self.state = 92
                self.tell()
                pass

            elif la_ == 16:
                self.enterOuterAlt(localctx, 16)
                self.state = 93
                self.foreachdir()
                pass


        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PickupContext(ParserRuleContext):
        # Parse-tree node for rule 'pickup': PICKUP with optional target.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def PICKUP(self):
            return self.getToken(SBHasmParser.PICKUP, 0)

        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)

        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_pickup

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPickup" ):
                listener.enterPickup(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPickup" ):
                listener.exitPickup(self)

    def pickup(self):
        """Parse rule 'pickup': PICKUP followed by an optional direction or mem."""

        localctx = SBHasmParser.PickupContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_pickup)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 96
            self.match(SBHasmParser.PICKUP)
            self.state = 99
            self._errHandler.sync(self)
            # Target is optional: an EOL lookahead means bare 'pickup'.
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 97
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 98
                self.mem()
                pass
            elif token in [SBHasmParser.EOL]:
                pass
            else:
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StepContext(ParserRuleContext):
        # Parse-tree node for rule 'step': STEP with a required target.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def STEP(self):
            return self.getToken(SBHasmParser.STEP, 0)

        def directions(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionsContext,0)

        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_step

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStep" ):
                listener.enterStep(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStep" ):
                listener.exitStep(self)

    def step(self):
        """Parse rule 'step': STEP followed by directions or mem (mandatory)."""

        localctx = SBHasmParser.StepContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_step)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 101
            self.match(SBHasmParser.STEP)
            self.state = 104
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 102
                self.directions()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 103
                self.mem()
                pass
            else:
                # Unlike 'pickup', a bare 'step' is a syntax error.
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class DirectionsContext(ParserRuleContext):
        # Parse-tree node for rule 'directions': a comma-separated,
        # right-recursive list of 'direction' rules.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)

        def COMMA(self):
            return self.getToken(SBHasmParser.COMMA, 0)

        def directions(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionsContext,0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_directions

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDirections" ):
                listener.enterDirections(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDirections" ):
                listener.exitDirections(self)

    def directions(self):
        """Parse rule 'directions': direction (',' directions)? — recursive."""

        localctx = SBHasmParser.DirectionsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_directions)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 106
            self.direction()
            self.state = 109
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SBHasmParser.COMMA:
                # Tail of the list: recurse after consuming the comma.
                self.state = 107
                self.match(SBHasmParser.COMMA)
                self.state = 108
                self.directions()


        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LabelContext(ParserRuleContext):
        # Parse-tree node for rule 'label': a single LABEL token.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def LABEL(self):
            return self.getToken(SBHasmParser.LABEL, 0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_label

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLabel" ):
                listener.enterLabel(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLabel" ):
                listener.exitLabel(self)

    def label(self):
        """Parse rule 'label': match one LABEL token."""

        localctx = SBHasmParser.LabelContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_label)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 111
            self.match(SBHasmParser.LABEL)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CondContext(ParserRuleContext):
        # Parse-tree node for rule 'cond': an if/else/endif block.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def IF(self):
            return self.getToken(SBHasmParser.IF, 0)

        def expressions(self):
            return self.getTypedRuleContext(SBHasmParser.ExpressionsContext,0)

        def COLON(self):
            return self.getToken(SBHasmParser.COLON, 0)

        def EOL(self, i:int=None):
            if i is None:
                return self.getTokens(SBHasmParser.EOL)
            else:
                return self.getToken(SBHasmParser.EOL, i)

        def ENDIF(self):
            return self.getToken(SBHasmParser.ENDIF, 0)

        def line(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.LineContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.LineContext,i)

        def sonst(self):
            # 'sonst' is the else-branch marker rule (German for "else").
            return self.getTypedRuleContext(SBHasmParser.SonstContext,0)

        def getRuleIndex(self):
            return SBHasmParser.RULE_cond

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCond" ):
                listener.enterCond(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCond" ):
                listener.exitCond(self)

    def cond(self):
        """Parse rule 'cond': IF expressions ':' EOL line+ (sonst EOL line+)? ENDIF."""

        localctx = SBHasmParser.CondContext(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_cond)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 113
            self.match(SBHasmParser.IF)
            self.state = 114
            self.expressions()
            self.state = 115
            self.match(SBHasmParser.COLON)
            self.state = 116
            self.match(SBHasmParser.EOL)
            self.state = 118 
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Then-branch body: one or more lines.
            while True:
                self.state = 117
                self.line()
                self.state = 120 
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SBHasmParser.COMMENT) | (1 << SBHasmParser.JUMP) | (1 << SBHasmParser.STEP) | (1 << SBHasmParser.PICKUP) | (1 << SBHasmParser.IF) | (1 << SBHasmParser.DROP) | (1 << SBHasmParser.WRITE) | (1 << SBHasmParser.TAKE) | (1 << SBHasmParser.GIVE) | (1 << SBHasmParser.END) | (1 << SBHasmParser.MEM) | (1 << SBHasmParser.LABEL) | (1 << SBHasmParser.EOL) | (1 << SBHasmParser.LISTEN) | (1 << SBHasmParser.TELL) | (1 << SBHasmParser.GAMECOMMENT) | (1 << SBHasmParser.GAMECOMMENTDEF))) != 0)):
                    break

            self.state = 129
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional else-branch, introduced by ELSE ('else:').
            if _la==SBHasmParser.ELSE:
                self.state = 122
                self.sonst()
                self.state = 123
                self.match(SBHasmParser.EOL)
                self.state = 125 
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while True:
                    self.state = 124
                    self.line()
                    self.state = 127 
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SBHasmParser.COMMENT) | (1 << SBHasmParser.JUMP) | (1 << SBHasmParser.STEP) | (1 << SBHasmParser.PICKUP) | (1 << SBHasmParser.IF) | (1 << SBHasmParser.DROP) | (1 << SBHasmParser.WRITE) | (1 << SBHasmParser.TAKE) | (1 << SBHasmParser.GIVE) | (1 << SBHasmParser.END) | (1 << SBHasmParser.MEM) | (1 << SBHasmParser.LABEL) | (1 << SBHasmParser.EOL) | (1 << SBHasmParser.LISTEN) | (1 << SBHasmParser.TELL) | (1 << SBHasmParser.GAMECOMMENT) | (1 << SBHasmParser.GAMECOMMENTDEF))) != 0)):
                        break



            self.state = 131
            self.match(SBHasmParser.ENDIF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ExpressionsContext(ParserRuleContext):
        # Parse-tree node for rule 'expressions': expression ((AND|OR) EOL expression)*.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def expression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.ExpressionContext,i)

        def EOL(self, i:int=None):
            if i is None:
                return self.getTokens(SBHasmParser.EOL)
            else:
                return self.getToken(SBHasmParser.EOL, i)

        def AND(self, i:int=None):
            if i is None:
                return self.getTokens(SBHasmParser.AND)
            else:
                return self.getToken(SBHasmParser.AND, i)

        def OR(self, i:int=None):
            if i is None:
                return self.getTokens(SBHasmParser.OR)
            else:
                return self.getToken(SBHasmParser.OR, i)

        def getRuleIndex(self):
            return SBHasmParser.RULE_expressions

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExpressions" ):
                listener.enterExpressions(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExpressions" ):
                listener.exitExpressions(self)

    def expressions(self):
        """Parse rule 'expressions': expression, then any number of
        (AND|OR) EOL expression continuations."""

        localctx = SBHasmParser.ExpressionsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_expressions)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 133
            self.expression()
            self.state = 139
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SBHasmParser.AND or _la==SBHasmParser.OR:
                self.state = 134
                # Consume either AND or OR (set match on the connective).
                _la = self._input.LA(1)
                if not(_la==SBHasmParser.AND or _la==SBHasmParser.OR):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
                self.state = 135
                self.match(SBHasmParser.EOL)
                self.state = 136
                self.expression()
                self.state = 141
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ExpressionContext(ParserRuleContext):
        # Parse-tree node for rule 'expression': operand COMPARE operand.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def COMPARE(self):
            return self.getToken(SBHasmParser.COMPARE, 0)

        def direction(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.DirectionContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.DirectionContext,i)

        def items(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.ItemsContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.ItemsContext,i)

        def number(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.NumberContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.NumberContext,i)

        def getRuleIndex(self):
            return SBHasmParser.RULE_expression

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExpression" ):
                listener.enterExpression(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExpression" ):
                listener.exitExpression(self)

    def expression(self):
        """Parse rule 'expression': (direction|items|number) COMPARE
        (direction|items|number)."""

        localctx = SBHasmParser.ExpressionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 18, self.RULE_expression)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 145
            self._errHandler.sync(self)
            # Left operand.
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 142
                self.direction()
                pass
            elif token in [SBHasmParser.MYITEM, SBHasmParser.NOTHING, SBHasmParser.ITEM, SBHasmParser.MEM]:
                self.state = 143
                self.items()
                pass
            elif token in [SBHasmParser.NUMBER]:
                self.state = 144
                self.number()
                pass
            else:
                raise NoViableAltException(self)

            self.state = 147
            self.match(SBHasmParser.COMPARE)
            self.state = 151
            self._errHandler.sync(self)
            # Right operand (same alternatives as the left).
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 148
                self.direction()
                pass
            elif token in [SBHasmParser.MYITEM, SBHasmParser.NOTHING, SBHasmParser.ITEM, SBHasmParser.MEM]:
                self.state = 149
                self.items()
                pass
            elif token in [SBHasmParser.NUMBER]:
                self.state = 150
                self.number()
                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'comment'.
    class CommentContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def COMMENT(self):
            return self.getToken(SBHasmParser.COMMENT, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_comment
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterComment" ):
                listener.enterComment(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitComment" ):
                listener.exitComment(self)
    # Rule: comment : COMMENT ;  (ANTLR-generated)
    def comment(self):
        localctx = SBHasmParser.CommentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 20, self.RULE_comment)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 153
            self.match(SBHasmParser.COMMENT)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'items'.
    class ItemsContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def item(self):
            return self.getTypedRuleContext(SBHasmParser.ItemContext,0)
        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)
        def MYITEM(self):
            return self.getToken(SBHasmParser.MYITEM, 0)
        def NOTHING(self):
            return self.getToken(SBHasmParser.NOTHING, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_items
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterItems" ):
                listener.enterItems(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitItems" ):
                listener.exitItems(self)
    # Rule: items : item | mem | MYITEM | NOTHING ;  (ANTLR-generated)
    def items(self):
        localctx = SBHasmParser.ItemsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 22, self.RULE_items)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 159
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.ITEM]:
                self.state = 155
                self.item()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 156
                self.mem()
                pass
            elif token in [SBHasmParser.MYITEM]:
                self.state = 157
                self.match(SBHasmParser.MYITEM)
                pass
            elif token in [SBHasmParser.NOTHING]:
                self.state = 158
                self.match(SBHasmParser.NOTHING)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'item'.
    class ItemContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def ITEM(self):
            return self.getToken(SBHasmParser.ITEM, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_item
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterItem" ):
                listener.enterItem(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitItem" ):
                listener.exitItem(self)
    # Rule: item : ITEM ;  (ANTLR-generated)
    def item(self):
        localctx = SBHasmParser.ItemContext(self, self._ctx, self.state)
        self.enterRule(localctx, 24, self.RULE_item)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 161
            self.match(SBHasmParser.ITEM)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'write'.
    class WriteContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def WRITE(self):
            return self.getToken(SBHasmParser.WRITE, 0)
        def number(self):
            return self.getTypedRuleContext(SBHasmParser.NumberContext,0)
        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)
        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_write
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterWrite" ):
                listener.enterWrite(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitWrite" ):
                listener.exitWrite(self)
    # Rule: write : WRITE (number | direction | mem) ;  (ANTLR-generated)
    def write(self):
        localctx = SBHasmParser.WriteContext(self, self._ctx, self.state)
        self.enterRule(localctx, 26, self.RULE_write)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 163
            self.match(SBHasmParser.WRITE)
            self.state = 167
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.NUMBER]:
                self.state = 164
                self.number()
                pass
            elif token in [SBHasmParser.DIRECTION]:
                self.state = 165
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 166
                self.mem()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'calc'.
    class CalcContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def mem(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.MemContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.MemContext,i)
        def EQUAL(self):
            return self.getToken(SBHasmParser.EQUAL, 0)
        def CALC(self):
            return self.getToken(SBHasmParser.CALC, 0)
        def calcop(self):
            return self.getTypedRuleContext(SBHasmParser.CalcopContext,0)
        def direction(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.DirectionContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.DirectionContext,i)
        def MYITEM(self, i:int=None):
            if i is None:
                return self.getTokens(SBHasmParser.MYITEM)
            else:
                return self.getToken(SBHasmParser.MYITEM, i)
        def number(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.NumberContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.NumberContext,i)
        def getRuleIndex(self):
            return SBHasmParser.RULE_calc
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCalc" ):
                listener.enterCalc(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCalc" ):
                listener.exitCalc(self)
    # Rule: calc : mem EQUAL CALC (direction | mem | MYITEM | number) calcop
    #   (direction | mem | MYITEM | number) ;  (ANTLR-generated)
    def calc(self):
        localctx = SBHasmParser.CalcContext(self, self._ctx, self.state)
        self.enterRule(localctx, 28, self.RULE_calc)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 169
            self.mem()
            self.state = 170
            self.match(SBHasmParser.EQUAL)
            self.state = 171
            self.match(SBHasmParser.CALC)
            self.state = 176
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # First operand of the calculation.
            if token in [SBHasmParser.DIRECTION]:
                self.state = 172
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 173
                self.mem()
                pass
            elif token in [SBHasmParser.MYITEM]:
                self.state = 174
                self.match(SBHasmParser.MYITEM)
                pass
            elif token in [SBHasmParser.NUMBER]:
                self.state = 175
                self.number()
                pass
            else:
                raise NoViableAltException(self)
            self.state = 178
            self.calcop()
            self.state = 183
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Second operand of the calculation.
            if token in [SBHasmParser.DIRECTION]:
                self.state = 179
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 180
                self.mem()
                pass
            elif token in [SBHasmParser.MYITEM]:
                self.state = 181
                self.match(SBHasmParser.MYITEM)
                pass
            elif token in [SBHasmParser.NUMBER]:
                self.state = 182
                self.number()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'setval'.
    class SetvalContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def mem(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.MemContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.MemContext,i)
        def EQUAL(self):
            return self.getToken(SBHasmParser.EQUAL, 0)
        def SET(self):
            return self.getToken(SBHasmParser.SET, 0)
        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)
        def MYITEM(self):
            return self.getToken(SBHasmParser.MYITEM, 0)
        def number(self):
            return self.getTypedRuleContext(SBHasmParser.NumberContext,0)
        def NOTHING(self):
            return self.getToken(SBHasmParser.NOTHING, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_setval
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetval" ):
                listener.enterSetval(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetval" ):
                listener.exitSetval(self)
    # Rule: setval : mem EQUAL SET (direction | mem | MYITEM | number | NOTHING) ;
    # (ANTLR-generated)
    def setval(self):
        localctx = SBHasmParser.SetvalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 30, self.RULE_setval)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 185
            self.mem()
            self.state = 186
            self.match(SBHasmParser.EQUAL)
            self.state = 187
            self.match(SBHasmParser.SET)
            self.state = 193
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 188
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 189
                self.mem()
                pass
            elif token in [SBHasmParser.MYITEM]:
                self.state = 190
                self.match(SBHasmParser.MYITEM)
                pass
            elif token in [SBHasmParser.NUMBER]:
                self.state = 191
                self.number()
                pass
            elif token in [SBHasmParser.NOTHING]:
                self.state = 192
                self.match(SBHasmParser.NOTHING)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'take'.
    class TakeContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def TAKE(self):
            return self.getToken(SBHasmParser.TAKE, 0)
        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)
        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_take
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTake" ):
                listener.enterTake(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTake" ):
                listener.exitTake(self)
    # Rule: take : TAKE (direction | mem) ;  (ANTLR-generated)
    def take(self):
        localctx = SBHasmParser.TakeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 32, self.RULE_take)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 195
            self.match(SBHasmParser.TAKE)
            self.state = 198
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 196
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 197
                self.mem()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'give'.
    class GiveContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def GIVE(self):
            return self.getToken(SBHasmParser.GIVE, 0)
        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)
        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_give
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGive" ):
                listener.enterGive(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGive" ):
                listener.exitGive(self)
    # Rule: give : GIVE (direction | mem) ;  (ANTLR-generated)
    def give(self):
        localctx = SBHasmParser.GiveContext(self, self._ctx, self.state)
        self.enterRule(localctx, 34, self.RULE_give)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 200
            self.match(SBHasmParser.GIVE)
            self.state = 203
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.DIRECTION]:
                self.state = 201
                self.direction()
                pass
            elif token in [SBHasmParser.MEM]:
                self.state = 202
                self.mem()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'nearest'.
    class NearestContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)
        def EQUAL(self):
            return self.getToken(SBHasmParser.EQUAL, 0)
        def NEAREST(self):
            return self.getToken(SBHasmParser.NEAREST, 0)
        def item(self):
            return self.getTypedRuleContext(SBHasmParser.ItemContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_nearest
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNearest" ):
                listener.enterNearest(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNearest" ):
                listener.exitNearest(self)
    # Rule: nearest : mem EQUAL NEAREST item ;  (ANTLR-generated)
    def nearest(self):
        localctx = SBHasmParser.NearestContext(self, self._ctx, self.state)
        self.enterRule(localctx, 36, self.RULE_nearest)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 205
            self.mem()
            self.state = 206
            self.match(SBHasmParser.EQUAL)
            self.state = 207
            self.match(SBHasmParser.NEAREST)
            self.state = 208
            self.item()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'drop'.
    class DropContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def DROP(self):
            return self.getToken(SBHasmParser.DROP, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_drop
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDrop" ):
                listener.enterDrop(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDrop" ):
                listener.exitDrop(self)
    # Rule: drop : DROP ;  (ANTLR-generated)
    def drop(self):
        localctx = SBHasmParser.DropContext(self, self._ctx, self.state)
        self.enterRule(localctx, 38, self.RULE_drop)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 210
            self.match(SBHasmParser.DROP)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'sonst' ("else" branch keyword).
    class SonstContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def ELSE(self):
            return self.getToken(SBHasmParser.ELSE, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_sonst
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSonst" ):
                listener.enterSonst(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSonst" ):
                listener.exitSonst(self)
    # Rule: sonst : ELSE ;  (ANTLR-generated)
    def sonst(self):
        localctx = SBHasmParser.SonstContext(self, self._ctx, self.state)
        self.enterRule(localctx, 40, self.RULE_sonst)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 212
            self.match(SBHasmParser.ELSE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'sbhcomment'.
    class SbhcommentContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def GAMECOMMENT(self):
            return self.getToken(SBHasmParser.GAMECOMMENT, 0)
        def number(self):
            return self.getTypedRuleContext(SBHasmParser.NumberContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_sbhcomment
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSbhcomment" ):
                listener.enterSbhcomment(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSbhcomment" ):
                listener.exitSbhcomment(self)
    # Rule: sbhcomment : GAMECOMMENT number ;  (ANTLR-generated)
    def sbhcomment(self):
        localctx = SBHasmParser.SbhcommentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 42, self.RULE_sbhcomment)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 214
            self.match(SBHasmParser.GAMECOMMENT)
            self.state = 215
            self.number()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'sbhcommentd'.
    class SbhcommentdContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def GAMECOMMENTDEF(self):
            return self.getToken(SBHasmParser.GAMECOMMENTDEF, 0)
        def number(self):
            return self.getTypedRuleContext(SBHasmParser.NumberContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_sbhcommentd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSbhcommentd" ):
                listener.enterSbhcommentd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSbhcommentd" ):
                listener.exitSbhcommentd(self)
    # Rule: sbhcommentd : GAMECOMMENTDEF number ;  (ANTLR-generated)
    def sbhcommentd(self):
        localctx = SBHasmParser.SbhcommentdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 44, self.RULE_sbhcommentd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 217
            self.match(SBHasmParser.GAMECOMMENTDEF)
            self.state = 218
            self.number()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'listen'.
    class ListenContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def LISTEN(self):
            return self.getToken(SBHasmParser.LISTEN, 0)
        def message(self):
            return self.getTypedRuleContext(SBHasmParser.MessageContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_listen
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterListen" ):
                listener.enterListen(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitListen" ):
                listener.exitListen(self)
    # Rule: listen : LISTEN message ;  (ANTLR-generated)
    def listen(self):
        localctx = SBHasmParser.ListenContext(self, self._ctx, self.state)
        self.enterRule(localctx, 46, self.RULE_listen)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 220
            self.match(SBHasmParser.LISTEN)
            self.state = 221
            self.message()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'tell'.
    class TellContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def TELL(self):
            return self.getToken(SBHasmParser.TELL, 0)
        def message(self):
            return self.getTypedRuleContext(SBHasmParser.MessageContext,0)
        def everyone(self):
            return self.getTypedRuleContext(SBHasmParser.EveryoneContext,0)
        def direction(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_tell
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTell" ):
                listener.enterTell(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTell" ):
                listener.exitTell(self)
    # Rule: tell : TELL (everyone | direction) message ;  (ANTLR-generated)
    def tell(self):
        localctx = SBHasmParser.TellContext(self, self._ctx, self.state)
        self.enterRule(localctx, 48, self.RULE_tell)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 223
            self.match(SBHasmParser.TELL)
            self.state = 226
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SBHasmParser.EVERYONE]:
                self.state = 224
                self.everyone()
                pass
            elif token in [SBHasmParser.DIRECTION]:
                self.state = 225
                self.direction()
                pass
            else:
                raise NoViableAltException(self)
            self.state = 228
            self.message()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'direction'.
    class DirectionContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def DIRECTION(self):
            return self.getToken(SBHasmParser.DIRECTION, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_direction
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDirection" ):
                listener.enterDirection(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDirection" ):
                listener.exitDirection(self)
    # Rule: direction : DIRECTION ;  (ANTLR-generated)
    def direction(self):
        localctx = SBHasmParser.DirectionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 50, self.RULE_direction)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 230
            self.match(SBHasmParser.DIRECTION)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'mem'.
    class MemContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def MEM(self):
            return self.getToken(SBHasmParser.MEM, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_mem
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMem" ):
                listener.enterMem(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMem" ):
                listener.exitMem(self)
    # Rule: mem : MEM ;  (ANTLR-generated)
    def mem(self):
        localctx = SBHasmParser.MemContext(self, self._ctx, self.state)
        self.enterRule(localctx, 52, self.RULE_mem)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 232
            self.match(SBHasmParser.MEM)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'number'.
    class NumberContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def NUMBER(self):
            return self.getToken(SBHasmParser.NUMBER, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_number
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNumber" ):
                listener.enterNumber(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNumber" ):
                listener.exitNumber(self)
    # Rule: number : NUMBER ;  (ANTLR-generated)
    def number(self):
        localctx = SBHasmParser.NumberContext(self, self._ctx, self.state)
        self.enterRule(localctx, 54, self.RULE_number)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 234
            self.match(SBHasmParser.NUMBER)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'calcop' (arithmetic operator).
    class CalcopContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def CALC_OP(self):
            return self.getToken(SBHasmParser.CALC_OP, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_calcop
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCalcop" ):
                listener.enterCalcop(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCalcop" ):
                listener.exitCalcop(self)
    # Rule: calcop : CALC_OP ;  (ANTLR-generated)
    def calcop(self):
        localctx = SBHasmParser.CalcopContext(self, self._ctx, self.state)
        self.enterRule(localctx, 56, self.RULE_calcop)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 236
            self.match(SBHasmParser.CALC_OP)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'message'.
    class MessageContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def MESSAGE(self):
            return self.getToken(SBHasmParser.MESSAGE, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_message
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMessage" ):
                listener.enterMessage(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMessage" ):
                listener.exitMessage(self)
    # Rule: message : MESSAGE ;  (ANTLR-generated)
    def message(self):
        localctx = SBHasmParser.MessageContext(self, self._ctx, self.state)
        self.enterRule(localctx, 58, self.RULE_message)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 238
            self.match(SBHasmParser.MESSAGE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'everyone'.
    class EveryoneContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def EVERYONE(self):
            return self.getToken(SBHasmParser.EVERYONE, 0)
        def getRuleIndex(self):
            return SBHasmParser.RULE_everyone
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterEveryone" ):
                listener.enterEveryone(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitEveryone" ):
                listener.exitEveryone(self)
    # Rule: everyone : EVERYONE ;  (ANTLR-generated)
    def everyone(self):
        localctx = SBHasmParser.EveryoneContext(self, self._ctx, self.state)
        self.enterRule(localctx, 60, self.RULE_everyone)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 240
            self.match(SBHasmParser.EVERYONE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR parse-tree context for rule 'foreachdir'.
    class ForeachdirContext(ParserRuleContext):
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def mem(self):
            return self.getTypedRuleContext(SBHasmParser.MemContext,0)
        def EQUAL(self):
            return self.getToken(SBHasmParser.EQUAL, 0)
        def FOREACHDIR(self):
            return self.getToken(SBHasmParser.FOREACHDIR, 0)
        def directions(self):
            return self.getTypedRuleContext(SBHasmParser.DirectionsContext,0)
        def COLON(self):
            return self.getToken(SBHasmParser.COLON, 0)
        def EOL(self):
            return self.getToken(SBHasmParser.EOL, 0)
        def ENDFOR(self):
            return self.getToken(SBHasmParser.ENDFOR, 0)
        def line(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SBHasmParser.LineContext)
            else:
                return self.getTypedRuleContext(SBHasmParser.LineContext,i)
        def getRuleIndex(self):
            return SBHasmParser.RULE_foreachdir
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterForeachdir" ):
                listener.enterForeachdir(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitForeachdir" ):
                listener.exitForeachdir(self)
    # Rule: foreachdir : mem EQUAL FOREACHDIR directions COLON EOL line+ ENDFOR ;
    # (ANTLR-generated)
    def foreachdir(self):
        localctx = SBHasmParser.ForeachdirContext(self, self._ctx, self.state)
        self.enterRule(localctx, 62, self.RULE_foreachdir)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 242
            self.mem()
            self.state = 243
            self.match(SBHasmParser.EQUAL)
            self.state = 244
            self.match(SBHasmParser.FOREACHDIR)
            self.state = 245
            self.directions()
            self.state = 246
            self.match(SBHasmParser.COLON)
            self.state = 247
            self.match(SBHasmParser.EOL)
            self.state = 249
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # One or more body lines; the bitset below is the FIRST set of 'line'.
            while True:
                self.state = 248
                self.line()
                self.state = 251
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SBHasmParser.COMMENT) | (1 << SBHasmParser.JUMP) | (1 << SBHasmParser.STEP) | (1 << SBHasmParser.PICKUP) | (1 << SBHasmParser.IF) | (1 << SBHasmParser.DROP) | (1 << SBHasmParser.WRITE) | (1 << SBHasmParser.TAKE) | (1 << SBHasmParser.GIVE) | (1 << SBHasmParser.END) | (1 << SBHasmParser.MEM) | (1 << SBHasmParser.LABEL) | (1 << SBHasmParser.EOL) | (1 << SBHasmParser.LISTEN) | (1 << SBHasmParser.TELL) | (1 << SBHasmParser.GAMECOMMENT) | (1 << SBHasmParser.GAMECOMMENTDEF))) != 0)):
                    break
            self.state = 253
            self.match(SBHasmParser.ENDFOR)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
| StarcoderdataPython |
3290483 | <reponame>Shchusia/gen_doc
"""
Setup module for install lib
"""
import os
import re
from os import path
from pathlib import Path
from typing import List, Optional
from setuptools import setup
# Distribution name of the library (also its top-level package directory).
LIB_NAME = 'gen_doc'
# Directory containing this setup.py.
HERE = Path(__file__).parent
this_directory = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
def get_version() -> Optional[str]:
    """
    Read the library version from ``gen_doc/__init__.py``.

    The init file must define the version on its own line, e.g.
    ``__version__ = '0.0.1'`` (a combined assignment such as
    ``__version__, other = '0.0.1', x`` is not supported).

    :return: the version string
    :raises RuntimeError: if no ``__version__`` assignment is found
    """
    txt = (HERE / LIB_NAME / "__init__.py").read_text("utf-8")
    # Normalize quoting so a single regex handles both ' and ".
    txt = txt.replace("'", '"')
    match = re.search(r'^__version__ = "([^"]+)"\r?$', txt, re.M)
    if match is None:
        # Explicit check instead of the previous findall(...)[0] /
        # except IndexError control flow.
        raise RuntimeError("Unable to determine version.")
    return match.group(1)
def get_packages(lib_name: Optional[str] = None) -> List[str]:
    """
    List the library's package directories as dotted module paths.

    Walks the package directory tree and converts each path to the dotted
    form expected by setuptools' ``packages`` argument, excluding
    ``__pycache__`` directories.

    :param lib_name: root package directory to walk; defaults to ``LIB_NAME``
    :return: List[str] dotted paths of the library's packages
    """
    if lib_name is None:
        # Resolved lazily so the default tracks the module-level constant.
        lib_name = LIB_NAME
    ignore = {'__pycache__'}
    return [folder.replace(os.sep, '.')
            for folder, _dirs, _files in os.walk(lib_name)
            if folder.split(os.sep)[-1] not in ignore]
# Package metadata; evaluated by pip/setuptools at build and install time.
setup(name=LIB_NAME,
      version=get_version(),
      description='Module for build documentation',
      author='<NAME>',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author_email='<EMAIL>',
      url='https://github.com/Shchusia/gen_doc',
      packages=get_packages(),
      keywords=['pip', LIB_NAME],
      python_requires='>=3.7',
      # Installs a 'gen_doc' console command backed by gen_doc.commands:main.
      entry_points={
          'console_scripts': [
              'gen_doc=gen_doc.commands:main'
          ]},
      )
| StarcoderdataPython |
90959 | <reponame>pepsipepsi/nodebox_opengl_python3
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
# Generate compositions using random text.
# Set the typeface used by all subsequent text() calls.
font('Arial Black')
def rndText():
    """Returns a random string of up to 9 characters."""
    # random(10) picks the length; each character code comes from
    # random(10, 120), which may include non-printable codes.
    length = random(10)
    return "".join(chr(random(10,120)) for _ in range(length))
def draw(canvas):
    """Frame handler: paint 100 randomly placed, rotated text fragments."""
    canvas.clear()
    # Define some colors.
    #colormode(HSB)
    # NOTE(review): colormode(HSB) is disabled, so these color() calls use
    # the default color mode.
    white = color(1,1,1,0.8)
    black = color(0,0,0,0.8)
    red = color(random(),0,0.2,0.8)
    translate(0,-200)
    for i in range(100):
        # This translation is not reset every time, so it is
        # appended to previous translations. This gives
        # interesting effects.
        translate(random(-100,100),random(-100,100))
        # Save the current transformation. It's a good idea
        # to do this in the beginning of a loop. End the
        # loop with a pop.
        push()
        # Rotate in increments of 45 degrees.
        rotate(random(5)*45)
        fontsize(random(800))
        fill(choice((white,black,red)))
        someText = rndText()
        text(someText, 0,0)
        pop()
# Open a 500x500 canvas and start the loop with draw() as the frame handler.
canvas.size = 500,500
canvas.run(draw)
3358972 | <reponame>dhimmel/serg-pycode<filename>bioparser/efo.py
import os
import csv
import networkx
import data
import networkx_ontology
import obo
class EFO(obo.OBO):
    """Accessor for the Experimental Factor Ontology (EFO) OBO file."""

    def __init__(self, directory=None):
        # Default to the project's current 'efo' data directory.
        if directory is None:
            directory = data.current_path('efo')
        obo_filename = 'efo.obo'
        keep_attributes = ['name', 'def', 'synonym']
        super(EFO, self).__init__(directory, obo_filename, keep_attributes)
    def get_diseases(self, root = 'EFO:0000408'):
        """Return descendants of *root* (default EFO:0000408, 'disease')."""
        'EFO:0000408' # disease
        ontology = self.get_ontology()
        return ontology.get_descendents(root)
    def get_neoplasms(self):
        """Return descendants of EFO:0000616 ('neoplasm')."""
        root = 'EFO:0000616' # neoplasm
        ontology = self.get_ontology()
        return ontology.get_descendents(root)
    def get_non_neoplastic_diseases(self):
        # Set difference: diseases that are not neoplasms.
        return self.get_diseases() - self.get_neoplasms()
    def gxa_query_compounds(self, root='CHEBI:37577'):
        """
        Finds all leaf nodes which are descendents of root.
        Colons are replaced with underscores in the returned output.
        Writes 'gxa_query_compounds.txt' showing term id and names.
        The default root is CHEBI:37577 for 'chemical compound'.
        """
        self.get_graph()
        chemical_compounds = list(networkx.dfs_postorder_nodes(self.graph, source=root))
        # Leaf nodes have no outgoing edges in the ontology graph.
        query_compounds = filter(lambda x: self.graph.out_degree(x) == 0, chemical_compounds)
        #self.write_terms(query_compounds, 'gxa_query_compounds.txt')
        #query_compounds = map(replace_colon, query_compounds)
        return query_compounds
    def gxa_query_diseases(self, root='EFO:0000408'):
        """
        The sef of leaf nodes which are descendents of root is computed.
        The predecessors of the leaf nodes are then computed. Nodes which have
        more than five total descedents are excluded. Nodes that are descendents
        of included nodes are excluded.
        Writes 'gxa_query_diseases.txt' showing term id and names.
        The default root is EFO_0000408 for 'disease'.
        """
        self.get_graph()
        diseases = list(self.get_diseases(root))
        leaf_diseases = filter(lambda x: self.graph.out_degree(x) == 0, diseases)
        # NOTE(review): on Python 3, filter() returns a one-shot iterator;
        # set(leaf_diseases) would exhaust it and the loop below would run
        # zero times. This code looks written for Python 2 — confirm.
        query_diseases = set(leaf_diseases)
        for leaf_disease in leaf_diseases:
            query_diseases |= set(self.graph.predecessors(leaf_disease))
        # Drop nodes with more than five descendants (too coarse).
        for node in list(query_diseases):
            num_descendents = len(list(networkx.dfs_postorder_nodes(self.graph, source=node))) - 1
            if num_descendents > 5:
                query_diseases.remove(node)
        # Drop nodes that are descendants of another kept node.
        for node in list(query_diseases):
            descendents = set(networkx.dfs_postorder_nodes(self.graph, source=node))
            descendents.remove(node)
            query_diseases -= descendents
        #self.write_terms(query_diseases, 'gxa_query_diseases.txt')
        #query_diseases = map(replace_colon, query_diseases)
        return query_diseases
# Smoke-test entry point: build the EFO graph; queries left commented out.
if __name__ =='__main__':
    efo = EFO()
    efo.get_graph()
    #efo.gxa_query_compounds()
    #efo.gxa_query_diseases()
3308377 | <filename>Util.py<gh_stars>0
import time
import os
import sys
def formatTime(t):
    """Render *t* seconds (since epoch / as a duration) as ``HH:MM:SS`` in UTC."""
    broken_down = time.gmtime(t)
    return time.strftime('%H:%M:%S', broken_down)
def formatTimeHM(t):
    """Render *t* seconds as ``HH:MM`` in UTC (seconds truncated)."""
    broken_down = time.gmtime(t)
    return time.strftime('%H:%M', broken_down)
def resource_path(relative_path):
    """Resolve *relative_path* against the application's resource root.

    Inside a PyInstaller bundle the extraction directory is exposed as
    ``sys._MEIPASS``; in a normal development run we fall back to the
    current working directory.
    """
    if hasattr(sys, '_MEIPASS'):
        base_path = sys._MEIPASS
    else:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
# Path to the bundled "UI Files" directory — presumably Qt .ui resources; verify.
UI_DIR = resource_path("UI Files/")
| StarcoderdataPython |
# Quick per-column profiling of the pump-it-up training set: for every
# column record dtype, unique/NaN/zero counts, and export to Excel.
import pandas as pd
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import folium
import pprint
pp = pprint.PrettyPrinter(indent =4)
# Data lives in a sibling "data" directory of the parent folder.
general_directory = os.path.split(os.getcwd())[0]
data_location = os.path.join(general_directory, "data")
dataset = "trainingsetvalues" +".csv"
datasetlabels = "trainingsetlabels.csv"
df = pd.read_csv(os.path.join(data_location, dataset))
df1 = pd.read_csv(os.path.join(data_location, datasetlabels))
# Join features with labels on the shared "id" column.
values_with_labels = pd.merge(left=df, right=df1, left_on = "id", right_on= "id")
#create dataframe where rows are all the different columns
described_df = pd.DataFrame(df.columns, columns = ['column'])
#add data type of a column to the described dataframe
described_df["data_type"] = [df[column].dtype for column in df.columns]
#add unique count of variables in a column to the described dataframe
described_df["unique_count"] = [df[column].nunique() for column in df.columns]
#add nan count of variables in a column to the described df
described_df["nan_count"] = [df[column].isna().sum() for column in df.columns]
#add zero (0) count of variables in a column to the described df
described_df["zero_count"] = [(df[column] == 0).sum(axis=0) for column in df.columns]
described_df.to_excel("description.xlsx")
print(described_df)
1747154 | <reponame>fossabot/bili-bonus<gh_stars>1-10
# -*- coding: utf-8 -*-
from src.luckydraw.main import start
| StarcoderdataPython |
# Military-enlistment age calculator, extended with a sex prompt.
# (User-facing strings are intentionally kept in Portuguese.)
from datetime import date
atual = date.today().year
print(' Alistamento obrigatório Militar')
print('Para sexo MASCULINO digite [1]\n Para sexo FEMININO digite [2] ')
sexo = int(input('Qual é o seu sexo? '))
if sexo == 1:
    nasc = int(input('Digite seu ano de nascimento: '))
    idade = atual - nasc
    print('quem nasceu em {} tem {} anos em {}'.format(nasc, idade, atual))
    # Exactly 18: enlist now.
    if idade == 18:
        print('Você precisa se alistar imadiatamente')
    # Under 18: report how many years remain and the enlistment year.
    if idade < 18:
        saldo = 18 - idade
        print(' Ainda faltam {} anos para seu alistamento'.format(saldo))
        ano = atual + saldo
        print('Seu alistamento será em {}'.format(ano))
    # Over 18: report how overdue and the year it should have happened.
    if idade > 18:
        saldo = idade - 18
        print('Você deveria ter se alistado há {} anos '.format(saldo))
        ano = atual - saldo
        print('seu alistamento foi no ano {}'.format(ano))
else:
    # Any non-1 answer (female) is exempt from enlistment.
    print('Você não precisa se alistar')
106713 | from discord.ext import commands
from xml.etree import ElementTree
import discord, os, requests, time, re, random
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
# Azure Cognitive Services credentials (redacted placeholders in this copy).
tts_subscription_key = '<KEY>'
text_analytics_subscription_key = '<KEY>'
class TTSMessage(commands.Cog):
    """Discord cog: speaks chat messages in voice channels via Azure TTS.

    Also exposes join/leave/play commands (command names are Chinese).
    """

    def __init__(self, bot):
        self.bot = bot
        self.tts_subscription_key = tts_subscription_key
    @commands.Cog.listener()
    async def on_message(self, message):
        """Synthesize and play every short, non-command chat message."""
        # we do not want the bot to reply to itself
        if message.author == self.bot.user:
            return
        # Note: if there are too many messages at the same time, some messages will be missed.
        # Strip mentions and URLs before synthesis.
        r_content = re.sub(r'\<\S+\>', '', message.content) # delete mentioned user_id
        r_content = re.sub(r'@\S+\s', '', r_content)
        r_content = re.sub(r'^https?:\/\/.*[\r\n]*', '', r_content, flags=re.MULTILINE)
        if len(r_content) <50: # if a sentence is too long, then ignore it.
            guild = message.guild
            if guild.me.voice != None: # if bot is in any voice channel
                voice_client: discord.VoiceClient = discord.utils.get(self.bot.voice_clients, guild=guild)
                if len(guild.me.voice.channel.members) > 1: # another or the other user in voice channel
                    if not voice_client.is_playing():
                        if not message.content.startswith('!'):
                            app = TextToSpeech(tts_subscription_key, message.author.display_name, r_content) # generate .mp3 file
                            app.get_token()
                            app.save_audio()
                            audio_source = discord.FFmpegPCMAudio('temp.mp3')
                            if guild.me.voice != None:
                                try:
                                    # NOTE(review): discord.py's VoiceClient.play is
                                    # synchronous; awaiting it likely raises and is
                                    # swallowed by this bare except — confirm.
                                    await voice_client.play(audio_source, after=None)
                                except:
                                    pass
                elif len(guild.me.voice.channel.members) == 1:
                    # Bot is alone in the channel: leave.
                    await voice_client.disconnect()
    @commands.command(name='來')
    async def join_vc(self, ctx):
        """Join (or move to) the command author's voice channel."""
        guild = ctx.guild
        if guild.me.voice != None: # if bot is in any voice channel
            voice_client: discord.VoiceClient = discord.utils.get(self.bot.voice_clients, guild=guild)
            await voice_client.disconnect()
            if ctx.message.author.voice != None: # message's author is in a voice channel
                channel = ctx.message.author.voice.channel
                await channel.connect()
            else:
                channel = ctx.message.channel
                await channel.send('貴使用者並不在任何語音頻道內,請不要講幹話。')
        else: # bot is not in any voice channel
            if ctx.message.author.voice != None: # message's author is in a voice channel
                channel = ctx.message.author.voice.channel
                await channel.connect()
            else:
                channel = ctx.message.channel
                await channel.send('貴使用者並不在任何語音頻道內,請不要講幹話。')
    @commands.command(name='滾')
    async def leave_vc(self, ctx):
        """Disconnect from the author's guild voice channel, if connected."""
        for x in self.bot.voice_clients:
            if(x.guild == ctx.message.author.guild):
                return await x.disconnect()
        # NOTE(review): TextChannel has no .say() in modern discord.py —
        # this fallback line likely raises; confirm library version.
        return await ctx.message.channel.say('鍋P4窩')
    @commands.command(name='唱', aliases=['播', '播放', 'play', 'sing', 's', \
        '停', '停止', '停停停', '別', '鬧', '別鬧', 'queue', 'que', 'stop'])
    async def play(self, ctx, song_name=None):
        """Play ./data/music/<song_name>.mp3, or stop playback when no name given."""
        guild = ctx.guild
        voice_client: discord.VoiceClient = discord.utils.get(self.bot.voice_clients, guild=guild)
        if guild.me.voice != None: # if bot is in any voice channel
            if ctx.message.author.voice != None: # message's author is in a voice channel
                if song_name!=None:
                    try:
                        audio_source = discord.FFmpegPCMAudio('./data/music/{}.mp3'.format(song_name))
                        # NOTE(review): play/stop are synchronous in discord.py;
                        # the awaits below may raise and be silently swallowed.
                        if not voice_client.is_playing():
                            await voice_client.play(audio_source, after=None)
                        else:
                            await voice_client.stop()
                            await voice_client.play(audio_source, after=None)
                    except:
                        pass
                else:
                    try:
                        await voice_client.stop()
                    except:
                        pass
            else:
                await ctx.message.channel.send('你給我進來')
        else:
            await ctx.message.channel.send('歹勢 拎北牟營')
    @commands.command(name='歌單', aliases=['playlist', 'songlist', 'listsongs'])
    async def playlist(self, ctx):
        """List available .mp3 names under ./data/music/ (excluding temp)."""
        channel = ctx.message.channel
        song_list = []
        for file in os.listdir("./data/music/"):
            if file.endswith(".mp3"):
                song_list.append(os.path.splitext(file)[0])
        try:
            song_list.remove('temp')
        except:
            pass
        await channel.send(', '.join(song_list))
def setup(bot):
    """discord.py extension hook: register the TTSMessage cog on *bot*."""
    bot.add_cog(TTSMessage(bot))
class TextToSpeech(object):
    """Azure Cognitive Services TTS client: fetch a token, synthesize to temp.mp3."""

    def __init__(self, tts_subscription_key, tts_author, tts_text):
        # Per-request state: speaker name, text to speak, cached token.
        self.tts_subscription_key = tts_subscription_key
        self.text_key = text_analytics_subscription_key
        self.tts = tts_text
        self.author = tts_author
        self.access_token = None
    def get_token(self):
        """Exchange the subscription key for a short-lived bearer token."""
        fetch_token_url = "https://eastasia.api.cognitive.microsoft.com/sts/v1.0/issueToken"
        headers = {
            'Ocp-Apim-Subscription-Key': self.tts_subscription_key
        }
        response = requests.post(fetch_token_url, headers=headers)
        self.access_token = str(response.text)
    def save_audio(self):
        """POST SSML to the TTS endpoint and write the audio to temp.mp3."""
        base_url = 'https://eastasia.tts.speech.microsoft.com/'
        path = 'cognitiveservices/v1'
        constructed_url = base_url + path
        headers = {
            'Authorization': 'Bearer ' + self.access_token,
            'Content-Type': 'application/ssml+xml',
            'X-Microsoft-OutputFormat': 'riff-24khz-16bit-mono-pcm',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
        }
        # Build the SSML <speak><voice> document.
        xml_body = ElementTree.Element('speak', version='1.0')
        xml_body.set('{http://www.w3.org/XML/1998/namespace}lang', 'zh-Hant')
        voice = ElementTree.SubElement(xml_body, 'voice')
        voice.set('{http://www.w3.org/XML/1998/namespace}lang', 'zh-Hant')
        # Pick a Japanese voice when the text is detected as Japanese,
        # otherwise a random Chinese voice.
        ld = Lang_Detection(self.text_key, self.tts)
        result = ld.language_detection()
        if result != None:
            voice.set('name', result)
            voice.text = '{}さんは、{}と言った。'.format(self.author, self.tts)
        else:
            accent_list = [
                "Microsoft Server Speech Text to Speech Voice (zh-TW, Zhiwei, Apollo)",\
                "Microsoft Server Speech Text to Speech Voice (zh-TW, HanHanRUS)",\
                "Microsoft Server Speech Text to Speech Voice (zh-TW, Yating, Apollo)",\
                "Microsoft Server Speech Text to Speech Voice (zh-CN, Kangkang, Apollo)",\
                "Microsoft Server Speech Text to Speech Voice (zh-CN, Yaoyao, Apollo)",\
                "Microsoft Server Speech Text to Speech Voice (zh-CN, HuihuiRUS)"]
            random_accent = random.choice(accent_list)
            voice.set('name', random_accent)
            voice.text = '{}說了,{}'.format(self.author, self.tts)
        body = ElementTree.tostring(xml_body)
        response = requests.post(constructed_url, headers=headers, data=body)
        if response.status_code == 200:
            with open('temp.mp3', 'wb') as audio:
                audio.write(response.content)
                print("\nStatus code: " + str(response.status_code))
        else:
            print("\nStatus code: " + str(response.status_code) + "\nSomething went wrong. Check your subscription key and headers.\n")
class Lang_Detection(object):
    """Azure Text Analytics wrapper used to pick a matching TTS voice."""

    def __init__(self, text_key, text):
        self.key = text_key
        self.text = text
        self.endpoint = 'https://eastasia.api.cognitive.microsoft.com'
    def authenticateClient(self):
        """Build a TextAnalyticsClient authenticated with the stored key."""
        credentials = CognitiveServicesCredentials(self.key)
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.endpoint, credentials=credentials)
        return text_analytics_client
    def language_detection(self):
        """Return a Japanese voice name if the text is Japanese, else None.

        Returns None also on any API error (errors are printed, not raised).
        """
        text = self.text
        client = self.authenticateClient()
        try:
            documents = [
                {'id': '1', 'text': text}
            ]
            response = client.detect_language(documents=documents)
            for document in response.documents:
                print("Language: ", document.detected_languages[0].name) #document.detected_languages[0].iso6391_name
                if document.detected_languages[0].name == 'Japanese':
                    return "Microsoft Server Speech Text to Speech Voice (ja-JP, Ayumi, Apollo)"
                else:
                    return None
        except Exception as err:
            print("Encountered exception. {}".format(err))
# Manual smoke test: synthesize a sample Chinese sentence to temp.mp3.
if __name__ == "__main__":
    tts_text = '你今天過得怎麼樣?'
    tts_author = '路西法'
    app = TextToSpeech(tts_subscription_key, tts_author, tts_text)
    app.get_token()
    app.save_audio()
28698 | """This module contains helper functions used in the API"""
import datetime
import json
import random
import re
import secrets
import string
from functools import wraps

from flask import request

from api_v1.models import User
def name_validalidation(name, context):
    """Validate a user-supplied name for the given *context*.

    A name is rejected when it is blank or contains characters other than
    letters, digits, hyphens, underscores and whitespace.  On rejection an
    ``(error_payload, 400)`` tuple is returned; valid names fall through
    and yield ``None`` implicitly.
    """
    is_blank = not name.strip()
    has_bad_chars = re.match(r"^[-a-zA-Z0-9_\s]*$", name) is None
    if is_blank or has_bad_chars:
        error_text = "Name shouldn't be empty. No special characters"
        payload = {
            "message": error_text + " for " + context + " names",
            # The context string itself is used as a key in the payload.
            context: "null",
        }
        return payload, 400
def email_validation(email):
    """Check *email* against a basic address pattern.

    Malformed addresses produce an ``(error_payload, 400)`` tuple; valid
    ones fall through and yield ``None`` implicitly.
    """
    pattern = r"(^[a-zA-Z0-9_.]+@[a-zA-Z0-9-]+\.[a-z]+$)"
    if re.match(pattern, email) is None:
        payload = {
            'message': 'Incorrect email format.',
            'status': 'Registration failed'
        }
        return payload, 400
def datetimeconverter(obj):
    """``json.dumps`` *default* hook: render datetimes as strings.

    Non-datetime objects fall through to ``None``, which json then
    serializes as ``null`` instead of raising TypeError.
    """
    if not isinstance(obj, datetime.datetime):
        return None
    return str(obj)
def master_serializer(resource):
    """Serialize *resource* into a JSON string.

    Calls the resource's ``serialize()`` method and dumps the result with
    sorted keys, converting datetimes via :func:`datetimeconverter`.
    """
    payload = resource.serialize()
    return json.dumps(payload, default=datetimeconverter, sort_keys=True)
def token_required(funct):
    """Decorator method to check for jwt tokens.

    Reads the Authorization header from the flask request, decodes it via
    User.decode_token, and on success calls *funct* with the decoded
    user_id appended after the positional args; otherwise returns a 401.
    """
    @wraps(funct)
    def wrapper(*args, **kwargs):
        """Wrapper function to add pass down results of the token decoding"""
        if 'Authorization' in request.headers:
            access_token = request.headers.get('Authorization')
            data = User.decode_token(access_token)
            # decode_token returns a string error message on failure,
            # otherwise the user id.
            if not isinstance(data, str):
                user_id = data
            else:
                response = {
                    'message': data
                }
                return response, 401
            # user_id is passed positionally after the view's own args.
            return funct(*args, user_id, **kwargs)
        else:
            message = "No token found! Ensure that the request header"
            response = {
                'message': message + ' has an authorization key value'
            }
            return response, 401
    # Redundant after @wraps (which already copies these), kept as-is.
    wrapper.__doc__ = funct.__doc__
    wrapper.__name__ = funct.__name__
    return wrapper
def password_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Generate a random password.

    :param size: number of characters to generate (default 8)
    :param chars: alphabet to draw from (default: uppercase letters + digits)
    :return: the generated password string

    Uses :mod:`secrets` instead of :mod:`random` because the output is a
    credential: ``random`` is a predictable PRNG and unsuitable for
    security-sensitive values.
    """
    return ''.join(secrets.choice(chars) for _ in range(size))
| StarcoderdataPython |
1600251 | import appdirs
from pkg_resources import Requirement, resource_filename
import shutil
import os
from plico.utils.addtree import mkdirp
class ConfigFileManager():
    """Locate and install an application's config file.

    The user config path comes from appdirs; a default config shipped
    inside the Python package can be copied there on first run.
    """

    def __init__(self, appName, appAuthor, pythonPackageName):
        self._appName = appName
        self._appAuthor = appAuthor
        self._packageName = pythonPackageName
        self._appdirs = appdirs.AppDirs(self._appName, self._appAuthor)
    def getConfigFilePath(self):
        """Return the per-user config file path, e.g. <user_config_dir>/<pkg>.conf."""
        confPath = os.path.join(self._appdirs.user_config_dir,
                                '%s.conf' % self._packageName)
        return confPath
    def _getConfigFilePathInPackage(self):
        # Default config bundled at <pkg>/conf/<pkg>.conf inside the package.
        return resource_filename(
            Requirement(self._packageName),
            "%s/conf/%s.conf" % (self._packageName, self._packageName))
    def doesConfigFileExists(self):
        """Return True if the user config file already exists."""
        return os.path.isfile(self.getConfigFilePath())
    def installConfigFileFromPackage(self, overwrite=False):
        """Copy the packaged default config into the user config dir.

        Existing files are kept unless *overwrite* is True.
        """
        if self.doesConfigFileExists() and (overwrite is False):
            return
        source = self._getConfigFilePathInPackage()
        dest = self.getConfigFilePath()
        # Ensure the destination directory exists before copying.
        mkdirp(os.path.dirname(self.getConfigFilePath()))
        shutil.copyfile(source, dest)
| StarcoderdataPython |
18005 | <filename>controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py
from math import ceil
from referee.consts import MATCH_TIME, TIME_STEP
from referee.referee import RCJSoccerReferee
# Configure the soccer referee: check thresholds are expressed in seconds
# and converted to simulation steps via TIME_STEP (ms per step).
referee = RCJSoccerReferee(
    match_time=MATCH_TIME,
    progress_check_steps=ceil(15/(TIME_STEP/1000.0)),
    progress_check_threshold=0.5,
    ball_progress_check_steps=ceil(10/(TIME_STEP/1000.0)),
    ball_progress_check_threshold=0.5,
)
# Main simulation loop: broadcast positions and advance the referee until
# the supervisor stops or the match ends.
while referee.step(TIME_STEP) != -1:
    referee.emit_positions()
    if not referee.tick():
        break
# When end of match, pause simulator immediately
referee.simulationSetMode(referee.SIMULATION_MODE_PAUSE)
1680353 | import json
from sys import exit
class Disassembler:
    """
    A MIPS machine-code disassembler driven by a JSON configuration.

    Attributes
    ----------
    _opcodes: list[int]
        Opcodes the configuration knows how to decode.
    _instructions: dict[int, dict()]
        Per-opcode instruction templates (with a 'syntax' string).
    _registers: dict[str, str]
        Register-number -> register-name mapping.
    _opcode: str
        Bit mask selecting the opcode field.
    _r_type_format: dict[str, str]
        Field masks for R-type instructions (rs/rt/rd/shift/func).
    _j_type_format: dict[str, str]
        Field masks for J-type instructions (offset).
    _i_type_format: dict[str, str]
        Field masks for I-type instructions (rs/rt/imm).
    instructions_to_decode: list[str]
        Hex instruction strings read from the input file.

    Methods
    -------
    _mips_decoder(x)
        Used for customized decoding of the configuration json file.
    initialize(path_to_input: str, path_to_configuration: str)
        Load configuration and input file, create the object.
    decode_instruction(instruction: int)
        Decode one hex instruction and record/print the result.
    disassemble()
        Decode every instruction in instructions_to_decode.
    """
    def __init__(self, configuration, instructions_to_decode):
        """
        :param configuration: decoded JSON configuration dict
        :param instructions_to_decode: hex instruction strings to decode
        """
        self._opcodes = configuration.get('opcodes')
        self._instructions = configuration.get('instructions')
        self._registers = configuration.get('registers')
        self._opcode = configuration.get('opcode')
        self._r_type_format = configuration.get('r_type_format')
        self._j_type_format = configuration.get('j_type_format')
        self._i_type_format = configuration.get('i_type_format')
        self.instructions_to_decode = instructions_to_decode
        # Accumulates "hex -> assembly" strings as instructions are decoded.
        self.output_data = []
    @staticmethod
    def _mips_decoder(x):
        """
        json object_hook: coerce dict keys to int where possible and
        convert '0b...' binary-string values to integers.

        :param x: object produced by the json parser
        :return: converted object (dicts rewritten, others unchanged)
        """
        if isinstance(x, dict):
            new_dict = {}
            for k, v in x.items():
                try:
                    new_dict[int(k)] = v
                except ValueError:
                    new_dict[k] = v
                if isinstance(v, str) and v.startswith('0b'):
                    new_dict[k] = int(v, 2)
            return new_dict
        return x
    @classmethod
    def initialize(cls, path_to_input: str, path_to_configuration: str):
        """
        Build a Disassembler from an input file and a JSON configuration.

        :param path_to_input: file of hex instruction strings, one per line
        :param path_to_configuration: JSON config (masks, opcodes, registers)
        :return: configured Disassembler instance

        Exits the process when the input file is missing.
        """
        try:
            with open(file=path_to_input) as f:
                input_data = f.read().splitlines()
        except FileNotFoundError:
            print(f'Can not load a file: {path_to_input}')
            print('Try a different file!')
            exit()
        with open(file=path_to_configuration) as json_file:
            configuration = json.load(json_file, object_hook=cls._mips_decoder)
        return cls(configuration, input_data)
    def decode_instruction(self, hex_instruction):
        """
        Decode one hex instruction string into assembly text.

        Unsupported instructions are reported on stdout and skipped;
        successful decodes are printed and appended to output_data.

        :param hex_instruction: instruction as a hex string, e.g. '0x012a4020'
        """
        instruction = int(hex_instruction, 16)
        # Opcode lives in the top 6 bits.
        opcode = (instruction & self._opcode) >> 26
        if opcode not in self._opcodes:
            print('Unsupported instruction!')
            return
        # R-type
        if opcode == 0:
            try:
                rs = self._registers[(self._r_type_format.get('rs') & instruction) >> 21]
                rt = self._registers[(self._r_type_format.get('rt') & instruction) >> 16]
                rd = self._registers[(self._r_type_format.get('rd') & instruction) >> 11]
                shift = (self._r_type_format.get('shift') & instruction) >> 6
                func = self._r_type_format.get('func') & instruction
                template = self._instructions[opcode][func]
            except KeyError:
                print('Unsupported instruction!')
                return
            decoded_instruction = template.get('syntax').replace('$rs', rs)
            decoded_instruction = decoded_instruction.replace('$rt', rt)
            decoded_instruction = decoded_instruction.replace('$rd', rd)
            decoded_instruction = decoded_instruction.replace('$shift', f'{shift:#010x}')
        # J-type
        elif opcode in [2, 3]:
            offset = self._j_type_format.get('offset') & instruction
            template = self._instructions.get(opcode)
            decoded_instruction = template.get('syntax').replace('$offset', f'{offset:#010x}')
        # I-type
        else:
            try:
                rs = self._registers[(self._i_type_format.get('rs') & instruction) >> 21]
                rt = self._registers[(self._i_type_format.get('rt') & instruction) >> 16]
                imm = self._i_type_format.get('imm') & instruction
                template = self._instructions.get(opcode)
            except KeyError:
                print('Unsupported instruction!')
                return
            decoded_instruction = template.get('syntax').replace('$rs', rs)
            decoded_instruction = decoded_instruction.replace('$rt', rt)
            decoded_instruction = decoded_instruction.replace('$imm', f'{imm:#010x}')
        output = f'{hex_instruction} -> {decoded_instruction}'
        print(output)
        self.output_data.append(output)
    def disassemble(self):
        """Decode every instruction loaded from the input file."""
        for hex_instruction in self.instructions_to_decode:
            self.decode_instruction(hex_instruction)
| StarcoderdataPython |
80251 | <reponame>dmulyalin/ttp<filename>test/pytest/test_answers_and_docs.py
import sys
sys.path.insert(0, "../..")
import pprint
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
from ttp import ttp
def test_answer_1():
    """Parse '#'-tagged publication records into a flat list of dicts.

    https://stackoverflow.com/questions/63522291/parsing-blocks-of-text-within-a-file-into-objects
    """
    data = """
#*Approximate Distance Oracles with Improved Query Time.
#@<NAME>
#t2015
#cEncyclopedia of Algorithms
#index555036b37cea80f954149ffc
#*Subset Sum Algorithm for Bin Packing.
#@<NAME>
#t2015
#cEncyclopedia of Algorithms
#index555036b37cea80f954149ffd
    """
    template = """
#*{{ info | ORPHRASE }}
#@{{ author | ORPHRASE }}
#t{{ year }}
#c{{ title | ORPHRASE }}
#index{{ index }}
    """
    parser = ttp(data, template)
    parser.parse()
    res = parser.result(structure="flat_list")
    pprint.pprint(res)
    assert res == [
        {
            "author": "<NAME>",
            "index": "555036b37cea80f954149ffc",
            "info": "Approximate Distance Oracles with Improved Query Time.",
            "title": "Encyclopedia of Algorithms",
            "year": "2015",
        },
        {
            "author": "<NAME>",
            "index": "555036b37cea80f954149ffd",
            "info": "Subset Sum Algorithm for Bin Packing.",
            "title": "Encyclopedia of Algorithms",
            "year": "2015",
        },
    ]
def test_answer_2():
    """Extract key=value pairs from simple comma-separated lines.

    https://stackoverflow.com/questions/63499479/extract-value-from-text-string-using-format-string-in-python
    """
    data = """
name=username1, age=1001
name=username2, age=1002
name=username3, age=1003
    """
    template = "name={{ name }}, age={{ age }}"
    parser = ttp(data, template)
    parser.parse()
    res = parser.result(structure="flat_list")
    # pprint.pprint(res)
    assert res == [
        {"age": "1001", "name": "username1"},
        {"age": "1002", "name": "username2"},
        {"age": "1003", "name": "username3"},
    ]
def test_issue_20_answer():
    """Parse multicast routing entries, grouping by overlay source address."""
    data_to_parse = """
(*, 172.16.58.3)
LISP0.4200, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
LISP0.4201, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(172.16.17.32, 172.16.58.3), 6d20h/00:02:23, flags: FT
Incoming interface: Vlan1029, RPF nbr 0.0.0.0
Outgoing interface list:
LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
    """
    show_mcast1 = """
<template name="mcast" results="per_template">
<group name="mcast_entries.{{ overlay_src }}">
({{ overlay_src | _start_ | replace("*", "'*'")}}, {{ overlay_grp | IP }})
({{ overlay_src | _start_ | IP }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, flags: {{ entry_flags }}
Incoming interface: {{ incoming_intf }}, RPF nbr {{ rpf_neighbor }}
Outgoing interface list:
<group name="oil_entries*">
{{ outgoing_intf }}, ({{ underlay_src | IP }}, {{ underlay_grp | IP }}), Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
</group>
</group>
</template>
    """
    parser = ttp(template=show_mcast1)
    parser.add_input(data_to_parse, template_name="mcast")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "mcast": {
            "mcast_entries": {
                "'*'": {
                    "oil_entries": [
                        {
                            "oil_state_or_timer": "stopped",
                            "oil_uptime": "1d18h",
                            "outgoing_intf": "LISP0.4200",
                            "underlay_grp": "172.16.31.10",
                            "underlay_src": "172.16.58.3",
                        },
                        {
                            "oil_state_or_timer": "stopped",
                            "oil_uptime": "2d05h",
                            "outgoing_intf": "LISP0.4201",
                            "underlay_grp": "172.16.31.10",
                            "underlay_src": "172.16.58.3",
                        },
                    ],
                    "overlay_grp": "172.16.58.3",
                },
                "172.16.17.32": {
                    "entry_flags": "FT",
                    "entry_state_or_timer": "00:02:23",
                    "entry_uptime": "6d20h",
                    "incoming_intf": "Vlan1029",
                    "oil_entries": [
                        {
                            "oil_state_or_timer": "stopped",
                            "oil_uptime": "1d18h",
                            "outgoing_intf": "LISP0.4100",
                            "underlay_grp": "172.16.31.10",
                            "underlay_src": "172.16.58.3",
                        }
                    ],
                    "overlay_grp": "172.16.58.3",
                    "rpf_neighbor": "0.0.0.0",
                },
            }
        }
    }
def test_answer_3():
    """
    Fixed bug with results forming - when have two _start_ matches, but
    one of them is False, TTP was selecting first match without checking
    if its False, updated decision logic to do that check.
    """
    # Alteon load-balancer style configuration sample.
    data = """
/c/slb/virt 12
 	dis
 	ipver v4
 	vip 1.1.1.1
 	rtsrcmac ena
 	vname "my name"
/c/slb/virt 12/service 443 https
 	group 15
 	rport 443
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 443 https/http
 	xforward ena
 	httpmod hsts_insert
/c/slb/virt 12/service 443 https/ssl
 	srvrcert cert certname
 	sslpol ssl-Policy
/c/slb/virt 12/service 80 http
 	group 15
 	rport 80
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 80 http/http
 	xforward ena
/c/slb/virt 14
 	dis
 	ipver v4
 	vip 1.1.4.4
 	rtsrcmac ena
 	vname "my name2"
    """
    template = """
<template name="VIP_cfg" results="per_template">
<group name="{{ vip }}">
/c/slb/virt {{ virt_seq | DIGIT }}
 	dis {{ config_state | set("dis") }}
 	ipver {{ ipver}}
 	vip {{ vip }}
 	rtsrcmac {{ rtsrcmac }}
 	vname "{{ vip_name | ORPHRASE }}"
<group name="services.{{ port }}.{{ proto }}">
/c/slb/virt 12/service {{ port | DIGIT }} {{ proto | exclude(ssl) }}
 	group {{group_seq }}
 	rport {{ real_port }}
 	pbind {{ pbind }}
 	dbind {{ dbind }}
 	xforward {{ xforward }}
 	httpmod {{ httpmod }}
</group>
<group name="ssl_profile">
/c/slb/virt {{ virt_seq }}/service 443 https/ssl
 	srvrcert cert {{ ssl_server_cert }}
 	sslpol {{ ssl_profile }}
{{ ssl | set("https/ssl") }}
</group>
</group>
</template>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=50)
    assert res == {
        "VIP_cfg": {
            "1.1.1.1": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "services": {
                    "443": {
                        "https": {
                            "dbind": "forceproxy",
                            "group_seq": "15",
                            "pbind": "clientip",
                            "real_port": "443",
                        },
                        "https/http": {"httpmod": "hsts_insert", "xforward": "ena"},
                    },
                    "80": {
                        "http": {
                            "dbind": "forceproxy",
                            "group_seq": "15",
                            "pbind": "clientip",
                            "real_port": "80",
                        },
                        "http/http": {"xforward": "ena"},
                    },
                },
                "ssl_profile": {
                    "ssl": "https/ssl",
                    "ssl_profile": "ssl-Policy",
                    "ssl_server_cert": "certname",
                    "virt_seq": "12",
                },
                "vip_name": "my name",
                "virt_seq": "12",
            },
            "1.1.4.4": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "vip_name": "my name2",
                "virt_seq": "14",
            },
        }
    }
def test_answer_4():
    """Same VIP config as test_answer_3, but services filtered via contains=."""
    data = """
/c/slb/virt 12
 	dis
 	ipver v4
 	vip 1.1.1.1
 	rtsrcmac ena
 	vname "my name"
/c/slb/virt 12/service 443 https
 	group 15
 	rport 443
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 443 https/http
 	xforward ena
 	httpmod hsts_insert
/c/slb/virt 12/service 443 https/ssl
 	srvrcert cert certname
 	sslpol ssl-Policy
/c/slb/virt 12/service 80 http
 	group 15
 	rport 80
 	pbind clientip
 	dbind forceproxy
/c/slb/virt 12/service 80 http/http
 	xforward ena
/c/slb/virt 14
 	dis
 	ipver v4
 	vip 1.1.4.4
 	rtsrcmac ena
 	vname "my name2"
    """
    template = """
<template name="VIP_cfg" results="per_template">
<group name="{{ vip }}">
/c/slb/virt {{ virt_seq | DIGIT }}
 	dis {{ config_state | set("dis") }}
 	ipver {{ ipver}}
 	vip {{ vip }}
 	rtsrcmac {{ rtsrcmac }}
 	vname "{{ vip_name | ORPHRASE }}"
<group name="services.{{ port }}" contains="dbind, pbind">
/c/slb/virt 12/service {{ port | DIGIT }} {{ proto | exclude(ssl) }}
 	group {{group_seq }}
 	rport {{ real_port }}
 	pbind {{ pbind }}
 	dbind {{ dbind }}
 	xforward {{ xforward }}
 	httpmod {{ httpmod }}
</group>
<group name="ssl_profile">
/c/slb/virt {{ virt_seq }}/service 443 https/ssl
 	srvrcert cert {{ ssl_server_cert }}
 	sslpol {{ ssl_profile }}
{{ ssl | set("https/ssl") }}
</group>
</group>
</template>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=50)
    assert res == {
        "VIP_cfg": {
            "1.1.1.1": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "services": {
                    "443": {
                        "dbind": "forceproxy",
                        "group_seq": "15",
                        "pbind": "clientip",
                        "proto": "https",
                        "real_port": "443",
                    },
                    "80": {
                        "dbind": "forceproxy",
                        "group_seq": "15",
                        "pbind": "clientip",
                        "proto": "http",
                        "real_port": "80",
                    },
                },
                "ssl_profile": {
                    "ssl": "https/ssl",
                    "ssl_profile": "ssl-Policy",
                    "ssl_server_cert": "certname",
                    "virt_seq": "12",
                },
                "vip_name": "my name",
                "virt_seq": "12",
            },
            "1.1.4.4": {
                "config_state": "dis",
                "ipver": "v4",
                "rtsrcmac": "ena",
                "vip_name": "my name2",
                "virt_seq": "14",
            },
        }
    }
def test_issue_20_answer_2():
    """Multicast parsing variant with (*, G) RP entries and mixed OIL formats."""
    data_to_parse = """
(*, 192.168.3.11)
LISP0.4200, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
LISP0.4201, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(172.16.17.32, 239.100.100.100), 2d05h/00:01:19, flags: FT
Incoming interface: Vlan1029, RPF nbr 0.0.0.0
Outgoing interface list:
LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
LISP0.4101, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(*, 172.16.58.3), 6d20h/00:03:28, RP 172.16.17.32, flags: S
Incoming interface: Null, RPF nbr 0.0.0.0
Outgoing interface list:
Vlan3014, Forward/Sparse, 1d18h/00:03:28
LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
    """
    show_mcast1 = """
<template name="mcast" results="per_template">
<group name="mcast_entries.{{ overlay_src }}">
({{ overlay_src | _start_ | replace("*", "'*'") }}, {{ overlay_grp | IP }})
({{ overlay_src | _start_ | IP }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, flags: {{ entry_flags }}
({{ overlay_src | _start_ | replace("*", "'*'") }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, RP {{ rp }}, flags: {{ entry_flags }}
Incoming interface: {{ incoming_intf }}, RPF nbr {{ rpf_neighbor }}
Outgoing interface list:
<group name="oil_entries*">
{{ outgoing_intf }}, Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
{{ outgoing_intf }}, ({{ underlay_src | IP }}, {{ underlay_grp | IP }}), Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
</group>
</group>
</template>
    """
    parser = ttp(template=show_mcast1)
    parser.add_input(data_to_parse, template_name="mcast")
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "mcast": {
            "mcast_entries": {
                "'*'": [
                    {"overlay_grp": "192.168.3.11"},
                    {
                        "entry_flags": "S",
                        "entry_state_or_timer": "00:03:28",
                        "entry_uptime": "6d20h",
                        "incoming_intf": "Null",
                        "oil_entries": [
                            {
                                "oil_state_or_timer": "00:03:28",
                                "oil_uptime": "1d18h",
                                "outgoing_intf": "Vlan3014",
                                "underlay_grp": "172.16.31.10",
                                "underlay_src": "172.16.58.3",
                            }
                        ],
                        "overlay_grp": "172.16.58.3",
                        "rp": "172.16.17.32",
                        "rpf_neighbor": "0.0.0.0",
                    },
                ],
                "172.16.17.32": {
                    "entry_flags": "FT",
                    "entry_state_or_timer": "00:01:19",
                    "entry_uptime": "2d05h",
                    "incoming_intf": "Vlan1029",
                    "overlay_grp": "172.16.58.3",
                    "rpf_neighbor": "0.0.0.0",
                },
            }
        }
    }
def test_docs_ttp_dictionary_usage_example():
    """Docs example: a macro calling _ttp_ match function 'to_ip' to derive data."""
    template = """
<input load="text">
interface Lo0
 ip address 172.16.17.32/29
!
interface Lo1
 ip address 1.1.1.1/30
</input>

<group macro="add_last_host">
interface {{ interface }}
 ip address {{ ip }}
</group>

<macro>
def add_last_host(data):
    ip_obj, _ = _ttp_["match"]["to_ip"](data["ip"])
    all_ips = list(ip_obj.network.hosts())
    data["last_host"] = str(all_ips[-1])
    return data
</macro>
    """
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            [
                {
                    "interface": "Lo0",
                    "ip": "172.16.17.32/29",
                    "last_host": "172.16.58.3",
                },
                {"interface": "Lo1", "ip": "1.1.1.1/30", "last_host": "1.1.1.2"},
            ]
        ]
    ]
def test_github_issue_21_answer():
    """GitHub issue 21: parse 'show ip nbar protocol-discovery' output, using a
    macro to map the positional in/out counter lists onto named keys."""
    data_to_parse = """
R1#sh ip nbar protocol-discovery protocol
GigabitEthernet1
Last clearing of "show ip nbar protocol-discovery" counters 00:13:45
Input Output
----- ------
Protocol Packet Count Packet Count
Byte Count Byte Count
5min Bit Rate (bps) 5min Bit Rate (bps)
5min Max Bit Rate (bps) 5min Max Bit Rate (bps)
---------------------------- ------------------------ ------------------------
ssh 191 134
24805 22072
2000 1000
1999 1001
unknown 172 503
39713 31378
0 0
3000 0
ping 144 144
14592 14592
0 0
1000 1000
dns 107 0
21149 0
0 0
2000 0
vrrp 0 738
0 39852
0 0
0 0
ldp 174 175
13224 13300
0 0
0 0
ospf 86 87
9460 9570
0 0
0 0
Total 874 1781
122943 130764
2000 1000
8000 2000
"""
    show_nbar = """
<template name="nbar" results="per_template">
<vars>C1 = "DIGIT | to_int | to_list | joinmatches"</vars>
<group name="{{ interface }}">
{{ interface | re('Gig.+') | re('Ten.+') }}
<group name="{{ protocol }}" macro="map_to_keys">
{{ protocol }} {{ in | chain(C1) }} {{ out | chain(C1) }}
{{ ignore(r"\\s+") }} {{ in | chain(C1) }} {{ out | chain(C1) }}
</group>
</group>
<macro>
def map_to_keys(data):
    # uncomment to see data
    # print(data)
    inp_values = data.pop("in")
    out_values = data.pop("out")
    inp_keys = ["IN Packet Count", "IN Byte Count", "IN 5min Bit Rate (bps)", "IN 5min Max Bit Rate (bps)"]
    out_keys = ["OUT Packet Count", "OUT Byte Count", "OUT 5min Bit Rate (bps)", "OUT 5min Max Bit Rate (bps)"]
    data.update(dict(zip(inp_keys, inp_values)))
    data.update(dict(zip(out_keys, out_values)))
    return data
</macro>
</template>
"""
    parser = ttp(template=show_nbar)
    parser.add_input(data_to_parse, template_name="nbar")
    parser.parse()
    res = parser.result(structure="dictionary")
    # fix: debug print was left enabled; keep it commented out like the other tests
    # pprint.pprint(res, width=100)
    # NOTE: the "GigabitEthernet1 " key keeps its trailing space — the group name
    # is taken from the matched line as-is.
    assert res == {
        "nbar": {
            "GigabitEthernet1 ": {
                "Total": {
                    "IN 5min Bit Rate (bps)": 2000,
                    "IN 5min Max Bit Rate (bps)": 8000,
                    "IN Byte Count": 122943,
                    "IN Packet Count": 874,
                    "OUT 5min Bit Rate (bps)": 1000,
                    "OUT 5min Max Bit Rate (bps)": 2000,
                    "OUT Byte Count": 130764,
                    "OUT Packet Count": 1781,
                },
                "dns": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 2000,
                    "IN Byte Count": 21149,
                    "IN Packet Count": 107,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 0,
                    "OUT Packet Count": 0,
                },
                "ldp": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 0,
                    "IN Byte Count": 13224,
                    "IN Packet Count": 174,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 13300,
                    "OUT Packet Count": 175,
                },
                "ospf": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 0,
                    "IN Byte Count": 9460,
                    "IN Packet Count": 86,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 9570,
                    "OUT Packet Count": 87,
                },
                "ping": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 1000,
                    "IN Byte Count": 14592,
                    "IN Packet Count": 144,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 1000,
                    "OUT Byte Count": 14592,
                    "OUT Packet Count": 144,
                },
                "ssh": {
                    "IN 5min Bit Rate (bps)": 2000,
                    "IN 5min Max Bit Rate (bps)": 1999,
                    "IN Byte Count": 24805,
                    "IN Packet Count": 191,
                    "OUT 5min Bit Rate (bps)": 1000,
                    "OUT 5min Max Bit Rate (bps)": 1001,
                    "OUT Byte Count": 22072,
                    "OUT Packet Count": 134,
                },
                "unknown": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 3000,
                    "IN Byte Count": 39713,
                    "IN Packet Count": 172,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 31378,
                    "OUT Packet Count": 503,
                },
                "vrrp": {
                    "IN 5min Bit Rate (bps)": 0,
                    "IN 5min Max Bit Rate (bps)": 0,
                    "IN Byte Count": 0,
                    "IN Packet Count": 0,
                    "OUT 5min Bit Rate (bps)": 0,
                    "OUT 5min Max Bit Rate (bps)": 0,
                    "OUT Byte Count": 39852,
                    "OUT Packet Count": 738,
                },
            }
        }
    }
# test_github_issue_21_answer()
def test_github_issue_22():
    """Match '/32 ip address' lines regardless of leading whitespace via ignore(r"\\s+")."""
    config = """
interface Loopback0
description Fabric Node Router ID
ip address 172.16.58.3 255.255.255.255
ip pim sparse-mode
ip router isis
clns mtu 1400
end
interface Loopback0
description Fabric Node Router ID
ip address 172.16.17.32 255.255.255.255
ip pim sparse-mode
ip router isis
clns mtu 1400
end
"""
    matcher = """{{ ignore(r"\\s+") }}ip address {{ ip_address }} 255.255.255.255"""
    worker = ttp(config, matcher)
    worker.parse()
    parsed = worker.result()
    expected = [[[{"ip_address": "172.16.58.3"}, {"ip_address": "172.16.17.32"}]]]
    assert parsed == expected
# test_github_issue_22()
def test_github_issue_24():
    """GitHub issue 24: parse load-balancer VIP status output into nested
    vs_instance -> services -> pool groups, ignoring trailing line content."""
    data = """
19: IP4 1.1.1.1, 00:03:b2:78:04:13, vname portal, NO SERVICES UP
Virtual Services:
http: rport http, group 11, health http (HTTP), pbind clientip
Real Servers:
22: 10.10.10.10, web1, group ena, health (runtime HTTP), 0 ms, FAILED
Reason: N/A
23: 10.11.11.11, web2, group ena, health (runtime HTTP), 0 ms, FAILED
Reason: N/A
https: rport https, group 12, health tcp (TCP), pbind clientip
Real Servers:
22: 10.10.10.10, web1, group ena, health (runtime TCP), 0 ms, FAILED
Reason: N/A
23: 10.11.11.11, web2, group ena, health (runtime TCP), 0 ms, FAILED
Reason: N/A
"""
    template = """
<template name="VIP_cfg" results="per_template">
<group name="{{ vs_instance }}" default="">
{{ vs_instance }}: IP4 {{ vs_ip }},{{ ignore(".+") }}
<group name="services*" default="">
{{ vs_service }}: rport {{ rport }},{{ ignore(".+") }}
<group name="pool*" default="">
{{ node_id }}: {{ node_ip }},{{ ignore(".+") }}
Reason: {{ reason }}
</group>
</group>
</group>
</template>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result(structure="dictionary")
    # pprint.pprint(res, width=100)
    assert res == {
        "VIP_cfg": {
            "19": {
                "services": [
                    {
                        "pool": [
                            {
                                "node_id": "22",
                                "node_ip": "10.10.10.10",
                                "reason": "N/A",
                            },
                            {
                                "node_id": "23",
                                "node_ip": "10.11.11.11",
                                "reason": "N/A",
                            },
                        ],
                        "rport": "http",
                        "vs_service": "http",
                    },
                    {
                        "pool": [
                            {
                                "node_id": "22",
                                "node_ip": "10.10.10.10",
                                "reason": "N/A",
                            },
                            {
                                "node_id": "23",
                                "node_ip": "10.11.11.11",
                                "reason": "N/A",
                            },
                        ],
                        "rport": "https",
                        "vs_service": "https",
                    },
                ],
                "vs_ip": "1.1.1.1",
            }
        }
    }
# test_github_issue_24()
def test_reddit_answer_1():
    """
    https://www.reddit.com/r/networking/comments/j106ot/export_custom_lists_from_the_config_aruba_switch/
    Hit a bug while was doing this template - join action overridden by ignore indicator add action
    """
    data = """
SWITCH# show vlan port 2/11 detail
Status and Counters - VLAN Information - for ports 2/11
Port name:
VLAN ID Name | Status Voice Jumbo Mode
------- -------------------- + ---------- ----- ----- --------
60 ABC | Port-based No No Tagged
70 DEF | Port-based No No Tagged
101 GHIJ | Port-based No No Untagged
105 KLMNO | Port-based No No Tagged
116 PQRS | Port-based No No Tagged
117 TVU | Port-based No No Tagged
SWITCH# show vlan port 2/12 detail
Status and Counters - VLAN Information - for ports 2/12
Port name:
VLAN ID Name | Status Voice Jumbo Mode
------- -------------------- + ---------- ----- ----- --------
61 ABC | Port-based No No Tagged
71 DEF | Port-based No No Tagged
103 GHI | Port-based No No Untagged
"""
    template = """
<vars>
hostname="gethostname"
</vars>
<group name="vlans*">
Status and Counters - VLAN Information - for ports {{ Port_Number }}
{{ Tagged_VLAN | joinmatches(" ") }} {{ ignore }} | {{ ignore }} {{ ignore }} {{ ignore }} Tagged
{{ Untagged_VLAN }} {{ ignore }} | {{ ignore }} {{ ignore }} {{ ignore }} Untagged
{{ Hostname | set(hostname) }}
</group>
<output>
format = "csv"
path = "vlans"
headers = "Hostname, Port_Number, Untagged_VLAN, Tagged_VLAN"
</output>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # print(res)
    # the csv <output> formatter collapses all rows into one string result
    assert res == [
        '"Hostname","Port_Number","Untagged_VLAN","Tagged_VLAN"\n"SWITCH","2/11","101","60 70 105 116 117"\n"SWITCH","2/12","103","61 71"'
    ]
# test_reddit_answer_1()
def test_reddit_answer_2():
    """Reddit question: parse a FortiGate 'config router ospf' section into
    structured areas/interfaces/networks/redistribute data; macros strip quotes
    from values and drop empty groups."""
    data = """
config router ospf
set abr-type standard
set auto-cost-ref-bandwidth 1000
set distance-external 110
set distance-inter-area 110
set distance-intra-area 110
set database-overflow disable
set database-overflow-max-lsas 10000
set database-overflow-time-to-recover 300
set default-information-originate disable
set default-information-metric 10
set default-information-metric-type 2
set default-information-route-map ''
set default-metric 10
set distance 110
set rfc1583-compatible disable
set router-id 10.1.1.1
set spf-timers 5 10
set bfd disable
set log-neighbour-changes enable
set distribute-list-in "OSPF_IMPORT_PREFIX"
set distribute-route-map-in ''
set restart-mode none
set restart-period 120
config area
edit 0.0.0.1
set shortcut disable
set authentication none
set default-cost 10
set nssa-translator-role candidate
set stub-type summary
set type nssa
set nssa-default-information-originate disable
set nssa-default-information-originate-metric 10
set nssa-default-information-originate-metric-type 2
set nssa-redistribution enable
next
end
config ospf-interface
edit "vlan1-int"
set interface "Vlan1"
set ip 0.0.0.0
set authentication text
set authentication-key netconanRemoved13
set prefix-length 0
set retransmit-interval 5
set transmit-delay 1
set cost 0
set priority 1
set dead-interval 40
set hello-interval 10
set hello-multiplier 0
set database-filter-out disable
set mtu 0
set mtu-ignore disable
set network-type point-to-point
set bfd global
set status enable
set resync-timeout 40
next
edit "vlan2-int"
set interface "vlan2"
set ip 0.0.0.0
set authentication text
set authentication-key netconanRemoved14
set prefix-length 0
set retransmit-interval 5
set transmit-delay 1
set cost 0
set priority 1
set dead-interval 40
set hello-interval 10
set hello-multiplier 0
set database-filter-out disable
set mtu 0
set mtu-ignore disable
set network-type point-to-point
set bfd global
set status enable
set resync-timeout 40
next
end
config network
edit 1
set prefix 10.1.1.1 255.255.255.252
set area 0.0.0.1
next
edit 2
set prefix 10.1.1.3 255.255.255.252
set area 0.0.0.1
next
end
config redistribute "connected"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "static"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "rip"
set status disable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "bgp"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "isis"
set status disable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
end
"""
    template = """
<vars>
clean_phrase = [
    'ORPHRASE',
    'macro(\"clean_str\")'
]
clean_list = [
    'ORPHRASE',
    'macro(\"build_list\")'
]
</vars>
<macro>
def build_list(data):
    if "\\" \\"" in data:
        t = data.split("\\" \\"")
        for i in range(0, len(t)):
            t[i] = t[i].strip("\\"").replace(" ", "_")
            i+=1
        return t
    else:
        return [data.strip("\\"").replace(" ", "_")]
def clean_str(data):
    return data.replace("\\"","").replace(" ", "_")
def match_ip_or_any(data):
    import ipaddress
    if data == \"any\":
        return data
    elif "/" in data:
        return str(data)
    else:
        t = data.replace(" ", "/")
        return str(ipaddress.IPv4Network(t, strict=False))
def ignore_empty(data):
    if data == "\'\'":
        return bool(False)
    else:
        return data
</macro>
<macro>
def skip_empty(data):
    if data == {}:
        return False
    return data
</macro>
<group name="ospf">
config router ospf {{ _start_ }}
set auto-cost-ref-bandwidth {{ ref_bw }}
set default-information-originate {{ default_originate | contains("enable") }}
set default-information-metric {{ default_originate_metric }}
set default-information-metric-type {{ default_originate_metric_type }}
set default-information-route-map {{ default_originate_routemap | chain("clean_phrase") | macro("ignore_empty") }}
set default-metric {{ default_rt_metric }}
set rfc1583-compatible {{ rfc1583_compat | contains("enable") }}
set router-id {{ router_id }}
set distribute-list-in {{ dist_list_in | chain("clean_phrase") | macro("ignore_empty") }}
set distribute-route-map-in {{ dist_routemap_in | chain("clean_phrase") | macro("ignore_empty") }}
<group name="areas*" macro="skip_empty">
config area {{ _start_ }}
<group>
edit {{ area | _start_ }}
set stub-type {{ stub_type }}
set type {{ area_type }}
set nssa-default-information-originate {{ nssa_default_originate | contains("enable") }}
set nssa-default-information-originate-metric {{ nssa_default_metric }}
set nssa-default-information-originate-metric-type {{ nssa_default_metric_type }}
set nssa-redistribution {{ nssa_redis }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="interfaces*" macro="skip_empty">
config ospf-interface {{ _start_ }}
<group contains="status">
edit {{ name | chain("clean_phrase") | _start_ }}
set interface {{ interface | chain("clean_phrase")}}
set ip {{ ip | exclude("0.0.0.0") }}
set cost {{ cost | exclude("0") }}
set priority {{ priority }}
set mtu {{ mtu | exclude("0") }}
set network-type {{ network }}
set status {{ status | contains("enable") }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="networks*" macro="skip_empty">
config network {{ _start_ }}
<group>
edit {{ id | _start_ }}
set prefix {{ prefix | ORPHRASE | to_ip | with_prefixlen }}
set area {{ area }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="redistribute*" contains="status">
config redistribute {{ protocol | chain("clean_phrase") | _start_ }}
set status {{ status | contains('enable') }}
set route-map {{ route_map | chain("clean_phrase") | macro("ignore_empty") }}
set metric-type {{ metric-type }}
set metric {{ metric | exclude("0") }}
set tag {{ tag | exclude("0")}}
end {{ _end_ }}
</group>
end {{ _end_ }}
</group>
"""
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "ospf": {
                    "areas": [
                        {
                            "area": "0.0.0.1",
                            "area_type": "nssa",
                            "nssa_default_metric": "10",
                            "nssa_default_metric_type": "2",
                            "nssa_redis": "enable",
                            "stub_type": "summary",
                        }
                    ],
                    "default_originate_metric": "10",
                    "default_originate_metric_type": "2",
                    "default_rt_metric": "10",
                    "dist_list_in": "OSPF_IMPORT_PREFIX",
                    "interfaces": [
                        {
                            "interface": "Vlan1",
                            "name": "vlan1-int",
                            "network": "point-to-point",
                            "priority": "1",
                            "status": "enable",
                        },
                        {
                            "interface": "vlan2",
                            "name": "vlan2-int",
                            "network": "point-to-point",
                            "priority": "1",
                            "status": "enable",
                        },
                    ],
                    "networks": [
                        {"area": "0.0.0.1", "id": "1", "prefix": "10.1.1.1/30"},
                        {"area": "0.0.0.1", "id": "2", "prefix": "10.1.1.3/30"},
                    ],
                    "redistribute": [
                        {
                            "metric-type": "2",
                            "protocol": "connected",
                            "status": "enable",
                        },
                        {"metric-type": "2", "protocol": "static", "status": "enable"},
                        {"metric-type": "2", "protocol": "bgp", "status": "enable"},
                    ],
                    "ref_bw": "1000",
                    "router_id": "10.1.1.1",
                }
            }
        ]
    ]
# test_reddit_answer_2()
def test_github_issue_32():
    """GitHub issue 32: key=value rows parsed with method="table" and two
    alternative line patterns (with and without a comment field)."""
    raw_rows = """
.id=*c;export-route-targets=65001:48;65001:0;import-route-targets=65001:48;interfaces=lo-ext;vlan56;route-distinguisher=65001:48;routing-mark=VRF_EXT
.id=*10;comment=;export-route-targets=65001:80;import-route-targets=65001:80;65001:0;interfaces=lo-private;route-distinguisher=65001:80;routing-mark=VRF_PRIVATE
"""
    row_template = """
<group method="table">
.id={{ id | exclude(";") }};export-route-targets={{ export-route-targets }};import-route-targets={{ import-route-targets }};interfaces={{ interfaces }};route-distinguisher={{ route-distinguisher }};routing-mark={{ routing-mark }}
.id={{ id }};comment{{ comment }};export-route-targets={{ export-route-targets }};import-route-targets={{ import-route-targets }};interfaces={{ interfaces }};route-distinguisher={{ route-distinguisher }};routing-mark={{ routing-mark }}
</group>
"""
    engine = ttp(raw_rows, row_template)
    engine.parse()
    flat = engine.result(structure="flat_list")
    expected = [
        {
            "export-route-targets": "65001:48;65001:0",
            "id": "*c",
            "import-route-targets": "65001:48",
            "interfaces": "lo-ext;vlan56",
            "route-distinguisher": "65001:48",
            "routing-mark": "VRF_EXT",
        },
        {
            "comment": "=",
            "export-route-targets": "65001:80",
            "id": "*10",
            "import-route-targets": "65001:80;65001:0",
            "interfaces": "lo-private",
            "route-distinguisher": "65001:80",
            "routing-mark": "VRF_PRIVATE",
        },
    ]
    assert flat == expected
# test_github_issue_32()
def test_slack_answer_1():
    """Slack question: a single group with two _start_ lines collects both the
    firmware and the hardware version, tagging each with a "type" value."""
    data = """
Firmware
Version
----------------
02.1.1 Build 002
Hardware
Version
----------------
V2R4
"""
    template = """
<group name="versions">
Hardware {{ _start_ }}
Firmware {{ _start_ }}
{{ version | PHRASE | let("type", "firmware") }}
{{ version | exclude("---") | exclude("Vers") | let("type", "hardware") }}
{{ _end_ }}
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result(structure="flat_list")
    # pprint.pprint(res)
    assert res == [
        {
            "versions": [
                {"type": "firmware", "version": "02.1.1 Build 002"},
                {"type": "hardware", "version": "V2R4"},
            ]
        }
    ]
# test_slack_answer_1()
def test_group_default_docs():
    """Docs example for group defaults. The "uncknown"/"Uncknown"/"hostame"
    misspellings are intentional — they are the literal default values and
    input text declared in the template below."""
    template = """
<input load="text">
device-hostame uptime is 27 weeks, 3 days, 10 hours, 46 minutes, 10 seconds
</input>
<group name="uptime**">
device-hostame uptime is {{ uptime | PHRASE }}
<group name="software">
software version {{ version | default("uncknown") }}
</group>
</group>
<group name="domain" default="Uncknown">
Default domain is {{ fqdn }}
</group>
"""
    parser = ttp(template=template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "domain": {"fqdn": "Uncknown"},
                "uptime": {
                    "software": {"version": "uncknown"},
                    "uptime": "27 weeks, 3 days, 10 hours, 46 minutes, 10 seconds",
                },
            }
        ]
    ]
# test_group_default_docs()
def test_github_issue_34_answer():
    """GitHub issue 34: default([]) yields an empty-list value when used as a match default."""
    tmpl = """
<input load="text">
Hi World
</input>
<group name='demo'>
<group name='audiences*'>
Hello {{ audience | default([]) }}
</group>
</group>
"""
    engine = ttp(template=tmpl, log_level="ERROR")
    engine.parse()
    outcome = engine.result()
    assert outcome == [[{"demo": {"audiences": [{"audience": []}]}}]]
# test_github_issue_34_answer()
def test_github_issue_33_answer_1():
    """GitHub issue 33: method="table" with per-line regex alternatives counts
    how many server addresses appear on each line."""
    template = """
<input load="text">
server 1.1.1.1
server 172.16.31.10 172.16.31.10
server 172.16.17.32 172.16.17.32 172.16.31.10
</input>
<group name="servers" method="table">
server {{ server | re(r"\\S+") | let("servers_number", 1 ) }}
server {{ server | re(r"\\S+ \\S+") | let("servers_number", 2) }}
server {{ server | re(r"\\S+ \\S+ \\S+") | let("servers_number", 3) }}
</group>
"""
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "servers": [
                    {"server": "1.1.1.1", "servers_number": 1},
                    {"server": "172.16.31.10 172.16.31.10", "servers_number": 2},
                    {"server": "172.16.17.32 172.16.17.32 172.16.31.10", "servers_number": 3},
                ]
            }
        ]
    ]
# test_github_issue_33_answer_1()
def test_issue_36():
    """Issue 36: parse numbered ACL entries into per-sequence-number lists
    keyed by ACL type and name.

    NOTE(review): some expected values (e.g. src_ntw "60.60.60.64") do not match
    the visible input data — the addresses look anonymized inconsistently; left
    as-is rather than guessed at.
    """
    template = """
<input load="text">
ip access-list standard 42
10 remark machine_A
10 permit 192.168.200.162
20 remark machine_B
20 permit 192.168.200.149
30 deny any log
ip access-list standard 98
10 permit 10.10.10.1
20 remark toto
20 permit 30.30.30.1
30 permit 30.30.30.0 0.0.0.255
ip access-list standard 99
10 permit 10.20.30.40 log
20 permit 20.30.40.1 log
30 remark DEVICE - SNMP RW
30 permit 172.16.58.3 0.0.0.127
40 permit 172.16.17.32 0.0.0.63
ip access-list extended 199
10 remark COLLECTOR - SNMP
10 permit ip 172.16.17.32 0.0.0.255 any
20 remark RETURN - Back
20 permit ip 172.16.31.10 0.0.0.127 any
30 remark VISUALIZE
30 permit ip host 172.16.58.3 any
</input>
<group name="ip.{{ acl_type }}.{{ acl_name }}">
ip access-list {{ acl_type }} {{ acl_name }}
<group name="{{ entry_id }}*" method="table">
{{ entry_id }} remark {{ remark_name | re(".+") | let("action", "remark") }}
{{ entry_id }} {{ action }} {{ src_host }}
{{ entry_id }} {{ action }} {{ src_host | let("log", "log") }} log
{{ entry_id }} {{ action }} {{ protocol }} host {{ src_host | let("dest_any", "any") }} any
{{ entry_id }} {{ action }} {{ protocol }} {{ src_ntw | let("dest_any", "any") }} {{ src_wildcard | IP }} any
{{ entry_id }} {{ action }} {{ src_ntw }} {{ src_wildcard | IP }}
</group>
</group>
"""
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # fix: debug print was left enabled; keep it commented out like the other tests
    # pprint.pprint(res)
    assert res == [
        [
            {
                "ip": {
                    "extended": {
                        "199": {
                            "10": [
                                {"action": "remark", "remark_name": "COLLECTOR - SNMP"},
                                {
                                    "action": "permit",
                                    "dest_any": "any",
                                    "protocol": "ip",
                                    "src_ntw": "172.16.17.32",
                                    "src_wildcard": "0.0.0.255",
                                },
                            ],
                            "20": [
                                {"action": "remark", "remark_name": "RETURN - Back"},
                                {
                                    "action": "permit",
                                    "dest_any": "any",
                                    "protocol": "ip",
                                    "src_ntw": "172.16.31.10",
                                    "src_wildcard": "0.0.0.127",
                                },
                            ],
                            "30": [
                                {"action": "remark", "remark_name": "VISUALIZE"},
                                {
                                    "action": "permit",
                                    "dest_any": "any",
                                    "protocol": "ip",
                                    "src_host": "172.16.58.3",
                                },
                            ],
                        }
                    },
                    "standard": {
                        "42": {
                            "10": [
                                {"action": "remark", "remark_name": "machine_A"},
                                {"action": "permit", "src_host": "192.168.200.162"},
                            ],
                            "20": [
                                {"action": "remark", "remark_name": "machine_B"},
                                {"action": "permit", "src_host": "192.168.200.149"},
                            ],
                            "30": [{"action": "deny", "log": "log", "src_host": "any"}],
                        },
                        "98": {
                            "10": [{"action": "permit", "src_host": "10.10.10.1"}],
                            "20": [
                                {"action": "remark", "remark_name": "toto"},
                                {"action": "permit", "src_host": "30.30.30.1"},
                            ],
                            "30": [
                                {
                                    "action": "permit",
                                    "src_ntw": "30.30.30.0",
                                    "src_wildcard": "0.0.0.255",
                                }
                            ],
                        },
                        "99": {
                            "10": [
                                {
                                    "action": "permit",
                                    "log": "log",
                                    "src_host": "10.20.30.40",
                                }
                            ],
                            "20": [
                                {
                                    "action": "permit",
                                    "log": "log",
                                    "src_host": "20.30.40.1",
                                }
                            ],
                            "30": [
                                {"action": "remark", "remark_name": "DEVICE - SNMP RW"},
                                {
                                    "action": "permit",
                                    "src_ntw": "172.16.58.3",
                                    "src_wildcard": "0.0.0.127",
                                },
                            ],
                            "40": [
                                {
                                    "action": "permit",
                                    "src_ntw": "60.60.60.64",
                                    "src_wildcard": "0.0.0.63",
                                }
                            ],
                        },
                    },
                }
            }
        ]
    ]
# test_issue_36()
def test_github_issue_37_original_data_template():
    """GitHub issue 37: parse Nokia/ALU epipe service configuration into nested
    epipe/sap/sdp groups, with a macro rewriting '*' in sap ids to 'qinq'.

    NOTE(review): addresses/strings in data and expected results look anonymized
    (e.g. "(6172.16.17.321)" in the last service_name), so some expected strings
    intentionally do not match the visible input — confirm against upstream
    test data before "fixing" them.
    """
    template = """
<macro>
import re
def qinq(data):
    data = re.sub(r"\\*", r"qinq", data)
    return data
</macro>
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id | _start_ }} customer {{ customer_id }} create
description "{{ description | ORPHRASE | default("none") }}"
service-mtu {{ service_mtu | default("none") }}
service-name "{{ service_name | ORPHRASE | default("none") }}"
<group name="endpoint" default="none">
endpoint {{ endpoint | _start_ }} create
revert-time {{ revert_time | default("none") }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | macro("qinq") | _start_ | ORPHRASE }} create
description "{{ description | ORPHRASE | default("none")}}"
multi-service-site "{{ mss_name | default("none") }}"
<group name="ingress" default="default_ingress" >
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress" default="default_egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id | default("none")}}:{{vc_id | _start_ | default("none") }} endpoint {{ endpoint | default("none") }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id | _start_ }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""
    data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (192.168.3.11)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "service": {
                    "epipe": {
                        "103076": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:",
                            "regular_sdp": {
                                "8051": {"state": "enabled", "vc_id": "103076"}
                            },
                            "sap": {
                                "1/2/12:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:",
                                    "egress": {
                                        "sap_egress": "1)",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "1",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN10",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103076 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103206": {
                            "customer_id": "1904",
                            "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                            "UK PLC Stepney Green E1 "
                            "3DG'",
                            "regular_sdp": {
                                "8035": {"state": "enabled", "vc_id": "103206"}
                            },
                            "sap": {
                                "2/2/3:401.100": {
                                    "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                                    "UK "
                                    "PLC "
                                    "Stepney "
                                    "Green "
                                    "E1 "
                                    "3DG'",
                                    "egress": {
                                        "sap_egress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "SKANSKA_E13DG_A825_LAN1",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103206 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103256": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:",
                            "regular_sdp": {
                                "8139": {"state": "enabled", "vc_id": "103256"}
                            },
                            "sap": {
                                "1/2/12:15.qinq": {
                                    "description": "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN5",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103256 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103742": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:",
                            "regular_sdp": {
                                "8061": {"state": "enabled", "vc_id": "103742"}
                            },
                            "sap": {
                                "5/2/50:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_STRAT_LON_A206_LANA",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103742 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "55517673": {
                            "customer_id": "4",
                            "description": "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "
                            "EPIPE#BAACTQ#VLAN 901",
                            "endpoint": {
                                "endpoint": '"SDP"',
                                "revert_time": "infinite",
                            },
                            "pwr_sdp": {
                                "8243": {
                                    "endpoint": '"SDP"',
                                    "precedence": "1",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                                "8245": {
                                    "endpoint": '"SDP"',
                                    "precedence": "primary",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                            },
                            "sap": {
                                "2/2/3:901.qinq": {
                                    "description": "2_2_3,H0505824A,Bulldog,VLAN "
                                    "901",
                                    "egress": {
                                        "sap_egress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "ingress": {
                                        "sap_ingress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "mss_name": "none",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1526",
                            "service_name": "epipe service-64585 "
                            "DKTN08a-D0105 "
                            "(6172.16.17.321)",
                            "state": "enabled",
                        },
                    }
                }
            }
        ]
    ]
# test_github_issue_37_original_data_template()
def test_github_issue_37_cleaned_up_data():
    """
    Problem with below template without bug fix, was that
    'no shutdown' statement for sap group was matched by
    spoke-sdp group as well and added to results causing
    false match. To fix it, added tracking of previously
    started groups in results object, so that before add
    match results to overall results if PATH differ need
    to check that this particular item groups has been
    started before, previous logic was not checking for that.
    Have not noticed any issues with other 200+ tests or
    any performance degradation for single/multi-process
    parsing.
    """
    # Minimal template: only the regular_sdp child group consumes
    # 'no shutdown'; the epipe-level 'no shutdown' must NOT leak into it.
    template = """
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}">
epipe {{ service_id }} customer {{ customer_id }} create
<group name="regular_sdp.{{r_spoke_sdp_id}}**">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") }}
</group>
</group>
</group>
"""
    data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)  # debug output disabled for a quiet test run, as in sibling tests
    # Each epipe must contain exactly one regular_sdp entry; the sap-level
    # 'no shutdown' lines must not produce extra matches.
    assert res == [
        [
            {
                "service": {
                    "epipe": {
                        "103076": {
                            "customer_id": "160",
                            "regular_sdp": {
                                "8051": {"state": "enabled", "vc_id": "103076"}
                            },
                        },
                        "103206": {
                            "customer_id": "1904",
                            "regular_sdp": {
                                "8035": {"state": "enabled", "vc_id": "103206"}
                            },
                        },
                    }
                }
            }
        ]
    ]
# test_github_issue_37_cleaned_up_data()
def test_github_issue_37_cleaned_data_template():
    """
    Full template for GitHub issue 37: parse Nokia/ALU epipe services into
    per-service dictionaries (sap, endpoint, pwr_sdp, regular_sdp children).
    The epipe with 'vc-switching' in its create line intentionally does not
    match the group start line and is absent from the results.
    """
    # NOTE: sap_egress default "1)" looks like a typo but is carried through
    # consistently into the expected results below, so it is left as-is.
    template = """
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id }} customer {{ customer_id }} create
description "{{ description | ORPHRASE }}"
service-mtu {{ service_mtu }}
service-name "{{ service_name | ORPHRASE }}"
<group name="endpoint" default="none">
endpoint {{ endpoint }} create
revert-time {{ revert_time }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | resub(r"\\*", "qinq") | ORPHRASE }} create
description "{{ description | ORPHRASE }}"
multi-service-site "{{ mss_name }}"
<group name="ingress">
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id }}:{{vc_id }} endpoint {{ endpoint }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""
    # FIX: service 55517673's service-name previously used a different,
    # unsanitized address (63.130.108.41) than the expected results below
    # (192.168.3.11), which made the assertion fail; normalized to the
    # sanitized address used by every other service-name in this data.
    data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (192.168.3.11)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "service": {
                    "epipe": {
                        "103076": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:",
                            "regular_sdp": {
                                "8051": {"state": "enabled", "vc_id": "103076"}
                            },
                            "sap": {
                                "1/2/12:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:",
                                    "egress": {
                                        "sap_egress": "1)",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "1",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN10",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103076 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103206": {
                            "customer_id": "1904",
                            "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                            "UK PLC Stepney Green E1 "
                            "3DG'",
                            "regular_sdp": {
                                "8035": {"state": "enabled", "vc_id": "103206"}
                            },
                            "sap": {
                                "2/2/3:401.100": {
                                    "description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
                                    "UK "
                                    "PLC "
                                    "Stepney "
                                    "Green "
                                    "E1 "
                                    "3DG'",
                                    "egress": {
                                        "sap_egress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11010",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "SKANSKA_E13DG_A825_LAN1",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103206 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103256": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:",
                            "regular_sdp": {
                                "8139": {"state": "enabled", "vc_id": "103256"}
                            },
                            "sap": {
                                "1/2/12:15.qinq": {
                                    "description": "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_VSNL_STRAT_A206_LAN5",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1988",
                            "service_name": "EPIPE service-103256 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "103742": {
                            "customer_id": "160",
                            "description": "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:",
                            "regular_sdp": {
                                "8061": {"state": "enabled", "vc_id": "103742"}
                            },
                            "sap": {
                                "5/2/50:20.qinq": {
                                    "description": "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:",
                                    "egress": {
                                        "sap_egress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "ingress": {
                                        "sap_ingress": "11000",
                                        "scheduler_policy": "none",
                                    },
                                    "mss_name": "TATA_STRAT_LON_A206_LANA",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1588",
                            "service_name": "EPIPE service-103742 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                        "55517673": {
                            "customer_id": "4",
                            "description": "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "
                            "EPIPE#BAACTQ#VLAN 901",
                            "endpoint": {
                                "endpoint": '"SDP"',
                                "revert_time": "infinite",
                            },
                            "pwr_sdp": {
                                "8243": {
                                    "endpoint": '"SDP"',
                                    "precedence": "1",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                                "8245": {
                                    "endpoint": '"SDP"',
                                    "precedence": "primary",
                                    "state": "enabled",
                                    "vc_id": "55517673",
                                },
                            },
                            "sap": {
                                "2/2/3:901.qinq": {
                                    "description": "2_2_3,H0505824A,Bulldog,VLAN "
                                    "901",
                                    "egress": {
                                        "sap_egress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "ingress": {
                                        "sap_ingress": "20010",
                                        "scheduler_policy": '"NGA-LLU-300M"',
                                    },
                                    "mss_name": "none",
                                    "state": "enabled",
                                }
                            },
                            "service_mtu": "1526",
                            "service_name": "epipe service-64585 "
                            "DKTN08a-D0105 "
                            "(192.168.3.11)",
                            "state": "enabled",
                        },
                    }
                }
            }
        ]
    ]
# test_github_issue_37_cleaned_data_template()
def test_github_issue_42():
    """
    GitHub issue 42: two sibling groups share the name "route-targets",
    so import and export route-targets end up as separate dictionaries
    inside one "route-targets" list, each joined via joinmatches.
    """
    data = """
vrf xyz
address-family ipv4 unicast
import route-target
65000:3507
65000:3511
65000:5453
65000:5535
!
export route-target
65000:5453
65000:5535
!
!
!
"""
    template = """
<group name="vrfs">
vrf {{name}}
<group name="route-targets">
import route-target {{ _start_ }}
{{ import | to_list | joinmatches }}
</group>
!
<group name="route-targets">
export route-target {{ _start_ }}
{{ export | to_list | joinmatches }}
</group>
</group>
"""
    expected = [
        [
            {
                "vrfs": {
                    "name": "xyz",
                    "route-targets": [
                        {
                            "import": [
                                "65000:3507",
                                "65000:3511",
                                "65000:5453",
                                "65000:5535",
                            ]
                        },
                        {"export": ["65000:5453", "65000:5535"]},
                    ],
                }
            }
        ]
    ]
    parser_object = ttp(data=data, template=template, log_level="ERROR")
    parser_object.parse()
    assert parser_object.result() == expected
# test_github_issue_42()
def test_github_issue_42_answer():
    """
    Answer given for GitHub issue 42: use distinct group names
    (import_rts / export_rts) with a bare {{ ... | _start_ }} line per
    route-target, producing one dictionary per matched value.
    """
    data = """
vrf xyz
address-family ipv4 unicast
import route-target
65000:3507
65000:3511
65000:5453
65000:5535
!
export route-target
65000:5453
65000:5535
!
!
!
"""
    template = """
<group name="vrfs">
vrf {{name}}
<group name="import_rts">
import route-target {{ _start_ }}
{{ import_rt | _start_ }}
</group>
!
<group name="export_rts">
export route-target {{ _start_ }}
{{ export_rt | _start_ }}
</group>
</group>
"""
    expected = [
        [
            {
                "vrfs": {
                    "export_rts": [
                        {"export_rt": "65000:5453"},
                        {"export_rt": "65000:5535"},
                    ],
                    "import_rts": [
                        {"import_rt": "65000:3507"},
                        {"import_rt": "65000:3511"},
                        {"import_rt": "65000:5453"},
                        {"import_rt": "65000:5535"},
                    ],
                    "name": "xyz",
                }
            }
        ]
    ]
    parser_object = ttp(data=data, template=template, log_level="ERROR")
    parser_object.parse()
    assert parser_object.result() == expected
# test_github_issue_42_answer()
def test_issue_45():
    """
    Issue 45: parse curly-brace delimited (Juniper-style) dhcp-relay
    configuration into nested groups. The commented-out expectation below
    documents the old, buggy output that contained spurious
    'server_group_name1: overrides' items; the live assertion reflects the
    fixed behavior (see note before the assert).
    """
    data = """
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
DHCP-NGN-SIG {
10.154.6.147;
}
}
group group2 {
active-server-group IN_MEDIA_SIGNALING;
overrides {
trust-option-82;
}
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""
    template = """
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name1 | _start_ }} {
<group name="helper_addresses*">
{{ helper_address | IP }};
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
active-server-group {{server_group_name2}};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)  # debug output disabled for a quiet test run, as in sibling tests
    # assert res == [
    #     [
    #         {
    #             "vrfs": [
    #                 {
    #                     "forwarding_options": {
    #                         "dhcp_relay": {
    #                             "groups": [
    #                                 {
    #                                     "group_name": "group2",
    #                                     "server_group_name2": "IN_MEDIA_SIGNALING",
    #                                 },
    #                                 {
    #                                     "group_name": "NGN-SIG",
    #                                     "server_group_name2": "DHCP-NGN-SIG",
    #                                 },
    #                             ],
    #                             "server_group": {
    #                                 "dhcp": [
    #                                     {
    #                                         "helper_addresses": [
    #                                             {"helper_address": "10.154.6.147"}
    #                                         ],
    #                                         "server_group_name1": "IN_MEDIA_SIGNALING",
    #                                     },
    #                                     {
    #                                         "helper_addresses": [
    #                                             {"helper_address": "10.154.6.147"}
    #                                         ],
    #                                         "server_group_name1": "DHCP-NGN-SIG",
    #                                     },
    #                                     {"server_group_name1": "overrides"},
    #                                     {"server_group_name1": "overrides"},
    #                                 ]
    #                             },
    #                         }
    #                     },
    #                     "name": "vrf2",
    #                 }
    #             ]
    #         }
    #     ]
    # ]
    # was able to fix the issue by introducing ended_groups tracking in results
    # processing while was trying to fix issue 57
    assert res == [
        [
            {
                "vrfs": [
                    {
                        "forwarding_options": {
                            "dhcp_relay": {
                                "groups": [
                                    {
                                        "group_name": "group2",
                                        "server_group_name2": "IN_MEDIA_SIGNALING",
                                    },
                                    {
                                        "group_name": "NGN-SIG",
                                        "server_group_name2": "DHCP-NGN-SIG",
                                    },
                                ],
                                "server_group": {
                                    "dhcp": [
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "IN_MEDIA_SIGNALING",
                                        },
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "DHCP-NGN-SIG",
                                        },
                                    ]
                                },
                            }
                        },
                        "name": "vrf2",
                    }
                ]
            }
        ]
    ]
# test_issue_45()
def test_issue_45_1():
    """
    Reduced variant of issue 45: the anonymous-start 'dhcp*' group and the
    'group ...' lines compete for the same input; 'group NGN-SIG {' must be
    captured by the 'groups*' group, while 'overrides {' still lands in
    'dhcp' because nothing filters it out here.
    """
    data = """
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""
    template = """
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name | _start_ }} {
</group>
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
</group>
</group>
</group>
</group>
"""
    expected = [
        [
            {
                "vrfs": [
                    {
                        "forwarding_options": {
                            "dhcp_relay": {
                                "groups": [{"group_name": "NGN-SIG"}],
                                "server_group": {
                                    "dhcp": [
                                        {"server_group_name": "IN_MEDIA_SIGNALING"},
                                        {"server_group_name": "overrides"},
                                    ]
                                },
                            }
                        },
                        "name": "vrf2",
                    }
                ]
            }
        ]
    ]
    parser_object = ttp(data=data, template=template, log_level="ERROR")
    parser_object.parse()
    assert parser_object.result() == expected
# test_issue_45_1()
def test_issue_45_filtering_fix():
    """
    Same scenario as test_issue_45, but the template filters the spurious
    matches with exclude("overrides") on server_group_name1, so the 'dhcp'
    list contains only real server-group entries.
    """
    # Juniper-style, curly-brace delimited dhcp-relay configuration.
    data = """
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
DHCP-NGN-SIG {
10.154.6.147;
}
}
group group2 {
active-server-group IN_MEDIA_SIGNALING;
overrides {
trust-option-82;
}
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""
    # Nested groups mirror the brace nesting; each scope is closed
    # explicitly with an _end_ matcher on the closing '}' line.
    template = """
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name1 | _start_ | exclude("overrides") }} {
<group name="helper_addresses*">
{{ helper_address | IP }};
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
active-server-group {{server_group_name2}};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # Unlike test_issue_45_1, no "overrides" items appear in the 'dhcp' list.
    assert res == [
        [
            {
                "vrfs": [
                    {
                        "forwarding_options": {
                            "dhcp_relay": {
                                "groups": [
                                    {
                                        "group_name": "group2",
                                        "server_group_name2": "IN_MEDIA_SIGNALING",
                                    },
                                    {
                                        "group_name": "NGN-SIG",
                                        "server_group_name2": "DHCP-NGN-SIG",
                                    },
                                ],
                                "server_group": {
                                    "dhcp": [
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "IN_MEDIA_SIGNALING",
                                        },
                                        {
                                            "helper_addresses": [
                                                {"helper_address": "10.154.6.147"}
                                            ],
                                            "server_group_name1": "DHCP-NGN-SIG",
                                        },
                                    ]
                                },
                            }
                        },
                        "name": "vrf2",
                    }
                ]
            }
        ]
    ]
# test_issue_45_filtering_fix()
def test_issue_47_answer():
    """
    Answer for issue 47: several alternative _start_ lines act as guards,
    so only interface blocks preceded by one of the ABC/DEF/GKL marker
    lines are collected; the Vlan778 block has no marker and is skipped.
    """
    data = """
Some text which indicates that below block should be included in results ABC
interface Loopback0
description Router-id-loopback
ip address 192.168.0.113/24
!
Some text which indicates that below block should be included in results DEF
interface Loopback2
description Router-id-loopback 2
ip address 192.168.0.114/24
!
Some text which indicates that below block should NOT be included in results
interface Vlan778
description CPE_Acces_Vlan
ip address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/124
ip vrf CPE1
!
Some text which indicates that below block should be included in results GKL
interface Loopback3
description Router-id-loopback 3
ip address 192.168.0.115/24
!
"""
    template = """
Some text which indicates that below block should be included in results ABC {{ _start_ }}
Some text which indicates that below block should be included in results DEF {{ _start_ }}
Some text which indicates that below block should be included in results GKL {{ _start_ }}
interface {{ interface }}
ip address {{ ip }}/{{ mask }}
description {{ description | re(".+") }}
ip vrf {{ vrf }}
! {{ _end_ }}
"""
    expected = [
        [
            [
                {
                    "description": "Router-id-loopback",
                    "interface": "Loopback0",
                    "ip": "192.168.0.113",
                    "mask": "24",
                },
                {
                    "description": "Router-id-loopback 2",
                    "interface": "Loopback2",
                    "ip": "192.168.0.114",
                    "mask": "24",
                },
                {
                    "description": "Router-id-loopback 3",
                    "interface": "Loopback3",
                    "ip": "192.168.0.115",
                    "mask": "24",
                },
            ]
        ]
    ]
    parser_object = ttp(data=data, template=template, log_level="ERROR")
    parser_object.parse()
    assert parser_object.result() == expected
# test_issue_47_answer()
def test_issue_48_answer():
    """
    Answer for issue 48: parse a university course catalog. The multi-line
    course description is accumulated with a chain of match functions
    (descr_chain defined in template <vars>) that joins lines while
    excluding the Prerequisite(s)/Department(s) footer lines.
    """
    data = """
ECON*3400 The Economics of Personnel Management U (3-0) [0.50]
In this course, we examine the economics of personnel management in organizations.
Using mainstream microeconomic and behavioural economic theory, we will consider
such issues as recruitment, promotion, financial and non-financial incentives,
compensation, job performance, performance evaluation, and investment in personnel.
The interplay between theoretical models and empirical evidence will be emphasized in
considering different approaches to the management of personnel.
Prerequisite(s): ECON*2310 or ECON*2200
Department(s): Department of Economics and Finance
ECON*4400 The Economics of Personnel Management U (7-1) [0.90]
In this course, we examine the economics of personnel management in organizations.
Using mainstream microeconomic and behavioural economic theory, we will consider
such issues as recruitment, promotion, financial and non-financial incentives,
compensation, job performance, performance evaluation, and investment in personnel.
Prerequisite(s): ECON*2310
Department(s): Department of Economics
"""
    template = """
<vars>
descr_chain = [
"PHRASE",
"exclude('Prerequisite(s)')",
"exclude('Department(s)')",
"joinmatches"
]
</vars>
<group>
{{ course }}*{{ code }} {{ name | PHRASE }} {{ semester }} ({{ lecture_lab_time }}) [{{ weight }}]
{{ description | chain(descr_chain) }}
Prerequisite(s): {{ prereqs | ORPHRASE }}
Department(s): {{ department | ORPHRASE }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=150)
    # Descriptions come back newline-joined, one dictionary per course.
    assert res == [
        [
            [
                {
                    "code": "3400",
                    "course": "ECON",
                    "department": "Department of Economics and Finance",
                    "description": "In this course, we examine the economics of personnel management in organizations.\n"
                    "Using mainstream microeconomic and behavioural economic theory, we will consider\n"
                    "such issues as recruitment, promotion, financial and non-financial incentives,\n"
                    "compensation, job performance, performance evaluation, and investment in personnel.\n"
                    "The interplay between theoretical models and empirical evidence will be emphasized in\n"
                    "considering different approaches to the management of personnel.",
                    "lecture_lab_time": "3-0",
                    "name": "The Economics of Personnel Management",
                    "prereqs": "ECON*2310 or ECON*2200",
                    "semester": "U",
                    "weight": "0.50",
                },
                {
                    "code": "4400",
                    "course": "ECON",
                    "department": "Department of Economics",
                    "description": "In this course, we examine the economics of personnel management in organizations.\n"
                    "Using mainstream microeconomic and behavioural economic theory, we will consider\n"
                    "such issues as recruitment, promotion, financial and non-financial incentives,\n"
                    "compensation, job performance, performance evaluation, and investment in personnel.",
                    "lecture_lab_time": "7-1",
                    "name": "The Economics of Personnel Management",
                    "prereqs": "ECON*2310",
                    "semester": "U",
                    "weight": "0.90",
                },
            ]
        ]
    ]
# test_issue_48_answer()
def test_issue_48_answer_more():
    """
    Follow-up for issue 48: multi-line Restriction(s) text is collected by
    a null-path child group (name="_") whose results merge into the parent,
    combining PHRASE | joinmatches with the shared chain_1 var.
    """
    data = """
IBIO*4521 Thesis in Integrative Biology F (0-12) [1.00]
This course is the first part of the two-semester course IBIO*4521/2. This course is
a two-semester (F,W) undergraduate project in which students conduct a comprehensive,
independent research project in organismal biology under the supervision of a faculty
member in the Department of Integrative Biology. Projects involve a thorough literature
review, a research proposal, original research communicated in oral and poster
presentations, and in a written, publication quality document. This two-semester course
offers students the opportunity to pursue research questions and experimental designs
that cannot be completed in the single semester research courses. Students must make
arrangements with both a faculty supervisor and the course coordinator at least one
semester in advance. A departmental registration form must be obtained from the course
coordinator and submitted no later than the second class day of the fall semester. This is
a twosemester course offered over consecutive semesters F-W. When you select this
course, you must select IBIO*4521 in the Fall semester and IBIO*4522 in the Winter
semester.A grade will not be assigned to IBIO*4521 until IBIO*4522 has been completed.
Prerequisite(s): 12.00 credits
Restriction(s): Normally a minimum cumulative average of 70%. Permission of course
coordinator.
Department(s): Department of Integrative Biology
IBIO*4533 Thesis in Integrative Biology F (0-14) [2.00]
This course is the first part of the two-semester course IBIO*4521/2. This course is
a two-semester (F,W) undergraduate project in which students conduct a comprehensive,
independent research project in organismal biology under the supervision of a faculty
member in the Department of Integrative Biology.
Restriction(s): Normally a minimum cumulative average of 80%. Permission of course
coordinator. Normally a minimum cumulative average of 90%. Permission of course
coordinator.
Department(s): Department of Integrative Biology
"""
    template = """
<vars>
chain_1 = [
"ORPHRASE",
"exclude('Prerequisite(s)')",
"exclude('Department(s)')",
"exclude('Restriction(s)')",
"joinmatches"
]
</vars>
<group>
{{ course }}*{{ code }} {{ name | PHRASE }} {{ semester }} ({{ lecture_lab_time }}) [{{ weight }}]
{{ description | chain(chain_1) }}
Prerequisite(s): {{ prereqs | ORPHRASE }}
Department(s): {{ department | ORPHRASE }}
<group name="_">
Restriction(s): {{ restrictions | PHRASE | joinmatches }}
{{ restrictions | chain(chain_1) }}
</group>
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=150)  # debug output disabled for a quiet test run, as in sibling tests
    assert res == [
        [
            [
                {
                    "code": "4521",
                    "course": "IBIO",
                    "department": "Department of Integrative Biology",
                    "description": "This course is the first part of the two-semester course IBIO*4521/2. This course is\n"
                    "a two-semester (F,W) undergraduate project in which students conduct a comprehensive,\n"
                    "independent research project in organismal biology under the supervision of a faculty\n"
                    "member in the Department of Integrative Biology. Projects involve a thorough literature\n"
                    "review, a research proposal, original research communicated in oral and poster\n"
                    "presentations, and in a written, publication quality document. This two-semester course\n"
                    "offers students the opportunity to pursue research questions and experimental designs\n"
                    "that cannot be completed in the single semester research courses. Students must make\n"
                    "arrangements with both a faculty supervisor and the course coordinator at least one\n"
                    "semester in advance. A departmental registration form must be obtained from the course\n"
                    "coordinator and submitted no later than the second class day of the fall semester. This is\n"
                    "a twosemester course offered over consecutive semesters F-W. When you select this\n"
                    "course, you must select IBIO*4521 in the Fall semester and IBIO*4522 in the Winter\n"
                    "semester.A grade will not be assigned to IBIO*4521 until IBIO*4522 has been completed.",
                    "lecture_lab_time": "0-12",
                    "name": "Thesis in Integrative Biology",
                    "prereqs": "12.00 credits",
                    "restrictions": "Normally a minimum cumulative average of 70%. Permission of course\ncoordinator.",
                    "semester": "F",
                    "weight": "1.00",
                },
                {
                    "code": "4533",
                    "course": "IBIO",
                    "department": "Department of Integrative Biology",
                    "description": "This course is the first part of the two-semester course IBIO*4521/2. This course is\n"
                    "a two-semester (F,W) undergraduate project in which students conduct a comprehensive,\n"
                    "independent research project in organismal biology under the supervision of a faculty\n"
                    "member in the Department of Integrative Biology.",
                    "lecture_lab_time": "0-14",
                    "name": "Thesis in Integrative Biology",
                    "restrictions": "Normally a minimum cumulative average of 80%. Permission of course\n"
                    "coordinator. Normally a minimum cumulative average of 90%. Permission of course\n"
                    "coordinator.",
                    "semester": "F",
                    "weight": "2.00",
                },
            ]
        ]
    ]
# test_issue_48_answer_more()
def test_slack_channel_answer_for_Noif():
    """
    Slack channel answer: parse MikroTik '/ip address add ...' export lines
    without conditionals by listing one _start_ line per line format and
    seeding missing fields from a defaults dictionary defined in <vars>.
    Results are flattened with result(structure="flat_list").
    """
    # Sample covers every combination: with/without comment (quoted and
    # unquoted) and with/without disabled=yes.
    data = """
# not disabled and no comment
/ip address add address=10.4.1.245 interface=lo0 network=10.4.1.245
/ip address add address=10.4.1.246 interface=lo1 network=10.4.1.246
# not disabled and comment with no quotes
/ip address add address=10.9.48.241/29 comment=SITEMON interface=ether2 network=10.9.48.240
/ip address add address=10.9.48.233/29 comment=Camera interface=vlan205@bond1 network=10.9.48.232
/ip address add address=10.9.49.1/24 comment=SM-Management interface=vlan200@bond1 network=10.9.49.0
# not disabled and comment with quotes
/ip address add address=10.4.1.130/30 comment="to core01" interface=vlan996@bond4 network=10.4.1.128
/ip address add address=10.4.250.28/29 comment="BH 01" interface=vlan210@bond1 network=10.4.250.24
/ip address add address=10.9.50.13/30 comment="Cust: site01-PE" interface=vlan11@bond1 network=10.9.50.12
# disabled no comment
/ip address add address=10.0.0.2/30 disabled=yes interface=bridge:customer99 network=10.0.0.0
# disabled with comment
/ip address add address=169.254.1.100/24 comment=Cambium disabled=yes interface=vlan200@bond1 network=169.254.1.0
# disabled with comment with quotes
/ip address add address=10.4.248.20/29 comment="Backhaul to AGR (Test Segment)" disabled=yes interface=vlan209@bond1 network=10.4.248.16
"""
    # '##' lines below are template comments; default="default_values"
    # fills 'comment' and 'disabled' when a line variant lacks them.
    template = """
<vars>
default_values = {
"comment": "",
"disabled": False
}
</vars>
<group default="default_values">
## not disabled and no comment
/ip address add address={{ ip | _start_ }} interface={{ interface }} network={{ network }}
## not disabled and comment with/without quotes
/ip address add address={{ ip | _start_ }}/{{ mask }} comment={{ comment | ORPHRASE | exclude("disabled=") | strip('"')}} interface={{ interface }} network={{ network }}
## disabled no comment
/ip address add address={{ ip | _start_ }}/{{ mask }} disabled={{ disabled }} interface={{ interface }} network={{ network }}
## disabled with comment with/without quotes
/ip address add address={{ ip | _start_ }}/{{ mask }} comment={{ comment | ORPHRASE | exclude("disabled=") | strip('"') }} disabled={{ disabled }} interface={{ interface }} network={{ network }}
</group>
"""
    parser = ttp(data=data, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result(structure="flat_list")
    # pprint.pprint(res, width=200)
    # One flat dictionary per address line; note 'mask' is absent for the
    # two /32-less loopback lines matched by the first _start_ variant.
    assert res == [
        {
            "comment": "",
            "disabled": False,
            "interface": "lo0",
            "ip": "10.4.1.245",
            "network": "10.4.1.245",
        },
        {
            "comment": "",
            "disabled": False,
            "interface": "lo1",
            "ip": "10.4.1.246",
            "network": "10.4.1.246",
        },
        {
            "comment": "SITEMON",
            "disabled": False,
            "interface": "ether2",
            "ip": "10.9.48.241",
            "mask": "29",
            "network": "10.9.48.240",
        },
        {
            "comment": "Camera",
            "disabled": False,
            "interface": "vlan205@bond1",
            "ip": "10.9.48.233",
            "mask": "29",
            "network": "10.9.48.232",
        },
        {
            "comment": "SM-Management",
            "disabled": False,
            "interface": "vlan200@bond1",
            "ip": "10.9.49.1",
            "mask": "24",
            "network": "10.9.49.0",
        },
        {
            "comment": "to core01",
            "disabled": False,
            "interface": "vlan996@bond4",
            "ip": "10.4.1.130",
            "mask": "30",
            "network": "10.4.1.128",
        },
        {
            "comment": "BH 01",
            "disabled": False,
            "interface": "vlan210@bond1",
            "ip": "10.4.250.28",
            "mask": "29",
            "network": "10.4.250.24",
        },
        {
            "comment": "Cust: site01-PE",
            "disabled": False,
            "interface": "vlan11@bond1",
            "ip": "10.9.50.13",
            "mask": "30",
            "network": "10.9.50.12",
        },
        {
            "comment": "",
            "disabled": "yes",
            "interface": "bridge:customer99",
            "ip": "10.0.0.2",
            "mask": "30",
            "network": "10.0.0.0",
        },
        {
            "comment": "Cambium",
            "disabled": "yes",
            "interface": "vlan200@bond1",
            "ip": "169.254.1.100",
            "mask": "24",
            "network": "169.254.1.0",
        },
        {
            "comment": "Backhaul to AGR (Test Segment)",
            "disabled": "yes",
            "interface": "vlan209@bond1",
            "ip": "10.4.248.20",
            "mask": "29",
            "network": "10.4.248.16",
        },
    ]
# test_slack_channel_answer_for_Noif()
def test_slack_answer_2():
    """
    Parse Nokia SROS port configuration with nested TTP groups.

    Exercises ``exit {{ _end_ }}`` markers at several nesting levels; the
    inline template comment records the original problem — a mis-indented
    ``exit {{ _end_ }}`` closed the "egress" group too early.

    Fix applied here: the debug ``pprint`` call was left enabled, unlike
    every sibling test; it is now commented out to keep test output quiet.
    """
    data_to_parse = """
 port 1/1/1
        description "port 1 description"
        ethernet
            mode hybrid
            encap-type dot1q
            crc-monitor
                sd-threshold 5 multiplier 5
                sf-threshold 3 multiplier 5
                window-size 60
            exit
            network
                queue-policy "ncq-only"
                accounting-policy 12
                collect-stats
                egress
                    queue-group "qos-policy-for-router1" instance 1 create
                        accounting-policy 1
                        collect-stats
                        agg-rate
                            rate 50000
                        exit
                    exit
                exit
            exit
        access
            egress
                queue-group "policer-output-queues" instance 1 create
                    accounting-policy 1
                    collect-stats
                exit
            exit
        exit
        lldp
            dest-mac nearest-bridge
                admin-status tx-rx
                notification
                tx-tlvs port-desc sys-name sys-desc sys-cap
                tx-mgmt-address system
            exit
        exit
        down-on-internal-error
    exit
    no shutdown
exit
 port 1/1/2
        description "another port to a another router"
        ethernet
            mode hybrid
            encap-type dot1q
            egress-scheduler-policy "qos-port-scheduler"
            crc-monitor
                sd-threshold 5 multiplier 5
                sf-threshold 3 multiplier 5
                window-size 60
            exit
        access
            egress
                queue-group "policer-output-queues" instance 1 create
                    accounting-policy 1
                    collect-stats
                exit
            exit
        exit
        down-on-internal-error
    exit
    no shutdown
exit
 port 1/1/3
        description "port 3 to some third router"
        ethernet
            mode access
            encap-type dot1q
            mtu 2000
            egress-scheduler-policy "strict-scheduler"
            network
                queue-policy "ncq-only"
                accounting-policy 12
                collect-stats
                egress
                    queue-group "some-shaping-policy" instance 1 create
                        accounting-policy 1
                        collect-stats
                        agg-rate
                            rate 50000
                        exit
                    exit
                    queue-group "another-shaping-policy" instance 1 create
                        accounting-policy 1
                        collect-stats
                        agg-rate
                            rate 50000
                        exit
                    exit
                    queue-group "this-shaper-is-cool" instance 1 create
                        agg-rate
                            rate 1000000
                        exit
                    exit
                exit
            exit
        exit
    no shutdown
exit
    """
    template = """
<group name="system.ports">
port {{ id }}
    shutdown {{ admin_enabled | set(false) }}
    description "{{ description | ORPHRASE | strip('"') }}"
    <group name="ethernet">
    ethernet {{ _start_ }}
        mode {{ mode }}
        encap-type {{ encap_type }}
        mtu {{ mtu | DIGIT }}
        egress-scheduler-policy {{ egress_sched_policy | strip('"') }}
        loopback internal persistent {{ loop_internal | set(true) }}
        <group name="network">
        network {{ _start_ }}
            queue-policy {{ queue_policy | ORPHRASE | strip('"') }}
            accounting-policy {{ accounting_policy | DIGIT }}
            collect-stats {{ collect_stats | set(true) }}
            <group name="egress">
            egress {{ _start_ }}
                <group name="queuegroups*">
                queue-group {{ name | strip('"') }} instance 1 create
                        rate {{ agg_rate | DIGIT }}
                    exit {{_end_}}
                </group>
## this "exit {{ _end_ }}" had wrong indentation level, leading to
## group name="egress" finishing too early
                exit {{_end_}}
            </group>
            exit {{_end_}}
        </group>
        lldp {{ lldp_enabled | set(true) }}
        exit {{_end_}}
    </group>
    no shutdown {{admin_enabled | set(true)}}
exit {{_end_}}
</group>
    """
    parser = ttp(data=data_to_parse, template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=150)
    assert res == [
        [
            {
                "system": {
                    "ports": [
                        {
                            "admin_enabled": True,
                            "description": "port 1 description",
                            "ethernet": {
                                "encap_type": "dot1q",
                                "lldp_enabled": True,
                                "mode": "hybrid",
                                "network": {
                                    "accounting_policy": "12",
                                    "collect_stats": True,
                                    "egress": {
                                        "queuegroups": [
                                            {
                                                "agg_rate": "50000",
                                                "name": "qos-policy-for-router1",
                                            }
                                        ]
                                    },
                                    "queue_policy": "ncq-only",
                                },
                            },
                            "id": "1/1/1",
                        },
                        {
                            "admin_enabled": True,
                            "description": "another port to a another router",
                            "ethernet": {
                                "egress_sched_policy": "qos-port-scheduler",
                                "encap_type": "dot1q",
                                "mode": "hybrid",
                            },
                            "id": "1/1/2",
                        },
                        {
                            "admin_enabled": True,
                            "description": "port 3 to some third router",
                            "ethernet": {
                                "egress_sched_policy": "strict-scheduler",
                                "encap_type": "dot1q",
                                "mode": "access",
                                "mtu": "2000",
                                "network": {
                                    "accounting_policy": "12",
                                    "collect_stats": True,
                                    "egress": {
                                        "queuegroups": [
                                            {
                                                "agg_rate": "50000",
                                                "name": "some-shaping-policy",
                                            },
                                            {
                                                "agg_rate": "50000",
                                                "name": "another-shaping-policy",
                                            },
                                            {
                                                "agg_rate": "1000000",
                                                "name": "this-shaper-is-cool",
                                            },
                                        ]
                                    },
                                    "queue_policy": "ncq-only",
                                },
                            },
                            "id": "1/1/3",
                        },
                    ]
                }
            }
        ]
    ]
# test_slack_answer_2()
def test_slack_answer_3():
    """
    Problem was that interfaces were matched by regexes from both ospf and ospfv3
    groups, decision logic was not able to properly work out to which group result
    should belong, changed behavior to check if match is a child of current record
    group and use it if so. Also had to change how group id encoded from string to
    tuple of two elements ("group path", "group index",)
    Here is some debug output until problem was fixed:
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf3**::1
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf**.interfaces*::0
    re_idex: 0
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf3**::1
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf3**.interfaces*::0
    re_idex: 1
    # problem was happening because logic was not able to decide that need to use this match
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf**::0
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf**.interfaces*::0
    re_idex: 0
    # problem was happening because logic was picking up this match
    self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf**::0
    re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf3**.interfaces*::0
    re_idex: 1
    Wrong results:
    [[{'service': {'vprns': [{'4': {'name': 'ospf_version3_vprn',
    'ospf': {'area': '0.0.0.0', 'interfaces': [{'name': 'interface-one'}]},
    'ospf3': {'area': '0.0.0.0', 'interfaces': [{'name': 'interface-two'}]}},
    '5': {'name': 'vprn5', 'ospf': {'area': '0.0.0.0'},
    'ospf3': {'interfaces': [{'name': 'interface-three'}]}}}]}}]]
    """
    # Minimal reproduction: two vprns, one with both ospf and ospf3 sections.
    # Both "ospf**" and "ospf3**" groups contain an identical "interfaces*"
    # child, which is exactly what confused the old group-resolution logic.
    data = """
service
    vprn 4 name "ospf_version3_vprn" customer 40 create
        ospf
            area 0.0.0.0
                interface "interface-one"
        ospf3 0
            area 0.0.0.0
                interface "interface-two"
    vprn 5 name "vprn5" customer 50 create
        ospf
            area 0.0.0.0
                interface "interface-three"
    """
    template = """
<group name="service.vprns*.{{id}}**">
vprn {{ id }} name {{ name | ORPHRASE | strip('"') }} customer {{ ignore }} create
    <group name="ospf**">
    ospf {{ _start_ }}
        area {{ area }}
            <group name="interfaces*">
            interface {{ name | ORPHRASE | strip('"') }}
            </group>
    </group>
    <group name="ospf3**">
    ospf3 0 {{ _start_ }}
        area {{ area }}
            <group name="interfaces*">
            interface {{ name | ORPHRASE | strip('"') }}
            </group>
    </group>
</group>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=100)
    # Each interface must land under its own parent group (ospf vs ospf3).
    assert res == [
        [
            {
                "service": {
                    "vprns": [
                        {
                            "4": {
                                "name": "ospf_version3_vprn",
                                "ospf": {
                                    "area": "0.0.0.0",
                                    "interfaces": [{"name": "interface-one"}],
                                },
                                "ospf3": {
                                    "area": "0.0.0.0",
                                    "interfaces": [{"name": "interface-two"}],
                                },
                            },
                            "5": {
                                "name": "vprn5",
                                "ospf": {
                                    "area": "0.0.0.0",
                                    "interfaces": [{"name": "interface-three"}],
                                },
                            },
                        }
                    ]
                }
            }
        ]
    ]
# test_slack_answer_3()
def test_slack_answer_3_full():
    """
    Full-config variant of ``test_slack_answer_3``: parse a complete SROS
    "service" section (vprns with interfaces, saps, bgp, ospf and ospf3)
    and check that ospf/ospf3 child "interfaces*" matches are attributed
    to the correct parent group.

    Fix applied here: the debug ``pprint`` call was left enabled, unlike
    every sibling test; it is now commented out to keep test output quiet.
    """
    data = """
service
    vprn 1 name "vprn1" customer 10 create
        interface "loopback" create
        exit
        interface "interface-one" create
        exit
        interface "interface-two" create
        exit
        interface "bgp-interface" create
        exit
    exit
    vprn 2 name "vprn2" customer 20 create
        interface "loopback" create
        exit
        interface "interface-two" create
        exit
        interface "bgp-interface" create
        exit
    exit
    vprn 3 name "vprn3" customer 30 create
        interface "loopback" create
        exit
        interface "interface-two" create
        exit
    exit
    vprn 4 name "ospf_version3_vprn" customer 40 create
        interface "loopback" create
        exit
        interface "interface-two" create
        exit
    exit
    vprn 5 name "vprn5" customer 50 create
        interface "loopback" create
        exit
        interface "interface-two" create
        exit
        interface "bgp-interface" create
        exit
    exit
    vprn 1 name "vprn1" customer 10 create
        interface "loopback" create
            address 10.10.10.1/32
            loopback
        exit
        interface "interface-one" create
            address 10.10.10.10/30
            sap 1/1/1:10 create
            exit
        exit
        interface "interface-two" create
            address 10.10.10.100/31
            sap lag-5:80 create
            exit
        exit
        interface "bgp-interface" create
            address 10.10.10.200/31
            sap lag-4:100 create
            exit
        exit
        ospf
            area 0.0.0.0
                interface "interface-two"
                    passive
                    no shutdown
                exit
            exit
            no shutdown
        exit
        no shutdown
    exit
    vprn 2 name "vprn2" customer 20 create
        interface "interface-two" create
            address 10.11.11.10/31
            sap lag-1:50 create
            exit
        exit
        ospf
            area 0.0.0.0
                interface "interface-two"
                    passive
                    no shutdown
                exit
            exit
            no shutdown
        exit
        no shutdown
    exit
    vprn 3 name "vprn3" customer 30 create
        interface "loopback" create
            address 10.12.12.12/32
            loopback
        exit
        interface "interface-two" create
            address 10.12.12.100/31
            sap lag-5:33 create
            exit
        exit
        ospf
            area 0.0.0.0
                interface "interface-two"
                    passive
                    no shutdown
                exit
            exit
            no shutdown
        exit
        no shutdown
    exit
    vprn 4 name "ospf_version3_vprn" customer 40 create
        interface "loopback" create
            address 10.40.40.10/32
            ipv6
                address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:ae46/128
            exit
            loopback
        exit
        interface "interface-two" create
            address 10.40.40.100/31
            ipv6
                address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1111/64
            exit
            sap lag-5:800 create
            exit
        exit
        ospf
            area 0.0.0.0
                interface "interface-two"
                    passive
                    no shutdown
                exit
            exit
            no shutdown
        exit
        ospf3 0
            area 0.0.0.0
                interface "interface-two"
                    passive
                    no shutdown
                exit
            exit
            no shutdown
        exit
        no shutdown
    exit
    vprn 5 name "vprn5" customer 50 create
        interface "loopback" create
            address 10.50.50.50/32
            loopback
        exit
        interface "interface-two" create
            address 10.50.50.100/31
            sap lag-5:5 create
            exit
        exit
        interface "bgp-interface" create
            address 10.50.50.200/31
            sap lag-1:602 create
            exit
        exit
        bgp
            group "eBGP"
                peer-as 4444
                neighbor 10.50.50.201
                exit
            exit
            no shutdown
        exit
        ospf
            area 0.0.0.0
                interface "interface-two"
                    passive
                    no shutdown
                exit
            exit
            no shutdown
        exit
        no shutdown
    exit
exit
    """
    template = """
#-------------------------------------------------- {{ ignore }}
echo "Service Configuration" {{ ignore }}
#-------------------------------------------------- {{ ignore }}
    service {{ ignore }}
<group name="service.vprns*.{{id}}**">
vprn {{ id }} name {{ name | ORPHRASE | strip('"') }} customer {{ ignore }} create
    shutdown {{ admin_enabled | set("False") }}
    description {{ description | ORPHRASE | strip('"') }}
    vrf-import {{ import_policy | ORPHRASE | strip('"') }}
    router-id {{ router_id }}
    autonomous-system {{ local_as }}
    route-distinguisher {{ loopback_ip }}:{{ vrf_routedist }}
    vrf-target target:{{ ignore }}:{{ vrf_routetarget }}
    vrf-target {{ vrf_export }} target:{{ ignore }}:{{ vrf_routetarget }}
    <group name="interfaces*.{{name}}**">
    interface {{ name | ORPHRASE | strip('"') }} create
        shutdown {{ admin_enabled | set("False") }}
        description {{ description | ORPHRASE | strip('"') }}
        address {{ address | IP }}/{{ mask | DIGIT }}
        ip-mtu {{ mtu }}
        bfd {{ bfd_timers }} receive {{ ignore }} multiplier {{ bfd_interval }}
        <group name="vrrp">
        vrrp {{ instance }}
            backup {{ backup }}
            priority {{ priority }}
            policy {{ policy }}
            ping-reply {{ pingreply | set("True") }}
            traceroute-reply {{ traceroute_reply | set("True") }}
            init-delay {{ initdelay }}
            message-interval {{ message_int_seconds }}
            message-interval milliseconds {{ message_int_milliseconds }}
            bfd-enable 1 interface {{ bfd_interface | ORPHRASE | strip('"')}} dst-ip {{ bfd_dst_ip }}
        exit {{ _end_ }}
        </group>
        <group name="ipv6">
        ipv6 {{ _start_ }}
            address {{ address | IPV6 }}/{{ mask | DIGIT }}
            address {{ address | _start_ | IPV6 }}/{{ mask | DIGIT }} dad-disable
            link-local-address {{ linklocal_address | IPV6 }} dad-disable
            <group name="vrrp">
            vrrp {{ instance | _start_ }}
                <group name="backup*">
                backup {{ ip }}
                </group>
                priority {{ priority }}
                policy {{ policy }}
                ping-reply {{ pingreplay | set("True") }}
                traceroute-reply {{ traceroute_reply | set("True") }}
                init-delay {{ initdelay }}
                message-interval milliseconds {{ message_int_milliseconds }}
            exit {{ _end_ }}
            </group>
        exit {{ _end_ }}
        </group>
        <group name="vpls">
        vpls {{ vpls_name | ORPHRASE | strip('"') | _start_ }}
        exit {{ _end_ }}
        </group>
        <group name="sap**">
        sap {{ port | _start_ }}:{{ vlan | DIGIT }} create
            ingress {{ _exact_ }}
                qos {{ qos_sap_ingress }}
            <group name="_">
            egress {{ _start_ }}
                qos {{ qos_sap_egress }}
            </group>
            collect-stats {{ collect_stats | set("True") }}
            accounting-policy {{ accounting_policy }}
        exit {{ _end_}}
        </group>
    exit {{ _end_}}
    </group>
    <group name="staticroutes*">
    static-route-entry {{ prefix | PREFIX | _start_ }}
        black-hole {{ blackhole | set("True") }}
        next-hop {{ nexthop | IP }}
            shutdown {{ admin_enabled | set("False") }}
            no shutdown {{ admin_enabled | set("True") }}
    exit {{ _end_ }}
    </group>
    <group name="aggregates">
    aggregate {{ agg_block | PREFIX | _start_ }} summary-only
    </group>
    <group name="router_advertisement">
    router-advertisement {{ _start_ }}
        interface {{ interface | ORPHRASE | strip('"') }}
            use-virtual-mac {{ use_virtualmac | set("True") }}
            no shutdown {{ admin_enabled | set("True") }}
        exit {{ _end_ }}
    </group>
    <group name="bgp**">
    bgp {{ _start_ }}
        min-route-advertisement {{ min_route_advertisement | DIGIT }}
        <group name="peergroups*">
        group {{ name | ORPHRASE | strip('"') }}
            family {{ family | ORPHRASE | split(" ") }}
            type {{ peer_type | ORPHRASE }}
            import {{ importpolicy | ORPHRASE | strip('"') }}
            export {{ exportpolicy | ORPHRASE | strip('"') }}
            peer-as {{ remote_as }}
            bfd-enable {{ bfd_enabled | set("True") }}
            <group name="neighbors*">
            neighbor {{ address | IP | _start_ }}
            neighbor {{ address | IPV6 | _start_ }}
                shutdown {{ admin_enabled | set("False") }}
                keepalive {{ keepalive }}
                hold-time {{ holdtime }}
                bfd-enable {{ bfd_enabled | set("True") }}
                as-override {{ as_override | set("True") }}
            exit {{ _end_ }}
            </group>
        exit {{ _end_ }}
        </group>
        no shutdown {{ admin_enabled | set("True") | _start_ }}
    exit {{ _end_ }}
    </group>
    <group name="ospf**">
    ospf {{ _start_ }}{{ _exact_ }}
        area {{ area }}
            <group name="interfaces*">
            interface {{ name | ORPHRASE | strip('"') | _start_ }}
                passive {{ passive | set("True") }}
            exit {{ _end_ }}
            </group>
        no shutdown {{ admin_enabled | set("True") }}
    exit {{ _end_ }}
    </group>
    <group name="ospf3**">
    ospf3 0 {{ _start_ }}{{ _exact_ }}
        area {{ area }}
            <group name="interfaces*">
            interface {{ name | ORPHRASE | strip('"') | _start_ }}
                passive {{ passive | set("True") }}
            exit {{ _end_ }}
            </group>
        no shutdown {{ admin_enabled | set("True") }}
    exit {{ _end_ }}
    </group>
    no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=100)
    assert res == [
        [
            {
                "service": {
                    "vprns": [
                        {
                            "1": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "bgp-interface": {
                                            "address": "10.10.10.200",
                                            "mask": "31",
                                            "sap": {"port": "lag-4", "vlan": "100"},
                                        },
                                        "interface-one": {
                                            "address": "10.10.10.10",
                                            "mask": "30",
                                            "sap": {"port": "1/1/1", "vlan": "10"},
                                        },
                                        "interface-two": {
                                            "address": "10.10.10.100",
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "80"},
                                        },
                                        "loopback": {
                                            "address": "10.10.10.1",
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "vprn1",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "2": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "bgp-interface": {},
                                        "interface-two": {
                                            "address": "10.11.11.10",
                                            "mask": "31",
                                            "sap": {"port": "lag-1", "vlan": "50"},
                                        },
                                        "loopback": {},
                                    }
                                ],
                                "name": "vprn2",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "3": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "interface-two": {
                                            "address": "10.12.12.100",
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "33"},
                                        },
                                        "loopback": {
                                            "address": "10.12.12.12",
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "vprn3",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "4": {
                                "admin_enabled": "True",
                                "interfaces": [
                                    {
                                        "interface-two": {
                                            "address": "10.40.40.100",
                                            "ipv6": {
                                                "address": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1111",
                                                "mask": "64",
                                            },
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "800"},
                                        },
                                        "loopback": {
                                            "address": "10.40.40.10",
                                            "ipv6": {
                                                "address": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:ae46",
                                                "mask": "128",
                                            },
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "ospf_version3_vprn",
                                "ospf": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                                "ospf3": {
                                    "admin_enabled": "True",
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                            "5": {
                                "admin_enabled": "True",
                                "bgp": {
                                    "admin_enabled": "True",
                                    "peergroups": [
                                        {
                                            "name": "eBGP",
                                            "neighbors": [{"address": "10.50.50.201"}],
                                            "remote_as": "4444",
                                        }
                                    ],
                                },
                                "interfaces": [
                                    {
                                        "bgp-interface": {
                                            "address": "10.50.50.200",
                                            "mask": "31",
                                            "sap": {"port": "lag-1", "vlan": "602"},
                                        },
                                        "interface-two": {
                                            "address": "10.50.50.100",
                                            "mask": "31",
                                            "sap": {"port": "lag-5", "vlan": "5"},
                                        },
                                        "loopback": {
                                            "address": "10.50.50.50",
                                            "mask": "32",
                                        },
                                    }
                                ],
                                "name": "vprn5",
                                "ospf": {
                                    "area": "0.0.0.0",
                                    "interfaces": [
                                        {"name": "interface-two", "passive": "True"}
                                    ],
                                },
                            },
                        }
                    ]
                }
            }
        ]
    ]
# test_slack_answer_3_full()
def test_issue_45_for_junos_cfg():
    """
    Parse a curly-brace JunOS "system" stanza with nested TTP groups
    (issue 45): services/ssh/netconf plus tacplus-server entries keyed by
    server IP via a dynamic group path.

    NOTE(review): the tacacs groups use ``{{ end }}`` (a plain match
    variable named "end") rather than the ``{{ _end_ }}`` indicator used
    elsewhere in this template — confirm this is intentional.
    NOTE(review): the expected tacacs server keys (1.1.1.1/2.2.2.2/4.4.4.4,
    tac_source 5.5.5.5) do not match the addresses present in ``data``
    above — the literals look inconsistently scrubbed; verify against the
    original test fixture.
    """
    data = """
system {
    host-name LAB-MX-1;
    time-zone some/time;
    default-address-selection;
    no-redirects;
    no-ping-record-route;
    no-ping-time-stamp;
    tacplus-server {
        1.1.1.1 {
            port 49;
            secret "<SECRET_HASH>"; ## SECRET-DATA
            source-address 172.16.17.32;
        }
        172.16.31.10 {
            port 49;
            secret "<SECRET_HASH>"; ## SECRET-DATA
            source-address 172.16.17.32;
        }
        172.16.17.32 {
            port 49;
            secret "<SECRET_HASH>"; ## SECRET-DATA
            source-address 172.16.17.32;
        }
    }
    services {
        ssh {
            root-login deny;
            no-tcp-forwarding;
            protocol-version v2;
            max-sessions-per-connection 32;
            client-alive-count-max 3;
            client-alive-interval 10;
            connection-limit 10;
            rate-limit 5;
        }
        netconf {
            ssh {
                connection-limit 10;
                rate-limit 4;
            }
        }
    }
}
    """
    template = """
<group name="system_level">
system { {{ _start_ }}
    host-name {{ HOSTNAME }};
    time-zone {{ TZ }};
    default-address-selection; {{ default_address_selection | set(True) }}
    no-redirects; {{ no_redirects | set(True) }}
    no-ping-record-route; {{ no_ping_record_route | set(True) }}
    no-ping-time-stamp; {{ no_ping_time_stamp | set(True) }}
    <group name="services">
    services { {{ _start_ }}
        <group name="{{ service }}">
        {{ service }} {
            http; {{ http | set(true) }}
            https; {{ https | set(true) }}
            no-tcp-forwarding; {{ no-tcp-fwding | set(true) }}
            protocol-version {{ ssh-proto }};
            connection-limit {{ connection-limit | DIGIT }};
            rate-limit {{rate-limit | DIGIT }};
            root-login deny; {{ root-login | set(false) }}
            max-sessions-per-connection {{ max-sessions | DIGIT }};
            client-alive-count-max {{ client-alive-count-max | DIGIT }};
            client-alive-interval {{ client-alive-interval | DIGIT }};
            <group name="ssh">
            ssh; {{ ssh | set(true) }}
            </group>
            <group name="ssh">
            ssh { {{ _start_ }}
                connection-limit {{ connection-limit | DIGIT }};
                rate-limit {{ rate-limit | DIGIT }};
            } {{ _end_ }}
            </group>
        } {{ _end_ }}
        </group>
    } {{ _end_ }}
    </group>
    <group name="internet-options">
    internet-options { {{ _start_ }}
        icmpv4-rate-limit packet-rate {{ packet-rate| DIGIT }};
        icmpv6-rate-limit packet-rate {{ packet-rate| DIGIT }};
        no-source-quench; {{ no-source-quench | set(true) }}
        tcp-drop-synfin-set; {{ tcp-drop-synfin-set | set(true) }}
        no-tcp-reset {{ no-tcp-reset }};
    } {{ _end_ }}
    </group>
    authentication-order [{{ authentication-order }}];
    <group name="ports">
    ports { {{ _start_ }}
        auxiliary disable; {{ auxiliary | set(false) }}
    } {{ _end_ }}
    </group>
    <group name="root-authentication">
    root-authentication { {{ _start_ }}
        encrypted-password "{{ <PASSWORD>-password }}"; ## SECRET-DATA
    } {{ _end_ }}
    </group>
    <group name="dns" itemize="name_server">
    name-server { {{ _start_ }}
        {{ name_server | IP | _line_ | to_list }};
    } {{ _end_ }}
    </group>
    <group name="commit">
    commit { {{ _start_ }}
        synchronize; {{ commit_sync | set(true) }}
        persist-groups-inheritance; {{ commit_persist-groups-inherit | set(true) }}
    } {{ _end_ }}
    </group>
    <group name="tacacs">
    tacplus-server { {{ _start_ }}
        <group name="tacacs-servers.{{ tac_server }}">
        {{ tac_server | IP }} {
            port {{ tac_port }};
            secret "{{ tac_secret }}"; ## SECRET-DATA
            source-address {{ tac_source | IP }};
        } {{ end }}
        </group>
    } {{ end }}
    </group>
} {{ end }}
</group>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=100)
    assert res == [
        [
            {
                "system_level": {
                    "HOSTNAME": "LAB-MX-1",
                    "TZ": "some/time",
                    "default_address_selection": True,
                    "no_ping_record_route": True,
                    "no_ping_time_stamp": True,
                    "no_redirects": True,
                    "services": {
                        "netconf": {
                            "ssh": {"connection-limit": "10", "rate-limit": "4"}
                        },
                        "ssh": {
                            "client-alive-count-max": "3",
                            "client-alive-interval": "10",
                            "connection-limit": "10",
                            "max-sessions": "32",
                            "no-tcp-fwding": True,
                            "rate-limit": "5",
                            "root-login": False,
                            "ssh-proto": "v2",
                        },
                    },
                    "tacacs": {
                        "tacacs-servers": {
                            "1.1.1.1": {
                                "tac_port": "49",
                                "tac_secret": "<SECRET_HASH>",
                                "tac_source": "5.5.5.5",
                            },
                            "2.2.2.2": {
                                "tac_port": "49",
                                "tac_secret": "<SECRET_HASH>",
                                "tac_source": "5.5.5.5",
                            },
                            "4.4.4.4": {
                                "tac_port": "49",
                                "tac_secret": "<SECRET_HASH>",
                                "tac_source": "5.5.5.5",
                            },
                        }
                    },
                }
            }
        ]
    ]
# test_issue_45_for_junos_cfg()
def test_faq_multiline_output_matching():
    """Join a multi-line system description into one value via _line_ + joinmatches."""
    lldp_output = """
Local Intf: Te2/1/23
System Name: r1.lab.local

System Description:
Cisco IOS Software, Catalyst 1234 L3 Switch Software (cat1234e-ENTSERVICESK9-M), Version 1534.1(1)SG, RELEASE SOFTWARE (fc3)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2012 by Cisco Systems, Inc.
Compiled Sun 15-Apr-12 02:35 by p

Time remaining: 92 seconds
    """
    ttp_template = """
<group>
Local Intf: {{ local_intf }}
System Name: {{ peer_name }}
<group name="peer_system_description">
System Description: {{ _start_ }}
{{ sys_description | _line_ | joinmatches(" ") }}
Time remaining: {{ ignore }} seconds {{ _end_ }}
</group>
</group>
    """
    # The free-form description lines are collapsed into a single
    # space-joined string by the joinmatches(" ") function.
    expected_description = (
        "Cisco IOS Software, Catalyst 1234 L3 Switch "
        "Software (cat1234e-ENTSERVICESK9-M), Version "
        "1534.1(1)SG, RELEASE SOFTWARE (fc3) Technical "
        "Support: http://www.cisco.com/techsupport "
        "Copyright (c) 1986-2012 by Cisco Systems, Inc. "
        "Compiled Sun 15-Apr-12 02:35 by p"
    )
    parser = ttp(lldp_output, ttp_template, log_level="ERROR")
    parser.parse()
    parsed = parser.result()
    assert parsed == [
        [
            [
                {
                    "local_intf": "Te2/1/23",
                    "peer_name": "r1.lab.local",
                    "peer_system_description": {
                        "sys_description": expected_description
                    },
                }
            ]
        ]
    ]
# test_faq_multiline_output_matching()
def test_issue_52_answer():
    """Post-process joined multi-line matches with a group macro (issue 52)."""
    raw_text = """
Origin:
Some random name
Example Address, example number, example city
Origin:
Some random name 2
Example Address, example number, example city 2
Origin:
Some random name 3
Example Address, example number, example city 3
One more string
    """
    # The macro splits each joined match back into its first two lines,
    # returning a {name, address} dict per "Origin:" record.
    origins_template = """
<macro>
def process(data):
    lines = data["match"].splitlines()
    name = lines[0]
    address = lines[1]
    return {"name": name, "address": address}
</macro>
<group name="origin*" macro="process">
Origin: {{ _start_ }}
{{ match | _line_ | joinmatches }}
</group>
    """
    parser = ttp(raw_text, origins_template, log_level="ERROR")
    parser.parse()
    parsed = parser.result()
    expected_origins = [
        {"name": "Some random name", "address": "Example Address, example number, example city"},
        {"name": "Some random name 2", "address": "Example Address, example number, example city 2"},
        {"name": "Some random name 3", "address": "Example Address, example number, example city 3"},
    ]
    assert parsed == [[{"origin": expected_origins}]]
# test_issue_52_answer()
def test_issue_51_answer():
    """Test workaround for removing <> chars from input data (issue 51)."""
    html_like_data = """
Name:Jane<br>
Name:Michael<br>
Name:July<br>
    """
    # Replacing "<" / ">" with HTML entities inside the template keeps the
    # template valid XML while still matching the literal angle brackets.
    names_template = """
<group name="people">
Name:{{ name }}<br>
</group>
    """
    # Alternative that also works: strip the angle brackets from the data
    # and use the plain template "Name:{{ name }}br".
    # Using a raw "<br>" in the template fails with an XML parsing error.
    parser = ttp(html_like_data, names_template, log_level="ERROR")
    parser.parse()
    parsed = parser.result()
    expected_people = [{"name": "Jane"}, {"name": "Michael"}, {"name": "July"}]
    assert parsed == [[{"people": expected_people}]]
# test_issue_51_answer()
def test_issue_50():
    """
    Parse SROS interfaces using an inline ``<input load="text">`` data tag
    (issue 50): two sibling groups share the same dynamic path
    ``ifaces.{{ name }}`` — one for top-level interfaces, one for
    redundant/subscriber interfaces — both filtered by
    ``contains="ipv4,ipv6"`` so interfaces without addresses are dropped.

    NOTE(review): some expected values (e.g. BNG-RH202-CORE ipv4
    "172.16.17.32/31", system "192.168.127.12/32") do not match the
    addresses in the inline input (22.22.22.22/31, 33.33.33.33/32) —
    literals look inconsistently scrubbed; verify against the original
    test fixture.
    """
    template = """
<input load="text">
    interface "BNG-RH201-CORE"
        address 11.11.11.11/31
        description "BNG-RH201-CORE"
        ldp-sync-timer 10
        port lag-107:709
        ipv6
            address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64
        exit
        bfd 150 receive 150 multiplier 3
        no shutdown
    exit
    interface "BNG-RH202-CORE"
        address 22.22.22.22/31
        description "BNG-RH201-CORE"
        ldp-sync-timer 10
        port lag-108:809
        ipv6
            address fdf8:f53e:61e4::18/64
        exit
        bfd 150 receive 150 multiplier 3
        no shutdown
    exit
    interface "system"
        address 33.33.33.33/32
        ipv6
            address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128
        exit
        no shutdown
    exit
    ies 97 name "OTDR-MGT" customer 1 create
        description "OTDR-MGT"
        interface "OTDR-MGT" create
            address 172.16.31.10/25
            vrrp 97
                backup 10.20.30.1
                priority 200
            exit
            vpls "OTDR-MGT-VPLS"
            exit
        exit
        no shutdown
    exit
    ies 99 name "OLT-MGT" customer 1 create
        description "OLT-INBAND-MGT"
        interface "OLT-MGT" create
            address 192.168.3.11/25
            vrrp 1
                backup 10.20.40.1
                priority 200
            exit
            vpls "OLT-MGT-VPLS"
            exit
        exit
        no shutdown
    exit
    ies 100 name "100" customer 1 create
        description "IES 100 for subscribers"
        redundant-interface "shunt" create
            address 66.66.66.66/31
            spoke-sdp 1:100 create
                no shutdown
            exit
        exit
        subscriber-interface "s100" create
            description " Subscriber interface for subscribers"
            allow-unmatching-subnets
            address 172.16.58.3/22 gw-ip-address 192.168.3.11
            address 172.16.31.10/20 gw-ip-address 192.168.3.11
            group-interface "s100-lag210-vlan101" create
                tos-marking-state trusted
                ipv6
                    router-advertisements
                        managed-configuration
                        no shutdown
                    exit
                    dhcp6
                        proxy-server
                            no shutdown
                        exit
                    exit
                exit
            exit
        exit
</input>
<group name="ifaces.{{ name }}" contains="ipv4,ipv6">
## group to match top level interfaces
interface "{{ name }}"
    description {{ description | re(".+") | strip('"') }}
    address {{ ipv4 | joinmatches('; ') }}
        address {{ ipv6 | contains(":") | joinmatches('; ') }}
exit {{ _end_ }}
</group>
<group name="ifaces.{{ name }}" contains="ipv4,ipv6">
## group to match lower level interfaces
        interface "{{ name | _start_ }}" create
        {{ iftype }}-interface "{{ name | _start_ }}" create
            description {{ description | re(".+") | strip('"') | strip }}
            address {{ ipv4 | contains(".") | joinmatches('; ') }}
            address {{ ipv4 | contains(".") | joinmatches('; ') }} gw-ip-address {{ ignore }}
        exit {{ _end_ }}
</group>
    """
    parser = ttp(template=template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "ifaces": {
                    "BNG-RH201-CORE": {
                        "description": "BNG-RH201-CORE",
                        "ipv4": "11.11.11.11/31",
                        "ipv6": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64",
                    },
                    "BNG-RH202-CORE": {
                        "description": "BNG-RH201-CORE",
                        "ipv4": "172.16.17.32/31",
                        "ipv6": "fdf8:f53e:61e4::18/64",
                    },
                    "OLT-MGT": {"ipv4": "192.168.3.11/25"},
                    "OTDR-MGT": {"ipv4": "172.16.31.10/25"},
                    "s100": {
                        "description": "Subscriber interface for subscribers",
                        "iftype": "subscriber",
                        "ipv4": "172.16.58.3/22; 172.16.31.10/20",
                    },
                    "shunt": {"iftype": "redundant", "ipv4": "66.66.66.66/31"},
                    "system": {
                        "ipv4": "192.168.127.12/32",
                        "ipv6": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128",
                    },
                }
            }
        ]
    ]
# test_issue_50()
def test_start_with_set():
    """Combine set(...) with _start_ so each matched line yields its own result."""
    sample = """
authentication {
    inactive: authentication {
    """
    # Two independent _start_ lines: active stanza -> False, inactive -> True.
    start_template = """
authentication { {{ inactive | set(False) | _start_ }}
    inactive: authentication { {{ inactive | set(True) | _start_ }}
    """
    parser = ttp(sample, start_template, log_level="ERROR")
    parser.parse()
    parsed = parser.result()
    expected = [{"inactive": False}, {"inactive": True}]
    assert parsed == [[expected]]
# test_start_with_set()
def test_ios_bgp_pers_pars():
    """
    Parse IOS BGP neighbor configuration using per-group defaults: the
    ``default="defaults_bgp_peers"`` attribute fills missing per-peer
    fields from the <vars> dictionary, and ``let("shutdown", "yes")``
    overrides the default when a shutdown line is present.

    Fix applied here: log_level was left at "DEBUG" (flooding test output);
    changed to "ERROR" for consistency with the other tests in this file.
    """
    template = """
<vars>
defaults_bgp_peers = {
    "description": "",
    "remote-as": "",
    "shutdown": "no",
    "inherit_peer-session": "",
    "update-source": "",
    "password": ""
}
</vars>
<group name="bgp_peers">
<group name="{{ ASN }}">
router bgp {{ ASN }}
 <group name="{{ PeerIP }}" default="defaults_bgp_peers">
 neighbor {{ PeerIP }} remote-as {{ remote-as }}
 neighbor {{ PeerIP }} description {{ description | ORPHRASE }}
 neighbor {{ PeerIP | let("shutdown", "yes") }} shutdown
 neighbor {{ PeerIP }} inherit peer-session {{ inherit_peer-session }}
 neighbor {{ PeerIP }} password {{ password | ORPHRASE }}
 neighbor {{ PeerIP }} update-source {{ update-source }}
 </group>
</group>
</group>
    """
    data = """
router bgp 65100
 neighbor 1.1.1.1 remote-as 1234
 neighbor 1.1.1.1 description Some Description here
 neighbor 1.1.1.1 shutdown
 neighbor 1.1.1.1 inherit peer-session session_1
 neighbor 1.1.1.1 password <PASSWORD>
 neighbor 1.1.1.1 update-source Loopback 1
 neighbor 1.1.1.2 remote-as 1234
 neighbor 1.1.1.2 inherit peer-session session_1
 neighbor 1.1.1.2 update-source Loopback 1
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "bgp_peers": {
                    "65100": {
                        "1.1.1.1": {
                            "description": "Some Description here",
                            "inherit_peer-session": "session_1",
                            "password": "<PASSWORD>",
                            "remote-as": "1234",
                            "shutdown": "yes",
                            "update-source": "",
                        },
                        "1.1.1.2": {
                            "description": "",
                            "inherit_peer-session": "session_1",
                            "password": "",
                            "remote-as": "1234",
                            "shutdown": "no",
                            "update-source": "",
                        },
                    }
                }
            }
        ]
    ]
# test_ios_bgp_pers_pars()
def test_ip_address_parsing():
    """
    Parse IOS "ip address" lines: the primary address is excluded from
    secondary matches via ``exclude("secondary")``, while secondary
    addresses land in a child group tagged ``is_secondary`` via ``let``;
    ``to_ip | with_prefixlen`` converts "addr mask" into CIDR notation.

    Fix applied here: the parser was constructed without log_level, unlike
    every other test in this file; "ERROR" added to keep output quiet.
    """
    data = """
interface Vlan99
 description vlan99_interface
 ip address 192.168.127.12 255.255.255.0 secondary
 ip address 192.168.3.11 255.255.255.0 secondary
 ip address 10.99.10.1 255.255.255.0
 load-interval 60
 bandwidth 10000000
!
interface Vlan100
 description vlan100_interface
 ip address 10.100.10.1 255.255.255.0
 load-interval 60
 bandwidth 10000000
!
    """
    template = """
<group name="interface">
interface {{ interface }}
 description {{ description }}
 ip address {{ ipv4_addr | PHRASE | exclude("secondary") | to_ip | with_prefixlen }}
 load-interval {{ load-interval }}
 bandwidth {{ bandwidth }}
 <group name="ipv4_secondary*">
 ip address {{ ipv4_addr | PHRASE | let("is_secondary", True) | to_ip | with_prefixlen }} secondary
 </group>
</group>
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [
        [
            {
                "interface": [
                    {
                        "bandwidth": "10000000",
                        "description": "vlan99_interface",
                        "interface": "Vlan99",
                        "ipv4_addr": "10.99.10.1/24",
                        "ipv4_secondary": [
                            {"ipv4_addr": "192.168.127.12/24", "is_secondary": True},
                            {"ipv4_addr": "192.168.3.11/24", "is_secondary": True},
                        ],
                        "load-interval": "60",
                    },
                    {
                        "bandwidth": "10000000",
                        "description": "vlan100_interface",
                        "interface": "Vlan100",
                        "ipv4_addr": "10.100.10.1/24",
                        "load-interval": "60",
                    },
                ]
            }
        ]
    ]
# test_ip_address_parsing()
def test_vlans_parsing():
    """
    Parse "show interfaces trunk"-style output: the three vlan sections
    are delimited by their header lines (``_start_``/``_end_``), and vlan
    ranges like "1002-1005" are expanded via ``unrange('-', ',')`` before
    being split into lists.

    Fix applied here: log_level was left at "DEBUG" (flooding test output);
    changed to "ERROR" for consistency with the other tests in this file.
    """
    template = """
<group name="ports_summary*">
{{ port }}          {{ mode }}         {{ encap }}  {{ satus }}       {{ native_vlan | DIGIT }}
</group>

<group name="vlans_allowed">
Port        Vlans allowed on trunk {{ _start_ }}
<group name="interfaces*">
{{ port }}          {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>

<group name="vlans_active">
Port        Vlans allowed and active in management domain {{ _start_ }}
<group name="interfaces*">
{{ port }}          {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>

<group name="vlans_forwarding">
Port        Vlans in spanning tree forwarding state and not pruned {{ _start_ }}
<group name="interfaces*">
{{ port }}          {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
    """
    data = """
Port        Mode             Encapsulation  Status        Native vlan
Gi0          on               802.1q         trunking      1
Gi7          on               802.1q         trunking      1

Port        Vlans allowed on trunk
Gi0          1,8,999,1002-1005
Gi7          1,100,120,1000,1002-1005

Port        Vlans allowed and active in management domain
Gi0          1,8,999
Gi7          1,100,120,1000

Port        Vlans in spanning tree forwarding state and not pruned
Gi0          1,8,999
Gi7          1,100,120,1000
    """
    parser = ttp(data, template, log_level="ERROR")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=120)
    assert res == [
        [
            {
                "ports_summary": [
                    {
                        "encap": "802.1q",
                        "mode": "on",
                        "native_vlan": "1",
                        "port": "Gi0",
                        "satus": "trunking",
                    },
                    {
                        "encap": "802.1q",
                        "mode": "on",
                        "native_vlan": "1",
                        "port": "Gi7",
                        "satus": "trunking",
                    },
                ],
                "vlans_active": {
                    "interfaces": [
                        {"port": "Gi0", "vlans": ["1", "8", "999"]},
                        {"port": "Gi7", "vlans": ["1", "100", "120", "1000"]},
                    ]
                },
                "vlans_allowed": {
                    "interfaces": [
                        {
                            "port": "Gi0",
                            "vlans": ["1", "8", "999", "1002", "1003", "1004", "1005"],
                        },
                        {
                            "port": "Gi7",
                            "vlans": [
                                "1",
                                "100",
                                "120",
                                "1000",
                                "1002",
                                "1003",
                                "1004",
                                "1005",
                            ],
                        },
                    ]
                },
                "vlans_forwarding": {
                    "interfaces": [
                        {"port": "Gi0", "vlans": ["1", "8", "999"]},
                        {"port": "Gi7", "vlans": ["1", "100", "120", "1000"]},
                    ]
                },
            }
        ]
    ]
# test_vlans_parsing()
def test_asa_acls_issue_55_uses_itemize_with_dynamic_path():
    """Parse Cisco ASA object-groups using ``itemize`` with a dynamic path.

    The child group name ``{{ type }}-objects`` is resolved dynamically at
    match time via ``let("type", ...)`` on each match line, so network,
    group and service objects are itemized under separate keys (issue 55).
    """
    data = """
object-group service gokuhead
service-object tcp-udp destination eq gokurpc
service-object tcp destination eq 902
service-object tcp destination eq https
service-object tcp destination eq nfs
service-object tcp destination eq 10025
object-group network gohan
network-object object gohan-01
network-object object gohan-02
network-object object vlan_944
network-object object gohan-03
network-object object gohan-05
network-object object gohan-06
object-group service sql tcp
port-object eq 1433
object-group network vegeta
group-object trunks
network-object object vegeta-01
object-group network Space-Users
network-object object ab
network-object object ac
network-object object ad
network-object object ae
network-object object af
network-object object ag
network-object object ah
network-object object ai
network-object object aj
object-group network dalmatians
network-object object dog-01
group-object trunks
network-object object vlan_950
group-object Space-Users
network-object object Darts-Summary
"""
    # Parent group path is dynamic as well: results are grouped first by
    # object_type ("network"/"service"), then by object_name.
    template = """
<vars>
SVC_PORTS = "tcp-udp|tcp|udp"
</vars>
<group name="object-{{ object_type }}-groups**.{{ object_name }}**">
object-group {{ object_type }} {{ object_name | _start_ }}
object-group {{ object_type }} {{ object_name | _start_ }} {{ protocol | re("SVC_PORTS")}}
description {{ description | re(".*") }}
<group name="{{ type }}-objects" itemize="obj_name" method="table">
network-object object {{ obj_name | let("type", "network") }}
network-object host {{ obj_name | IP | let("type", "network") }}
group-object {{ obj_name | let("type", "group") }}
service-object object {{ obj_name | let("type", "service") }}
service-object {{ obj_name | let("type", "service") }}
</group>
<group name="service-object-ports*">
service-object {{ protocol | re("SVC_PORTS") }} destination eq {{port}}
</group>
<group name="service-object-port-ranges*">
service-object {{ protocol | re("SVC_PORTS") }} destination range {{port_begin}} {{port_end}}
</group>
<group name="service-port-objects" itemize="port_obj">
port-object eq {{ port_obj }}
</group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "object-network-groups": {
                    "Space-Users": {
                        "network-objects": [
                            "ab",
                            "ac",
                            "ad",
                            "ae",
                            "af",
                            "ag",
                            "ah",
                            "ai",
                            "aj",
                        ]
                    },
                    "dalmatians": {
                        "group-objects": ["trunks", "Space-Users"],
                        "network-objects": ["dog-01", "vlan_950", "Darts-Summary"],
                    },
                    "gohan": {
                        "network-objects": [
                            "gohan-01",
                            "gohan-02",
                            "vlan_944",
                            "gohan-03",
                            "gohan-05",
                            "gohan-06",
                        ]
                    },
                    "vegeta": {
                        "group-objects": ["trunks"],
                        "network-objects": ["vegeta-01"],
                    },
                },
                "object-service-groups": {
                    "gokuhead": {
                        "service-object-ports": [
                            {"port": "gokurpc", "protocol": "tcp-udp"},
                            {"port": "902", "protocol": "tcp"},
                            {"port": "https", "protocol": "tcp"},
                            {"port": "nfs", "protocol": "tcp"},
                            {"port": "10025", "protocol": "tcp"},
                        ]
                    },
                    "sql": {"protocol": "tcp", "service-port-objects": ["1433"]},
                },
            }
        ]
    ]
# test_asa_acls_issue_55()
def test_asa_acls_issue_55():
    """Parse Cisco ASA object-groups with statically named itemize groups.

    Same data and expected result as the dynamic-path variant above, but
    each object class gets its own explicitly named itemized group
    (issue 55).
    """
    data = """
object-group service gokuhead
service-object tcp-udp destination eq gokurpc
service-object tcp destination eq 902
service-object tcp destination eq https
service-object tcp destination eq nfs
service-object tcp destination eq 10025
object-group network gohan
network-object object gohan-01
network-object object gohan-02
network-object object vlan_944
network-object object gohan-03
network-object object gohan-05
network-object object gohan-06
object-group service sql tcp
port-object eq 1433
object-group network vegeta
group-object trunks
network-object object vegeta-01
object-group network Space-Users
network-object object ab
network-object object ac
network-object object ad
network-object object ae
network-object object af
network-object object ag
network-object object ah
network-object object ai
network-object object aj
object-group network dalmatians
network-object object dog-01
group-object trunks
network-object object vlan_950
group-object Space-Users
network-object object Darts-Summary
"""
    # NOTE(review): the group name "group-objects" is reused for the
    # service-object matches; presumably intentional since the sample data
    # produces no matches for that second group - verify if data changes.
    template = """
<vars>
SVC_PORTS = "tcp-udp|tcp|udp"
</vars>
<group name="object-{{ object_type }}-groups**.{{ object_name }}**">
object-group {{ object_type }} {{ object_name | _start_ }}
object-group {{ object_type }} {{ object_name | _start_ }} {{ protocol | re("SVC_PORTS")}}
description {{ description | re(".*") }}
<group name="network-objects" itemize="obj_name" method="table">
network-object object {{ obj_name | }}
network-object host {{ obj_name | IP }}
</group>
<group name="group-objects" itemize="obj_name" method="table">
group-object {{ obj_name }}
</group>
<group name="group-objects" itemize="obj_name" method="table">
service-object object {{ obj_name }}
service-object {{ obj_name }}
</group>
<group name="service-object-ports*">
service-object {{ protocol | re("SVC_PORTS") }} destination eq {{port}}
</group>
<group name="service-object-port-ranges*">
service-object {{ protocol | re("SVC_PORTS") }} destination range {{port_begin}} {{port_end}}
</group>
<group name="service-port-objects" itemize="port_obj">
port-object eq {{ port_obj }}
</group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "object-network-groups": {
                    "Space-Users": {
                        "network-objects": [
                            "ab",
                            "ac",
                            "ad",
                            "ae",
                            "af",
                            "ag",
                            "ah",
                            "ai",
                            "aj",
                        ]
                    },
                    "dalmatians": {
                        "group-objects": ["trunks", "Space-Users"],
                        "network-objects": ["dog-01", "vlan_950", "Darts-Summary"],
                    },
                    "gohan": {
                        "network-objects": [
                            "gohan-01",
                            "gohan-02",
                            "vlan_944",
                            "gohan-03",
                            "gohan-05",
                            "gohan-06",
                        ]
                    },
                    "vegeta": {
                        "group-objects": ["trunks"],
                        "network-objects": ["vegeta-01"],
                    },
                },
                "object-service-groups": {
                    "gokuhead": {
                        "service-object-ports": [
                            {"port": "gokurpc", "protocol": "tcp-udp"},
                            {"port": "902", "protocol": "tcp"},
                            {"port": "https", "protocol": "tcp"},
                            {"port": "nfs", "protocol": "tcp"},
                            {"port": "10025", "protocol": "tcp"},
                        ]
                    },
                    "sql": {"protocol": "tcp", "service-port-objects": ["1433"]},
                },
            }
        ]
    ]
# test_asa_acls_issue_55()
def test_issue_57_headers_parsing():
    """
    Issue first was with startempty match not being selected in favour
    of start match produced by headers :
    Interface Link Protocol Primary_IP Description {{ _headers_ }}
    that was fixed by adding this code to the TTP selection logic for multiple
    matches:
    # startempty RE always more preferred
    if startempty_re:
        for index in startempty_re:
            re_ = result[index][0]
            result_data = result[index][1]
            # skip results that did not pass validation check
            if result_data == False:
                continue
            # prefer result with same path as current record
            elif re_["GROUP"].group_id == self.record["GRP_ID"]:
                break
            # prefer children of current record group
            elif self.record["GRP_ID"] and re_["GROUP"].group_id[
                0
            ].startswith(self.record["GRP_ID"][0]):
                break
    # start RE preferred next
    elif start_re:
    Another problem was with
    Interface Link Protocol Primary_IP Description {{ _headers_ }}
    matching on "Duplex: (a)/A - auto; H - half; F - full" line, that was fixed
    by changing _end_ logic by introducing self.ended_groups set to _results_class
    and replacing self.GRPLOCL with logic to use self.ended_groups instead.
    All in all it resulted in better _end_ handling behavior and allowed to fix issue
    45 as well where before this one had to use filtering instead, but now _end_ also
    helps.
    """
    data = """
Brief information on interfaces in route mode:
Link: ADM - administratively down; Stby - standby
Protocol: (s) - spoofing
Interface Link Protocol Primary IP Description
InLoop0 UP UP(s) --
REG0 UP -- --
Vlan401 UP UP 10.251.147.36 HSSBC_to_inband_mgmt_r4
Brief information on interfaces in bridge mode:
Link: ADM - administratively down; Stby - standby
Speed: (a) - auto
Duplex: (a)/A - auto; H - half; F - full
Type: A - access; T - trunk; H - hybrid
Interface Link Speed Duplex Type PVID Description
BAGG1 UP 20G(a) F(a) T 1 to-KDC-R4.10-Core-1
BAGG14 UP 10G(a) F(a) T 1 KDC-R429-E1 BackUp Chassis
BAGG22 UP 20G(a) F(a) T 1 HSSBC-NS-01
FGE1/0/49 DOWN auto A A 1
XGE1/0/1 UP 10G(a) F(a) T 1 KDC-R402-E1 Backup Chassis
"""
    template = """
<group name = "interfaces">
<group name="routed">
Brief information on interfaces in route mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Protocol Primary_IP Description {{ _headers_ }}
</group>
{{ _end_ }}
</group>
<group name="bridged">
Brief information on interfaces in bridge mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Speed Duplex Type PVID Description {{ _headers_ }}
</group>
{{ _end_ }}
</group>
</group>
"""
    parser = ttp(data, template, log_level="error")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    # Without columns() limiting, the legend line also matches the headers
    # regex, hence the bogus "Link: ADM - administr" entries below.
    assert res == [
        [
            {
                "interfaces": {
                    "bridged": {
                        "BAGG1": {
                            "Description": "to-KDC-R4.10-Core-1",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "BAGG14": {
                            "Description": "KDC-R429-E1 BackUp " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                        "BAGG22": {
                            "Description": "HSSBC-NS-01",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "FGE1/0/49": {
                            "Description": "",
                            "Duplex": "A",
                            "Link": "DOWN",
                            "PVID": "1",
                            "Speed": "auto",
                            "Type": "A",
                        },
                        "Link: ADM - administr": {
                            "Description": "",
                            "Duplex": "Stby -",
                            "Link": "ative",
                            "PVID": "dby",
                            "Speed": "ly down;",
                            "Type": "stan",
                        },
                        "XGE1/0/1": {
                            "Description": "KDC-R402-E1 Backup " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                    },
                    "routed": {
                        "InLoop0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "UP(s)",
                        },
                        "Link: ADM - administr": {
                            "Description": "",
                            "Link": "ative",
                            "Primary_IP": "Stby - " "standby",
                            "Protocol": "ly down;",
                        },
                        "REG0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "--",
                        },
                        "Vlan401": {
                            "Description": "HSSBC_to_inband_mgmt_r4",
                            "Link": "UP",
                            "Primary_IP": "10.251.147.36",
                            "Protocol": "UP",
                        },
                    },
                }
            }
        ]
    ]
# test_issue_57_headers_parsing()
def test_issue_57_headers_parsing_using_columns():
    """
    Added columns for headers, now can adjust headers size as required
    to filter unwanted results
    """
    # Same data as test_issue_57_headers_parsing; here columns(N) requires
    # the full number of header columns to match, which drops the legend
    # lines that previously produced bogus "Link: ADM - administr" entries.
    data = """
Brief information on interfaces in route mode:
Link: ADM - administratively down; Stby - standby
Protocol: (s) - spoofing
Interface Link Protocol Primary IP Description
InLoop0 UP UP(s) --
REG0 UP -- --
Vlan401 UP UP 10.251.147.36 HSSBC_to_inband_mgmt_r4
Brief information on interfaces in bridge mode:
Link: ADM - administratively down; Stby - standby
Speed: (a) - auto
Duplex: (a)/A - auto; H - half; F - full
Type: A - access; T - trunk; H - hybrid
Interface Link Speed Duplex Type PVID Description
BAGG1 UP 20G(a) F(a) T 1 to-KDC-R4.10-Core-1
BAGG14 UP 10G(a) F(a) T 1 KDC-R429-E1 BackUp Chassis
BAGG22 UP 20G(a) F(a) T 1 HSSBC-NS-01
FGE1/0/49 DOWN auto A A 1
XGE1/0/1 UP 10G(a) F(a) T 1 KDC-R402-E1 Backup Chassis
"""
    template = """
<group name = "interfaces">
<group name="routed">
Brief information on interfaces in route mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Protocol Primary_IP Description {{ _headers_ | columns(5)}}
</group>
{{ _end_ }}
</group>
<group name="bridged">
Brief information on interfaces in bridge mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Speed Duplex Type PVID Description {{ _headers_ | columns(7) }}
</group>
{{ _end_ }}
</group>
</group>
"""
    parser = ttp(data, template, log_level="error")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "interfaces": {
                    "bridged": {
                        "BAGG1": {
                            "Description": "to-KDC-R4.10-Core-1",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "BAGG14": {
                            "Description": "KDC-R429-E1 BackUp " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                        "BAGG22": {
                            "Description": "HSSBC-NS-01",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "20G(a)",
                            "Type": "T",
                        },
                        "FGE1/0/49": {
                            "Description": "",
                            "Duplex": "A",
                            "Link": "DOWN",
                            "PVID": "1",
                            "Speed": "auto",
                            "Type": "A",
                        },
                        "XGE1/0/1": {
                            "Description": "KDC-R402-E1 Backup " "Chassis",
                            "Duplex": "F(a)",
                            "Link": "UP",
                            "PVID": "1",
                            "Speed": "10G(a)",
                            "Type": "T",
                        },
                    },
                    "routed": {
                        "InLoop0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "UP(s)",
                        },
                        "REG0": {
                            "Description": "",
                            "Link": "UP",
                            "Primary_IP": "--",
                            "Protocol": "--",
                        },
                        "Vlan401": {
                            "Description": "HSSBC_to_inband_mgmt_r4",
                            "Link": "UP",
                            "Primary_IP": "10.251.147.36",
                            "Protocol": "UP",
                        },
                    },
                }
            }
        ]
    ]
# test_issue_57_headers_parsing_using_columns()
def test_interface_template_not_collecting_all_data_solution():
    """Working single-group solution for the IOS-XR interface template.

    Instead of replicating the whole interface group per interface flavour
    (which lost data - see the skipped test below), a single group uses
    multiple ``_start_`` lines with ``let(...)`` to tag the mode, a
    ``default="intf_defaults"`` dict for missing values and an explicit
    ``! {{ _end_ }}`` to close each interface section.
    """
    data = """
interface Bundle-Ether10
description Bundle-Ether10
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.7
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.6 255.255.255.254
ipv6 address fc00::1:5/127
load-interval 30
!
interface Bundle-Ether51
description Bundle-Ether51
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.2
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.3 255.255.255.254
ipv6 address fc00::1:3/127
load-interval 30
!
interface Loopback0
description Loopback0
ipv4 address 10.1.1.1 255.255.255.255
ipv4 address 10.2.2.2 255.255.255.255 secondary
ipv6 address fc00::1/128
ipv6 address fc00::101/128
!
interface Loopback1
description Loopback1
ipv4 address 10.100.0.1 255.255.255.0
ipv4 address 10.100.1.1 255.255.255.0 secondary
ipv4 address 10.100.2.1 255.255.255.0 secondary
ipv6 address fc00:100::1/64
ipv6 address fc00:100::101/64
ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
description MgmtEth0/RP0/CPU0/0
cdp
vrf VRF-MGMT
ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
description GigabitEthernet0/0/0/12
mtu 9018
lldp
receive disable
transmit disable
!
negotiation auto
load-interval 30
l2transport
!
!
interface TenGigE0/0/0/4
description TenGigE0/0/0/4
bundle id 51 mode active
cdp
load-interval 30
!
interface TenGigE0/0/0/5
shutdown
!
interface TenGigE0/0/0/5.100 l2transport
description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
description TenGigE0/0/0/47
shutdown
mac-address 201.b19.1234
!
interface BVI101
cdp
description BVI101
ipv4 address 192.168.101.1 255.255.255.0
load-interval 30
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
description HundredGigE0/0/1/0
bundle id 10 mode active
cdp
load-interval 30
mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
description GigabitEthernet0/0/0/11
shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
description GigabitEthernet0/0/0/16
shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
description GigabitEthernet0/0/0/17
shutdown
!
"""
    template_original = """
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<vars>
intf_defaults = {
"description": None,
"speed": None,
"negotiation": None,
"disabled": False,
"mode": None,
}
</vars>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
data_list = list(data.split(" "))
addr = str(data_list[0])
mask = str(data_list[1])
mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
ipv4 = addr+"/"+mask
if 'secondary' in data:
is_secondary = True
else:
is_secondary = False
result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
return result
</macro>
<group name="interfaces" default="intf_defaults">
interface {{ interface | _start_}}
interface {{ interface | let("mode", "l2transport") | _start_ }} l2transport
interface preconfigure {{ interface | let("mode", "preconfigure") | _start_ }}
description {{ description | re(".+") }}
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | ORPHRASE | _exact_ }}
</group>
! {{ _end_ }}
</group>
"""
    parser = ttp(data, template_original, log_level="error")
    parser.parse()
    res = parser.result()
    # pprint.pprint(res, width=80)
    assert res == [
        [
            {
                "interfaces": [
                    {
                        "description": "Bundle-Ether10",
                        "disabled": False,
                        "interface": "Bundle-Ether10",
                        "ipv4": [
                            {"ipv4": {"ipv4": "192.168.1.6/31", "is_secondary": False}}
                        ],
                        "ipv6": [{"ipv6": "fc00::1:5/127"}],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "Bundle-Ether51",
                        "disabled": False,
                        "interface": "Bundle-Ether51",
                        "ipv4": [
                            {"ipv4": {"ipv4": "192.168.1.3/31", "is_secondary": False}}
                        ],
                        "ipv6": [{"ipv6": "fc00::1:3/127"}],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "Loopback0",
                        "disabled": False,
                        "interface": "Loopback0",
                        "ipv4": [
                            {"ipv4": {"ipv4": "10.1.1.1/32", "is_secondary": False}},
                            {"ipv4": {"ipv4": "10.2.2.2/32", "is_secondary": True}},
                        ],
                        "ipv6": [{"ipv6": "fc00::1/128"}, {"ipv6": "fc00::101/128"}],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "Loopback1",
                        "disabled": False,
                        "interface": "Loopback1",
                        "ipv4": [
                            {"ipv4": {"ipv4": "10.100.0.1/24", "is_secondary": False}},
                            {"ipv4": {"ipv4": "10.100.1.1/24", "is_secondary": True}},
                            {"ipv4": {"ipv4": "10.100.2.1/24", "is_secondary": True}},
                        ],
                        "ipv6": [
                            {"ipv6": "fc00:100::1/64"},
                            {"ipv6": "fc00:100::101/64"},
                            {"ipv6": "fc00:100::201/64"},
                        ],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "MgmtEth0/RP0/CPU0/0",
                        "disabled": False,
                        "interface": "MgmtEth0/RP0/CPU0/0",
                        "ipv4": [
                            {
                                "ipv4": {
                                    "ipv4": "172.23.136.21/22",
                                    "is_secondary": False,
                                }
                            }
                        ],
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/12",
                        "disabled": False,
                        "interface": "GigabitEthernet0/0/0/12",
                        "mode": None,
                        "negotiation": "auto",
                        "speed": None,
                    },
                    {
                        "description": "TenGigE0/0/0/4",
                        "disabled": False,
                        "interface": "TenGigE0/0/0/4",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": None,
                        "disabled": True,
                        "interface": "TenGigE0/0/0/5",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "TenGigE0/0/0/5.100",
                        "disabled": False,
                        "interface": "TenGigE0/0/0/5.100",
                        "mode": "l2transport",
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "TenGigE0/0/0/47",
                        "disabled": True,
                        "interface": "TenGigE0/0/0/47",
                        "mac_address": "201.b19.1234",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "BVI101",
                        "disabled": False,
                        "interface": "BVI101",
                        "ipv4": [
                            {
                                "ipv4": {
                                    "ipv4": "192.168.101.1/24",
                                    "is_secondary": False,
                                }
                            }
                        ],
                        "mac_address": "200.b19.4321",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "HundredGigE0/0/1/0",
                        "disabled": False,
                        "interface": "HundredGigE0/0/1/0",
                        "mac_address": "200.b19.5678",
                        "mode": None,
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/11",
                        "disabled": True,
                        "interface": "GigabitEthernet0/0/0/11",
                        "mode": "preconfigure",
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/16",
                        "disabled": True,
                        "interface": "GigabitEthernet0/0/0/16",
                        "mode": "preconfigure",
                        "negotiation": None,
                        "speed": None,
                    },
                    {
                        "description": "GigabitEthernet0/0/0/17",
                        "disabled": True,
                        "interface": "GigabitEthernet0/0/0/17",
                        "mode": "preconfigure",
                        "negotiation": None,
                        "speed": None,
                    },
                ]
            }
        ]
    ]
# test_interface_template_not_collecting_all_data_solution()
@pytest.mark.skipif(True, reason="Need to fix this one")
def test_interface_template_not_collecting_all_data():
    """
    For interface BVI101 not collecting mac-address
    """
    # Skipped reproducer: the replicated-group template below loses data
    # (see test_interface_template_not_collecting_all_data_solution for the
    # working single-group approach). No assertions yet - only prints.
    data = """
interface Bundle-Ether10
description Bundle-Ether10
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.7
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.6 255.255.255.254
ipv6 address fc00::1:5/127
load-interval 30
!
interface Bundle-Ether51
description Bundle-Ether51
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.2
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.3 255.255.255.254
ipv6 address fc00::1:3/127
load-interval 30
!
interface Loopback0
description Loopback0
ipv4 address 10.1.1.1 255.255.255.255
ipv4 address 10.2.2.2 255.255.255.255 secondary
ipv6 address fc00::1/128
ipv6 address fc00::101/128
!
interface Loopback1
description Loopback1
ipv4 address 10.100.0.1 255.255.255.0
ipv4 address 10.100.1.1 255.255.255.0 secondary
ipv4 address 10.100.2.1 255.255.255.0 secondary
ipv6 address fc00:100::1/64
ipv6 address fc00:100::101/64
ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
description MgmtEth0/RP0/CPU0/0
cdp
vrf VRF-MGMT
ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
description GigabitEthernet0/0/0/12
mtu 9018
lldp
receive disable
transmit disable
!
negotiation auto
load-interval 30
l2transport
!
!
interface TenGigE0/0/0/4
description TenGigE0/0/0/4
bundle id 51 mode active
cdp
load-interval 30
!
interface TenGigE0/0/0/5
shutdown
!
interface TenGigE0/0/0/5.100 l2transport
description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
description TenGigE0/0/0/47
shutdown
mac-address 201.b19.1234
!
interface BVI101
cdp
description BVI101
ipv4 address 192.168.101.1 255.255.255.0
load-interval 30
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
description HundredGigE0/0/1/0
bundle id 10 mode active
cdp
load-interval 30
mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
description GigabitEthernet0/0/0/11
shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
description GigabitEthernet0/0/0/16
shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
description GigabitEthernet0/0/0/17
shutdown
!
"""
    template_original = """
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
data_list = list(data.split(" "))
addr = str(data_list[0])
mask = str(data_list[1])
mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
ipv4 = addr+"/"+mask
if 'secondary' in data:
is_secondary = True
else:
is_secondary = False
result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
return result
</macro>
## parent group for all interface groups
<group name="interfaces">
## matches primary interfaces
<group>
{{ mode | set(None) }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }}
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
## matches pre-configured interfaces
<group>
{{ mode | set('preconfigure') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface preconfigure {{ interface }}
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
{{ mode | set('l2transport') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }} l2transport
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
</group>
"""
    parser = ttp(data, template_original, log_level="error")
    parser.parse()
    res = parser.result()
    pprint.pprint(res, width=80)
# test_interface_template_not_collecting_all_data()
def test_interface_template_not_collecting_all_data_reduced():
    """Regression check: BVI101 mac-address must be collected.

    A reduced reproducer for the "not collecting all data" issue - TTP
    previously returned only the interface name and ipv4 entry for BVI101
    and dropped its mac-address; this asserts the fixed behaviour.
    """
    device_output = """
interface TenGigE0/0/0/5.100 l2transport
!
interface BVI101
ipv4 address 192.168.101.1 255.255.255.0
mac-address 200.b19.4321
!
"""
    ttp_template = """
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | _line_ | _exact_ }}
</group>
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
mac-address {{ mac_address }}
</group>
</group>
"""
    engine = ttp(device_output, ttp_template, log_level="error")
    engine.parse()
    parsed = engine.result()
    expected = [
        [
            {
                "interfaces": [
                    {"interface": "TenGigE0/0/0/5.100"},
                    {
                        "interface": "BVI101",
                        "ipv4": [{"ipv4": "192.168.101.1 255.255.255.0"}],
                        "mac_address": "200.b19.4321",
                    },
                ]
            }
        ]
    ]
    assert parsed == expected
# test_interface_template_not_collecting_all_data_reduced()
@pytest.mark.skipif(True, reason="Need to fix this one")
def test_interface_template_not_collecting_all_data_reduced_2():
    """
    Below template and data producing this result:
    [[{'interfaces': [{'interface': 'TenGigE0/0/0/5'},
    {'interface': 'TenGigE0/0/0/5.100',
    'mac_address': '200.b19.1234'},
    {'interface': 'BVI101',
    'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]},
    {'interface': 'HundredGigE0/0/1/0',
    'mac_address': '200.b19.5678'}]}]]
    Interface BVI should not have IPv4 address matched, but
    should have mac-address matched. Problem is due to that
    l2transport group starts and it has group for IPv4 addresses,
    next match after matching IPv4 is mac-address, but his parent
    is a different group, as a result IPv4 address saved under wrong group
    and mac-address not saved at all
    IDEA: try to implement automatic end of group tracking, to add pevious
    groups to self.ended_groups if next, different group starts.
    Current solution to this problem would be to use _end_ to explicitly
    indicate end of group
    """
    # Skipped reproducer - no assertions yet, only prints the (wrong) result.
    data = """
interface TenGigE0/0/0/5
!
interface TenGigE0/0/0/5.100 l2transport
mac-address 200.b19.1234
!
interface BVI101
ipv4 address 192.168.101.1 255.255.255.0
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
mac-address 200.b19.5678
!
"""
    template_original = """
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | _line_ | _exact_ }}
</group>
</group>
</group>
"""
    parser = ttp(data, template_original, log_level="error")
    parser.parse()
    res = parser.result()
    pprint.pprint(res, width=80)
# test_interface_template_not_collecting_all_data_reduced_2()
def test_issue_61():
    """Two-pass banner parsing using a runtime template variable (issue 61).

    First pass extracts the banner delimiter character; second pass feeds it
    back in via ``vars`` so ``ignore(banner_marker)`` can anchor the start
    and end of the banner body, which is joined line by line.
    """
    data = """
banner motd &
BANNER MESSAGE line 1
BANNER MESSAGE line 2
BANNER MESSAGE line 3
&
some
other staff
"""
    template_to_match_marker = "banner motd {{ marker }}"
    template_to_parse_banner = """
<group name="motd">
banner motd {{ ignore(banner_marker) }} {{ _start_ }}
{{ banner_mesage | _line_ | joinmatches("\\n") }}
{{ ignore(banner_marker) }} {{ _end_ }}
</group>
"""
    # extract marker value
    parser = ttp(data, template_to_match_marker)
    parser.parse()
    marker = parser.result()[0][0]["marker"]
    # parse banner
    parser = ttp(data, template_to_parse_banner, vars={"banner_marker": marker})
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [[{'motd': {'banner_mesage': 'BANNER MESSAGE line 1\n'
                                               'BANNER MESSAGE line 2\n'
                                               'BANNER MESSAGE line 3'}}]]
# test_issue_61()
def test_fortigate_intf_parsing():
    """Parse only the FortiGate ``config system interface`` section.

    Uses nested ``_start_``/``_end_`` anchors so that the surrounding
    ``config system np6`` and ``config system custom-language`` sections
    are not matched; the absolute path ``/interfaces*`` collects the
    interface entries at the top level of the results.
    """
    template = """
<group name="interfaces">
config system interface {{ _start_ }}
<group name="/interfaces*">
edit "{{ interface }}"
set allowaccess {{ allowaccess }}
set description "{{ description }}"
set interface "{{ phy_interface }}"
set snmp-index {{ snmp_index }}
set type {{ fgt_int_type }}
set vdom "{{ vdom }}"
set vlanid {{ vlan }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
"""
    data = """
config system np6
edit "np6_0"
next
end
config system interface
edit "mgmt1"
set vdom "root"
set ip 10.10.10.1 255.255.255.248
set allowaccess ping
set type physical
set description "mgmt1"
set snmp-index 1
next
edit "port1"
set vdom "internal"
set ip 20.20.20.1 255.255.255.248
set allowaccess ping
set type physical
set snmp-index 2
next
end
config system custom-language
edit "en"
set filename "en"
next
edit "fr"
set filename "fr"
next
end
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [[{'interfaces': [{'allowaccess': 'ping',
                                     'description': 'mgmt1',
                                     'fgt_int_type': 'physical',
                                     'interface': 'mgmt1',
                                     'snmp_index': '1',
                                     'vdom': 'root'},
                                    {'allowaccess': 'ping',
                                     'fgt_int_type': 'physical',
                                     'interface': 'port1',
                                     'snmp_index': '2',
                                     'vdom': 'internal'}]}]]
# test_fortigate_intf_parsing()
def test_issue_57_one_more():
    """
    Without _anonymous_ group groups id formation bug fix
    below template/data were producing this result:
    [[{'portchannel': {'1': {'local_members': [{}],
    'remote_members': [{'flag': '{EF}',
    'interface': 'GE6/0/1',
    'mac': '0000-0000-0000',
    'oper_key': '0',
    'priority': '32768',
    'status': '0',
    'sys_id': '0x8000'},
    {'flag': '{EF}',
    'interface': 'GE6/0/2',
    'mac': '0000-0000-0000',
    'oper_key': '0',
    'priority': '32768',
    'status': '0',
    'sys_id': '0x8000'}]},
    '2': {'local_members': [{}],
    'remote_members': [{'flag': '{EF}',
    'interface': 'GE6/0/3',
    'mac': '0000-0000-0000',
    'oper_key': '0',
    'priority': '32768',
    'status': '0',
    'sys_id': '0x8000'},
    {'flag': '{EF}',
    'interface': 'GE6/0/4',
    'mac': '0000-0000-0000',
    'oper_key': '0',
    'priority': '32768',
    'status': '0',
    'sys_id': '0x8000'}]}}}]]
    Further debugging revealed the flaw in results selection logic,
    due to exclude("Port") statements group was invalidated and anonymous group_id
    was same as parent group_id resulting in new anonymous group matches were not
    able to restart the group, fixed by changing the way how anonymous group id formed.
    Before fix:
    self.ended_groups: set()
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    After fix:
    self.ended_groups: set()
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: {('portchannel.{{channel_number}}.local_members*._anonymous_', 0)}
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    self.ended_groups: set()
    re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
    re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
    """
    data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
"""
    template = """
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*" void="">
Local: {{_start_}}
<group>
{{interface | exclude("Port") }} {{status}} {{priority}} {{oper_key }} {{flag}}
</group>
</group>
<group name = "remote_members*">
{{interface }} {{status}} {{priority}} {{oper_key}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    assert res == [[{'portchannel': {'1': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/1',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/2',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/1',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/2',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]},
                                     '2': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/3',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/4',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/3',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/4',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]}}}]]
# test_issue_57_one_more()
def test_issue_57_one_more_answer():
    """Regression test for ttp issue #57.

    Parses 'display link-aggregation verbose' style output where local and
    remote member rows are matched directly inside dynamic-path list groups
    (local_members*/remote_members*), using DIGIT filters to anchor the
    numeric columns.  Verifies that both member lists are populated for
    each port-channel with no spurious entries.
    """
    data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
"""
    template = """
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*">
{{interface}} {{status}} {{priority | DIGIT}} {{oper_key | DIGIT}} {{flag}}
</group>
<group name = "remote_members*">
{{interface}} {{status}} {{priority | DIGIT}} {{oper_key | DIGIT}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # Two port-channels, each with two local and two remote member entries.
    assert res == [[{'portchannel': {'1': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/1',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/2',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/1',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/2',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]},
                                     '2': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/3',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/4',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/3',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/4',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]}}}]]
# test_issue_57_one_more_answer()
def test_issue_57_one_more_empty_dict_in_res():
    """Regression test for ttp issue #57 — empty-dict list items.

    local_members* and remote_members* use '*' to indicate list items.
    Before the fix, self.dict_by_path returned the list element itself and
    the (empty) results dictionary was appended to it, leaving a spurious
    {} at the head of each members list.  The saving logic was updated to
    check whether results are empty and skip appending them, so the
    expected output below must contain no empty dicts.
    """
    data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
"""
    template = """
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*">
Local: {{_start_}}
<group>
{{interface }} {{status}} {{priority}} {{oper_key | DIGIT }} {{flag}}
</group>
</group>
<group name = "remote_members*">
Remote: {{_start_}}
<group>
{{interface }} {{status}} {{priority}} {{oper_key}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
</group>
"""
    parser = ttp(data, template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # No leading {} entries in either members list after the fix.
    assert res == [[{'portchannel': {'1': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/1',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/2',
                                                              'oper_key': '1',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/1',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/2',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]},
                                     '2': {'local_members': [{'flag': '{ACG}',
                                                              'interface': 'GE6/0/3',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'},
                                                             {'flag': '{ACG}',
                                                              'interface': 'GE6/0/4',
                                                              'oper_key': '2',
                                                              'priority': '32768',
                                                              'status': 'U'}],
                                           'remote_members': [{'flag': '{EF}',
                                                               'interface': 'GE6/0/3',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'},
                                                              {'flag': '{EF}',
                                                               'interface': 'GE6/0/4',
                                                               'mac': '0000-0000-0000',
                                                               'oper_key': '0',
                                                               'priority': '32768',
                                                               'status': '0',
                                                               'sys_id': '0x8000'}]}}}]]
# test_issue_57_one_more_empty_dict_in_res()
from war import War
WAR_STATS_FILE_PATH = "./data/war_stats.tsv"
def load_data(file_path):
    """
    Loads war data from the given file path.

    Args:
        file_path (str): path to .tsv file with war data

    Returns:
        list: one War object per data row in the file (header row excluded).
    """
    # TSV columns:
    # Era  War  Death range  Date  Combatants  Location  Notes  Aliases  Description  Source
    wars = []
    # 'with' guarantees the file handle is closed even if parsing raises.
    with open(file_path, "r") as file:
        for line in file:
            # Split on tabs and strip leading/trailing whitespace per field.
            items = [item.strip() for item in line.split("\t")]
            # Skip the header row and any blank or malformed (short) lines,
            # which would otherwise raise IndexError below.
            if not items or items[0] == "Era" or len(items) < 10:
                continue
            era = items[0]
            name = items[1]
            death_range = items[2].replace("+", "")  # remove + symbols
            date = items[3]
            combatants = items[4]
            location = items[5]
            notes = items[6]
            # turn aliases into a list
            aliases = [alias.strip() for alias in items[7].split(",")]
            description = items[8]
            source = items[9]
            # "lower-upper" gives both bounds; a single number is used for both.
            deaths = death_range.split("-")
            lower_deaths = deaths[0]
            upper_deaths = deaths[1] if len(deaths) == 2 else deaths[0]
            wars.append(
                War(
                    name,
                    aliases,
                    upper_deaths,
                    lower_deaths,
                    combatants,
                    era,
                    date,
                    description,
                    location,
                    notes,
                    source,
                )
            )
    return wars
load_data(WAR_STATS_FILE_PATH)
# third_party/chromite/lib/chrome_util.py
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library containing utility functions used for Chrome-specific build tasks."""
from __future__ import print_function
import functools
import glob
import os
import re
import shlex
import shutil
from chromite.lib import failures_lib
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
# Taken from external/gyp.git/pylib.
def _NameValueListToDict(name_value_list):
"""Converts Name-Value list to dictionary.
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = {}
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ProcessShellFlags(defines):
  """Split a shell-style flag string and convert it to a dictionary."""
  assert defines is not None
  tokens = shlex.split(defines)
  return _NameValueListToDict(tokens)
class Conditions(object):
  """Factories for the predicates used to construct Path objects.

  Every public method returns a callable with signature
  f(gn_args, staging_flags); see the StageChromeFromBuildDir() docstring
  for descriptions of gn_args and staging_flags.
  """

  @classmethod
  def _GnSetTo(cls, flag, value, gn_args, _staging_flags):
    return gn_args.get(flag) == value

  @classmethod
  def _StagingFlagSet(cls, flag, _gn_args, staging_flags):
    return flag in staging_flags

  @classmethod
  def _StagingFlagNotSet(cls, flag, gn_args, staging_flags):
    return not cls._StagingFlagSet(flag, gn_args, staging_flags)

  @classmethod
  def GnSetTo(cls, flag, value):
    """Returns a predicate that is true when gn arg |flag| equals |value|."""
    return functools.partial(cls._GnSetTo, flag, value)

  @classmethod
  def StagingFlagSet(cls, flag):
    """Returns a predicate that is true when |flag| is in staging_flags."""
    return functools.partial(cls._StagingFlagSet, flag)

  @classmethod
  def StagingFlagNotSet(cls, flag):
    """Returns a predicate that is true when |flag| is absent from staging_flags."""
    return functools.partial(cls._StagingFlagNotSet, flag)
class MultipleMatchError(failures_lib.StepFailure):
  """A glob pattern matches multiple files but a non-dir dest was specified."""


class MissingPathError(failures_lib.StepFailure):
  """An expected path is non-existent."""


class MustNotBeDirError(failures_lib.StepFailure):
  """The specified path should not be a directory, but is."""
class Copier(object):
  """File/directory copier.

  Provides destination stripping and permission setting functionality.
  """

  def __init__(self, strip_bin=None, strip_flags=None, default_mode=0o644,
               dir_mode=0o755, exe_mode=0o755):
    """Initialization.

    Args:
      strip_bin: Path to the program used to strip binaries. If set to None,
        binaries will not be stripped.
      strip_flags: A list of flags to pass to the |strip_bin| executable.
      default_mode: Default permissions to set on files.
      dir_mode: Mode to set for directories.
      exe_mode: Permissions to set on executables.
    """
    self.strip_bin = strip_bin
    self.strip_flags = strip_flags
    self.default_mode = default_mode
    self.dir_mode = dir_mode
    self.exe_mode = exe_mode

  @staticmethod
  def Log(src, dest, directory):
    """Log one copy operation at debug level ('[d]' marks a directory)."""
    sep = ' [d] -> ' if directory else ' -> '
    logging.debug('%s %s %s', src, sep, dest)

  def _CopyFile(self, src, dest, path):
    """Perform the copy.

    Args:
      src: The path of the file/directory to copy.
      dest: The exact path of the destination. Does nothing if it already
        exists.
      path: The Path instance containing copy operation modifiers (such as
        Path.exe, Path.strip, etc.)
    """
    assert not os.path.isdir(src), '%s: Not expecting a directory!' % src

    # This file has already been copied by an earlier Path.
    if os.path.exists(dest):
      return

    osutils.SafeMakedirs(os.path.dirname(dest), mode=self.dir_mode)
    # Strip only non-empty executables when a strip binary was configured;
    # strip writes the output directly to |dest| via '-o'.
    if path.exe and self.strip_bin and path.strip and os.path.getsize(src) > 0:
      strip_flags = (['--strip-unneeded'] if self.strip_flags is None else
                     self.strip_flags)
      cros_build_lib.DebugRunCommand(
          [self.strip_bin] + strip_flags + ['-o', dest, src])
      # Preserve the source's timestamps/permission bits on the stripped copy.
      shutil.copystat(src, dest)
    else:
      shutil.copy2(src, dest)

    # Explicit mode wins; otherwise pick the exe or default mode.
    mode = path.mode
    if mode is None:
      mode = self.exe_mode if path.exe else self.default_mode
    os.chmod(dest, mode)

  def Copy(self, src_base, dest_base, path, sloppy=False):
    """Copy artifact(s) from source directory to destination.

    Args:
      src_base: The directory to apply the src glob pattern match in.
      dest_base: The directory to copy matched files to. |Path.dest|.
      path: A Path instance that specifies what is to be copied.
      sloppy: If set, ignore when mandatory artifacts are missing.

    Returns:
      A list of the artifacts copied.
    """
    copied_paths = []
    src = os.path.join(src_base, path.src)
    # A src without a trailing '/' must not resolve to a directory.
    if not src.endswith('/') and os.path.isdir(src):
      raise MustNotBeDirError('%s must not be a directory\n'
                              'Aborting copy...' % (src,))
    paths = glob.glob(src)
    if not paths:
      if path.optional:
        logging.debug('%s does not exist and is optional. Skipping.', src)
      elif sloppy:
        logging.warning('%s does not exist and is required. Skipping anyway.',
                        src)
      else:
        msg = ('%s does not exist and is required.\n'
               'You can bypass this error with --sloppy.\n'
               'Aborting copy...' % src)
        raise MissingPathError(msg)
    elif len(paths) > 1 and path.dest and not path.dest.endswith('/'):
      # Several matches cannot all be copied onto one non-directory dest.
      raise MultipleMatchError(
          'Glob pattern %r has multiple matches, but dest %s '
          'is not a directory.\n'
          'Aborting copy...' % (path.src, path.dest))
    else:
      for p in paths:
        rel_src = os.path.relpath(p, src_base)
        if path.IsBlacklisted(rel_src):
          continue
        # Work out the destination path relative to |dest_base|:
        # keep the source name, drop into a dest directory, or rename.
        if path.dest is None:
          rel_dest = rel_src
        elif path.dest.endswith('/'):
          rel_dest = os.path.join(path.dest, os.path.basename(p))
        else:
          rel_dest = path.dest
        assert not rel_dest.endswith('/')
        dest = os.path.join(dest_base, rel_dest)

        copied_paths.append(p)
        self.Log(p, dest, os.path.isdir(p))
        if os.path.isdir(p):
          # Recreate the directory tree, copying files and making (empty)
          # directories as we encounter them.
          for sub_path in osutils.DirectoryIterator(p):
            rel_path = os.path.relpath(sub_path, p)
            sub_dest = os.path.join(dest, rel_path)
            if path.IsBlacklisted(rel_path):
              continue
            if sub_path.endswith('/'):
              osutils.SafeMakedirs(sub_dest, mode=self.dir_mode)
            else:
              self._CopyFile(sub_path, sub_dest, path)
        else:
          self._CopyFile(p, dest, path)

    return copied_paths
class Path(object):
  """Represents an artifact to be copied from build dir to staging dir."""

  DEFAULT_BLACKLIST = (r'(^|.*/)\.svn($|/.*)',)

  def __init__(self, src, exe=False, cond=None, dest=None, mode=None,
               optional=False, strip=True, blacklist=None):
    """Initializes the object.

    Args:
      src: Relative path of the artifact: a file, a directory, or a glob
        pattern.
      exe: Whether the path is an executable or contains executables.
        Executables may be stripped during copy and get special
        permissions.  Stripping only applies to explicitly named files and
        glob patterns that match files; the contents of directories are
        never stripped.
      cond: A condition (see Conditions class), or a list of conditions,
        tested when deciding whether to process this artifact.
      dest: Name to give to the target file/directory.  Defaults to keeping
        the same name as the source.
      mode: The mode to set for the matched files and the contents of
        matched directories.
      optional: Whether the artifact may be missing.  If unset, a missing
        artifact is an error (except in 'sloppy' mode, where the Copier
        class treats every artifact as optional).
      strip: If |exe| is set, whether to strip the executable.
      blacklist: Extra path patterns to ignore during the copy; these are
        appended to DEFAULT_BLACKLIST.
    """
    self.src = src
    self.exe = exe
    self.cond = cond
    self.dest = dest
    self.mode = mode
    self.optional = optional
    self.strip = strip
    self.blacklist = self.DEFAULT_BLACKLIST
    if blacklist is not None:
      self.blacklist += tuple(blacklist)

  def IsBlacklisted(self, path):
    """Returns whether |path| matches any blacklist pattern.

    A blacklisted file is not copied over to the staging directory.

    Args:
      path: The path of a file, relative to the path of this Path object.
    """
    return any(re.match(pattern, path) for pattern in self.blacklist)

  def ShouldProcess(self, gn_args, staging_flags):
    """Tests whether this artifact should be copied."""
    # With no build configuration available, stage everything we know about.
    if not gn_args and not staging_flags:
      return True
    if isinstance(self.cond, list) and self.cond:
      # A list of conditions: every one of them must hold.
      return all(c(gn_args, staging_flags) for c in self.cond)
    if self.cond:
      return self.cond(gn_args, staging_flags)
    return True
# GN arg names inspected when deciding what to stage.
_ENABLE_NACL = 'enable_nacl'
_IS_CHROME_BRANDED = 'is_chrome_branded'
_IS_COMPONENT_BUILD = 'is_component_build'

# Valid entries for StageChromeFromBuildDir's |staging_flags| argument.
_HIGHDPI_FLAG = 'highdpi'
STAGING_FLAGS = (
    _HIGHDPI_FLAG,
)

# Name given to the sandbox binary inside the staging directory.
_CHROME_SANDBOX_DEST = 'chrome-sandbox'

# Shorthand used when building the Path condition lists below.
C = Conditions
# In the below Path lists, if two Paths both match a file, the earlier Path
# takes precedence.
# Files shared between all deployment types.
# Files shared between all deployment types.
_COPY_PATHS_COMMON = (
    Path('chrome_sandbox', mode=0o4755, dest=_CHROME_SANDBOX_DEST),
    Path('icudtl.dat'),
    Path('libosmesa.so', exe=True, optional=True),
    # Do not strip the nacl_helper_bootstrap binary because the binutils
    # objcopy/strip mangles the ELF program headers.
    Path('nacl_helper_bootstrap',
         exe=True,
         strip=False,
         cond=C.GnSetTo(_ENABLE_NACL, True)),
    Path('nacl_irt_*.nexe', cond=C.GnSetTo(_ENABLE_NACL, True)),
    Path('nacl_helper',
         exe=True,
         optional=True,
         cond=C.GnSetTo(_ENABLE_NACL, True)),
    Path('nacl_helper_nonsfi',
         exe=True,
         optional=True,
         cond=C.GnSetTo(_ENABLE_NACL, True)),
    Path('natives_blob.bin', optional=True),
    Path('pnacl/', cond=C.GnSetTo(_ENABLE_NACL, True)),
    Path('snapshot_blob.bin', optional=True),
)

# Artifacts staged for an app_shell deployment (plus the common set).
_COPY_PATHS_APP_SHELL = (
    Path('app_shell', exe=True),
    Path('extensions_shell_and_test.pak'),
) + _COPY_PATHS_COMMON

# Artifacts staged for a full chrome deployment (plus the common set).
_COPY_PATHS_CHROME = (
    Path('chrome', exe=True),
    Path('chrome-wrapper'),
    Path('chrome_100_percent.pak'),
    Path('chrome_200_percent.pak', cond=C.StagingFlagSet(_HIGHDPI_FLAG)),
    Path('dbus/', optional=True),
    Path('keyboard_resources.pak'),
    Path('libassistant.so', exe=True, optional=True),
    Path('libmojo_core.so', exe=True),
    # Widevine CDM is already pre-stripped. In addition, it doesn't
    # play well with the binutils stripping tools, so skip stripping.
    Path('libwidevinecdm.so',
         exe=True,
         strip=False,
         cond=C.GnSetTo(_IS_CHROME_BRANDED, True)),
    # In component build, copy so files (e.g. libbase.so) except for the
    # blacklist.
    Path('*.so',
         blacklist=(r'libwidevinecdm.so',),
         exe=True,
         cond=C.GnSetTo(_IS_COMPONENT_BUILD, True)),
    Path('locales/*.pak'),
    Path('Packages/chrome_content_browser/manifest.json', optional=True),
    Path('Packages/chrome_content_gpu/manifest.json', optional=True),
    Path('Packages/chrome_content_plugin/manifest.json', optional=True),
    Path('Packages/chrome_content_renderer/manifest.json', optional=True),
    Path('Packages/chrome_content_utility/manifest.json', optional=True),
    Path('Packages/chrome_mash/manifest.json', optional=True),
    Path('Packages/chrome_mash_content_browser/manifest.json', optional=True),
    Path('Packages/content_browser/manifest.json', optional=True),
    Path('resources/'),
    Path('resources.pak'),
    Path('xdg-settings'),
    Path('*.png'),
) + _COPY_PATHS_COMMON

# Maps a deployment type name to its list of copy paths.
_COPY_PATHS_MAP = {
    'app_shell': _COPY_PATHS_APP_SHELL,
    'chrome': _COPY_PATHS_CHROME,
}
def _FixPermissions(dest_base):
  """Last minute permission fixes."""
  # Make everything under |dest_base| world-readable.
  cros_build_lib.DebugRunCommand(['chmod', '-R', 'a+r', dest_base])
  # Any entry executable by its owner or group becomes executable by all.
  cros_build_lib.DebugRunCommand(
      ['find', dest_base, '-perm', '/110', '-exec', 'chmod', 'a+x', '{}', '+'])
def GetCopyPaths(deployment_type='chrome'):
  """Returns the list of copy paths used as a filter for staging files.

  Args:
    deployment_type: String describing the deployment type. Either
      "app_shell" or "chrome".

  Returns:
    The list of paths to use as a filter for staging files.

  Raises:
    RuntimeError: If |deployment_type| is not a known deployment type.
  """
  try:
    return _COPY_PATHS_MAP[deployment_type]
  except KeyError:
    raise RuntimeError('Invalid deployment type "%s"' % deployment_type)
def StageChromeFromBuildDir(staging_dir, build_dir, strip_bin, sloppy=False,
                            gn_args=None, staging_flags=None,
                            strip_flags=None, copy_paths=_COPY_PATHS_CHROME):
  """Populates a staging directory with necessary build artifacts.

  When |gn_args| or |staging_flags| are supplied, their values determine
  which artifacts get staged; otherwise everything we know about that can
  be found is staged.

  Args:
    staging_dir: Path to an empty staging directory.
    build_dir: Path to location of Chrome build artifacts.
    strip_bin: Path to executable used for stripping binaries.
    sloppy: Ignore when mandatory artifacts are missing.
    gn_args: A dictionary of args.gn values that Chrome was built with.
    staging_flags: A list of extra staging flags. Valid flags are specified
      in STAGING_FLAGS.
    strip_flags: A list of flags to pass to the tool used to strip binaries.
    copy_paths: The list of paths to use as a filter for staging files.
  """
  gn_args = {} if gn_args is None else gn_args
  staging_flags = [] if staging_flags is None else staging_flags

  os.mkdir(os.path.join(staging_dir, 'plugins'), 0o755)

  copier = Copier(strip_bin=strip_bin, strip_flags=strip_flags)
  staged = []
  for candidate in copy_paths:
    if not candidate.ShouldProcess(gn_args, staging_flags):
      continue
    staged.extend(copier.Copy(build_dir, staging_dir, candidate, sloppy=sloppy))

  if not staged:
    raise MissingPathError('Couldn\'t find anything to copy!\n'
                           'Are you looking in the right directory?\n'
                           'Aborting copy...')

  _FixPermissions(staging_dir)
# -*- coding: utf-8 -*-
"""Flux Calculation class tests.
This script tests the operation of the Background Image Class.
Created on Thu Apr 22 13:44:35 2021
@author: denis
"""
import numpy as np
import pytest
from AIS.Background_Image import Background_Image
# CCD operation mode shared by every test in this module.
ccd_operation_mode = {
    "em_mode": 0,
    "em_gain": 1,
    "preamp": 1,
    "hss": 1,
    "binn": 1,
    "t_exp": 1,
    "image_size": 2024,
}

# Convenience aliases for the individual CCD parameters.
em_gain = ccd_operation_mode["em_gain"]
binn = ccd_operation_mode["binn"]
t_exp = ccd_operation_mode["t_exp"]
preamp = ccd_operation_mode["preamp"]
hss = ccd_operation_mode["hss"]
image_size = ccd_operation_mode["image_size"]
sky_flux = 10
ccd_gain = 3
dark_current = 1e-5
read_noise = 6.67
bias_level = 500

dc = dark_current * t_exp  # dark charge accumulated over one exposure
rn = read_noise
nf = 1  # noise factor for conventional (non-EM) mode

# Expected mean level and standard deviation of the synthetic background
# image, in ADU (computed from the CCD parameters above).
background_level = bias_level + (dc + sky_flux) * t_exp * em_gain * binn ** 2 / ccd_gain
noise = (
    np.sqrt(rn ** 2 + (sky_flux + dc) * t_exp * nf ** 2 * em_gain ** 2 * binn ** 2)
    / ccd_gain
)
@pytest.fixture
def bgi():
    """Background_Image instance built from the module-level CCD parameters."""
    return Background_Image(
        ccd_operation_mode, ccd_gain, dark_current, read_noise, bias_level
    )
# ------------------------ Initialize the class --------------------------
def test_em_gain(bgi):
    """Each CCD parameter passed to the constructor is stored on the object."""
    assert bgi.em_gain == em_gain


def test_preamp(bgi):
    assert bgi.preamp == preamp


def test_hss(bgi):
    assert bgi.hss == hss


def test_bin(bgi):
    assert bgi.binn == binn


def test_t_exp(bgi):
    assert bgi.t_exp == t_exp


def test_image_size(bgi):
    assert bgi.image_size == image_size


def test_ccd_gain(bgi):
    assert bgi.ccd_gain == ccd_gain


def test_bias_level(bgi):
    assert bgi.bias_level == bias_level


def test_noise_factor_1(bgi):
    # Conventional mode (em_mode == 0) uses a noise factor of 1.
    assert bgi.NOISE_FACTOR == 1


def test_noise_factor_2():
    # EM mode (em_mode == 1) uses a noise factor of 1.4.
    # NOTE(review): this mutates the shared module-level dict, so any test
    # running afterwards sees em_mode == 1 — confirm that is intended.
    ccd_operation_mode["em_mode"] = 1
    bgi = Background_Image(
        ccd_operation_mode, ccd_gain, dark_current, read_noise, bias_level
    )
    assert bgi.NOISE_FACTOR == 1.4
# ----------------------- Calculate Background Image -------------------------
def test_create_background_image(bgi):
    """The mean of a synthetic background image matches the expected level."""
    image = bgi.create_background_image(sky_flux)
    bg_level = np.mean(image)
    new_noise = np.std(image)
    assert np.allclose(bg_level, background_level)
    # NOTE(review): the std-dev check was disabled upstream; left as-is.
    # assert np.allclose(noise, new_noise)


def test_create_bias_image(bgi):
    """Bias image: mean equals bias_level and std equals read noise in ADU."""
    image = bgi.create_bias_image()
    bg_level = np.mean(image)
    new_noise = round(np.std(image), 2)
    noise = round(rn / ccd_gain, 2)
    assert np.allclose(bg_level, bias_level)
    assert np.allclose(noise, new_noise)


def test_create_dark_image(bgi):
    """Dark image: mean is bias plus dark charge; std equals read noise."""
    image = bgi.create_dark_image()
    bg_level = np.mean(image)
    new_noise = round(np.std(image), 2)
    noise = round(rn / ccd_gain, 2)
    assert np.allclose(bg_level, bias_level + dc)
    assert np.allclose(noise, new_noise)


def test_create_flat_image(bgi):
    """Flat image: mean sits at the fixed flat-field bias of 32000 ADU."""
    image = bgi.create_flat_image()
    bg_level = np.mean(image)
    BIAS = 32000
    new_noise = round(np.std(image), 2)
    poisson_noise = BIAS / ccd_gain
    pixel_sensibility_noise = BIAS / ccd_gain * 0.03  # 3% of pixel sensibility
    noise = (
        np.sqrt(
            rn ** 2
            + (poisson_noise + pixel_sensibility_noise)
            * nf ** 2
            * em_gain ** 2
            * binn ** 2
        )
        / ccd_gain
    )
    assert np.allclose(bg_level, BIAS)
    # NOTE(review): the std-dev check was disabled upstream; left as-is.
    # assert np.allclose(noise, new_noise)
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet can send and receive using all combinations of address types.
There are 5 nodes-under-test:
- node0 uses legacy addresses
- node1 uses p2sh/segwit addresses
- node2 uses p2sh/segwit addresses and bech32 addresses for change
- node3 uses bech32 addresses
- node4 uses a p2sh/segwit addresses for change
node5 exists to generate new blocks.
## Multisig address test
Test that adding a multisig address with:
- an uncompressed pubkey always gives a legacy address
  - only compressed pubkeys gives an `-addresstype` address
## Sending to address types test
A series of tests, iterating over node0-node4. In each iteration of the test, one node sends:
- 10/101th of its balance to itself (using getrawchangeaddress for single key addresses)
- 20/101th to the next node
- 30/101th to the node after that
- 40/101th to the remaining node
- 1/101th remains as fee+change
Iterate over each node for single key addresses, and then over each node for
multisig addresses.
Repeat test, but with explicit address_type parameters passed to getnewaddress
and getrawchangeaddress:
- node0 and node3 send to p2sh.
- node1 sends to bech32.
- node2 sends to legacy.
As every node sends coins after receiving, this also
verifies that spending coins sent to all these address types works.
## Change type test
Test that the nodes generate the correct change address type:
- node0 always uses a legacy change address.
- node1 uses a bech32 addresses for change if any destination address is bech32.
- node2 always uses a bech32 address for change
- node3 always uses a bech32 address for change
- node4 always uses p2sh/segwit output for change.
"""
from decimal import Decimal
import itertools
from test_framework.test_framework import XSNTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
sync_blocks,
sync_mempools,
)
class AddressTypeTest(XSNTestFramework):
    def set_test_params(self):
        """Configure the 6-node topology described in the module docstring.

        Nodes 0-4 are the nodes-under-test (legacy; p2sh-segwit;
        p2sh-segwit with bech32 change; bech32; default type with
        p2sh-segwit change); node 5 only generates blocks.
        """
        self.num_nodes = 6
        self.extra_args = [
            ["-addresstype=legacy"],
            ["-addresstype=p2sh-segwit"],
            ["-addresstype=p2sh-segwit", "-changetype=bech32"],
            ["-addresstype=bech32"],
            ["-changetype=p2sh-segwit"],
            []
        ]
    def setup_network(self):
        """Start all nodes and connect every pair of them directly."""
        self.setup_nodes()
        # Fully mesh-connect nodes for faster mempool sync
        for i, j in itertools.product(range(self.num_nodes), repeat=2):
            if i > j:
                connect_nodes_bi(self.nodes, i, j)
        self.sync_all()
def get_balances(self, confirmed=True):
"""Return a list of confirmed or unconfirmed balances."""
if confirmed:
return [self.nodes[i].getbalance() for i in range(4)]
else:
return [self.nodes[i].getunconfirmedbalance() for i in range(4)]
    def test_address(self, node, address, multisig, typ):
        """Run sanity checks on an address.

        Uses getaddressinfo on |node| to verify that |address| has the
        script/witness properties expected for its type ('legacy',
        'p2sh-segwit' or 'bech32'), covering both single-key and multisig
        addresses.
        """
        info = self.nodes[node].getaddressinfo(address)
        assert(self.nodes[node].validateaddress(address)['isvalid'])
        if not multisig and typ == 'legacy':
            # P2PKH
            assert(not info['isscript'])
            assert(not info['iswitness'])
            assert('pubkey' in info)
        elif not multisig and typ == 'p2sh-segwit':
            # P2SH-P2WPKH
            assert(info['isscript'])
            assert(not info['iswitness'])
            assert_equal(info['script'], 'witness_v0_keyhash')
            assert('pubkey' in info)
        elif not multisig and typ == 'bech32':
            # P2WPKH: a v0 witness program of 20 bytes (40 hex chars).
            assert(not info['isscript'])
            assert(info['iswitness'])
            assert_equal(info['witness_version'], 0)
            assert_equal(len(info['witness_program']), 40)
            assert('pubkey' in info)
        elif typ == 'legacy':
            # P2SH-multisig
            assert(info['isscript'])
            assert_equal(info['script'], 'multisig')
            assert(not info['iswitness'])
            assert('pubkeys' in info)
        elif typ == 'p2sh-segwit':
            # P2SH-P2WSH-multisig: the witness script is reported under
            # 'embedded', with a 32-byte (64 hex chars) v0 program.
            assert(info['isscript'])
            assert_equal(info['script'], 'witness_v0_scripthash')
            assert(not info['iswitness'])
            assert(info['embedded']['isscript'])
            assert_equal(info['embedded']['script'], 'multisig')
            assert(info['embedded']['iswitness'])
            assert_equal(info['embedded']['witness_version'], 0)
            assert_equal(len(info['embedded']['witness_program']), 64)
            assert('pubkeys' in info['embedded'])
        elif typ == 'bech32':
            # P2WSH-multisig: a v0 witness program of 32 bytes.
            assert(info['isscript'])
            assert_equal(info['script'], 'multisig')
            assert(info['iswitness'])
            assert_equal(info['witness_version'], 0)
            assert_equal(len(info['witness_program']), 64)
            assert('pubkeys' in info)
        else:
            # Unknown type
            assert(False)
def test_change_output_type(self, node_sender, destinations, expected_type):
txid = self.nodes[node_sender].sendmany(fromaccount="", amounts=dict.fromkeys(destinations, 0.001))
raw_tx = self.nodes[node_sender].getrawtransaction(txid)
tx = self.nodes[node_sender].decoderawtransaction(raw_tx)
# Make sure the transaction has change:
assert_equal(len(tx["vout"]), len(destinations) + 1)
# Make sure the destinations are included, and remove them:
output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]]
change_addresses = [d for d in output_addresses if d not in destinations]
assert_equal(len(change_addresses), 1)
self.log.debug("Check if change address " + change_addresses[0] + " is " + expected_type)
self.test_address(node_sender, change_addresses[0], multisig=False, typ=expected_type)
    def run_test(self):
        """Exercise address-type selection for receive and change addresses
        across nodes configured with different addresstype/changetype options."""
        # Mine 101 blocks on node5 to bring nodes out of IBD and make sure that
        # no coinbases are maturing for the nodes-under-test during the test
        self.nodes[5].generate(101)
        sync_blocks(self.nodes)
        uncompressed_1 = "0496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858ee"
        uncompressed_2 = "047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77"
        compressed_1 = "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"
        compressed_2 = "<KEY>"
        # addmultisigaddress with at least 1 uncompressed key should return a legacy address.
        for node in range(4):
            self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, uncompressed_2])['address'], True, 'legacy')
            self.test_address(node, self.nodes[node].addmultisigaddress(2, [compressed_1, uncompressed_2])['address'], True, 'legacy')
            self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, compressed_2])['address'], True, 'legacy')
        # addmultisigaddress with all compressed keys should return the appropriate address type (even when the keys are not ours).
        self.test_address(0, self.nodes[0].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'legacy')
        self.test_address(1, self.nodes[1].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit')
        self.test_address(2, self.nodes[2].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit')
        self.test_address(3, self.nodes[3].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'bech32')
        # Iterate every combination of (explicit address_type argument,
        # multisig-or-not, sending node) and check both the created receive
        # addresses and the resulting balances.
        for explicit_type, multisig, from_node in itertools.product([False, True], [False, True], range(4)):
            address_type = None
            if explicit_type and not multisig:
                if from_node == 1:
                    address_type = 'bech32'
                elif from_node == 0 or from_node == 3:
                    address_type = 'p2sh-segwit'
                else:
                    address_type = 'legacy'
            self.log.info("Sending from node {} ({}) with{} multisig using {}".format(from_node, self.extra_args[from_node], "" if multisig else "out", "default" if address_type is None else address_type))
            old_balances = self.get_balances()
            self.log.debug("Old balances are {}".format(old_balances))
            to_send = (old_balances[from_node] / 101).quantize(Decimal("0.00000001"))
            sends = {}
            self.log.debug("Prepare sends")
            # Send to every node (including self) with increasing amounts.
            for n, to_node in enumerate(range(from_node, from_node + 4)):
                to_node %= 4
                change = False
                if not multisig:
                    if from_node == to_node:
                        # When sending non-multisig to self, use getrawchangeaddress
                        address = self.nodes[to_node].getrawchangeaddress(address_type=address_type)
                        change = True
                    else:
                        address = self.nodes[to_node].getnewaddress(address_type=address_type)
                else:
                    addr1 = self.nodes[to_node].getnewaddress()
                    addr2 = self.nodes[to_node].getnewaddress()
                    address = self.nodes[to_node].addmultisigaddress(2, [addr1, addr2])['address']
                # Do some sanity checking on the created address
                if address_type is not None:
                    typ = address_type
                elif to_node == 0:
                    typ = 'legacy'
                elif to_node == 1 or (to_node == 2 and not change):
                    typ = 'p2sh-segwit'
                else:
                    typ = 'bech32'
                self.test_address(to_node, address, multisig, typ)
                # Output entry
                sends[address] = to_send * 10 * (1 + n)
            self.log.debug("Sending: {}".format(sends))
            self.nodes[from_node].sendmany("", sends)
            sync_mempools(self.nodes)
            unconf_balances = self.get_balances(False)
            self.log.debug("Check unconfirmed balances: {}".format(unconf_balances))
            assert_equal(unconf_balances[from_node], 0)
            for n, to_node in enumerate(range(from_node + 1, from_node + 4)):
                to_node %= 4
                assert_equal(unconf_balances[to_node], to_send * 10 * (2 + n))
            # node5 collects fee and block subsidy to keep accounting simple
            self.nodes[5].generate(1)
            sync_blocks(self.nodes)
            new_balances = self.get_balances()
            self.log.debug("Check new balances: {}".format(new_balances))
            # We don't know what fee was set, so we can only check bounds on the balance of the sending node
            assert_greater_than(new_balances[from_node], to_send * 10)
            assert_greater_than(to_send * 11, new_balances[from_node])
            for n, to_node in enumerate(range(from_node + 1, from_node + 4)):
                to_node %= 4
                assert_equal(new_balances[to_node], old_balances[to_node] + to_send * 10 * (2 + n))
        # Get one p2sh/segwit address from node2 and two bech32 addresses from node3:
        to_address_p2sh = self.nodes[2].getnewaddress()
        to_address_bech32_1 = self.nodes[3].getnewaddress()
        to_address_bech32_2 = self.nodes[3].getnewaddress()
        # Fund node 4:
        self.nodes[5].sendtoaddress(self.nodes[4].getnewaddress(), Decimal("1"))
        self.nodes[5].generate(1)
        sync_blocks(self.nodes)
        assert_equal(self.nodes[4].getbalance(), 1)
        self.log.info("Nodes with addresstype=legacy never use a P2WPKH change output")
        self.test_change_output_type(0, [to_address_bech32_1], 'legacy')
        self.log.info("Nodes with addresstype=p2sh-segwit only use a P2WPKH change output if any destination address is bech32:")
        self.test_change_output_type(1, [to_address_p2sh], 'p2sh-segwit')
        self.test_change_output_type(1, [to_address_bech32_1], 'bech32')
        self.test_change_output_type(1, [to_address_p2sh, to_address_bech32_1], 'bech32')
        self.test_change_output_type(1, [to_address_bech32_1, to_address_bech32_2], 'bech32')
        self.log.info("Nodes with change_type=bech32 always use a P2WPKH change output:")
        self.test_change_output_type(2, [to_address_bech32_1], 'bech32')
        self.test_change_output_type(2, [to_address_p2sh], 'bech32')
        self.log.info("Nodes with addresstype=bech32 always use a P2WPKH change output (unless changetype is set otherwise):")
        self.test_change_output_type(3, [to_address_bech32_1], 'bech32')
        self.test_change_output_type(3, [to_address_p2sh], 'bech32')
        self.log.info('getrawchangeaddress defaults to addresstype if -changetype is not set and argument is absent')
        self.test_address(3, self.nodes[3].getrawchangeaddress(), multisig=False, typ='bech32')
        self.log.info('getrawchangeaddress fails with invalid changetype argument')
        assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].getrawchangeaddress, 'bech23')
        self.log.info("Nodes with changetype=p2sh-segwit never use a P2WPKH change output")
        self.test_change_output_type(4, [to_address_bech32_1], 'p2sh-segwit')
        self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit')
        self.log.info("Except for getrawchangeaddress if specified:")
        self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit')
        self.test_address(4, self.nodes[4].getrawchangeaddress('bech32'), multisig=False, typ='bech32')
# Entry point: run the functional test when this file is executed directly.
if __name__ == '__main__':
    AddressTypeTest().main()
| StarcoderdataPython |
156706 |
import tensorflow as tf
from capsule.norm_layer import Norm
layers = tf.keras.layers
models = tf.keras.models
import matplotlib.pyplot as plt
class ReconstructionNetwork(tf.keras.Model):
    """Decoder MLP that reconstructs a flattened image from capsule outputs.

    Architecture: Flatten -> Dense(512, relu) -> Dense(1024, relu) ->
    Dense(out_dim * out_dim * img_dim, sigmoid).
    """

    def __init__(self, in_capsules, in_dim, name="", out_dim=28, img_dim=1):
        super(ReconstructionNetwork, self).__init__(name=name)
        # Kept for introspection; not used in the forward pass itself.
        self.in_capsules = in_capsules
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.y = None
        self.flatten = layers.Flatten()
        self.fc1 = layers.Dense(512, name="fc1", activation=tf.nn.relu)
        self.fc2 = layers.Dense(1024, name="fc2", activation=tf.nn.relu)
        self.fc3 = layers.Dense(out_dim * out_dim * img_dim, name="fc3", activation=tf.sigmoid)

    def call(self, x, y):
        # NOTE: `y` is accepted for interface compatibility but is not used here.
        out = self.flatten(x)
        for dense in (self.fc1, self.fc2, self.fc3):
            out = dense(out)
        return out
3379872 | from .fc import FC as FullyConnectedClassifier
from .rnn import RNNClassifier
from .bert_hf import BERT
__all__ = ["FullyConnectedClassifier", "RNNClassifier", "BERT"]
| StarcoderdataPython |
161110 | import os
import re
import logging
import pandas as pd
from googleapiclient.discovery import build
import yaml
# Silence the noisy discovery-cache warnings emitted by the Google API client.
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
# load personal websites
# NOTE(review): assumed to be a flat name -> URL mapping; confirm the shape
# of _data/websites.yml.
with open("_data/websites.yml", "r") as f:
    WEBSITES = yaml.load(f, Loader=yaml.BaseLoader)
def member_url(member):
    """Return *member* with the name wrapped in an anchor tag when a personal
    website is known, otherwise return the string unchanged.

    A member string may carry a parenthesised suffix, e.g. "Name (Chair)";
    the suffix is preserved outside the link.
    """
    name, *rest = member.split(" (")
    url = WEBSITES.get(name)
    if url is None:
        return member
    suffix = f' ({"".join(rest)}' if rest else ""
    return f'<a href="{url}">{name}</a>{suffix}'
# fetch Google Sheet for members data
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
COMMITTEES_SPREADSHEET_ID = os.environ["COMMITTEES_SPREADSHEET_ID"]
service = build("sheets", "v4", developerKey=GOOGLE_API_KEY)
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=COMMITTEES_SPREADSHEET_ID,
                            range='Sheet1').execute()
values = result.get('values', [])
# to dataframe
# Header cells look like "Committee Name Time"; the last word becomes the
# second level of the column MultiIndex.
columns = []
for col in values[0]:
    *name, time = col.split()
    columns.append((" ".join(name), time.capitalize()))
n_cols = len(columns)
columns = pd.MultiIndex.from_tuples(columns, names=["Committee", "Time"])
data = []
for row in values[1:]:
    n = len(row)
    # Empty strings from the sheet become None so dropna() removes them later.
    row = [x if x else None for x in row]
    # Right-pad short rows so every row has one cell per column.
    padded = row + [None for _ in range(n_cols - n)]
    data.append(padded)
df = pd.DataFrame(data, columns=columns)
# write yaml
# Build {committee: {time: [linked member, ...]}}, skipping empty committees.
content = {}
for committee in df.columns[1:].droplevel(1).drop_duplicates():
    content[committee] = {}
    for time in df[committee].columns:
        col = (committee, time)
        members = df[col].dropna().to_list()
        if members:
            content[committee][time] = [member_url(m) for m in members]
    if not content[committee]:
        content.pop(committee)
# Emit the YAML by hand to keep the exact formatting the site templates expect.
with open("_data/committees.yml", "w") as f:
    for committee, items in content.items():
        f.write(f"- committee: {committee}\n")
        f.write(f"  listing:\n")
        for time, members in items.items():
            f.write(f"    - time: {time}\n")
            f.write(f"      members: {members}\n")
151071 | from rest_framework.test import APITestCase
# Create your tests here.
class BaseTestCase(APITestCase):
    """Common base for the app's API test cases; add shared fixtures/helpers here."""
    pass
| StarcoderdataPython |
46697 | """
Django settings for webDe project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from django.urls import reverse_lazy
# Optional local overrides (LocalSetting is expected to be untracked);
# fall back to development defaults when the module is absent.
try:
    from .LocalSetting import *
    DEBUG = True
    ALLOWED_HOSTS = ['*']
except ImportError:
    # Narrowed from a bare `except:`: only a missing LocalSetting module is
    # expected here; any other error in LocalSetting should surface loudly.
    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = True
    ALLOWED_HOSTS = ['*']
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed; move it to an
# environment variable or LocalSetting before deploying.
SECRET_KEY =')z(66-zt@g3y_=mh(n(xs8!es%yi0f7aczob9m&m)xikyrx#*6'
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
#ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'apps.accounts',
    'apps.entradas',
    #'social_django',
    'taggit',
    'sslserver',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webDe.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # NOTE(review): these two require 'social_django' in
                # INSTALLED_APPS, which is currently commented out - confirm.
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
        },
    },
]
WSGI_APPLICATION = 'webDe.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticRoot')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL ='/media/'
#Social Media Login
AUTHENTICATION_BACKENDS = (
    # 'social_core.backends.amazon.AmazonOAuth2',
    # # 'social_core.backends.bitbucket.BitbucketOAuth',
    # 'social_core.backends.facebook.FacebookAppOAuth2',
    # 'social_core.backends.facebook.FacebookOAuth2',
    # 'social_core.backends.github.GithubOAuth2',
    # 'social_core.backends.gitlab.GitLabOAuth2',
    # 'social_core.backends.google.GoogleOAuth',
    # 'social_core.backends.google.GoogleOAuth2',
    # 'social_core.backends.google.GoogleOpenId',
    # 'social_core.backends.google.GooglePlusAuth',
    # 'social_core.backends.google_openidconnect.GoogleOpenIdConnect',
    # 'social_core.backends.instagram.InstagramOAuth2',
    # 'social_core.backends.linkedin.LinkedinOAuth',
    # 'social_core.backends.linkedin.LinkedinOAuth2',
    # 'social_core.backends.spotify.SpotifyOAuth2',
    # 'social_core.backends.trello.TrelloOAuth',
    # 'social_core.backends.tumblr.TumblrOAuth',
    # 'social_core.backends.twitter.TwitterOAuth',
    # 'social_core.backends.yahoo.YahooOAuth',
    # 'social_core.backends.yahoo.YahooOpenId',
    'django.contrib.auth.backends.ModelBackend',
)
#Para esto hay que tenerlo con https la redireccion
#SOCIAL_AUTH_FACEBOOK_KEY = FACEBOOK_KEY
#SOCIAL_AUTH_FACEBOOK_SECRET = FACEBOOK_SECRET
#para traer el email
#SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
#SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
#    'fields': 'id,name,email',
#}
#SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = GOOGLE_KEY
#SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = GOOGLE_SECRET
#SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
#SOCIAL_AUTH_PIPELINE = (
#revisar para implementar que nose repitan los email
#    'utilsSocialPipe.check_email_exists',
#    'social_core.pipeline.social_auth.social_details',
#    'social_core.pipeline.social_auth.social_uid',
#    'social_core.pipeline.social_auth.auth_allowed',
#    'social_core.pipeline.social_auth.social_user',
#    'social_core.pipeline.user.get_username',
#    'social_core.pipeline.mail.mail_validation',
#    'social_core.pipeline.user.create_user',
#    'social_core.pipeline.social_auth.associate_user',
#    'social_core.pipeline.debug.debug',
#    'social_core.pipeline.social_auth.load_extra_data',
#    'social_core.pipeline.user.user_details',
#    'social_core.pipeline.debug.debug'
#)
#Login opcions
#LOGIN_URL = '/login/'
LOGOUT_REDIRECT_URL = reverse_lazy('accounts:login')
LOGIN_REDIRECT_URL = reverse_lazy('entradas:index')
#SSL
SECURE_SSL_REDIRECT = True
CSRF_COOKIE_SECURE = True
#config file
FILE_UPLOAD_PERMISSIONS = 0o644
| StarcoderdataPython |
57220 | # copied from https://github.com/probml/pyprobml/blob/master/scripts/sgmcmc_nuts_demo.py
# Compare NUTS, SGLD and Adam on sampling from a multivariate Gaussian
from collections import namedtuple
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import jax.numpy as jnp
import optax
from blackjax import nuts, stan_warmup
from jax import jit, random, vmap
from jax.lax import scan
from jax.random import normal, split
from sgmcmcjax.optimizer import build_optax_optimizer
from sgmcmcjax.samplers import build_sgld_sampler
from .sgmcmc_utils import build_nuts_sampler
# We use the 'quickstart' example from
# https://github.com/jeremiecoullon/SGMCMCJax
def loglikelihood(theta, x):
    """Gaussian log-likelihood of a datum *x* given mean *theta*,
    up to an additive constant: -0.5 * ||x - theta||^2."""
    residual = x - theta
    return -0.5 * jnp.dot(residual, residual)
def logprior(theta):
    """Zero-mean isotropic Gaussian log-prior with precision 0.01,
    up to an additive constant."""
    sq_norm = jnp.dot(theta, theta)
    return -0.5 * sq_norm * 0.01
# generate dataset
# NOTE(review): the same PRNG key is reused for mu_true and X_data (and again
# for the samplers below) - presumably intentional for a demo, but confirm.
N, D = 1000, 100
key = random.PRNGKey(0)
mu_true = random.normal(key, (D,))
X_data = random.normal(key, shape=(N, D)) + mu_true
# Adam
# Optimize the log-posterior with minibatches of 10% of the data.
batch_size = int(0.1 * N)
opt = optax.adam(learning_rate=1e-2)
optimizer = build_optax_optimizer(opt, loglikelihood, logprior, (X_data,), batch_size)
Nsamples = 10_000
params, log_post_list = optimizer(key, Nsamples, jnp.zeros(D))
print(log_post_list.shape)
print(params.shape)
# The MAP estimate should land close to the true mean.
assert jnp.allclose(params, mu_true, atol=1e-1)
print("adam test passed")
# SGLD
# Stochastic gradient Langevin dynamics with a small fixed step size.
batch_size = int(0.1 * N)
dt = 1e-5
sampler = build_sgld_sampler(dt, loglikelihood, logprior, (X_data,), batch_size)
Nsamples = 10_000
samples = sampler(key, Nsamples, jnp.zeros(D))
print(samples.shape)
# Posterior mean of the samples should match the true mean.
mu_est = jnp.mean(samples, axis=0)
assert jnp.allclose(mu_est, mu_true, atol=1e-1)
print("sgld test passed")
# NUTS / blackjax
num_warmup = 500
sampler = build_nuts_sampler(num_warmup, loglikelihood, logprior, (X_data,))
Nsamples = 10_000
samples = sampler(key, Nsamples, jnp.zeros(D))
print(samples.shape)
mu_est = jnp.mean(samples, axis=0)
assert jnp.allclose(mu_est, mu_true, atol=1e-1)
print("nuts test passed")
132326 | <gh_stars>0
import cv2
import pandas as pd
import numpy as np
import argparse
#Creating argument parser to take image path from command line
ap = argparse.ArgumentParser()
ap.add_argument('-i','--image', required=True,help='Image Path')
args = vars(ap.parse_args())
img_path = args['image']
#Reading image with openCV
# NOTE(review): cv2.imread returns None for an unreadable path; there is no
# check here, so a bad path fails later at pixel access.
img = cv2.imread(img_path)
#declaring global variables (are used later on)
clicked = False
r = g = b = xpos = ypos = 0
#Reading CSV file with pandas and giving them to each colum
index = ["color","color_name","hex","R","G","B"]
csv = pd.read_csv('colors.csv',names=index,header=None)
#creating a draw draw_function
#It will create rgb values when we click
def draw_function(event, x, y, flags, param):
    """OpenCV mouse callback.

    On a double left-click, records the click position and the BGR value of
    the clicked pixel into the module-level globals and sets `clicked`.
    """
    global b, g, r, xpos, ypos, clicked
    if event != cv2.EVENT_LBUTTONDBLCLK:
        return
    clicked = True
    xpos, ypos = x, y
    # Image pixels are BGR; convert numpy scalars to plain ints.
    pixel = img[y, x]
    b, g, r = (int(channel) for channel in pixel)
#Calculating distance to get the color name
#distance formula is
# d = abs(Red – ithRedColor) + (Green – ithGreenColor) + (Blue – ithBlueColor)
def getColourName(R, G, B):
    """Return the name of the CSV colour closest to (R, G, B).

    Distance is the L1 (Manhattan) distance:
    d = |R - R_i| + |G - G_i| + |B - B_i|.
    """
    minimum = 10000
    cname = None
    for i in range(len(csv)):
        d = abs(R - int(csv.loc[i, "R"])) + abs(G - int(csv.loc[i, "G"])) + abs(B - int(csv.loc[i, "B"]))
        if d <= minimum:
            # Bug fix: was `minimun = d` (typo), so the running minimum never
            # updated and the function always returned the LAST csv row.
            minimum = d
            cname = csv.loc[i, "color_name"]
    return cname
#setting a mouse callback on a window
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_function)
#Displaying the images in the Window
# Event loop: redraw until ESC; after each double-click (flag set by the
# callback) overlay a swatch rectangle and the colour name + RGB text.
while(1):
    cv2.imshow("image",img)
    if(clicked):
        #cv2.rectangle(image, startpoint, endpoint, color, thickness) -1 thickness fills rectangle entirely
        cv2.rectangle(img,(20,20),(750,60),(b,g,r),-1)
        #Creating text string to display ( Color name and RGB values )
        text = getColourName(r,g,b) + 'R='+ str(r) + 'G='+ str(g) + 'B='+ str(b)
        #cv2.putText(img,text,start,font(0-7), fontScale, color, thickness, lineType, (optional bottomLeft bool) )
        cv2.putText(img,text,(50,50),2,0.8,(255,255,255),2,cv2.LINE_AA)
        #For very light colors we will display text in black color
        if(r+g+b>=600):
            cv2.putText(img,text,(50,50),2,0.8,(0,0,0),2,cv2.LINE_AA)
        clicked = False
    #Breaking the loop when user hits 'ESC' key
    if cv2.waitKey(20) & 0xFF ==27:
        break
cv2.destroyAllWindows()
| StarcoderdataPython |
183244 | """Utils
"""
import json
__all__ = ['MyException', 'nedb_parser', 'python_data_to_lua_table']
LUA_INDENT = ' ' * 4
class MyException(Exception):
    """Generic project error.

    Raised instead of returning error codes so callers don't have to check
    every return value. The message is stored both in the standard Exception
    args and as a ``message`` attribute, and ``str()`` renders the bare
    message.
    """

    def __init__(self, msg):
        super().__init__(msg)
        self.message = msg

    def __str__(self):
        return self.message
def nedb_parser(nedb):
    """Parse a NeDB datastore file (one JSON object per line).

    Returns a dict mapping each record's 'id' field to the record itself.
    Raises MyException if any line is valid JSON but not an object.
    """
    result = {}
    print('Get raw data from {}'.format(nedb))
    with open(nedb, 'r', encoding='utf-8') as nedb_f:
        for line_num, line in enumerate(nedb_f, start=1):
            record = json.loads(line)
            if not isinstance(record, dict):
                raise MyException(
                    'Not a python dict, line number {}'.format(line_num))
            result[record['id']] = record
    print('Loaded {} datas from {}'.format(len(result), nedb))
    return result
def python_data_to_lua_table(data, level=0, indent=LUA_INDENT):
    """Generate Lua string via python structure
    only dict, list, string, number, bool are allowed
    :param data: data to parse
    :param level: indent level
    :param indent: indent characters
    :return: tuple (lua string in this level and
             whether it's a list only containing numbers and strings)
    """
    lines = []
    if isinstance(data, list):
        all_elements_is_pure_data = True
        for i in data:
            if isinstance(i, dict):
                line, _ = python_data_to_lua_table(i, level+1, indent)
                lines.append(level*indent + '{\n'
                             + line + '\n' + level*indent + '}')
                all_elements_is_pure_data = False
            elif isinstance(i, list):
                line, pure_data_in_next_level = \
                    python_data_to_lua_table(i, level+1, indent)
                if pure_data_in_next_level:
                    # A list of plain scalars is rendered inline: {1, 2, 3}
                    lines.append(level*indent + '{' + line + '}')
                else:
                    lines.append(level*indent + '{\n'
                                 + line + '\n' + level*indent + '}')
                all_elements_is_pure_data = False
            elif isinstance(i, bool):
                # this must before int case (bool is a subclass of int)
                lines.append(level*indent + '{}'.format(
                    'true' if i else 'false'))
            elif isinstance(i, int):
                lines.append(level*indent + str(i))
            elif isinstance(i, str):
                lines.append(level*indent + '"{}"'.format(i))
            else:
                # Bug fix: `type(i)` must be wrapped in str() before string
                # concatenation, otherwise this raise itself TypeErrors.
                raise MyException('Unsupported data\n' + str(i) + '\n'
                                  + 'with type:' + str(type(i)))
        if all_elements_is_pure_data:
            # All elements in list is pure data, not list or dict
            return ', '.join([i.strip() for i in lines]), True
        return ',\n'.join(lines), False
    if isinstance(data, dict):
        for i in data:
            if isinstance(data[i], dict):
                line, _ = python_data_to_lua_table(data[i], level+1, indent)
                lines.append(level*indent + '["{}"] = {{\n'.format(i)
                             + line + '\n' + level*indent + '}')
            elif isinstance(data[i], list):
                line, pure_data_in_next_level = \
                    python_data_to_lua_table(data[i], level+1, indent)
                if pure_data_in_next_level:
                    lines.append(level*indent + '["{}"] = {{'.format(i)
                                 + line + '}')
                else:
                    lines.append(level*indent + '["{}"] = {{\n'.format(i)
                                 + line + '\n' + level*indent + '}')
            elif isinstance(data[i], bool):
                # this must before int case (bool is a subclass of int)
                lines.append(level*indent + '["{}"] = {}'.format(i,
                             'true' if data[i] else 'false'))
            elif isinstance(data[i], int):
                lines.append(level*indent + '["{}"] = {}'.format(i, data[i]))
            elif isinstance(data[i], str):
                lines.append(level*indent + '["{}"] = "{}"'.format(i, data[i]))
            else:
                raise MyException('Unsupported data\n' + str(data[i]) + '\n'
                                  + 'with type:' + str(type(data[i])))
        return ',\n'.join(lines), False
    else:
        # Bug fix: same missing str() around type(data) as above.
        raise MyException('Unsupported data\n' + str(data) + '\n'
                          + 'with type:' + str(type(data)))
96918 | <gh_stars>100-1000
from __future__ import print_function
import re
import os
import sys
import argparse
import collections
import subprocess
import json
from poline.utilfuncs import *
from poline.fields import Fields
from itertools import islice
from operator import itemgetter, attrgetter
if sys.version_info >= (3,0):
from urllib.parse import urlparse
else:
from urlparse import urlparse
from pprint import pprint, pformat
if sys.version_info >= (3,5):
_collections_Generator = collections.Generator
else:
from poline import _com_collections
_collections_Generator = _com_collections.Generator
# Single-letter boolean aliases, available inside user-supplied command-line
# expressions evaluated by main().
T = True
F = False
def _len(value):
if isinstance(value, _collections_Generator):
return sum(1 for x in value)
else:
return len(value)
def _stdin(args):
    """Yield lines from stdin, split according to the CLI flags.

    With -F/--separator each stripped line is split on that separator; with
    -s/--split it is whitespace-split; otherwise the stripped line is yielded
    as a plain string. Split lines are wrapped in Fields. Closes stdin when
    input is exhausted.
    """
    for raw_line in sys.stdin:
        stripped = raw_line.strip()
        if args.separator is not None:
            yield Fields(stripped.split(args.separator))
        elif args.split:
            yield Fields(stripped.split())
        else:
            yield stripped
    sys.stdin.close()
# Hello old friends
_shell_commands= ['cp', 'df', 'docker', 'du', 'find', 'grep', 'git', 'history',
'ln', 'ls', 'lsof', 'mv', 'netstat', 'nmcli', 'ps', 'rm',
'stat', 'whois']
for _shell_command in _shell_commands:
exec ("""{funcname} = lambda *args, **kwargs: sh(['{funcname}']+list(args), **kwargs)""".format(funcname=_shell_command))
def main(argv=None):
    """Run the poline pipeline: evaluate each CLI expression over stdin.

    Expressions prefixed with '|' (or '%SEP%') are applied per line with the
    split fields bound to __, _0, _1, ...; a ':vars:' prefix builds an
    implicit comprehension over the previous result; otherwise the expression
    is evaluated once with '_' bound to the previous result.

    When *argv* is given (unit tests) the final result is returned instead of
    printed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('expression', nargs='+', help="python expression")
    parser.add_argument('-F', '--separator', default=None, help="split each line by SEPARATOR")
    parser.add_argument('-s', '--split', const=True, default=False, action='store_const', help="split each line")
    parser.add_argument('-q', '--quiet', const=True, default=False, action='store_const',
                        help="don't implicitly print results")
    if argv is not None:
        args = parser.parse_args(argv)
    else:
        args = parser.parse_args()
    result = _stdin(args)
    for expression in args.expression:
        separator = None
        new_result = []
        if expression.startswith('|') or expression.startswith('%'):
            if expression.startswith('%'):
                # '%SEP%expr': split each line on SEP instead of whitespace.
                expression = expression[1:]
                exp_parts = expression.split('%')
                separator = exp_parts[0]
                expression = '%'.join(exp_parts[1:])
            else:
                expression = expression[1:]
            for result_line in result:
                if separator:
                    result_parts = Fields(result_line.split(separator))
                else:
                    result_parts = Fields(result_line.split())
                invars = {
                    '_': result,
                    '__': result_parts,
                    '__str': result_line,
                    'len': _len,
                }
                # Bind _0, _1, ... positional field names; a few extra past
                # the end (presumably Fields tolerates out-of-range indexing
                # - confirm against fields.py).
                for result_pard_idx in range(len(result_parts)+10):
                    invars['_{}'.format(result_pard_idx)] = result_parts[result_pard_idx]
                new_result += [eval('(%s)' % expression, globals(), invars)]
            result = new_result
        elif expression.startswith(':'):
            # ':vars:expr' becomes the generator '(expr for (vars) in _)'.
            invars = {
                '_': result,
                'len': _len,
            }
            expression = expression[1:]
            exp_parts = expression.split(':')
            tuples = exp_parts[0]
            expression = '{} {}'.format(':'.join(exp_parts[1:]), 'for ({}) in _'.format(tuples))
            result = eval('(%s)' % expression, globals(), invars)
        else:
            invars = {
                '_': result,
                'len': _len,
            }
            result = eval('(%s)' % expression, globals(), invars)
    #argv is not None when we're calling this from a unit test
    if argv is not None:
        return result
    if not args.quiet:
        # Implicitly print the final result: iterables line by line, with
        # list/tuple elements space-joined.
        if isinstance(result, (list, _collections_Generator)):
            for line in result:
                if isinstance(line, (list, tuple)):
                    print(*line)
                else:
                    print(line)
        else:
            print(result)
# Entry point for direct execution (the installed console script calls main()).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3238148 | import pandas as pd
import json
import numpy as np
import yfinance as yf
import datetime
import time
# Tickers excluded from trading elsewhere in the project.
# NOTE(review): not referenced in this script - confirm it is still needed.
BLACKLIST = ['IBTB','FDM']
if input('Override Close Wait?') == 'n':
    print("Waiting for market close...")
    # Poll once a minute until the local time (as HHMM) is in [13:00, 15:00].
    while True:
        now = datetime.datetime.now()
        current_time = int(now.strftime("%H%M"))
        print(current_time)
        if 1300 <= current_time <= 1500:
            print('Start time: {}'.format(current_time))
            break
        time.sleep(60)
    print("Market has closed. Beginning program...")
# Load today's tracked stocks. Fixed: the file handle was previously opened
# with open() and never closed; `with` guarantees it is released.
with open('/Users/oceanhawk/Documents/Python/Stock-Trading-Bots/Version-1/json/stocks.json') as f:
    data = json.load(f)
lst = []
for obj in data:
    lst.append([obj['Ticker'],
                obj['Strength'],
                obj['Trend'][0],
                obj['Open Close Average']])
# Append each ticker's actual open->close percentage change for the day.
for obj in lst:
    ticker = yf.Ticker(obj[0])
    df = ticker.history(period='1d', interval = '1d')
    Open = df['Open'][0]
    Close = df['Close'][0]
    pDiff = (Close/Open)-1
    obj.append(pDiff)
    print("Percent difference: {}, Ticker: {}".format(pDiff,obj[0]))
_columns = ['Ticker', 'Strength','Trend', 'OC_Average', 'Actual_Increase']
df = pd.DataFrame(data=lst,columns = _columns)
file = "/Users/oceanhawk/Documents/Python/Stock-Trading-Bots/Version-1/DataGathering/Open_Close/Trainers/data.csv"
p_df = pd.read_csv(file)
# Prepend today's rows to the accumulated training data and rewrite the CSV.
frames = pd.concat([df,p_df], ignore_index = True)
#if input("Update file? (y/n)") == 'n':
    #quit()
frames.to_csv(file, mode='w',sep=';')
| StarcoderdataPython |
3269368 | import unittest
import pyperclip
from accounts import User
from accounts import Credentials
class TestAccounts(unittest.TestCase):
    """
    Test class that defines test cases for the User class behaviours.
    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    """
    def setUp(self):
        """
        set up to run before each test case.
        """
        self.new_user = User("Nevill","Oronni","<EMAIL>","speeds01")
    def tearDown(self):
        """
        method that cleans up after each test has run.
        """
        User.user_list = []
    def test_init(self):
        """
        test_init test case to test if the object is initialized properly
        """
        self.assertEqual(self.new_user.first_name,"Nevill")
        self.assertEqual(self.new_user.last_name,"Oronni")
        self.assertEqual(self.new_user.email,"<EMAIL>")
        # Fixed: this asserted the placeholder "<PASSWORD>" while setUp
        # creates the user with "speeds01", so the test could never pass.
        self.assertEqual(self.new_user.password,"speeds01")
    def test_save_user(self):
        """
        test_save_user test case to test if the new user object is saved into the user list
        """
        self.new_user.save_user()
        self.assertEqual(len(User.user_list),1)
    def test_save_user_many_times(self):
        """
        test_save_user_many_times sees if we can save many users to our list
        """
        self.new_user.save_user()
        test_user = User("Steven","Gerrard","<EMAIL>","gerrard01")
        test_user.save_user()
        self.assertEqual(len(User.user_list),2)
    def test_delete_user(self):
        """
        method tests whether we can delete a user account from our user list
        """
        self.new_user.save_user()
        test_user = User("Steven","Gerrard","<EMAIL>ven<EMAIL>","gerrard01")
        test_user.save_user()
        self.new_user.delete_user()
        self.assertEqual(len(User.user_list),1)
    def test_get_user(self):
        """
        test to check if you can find a user by their email and password
        """
        self.new_user.save_user()
        test_user = User("Steven","Gerrard","<EMAIL>","gerrard01")
        test_user.save_user()
        login_user = User.find_by("<EMAIL>","gerrard01")
        self.assertEqual(login_user.email,test_user.email)
    def test_user_exists(self):
        """
        test to see if a user account exists
        """
        self.new_user.save_user()
        test_user = User("Steven","Gerrard","<EMAIL>","gerrard01")
        test_user.save_user()
        user_exists = User.user_exist("<EMAIL>","gerrard01")
        self.assertTrue(user_exists)
    def test_list_all_users(self):
        """
        returns a list of all the users saved in the app
        """
        self.assertEqual(User.list_users(),User.user_list)
#subclass inherits methods and variables from parent class
class TestCredentials(unittest.TestCase):
    """
    Test case that defines test cases for the Credentials class behaviours.
    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    """
    def setUp(self):
        """
        set up method to run before each test case.
        """
        self.new_credential = Credentials("Facebook","social media account","space\11")
    def tearDown(self):
        """
        cleans/refreshes after each test case runs
        """
        Credentials.credential_list = []
    def test_init(self):
        """
        this tests if the object is initialized properly
        """
        self.assertEqual(self.new_credential.account_name,"Facebook")
        self.assertEqual(self.new_credential.description,"social media account")
        # Fixed: this asserted the placeholder "<PASSWORD>" while setUp uses
        # "space\11", so the test could never pass.
        self.assertEqual(self.new_credential.password,"space\11")
    def test_save_credential(self):
        """
        this test method tests whether we can save a new credential account
        """
        self.new_credential.save_credential() #save the credential
        self.assertEqual(len(Credentials.credential_list),1)
    def test_save_many_credentials(self):
        """
        test method to check if we can save multiple credentials
        """
        self.new_credential.save_credential()
        test_credential = Credentials("Twitter","instant messenger","2222")
        test_credential.save_credential()
        self.assertEqual(len(Credentials.credential_list),2)
    def test_delete_credential(self):
        """
        method that deletes a user credentials account from the list
        """
        self.new_credential.save_credential()
        test_credential = Credentials("Twitter","instant messenger","2222")
        test_credential.save_credential()
        self.new_credential.delete_credential() #deletes a credential
        self.assertEqual(len(Credentials.credential_list),1)
    def test_find_credential(self):
        """
        method to test whether we can find a specific credential in our credential list
        """
        self.new_credential.save_credential()
        test_credential = Credentials("Twitter","instant messenger","2222")
        test_credential.save_credential()
        find_credential = Credentials.find_by_name("Twitter")
        self.assertEqual(find_credential.description,test_credential.description)
    def test_credential_exists(self):
        """
        test to see if a credential is in the credentials list
        """
        self.new_credential.save_credential()
        test_credential = Credentials("Twitter","instant messenger","2222")
        test_credential.save_credential()
        credential_exist = Credentials.credential_check("Twitter")
        self.assertTrue(credential_exist)
    def test_show_credentials(self):
        """
        method will show a list of the credentials
        """
        self.assertEqual(Credentials.show_credentials(),Credentials.credential_list)
    #def test_copy_password(self):
        #"""
        #test to see if I can copy my credentials to the clipboard
        #"""
        #self.new_credential.save_credential()
        #Credentials.copy_password("<PASSWORD>")
        #self.assertEqual(self.new_credential.password,pyperclip.paste())
    def test_generate_random_password(self):
        '''
        test to see if method can auto generate passwords
        '''
        # Fixed: the original line was garbled to
        # `generate_random_password = <PASSWORD>.new_credential...`,
        # which is a syntax error; it should call the method on the fixture.
        generated = self.new_credential.generate_random_password()
        self.assertEqual(len(generated),8)
# When this file is run directly, unittest discovers and executes all the
# test cases defined in this module.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| StarcoderdataPython |
3270435 | <gh_stars>1-10
import logging
import re
import time
from collections import deque
from datetime import datetime
from operator import itemgetter
import pyrclib
from pyrclib.channels import Channel
from pyrclib.connection import IRCConnection
from pyrclib.events import EventDispatcher
from pyrclib.user import User
class IRCBot(IRCConnection):
    """An IRC bot built on IRCConnection: tracks channels and users, parses
    server protocol replies (RPL_ISUPPORT etc.) and answers common CTCP
    queries with configurable canned replies."""
    def __init__(self, nick, user, realname):
        # NOTE(review): configuring the root logger inside a library
        # constructor affects the whole process — confirm this is intended.
        logging.basicConfig(
            format='%(asctime)s [%(levelname)8s] %(name)12s - %(message)s',
            level=logging.INFO,
        )
        IRCConnection.__init__(self, nick, user, realname)
        self.version = pyrclib.__version__
        # Outgoing delay; unit is not evident from this file — presumably
        # milliseconds, verify against IRCConnection/sender usage.
        self.delay = 1000
        self.dispatcher = EventDispatcher(self)
        # Server capabilities parsed from RPL_ISUPPORT (raw numeric 005).
        self.protocol = {}
        # Used to queue /WHO requests
        self.pending_who = deque()
        # User and channel lists
        self.channels = {}
        self.users = {}
        # Canned reply strings for the CTCP handlers below.
        self.reply_clientinfo = 'CLIENTINFO FINGER PING SOURCE TIME ' \
                                'USERINFO VERSION'
        self.reply_finger = 'Don\'t finger me, pervert!'
        self.reply_source = 'https://github.com/martinsileno/pyrclib'
        self.reply_userinfo = ''
        self.reply_version = 'pyrclib v%s' % self.version
    def line_received(self, line):
        """Called on every line received from the server.
        This method must not be overridden.
        """
        # Server keepalive: answer PINGs immediately, bypassing the
        # event dispatcher entirely.
        if line.startswith('PING '):
            self.on_serverping()
            return
        # Everything else is routed through the event dispatcher.
        self.dispatcher.dispatch(line)
# ==========================================================================
# Raw events
# Raw numerics documentation from: http://www.mirc.net/raws/
# ==========================================================================
def raw_005(self, *params):
"""Parse RPL_ISUPPORT (numeric 005) to understand this IRCd's protocol
implementation. Otherwise, our client would fail to interpret server
replies or modes changes.
Referenced document: http://www.irc.org/tech_docs/005.html
"""
for par in params[:-1]:
param, sep, value = par.partition('=')
if param == 'PREFIX':
# A list of channel modes a person can get and the respective
# prefix a channel or nickname will get in case the person
# has it.
# The order of the modes goes from most powerful to least
# powerful.
# Those prefixes are shown in the output of the WHOIS, WHO and
# NAMES command.
regex = re.compile('\((\w+)\)(.*)')
r = regex.search(value)
modes, symbols = r.groups()
zipped = zip(modes, symbols)
self.protocol['prefixes'] = list(zipped)
elif param == 'CHANTYPES':
# The supported channel prefixes.
self.protocol['chantypes'] = value
elif param == 'CHANMODES':
# This is a list of channel modes according to 4 types.
# modes_target
# Mode that adds or removes a nick or address to a list.
# Always has a parameter.
# modes_param
# Mode that changes a setting and always has a parameter.
# modes_setparam
# Mode that changes a setting and only has a parameter
# when set.
# modes_noparam
# Mode that changes a setting and never has a parameter.
(self.protocol['modes_target'], self.protocol['modes_param'],
self.protocol['modes_setparam'],
self.protocol['modes_noparam']) = value.split(',')
elif param == 'MODES':
# Maximum number of channel modes with parameter allowed per
# MODE command.
self.protocol['maxmodes'] = int(value)
elif param == 'NICKLEN':
# Maximum nickname length.
self.protocol['maxnicklength'] = int(value)
elif param == 'NETWORK':
# The IRC network name.
self.network = value
elif param == 'EXCEPTS':
# The server support ban exceptions (e mode).
# See RFC 2811 for more information.
self.protocol['supports_excepts'] = True
elif param == 'INVEX':
# The server support invite exceptions (+I mode).
# See RFC 2811 for more information.
self.protocol['supports_invex'] = True
elif param == 'WALLCHOPS':
# The server supports messaging channel operators
self.protocol['wallchops'] = True
elif param == 'WALLVOICES':
# Notice to +#channel goes to all voiced persons.
self.protocol['wallvoices'] = True
elif param == 'STATUSMSG':
# The server supports messaging channel member
# who have a certain status or higher.
# The status is one of the letters from PREFIX.
self.protocol['statusmsg'] = value
elif param == 'CASEMAPPING':
# Case mapping used for nick- and channel name comparing.
self.protocol['casemapping'] = value
elif param == 'ELIST':
# The server supports extentions for the LIST command.
# The tokens specify which extention are supported.
self.protocol['elist'] = value
elif param == 'TOPICLEN':
# Maximum topic length.
self.protocol['topiclen'] = int(value)
elif param == 'KICKLEN':
# Maximum kick comment length.
self.protocol['kicklen'] = int(value)
elif param == 'CHANNELLEN':
# Maximum channel name length.
self.protocol['channellen'] = int(value)
elif param == 'SILENCE':
# The server support the SILENCE command.
# The number is the maximum number of allowed entries in the
# list.
self.protocol['max_silencelist'] = int(value)
elif param == 'RFC2812':
# Server supports RFC 2812 features.
self.protocol['rfc2812'] = True
elif param == 'PENALTY':
# Server gives extra penalty to some commands instead of the
# normal
# 2 seconds per message and 1 second for every 120 bytes in a
# message.
self.protocol['penalty'] = True
elif param == 'FNC':
# Forced nick changes: The server may change the nickname
# without the client sending a NICK message.
self.protocol['fnc'] = True
elif param == 'SAFELIST':
# The LIST is sent in multiple iterations so send queue won't
# fill and kill the client connection.
self.protocol['safelist'] = True
elif param == 'AWAYLEN':
# The max length of an away message.
self.protocol['awaylen'] = int(value)
elif param == 'USERIP':
# The USERIP command exists.
self.protocol['userip'] = True
elif param == 'CPRIVMSG':
# The CPRIVMSG command exists, used for mass messaging people in
# specified channel (CPRIVMSG channel nick,nick2,... :text)
self.protocol['cprivmsg'] = True
elif param == 'CNOTICE':
# The CNOTICE command exists, just like CPRIVMSG.
self.protocol['cnotice'] = True
elif param == 'MAXNICKLEN':
# Maximum length of nicks the server will send to the client?
self.protocol['maxnicklen'] = int(value)
elif param == 'MAXTARGETS':
# Maximum targets allowed for PRIVMSG and NOTICE commands.
self.protocol['maxtargets'] = int(value)
elif param == 'KNOCK':
# The KNOCK command exists.
self.protocol['knock'] = True
elif param == 'WHOX':
# The WHO command uses WHOX protocol.
self.protocol['whox'] = True
elif param == 'CALLERID':
# The server supports server side ignores via the +g user mode.
self.protocol['callerid'] = True
    def raw_315(self, channel, endofwho):
        """This is sent at the end of a WHO request.

        /WHO requests are serialized through self.pending_who (see
        request_who): when one completes, the next queued one is sent.
        """
        self.pending_who.popleft() # Oldest /WHO done, remove it
        if len(self.pending_who) > 0:
            # Other /WHO request(s) waiting, send the oldest one
            self.sender.raw_line('WHO {0}'.format(self.pending_who[0]))
    def raw_324(self, channel, modes, args=None):
        """This is returned for a MODE request.
        """
        # TEMPORARY: we are currently ignoring modes parameters!
        # Drops the first character of the mode string (presumably the
        # leading '+' sign — TODO confirm against server output).
        self.channels[channel].modes = modes[1:]
    def raw_329(self, channel, time):
        """This is returned as part of a MODE request,
        giving you the time the channel was created.
        """
        # The server sends a unix timestamp string.
        self.channels[channel].creationdate = datetime.fromtimestamp(
            float(time))
    def raw_331(self, channel, msg):
        """This is returned for a TOPIC request if the channel has no current topic.
        On JOIN, if a channel has no topic, this is not returned.
        Instead, no topic-related replies are returned.
        """
        self.channels[channel].topic.reset()
    def raw_332(self, channel, topic):
        """This is returned for a TOPIC request or when you JOIN,
        if the channel has a topic.
        """
        self.channels[channel].topic.text = topic
def raw_333(self, channel, nick, time):
"""This is returned for a TOPIC request or when you JOIN,
if the channel has a topic.
"""
if '!' and '@' in nick:
# sometimes it's just a nick and not a full nick!user@host
self.channels[channel].topic.set_by = User.from_mask(nick)
self.channels[channel].topic.date = datetime.fromtimestamp(float(time))
    def raw_352(self, chan, ident, host, server, nick, status, hopsname):
        """This is returned by a WHO request, one line for each user that is
        matched.
        """
        # The last field is "<hopcount> <realname>"; split off the hop count.
        hops, realname = hopsname.split(' ', 1)
        self.users[nick].ident = ident
        self.users[nick].host = host
        self.users[nick].realname = realname
        # TODO: parse flags:
        # - away status
        # - ircop status
    def raw_353(self, bla, channel, names):
        """This is returned for a NAMES request for a channel, or when you
        initially join a channel.
        It contains a list of every user on the channel.
        """
        # Status symbols (e.g. '@', '+') advertised by the server in the
        # 005 PREFIX token, parsed earlier by raw_005.
        prefixes = list(map(itemgetter(1), self.protocol['prefixes']))
        for name in names.split(' '):
            if name[0] not in prefixes:
                mode = ''
            else:
                # Collect every leading prefix symbol, then strip them
                # from the nick itself.
                mode = name[0]
                st = 1
                for c in name[1:]:
                    if c in prefixes:
                        mode += c
                        st += 1
                    else:
                        break
                name = name[st:]
            self.users[name] = User(name)
            self.channels[channel].users[name] = mode
    def raw_366(self, channel, msg):
        """This is returned at the end of a NAMES list, after all
        visible names are returned.
        """
        pass
    def raw_unknown(self, numeric, *args):
        # Fallback for numerics that have no dedicated raw_NNN handler.
        pass
# ==========================================================================
# Private events
#
# ==========================================================================
    def _pre_join(self, user, channel):
        """Adds the user to the channel user list.
        If we are joining a channel, add this channel to ours, send a MODE
        request to get the channel modes and a WHO request to get ident/host
        of unknown users.
        """
        self.users[user.nick] = user
        if user.nick == self.nick:
            # It's us joining: start tracking the channel and sync its state.
            self.channels[channel] = Channel(channel)
            self.sender.raw_line('MODE {0}'.format(channel))
            self.request_who(channel)
        # Every joiner starts with no prefix symbols in the channel.
        self.channels[channel].users[user.nick] = ''
        self.on_join(user, channel)
    def _pre_part(self, user, channel, reason=None):
        """Removes the user from the channel user list.
        """
        if user.nick == self.nick:
            # We left: forget the whole channel.
            del self.channels[channel]
        else:
            del self.channels[channel].users[user.nick]
            # remove him from global user list only if we don't share any chan.
            if self.get_comchans(user.nick) == []:
                del self.users[user.nick]
        self.on_part(user, channel, reason)
    def _pre_nick(self, user, newnick):
        """Changes a user's nick.
        """
        oldnick = user.nick
        if oldnick == self.nick:
            # It's our own nick that changed.
            self.nick = newnick
        for chan in self.get_comchans(user.nick):
            chan.renameuser(oldnick, newnick)
        # Re-key the global user table under the new nick.
        del self.users[oldnick]
        user.nick = newnick
        self.users[newnick] = user
        self.on_nickchange(oldnick, newnick)
    def _pre_kick(self, sender, channel, nick, reason=None):
        """Removes a user from a channel's user list when he gets kicked.
        """
        del self.channels[channel].users[nick]
        # Drop from the global list when no shared channels remain.
        if self.get_comchans(nick) == []:
            del self.users[nick]
        self.on_kick(sender, nick, channel, reason)
    def _pre_kill(self, killer, victim, message):
        """We were killed by someone, disconnect.
        """
        if victim == self.nick: # is this really needed?
            self.receiver.disconnect()
        self.on_kill(killer, victim, message)
    def _pre_quit(self, user, reason=None):
        """Removes a user from list when he quits.
        """
        nick = user.nick
        del self.users[nick]
        for chan in self.get_comchans(user.nick):
            del chan.users[nick]
        self.on_quit(user, reason)
    def _pre_topic(self, sender, channel, newtopic):
        """Topic tracking.
        """
        self.channels[channel].topic.text = newtopic
        self.channels[channel].topic.set_by = sender
        self.channels[channel].topic.date = datetime.now()
        self.on_topicchange(sender, channel, newtopic)
    def _set_prefix(self, channel, m, target):
        """Adds a prefix (like op, voice etc.) to a user in a channel.
        """
        # Map the mode letter (e.g. 'o') to its display symbol (e.g. '@').
        s = dict(self.protocol['prefixes'])[m]
        self.channels[channel].users[target] += s
    def _unset_prefix(self, channel, m, target):
        """Removes a prefix (like op, voice etc.) from a user in a channel.
        """
        s = dict(self.protocol['prefixes'])[m]
        current = self.channels[channel].users[target]
        self.channels[channel].users[target] = current.replace(s, '')
    def _pre_set_mode(self, user, channel, mode, target=None):
        """Syncs channels modes with mode changes.
        """
        # Prefix modes (op/voice/...) are tracked per-user, not on the channel.
        if mode not in map(itemgetter(0), self.protocol['prefixes']):
            self.channels[channel].modes += mode
        self.on_set_mode(user, channel, mode, target)
    def _pre_unset_mode(self, user, channel, mode, target=None):
        """Syncs channels modes with mode changes.
        """
        if mode not in map(itemgetter(0), self.protocol['prefixes']):
            current = self.channels[channel].modes
            self.channels[channel].modes = current.replace(mode, '')
        self.on_unset_mode(user, channel, mode, target)
# ==========================================================================
# Public events
#
# ==========================================================================
    def on_disconnect(self):
        """Called on disconnection from a server, can be overridden as required.
        """
        pass
    def on_serverping(self):
        """Called on a PING request from the IRC server.
        Shouldn't be overridden...
        """
        # Standard keepalive answer.
        self.sender.raw_line('PONG ' + self.nick)
    def on_privmsg(self, sender, channel, message):
        """Called when a message is received.
        DON'T USE, USE on_channel_message or on_private_message
        """
        pass
    def on_channel_message(self, sender, channel, message):
        """Called when a channel message is received.
        """
        pass
    def on_private_message(self, sender, message):
        """Called when a private message is received.
        """
        pass
    def on_action(self, sender, channel, message):
        """Called when an action is received (/me does something).
        """
        pass
    def on_join(self, user, channel):
        """Called when someone (our bot included) joins a channel.
        """
        pass
    def on_part(self, user, channel, reason=None):
        """Called when someone (our bot included) parts from a channel.
        """
        pass
    def on_nickchange(self, oldnick, newnick):
        """Called when someone (our bot included) changes nick.
        """
        pass
    def on_notice(self, sender, target, message):
        """Called when a notice is received. Target can be a channel or our bot's nick.
        """
        pass
    def on_quit(self, user, reason):
        """Called when someone (our bot included) quits from IRC.
        """
        pass
    def on_kick(self, sender, target, channel, reason):
        """Called when someone (our bot included) gets kicked from a channel.
        """
        pass
    def on_topicchange(self, sender, channel, newtopic):
        """Called when someone changes channel topic.
        """
        pass
    def on_invite(self, sender, target, channel):
        """Called when someone is invited to a channel.
        """
        pass
    def on_kill(self, killer, victim, message):
        """Some evil user killed us.
        """
        pass
    def on_set_mode(self, user, channel, mode, target=None):
        """Called when a mode is set.
        """
        pass
    def on_unset_mode(self, user, channel, mode, target=None):
        """Called when a mode is unset.
        """
        pass
    def on_unknown(self, *args):
        """An unknown event happened! We don't know how to process it.
        """
        pass
# ==========================================================================
# -- CTCP events -- #
# All CTCP descriptions are taken from:
# http://www.irchelp.org/irchelp/rfc/ctcpspec.html
# ==========================================================================
    def on_CTCP_clientinfo(self, sender, target, arg):
        """This is for client developers use to make it easier to show other
        client hackers what a certain client knows when it comes to CTCP. The
        replies should be fairly verbose explaining what CTCP commands are
        understood.
        """
        self.ctcpreply(sender.nick, 'CLIENTINFO', self.reply_clientinfo)
    def on_CTCP_finger(self, sender, target, arg):
        """This is used to get a user's real name, and perhaps also the idle time
        of the user (this usage has been obsoleted by enhancements to the IRC
        protocol).
        """
        self.ctcpreply(sender.nick, 'FINGER', self.reply_finger)
    def on_CTCP_ping(self, sender, target, arg):
        """Ping is used to measure the time delay between clients on the IRC
        network.
        The replying client sends back an identical message inside a notice.
        """
        # Echo the argument back unchanged, as the spec requires.
        self.ctcpreply(sender.nick, 'PING', arg)
    def on_CTCP_source(self, sender, target, arg):
        """This is used to get information about where to get a copy of the
        client.
        """
        self.ctcpreply(sender.nick, 'SOURCE', self.reply_source)
    def on_CTCP_time(self, sender, target, arg):
        """Time queries are used to determine what time it is where another
        user's client is running.
        """
        # TODO: allow custom reply.
        # Trim sub-second precision from the local timestamp.
        self.ctcpreply(sender.nick, 'TIME', str(datetime.now())[:19])
    def on_CTCP_userinfo(self, sender, target, arg):
        """This is used to transmit a string which is settable by the user (and
        never should be set by the client).
        """
        # Only answer when a userinfo string has actually been configured.
        if self.reply_userinfo:
            self.ctcpreply(sender.nick, 'USERINFO', self.reply_userinfo)
    def on_CTCP_version(self, sender, target, arg):
        """This is used to get information about the name of the other client and
        the version of it.
        """
        self.ctcpreply(sender.nick, 'VERSION', self.reply_version)
    def on_CTCPREPLY_ping(self, sender, target, arg):
        """Triggered when someone replies to our CTCP ping query.
        """
        pass
    # IRC Commands
    def join(self, channel, key=None):
        """Joins a channel with an optional key.
        This method must not be overridden.
        """
        s = 'JOIN ' + channel
        if key:
            s += ' ' + key
        self.sender.raw_line(s)
    def part(self, channel, reason=None):
        """Parts from a channel with an optional reason.
        This method must not be overridden.
        """
        s = 'PART ' + channel
        if reason:
            s += ' :' + reason
        self.sender.raw_line(s)
    def ctcpreply(self, target, type, reply=None):
        """Sends a reply (a notice) to a CTCP request.
        This method must not be overridden.
        """
        # CTCP payloads are wrapped in \x01 (chr(1)) delimiters.
        r = '{0}{1}{0}'.format(
            chr(1), type if not reply else '{0} {1}'.format(type, reply))
        self.notice(target, r)
    def ping(self, target, ts=None):
        """Sends a CTCP Ping request to a target.
        """
        # Default payload is the current unix time, so the reply can be
        # compared against it to measure latency.
        self.ctcpquery(target, 'PING', ts if ts else int(time.time()))
    def ctcpquery(self, target, type, args=None):
        """Sends a CTCP request (privmsg) to a target.
        This method must not be overridden.
        """
        r = '{0}{1}{2}{0}'.format(
            chr(1), type, ' {0}'.format(args) if args else '')
        self.privmsg(target, r)
    def notice(self, target, msg):
        """Sends a notice to a channel or a user.
        This method must not be overridden.
        """
        self.sender.add('NOTICE {0} :{1}'.format(target, msg))
    def privmsg(self, target, msg):
        """Sends a message to a channel or a user.
        This method must not be overridden.
        """
        self.sender.add('PRIVMSG {0} :{1}'.format(target, msg))
    def identify(self, password):
        """Identifies the bot to NickServ.
        This method must not be overridden.
        """
        self.sender.raw_line('NICKSERV IDENTIFY {0}'.format(password))
    def invite(self, channel, user):
        """Used to invite someone in a channel.
        """
        self.sender.raw_line('INVITE {0} {1}'.format(user, channel))
    def kick(self, channel, user, reason=None):
        """Used to kick a user out of a channel with an optional reason.
        """
        s = 'KICK {0} {1}'.format(channel, user)
        if reason:
            s += ' :' + reason
        self.sender.raw_line(s)
    def voice(self, channel, user):
        """Voices a user (or more, if user is a list) in a channel.
        """
        m = 'v'
        if isinstance(user, list):
            # One 'v' per target nick (MODE #chan +vvv a b c).
            m *= len(user)
        else:
            user = [user]
        self.set_mode(channel, m, user)
    def op(self, channel, user):
        """Ops a user (or more, if user is a list) in a channel.
        """
        m = 'o'
        if isinstance(user, list):
            m *= len(user)
        else:
            user = [user]
        self.set_mode(channel, m, user)
    def nickchange(self, newnick):
        """This method changes our bot's nick. It could fail, for example
        if the new nickname is not available.
        This method must not be overridden.
        """
        self.sender.raw_line('NICK {0}'.format(newnick))
    def topic(self, channel, newtopic=None):
        """This method is used to change a channel's topic.
        If no newtopic is given, the server will reply with the current topic.
        This method must not be overridden.
        """
        s = 'TOPIC ' + channel
        if newtopic:
            s += ' :' + newtopic
        self.sender.raw_line(s)
    def set_mode(self, channel, mode, args=None):
        """Set a mode (or more than one) in a channel with optional arguments.
        - args is a list.
        """
        s = 'MODE {0} +{1}'.format(channel, mode)
        if args:
            s += ' ' + ' '.join(args)
        self.sender.raw_line(s)
    def unset_mode(self, channel, mode, args=None):
        """Removes a mode (or more than one) from a channel with optional
        arguments.
        - args is a list.
        """
        s = 'MODE {0} -{1}'.format(channel, mode)
        if args:
            s += ' ' + ' '.join(args)
        self.sender.raw_line(s)
    def request_who(self, target):
        """Puts a /WHO request in a queue. Sends it if the queue is empty.
        """
        self.pending_who.append(target)
        if len(self.pending_who) == 1:
            # Queue was empty: fire this request right away; raw_315 sends
            # the next queued one when this completes.
            self.sender.raw_line('WHO {0}'.format(target))
def get_comchans(self, nick):
"""Returns a list of channels our bot and this user are in.
"""
comchans = []
for bla, chan in self.channels.items():
if nick in chan.users:
comchans.append(chan)
return comchans
| StarcoderdataPython |
4810771 | <gh_stars>0
from django.db import models
from django.utils.text import slugify
from utils.base_models import BaseModel
class Category(BaseModel):
    """A named category with a unique URL slug."""
    # Human-readable category name.
    name = models.CharField(max_length=100)
    # URL-friendly unique identifier.
    slug = models.SlugField(unique=True)
    def save(self, *args, **kwargs):
        # Normalise the stored slug before saving.
        # NOTE(review): this slugifies the *slug* field, not `name`; the
        # common Django pattern is `slugify(self.name)` — confirm whether
        # the slug is meant to be user-supplied or derived from the name.
        self.slug = slugify(self.slug)
        super().save(*args, **kwargs)
    def __str__(self):
        # Display the category by its name in admin/shell output.
        return self.name
| StarcoderdataPython |
3322342 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorlog
import pprint
from utils.pycocoevalcap.eval import COCOEvalCap
from utils.pycocotools.coco import COCO
from utils.utils import nostdout
# Shorthand for pretty-printing evaluation results.
pp = pprint.PrettyPrinter().pprint
class Evaluator(object):
    """Thin wrapper around caption-evaluation backends (currently COCO only)."""
    def __init__(self):
        pass
    def evaluation(self, predicts, answers, method="coco"):
        """Wrapper method for evaluation
        Args:
            predicts: list of tokens list
            answers: list of tokens list
            method: evaluation method. ("coco")
        Returns:
            Dictionary with metric name in key metric result in value
        """
        colorlog.info("Run evaluation...")
        if method == "coco":
            eval_result = self._coco_evaluation(predicts, answers)
        else:
            raise NotImplementedError
        # Pretty-print the metric dict before returning it.
        pp(eval_result)
        return eval_result
    def _coco_evaluation(self, predicts, answers):
        # Build COCO-format annotation and result structures, treating each
        # (prediction, answer) pair as its own synthetic "image".
        coco_res = []
        ann = {
            'images': [], 'info': '', 'type': 'captions',
            'annotations': [], 'licenses': ''
        }
        for i, (predict, answer) in enumerate(zip(predicts, answers)):
            predict_cap = ' '.join(predict)
            # Rename the tokenizer's _UNK marker so the scorer sees a word.
            answer_cap = ' '.join(answer).replace('_UNK', '_UNKNOWN')
            ann['images'].append({'id': i})
            ann['annotations'].append(
                {'caption': answer_cap, 'id': i, 'image_id': i}
            )
            coco_res.append(
                {'caption': predict_cap, 'id': i, 'image_id': i}
            )
        # Silence the verbose COCO loaders while scoring.
        with nostdout():
            coco = COCO(ann)
            coco_res = coco.loadRes(coco_res)
            coco_eval = COCOEvalCap(coco, coco_res)
            coco_eval.evaluate()
        return coco_eval.eval
| StarcoderdataPython |
162543 | <reponame>ndjuric/dscaler
#!/usr/bin/env python3
# Path to the DigitalOcean command-line client binary.
DOCTL = "/usr/local/bin/doctl"
# Public SSH key installed on created droplets.
PK_FILE = "/home/ndjuric/.ssh/id_rsa.pub"
# Working directory and droplet tag share the same name.
SWARM_DIR = TAG = "swarm"
# Name of the Docker overlay network used by the swarm services.
OVERLAY_NETWORK = "swarmnet"
# Image locations for the two swarm services.
DOCKER_REGISTRY = {
    'master': 'private.docker.registry.example.com:5000/master',
    'worker': 'private.docker.registry.example.com:5000/worker'
}
# Address of the NFS server mounted on every node.
NFS_SERVER = '10.135.69.119'
''' CALL_MAP is a list enumerating methods from the Cloud class the are allowed to be directly executed. '''
CALL_MAP = [
    'build',
    'deploy',
    'destroy',
    'add_worker',
    'add_manager',
    'ssh_manager',
    'logs_master',
    'remove_worker',
    'remove_manager',
    'get_master_container_id'
]
# Cloud-init script for the FIRST node: mounts NFS, opens the swarm port,
# initialises the swarm and deploys the master/worker services.
SCRIPT_CREATE = "#!/bin/bash\n"
SCRIPT_CREATE += "apt-get install -y nfs-common\n"
SCRIPT_CREATE += "mkdir /nfs\n"
SCRIPT_CREATE += "mount {0}:/nfs /nfs\n".format(NFS_SERVER)
SCRIPT_CREATE += "ufw allow 2377/tcp\n"
SCRIPT_CREATE += "export "
SCRIPT_CREATE += "PUBLIC_IPV4=$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)\n"
SCRIPT_CREATE += "docker swarm init --advertise-addr \"${PUBLIC_IPV4}:2377\"\n"
SCRIPT_CREATE += "docker network create --driver overlay {0}\n".format(OVERLAY_NETWORK)
SCRIPT_CREATE += "docker service create "
SCRIPT_CREATE += "--network swarmnet "
SCRIPT_CREATE += "--name master "
SCRIPT_CREATE += "--mount type=bind,source=/nfs,target=/nfs {0} \n".format(DOCKER_REGISTRY['master'])
SCRIPT_CREATE += "docker service create --network swarmnet --name worker {0}\n".format(DOCKER_REGISTRY['worker'])
SCRIPT_CREATE += "docker service scale worker=5\n"
# Cloud-init script for ADDITIONAL nodes: same NFS/firewall setup, then
# joins the existing swarm.  The final line keeps two placeholders —
# {0} = join token, {1} = manager address — to be filled in later with
# str.format (note the doubled {{ }} protecting the shell variable).
SCRIPT_JOIN = "#!/bin/bash\n"
SCRIPT_JOIN += "apt-get install -y nfs-common\n"
SCRIPT_JOIN += "mkdir /nfs\n"
SCRIPT_JOIN += "mount {0}:/nfs /nfs\n".format(NFS_SERVER)
SCRIPT_JOIN += "ufw allow 2377/tcp\n"
SCRIPT_JOIN += "export "
SCRIPT_JOIN += "PUBLIC_IPV4=$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)\n"
SCRIPT_JOIN += "docker swarm join --advertise-addr \"${{PUBLIC_IPV4}}:2377\" --token \"{0}\" \"{1}:2377\"\n"
1756855 | <filename>docker_ws/src/ai_model/get_io_tensors.py
###############################################################################################################
# Source : https://newbedev.com/given-a-tensor-flow-model-graph-how-to-find-the-input-node-and-output-node-names
###############################################################################################################
import argparse
import tensorflow as tf
###############################################################################################################
def load_tf_graph(graph_name):
    """
    Load a TensorFlow graph 'graph_name' (a frozen GraphDef protobuf file)
    into the default graph and return it.
    """
    # Initialize a dataflow graph structure
    graph_def = tf.Graph().as_graph_def()
    # Parse the serialized TensorFlow graph from disk
    graph_def.ParseFromString(tf.io.gfile.GFile(graph_name, "rb").read())
    # Imports the graph from graph_def into the current default Graph
    tf.import_graph_def(graph_def, name = '')
    # Get the default graph for the current thread (TF1-style API)
    graph = tf.compat.v1.get_default_graph()
    return graph
def get_in_out_tensors(graph):
    """
    Get the input and output tensors from the TensorFlow graph 'graph'.
    """
    # All graph nodes that perform computation on tensors.
    ops = graph.get_operations()
    inputs = []
    # Start with every op as an output candidate; any op that feeds
    # another op is eliminated below.
    candidates = set(ops)
    for op in ops:
        # Input nodes consume nothing (constants excluded).
        if len(op.inputs) == 0 and op.type != 'Const':
            inputs.append(op)
        else:
            for consumed in op.inputs:
                if consumed.op in candidates:
                    candidates.remove(consumed.op)
    return inputs, list(candidates)
def display_tensor(tensor_list, disp_shape=False):
    """
    Display tensor name and shape for each tensor in 'tensor_list'.
    """
    # 1-based numbering, as in the original output format.
    for idx, tensor in enumerate(tensor_list, start=1):
        if disp_shape:
            # Flatten the repeated-dim protobuf repr into a compact string.
            dims = str(tensor.get_attr('shape').dim)
            dims = dims.replace('size: ', '').replace('\n', '')
            # Display name and shape of tensor
            print("- Tensor n°{0} : name={1}, shape={2}".format(idx, tensor.name, dims))
        else:
            # Display tensor name only
            print("- Tensor n°{0} : name={1}".format(idx, tensor.name))
    print("\n")
###############################################################################################################
def main():
    """Parse command-line options, load the graph and report its I/O tensors."""
    # Create arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--graph', type=str, default='./graph.pb', help="Path to the TensorFlow graph to analyze. Default is './graph.pb'.")
    # Parse arguments
    args = parser.parse_args()
    # Print argument values
    print('\n------------------------------------')
    print ('Command line options:')
    print('--graph:', args.graph)
    print('------------------------------------\n')
    # Load TensorFlow graph to analyze
    graph = load_tf_graph(args.graph)
    # Read the input and output tensor names and dimensions from the graph
    input_tensors, output_tensors = get_in_out_tensors(graph)
    # Display input tensor
    print("Input tensor(s) :")
    display_tensor(input_tensors, disp_shape=True)
    # Display output tensor
    print("Output tensor(s) :")
    display_tensor(output_tensors)
###############################################################################################################
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3303166 | #!/usr/bin/env python
"""
This source file is part of the Swift.org open source project
Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
Licensed under Apache License v2.0 with Runtime Library Exception
See https://swift.org/LICENSE.txt for license information
See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
------------------------------------------------------------------------------
This is a helper script for the main swift repository's build-script.py that
knows how to build and install the stress tester utilities given a swift
workspace.
"""
from __future__ import print_function
import argparse
import sys
import os, platform
import subprocess
def printerr(message):
    """Write *message* (plus a newline) to standard error."""
    sys.stderr.write(str(message) + "\n")
def main(argv_prefix=None):
    """Script entry point.

    Args:
        argv_prefix: optional list of arguments inserted before the real
            command line (used when the swift build-script drives us).
    """
    # Fix: avoid the mutable-default-argument anti-pattern; None now stands
    # in for "no extra arguments" (backward compatible for all callers).
    if argv_prefix is None:
        argv_prefix = []
    args = parse_args(argv_prefix + sys.argv[1:])
    run(args)
def parse_args(args):
parser = argparse.ArgumentParser(prog='build-script-helper.py')
parser.add_argument('--package-path', default='')
parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands')
parser.add_argument('--prefix', help='install path')
parser.add_argument('--configuration', default='debug')
parser.add_argument('--build-path', default=None)
parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to create a unified build of SwiftSyntax with other projects.')
parser.add_argument('--toolchain', required=True, help='the toolchain to use when building this package')
parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies')
parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when building')
parser.add_argument('build_actions', help="Extra actions to perform. Can be any number of the following", choices=['all', 'build', 'test', 'generate-xcodeproj', 'install'], nargs="*", default=['build'])
parsed = parser.parse_args(args)
parsed.swift_exec = os.path.join(parsed.toolchain, 'bin', 'swift')
# Convert package_path to absolute path, relative to root of repo.
repo_path = os.path.dirname(__file__)
parsed.package_path = os.path.realpath(
os.path.join(repo_path, parsed.package_path))
if not parsed.build_path:
parsed.build_path = os.path.join(parsed.package_path, '.build')
return parsed
def add_rpath(rpath, binary, verbose):
    """Add *rpath* to *binary* using macOS install_name_tool.

    A non-zero exit is tolerated (the rpath may already exist on an
    incremental build); the tool's stdout is shown when *verbose* is set.
    """
    cmd = ['install_name_tool', '-add_rpath', rpath, binary]
    print(' '.join(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _err = proc.communicate()
    if proc.returncode != 0:
        print('install_name_tool -add_rpath command failed, assume incremental build and proceed.')
    if verbose:
        print(out)
def run(args):
    """Execute the selected build actions (update/build/install/
    generate-xcodeproj/test) for the package described by *args*.

    Each step prints a banner, and any subprocess failure is reported via
    printerr() followed by sys.exit(1).
    """
    package_name = os.path.basename(args.package_path)
    env = dict(os.environ)
    # Use local dependencies (i.e. checked out next package-syntax-parser).
    if not args.no_local_deps:
        env['SWIFTCI_USE_LOCAL_DEPS'] = "1"
    # Optional: refresh SwiftPM dependencies before anything else.
    if args.update:
        print("** Updating dependencies of %s **" % package_name)
        try:
            update_swiftpm_dependencies(package_path=args.package_path,
                                        swift_exec=args.swift_exec,
                                        build_path=args.build_path,
                                        env=env,
                                        verbose=args.verbose)
        except subprocess.CalledProcessError as e:
            printerr('FAIL: Updating dependencies of %s failed' % package_name)
            printerr('Executing: %s' % ' '.join(e.cmd))
            sys.exit(1)
    # The test action creates its own build. No need to build if we are just testing.
    if should_run_action('build', args.build_actions):
        print("** Building %s **" % package_name)
        try:
            invoke_swift(package_path=args.package_path,
                         swift_exec=args.swift_exec,
                         action='build',
                         products=['package-syntax-parser'],
                         build_path=args.build_path,
                         multiroot_data_file=args.multiroot_data_file,
                         configuration=args.configuration,
                         env=env,
                         verbose=args.verbose)
        except subprocess.CalledProcessError as e:
            printerr('FAIL: Building %s failed' % package_name)
            printerr('Executing: %s' % ' '.join(e.cmd))
            sys.exit(1)
    if should_run_action('install', args.build_actions):
        print("** Installing %s **" % package_name)
        try:
            # NOTE(review): install always copies from the 'release' build dir,
            # regardless of args.configuration — presumably intentional; confirm.
            bin_path = os.path.join(args.build_path, 'release')
            install(bin_path, args.toolchain, args.verbose)
        except subprocess.CalledProcessError as e:
            printerr('FAIL: Installing %s failed' % package_name)
            printerr('Executing: %s' % ' '.join(e.cmd))
            sys.exit(1)
    # NOTE(review): output_dir is computed but never used below.
    output_dir = os.path.realpath(os.path.join(args.build_path, args.configuration))
    if should_run_action("generate-xcodeproj", args.build_actions):
        print("** Generating Xcode project for %s **" % package_name)
        try:
            generate_xcodeproj(args.package_path,
                               swift_exec=args.swift_exec,
                               env=env,
                               verbose=args.verbose)
        except subprocess.CalledProcessError as e:
            printerr('FAIL: Generating the Xcode project failed')
            printerr('Executing: %s' % ' '.join(e.cmd))
            sys.exit(1)
    if should_run_action("test", args.build_actions):
        print("** Testing %s **" % package_name)
        try:
            invoke_swift(package_path=args.package_path,
                         swift_exec=args.swift_exec,
                         action='test',
                         products=['%sPackageTests' % package_name],
                         build_path=args.build_path,
                         multiroot_data_file=args.multiroot_data_file,
                         configuration=args.configuration,
                         env=env,
                         verbose=args.verbose)
        except subprocess.CalledProcessError as e:
            printerr('FAIL: Testing %s failed' % package_name)
            printerr('Executing: %s' % ' '.join(e.cmd))
            sys.exit(1)
def should_run_action(action_name, selected_actions):
    """Return True when *action_name* was selected, or when the
    catch-all 'all' action was selected."""
    return action_name in selected_actions or "all" in selected_actions
def update_swiftpm_dependencies(package_path, swift_exec, build_path, env, verbose):
    """Run ``swift package update`` for the package at *package_path*."""
    cmd = [
        swift_exec, 'package',
        '--package-path', package_path,
        '--build-path', build_path,
        'update',
    ]
    check_call(cmd, env=env, verbose=verbose)
def invoke_swift(package_path, swift_exec, action, products, build_path, multiroot_data_file, configuration, env, verbose):
    """Run a swift *action* ('build' or 'test') for each product in turn."""
    # Until rdar://53881101 is implemented, we cannot request a build of multiple
    # targets simultaneously. For now, just build one product after the other.
    for product in products:
        invoke_swift_single_product(package_path, swift_exec, action, product, build_path, multiroot_data_file, configuration, env, verbose)
def invoke_swift_single_product(package_path, swift_exec, action, product, build_path, multiroot_data_file, configuration, env, verbose):
    """Invoke ``swift <action>`` for a single *product*.

    Non-Darwin platforms need --enable-test-discovery; the 'test' action
    selects products via --test-product rather than --product.
    """
    args = [swift_exec, action, '--package-path', package_path, '-c', configuration, '--build-path', build_path]
    if platform.system() != "Darwin":
        args.extend(["--enable-test-discovery"])
    if multiroot_data_file:
        args.extend(['--multiroot-data-file', multiroot_data_file])
    if action == 'test':
        args.extend(['--test-product', product])
    else:
        args.extend(['--product', product])
    # Tell SwiftSyntax that we are building in a build-script environment so that
    # it does not need to be rebuilt if it has already been built before.
    env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1'
    check_call(args, env=env, verbose=verbose)
def generate_xcodeproj(package_path, swift_exec, env, verbose):
    """Generate ``<package_name>.xcodeproj`` inside *package_path* via SwiftPM."""
    package_name = os.path.basename(package_path)
    xcodeproj_path = os.path.join(package_path, '%s.xcodeproj' % package_name)
    args = [swift_exec, 'package', '--package-path', package_path, 'generate-xcodeproj', '--output', xcodeproj_path]
    check_call(args, env=env, verbose=verbose)
def check_call(cmd, verbose, env=os.environ, **kwargs):
    """Run *cmd* (stderr merged into stdout), raising CalledProcessError
    on a non-zero exit; echo the command first when *verbose*.

    NOTE(review): env=os.environ binds the live environ mapping at
    definition time; later os.environ changes still show through.
    """
    if verbose:
        print(' '.join([escape_cmd_arg(arg) for arg in cmd]))
    return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs)
def install(bin_path, toolchain, verbose):
    """Install every known executable from *bin_path* into the toolchain's bin dir."""
    toolchain_bin = os.path.join(toolchain, 'bin')
    for exe in ['package-syntax-parser']:
        install_binary(exe, bin_path, toolchain_bin, toolchain, verbose)
def install_binary(exe, source_dir, install_dir, toolchain, verbose):
    """Copy *exe* from *source_dir* into *install_dir* and, on macOS,
    rewrite its rpaths to be relative to the executable."""
    cmd = ['rsync', '-a', os.path.join(source_dir, exe), install_dir]
    print(' '.join(cmd))
    subprocess.check_call(cmd)
    if platform.system() == 'Darwin':
        result_path = os.path.join(install_dir, exe)
        stdlib_rpath = os.path.join(toolchain, 'lib', 'swift', 'macosx')
        # Swap the absolute toolchain stdlib rpath for @executable_path-relative ones.
        delete_rpath(stdlib_rpath, result_path)
        add_rpath('@executable_path/../lib', result_path, verbose)
        add_rpath('@executable_path/../lib/swift/macosx', result_path, verbose)
def delete_rpath(rpath, binary):
    """Remove *rpath* from *binary* via install_name_tool; raises on failure
    (unlike add_rpath, which tolerates errors)."""
    cmd = ["install_name_tool", "-delete_rpath", rpath, binary]
    print(' '.join(cmd))
    subprocess.check_call(cmd)
def escape_cmd_arg(arg):
    """Quote *arg* for display when it contains a double quote or a space;
    otherwise return it untouched."""
    if '"' not in arg and ' ' not in arg:
        return arg
    return '"%s"' % arg.replace('"', '\\"')
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3289458 | from mrjob.job import MRJob
from mrjob.step import MRStep
from heapq import nlargest
from operator import itemgetter
import re
WORD_RE = re.compile(r"[\w']+")
class MRMostUsedWord(MRJob):
    """Two-step MapReduce job that emits the ten most frequent words found
    in the third tab-separated column of each input row."""
    def mapper_get_words(self, _, row):
        # Column index 2 holds the text to tokenize; words are case-folded.
        data = row.split('\t')
        for word in WORD_RE.findall(data[2]):
            yield (word.lower(), 1)
    def combiner_count_words(self, word, counts):
        # Pre-aggregate per mapper to reduce shuffle volume.
        yield (word, sum(counts))
    def reducer_count_words(self, word, counts):
        # Re-key everything to None so a single reducer sees all totals.
        yield None, (sum(counts), word)
    def reducer_find_max_word(self, _, word_count_pairs):
        # Top ten (count, word) pairs, highest count first.
        yield None, sorted(word_count_pairs, reverse=True)[:10]
    def steps(self):
        """Wire the two MRSteps: count words, then pick the maxima."""
        return [
            MRStep(mapper=self.mapper_get_words,
                   combiner=self.combiner_count_words,
                   reducer=self.reducer_count_words),
            MRStep(reducer=self.reducer_find_max_word)
        ]
if __name__=='__main__':
    MRMostUsedWord.run()
3272962 | <filename>fastapi/{{ cookiecutter.project_name }}/app/core/config.py
from typing import Literal
from pydantic import AnyHttpUrl, BaseSettings, Field
class CustomBaseSettings(BaseSettings):
    """Configure .env settings for all our setting-classes"""
    class Config:
        # pydantic settings config: load from a UTF-8 .env file,
        # with case-sensitive environment variable names.
        env_file = '.env'
        env_file_encoding = 'utf-8'
        case_sensitive = True
class Env(CustomBaseSettings):
    """Deployment environment selector, read eagerly so other settings
    classes can branch on it at import time."""
    ENVIRONMENT: Literal['dev', 'lab', 'prod', 'qa', 'test'] = Field(..., env='ENVIRONMENT')
# Module-level singleton used by the settings classes below.
env = Env()
class RedisCredentials(CustomBaseSettings):
    """Redis password and test-instance host, from the environment."""
    REDIS_PASSWORD: str = Field('', env='REDIS_PASSWORD')
    TEST_REDIS_HOST: str = Field('localhost:6380', env='TEST_REDIS_HOST')
redis = RedisCredentials()
{% if cookiecutter.sqlmodel == 'True' %}
# Only rendered when the project opts into sqlmodel support.
class PostgresCredentials(CustomBaseSettings):
    """PostgreSQL connection credentials, from the environment."""
    POSTGRES_USER: str = Field('{{ cookiecutter.project_name }}', env='POSTGRES_USER')
    POSTGRES_PASSWORD: str = Field('', env='POSTGRES_PASSWORD')
    POSTGRES_HOST: str = Field('localhost', env='POSTGRES_HOST')
postgres = PostgresCredentials()
{% endif %}
class Credentials(CustomBaseSettings):
    """Connection strings assembled from the credential singletons above."""
    # Pick the Redis host by environment: dedicated test host, localhost
    # for dev, or the in-cluster service name otherwise.
    REDIS_URL = (
        redis.TEST_REDIS_HOST
        if env.ENVIRONMENT == 'test'
        else 'localhost'
        if env.ENVIRONMENT in ['dev', 'test']
        else f'redis.my_proj-{env.ENVIRONMENT}.svc'
    )
    REDIS_CONNECTION_STRING: str = f'redis://:{redis.REDIS_PASSWORD}@{REDIS_URL}'
    {% if cookiecutter.sqlmodel == 'True' %}
    POSTGRES_USERNAME: str = '{{ cookiecutter.project_name }}'
    POSTGRES_CONNECTION_STRING: str = Field(
        f'postgresql+asyncpg://{postgres.POSTGRES_USER}:{postgres.POSTGRES_PASSWORD}@{postgres.POSTGRES_HOST}',
    )
    {% endif %}
class Authentication(CustomBaseSettings):
    """Azure AD tenant/client identifiers for the API and its docs UI."""
    TENANT_ID: str = Field(default='9b5ff18e-53c0-45a2-8bc2-9c0c8f60b2c6', description='Intility tenant ID')
    APP_CLIENT_ID: str = Field(..., description='ClientID for web-app. (First step)')
    OPENAPI_CLIENT_ID: str = Field(..., description='OpenAPI/Swagger SPA client ID')
authentication = Authentication()
class Authorization(CustomBaseSettings):
    """OAuth2 scopes exposed to the OpenAPI login flow."""
    SCOPES: dict[str, str] = Field(
        {
            f'api://{authentication.APP_CLIENT_ID}/user_impersonation': 'User impersonation',
        },
        description=(
            'A dictionary with scopes, used for OpenAPI authentication/authorization. '
            'You can add multiple scopes here.'
        ),
    )
class Settings(
    # Aggregate settings object: inherits every group defined above.
    Env,
    RedisCredentials,
    {% if cookiecutter.sqlmodel == 'True' %}PostgresCredentials,{% endif %}
    Credentials,
    Authentication,
    Authorization,
):
    """Top-level application settings, exposed as the `settings` singleton."""
    API_V1_STR: str = '/api/v1'
    PROJECT_NAME: str = '{{ cookiecutter.project_name }}'
    SECRET_KEY: str = Field(..., env='SECRET_KEY')
    SENTRY_DSN: str = Field('', env='SENTRY_DSN')
    # CORS settings should answer this question: Which frontends (through browser) are allowed to access the API?
    # This includes your OpenAPI documentation, your react frontend etc.
    BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = ['http://localhost:8000']
    BACKEND_CORS_ORIGINS_REGEX: str = r'https://.*\.intility\.[no|com]'
settings = Settings()
| StarcoderdataPython |
26061 | <reponame>algon-320/tenki.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import datetime
import pickle
import lxml.html
import urllib.request, urllib.error
import re
from modules.weather import Weather
from modules.print_util import String
class WeatherForecastManager:
    """Scrape a tenki.jp forecast page, cache the parsed result with pickle,
    and pretty-print a three-day forecast table to stdout."""
    # Local pickle cache; reused when less than one hour old.
    PICKLE_DUMP_FILE = 'tenki.dump'
    # XPath expressions for the tenki.jp page layout. %d is the 1-based
    # table index (one table per day).
    XPATH_UPDATED_TIME = r'//*[@id="main-column"]/section/h2/time/text()'
    XPATH_POINT_INFO = r'//*[@id="main-column"]/section/h2/text()'
    XPATH_ANNOUNCED_TIME_COMMENT = r'//*[@id="main-column"]/section/comment()[contains(., "announce_datetime")]'
    XPATH_WEATHER_DATES = r'//*[@id="main-column"]/section/table[%d]/tr[1]/td/div/p/text()'
    XPATH_WEATHER_TD = r'//*[@id="main-column"]/section/table[%d]/tr[4]/td'
    XPATH_TEMPERATURE_TD = r'//*[@id="main-column"]/section/table[%d]/tr[6]/td'
    XPATH_PROB_RAIN_TD = r'//*[@id="main-column"]/section/table[%d]/tr[7]/td'
    XPATH_AMOUNT_RAIN_TD = r'//*[@id="main-column"]/section/table[%d]/tr[9]/td'
    XPATH_HUMIDITY_TD = r'//*[@id="main-column"]/section/table[%d]/tr[10]/td'
    # Bit flags selecting which rows print_weather() should display.
    # NOTE(review): SHOW_OPTS is assigned the same map object that the tuple
    # unpacking consumes, so it ends up as an exhausted iterator, not a tuple.
    SHOW_OPTS = (
        SHOW_WEATHER,
        SHOW_TEMPERATURE,
        SHOW_PROBABILITY_OF_RAIN,
        SHOW_AMOUNT_OF_RAIN,
        SHOW_HUMIDITY,
        SHOW_WITHOUT_COLORS,
    ) = map(lambda x: 1 << x, range(6))
    SHOW_ALL = SHOW_WEATHER | SHOW_TEMPERATURE | SHOW_PROBABILITY_OF_RAIN | SHOW_AMOUNT_OF_RAIN | SHOW_HUMIDITY
    def __init__(self, spot_url):
        """Load cached data when fresh (< 1 hour, same URL); otherwise scrape."""
        self.url = spot_url
        self.weathers = []
        self.updated_time = None
        self.point_name = ''
        if os.path.exists(WeatherForecastManager.PICKLE_DUMP_FILE):
            self.unpickle()
            if self.updated_time + datetime.timedelta(hours=1) > datetime.datetime.now() and self.url == spot_url:
                return
        self.update_weather(spot_url)
    def update_weather(self, url):
        """Fetch *url*, parse the three daily forecast tables, refresh the cache.

        Exits the process when the URL cannot be opened.
        """
        # print('[debug] checking for updates ...')
        try:
            html = urllib.request.urlopen(url).read()
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
            # urllib.error.URLError would be the targeted choice here.
            print('[error] cannot open URL')
            sys.exit(1)
        dom = lxml.html.fromstring(html.decode('utf-8'))
        updated_time_str = dom.xpath(WeatherForecastManager.XPATH_UPDATED_TIME)[0]
        point_info = dom.xpath(WeatherForecastManager.XPATH_POINT_INFO)[0]
        self.point_name = re.match(r'(.+)の天気', point_info).group(1)
        # Set the announced/updated timestamp: year and month come from an HTML
        # comment, day and time from the visible "announced" string.
        comment = dom.xpath(WeatherForecastManager.XPATH_ANNOUNCED_TIME_COMMENT)[0]
        comment = lxml.html.tostring(comment, method='html', encoding='unicode')
        mat = re.match(r'.*announce_datetime:(\d{4})\-(\d{2})\-\d{2} \d{2}\:\d{2}\:\d{2}', comment)
        year = int(mat.group(1))
        month = int(mat.group(2))
        mat = re.match(r'(\d+)日(\d+):(\d+)発表', updated_time_str)
        day = int(mat.group(1))
        hour = int(mat.group(2))
        minute = int(mat.group(3))
        self.updated_time = datetime.datetime(year, month, day, hour, minute)
        self.weathers = []
        # One table per forecast day; each row maps to a Weather attribute.
        for k in range(3):
            w = Weather()
            w.date = dom.xpath(WeatherForecastManager.XPATH_WEATHER_DATES % (k + 1))[0][:-1]
            tds_weather = dom.xpath(WeatherForecastManager.XPATH_WEATHER_TD % (k + 1))
            tds_temperature = dom.xpath(WeatherForecastManager.XPATH_TEMPERATURE_TD % (k + 1))
            tds_probability_of_rain = dom.xpath(WeatherForecastManager.XPATH_PROB_RAIN_TD % (k + 1))
            tds_amount_of_rain = dom.xpath(WeatherForecastManager.XPATH_AMOUNT_RAIN_TD % (k + 1))
            tds_humidity = dom.xpath(WeatherForecastManager.XPATH_HUMIDITY_TD % (k + 1))
            w.weathers = list(map(lambda td: td[1].text, tds_weather))
            # A 'past' icon marks three-hour slots that have already elapsed.
            w.is_past = list(map(lambda td: ('past' in td[0].attrib['src']), tds_weather))
            w.temperatures = list(map(lambda td: float(td[0].text), tds_temperature))
            w.probability_of_rains = list(map(lambda td: None if td[0].text == '---' else int(td[0].text), tds_probability_of_rain))
            w.amount_of_rains = list(map(lambda td: float(td[0].text), tds_amount_of_rain))
            w.humidities = list(map(lambda td: int(td[0].text), tds_humidity))
            self.weathers.append(w)
        self.pickle_data()
    def pickle_data(self):
        """Persist (url, weathers, updated_time, point_name) to the cache file."""
        with open(WeatherForecastManager.PICKLE_DUMP_FILE, 'wb') as f:
            tmp = (self.url, self.weathers, self.updated_time, self.point_name)
            pickle.dump(tmp, f)
    def unpickle(self):
        """Restore state from the cache file written by pickle_data()."""
        with open(WeatherForecastManager.PICKLE_DUMP_FILE, 'rb') as f:
            tmp = pickle.load(f)
            self.url = tmp[0]
            self.weathers = tmp[1]
            self.updated_time = tmp[2]
            self.point_name = tmp[3]
    def print_weather(self, show_opts=None, conky=False, days=2):
        """Print the forecast table for the first *days* days.

        *show_opts* is a bitmask of the SHOW_* flags (default SHOW_ALL);
        *conky* is forwarded to the Weather print helpers.
        """
        if show_opts == None:
            show_opts = WeatherForecastManager.SHOW_ALL
        # Widest date label determines the left column width.
        max_width = 0
        for w in self.weathers:
            max_width = max(max_width, String.get_string_width(w.date))
        max_width += 6
        # Widest weather description determines each data column width.
        max_unit_width = 0
        for i in range(days):
            w = self.weathers[i]
            for tmp in w.weathers:
                max_unit_width = max(max_unit_width, String.get_string_width(tmp))
        print('-' * (max_width + (max_unit_width + 1) * 8))
        print('{p}の天気 ({M}月{D}日 {h:02d}:{m:02d} 発表)'.format(p=self.point_name,
            M=self.updated_time.month, D=self.updated_time.day,
            h=self.updated_time.hour, m=self.updated_time.minute))
        # Header row: eight three-hour slots per day.
        time_labels = ['03時', '06時', '09時', '12時', '15時', '18時', '21時', '24時']
        sys.stdout.write((' ' * max_width))
        for l in time_labels:
            sys.stdout.write(String.center(l, max_unit_width + 1))
        sys.stdout.write('\n')
        print('=' * (max_width + (max_unit_width + 1) * 8))
        for i in range(days):
            w = self.weathers[i]
            col = bool(show_opts & WeatherForecastManager.SHOW_WITHOUT_COLORS)
            if show_opts & WeatherForecastManager.SHOW_WEATHER:
                w.print_weather(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_TEMPERATURE:
                w.print_temperature(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_PROBABILITY_OF_RAIN:
                w.print_probability_of_rain(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_AMOUNT_OF_RAIN:
                w.print_amount_of_rain(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_HUMIDITY:
                w.print_humidity(max_width, max_unit_width, no_color=col, conky=conky)
            print('=' * (max_width + (max_unit_width + 1) * 8))
| StarcoderdataPython |
197572 | """
Views of core application.
"""
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils.translation import gettext as _
from rest_framework.authtoken.models import Token
# Create your views here.
def home(request):
    """
    Render the public home page.
    """
    return render(request, 'home.html')
def donate(request):
    """
    Render the donate page.
    """
    return render(request, 'donate.html')
def login(request):
    """
    Render the login page; already-authenticated users are redirected home.
    """
    if request.user.is_authenticated:
        return redirect('home')
    return render(request, 'account/login.html')
def logout_view(request):
    """
    Log the current user out and redirect to the home page.
    """
    logout(request)
    return redirect('home')
@login_required
def settings_view(request):
    """
    Render the settings page of the logged-in user, including their API token.
    """
    # NOTE(review): Token.objects.get raises DoesNotExist if no token was
    # ever created for this user — presumably one is created at signup; verify.
    token = Token.objects.get(user=request.user)
    return render(request, 'account/settings.html', {'user_token': token})
@login_required
def new_user_token(request):
    """
    Replace the logged-in user's API token with a freshly generated one,
    then redirect back to the settings page with a success message.
    """
    Token.objects.get(user=request.user).delete()
    Token.objects.create(user=request.user)
    messages.success(request, _('New user token created.'))
    return redirect('settings')
| StarcoderdataPython |
3270329 | from Errors import CacheKeyError
import functools
# Human-readable hints appended to CacheKeyError messages for well-known
# cache keys; unknown keys fall back to a generic message in needs().
predefinedCacheKeyHints = {
    "test": "This is a test hint.",
    "outputFolder": "A folder path(string) for logger file and figures: Auto provided when Discoverer is initiated.",
    "groupName": "A string of the group name: Should be defined by hand.",
    "friendList": "A dict mapping WeChat ID to name: Can be returned by DbManager.getFrindList()",
    "textMessagesSplitedByIdInGroupChat": "A dict splits all text messages by WeChat IDs: Can be produced by Middlewares.textMessagesSplitedByIdInGroupChat"
}
def needs(*ns):
    """Decorator factory: require cache keys *ns* before running the
    decorated (historyData, cache, logger) function.

    All missing keys are collected and reported in a single
    CacheKeyError, each with its predefined hint when available.
    """
    def needsChecked(func):
        @functools.wraps(func)
        def wrapped_f(historyData, cache, logger):
            em = []
            for n in ns:
                if n not in cache:
                    em.append(" > CacheKeyError: '"+ n +"'. Cache needs check failed for: " + func.__name__ + "\n"
                        + ((" - Hint: " + predefinedCacheKeyHints[n]) if n in predefinedCacheKeyHints else " - Not a pre-defined key name."))
            if len(em) != 0:
                raise CacheKeyError("\n".join(em))
            return func(historyData, cache, logger)
        return wrapped_f
    return needsChecked
    return needsChecked
from .casing import *
1602493 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2019-04-25 15:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.10.5): updates the `choices` of
    # content.AnalyticTag.page. Choices are enforced in forms only, so no
    # database-level change is made beyond the field definition.
    dependencies = [
        ('content', '0049_auto_20181009_1410'),
    ]
    operations = [
        migrations.AlterField(
            model_name='analytictag',
            name='page',
            field=models.CharField(choices=[('ourstory', 'ourstory'), ('peru', 'peru'), ('ethics', 'ethics'), ('financials', 'financials'), ('ourteam', 'ourteam'), ('teensprogram', 'teensprogram'), ('childrensprogram', 'childrensprogram'), ('womensprogram', 'womensprogram'), ('donate', 'donate'), ('volunteerperu', 'volunteerperu'), ('chicago', 'chicago'), ('donations', 'donations'), ('Global', 'Global')], default='ourstory', max_length=15),
        ),
    ]
| StarcoderdataPython |
class SBLError(Exception):
    """Base exception type for SBL failures."""
| StarcoderdataPython |
4838359 | import tensorflow as tf
# Demo: tensor transposition with the TF1.x graph/session API.
# NOTE(review): tf.Session is TF1-only; under TF2 this needs tf.compat.v1.
x = [[1,2,3],[4,5,6]]
x = tf.convert_to_tensor(x)
# 2-D transpose: swaps rows and columns.
xtrans = tf.transpose(x)
y=([[[1,2,3],[6,5,4]],[[4,5,6],[3,6,3]]])
y = tf.convert_to_tensor(y)
# perm=[0, 2, 1]: keep the leading axis, transpose each inner 2-D matrix.
ytrans = tf.transpose(y, perm=[0, 2, 1])
with tf.Session() as sess:
    print(sess.run(xtrans))
    print(sess.run(ytrans))
1613445 | import datetime
import pytest
import os
from batimap.app import create_app
from batimap.extensions import db
from batimap.db import Base, Boundary, Cadastre, City
@pytest.fixture
def app():
    """Flask app wired to the test Postgres/Redis instances; all tables
    are dropped at teardown so each test starts clean."""
    test_db_uri = os.environ.get(
        "POSTGRES_URI", "postgresql://test:batimap@localhost:15432/testdb"
    )
    test_redis_uri = os.environ.get("REDIS_URI", "redis://localhost:16379")
    app = create_app(test_db_uri, test_redis_uri)
    app.config.update(
        TESTING=True, CELERY_BROKER_URL=test_redis_uri, CELERY_BACK_URL=test_redis_uri
    )
    yield app
    with app.app_context():
        Base.metadata.drop_all(bind=db.sqlalchemy.engine)
        # pass
@pytest.fixture
def client(app):
    """HTTP test client bound to the `app` fixture."""
    return app.test_client()
@pytest.fixture
def runner(app, caplog):
    """CLI test runner for the Flask app, with logging silenced."""
    # we need to disable logs because of https://github.com/pallets/click/issues/824
    caplog.set_level(100000)
    return app.test_cli_runner()
@pytest.fixture(scope="session")
def celery_config():
    """Celery test configuration: in-memory broker, Redis result backend."""
    return {
        "broker_url": "memory://",
        "result_backend": os.environ.get("REDIS_URI", "redis://localhost:16379"),
    }
@pytest.fixture
def db_mock_cities(app):
    """Seed five City rows (two departments; one raster city)."""
    with app.app_context():
        objects = [
            City(insee="01004", department="01", import_date="2009"),
            City(insee="01005", department="01", import_date="2013"),
            City(insee="01006", department="01", import_date="2009"),
            City(insee="02022", department="02", import_date="2012"),
            City(insee="02023", department="02", import_date="raster", is_raster=True),
        ]
        db.session.add_all(objects)
        db.session.commit()
@pytest.fixture
def db_mock_all_date(app):
    """Seed department 08 with one city pair per possible import date
    (years 2009-2020 plus the raster/unfinished/unknown/never markers).

    For each date: insee 080NN has no cadastre date and few buildings,
    insee 081NN has a current cadastre date and all buildings matched.
    """
    with app.app_context():
        dates = list(range(2009, 2021)) + [
            "raster",
            "unfinished",
            "unknown",
            "never",
        ]
        for idx, date in enumerate(dates):
            db.session.add_all(
                [
                    Cadastre(insee=f"080{idx:02}", department="08", od_buildings=100),
                    Cadastre(insee=f"081{idx:02}", department="08", od_buildings=100),
                    City(
                        insee=f"080{idx:02}",
                        department="08",
                        import_date=date,
                        osm_buildings=25,
                    ),
                    City(
                        insee=f"081{idx:02}",
                        department="08",
                        date_cadastre=datetime.datetime.now(),
                        import_date=date,
                        osm_buildings=100,
                    ),
                    Boundary(
                        insee=f"080{idx:02}",
                        admin_level=8,
                        geometry="srid=4326; POLYGON((0 0,0.1 0,0.1 0.1,0 0.1,0 0))",
                    ),
                    Boundary(
                        insee=f"081{idx:02}",
                        admin_level=8,
                        geometry="srid=4326; POLYGON((0 0,0.2 0,0.2 0.2,0 0.2,0 0))",
                    ),
                ]
            )
        db.session.commit()
@pytest.fixture
def db_mock_boundaries(app):
    """Seed Boundary rows: two departments (admin_level 6), one boundary
    per mock city (admin_level 8), and one former-commune (admin_level 9)."""
    with app.app_context():
        objects = [
            Boundary(
                insee="01",
                name="01-test",
                geometry="srid=4326; POLYGON((0 0,1 0,1 1,0 1,0 0))",
                admin_level=6,
            ),
            Boundary(
                insee="02",
                name="02-test",
                geometry="srid=4326; POLYGON((0 0,1 0,1 1,0 1,0 0))",
                admin_level=6,
            ),
            Boundary(
                insee="01004",
                name="01004-test",
                admin_level=8,
                geometry="srid=4326; POLYGON((0 0,1 0,1 1,0 1,0 0))",
            ),
            Boundary(
                insee="01005",
                name="01005-test",
                admin_level=8,
                geometry="srid=4326; POLYGON((1 1,2 1,2 2,1 2,1 1))",
            ),
            Boundary(
                insee="01006",
                name="01006-test",
                admin_level=8,
                geometry="srid=4326; POLYGON((2 2,3 2,3 3,2 3,2 2))",
            ),
            Boundary(
                insee="02022",
                name="02022-test",
                admin_level=8,
                geometry="srid=4326; POLYGON((3 3,4 3,4 4,3 4,3 3))",
            ),
            Boundary(
                insee="02022",
                name="02022-test-oldcity",
                admin_level=9,
                geometry="srid=4326; POLYGON((3.5 3.5,4 3.5,4 4,3.5 4,3.5 3.5))",
            ),
            Boundary(
                insee="02023",
                name="02023-test",
                admin_level=8,
                geometry="srid=4326; POLYGON((3 3,4 3,4 4,3 4,3 3))",
            ),
        ]
        db.session.add_all(objects)
        db.session.commit()
| StarcoderdataPython |
1617239 | <filename>lib/pytaf/tafdecoder.py
import re
from .taf import TAF
class DecodeError(Exception):
    """Raised when a TAF/METAR cannot be decoded.

    The message is also kept on ``strerror`` for backward compatibility.
    """
    def __init__(self, msg):
        # Pass msg to Exception so str(e) and e.args work; the original
        # only set strerror, leaving str(e) empty.
        super().__init__(msg)
        self.strerror = msg
class Decoder(object):
    """Render a parsed TAF/METAR (a pytaf.TAF object) as human-readable text."""
    def __init__(self, taf):
        # Only an already-parsed TAF wrapper is accepted.
        if isinstance(taf, TAF):
            self._taf = taf
        else:
            raise DecodeError("Argument is not a TAF parser object")
    def decode_taf(self):
        """Build and return the full multi-line decoded report text."""
        form = self._taf.get_header()["form"]
        result = ""
        result += self._decode_header(self._taf.get_header()) + "\n"
        for group in self._taf.get_groups():
            # TAF specific stuff
            if form == "taf":
                if group["header"]:
                    result += self._decode_group_header(group["header"]) + "\n"
            # METAR specific stuff
            if form == "metar":
                if group["temperature"]:
                    result += " Temperature: %s\n" % self._decode_temperature(group["temperature"])
                if group["pressure"]:
                    result += " Pressure: %s\n" % self._decode_pressure(group["pressure"])
            # Both TAF and METAR
            if group["wind"]:
                result += " Wind: %s \n" % self._decode_wind(group["wind"])
            if group["visibility"]:
                result += " Visibility: %s \n" % self._decode_visibility(group["visibility"])
            if group["clouds"]:
                result += " Sky conditions: %s \n" % self._decode_clouds(group["clouds"])
            if group["weather"]:
                result += " Weather: %s \n" % self._decode_weather(group["weather"])
            if group["windshear"]:
                result += " Windshear: %s\n" % self._decode_windshear(group["windshear"])
            result += " \n"
        if self._taf.get_maintenance():
            result += self._decode_maintenance(self._taf.get_maintenance())
        return(result)
    def _decode_header(self, header):
        """Decode the report header (issue time, validity period) to text."""
        result = ""
        # Ensure it's side effect free
        _header = header
        if _header["form"] == 'taf':
            # Decode TAF header
            # Type
            if _header["type"] == "AMD":
                result += "TAF amended for "
            elif _header["type"] == "COR":
                result += "TAF corrected for "
            elif _header["type"] == "RTD":
                result += "TAF related for "
            else:
                result += "TAF for "
            # Add ordinal suffix
            _header["origin_date"] = _header["origin_date"] + self._get_ordinal_suffix(_header["origin_date"])
            _header["valid_from_date"] = _header["valid_from_date"] + self._get_ordinal_suffix(_header["valid_from_date"])
            _header["valid_till_date" ] = _header["valid_till_date"] + self._get_ordinal_suffix(_header["valid_till_date"])
            result += ("%(icao_code)s issued %(origin_hours)s:%(origin_minutes)s UTC on the %(origin_date)s, "
                       "valid from %(valid_from_hours)s:00 UTC on the %(valid_from_date)s to %(valid_till_hours)s:00 UTC on the %(valid_till_date)s")
        else:
            # Decode METAR header
            # Type
            if _header["type"] == "COR":
                result += "METAR corrected for "
            else:
                result += "METAR for "
            _header["origin_date"] = _header["origin_date"] + self._get_ordinal_suffix(_header["origin_date"])
            result += ("%(icao_code)s issued %(origin_hours)s:%(origin_minutes)s UTC on the %(origin_date)s")
        # NOTE(review): despite the comment above, the suffix assignments
        # mutate the caller's header dict in place.
        result = result % _header
        return(result)
    def _decode_group_header(self, header):
        """Decode a TAF change-group header (FM/PROB/TEMPO/BECMG) to text."""
        result = ""
        _header = header
        from_str = "From %(from_hours)s:%(from_minutes)s on the %(from_date)s: "
        prob_str = "Probability %(probability)s%% of the following between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
        tempo_str = "Temporarily between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
        prob_tempo_str = "Probability %(probability)s%% of the following temporarily between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
        becmg_str = "Gradual change to the following between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
        if "type" in _header:
            # Add ordinal suffix
            if "from_date" in _header:
                from_suffix = self._get_ordinal_suffix(_header["from_date"])
                _header["from_date"] = _header["from_date"] + from_suffix
            if "till_date" in _header:
                till_suffix = self._get_ordinal_suffix(_header["till_date"])
                _header["till_date"] = _header["till_date"] + till_suffix
            if _header["type"] == "FM":
                result += from_str % { "from_date": _header["from_date"],
                                       "from_hours": _header["from_hours"],
                                       "from_minutes": _header["from_minutes"] }
            elif _header["type"] == "PROB%s" % (_header["probability"]):
                result += prob_str % { "probability": _header["probability"],
                                       "from_date": _header["from_date"],
                                       "from_hours": _header["from_hours"],
                                       "till_date": _header["till_date"],
                                       "till_hours": _header["till_hours"] }
            elif "PROB" in _header["type"] and "TEMPO" in _header["type"]:
                result += prob_tempo_str % { "probability": _header["probability"],
                                       "from_date": _header["from_date"],
                                       "from_hours": _header["from_hours"],
                                       "till_date": _header["till_date"],
                                       "till_hours": _header["till_hours"] }
            elif _header["type"] == "TEMPO":
                result += tempo_str % { "from_date": _header["from_date"],
                                        "from_hours": _header["from_hours"],
                                        "till_date": _header["till_date"],
                                        "till_hours": _header["till_hours"] }
            elif _header["type"] == "BECMG":
                result += becmg_str % { "from_date": _header["from_date"],
                                        "from_hours": _header["from_hours"],
                                        "till_date": _header["till_date"],
                                        "till_hours": _header["till_hours"] }
        return(result)
    def _decode_wind(self, wind):
        """Decode a wind group (direction/speed/gust) to text."""
        unit = ""
        result = ""
        if wind["direction"] == "000":
            return("calm")
        elif wind["direction"] == "VRB":
            result += "variable"
        else:
            result += "from %s degrees" % wind["direction"]
        if wind["unit"] == "KT":
            unit = "knots"
        elif wind["unit"] == "MPS":
            unit = "meters per second"
        else:
            # Unlikely, but who knows
            unit = "(unknown unit)"
        result += " at %s %s" % (wind["speed"], unit)
        if wind["gust"]:
            result += " gusting to %s %s" % (wind["gust"], unit)
        return(result)
    def _decode_visibility(self, visibility):
        """Decode a visibility group (range and unit) to text."""
        result = ""
        if "more" in visibility:
            if visibility["more"]:
                result += "more than "
        result += visibility["range"]
        if visibility["unit"] == "SM":
            result += " statute miles"
        elif visibility["unit"] == "M":
            result += " meters"
        return(result)
    def _decode_clouds(self, clouds):
        """Decode a list of cloud layers to comma-separated text.

        NOTE(review): shadows the builtins `list` and `type` locally, and
        an unrecognized layer code would leave `layer_type` unbound
        (NameError) on the first iteration.
        """
        result = ""
        i_result = ""
        list = []
        for layer in clouds:
            if layer["layer"] == "SKC" or layer["layer"] == "CLR":
                return "sky clear"
            if layer["layer"] == "NSC":
                return "no significant cloud"
            if layer["layer"] == "CAVOK":
                return "ceiling and visibility are OK"
            if layer["layer"] == "CAVU":
                return "ceiling and visibility unrestricted"
            if layer["layer"] == "VV///":
                return "Sky Obscured"
            if layer["layer"] == "SCT":
                layer_type = "scattered"
            elif layer["layer"] == "BKN":
                layer_type = "broken"
            elif layer["layer"] == "FEW":
                layer_type = "few"
            elif layer["layer"] == "OVC":
                layer_type = "overcast"
            if layer["type"] == "CB":
                type = "cumulonimbus"
            elif layer["type"] == "CU":
                type = "cumulus"
            elif layer["type"] == "TCU":
                type = "towering cumulus"
            elif layer["type"] == "CI":
                type = "cirrus"
            else:
                type = ""
            # Ceiling values are coded in hundreds of feet.
            result = "%s %s clouds at %d feet" % (layer_type, type, int(layer["ceiling"])*100)
            # Remove extra whitespace, if any
            result = re.sub(r'\s+', ' ', result)
            list.append(result)
            layer = ""
            type = ""
            result = ""
        result = ", ".join(list)
        return(result)
    def _decode_weather(self, weather):
        """Decode weather phenomenon groups (e.g. -SHRA) to text."""
        # Dicts for translating the abbreviations
        dict_intensities = {
            "-" : "light",
            "+" : "heavy",
            "VC" : "in the vicinity",
            "RE" : "recent"
        }
        dict_modifiers = {
            "MI" : "shallow",
            "BC" : "patchy",
            "DR" : "low drifting",
            "BL" : "blowing",
            "SH" : "showers",
            "TS" : "thunderstorms",
            "FZ" : "freezing",
            "PR" : "partial"
        }
        dict_phenomenons = {
            "DZ" : "drizzle",
            "RA" : "rain",
            "SN" : "snow",
            "SG" : "snow grains",
            "IC" : "ice",
            "PL" : "ice pellets",
            "GR" : "hail",
            "GS" : "small snow/hail pellets",
            "UP" : "unknown precipitation",
            "BR" : "mist",
            "FG" : "fog",
            "FU" : "smoke",
            "DU" : "dust",
            "SA" : "sand",
            "HZ" : "haze",
            "PY" : "spray",
            "VA" : "volcanic ash",
            "PO" : "dust/sand whirl",
            "SQ" : "squall",
            "FC" : "funnel cloud",
            "SS" : "sand storm",
            "DS" : "dust storm",
        }
        weather_txt_blocks = []
        # Check for special cases first. If a certain combination is found
        # then skip parsing the whole weather string and return a defined string
        # immediately
        for group in weather:
            # +FC = Tornado or Waterspout
            if "+" in group["intensity"] and "FC" in group["phenomenon"]:
                weather_txt_blocks.append("tornado or waterspout")
                continue
            # Sort the elements of the weather string, if no special combi-
            # nation is found.
            intensities_pre = []
            intensities_post = []
            if "RE" in group["intensity"]:
                intensities_pre.append("RE")
                group["intensity"].remove("RE")
            for intensity in group["intensity"]:
                if intensity != "VC":
                    intensities_pre.append(intensity)
                else:
                    intensities_post.append(intensity)
            modifiers_pre = []
            modifiers_post = []
            for modifier in group["modifier"]:
                if modifier != "TS" and modifier != "SH":
                    modifiers_pre.append(modifier)
                else:
                    modifiers_post.append(modifier)
            phenomenons_pre = []
            phenomenons_post = []
            for phenomenon in group["phenomenon"]:
                if phenomenon != "UP":
                    phenomenons_pre.append(phenomenon)
                else:
                    phenomenons_post.append(phenomenon)
            # Build the human readable text from the single weather string
            # and append it to a list containing all the interpreted text
            # blocks from a TAF group
            weather_txt = ""
            for intensity in intensities_pre:
                weather_txt += dict_intensities[intensity] + " "
            for modifier in modifiers_pre:
                weather_txt += dict_modifiers[modifier] + " "
            phenomenons = phenomenons_pre + phenomenons_post
            cnt = len(phenomenons)
            for phenomenon in phenomenons:
                weather_txt += dict_phenomenons[phenomenon]
                if cnt > 2:
                    weather_txt += ", "
                if cnt == 2:
                    weather_txt += " and "
                cnt = cnt-1
            weather_txt += " "
            for modifier in modifiers_post:
                weather_txt += dict_modifiers[modifier] + " "
            for intensity in intensities_post:
                weather_txt += dict_intensities[intensity] + " "
            weather_txt_blocks.append(weather_txt.strip())
        # Put all the human readable stuff together and return the final
        # output as a string.
        weather_txt_full = ""
        for block in weather_txt_blocks[:-1]:
            weather_txt_full += block + " / "
        weather_txt_full += weather_txt_blocks[-1]
        return(weather_txt_full)
    def _decode_temperature(self, temperature, unit='C'):
        """Decode METAR air/dewpoint temperatures; 'M' prefix means negative.
        *unit* selects Celsius ('C', default) or Fahrenheit ('F') output."""
        if temperature["air_prefix"] == 'M':
            air_c = int(temperature["air"])*-1
        else:
            air_c = int(temperature["air"])
        if temperature["dewpoint_prefix"] == 'M':
            dew_c = int(temperature["dewpoint"])*-1
        else:
            dew_c = int(temperature["dewpoint"])
        if unit == 'C':
            air_txt = air_c
            dew_txt = dew_c
        if unit == 'F':
            air_f = int(round(air_c*1.8+32))
            dew_f = int(round(dew_c*1.8+32))
            air_txt = air_f
            dew_txt = dew_f
        result = "air at %s°%s, dewpoint at %s°%s" % (air_txt, unit, dew_txt, unit)
        return(result)
    def _decode_pressure(self, pressure):
        """Decode atmospheric pressure to '<value> hPa'."""
        result = "%s hPa" % (pressure["athm_pressure"])
        return(result)
    def _decode_windshear(self, windshear):
        """Decode a windshear group; altitude is coded in hundreds of feet."""
        result = "at %s, wind %s at %s %s" % ((int(windshear["altitude"])*100), windshear["direction"], windshear["speed"], windshear["unit"])
        return(result)
    def _decode_maintenance(self, maintenance):
        """Return the maintenance notice, or None when the flag is falsy."""
        if maintenance:
            return "Station is under maintenance check\n"
    def _get_ordinal_suffix(self, date):
        """Return the English ordinal suffix (st/nd/rd/th) for *date*."""
        _date = str(date)
        suffix = ""
        if re.match(".*(1[12]|[04-9])$", _date):
            suffix = "th"
        elif re.match(".*1$", _date):
            suffix = "st"
        elif re.match(".*2$", _date):
            suffix = "nd"
        elif re.match(".*3$", _date):
            suffix = "rd"
        return(suffix)
| StarcoderdataPython |
1604054 | <reponame>nunulong/algorithms
class Solution:
    def two_sum(self, nums, target):
        """Return the indices of the two distinct entries of *nums* that
        sum to *target*, or [] if no such pair exists.

        Bug fix: the original scanned ``if sec in nums`` without excluding
        the current element, so e.g. nums=[3, 2, 4], target=6 paired
        index 0 with itself; it also kept appending every further match
        instead of returning the first pair.  This version uses a single
        O(n) pass with a value->index map and never reuses an element.
        """
        seen = {}  # value -> index of its first occurrence
        for index, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], index]
            seen[value] = index
        return []
| StarcoderdataPython |
1614987 | <reponame>nicholas-miklaucic/nmiklaucic-updated-emacs
"""Glue for the "black" library.
"""
import sys
from pkg_resources import parse_version
import os
try:
    import toml
except ImportError:
    # toml is optional; without it pyproject.toml settings are ignored.
    toml = None
from elpy.rpc import Fault

# black itself requires Python >= 3.6.
BLACK_NOT_SUPPORTED = sys.version_info < (3, 6)

try:
    if BLACK_NOT_SUPPORTED:
        black = None
    else:
        import black
except ImportError:  # pragma: no cover
    # black not installed; fix_code() reports this via a Fault.
    black = None
def fix_code(code, directory):
    """Formats Python code to conform to the PEP 8 style guide.

    Honours the ``[tool.black]`` section of ``pyproject.toml`` found in
    *directory* (line length, string normalization) when the ``toml``
    package is available.  Raises a Fault if black is not installed or
    formatting fails; returns *code* unchanged when black reports
    NothingChanged.
    """
    if not black:
        raise Fault("black not installed", code=400)
    # Pull black's configuration out of pyproject.toml, if present.
    line_length = black.DEFAULT_LINE_LENGTH
    string_normalization = True
    pyproject_path = os.path.join(directory, "pyproject.toml")
    if toml is not None and os.path.exists(pyproject_path):
        black_config = toml.load(pyproject_path).get("tool", {}).get("black", {})
        line_length = black_config.get("line-length", line_length)
        if "skip-string-normalization" in black_config:
            string_normalization = not black_config["skip-string-normalization"]
    try:
        if parse_version(black.__version__) < parse_version("19.0"):
            # Old black releases take line_length directly; FileMode
            # did not exist yet.
            return black.format_file_contents(
                src_contents=code, line_length=line_length, fast=False)
        mode = black.FileMode(
            line_length=line_length,
            string_normalization=string_normalization)
        return black.format_file_contents(
            src_contents=code, fast=False, mode=mode)
    except black.NothingChanged:
        return code
    except Exception as e:
        raise Fault("Error during formatting: {}".format(e), code=400)
| StarcoderdataPython |
3218123 | <reponame>zconnect-iot/zconnect-django
# pylint: disable=wildcard-import,unused-wildcard-import
from .base import ModelBase
from .activity_stream import * # noqa
from .device import * # noqa
from .event import * # noqa
from .location import * # noqa
from .organization import * # noqa
from .product import * # noqa
from .updates import * # noqa
from .user import * # noqa
from .states import * # noqa
from . import mixins
# Public API of the models package, re-exported from the wildcard
# imports above.
__all__ = [
    "AbstractDevice",
    "ActivitySubscription",
    "Device",
    "DeviceUpdateStatus",
    "Event",
    "EventDefinition",
    "Location",
    "ModelBase",
    "Organization",
    "OrganizationLogo",
    "Product",
    "ProductFirmware",
    "ProductPreprocessors",
    "ProductTags",
    "UpdateExecution",
    "User",
    "mixins",
    "DeviceState",
]
| StarcoderdataPython |
36696 | <reponame>zopefoundation/grokcore.component
"""
Imported model and adapter won't be grokked:
>>> import grokcore.component as grok
>>> grok.testing.grok(__name__)
>>> from grokcore.component.tests.adapter.adapter import IHome
>>> cave = Cave()
>>> home = IHome(cave)
Traceback (most recent call last):
...
TypeError: ('Could not adapt', <grokcore.component.tests.adapter.adapter.Cave object at ...>, <InterfaceClass grokcore.component.tests.adapter.adapter.IHome>)
""" # noqa: E501 line too long
from grokcore.component.tests.adapter.adapter import Cave, Home # noqa: F401
| StarcoderdataPython |
24974 | <filename>test.py<gh_stars>1-10
#!/usr/bin/env python
"""
Test the Inspector
"""
import os.path, sys
sys.path.append(os.path.dirname(__file__))
import unittest
import inspector
########################################################
#
# Inspector test
#
########################################################
def func2(*args, **kwargs):
    """
    Innermost frame of the synthetic call stack; forwards to inspector.trace.
    """
    return inspector.trace(*args, **kwargs)  # NOTE: tests assert this file's line numbers; do not add/remove lines
def func1(*args, **kwargs):
    """
    Middle frame of the synthetic call stack exercised by the tests.
    """
    return func2(*args, **kwargs)  # this line appears as "test.py:28" in the expected trace output
def do_inspect(*args, **kwargs):
    """
    Call this function to test; goes do_inspect -> func1 -> func2 -> trace.
    """
    return func1(*args, **kwargs)  # this line appears as "test.py:34" in the expected trace output
########################################################
#
# TEST CASES
#
########################################################
class InspectorTest(unittest.TestCase):  # NOTE: expected strings hard-code file line numbers (28/34/57) — do not reflow this file
    """
    Unit tests for the Inspector
    """
    def test_full_trace(self):
        """
        Full trace: every frame up from func1, oldest last.
        """
        expected = """\
test.py:28 in method func1
\treturn func2(*args, **kwargs)test.py:34 in method do_inspect
\treturn func1(*args, **kwargs)test.py:57 in method test_full_trace
\toutput = do_inspect(basename_only=True)[:3]
"""
        output = do_inspect(basename_only=True)[:3]
        self.assertEqual(
            (''.join(output)).strip(),
            expected.strip())
    def test_trace_with_depth(self):
        """
        depth=1 limits the trace to the immediate caller frame.
        """
        expected = """\
test.py:28 in method func1
\treturn func2(*args, **kwargs)
"""
        output = do_inspect(depth=1, basename_only=True)[:3]
        self.assertEqual(
            (''.join(output)).strip(),
            expected.strip())
    def test_trace_with_one_line_log(self):
        """
        one_line_response merges location and source onto a single line.
        """
        expected = """\
test.py:28 in method func1: \treturn func2(*args, **kwargs)
"""
        output = do_inspect(
            depth=1, one_line_response=True, basename_only=True)[:3]
        self.assertEqual(
            (''.join(output)).strip(),
            expected.strip())
if __name__ == '__main__':
    # Run the three cases in a fixed order with verbose output.
    SUITE = unittest.TestSuite()
    for case_name in ('test_full_trace',
                      'test_trace_with_depth',
                      'test_trace_with_one_line_log'):
        SUITE.addTest(InspectorTest(case_name))
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| StarcoderdataPython |
131353 | """Chapter 5: Question 4.
A simple condition to check if a number is power of 2: n & (n-1) == 0
Example:
n = 1000
1000 & (1000 - 1)) = 1000 & 0111 = 0000 = 0
"""
def is_power_of_two(n):
    """Checks if n is a power of 2.

    Uses the classic bit trick ``n & (n-1) == 0``, which clears the
    lowest set bit; a power of two has exactly one bit set.

    Bug fix: the bare trick also accepts 0 (``0 & -1 == 0``), but 0 is
    not a power of two, so we additionally require ``n > 0``.

    Args:
        n: Non-negative integer.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError('Input argument must be >= 0.')
    return n > 0 and n & (n-1) == 0
| StarcoderdataPython |
3297620 | <gh_stars>0
# Read 10 letters, remember them all and count the consonants.
lista = []
n = 0  # consonant count
for _ in range(10):
    consoante = input('Digite uma letra: ')
    lista.append(consoante)
    # Bug fix: compare case-insensitively — upper-case vowels such as
    # 'A' were previously counted as consonants.
    if consoante.lower() not in 'aeiou':
        n = n + 1
print(n)
print(lista)
| StarcoderdataPython |
1733671 | <reponame>nrser/nansi.collections<filename>wireguard/plugins/action/package.py<gh_stars>0
from __future__ import annotations
from typing import Literal, Optional
import splatlog as logging
from nansi.plugins.action.os_resolve import OSResolveAction
from nansi.plugins.action.args.all import Arg, ArgsBase
LOG = logging.getLogger(__name__)
class Args(ArgsBase):
    # Arguments shared by every OS variant: desired package state and an
    # optional pinned version.
    state = Arg(Literal["present", "absent"], "present")
    version = Arg(Optional[str], None)
class DebianArgs(Args):
    """Debian-family arguments: the apt package name plus the shared
    state/version options."""

    name = Arg(str, "wireguard")

    @property
    def names(self):
        # apt.ext accepts either a bare package name or a mapping that
        # pins a specific version.
        if self.version is not None:
            return dict(name=self.name, version=self.version)
        return self.name
class ActionModule(OSResolveAction):
    """Installs or removes the wireguard package, dispatched by OS family."""

    @OSResolveAction.map(family="debian")
    def debian(self):
        # Parse the task arguments and delegate to the apt task.
        parsed = DebianArgs(self._task.args, self)
        self.tasks.nansi.apt.ext(names=parsed.names, state=parsed.state)
| StarcoderdataPython |
3236325 | <reponame>JohannesBuchner/pystrict3
from email import encoders
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import mimetypes
import os
import re
import smtplib
class Email:
    """
    This class handles the creation and sending of email messages
    via SMTP. This class also handles attachments and can send
    HTML messages. The code comes from various places around
    the net and from my own brain.
    """
    def __init__(self, smtpServer):
        """
        Create a new empty email message object.

        @param smtpServer: The address of the SMTP server
        @type smtpServer: String
        """
        self._textBody = None
        self._htmlBody = None
        self._subject = ""
        self._smtpServer = smtpServer
        # Accepts either "Display Name <user@host>" or a bare "user@host".
        self._reEmail = re.compile("^([\\w \\._]+\\<[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\>|[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)$")
        self.clearRecipients()
        self.clearAttachments()

    def send(self):
        """
        Send the email message represented by this object.

        Assembles the MIME tree (text/html alternative plus any
        attachments) and delivers it via the configured SMTP server.
        NOTE(review): assumes setFrom() was called first — otherwise
        self._from is unset and this raises AttributeError.
        """
        # Validate message
        if self._textBody is None and self._htmlBody is None:
            raise Exception("Error! Must specify at least one body type (HTML or Text)")
        if len(self._to) == 0:
            raise Exception("Must specify at least one recipient")
        # Create the message part
        if self._textBody is not None and self._htmlBody is None:
            msg = MIMEText(self._textBody, "plain")
        elif self._textBody is None and self._htmlBody is not None:
            msg = MIMEText(self._htmlBody, "html")
        else:
            # Both bodies present: "alternative" lets the reader pick.
            msg = MIMEMultipart("alternative")
            msg.attach(MIMEText(self._textBody, "plain"))
            msg.attach(MIMEText(self._htmlBody, "html"))
        # Add attachments, if any
        if len(self._attach) != 0:
            # Wrap the body part in an outer multipart container.
            tmpmsg = msg
            msg = MIMEMultipart()
            msg.attach(tmpmsg)
            for fname,attachname in self._attach:
                # Missing/non-file attachments are skipped with a warning,
                # not treated as errors.
                if not os.path.exists(fname):
                    print("File '%s' does not exist. Not attaching to email." % fname)
                    continue
                if not os.path.isfile(fname):
                    print("Attachment '%s' is not a file. Not attaching to email." % fname)
                    continue
                # Guess at encoding type
                ctype, encoding = mimetypes.guess_type(fname)
                if ctype is None or encoding is not None:
                    # No guess could be made so use a binary type.
                    ctype = 'application/octet-stream'
                maintype, subtype = ctype.split('/', 1)
                if maintype == 'text':
                    fp = open(fname)
                    attach = MIMEText(fp.read(), _subtype=subtype)
                    fp.close()
                elif maintype == 'image':
                    fp = open(fname, 'rb')
                    attach = MIMEImage(fp.read(), _subtype=subtype)
                    fp.close()
                elif maintype == 'audio':
                    fp = open(fname, 'rb')
                    attach = MIMEAudio(fp.read(), _subtype=subtype)
                    fp.close()
                else:
                    fp = open(fname, 'rb')
                    attach = MIMEBase(maintype, subtype)
                    attach.set_payload(fp.read())
                    fp.close()
                # Encode the payload using Base64
                encoders.encode_base64(attach)
                # Set the filename parameter
                if attachname is None:
                    filename = os.path.basename(fname)
                else:
                    filename = attachname
                attach.add_header('Content-Disposition', 'attachment', filename=filename)
                msg.attach(attach)
        # Some header stuff
        msg['Subject'] = self._subject
        msg['From'] = self._from
        msg['To'] = ", ".join(self._to)
        msg.preamble = "You need a MIME enabled mail reader to see this message"
        # Send message
        msg = msg.as_string()
        server = smtplib.SMTP(self._smtpServer)
        server.sendmail(self._from, self._to, msg)
        server.quit()

    def setSubject(self, subject):
        """
        Set the subject of the email message.
        """
        self._subject = subject

    def setFrom(self, address):
        """
        Set the email sender.  Raises if the address fails validation.
        """
        if not self.validateEmailAddress(address):
            raise Exception("Invalid email address '%s'" % address)
        self._from = address

    def clearRecipients(self):
        """
        Remove all currently defined recipients for
        the email message.
        """
        self._to = []

    def addRecipient(self, address):
        """
        Add a new recipient to the email message.  Raises if the
        address fails validation.
        """
        if not self.validateEmailAddress(address):
            raise Exception("Invalid email address '%s'" % address)
        self._to.append(address)

    def setTextBody(self, body):
        """
        Set the plain text body of the email message (None clears it).
        """
        self._textBody = body

    def setHtmlBody(self, body):
        """
        Set the HTML portion of the email message (None clears it).
        """
        self._htmlBody = body

    def clearAttachments(self):
        """
        Remove all file attachments.
        """
        self._attach = []

    def addAttachment(self, fname, attachname=None):
        """
        Add a file attachment to this email message.

        @param fname: The full path and file name of the file
                      to attach.
        @type fname: String
        @param attachname: This will be the name of the file in
                           the email message if set. If not set
                           then the filename will be taken from
                           the fname parameter above.
        @type attachname: String
        """
        if fname is None:
            return
        self._attach.append( (fname, attachname) )

    def validateEmailAddress(self, address):
        """
        Validate the specified email address.

        @return: True if valid, False otherwise
        @rtype: Boolean
        """
        if self._reEmail.search(address) is None:
            return False
        return True
if __name__ == "__main__":
# Run some tests
mFrom = "Test User <<EMAIL>>"
mTo = "<EMAIL>"
m = Email("mail.mydomain.com")
m.setFrom(mFrom)
m.addRecipient(mTo)
# Simple Plain Text Email
m.setSubject("Plain text email")
m.setTextBody("This is a plain text email <b>I should not be bold</b>")
m.send()
# Plain text + attachment
m.setSubject("Text plus attachment")
m.addAttachment("/home/user/image.png")
m.send()
# Simple HTML Email
m.clearAttachments()
m.setSubject("HTML Email")
m.setTextBody(None)
m.setHtmlBody("The following should be <b>bold</b>")
m.send()
# HTML + attachment
m.setSubject("HTML plus attachment")
m.addAttachment("/home/user/image.png")
m.send()
# Text + HTML
m.clearAttachments()
m.setSubject("Text and HTML Message")
m.setTextBody("You should not see this text in a MIME aware reader")
m.send()
# Text + HTML + attachment
m.setSubject("HTML + Text + attachment")
m.addAttachment("/home/user/image.png")
m.send()
| StarcoderdataPython |
4806532 |
# Keep prompting until the user supplies a valid integer.
while True:
    try:
        numInput = int(input("Please Enter a number:"))
    except ValueError:
        print("Try Again")
    else:
        break
# Report the number's parity.
if numInput % 2 == 0:
    print("even")
else:
    print("odd")
| StarcoderdataPython |
import spacy

nlp = spacy.load("de_core_news_sm")

text = "Apple wurde 1976 von <NAME>, <NAME> und <NAME> gegründet."

# Process the text (the ____ blanks are exercise placeholders to fill in)
doc = ____

# Iterate over the predicted entities
for ent in ____.____:
    # Print the entity's text and its label
    print(ent.____, ____.____)
| StarcoderdataPython |
1633165 | <reponame>NatholBMX/coursera_partical_rl
import sys
import numpy as np
sys.path.append("..")
import grading
def submit_bandits(scores, email, token):
    """Submit bandit-agent results to the Coursera grader.

    Locates the epsilon-greedy, UCB and Thompson-sampling entries in
    *scores* by substring match on their names, then grades differences
    of their cumulative values at steps 5e3 and 1e4.

    NOTE(review): iterating ``scores`` yields objects exposing ``.name``
    that are then also used to index ``scores`` — presumably a pandas
    container keyed by agent; confirm against the caller.
    """
    epsilon_greedy_agent = None
    ucb_agent = None
    thompson_sampling_agent = None
    for agent in scores:
        if "EpsilonGreedyAgent" in agent.name:
            epsilon_greedy_agent = agent.name
        if "UCBAgent" in agent.name:
            ucb_agent = agent.name
        if "ThompsonSamplingAgent" in agent.name:
            thompson_sampling_agent = agent.name
    # All three agents must be present in the scores.
    assert epsilon_greedy_agent is not None
    assert ucb_agent is not None
    assert thompson_sampling_agent is not None

    grader = grading.Grader("VL9tBt7zEeewFg5wtLgZkA")
    # Each set_answer key is a fixed grader field id; the values are
    # score deltas between agents and/or between steps 5e3 and 1e4.
    grader.set_answer(
        "YQLYE",
        (int(scores[epsilon_greedy_agent][int(1e4) - 1]) -
         int(scores[epsilon_greedy_agent][int(5e3) - 1])))
    grader.set_answer(
        "FCHOZ",
        (int(scores[epsilon_greedy_agent][int(1e4) - 1]) -
         int(scores[ucb_agent][int(1e4) - 1])))
    grader.set_answer(
        "0JWHl",
        (int(scores[epsilon_greedy_agent][int(5e3) - 1]) -
         int(scores[ucb_agent][int(5e3) - 1])))
    grader.set_answer(
        "4rH5M",
        (int(scores[epsilon_greedy_agent][int(1e4) - 1]) -
         int(scores[thompson_sampling_agent][int(1e4) - 1])))
    grader.set_answer(
        "TvOqm",
        (int(scores[epsilon_greedy_agent][int(5e3) - 1]) -
         int(scores[thompson_sampling_agent][int(5e3) - 1])))
    grader.submit(email, token)
def submit_mcts(total_reward, email, token):
    """Submit the MCTS assignment's total reward to the Coursera grader."""
    mcts_grader = grading.Grader("Giz88DiCEei4TA70mSDOBg")
    mcts_grader.set_answer("L1HgT", int(total_reward))
    mcts_grader.submit(email, token)
| StarcoderdataPython |
1760797 | # Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.sax
import taxonomy
import validator
import constants
class _ElementsHandler(xml.sax.ContentHandler):
    """
    Reads the files in solar-taxonomy/core/*.xsd
    This extracts the metadata for each concept name, such as the datatype
    of the concept, whether it's nillable, etc.
    As a SAX parser, it streams the XML, and startElement() is called
    once for each element in the file.
    """
    def __init__(self):
        self._elements = {}

    def startElement(self, name, attrs):
        if name == "xs:element":
            element = taxonomy.Element()
            for item in attrs.items():
                if item[0] == "abstract":
                    # Bug fix: the False branch previously assigned to a
                    # misspelled "abstact" attribute, so the real
                    # ``abstract`` flag was never set to False.
                    element.abstract = item[1] != "false"
                elif item[0] == "id":
                    # Turn the first underscore (only the first) into
                    # a colon. For example, the concept named
                    # solar:InverterPowerLevel10PercentMember_1 appears
                    # in the id field as
                    # solar_InverterPowerLevel10PercentMember_1. We want
                    # to replace the first underscore but not the second.
                    element.id = item[1].replace("_", ":", 1)
                elif item[0] == "name":
                    element.name = item[1]
                elif item[0] == "nillable":
                    element.nillable = item[1] != "false"
                elif item[0] == "solar:periodIndependent":
                    element.period_independent = item[1]
                elif item[0] == "substitutionGroup":
                    element.substitution_group = item[1]
                elif item[0] == "type":
                    element.type_name = item[1]
                elif item[0] == "xbrli:periodType":
                    element.period_type = item[1]
            self._elements[element.id] = element

    def elements(self):
        """Return the id -> Element mapping collected so far."""
        return self._elements
class _TaxonomySemanticHandler(xml.sax.ContentHandler):
"""
Reads the files in solar-taxonomy/documents/<document name>/*_pre.xml
This extracts the list of concept names from the presentation file.
As a SAX parser,it streams the XML, and startElement() is called
once for each XML element in the file.
"""
def __init__(self):
self._concepts = []
def startElement(self, name, attrs):
if name == "loc":
for item in attrs.items():
if item[0] == "xlink:label":
concept = item[1].replace("_", ":", 1)
self._concepts.append(concept)
def concepts(self):
return self._concepts
class _TaxonomyRelationshipHandler(xml.sax.ContentHandler):
"""
Reads the files in solar-taxonomy/documents/<document name>/*_def.xml
This extracts the relationships between the concepts, such as when one
concept is a parent of another, when a concept belongs to a hypercube,
etc.
As a SAX parser,it streams the XML, and startElement() is called
once for each XML element in the file.
"""
def __init__(self):
self._relationships = []
def startElement(self, name, attrs):
if name == "definitionArc":
relationship = {"role": None, "from": None, "to": None, "order": None}
for item in attrs.items():
if item[0] == "xlink:arcrole":
relationship['role'] = item[1].split("/")[-1]
if item[0] == "xlink:from":
relationship['from'] = item[1].replace("_", ":", 1)
if item[0] == "xlink:to":
relationship['to'] = item[1].replace("_", ":", 1)
if item[0] == "order":
relationship['order'] = item[1]
self._relationships.append(relationship)
# Question TBD: do we need to remember which document definition
# this relationship came from? would the same concepts ever have
# different relationships in one document than another?
def relationships(self):
return self._relationships
class TaxonomySemantic(object):
    """Loads the solar taxonomy from disk (elements, per-entry-point
    concepts, and concept relationships) and answers lookup/validation
    queries against it."""

    def __init__(self):
        # Eagerly parse all taxonomy files; this walks the
        # SOLAR_TAXONOMY_DIR tree at construction time.
        self._elements = self._load_elements()
        self._concepts = self._load_concepts()
        self._relationships = self._load_relationships()

    def _load_elements_file(self, pathname):
        # Parse one .xsd schema file into an id -> Element mapping.
        eh = _ElementsHandler()
        parser = xml.sax.make_parser()
        parser.setContentHandler(eh)
        parser.parse(open(pathname))
        return eh.elements()

    def _load_elements(self):
        # Core solar schema plus the external us-gaap and dei schemas,
        # merged into a single mapping (later files win on id clashes).
        elements = self._load_elements_file(os.path.join(
            constants.SOLAR_TAXONOMY_DIR, "core",
            "solar_2018-03-31_r01.xsd"))
        elements.update(self._load_elements_file(os.path.join(
            constants.SOLAR_TAXONOMY_DIR, "external",
            "us-gaap-2017-01-31.xsd")))
        elements.update(self._load_elements_file(os.path.join(
            constants.SOLAR_TAXONOMY_DIR, "external",
            "dei-2018-01-31.xsd")))
        return elements

    def elements(self):
        """
        Returns a map of elements.
        """
        return self._elements

    def _load_concepts_file(self, pathname):
        # Parse one presentation (*_pre.xml) file into a concept list.
        tax = _TaxonomySemanticHandler()
        parser = xml.sax.make_parser()
        parser.setContentHandler(tax)
        parser.parse(open(pathname))
        return tax.concepts()

    def _load_concepts(self):
        """
        Returns a dict of available concepts, keyed by entry point
        (sub-directory) name.
        """
        # TODO: Better understand the relationship of "def" vs. "pre" xml files. Using pre seems
        # to load a more accurate representation of the taxonomy but this was found via trial and
        # error as opposed to a scientific methodology.
        concepts = {}
        for dirname in os.listdir(os.path.join(constants.SOLAR_TAXONOMY_DIR,
                                               "data")):
            for filename in os.listdir(
                    os.path.join(constants.SOLAR_TAXONOMY_DIR, "data",
                                 dirname)):
                # if 'def.' in filename:
                if 'pre.' in filename:
                    concepts[dirname] = self._load_concepts_file(
                        os.path.join(constants.SOLAR_TAXONOMY_DIR,
                                     "data", dirname, filename))
        for dirname in os.listdir(os.path.join(constants.SOLAR_TAXONOMY_DIR,
                                               "documents")):
            for filename in os.listdir(
                    os.path.join(constants.SOLAR_TAXONOMY_DIR, "documents",
                                 dirname)):
                # if 'def.' in filename:
                if 'pre.' in filename:
                    concepts[dirname] = self._load_concepts_file(
                        os.path.join(constants.SOLAR_TAXONOMY_DIR,
                                     "documents", dirname, filename))
        return concepts

    def _load_relationships_file(self, fn):
        # Parse one definition (*_def.xml) file into a relationship list.
        tax = _TaxonomyRelationshipHandler()
        parser = xml.sax.make_parser()
        parser.setContentHandler(tax)
        parser.parse(open(os.path.join(constants.SOLAR_TAXONOMY_DIR, fn)))
        return tax.relationships()

    def _load_relationships(self):
        # Only the "documents" tree is scanned for relationships
        # (unlike _load_concepts, which also scans "data").
        relationships = {}
        for dirname in os.listdir(os.path.join(constants.SOLAR_TAXONOMY_DIR, "documents")):
            for filename in os.listdir(os.path.join(constants.SOLAR_TAXONOMY_DIR, "documents", dirname)):
                if 'def.' in filename:
                    relationships[dirname] = self._load_relationships_file(os.path.join("documents", dirname, filename))
        return relationships

    def validate_concept(self, concept):
        """
        Validates if a concept is present in the Taxonomy.
        Returns True/False; scans every entry point's concept list.
        """
        found = False
        for c in self._concepts:
            for cc in self._concepts[c]:
                if cc == concept:
                    found = True
                    break
        return found

    def validate_concept_value(self, concept, value):
        """
        Validates if a concept is present in the Taxonomy and that its value is legal.
        Returns a list of error strings (empty means valid).
        """
        # Check presence
        found = False
        concept_info = False
        for c in self._concepts:
            for cc in self._concepts[c]:
                if cc == concept:
                    found = True
                    concept_info = self.concept_info(concept)
                    break
        if not found:
            return ["'{}' concept not found.".format(concept)]
        # Delegate the value check to the validator module.
        return validator.validate_concept_value(concept_info, value)

    def validate_ep(self, data):
        """
        Validates if an end point type is present in the Taxonomy.
        """
        if data in self._concepts:
            return True
        else:
            return False

    def concepts_ep(self, data):
        """
        Returns a list of all concepts in an end point, or None if the
        entry point is unknown.
        """
        if data in self._concepts:
            return self._concepts[data]
        else:
            return None

    def relationships_ep(self, entry_point):
        """
        Returns a list of all relationshiops in an entry point.
        Returns an empty list if the concept exists but has no relationships,
        and None if the entry point is unknown.
        """
        if entry_point in self._concepts:
            if entry_point in self._relationships:
                return self._relationships[entry_point]
            else:
                return []
        else:
            return None

    def concept_info(self, concept):
        """
        Returns information on a single concept (an Element), or None if
        the concept is unknown or has no element metadata.
        """
        found = False
        for c in self._concepts:
            for cc in self._concepts[c]:
                if cc == concept:
                    found = True
                    break
        if not found:
            return None
        if concept in self._elements:
            return self._elements[concept]
        else:
            return None

    def concepts_info_ep(self, data):
        """
        Returns a list of all concepts and their attributes in an end point,
        or None if the entry point is unknown.  Concepts with no element
        metadata are silently omitted (see TODO below).
        """
        if data in self._concepts:
            ci = []
            for concept in self._concepts[data]:
                if concept in self._elements:
                    ci.append(self._elements[concept])
                else:
                    # TODO: This case is not correctly understood. Here are some samples that are not found:
                    #    Warning, concept not found: solar:MeterRatingAccuracy_1
                    #    Warning, concept not found: solar:MeterRevenueGrade_1
                    #    Warning, concept not found: solar:MeterBidirectional_1
                    #    Warning, concept not found: solar:RevenueMeterPowerFactor_1
                    #    Warning, concept not found: solar:InverterPowerLevel10PercentMember_1
                    # This case should be understood and handled correctly as opposed to just printing a warning message.
                    #print("Warning, concept not found:", concept)
                    pass
            return ci
        else:
            return None
| StarcoderdataPython |
3316825 | #coding:utf8
# rest
from rest_framework import serializers
# my model
from models import WXUser
class WXUserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the public profile fields of a WeChat user."""

    class Meta:
        model = WXUser
        # openid is the WeChat identifier; the rest are profile fields.
        fields = ('openid','nickname','avatar','gender','city','province','country','language')
| StarcoderdataPython |
1630802 | <reponame>alexrudy/tox-server<gh_stars>1-10
# type: ignore
from invoke import task
@task
def lock(c, sync=True):
    """Lock dependencies"""
    c.config["run"]["echo"] = True  # echo each command as it runs
    c.run("pip-compile requirements/test-requirements.in")
    c.run("pip-compile requirements/dev-requirements.in")
    if sync:
        # Immediately install the freshly pinned dev requirement set.
        sync_requirements(c, dev=True)
@task(name="sync")
def sync_requirements(c, dev=False):
"""Install dependencies"""
requirements = "requirements/dev-requirements.txt" if dev else "requirements/test-requirements.txt"
c.run(f"pip-sync {requirements}")
c.run("pip install -e .")
@task
def clean_build(c):
"""remove Python build artifacts"""
c.run("rm -fr build/")
c.run("rm -fr dist/")
c.run("rm -fr .eggs/")
c.run("find . -name '*.egg-info' -exec rm -fr {} +")
c.run("find . -name '*.egg' -exec rm -f {} +")
@task
def clean_pyc(c):
    """remove Python file artifacts"""
    # Bytecode files, editor backups, then bytecode cache directories.
    for pattern in ("*.pyc", "*.pyo", "*~"):
        c.run("find . -name '%s' -exec rm -f {} +" % pattern)
    c.run("find . -name '__pycache__' -exec rm -fr {} +")
@task
def clean_tests(c):
    """remove test and coverage artifacts"""
    # (.coverage is a file, hence -f; the rest are directories.)
    for flags, target in (("-fr", ".tox/"),
                          ("-f", ".coverage"),
                          ("-fr", "htmlcov/"),
                          ("-fr", ".pytest_cache"),
                          ("-fr", ".mypy_cache")):
        c.run("rm %s %s" % (flags, target))
@task(clean_build, clean_pyc, clean_tests)
def clean(c):
    """clean everything"""
    # Body intentionally empty: the pre-tasks above do all the work.
    pass
@task(clean)
def dist(c):
    """build distributions"""
    # Builds both an sdist and a wheel into dist/ (after a full clean).
    c.run("python setup.py sdist bdist_wheel")
@task(dist)
def release(c, test=True):
    """release to pypi"""
    # Default to the TestPyPI index; pass test=False for a real release.
    repository = "--repository testpypi " if test else ""
    c.run("twine upload " + repository + "dist/*")
@task
def test(c):
    """run tests"""
    c.run("pytest")
@task(name="test-all")
def test_all(c):
"""run all tests via tox"""
c.run("tox")
| StarcoderdataPython |
3377291 | # -*- coding: utf-8 -*-
import lib.requests as requests
import conf.config as conf
base = conf.read_config(conf.path, 'API', 'baseUrl')
class Shot(object):
    """Client for the paginated shot-listing endpoint."""

    def __getShot(self, uid, project_id, start, length):
        api = conf.read_config(conf.path, 'API', 'getShotApi')
        url = '%s%s?uid=%s&project_id=%s&start=%s&length=%s' % (
            base, api, uid, project_id, start, length)
        session = requests.session()
        response = session.post(url)
        session.keep_alive = False
        if response.text == u"null":
            return ""
        return response.json()['SHOT']

    def callService(self, uid, project_id, start, length):
        return self.__getShot(uid, project_id, start, length)
class SingleShot(object):
    """Client for the single-shot detail endpoint."""

    def __getSingleShot(self, pid, entityId, entityType):
        api = conf.read_config(conf.path, 'API', 'getSingleShotApi')
        url = '%s%s?project_id=%s&entity_id=%s&entity_type=%s' % (
            base, api, pid, entityId, entityType)
        session = requests.session()
        response = session.post(url)
        session.keep_alive = False
        if response.text == u"null":
            return ""
        return response.json()['SA']

    def callService(self, pid, entityId, entityType):
        return self.__getSingleShot(pid, entityId, entityType)
class Sequences(object):
    """Client for the sequence-name listing endpoint."""

    def __getSequences(self, pid):
        api = conf.read_config(conf.path, 'API', 'sequencesApi')
        url = '%s%s%s' % (base, api, pid)
        session = requests.session()
        response = session.post(url)
        session.keep_alive = False
        if response.text == u"null":
            return ""
        return response.json()['SEQUENCENAME']

    def callService(self, pid):
        return self.__getSequences(pid)
class ShotBySequences(object):
    """Client for the shots-grouped-by-sequence endpoint.

    Unlike the other clients, the raw JSON body is returned without a
    "null" check.
    """

    def __getShotBySequences(self, pid):
        api = conf.read_config(conf.path, 'API', 'shotBySequenceApi')
        url = '%s%s?project_id=%s' % (base, api, pid)
        session = requests.session()
        response = session.post(url)
        session.keep_alive = False
        return response.json()

    def callService(self, pid):
        return self.__getShotBySequences(pid)
| StarcoderdataPython |
1616110 | <filename>parallel-pytest.py
import argparse
import re
import subprocess
import threading
import fnmatch
import os
import sys
import six.moves.queue as queue
class Collector(threading.Thread):
    """Background thread that serializes worker output onto stdout.

    Workers hand lines to put(); a None line is the shutdown sentinel.
    is_failure latches True once any failing line has been reported.
    """

    def __init__(self):
        self.__output = queue.Queue()
        self.is_failure = False
        super(Collector, self).__init__()
        self.start()

    def run(self):
        # iter() with a sentinel stops as soon as None is dequeued.
        for line in iter(self.__output.get, None):
            print(line)
            sys.stdout.flush()

    def put(self, line, is_failure):
        self.__output.put(line)
        if is_failure:
            self.is_failure = True
class Executor(threading.Thread):
    """Worker thread: drains test files from the shared queue, runs each
    one via its shell command, and reports a one-line summary (plus full
    output on failure) to the Collector."""

    def __init__(self, queue, collector):
        self.__queue = queue
        self.__collector = collector
        super(Executor, self).__init__()
        self.start()

    def run(self):
        try:
            while True:
                # Non-blocking get: queue.Empty below ends the worker.
                item = self.__queue.get(block=False)
                p = subprocess.Popen(
                    item["command"], shell=True,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdout, stderr = p.communicate()
                # Take the last stdout line (pytest's "=== summary ==="
                # banner) and strip the '=' padding around it.
                line = stdout.strip().rsplit(b"\n", 1)[-1]
                line = re.sub(b"^=*", b"", line)
                line = re.sub(b"=*$", b"", line)
                line = line.strip()
                # A file whose tests were all deselected exits non-zero;
                # treat that as success.
                # For pytest.
                if re.match(
                        b'^\\d+ deselected in \\d+(\\.\\d+)? seconds$', line):
                    p.returncode = 0
                # For pytest-xdist.
                if re.match(
                        b'^no tests ran in \\d+(\\.\\d+)? seconds$', line):
                    p.returncode = 0
                extra_info = b""
                if p.returncode != 0:
                    # On failure, append the full captured output.
                    extra_info = b"\n"+stdout+stderr
                self.__collector.put("[%s] %s (%s)%s" % (
                    "SUCCESS" if p.returncode == 0 else "FAILED",
                    item["file"], line.decode("utf-8"),
                    extra_info.decode("utf-8")),
                    p.returncode != 0)
        except queue.Empty:
            # Queue drained: this worker is done.
            pass
def recursive_glob(hint, directory):
    """Collect test files to run.

    Files listed in the *hint* file (one path per line, if hint is
    non-empty) come first, followed by every not-yet-listed test_*.py
    found recursively under *directory*.
    """
    files = []
    seen = set()
    if hint != "":
        with open(hint) as f:
            for raw_line in f:
                path = raw_line.strip()
                if os.path.exists(path):
                    files.append(path)
                    seen.add(path)
    for root, dirs, names in os.walk(directory):
        for name in fnmatch.filter(names, 'test_*.py'):
            full_path = os.path.join(root, name)
            if full_path not in seen:
                files.append(full_path)
    return files
def main(args):
    """Build the work queue, fan out across worker threads, and exit
    with status 1 if any test file failed."""
    tests = queue.Queue()
    for f in recursive_glob(args.hint, args.directory):
        tests.put({
            "file": f,
            "command": args.pytest + " " + f,
        })
    collector = Collector()
    threads = []
    for i in range(args.threads):
        threads.append(Executor(tests, collector))
    for thread in threads:
        thread.join()
    collector.put(None, False)  # sentinel stops the collector thread
    collector.join()
    if collector.is_failure:
        print("test failed")
        exit(1)
    else:
        print("test succeeded")
        exit(0)
# Build the CLI parser at module level so it stays introspectable, but
# only run main() when executed as a script — previously main() fired
# on import as well.
parser = argparse.ArgumentParser()
parser.add_argument("--hint", default="")
parser.add_argument("--directory", default=".")
parser.add_argument("--pytest", default="pytest -m 'not slow and not gpu'")
parser.add_argument("--threads", type=int, default=8)

if __name__ == "__main__":
    main(parser.parse_args())
| StarcoderdataPython |
4826787 | <filename>scripts/prepare_nil_dataset.py
import numpy as np
import pandas as pd
import textdistance
import json
import os
import statistics
import pickle
# Input locations: raw score dumps, the BLINK benchmark datasets, and
# where the merged NIL dataset pickle is written.
scores_path = './data/scores'
datasets_path = './data/BLINK_benchmark'
dataset_output_path = './data/nil_dataset.pickle'

# (NER/mention file, matching score file) name pairs to process.
datasets = [
    ('AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testa_scores'),
    ('AIDA-YAGO2_testb_ner', 'AIDA-YAGO2_testb_scores'),
    ('AIDA-YAGO2_train_ner', 'AIDA-YAGO2_train_scores'),
    ('ace2004_questions', 'ace2004_questions_scores'),
    ('aquaint_questions', 'aquaint_questions_scores'),
    ('clueweb_questions', 'clueweb_questions_scores'),
    ('msnbc_questions', 'msnbc_questions_scores'),
    ('wnedwiki_questions', 'wnedwiki_questions_scores'),
]
def _eval_line(x, scores_col='scores'):
assert len(x[scores_col]) == len(x.nns)
scores = x[scores_col].copy()
correct = -1
if x.labels in x.nns:
# found correct entity
i_correct = x.nns.index(x.labels)
# correct is position of the correct entity according to the estimated score
# correct = 0 means the best scored entity is the correct one
# correct = -1 means the correct entity is not in the top k
correct = np.argsort(x[scores_col])[::-1].tolist().index(i_correct)
return correct
def _bc_get_stats(x, remove_correct=False, scores_col='scores', nns_col='nns', labels_col='labels', top_k=100):
scores = x[scores_col]
nns = x[nns_col]
if isinstance(scores, str):
scores = np.array(json.loads(scores))
if isinstance(nns, str):
nns = np.array(json.loads(nns))
assert len(scores) == len(nns)
scores = scores.copy()
sort_scores_i = np.argsort(scores)[::-1]
scores = np.array(scores)
scores = scores[sort_scores_i][:top_k]
nns = nns.copy()
nns = np.array(nns)
nns = nns[sort_scores_i][:top_k]
correct = None
if x[labels_col] in nns:
# found correct entity
i_correct = list(nns).index(x[labels_col])
correct = scores[i_correct]
_stats = {
"correct": correct,
"max": max(scores),
"second": sorted(scores, reverse=True)[1],
"min": min(scores),
"mean": statistics.mean(scores),
"median": statistics.median(scores),
"stdev": statistics.stdev(scores)
}
return _stats
def _bi_get_stats(x, remove_correct=False, top_k=100):
    """Candidate-score statistics for the bi-encoder ('scores' column)."""
    kwargs = dict(remove_correct=remove_correct, scores_col='scores', top_k=top_k)
    return _bc_get_stats(x, **kwargs)
def _cross_get_stats(x, remove_correct=False, top_k=100):
    """Candidate-score statistics for the cross-encoder ('unsorted_scores' column)."""
    kwargs = dict(remove_correct=remove_correct, scores_col='unsorted_scores', top_k=top_k)
    return _bc_get_stats(x, **kwargs)
def _load_scores(bi_scores, cross_scores, basepath=None):
    """Load bi-encoder and cross-encoder score files as DataFrames.

    Each file must have exactly one gold label per row; the single-element
    'labels' lists are unwrapped to scalars. Both files must describe the
    same examples in the same order.
    """
    if basepath is not None:
        bi_scores = os.path.join(basepath, bi_scores)
        cross_scores = os.path.join(basepath, cross_scores)

    def _read(path):
        # every row must carry exactly one gold label
        df = pd.read_json(path)
        assert (df['labels'].apply(len) != 1).sum() == 0
        df['labels'] = df['labels'].apply(lambda labels: labels[0])
        return df

    bi_df = _read(bi_scores)
    cross_df = _read(cross_scores)
    # sanity check: the two score files are row-aligned
    assert all(bi_df['labels'] == cross_df['labels'])
    return bi_df, cross_df
def myf(x):
    """Attach human-readable titles (from the module-level id2title map) for
    the gold labels and best candidates of both encoders; -1 means NIL."""
    cross_gold = x['cross_labels']
    x['cross_labels_title'] = 'NIL' if cross_gold == -1 else id2title[cross_gold]
    x['cross_best_candidate_title'] = id2title[x['cross_best_candidate']]
    bi_gold = x['bi_labels']
    x['bi_labels_title'] = 'NIL' if bi_gold == -1 else id2title[bi_gold]
    x['bi_best_candidate_title'] = id2title[x['bi_best_candidate']]
    return x
def _best_candidate(scores, nns, nil_score=None, nil_threshold=0.5):
if nil_score is not None and nil_score < nil_threshold:
# identified as NIL
return -1
else:
return nns[np.argmax(scores)]
# load id2title (entity id -> title), needed to attach readable titles below
id2title_path = './data/id2title.pickle'
print('Loading id2title from {}'.format(id2title_path))
if os.path.isfile(id2title_path):
    with open(id2title_path, 'rb') as fd:
        id2title = pickle.load(fd)
else:
    raise Exception('{} not found! Generate it with `python blink/main_dense.py --save-id2title`.'.format(id2title_path))
# Build the full dataset: one row per mention, joined with bi-/cross-encoder
# scores and per-top-k candidate-score statistics, across all benchmarks.
whole = pd.DataFrame() # the entire dataset
for d_data, d_score in datasets:
    df_src = d_data
    d_score = os.path.join(scores_path, d_score)
    d_data = os.path.join(datasets_path, d_data)
    print(f'Processing scores for {d_score}...')
    bi_path = f'{d_score}_bi.jsonl'
    cross_path = f'{d_score}_cross.jsonl'
    bi_df, cross_df = _load_scores(bi_path, cross_path)
    assert bi_df.shape[0] == cross_df.shape[0]
    # gold-entity rank and top-scored candidate for each encoder
    bi_df['recall@'] = bi_df.apply(lambda x: _eval_line(x, 'scores'), axis=1)
    bi_df['best_candidate'] = bi_df.apply(
        lambda x: _best_candidate(x['scores'], x.nns), axis=1)
    cross_df['recall@'] = cross_df.apply(
        lambda x: _eval_line(x, 'unsorted_scores'), axis=1)
    cross_df['best_candidate'] = cross_df.apply(
        lambda x: _best_candidate(x['unsorted_scores'], x.nns), axis=1)
    d_source = pd.read_json(f'{d_data}.jsonl', lines=True)
    assert d_source.shape[0] == bi_df.shape[0]
    assert d_source.shape[0] == cross_df.shape[0]
    # prefix the score columns so bi- and cross-encoder features coexist
    d_source[['bi_'+c for c in bi_df.columns]] = bi_df
    d_source[['cross_'+c for c in cross_df.columns]] = cross_df
    # remember which benchmark (and which row of it) each example came from
    d_source['src'] = df_src
    d_source['src_i'] = d_source.index
    # calculate all the stats we need, for every top-k cutoff
    _all_k = list(range(2, 10)) + list(range(10, 105, 5))
    counter = 0
    for k in _all_k:
        counter += 1
        print('\r{}/{}'.format(counter, len(_all_k)), end='')
        _stats_n_bi = d_source.apply(
            lambda x: _bc_get_stats(x,
                                    scores_col='bi_scores',
                                    nns_col='bi_nns',
                                    labels_col='bi_labels',
                                    top_k=k
                                    ),
            axis=1, result_type='expand')
        d_source[[f'bi_stats_{k}_'+c for c in _stats_n_bi.columns]] = _stats_n_bi
        _stats_n_cross = d_source.apply(
            lambda x: _bc_get_stats(x,
                                    scores_col='cross_unsorted_scores',
                                    nns_col='cross_nns',
                                    labels_col='cross_labels',
                                    top_k=k
                                    ),
            axis=1, result_type='expand')
        d_source[[f'cross_stats_{k}_'+c for c in _stats_n_cross.columns]] = _stats_n_cross
    print()
    whole = pd.concat([whole, d_source])
# save original index
whole['i'] = whole.index
print('Getting titles...')
whole = whole.apply(myf, axis=1)
# feature selection
# calc all text distances
# # get the list from devlop-local:train_new.Rmd with qval=None
# String-similarity measures used as features; qval=None where the measure
# supports it (whole-token rather than q-gram comparison).
dist_dict = {
    "Hamming": textdistance.Hamming(qval=None),
    "Mlipns": textdistance.MLIPNS(qval=None),
    "Levenshtein": textdistance.Levenshtein(qval=None),
    "DamerauLevenshtein": textdistance.DamerauLevenshtein(qval=None),
    "JaroWinkler": textdistance.JaroWinkler(qval=None),
    "StrCmp95": textdistance.StrCmp95(),
    "NeedlemanWunsch": textdistance.NeedlemanWunsch(qval=None),
    "Gotoh": textdistance.Gotoh(qval=None),
    "SmithWaterman": textdistance.SmithWaterman(qval=None),
    "Jaccard": textdistance.Jaccard(qval=None),
    "Sorensen": textdistance.Sorensen(qval=None),
    "Tversky": textdistance.Tversky(qval=None),
    "Overlap": textdistance.Overlap(qval=None),
    "Tanimoto": textdistance.Tanimoto(qval=None),
    "Cosine": textdistance.Cosine(qval=None),
    "MongeElkan": textdistance.MongeElkan(qval=None),
    "Bag": textdistance.Bag(qval=None),
    "LCSSeq": textdistance.LCSSeq(qval=None),
    "LCSStr": textdistance.LCSStr(qval=None),
    "Editex": textdistance.Editex()
}
print('Text distances cross...')
# One feature per distance measure: normalized similarity between the
# lower-cased mention and the cross-encoder best candidate's title.
crossbi='cross'
for i in range(0, len(dist_dict.keys())):
    diststring = list(dist_dict.keys())[i]
    dist = dist_dict[diststring]
    print('\r{}/{}'.format(i, len(dist_dict.keys())), end='')
    whole[crossbi + '_'+ diststring.lower()] = whole.apply(
        lambda x: dist.normalized_similarity(x['mention'].lower(), x[f'{crossbi}_best_candidate_title'].lower()),
        axis=1
    )
print()
print('Text distances bi...')
# Same features computed against the bi-encoder best candidate's title.
crossbi='bi'
for i in range(0, len(dist_dict.keys())):
    diststring = list(dist_dict.keys())[i]
    dist = dist_dict[diststring]
    print('\r{}/{}'.format(i, len(dist_dict.keys())), end='')
    whole[crossbi + '_'+ diststring.lower()] = whole.apply(
        lambda x: dist.normalized_similarity(x['mention'].lower(), x[f'{crossbi}_best_candidate_title'].lower()),
        axis=1
    )
print()
print('One-hot encoding NER types...')
# One-hot indicators for the mention's NER type.
whole['ner_per'] = whole.eval('ner == "PER"').astype(float)
whole['ner_loc'] = whole.eval('ner == "LOC"').astype(float)
whole['ner_org'] = whole.eval('ner == "ORG"').astype(float)
whole['ner_misc'] = whole.eval('ner == "MISC"').astype(float)
print('Calculating target y...')
# Binary target: 1 when a gold entity exists (label != -1, i.e. not NIL)
# and the linker's best candidate matches it.
whole['y_bi'] = whole.eval('bi_labels != -1 and bi_labels == bi_best_candidate').astype(int)
whole['y_cross'] = whole.eval('cross_labels != -1 and cross_labels == cross_best_candidate').astype(int)
print('Saving dataset at {}...'.format(dataset_output_path))
whole.to_pickle(dataset_output_path)
| StarcoderdataPython |
1778348 | # -*- coding: utf-8 -*-
import math
import os
import random
import time
import src.hyperka.et_funcs.utils as ut
from src.hyperka.ea_funcs.train_funcs import find_neighbours_multi
from src.hyperka.et_apps.util import generate_adjacent_graph
# Initialise the model from the given arguments.
def get_model(folder, kge_model, args):
    """Read the dataset under ``folder``, build entity/relation adjacency
    graphs, and instantiate ``kge_model``.
    Returns (ins_triples, onto_triples, model).
    """
    print("data folder:", folder)
    print("read_input begin...")
    read_func = ut.read_input  # function used to read the input data
    # insnet and onto are structured as:
    # [all_ids_triples, train_ids_triples_set, test_ids_triples_set,
    #  total_ents_num, total_rels_num, total_triples_num]
    # instype is structured as:
    # [[train_heads_ids_list, train_tails_ids_list],
    #  [test_heads_ids_list, test_tails_ids_list, test_head_tails_ids_list]]
    insnet, onto, instype = read_func(folder)
    print("read_input finished\n")
    print("generate_adjacent_graph begin...")
    ins_near_ents_graph, ins_near_rels_graph = generate_adjacent_graph(total_ents_num=insnet[3],
                                                                       total_rels_num=insnet[4],
                                                                       triples=insnet[0].triples)
    onto_near_ents_graph, onto_near_rels_graph = generate_adjacent_graph(total_ents_num=onto[3],
                                                                         total_rels_num=onto[4],
                                                                         triples=onto[0].triples)
    print("ins_near_ents_adj shape:", ins_near_ents_graph[0].shape)
    print("ins_near_rels_adj shape:", ins_near_rels_graph[0].shape)
    print("onto_near_ents_adj shape:", onto_near_ents_graph[0].shape)
    print("onto_near_rels_adj shape:", onto_near_rels_graph[0].shape)
    print("generate_adjacent_graph finished\n")
    model = kge_model(insnet, onto, instype, ins_near_ents_graph, ins_near_rels_graph, onto_near_ents_graph,
                      onto_near_rels_graph, args)
    return insnet[0], onto[0], model
# Obtain the original (unmodified) HyperKA model.
def get_origin_model(folder, kge_model, args):
    """Read the dataset under ``folder`` and instantiate the original HyperKA
    ``kge_model`` with one adjacency matrix per graph (origin=True).
    Returns (ins_triples, onto_triples, model).
    """
    print("data folder:", folder)
    print("read_input begin...")
    read_func = ut.read_input  # function used to read the input data
    # insnet and onto are structured as:
    # [all_ids_triples, train_ids_triples_set, test_ids_triples_set,
    #  total_ents_num, total_rels_num, total_triples_num]
    # instype is structured as:
    # [[train_heads_ids_list, train_tails_ids_list],
    #  [test_heads_ids_list, test_tails_ids_list, test_head_tails_ids_list]]
    insnet, onto, instype = read_func(folder)
    print("read_input finished\n")
    print("generate_adjacent_graph begin...")
    ins_adj = generate_adjacent_graph(total_ents_num=insnet[3], total_rels_num=insnet[4], triples=insnet[0].triples,
                                      origin=True)
    onto_adj = generate_adjacent_graph(total_ents_num=onto[3], total_rels_num=onto[4], triples=onto[0].triples,
                                       origin=True)
    print("ins adj shape:", ins_adj.shape)
    # BUG FIX: this previously printed ins_adj.shape again; report onto_adj.
    print("onto adj shape:", onto_adj.shape)
    model = kge_model(insnet, onto, instype, ins_adj, onto_adj, args)
    return insnet[0], onto[0], model
# Train for k epochs.
def train_k_epochs(model, ins_triples, onto_triples, k, args, truncated_ins_num, truncated_onto_num):
    """Run ``k`` training epochs, optionally restricting negative sampling to
    truncated nearest-neighbour sets computed from the current embeddings."""
    neighbours_of_ins_triples, neighbours_of_onto_triples = dict(), dict()
    start = time.time()
    # TODO(review): the exact role of truncated_ins_num / truncated_onto_num is
    # unclear; with default settings both start at 0, so the neighbour search
    # below is skipped entirely — confirm the intended semantics.
    if truncated_ins_num > 0.1:
        ins_embeds = model.eval_ins_input_embed()
        onto_embeds = model.eval_onto_input_embed()
        neighbours_of_ins_triples = find_neighbours_multi(ins_embeds, model.ins_entities, truncated_ins_num,
                                                          args.nums_threads)
        neighbours_of_onto_triples = find_neighbours_multi(onto_embeds, model.onto_entities, truncated_onto_num,
                                                           args.nums_threads)
        end = time.time()
        print("generate nearest-{}-&-{} neighbours: {:.3f} s".format(truncated_ins_num, truncated_onto_num,
                                                                     end - start))
    for epoch in range(1, k + 1):
        print("epoch:", epoch)
        triple_loss, mapping_loss, time_cost = train_1_epoch(model, ins_triples, onto_triples, args,
                                                             neighbours_of_ins_triples,
                                                             neighbours_of_onto_triples)
        print("triple_loss(L1) = {:.3f}, mapping_loss(L2) = {:.3f}, "
              "time = {:.3f} s".format(triple_loss, mapping_loss, time_cost))
    end = time.time()
    print("train k epochs finished, time cost:", round(end - start, 2), "s")
# Train a single epoch.
def train_1_epoch(model, ins_triples, onto_triples, args,
                  neighbours_of_ins_triples, neighbours_of_onto_triples):
    """Run one epoch of alternating triple-loss and mapping-loss steps.
    Returns (mean triple loss, mean mapping loss, elapsed seconds)."""
    triple_loss, mapping_loss = 0, 0
    start = time.time()
    # One epoch runs `steps` optimisation steps of `batch_size` triples each.
    steps = math.ceil(ins_triples.triples_num / args.batch_size)
    # print("steps per epoch:", steps)
    link_batch_size = math.ceil(len(model.train_instype_head) / steps)
    for step in range(1, steps + 1):
        # if step % 5 == 1:
        #     print("\tstep:", step)
        triple_step_loss, triple_step_time = train_triple_1_step(model, ins_triples, onto_triples, step, args,
                                                                 neighbours_of_ins_triples, neighbours_of_onto_triples)
        triple_loss += triple_step_loss
        mapping_step_loss, mapping_step_time = train_mapping_1_step(model, link_batch_size, args.mapping_neg_nums)
        mapping_loss += mapping_step_loss
        # print("train triple 1 step time cost:", triple_step_time, "s")
        # print("train mapping 1 step time cost:", mapping_step_time, "s")
    triple_loss /= steps
    mapping_loss /= steps
    # Reshuffle after the epoch so the next epoch's batches differ from this one's.
    random.shuffle(ins_triples.triple_list)
    random.shuffle(onto_triples.triple_list)
    end = time.time()
    return triple_loss, mapping_loss, round(end - start, 2)
# Run one optimisation step on the triple (L1) loss.
def train_triple_1_step(model, ins_triples, onto_triples, step, args,
                        neighbours_of_ins_triples, neighbours_of_onto_triples):
    """Sample one positive/negative triple batch for this step and optimise
    the triple loss once; return (loss, elapsed seconds)."""
    start = time.time()
    batch = generate_pos_neg_triple_batch(ins_triples, onto_triples, step, args.batch_size,
                                          args.triple_neg_nums,
                                          neighbours_of_ins_triples, neighbours_of_onto_triples)
    ins_pos, ins_neg, onto_pos, onto_neg = batch
    triple_loss = model.optimize_triple_loss([ins_pos, ins_neg, onto_pos, onto_neg]).data
    elapsed = round(time.time() - start, 2)
    return triple_loss, elapsed
# Run one optimisation step on the mapping (L2) loss.
# TODO(review): the negative-sampling scheme below is not fully worked out and
# the original variable naming was inconsistent; renamed minimally and treated
# as a black box — confirm against the HyperKA paper/reference implementation.
def train_mapping_1_step(model, link_batch_size, mapping_neg_nums=20):
    """Sample positive and negative (entity, type) links and optimise the
    mapping loss once; return (loss, elapsed seconds)."""
    start = time.time()
    # Pick link_batch_size positive (ent, type) pairs from the training links.
    pos_link_list = random.sample(model.train_instype_link, link_batch_size)
    link_pos_h = [pos_link[0] for pos_link in pos_link_list]
    link_pos_t = [pos_link[1] for pos_link in pos_link_list]
    neg_link_list = list()
    for i in range(mapping_neg_nums):
        # Randomly pick link_batch_size of all known types as negative tails.
        # NOTE(review): the comprehension's `i` shadows the outer loop index.
        neg_tails = random.sample(model.train_instype_tail + model.test_instype_tail, link_batch_size)
        neg_link_list.extend([(link_pos_h[i], neg_tails[i]) for i in range(link_batch_size)])
    # Discard any sampled "negative" that is actually a known positive link.
    neg_link_list = list(set(neg_link_list) - model.train_instype_set)
    link_neg_h = [neg_link[0] for neg_link in neg_link_list]
    link_neg_t = [neg_link[1] for neg_link in neg_link_list]
    mapping_pos_neg_batch = [link_pos_h, link_pos_t, link_neg_h, link_neg_t]
    mapping_loss = model.optimize_mapping_loss(mapping_pos_neg_batch).data
    end = time.time()
    return mapping_loss, round(end - start, 2)
# Slice out the positive triples for training step `step` of an epoch.
def generate_pos_triples(ins_triples_list, onto_triples_list, step, batch_size):
    """Return (pos_ins_triples, pos_onto_triples) for this step.

    The instance batch has `batch_size` triples; the ontology batch is scaled
    proportionally to the two lists' sizes. Slices are clamped to the list
    ends; an empty ontology slice falls back to the first ontology batch.
    """
    ins_batch = batch_size
    onto_batch = int(batch_size / len(ins_triples_list) * len(onto_triples_list))
    ins_start = step * ins_batch
    onto_start = step * onto_batch
    ins_end = min(ins_start + ins_batch, len(ins_triples_list))
    onto_end = min(onto_start + onto_batch, len(onto_triples_list))
    pos_ins_triples = ins_triples_list[ins_start:ins_end]
    pos_onto_triples = onto_triples_list[onto_start:onto_end]
    # TODO(review): intent of this fallback is unclear in the original code;
    # behaviour preserved as-is.
    if len(pos_onto_triples) == 0:
        pos_onto_triples = onto_triples_list[0:onto_batch]
    return pos_ins_triples, pos_onto_triples
# Build the negative triples for training step `step` of an epoch.
def generate_neg_triples(pos_triples, all_triples, triple_neg_nums, neighbours):
    """Corrupt each positive triple into `triple_neg_nums` negatives.

    With probability 1/2 the head is replaced, otherwise the tail; the
    replacement is sampled from the corrupted entity's neighbour set when
    available, falling back to all entities (all_triples.ent_list). Any
    corrupted triple that is actually a known positive is discarded.
    """
    existing = all_triples.triples
    all_ents = all_triples.ent_list
    corrupted = list()
    for (h, r, t) in pos_triples:
        choice = random.randint(0, 999)
        if choice < 500:
            # corrupt the head: sample from h's neighbours (or all entities)
            pool = neighbours.get(h, all_ents)
            for new_h in random.sample(pool, triple_neg_nums):
                corrupted.append((new_h, r, t))
        elif choice >= 500:
            # corrupt the tail: sample from t's neighbours (or all entities)
            pool = neighbours.get(t, all_ents)
            for new_t in random.sample(pool, triple_neg_nums):
                corrupted.append((h, r, new_t))
    # drop accidental true triples
    corrupted = list(set(corrupted) - existing)
    return corrupted
# Build the complete triple batch (positives and negatives) for one step.
def generate_pos_neg_triple_batch(ins_triples, onto_triples, step, batch_size, triple_neg_nums,
                                  neighbours_of_ins_triples=None, neighbours_of_onto_triples=None):
    """Return (pos_ins, neg_ins, pos_onto, neg_onto) triples for this step."""
    assert triple_neg_nums >= 1
    pos_ins, pos_onto = generate_pos_triples(
        ins_triples.triple_list, onto_triples.triple_list, step, batch_size)
    neg_ins = generate_neg_triples(pos_ins, ins_triples, triple_neg_nums, neighbours_of_ins_triples)
    neg_onto = generate_neg_triples(pos_onto, onto_triples, triple_neg_nums, neighbours_of_onto_triples)
    return pos_ins, neg_ins, pos_onto, neg_onto
| StarcoderdataPython |
1739069 | from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from . import views
# Maps URL page slugs to MDN reference-app document slugs
# (used by the docs/apps/<page> route below).
APP_SLUGS = {
    'chrono': 'Chrono',
    'face_value': 'Face_Value',
    'podcasts': 'Podcasts',
    'roller': 'Roller',
    'webfighter': 'Webfighter',
    'generalnotes': 'General_Notes',
    'rtcamera': 'rtcamera'
}
def redirect_doc(uri, request=None):
    """Build a view redirecting to the MDN docs page at ``uri``; when a
    request is supplied, invoke the view immediately and return the response."""
    target = 'https://developer.mozilla.org/docs%s' % uri
    view = RedirectView.as_view(url=target)
    if request:
        return view(request)
    return view
# Legacy documentation URLs: every route below redirects to the
# corresponding MDN page via redirect_doc().
redirect_patterns = patterns('',
    url('^docs/firefox_os_guideline$',
        redirect_doc('/Web/Apps/Design'),
        name='ecosystem.ffos_guideline'),
    url('^docs/responsive_design$',
        redirect_doc('/Web_Development/Mobile/Responsive_design'),
        name='ecosystem.responsive_design'),
    url('^docs/patterns$',
        redirect_doc('/Web/Apps/Design/Responsive_Navigation_Patterns'),
        name='ecosystem.design_patterns'),
    url('^docs/review$',
        redirect_doc('/Web/Apps/Publishing/Marketplace_review_criteria'),
        name='ecosystem.publish_review'),
    url('^docs/deploy$',
        redirect_doc('/Mozilla/Marketplace/Options'),
        name='ecosystem.publish_deploy'),
    url('^docs/hosted$',
        redirect_doc('/Mozilla/Marketplace/Publish_options#Hosted_apps'),
        name='ecosystem.publish_hosted'),
    url('^docs/submission$',
        redirect_doc('/Web/Apps/Publishing/Submitting_an_app'),
        name='ecosystem.publish_submit'),
    url('^docs/packaged$',
        redirect_doc('/Web/Apps/Developing/Packaged_apps'),
        name='ecosystem.publish_packaged'),
    url('^docs/intro_apps$',
        redirect_doc('/Web/Apps/Quickstart/Build/Intro_to_open_web_apps'),
        name='ecosystem.build_intro'),
    url('^docs/firefox_os$',
        redirect_doc('/Mozilla/Firefox_OS'),
        name='ecosystem.build_ffos'),
    url('^docs/manifests$',
        redirect_doc('/Web/Apps/FAQs/About_app_manifests'),
        name='ecosystem.build_manifests'),
    url('^docs/apps_offline$',
        redirect_doc('/Web/Apps/Offline_apps'),
        name='ecosystem.build_apps_offline'),
    url('^docs/game_apps$',
        redirect_doc('/Web/Apps/Developing/Games'),
        name='ecosystem.build_game_apps'),
    url('^docs/mobile_developers$',
        redirect_doc('/Web/Apps/Quickstart/Build/For_mobile_developers'),
        name='ecosystem.build_mobile_developers'),
    url('^docs/web_developers$',
        redirect_doc('/Web/Apps/Quickstart/Build/For_Web_developers'),
        name='ecosystem.build_web_developers'),
    url('^docs/firefox_os_simulator$',
        redirect_doc('/Tools/Firefox_OS_Simulator'),
        name='ecosystem.firefox_os_simulator'),
    url('^docs/payments$',
        redirect_doc('/Web/Apps/Quickstart/Build/Payments'),
        name='ecosystem.build_payments'),
    url('^docs/concept$',
        redirect_doc('/Web/Apps/Quickstart/Design/Concept_A_great_app'),
        name='ecosystem.design_concept'),
    url('^docs/fundamentals$',
        redirect_doc('/Web/Apps/Quickstart/Design/Design_Principles'),
        name='ecosystem.design_fundamentals'),
    url('^docs/ui_guidelines$',
        redirect_doc('/Apps/Design'),
        name='ecosystem.design_ui'),
    url('^docs/quick_start$',
        redirect_doc('/Web/Apps/Quickstart/Build/Your_first_app'),
        name='ecosystem.build_quick'),
    url('^docs/reference_apps$',
        redirect_doc('/Web/Apps/Reference_apps'),
        name='ecosystem.build_reference'),
    # Per-app documentation: the page slug is mapped through APP_SLUGS.
    url('^docs/apps/(?P<page>\w+)?$',
        lambda req, page:
        redirect_doc('/Web/Apps/Reference_apps/' + APP_SLUGS.get(page, ''), req),
        name='ecosystem.apps_documentation'),
    url('^docs/payments/status$',
        redirect_doc('/Mozilla/Marketplace/Payments_Status'),
        name='ecosystem.publish_payments'),
    url('^docs/tools$',
        redirect_doc('/Web/Apps/Quickstart/Build/App_tools'),
        name='ecosystem.build_tools'),
    url('^docs/app_generator$',
        redirect_doc('/Web/Apps/Developing/App_templates'),
        name='ecosystem.build_app_generator'),
    url('^docs/app_manager$',
        redirect_doc('/Mozilla/Firefox_OS/Using_the_App_Manager'),
        name='ecosystem.app_manager'),
    url('^docs/dev_tools$',
        redirect_doc('/Tools'),
        name='ecosystem.build_dev_tools'),
    # Doesn't start with docs/, but still redirects to MDN.
    url('^dev_phone$',
        redirect_doc('/Mozilla/Firefox_OS/Developer_phone_guide/Flame'),
        name='ecosystem.dev_phone'),
)
# Locally-served pages, appended after the MDN redirects.
urlpatterns = redirect_patterns + patterns('',
    url('^$', views.landing, name='ecosystem.landing'),
    url('^partners$', views.partners, name='ecosystem.partners'),
    url('^support$', views.support, name='ecosystem.support'),
    url('^docs/badges$', views.publish_badges, name='ecosystem.publish_badges')
)
| StarcoderdataPython |
166333 | <gh_stars>0
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper
from baselines.common.vec_env.vec_env import clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
    """Subprocess entry point: instantiate a group of envs (run in series)
    and service (cmd, data) messages arriving over ``remote`` until a
    'close' command is received or the pipe is torn down."""
    def step(env, action):
        # Step once; auto-reset the whole env when the episode ends.
        ob, reward, done, info = env.step(action)
        if done:
            ob = env.reset()
        return ob, reward, done, info
    def step_env(env, action, reset_random=False):
        # Step once; on episode end optionally re-randomise the level before
        # resetting only the agent.
        ob, reward, done, info = env.step(action)
        if done:
            if reset_random:
                env.reset_random()
                ob = env.reset_agent()
            else:
                ob = env.reset_agent()
        return ob, reward, done, info
    def get_env_attr(env, attr):
        # Look up `attr` on the env, unwrapping nested `.env` wrappers;
        # returns None when no layer defines it.
        if hasattr(env, attr):
            return getattr(env, attr)
        while hasattr(env, 'env'):
            env = env.env
            if hasattr(env, attr):
                return getattr(env, attr)
        return None
    # The parent's end of the pipe is not used in this process.
    parent_remote.close()
    envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
    try:
        while True:
            cmd, data = remote.recv()
            if cmd == 'step':
                remote.send([step(env, action) for env, action in zip(envs, data)])
            elif cmd == 'step_env':
                remote.send([step_env(env, action) for env, action in zip(envs, data)])
            elif cmd == 'step_env_reset_random':
                remote.send([step_env(env, action, reset_random=True) for env, action in zip(envs, data)])
            elif cmd == 'observation_space':
                remote.send(envs[0].observation_space)
            elif cmd == 'adversary_observation_space':
                remote.send(envs[0].adversary_observation_space)
            elif cmd == 'adversary_action_space':
                remote.send(envs[0].adversary_action_space)
            elif cmd == 'max_steps':
                remote.send(envs[0].max_steps)
            elif cmd == 'render':
                remote.send([env.render(mode='level') for env in envs])
            elif cmd == 'render_to_screen':
                # Only the first env in the group is rendered to screen.
                remote.send([envs[0].render('human')])
            elif cmd == 'close':
                remote.close()
                break
            elif cmd == 'get_spaces_spec':
                remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
            elif cmd == 'reset_to_level':
                remote.send([envs[0].reset_to_level(data[0], data[1])])
            elif cmd == 'max_episode_steps':
                max_episode_steps = get_env_attr(envs[0], '_max_episode_steps')
                remote.send(max_episode_steps)
            elif hasattr(envs[0], cmd):
                # Generic fallback: treat the command as an attribute or
                # method name on each env; `data` is broadcast when it is
                # not already a per-env sequence.
                attrs = [getattr(env, cmd) for env in envs]
                is_callable = hasattr(attrs[0], '__call__')
                if is_callable:
                    if not hasattr(data, '__len__'):
                        data = [data]*len(attrs)
                    remote.send([attr(d) if d is not None else attr() for attr, d in zip(attrs, data)])
                else:
                    remote.send([attr for attr in attrs])
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    finally:
        # Always release env resources, even on error or interrupt.
        for env in envs:
            env.close()
class SubprocVecEnv(VecEnv):
    """
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
    Recommended to use when num_envs > 1 and step() can be a bottleneck.
    """
    def __init__(self, env_fns, spaces=None, context='spawn', in_series=1, is_eval=False):
        """
        Arguments:
        env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
        in_series: number of environments to run in series in a single process
        (e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
        """
        self.waiting = False
        self.closed = False
        self.in_series = in_series
        nenvs = len(env_fns)
        assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
        self.nremotes = nenvs // in_series
        env_fns = np.array_split(env_fns, self.nremotes)
        ctx = mp.get_context(context)
        # One pipe per worker process; each worker runs `in_series` envs.
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
        self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            with clear_mpi_env_vars():
                p.start()
        for remote in self.work_remotes:
            remote.close()
        self.remotes[0].send(('get_spaces_spec', None))
        observation_space, action_space, self.spec = self.remotes[0].recv().x
        self.viewer = None
        VecEnv.__init__(self, nenvs, observation_space, action_space)
        # Get processed action dim
        self.is_eval = is_eval
        self.processed_action_dim = 1
        if not is_eval:
            self.remotes[0].send(('processed_action_dim', None))
            self.processed_action_dim = self.remotes[0].recv()[0]
    def step_async(self, action):
        """Dispatch a batch of actions to the workers without waiting."""
        self._assert_not_closed()
        action = np.array_split(action, self.nremotes)
        for remote, action in zip(self.remotes, action):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect the results of a previous step_async() dispatch."""
        self._assert_not_closed()
        results = [remote.recv() for remote in self.remotes]
        results = _flatten_list(results)
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        """Reset every environment and return the stacked observations."""
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('reset', None))
        obs = [remote.recv() for remote in self.remotes]
        obs = _flatten_list(obs)
        return _flatten_obs(obs)
    def close_extras(self):
        """Drain any pending replies, tell all workers to close, join them."""
        self.closed = True
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
    def get_complexity_info(self):
        """Query every env's complexity info (handled by the worker's generic
        attribute dispatch)."""
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('get_complexity_info', None))
        info = [remote.recv() for remote in self.remotes]
        info = _flatten_list(info)
        return info
    def get_images(self):
        """Render every env (worker uses mode='level') and return the images."""
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('render', None))
        imgs = [remote.recv() for remote in self.remotes]
        imgs = _flatten_list(imgs)
        return imgs
    def render_to_screen(self):
        """Render only the first env in 'human' mode."""
        self._assert_not_closed()
        self.remotes[0].send(('render_to_screen', None))
        return self.remotes[0].recv()
    def max_episode_steps(self):
        """Return the first env's _max_episode_steps (or None if undefined)."""
        self._assert_not_closed()
        self.remotes[0].send(('max_episode_steps', None))
        return self.remotes[0].recv()
    def _assert_not_closed(self):
        assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
    def __del__(self):
        if not self.closed:
            self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys if k != 'colors_crop'}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
class ParallelAdversarialVecEnv(SubprocVecEnv):
def __init__(self, env_fns, adversary=True, is_eval=False):
super().__init__(env_fns, is_eval=is_eval)
action_space = self.action_space
if action_space.__class__.__name__ == 'Box':
self.action_dim = action_space.shape[0]
elif action_space.__class__.__name__ == 'MultiDiscrete':
self.action_dim = len(list(action_space.nvec))
else:
self.action_dim = 1
self.adv_action_dim = 0
if adversary:
adv_action_space = self.adversary_action_space
if adv_action_space.__class__.__name__ == 'Box':
self.adv_action_dim = adv_action_space.shape[0]
elif adv_action_space.__class__.__name__ == 'MultiDiscrete':
self.adv_action_dim = len(list(adv_action_space.nvec))
else:
self.adv_action_dim = 1
def _should_expand_action(self, action, adversary=False):
if not adversary:
action_dim = self.action_dim
else:
action_dim = self.adv_action_dim
# print('expanding actions?', action_dim>1, flush=True)
return action_dim > 1 or self.processed_action_dim > 1
def seed_async(self, seed, index):
self._assert_not_closed()
self.remotes[index].send(('seed', seed))
self.waiting = True
def seed_wait(self, index):
self._assert_not_closed()
obs = self.remotes[index].recv()
self.waiting = False
return obs
def seed(self, seed, index):
self.seed_async(seed, index)
return self.seed_wait(index)
def level_seed_async(self, index):
self._assert_not_closed()
self.remotes[index].send(('level_seed', None))
self.waiting = True
def level_seed_wait(self, index):
self._assert_not_closed()
level_seed = self.remotes[index].recv()
self.waiting = False
return level_seed
def level_seed(self, index):
self.level_seed_async(index)
return self.level_seed_wait(index)
# Wrap SubprocVecEnv step_async with higher-dim action logic
# def step_async(self, action):
# if self._should_expand_action(action):
# action = np.expand_dims(action, 1)
# super().step_async(action)
# step_adversary
def step_adversary(self, action):
if self._should_expand_action(action, adversary=True):
action = np.expand_dims(action, 1)
self.step_adversary_async(action)
return self.step_wait()
def step_adversary_async(self, action):
self._assert_not_closed()
[remote.send(('step_adversary', a)) for remote, a in zip(self.remotes, action)]
self.waiting = True
def step_env_async(self, action):
self._assert_not_closed()
if self._should_expand_action(action):
action = np.expand_dims(action, 1)
[remote.send(('step_env', a)) for remote, a in zip(self.remotes, action)]
self.waiting = True
def step_env_reset_random_async(self, action):
self._assert_not_closed()
if self._should_expand_action(action):
action = np.expand_dims(action, 1)
[remote.send(('step_env_reset_random', a)) for remote, a in zip(self.remotes, action)]
self.waiting = True
# reset_agent
def reset_agent(self):
self._assert_not_closed()
[remote.send(('reset_agent', None)) for remote in self.remotes]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# reset_random
def reset_random(self):
self._assert_not_closed()
[remote.send(('reset_random', None)) for remote in self.remotes]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# reset_to_level
def reset_to_level(self, level, index, editing=False):
self._assert_not_closed()
self.remotes[index].send(('reset_to_level', [level, editing]))
self.waiting = True
obs = self.remotes[index].recv()
self.waiting = False
return _flatten_obs(obs)
def reset_to_level_batch(self, level, editing=False):
self._assert_not_closed()
[remote.send(('reset_to_level', [level[i], editing])) for i, remote in enumerate(self.remotes)]
self.waiting = True
obs = [remote.recv() for remote in self.remotes]
self.waiting = False
obs = _flatten_list(obs)
return _flatten_obs(obs)
# observation_space
def get_observation_space(self):
self._assert_not_closed()
self.remotes[0].send(('observation_space', None))
obs_space = self.remotes[0].recv()
if hasattr(obs_space, 'spaces'):
obs_space = obs_space.spaces
return obs_space
# adversary_observation_space
def get_adversary_observation_space(self):
self._assert_not_closed()
self.remotes[0].send(('adversary_observation_space', None))
obs_space = self.remotes[0].recv()
if hasattr(obs_space, 'spaces'):
obs_space = obs_space.spaces
return obs_space
def get_adversary_action_space(self):
self._assert_not_closed()
self.remotes[0].send(('adversary_action_space', None))
action_dim = self.remotes[0].recv()
return action_dim
def get_grid_str(self):
self._assert_not_closed()
self.remotes[0].send(('get_grid_str', None))
grid_obs = self.remotes[0].recv()
return grid_obs[0]
def get_des_file(self):
self._assert_not_closed()
self.remotes[0].send(('get_des_file', None))
des_files = self.remotes[0].recv()
return des_files[0]
def get_max_episode_steps(self):
self._assert_not_closed()
self.remotes[0].send(('max_episode_steps', None))
self.waiting = True
max_episode_steps = self.remotes[0].recv()
self.waiting = False
return max_episode_steps
def get_seed(self):
return self.remote_attr('seed_value', flatten=True)
def set_seed(self, seeds):
return self.remote_attr('seed', data=seeds, flatten=True)
def set_obs_space(self, obs_type):
return self.remote_attr('set_obs', data=obs_type, flatten=True)
def get_level(self):
levels = self.remote_attr('level')
return [l[0] for l in levels] # flatten
# Generic getter
def remote_attr(self, name, data=None, flatten=False):
    """Broadcast (name, payload) to all workers and gather their replies.

    When ``data`` is sized (has __len__), one element is sent per worker;
    otherwise the same payload goes to everyone. Note this means a plain
    string payload is split per-worker — preserved from the original.
    """
    self._assert_not_closed()
    if hasattr(data, '__len__'):
        assert len(data) == len(self.remotes)
        payloads = data
    else:
        payloads = [data] * len(self.remotes)
    for remote, payload in zip(self.remotes, payloads):
        remote.send((name, payload))
    replies = [remote.recv() for remote in self.remotes]
    if flatten:
        return _flatten_list(replies)
    return replies
# Navigation-specific
def get_distance_to_goal(self):
    """Agent-to-goal distance in every worker env."""
    return self.remote_attr('distance_to_goal', flatten=True)

def get_passable(self):
    """Whether each worker env's level is passable."""
    return self.remote_attr('passable', flatten=True)

def get_shortest_path_length(self):
    """Shortest-path length in every worker env."""
    return self.remote_attr('shortest_path_length', flatten=True)

# === Multigrid-specific ===
def get_num_blocks(self):
    """Number of clutter blocks placed in every worker env."""
    return self.remote_attr('n_clutter_placed', flatten=True)

def get_goal_pos_action(self):
    """Goal-placement action, queried from worker 0."""
    self._assert_not_closed()
    leader = self.remotes[0]
    leader.send(('get_goal_pos_action', None))
    return leader.recv()[0]

def get_agent_pos_action(self):
    """Agent-placement action, queried from worker 0."""
    self._assert_not_closed()
    leader = self.remotes[0]
    leader.send(('get_agent_pos_action', None))
    return leader.recv()[0]

def get_termination_action(self):
    """Termination action, queried from worker 0."""
    self._assert_not_closed()
    leader = self.remotes[0]
    leader.send(('get_termination_action', None))
    return leader.recv()[0]
def get_agent_loc(self):
    """Current agent location in every worker env."""
    return self.remote_attr('agent_loc', flatten=True)

def get_goal_loc(self):
    """Current goal location in every worker env."""
    # An unreachable `return locs` (referencing an undefined name) used to
    # follow this return; removed as dead code.
    return self.remote_attr('goal_loc', flatten=True)

def get_agent_pos(self):
    """Agent start position in every worker env."""
    return self.remote_attr('agent_start_pos', flatten=True)

def get_goal_pos(self):
    """Goal position in every worker env."""
    return self.remote_attr('goal_pos', flatten=True)
def __getattr__(self, name):
    # Only invoked when normal attribute lookup fails; proxies a handful of
    # space/dimension attributes to the worker processes.
    if name == 'observation_space':
        return self.get_observation_space()
    elif name == 'adversary_observation_space':
        return self.get_adversary_observation_space()
    elif name == 'adversary_action_space':
        return self.get_adversary_action_space()
    elif name == 'max_dim':
        # NOTE(review): get_max_dim is not defined in this chunk — confirm it
        # exists elsewhere on the class, otherwise this recurses into
        # __getattr__ again.
        return self.get_max_dim()
    elif name == 'get_goal_pos':
        # get_goal_pos/get_agent_pos are real methods, so normal lookup
        # succeeds and these branches are presumably dead — TODO confirm.
        return self.get_goal_pos()
    elif name == 'get_agent_pos':
        return self.get_agent_pos()
    else:
        # Falls through to default lookup, which raises AttributeError for
        # genuinely missing names.
        return self.__getattribute__(name)
| StarcoderdataPython |
158752 | <reponame>ry755/ryfs
#!/usr/bin/env python3
# ryfs.py
# manage RYFS disk images
import os
import sys
import struct
import argparse
# Tool version, rendered as "major.minor" for the --help banner.
version_info = (0, 2)
version = '.'.join(str(c) for c in version_info)
# create new RYFSv1 disk image
def ryfs_create():
    """Format the already-opened image file as an empty RYFSv1 filesystem.

    Layout written: optional 512-byte boot sector at offset 0, filesystem
    header in the directory sector at 512, allocation bitmap starting at
    1024. Uses module globals set up in __main__: ryfs_image,
    ryfs_image_size, ryfs_image_size_sectors, ryfs_image_bitmap_sectors,
    ryfs_image_label, use_boot_sector, ryfs_image_boot and quiet.
    """
    if not quiet:
        if use_boot_sector:
            print("creating bootable RYFSv1 image", "\"" + ryfs_image.name + "\"", "of size", ryfs_image_size, "bytes with label", "\"" + ryfs_image_label + "\"")
        else:
            print("creating RYFSv1 image", "\"" + ryfs_image.name + "\"", "of size", ryfs_image_size, "bytes with label", "\"" + ryfs_image_label + "\"")
    # fill new file with zeros
    ryfs_image.seek(0)
    ryfs_image.write(bytearray(ryfs_image_size))
    ryfs_image.seek(0)
    if use_boot_sector:
        # copy the user-supplied boot sector into sector 0
        ryfs_image.write(ryfs_image_boot.read(512))
    ryfs_image.seek(512)
    # write number of bitmap sectors, 1 byte
    ryfs_image.write(struct.pack('<B', ryfs_image_bitmap_sectors))
    # write RYFSv1 version header
    ryfs_image.write(bytearray([1,ord('R'),ord('Y')]))
    # write size of image, 2 bytes, little endian
    ryfs_image.write(struct.pack('<H', ryfs_image_size_sectors))
    # write directory label
    ryfs_image.write(str_to_bytearray(ryfs_image_label))
    # seek to first bitmap sector
    ryfs_image.seek(1024)
    # mark first two sectors as used (boot sector and directory sector)
    bitmap = 0b0000000000000011
    # mark bitmap sectors as used (bits 2..bitmap_sectors+1; fits in the
    # 4 bytes written below since there are at most 8 bitmap sectors)
    for bitmap_sector in range(0, ryfs_image_bitmap_sectors):
        bitmap = bitmap | 1 << bitmap_sector + 2
    ryfs_image.write(struct.pack('<I', bitmap))
# add file to an existing RYFSv1 disk image
def ryfs_add():
    """Copy the host file into the filesystem image as a new file.

    Allocates one directory entry plus a chain of 512-byte sectors, each
    holding 506 data bytes after a 6-byte sector header. Uses module
    globals set up in __main__: ryfs_image, ryfs_image_label, extra_file,
    extra_file_name, extra_file_ext, extra_file_size,
    extra_file_size_sectors and quiet.
    """
    if not quiet:
        print("adding file", "\"" + extra_file_name + "." + extra_file_ext + "\"", "of size", extra_file_size, "bytes to RYFSv1 filesystem with label", "\"" + ryfs_image_label + "\"")
    # ensure file doesn't already exist
    if ryfs_find_entry(extra_file_name, extra_file_ext) != None:
        print("file already exists! failing")
        return
    # find first empty file entry
    first_free_entry = ryfs_find_free_entry()
    if first_free_entry == None:
        print("all file entries are used! failing")
        return
    ryfs_image.seek(first_free_entry)
    first_free_sector = ryfs_find_free_sector()
    if first_free_sector == None:
        print("all sectors are used! failing")
        return
    # write number of first file sector, 2 bytes, little endian
    ryfs_image.write(struct.pack('<H', first_free_sector))
    # write file size in sectors, 2 bytes, little endian
    ryfs_image.write(struct.pack('<H', extra_file_size_sectors))
    # write null-terminated 8.3 file name, 12 bytes (name space-padded to 8)
    spaces = ' ' * (8 - len(extra_file_name))
    ryfs_image.write(bytearray(extra_file_name, 'utf-8'))
    ryfs_image.write(bytearray(spaces, 'utf-8'))
    ryfs_image.write(bytearray(extra_file_ext, 'utf-8'))
    ryfs_image.write(bytearray('\x00', 'utf-8'))
    extra_file.seek(0)
    # write file data, one 506-byte chunk per sector
    for sector in range(0, extra_file_size_sectors):
        # find a free sector to use as the current sector to write to
        next_free_sector = ryfs_find_free_sector()
        ryfs_image.seek(next_free_sector*512)
        ryfs_mark_used(next_free_sector)
        # find another free sector to use as the next sector of this file
        next_free_sector = ryfs_find_free_sector()
        # sector header bytes [255, 0]
        ryfs_image.write(bytearray([255,0]))
        if sector != extra_file_size_sectors-1:
            # this is not the last sector of this file
            # write the number of the next file sector
            ryfs_image.write(struct.pack('<H', next_free_sector))
            # since this is not the last sector, we don't care about the size of it (we know it's 512 bytes)
            ryfs_image.write(struct.pack('<H', 0))
        else:
            # this is the last sector of this file
            # there is no next sector for this file
            ryfs_image.write(struct.pack('<H', 0))
            # write the size of the last sector (bytes remaining to copy)
            ryfs_image.write(struct.pack('<H', extra_file_size - extra_file.tell()))
        # zero sector first to ensure there is no remaining data from previous files
        ryfs_image.write(bytearray(506))
        ryfs_image.seek(ryfs_image.tell()-506)
        # write file data
        ryfs_image.write(extra_file.read(506))
    extra_file.close()
# remove file from an existing RYFSv1 disk image
def ryfs_remove():
    """Delete a file: free its sectors in the bitmap and zero its entry.

    Uses module globals: ryfs_image, ryfs_image_label, extra_file_name,
    extra_file_ext and quiet.
    """
    if not quiet:
        print("removing file", "\"" + extra_file_name + "." + extra_file_ext + "\"", "from RYFSv1 filesystem with label", "\"" + ryfs_image_label + "\"")
    # find file entry
    file_entry = ryfs_find_entry(extra_file_name, extra_file_ext)
    if file_entry == None:
        print("file not found! failing")
        return
    ryfs_image.seek(file_entry)
    # free the first sector up front (the loop below frees it again, which
    # is harmless since clearing a bitmap bit is idempotent)
    ryfs_mark_free(int.from_bytes(ryfs_image.read(2), byteorder='little'))
    extra_file_size_sectors = int.from_bytes(ryfs_image.read(2), byteorder='little')
    # rewind to re-read the first-sector field as the chain start
    ryfs_image.seek(ryfs_image.tell()-4)
    next_sector = int.from_bytes(ryfs_image.read(2), byteorder='little')
    # mark sectors as free, following each sector's "next" link
    for sector in range(0, extra_file_size_sectors):
        ryfs_image.seek(next_sector*512)
        ryfs_mark_free(next_sector)
        # skip the 2 header bytes, then read the next-sector link
        ryfs_image.seek(ryfs_image.tell()+2)
        next_sector = int.from_bytes(ryfs_image.read(2), byteorder='little')
    # remove file entry (zero all 16 bytes)
    ryfs_image.seek(file_entry)
    ryfs_image.write(bytearray(16))
# export file from an existing RYFSv1 disk image
def ryfs_export():
    """Copy a file out of the image into the already-opened host file.

    Walks the file's sector chain; every sector contributes 506 data bytes
    except the last, whose byte count comes from its header. Uses module
    globals: ryfs_image, ryfs_image_label, extra_file, extra_file_name,
    extra_file_ext and quiet.
    """
    if not quiet:
        print("exporting file", "\"" + extra_file_name + "." + extra_file_ext + "\"", "from RYFSv1 filesystem with label", "\"" + ryfs_image_label + "\"")
    # find file entry
    file_entry = ryfs_find_entry(extra_file_name, extra_file_ext)
    if file_entry == None:
        print("file not found! failing")
        return
    ryfs_image.seek(file_entry)
    first_sector = int.from_bytes(ryfs_image.read(2), byteorder='little')
    # file size in sectors
    size = int.from_bytes(ryfs_image.read(2), byteorder='little')
    # write file data
    ryfs_image.seek(first_sector*512)
    extra_file.seek(0)
    for sector in range(0, size):
        # skip the 2 header bytes, then read next-sector link and byte count
        ryfs_image.seek(ryfs_image.tell()+2)
        next_sector = int.from_bytes(ryfs_image.read(2), byteorder='little')
        sector_size = int.from_bytes(ryfs_image.read(2), byteorder='little')
        if sector != size-1:
            # this is not the last sector of this file
            # read a whole sector's worth of data
            extra_file.write(ryfs_image.read(506))
        else:
            # this is the last sector of this file
            # only read the amount of data in this sector
            extra_file.write(ryfs_image.read(sector_size))
        ryfs_image.seek(next_sector*512)
    extra_file.close()
# list files in an existing RYFSv1 disk image
def ryfs_list():
    """Print the name field of every used directory entry.

    Directory entries are 16 bytes each: first sector (2), size in
    sectors (2), 8.3 name (12). Uses module globals: ryfs_image,
    ryfs_image_label and quiet.
    """
    if not quiet:
        print("listing files from RYFSv1 filesystem with label", "\"" + ryfs_image_label + "\"")
    # seek to first file entry (the directory sector's first 16 bytes hold
    # the filesystem header)
    ryfs_image.seek(512+16)
    # print existing file entries
    for i in range(0,30):
        if ryfs_image.read(2) != b'\x00\x00':
            # used entry: skip the size field and print the 12-byte name
            ryfs_image.seek(ryfs_image.tell()+2)
            entry = bytes(ryfs_image.read(12)).decode("utf-8")
            print(entry)
            continue
        # empty entry: skip the remaining 14 bytes so the scan stays aligned
        # on 16-byte boundaries. (Was +4, which desynced every entry after
        # the first hole left by a removed file — cf. ryfs_find_free_entry.)
        ryfs_image.seek(ryfs_image.tell()+14)
# find first free sector
# returns None if all sectors are used
def ryfs_find_free_sector():
    """Scan the allocation bitmap and return the first free sector number.

    Returns None when every sector is allocated. Restores the image file
    position before returning. Uses module globals: ryfs_image and
    ryfs_image_bitmap_sectors.
    """
    # save current file pointer
    old_location = ryfs_image.tell()
    # seek to first bitmap sector
    ryfs_image.seek(1024)
    # find first free sector; each 512-byte bitmap sector covers 4096 sectors
    for bitmap_sector in range(0, ryfs_image_bitmap_sectors):
        # was range(0, 511): that off-by-one skipped the last byte of every
        # bitmap sector and drifted the byte offsets of later bitmap sectors
        for bitmap_byte in range(0, 512):
            first_clear_bit = find_first_clear(int.from_bytes(ryfs_image.read(1), byteorder='little'))
            if first_clear_bit is not None:
                first_free_sector = (bitmap_sector*4096) + (bitmap_byte*8) + first_clear_bit
                ryfs_image.seek(old_location)
                return first_free_sector
    # no free sectors were found, return None
    ryfs_image.seek(old_location)
    return None
# find first free file entry
# returns None if all entries are used
def ryfs_find_free_entry():
    """Return the byte offset of the first unused directory entry, or None.

    Restores the image file position before returning. Uses module global:
    ryfs_image.
    """
    saved_position = ryfs_image.tell()
    # directory entries start 16 bytes into the directory sector
    ryfs_image.seek(512+16)
    free_offset = None
    for _ in range(30):
        # a zeroed first-sector field marks an unused 16-byte entry
        if ryfs_image.read(2) == b'\x00\x00':
            free_offset = ryfs_image.tell()-2
            break
        ryfs_image.seek(ryfs_image.tell()+14)
    ryfs_image.seek(saved_position)
    return free_offset
# find specified file entry
# returns None if entry doesn't exist
def ryfs_find_entry(name, ext):
    """Return the byte offset of the directory entry for name.ext, or None.

    Restores the image file position before returning. Uses module global:
    ryfs_image.
    """
    saved_position = ryfs_image.tell()
    # build the 12-byte name field to match: name space-padded to 8, then
    # the extension, then a terminating NUL
    target = bytearray(name + ' ' * (8 - len(name)) + ext + '\x00', 'utf-8')
    # skip to the name field of the first entry (512+16 entry start + 4)
    ryfs_image.seek(512+20)
    entry_offset = None
    for _ in range(30):
        if bytearray(ryfs_image.read(12)) == target:
            # report the start of the whole 16-byte entry
            entry_offset = ryfs_image.tell()-16
            break
        ryfs_image.seek(ryfs_image.tell()+4)
    ryfs_image.seek(saved_position)
    return entry_offset
# mark a sector as used
def ryfs_mark_used(sector):
    """Set the allocation-bitmap bit for *sector*.

    The bitmap is a contiguous run of sectors starting at offset 1024, so
    the bit for sector N lives at byte 1024 + N//8, bit N%8. (The previous
    bitmap_sector/bitmap_byte arithmetic double-counted the sector offset
    for sectors >= 4096 — images over 2 MB — because its modulo guard used
    4096 instead of 512.) Restores the image file position before
    returning. Uses module global: ryfs_image.
    """
    # save current file pointer
    old_location = ryfs_image.tell()
    bitmap_bit = sector % 8
    final_location = 1024 + sector // 8
    ryfs_image.seek(final_location)
    bitmap = int.from_bytes(ryfs_image.read(1), byteorder='little')
    bitmap = bitmap | 1 << bitmap_bit
    ryfs_image.seek(final_location)
    ryfs_image.write(struct.pack('<B', bitmap))
    # restore old file pointer
    ryfs_image.seek(old_location)
# mark a sector as free
def ryfs_mark_free(sector):
    """Clear the allocation-bitmap bit for *sector*.

    Mirrors ryfs_mark_used: bit for sector N is at byte 1024 + N//8,
    bit N%8 of the contiguous bitmap starting at offset 1024. (The previous
    arithmetic mis-addressed sectors >= 4096; see ryfs_mark_used.)
    Restores the image file position before returning. Uses module global:
    ryfs_image.
    """
    # save current file pointer
    old_location = ryfs_image.tell()
    bitmap_bit = sector % 8
    final_location = 1024 + sector // 8
    ryfs_image.seek(final_location)
    bitmap = int.from_bytes(ryfs_image.read(1), byteorder='little')
    bitmap = bitmap & ~(1 << bitmap_bit)
    ryfs_image.seek(final_location)
    ryfs_image.write(struct.pack('<B', bitmap))
    # restore old file pointer
    ryfs_image.seek(old_location)
def round_ceil(number, ceil_num):
    """Round *number* up to the nearest multiple of *ceil_num*.

    Preserved quirk from the original: an input of 0 rounds up to
    ceil_num, not to 0.
    """
    if number == 0:
        return ceil_num
    quotient, leftover = divmod(number, ceil_num)
    return number if leftover == 0 else (quotient + 1) * ceil_num
def find_first_clear(byte):
    """Return the index of the lowest clear (zero) bit of *byte*.

    Returns None when all 8 bits are set.
    """
    if byte == 0b11111111:
        return None
    position = 0
    while byte & 1:
        byte >>= 1
        position += 1
    return position
def str_to_bytearray(text):
    """Convert *text* to a bytearray of its characters' code points.

    Like the original, characters with code points above 255 raise
    ValueError.
    """
    return bytearray(ord(symbol) for symbol in text)
def open_image(filename):
    """Open the disk image read-write; create it only for the "create" action.

    Exits the process when the image is missing and we are not creating
    one. Uses module global: ryfs_action.
    """
    if os.path.exists(filename):
        return open(filename, 'r+b')
    else:
        if ryfs_action == "create":
            # new image: create an empty file to be formatted later
            return open(filename, 'w+b')
        else:
            print("error: file \"" + filename + "\" not found")
            sys.exit()
def open_file(filename):
    """Open (or pass through) the extra file argument, depending on action.

    - "export": create/truncate the host destination file for writing.
    - "remove": return the raw filename string (no file object needed —
      callers on this path only parse the name).
    - otherwise (e.g. "add"): open an existing file, exiting if missing.
    Uses module global: ryfs_action.
    """
    if ryfs_action == "export":
        return open(filename, 'w+b')
    if ryfs_action == "remove":
        return filename
    if os.path.exists(filename):
        return open(filename, 'r+b')
    else:
        print("error: file \"" + filename + "\" not found")
        sys.exit()
if __name__ == '__main__':
    # Command-line interface: <action> <image> [file] plus options.
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
        description="RYFS management tool - version " + version + "\nManage RYFSv1 disk images",
        epilog="example commands to create a 2KB image named \"ryfs.img\" with label \"Stuff\" and add \"hello.txt\" to it:\
\n create ryfs.img -l Stuff -s 2048\
\n add ryfs.img hello.txt\
\n list ryfs.img"
    )
    arg_parser.add_argument('action', nargs=1, help="\"add\", \"create\", \"export\", \"list\", \"remove\"")
    arg_parser.add_argument('image', nargs=1, help="disk image to manage")
    arg_parser.add_argument('file', nargs='?', help="file to modify (optional, depending on action)")
    arg_parser.add_argument('-b', '--boot-sector', dest="boot", type=argparse.FileType('rb'), help="use specified file as boot sector (must be 512 bytes)")
    arg_parser.add_argument('-l', '--label', type=str, default="RYFS", help="label of new directory (max. 8 characters, default \"RYFS\")")
    arg_parser.add_argument('-s', '--size', type=int, default=1474560, help="size in bytes of disk image to create (default 1474560 bytes)")
    arg_parser.add_argument('-q', '--quiet', action="store_true", help="disable all output except warnings and errors")
    args = arg_parser.parse_args()
    ryfs_action = args.action[0]
    ryfs_image = open_image(args.image[0])
    ryfs_image_size = args.size
    # sizes in 512-byte sectors; one bitmap sector tracks 4096 data sectors
    ryfs_image_size_sectors = int(round_ceil(ryfs_image_size, 512)/512)
    ryfs_image_bitmap_sectors = int(round_ceil(ryfs_image_size_sectors, 4096)/4096)
    ryfs_image_label = args.label
    # actions that operate on an extra host file
    if ryfs_action == "add" or ryfs_action == "export" or ryfs_action == "remove":
        use_extra_file = True
    else:
        use_extra_file = False
    if use_extra_file:
        extra_file = open_file(args.file)
        if ryfs_action == "remove":
            # for "remove", open_file returned the raw filename string.
            # NOTE(review): split('.') breaks on names containing extra
            # dots — TODO confirm acceptable for 8.3 names.
            extra_file_name, extra_file_ext = extra_file.split('.')
        else:
            extra_file_name, extra_file_ext = os.path.splitext(os.path.basename(extra_file.name))
            extra_file_size = os.fstat(extra_file.fileno()).st_size
            # 506 data bytes fit in each 512-byte sector (6 bytes of header)
            extra_file_size_sectors = int(round_ceil(extra_file_size, 506)/506)
            # splitext keeps the leading dot; strip it
            extra_file_ext = extra_file_ext[1:]
        if (len(extra_file_name) > 8) or (len(extra_file_ext) > 3):
            print("error: file name must be in 8.3 format")
            print(len(extra_file_name))
            print(len(extra_file_ext))
            sys.exit()
    quiet = args.quiet
    if args.boot != None:
        ryfs_image_boot = args.boot
        use_boot_sector = True
    else:
        use_boot_sector = False
    # 16 MB limit: sector numbers are stored in 2 bytes (32768 sectors max)
    if ryfs_image_size > 16777216:
        print("error: RYFSv1 does not support read-write filesystems over 16MB")
        sys.exit()
    # if we aren't creating a new filesystem, get the existing label
    if ryfs_action != "create":
        ryfs_image.seek(512+6)
        ryfs_image_label = ryfs_image.read(8).decode("utf-8")
    if len(ryfs_image_label) > 8:
        print("error: filesystem label must be 8 characters or less")
        sys.exit()
    # dispatch on action
    if ryfs_action == "add":
        ryfs_add()
    elif ryfs_action == "create":
        ryfs_create()
    elif ryfs_action == "export":
        ryfs_export()
    elif ryfs_action == "list":
        ryfs_list()
    elif ryfs_action == "remove":
        ryfs_remove()
    else:
        print("error: unknown action", "\"" + ryfs_action + "\"")
        sys.exit()
    ryfs_image.close()
| StarcoderdataPython |
1697905 | <filename>code/blastn_all_v_all.py
'''
For blastn searches we are going to calculate the percent coverage of
the phage genome and score the longest coverage as the best hit. It
doesn't matter where the hits are on the bacterial genome.
We are going to use a cutoff of 0.001 E value
'''
import sys,os,re
from phage import Phage
phage=Phage()
try:
    blastf=sys.argv[1]
except:
    sys.exit(sys.argv[0] + "< blast file>")
# read the fasta file of phages to get the lengths
lens=phage.phageSequenceLengths()
sys.stderr.write("Found " + str(len(lens)) + " sequences\n")
# get the phage and bacteria so we can limit our data appropriately
bacteriaG = set(phage.completeBacteriaIDs())
phageG = set(phage.phageIDs())
# hits[phage_id][bacterium_id] -> per-base 0/1 coverage mask over the phage
hits = {}
for p in phageG:
    hits[p]={}
with open(blastf, 'r') as fin:
    for l in fin:
        # tab-separated BLAST line; indices used below (0: query, 1: subject,
        # 6/7: query start/end, 10: E-value) match tabular -outfmt 6 output —
        # presumably; confirm against the blastn invocation
        p=l.strip().split("\t")
        e=float(p[10])
        if e > 0.001:
            continue
        # pull the NC_* accession out of the phage (query) identifier
        m=re.findall('(NC_\d+)', p[0])
        if m == []:
            sys.stderr.write("WARNING: No phage found in " + p[0] + "\n")
            continue
        pnc = m[0]
        if pnc not in phageG:
            continue
        if pnc not in lens:
            sys.stderr.write("No length for " + pnc + "\n")
            continue
        # pull the NC_* accession out of the bacterial (subject) identifier
        m=re.findall('(NC_\d+)', p[1])
        if m == []:
            sys.stderr.write("WARNING: No bacteria found in " + p[1] + "\n")
            continue
        bnc = m[0]
        if bnc not in bacteriaG:
            continue
        if bnc not in hits[pnc]:
            # lazily allocate the coverage mask, one slot per phage base
            # (slot 0 is unused: BLAST coordinates are 1-based)
            hits[pnc][bnc]=[]
            for i in range(lens[pnc]+1):
                hits[pnc][bnc].append(0)
        # mark the aligned query range (inclusive) as covered
        for i in range(int(p[6]), int(p[7])+1):
            hits[pnc][bnc][i]=1
# now for each phage we need to print it, and a list of all its other
# hits. We also need to make all missing phage/bacteria 0, and limit
# this to only our 820 phage that we want!
for p in phageG:
    for b in bacteriaG:
        if b in hits[p]:
            # covered bases = sum of the 0/1 mask
            c = sum(hits[p][b])
        else:
            c = 0
        print("\t".join([p, b, str(c)]))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.