Spaces:
Runtime error
Runtime error
Commit
·
e4806d5
1
Parent(s):
5e1486f
Upload 22 files
Browse files- __init__.py +0 -0
- algorithm/__init__.py +0 -0
- algorithm/__pycache__/__init__.cpython-310.pyc +0 -0
- algorithm/__pycache__/solver.cpython-310.pyc +0 -0
- algorithm/plot2D.py +37 -0
- algorithm/solver.py +143 -0
- app.py +68 -0
- ast_parser/__init__.py +47 -0
- ast_parser/__pycache__/__init__.cpython-310.pyc +0 -0
- ast_parser/__pycache__/chars.cpython-310.pyc +0 -0
- ast_parser/__pycache__/errors.cpython-310.pyc +0 -0
- ast_parser/__pycache__/lexer.cpython-310.pyc +0 -0
- ast_parser/__pycache__/linter.cpython-310.pyc +0 -0
- ast_parser/__pycache__/parser.cpython-310.pyc +0 -0
- ast_parser/__pycache__/token.cpython-310.pyc +0 -0
- ast_parser/chars.py +131 -0
- ast_parser/errors.py +52 -0
- ast_parser/lexer.py +382 -0
- ast_parser/linter.py +291 -0
- ast_parser/parser.py +245 -0
- ast_parser/token.py +106 -0
- main.py +59 -0
__init__.py
ADDED
|
File without changes
|
algorithm/__init__.py
ADDED
|
File without changes
|
algorithm/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (160 Bytes). View file
|
|
|
algorithm/__pycache__/solver.cpython-310.pyc
ADDED
|
Binary file (4.28 kB). View file
|
|
|
algorithm/plot2D.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import numpy as np
import matplotlib.pyplot as plt


def plot_2d(C, A, B, initial_point):
    """Plot a 2-variable LP: each constraint's boundary line, the region
    below all of them, and the initial feasible point.

    Args:
        C (numpy.ndarray): Objective-function coefficients, shape (2,).
            Currently unused by the plot; kept for interface symmetry.
        A (numpy.ndarray): Constraint coefficient matrix, shape (m, 2).
            NOTE(review): every A[i, 1] must be non-zero — a vertical
            constraint line would divide by zero. TODO confirm callers
            guarantee this.
        B (numpy.ndarray): Constraint right-hand sides, shape (m,).
        initial_point (numpy.ndarray): Starting feasible point (x1, x2).
    """
    n_constraints = A.shape[0]  # Get the number of constraints

    plt.figure(figsize=(8, 6))
    plt.xlim(0, 5)  # Fixed viewport; adjust based on your problem
    plt.ylim(0, 5)  # Adjust based on your problem
    plt.xlabel('X1')
    plt.ylabel('X2')

    # Plot each constraint boundary a*x1 + b*x2 = rhs as x2 = (rhs - a*x1)/b.
    x1 = np.linspace(0, 5, 100)

    for i in range(n_constraints):
        x2_i = (B[i] - A[i, 0] * x1) / A[i, 1]
        plt.plot(x1, x2_i, label=f'{A[i, 0]}*X1 + {A[i, 1]}*X2 <= {B[i]}')

    # Shade below the tightest (lowest) constraint line: the region that
    # satisfies every `<=` constraint inside the viewport.
    min_x2 = np.minimum.reduce(
        [(B[i] - A[i, 0] * x1) / A[i, 1] for i in range(n_constraints)]
    )
    plt.fill_between(x1, min_x2, 0, where=(x1 >= 0) & (x1 <= 5), alpha=0.2)

    # Mark the initial feasible solution point.
    plt.scatter(initial_point[0], initial_point[1], color='red', marker='o',
                label='Initial Point')

    plt.legend()
    plt.show()


if __name__ == "__main__":
    # Example usage with variable-sized A and B. Guarded so that merely
    # importing this module no longer pops up a (blocking) plot window.
    C = np.array([2, 3])  # Objective function coefficients
    A = np.array([[1, 1],  # Constraint matrix
                  [2, 1],
                  [3, 1]])  # Add more rows for additional constraints
    B = np.array([4, 5, 6])  # Right-hand side values, one per constraint
    initial_point = np.array([1.0, 3.0])
    plot_2d(C, A, B, initial_point)
algorithm/solver.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from ast_parser.parser import EquationKind # Assuming EquationKind is imported from another module
|
| 3 |
+
|
class Solver:
    """Solver takes in the equations which have been parsed from the input,
    converts them to matrix arrays and applies the simplex iterations on
    them.

    Args:
        objective_functions (list of Equation objects): List of objective
            functions. Only the first one is used; its variable map must
            contain a 'Z' entry.
        constraints (list of Equation objects): List of constraint equations.
    """

    def __init__(self, objective_functions, constraints):
        # Variable -> coefficient map of the first objective. 'Z' is the
        # objective's own symbol, not a decision variable, so drop it.
        self.objective_functions = objective_functions[0].variables
        self.objective_functions.pop('Z')

        # Shallow copy of the constraint list (the Equation objects are
        # still shared and mutated below).
        self.constraints = list(constraints)

        # Negate coefficients of the objective function for maximization.
        for key in self.objective_functions:
            self.objective_functions[key] *= -1

        # Give every constraint an explicit zero entry for every objective
        # variable so all rows share the same column set.
        for constraint in constraints:
            for key in self.objective_functions.keys():
                if key not in constraint.variables:
                    constraint.variables[key] = 0

        # Add one slack variable per `<=` constraint.
        slack = len(self.constraints)
        i = 0
        for constraint in self.constraints:
            if constraint.kind == EquationKind.LEQ:
                constraint.variables["s_" + str(i)] = 1.0
                self.objective_functions["s_" + str(i)] = 0
                i += 1

        # NOTE(review): pads any remaining slack columns onto the last
        # iterated constraint so that exactly `slack` slack names exist
        # (advanced_simplex needs n slack names for its initial basis).
        # Verify this is the intended handling of EQ constraints.
        while i < slack:
            constraint.variables["s_" + str(i)] = 1.0
            self.objective_functions["s_" + str(i)] = 0
            i += 1

        self.A, self.B, self.C = self.convert_to_matrices()

    def get_results(self):
        """Solve and format the outcome as a human-readable report.

        Returns:
            str: One line per basic variable plus the optimal value.
        """
        # advanced_simplex returns the basic-variable values X_B (renamed
        # from the original's misleading `objective_values`).
        basic_values, solution, variable_names = self.advanced_simplex(
            self.A, self.B, self.C
        )
        variable_names = list(variable_names.values())
        results_str = "The vector of decision variables is : \n"
        for i in range(len(basic_values)):
            results_str += f"{variable_names[i]} : {round(basic_values[i, 0], 2)}\n"
        results_str += "The optimal solution is {}\n".format(solution)
        return results_str

    def convert_to_matrices(self):
        """Converts objective functions and constraints to matrices.

        Returns:
            A (numpy.ndarray): Coefficients matrix for constraints.
            B (numpy.ndarray): Right-hand side matrix for constraints.
            C (numpy.ndarray): Coefficients matrix for objective function.
        """
        num_constraints = len(self.constraints)
        num_variables = len(self.objective_functions)

        A = np.zeros((num_constraints, num_variables))
        C = np.zeros((1, num_variables))
        B = np.zeros((num_constraints, 1))

        # Column order follows the objective map's key order.
        columns = list(self.objective_functions.keys())

        for i, constraint in enumerate(self.constraints):
            for variable_name, coefficient in constraint.variables.items():
                A[i, columns.index(variable_name)] = coefficient
            B[i, 0] = constraint.bound

        # Fill C once. (The original refilled the identical C on every
        # constraint iteration — redundant work, same result.)
        for j, coefficient in enumerate(self.objective_functions.values()):
            C[0, j] = coefficient

        return A, B, C

    def advanced_simplex(self, A, b, C):
        """Performs the revised simplex iterations to find the optimum.

        Args:
            A (numpy.ndarray): Constraint coefficient matrix, shape (n, m).
            b (numpy.ndarray): Right-hand side column vector, shape (n, 1).
            C (numpy.ndarray): Objective coefficient row vector, shape (1, m).

        Returns:
            tuple: (X_B, solution, variable_names) where X_B (n, 1) holds
            the basic-variable values, solution is the rounded (1, 1)
            objective value, and variable_names maps basis row -> name.
        """
        n = A.shape[0]  # number of constraints / basis size

        # Initial basis: the slack columns (identity), with zero cost.
        B = np.eye(n)
        C_B = np.zeros((1, n))

        # The last n objective keys are the slack names added in __init__.
        slack_names = list(self.objective_functions.keys())[-n:]
        variable_names = dict(enumerate(slack_names))

        prev_solution = float('inf')
        while True:
            B_inverse = np.around(np.linalg.inv(B), decimals=2)
            # Normalize -0.0 entries to +0.0. (The original looped over the
            # rows rebinding a local `value`, which modified nothing;
            # adding 0.0 performs the normalization for real.)
            B_inverse = B_inverse + 0.0

            X_B = np.matmul(B_inverse, b)
            P_table = np.round(np.matmul(B_inverse, A), 2)
            objective_values = np.matmul(C_B, P_table) - C
            solution = np.round(np.matmul(C_B, X_B), 2)

            # Converged: the objective value stopped improving.
            if abs(prev_solution - solution) < 0.0001:
                return X_B, solution, variable_names

            # Entering variable: most negative reduced cost.
            entering_var_idx = np.argmin(objective_values)

            # Minimum-ratio test picks the leaving row; non-positive pivot
            # entries are excluded via an infinite ratio.
            ratios = []
            for row in range(n):
                pivot = P_table[row, entering_var_idx]
                ratios.append(X_B[row, 0] / pivot if pivot > 0 else np.inf)

            exiting_var_idx = np.argmin(ratios)

            all_names = list(self.objective_functions.keys())
            variable_names[exiting_var_idx] = all_names[entering_var_idx]
            B[:, exiting_var_idx] = A[:, entering_var_idx]
            C_B[:, exiting_var_idx] = C[:, entering_var_idx]
            prev_solution = solution
app.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
|
| 3 |
+
from ast_parser import Parser, EquationKind
|
| 4 |
+
from algorithm import solver
|
| 5 |
+
|
def process(input_equation):
    """Parse the user's equation string, split it into objective
    function(s) and constraints, solve, and return the report text.

    Args:
        input_equation (str): Comma-separated equations, e.g.
            "Z = 5x_1 + 4x_2,6x_1 + 4x_2 <= 24".

    Raises:
        ValueError: A `>=` constraint survives normalization, a bound is
            negative, or an objective function is not an equality.

    Returns:
        str: Human-readable solver output from Solver.get_results().
    """

    parser = Parser(input_equation)

    equations = tuple(parser)

    # Normalize: a `>=` with a negative bound is flipped (negate both
    # sides) into an equivalent `<=` constraint.
    for equation in equations:
        if equation.kind == EquationKind.GEQ and equation.bound < 0.0:
            for variable, coefficient in equation.variables.items():
                equation.variables[variable] *= -1.0

            equation.bound *= -1.0
            equation.kind = EquationKind.LEQ

            continue

        # Anything still `>=` (or with a negative bound) is unsupported.
        if equation.kind == EquationKind.GEQ:
            raise ValueError("Equation kind must be either EQ or LEQ")
        if equation.bound < 0.0:
            raise ValueError("Equation bound must be non-negative")

    # demand maps each variable name to the indices of the equations in
    # which it appears with a non-zero coefficient.
    demand: dict[str, list[int]] = {}
    for i, equation in enumerate(equations):
        for variable, coefficient in equation.variables.items():
            if coefficient == 0.0:
                continue

            if variable not in demand:
                demand[variable] = []

            demand[variable].append(i)

    # A variable occurring in exactly one equation is treated as an
    # objective variable (e.g. 'Z' appears only in "Z = ...").
    objective_variables = tuple(
        filter(lambda variable: len(demand[variable]) == 1, demand.keys())
    )

    # The equations those variables live in, kept only when equalities.
    objective_functions = tuple(
        filter(
            lambda function: function.kind == EquationKind.EQ,
            map(lambda variable: equations[demand[variable][0]], objective_variables),
        )
    )

    # If the filter dropped any, some candidate objective was not an
    # equality — reject the input.
    if len(objective_variables) != len(objective_functions):
        raise ValueError("Objective functions must be equalities")

    # Everything that is not an objective function is a constraint.
    constraints = tuple(
        filter(
            lambda function: function not in objective_functions,
            equations,
        )
    )

    solver_instance = solver.Solver(objective_functions, constraints)

    results = solver_instance.get_results()
    return results
| 63 |
+
|
# Gradio UI wiring: one text input (the comma-separated equations), one
# text output (the formatted solver report produced by `process`).
description = "This app calculates optimum value using Simplex Method. Enter all equations, following each with a comma. The last equation should be without a comma. As an example 'Z = 5x_1 + 4x_2', '6x_1 + 4x_2 <= 24'"
examples = [["Z = 5x_1 + 4x_2,6x_1 + 4x_2 <= 24"]]
demo = gr.Interface(fn=process, inputs="text", outputs="text", title="Optimization Assignment",examples=examples,description=description)

# Start the local web server (blocking call).
demo.launch()
ast_parser/__init__.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ast_parser.chars import (
|
| 2 |
+
is_alpha,
|
| 3 |
+
is_ascii,
|
| 4 |
+
is_coefficient_start,
|
| 5 |
+
is_digit,
|
| 6 |
+
is_lower_alpha,
|
| 7 |
+
is_upper_alpha,
|
| 8 |
+
is_variable_continue,
|
| 9 |
+
is_variable_start,
|
| 10 |
+
print_char_code,
|
| 11 |
+
)
|
| 12 |
+
from ast_parser.errors import LexerException, LinterException, PositionedException
|
| 13 |
+
from ast_parser.lexer import Lexer
|
| 14 |
+
from ast_parser.linter import Linter
|
| 15 |
+
from ast_parser.parser import Equation, EquationKind, Parser
|
| 16 |
+
from ast_parser.token import (
|
| 17 |
+
Location,
|
| 18 |
+
Token,
|
| 19 |
+
TokenKind,
|
| 20 |
+
is_binary_operator,
|
| 21 |
+
is_relational_operator,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
__all__ = (
|
| 25 |
+
"is_digit",
|
| 26 |
+
"is_coefficient_start",
|
| 27 |
+
"is_lower_alpha",
|
| 28 |
+
"is_upper_alpha",
|
| 29 |
+
"is_alpha",
|
| 30 |
+
"is_variable_start",
|
| 31 |
+
"is_variable_continue",
|
| 32 |
+
"is_ascii",
|
| 33 |
+
"print_char_code",
|
| 34 |
+
"PositionedException",
|
| 35 |
+
"LexerException",
|
| 36 |
+
"LinterException",
|
| 37 |
+
"Lexer",
|
| 38 |
+
"Linter",
|
| 39 |
+
"EquationKind",
|
| 40 |
+
"Equation",
|
| 41 |
+
"Parser",
|
| 42 |
+
"TokenKind",
|
| 43 |
+
"Location",
|
| 44 |
+
"Token",
|
| 45 |
+
"is_binary_operator",
|
| 46 |
+
"is_relational_operator",
|
| 47 |
+
)
|
ast_parser/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (984 Bytes). View file
|
|
|
ast_parser/__pycache__/chars.cpython-310.pyc
ADDED
|
Binary file (3.24 kB). View file
|
|
|
ast_parser/__pycache__/errors.cpython-310.pyc
ADDED
|
Binary file (1.96 kB). View file
|
|
|
ast_parser/__pycache__/lexer.cpython-310.pyc
ADDED
|
Binary file (9.67 kB). View file
|
|
|
ast_parser/__pycache__/linter.cpython-310.pyc
ADDED
|
Binary file (7.22 kB). View file
|
|
|
ast_parser/__pycache__/parser.cpython-310.pyc
ADDED
|
Binary file (6.72 kB). View file
|
|
|
ast_parser/__pycache__/token.cpython-310.pyc
ADDED
|
Binary file (2.35 kB). View file
|
|
|
ast_parser/chars.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ast_parser.token import TokenKind
|
| 2 |
+
|
| 3 |
+
|
def is_digit(code: int) -> bool:
    """Tell whether *code* is an ASCII digit.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True for '0' (48) through '9' (57), False otherwise.
    """
    return 48 <= code <= 57  # <digit>


def is_coefficient_start(code: int) -> bool:
    """Tell whether a Coefficient token may start at *code*.

    A Coefficient opens with either a digit or a decimal point.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True if a Coefficient may start here, False otherwise.
    """
    return code == 0x002E or is_digit(code)  # `.` | <digit>


def is_lower_alpha(code: int) -> bool:
    """Tell whether *code* is a lowercase ASCII letter.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True for 'a' (97) through 'z' (122), False otherwise.
    """
    return 97 <= code <= 122  # <lower_alpha>


def is_upper_alpha(code: int) -> bool:
    """Tell whether *code* is an uppercase ASCII letter.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True for 'A' (65) through 'Z' (90), False otherwise.
    """
    return 65 <= code <= 90  # <upper_alpha>


def is_alpha(code: int) -> bool:
    """Tell whether *code* is an ASCII letter of either case.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True for an upper- or lowercase letter, False otherwise.
    """
    return is_upper_alpha(code) or is_lower_alpha(code)  # <alpha>


def is_variable_start(code: int) -> bool:
    """Tell whether a Variable token may start at *code*.

    A Variable opens with a letter or an underscore.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True if a Variable may start here, False otherwise.
    """
    return code == 0x005F or is_alpha(code)  # `_` | <alpha>


def is_variable_continue(code: int) -> bool:
    """Tell whether *code* may appear inside a Variable after its start.

    Letters, underscores and digits are all allowed.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True if a Variable may continue here, False otherwise.
    """
    return is_digit(code) or is_variable_start(code)  # <digit> | `_` | <alpha>


def is_ascii(code: int) -> bool:
    """Tell whether *code* is a printable ASCII character.

    Args:
        code (int): Unicode code point.

    Returns:
        bool: True for space (32) through '~' (126), False otherwise.
    """
    return 32 <= code <= 126  # <ASCII>
| 104 |
+
|
| 105 |
+
|
def print_char_code(code: int | None) -> str:
    """Render a code point (or EOF) for use in an error message.

    Args:
        code (int | None): Unicode code point. None for EOF.

    Returns:
        str: The printable character itself, the EOF marker, or a
        `U+XXXX` escape for non-ASCII code points.
    """
    if code is None:  # <EOF>
        return TokenKind.EOF.value

    if is_ascii(code):
        return chr(code)

    return f"U+{code:04X}"
| 119 |
+
|
| 120 |
+
|
| 121 |
+
__all__ = (
|
| 122 |
+
"is_digit",
|
| 123 |
+
"is_coefficient_start",
|
| 124 |
+
"is_lower_alpha",
|
| 125 |
+
"is_upper_alpha",
|
| 126 |
+
"is_alpha",
|
| 127 |
+
"is_variable_start",
|
| 128 |
+
"is_variable_continue",
|
| 129 |
+
"is_ascii",
|
| 130 |
+
"print_char_code",
|
| 131 |
+
)
|
ast_parser/errors.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ast_parser.token import Location
|
| 2 |
+
|
| 3 |
+
|
class PositionedException(Exception):
    """Base class for exceptions with Location.

    Args:
        source (str): The source string being tokenized.
        location (Location): The Location of the exception.
        description (str, optional): An optional description of the
            error. Becomes the standard Exception message, so
            ``str(exc)`` shows it.
    """

    source: str
    """The source string being tokenized."""
    location: Location
    """The Location of the error in the source."""

    def __init__(
        self, source: str, location: Location, description: str | None = None
    ) -> None:
        # Hand the description to Exception so str()/repr() display it.
        super().__init__(description)

        self.source = source
        self.location = location
| 26 |
+
|
| 27 |
+
|
class LexerException(PositionedException):
    """A LexerException is raised when the Lexer encounters an invalid
    character or token.

    Adds no behavior of its own; it exists so callers can distinguish
    lexing failures from linting failures.

    Args:
        source (str): The source string being tokenized.
        location (Location): The Location of the exception.
        description (str, optional): An optional description of the
            error.
    """
| 38 |
+
|
| 39 |
+
|
class LinterException(PositionedException):
    """A LinterException is raised when the Linter encounters an invalid
    token chain.

    Adds no behavior of its own; it exists so callers can distinguish
    linting failures from lexing failures.

    Args:
        source (str): The source string being tokenized.
        location (Location): The Location of the exception.
        description (str, optional): An optional description of the
            error.
    """
| 50 |
+
|
| 51 |
+
|
| 52 |
+
__all__ = ("PositionedException", "LexerException", "LinterException")
|
ast_parser/lexer.py
ADDED
|
@@ -0,0 +1,382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Callable
|
| 4 |
+
|
| 5 |
+
from ast_parser.chars import (
|
| 6 |
+
is_coefficient_start,
|
| 7 |
+
is_digit,
|
| 8 |
+
is_variable_continue,
|
| 9 |
+
is_variable_start,
|
| 10 |
+
print_char_code,
|
| 11 |
+
)
|
| 12 |
+
from ast_parser.errors import LexerException
|
| 13 |
+
from ast_parser.token import Location, Token, TokenKind
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Lexer:
|
| 17 |
+
"""A Lexer is a stateful stream generator in that every time it is
|
| 18 |
+
advanced, it returns the next token in the source.
|
| 19 |
+
|
| 20 |
+
Assuming the source lexes, the final Token emitted by the Lexer
|
| 21 |
+
will be of kind EOF, after which the Lexer will repeatedly return
|
| 22 |
+
the same EOF token whenever called.
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
source (str): The source string being tokenized.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
_source: str
|
| 29 |
+
"""The source string being tokenized."""
|
| 30 |
+
|
| 31 |
+
_token: Token
|
| 32 |
+
"""The currently active Token."""
|
| 33 |
+
|
| 34 |
+
_line: int
|
| 35 |
+
"""The current line number."""
|
| 36 |
+
_line_start: int
|
| 37 |
+
"""The index of the start of the current line."""
|
| 38 |
+
|
| 39 |
+
def __init__(self, source: str) -> None:
|
| 40 |
+
self._source = source
|
| 41 |
+
|
| 42 |
+
self._token = Token(TokenKind.SOF, 0, 0, Location(0, 0), "")
|
| 43 |
+
|
| 44 |
+
self._line = 1
|
| 45 |
+
self._line_start = 0
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def source(self) -> str:
|
| 49 |
+
"""Gets the source string being tokenized.
|
| 50 |
+
|
| 51 |
+
Returns:
|
| 52 |
+
str: The source string being tokenized.
|
| 53 |
+
"""
|
| 54 |
+
return self._source
|
| 55 |
+
|
| 56 |
+
def __iter__(self) -> Lexer:
|
| 57 |
+
"""Gets an iterator over the Tokens in the source.
|
| 58 |
+
|
| 59 |
+
Returns:
|
| 60 |
+
Lexer: An iterator over the tokens in the source.
|
| 61 |
+
"""
|
| 62 |
+
return self
|
| 63 |
+
|
| 64 |
+
def __next__(self) -> Token:
|
| 65 |
+
"""Gets the next Token from the source.
|
| 66 |
+
|
| 67 |
+
Raises:
|
| 68 |
+
StopIteration: The end of the source has been reached.
|
| 69 |
+
LexerException: Unexpected character, less than operator is
|
| 70 |
+
not allowed.
|
| 71 |
+
LexerException: Unexpected character, greater than operator
|
| 72 |
+
is not allowed.
|
| 73 |
+
LexerException: Invalid character: <code>.
|
| 74 |
+
LexerException: Invalid coefficient, unexpected digit after
|
| 75 |
+
0: <code>.
|
| 76 |
+
LexerException: Invalid coefficient, expected digit but
|
| 77 |
+
got: <code>.
|
| 78 |
+
|
| 79 |
+
Returns:
|
| 80 |
+
Token: The next token from the source.
|
| 81 |
+
"""
|
| 82 |
+
last_token = self._token
|
| 83 |
+
next_token = self._next_token()
|
| 84 |
+
|
| 85 |
+
if last_token.kind == TokenKind.EOF and next_token.kind == TokenKind.EOF:
|
| 86 |
+
if last_token.prev_token is None:
|
| 87 |
+
raise LexerException(
|
| 88 |
+
self._source,
|
| 89 |
+
last_token.location,
|
| 90 |
+
"Crude modification of the token chain is detected",
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
if last_token.prev_token.kind == TokenKind.EOF:
|
| 94 |
+
raise StopIteration
|
| 95 |
+
|
| 96 |
+
self._token = next_token
|
| 97 |
+
|
| 98 |
+
return last_token
|
| 99 |
+
|
| 100 |
+
self._token.next_token = next_token
|
| 101 |
+
next_token.prev_token = self._token
|
| 102 |
+
|
| 103 |
+
self._token = next_token
|
| 104 |
+
|
| 105 |
+
return last_token
|
| 106 |
+
|
| 107 |
+
def _create_token(self, kind: TokenKind, start: int, end: int, value: str) -> Token:
|
| 108 |
+
"""Creates a token with the given parameters.
|
| 109 |
+
|
| 110 |
+
A token is created relative to the current state of the Lexer.
|
| 111 |
+
|
| 112 |
+
Args:
|
| 113 |
+
kind (TokenKind): The kind of token.
|
| 114 |
+
start (int): The index of the first character of the token.
|
| 115 |
+
end (int): The index of the first character after the token.
|
| 116 |
+
value (str): The value of the token.
|
| 117 |
+
|
| 118 |
+
Returns:
|
| 119 |
+
Token:
|
| 120 |
+
"""
|
| 121 |
+
location = Location(self._line, 1 + start - self._line_start)
|
| 122 |
+
|
| 123 |
+
return Token(kind, start, end, location, value, self._token)
|
| 124 |
+
|
| 125 |
+
def _read_code(self, position: int) -> int | None:
|
| 126 |
+
"""Reads the character code at the given position in the source.
|
| 127 |
+
|
| 128 |
+
Args:
|
| 129 |
+
position (int): The index of the character to read.
|
| 130 |
+
|
| 131 |
+
Returns:
|
| 132 |
+
int | None: The character code at the given position, or
|
| 133 |
+
None if the position is out of bounds.
|
| 134 |
+
"""
|
| 135 |
+
return ord(self._source[position]) if position < len(self._source) else None
|
| 136 |
+
|
| 137 |
+
def _read_while(self, start: int, predicate: Callable[[int], bool]) -> int:
|
| 138 |
+
"""Reads a sequence of characters from the source starting at
|
| 139 |
+
the given position while the predicate is satisfied.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
start (int): The index of the first character of the token.
|
| 143 |
+
predicate (Callable[[int], bool]): A function that takes a
|
| 144 |
+
character code and returns whether it satisfies the
|
| 145 |
+
predicate.
|
| 146 |
+
|
| 147 |
+
Returns:
|
| 148 |
+
int: The index of the first character after the sequence.
|
| 149 |
+
"""
|
| 150 |
+
position = start
|
| 151 |
+
|
| 152 |
+
while position < len(self._source) and predicate(ord(self._source[position])):
|
| 153 |
+
# print(
|
| 154 |
+
# self._source[position],
|
| 155 |
+
# ord(self._source[position]),
|
| 156 |
+
# is_digit(ord(self._source[position])),
|
| 157 |
+
# )
|
| 158 |
+
position += 1
|
| 159 |
+
|
| 160 |
+
return position
|
| 161 |
+
|
| 162 |
+
def _read_digits(self, start: int, first_code: int | None) -> int:
|
| 163 |
+
"""Reads a sequence of digits from the source starting at the
|
| 164 |
+
given position.
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
start (int): The index of the first character of the token.
|
| 168 |
+
first_code (int | None): The code of the first character of
|
| 169 |
+
the token.
|
| 170 |
+
|
| 171 |
+
Raises:
|
| 172 |
+
LexerException: Unexpected character, expected digit but
|
| 173 |
+
got: <code>.
|
| 174 |
+
|
| 175 |
+
Returns:
|
| 176 |
+
int: The index of the first character after the digits.
|
| 177 |
+
"""
|
| 178 |
+
if not is_digit(first_code): # not <digit>
|
| 179 |
+
raise LexerException(
|
| 180 |
+
self._source,
|
| 181 |
+
Location(self._line, 1 + start - self._line_start),
|
| 182 |
+
f"Unexpected character, expected digit but got: {print_char_code(first_code)}",
|
| 183 |
+
)
|
| 184 |
+
|
| 185 |
+
return self._read_while(start + 1, is_digit)
|
| 186 |
+
|
| 187 |
+
def _read_coefficient(self, start: int, first_code: int) -> Token:
    """Reads a numeric coefficient token (integer, decimal, or
    scientific notation such as ``1.5e-3``) from the source starting
    at the given position.

    The scan walks ``position``/``code`` in lockstep: ``position`` is
    the index being examined, ``code`` the character code at it (or
    None past the end of the source).

    Args:
        start (int): The index of the first character of the token.
        first_code (int): The code of the first character of the
            token.

    Raises:
        LexerException: Invalid coefficient, unexpected digit after
            0: <code> (leading zeros like ``01`` are rejected).
        LexerException: Unexpected character, expected digit but
            got: <code> (raised by ``_read_digits`` when a digit run
            is required, e.g. after ``.`` or an exponent marker).

    Returns:
        Token: The coefficient token; its value is the raw source
        slice, left to the parser to convert with ``float``.
    """
    position, code = start, first_code

    # Integer part. A leading `0` must stand alone (no `01`); any
    # other digit starts a normal digit run. A leading `.` skips the
    # integer part entirely (e.g. `.5`).
    if code == 0x0030:  # `0`
        position += 1
        code = self._read_code(position)

        if is_digit(code):  # <digit>
            raise LexerException(
                self._source,
                Location(self._line, 1 + position - self._line_start),
                f"Invalid coefficient, unexpected digit after 0: {print_char_code(code)}",
            )
    elif code != 0x002E:  # not `.`
        position = self._read_digits(position, code)
        code = self._read_code(position)

    # Fractional part: `.` must be followed by at least one digit.
    if code == 0x002E:  # `.`
        position += 1
        code = self._read_code(position)

        position = self._read_digits(position, code)
        code = self._read_code(position)

    # Exponent: `E`/`e`, optional sign, then at least one digit.
    if code in (0x0045, 0x0065):  # `E` | `e`
        position += 1
        code = self._read_code(position)

        if code in (0x002B, 0x002D):  # `+` | `-`
            position += 1
            code = self._read_code(position)

        position = self._read_digits(position, code)

    return self._create_token(
        TokenKind.COEFFICIENT, start, position, self._source[start:position]
    )
|
| 242 |
+
|
| 243 |
+
def _read_variable(self, start: int) -> Token:
    """Lex a variable name beginning at ``start``.

    The first character has already been validated by the caller;
    this consumes the remaining identifier characters.

    Args:
        start (int): The index of the first character of the token.

    Returns:
        Token: The variable token.
    """
    end = self._read_while(start + 1, is_variable_continue)
    name = self._source[start:end]

    return self._create_token(TokenKind.VARIABLE, start, end, name)
|
| 258 |
+
|
| 259 |
+
def _next_token(self) -> Token:
    """Gets the next token from the source, starting right after the
    previously emitted token (``self._token.end``).

    Whitespace and line terminators are skipped (line/column
    bookkeeping is updated as they pass); punctuators are lexed
    inline; coefficients and variables are delegated to the
    dedicated helpers. When the end of the source is reached an EOF
    token is returned.

    Raises:
        LexerException: Unexpected character, less than operator is
            not allowed (a bare ``<`` — only ``<=`` is legal).
        LexerException: Unexpected character, greater than operator
            is not allowed (a bare ``>`` — only ``>=`` is legal).
        LexerException: Invalid character: <code>.
        LexerException: Invalid coefficient, unexpected digit after
            0: <code>.
        LexerException: Invalid coefficient, expected digit but
            got: <code>.

    Returns:
        Token: The next token from the source.
    """
    position = self._token.end

    while position < len(self._source):
        char = self._source[position]
        code = ord(char)

        match code:
            # Ignored characters: unicode BOM, whitespace, line
            # terminators. Newlines advance the line counter so
            # Locations stay accurate.
            case 0xFEFF | 0x0009 | 0x0020:  # <BOM> | `\t` | <space>
                position += 1

                continue
            case 0x000A:  # `\n`
                position += 1

                self._line += 1
                self._line_start = position

                continue
            case 0x000D:  # `\r`
                # A `\r\n` pair counts as a single line terminator.
                position += (
                    2 if self._read_code(position + 1) == 0x000A else 1
                )  # `\r\n` | `\r`

                self._line += 1
                self._line_start = position

                continue
            # Single-character tokens: binary plus/minus,
            # multiplication, relational operators, comma.
            case 0x002B | 0x002D | 0x002A:  # `+` | `-` | `*`
                return self._create_token(
                    TokenKind(char), position, position + 1, char
                )
            case 0x003D:  # `=` — both `=` and `==` map to EQ
                if self._read_code(position + 1) == 0x003D:
                    return self._create_token(
                        TokenKind.EQ, position, position + 2, "=="
                    )

                return self._create_token(
                    TokenKind.EQ, position, position + 1, char
                )
            case 0x003C:  # `<` — only legal as part of `<=`
                if self._read_code(position + 1) == 0x003D:  # `=`
                    return self._create_token(
                        TokenKind.LEQ, position, position + 2, "<="
                    )

                raise LexerException(
                    self._source,
                    Location(self._line, 1 + position - self._line_start),
                    "Unexpected character, less than operator is not allowed",
                )
            case 0x003E:  # `>` — only legal as part of `>=`
                if self._read_code(position + 1) == 0x003D:  # `=`
                    return self._create_token(
                        TokenKind.GEQ, position, position + 2, ">="
                    )

                raise LexerException(
                    self._source,
                    Location(self._line, 1 + position - self._line_start),
                    "Unexpected character, greater than operator is not allowed",
                )
            case 0x2264:  # `≤` — unicode alias for `<=`
                return self._create_token(
                    TokenKind.LEQ, position, position + 1, char
                )
            case 0x2265:  # `≥` — unicode alias for `>=`
                return self._create_token(
                    TokenKind.GEQ, position, position + 1, char
                )
            case 0x002C:  # `,` — equation separator
                return self._create_token(
                    TokenKind.COMMA, position, position + 1, char
                )

        # Multi-character tokens: coefficient or variable.
        if is_coefficient_start(code):  # <digit> | `.`
            return self._read_coefficient(position, code)
        if is_variable_start(code):  # <alpha> | `_`
            return self._read_variable(position)

        raise LexerException(
            self._source,
            Location(self._line, 1 + position - self._line_start),
            f"Invalid character: {print_char_code(code)}",
        )

    return self._create_token(TokenKind.EOF, position, position, "")
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
__all__ = ("Lexer",)
|
ast_parser/linter.py
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ast_parser.errors import LinterException
|
| 2 |
+
from ast_parser.token import (
|
| 3 |
+
Token,
|
| 4 |
+
TokenKind,
|
| 5 |
+
is_binary_operator,
|
| 6 |
+
is_relational_operator,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Linter:
    """Linter enables real-time validation of the token chain.

    Fix: in ``_lint_comma`` the binary-operator branch passed
    ``token.prev_token`` (a Token) where ``LinterException`` expects a
    ``Location``; it now passes ``token.prev_token.location`` like
    every other call site.

    Args:
        source (str): The source string being tokenized.
    """

    # The source string being tokenized; used in exception messages.
    _source: str

    # True once a variable has been seen in the current term.
    _variable_provided: bool
    # True once a relational operator has been seen in the current equation.
    _relation_provided: bool

    def __init__(self, source: str) -> None:
        self._source = source

        self._variable_provided = False
        self._relation_provided = False

    def lint(self, token: Token) -> None:
        """Checks the token for integrity, validity and relevance.
        Raises a LinterException if the token is invalid or
        unexpected, and updates the per-term/per-equation state flags.

        Args:
            token (Token): The token to validate.

        Raises:
            LinterException: Crude modification of the token chain is
                detected.
            LinterException: Unexpected binary operator, term missed.
            LinterException: Equation must contain only one relational
                operator.
            LinterException: Term must contain no more than one
                variable.
            LinterException: Unexpected comma at the beginning of the
                equation.
            LinterException: Unexpected comma, equation missed.
            LinterException: Unexpected comma at the end of the
                equation.
            LinterException: Equation must contain a relational
                operator.
        """
        # Integrity check: every non-SOF token must be properly linked
        # into the doubly linked chain built by the lexer.
        if token.kind != TokenKind.SOF and (
            token.prev_token is None or token.prev_token.next_token is not token
        ):
            raise LinterException(
                self._source,
                token.location,
                "Crude modification of the token chain is detected",
            )

        if token.kind == TokenKind.EOF:
            self._lint_eof(token)

        if is_binary_operator(token):
            self._lint_binary_operator(token)

            self._variable_provided = False

        if token.kind == TokenKind.MUL:
            self._lint_multiplication_operator(token)

        if is_relational_operator(token):
            self._lint_relational_operator(token)

            self._variable_provided = False
            self._relation_provided = True

        if token.kind == TokenKind.VARIABLE:
            self._lint_variable(token)

            self._variable_provided = True

        if token.kind == TokenKind.COMMA:
            self._lint_comma(token)

            # A comma closes the equation; reset per-equation state.
            self._variable_provided = False
            self._relation_provided = False

    def _lint_eof(self, token: Token) -> None:
        """Lint the EOF Token.

        Args:
            token (Token): The EOF Token.

        Raises:
            LinterException: Equation must contain a relational
                operator.
            LinterException: Unexpected binary operator at the end of
                the equation.
            LinterException: Unexpected EOF, multiplier missed.
            LinterException: Unexpected EOF, right side of the
                equation is missed.
            LinterException: Unexpected comma at the end of the
                equation.
        """
        # An empty source (EOF right after SOF) is allowed; a
        # non-empty equation must have had a relational operator.
        if token.prev_token.kind != TokenKind.SOF and not self._relation_provided:
            raise LinterException(
                self._source,
                token.location,
                "Equation must contain a relational operator",
            )
        if is_binary_operator(token.prev_token):
            raise LinterException(
                self._source,
                token.prev_token.location,
                "Unexpected binary operator at the end of the equation",
            )
        if token.prev_token.kind == TokenKind.MUL:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected EOF, multiplier missed",
            )
        if is_relational_operator(token.prev_token):
            raise LinterException(
                self._source,
                token.location,
                "Unexpected EOF, right side of the equation is missed",
            )
        if token.prev_token.kind == TokenKind.COMMA:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected comma at the end of the equation",
            )

    def _lint_binary_operator(self, token: Token) -> None:
        """Lint the binary operator Token.

        Args:
            token (Token): The binary operator Token.

        Raises:
            LinterException: Unexpected binary operator, term missed.
            LinterException: Unexpected binary operator, multiplier
                missed.
        """
        if is_binary_operator(token.prev_token):
            raise LinterException(
                self._source,
                token.location,
                "Unexpected binary operator, term missed",
            )
        if token.prev_token.kind == TokenKind.MUL:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected binary operator, multiplier missed",
            )

    def _lint_multiplication_operator(self, token: Token) -> None:
        """Lint the multiplication operator Token.

        Args:
            token (Token): The multiplication operator Token.

        Raises:
            LinterException: Unexpected multiplication operator, term
                missed.
            LinterException: Unexpected multiplication operator,
                multiplier missed.
        """
        # `*` must follow a complete operand, never an operator,
        # separator, or the start of an equation.
        if (
            is_binary_operator(token.prev_token)
            or is_relational_operator(token.prev_token)
            or token.prev_token.kind == TokenKind.COMMA
        ):
            raise LinterException(
                self._source,
                token.location,
                "Unexpected multiplication operator, term missed",
            )
        if token.prev_token.kind == TokenKind.MUL:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected multiplication operator, multiplier missed",
            )

    def _lint_relational_operator(self, token: Token) -> None:
        """Lint the relational operator Token.

        Args:
            token (Token): The relational operator Token.

        Raises:
            LinterException: Unexpected relational operator, left side
                of the equation is missed.
            LinterException: Equation must contain only one relational
                operator.
            LinterException: Unexpected binary operator, term missed.
            LinterException: Unexpected relational operator,
                multiplier missed.
        """
        if token.prev_token.kind in (TokenKind.SOF, TokenKind.COMMA):
            raise LinterException(
                self._source,
                token.location,
                "Unexpected relational operator, left side of the equation is missed",
            )
        if self._relation_provided:
            raise LinterException(
                self._source,
                token.location,
                "Equation must contain only one relational operator",
            )
        if is_binary_operator(token.prev_token):
            raise LinterException(
                self._source,
                token.location,
                "Unexpected binary operator, term missed",
            )
        if token.prev_token.kind == TokenKind.MUL:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected relational operator, multiplier missed",
            )

    def _lint_variable(self, token: Token) -> None:
        """Lint the variable Token.

        Args:
            token (Token): The variable Token.

        Raises:
            LinterException: Term must contain no more than one
                variable.
        """
        if self._variable_provided:
            raise LinterException(
                self._source,
                token.location,
                "Term must contain no more than one variable",
            )

    def _lint_comma(self, token: Token) -> None:
        """Lint the comma Token.

        Args:
            token (Token): The comma Token.

        Raises:
            LinterException: Unexpected comma at the beginning of the
                equation.
            LinterException: Equation must contain a relational
                operator.
            LinterException: Unexpected binary operator at the end of
                the equation.
            LinterException: Unexpected comma, multiplier missed.
            LinterException: Unexpected comma, right side of the
                equation is missed.
            LinterException: Unexpected comma, equation missed.
        """
        if token.prev_token.kind == TokenKind.SOF:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected comma at the beginning of the equation",
            )
        if not self._relation_provided:
            raise LinterException(
                self._source,
                token.location,
                "Equation must contain a relational operator",
            )
        if is_binary_operator(token.prev_token):
            # FIX: pass the Location, not the Token itself.
            raise LinterException(
                self._source,
                token.prev_token.location,
                "Unexpected binary operator at the end of the equation",
            )
        if token.prev_token.kind == TokenKind.MUL:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected comma, multiplier missed",
            )
        if is_relational_operator(token.prev_token):
            raise LinterException(
                self._source,
                token.location,
                "Unexpected comma, right side of the equation is missed",
            )
        if token.prev_token.kind == TokenKind.COMMA:
            raise LinterException(
                self._source,
                token.location,
                "Unexpected comma, equation missed",
            )
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
__all__ = ("Linter",)
|
ast_parser/parser.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from enum import Enum
|
| 5 |
+
|
| 6 |
+
from ast_parser.lexer import Lexer
|
| 7 |
+
from ast_parser.linter import Linter
|
| 8 |
+
from ast_parser.token import Token, TokenKind
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class EquationKind(str, Enum):
    """Kind of relationship between the left and right parts of the
    Equation.

    The member values mirror the relational ``TokenKind`` values so a
    token kind can be converted directly with ``EquationKind(value)``.
    """

    EQ = "="
    """Equality operator."""
    LEQ = "<="
    """Less than or equal to operator."""
    GEQ = ">="
    """Greater than or equal to operator."""
"""Greater than or equal to operator."""
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@dataclass
class Equation:
    """A single parsed equation: ``sum(coeff * var) <kind> bound``."""

    kind: EquationKind
    """Kind of relationship between the left and right parts of the
    Equation.
    """
    variables: dict[str, float]
    """Equation variables. The names are the keys, the coefficients are
    the values (already consolidated: repeated variables are summed).
    """
    bound: float
    """Bound of the Equation. Must be non-negative."""
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@dataclass
class EquationAccumulator:
    """The EquationAccumulator collects equation Tokens: variables,
    coefficients, and bounds. It holds the partially parsed equation
    plus the term currently being read.
    """

    kind: EquationKind | None = field(default=None)
    """Kind of relationship between the left and right parts of the
    Equation. None until the relational operator has been parsed.
    """
    variables: dict[str, float] = field(default_factory=dict)
    """Equation variables. The names are the keys, the coefficients are
    the values.
    """
    bound: float = field(default=0.0)
    """Bound of the Equation. Must be non-negative."""

    coefficient: float | None = field(default=None)
    """Coefficient of the current (in-progress) term, or None when no
    term is open."""
    variable: str | None = field(default=None)
    """Name of the current term's variable, or None for a bare
    coefficient."""
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class Parser:
|
| 63 |
+
"""Source Parser. It parses the source and returns a list of
|
| 64 |
+
Equation objects found in the source.
|
| 65 |
+
|
| 66 |
+
Args:
|
| 67 |
+
source (str): The source to parse.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
_lexer: Lexer
|
| 71 |
+
"""Lexer of the source."""
|
| 72 |
+
_linter: Linter
|
| 73 |
+
"""Linter of the source."""
|
| 74 |
+
|
| 75 |
+
_accumulator: EquationAccumulator
|
| 76 |
+
"""Equation accumulator."""
|
| 77 |
+
|
| 78 |
+
def __init__(self, source: str) -> None:
|
| 79 |
+
self._lexer = Lexer(source)
|
| 80 |
+
self._linter = Linter(source)
|
| 81 |
+
|
| 82 |
+
self._accumulator = EquationAccumulator()
|
| 83 |
+
|
| 84 |
+
def __iter__(self) -> Parser:
|
| 85 |
+
"""Gets an iterator over the Equations in the source.
|
| 86 |
+
|
| 87 |
+
Returns:
|
| 88 |
+
Parser: _description_
|
| 89 |
+
"""
|
| 90 |
+
return self
|
| 91 |
+
|
| 92 |
+
def __next__(self) -> Equation:
|
| 93 |
+
"""Gets the next Equation in the source.
|
| 94 |
+
|
| 95 |
+
Accumulation, processing and validation of Equations in
|
| 96 |
+
real-time. The source is parsed sequentially, token by token.
|
| 97 |
+
|
| 98 |
+
Raises:
|
| 99 |
+
StopIteration: The end of the source has been reached.
|
| 100 |
+
LexerException: Unexpected character, less than operator is
|
| 101 |
+
not allowed.
|
| 102 |
+
LexerException: Unexpected character, greater than operator
|
| 103 |
+
is not allowed.
|
| 104 |
+
LexerException: Invalid character: <code>.
|
| 105 |
+
LexerException: Invalid coefficient, unexpected digit after
|
| 106 |
+
0: <code>.
|
| 107 |
+
LexerException: Invalid coefficient, expected digit but
|
| 108 |
+
got: <code>.
|
| 109 |
+
LinterException: Unexpected binary operator, term missed.
|
| 110 |
+
LinterException: Equation must contain only one relational
|
| 111 |
+
operator.
|
| 112 |
+
LinterException: Term must contain no more than one
|
| 113 |
+
variable.
|
| 114 |
+
LinterException: Unexpected comma at the beginning of the
|
| 115 |
+
equation.
|
| 116 |
+
LinterException: Unexpected comma, equation missed.
|
| 117 |
+
LinterException: Unexpected comma at the end of the
|
| 118 |
+
equation.
|
| 119 |
+
LinterException: Equation must contain a relational
|
| 120 |
+
operator.
|
| 121 |
+
|
| 122 |
+
Returns:
|
| 123 |
+
Equation: The next Equation from the source.
|
| 124 |
+
"""
|
| 125 |
+
while True:
|
| 126 |
+
token = next(self._lexer)
|
| 127 |
+
|
| 128 |
+
self._linter.lint(token)
|
| 129 |
+
|
| 130 |
+
match token.kind:
|
| 131 |
+
case TokenKind.SOF | TokenKind.MUL:
|
| 132 |
+
continue
|
| 133 |
+
case TokenKind.EOF:
|
| 134 |
+
if token.prev_token.kind == TokenKind.SOF:
|
| 135 |
+
raise StopIteration
|
| 136 |
+
|
| 137 |
+
return self._derive_equation()
|
| 138 |
+
case TokenKind.ADD:
|
| 139 |
+
self._parse_addition_operator()
|
| 140 |
+
|
| 141 |
+
continue
|
| 142 |
+
case TokenKind.SUB:
|
| 143 |
+
self._parse_subtraction_operator()
|
| 144 |
+
|
| 145 |
+
continue
|
| 146 |
+
case TokenKind.EQ | TokenKind.LEQ | TokenKind.GEQ:
|
| 147 |
+
self._parse_relational_operator(token)
|
| 148 |
+
|
| 149 |
+
continue
|
| 150 |
+
case TokenKind.COEFFICIENT:
|
| 151 |
+
self._parse_coefficient(token)
|
| 152 |
+
|
| 153 |
+
continue
|
| 154 |
+
case TokenKind.VARIABLE:
|
| 155 |
+
self._parse_variable(token)
|
| 156 |
+
|
| 157 |
+
continue
|
| 158 |
+
case TokenKind.COMMA:
|
| 159 |
+
return self._derive_equation()
|
| 160 |
+
|
| 161 |
+
def _extend_variables(self) -> None:
|
| 162 |
+
"""Extend the variables with the current variable and
|
| 163 |
+
coefficient.
|
| 164 |
+
"""
|
| 165 |
+
if self._accumulator.coefficient:
|
| 166 |
+
if self._accumulator.kind:
|
| 167 |
+
self._accumulator.coefficient *= -1.0
|
| 168 |
+
if self._accumulator.variable:
|
| 169 |
+
if self._accumulator.variable in self._accumulator.variables:
|
| 170 |
+
self._accumulator.variables[
|
| 171 |
+
self._accumulator.variable
|
| 172 |
+
] += self._accumulator.coefficient
|
| 173 |
+
else:
|
| 174 |
+
self._accumulator.variables[
|
| 175 |
+
self._accumulator.variable
|
| 176 |
+
] = self._accumulator.coefficient
|
| 177 |
+
else:
|
| 178 |
+
self._accumulator.bound -= self._accumulator.coefficient
|
| 179 |
+
|
| 180 |
+
def _derive_equation(self) -> Equation:
|
| 181 |
+
"""Derive the Equation from the EquationAccumulator.
|
| 182 |
+
|
| 183 |
+
Returns:
|
| 184 |
+
Equation: The derived Equation.
|
| 185 |
+
"""
|
| 186 |
+
self._extend_variables()
|
| 187 |
+
|
| 188 |
+
equation = Equation(
|
| 189 |
+
self._accumulator.kind, self._accumulator.variables, self._accumulator.bound
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
self._accumulator = EquationAccumulator()
|
| 193 |
+
|
| 194 |
+
return equation
|
| 195 |
+
|
| 196 |
+
def _parse_addition_operator(self) -> None:
|
| 197 |
+
"""Parse the addition operator Token."""
|
| 198 |
+
self._extend_variables()
|
| 199 |
+
|
| 200 |
+
self._accumulator.variable = None
|
| 201 |
+
self._accumulator.coefficient = 1.0
|
| 202 |
+
|
| 203 |
+
def _parse_subtraction_operator(self) -> None:
|
| 204 |
+
"""Parse the subtraction operator Token."""
|
| 205 |
+
self._extend_variables()
|
| 206 |
+
|
| 207 |
+
self._accumulator.variable = None
|
| 208 |
+
self._accumulator.coefficient = -1.0
|
| 209 |
+
|
| 210 |
+
def _parse_relational_operator(self, token: Token) -> None:
|
| 211 |
+
"""Parse the relational operator Token.
|
| 212 |
+
|
| 213 |
+
Args:
|
| 214 |
+
token (Token): The relational operator Token.
|
| 215 |
+
"""
|
| 216 |
+
self._extend_variables()
|
| 217 |
+
|
| 218 |
+
self._accumulator.kind = EquationKind(token.kind.value)
|
| 219 |
+
self._accumulator.variable = None
|
| 220 |
+
self._accumulator.coefficient = None
|
| 221 |
+
|
| 222 |
+
def _parse_coefficient(self, token: Token) -> None:
|
| 223 |
+
"""Parse the coefficient Token.
|
| 224 |
+
|
| 225 |
+
Args:
|
| 226 |
+
token (Token): The coefficient Token.
|
| 227 |
+
"""
|
| 228 |
+
if not self._accumulator.coefficient:
|
| 229 |
+
self._accumulator.coefficient = 1.0
|
| 230 |
+
|
| 231 |
+
self._accumulator.coefficient *= float(token.value)
|
| 232 |
+
|
| 233 |
+
def _parse_variable(self, token: Token) -> None:
|
| 234 |
+
"""Parse the variable Token.
|
| 235 |
+
|
| 236 |
+
Args:
|
| 237 |
+
token (Token): The variable Token.
|
| 238 |
+
"""
|
| 239 |
+
if not self._accumulator.coefficient:
|
| 240 |
+
self._accumulator.coefficient = 1.0
|
| 241 |
+
|
| 242 |
+
self._accumulator.variable = token.value
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
__all__ = ("EquationKind", "Equation", "Parser")
|
ast_parser/token.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from enum import Enum
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TokenKind(str, Enum):
|
| 8 |
+
"""Kind of the Token."""
|
| 9 |
+
|
| 10 |
+
SOF = "<SOF>"
|
| 11 |
+
"""Start of file."""
|
| 12 |
+
EOF = "<EOF>"
|
| 13 |
+
"""End of file."""
|
| 14 |
+
|
| 15 |
+
ADD = "+"
|
| 16 |
+
"""Addition operator."""
|
| 17 |
+
SUB = "-"
|
| 18 |
+
"""Subtraction operator."""
|
| 19 |
+
MUL = "*"
|
| 20 |
+
"""Multiplication operator."""
|
| 21 |
+
|
| 22 |
+
EQ = "="
|
| 23 |
+
"""Equality operator."""
|
| 24 |
+
LEQ = "<="
|
| 25 |
+
"""Less than or equal to operator."""
|
| 26 |
+
GEQ = ">="
|
| 27 |
+
"""Greater than or equal to operator."""
|
| 28 |
+
|
| 29 |
+
COEFFICIENT = "Coefficient"
|
| 30 |
+
"""Coefficient of a variable."""
|
| 31 |
+
VARIABLE = "Variable"
|
| 32 |
+
"""Variable name."""
|
| 33 |
+
|
| 34 |
+
COMMA = ","
|
| 35 |
+
"""Comma."""
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@dataclass
class Location:
    """Location of the Token in the source.

    A human-friendly alternative to the raw start/end indices: line
    and column are 1-based, matching what an editor displays.
    """

    # 1-based line number of the token in the source.
    line: int
    # 1-based column number of the token in the source.
    column: int
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@dataclass
class Token:
    """Token of the source code.

    Tokens form a doubly linked chain through ``prev_token`` /
    ``next_token``; the link fields and raw indices are excluded from
    ``repr`` to keep debug output readable (and acyclic).
    """

    # Kind of the token.
    kind: TokenKind

    # Index of the first character of the token.
    start: int = field(repr=False)
    # Index of the first character after the token.
    end: int = field(repr=False)
    # Human-friendly Location of the token in the source.
    location: Location

    # Source text of the token.
    value: str

    # Neighbouring tokens in the chain.
    prev_token: Token | None = field(repr=False, default=None)
    next_token: Token | None = field(repr=False, default=None)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def is_binary_operator(token: Token) -> bool:
    """Check whether *token* is an algebraic binary operator.

    Only addition and subtraction count as binary operators here.

    Args:
        token (Token): The Token to inspect.

    Returns:
        bool: True for ADD or SUB tokens, False otherwise.
    """
    kind = token.kind
    return kind == TokenKind.ADD or kind == TokenKind.SUB
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def is_relational_operator(token: Token) -> bool:
    """Check whether *token* is a relational operator.

    Args:
        token (Token): The Token to inspect.

    Returns:
        bool: True for EQ, LEQ or GEQ tokens, False otherwise.
    """
    relational_kinds = {TokenKind.EQ, TokenKind.LEQ, TokenKind.GEQ}
    return token.kind in relational_kinds
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# Public interface of this module.
__all__ = (
    "TokenKind",
    "Location",
    "Token",
    "is_binary_operator",
    "is_relational_operator",
)
|
main.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Command-line entry point: read a linear program and hand it to the solver.

Reads a comma-separated list of equations from stdin, normalizes every
``>=`` constraint with a negative bound into an equivalent ``<=`` one,
separates objective functions from constraints, and runs the Solver.
"""

from ast_parser import Parser, EquationKind
from algorithm import solver


def _read_equations() -> tuple:
    """Prompt the user and parse the comma-separated equation list."""
    print("Enter all equations, following each with a comma. The last equation should be without a comma")
    print("As an example 'Z = 5x_1 + 4x_2', '6x_1 + 4x_2 <= 24'")
    return tuple(Parser(input()))


def _normalize(equations) -> None:
    """Rewrite GEQ constraints with a negative bound as LEQ constraints.

    Mutates the equations in place. After normalization, every equation
    must be EQ or LEQ with a non-negative bound.

    Raises:
        ValueError: If a remaining equation is still a GEQ constraint, or
            has a negative bound.
    """
    for equation in equations:
        if equation.kind == EquationKind.GEQ and equation.bound < 0.0:
            # Multiply both sides by -1: "ax >= -b" becomes "-ax <= b".
            for variable in equation.variables:
                equation.variables[variable] *= -1.0
            equation.bound *= -1.0
            equation.kind = EquationKind.LEQ
            continue

        if equation.kind == EquationKind.GEQ:
            raise ValueError("Equation kind must be either EQ or LEQ")
        if equation.bound < 0.0:
            raise ValueError("Equation bound must be non-negative")


def main() -> None:
    """Read, normalize, partition and solve the linear program."""
    equations = _read_equations()
    _normalize(equations)

    # demand maps each variable name to the indices of the equations in
    # which it appears with a non-zero coefficient.
    demand: dict[str, list[int]] = {}
    for i, equation in enumerate(equations):
        for variable, coefficient in equation.variables.items():
            if coefficient != 0.0:
                demand.setdefault(variable, []).append(i)

    # A variable appearing in exactly one equation is treated as an
    # objective variable (e.g. "Z" in "Z = 5x_1 + 4x_2").
    objective_variables = tuple(
        variable for variable, rows in demand.items() if len(rows) == 1
    )

    # Keep only the objective candidates that are equalities.
    objective_functions = tuple(
        function
        for function in (
            equations[demand[variable][0]] for variable in objective_variables
        )
        if function.kind == EquationKind.EQ
    )

    # If the counts differ, some single-use variable sits in an
    # inequality, which cannot be an objective function.
    if len(objective_variables) != len(objective_functions):
        raise ValueError("Objective functions must be equalities")

    # Everything that is not an objective function is a constraint.
    constraints = tuple(
        equation for equation in equations if equation not in objective_functions
    )

    solver.Solver(objective_functions, constraints)


if __name__ == "__main__":
    main()
|