index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
31,337
|
moremoban/moban-jinja2-github
|
refs/heads/master
|
/moban_jinja2_github/__init__.py
|
# flake8: noqa
import moban_jinja2_github.issues
from moban_jinja2_github._version import __author__, __version__
from moban.plugins.jinja2.extensions import jinja_global
from moban_jinja2_github.contributors import get_contributors
jinja_global("moban_jinja2_contributors", get_contributors)
|
{"/moban_jinja2_github/__init__.py": ["/moban_jinja2_github/_version.py", "/moban_jinja2_github/contributors.py"], "/tests/test_contributors.py": ["/moban_jinja2_github/contributors.py"]}
|
31,338
|
moremoban/moban-jinja2-github
|
refs/heads/master
|
/moban_jinja2_github/contributors.py
|
from gease.exceptions import UrlNotFound
from gease.contributors import EndPoint
def get_contributors(user, repo, exclude_contributors=()):
    """Fetch the GitHub contributor list of *user*/*repo*.

    Entries lacking a "login" key, or whose login is listed in
    *exclude_contributors*, are dropped.  A repository whose URL cannot
    be resolved yields an empty list instead of raising.
    """
    endpoint = EndPoint(user, repo)
    try:
        selected = []
        for detail in endpoint.get_all_contributors():
            if "login" not in detail:
                continue
            if detail["login"] in exclude_contributors:
                continue
            selected.append(detail)
        return selected
    except UrlNotFound:
        return []
|
{"/moban_jinja2_github/__init__.py": ["/moban_jinja2_github/_version.py", "/moban_jinja2_github/contributors.py"], "/tests/test_contributors.py": ["/moban_jinja2_github/contributors.py"]}
|
31,339
|
moremoban/moban-jinja2-github
|
refs/heads/master
|
/tests/test_contributors.py
|
from mock import MagicMock, patch
from nose.tools import eq_
from gease.exceptions import UrlNotFound
@patch("moban_jinja2_github.contributors.EndPoint")
def test_get_contributors(fake_end_point):
    """get_contributors drops excluded logins and keeps the rest."""
    sample_contributors = [
        {"login": "author"},
        {"login": "ok", "url": "contributors"},
    ]
    endpoint = MagicMock()
    endpoint.get_all_contributors = MagicMock(return_value=sample_contributors)
    fake_end_point.return_value = endpoint
    from moban_jinja2_github.contributors import get_contributors

    result = get_contributors("user", "repo", ["author"])
    eq_(list(result), [{"login": "ok", "url": "contributors"}])
@patch("moban_jinja2_github.contributors.EndPoint")
def test_get_non_existent_url(fake_end_point):
    """An unknown repository URL yields an empty contributor list."""
    endpoint = MagicMock()
    endpoint.get_all_contributors = MagicMock(side_effect=UrlNotFound)
    fake_end_point.return_value = endpoint
    from moban_jinja2_github.contributors import get_contributors

    result = get_contributors("user", "repo", ["author"])
    eq_(list(result), [])
|
{"/moban_jinja2_github/__init__.py": ["/moban_jinja2_github/_version.py", "/moban_jinja2_github/contributors.py"], "/tests/test_contributors.py": ["/moban_jinja2_github/contributors.py"]}
|
31,340
|
moremoban/moban-jinja2-github
|
refs/heads/master
|
/moban_jinja2_github/_version.py
|
# Package metadata; re-exported by moban_jinja2_github/__init__.py.
__version__ = "0.0.4"
__author__ = "chfw"
|
{"/moban_jinja2_github/__init__.py": ["/moban_jinja2_github/_version.py", "/moban_jinja2_github/contributors.py"], "/tests/test_contributors.py": ["/moban_jinja2_github/contributors.py"]}
|
31,342
|
cuboids/rl-assignments
|
refs/heads/main
|
/RepeatGames.py
|
from time import perf_counter
from HexBoard import Agent, Human, HexBoard
def play_hex(ngames=None, player1=None, player2=None, board_size=3,
             show_midgame=False, show_endgame=True, seed=0, analysis=False):
    """Play a series of Hex games between two agents and collect results.

    Args:
        ngames: (int > 0) number of games to play; prompted for when None.
        player1: (Agent) the first (blue) agent; a fresh Agent named
            "Alice" is created when None.
        player2: (Agent) the second (red) agent; a fresh Agent named
            "Bob" is created when None.
        board_size: (int > 1) the size of the hex board.
        show_midgame: print the board after every move.
        show_endgame: print the final position of every game.
        seed: base RNG seed; change it to get different games.
        analysis: also record each agent's per-move analysis nodes.

    Returns:
        dict with keys 'p1', 'p2' and 'games';
        games_result['games'][str(i)] holds endstate, winner, turns,
        elapsed_time and nodes of game i (e.g. games_result['games']['11']).
    """
    # BUG FIX: `player1=Agent(name="Alice")` (and player2) as signature
    # defaults were constructed once at import time and shared between
    # calls, so Trueskill ratings/history leaked across separate runs.
    if player1 is None:
        player1 = Agent(name="Alice")
    if player2 is None:
        player2 = Agent(name="Bob")
    if ngames is None:
        ngames = int(input("Number of games: "))
    print(f'{player1.name} (blue) and {player2.name} (red) will play on {board_size}x{board_size}', end=' ')
    print(f'Hex board for {ngames} round{"" if ngames == 1 else "s"}.')
    print(f'{player1.name} and {player2.name} will move first in odd-numbered and even-numbered games respectively.')
    print()
    # Initialize game results dictionary
    games_result = {'p1': player1, 'p2': player2, 'games': {}}
    player1.color, player2.color = HexBoard.BLUE, HexBoard.RED  # Let agents know their colors
    player1.seed = player2.seed = seed
    players = (player1, player2)  # This will help us when players alternate turns
    for game_i in range(1, ngames + 1):
        game = HexBoard(board_size)
        player1.game = player2.game = game_i
        nodes1, nodes2 = [], []
        nodes = (nodes1, nodes2)
        n_turns = 0
        print(f'Game {game_i} starts. {player1.name if game_i % 2 else player2.name} moves first.')
        time_start = perf_counter()  # Time each game
        # Condition to end: finish (win/lose) or n_turns > board positions (prevent infinite loop)
        while not game.is_game_over() and n_turns < board_size ** 2:
            n_turns += 1
            turn = int(n_turns % 2 != game_i % 2)  # Alternate each turn and alternate first move each game
            player1.n_turns = player2.n_turns = n_turns
            if analysis:
                nodes[turn].append(players[turn].analyse_position(game))
            move = players[turn].make_move(game)
            game.place(move, players[turn].color)
            if show_midgame:
                print(f'Game {game_i} - Number of moves {n_turns}')
                game.print()
        time_stop = perf_counter()  # Stop the count
        time_elapsed = time_stop - time_start
        # Print game result
        print(f'Game {game_i} ends in {n_turns} turns. Elapsed time {time_elapsed}.')
        if show_endgame:
            print('End state:')
            game.print()
        if game.check_win(1):
            print(f'Game {game_i} won by {player1.name} (blue).')
            winner = player1.name
            player1.rate_1vs1(player2)  # Update Trueskill rating
        elif game.check_win(2):
            print(f'Game {game_i} won by {player2.name} (red).')
            winner = player2.name
            player2.rate_1vs1(player1)
        else:
            print('NO WINNER! Game terminated by user.')
            winner = None
        print('')
        result_dict = {str(game_i): {'endstate': game, 'winner': winner, 'turns': n_turns,
                                     'elapsed_time': time_elapsed, 'nodes': nodes}}
        games_result['games'].update(result_dict)
    print()
    print(f'[Report] All {ngames} games completed.')
    # Only the values are needed; the old loop iterated .items() and dropped the key.
    winners = [v['winner'] for v in games_result['games'].values()]
    print(f'{player1.name} wins: {winners.count(player1.name)}, {player2.name} wins: {winners.count(player2.name)} \n')
    return games_result
# Demo driver: flip `example` to True to play two games between a Human
# and a minimax Agent on a 5x5 board, then plot the rating history.
example = False
if example:
    Bart = Human(name="Bart")
    Alice = Agent(name='Alice', searchby='minimax')
    play_hex(2, Alice, Bart, 5, show_endgame=True)
    Alice.plot_rating_history(Bart)
|
{"/RepeatGames.py": ["/HexBoard.py"]}
|
31,343
|
cuboids/rl-assignments
|
refs/heads/main
|
/HexBoard.py
|
import copy
import hashlib
import math
import names # For giving random names to agents. See https://pypi.org/project/names/
import numpy as np
import random
import string
import time
from itertools import permutations
from itertools import product # For evalue_fun
from trueskill import Rating, rate_1vs1
import matplotlib.pyplot as plt
# from chooseEval import evaluateScore # TO BE DEPRECIATED
class TranspositionTable:
    """Maps board-state keys to the best known move(s) and search score.

    Keys are produced by the state's convert_key(); each entry stores the
    state, the depth it was searched to, the best move(s) and the score.
    """

    def __init__(self):
        """Initiate an empty transposition table."""
        self.table = {}

    def is_empty(self):
        """Return True when no positions have been stored yet."""
        return not self.table

    def store(self, n):
        """Store (or refresh) the search result of node *n*.

        An existing entry is only overwritten when *n* was searched at
        least as deeply as the stored result.
        """
        # Compute the key once (the old code recomputed it for new entries).
        key = n['state'].convert_key()
        entry = self.table.get(key)
        if entry is not None:  # Board state already exists in TT
            if n['depth'] >= entry['depth']:  # Deeper/equal search supersedes
                entry['depth'] = n['depth']
                entry['bestmove'] = n['move']
                entry['bestmoves'] = n['moves']
                entry['score'] = n['score']
        else:  # Create new TT entry
            self.table[key] = {
                'state': n['state'], 'depth': n['depth'],
                'bestmove': n['move'], 'bestmoves': n['moves'],
                'score': n['score'],
            }

    def lookup(self, n, depth):
        """Look up node *n* for a search at *depth*.

        Returns:
            (hit, score, bestmoves): *hit* is True only when the stored
            entry was searched at least as deeply as *depth* (then *score*
            is usable).  Otherwise *score* is None and *bestmoves* (empty
            tuple on a complete miss) serves move ordering only.
        """
        key = n['state'].convert_key()
        if key not in self.table:  # Transposition not found
            return False, None, ()
        transposition = self.table[key]
        if depth <= transposition['depth']:
            # Stored search is at least as deep: its score is usable.
            return True, transposition['score'], transposition['bestmoves']
        # Shallower than requested: score unusable, moves help ordering.
        return False, None, transposition['bestmoves']

    def count_entry(self):
        """Return the number of stored positions."""
        return len(self.table)
class Agent:
    # Set DEBUG to True if you want to debug.
    # WARNING: this will print way too much.
    DEBUG = False
    # When True, select_move() picks pseudo-randomly (reproducibly seeded)
    # among equally good moves; when False it always takes the first.
    RANDOM_MOVE = True
    # Stable hash for make_seed(): builtin hash() is salted per process.
    Hash = hashlib.sha512
    # NOTE(review): appears unused in the visible code — verify before removing.
    MAX_HASH_PLUS_ONE = 2 ** (Hash().digest_size * 8)
def __init__(self, name=None, depth=3, searchby="random", hyperpars=None):
    """Set up the agent.

    Args:
        name: display name; None picks a random first name, and the
            literal "matrix" becomes "Agent Smith".
        depth: an integer representing the search depth limit.
        searchby: a string indicating the search method.
            Currently supports "random", "minimax", "alphabeta",
            "alphabetaIDTT", and "mcts".
        hyperpars: a dictionary with hyperparameters:
            timelimit: seconds allowed for anytime search ("alphabetaIDTT")
            N: (used in MCTS)
            Cp: (used in MCTS)
    """
    if name is None:
        self.name = names.get_first_name()
    elif name == "matrix":
        self.name = "Agent Smith"
    else:
        self.name = name
    if hyperpars is None:
        hyperpars = {'timelimit': 2, 'N': 250, 'Cp': 2}
    self.depth = depth
    self.game = 0
    self.rating = Rating()
    self.rating_history = [self.rating]
    self.searchby = searchby
    # BUG FIX: a later hard-coded `self.timelimit = 2` used to override
    # the value taken from hyperpars; the hyperparameter now wins.
    self.timelimit = hyperpars['timelimit']
    self.n_turns = 0
    self.color = None
    self.seed = 0
    self.n = None  # last node cached by analyse_position(), used by make_move()
    self.N = hyperpars['N']  # For MCTS
    self.Cp = hyperpars['Cp']  # For MCTS
def make_seed(self):
    """Derive a reproducible RNG seed from name, turn, game and base seed.

    Hashing follows https://stackoverflow.com/a/44556106 so the value is
    stable across interpreter runs (the builtin hash() is salted).
    """
    digest = self.Hash(self.name.encode()).digest()
    name_hash = int.from_bytes(digest, 'big')
    offset = self.n_turns + 1e3 * self.game + self.seed
    return name_hash % 1e7 + offset
def set_color(self, col):
    """Assign a playing color to the agent.

    Args:
        col: HexBoard.BLUE (1) or HexBoard.RED (2); any other value
            is silently ignored.
    """
    if col not in (1, 2):
        return
    self.color = col
def rate_1vs1(self, opponent, opponent_won=False):
    """Update both Trueskill ratings after playing a 1-vs-1 game.

    Args:
        opponent: another agent.
        opponent_won: boolean indicating if the opponent won.
    """
    if not opponent_won:
        self.rating, opponent.rating = rate_1vs1(self.rating, opponent.rating)
    else:
        # BUG FIX: trueskill.rate_1vs1(winner, loser) returns
        # (new_winner, new_loser); the old code assigned the winner's
        # updated rating to self (the loser) and vice versa.
        opponent.rating, self.rating = rate_1vs1(opponent.rating, self.rating)
    self.rating_history.append(self.rating)
    opponent.rating_history.append(opponent.rating)
def plot_rating_history(self, *args):
    """Plot this agent's Trueskill rating history (mu with a +/- sigma band).

    Args:
        *args: additional Agents to overlay on the same axes.
    """
    def _plot(ax, agent, linestyle='solid'):
        # One mu-line plus a one-sigma confidence band per agent.
        games = range(1, len(agent.rating_history) + 1)
        mu = [rating.mu for rating in agent.rating_history]
        sigma = [rating.sigma for rating in agent.rating_history]
        ci_lower = [a - b for a, b in zip(mu, sigma)]
        ci_upper = [a + b for a, b in zip(mu, sigma)]
        ax.plot(games, mu, label=agent.name, color='k', linestyle=linestyle)
        ax.fill_between(games, ci_lower, ci_upper, alpha=0.1)

    fig, ax = plt.subplots()
    _plot(ax, self)
    # BUG FIX: cycle the line styles; the old code indexed a fixed
    # 3-element list with players.index(player), which raised IndexError
    # for more than three extra agents and misbehaved with duplicates.
    linestyles = ['dashed', 'dotted', 'dashdot']
    for i, player in enumerate(args):
        _plot(ax, player, linestyles[i % len(linestyles)])
    ax.legend()
    ax.set_xlabel('Number of games (N)')
    ax.set_ylabel('Trueskill rating')
    ax.axhline(y=25, color='r', linestyle='-', linewidth=1)
    plt.show()
def analyse_position(self, game):
    """Let the agent evaluate a position with its configured strategy.

    Returns the full search-result node (more detailed information than
    make_move) and caches it on ``self.n`` for the next make_move() call.
    """
    # Dispatch via getattr instead of eval(): identical behavior for the
    # supported searchby values, without executing an arbitrary string.
    self.n = getattr(self, self.searchby)(game)
    return self.n
def make_move(self, game):
    """Calculate a move for *game* using the agent's searchby strategy.

    Reuses the node cached by a preceding analyse_position() call, if any.

    Args:
        game: position of type HexBoard.
    """
    node = self.n if self.n is not None else self.analyse_position(game)
    self.n = None  # the cached analysis is good for one move only
    if isinstance(node, tuple):
        # alphabetaIDTT returns a tuple (n, tt)
        node = node[0]
    return self.select_move(node['moves'])
def select_move(self, moves):
    """Select a move among equally good moves.

    Args:
        moves: a list of equally good moves.

    With RANDOM_MOVE enabled the choice is pseudo-random but reproducible
    (seeded from name/turn/game); otherwise the first move is returned.
    """
    if self.RANDOM_MOVE:
        # The old code called make_seed() twice, discarding the first
        # result; make_seed() is pure so one call is equivalent.
        random.seed(self.make_seed())
        return random.choices(moves)[0]
    return moves[0]
def random(self, game):
    """Let the agent make a random move.

    Returns a node dict whose 'moves' holds one random empty position.
    """
    # The old duplicate make_seed() call whose result was discarded has
    # been dropped (make_seed() has no side effects).
    random.seed(self.make_seed())
    return {'moves': [random.sample(game.get_allempty(), 1)[0]]}
def minimax(self, game, depth=None, ntype=None, p=None):
    """Depth-limited minimax search.

    Args:
        game: A HexBoard instance.
        depth (int): depth limit of search tree, if depth exceeds empty positions, it will be reduced
        ntype (str): node type, either 'MAX' or 'MIN'
        p (int): perspective/player of search tree root, either 1 for HexBoard.BLUE, or 2 for HexBoard.RED

    Returns:
        A dict including state, depth, children, type, score, move (depreciated), and moves.
    """
    # Movelist for current state
    movelist = game.get_allempty()
    if depth is ntype is p is None:
        depth, ntype, p = self.depth, "MAX", self.color
    # For small board and end game, depth limit == full depth
    if depth > len(movelist):
        print('WARNING: DEPTH is limited by empty positions in board => set to full depth search.\n')
        depth = len(movelist)
    # Initialize node
    n = {'state': game, 'depth': depth, 'children': {}, 'type': ntype, 'moves': []}
    if self.DEBUG:
        print(f'\nNode DEPTH = {n["depth"]} (TYPE = {n["type"]})')
        print(' GAME OVER?', n['state'].game_over)
        if depth and not n['state'].is_game_over():
            print(f' PLAYER {p} to consider EMPTY positions {movelist}')
    # Initialize child_count to count children at depth d
    child_count = 0
    # Main loop
    if n['state'].is_game_over():  # Case: gameover at depth >= 0
        n['type'] = 'LEAF'
        n['score'] = 1000+depth if n['state'].check_win(self.color) else -1000-depth
        if self.DEBUG:
            print(' Leaf SCORE (LEAF) =', n['score'], '\n')
        return n
    elif not depth:  # Case: reaching the search tree depth limit
        n['type'] = 'DEPTH==0'
        n['score'] = self.eval_dijkstra2(n['state'])
        if self.DEBUG:
            print(' Leaf SCORE (DEPTH==0) =', n['score'], '\n')
        return n
    elif n['type'] == 'MAX':  # Max node
        g_max = -np.inf  # Initialize max score with very small
        n['score'] = g_max
        for child_move in movelist:  # Search all children and compare score
            child_count += 1
            if self.DEBUG:
                print(f'\nFrom DEPTH {n["depth"]} branch --> Child {child_count}:')
                print(f'\nPLAYER {p} moves as {child_move} STATE before move:')
                n['state'].print()
            new_state = copy.deepcopy(n['state'])  # Copy state to avoid modifying current state
            new_state.place(child_move, p)  # Generate child state
            if self.DEBUG:
                print(' STATE after move:')
                new_state.print()  # Eyetest child state
            child_n = self.minimax(new_state, n['depth'] - 1, 'MIN', p)  # Generate child node
            n['children'].update({str(child_move): child_n})  # Store children node
            # BUG FIX: test the tie case first (as alphabeta does).  The
            # old "if > : reset moves" followed by a separate "if == :
            # append" appended the new best move twice, biasing the
            # random tie-break towards it.
            if child_n['score'] == g_max:
                n['moves'].append(child_move)  # Keep ties to fix deterministic tendencies
            elif child_n['score'] > g_max:  # Back up from the maximum child node
                g_max = child_n['score']
                n['score'] = child_n['score']
                n['move'] = child_move
                n['moves'] = [child_move]
            if self.DEBUG:
                print(f'End of PLAYER {p} DEPTH {n["depth"]} {n["type"]} node:', end='')
                print(f'Child move {child_move}', end=' ')
                print(f'score = {child_n["score"]}; Updated optimal move {n["move"]} score = {n["score"]}.')
    elif n['type'] == 'MIN':  # Min node
        g_min = np.inf  # Initialize min score with very large
        n['score'] = g_min
        for child_move in movelist:
            child_count = child_count + 1
            if self.DEBUG:
                print(f'\nFrom DEPTH {n["depth"]} branch --> Child {child_count}:')
                print(f'PLAYER {p} moves at {child_move} STATE before move:')
                n['state'].print()
            new_p = [1, 2]
            new_p.remove(p)  # Reverse perspective for child node
            new_state = copy.deepcopy(n['state'])
            new_state.place(child_move, new_p[0])  # Generate child state
            if self.DEBUG:
                print(' STATE after move:')
                new_state.print()
            child_n = self.minimax(new_state, n['depth'] - 1, 'MAX', p)
            n['children'].update({str(child_move): child_n})  # Store children node
            # BUG FIX: same tie-before-update ordering as the MAX branch.
            if child_n['score'] == g_min:
                n['moves'].append(child_move)
            elif child_n['score'] < g_min:  # Back up from the minimum child node
                g_min = child_n['score']
                n['score'] = child_n['score']
                n['move'] = child_move
                n['moves'] = [child_move]
            if self.DEBUG:
                print(f'End of PLAYER {p} DEPTH {n["depth"]} {n["type"]} node: Child move {child_move}', end=" ")
                print(f'score = {child_n["score"]}; Updated optimal move {n["move"]} score = {n["score"]}.')
    else:
        print('Error: Nothing to execute.')
        return
    return n
def alphabeta(self, game, depth=None, ntype="MAX", p=None, a=-np.inf, b=np.inf):
    """
    Alpha-Beta search algorithm
    Args:
        game (HexBoard object):
        depth (int): depth limit of search tree, if depth exceeds empty positions, it will be reduced
        p (int): perspective/player of search tree root, either 1 for HexBoard.BLUE, or 2 for HexBoard.RED
        ntype (str): node type, either 'MAX' or 'MIN'
        a (float): alpha value, first input should be -np.inf or very small value, increase upon recursion
        b (float): beta value, first input should be np.inf or very large value, decrease upon recursion
    Returns:
        node (dict): {'state', 'depth', 'children', 'type', 'score', 'move',
                      'moves', 'children_searched', 'children_cutoff'}
    Further improvements:
        search statistics: nodes searched + cutoffs
    """
    # Generate movelist by getting all empty positions of current state
    movelist = game.get_allempty()
    if depth is p is None:
        depth, p = self.depth, self.color
    # For small board and end game, depth limit == full depth
    if depth > len(movelist):
        print('[alphabeta-reminder] Search depth is limited by empty positions in board.\n')
        depth = len(movelist)
    # Initialize node; children_searched/children_cutoff are overwritten when a cutoff occurs
    n = {'state': game, 'depth': depth, 'children': {}, 'type': ntype, 'moves': [],
         'children_searched': movelist, 'children_cutoff': []}
    if self.DEBUG:
        print(f'\nNode DEPTH = {n["depth"]} (TYPE = {n["type"]})')
        print(' GAME OVER?', n['state'].is_game_over())
        if depth and not n['state'].is_game_over():
            print(f' PLAYER {p} to consider EMPTY positions {movelist}')
        print(f'Start of function: alpha = {a} beta = {b}')
    # Initialize child_count to count children at depth d
    child_count = 0
    # Determine score or search children, depending on node
    if n['state'].is_game_over():  # Case: gameover at depth >= 0
        n['type'] = 'LEAF'
        # Deeper remaining depth means a quicker result, scored more extreme
        n['score'] = 1000+depth if n['state'].check_win(p) else -1000-depth
        if self.DEBUG:
            print(f'Leaf SCORE (LEAF): {n["score"]} \n')
        return n
    elif not depth:  # Case: reaching the search tree depth limit
        n['type'] = 'HEURISTIC'
        n['score'] = self.eval_dijkstra1(n['state'], p)
        if self.DEBUG:
            print(f'Leaf SCORE (HEURISTIC): {n["score"]} \n')
        return n
    elif n['type'] == 'MAX':  # Max node
        g_max = -np.inf  # Initialize max score with very small
        n['score'] = g_max
        for child_move in movelist:  # Search all children and compare score
            child_count = movelist.index(child_move) + 1
            if self.DEBUG:
                print(f'From DEPTH {n["depth"]} branch --> Child #{movelist.index(child_move)}: \n_PLAYER {p} will make move {child_move}')
            new_state = copy.deepcopy(n['state'])  # Copy state to avoid modifying current state
            new_state.place(child_move, p)  # Generate child state
            if self.DEBUG:
                print('_BEFORE move (current state):')
                n['state'].print()
                print('_AFTER move (child state):')
                new_state.print()
                print('\n')
            child_n = self.alphabeta(new_state, n['depth'] - 1, 'MIN', p, a, b)  # Generate child node
            n['children'].update({str(child_move): child_n})  # Store children node
            # Tie checked before update so equally good moves accumulate without duplicates
            if child_n['score'] == g_max:
                n['moves'].append(child_move)
            elif child_n['score'] > g_max:  # Update current node to back up from the maximum child node
                g_max = child_n['score']
                n['score'] = child_n['score']
                n['move'] = child_move
                n['moves'] = [child_move]
            a = max(a, g_max)  # Update alpha, traces the g_max value
            if self.DEBUG:
                print(f'End of PLAYER {p} DEPTH {n["depth"]} {n["type"]} node: Child move {child_move}', end=" ")
                print(f'score = {child_n["score"]}; Updated optimal move {n["move"]} score = {n["score"]}.')
            if a >= b:
                n['children_searched'] = movelist[:child_count]
                n['children_cutoff'] = movelist[child_count:]
                if self.DEBUG:
                    print(f'Beta cutoff takes place at alpha = {a} beta = {b}')
                    print(f'Beta cutoff takes place at move {child_move};', end=' ')
                    print(f'at child {child_count}; pruning {len(movelist) - child_count}', end=' ')
                    print(f'out of {len(movelist)} children')
                break  # Beta cutoff, g >= b
    elif n['type'] == 'MIN':  # Min node
        g_min = np.inf  # Initialize min score with very large
        n['score'] = g_min
        for child_move in movelist:
            child_count += 1
            if self.DEBUG:
                print(f'From DEPTH {n["depth"]} branch --> Child #{movelist.index(child_move)}: \n_PLAYER {p} will make move {child_move}')
            new_p = [1, 2]
            new_p.remove(p)  # Reverse perspective for child node
            new_state = copy.deepcopy(n['state'])
            new_state.place(child_move, new_p[0])  # Generate child state
            if self.DEBUG:
                print('_BEFORE move (current state):')
                n['state'].print()
                print('_AFTER move (child state):')
                new_state.print()
                print('\n')
            child_n = self.alphabeta(new_state, n['depth'] - 1, 'MAX', p, a, b)
            n['children'].update({str(child_move): child_n})  # Store children node
            if child_n['score'] == g_min:
                n['moves'].append(child_move)
            elif child_n['score'] < g_min:  # Update current node to back up from the minimum child node
                g_min = child_n['score']
                n['score'] = child_n['score']
                n['move'] = child_move
                n['moves'] = [child_move]
            b = min(b, g_min)  # Update beta, traces the g_min value
            if self.DEBUG:
                print(f'End of PLAYER {p} DEPTH {n["depth"]} {n["type"]} node: Child move {child_move}', end=" ")
                print(f'score = {child_n["score"]}; Updated optimal move {n["move"]} score = {n["score"]}.')
            if a >= b:
                n['children_searched'] = movelist[:child_count]
                n['children_cutoff'] = movelist[child_count:]
                if self.DEBUG:
                    print(f'Alpha cutoff takes place at alpha = {a} beta = {b}')
                    print(f'Alpha cutoff takes place at move {child_move};', end=" ")
                    print(f'at child {child_count}; pruning {len(movelist) - child_count}', end=' ')
                    print(f'out of {len(movelist)} children')
                break  # Alpha cutoff, a >= g
    else:
        print('Error: Nothing to execute.')
        return
    return n
def alphabetaIDTT(self, game):
    """Iterative-deepening alpha-beta search with a transposition table.

    Anytime algorithm: searches depth 1, 2, ... until the agent's
    timelimit elapses or the depth reaches the number of empty board
    positions (exact search).  SUGGEST testing with timelimit 1 first.

    Args:
        game (HexBoard object): position to search.

    Returns:
        (node, tt): result node of the deepest completed search and the
        TranspositionTable accumulated along the way.
    """
    deadline = time.time() + self.timelimit  # Define timeout criteria
    depth = 1  # Start with shallow depth
    tt = TranspositionTable()  # Initialize search with empty table
    result_node = ()
    while True:
        if self.DEBUG:
            print(f'[Iteration status] Start iteration at depth {depth} , TT entries: {tt.count_entry()}')
        # Reuse the TT filled by the previous, shallower iteration.
        result_node, tt = self.ttalphabeta(game=game, depth=depth, p=self.color, tt=tt)
        # NOTE(review): the timeout is only noticed after an iteration
        # completes, so the limit can be overshot.
        if time.time() > deadline:
            print(f'[Iteration report] Termination: TIMEOUT. Return result of completed search at depth {depth}')
            break
        if depth == len(game.get_allempty()):
            print(f'[Iteration report] Termination: EXACT SEARCH ended. Return result of completed search at depth {depth}')
            break
        depth += 1  # Deepen by one for the next iteration
    return (result_node, tt)
def ttalphabeta(self, game, depth=None, p=None, ntype='MAX', a=-np.inf, b=np.inf,
                tt=None):
    """
    Alpha-Beta search algorithm, to be used with alphabetaIDTT() and custom class TranspositionTable.
    Args:
        game (HexBoard object):
        depth (int): depth limit of search tree, if depth exceeds empty positions, it will be reduced
        p (int): perspective/player of search tree root, either 1 for HexBoard.BLUE, or 2 for HexBoard.RED
        ntype (str): node type, either 'MAX' or 'MIN'
        a (float): alpha value, first input should be -np.inf or very small value, increase upon recursion
        b (float): beta value, first input should be np.inf or very large value, decrease upon recursion
        tt (TranspositionTable object): table shared along the search; a fresh one is created when None
    Returns:
        node (dict): {'state', 'depth', 'children', 'type', 'score', 'move', 'children_searched', 'children_cutoff'}
        tt (TranspositionTable object): transposition table
    """
    # BUG FIX: the old default `tt=TranspositionTable()` was evaluated once
    # at class-definition time, silently sharing one table between every
    # call that omitted *tt*.
    if tt is None:
        tt = TranspositionTable()
    # Generate movelist by getting all empty positions of current state
    movelist = game.get_allempty()
    if p is None:
        p = self.color
    # For small board and end game, depth limit == full depth
    if depth > len(movelist):
        print('[ttalphabeta-reminder] Search depth is limited by empty positions in board.\n')
        depth = len(movelist)
    # Initialize node
    n = {'state': game, 'depth': depth, 'children': {}, 'type': ntype, 'moves': [],
         'children_searched': movelist, 'children_cutoff': []}
    if self.DEBUG:
        print(f'Start of {n["type"]} node DEPTH = {n["depth"]}')
        print(f'_Is the state of node GAME OVER: {n["state"].game_over}')
        if (depth != 0) and not (n['state'].is_game_over()):
            print(f'_PLAYER {p} to consider EMPTY positions: {movelist}')
        print('\n')
    # Look up TT for current board state with search depth d
    tt_hit, tt_score, tt_bestmoves = tt.lookup(n, depth)
    if self.DEBUG:
        print(f'TT lookup returns hit: {tt_hit}\n')
    # Case: Transposition state with >= current search depth is found in TT
    if tt_hit:  # Copy from TT and return result
        n['type'] = 'TT : ' + n['state'].convert_key()
        n['score'] = tt_score
        n['moves'] = tt_bestmoves
        if self.DEBUG:
            print('Found transposition at >= current search depth, copy and return TT result. \n')
        return n, tt
    # Update move list to search best move in TT first
    for tt_bestmove in tt_bestmoves:
        if tt_bestmove in movelist:
            if self.DEBUG:
                print('Best move is found in TT. Improve move ordering:')
                print(f'Original movelist: {movelist}')
            movelist.remove(tt_bestmove)  # Remove best move in movelist
            movelist.insert(0, tt_bestmove)  # Insert best move to the first of movelist
            if self.DEBUG:
                print(f'New movelist: {movelist} \n')
    # Determine score or search children, depending on node
    if n['state'].is_game_over():  # Case: gameover at depth >= 0
        n['type'] = 'LEAF'  # Terminal node, no more children because game is over
        n['score'] = 1000+depth if n['state'].check_win(self.color) else -1000-depth
        n['move'] = ()  # Store empty () to TT and return
        if self.DEBUG:
            print(f'Leaf SCORE (LEAF): {n["score"]} \n')
        # Falls through to store n in TT and return
    elif depth == 0:  # Case: reaching the search tree depth limit
        n['type'] = 'HEURISTIC'
        n['score'] = self.eval_dijkstra2(n['state'])
        n['move'] = ()  # Store empty () to TT and return
        if self.DEBUG:
            print(f'Leaf SCORE (HEURISTIC): {n["score"]} \n')
        # Falls through to store n in TT and return
    elif n['type'] == 'MAX':  # Max node
        g_max = -np.inf  # Initialize max score with very small
        n['score'] = g_max
        for child_move in movelist:  # Search children / subtree
            child_count = movelist.index(child_move) + 1
            if self.DEBUG:
                print(f'From DEPTH {n["depth"]} branch --> Child #{movelist.index(child_move)}: \n_PLAYER {p} will make move {child_move}')
            new_state = copy.deepcopy(n['state'])  # Copy state to avoid modifying node state
            new_state.place(child_move, p)  # Generate child state
            if self.DEBUG:
                print('_BEFORE move (current state):')
                n['state'].print()
                print('_AFTER move (child state):')
                new_state.print()
                print('\n')
            # Search OR evaluate child node, update TT
            child_n, tt = self.ttalphabeta(new_state, n['depth'] - 1, p, 'MIN', a, b, tt)
            n['children'].update({str(child_move): child_n})  # Store children node to current node
            if child_n['score'] > g_max:  # Update current node to backtrack from the maximum child node
                g_max = child_n['score']  # Update max score
                n['score'] = child_n['score']  # Store to return
                n['move'] = child_move  # Store to return
                n['moves'] = [child_move]
                a = max(a, g_max)  # Update alpha, traces the g_max value among siblings
            elif child_n['score'] == g_max:
                n['moves'].append(child_move)  # For equally good move, also store to TT?
            if self.DEBUG:
                print(f'End of child #{movelist.index(child_move)} move {child_move} for PLAYER {p} {n["type"]} node at DEPTH {n["depth"]}:', end=" ")
                print(f'child score = {child_n["score"]}; Updated optimal move {n["move"]} has score = {n["score"]}. \n')
                print(f'Bounds: alpha = {a} beta = {b} \n')
            if a >= b:  # Check Beta cutoff
                n['children_searched'] = movelist[:child_count]
                n['children_cutoff'] = movelist[child_count:]
                if self.DEBUG:
                    print(f'Beta cutoff takes place at alpha = {a} beta = {b}')
                    print(f'Beta cutoff takes place at move {child_move};', end=' ')
                    print(f'at child {child_count}; pruning {len(movelist) - child_count}', end=' ')
                    print(f'out of {len(movelist)} children')
                break  # Beta cutoff, stop searching other sibling
    elif n['type'] == 'MIN':  # Min node
        g_min = np.inf  # Initialize min score with very large
        n['score'] = g_min
        for child_move in movelist:
            child_count = movelist.index(child_move) + 1
            if self.DEBUG:
                print(f'From DEPTH {n["depth"]} branch --> Child #{movelist.index(child_move)}: \n_PLAYER {p} will make move {child_move}')
            new_p = [1, 2]
            new_p.remove(p)  # Reverse color to make opponent move
            new_state = copy.deepcopy(n['state'])
            new_state.place(child_move, new_p[0])  # New child state
            if self.DEBUG:
                print('_BEFORE move (current state):')
                n['state'].print()
                print('_AFTER move (child state):')
                new_state.print()
                print('\n')
            # Child of MIN becomes MAX
            child_n, tt = self.ttalphabeta(new_state, n['depth'] - 1, p, 'MAX', a, b, tt)
            n['children'].update({str(child_move): child_n})
            if child_n['score'] < g_min:  # Update current node to backtrack from the minimum child node
                g_min = child_n['score']
                n['score'] = child_n['score']
                n['move'] = child_move
                n['moves'] = [child_move]
                b = min(b, g_min)  # Update beta, traces the g_min value among siblings
            elif child_n['score'] == g_min:
                n['moves'].append(child_move)
            if self.DEBUG:
                print(f'End of child #{movelist.index(child_move)} move {child_move} for PLAYER {p} {n["type"]} node at DEPTH {n["depth"]}:', end=" ")
                print(f'child score = {child_n["score"]}; Updated optimal move {n["move"]} has score = {n["score"]}. \n')
                print(f'Bounds: alpha = {a} beta = {b} \n')
            if a >= b:
                n['children_searched'] = movelist[:child_count]
                n['children_cutoff'] = movelist[child_count:]
                if self.DEBUG:
                    print(f'Alpha cutoff takes place at move {child_move}; at child {movelist.index(child_move)};', end=" ")
                    print(f'pruning {len(movelist) - movelist.index(child_move)} out of {len(movelist)} children \n')
                break  # Alpha cutoff, stop searching other sibling
    else:
        print('SEARCH ERROR: Node type is unknown')
        return
    tt.store(n)  # Store search result of this node (state) to TT, and return
    return n, tt
def dijkstra1(self, game, graph, start, player):
    """
    Evaluate a position with Dijkstra's algorithm from one piece.

    Args:
        game: HexBoard object (only ``game.size`` is read here).
        graph: node map, coordinate -> {neighbor: edge weight}
            (see eval_dijkstra1 for how the weights are built).
        start: (x, y) coordinate of the piece to measure from.
        player: HexBoard.BLUE (1) or HexBoard.RED (2); selects which pair of
            borders the distances are measured to.

    Returns:
        Sum of the shortest distances from ``start`` to the two borders the
        player must connect (5000 acts as "infinity", so large values mean
        unreachable).
    """
    graph = {key: value for (key, value) in graph.items()}  # Create a new dict to avoid the original one being mutated
    shortest_distance = {}  # The following 18 lines are derived and adjusted from Ian Sullivan (2017) (start)
    unseenNodes = graph  # source: Implementation of dijkstra in python https://www.youtube.com/watch?v=IG1QioWSXRI&t=1s
    inf = 5000
    size_board = game.size
    for node in unseenNodes:
        shortest_distance[node] = inf
    shortest_distance[start] = 0
    while unseenNodes:
        minNode = -10  # sentinel meaning "no node chosen yet"
        for node in unseenNodes:
            if minNode == -10:
                minNode = node
            elif shortest_distance[node] < shortest_distance[minNode]:
                minNode = node
        for childNode, distance in graph[minNode].items():
            if distance + shortest_distance[minNode] < shortest_distance[childNode]:
                shortest_distance[childNode] = distance + shortest_distance[minNode]
        unseenNodes.pop(minNode)  # End of the section derived and adjusted from Ian Sullivan (2017)
    # Below: identify the smallest distance from this piece to each of the
    # two borders the player needs to connect.
    if player == HexBoard.RED:  # red plays vertically (connects the y borders)
        edgeupper1 = []
        edgelower2 = []
        for i in range(size_board):
            a_edge1 = (i, 0)
            a_edge2 = (i, size_board - 1)
            edgeupper1.append(a_edge1)
            edgelower2.append(a_edge2)
    else:  # blue plays horizontally (connects the x borders)
        edgeupper1 = []
        edgelower2 = []
        for i in range(size_board):
            a_edge1 = (0, i)
            a_edge2 = (size_board - 1, i)
            edgeupper1.append(a_edge1)
            edgelower2.append(a_edge2)
    # Minimum distance to the first border.
    target_upper = inf
    for candidate in edgeupper1:
        if shortest_distance[candidate] < target_upper:
            target_upper = shortest_distance[candidate]
    # Minimum distance to the opposite border.
    target_lower = inf
    for candidate2 in edgelower2:
        if shortest_distance[candidate2] < target_lower:
            target_lower = shortest_distance[candidate2]
    return target_lower + target_upper
def eval_dijkstra1(self, game, player):
    """
    Heuristic evaluation: difference between the players' shortest-path
    distances to connect their borders, computed per piece via dijkstra1.

    Args:
        game: HexBoard object.
        player: HexBoard.BLUE (1) or HexBoard.RED (2); the score is from
            this player's perspective.

    Returns:
        +5000 when ``player`` has already won, -5000 when the enemy has won,
        otherwise the signed distance difference (positive favors ``player``).
    """
    size_board = game.size
    samplespace = list(product([i for i in range(size_board)], [i for i in range(size_board)]))
    redcoordinate = [k for k, v in game.board.items() if v == 2]  # Freddy asks Ifan
    bluecoordinate = [k for k, v in game.board.items() if v == 1]  # Freddy asks Ifan
    # the node map: by default the distance between one piece and its neighbor is one
    # adjustments to the default distance: same color costs zero, enemy color a large number
    top_level_map_red = {}  # the node map from red perspective
    second_level_map_red = {}
    for i in samplespace:
        neigher_node = HexBoard(size_board).get_neighbors(i)
        for j in neigher_node:
            if j in redcoordinate:  # special case 1: own color, free to cross
                second_level_map_red[j] = 0
            elif j in bluecoordinate:  # special case 2: enemy color, effectively blocked
                second_level_map_red[j] = 5000
            else:  # default = 1
                second_level_map_red[j] = 1
        top_level_map_red[i] = second_level_map_red
        second_level_map_red = {}
    top_level_map_blue = {}  # the node map from blue perspective
    second_level_map_blue = {}
    for i in samplespace:
        neigher_node = HexBoard(size_board).get_neighbors(i)
        for j in neigher_node:
            if j in redcoordinate:  # special case 1: enemy color, effectively blocked
                second_level_map_blue[j] = 5000
            elif j in bluecoordinate:  # special case 2: own color, free to cross
                second_level_map_blue[j] = 0
            else:  # default = 1
                second_level_map_blue[j] = 1
        top_level_map_blue[i] = second_level_map_blue
        second_level_map_blue = {}
    # heuristic_score = remaining_blue_hexes - remaining_red_hexes
    red_distance_from_win = []
    blue_distance_from_win = []
    for a_coordinate in redcoordinate:
        value = self.dijkstra1(game, top_level_map_red, a_coordinate, player=HexBoard.RED)
        red_distance_from_win.append(value)
    for a_coordinate in bluecoordinate:
        value = self.dijkstra1(game, top_level_map_blue, a_coordinate, player=HexBoard.BLUE)
        blue_distance_from_win.append(value)
    # The shortest path Dijkstra returns is in terms of currently placed
    # pieces and may exceed the board size, but the board size is a known
    # upper bound; appending it caps the minimum below (and also handles the
    # case where a player has no pieces yet and the list would be empty).
    red_distance_from_win.append(size_board)
    blue_distance_from_win.append(size_board)
    heuristic_score = min(blue_distance_from_win) - min(red_distance_from_win)
    # Before returning the heuristic, handle finished games explicitly:
    # a win for the player scores +5000, a win for the enemy -5000.
    allcolor = [HexBoard.RED, HexBoard.BLUE]
    allcolor.remove(player)  # to get the enemy color
    if game.check_win(player):  # the player wins
        return 5000  # Freddy: probably irrelevant now because check_win will be executed before calling evaluation
    elif game.check_win(allcolor[0]):  # its enemy wins
        return -5000
    else:
        if player == HexBoard.RED:
            return heuristic_score
        else:
            return -heuristic_score
def eval_dijkstra2(self, game):
    """Score *game* as (blue distance - red distance), sign-flipped by our color."""
    blue_distance = self.dijkstra2(game, 1)
    red_distance = self.dijkstra2(game, 2)
    sign = (-1) ** self.color
    return sign * (blue_distance - red_distance)
@staticmethod
def dijkstra2(game, player):
    """Evaluate a position with Robbie's Dijkstra algorithm.

    Measures how many additional (empty) hexes ``player`` needs to connect
    its two borders, using the virtual "Left"/"Right"/"Top"/"Down" hexes as
    source and destination.

    Args:
        game: HexBoard object.
        player: HexBoard.BLUE or HexBoard.RED.

    Returns:
        The shortest-path cost between the player's two borders
        (np.inf when unreachable).
    """
    if player == HexBoard.BLUE:
        source, destination, ignore1, ignore2 = 'Left', 'Right', 'Top', 'Down'
    else:
        source, destination, ignore1, ignore2 = 'Top', 'Down', 'Left', 'Right'
    # Edge costs: own hexes cost 0 to cross, empty hexes cost 1, enemy hexes
    # are never relaxed and stay at infinity.
    distance = {k: np.inf for k in game.get_all()}
    distance.update({source: 0, destination: np.inf})
    unvisited = {k: True for k in game.get_all()}
    # The two borders of the *other* player are marked visited so the search
    # never expands through them.
    unvisited.update({source: True, destination: True, ignore1: False, ignore2: False})
    square = source
    def dijkstra2_r(game, player, square, distance, unvisited, destination):
        """ This is the recursive part of the algorithm"""
        # Update distances for neighbors
        for neighbor in game.get_neighbors(square, extra_hexes=True):
            if unvisited[neighbor]:
                color = game.get_color(neighbor)
                if color == player:
                    distance[neighbor] = min(distance[neighbor], distance[square])
                elif color == HexBoard.EMPTY:
                    distance[neighbor] = min(distance[neighbor], distance[square] + 1)
        unvisited[square] = False
        # Dijkstra's algorithm ends when the destination square has been visited.
        if not unvisited[destination]:
            return distance[destination]
        ud = {k: v for k, v in distance.items() if unvisited[k]}  # Unvisited distances
        next_square = min(ud, key=ud.get)  # expand the closest unvisited square next
        return dijkstra2_r(game, player, next_square, distance, unvisited, destination)
    return dijkstra2_r(game, player, square, distance, unvisited, destination)
def mcts(self, game):
    """Run Monte-Carlo Tree Search from the current position and pick a move.

    Args:
        game: A HexBoard instance.

    Uses ``self.N`` iterations and exploration constant ``self.Cp``.

    Returns:
        dict with key 'moves' holding a one-element list: the chosen move.
    """
    times_of_loop, cp = self.N, self.Cp
    root = MCTS_hex(game, self.color)
    for i in range(times_of_loop):
        root.BestUCT_Childnode(cp)
    score = {}
    for childnode, nodeobject in root.children.items():
        if nodeobject.visit_count == 0:
            nodeobject.visit_count = -1000  # Assume we prefer not to pick unexplored node by assigning negative counts.
        score[childnode] = nodeobject.value_sum/nodeobject.visit_count
    # The child key is its ID_tuple; its last element is the move itself.
    return {'moves': [max(score, key= score.get)[-1]]}
class MCTS_hex:
    """A node in a Monte-Carlo Tree Search over HexBoard positions."""

    def __init__(self, game, col, parent="root has no parent", ID_tuple=("root",)):
        """Create a tree node.

        Args:
            game: A HexBoard instance (deep-copied into the node).
            col: HexBoard.BLUE or HexBoard.RED -- the player to move here.
            parent: parent MCTS_hex node, or the sentinel string for the root.
            ID_tuple: tuple of moves from the root; uniquely identifies the node.
        """
        self.player = col  # player is either HexBoard.BLUE or HexBoard.RED
        self.parent = parent  # parent is a node object (or the root sentinel string)
        self.children = {}  # the node's children, keyed by their ID_tuple
        self.visit_count = 0  # number of visits
        self.value_sum = 0  # total accumulated reward
        self.state = copy.deepcopy(game)  # self.state is a HexBoard object
        self.state_empty = [k for k, v in self.state.board.items() if v == 3]  # 3 == HexBoard.EMPTY
        # ID_tuple is the node's name ("state"): it records the whole move path
        # from the root, i.e. all actions played in order by both players.
        self.ID_tuple = ID_tuple

    def expanded(self):
        """Return True if this node has already been expanded."""
        return len(self.children) > 0

    def freddy_get_root_Node(self):
        """Follow parent links up the tree and return the root node."""
        parent = self.parent
        if parent == "root has no parent":
            return self
        return parent.freddy_get_root_Node()

    def expand(self):
        """Create one child node per empty hex (one legal move each)."""
        player = self.player
        if self.player == HexBoard.BLUE:
            enemy_player = HexBoard.RED
        else:
            enemy_player = HexBoard.BLUE
        emptycoordinate_2 = copy.deepcopy(self.state_empty)
        for a_tuple in emptycoordinate_2:
            # Bug fix: copy the board once per child. The original made a single
            # copy before the loop, so every child accumulated all previous
            # siblings' moves instead of differing by exactly one move.
            movingstate = copy.deepcopy(self.state)
            movingstate.place(a_tuple, player)
            nodes_name = self.ID_tuple + (a_tuple,)
            self.children[nodes_name] = MCTS_hex(game=movingstate, col=enemy_player, parent=self, ID_tuple=nodes_name)

    def rollout(self):
        """Play random moves to a terminal position and store the reward.

        Sets self.value_sum to +1 / -1 / 0 from the root player's perspective.
        """
        root_color = self.freddy_get_root_Node().player
        player = self.player
        movingstate = copy.deepcopy(self.state)
        emptycoordinate = [k for k, v in movingstate.board.items() if v == 3]
        if player == HexBoard.BLUE:
            player_enemy = HexBoard.RED
        else:
            player_enemy = HexBoard.BLUE
        if movingstate.check_win(player_enemy) == True:
            if player_enemy == root_color:
                self.value_sum = 1
            else:
                self.value_sum = -1
        elif movingstate.check_win(player) == True:
            if player_enemy == root_color:
                self.value_sum = -1
            else:
                self.value_sum = 1
        elif not emptycoordinate:
            # Bug fix: the original compared this list against {} (always
            # False), so the full-board case was never taken.
            self.value_sum = 0
        else:
            while True:
                a_empty_piece = random.choice(emptycoordinate)
                movingstate.place(a_empty_piece, player)
                emptycoordinate.remove(a_empty_piece)
                if movingstate.check_win(player) == True:
                    if player_enemy == root_color:
                        self.value_sum = -1
                        break
                    else:
                        self.value_sum = 1
                        break
                # NOTE(review): if the board filled here without a detected win,
                # the next random.choice would raise on an empty list; a full Hex
                # board always has a winner, so this should be unreachable.
                a_empty_piece = random.choice(emptycoordinate)
                movingstate.place(a_empty_piece, player_enemy)
                emptycoordinate.remove(a_empty_piece)
                if movingstate.check_win(player_enemy) == True:
                    if player_enemy == root_color:
                        self.value_sum = 1
                        break
                    else:
                        self.value_sum = -1
                        break
                if not emptycoordinate:  # bug fix: was compared against {} (always False)
                    self.value_sum = 0
                    break

    def backpropagate(self, reward=0):
        """Propagate the rollout reward and visit counts toward the root.

        Args:
            reward: rollout result in {-1, 0, 1}.
        """
        # NOTE(review): the branches update visit counts asymmetrically (a
        # just-rolled-out leaf bumps its own count, interior nodes only the
        # parent's); preserved as-is from the original design.
        if self.parent == "root has no parent":
            return None
        elif self.visit_count == 0:
            # First visit of a fresh leaf: consume its own rollout value.
            self.visit_count = 1
            reward = self.value_sum
            self.parent.visit_count += 1
            self.parent.value_sum += reward
            self.parent.backpropagate(reward)
        elif self.children == {}:
            self.visit_count += 1
            self.parent.value_sum += reward
            self.parent.backpropagate(reward)
        elif self.parent != "root has no parent":
            self.parent.visit_count += 1
            self.parent.value_sum += reward
            self.parent.backpropagate(reward)

    def BestUCT_Childnode(self, cp=1):
        """One MCTS iteration: select by UCT, expanding/rolling out as needed.

        Args:
            cp: exploration constant of the UCT formula.
        """
        if self.children == {}:
            self.expand()
        a_dic = {}
        nodes_visit_num = []
        self.cp = cp
        self.root = self.freddy_get_root_Node()
        for childnode, nodeobject in self.children.items():
            nodes_visit_num.append(nodeobject.visit_count)
        if 0 in nodes_visit_num:
            # Roll out the first unvisited child, then end this iteration.
            # (The original had a dead `break` after this return; removed.)
            for childnode, nodeobject in self.children.items():
                if nodeobject.visit_count == 0:
                    nodeobject.rollout()
                    nodeobject.backpropagate()
                    return None
        elif self.children == {}:
            # Terminal node: expansion produced no children, evaluate here.
            self.rollout()
            self.backpropagate()
            return None
        else:
            # All children visited: pick the child maximizing
            # UCT = Q + cp * sqrt(ln(parent visits) / child visits).
            for childnode, nodeobject in self.children.items():
                self.exploitation = nodeobject.value_sum / nodeobject.visit_count
                self.term = math.log(nodeobject.parent.visit_count) / nodeobject.visit_count
                if self.term < 0:  # a negative value cannot be square-rooted
                    self.term = 0
                self.exploration = self.cp * math.sqrt(self.term)
                a_dic[childnode] = self.exploitation + self.exploration
            Bestchild_ID_tuple = max(a_dic, key=a_dic.get)
            Bestchild = self.children[Bestchild_ID_tuple]
            if Bestchild.visit_count != 0:
                return Bestchild.BestUCT_Childnode()
class HexBoard:
    """Hex game board: a size x size grid of hexes, each EMPTY or owned by
    BLUE/RED.

    BLUE tries to connect the left and right edges (x direction); RED tries
    to connect the top and bottom edges (y direction).
    """

    BLUE = 1  # value taken up by a position
    RED = 2
    EMPTY = 3

    def __init__(self, board_size):
        """Create an empty board_size x board_size board."""
        self.board = {}
        self.size = board_size
        self.game_over = False  # state of game over
        for x in range(board_size):
            for y in range(board_size):
                self.board[x, y] = HexBoard.EMPTY

    def is_game_over(self):
        """Check if it's game over."""
        return self.game_over

    def is_empty(self, coordinates):
        """Check if position is empty."""
        return self.board[coordinates] == HexBoard.EMPTY

    def is_color(self, coordinates, color):
        """Check if position contains the given color (1/2)."""
        return self.board[coordinates] == color

    def get_color(self, coordinates):
        """Read the color of a position; virtual border hexes have fixed colors."""
        if coordinates in ["Left", "Right"]:
            return HexBoard.BLUE
        if coordinates in ["Top", "Down"]:
            return HexBoard.RED
        if coordinates == (-1, -1):
            return HexBoard.EMPTY
        return self.board[coordinates]

    def place(self, coordinates, color):
        """
        Place a piece of color at a position (make a move); updates the
        game-over state. Only acts when the game is not over AND the
        position is empty.
        """
        if not self.game_over and self.board[coordinates] == HexBoard.EMPTY:
            self.board[coordinates] = color  # update the color
            if self.check_win(HexBoard.RED) or self.check_win(HexBoard.BLUE):  # check win for either color
                self.game_over = True  # one side has won: freeze the board

    @staticmethod
    def get_opposite_color(current_color):
        """Return the opposing player's color."""
        if current_color == HexBoard.BLUE:
            return HexBoard.RED
        return HexBoard.BLUE

    def get_neighbors(self, coordinates, extra_hexes=False):
        """Return a list of valid neighbor coordinates from a position.

        Args:
            coordinates: an (x, y) tuple, or one of the virtual border hexes
                "Left"/"Top"/"Right"/"Down".
            extra_hexes: whether the virtual border hexes should be included
                for edge positions (used by the Dijkstra evaluation).
        """
        neighbors = []
        # A virtual hex outside the board connects to its whole edge.
        if coordinates == "Left":
            neighbors.extend([(0, cy) for cy in range(self.size)])
        elif coordinates == "Top":
            neighbors.extend([(cx, 0) for cx in range(self.size)])
        elif coordinates == "Right":
            neighbors.extend([(self.size - 1, cy) for cy in range(self.size)])
        elif coordinates == "Down":
            neighbors.extend([(cx, self.size - 1) for cx in range(self.size)])
        else:
            (cx, cy) = coordinates
            if cx - 1 >= 0:
                neighbors.append((cx - 1, cy))
            if cx + 1 < self.size:
                neighbors.append((cx + 1, cy))
            if cx - 1 >= 0 and cy + 1 <= self.size - 1:
                neighbors.append((cx - 1, cy + 1))
            if cx + 1 < self.size and cy - 1 >= 0:
                neighbors.append((cx + 1, cy - 1))
            if cy + 1 < self.size:
                neighbors.append((cx, cy + 1))
            if cy - 1 >= 0:
                neighbors.append((cx, cy - 1))
            if extra_hexes:
                if not cx:
                    neighbors.append("Left")
                if not cy:
                    neighbors.append("Top")
                if cx == self.size - 1:
                    neighbors.append("Right")
                if cy == self.size - 1:
                    neighbors.append("Down")
        return neighbors

    def border(self, color, move):
        """Check if a move reaches that color's target border (blue: x, red: y)."""
        (nx, ny) = move
        return (color == HexBoard.BLUE and nx == self.size-1) or (color == HexBoard.RED and ny == self.size-1)

    def traverse(self, color, move, visited):
        """Depth-first search from `move` through same-colored hexes;
        True when the target border is reached."""
        if not self.is_color(move, color) or (move in visited and visited[move]):
            return False  # stop if the hex is not ours or was already visited
        if self.border(color, move):
            return True  # reached the far border
        visited[move] = True  # record this hex in the visit history
        for n in self.get_neighbors(move):  # recurse into every neighbor
            if self.traverse(color, n, visited):
                return True
        return False

    def check_win(self, color):
        """Check whether `color` has connected its two borders."""
        for i in range(self.size):
            if color == HexBoard.BLUE:
                move = (0, i)  # blue starts from the left edge: (0,0), (0,1), ...
            else:
                move = (i, 0)  # red starts from the top edge: (0,0), (1,0), ...
            # traverse returns True when the right color reaches the far border
            if self.traverse(color, move, {}):
                return True
        return False

    def print(self):
        """Print an ASCII rendering of the board (b/r/- per hex)."""
        print("   ", end="")
        for y in range(self.size):
            print(chr(y + ord('a')), "", end="")  # print x axis labels
        print("")
        print(" -----------------------")
        for y in range(self.size):
            print(y, "|", end="")  # print y axis label
            for z in range(y):
                print(" ", end="")  # indent to suggest the hex skew
            for x in range(self.size):
                piece = self.board[x, y]  # read position
                if piece == HexBoard.BLUE:
                    print("b ", end="")  # end= prints without newline
                elif piece == HexBoard.RED:
                    print("r ", end="")
                else:
                    # Dead-code fix: the original branched on x == self.size
                    # here, which can never hold inside range(self.size).
                    print("- ", end="")
            print("|")  # print '|' and a newline
        print(" -----------------------")

    def get_allempty(self):
        """Return a list of empty positions in the current board (the move list)."""
        # Consistency fix: use the named constant instead of the literal 3.
        return [k for k, v in self.board.items() if v == HexBoard.EMPTY]

    def get_all(self):
        """Return all board coordinates."""
        return [k for k, v in self.board.items()]

    def convert_key(self):
        """Return a string key uniquely representing this board position."""
        key = ""  # initiate
        for y in range(self.size):
            for x in range(self.size):
                key += str(self.board[x, y])  # piece state in {1/2/3}
        return key
class Human(Agent):
    """Human player: reads moves like 'a0' from stdin; 'q' quits."""
    # Should we make a separate class for humans?

    def make_move(self, game):
        """Prompt until a legal move is entered and return it as (x, y).

        Returns None when the user quits with 'q' (the game is flagged over).
        """
        game.print()
        while True:
            move = input("Enter your move: ").strip("''").strip('""').strip().casefold()
            if move == "q":
                print("Game terminated by user.")
                game.game_over = True  # Experimental
                return
            try:
                # Letter column -> x, remaining digits -> y.
                x_coord = int(string.ascii_lowercase.index(move[0]))
                y_coord = int(move[1:])
                move = (x_coord, y_coord)
            except (ValueError, IndexError):
                # Bug fix: the original `except ValueError or KeyError` only
                # caught ValueError; IndexError covers empty input (move[0]).
                print("That's not a legal move.")
                print("Please try again, or press 'q' to quit.")
                game.print()
                continue
            if (x_coord, y_coord) in game.get_allempty():
                break
            else:
                print("That move is not valid. Please try again, or press 'q' to quit.")
        return move
|
{"/RepeatGames.py": ["/HexBoard.py"]}
|
31,344
|
rheitz/FoodServer
|
refs/heads/master
|
/Project/FoodServer.py
|
# Food Server
# Ideaventions Academy
# 7th Grade 2017-2018
# imports
import time
from Project.Functions import *
# variables
# Target temperature and humidity thresholds (Fahrenheit / % relative humidity).
TEMP_FAN = 76
TIME_OFF = 21  # 9 pm at night turn off
TIME_ON = 6  # 6 am turn on
MAX_TEMP = 76  # Max T: fan switches on at or above this
MIN_TEMP = 63  # Min T
TARGET_TEMP = 72  # fan switches back off at or below this
MAX_RH = 50  # Max RH
MIN_RH = 17  # Min RH: humidifier switches on at or below this
TARGET_RH = 40  # humidifier switches back off at or above this

# Current device states.
LightsAreOn = False
FanIsOn = False
HumidifierIsOn = False

# NOTE(review): temperature/humidity are never updated by any sensor read in
# this file, so the fan and humidifier branches can never trigger -- confirm
# where the readings are supposed to come from.
temperature = 0
humidity = 0

# Main loop: poll the clock and toggle lights/fan/humidifier.
while True:
    # Bug fix: the original assigned `time = time.localtime()`, shadowing the
    # `time` module and crashing with AttributeError on the second iteration.
    now = time.localtime()
    if not LightsAreOn and now.tm_hour == TIME_ON:
        lightsOn()
        LightsAreOn = True
    if LightsAreOn and now.tm_hour == TIME_OFF:
        lightsOff()
        LightsAreOn = False
    if not FanIsOn and temperature >= MAX_TEMP:
        fanOn()
        FanIsOn = True
    if FanIsOn and temperature <= TARGET_TEMP:
        fanOff()
        FanIsOn = False
    if not HumidifierIsOn and humidity <= MIN_RH:
        humidifierOn()
        HumidifierIsOn = True
    if HumidifierIsOn and humidity >= TARGET_RH:
        humidifierOff()
        HumidifierIsOn = False
    time.sleep(1)  # avoid a tight busy-wait; polling once per second is ample
|
{"/Project/FoodServer.py": ["/Project/Functions.py"]}
|
31,345
|
rheitz/FoodServer
|
refs/heads/master
|
/Project/testprogram.py
|
import RPi.GPIO as GPIO
from time import sleep
# Configure all light pins as outputs (BCM numbering).
GPIO.setmode(GPIO.BCM)
# Bug fix: GPIO.setup requires a direction; the original `GPIO.setup(27)`
# omitted it and raised a TypeError.
GPIO.setup(27, GPIO.OUT)
for i in range(2, 11):
    GPIO.setup(i, GPIO.OUT)

# Turn every light on for ten seconds, then back off.
lights = [27, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for pins in lights:
    GPIO.output(pins, GPIO.HIGH)
sleep(10)
for pins in lights:
    GPIO.output(pins, GPIO.LOW)
|
{"/Project/FoodServer.py": ["/Project/Functions.py"]}
|
31,346
|
rheitz/FoodServer
|
refs/heads/master
|
/Project/Functions.py
|
import RPi.GPIO as GPIO
# Set up GPIO pins as outputs (BCM numbering): pin 27 plus pins 2-20.
# NOTE(review): the original comment said "(2-27)" but only 2-20 are
# configured here -- confirm which pins are actually wired.
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.OUT)
for i in range(2, 21):
    GPIO.setup(i, GPIO.OUT)

def fanOn():
    """Turn on the circulation fan (pin 11)."""
    GPIO.output(11, GPIO.HIGH)

def fanOff():
    """Turn off the circulation fan (pin 11)."""
    GPIO.output(11, GPIO.LOW)

def humidifierFanOn():
    """Turn on the humidifier's fan (pin 12)."""
    GPIO.output(12, GPIO.HIGH)

def humidifierFanOff():
    """Turn off the humidifier's fan (pin 12)."""
    GPIO.output(12, GPIO.LOW)

def misterOn():
    """Turn on the mister (pin 13)."""
    GPIO.output(13, GPIO.HIGH)

def misterOff():
    """Turn off the mister (pin 13)."""
    GPIO.output(13, GPIO.LOW)

def lightsOn():
    """Turn on all grow lights."""
    lights = [27, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    for pins in lights:
        GPIO.output(pins, GPIO.HIGH)

def lightsOff():
    """Turn off all grow lights."""
    lights = [27, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    for pins in lights:
        GPIO.output(pins, GPIO.LOW)

def humidifierOn():
    """Turn on the humidifier: mister plus its fan.

    Bug fix: the original called undefined names ``mister.on()`` and
    ``fan.on()`` (NameError); use this module's own pin helpers instead.
    """
    misterOn()
    humidifierFanOn()

def humidifierOff():
    """Turn off the humidifier: mister plus its fan (see humidifierOn)."""
    misterOff()
    humidifierFanOff()
|
{"/Project/FoodServer.py": ["/Project/Functions.py"]}
|
31,351
|
tux2603/WG
|
refs/heads/master
|
/SpriteLibraries.py
|
import pyglet
import numpy as np
from math import sin, cos, atan2, sqrt
class Sprite(pyglet.sprite.Sprite):
    """Game sprite with a velocity, simple physics helpers, and a per-pixel
    alpha map intended for collision detection."""
    # TODO drawBoundingRectangle
    # TODO isClicked
    # TODO getBoundingRectangle
    # TODO reflect
    # TODO isCollided

    # The maximum speed at which the sprite can travel, used for acceleration methods
    maxSpeed = 10
    # The distance that the sprite travels per clock tick, as (dx, dy)
    _velocity = (0, 0)
    # Number of game ticks the sprite has left to live; negative means immortal
    ttl = -1
    # A two dimensional array of which pixels in the sprite are opaque/transparent
    pixelMap = None

    def __init__(self, img, x=0, y=0, blend_src=770, blend_dest=771, batch=None, group=None, usage='dynamic', subpixel=False):
        super(Sprite, self).__init__(img, x=x, y=y, blend_src=blend_src, blend_dest=blend_dest,
                                     batch=batch, group=group, usage=usage, subpixel=subpixel)
        self.setImage(img)

    def setImage(self, img):
        """Set the sprite's image and rebuild the alpha pixel map used for
        collision detection."""
        self.image = img
        # Get raw pixel data out of the image
        rawData = img.get_image_data()
        colorFormat = 'RGBA'
        pitch = rawData.width * len(colorFormat)
        pixelData = rawData.get_data(colorFormat, pitch)
        self.pixelMap = np.zeros((rawData.width, rawData.height), np.int8)
        # Copy the alpha channel (every 4th byte) out of the raw RGBA string.
        for x in range(rawData.width):
            for y in range(rawData.height):
                self.pixelMap[x][y] = pixelData[(
                    x + y * rawData.width) * len(colorFormat) + 3]

    def getTransformMatrix(self):
        """Return the local-to-world transformation matrix for this sprite."""
        return Matrix.createTranslation((-self.image.anchor_x / self.scale_x, -self.image.anchor_y / self.scale_y)) * \
            Matrix.createRotation(np.radians(self.rotation)) * Matrix.createScale((self.scale_x, self.scale_y)) * \
            Matrix.createTranslation(
                (self.position[0] + self.image.anchor_x, self.position[1] + self.image.anchor_y))

    def getWidth(self):
        """Return the on-screen width (image width times x scale)."""
        return self.image.width * self.scale_x

    def getHeight(self):
        """Return the on-screen height (image height times y scale)."""
        return self.image.height * self.scale_y

    def getCenter(self):
        """Return (half-width, half-height) of the scaled sprite."""
        return (self.getWidth() / 2, self.getHeight() / 2)

    def getSpeed(self):
        """Return the scalar speed (length of the velocity vector)."""
        return sqrt(self._velocity[0] * self._velocity[0] + self._velocity[1] * self._velocity[1])

    def setSpeedAndDirection(self, speed, angle):
        """Set the velocity from a speed and an angle in degrees, capped at maxSpeed."""
        rad = np.radians(angle)
        if speed > self.maxSpeed:
            speed = self.maxSpeed
        self._velocity = (speed * cos(rad), speed * sin(rad))

    def getDirectionAngle(self):
        """Return the direction of travel in degrees, in [0, 360)."""
        # Bug fix: the original read the nonexistent attribute ``self.velocity``.
        angle = np.degrees(atan2(self._velocity[1], self._velocity[0]))
        if angle < 0:
            angle += 360
        return angle

    def setDirectionAngle(self, newAngle):
        """Point the current speed along newAngle (degrees)."""
        speed = self.getSpeed()
        self.setSpeedAndDirection(speed, newAngle)

    def changeDirectionAngle(self, delta):
        """Rotate the velocity vector by delta.

        NOTE(review): delta feeds sin/cos directly, so it must be in radians
        -- unlike the degree-based angle methods above. Confirm callers.
        """
        self._velocity = (self._velocity[0] * cos(delta) + self._velocity[1] * -sin(
            delta), self._velocity[0] * sin(delta) + self._velocity[1] * cos(delta))

    def getDirectionTo(self, other):
        """Return the angle (degrees, [0, 360)) of the vector self - other.

        Bug fix: positions are tuples, so the original's ``self.position -
        other.position`` raised TypeError; subtract componentwise instead.
        NOTE(review): as written originally, this is the direction *from*
        `other` toward `self` -- confirm the intended sign.
        """
        dx = self.position[0] - other.position[0]
        dy = self.position[1] - other.position[1]
        angle = np.degrees(atan2(dy, dx))
        if angle < 0:
            angle += 360
        return angle

    def setVelocity(self, newVelocity):
        """Set the velocity vector, scaled down if it exceeds maxSpeed."""
        newSpeed = sqrt(
            newVelocity[0] * newVelocity[0] + newVelocity[1] * newVelocity[1])
        reductionFactor = 1
        if newSpeed > self.maxSpeed:
            reductionFactor = self.maxSpeed / newSpeed
        self._velocity = (
            newVelocity[0] * reductionFactor, newVelocity[1] * reductionFactor)

    def setVelocityX(self, newVelocity):
        """Set only the x component of the velocity (speed cap still applies)."""
        self.setVelocity((newVelocity, self._velocity[1]))

    def setVelocityY(self, newVelocity):
        """Set only the y component of the velocity (speed cap still applies)."""
        self.setVelocity((self._velocity[0], newVelocity))

    def _checkTTL(self):
        """Age the sprite by one tick; hide it when its ttl expires.

        Bug fix: the original's ``elif self.ttl == 0`` branch was unreachable
        because the first branch already matched every ttl >= 0. A negative
        ttl means "immortal" and is left untouched.

        Returns whether the sprite is still visible.
        """
        if self.ttl > 0:
            self.ttl -= 1
            if self.ttl == 0:
                self.visible = False
        return self.visible

    def move(self):
        """Advance the position by one tick of velocity; False once expired."""
        if not self._checkTTL():
            return False
        self.position = (
            self.position[0] + self._velocity[0], self.position[1] + self._velocity[1])
        return True

    def accelerate(self, acceleration):
        """Accelerate by a (ax, ay) vector, or split a scalar along the
        current velocity components."""
        if isinstance(acceleration, (tuple, list)):
            ax = acceleration[0]
            ay = acceleration[1]
        else:
            total = self._velocity[0] + self._velocity[1]
            if total == 0:
                # Guard: the original divided by zero when vx + vy == 0.
                ax = ay = 0
            else:
                ax = acceleration * self._velocity[0] / total
                ay = acceleration * self._velocity[1] / total
        self._velocity = (self._velocity[0] + ax, self._velocity[1] + ay)
class Ground(Sprite):
    """Terrain tile sprite: records which of its edges block movement and
    how buoyant it is (for liquid tiles)."""

    blocksTop = False
    blocksBottom = False
    blocksLeft = False
    blocksRight = False
    buoyancy = 0

    def setAttributes(self, blocksTop=False, blocksBottom=False, blocksLeft=False, blocksRight=False, buoyancy=0):
        """Configure which edges of this tile are solid and its buoyancy."""
        self.buoyancy = buoyancy
        self.blocksRight = blocksRight
        self.blocksLeft = blocksLeft
        self.blocksBottom = blocksBottom
        self.blocksTop = blocksTop
class Matrix():
    """3x3 homogeneous 2-D transformation matrix backed by a numpy array."""

    @staticmethod
    def createTranslation(offset):
        """Return a translation matrix for the (x, y) ``offset``."""
        x, y = offset
        return Matrix(((1, 0, x), (0, 1, y), (0, 0, 1)))

    @staticmethod
    def createRotation(theta):
        """Return a rotation matrix for ``theta`` radians."""
        return Matrix(((cos(theta), -sin(theta), 0), (sin(theta), cos(theta), 0), (0, 0, 1)))

    @staticmethod
    def createScale(scale):
        """Return a scale matrix for the (scaleX, scaleY) pair ``scale``.

        Bug fix: the original's parameter was misspelled ``svale`` and the
        body unpacked the undefined name ``scaleX``, raising NameError on
        every call.
        """
        scaleX, scaleY = scale
        return Matrix(((scaleX, 0, 0), (0, scaleY, 0), (0, 0, 1)))

    def __init__(self, matrix=None):
        """Build from a 3x3 nested sequence/array, or the identity when None."""
        self._array = np.zeros((3, 3))
        if matrix is None:
            self._array[0][0] = 1
            self._array[1][1] = 1
            self._array[2][2] = 1
        else:
            for x in range(3):
                for y in range(3):
                    self._array[x][y] = matrix[x][y]

    def __mul__(self, other):
        """Matrix product; ``other`` may be a Matrix or anything numpy can dot."""
        if isinstance(other, Matrix):
            return Matrix(self._array.dot(other._array))
        else:
            return Matrix(self._array.dot(other))

    def invert(self):
        """Invert this matrix in place."""
        self._array = np.linalg.inv(self._array)
|
{"/WG.py": ["/SpriteLibraries.py"]}
|
31,352
|
tux2603/WG
|
refs/heads/master
|
/WG.py
|
import pyglet
import numpy as np
from SpriteLibraries import Sprite
from pyglet.window import key
# Gravity constant; negative is downward.
GRAVITY = -2.0

gameWindow = pyglet.window.Window(width=800, height=600, fullscreen=False)
keyboard = key.KeyStateHandler()
fpsDisplay = None

# Sprite batches (the real Batch objects are created in the __main__ block).
backgroundBatch = None
terrainBatch = None
mainBatch = None
foregroundBatch = None

# Per-tile terrain metadata, indexed by tile id.
# NOTE(review): every "batch" value below captures terrainBatch at module
# load time, when it is still None -- the Batch created later in __main__
# never reaches these dicts. Confirm whether the "batch" entries are used.
terrainData = (
    {
        "fileName": None,
        "batch": None,
        "blocksTop": False,
        "blocksBottom": False,
        "blocksLeft": False,
        "blocksRight": False,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirtBackground.png",
        "batch": terrainBatch,
        "blocksTop": False,
        "blocksBottom": False,
        "blocksLeft": False,
        "blocksRight": False,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirt.png",
        "batch": terrainBatch,
        "blocksTop": False,
        "blocksBottom": True,
        "blocksLeft": True,
        "blocksRight": True,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirtTop.png",
        "batch": terrainBatch,
        "blocksTop": True,
        "blocksBottom": False,
        "blocksLeft": False,
        "blocksRight": False,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirtLeft.png",
        "batch": terrainBatch,
        "blocksTop": False,
        "blocksBottom": False,
        "blocksLeft": True,
        "blocksRight": False,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirtRight.png",
        "batch": terrainBatch,
        "blocksTop": False,
        "blocksBottom": False,
        "blocksLeft": False,
        "blocksRight": True,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirtTopLeft.png",
        "batch": terrainBatch,
        "blocksTop": True,
        "blocksBottom": False,
        "blocksLeft": True,
        "blocksRight": False,
        "buoyancy": 0.0
    },
    {
        "fileName": "dirtTopRight.png",
        "batch": terrainBatch,
        "blocksTop": True,
        "blocksBottom": False,
        "blocksLeft": False,
        "blocksRight": True,
        "buoyancy": 0.0
    },
    {
        "fileName": "water.png",
        "batch": terrainBatch,
        "blocksTop": False,
        "blocksBottom": False,
        "blocksLeft": False,
        "blocksRight": False,
        "buoyancy": 0.45
    }
)

# Define sprites (populated in load()).
player = None
plunger1 = None
plunger2 = None
door = None
ground = [[]]

# Define images (populated in load()).
faceImage = None
cloudImage = None
doorImage = None
faceImage = None
gooieImage = None
ivyStalkImage = None
ivyImage = None
jellyImage = None
pixelImage = None
plungerImage = None
terrainImages = []

# Count of update ticks since start.
updates = 0
# function to load the resources
def load():
    """Load image resources and build the initial player sprite."""
    # Image handles assigned at module level.
    global faceImage, cloudImage, doorImage, gooieImage, ivyStalkImage, ivyImage, jellyImage, pixelImage, plungerImage, terrainImages
    # Sprites assigned at module level.
    global player

    # All resources live under the ./resources directory.
    pyglet.resource.path = ['resources']
    pyglet.resource.reindex()

    faceImage = pyglet.resource.image('images/face.png')

    # Terrain tile images; index 0 is the empty tile.
    terrainImages.append(None)
    tile_names = (
        'dirtBackground.png',
        'dirt.png',
        'dirtTop.png',
        'dirtLeft.png',
        'dirtRight.png',
        'dirtTopLeft.png',
        'dirtTopRight.png',
        'water.png',
    )
    for tile_name in tile_names:
        terrainImages.append(pyglet.resource.image('images/terrainImages/' + tile_name))

    player = Sprite(faceImage, batch=mainBatch)
    player.setSpeedAndDirection(1 / 60, 45)
    # Displays a testing bar of different terrain images
    # TODO: Would terrain be better handled as one giant/multiple large textures rendered at level load?
    # for i in range(len(terrainImages)):
    #     if not terrainImages[i] is None:
    #         ground[0].append(Sprite(terrainImages[i], batch=(backgroundBatch if i != 8 else foregroundBatch), x = i * 32, y=0))
    player.y = 50
@gameWindow.event
def on_draw():
    """Window draw handler: clear, then draw the batches back-to-front."""
    gameWindow.clear()
    backgroundBatch.draw()
    terrainBatch.draw()
    mainBatch.draw()
    foregroundBatch.draw()
    fpsDisplay.draw()  # FPS overlay drawn last, on top of everything
def update(dt):
    """Scheduled tick handler: run gameplay logic and bump the tick counter."""
    global updates
    updatePlaying(dt)
    updates = updates + 1
def updatePlaying(dt):
    """Advance one gameplay tick: keyboard input, simple physics, movement.

    Args:
        dt: elapsed time since the last update (not used directly; movement
            is applied per tick).
    """
    # TODO: Update the monsters
    # TODO: Get player collision status
    # NOTE(review): the collision flags below are placeholders (hard-coded
    # False) until real collision detection exists; onGround is a crude
    # y-position threshold.
    onGround = player.y <= 0.2
    inWater = False
    collidedRight = False
    collidedLeft = False
    dontBounce = False
    hitHead = False
    blockBuoyancy = 0.0
    ##########################################################################
    ######      Update player velocities based on keyboard input        ######
    ##########################################################################
    # Move left
    if (keyboard[pyglet.window.key.LEFT] or keyboard[key.A]) and not collidedLeft:
        player.x -= 2
    # Moving right
    if (keyboard[key.RIGHT] or keyboard[key.D]) and not collidedRight:
        player.x += 2
    # Moving up/jumping
    if keyboard[key.UP] or keyboard[key.W] or keyboard[key.SPACE]:
        # mark that the player shouldn't bounce
        dontBounce = True
        if onGround:
            player.accelerate((0, 5))
        elif inWater:
            player.setVelocityY(2)
    # Moving down (only meaningful while swimming)
    if keyboard[key.DOWN] or keyboard[key.S]:
        if inWater:
            player.setVelocityY(-2)
    # TODO Shoot plungers
    # TODO Rappel towards plungers
    # TODO Disappear plungers when you're done
    # float: apply the buoyancy of the block the player is in
    player.accelerate((0, blockBuoyancy))
    # If you're not on the ground, fall
    if not onGround:
        player.accelerate((0, -0.2))
    # If you are on the ground, bounce (damped and inverted vertically)
    elif not dontBounce:
        player.setVelocity((player._velocity[0] / 5, player._velocity[1] / -5))
    # If you hit grass on the side of things, bounce horizontally
    if collidedLeft or collidedRight:
        player.setVelocity((-player._velocity[0] / 2, player._velocity[1]))
    # If you hit your head... OUCH! Halve and invert the vertical speed.
    if hitHead:
        player.setVelocity((player._velocity[0], -player._velocity[1] / 2))
    # TODO Make camera track the face, make the face and camera stay inside the world
    # Move the face
    player.move()
    # TODO Move the plungers
    # TODO Move the clouds
if __name__ == '__main__':
    # Set the window background color (light sky blue).
    pyglet.gl.glClearColor(0.58823, 0.84313, 0.94117, 1.0)
    # Initialize game objects: FPS overlay, keyboard handler, draw batches.
    fpsDisplay = pyglet.window.FPSDisplay(window=gameWindow)
    gameWindow.push_handlers(keyboard)
    backgroundBatch = pyglet.graphics.Batch()
    terrainBatch = pyglet.graphics.Batch()
    mainBatch = pyglet.graphics.Batch()
    foregroundBatch = pyglet.graphics.Batch()
    # TODO Initialize the camera
    # TODO Set a cross hair for the mouse?
    load()
    # Run updates at 60 Hz and enter the pyglet event loop.
    pyglet.clock.schedule_interval(update, 1/60.0)
    pyglet.app.run()
|
{"/WG.py": ["/SpriteLibraries.py"]}
|
31,357
|
Shahid313/Python
|
refs/heads/main
|
/applicatioin/forms/forms.py
|
from flask_wtf import FlaskForm
from wtforms import TextAreaField,SubmitField
from wtforms.validators import DataRequired
from flask_wtf.file import FileField, FileAllowed, FileRequired
class InputForm(FlaskForm):
    """Form with two essay text areas and two submit actions.

    The view inspects which submit button carries data to decide between
    generating an essay report and running the plagiarism check.
    """
    # Two free-text areas; the second is only needed for the plagiarism check.
    input_field_one = TextAreaField('Write An Essay')
    input_field_two = TextAreaField('Write An Essay')
    generate_report = SubmitField("Generate Report")
    # BUG FIX: corrected user-facing label typo "Plagerism" -> "Plagiarism".
    check_plagiarism = SubmitField("Check Plagiarism")
|
{"/applicatioin/__init__.py": ["/applicatioin/Views/View.py"], "/applicatioin/Views/View.py": ["/applicatioin/__init__.py", "/applicatioin/forms/forms.py", "/applicatioin/Builders/ModelBuilder.py"]}
|
31,358
|
Shahid313/Python
|
refs/heads/main
|
/applicatioin/__init__.py
|
from flask import Flask, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
# NOTE(review): weak, hard-coded secret key checked into source control.
app.config['SECRET_KEY'] = "secret key"
# The two adjacent literals concatenate: user `root` with an EMPTY password
# at localhost, database `NLP`.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:''@localhost/NLP'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app,db)
# Imported after `app`/`db` exist because View.py imports `db` from this package
# (circular-import avoidance).
from applicatioin.Views.View import InputView
InputView.register(app)
|
{"/applicatioin/__init__.py": ["/applicatioin/Views/View.py"], "/applicatioin/Views/View.py": ["/applicatioin/__init__.py", "/applicatioin/forms/forms.py", "/applicatioin/Builders/ModelBuilder.py"]}
|
31,359
|
Shahid313/Python
|
refs/heads/main
|
/applicatioin/Views/View.py
|
from flask_classful import FlaskView, route
from applicatioin import db
from flask import render_template, request
from flask import redirect, url_for
from applicatioin.forms.forms import InputForm
from fuzzywuzzy import fuzz
import re
from applicatioin.Builders.ModelBuilder import *
class InputView(FlaskView):
    """Flask-Classful view: essay report generation and plagiarism checking."""

    def check_plagiarism(self, a, b):
        """Return a 0-100 fuzzy similarity score between texts *a* and *b*.

        Non-alphabetic characters are stripped before comparison.
        """
        a = re.sub("[^a-zA-Z]", "", a)
        b = re.sub("[^a-zA-Z]", "", b)
        print(fuzz.token_sort_ratio(a, b))
        return fuzz.token_sort_ratio(a, b)

    def count_pos(self, essay):
        """Count nouns/adjectives/verbs/adverbs in *essay* via NLTK POS tags.

        BUG FIX: this and the helpers below were defined without `self` and
        invoked as bare module-level names in input_text(), which raised
        NameError at request time.
        """
        tokenized_sentences = essay_to_sentences(essay, remove_stopwords=True)
        noun_count = 0
        adj_count = 0
        verb_count = 0
        adv_count = 0
        for sentence in tokenized_sentences:
            for _token, pos_tag in nltk.pos_tag(sentence):
                if pos_tag.startswith('N'):
                    noun_count += 1
                elif pos_tag.startswith('J'):
                    adj_count += 1
                elif pos_tag.startswith('V'):
                    verb_count += 1
                elif pos_tag.startswith('R'):
                    adv_count += 1
        return {"noun": noun_count, 'adj': adj_count, 'verbs': verb_count,
                'adverbs': adv_count}

    def spell_check(self, essay, suggest):
        """Return {misspelled_word: suggestions} using PyEnchant's en_US dict.

        When *suggest* is false an empty dict is returned (matching the
        original behavior, which only collected entries when suggesting).
        """
        import enchant
        d = enchant.Dict("en_US")
        misspelled = 0  # kept for parity with the original counter `c`
        response = {}
        for raw in essay.split(" "):
            token = re.sub("[^a-zA-Z]", "", raw)
            if token and not d.check(token):
                misspelled += 1
                if suggest:
                    response[str(token)] = d.suggest(token)
        return response

    def word_count(self, essay):
        """Return the number of word tokens in *essay* (stopwords included)."""
        return len(essay_to_wordlist(essay, remove_stopwords=False))

    def most_frequent_words(self, essay):
        """Return up to the 10 most frequent non-stopword tokens of *essay*."""
        words = essay_to_wordlist(essay, remove_stopwords=True)
        freq = nltk.FreqDist(w for w in words)
        # Robustness: the original indexed most_common(10)[i] for i in range(10)
        # and crashed (IndexError) on essays with fewer than 10 distinct words.
        return [word for word, _count in freq.most_common(10)]

    @route('/', methods=['POST', 'GET'])
    def input_text(self):
        """Main page: render the form; on POST either build the essay report
        or run the plagiarism check, depending on the submit button used."""
        form = InputForm()
        if request.method == 'POST':
            if form.validate_on_submit():
                if form.generate_report.data:
                    essay = form.input_field_one.data
                    mfw_list = self.most_frequent_words(essay)
                    wordCount = self.word_count(essay)
                    spellCheck = self.spell_check(essay, suggest=True)
                    part_of_speech = self.count_pos(essay)
                    # BUG FIX: was `spell_check=str(spell_check)` which
                    # stringified the method object, not the result.
                    return render_template('index.html', form=form, plg=None,
                                           most_frequent_words=str(mfw_list),
                                           word_count=wordCount,
                                           spell_check=str(spellCheck),
                                           part_of_speech=str(part_of_speech))
                else:
                    text_one = form.input_field_one.data
                    text_two = form.input_field_two.data
                    print(text_one)
                    return render_template("index.html", form=form,
                                           plg=str(self.check_plagiarism(text_one, text_two)))
        return render_template('index.html', form=form, plg=None)
|
{"/applicatioin/__init__.py": ["/applicatioin/Views/View.py"], "/applicatioin/Views/View.py": ["/applicatioin/__init__.py", "/applicatioin/forms/forms.py", "/applicatioin/Builders/ModelBuilder.py"]}
|
31,360
|
Shahid313/Python
|
refs/heads/main
|
/applicatioin/Builders/ModelBuilder.py
|
import pandas as pd
import numpy as np
import nltk
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# from gensim.models import Word2Vec
# from keras.layers import Embedding, LSTM, Dense, Dropout, Lambda, Flatten
# from keras.models import Sequential, load_model, model_from_config
# import keras.backend as K
# from sklearn.model_selection import KFold,train_test_split
# from sklearn.linear_model import LinearRegression
# from sklearn.metrics import cohen_kappa_score
# dataset=pd.read_excel("training_set.xlsx")
# X=dataset[['essay_set','essay']]
# X.drop(6973,axis=0,inplace=True)
# Y=dataset['domain1_score']
# Y.dropna(inplace=True)
def essay_to_wordlist(essay_v, remove_stopwords):
    """Tokenize an essay into lowercase alphabetic words.

    Every non-letter character is replaced by a space before splitting.
    When *remove_stopwords* is true, NLTK's English stopwords are dropped.
    """
    letters_only = re.sub("[^a-zA-Z]", " ", essay_v)
    tokens = letters_only.lower().split()
    if not remove_stopwords:
        return tokens
    stop_set = set(stopwords.words("english"))
    return [tok for tok in tokens if tok not in stop_set]
def lemmatize(words):
    """Return the WordNet lemma of each word in *words*, order preserved."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word) for word in words]
"""Sentence tokenize the essay and call essay_to_wordlist() for word tokenization."""
def essay_to_sentences(essay_v, remove_stopwords):
    """Sentence-tokenize *essay_v* (NLTK punkt) and word-tokenize each
    non-empty sentence via essay_to_wordlist()."""
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    raw_sentences = tokenizer.tokenize(essay_v.strip())
    return [essay_to_wordlist(raw, remove_stopwords)
            for raw in raw_sentences if len(raw) > 0]
# """Make Feature Vector from the words list of an Essay."""
# def makeFeatureVec(words, model, num_features):
# featureVec = np.zeros((num_features,),dtype="float32")
# num_words = 0.
# index2word_set = set(model.wv.index2word)
# for word in words:
# if word in index2word_set:
# num_words += 1
# featureVec = np.add(featureVec,model[word])
# featureVec = np.divide(featureVec,num_words)
# return featureVec
# """Main function to generate the word vectors for word2vec model."""
# def getAvgFeatureVecs(essays, model, num_features):
# counter = 0
# essayFeatureVecs = np.zeros((len(essays),num_features),dtype="float32")
# for essay in essays:
# essayFeatureVecs[counter] = makeFeatureVec(essay, model, num_features)
# counter = counter + 1
# return essayFeatureVecs
# def get_model():
# """Define the model."""
# model = Sequential()
# model.add(LSTM(300, dropout=0.25, recurrent_dropout=0.2, input_shape=[1, 300], return_sequences=True))
# model.add(LSTM(64, recurrent_dropout=0.2))
# model.add(Dropout(0.25))
# model.add(Dense(1, activation='relu'))
# model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['mae'])
# model.summary()
# return model
# ### Training
# cv = KFold(len(X), n_folds=5, shuffle=True)
# results = []
# y_pred_list = []
# count = 1
# for traincv, testcv in cv:
# print("\n--------Fold {}--------\n".format(count))
# X_test, X_train, y_test, y_train = X.iloc[testcv], X.iloc[traincv], y.iloc[testcv], y.iloc[traincv]
# train_essays = X_train['essay']
# test_essays = X_test['essay']
# sentences = []
# for essay in train_essays:
# # Obtaining all sentences from the training essays.
# sentences += essay_to_sentences(essay, remove_stopwords = True)
# # Initializing variables for word2vec model.
# num_features = 300
# min_word_count = 40
# num_workers = 4
# context = 10
# downsampling = 1e-3
# print("Training Word2Vec Model...")
# model = Word2Vec(sentences, workers=num_workers, size=num_features, min_count = min_word_count, window = context, sample = downsampling)
# model.init_sims(replace=True)
# model.wv.save_word2vec_format('word2vecmodel.bin', binary=True)
# clean_train_essays = []
# # Generate training and testing data word vectors.
# for essay_v in train_essays:
# clean_train_essays.append(essay_to_wordlist(essay_v, remove_stopwords=True))
# trainDataVecs = getAvgFeatureVecs(clean_train_essays, model, num_features)
# clean_test_essays = []
# for essay_v in test_essays:
# clean_test_essays.append(essay_to_wordlist( essay_v, remove_stopwords=True ))
# testDataVecs = getAvgFeatureVecs( clean_test_essays, model, num_features )
# trainDataVecs = np.array(trainDataVecs)
# testDataVecs = np.array(testDataVecs)
# # Reshaping train and test vectors to 3 dimensions. (1 represnts one timestep)
# trainDataVecs = np.reshape(trainDataVecs, (trainDataVecs.shape[0], 1, trainDataVecs.shape[1]))
# testDataVecs = np.reshape(testDataVecs, (testDataVecs.shape[0], 1, testDataVecs.shape[1]))
# lstm_model = get_model()
# lstm_model.fit(trainDataVecs, y_train, batch_size=64, epochs=50)
# #lstm_model.load_weights('./model_weights/final_lstm.h5')
# y_pred = lstm_model.predict(testDataVecs)
# # Save any one of the 8 models.
# if count == 5:
# lstm_model.save('./model_weights/final_lstm.h5')
# # Round y_pred to the nearest integer.
# y_pred = np.around(y_pred)
# # Evaluate the model on the evaluation metric. "Quadratic mean averaged Kappa"
# result = cohen_kappa_score(y_test.values,y_pred,weights='quadratic')
# print("Kappa Score: {}".format(result))
# results.append(result)
# count += 1
# X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.2)
# train_essays = X_train['essay']
# test_essays = X_test['essay']
# sentences = []
# for essay in train_essays:
# # Obtaining all sentences from the training essays.
# sentences += essay_to_sentences(essay, remove_stopwords = True)
# num_features = 300
# min_word_count = 40
# num_workers = 4
# context = 10
# downsampling = 1e-3
# print("Training Word2Vec Model.")
# model = Word2Vec(sentences, workers=num_workers, size=num_features, min_count = min_word_count, window = context, sample = downsampling)
# clean_train_essays = []
# # Generate training and testing data word vectors.
# for essay_v in train_essays:
# clean_train_essays.append(essay_to_wordlist(essay_v, remove_stopwords=True))
# trainDataVecs = getAvgFeatureVecs(clean_train_essays, model, num_features)
# clean_test_essays = []
# for essay_v in test_essays:
# clean_test_essays.append(essay_to_wordlist( essay_v, remove_stopwords=True ))
# testDataVecs = getAvgFeatureVecs( clean_test_essays, model, num_features )
# trainDataVecs = np.array(trainDataVecs)
# testDataVecs = np.array(testDataVecs)
# # Reshaping train and test vectors to 3 dimensions. (1 represnts one timestep)
# trainDataVecs = np.reshape(trainDataVecs, (trainDataVecs.shape[0], 1, trainDataVecs.shape[1]))
# testDataVecs = np.reshape(testDataVecs, (testDataVecs.shape[0], 1, testDataVecs.shape[1]))
# lstm_model = get_model()
# lstm_model.fit(trainDataVecs, y_train, batch_size=75, epochs=50)
# y_pred = lstm_model.predict(testDataVecs)
# # Round y_pred to the nearest integer.
# y_pred = np.around(y_pred)
# # Evaluate the model on the evaluation metric. "Quadratic mean averaged Kappa"
# result = cohen_kappa_score(y_test.values,y_pred,weights='quadratic')
# print("Kappa Score: {}".format(result))
|
{"/applicatioin/__init__.py": ["/applicatioin/Views/View.py"], "/applicatioin/Views/View.py": ["/applicatioin/__init__.py", "/applicatioin/forms/forms.py", "/applicatioin/Builders/ModelBuilder.py"]}
|
31,361
|
xdf020168/test-information-platform
|
refs/heads/master
|
/manage.py
|
# coding=utf-8
# author: Zeng YueTian
# manage script of hera system
# how to use it: nohup python manage.py runserver --host 0.0.0.0 >/dev/null 2>&1 &
# please see run.sh
import os
import threading
from flask import Flask, render_template
from flask_script import Manager
from flask_script import Shell
from app import create_app, db
from app.main.views import *
from app.main.threads_function import *
# Build the configured application via the app factory; config name comes
# from the FLASK_CONFIG environment variable, defaulting to 'default'.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# Flask-Script Manager: startup parameters are controlled by command-line options.
manager = Manager(app)
def make_shell_context():
    """
    Callback for the `shell` command: the objects returned here are
    pre-imported into the interactive shell session.
    :return: dict of names to expose (the app and the database instance)
    """
    # FIX: `db` is imported at the top of this module but was never exposed,
    # although the original (Chinese) docstring promised the database instance.
    return dict(app=app, db=db)
# Register the make_shell_context callback for the `shell` command.
manager.add_command("shell", Shell(make_context=make_shell_context))
# BUG FIX: removed `app = Flask(__name__)` here — it replaced the fully
# configured application created by create_app() above with a bare one, so
# `from manage import app` (e.g. in app/main/test_flask_sql.py) received an
# unconfigured app. `manager` keeps its own reference, so manager.run()
# below is unaffected by the removal.
if __name__ == '__main__':
    # Background worker threads that refresh the data rendered by the views.
    threads = []
    # get jenkins build failure data
    t_build_failure = threading.Thread(target=get_jenkins_build_failure_data)
    threads.append(t_build_failure)
    t_testlink_info = threading.Thread(target=get_test_link_case_info)
    threads.append(t_testlink_info)
    t_zentao_info = threading.Thread(target=get_zentao_bug_info)
    threads.append(t_zentao_info)
    t_jenkins_info = threading.Thread(target=get_ut_info)
    threads.append(t_jenkins_info)
    # now we get threads as [t_build_failure, t_testlink_info, t_zentao_info, t_jenkins_info]
    for t in threads:
        # Daemon threads terminate together with the main process.
        t.setDaemon(True)
        t.start()
        if t == threads[-2]:
            # NOTE(review): sleeps 10s after starting the second-to-last thread
            # (t_zentao_info), presumably to stagger startup load before the
            # Jenkins UT poller starts — confirm the intent; the identity
            # comparison against threads[-2] is fragile if the list changes.
            time.sleep(10)
    manager.run()
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,362
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/const.py
|
# coding=utf-8
# common constant
# jenkins master server url,port
jenkins_server_url = 'http://10.4.0.1:8080/'
# User Id API Token
user_id = 'jenkins_user'
# NOTE(review): API token committed to source control — rotate it and load
# from an environment variable or a config file outside the repo instead.
api_token = 'a36ea26fdd50a32f652e8d56d7cb86e3'  # need to be updated
# job_name: Jenkins jobs polled for build results, grouped by area.
job_names = [
    ##########################
    # sdk test
    ##########################
    'p2pclient_ut',
    'BJ-Auto-Test-Linux_SDK_Start',
    'BJ-Auto-Test-Linux_SDK_Check',
    'BJ-Auto-Test_Linux_SDK_Api',
    'BJ-Auto-Test_Linux_SDK_Init',
    'BJ-Auto-Test_Linux_SDK_Login',
    'BJ-Auto-Test_Linux_SDK_Penetrate',
    'BJ-Auto-Test_Linux_SDK_Routine',
    ##########################
    # server test
    ##########################
    'p2pserver_ut',
    'BJ-Auto-Test_Platform_Collect_Log',
    'BJ-Auto-Test_Server_API_Channel',
    # 'BJ-Auto-Test_Server_API_Dir',
    # 'BJ-Auto-Test_Server_API_Panel',
    'BJ-Auto-Test_Server_API_Report',
    'BJ-Auto-Test_Server_API_Stats',
    'BJ-Auto-Test_Server_API_Stun-hub',
    'BJ-Auto-Test_Server_API_Stun_Rrpc',
    'BJ-Auto-Test_Server_API_Stun_Stun',
    'BJ-Auto-Test_Server_API_Stun_Thunder',
    'BJ-Auto-Test_Server_API_TS',
    'BJ-Auto-Test_Server_API_p2p-ops',
    'BJ-Auto-Test_Server_API_Httpdns',
    'BJ-Auto-Test_Platform_Flume',
    'BJ-Auto-Test_Platform_Boss_Internal_Api',
    ##########################
    # deplopy build
    ##########################
    # 'BJ-Auto-Test-Deploy_Dir',
    'BJ-Auto-Test-Deploy_Funnel',
    'BJ-Auto-Test-Deploy_Get_DailyTest_Info',
    'BJ-Auto-Test-Deploy_httpdns',
    'BJ-Auto-Test-Deploy_kafka_flume',
    'BJ-Auto-Test-Deploy_p2p_channel',
    'BJ-Auto-Test-Deploy_p2p_live_channel',
    'BJ-Auto-Test-Deploy_p2p_stun2_go',
    'BJ-Auto-Test-Deploy_p2p_stun2_go_xunlei',
    'BJ-Auto-Test-Deploy_SRV_p2p_ops',
    'BJ-Auto-Test-Deploy_Stun-hub',
    'BJ-Auto-Test-Deploy_Tracker',
    'BJ-Auto-Test_DEPLOY_SRV_p2p_tracker_go',
    'BJ-Auto-Test_Zeus_Deploy',
    'develop_client_debug_build_windows',
    'develop_client_release_log_http_ubuntu64',
    'develop_client_release_nolog_http_centos',
    'develop_client_release_nolog_https_apk_sdk_ubuntu64'
]
# How many of the most recent build results to fetch per job.
LAST_BUILD_TIMES = 8
# zentao bug date_size
RECENT_DATE_SIZE = 20
# Rows per page on the feature_test progress page.
EVERY_PAGE_SIZE = 20
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,363
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/test_flask_sql.py
|
# just for function test !!!!
from manage import app
from flask_sqlalchemy import SQLAlchemy
# import sqlalchemy.orm as orm
# MySQL connection settings for the local test database.
MYSQL_HOST = "localhost"
MYSQL_PORT = 3306
MYSQL_UE_USER = "root"
# NOTE(review): hard-coded DB credentials committed to source control.
MYSQL_PASSWORD = "rootPass"
MYSQL_DB = "hera"
# dialect+driver://username:password@host:port/database?charset=utf8
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://{0}:{1}@{2}:{3}/{4}".format(MYSQL_UE_USER, MYSQL_PASSWORD,
                                                                             MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
db = SQLAlchemy(app)
# engine = get_engine(app)
# DBSession = orm.sessionmaker(bind=engine)
class User(db.Model):
    """Minimal user model used by this file's ad-hoc DB connectivity check."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(60))
    passwd = db.Column(db.String(30))
    def __init__(self, name, passwd):
        self.name = name
        self.passwd = passwd
    def __repr__(self):
        return '<User %r>' % self.name
class Machines_info(db.Model):
    """Machine inventory row: ip (primary key), credentials, hardware specs."""
    ip = db.Column(db.String(20), primary_key=True)
    username = db.Column(db.String(60))
    passwd = db.Column(db.String(30))
    cpu = db.Column(db.Integer)
    memory = db.Column(db.Integer)
    def __init__(self, ip, username=None, passwd=None, cpu=None, memory=None):
        # BUG FIX: the old __init__(name, passwd) was copy-pasted from User and
        # assigned self.name (not a column), leaving ip/cpu/memory unset and
        # making __repr__ (which formats all five columns) fail.
        self.ip = ip
        self.username = username
        self.passwd = passwd
        self.cpu = cpu
        self.memory = memory
    def __repr__(self):
        return '%s,%s,%s,%d,%d' % (self.ip, self.username, self.passwd, self.cpu, self.memory)
def get_machines_list():
"""
body_data = [
{"ip": "192.168.1.153", "username": "root", "passwd": "rootPass", "cpu": 8, "memory": 16},
{"ip": "192.168.1.154", "username": "root", "passwd": "rootPass", "cpu": 8, "memory": 16}
]
return body_data
"""
body_data = []
machines_list = Machines_info.query.all()
print machines_list
for single_pc in machines_list:
tmp_dict = dict()
tmp_dict["ip"] = single_pc.ip
tmp_dict["username"] = single_pc.username
tmp_dict["passwd"] = single_pc.passwd
tmp_dict["cpu"] = single_pc.cpu
tmp_dict["memory"] = single_pc.memory
body_data.append(tmp_dict)
return body_data
def delete_machines_info(ip):
    """Delete the Machines_info row whose primary key equals *ip*."""
    doomed = Machines_info.query.filter_by(ip=ip).first()
    db.session.delete(doomed)
    db.session.commit()
    db.session.close()
if __name__ == "__main__":
    # Smoke test: requires a reachable MySQL instance (Python 2 print statement).
    users = User.query.all()
    print users
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,364
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/test_link_parser.py
|
# coding=utf-8
# author: zengyuetian
# get test case and plan info from test link
from testlink import *
url = 'http://sub.site.com/lib/api/xmlrpc/v1/xmlrpc.php'  # XML-RPC endpoint; host is the TestLink installation
key = 'a16641a2cea5d780ae685beea32dcef1'  # API key; NOTE(review): secret committed to source control
def get_manual_case_info():
    """
    Collect, for each first-level test suite of the first project,
    the suite name and its test-case count.
    :return: list of {'name': <suite name>, 'number': <case count>} dicts
    """
    test_link_obj = TestlinkAPIClient(url, key)
    # First project returned by the TestLink server.
    animbus = test_link_obj.getProjects()[0]
    top_suites = test_link_obj.getFirstLevelTestSuitesForTestProject(animbus['id'])
    info = []
    for suite in top_suites:
        cases = test_link_obj.getTestCasesForTestSuite(suite['id'], 10, "")
        info.append({'name': suite['name'], 'number': len(cases)})
    return info
def test_progress():
    """
    Summarize execution progress for every active test plan of the first project.
    :return: list like [{'name': 'ios_3.9发布测试 RC2', 'total': 356, 'non_exec': 20}, ...]
    """
    client = TestlinkAPIClient(url, key)
    project = client.getProjects()[0]
    progress = []
    for plan in client.getProjectTestPlans(project['id']):
        if plan['active'] != '1':
            continue
        try:
            totals = client.getTotalsForTestPlan(plan['id'])
        except Exception:
            # Some plans have no retrievable totals; skip them, as before.
            continue
        progress.append(process_plan_result(plan['name'], totals))
    return progress
def process_plan_result(plan_name, plan_result_dict):
    """
    Flatten a TestLink getTotalsForTestPlan() response into a progress summary.

    The server returns the 'with_tester' and 'total' sections either as a dict
    keyed by platform id or as a plain list of per-platform dicts; both shapes
    are handled (examples of each appear in the API responses).

    :param plan_name: display name of the test plan
    :param plan_result_dict: raw response with 'with_tester'/'total'/'platforms'
    :return: {'name': plan_name, 'total': <case count>, 'non_exec': <not-run count>}
    """
    def _platform_entries(section):
        # Yield the per-platform dicts regardless of container shape;
        # anything that is neither a dict element nor a str key is ignored.
        for item in section:
            if isinstance(item, dict):
                yield item
            elif isinstance(item, str):
                yield section[item]

    non_exec_number = sum(int(entry['n']['exec_qty'])
                          for entry in _platform_entries(plan_result_dict['with_tester']))
    total_case_number = sum(int(entry['qty'])
                            for entry in _platform_entries(plan_result_dict['total']))
    return {'name': plan_name, 'total': total_case_number, 'non_exec': non_exec_number}
if __name__ == "__main__":
    # Manual smoke test against the live TestLink server (Python 2 print).
    print test_progress()
# tlc = TestlinkAPIClient(url, key) # initialize TestlinkAPIClient object named tlc
# # 下面这些是获得制定test plan的case和每个case的进度
# tp = tlc.getTestPlanByName("Test Case", "ios_3.9 发布测试计划")
# print "tp:", tp
#
# totals = tlc.getTotalsForTestPlan(3026)
# print totals
#
# tcs = tlc.getTestCasesForTestPlan(3026)
# print tcs
# print len(tcs)
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,365
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/views.py
|
# coding=utf-8
# author: zengyuetian
# add sqlalchemy function by dh
import json
import pickle
from flask import render_template, redirect, url_for, request, flash, jsonify
from lib import get_root_path, sort_by_status
from const import EVERY_PAGE_SIZE
from app.database.database import db, Machines, Features
import threads_function
from . import main
# Latest auto-test case counts; populated by auto_test() on GET and read by auto_rate().
daily_case = {}
# Route decorators are provided by the blueprint (`main`).
@main.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@main.route('/help')
def help():  # NOTE(review): shadows the builtin `help` at module level
    """Render the static help page."""
    return render_template('help.html')
@main.route('/user/<name>')
def user(name):
    """Render the user page for the given path segment *name*."""
    return render_template('user.html', name=name)
@main.route('/bug_status')
def get_bugs_status():
    """
    Render the bug-status page from the shared `threads_function.bug_status`
    value — presumably refreshed by the background threads started in
    manage.py; confirm in threads_function.
    :return: rendered bug_status.html
    """
    # global bug_status
    # print "bug_status", threads_function.bug_status
    return render_template('bug_status.html', bug_status=threads_function.bug_status)
@main.route('/bug_info')
def get_bug_info():
    """
    Render newly-found bugs sorted by key, ascending.
    NOTE: `.iteritems()` is Python 2 only (this module targets Python 2).
    :return: rendered bug_info.html
    """
    # global newly_bug
    # print "newly_bug", threads_function.newly_bug
    sorted_bug_info = sorted(threads_function.newly_bug.iteritems(), key=lambda d: d[0], reverse=False)
    # print "sorted_bug_info", sorted_bug_info
    return render_template('bug_info.html', bug_info=sorted_bug_info)
@main.route('/regression_progress')
def get_test_progress():
    """
    Render regression progress from `threads_function.test_plan_progress`.
    test_plan_info: [{'name': 'FunctionTest', 'total': 356, 'non-exec': 20}, ...]
    :return: rendered regression_progress.html
    """
    # global test_plan_progress
    # print "test_plan_progress:{0}".format(threads_function.test_plan_progress)
    # test_data = [{'name': 'ab', 't': 3}, {'name': 'ggg', 't': 5}]
    # print "type:", type(test_data)
    return render_template('regression_progress.html', test_plan_progress=threads_function.test_plan_progress)
@main.route('/auto_rate')
def auto_rate():
    """Render the automation-ratio page: total automated vs. manual case counts."""
    global daily_case
    # Automated count comes from the module-level cache filled by auto_test();
    # manual count comes from the TestLink suites in threads_function.manual_case.
    auto_case_num = sum(daily_case[key] for key in daily_case)
    manual_case_num = sum(category['number'] for category in threads_function.manual_case)
    case_distribution = {
        'auto_case_num': auto_case_num,
        'manual_case_num': manual_case_num,
    }
    return render_template('auto_rate.html', case_distribution=case_distribution)
@main.route('/auto_test', methods=['GET', 'POST'])
def auto_test():
    """
    POST: persist the posted payload (daily auto-test case counts) to a pickle.
    GET: load the pickle, cache it in the module-level `daily_case`, and render
    the counts sorted by case count, descending.

    for example:
        dailycase: {u'platform_collect_sdk': 2, u'stats': 41, u'panel': 94}
    After sorting it will be a list.

    NOTE(review): the GET path runs json.loads() on the unpickled value, so
    POST callers are expected to send a JSON-encoded *string* — verify.
    """
    pkl_file = get_root_path() + "/app/data/auto_test.pkl"
    if request.method == 'POST':
        data = request.get_json()
        # FIX: use `with` so the handle is closed even if pickling raises
        # (the old open/close pair leaked the handle on error).
        with open(pkl_file, "wb") as fil:
            pickle.dump(data, fil)
        return str(data)
    else:
        with open(pkl_file, "rb") as fil:
            data = pickle.load(fil)
        global daily_case
        daily_case = json.loads(data)
        # `.iteritems()` is Python 2 only (this module targets Python 2).
        sorted_daily_case = sorted(daily_case.iteritems(), key=lambda d: d[1], reverse=True)
        return render_template("dailytest_case_info.html", daily_case=sorted_daily_case)
@main.route('/jenkins')
def get_jenkins_builds_status():
    """
    Return the failed-build summary as a raw JSON string — presumably fetched
    by the /jenkins_builds page (json_data.html); verify in the template.
    :return: JSON-encoded `threads_function.failed_build_dict`
    """
    # global failed_build_dict
    # print "failed_build_dict from get_jenkins_builds_status:", threads_function.failed_build_dict
    json_data = json.dumps(threads_function.failed_build_dict)
    return json_data
@main.route('/unit_test')
def get_ut_data():
    """
    Render unit-test results from the shared `threads_function.ut_info`.
    :return: rendered unit_test.html
    """
    # global ut_info
    # print ut_info
    return render_template('unit_test.html', ut_info=threads_function.ut_info)
@main.route('/jenkins_builds')
def display_jenkins_failed_builds():
    """
    Render the failed-builds page shell; the template is expected to pull the
    actual data from the /jenkins endpoint — confirm in json_data.html.
    :return: rendered json_data.html
    """
    return render_template('json_data.html')
@main.route('/manual_test')
def display_test_link_cases():
    """
    Render TestLink manual-case counts from `threads_function.manual_case`.
    test_link_case: [{'name': 'FunctionTest', 'number': 356}, {'name': 'PerformanceTest', 'number': 25}]
    :return: rendered test_link_case_info.html
    """
    # global manual_case
    # print "manual_case:", manual_case
    # print "type:", type(manual_case)
    return render_template('test_link_case_info.html', test_case=threads_function.manual_case)
@main.route('/feature_test_progress')
def features():
    """
    Render the paginated feature-test progress board.

    Each Features row carries nine workflow-stage flags (demand, test_schema,
    review, achieve, environment, execute, report, archive, storing) which are
    concatenated into one digit-string `value` for the template:
        0: not started
        1: in progress
        2: done
    Example of the structure handed to the template:
    feature_tests = [
        {'name': 'HttpDns', 'id': '6', 'value': '21000000'},
        {'name': 'Opt_Report', 'id': '7', 'value': '22220000'},
        {'name': 'kafka-flume', 'id': '8', 'value': '00000000'}
    ]
    :return: rendered feature_test_progress.html with one page of rows
    """
    # if request.method == "POST":
    #     forms = request.form
    #     # print forms.get()
    #     name_list = forms.to_dict().keys()
    #     print "features:POST", name_list[0]
    #f
    #     new_feature = Features(name_list[0])
    #     db.session.add(new_feature)
    #     db.session.commit()
    page = request.args.get('page')
    # print "page:", type(page), page
    if page is None:
        page = 1
    else:
        page = int(page)
    # Compute the slice bounds for the requested page.
    slice_start = (page - 1) * EVERY_PAGE_SIZE
    slice_stop = page * EVERY_PAGE_SIZE
    feature_tests_info = Features.query.all()
    db.session.close()  # MUST to close session to make data not cached
    all_feature_tests = []
    for single_feature in feature_tests_info:
        tmp_feature_dict = dict()
        # print "single_feature", type(single_feature)
        tmp_feature_dict['id'] = str(single_feature.id)
        tmp_feature_dict['name'] = str(single_feature.feature_name)
        # print type(single_feature.id), type(single_feature.feature_name), type(single_feature.demand)
        tmp_feature_dict['value'] = \
            str(single_feature.demand) + str(single_feature.test_schema) + str(single_feature.review) \
            + str(single_feature.achieve) + str(single_feature.environment) + str(single_feature.execute) \
            + str(single_feature.report) + str(single_feature.archive) + str(single_feature.storing)
        all_feature_tests.append(tmp_feature_dict)
    # print "all_feature_tests:", all_feature_tests
    page_info = dict()
    # NOTE: relies on Python 2 integer `/`; under Python 3 this yields a float.
    page_info["total_page"] = (len(all_feature_tests) - 1) / EVERY_PAGE_SIZE + 1
    page_info["current_page"] = page
    sorted_feature_tests = sort_by_status(all_feature_tests)
    sorted_feature_tests = sorted_feature_tests[slice_start:slice_stop]
    # print "features: ", sorted_feature_tests
    # print "sorted_feature_tests:", sorted_feature_tests
    return render_template('feature_test_progress.html',
                           sorted_feature_tests=sorted_feature_tests,
                           page_info=page_info)
@main.route('/add-feature', methods=['GET', 'POST'])
def add_feature_test():
    """Create a new Features row from the posted `featurename` field, then
    redirect back to the progress board."""
    feature_name = str(request.form['featurename'])
    record = Features(feature_name)
    db.session.add(record)
    db.session.commit()
    db.session.close()
    return redirect(url_for('main.features'))
@main.route('/remove-feature', methods=['GET', 'POST'])
def remove_feature_test():
    """Delete the Features row whose id arrives as the sole form key."""
    if request.method == "POST":
        # The client posts the id as the (only) form field name.
        feature_id = list(request.form.to_dict().keys())[0]
        doomed = Features.query.filter_by(id=feature_id).first()
        db.session.delete(doomed)
        db.session.commit()
        db.session.close()
    return jsonify({'ok': True})
@main.route('/change-feature-name', methods=['GET', 'POST'])
def modify_feature_name():
    """Rename an existing feature; expects `feature_id` and `name` form fields."""
    if request.method == "POST":
        payload = request.form.to_dict()
        feature = Features.query.filter_by(id=payload['feature_id']).first()
        feature.feature_name = payload['name']
        db.session.commit()
        db.session.close()
    return jsonify({'ok': True})
@main.route('/update-feature', methods=['GET', 'POST'])
def update_feature_status():
    """
    Update one workflow-stage flag of a feature.
    Expects form fields: `feature_id`, `flow` (stage column name), `value`.
    :return: JSON {'ok': True}
    """
    # Whitelist of mutable stage columns; replaces the old nine-branch
    # if/elif chain with a single setattr (identical behavior: an unknown
    # `flow` changes nothing but the commit still runs, as before).
    allowed_flows = ('demand', 'test_schema', 'review', 'achieve', 'environment',
                     'execute', 'report', 'archive', 'storing')
    if request.method == "POST":
        payload = request.form.to_dict()
        flow = payload['flow']
        value = payload['value']
        feature = Features.query.filter_by(id=payload['feature_id']).first()
        if flow in allowed_flows:
            setattr(feature, flow, value)
        db.session.commit()
        db.session.close()
    return jsonify({'ok': True})
@main.route('/machines_info', methods=['GET', 'POST'])
def machines():
    """
    Render all machine rows.
    NOTE(review): unlike the mutating views, no db.session.close() here — confirm
    whether stale reads matter on this page.
    :return: rendered machines_info.html
    """
    # print "machines_info"
    machines_info = Machines.query.all()
    return render_template('machines_info.html', machines_info=machines_info)
@main.route('/pc_info', methods=['GET', 'POST'])
def pcs():
    """
    Render the same machine rows as machines(), but with the pc_info template.
    :return: rendered pc_info.html
    """
    # print "pc_info"
    machines_info = Machines.query.all()
    return render_template('pc_info.html', machines_info=machines_info)
@main.route('/add-machines', methods=['GET', 'POST'])
def add_machines():
    """Insert a new Machines row from the submitted form; IP is mandatory."""
    form = request.form
    ip = form.get('IP')
    # Guard clause: reject empty IP up front (flash message is user-facing).
    if not ip:
        flash('IP不能为空!')
        return redirect(url_for('main.machines'))
    new_machine = Machines(ip, form.get('Username'), form.get('Passwd'),
                           form.get('CPU'), form.get('Memory'))
    db.session.add(new_machine)
    db.session.commit()
    db.session.close()
    return redirect(url_for('main.machines'))
@main.route('/delete-machines/<string:ip>')
def delete_machines(ip):
    """Remove the machine identified by *ip*; responds 404 when absent."""
    doomed = Machines.query.get_or_404(ip)
    db.session.delete(doomed)
    db.session.commit()
    db.session.close()
    return redirect(url_for('main.pcs'))
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,366
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/jenkins_ut.py
|
# coding=utf-8
# author: zengyuetian
# get ut information from jenkins builds
import sys
import re
import urllib2
import urllib
import requests
import cookielib
from bs4 import BeautifulSoup
# 这段代码是用于解决中文报错的问题
reload(sys)
sys.setdefaultencoding("utf8")
login_url = 'http://10.4.0.1:8080/j_acegi_security_check'
builds = ["p2pclient_ut", "p2pserver_ut"]
client_referer_url = "http://10.4.0.1:8080/view/p2pclient_ut/job/p2pclient_ut/lastCompletedBuild/testReport"
server_referer_url = "http://10.4.0.1:8080/view/p2pserver/job/p2pserver_ut/lastCompletedBuild/testReport"
class Login(object):
    """Authenticated HTTP session against the Jenkins master.

    Installs a cookie-aware urllib2 opener so the ACEGI login cookie is
    reused by subsequent requests (Python 2 urllib2/cookielib).
    """
    def __init__(self):
        # Unused placeholders kept from the original author (note the
        # 'passwprd' typo); credentials actually live in login().
        self.name = ''
        self.passwprd = ''
        self.cj = cookielib.LWPCookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
    def login(self, referer_url=None):
        '''Log in to the Jenkins site via the ACEGI security check.'''
        # NOTE(review): hardcoded credentials -- move to config/secrets.
        login_params = {"j_username": "zengyuetian", "j_password": "vliQh3U2byob", "remember_me": False, "from": "/"}
        headers = {
            'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
            'Accept': "text/html",
            # "Accept-Encoding": "gzip, deflate"
        }
        # req = urllib2.Request(loginurl, urllib.urlencode(loginparams), headers=headers)
        req = urllib2.Request(login_url, urllib.urlencode(login_params), headers=headers)
        response = urllib2.urlopen(req)
        # Second fetch through the cookie-aware opener stores the session
        # cookie; the plain urlopen above appears redundant but is kept.
        self.operate = self.opener.open(req)
        # thePage = response.read()
        # print thePage
        # req = urllib2.Request(referer_url, None, headers=headers)
        # print referer_url
        # response = urllib2.urlopen(req)
        # print response
        # self.operate = self.opener.open(req)
        # thePage = response.read()
        # return thePage
    def get_ut_data(self, referer_url):
        """Fetch the given testReport page and return its raw HTML."""
        headers = {
            'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
            'Accept': "text/html",
            # "Accept-Encoding": "gzip, deflate"
        }
        req = urllib2.Request(referer_url, None, headers=headers)
        print referer_url
        response = urllib2.urlopen(req)
        print response
        self.operate = self.opener.open(req)
        thePage = response.read()
        return thePage
def parse_ut_test_html(page):
    """Extract the unit-test case count from a Jenkins testReport page.

    Scans right-aligned <div> elements for the counter text that looks
    like ``"NN   tests\\n(±M)"`` and delegates the number extraction.

    :param page: raw HTML of the lastCompletedBuild testReport page
    :return: the extracted count, or 0 when no counter div is found
    """
    soup = BeautifulSoup(page, "html.parser")
    for div in soup.find_all(name="div", attrs={'align': 'right'}):
        if len(div.contents) != 1:
            continue
        text = str(div.contents[0])
        if "tests\n" in text and "(±" in text:
            # First matching counter wins, same as the original break.
            return process_div_get_ut_test(text)
    return 0
def process_div_get_ut_test(div_string):
    """Pull the numeric test count out of a counter div's text.

    The text contains a line such as ``"46   tests"``; the token just
    before the trailing word ``tests`` is the count.  When several lines
    match, the last one wins (preserving the original scan order).

    :param div_string: newline-separated text of the counter div
    :return: the count as a string, or 0 when no line matches
    """
    result = 0
    for line in div_string.split("\n"):
        if "tests" not in line:
            continue
        words = line.split(" ")
        if words[-1] == 'tests':
            result = words[-2]
    return result
def get_ut_num():
    """Log in to Jenkins and collect the UT case counts for both builds.

    :return: dict mapping build name ("p2pclient_ut"/"p2pserver_ut")
        to its integer test count
    """
    session = Login()
    session.login()
    # Client page is fetched before the server page, as before.
    return {
        "p2pclient_ut": int(parse_ut_test_html(session.get_ut_data(client_referer_url))),
        "p2pserver_ut": int(parse_ut_test_html(session.get_ut_data(server_referer_url))),
    }
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statement): dump the UT counts.
    print get_ut_num()
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,367
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/__init__.py
|
# coding=utf-8
# author: zengyuetian
# Flask uses the blueprint concept to build reusable application
# components and share common patterns within or across applications.
from flask import Blueprint
main = Blueprint('main', __name__)
# Imported at the end to avoid a circular import dependency.
from . import views, errors
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,368
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/zentao_parser.py
|
# coding=utf-8
# author: zengyuetian
import cookielib
import sys
import urllib
import urllib2
from bs4 import BeautifulSoup
from app.main.const import *
# 这段代码是用于解决中文报错的问题
reload(sys)
sys.setdefaultencoding("utf8")
login_url = 'http://192.168.0.1:8080/zentao/user-login.html'
referer_url = "http://192.168.0.1:8080/zentao/bug-report-1-unclosed-0.html"
class Login(object):
    """Authenticated HTTP session against the Zentao bug tracker.

    Installs a cookie-aware urllib2 opener so the login cookie is
    reused when fetching chart pages (Python 2 urllib2/cookielib).
    """
    def __init__(self):
        # Unused placeholders (note the 'passwprd' typo); credentials
        # actually live in login().
        self.name = ''
        self.passwprd = ''
        self.cj = cookielib.LWPCookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
    def login(self, chart_param):
        """Log in and fetch one chart page from the bug-report view.

        :param chart_param: url-encoded POST body selecting the chart,
            e.g. ``"charts%5B%5D=openedBugsPerDay"``
        :return: raw HTML of the requested chart page
        """
        # NOTE(review): hardcoded credentials -- move to config/secrets.
        loginparams = {"account": "admin", "password": "adminPass", "keepLogin[]": "on"}
        headers = {
            'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
            'Accept': "text/html",
            # "Accept-Encoding": "gzip, deflate"
        }
        req = urllib2.Request(login_url, urllib.urlencode(loginparams), headers=headers)
        # req = urllib2.Request(login_url, login_params, headers=headers)
        response = urllib2.urlopen(req)
        self.operate = self.opener.open(req)
        thePage = response.read()
        # print thePage
        # POST the chart selector to the report page with the session cookie.
        query_params = chart_param
        req = urllib2.Request(referer_url, query_params, headers=headers)
        response = urllib2.urlopen(req)
        self.operate = self.opener.open(req)
        opened_bugs_page = response.read()
        # print opened_bugs_page
        return opened_bugs_page
def parse_newly_bug_html(page):
    """Parse Zentao's opened-bugs-per-day chart page.

    :param page: raw HTML of the bug-report chart page
    :return: dict mapping date label -> opened-bug count, limited to
        the most recent RECENT_DATE_SIZE entries
    """
    soup = BeautifulSoup(page, "html.parser")
    dates = [
        str(cell.contents[0])
        for cell in soup.find_all(name="td", attrs={'class': 'chart-label'})
        if len(cell.contents) == 1
    ]
    counts = [
        int(cell.contents[0])
        for cell in soup.find_all(name="td", attrs={'class': 'chart-value'})
        if len(cell.contents) == 1
    ]
    # Keep only the trailing RECENT_DATE_SIZE entries of each column.
    return dict(zip(dates[-RECENT_DATE_SIZE:], counts[-RECENT_DATE_SIZE:]))
def parse_bug_status_html(page):
    """Parse Zentao's bugs-per-status chart page.

    :param page: raw HTML of the bug-report chart page
    :return: dict mapping status label -> bug count
    """
    soup = BeautifulSoup(page, "html.parser")
    labels = [
        str(cell.contents[0])
        for cell in soup.find_all(name="td", attrs={'class': 'chart-label'})
        if len(cell.contents) == 1
    ]
    counts = [
        int(cell.contents[0])
        for cell in soup.find_all(name="td", attrs={'class': 'chart-value'})
        if len(cell.contents) == 1
    ]
    return dict(zip(labels, counts))
def get_newly_bug_info():
    """Fetch and parse the opened-bugs-per-day chart from Zentao."""
    page = Login().login("charts%5B%5D=openedBugsPerDay")
    return parse_newly_bug_html(page)
def get_all_bugs_status():
    """Fetch and parse the bugs-per-status chart from Zentao."""
    page = Login().login("charts%5B%5D=bugsPerStatus")
    return parse_bug_status_html(page)
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statements).
    print get_newly_bug_info()
    print get_all_bugs_status()
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,369
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/database/database.py
|
# coding=utf-8
# feature test db lib
from flask_sqlalchemy import SQLAlchemy
from manage import app
MYSQL_HOST = "localhost"
MYSQL_PORT = 3306
MYSQL_USER = "root"
MYSQL_PASSWORD = "rootPass"
MYSQL_DB = "hera"
# dialect+driver://username:password@host:port/database?charset=utf8
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://{0}:{1}@{2}:{3}/{4}".format(MYSQL_USER, MYSQL_PASSWORD,
MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True # 每次請求結束后都會自動提交數據庫中的變動
db = SQLAlchemy(app) # db表示正在使用的數據庫
# 用於跟蹤測試進度的表
# 用來增刪改查該表
class Features(db.Model):
    """Row in feature_test_progress tracking one feature through the
    test workflow; each stage column holds a one-character flag.
    """
    __tablename__ = 'feature_test_progress'
    id = db.Column(db.Integer, primary_key=True)  # auto-increment id
    feature_name = db.Column(db.String(60))  # task name
    demand = db.Column(db.String(1))  # requirement stage
    test_schema = db.Column(db.String(1))  # test concept/plan stage
    review = db.Column(db.String(1))  # review stage
    achieve = db.Column(db.String(1))  # implementation stage
    environment = db.Column(db.String(1))  # environment stage
    execute = db.Column(db.String(1))  # execution stage
    report = db.Column(db.String(1))  # report stage
    archive = db.Column(db.String(1))  # archive stage
    storing = db.Column(db.String(1))  # check-in stage
    def __init__(self, feature_name, demand="0", test_schema="0", review="0", achieve="0", environment="0",
                 execute="0", report="0", archive="0", storing="0"):
        # All stage flags default to "0" (not started).
        self.feature_name = feature_name
        self.demand = demand
        self.test_schema = test_schema
        self.review = review
        self.achieve = achieve
        self.environment = environment
        self.execute = execute
        self.report = report
        self.archive = archive
        self.storing = storing
    def __repr__(self):
        # id, name, then the nine stage flags concatenated.
        return '{0} {1} {2}{3}{4}{5}{6}{7}{8}{9}{10}'.format(self.id, self.feature_name, self.demand, self.test_schema,
                                                             self.review, self.achieve, self.environment,
                                                             self.execute, self.report, self.archive,
                                                             self.storing)
class Machines(db.Model):
    """Row in machines_info describing a lab machine, keyed by IP."""
    __tablename__ = 'machines_info'
    ip = db.Column(db.String(20), primary_key=True)  # machine IP (primary key)
    username = db.Column(db.String(60))  # login user
    passwd = db.Column(db.String(30))  # login password (plain text in DB)
    cpu = db.Column(db.Integer)  # CPU core count
    memory = db.Column(db.Integer)  # memory size
    def __init__(self, ip, username, passwd, cpu, memory):
        self.ip = ip
        self.username = username
        self.passwd = passwd
        self.cpu = cpu
        self.memory = memory
    def __repr__(self):
        return '<Machine {0} {1} {2} {3} {4}>'.format(self.ip, self.username, self.passwd, self.cpu, self.memory)
if __name__ == "__main__":
    # Ad-hoc smoke test (Python 2 print statements).
    feature_tests_info = Features.query.all()
    print "1", feature_tests_info
    # NOTE(review): 'Httpdns' looks like a feature_name, but get_or_404
    # looks up the integer primary key -- confirm this ever worked.
    feature = Features.query.get_or_404('Httpdns')
    print "2", feature
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,370
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/lib.py
|
# coding=utf-8
# author: zengyuetian
# common lib for hera
import os
import inspect
import sys
def get_root_path():
    """Return the automation-framework root directory.

    The root is three directory levels above this module's file
    (module -> main package -> app package -> project root).
    """
    file_path = os.path.abspath(inspect.getfile(sys.modules[__name__]))
    # Walk up: module file -> main/ -> app/ -> project root.
    return os.path.dirname(os.path.dirname(os.path.dirname(file_path)))
def sort_by_status(ori_list):
    """Bubble-sort the feature-progress list in place.

    Uses compare_two_features as the "should swap" predicate, so the
    resulting order is whatever that comparator defines.

    :param ori_list: list of feature dicts (each with an 'id' key)
    :return: the same list object, sorted
    """
    size = len(ori_list)
    for done in range(size - 1):
        for pos in range(size - 1 - done):
            if compare_two_features(ori_list[pos], ori_list[pos + 1]):
                ori_list[pos], ori_list[pos + 1] = (
                    ori_list[pos + 1],
                    ori_list[pos],
                )
    return ori_list
def compare_two_features(feature_a, feature_b):
    """Decide whether *feature_a* should be swapped after *feature_b*.

    Intended order: by status value first, then by id.  Ordering by the
    'value' field is currently disabled (both statuses are pinned to 0),
    so comparison always falls through to the id.

    :param feature_a: feature dict with an 'id' key
    :param feature_b: feature dict with an 'id' key
    :return: True when a should come after b, else False
    """
    # ordered-by-complement variant (disabled):
    # status_a = feature_a['value']; status_b = feature_b['value']
    status_a = 0
    status_b = 0
    if status_a != status_b:
        return status_a > status_b
    # Equal status: a smaller id sorts later (descending-id bubble sort).
    return feature_a['id'] < feature_b['id']
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,371
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/models.py
|
# coding=utf-8
# author: zengyuetian
from . import db
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,372
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/threads_function.py
|
# coding=utf-8
# author: dh
# common functions
import time
from app.main.jenkins_ut import *
from app.main.robot_parser import *
from app.main.test_link_parser import *
from app.main.zentao_parser import *
# threads variable
failed_build_dict = dict()
newly_bug = dict()
bug_status = dict()
ut_info = dict()
manual_case = list()
test_plan_progress = list()
def get_jenkins_build_failure_data():
    """Background-thread loop: refresh Jenkins failed-build data.

    Polls Jenkins every 10 seconds and publishes the result into the
    module-level ``failed_build_dict`` read by the web views.
    :return: never returns (infinite loop)
    """
    while True:
        jenkins_handle = init_jenkins()
        global failed_build_dict
        failed_build_dict = get_failed_data_set(jenkins_handle)
        # print "failed_build_dict from threads:", failed_build_dict
        # release cpu
        time.sleep(10)
def get_test_link_case_info():
    """Background-thread loop: refresh TestLink case data.

    Polls TestLink every 60 seconds and publishes into the module-level
    ``manual_case`` and ``test_plan_progress`` read by the web views.
    :return: never returns (infinite loop)
    """
    while True:
        global manual_case
        global test_plan_progress
        manual_case = get_manual_case_info()
        test_plan_progress = test_progress()
        # release cpu
        time.sleep(60)
def get_zentao_bug_info():
    """Background-thread loop: refresh Zentao bug statistics.

    Polls Zentao every 600 seconds and publishes into the module-level
    ``newly_bug`` and ``bug_status`` read by the web views.
    """
    while True:
        global newly_bug
        global bug_status
        newly_bug = get_newly_bug_info()
        bug_status = get_all_bugs_status()
        # release cpu
        time.sleep(600)
def get_ut_info():
    """Background-thread loop: refresh Jenkins unit-test counts.

    Polls every 600 seconds and publishes into the module-level
    ``ut_info`` read by the web views.
    """
    while True:
        global ut_info
        ut_info = get_ut_num()
        # print "ut_info", ut_info
        # release cpu
        time.sleep(600)
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,373
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/main/robot_parser.py
|
# coding=utf-8
# author: donghao
# get build status from jenkins build jobs
import jenkins
from app.main.const import *
def get_failed_data_set(server):
    """Collect recent build results for every unstable/failing job.

    First pass: find jobs whose ball color contains 'yellow' or 'red'
    (unstable/failed, including the blinking variants).  Second pass:
    for each such job, inspect up to LAST_BUILD_TIMES most recent
    builds and record "0" for SUCCESS, "1" otherwise.

    :param server: a jenkins.Jenkins API client
    :return: dict mapping job name -> list of single-entry dicts
        like ``{"build123": "0"}`` (newest first)
    """
    job_dict = dict()
    last_failed_jobs = list()
    for job in job_names:
        try:
            tmp_color = server.get_job_info(job)['color']
        except Exception:
            # Job missing or API error: skip it quietly (best effort).
            continue
        if u'yellow' in tmp_color or u'red' in tmp_color:
            # print job
            last_failed_jobs.append(job)
    # print "get_job over"
    for failed_job in last_failed_jobs:
        last_build_number = server.get_job_info(failed_job)['lastBuild']['number']
        # Don't look back further than the first build of the job.
        if last_build_number < LAST_BUILD_TIMES:
            iterate_counter = last_build_number
        else:
            iterate_counter = LAST_BUILD_TIMES
        tmp_job_status_list = []
        for i in range(0, iterate_counter):
            tmp_build_status_dict = {}
            # print last_build_number - i
            try:
                tmp_res = server.get_build_info(failed_job, last_build_number - i)['result']
            except Exception:
                # Build record unavailable: skip this build.
                continue
            tmp_key = "build{0}".format(last_build_number - i)
            if u'SUCCESS' == tmp_res:
                tmp_value = "0"
            else:
                tmp_value = "1"
            tmp_build_status_dict[tmp_key] = tmp_value
            tmp_job_status_list.append(tmp_build_status_dict)
        job_dict[failed_job] = tmp_job_status_list
    return job_dict
def init_jenkins():
    """Create a Jenkins API client for the configured master server."""
    return jenkins.Jenkins(
        jenkins_server_url, username=user_id, password=api_token
    )
if __name__ == '__main__':
    # Ad-hoc smoke test: instantiate a Jenkins client connected to the
    # remote master server and poke a few API calls (Python 2 prints).
    # server = init_jenkins()
    # server = jenkins.Jenkins(jenkins_server_url, username=user_id, password=api_token)
    # get_failed_data_set(server)
    # pass
    server = jenkins.Jenkins(jenkins_server_url, username=user_id, password=api_token)
    user = server.get_whoami()
    version = server.get_version()
    print('Hello %s from Jenkins %s' % (user['fullName'], version))
    # plugins = server.get_plugins_info()
    # print plugins
    print server.get_job_info(job_names[0])
    print server.get_job_info(job_names[4])['lastBuild']
    # print server.get_job_info(job_name[4])['color']
    # print server.get_job_info(job_name[0])['lastBuild']['number']
    # print server.get_build_info(job_name[1], 280)['result']
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,374
|
xdf020168/test-information-platform
|
refs/heads/master
|
/app/__init__.py
|
# coding=utf-8
# author: zengyuetian
# 使用工厂函数延迟创建程序实例
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
db = SQLAlchemy()
def create_app(config_name):
    """
    Application factory.
    :param config_name: name of the configuration to load from ``config``
    :return: a configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    # Finish initialisation on the extension objects.
    config[config_name].init_app(app)
    db.init_app(app)
    # Attach routes and custom error pages.
    # Blueprint routes stay dormant until the blueprint is registered
    # on the application; only then do they become part of the app.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app
|
{"/manage.py": ["/app/__init__.py", "/app/main/views.py", "/app/main/threads_function.py"], "/app/main/views.py": ["/app/database/database.py", "/app/main/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/main/threads_function.py": ["/app/main/jenkins_ut.py", "/app/main/robot_parser.py", "/app/main/test_link_parser.py", "/app/main/zentao_parser.py"], "/app/__init__.py": ["/app/main/__init__.py"]}
|
31,375
|
beancount/fava
|
refs/heads/main
|
/src/fava/plugins/tag_discovered_documents.py
|
"""Beancount plugin to tag discovered documents.
It looks through all Document entries that were added by Beancount
automatically through file discovery and adds the tag "#discovered".
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from fava.beans.abc import Document
from fava.beans.helpers import replace
from fava.util.sets import add_to_set
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.beans.types import BeancountOptions
from fava.helpers import BeancountError
__plugins__ = ["tag_discovered_documents"]
def tag_discovered_documents(
    entries: list[Directive],
    options_map: BeancountOptions,
) -> tuple[list[Directive], list[BeancountError]]:
    """Tag automatically added documents.

    Document entries added by Beancount's file discovery carry a lineno
    of 0; each such entry is replaced in place with a copy whose tag set
    includes "discovered".
    """
    if not options_map["documents"]:  # pragma: no cover
        return entries, []
    for position, directive in enumerate(entries):
        discovered = (
            isinstance(directive, Document) and directive.meta["lineno"] == 0
        )
        if discovered:
            entries[position] = replace(
                directive,
                tags=add_to_set(directive.tags, "discovered"),
            )
    return entries, []
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,376
|
beancount/fava
|
refs/heads/main
|
/src/fava/application.py
|
"""Fava's main WSGI application.
you can use `create_app` to create a Fava WSGI app for a given list of files.
To start a simple server::
from fava.application import create_app
app = create_app(['/path/to/file.beancount'])
app.run('localhost', 5000)
"""
from __future__ import annotations
from dataclasses import fields
from datetime import date
from datetime import datetime
from functools import lru_cache
from io import BytesIO
from pathlib import Path
from threading import Lock
from typing import TYPE_CHECKING
from urllib.parse import parse_qsl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.parse import urlunparse
import markdown2 # type: ignore[import]
from beancount import __version__ as beancount_version
from beancount.utils.text_utils import replace_numbers
from flask import abort
from flask import current_app
from flask import Flask
from flask import redirect
from flask import render_template
from flask import render_template_string
from flask import request
from flask import send_file
from flask import url_for as flask_url_for
from flask_babel import Babel # type: ignore[import]
from flask_babel import get_translations
from markupsafe import Markup
from werkzeug.utils import secure_filename
from fava import __version__ as fava_version
from fava import LANGUAGES
from fava import template_filters
from fava.context import g
from fava.core import FavaLedger
from fava.core.charts import FavaJSONProvider
from fava.core.documents import is_document_or_import_file
from fava.help import HELP_PAGES
from fava.helpers import FavaAPIError
from fava.internal_api import ChartApi
from fava.internal_api import get_ledger_data
from fava.json_api import json_api
from fava.util import next_key
from fava.util import send_file_inline
from fava.util import setup_logging
from fava.util import slugify
from fava.util.date import Interval
from fava.util.excel import HAVE_EXCEL
if TYPE_CHECKING: # pragma: no cover
from typing import Iterable
from flask.wrappers import Response
from werkzeug import Response as WerkzeugResponse
setup_logging()
SERVER_SIDE_REPORTS = [
"holdings",
"journal",
"options",
"statistics",
]
CLIENT_SIDE_REPORTS = [
"balance_sheet",
"commodities",
"documents",
"editor",
"errors",
"events",
"import",
"income_statement",
"query",
"trial_balance",
]
def _ledger_slugs_dict(ledgers: Iterable[FavaLedger]) -> dict[str, FavaLedger]:
    """Build the mapping from unique URL slugs to ledgers.

    The slug comes from the ledger title, falling back to the file path
    for untitled ledgers; next_key de-duplicates colliding slugs.
    """
    mapping: dict[str, FavaLedger] = {}
    for current in ledgers:
        slug = slugify(current.options["title"]) or slugify(
            current.beancount_file_path,
        )
        mapping[next_key(slug, mapping)] = current
    return mapping
def static_url(filename: str) -> str:
    """Return a static url with an mtime query string for cache busting."""
    asset = Path(__file__).parent / "static" / filename
    try:
        cache_buster = str(int(asset.stat().st_mtime))
    except FileNotFoundError:
        cache_buster = "0"
    return url_for("static", filename=filename, mtime=cache_buster)
_cached_url_for = lru_cache(2048)(flask_url_for)
def _inject_filters(endpoint: str, values: dict[str, str]) -> None:
    """Mutate *values* in place to carry Fava's URL state across links.

    Fills in the current ledger slug (``bfile``) when the endpoint
    expects it, and propagates the active filter query parameters to
    every endpoint except ``static`` and ``index``.
    """
    if (
        "bfile" not in values
        and current_app.url_map.is_endpoint_expecting(endpoint, "bfile")
        and g.beancount_file_slug is not None
    ):
        values["bfile"] = g.beancount_file_slug
    if endpoint in ["static", "index"]:
        return
    # Carry over any active filters from the current request's URL.
    for name in ["conversion", "interval", "account", "filter", "time"]:
        if name not in values:
            val = request.args.get(name)
            if val is not None:
                values[name] = val
def url_for(endpoint: str, **values: str) -> str:
    """Wrap flask.url_for using a cache.

    Filters are injected first so the cache key (endpoint + values)
    reflects the fully-resolved URL parameters.
    """
    _inject_filters(endpoint, values)
    return _cached_url_for(endpoint, **values)
def translations() -> dict[str, str]:
    """Get translations catalog.

    Reaches into Flask-Babel's private ``_catalog`` attribute (there is
    no public accessor for the raw catalog dict).
    """
    return get_translations()._catalog  # type: ignore[no-any-return] # noqa: SLF001
def _setup_template_config(fava_app: Flask) -> None:
    """Configure Jinja options and register template filters/globals."""
    # Jinja configuration.
    fava_app.jinja_options = {
        "extensions": ["jinja2.ext.do", "jinja2.ext.loopcontrols"],
        "trim_blocks": True,
        "lstrip_blocks": True,
    }
    # Template filters.
    for function in template_filters.FILTERS:
        fava_app.add_template_filter(function)
    fava_app.add_template_filter(fields, "dataclass_fields")
    # Template global functions.
    for func, alias in (
        (static_url, "static_url"),
        (date.today, "today"),
        (url_for, "url_for"),
        (translations, "translations"),
        (get_ledger_data, "get_ledger_data"),
    ):
        fava_app.add_template_global(func, alias)
    @fava_app.context_processor
    def _template_context() -> dict[str, FavaLedger | type[ChartApi]]:
        """Inject variables into the template context."""
        return {"ledger": g.ledger, "chart_api": ChartApi}
def _setup_filters(fava_app: Flask, read_only: bool, incognito: bool) -> None:
    """Register request handlers/filters on the app.

    Registration order matters: Flask runs before_request handlers in
    registration order, and the url_value_preprocessor that resolves the
    ledger runs before any before_request handler.
    """
    fava_app.url_defaults(_inject_filters)
    @fava_app.before_request
    def _perform_global_filters() -> None:
        # Change-polling endpoints must not trigger a reload themselves.
        if request.endpoint in ("json_api.get_changed", "json_api.get_errors"):
            return
        ledger = getattr(g, "ledger", None)
        if ledger:
            # check (and possibly reload) source file
            if request.blueprint != "json_api":
                ledger.changed()
            g.filtered = ledger.get_filtered(
                account=request.args.get("account"),
                filter=request.args.get("filter"),
                time=request.args.get("time"),
            )
    if read_only:
        # Prevent any request that isn't a GET if read-only mode is active
        @fava_app.before_request
        def _read_only() -> None:
            if request.method != "GET":
                abort(401)
    if incognito:
        # Replace all numbers with 'X'.
        @fava_app.after_request
        def _incognito(response: WerkzeugResponse) -> WerkzeugResponse:
            if response.content_type.startswith("text/html"):
                original_text = response.get_data(as_text=True)
                response.set_data(replace_numbers(original_text))
            return response
    # Lock shared by all requests; closed over by the preprocessor below.
    load_file_lock = Lock()
    @fava_app.url_value_preprocessor
    def _pull_beancount_file(
        _: str | None,
        values: dict[str, str] | None,
    ) -> None:
        g.beancount_file_slug = values.pop("bfile", None) if values else None
        if not fava_app.config["LEDGERS"]:
            # Lazy first load; the check is repeated under the lock so
            # concurrent first requests load the ledgers only once.
            with load_file_lock:
                if not fava_app.config["LEDGERS"]:
                    fava_app.config["LEDGERS"] = _ledger_slugs_dict(
                        FavaLedger(filepath)
                        for filepath in fava_app.config["BEANCOUNT_FILES"]
                    )
        if g.beancount_file_slug:
            if g.beancount_file_slug not in fava_app.config["LEDGERS"]:
                # one of the file slugs might have changed, update the mapping
                fava_app.config["LEDGERS"] = _ledger_slugs_dict(
                    fava_app.config["LEDGERS"].values(),
                )
            if g.beancount_file_slug not in fava_app.config["LEDGERS"]:
                abort(404)
            g.ledger = fava_app.config["LEDGERS"][g.beancount_file_slug]
        g.conversion = request.args.get("conversion", "at_cost")
        g.interval = Interval.get(request.args.get("interval", "month"))
    @fava_app.errorhandler(FavaAPIError)
    def fava_api_exception(error: FavaAPIError) -> str:
        """Handle API errors."""
        return render_template(
            "_layout.html",
            page_title="Error",
            content=error.message,
        )
def _setup_routes(fava_app: Flask) -> None: # noqa: PLR0915
@fava_app.route("/")
@fava_app.route("/<bfile>/")
def index() -> WerkzeugResponse:
"""Redirect to the Income Statement (of the given or first file)."""
if not g.beancount_file_slug:
g.beancount_file_slug = next(iter(fava_app.config["LEDGERS"]))
index_url = url_for("index")
default_page = fava_app.config["LEDGERS"][
g.beancount_file_slug
].fava_options.default_page
return redirect(f"{index_url}{default_page}")
@fava_app.route("/<bfile>/account/<name>/")
def account(name: str) -> str:
"""Get the account report."""
return render_template("_layout.html", content="", name=name)
@fava_app.route("/<bfile>/document/", methods=["GET"])
def document() -> Response:
"""Download a document."""
filename = request.args.get("filename", "")
if is_document_or_import_file(filename, g.ledger):
return send_file_inline(filename)
return abort(404)
@fava_app.route("/<bfile>/statement/", methods=["GET"])
def statement() -> Response:
"""Download a statement file."""
entry_hash = request.args.get("entry_hash", "")
key = request.args.get("key", "")
document_path = g.ledger.statement_path(entry_hash, key)
return send_file_inline(document_path)
@fava_app.route(
"/<bfile>/holdings"
"/by_<any(account,currency,cost_currency):aggregation_key>/",
)
def holdings_by(
aggregation_key: str,
) -> str:
"""Get the holdings report."""
return render_template(
"holdings.html",
aggregation_key=aggregation_key,
)
@fava_app.route("/<bfile>/<report_name>/")
def report(report_name: str) -> str:
"""Endpoint for most reports."""
if report_name in CLIENT_SIDE_REPORTS:
return render_template("_layout.html", content="")
if report_name in SERVER_SIDE_REPORTS:
return render_template(f"{report_name}.html")
return abort(404)
@fava_app.route(
"/<bfile>/extension/<extension_name>/<endpoint>",
methods=["GET", "POST", "PUT", "DELETE"],
)
def extension_endpoint(extension_name: str, endpoint: str) -> Response:
ext = g.ledger.extensions.get_extension(extension_name)
key = (endpoint, request.method)
if ext is None or key not in ext.endpoints:
return abort(404)
response = ext.endpoints[key](ext)
return (
fava_app.make_response(response)
if response is not None
else abort(404)
)
@fava_app.route("/<bfile>/extension_js_module/<extension_name>.js")
def extension_js_module(extension_name: str) -> Response:
"""Endpoint for extension module source."""
ext = g.ledger.extensions.get_extension(extension_name)
if ext is None or not ext.has_js_module:
return abort(404)
return send_file(ext.extension_dir / f"{ext.name}.js")
@fava_app.route("/<bfile>/extension/<extension_name>/")
def extension_report(extension_name: str) -> str:
"""Endpoint for extension reports."""
ext = g.ledger.extensions.get_extension(extension_name)
if ext is None or ext.report_title is None:
abort(404)
g.extension = ext
template = ext.jinja_env.get_template(f"{ext.name}.html")
content = Markup(template.render(ledger=g.ledger, extension=ext))
return render_template(
"_layout.html",
content=content,
page_title=ext.report_title,
)
@fava_app.route("/<bfile>/download-query/query_result.<result_format>")
def download_query(result_format: str) -> Response:
"""Download a query result."""
name, data = g.ledger.query_shell.query_to_file(
g.filtered.entries,
request.args.get("query_string", ""),
result_format,
)
filename = f"{secure_filename(name.strip())}.{result_format}"
return send_file(data, as_attachment=True, download_name=filename)
@fava_app.route("/<bfile>/download-journal/")
def download_journal() -> Response:
"""Download a Journal file."""
now = datetime.now().replace(microsecond=0)
filename = f"journal_{now.isoformat()}.beancount"
data = BytesIO(bytes(render_template("beancount_file"), "utf8"))
return send_file(data, as_attachment=True, download_name=filename)
@fava_app.route("/<bfile>/help/", defaults={"page_slug": "_index"})
@fava_app.route("/<bfile>/help/<page_slug>")
def help_page(page_slug: str) -> str:
    """Fava's included documentation.

    Renders one of the Markdown help pages bundled with Fava; unknown
    slugs result in a 404.
    """
    if page_slug not in HELP_PAGES:
        abort(404)
    # Convert the bundled Markdown file to HTML.
    html = markdown2.markdown_path(
        (Path(__file__).parent / "help" / (page_slug + ".md")),
        extras=["fenced-code-blocks", "tables", "header-ids"],
    )
    return render_template(
        "help.html",
        page_slug=page_slug,
        # The help page may contain Jinja placeholders for the version
        # numbers, so render it as a template string before marking the
        # result as safe HTML.
        help_html=Markup(
            render_template_string(
                html,
                beancount_version=beancount_version,
                fava_version=fava_version,
            ),
        ),
        HELP_PAGES=HELP_PAGES,
    )
@fava_app.route("/jump")
def jump() -> WerkzeugResponse:
    """Redirect back to the referer, replacing some parameters.

    This is useful for sidebar links, e.g. a link ``/jump?time=year``
    would set the time filter to `year` on the current page.

    When accessing ``/jump?param1=abc`` from
    ``/example/page?param1=123&param2=456``, this view should redirect to
    ``/example/page?param1=abc&param2=456``.
    """
    parts = urlparse(request.referrer)
    params = parse_qsl(parts.query)
    for name, new_values in request.args.lists():
        # Drop all existing occurrences of this parameter ...
        params = [pair for pair in params if pair[0] != name]
        # ... and append the new values - unless a single empty value was
        # given, which removes the parameter entirely.
        if new_values != [""]:
            params += [(name, value) for value in new_values]
    return redirect(urlunparse(parts._replace(query=urlencode(params))))
def _setup_babel(fava_app: Flask) -> None:
    """Configure the Babel Flask extension."""

    def _get_locale() -> str | None:
        """Get locale.

        Returns:
            The language set in the ledger's Fava options, falling back
            to the best match among the request's accepted languages.
        """
        lang = g.ledger.fava_options.language
        if lang is not None:
            return lang
        return request.accept_languages.best_match(["en", *LANGUAGES])

    # Flask-Babel changed its API in 3.0; support both variants.
    try:
        # for Flask-Babel <3.0
        babel = Babel(fava_app)
        babel.localeselector(_get_locale)
    except AttributeError:
        # for Flask-Babel >=3.0
        babel = Babel(fava_app, locale_selector=_get_locale)
def create_app(
    files: Iterable[Path | str],
    load: bool = False,
    incognito: bool = False,
    read_only: bool = False,
) -> Flask:
    """Create a Fava Flask application.

    Arguments:
        files: The list of Beancount files (paths).
        load: Whether to load the Beancount files directly.
        incognito: Whether to run in incognito mode.
        read_only: Whether to run in read-only mode.

    Returns:
        The configured Flask application.
    """
    fava_app = Flask("fava")
    fava_app.register_blueprint(json_api, url_prefix="/<bfile>/api")
    fava_app.json = FavaJSONProvider(fava_app)
    # Wire up template configuration, translations, request filters and
    # the URL routes.
    _setup_template_config(fava_app)
    _setup_babel(fava_app)
    _setup_filters(fava_app, read_only=read_only, incognito=incognito)
    _setup_routes(fava_app)
    fava_app.config["HAVE_EXCEL"] = HAVE_EXCEL
    fava_app.config["BEANCOUNT_FILES"] = [str(f) for f in files]
    fava_app.config["INCOGNITO"] = incognito
    if load:
        # Eagerly load all ledgers and index them by their URL slug.
        fava_app.config["LEDGERS"] = _ledger_slugs_dict(
            FavaLedger(filepath)
            for filepath in fava_app.config["BEANCOUNT_FILES"]
        )
    else:
        # Ledgers will be loaded lazily on first request.
        fava_app.config["LEDGERS"] = None
    return fava_app
#: This is still provided for compatibility but will be removed at some point.
app = create_app([])
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,377
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/query_shell.py
|
"""For using the Beancount shell from Fava."""
from __future__ import annotations
import contextlib
import io
import textwrap
from typing import Any
from typing import TYPE_CHECKING
from beancount.parser.options import OPTIONS_DEFAULTS
from beancount.query import query_compile
from beancount.query.query_compile import CompilationError
from beancount.query.query_parser import ParseError
from beancount.query.query_parser import RunCustom
from beancount.query.shell import BQLShell # type: ignore[import]
from beancount.utils import pager # type: ignore[attr-defined]
from fava.beans.funcs import execute_query
from fava.beans.funcs import run_query
from fava.core.module_base import FavaModule
from fava.helpers import FavaAPIError
from fava.util.excel import HAVE_EXCEL
from fava.util.excel import to_csv
from fava.util.excel import to_excel
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.beans.abc import Query
from fava.beans.funcs import QueryResult
from fava.core import FavaLedger
from fava.helpers import BeancountError
# This is to limit the size of the history file. Fava is not using readline at
# all, but Beancount somehow still is...
try:
import readline
readline.set_history_length(1000)
except ImportError:
pass
class QueryShell(BQLShell, FavaModule):  # type: ignore[misc]
    """A light wrapper around Beancount's shell.

    All output of the underlying BQL shell is redirected into an internal
    string buffer instead of stdout, and table results are captured in
    ``self.result`` so they can be returned to the caller.
    """

    def __init__(self, ledger: FavaLedger) -> None:
        # All shell output is collected in this buffer instead of stdout.
        self.buffer = io.StringIO()
        BQLShell.__init__(self, True, None, self.buffer)
        FavaModule.__init__(self, ledger)
        # Holds the (types, rows) of the last SELECT, if any.
        self.result: QueryResult | None = None
        self.stdout = self.buffer
        self.entries: list[Directive] = []
        self.errors: list[BeancountError] = []
        self.options_map = OPTIONS_DEFAULTS
        self.queries: list[Query] = []

    def load_file(self) -> None:
        # Pick up the `query` directives from the loaded ledger.
        self.queries = self.ledger.all_entries_by_type.Query

    def add_help(self) -> None:
        """Attach help functions for each of the parsed token handlers."""
        for attrname, func in BQLShell.__dict__.items():
            if attrname[:3] != "on_":
                continue
            command_name = attrname[3:]
            # Register a help_<command> method that prints the handler's
            # docstring to the output buffer.
            setattr(
                self.__class__,
                f"help_{command_name.lower()}",
                lambda _, fun=func: print(
                    textwrap.dedent(fun.__doc__).strip(),
                    file=self.buffer,
                ),
            )

    def _loadfun(self) -> None:
        # Refresh entries, errors and options from the ledger.
        self.entries = self.ledger.all_entries
        self.errors = self.ledger.errors
        self.options_map = self.ledger.options

    def get_pager(self) -> Any:
        """No real pager, just a wrapper that doesn't close self.buffer."""
        return pager.flush_only(self.buffer)

    def noop(self, _: Any) -> None:
        """Doesn't do anything in Fava's query shell."""
        print(self.noop.__doc__, file=self.buffer)

    # Commands that make no sense in Fava's non-interactive shell.
    on_Reload = noop  # noqa: N815
    do_exit = noop
    do_quit = noop
    do_EOF = noop  # noqa: N815

    def on_Select(self, statement: str) -> None:  # noqa: N802
        # Compile the SELECT statement; compilation errors are printed to
        # the buffer instead of being raised.
        try:
            c_query = query_compile.compile(  # type: ignore[attr-defined]
                statement,
                self.env_targets,
                self.env_postings,
                self.env_entries,
            )
        except CompilationError as exc:
            print(f"ERROR: {str(exc).rstrip('.')}.", file=self.buffer)
            return
        rtypes, rrows = execute_query(c_query, self.entries, self.options_map)
        if not rrows:
            print("(empty)", file=self.buffer)
        # Stash the table result for execute_query() below to pick up.
        self.result = rtypes, rrows

    def execute_query(self, entries: list[Directive], query: str) -> Any:
        """Run a query.

        Arguments:
            entries: The entries to run the query on.
            query: A query string.

        Returns:
            A tuple (contents, types, rows) where either the first or the last
            two entries are None. If the query result is a table, it will be
            contained in ``types`` and ``rows``, otherwise the result will be
            contained in ``contents`` (as a string).
        """
        self._loadfun()
        # Run on the given (possibly filtered) entries rather than on all
        # of the ledger's entries.
        self.entries = entries
        with contextlib.redirect_stdout(self.buffer):
            self.onecmd(query)
        contents = self.buffer.getvalue()
        self.buffer.truncate(0)
        if self.result is None:
            # No table result: return the captured text output.
            return (contents.strip().strip("\x00"), None, None)
        types, rows = self.result
        self.result = None
        return (None, types, rows)

    def on_RunCustom(self, run_stmt: RunCustom) -> Any:  # noqa: N802
        """Run a custom query."""
        name = run_stmt.query_name
        if name is None:
            # List the available queries.
            for query in self.queries:
                print(query.name)  # noqa: T201
        else:
            try:
                query = next(
                    query for query in self.queries if query.name == name
                )
            except StopIteration:
                print(f"ERROR: Query '{name}' not found")  # noqa: T201
            else:
                # Re-dispatch the stored query string.
                statement = self.parser.parse(query.query_string)
                self.dispatch(statement)

    def query_to_file(
        self,
        entries: list[Directive],
        query_string: str,
        result_format: str,
    ) -> Any:
        """Get query result as file.

        Arguments:
            entries: The entries to run the query on.
            query_string: A string, the query to run.
            result_format: The file format to save to.

        Returns:
            A tuple (name, data), where name is either 'query_result' or the
            name of a custom query if the query string is 'run name_of_query'.
            ``data`` contains the file contents.

        Raises:
            FavaAPIError: If the result format is not supported or the
                query failed.
        """
        name = "query_result"
        try:
            statement = self.parser.parse(query_string)
        except ParseError as exception:
            raise FavaAPIError(str(exception)) from exception
        if isinstance(statement, RunCustom):
            # 'run name_of_query' - resolve the stored custom query.
            name = statement.query_name
            try:
                query = next(
                    query for query in self.queries if query.name == name
                )
            except StopIteration as exc:
                raise FavaAPIError(f'Query "{name}" not found.') from exc
            query_string = query.query_string
        try:
            types, rows = run_query(
                entries,
                self.ledger.options,
                query_string,
                numberify=True,
            )
        except (CompilationError, ParseError) as exception:
            raise FavaAPIError(str(exception)) from exception
        if result_format == "csv":
            data = to_csv(types, rows)
        else:
            if not HAVE_EXCEL:
                raise FavaAPIError("Result format not supported.")
            data = to_excel(types, rows, result_format, query_string)
        return name, data
QueryShell.on_Select.__doc__ = BQLShell.on_Select.__doc__
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,378
|
beancount/fava
|
refs/heads/main
|
/src/fava/cli.py
|
"""The command-line interface for Fava."""
from __future__ import annotations
import errno
import os
import click
from cheroot.wsgi import Server
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.middleware.profiler import ProfilerMiddleware
from fava import __version__
from fava.application import create_app
from fava.util import simple_wsgi
@click.command(context_settings={"auto_envvar_prefix": "FAVA"})
@click.argument(
    "filenames",
    nargs=-1,
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
)
@click.option(
    "-p",
    "--port",
    type=int,
    default=5000,
    show_default=True,
    metavar="<port>",
    help="The port to listen on.",
)
@click.option(
    "-H",
    "--host",
    type=str,
    default="localhost",
    show_default=True,
    metavar="<host>",
    help="The host to listen on.",
)
@click.option("--prefix", type=str, help="Set an URL prefix.")
@click.option(
    "--incognito",
    is_flag=True,
    help="Run in incognito mode and obscure all numbers.",
)
@click.option(
    "--read-only",
    is_flag=True,
    help="Run in read-only mode, disabling any change through UI/API",
)
@click.option("-d", "--debug", is_flag=True, help="Turn on debugging.")
@click.option(
    "--profile",
    is_flag=True,
    help="Turn on profiling. Implies --debug.",
)
@click.option(
    "--profile-dir",
    type=click.Path(),
    help="Output directory for profiling data.",
)
@click.version_option(version=__version__, prog_name="fava")
def main(  # noqa: PLR0912
    filenames: tuple[str],
    port: int,
    host: str,
    prefix: str,
    incognito: bool,
    read_only: bool,
    debug: bool,
    profile: bool,
    profile_dir: str,
) -> None:  # pragma: no cover
    """Start Fava for FILENAMES on http://<host>:<port>.

    If the `BEANCOUNT_FILE` environment variable is set, Fava will use the
    files (space-delimited) specified there in addition to FILENAMES.

    Note you can also specify command-line options via environment variables.
    For example, `--host=0.0.0.0` is equivalent to setting the environment
    variable `FAVA_HOST=0.0.0.0`.
    """
    if profile:
        debug = True

    # Collect files from the CLI arguments and the environment variable.
    env_filename = os.environ.get("BEANCOUNT_FILE")
    all_filenames = (
        filenames + tuple(env_filename.split()) if env_filename else filenames
    )
    if not all_filenames:
        raise click.UsageError("No file specified")

    app = create_app(
        set(all_filenames),
        incognito=incognito,
        read_only=read_only,
    )

    if prefix:
        app.wsgi_app = DispatcherMiddleware(  # type: ignore[method-assign]
            simple_wsgi,
            {prefix: app.wsgi_app},
        )

    if host == "localhost":
        # ensure that cheroot does not use IP6 for localhost
        host = "127.0.0.1"

    click.echo(f"Starting Fava on http://{host}:{port}")
    if not debug:
        # Production mode: serve with the cheroot WSGI server.
        server = Server((host, port), app)
        try:
            server.start()
        except KeyboardInterrupt:
            click.echo("Keyboard interrupt received: stopping Fava", err=True)
            server.stop()
        except OSError as error:
            if "No socket could be created" in str(error):
                click.echo(
                    f"Cannot start Fava because port {port} is already in use."
                    "\nPlease choose a different port with the '-p' option.",
                )
                raise click.Abort from error
            # Fix: unrelated OSErrors were previously swallowed silently,
            # making startup failures invisible - re-raise them instead.
            raise
    else:
        # Debug mode: use Flask's built-in development server.
        if profile:
            app.wsgi_app = ProfilerMiddleware(  # type: ignore[method-assign]
                app.wsgi_app,
                restrictions=(30,),
                profile_dir=profile_dir if profile_dir else None,
            )
        app.jinja_env.auto_reload = True
        try:
            app.run(host, port, debug)
        except OSError as error:
            if error.errno == errno.EADDRINUSE:
                click.echo(
                    f"Cannot start Fava because port {port} is already in use."
                    "\nPlease choose a different port with the '-p' option.",
                )
                raise click.Abort from error
            raise
# needed for pyinstaller:
if __name__ == "__main__": # pragma: no cover
# pylint: disable=no-value-for-parameter
main()
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,379
|
beancount/fava
|
refs/heads/main
|
/src/fava/ext/portfolio_list/__init__.py
|
"""Portfolio list extension for Fava.
This is a simple example of Fava's extension reports system.
"""
from __future__ import annotations
import re
from dataclasses import dataclass
from decimal import Decimal
from typing import NamedTuple
from typing import TYPE_CHECKING
from fava.context import g
from fava.ext import FavaExtensionBase
from fava.helpers import FavaAPIError
from fava.template_filters import cost_or_value
if TYPE_CHECKING:
from fava.beans.funcs import ResultType
from fava.core.tree import Tree
from fava.core.tree import TreeNode
class Row(NamedTuple):
    """A row in the portfolio tables."""

    # Full account name.
    account: str
    # Balance in the first operating currency (None if unavailable).
    balance: Decimal | None
    # Share of the portfolio total in percent (None if balance is None).
    allocation: Decimal | None
@dataclass
class Portfolio:
    """A portfolio."""

    # Human-readable title describing how the accounts were selected.
    title: str
    # One row per selected account.
    rows: list[Row]
    # Column names and types, for rendering as a query table.
    types: tuple[ResultType, ...] = (
        ("account", str),
        ("balance", Decimal),
        ("allocation", Decimal),
    )
class PortfolioList(FavaExtensionBase):  # pragma: no cover
    """Sample Extension Report that just prints out an Portfolio List."""

    report_title = "Portfolio List"

    has_js_module = True

    def portfolio_accounts(
        self,
        filter_str: str | None = None,
    ) -> list[Portfolio]:
        """Get an account tree based on matching regex patterns.

        Args:
            filter_str: If given, a single account-name regex pattern
                that overrides the patterns from the extension config.

        Returns:
            A list of portfolios, one per pattern.

        Raises:
            FavaAPIError: If the extension config contains an unknown
                option key.
        """
        tree = g.filtered.root_tree
        portfolios = []

        if filter_str:
            portfolio = self._account_name_pattern(tree, filter_str)
            portfolios.append(portfolio)
        else:
            for option in self.config:
                opt_key = option[0]
                if opt_key == "account_name_pattern":
                    portfolio = self._account_name_pattern(tree, option[1])
                elif opt_key == "account_open_metadata_pattern":
                    # Option value is a (metadata_key, pattern) pair.
                    portfolio = self._account_metadata_pattern(
                        tree,
                        option[1][0],
                        option[1][1],
                    )
                else:
                    raise FavaAPIError("Portfolio List: Invalid option.")
                portfolios.append(portfolio)

        return portfolios

    def _account_name_pattern(self, tree: Tree, pattern: str) -> Portfolio:
        """Return portfolio info based on matching account name.

        Args:
            tree: Ledger root tree node.
            pattern: Account name regex pattern.

        Return:
            Data structured for use with a querytable (types, rows).
        """
        regexer = re.compile(pattern)
        selected_nodes = [
            node for acct, node in tree.items() if regexer.match(acct)
        ]
        return Portfolio(
            f"Account names matching: '{pattern}'",
            self._portfolio_data(selected_nodes),
        )

    def _account_metadata_pattern(
        self,
        tree: Tree,
        metadata_key: str,
        pattern: str,
    ) -> Portfolio:
        """Return portfolio info based on matching account open metadata.

        Args:
            tree: Ledger root tree node.
            metadata_key: Metadata key to match for in account open.
            pattern: Metadata value's regex pattern to match for.

        Return:
            Data structured for use with a querytable - (types, rows).
        """
        regexer = re.compile(pattern)
        # Select the tree nodes of accounts whose Open directive carries
        # the metadata key with a matching value.
        selected_nodes = [
            tree[entry.account]
            for entry in self.ledger.all_entries_by_type.Open
            if metadata_key in entry.meta
            and regexer.match(entry.meta[metadata_key])
        ]
        return Portfolio(
            f"Accounts with '{metadata_key}' metadata matching: '{pattern }'",
            self._portfolio_data(selected_nodes),
        )

    def _portfolio_data(self, nodes: list[TreeNode]) -> list[Row]:
        """Turn a portfolio of tree nodes into querytable-style data.

        Args:
            nodes: Account tree nodes.

        Return:
            types: Tuples of column names and types as strings.
            rows: Dictionaries of row data by column names.
        """
        # Balances are reported in the first operating currency only.
        operating_currency = self.ledger.options["operating_currency"][0]
        acct_balances: list[tuple[str, Decimal | None]] = []
        total = Decimal()
        for node in nodes:
            balance = cost_or_value(node.balance)
            if operating_currency in balance:
                balance_dec = balance[operating_currency]
                total += balance_dec
                acct_balances.append((node.name, balance_dec))
            else:
                # No balance in the operating currency - shown as empty.
                acct_balances.append((node.name, None))
        return [
            Row(
                account,
                bal,
                # Allocation as a percentage of the portfolio total.
                (round((bal / total) * 100, 2) if bal else None),
            )
            for account, bal in acct_balances
        ]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,380
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/types.py
|
"""Typing helpers."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from typing import TypedDict
from beancount.core.display_context import DisplayContext
from fava.beans.abc import Directive
from fava.helpers import BeancountError
class BeancountOptions(TypedDict):
    """Beancount options.

    The subset of the Beancount options map that Fava's code refers to.
    """

    title: str
    filename: str
    name_assets: str
    name_liabilities: str
    name_equity: str
    name_income: str
    name_expenses: str
    account_current_conversions: str
    account_current_earnings: str
    render_commas: bool
    operating_currency: list[str]
    documents: list[str]
    include: list[str]
    dcontext: DisplayContext


# The result of loading a Beancount file: the directives, any errors that
# occurred during loading, and the parsed options map.
LoaderResult = tuple[
    list[Directive],
    list[BeancountError],
    BeancountOptions,
]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,381
|
beancount/fava
|
refs/heads/main
|
/tests/test_application.py
|
"""Tests for Fava's main Flask app."""
from __future__ import annotations
from typing import TYPE_CHECKING
from urllib.parse import urljoin
import pytest
from beancount import __version__ as beancount_version
from fava import __version__ as fava_version
from fava.application import create_app
from fava.application import SERVER_SIDE_REPORTS
from fava.application import static_url
from fava.context import g
if TYPE_CHECKING: # pragma: no cover
from pathlib import Path
from flask import Flask
from flask.testing import FlaskClient
from .conftest import SnapshotFunc
# Filter combinations used to exercise each report.
FILTER_COMBINATIONS = [
    {"account": "Assets"},
    {"filter": "any(account: Assets)"},
    {"time": "2015", "filter": "#tag1 payee:BayBook"},
]
@pytest.mark.parametrize(
    ("report", "filters"),
    [
        (report, filters)
        for report in SERVER_SIDE_REPORTS
        for filters in FILTER_COMBINATIONS
    ],
)
def test_reports(
    test_client: FlaskClient,
    report: str,
    filters: dict[str, str],
) -> None:
    """The standard reports work without error (content isn't checked here)."""
    # Each server-side report is requested with each filter combination.
    result = test_client.get(f"/long-example/{report}/", query_string=filters)
    assert result.status_code == 200
def test_client_side_reports(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
) -> None:
    """The client-side rendered reports are generated."""
    result = test_client.get("/long-example/documents/")
    assert result.status_code == 200
    # The rendered page is compared against a stored snapshot.
    snapshot(result.get_data(True))
@pytest.mark.parametrize(
    ("url", "return_code"),
    [
        ("/", 302),
        ("/asdfasdf/", 404),
        ("/asdfasdf/asdfasdf/", 404),
        ("/example/not-a-report/", 404),
        ("/example/holdings/not-a-holdings-aggregation-key/", 404),
        ("/example/holdings/by_not-a-holdings-aggregation-key/", 404),
        ("/example/account/Assets:US:BofA:Checking/not_a_subreport/", 404),
    ],
)
def test_urls(test_client: FlaskClient, url: str, return_code: int) -> None:
    """Unknown URLs return a 404 (and the root URL redirects)."""
    result = test_client.get(url)
    assert result.status_code == return_code
@pytest.mark.parametrize(
    ("url", "option", "expect"),
    [
        ("/", None, "/long-example/income_statement/"),
        ("/long-example/", None, "/long-example/income_statement/"),
        ("/", "income_statement/", "/long-example/income_statement/"),
        (
            "/long-example/",
            "income_statement/",
            "/long-example/income_statement/",
        ),
        (
            "/",
            "balance_sheet/?account=Assets:US:BofA:Checking",
            "/long-example/balance_sheet/?account=Assets:US:BofA:Checking",
        ),
        (
            "/long-example/",
            "income_statement/?account=Assets:US:BofA:Checking",
            "/long-example/income_statement/?account=Assets:US:BofA:Checking",
        ),
        (
            "/",
            "balance_sheet/?time=year-2+-+year",
            "/long-example/balance_sheet/?time=year-2+-+year",
        ),
        (
            "/",
            "balance_sheet/?time=year-2 - year",
            "/long-example/balance_sheet/?time=year-2%20-%20year",
        ),
        (
            "/",
            "trial_balance/?time=2014&account=Expenses:Rent",
            "/long-example/trial_balance/?time=2014&account=Expenses:Rent",
        ),
    ],
)
def test_default_path_redirection(
    app: Flask,
    test_client: FlaskClient,
    url: str,
    option: str | None,
    expect: str,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that default-page option redirects as expected."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        # Override the ledger's default-page option for this test case.
        if option:
            monkeypatch.setattr(g.ledger.fava_options, "default_page", option)
        result = test_client.get(url)
        get_url = result.headers.get("Location", "")
        # pre Werkzeug 2.1:
        expect_url = urljoin("http://localhost/", expect)
        assert result.status_code == 302
        assert get_url in (expect, expect_url)
@pytest.mark.parametrize(
    ("referer", "jump_link", "expect"),
    [
        ("/?foo=bar", "/jump?foo=baz", "/?foo=baz"),
        ("/?foo=bar", "/jump?baz=qux", "/?foo=bar&baz=qux"),
        ("/", "/jump?foo=bar&baz=qux", "/?foo=bar&baz=qux"),
        ("/", "/jump?baz=qux", "/?baz=qux"),
        ("/?foo=bar", "/jump?foo=", "/"),
        ("/?foo=bar", "/jump?foo=&foo=", "/?foo=&foo="),
        ("/", "/jump?foo=", "/"),
    ],
)
def test_jump_handler(
    app: Flask,
    test_client: FlaskClient,
    referer: str,
    jump_link: str,
    expect: str,
) -> None:
    """Test /jump handler correctly redirect to the right location.

    Note: according to RFC 2616, Location: header should use an absolute URL.
    """
    # The /jump handler builds the redirect target from the Referer header.
    result = test_client.get(jump_link, headers=[("Referer", referer)])
    with app.test_request_context():
        get_url = result.headers.get("Location", "")
        # pre Werkzeug 2.1:
        expect_url = urljoin("http://localhost/", expect)
        assert result.status_code == 302
        assert get_url in (expect, expect_url)
# NOTE(review): the name looks like a typo for ``test_help_pages``.
def test_help_ages(test_client: FlaskClient) -> None:
    """Help pages."""
    result = test_client.get("/long-example/help/")
    assert result.status_code == 200
    # The index page shows both the Fava and the Beancount version.
    assert f"Fava <code>{fava_version}</code>" in result.get_data(True)
    assert f"<code>{beancount_version}</code>" in result.get_data(True)
    result = test_client.get("/long-example/help/filters")
    assert result.status_code == 200
    # Unknown help pages return a 404.
    result = test_client.get("/long-example/help/asdfasdf")
    assert result.status_code == 404
def test_query_download(test_client: FlaskClient) -> None:
    """Download query as csv."""
    # The query string is passed via the `query_string` request argument.
    result = test_client.get(
        "/long-example/download-query/query_result.csv?query_string=balances",
    )
    assert result.status_code == 200
def test_incognito(test_data_dir: Path) -> None:
    """Numbers get obfuscated in incognito mode."""
    app = create_app([test_data_dir / "example.beancount"], incognito=True)
    test_client = app.test_client()
    # Rendered report pages have their numbers replaced by "X" characters.
    result = test_client.get("/example/balance_sheet/")
    assert result.status_code == 200
    assert "XXX" in result.get_data(True)
    # The JSON API is not obfuscated.
    result = test_client.get("/example/api/commodities")
    assert result.status_code == 200
    assert "XXX" not in result.get_data(True)
def test_read_only_mode(test_data_dir: Path) -> None:
    """Non-GET requests return 401 in read-only mode."""
    app = create_app([test_data_dir / "example.beancount"], read_only=True)
    test_client = app.test_client()
    # GET requests still work (the root URL redirects).
    assert test_client.get("/").status_code == 302
    # All mutating HTTP methods are rejected, regardless of path.
    for method in [
        test_client.delete,
        test_client.patch,
        test_client.post,
        test_client.put,
    ]:
        result = method("/any/path/")
        assert result.status_code == 401
def test_download_journal(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
) -> None:
    """The currently filtered journal can be downloaded."""
    result = test_client.get(
        "/long-example/download-journal/",
        query_string={"time": "2016-05-07"},
    )
    # The journal contents are compared against a stored snapshot.
    snapshot(result.get_data(True))
    # The file is sent as an attachment with a timestamped filename.
    assert result.headers["Content-Disposition"].startswith(
        'attachment; filename="journal_',
    )
    assert result.headers["Content-Type"] == "application/octet-stream"
def test_static_url(app: Flask) -> None:
    """Static URLs have the mtime appended."""
    with app.test_request_context():
        url = static_url("app.js")
        assert url.startswith("/static/app.js?mtime=")
        # Missing files get an mtime of 0 instead of raising.
        url = static_url("nonexistent.js")
        assert url == "/static/nonexistent.js?mtime=0"
def test_load_extension_reports(test_client: FlaskClient) -> None:
    """Extension can register reports."""
    # Extension report page and its JS module are served ...
    url = "/extension-report/extension/PortfolioList/"
    result = test_client.get(url)
    assert result.status_code == 200
    url = "/extension-report/extension_js_module/PortfolioList.js"
    result = test_client.get(url)
    assert result.status_code == 200
    # ... while unknown extensions give a 404.
    url = "/extension-report/extension_js_module/Missing.js"
    result = test_client.get(url)
    assert result.status_code == 404
    url = "/extension-report/extension/MissingExtension/"
    result = test_client.get(url)
    assert result.status_code == 404
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,382
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_fava_options.py
|
from __future__ import annotations
import datetime
import re
from typing import TYPE_CHECKING
from fava.core.charts import dumps
from fava.core.fava_options import InsertEntryOption
from fava.core.fava_options import parse_options
from fava.util.date import FiscalYearEnd
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Custom
def test_fava_options(load_doc_custom_entries: list[Custom]) -> None:
    # NOTE: The docstring below is not documentation - the
    # load_doc_custom_entries fixture parses it as Beancount input, so it
    # must not be edited.
    """
    2016-06-14 custom "fava-option" "default-file"
    2016-04-14 custom "fava-option" "show-closed-accounts" "true"
    2016-04-14 custom "fava-option" "currency-column" "10"
    2016-04-14 custom "fava-option" "indent" "4"
    2016-04-14 custom "fava-option" "insert-entry" "Ausgaben:Test"
    2016-04-14 custom "fava-option" "invalid"
    2016-04-14 custom "fava-option" "locale" "en"
    2016-04-14 custom "fava-option" "locale" "invalid"
    2016-04-14 custom "fava-option" "collapse-pattern" "Account:Name"
    2016-04-14 custom "fava-option" "collapse_pattern" "(invalid"
    2016-04-14 custom "fava-option" "fiscal-year-end" "01-11"
    2016-04-14 custom "fava-option" "conversion-currencies" "USD EUR HOOLI"
    """
    options, errors = parse_options(load_doc_custom_entries)

    # The options can be encoded to JSON.
    dumps(options)

    # Three of the entries above are invalid: the "invalid" option, the
    # bad locale and the unparseable collapse pattern.
    assert len(errors) == 3

    assert options.indent == 4
    assert options.insert_entry == [
        InsertEntryOption(
            datetime.date(2016, 4, 14),
            re.compile("Ausgaben:Test"),
            "<string>",
            6,
        ),
    ]
    assert options.show_closed_accounts
    assert options.currency_column == 10
    assert options.collapse_pattern == [re.compile("Account:Name")]
    assert options.fiscal_year_end == FiscalYearEnd(1, 11)
    assert options.conversion_currencies == ("USD", "EUR", "HOOLI")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,383
|
beancount/fava
|
refs/heads/main
|
/src/fava/json_api.py
|
"""JSON API.
This module contains the url endpoints of the JSON API that is used by the web
interface for asynchronous functionality.
"""
from __future__ import annotations
import shutil
from dataclasses import dataclass
from functools import wraps
from inspect import Parameter
from inspect import signature
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Mapping
from typing import TYPE_CHECKING
from flask import Blueprint
from flask import get_template_attribute
from flask import jsonify
from flask import request
from flask_babel import gettext # type: ignore[import]
from fava.beans.abc import Document
from fava.beans.abc import Event
from fava.context import g
from fava.core.documents import filepath_in_document_folder
from fava.core.documents import is_document_or_import_file
from fava.core.ingest import filepath_in_primary_imports_folder
from fava.core.misc import align
from fava.helpers import FavaAPIError
from fava.internal_api import ChartApi
from fava.internal_api import get_errors
from fava.internal_api import get_ledger_data
from fava.serialisation import deserialise
from fava.serialisation import serialise
if TYPE_CHECKING: # pragma: no cover
from datetime import date
from decimal import Decimal
from flask.wrappers import Response
from fava.core.ingest import FileImporters
from fava.core.tree import SerialisedTreeNode
from fava.internal_api import ChartData
from fava.util.date import DateRange
json_api = Blueprint("json_api", __name__)
class ValidationError(Exception):
    """Validation of data failed.

    Raised when request parameters are missing or of an incorrect type.
    """
def json_err(msg: str) -> Response:
    """Jsonify the error message.

    Args:
        msg: The error message to send to the client.
    """
    return jsonify({"success": False, "error": msg})
def json_success(data: Any) -> Response:
    """Jsonify the response.

    Args:
        data: The response payload; the ledger mtime is included so the
            client can detect changes to the underlying files.
    """
    return jsonify(
        {"success": True, "data": data, "mtime": str(g.ledger.mtime)},
    )
@json_api.errorhandler(FavaAPIError)
def _json_api_exception(error: FavaAPIError) -> Response:
    """Turn a FavaAPIError into a JSON error response."""
    return json_err(error.message)


@json_api.errorhandler(OSError)
def _json_api_oserror(error: OSError) -> Response:
    """Turn an OSError into a JSON error response."""
    return json_err(error.strerror)


@json_api.errorhandler(ValidationError)
def _json_api_validation_error(error: ValidationError) -> Response:
    """Turn a ValidationError into a JSON error response."""
    return json_err(f"Invalid API request: {error!s}")
def validate_func_arguments(
    func: Callable[..., Any],
) -> Callable[[Mapping[str, str]], list[str]] | None:
    """Validate arguments for a function.

    This currently only works for strings and lists (but only does a shallow
    validation for lists).

    Args:
        func: The function to check parameters for.

    Returns:
        A function, which takes a Mapping and tries to construct a list of
        positional parameters for the given function or None if the function
        has no parameters.
    """
    # Gather (name, runtime type) pairs for all parameters up front.
    expected: list[tuple[str, Any]] = []
    for parameter in signature(func).parameters.values():
        # Annotations are strings here (lazy evaluation of annotations).
        if parameter.annotation not in {"str", "list[Any]"}:  # pragma: no cover
            raise ValueError(
                f"Type of param {parameter.name} needs to str or list",
            )
        if parameter.kind != Parameter.POSITIONAL_OR_KEYWORD:  # pragma: no cover
            raise ValueError(f"Param {parameter.name} should be positional")
        expected.append(
            (parameter.name, str if parameter.annotation == "str" else list),
        )
    if not expected:
        return None

    def validator(mapping: Mapping[str, str]) -> list[str]:
        values: list[str] = []
        for name, expected_type in expected:
            value = mapping.get(name, None)
            if value is None:
                raise ValidationError(f"Parameter `{name}` is missing.")
            if not isinstance(value, expected_type):
                raise ValidationError(
                    f"Parameter `{name}` of incorrect type.",
                )
            values.append(value)
        return values

    return validator
def api_endpoint(func: Callable[..., Any]) -> Callable[[], Response]:
    """Register an API endpoint.

    The part of the function name up to the first underscore determines
    the accepted HTTP method. For GET and DELETE endpoints, the function
    parameters are extracted from the URL query string and passed to the
    decorated endpoint handler.

    Args:
        func: The endpoint handler; its name must start with ``get_``,
            ``put_`` or ``delete_``.

    Returns:
        The wrapped view function, registered on the ``json_api`` blueprint.
    """
    method, _, name = func.__name__.partition("_")
    if method not in {"get", "delete", "put"}:  # pragma: no cover
        raise ValueError(f"Invalid endpoint function name: {func.__name__}")
    validator = validate_func_arguments(func)
    @json_api.route(f"/{name}", methods=[method])
    @wraps(func)
    def _wrapper() -> Response:
        if validator is not None:
            # PUT endpoints read arguments from the JSON request body;
            # GET and DELETE read them from the query string.
            if method == "put":
                request_json = request.get_json(silent=True)
                if request_json is None:
                    raise FavaAPIError("Invalid JSON request.")
                data = request_json
            else:
                data = request.args
            res = func(*validator(data))
        else:
            res = func()
        return json_success(res)
    return _wrapper
class TargetPathAlreadyExistsError(FavaAPIError):
    """The given path already exists."""
    def __init__(self, path: Path) -> None:
        """Initialise the error message with the conflicting path."""
        super().__init__(f"{path} already exists.")
class DocumentDirectoryMissingError(FavaAPIError):
    """No document directory was specified."""
    def __init__(self) -> None:
        """Initialise with a fixed message about the missing folder option."""
        super().__init__("You need to set a documents folder.")
@api_endpoint
def get_changed() -> bool:
    """Check for file changes.

    Returns:
        Whether the ledger reports changed files.
    """
    return g.ledger.changed()
# Register the plain internal_api data providers as GET endpoints.
api_endpoint(get_errors)
api_endpoint(get_ledger_data)
@api_endpoint
def get_payee_accounts(payee: str) -> list[str]:
    """Rank accounts for the given payee.

    Args:
        payee: The payee to look up accounts for.

    Returns:
        Account names as ranked by the ledger's attributes module.
    """
    return g.ledger.attributes.payee_accounts(payee)
@dataclass(frozen=True)
class QueryResult:
    """Table and optional chart returned by the query_result endpoint."""
    table: Any  # the rendered HTML table
    chart: Any | None = None  # chart data, only when the result is plottable
@api_endpoint
def get_query_result(query_string: str) -> Any:
    """Render a query result to HTML.

    Args:
        query_string: The query to run against the filtered entries.

    Returns:
        A QueryResult with the rendered table and, if the result types can
        be plotted, the chart data.

    Raises:
        FavaAPIError: If the query shell reports an error.
    """
    table = get_template_attribute("_query_table.html", "querytable")
    contents, types, rows = g.ledger.query_shell.execute_query(
        g.filtered.entries,
        query_string,
    )
    # The query shell reports failures inside the rendered contents string.
    if contents and "ERROR" in contents:
        raise FavaAPIError(contents)
    table = table(g.ledger, contents, types, rows)
    if types and g.ledger.charts.can_plot_query(types):
        return QueryResult(table, g.ledger.charts.query(types, rows))
    return QueryResult(table)
@api_endpoint
def get_extract(filename: str, importer: str) -> list[Any]:
    """Extract entries using the ingest framework."""
    extracted = g.ledger.ingest.extract(filename, importer)
    return [serialise(entry) for entry in extracted]
@dataclass(frozen=True)
class Context:
    """Context for an entry."""
    entry: Any  # the serialised entry
    balances_before: dict[str, list[str]] | None  # balances before the entry
    balances_after: dict[str, list[str]] | None  # balances after the entry
    sha256sum: str  # hash of the entry's source slice
    slice: str  # the entry's source text
@api_endpoint
def get_context(entry_hash: str) -> Context:
    """Entry context.

    Args:
        entry_hash: Hash of the entry to get context for.
    """
    # ledger.context returns slice before sha256sum — note the swapped
    # order of the last two values relative to the Context fields.
    entry, before, after, slice_, sha256sum = g.ledger.context(entry_hash)
    return Context(serialise(entry), before, after, sha256sum, slice_)
@api_endpoint
def get_move(account: str, new_name: str, filename: str) -> str:
    """Move a file to the documents folder of an account.

    Args:
        account: The account whose document subfolder is the target.
        new_name: The new filename.
        filename: The current path of the file to move.

    Returns:
        A confirmation message.

    Raises:
        FavaAPIError: If the source is not a file.
        DocumentDirectoryMissingError: If no documents folder is set.
        TargetPathAlreadyExistsError: If the target path already exists.
    """
    if not g.ledger.options["documents"]:
        raise DocumentDirectoryMissingError
    new_path = filepath_in_document_folder(
        g.ledger.options["documents"][0],
        account,
        new_name,
        g.ledger,
    )
    file_path = Path(filename)
    if not file_path.is_file():
        # Fix: the message contained the literal "(unknown)" instead of
        # interpolating the filename into the f-string.
        raise FavaAPIError(f"Not a file: '{filename}'")
    if new_path.exists():
        raise TargetPathAlreadyExistsError(new_path)
    if not new_path.parent.exists():
        new_path.parent.mkdir(parents=True)
    shutil.move(filename, new_path)
    return f"Moved {filename} to {new_path}."
@api_endpoint
def get_payee_transaction(payee: str) -> Any:
    """Last transaction for the given payee."""
    txn = g.ledger.attributes.payee_transaction(payee)
    if not txn:
        return None
    return serialise(txn)
@api_endpoint
def get_source(filename: str) -> dict[str, str]:
    """Load one of the source files.

    Args:
        filename: The path to load; if empty, fall back to the configured
            default file or the main Beancount file.

    Returns:
        A dict with the file source, its sha256 sum and the resolved path.
    """
    file_path = (
        filename
        or g.ledger.fava_options.default_file
        or g.ledger.beancount_file_path
    )
    source, sha256sum = g.ledger.file.get_source(Path(file_path))
    return {"source": source, "sha256sum": sha256sum, "file_path": file_path}
@api_endpoint
def put_source(file_path: str, source: str, sha256sum: str) -> str:
    """Write one of the source files and return the updated sha256sum.

    Args:
        file_path: Path of the file to write.
        source: The new file contents.
        sha256sum: Hash of the current contents (checked by the file module).
    """
    return g.ledger.file.set_source(Path(file_path), source, sha256sum)
@api_endpoint
def put_source_slice(entry_hash: str, source: str, sha256sum: str) -> str:
    """Write an entry source slice and return the updated sha256sum.

    Args:
        entry_hash: Hash identifying the entry to rewrite.
        source: The new source text for the entry.
        sha256sum: Hash of the current slice contents.
    """
    return g.ledger.file.save_entry_slice(entry_hash, source, sha256sum)
@api_endpoint
def delete_source_slice(entry_hash: str, sha256sum: str) -> str:
    """Delete an entry source slice.

    Args:
        entry_hash: Hash identifying the entry to delete.
        sha256sum: Hash of the current slice contents.

    Returns:
        A confirmation message.
    """
    g.ledger.file.delete_entry_slice(entry_hash, sha256sum)
    return f"Deleted entry {entry_hash}."
@api_endpoint
def put_format_source(source: str) -> str:
    """Format beancount file.

    Args:
        source: The source text to align.

    Returns:
        The source aligned to the configured currency column.
    """
    return align(source, g.ledger.fava_options.currency_column)
@api_endpoint
def delete_document(filename: str) -> str:
    """Delete a document.

    Args:
        filename: The path of the document to delete.

    Returns:
        A confirmation message.

    Raises:
        FavaAPIError: If the file is not a known document/import file or
            does not exist.
    """
    if not is_document_or_import_file(filename, g.ledger):
        raise FavaAPIError("No valid document or import file.")
    file_path = Path(filename)
    if not file_path.exists():
        # Fix: the messages contained the literal "(unknown)" instead of
        # interpolating the filename into the f-strings.
        raise FavaAPIError(f"{filename} does not exist.")
    file_path.unlink()
    return f"Deleted {filename}."
@api_endpoint
def put_add_document() -> str:
    """Upload a document.

    Reads the file and the target folder/account from the multipart form
    data. If a "hash" form field is given, also attach the document to the
    entry with that hash via metadata.

    Returns:
        A confirmation message.

    Raises:
        FavaAPIError: If no file was uploaded or it has no filename.
        DocumentDirectoryMissingError: If no documents folder is set.
        TargetPathAlreadyExistsError: If the target file already exists.
    """
    if not g.ledger.options["documents"]:
        raise DocumentDirectoryMissingError
    upload = request.files.get("file", None)
    if not upload:
        raise FavaAPIError("No file uploaded.")
    if not upload.filename:
        raise FavaAPIError("Uploaded file is missing filename.")
    filepath = filepath_in_document_folder(
        request.form["folder"],
        request.form["account"],
        upload.filename,
        g.ledger,
    )
    if filepath.exists():
        raise TargetPathAlreadyExistsError(filepath)
    if not filepath.parent.exists():
        filepath.parent.mkdir(parents=True)
    upload.save(filepath)
    if request.form.get("hash"):
        # Link the uploaded document to the entry with the given hash.
        g.ledger.file.insert_metadata(
            request.form["hash"],
            "document",
            filepath.name,
        )
    return f"Uploaded to {filepath}"
@api_endpoint
def put_attach_document(filename: str, entry_hash: str) -> str:
    """Attach a document to an entry.

    Args:
        filename: The document filename to attach.
        entry_hash: Hash of the entry to attach it to.

    Returns:
        A confirmation message.
    """
    g.ledger.file.insert_metadata(entry_hash, "document", filename)
    # Fix: the message contained the literal "(unknown)" instead of
    # interpolating the filename into the f-string.
    return f"Attached '{filename}' to entry."
@api_endpoint
def put_add_entries(entries: list[Any]) -> str:
    """Add multiple entries.

    Args:
        entries: Serialised entries to deserialise and insert.

    Returns:
        A confirmation message.

    Raises:
        FavaAPIError: If deserialisation fails due to a missing key.
    """
    try:
        entries = [deserialise(entry) for entry in entries]
    except KeyError as error:
        raise FavaAPIError(f"KeyError: {error}") from error
    g.ledger.file.insert_entries(entries)
    return f"Stored {len(entries)} entries."
@api_endpoint
def put_upload_import_file() -> str:
    """Upload a file for importing.

    Reads the file from the multipart form data and saves it to the
    primary imports folder.

    Returns:
        A confirmation message.

    Raises:
        FavaAPIError: If no file was uploaded or it has no filename.
        TargetPathAlreadyExistsError: If the target file already exists.
    """
    upload = request.files.get("file", None)
    if not upload:
        raise FavaAPIError("No file uploaded.")
    if not upload.filename:
        raise FavaAPIError("Uploaded file is missing filename.")
    filepath = filepath_in_primary_imports_folder(upload.filename, g.ledger)
    if filepath.exists():
        raise TargetPathAlreadyExistsError(filepath)
    if not filepath.parent.exists():
        filepath.parent.mkdir(parents=True)
    upload.save(filepath)
    return f"Uploaded to {filepath}"
########################################################################
# Reports
@api_endpoint
def get_events() -> list[Any]:
    """Get all (filtered) events, serialised."""
    # Annotation changed from list[Event]: the list contains serialised
    # entries, not Event instances.
    g.ledger.changed()
    return [serialise(e) for e in g.filtered.entries if isinstance(e, Event)]
@api_endpoint
def get_imports() -> list[FileImporters]:
    """Get a list of the importable files.

    Returns:
        The import data from the ingest module.
    """
    g.ledger.changed()
    return g.ledger.ingest.import_data()
@api_endpoint
def get_documents() -> list[Any]:
    """Get all (filtered) documents, serialised."""
    # Annotation changed from list[Document]: the list contains serialised
    # entries, not Document instances.
    g.ledger.changed()
    return [
        serialise(e) for e in g.filtered.entries if isinstance(e, Document)
    ]
@dataclass(frozen=True)
class CommodityPairWithPrices:
    """A pair of commodities and prices for them."""
    base: str  # the base commodity
    quote: str  # the quote commodity
    prices: list[tuple[date, Decimal]]  # price points of base in quote
@api_endpoint
def get_commodities() -> list[CommodityPairWithPrices]:
    """Get the prices for all commodity pairs (skipping pairs without prices)."""
    g.ledger.changed()
    return [
        CommodityPairWithPrices(base, quote, prices)
        for base, quote in g.ledger.commodity_pairs()
        if (prices := g.filtered.prices(base, quote))
    ]
@dataclass(frozen=True)
class TreeReport:
    """Data for the tree reports."""
    date_range: DateRange | None  # the filtered date range, if any
    charts: list[ChartData]  # charts to render for the report
    trees: list[SerialisedTreeNode]  # the serialised account trees
@api_endpoint
def get_income_statement() -> TreeReport:
    """Get the data for the income statement.

    Returns:
        A TreeReport with net profit / income / expenses charts and the
        income, expenses and net profit trees.
    """
    g.ledger.changed()
    options = g.ledger.options
    # Optionally flip the sign of income (and the net profit total).
    invert = g.ledger.fava_options.invert_income_liabilities_equity
    charts = [
        ChartApi.interval_totals(
            g.interval,
            (options["name_income"], options["name_expenses"]),
            label=gettext("Net Profit"),
            invert=invert,
        ),
        ChartApi.interval_totals(
            g.interval,
            options["name_income"],
            label=f"{gettext('Income')} ({g.interval.label})",
            invert=invert,
        ),
        ChartApi.interval_totals(
            g.interval,
            options["name_expenses"],
            label=f"{gettext('Expenses')} ({g.interval.label})",
        ),
        ChartApi.hierarchy(options["name_income"]),
        ChartApi.hierarchy(options["name_expenses"]),
    ]
    root_tree = g.filtered.root_tree
    trees = [
        root_tree.get(options["name_income"]),
        root_tree.get(options["name_expenses"]),
        root_tree.net_profit(options, gettext("Net Profit")),
    ]
    return TreeReport(
        g.filtered.date_range,
        charts,
        trees=[tree.serialise_with_context() for tree in trees],
    )
@api_endpoint
def get_balance_sheet() -> TreeReport:
    """Get the data for the balance sheet.

    Returns:
        A TreeReport with net worth and hierarchy charts and the assets,
        liabilities and equity trees (from the closed root tree).
    """
    g.ledger.changed()
    options = g.ledger.options
    charts = [
        ChartApi.net_worth(),
        ChartApi.hierarchy(options["name_assets"]),
        ChartApi.hierarchy(options["name_liabilities"]),
        ChartApi.hierarchy(options["name_equity"]),
    ]
    root_tree_closed = g.filtered.root_tree_closed
    trees = [
        root_tree_closed.get(options["name_assets"]),
        root_tree_closed.get(options["name_liabilities"]),
        root_tree_closed.get(options["name_equity"]),
    ]
    return TreeReport(
        g.filtered.date_range,
        charts,
        trees=[tree.serialise_with_context() for tree in trees],
    )
@api_endpoint
def get_trial_balance() -> TreeReport:
    """Get the data for the trial balance.

    Returns:
        A TreeReport with hierarchy charts for all five root accounts and
        the single full root tree.
    """
    g.ledger.changed()
    options = g.ledger.options
    charts = [
        ChartApi.hierarchy(options["name_income"]),
        ChartApi.hierarchy(options["name_expenses"]),
        ChartApi.hierarchy(options["name_assets"]),
        ChartApi.hierarchy(options["name_liabilities"]),
        ChartApi.hierarchy(options["name_equity"]),
    ]
    trees = [g.filtered.root_tree.get("")]
    return TreeReport(
        g.filtered.date_range,
        charts,
        trees=[tree.serialise_with_context() for tree in trees],
    )
@dataclass(frozen=True)
class AccountBudget:
    """Budgets for an account."""
    budget: dict[str, Decimal]  # per-currency budget for the account itself
    budget_children: dict[str, Decimal]  # per-currency budget incl. children
@dataclass(frozen=True)
class AccountReportJournal:
    """Data for the journal account report."""
    charts: list[ChartData]  # charts to render for the account
    journal: str  # the rendered journal table HTML
@dataclass(frozen=True)
class AccountReportTree:
    """Data for the tree account reports."""
    charts: list[ChartData]  # charts to render for the account
    interval_balances: list[SerialisedTreeNode]  # one subtree per interval
    budgets: dict[str, list[AccountBudget]]  # per-account budgets per interval
    dates: list[DateRange]  # the interval date ranges
@api_endpoint
def get_account_report() -> AccountReportJournal | AccountReportTree:
    """Get the data for the account report.

    Reads the account name ("a") and subreport ("r") from the query
    string. For the "changes"/"balances" subreports, returns per-interval
    trees and budgets; otherwise, the rendered journal.
    """
    g.ledger.changed()
    account_name = request.args.get("a", "")
    subreport = request.args.get("r")
    charts = [
        ChartApi.account_balance(account_name),
        ChartApi.interval_totals(
            g.interval,
            account_name,
            label=gettext("Changes"),
        ),
    ]
    if subreport in {"changes", "balances"}:
        # "balances" accumulates amounts across intervals, "changes" does not.
        accumulate = subreport == "balances"
        interval_balances, dates = g.ledger.interval_balances(
            g.filtered,
            g.interval,
            account_name,
            accumulate,
        )
        charts.append(ChartApi.hierarchy(account_name))
        # Hierarchy charts for (up to) the first three interval ranges.
        charts.extend(
            ChartApi.hierarchy(
                account_name,
                date_range.begin,
                date_range.end,
                label=g.interval.format_date(date_range.begin),
            )
            for date_range in dates[:3]
        )
        all_accounts = (
            interval_balances[0].accounts if interval_balances else []
        )
        # Budgets are computed for the account and all its descendants.
        budget_accounts = [
            a for a in all_accounts if a.startswith(account_name)
        ]
        budgets_mod = g.ledger.budgets
        # NOTE(review): the "first" range is taken from dates[-1] —
        # presumably dates are ordered latest-first here; confirm ordering.
        first_date_range = dates[-1]
        budgets = {
            account: [
                AccountBudget(
                    budgets_mod.calculate(
                        account,
                        (first_date_range if accumulate else date_range).begin,
                        date_range.end,
                    ),
                    budgets_mod.calculate_children(
                        account,
                        (first_date_range if accumulate else date_range).begin,
                        date_range.end,
                    ),
                )
                for date_range in dates
            ]
            for account in budget_accounts
        }
        return AccountReportTree(
            charts,
            interval_balances=[
                tree.get(account_name).serialise(
                    g.conversion,
                    g.ledger.prices,
                    date_range.end_inclusive,
                    with_cost=False,
                )
                for tree, date_range in zip(interval_balances, dates)
            ],
            dates=dates,
            budgets=budgets,
        )
    journal = get_template_attribute("_journal_table.html", "journal_table")
    entries = g.ledger.account_journal(
        g.filtered,
        account_name,
        with_journal_children=g.ledger.fava_options.account_journal_include_children,
    )
    return AccountReportJournal(
        charts,
        journal=journal(entries, show_change_and_balance=True),
    )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,384
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/flags.py
|
"""Beancount entry flags."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from typing import TypeAlias
Flag: TypeAlias = str
# Single-character entry flags used by Beancount.
FLAG_CONVERSIONS = "C"  # conversions
FLAG_MERGING = "M"  # merging
FLAG_OKAY = "*"  # regular, confirmed entry
FLAG_PADDING = "P"  # padding
FLAG_RETURNS = "R"  # returns
FLAG_SUMMARIZE = "S"  # summarize
FLAG_TRANSFER = "T"  # transfer
FLAG_UNREALIZED = "U"  # unrealized
FLAG_WARNING = "!"  # flagged entry
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,385
|
beancount/fava
|
refs/heads/main
|
/tests/test_serialisation.py
|
from __future__ import annotations
import datetime
from decimal import Decimal
from typing import TYPE_CHECKING
import pytest
from beancount.core.number import MISSING
from beancount.core.position import CostSpec
from fava.beans import create
from fava.beans.helpers import replace
from fava.beans.str import to_string
from fava.core.charts import dumps
from fava.core.charts import loads
from fava.helpers import FavaAPIError
from fava.serialisation import deserialise
from fava.serialisation import deserialise_posting
from fava.serialisation import serialise
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from .conftest import SnapshotFunc
def test_serialise_txn() -> None:
    """Serialising a transaction; empty and None payees serialise alike."""
    txn = create.transaction(
        {},
        datetime.date(2017, 12, 12),
        "*",
        "Test3",
        "asdfasd",
        frozenset(["tag"]),
        frozenset(["link"]),
        [
            create.posting("Assets:ETrade:Cash", "100 USD"),
            create.posting("Assets:ETrade:GLD", "0 USD"),
        ],
    )
    json_txn = {
        "date": "2017-12-12",
        "flag": "*",
        "meta": {},
        "narration": "asdfasd",
        "tags": ["tag"],
        "links": ["link"],
        "payee": "Test3",
        "t": "Transaction",
        "postings": [
            {"account": "Assets:ETrade:Cash", "amount": "100 USD"},
            {"account": "Assets:ETrade:GLD", "amount": "0 USD"},
        ],
    }
    serialised = loads(dumps(serialise(txn)))
    assert serialised == json_txn
    # Both an empty-string and a None payee serialise to "".
    json_txn["payee"] = ""
    serialised = loads(dumps(serialise(replace(txn, payee=""))))
    assert serialised == json_txn
    serialised = loads(dumps(serialise(replace(txn, payee=None))))
    assert serialised == json_txn
def test_serialise_entry_types(
    snapshot: SnapshotFunc,
    load_doc_entries: list[Directive],
) -> None:
    # NOTE: the docstring below is fixture input — it appears to be parsed
    # as Beancount source by the load_doc_entries fixture; do not edit it.
    """
    2017-12-11 open Assets:Cash USD "STRICT"
    2017-12-13 balance Assets:Cash 1 USD
    2017-12-14 balance Assets:Cash 1 ~ 1.0 USD
    2017-12-16 document Assets:Cash "/absolute/filename" #tag ^link
    2017-12-12 event "event name" "event description"
    bool-value: TRUE
    string-value: "value"
    account-value: Assets:Cash
    amount-value: 10 USD
    currency-value: USD
    number-value: 10 + 10
    date-value: 2022-12-12
    2017-12-20 note Assets:Cash "This is some comment or note"
    2017-12-21 pad Assets:Cash Assets:OtherCash
    2017-12-22 close Assets:Cash
    2018-12-15 commodity USD
    2018-12-16 price USD 1 EUR
    2019-12-12 query "query name" "journal"
    """
    snapshot(dumps([serialise(entry) for entry in load_doc_entries]))
@pytest.mark.parametrize(
    ("amount_cost_price", "amount_string"),
    [
        (("100 USD", None, None), "100 USD"),
        (
            (
                "100 USD",
                CostSpec(Decimal("10"), None, "EUR", None, None, False),
                None,
            ),
            "100 USD {10 EUR}",
        ),
        (
            (
                "100 USD",
                CostSpec(Decimal("10"), None, "EUR", None, None, False),
                "11 EUR",
            ),
            "100 USD {10 EUR} @ 11 EUR",
        ),
        (("100 USD", None, "11 EUR"), "100 USD @ 11 EUR"),
        (
            (
                "100 USD",
                CostSpec(
                    MISSING,  # type: ignore[arg-type]
                    None,
                    MISSING,  # type: ignore[arg-type]
                    None,
                    None,
                    False,
                ),
                None,
            ),
            "100 USD {}",
        ),
    ],
)
def test_serialise_posting(
    amount_cost_price: tuple[str, CostSpec | None, str],
    amount_string: str,
) -> None:
    """Posting amount/cost/price roundtrip through a single amount string."""
    amount, cost, price = amount_cost_price
    pos = create.posting("Assets", amount, cost, price)  # type: ignore[arg-type]
    json = {"account": "Assets", "amount": amount_string}
    assert loads(dumps(serialise(pos))) == json
    assert deserialise_posting(json) == pos
@pytest.mark.parametrize(
    ("amount_cost_price", "amount_string"),
    [
        (("100 USD", None, None), "10*10 USD"),
        (("130 USD", None, None), "100+50 - 20 USD"),
        (("-140 USD", None, None), "-1400 / 10 USD"),
        (("10 USD", None, "1 EUR"), "10 USD @@ 10 EUR"),
        (
            ("7 USD", None, "1.428571428571428571428571429 EUR"),
            "7 USD @@ 10 EUR",
        ),
        (("0 USD", None, "0 EUR"), "0 USD @@ 0 EUR"),
    ],
)
def test_deserialise_posting(
    amount_cost_price: tuple[str, CostSpec | None, str | None],
    amount_string: str,
) -> None:
    """Roundtrip is not possible here due to total price or calculation."""
    # Amount expressions are evaluated and total (@@) prices are converted
    # to per-unit prices on deserialisation.
    amount, cost, price = amount_cost_price
    pos = create.posting("Assets", amount, cost, price)  # type: ignore[arg-type]
    json = {"account": "Assets", "amount": amount_string}
    assert deserialise_posting(json) == pos
def test_deserialise_posting_and_format(snapshot: SnapshotFunc) -> None:
    """Snapshot the formatting of deserialised incomplete postings."""
    txn = create.transaction(
        {},
        datetime.date(2017, 12, 12),
        "*",
        "Test3",
        "asdfasd",
        frozenset(["tag"]),
        frozenset(["link"]),
        [
            deserialise_posting({"account": "Assets", "amount": "10"}),
            deserialise_posting({"account": "Assets", "amount": "10 EUR @"}),
        ],
    )
    snapshot(to_string(txn))
def test_serialise_balance() -> None:
    """Balance amounts keep their full precision when serialised."""
    bal = create.balance(
        {},
        datetime.date(2019, 9, 17),
        "Assets:ETrade:Cash",
        create.amount("0.1234567891011121314151617 CHF"),
        None,
        None,
    )
    json = {
        "date": "2019-09-17",
        "amount": {"currency": "CHF", "number": "0.1234567891011121314151617"},
        "diff_amount": None,
        "meta": {},
        "tolerance": None,
        "account": "Assets:ETrade:Cash",
        "t": "Balance",
    }
    serialised = loads(dumps(serialise(bal)))
    assert serialised == json
def test_deserialise() -> None:
    """Deserialise a transaction; invalid objects raise FavaAPIError."""
    postings = [
        {"account": "Assets:ETrade:Cash", "amount": "100 USD"},
        {"account": "Assets:ETrade:GLD"},
    ]
    json_txn = {
        "t": "Transaction",
        "date": "2017-12-12",
        "flag": "*",
        "payee": "Test3",
        "narration": "asdfasd",
        "tags": ["tag"],
        "links": ["link"],
        "meta": {},
        "postings": postings,
    }
    txn = create.transaction(
        {},
        datetime.date(2017, 12, 12),
        "*",
        "Test3",
        "asdfasd",
        frozenset(["tag"]),
        frozenset(["link"]),
        [
            create.posting("Assets:ETrade:Cash", "100 USD"),
            # A posting without an amount deserialises with units MISSING.
            replace(
                create.posting("Assets:ETrade:GLD", "100 USD"),
                units=MISSING,
            ),
        ],
    )
    assert deserialise(json_txn) == txn
    with pytest.raises(FavaAPIError):
        deserialise({})
    with pytest.raises(FavaAPIError):
        deserialise({"t": "NoEntry"})
def test_deserialise_balance() -> None:
    """Deserialise a JSON balance directive."""
    json_bal = {
        "t": "Balance",
        "date": "2017-12-12",
        "account": "Assets:ETrade:Cash",
        "amount": {"number": "100", "currency": "USD"},
        "meta": {},
    }
    bal = create.balance(
        {},
        datetime.date(2017, 12, 12),
        "Assets:ETrade:Cash",
        "100 USD",
    )
    assert deserialise(json_bal) == bal
def test_deserialise_note() -> None:
    """Deserialise a JSON note directive."""
    json_note = {
        "t": "Note",
        "date": "2017-12-12",
        "account": "Assets:ETrade:Cash",
        # The trailing quote characters are expected to be stripped.
        "comment": 'This is some comment or note""',
        "meta": {},
    }
    note = create.note(
        {},
        datetime.date(2017, 12, 12),
        "Assets:ETrade:Cash",
        "This is some comment or note",
    )
    assert deserialise(json_note) == note
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,386
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/documents.py
|
"""Document path related helpers."""
from __future__ import annotations
from os import altsep
from os import sep
from pathlib import Path
from typing import TYPE_CHECKING
from fava.helpers import FavaAPIError
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
def is_document_or_import_file(filename: str, ledger: FavaLedger) -> bool:
    """Check whether the filename is a document or in an import directory.

    Args:
        filename: The filename to check.
        ledger: The FavaLedger.

    Returns:
        Whether this is one of the documents or a path in an import dir.
    """
    for document in ledger.all_entries_by_type.Document:
        if filename == document.filename:
            return True
    resolved = str(Path(filename).resolve())
    return any(
        resolved.startswith(str(ledger.join_path(import_dir)))
        for import_dir in ledger.fava_options.import_dirs
    )
def filepath_in_document_folder(
    documents_folder: str,
    account: str,
    filename: str,
    ledger: FavaLedger,
) -> Path:
    """File path for a document in the folder for an account.

    Args:
        documents_folder: The documents folder.
        account: The account to choose the subfolder for.
        filename: The filename of the document.
        ledger: The FavaLedger.

    Returns:
        The path that the document should be saved at.

    Raises:
        FavaAPIError: If the folder is not a configured documents folder
            or the account is not a valid account.
    """
    if documents_folder not in ledger.options["documents"]:
        raise FavaAPIError(f"Not a documents folder: {documents_folder}.")
    if account not in ledger.attributes.accounts:
        raise FavaAPIError(f"Not a valid account: '{account}'")
    # Replace path separators so the filename cannot escape the folder.
    for separator in sep, altsep:
        if separator:
            filename = filename.replace(separator, " ")
    return ledger.join_path(
        documents_folder,
        *account.split(":"),
        filename,
    )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,387
|
beancount/fava
|
refs/heads/main
|
/tests/test_template_filters.py
|
from __future__ import annotations
from decimal import Decimal
from typing import TYPE_CHECKING
from fava.template_filters import basename
from fava.template_filters import format_currency
if TYPE_CHECKING: # pragma: no cover
from flask import Flask
def test_format_currency(app: Flask) -> None:
    """Currencies format with two decimal places; invert flips the sign."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        assert format_currency(Decimal("2.12")) == "2.12"
        assert format_currency(Decimal("2.13"), invert=True) == "-2.13"
def test_basename() -> None:
    """basename returns only the final component of a file path."""
    assert basename(__file__) == "test_template_filters.py"
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,388
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/__init__.py
|
"""Types, functions and wrappers to deal with Beancount types."""
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,389
|
beancount/fava
|
refs/heads/main
|
/tests/test_util_ranking.py
|
from __future__ import annotations
from datetime import date
from fava.util.ranking import ExponentialDecayRanker
def test_ranker() -> None:
    """Entries updated more recently and more often rank higher."""
    # Single updates: the most recent date wins.
    list_ = ["1", "2", "3"]
    ranker = ExponentialDecayRanker(list_)
    ranker.update("1", date(2015, 1, 1))
    ranker.update("2", date(2014, 1, 1))
    ranker.update("3", date(2016, 1, 1))
    assert ranker.sort() == ["3", "1", "2"]
    # Same-day ties are broken by an additional later update.
    list_ = ["1", "2"]
    ranker = ExponentialDecayRanker(list_)
    ranker.update("2", date(2016, 1, 1))
    ranker.update("2", date(2016, 1, 1))
    ranker.update("1", date(2016, 1, 1))
    ranker.update("1", date(2016, 1, 2))
    assert ranker.sort() == ["1", "2"]
    # A single recent update outweighs two same-day older updates.
    list_ = ["1", "2"]
    ranker = ExponentialDecayRanker(list_)
    ranker.update("2", date(2015, 1, 1))
    ranker.update("2", date(2015, 1, 1))
    ranker.update("1", date(2016, 1, 1))
    assert ranker.sort() == ["1", "2"]
    # But two older updates on distinct days outweigh one recent update.
    list_ = ["1", "2"]
    ranker = ExponentialDecayRanker(list_)
    ranker.update("2", date(2015, 1, 1))
    ranker.update("2", date(2015, 1, 2))
    ranker.update("1", date(2016, 1, 1))
    assert ranker.sort() == ["2", "1"]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,390
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/number.py
|
"""Formatting numbers."""
from __future__ import annotations
import copy
from decimal import Decimal
from typing import Callable
from typing import TYPE_CHECKING
from babel.core import Locale
from beancount.core.display_context import Precision
from fava.core.module_base import FavaModule
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
Formatter = Callable[[Decimal], str]
def get_locale_format(locale: Locale | None, precision: int) -> Formatter:
    """Obtain formatting pattern for the given locale and precision.

    Arguments:
        locale: An optional locale.
        precision: The precision.

    Returns:
        A function that renders Decimals to strings as desired.

    Raises:
        ValueError: If the locale has no decimal format pattern.
    """
    # Set a maximum precision of 14, half the default precision of Decimal
    precision = min(precision, 14)
    if locale is None:
        # Without a locale, use plain fixed-point str.format formatting.
        fmt_string = "{:." + str(precision) + "f}"
        def fmt(num: Decimal) -> str:
            return fmt_string.format(num)
        return fmt
    # Copy the pattern so mutating frac_prec does not leak into the locale.
    pattern = copy.copy(locale.decimal_formats.get(None))
    if not pattern:
        raise ValueError("Expected Locale to have a decimal format pattern")
    pattern.frac_prec = (precision, precision)
    def locale_fmt(num: Decimal) -> str:
        return pattern.apply(num, locale)  # type: ignore[no-any-return]
    return locale_fmt
class DecimalFormatModule(FavaModule):
    """Formatting numbers."""
    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        self._locale: Locale | None = None
        self._formatters: dict[str, Formatter] = {}
        # Fallback formatter: two decimal places, no locale.
        self._default_pattern = get_locale_format(None, 2)
        self.precisions: dict[str, int] = {}
    def load_file(self) -> None:
        """Update the locale and per-currency formatters from the ledger."""
        locale = None
        locale_option = self.ledger.fava_options.locale
        # The render_commas option implies the "en" locale if none is set.
        if self.ledger.options["render_commas"] and not locale_option:
            locale_option = "en"
        self.ledger.fava_options.locale = locale_option
        if locale_option:
            locale = Locale.parse(locale_option)
        dcontext = self.ledger.options["dcontext"]
        # Infer per-currency precisions from the display context.
        precisions: dict[str, int] = {}
        for currency, ccontext in dcontext.ccontexts.items():
            prec = ccontext.get_fractional(Precision.MOST_COMMON)
            if prec is not None:
                precisions[currency] = prec
        # Explicit commodity precisions override the inferred ones.
        precisions.update(self.ledger.commodities.precisions)
        self._locale = locale
        self._default_pattern = get_locale_format(locale, 2)
        self._formatters = {
            currency: get_locale_format(locale, prec)
            for currency, prec in precisions.items()
        }
        self.precisions = precisions
    def __call__(self, value: Decimal, currency: str | None = None) -> str:
        """Format a decimal to the right number of decimal digits with locale.

        Arguments:
            value: A decimal number.
            currency: A currency string or None.

        Returns:
            A string, the formatted decimal.
        """
        if currency is None:
            return self._default_pattern(value)
        return self._formatters.get(currency, self._default_pattern)(value)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,391
|
beancount/fava
|
refs/heads/main
|
/src/fava/internal_api.py
|
"""Internal API.
This is used to pre-process some data that is used in the templates, allowing
this part of the functionality to be tested and allowing some end-to-end tests
for the frontend data validation.
"""
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
from typing import Any
from typing import TYPE_CHECKING
from flask import current_app
from flask import url_for
from flask_babel import gettext # type: ignore[import]
from fava.context import g
from fava.util.excel import HAVE_EXCEL
if TYPE_CHECKING: # pragma: no cover
from datetime import date
from fava.beans.abc import Meta
from fava.core.accounts import AccountDict
from fava.core.extensions import ExtensionDetails
from fava.core.fava_options import FavaOptions
from fava.helpers import BeancountError
from fava.util.date import Interval
@dataclass(frozen=True)
class SerialisedError:
    """A Beancount error in the representation passed to the frontend."""

    type: str
    source: Meta | None
    message: str

    @staticmethod
    def from_beancount_error(err: BeancountError) -> SerialisedError:
        """Build a serialisable error from a raw Beancount error."""
        meta = copy(err.source)
        if meta is not None:
            # Drop the tolerances entry - it is an implementation detail
            # that should not be shipped to the frontend.
            meta.pop("__tolerances__", None)
        return SerialisedError(type(err).__name__, meta, err.message)
@dataclass(frozen=True)
class LedgerData:
    """This is used as report-independent data in the frontend."""

    # All account names in the ledger.
    accounts: list[str]
    # Per-account details.
    account_details: AccountDict
    # URL of the index page for this ledger.
    base_url: str
    # All currencies in the ledger.
    currencies: list[str]
    # Human-readable names of the commodities.
    currency_names: dict[str, str]
    # Serialised Beancount/Fava errors.
    errors: list[SerialisedError]
    # The Fava-specific options.
    fava_options: FavaOptions
    # Whether Fava runs in incognito mode.
    incognito: bool
    # Whether Excel export support is available.
    have_excel: bool
    # All links used in the ledger.
    links: list[str]
    # Subset of the Beancount options needed by the frontend.
    options: dict[str, str | list[str]]
    # All payees used in the ledger.
    payees: list[str]
    # Per-currency display precisions.
    precisions: dict[str, int]
    # All tags used in the ledger.
    tags: list[str]
    # The years that have entries.
    years: list[str]
    # Queries to show in the sidebar.
    user_queries: list[Any]
    # Number of upcoming events.
    upcoming_events_count: int
    # Details of the loaded extensions.
    extensions: list[ExtensionDetails]
    # Custom (title, URL) sidebar links.
    sidebar_links: list[tuple[str, str]]
    # (title, URL) pairs for the other loaded ledgers.
    other_ledgers: list[tuple[str, str]]
def get_errors() -> list[SerialisedError]:
    """Serialise errors (do not pass the entry as that might fail serialisation)."""
    return [SerialisedError.from_beancount_error(e) for e in g.ledger.errors]
def _get_options() -> dict[str, Any]:
    """Extract the subset of the Beancount options used by the frontend."""
    options = g.ledger.options
    keys = (
        "documents",
        "filename",
        "include",
        "operating_currency",
        "title",
        "name_assets",
        "name_liabilities",
        "name_equity",
        "name_income",
        "name_expenses",
    )
    return {key: options[key] for key in keys}
def get_ledger_data() -> LedgerData:
    """Get the report-independent ledger data."""
    ledger = g.ledger
    fava_options = ledger.fava_options
    # Links to the other loaded ledgers, excluding the active one.
    other_ledgers = [
        (other.options["title"], url_for("index", bfile=slug))
        for (slug, other) in current_app.config["LEDGERS"].items()
        if slug != g.beancount_file_slug
    ]
    return LedgerData(
        accounts=ledger.attributes.accounts,
        account_details=ledger.accounts,
        base_url=url_for("index"),
        currencies=ledger.attributes.currencies,
        currency_names=ledger.commodities.names,
        errors=get_errors(),
        fava_options=fava_options,
        incognito=current_app.config["INCOGNITO"],
        have_excel=HAVE_EXCEL,
        links=ledger.attributes.links,
        options=_get_options(),
        payees=ledger.attributes.payees,
        precisions=ledger.format_decimal.precisions,
        tags=ledger.attributes.tags,
        years=ledger.attributes.years,
        user_queries=ledger.query_shell.queries[
            : fava_options.sidebar_show_queries
        ],
        upcoming_events_count=len(ledger.misc.upcoming_events),
        extensions=ledger.extensions.extension_details,
        sidebar_links=ledger.misc.sidebar_links,
        other_ledgers=other_ledgers,
    )
@dataclass(frozen=True)
class ChartData:
    """The common data format to pass charts to the frontend."""

    # Chart type discriminator, e.g. "bar", "hierarchy" or "balances".
    type: str
    # Label shown for the chart.
    label: str
    # Type-specific chart payload.
    data: Any
def _chart_interval_totals(
    interval: Interval,
    account_name: str | tuple[str, ...],
    label: str | None = None,
    invert: bool = False,
) -> ChartData:
    """Bar chart data of per-interval totals for the given account(s)."""
    totals = g.ledger.charts.interval_totals(
        g.filtered,
        interval,
        account_name,
        g.conversion,
        invert,
    )
    return ChartData("bar", label or str(account_name), totals)
def _chart_hierarchy(
    account_name: str,
    begin_date: date | None = None,
    end_date: date | None = None,
    label: str | None = None,
) -> ChartData:
    """Hierarchy chart data for a single account subtree."""
    # Default to the end date of the current filter.
    hierarchy = g.ledger.charts.hierarchy(
        g.filtered,
        account_name,
        g.conversion,
        begin_date,
        end_date or g.filtered.end_date,
    )
    return ChartData("hierarchy", label or account_name, hierarchy)
def _chart_net_worth() -> ChartData:
    """Chart data for the net worth over time."""
    series = g.ledger.charts.net_worth(g.filtered, g.interval, g.conversion)
    return ChartData("balances", gettext("Net Worth"), series)
def _chart_account_balance(account_name: str) -> ChartData:
    """Chart data for the balance of a single account over time."""
    series = g.ledger.charts.linechart(g.filtered, account_name, g.conversion)
    return ChartData("balances", gettext("Account Balance"), series)
class ChartApi:
    """Functions to generate chart data."""

    # Aliases so that all chart builders are reachable through a
    # single namespace.
    account_balance = _chart_account_balance
    hierarchy = _chart_hierarchy
    interval_totals = _chart_interval_totals
    net_worth = _chart_net_worth
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,392
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/load.py
|
"""Load Beancount files and strings."""
from __future__ import annotations
from typing import TYPE_CHECKING
from beancount.loader import load_string as load_string_bc
if TYPE_CHECKING: # pragma: no cover
from fava.beans.types import LoaderResult
def load_string(value: str) -> LoaderResult:
    """Load a Beancount string.

    Args:
        value: Beancount source as a string.

    Returns:
        The ``LoaderResult`` from the Beancount loader.
    """
    return load_string_bc(value)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,393
|
beancount/fava
|
refs/heads/main
|
/tests/test_util_date.py
|
from __future__ import annotations
from datetime import date
from datetime import datetime
from unittest import mock
import pytest
from fava.util.date import FiscalYearEnd
from fava.util.date import get_fiscal_period
from fava.util.date import get_next_interval
from fava.util.date import get_prev_interval
from fava.util.date import Interval
from fava.util.date import interval_ends
from fava.util.date import month_offset
from fava.util.date import number_of_days_in_period
from fava.util.date import parse_date
from fava.util.date import parse_fye_string
from fava.util.date import substitute
def test_interval() -> None:
    """Interval.get is case-insensitive and falls back to MONTH."""
    cases = [
        ("month", Interval.MONTH),
        ("year", Interval.YEAR),
        ("YEAR", Interval.YEAR),
        ("asdfasdf", Interval.MONTH),
    ]
    for name, expected in cases:
        assert Interval.get(name) is expected
def _to_date(string: str) -> date:
    """Convert a string in ISO 8601 format into a datetime.date object."""
    parsed = datetime.strptime(string, "%Y-%m-%d")
    return parsed.date()
@pytest.mark.parametrize(
    ("input_date_string", "interval", "expect", "expect_filter"),
    [
        ("2016-01-01", Interval.DAY, "2016-01-01", "2016-01-01"),
        ("2016-01-04", Interval.WEEK, "2016W01", "2016-W01"),
        ("2016-01-04", Interval.MONTH, "Jan 2016", "2016-01"),
        ("2016-01-04", Interval.QUARTER, "2016Q1", "2016-Q1"),
        ("2016-01-04", Interval.YEAR, "2016", "2016"),
    ],
)
def test_interval_format(
    input_date_string: str,
    interval: Interval,
    expect: str,
    expect_filter: str,
) -> None:
    """Intervals have a display format and a filter-syntax format."""
    assert interval.format_date(_to_date(input_date_string)) == expect
    assert (
        interval.format_date_filter(_to_date(input_date_string))
        == expect_filter
    )
@pytest.mark.parametrize(
    ("input_date_string", "interval", "expect"),
    [
        ("2016-01-01", Interval.DAY, "2016-01-02"),
        ("2016-01-01", Interval.WEEK, "2016-01-04"),
        ("2016-01-01", Interval.MONTH, "2016-02-01"),
        ("2016-01-01", Interval.QUARTER, "2016-04-01"),
        ("2016-01-01", Interval.YEAR, "2017-01-01"),
        ("2016-12-31", Interval.DAY, "2017-01-01"),
        ("2016-12-31", Interval.WEEK, "2017-01-02"),
        ("2016-12-31", Interval.MONTH, "2017-01-01"),
        ("2016-12-31", Interval.QUARTER, "2017-01-01"),
        ("2016-12-31", Interval.YEAR, "2017-01-01"),
        # At the maximum representable date the same date is returned.
        ("9999-12-31", Interval.QUARTER, "9999-12-31"),
        ("9999-12-31", Interval.YEAR, "9999-12-31"),
    ],
)
def test_get_next_interval(
    input_date_string: str,
    interval: Interval,
    expect: str,
) -> None:
    """get_next_interval returns the start of the following interval."""
    get = get_next_interval(_to_date(input_date_string), interval)
    assert get == _to_date(expect)
@pytest.mark.parametrize(
    ("input_date_string", "interval", "expect"),
    [
        ("2016-01-01", Interval.DAY, "2016-01-01"),
        ("2016-01-01", Interval.WEEK, "2015-12-28"),
        ("2016-01-01", Interval.MONTH, "2016-01-01"),
        ("2016-01-01", Interval.QUARTER, "2016-01-01"),
        ("2016-01-01", Interval.YEAR, "2016-01-01"),
        ("2016-12-31", Interval.DAY, "2016-12-31"),
        ("2016-12-31", Interval.WEEK, "2016-12-26"),
        ("2016-12-31", Interval.MONTH, "2016-12-01"),
        ("2016-12-31", Interval.QUARTER, "2016-10-01"),
        ("2016-12-31", Interval.YEAR, "2016-01-01"),
        ("9999-12-31", Interval.QUARTER, "9999-10-01"),
        ("9999-12-31", Interval.YEAR, "9999-01-01"),
    ],
)
def test_get_prev_interval(
    input_date_string: str,
    interval: Interval,
    expect: str,
) -> None:
    """get_prev_interval returns the start of the interval containing the date."""
    get = get_prev_interval(_to_date(input_date_string), interval)
    assert get == _to_date(expect)
def test_interval_tuples() -> None:
    """interval_ends yields all interval boundaries covering the range."""
    march_to_may = interval_ends(
        date(2014, 3, 5), date(2014, 5, 5), Interval.MONTH
    )
    assert list(march_to_may) == [
        date(2014, 3, 1),
        date(2014, 4, 1),
        date(2014, 5, 1),
        date(2014, 6, 1),
    ]
    aligned_months = interval_ends(
        date(2014, 1, 1), date(2014, 5, 1), Interval.MONTH
    )
    assert list(aligned_months) == [
        date(2014, 1, 1),
        date(2014, 2, 1),
        date(2014, 3, 1),
        date(2014, 4, 1),
        date(2014, 5, 1),
    ]
    years = interval_ends(date(2014, 3, 5), date(2014, 5, 5), Interval.YEAR)
    assert list(years) == [date(2014, 1, 1), date(2015, 1, 1)]
    aligned_years = interval_ends(
        date(2014, 1, 1), date(2015, 1, 1), Interval.YEAR
    )
    assert list(aligned_years) == [date(2014, 1, 1), date(2015, 1, 1)]
@pytest.mark.parametrize(
    ("string", "output"),
    [
        ("year", "2016"),
        ("(year-1)", "2015"),
        ("year-1-2", "2015-2"),
        ("(year)-1-2", "2016-1-2"),
        ("(year+3)", "2019"),
        # Each variable is substituted independently; adjacent
        # substitutions are simply concatenated.
        ("(year+3)month", "20192016-06"),
        ("(year-1000)", "1016"),
        ("quarter", "2016-Q2"),
        ("quarter+2", "2016-Q4"),
        ("quarter+20", "2021-Q2"),
        ("(month)", "2016-06"),
        ("month+6", "2016-12"),
        ("(month+24)", "2018-06"),
        ("week", "2016-W25"),
        ("week+20", "2016-W45"),
        ("week+2000", "2054-W42"),
        ("day", "2016-06-24"),
        ("day+20", "2016-07-14"),
    ],
)
def test_substitute(string: str, output: str) -> None:
    """Date variables in strings are substituted relative to "today"."""
    # Mock the imported datetime.date in fava.util.date module
    # Ref:
    # http://www.voidspace.org.uk/python/mock/examples.html#partial-mocking
    with mock.patch("fava.util.date.datetime.date") as mock_date:
        mock_date.today.return_value = _to_date("2016-06-24")
        mock_date.side_effect = date
        assert substitute(string) == output
@pytest.mark.parametrize(
    ("fye_str", "test_date", "string", "output"),
    [
        ("06-30", "2018-02-02", "fiscal_year", "FY2018"),
        ("06-30", "2018-08-02", "fiscal_year", "FY2019"),
        ("06-30", "2018-07-01", "fiscal_year", "FY2019"),
        ("06-30", "2018-08-02", "fiscal_year-1", "FY2018"),
        ("06-30", "2018-02-02", "fiscal_year+6", "FY2024"),
        ("06-30", "2018-08-02", "fiscal_year+6", "FY2025"),
        ("06-30", "2018-08-02", "fiscal_quarter", "FY2019-Q1"),
        ("06-30", "2018-10-01", "fiscal_quarter", "FY2019-Q2"),
        ("06-30", "2018-12-30", "fiscal_quarter", "FY2019-Q2"),
        ("06-30", "2018-02-02", "fiscal_quarter", "FY2018-Q3"),
        ("06-30", "2018-07-03", "fiscal_quarter-1", "FY2018-Q4"),
        ("06-30", "2018-07-03", "fiscal_quarter+6", "FY2020-Q3"),
        # output=None marks a case that is expected to raise.
        ("04-05", "2018-07-03", "fiscal_quarter", None),
    ],
)
def test_fiscal_substitute(
    fye_str: str,
    test_date: str,
    string: str,
    output: str | None,
) -> None:
    """fiscal_year/fiscal_quarter variables respect the fiscal year end."""
    fye = parse_fye_string(fye_str)
    with mock.patch("fava.util.date.datetime.date") as mock_date:
        mock_date.today.return_value = _to_date(test_date)
        mock_date.side_effect = date
        if output is None:
            with pytest.raises(
                ValueError,
                match="Cannot use fiscal_quarter if fiscal year",
            ):
                substitute(string, fye)
        else:
            assert substitute(string, fye) == output
@pytest.mark.parametrize(
    ("expect_start", "expect_end", "text"),
    [
        ("2000-01-01", "2001-01-01", " 2000 "),
        ("2010-10-01", "2010-11-01", "2010-10"),
        ("2000-01-03", "2000-01-04", "2000-01-03"),
        ("2015-01-05", "2015-01-12", "2015-W01"),
        ("2015-04-01", "2015-07-01", "2015-Q2"),
        ("2014-01-01", "2016-01-01", "2014 to 2015"),
        ("2014-01-01", "2016-01-01", "2014-2015"),
        ("2011-10-01", "2016-01-01", "2011-10 - 2015"),
        ("2018-07-01", "2020-07-01", "FY2019 - FY2020"),
        ("2018-07-01", "2021-01-01", "FY2019 - 2020"),
        ("2010-07-01", "2015-07-01", "FY2011 to FY2015"),
        ("2011-01-01", "2015-07-01", "2011 to FY2015"),
    ],
)
def test_parse_date(expect_start: str, expect_end: str, text: str) -> None:
    """parse_date handles single periods, ranges, and fiscal years."""
    expected = (_to_date(expect_start), _to_date(expect_end))
    assert parse_date(text, FiscalYearEnd(6, 30)) == expected
    # Without a fiscal year end, non-FY dates parse identically.
    if "FY" not in text:
        assert parse_date(text, None) == expected
def test_parse_date_empty() -> None:
    """Whitespace-only input parses to an empty (None, None) range."""
    for fye in (FiscalYearEnd(6, 30), None):
        assert parse_date(" ", fye) == (None, None)
@pytest.mark.parametrize(
    ("expect_start", "expect_end", "text"),
    [
        ("2014-01-01", "2016-06-27", "year-2-day+2"),
        ("2016-01-01", "2016-06-25", "year-day"),
        ("2015-01-01", "2017-01-01", "2015-year"),
        ("2016-01-01", "2016-04-01", "quarter-1"),
        ("2013-07-01", "2014-07-01", "fiscal_year-2"),
        ("2016-04-01", "2016-07-01", "fiscal_quarter"),
    ],
)
def test_parse_date_relative(
    expect_start: str,
    expect_end: str,
    text: str,
) -> None:
    """Relative date expressions are resolved against a mocked "today"."""
    start, end = _to_date(expect_start), _to_date(expect_end)
    with mock.patch("fava.util.date.datetime.date") as mock_date:
        mock_date.today.return_value = _to_date("2016-06-24")
        mock_date.side_effect = date
        assert parse_date(text, FiscalYearEnd(6, 30)) == (start, end)
@pytest.mark.parametrize(
    ("interval", "date_str", "expect"),
    [
        (Interval.DAY, "2016-05-01", 1),
        (Interval.DAY, "2016-05-31", 1),
        (Interval.WEEK, "2016-05-01", 7),
        (Interval.WEEK, "2016-05-31", 7),
        (Interval.MONTH, "2016-05-02", 31),
        (Interval.MONTH, "2016-05-31", 31),
        (Interval.MONTH, "2016-06-11", 30),
        (Interval.MONTH, "2016-07-31", 31),
        # February length accounts for leap years.
        (Interval.MONTH, "2016-02-01", 29),
        (Interval.MONTH, "2015-02-01", 28),
        (Interval.MONTH, "2016-01-01", 31),
        (Interval.QUARTER, "2015-02-01", 90),
        (Interval.QUARTER, "2015-05-01", 91),
        (Interval.QUARTER, "2016-02-01", 91),
        (Interval.QUARTER, "2016-12-01", 92),
        (Interval.YEAR, "2015-02-01", 365),
        (Interval.YEAR, "2016-01-01", 366),
    ],
)
def test_number_of_days_in_period(
    interval: Interval,
    date_str: str,
    expect: int,
) -> None:
    """The day count of the period containing a date is computed."""
    assert number_of_days_in_period(interval, _to_date(date_str)) == expect
@pytest.mark.parametrize(
    ("date_input", "offset", "expected"),
    [
        ("2018-01-12", 0, "2018-01-12"),
        ("2018-01-01", -3, "2017-10-01"),
        ("2018-01-30", 1, None),  # raises value error, as it should
        ("2018-01-12", 13, "2019-02-12"),
        ("2018-01-12", -13, "2016-12-12"),
    ],
)
def test_month_offset(
    date_input: str,
    offset: int,
    expected: str | None,
) -> None:
    """month_offset shifts a date by whole months, keeping the day."""
    start_date = _to_date(date_input)
    if expected is None:
        # The target month has no such day (e.g. Feb 30).
        with pytest.raises(ValueError, match="day is out of range"):
            month_offset(start_date, offset)
    else:
        assert str(month_offset(start_date, offset)) == expected
@pytest.mark.parametrize(
    ("year", "quarter", "fye_str", "expect_start", "expect_end"),
    [
        # standard calendar year [FYE=12-31]
        (2018, None, "12-31", "2018-01-01", "2019-01-01"),
        (2018, 1, "12-31", "2018-01-01", "2018-04-01"),
        (2018, 3, "12-31", "2018-07-01", "2018-10-01"),
        (2018, 4, "12-31", "2018-10-01", "2019-01-01"),
        # US fiscal year [FYE=09-30]
        (2018, None, "09-30", "2017-10-01", "2018-10-01"),
        (2018, 3, "09-30", "2018-04-01", "2018-07-01"),
        # 30th June - Australia and NZ [FYE=06-30]
        (2018, None, "06-30", "2017-07-01", "2018-07-01"),
        (2018, 1, "06-30", "2017-07-01", "2017-10-01"),
        (2018, 2, "06-30", "2017-10-01", "2018-01-01"),
        (2018, 4, "06-30", "2018-04-01", "2018-07-01"),
        # 5th Apr - UK [FYE=04-05]
        (2018, None, "04-05", "2017-04-06", "2018-04-06"),
        (2018, 1, "04-05", "None", "None"),
        # 28th February - consider leap years [FYE=02-28]
        (2016, None, "02-28", "2015-03-01", "2016-03-01"),
        (2017, None, "02-28", "2016-03-01", "2017-03-01"),
        # None
        (2018, None, None, "2018-01-01", "2019-01-01"),
        # expected errors
        (2018, 0, "12-31", "None", "None"),
        (2018, 5, "12-31", "None", "None"),
    ],
)
def test_get_fiscal_period(
    year: int,
    quarter: int | None,
    fye_str: str | None,
    expect_start: str,
    expect_end: str,
) -> None:
    """Fiscal year/quarter ranges are computed from the fiscal year end.

    Expected values of "None" mean the lookup failed and (None, None)
    is returned.
    """
    fye = parse_fye_string(fye_str) if fye_str else None
    start_date, end_date = get_fiscal_period(year, fye, quarter)
    assert str(start_date) == expect_start
    assert str(end_date) == expect_end
@pytest.mark.parametrize(
    ("fye_str", "month", "day"),
    [
        ("12-31", 12, 31),
        ("06-30", 6, 30),
        ("02-28", 2, 28),
    ],
)
def test_parse_fye_string(fye_str: str, month: int, day: int) -> None:
    """Valid "MM-DD" fiscal-year-end strings parse into month and day."""
    fye = parse_fye_string(fye_str)
    assert fye
    assert fye.month == month
    assert fye.day == day
@pytest.mark.parametrize(
    "fye_str",
    [
        "12-32",
        "asdfasdf",
        "02-29",
    ],
)
def test_parse_fye_invalid_string(fye_str: str) -> None:
    """Invalid fiscal-year-end strings are rejected with None."""
    result = parse_fye_string(fye_str)
    assert result is None
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,394
|
beancount/fava
|
refs/heads/main
|
/tests/test_json_api.py
|
from __future__ import annotations
import datetime
import hashlib
import re
from io import BytesIO
from pathlib import Path
from typing import Any
from typing import TYPE_CHECKING
import pytest
from fava.beans.funcs import hash_entry
from fava.context import g
from fava.core.charts import dumps
from fava.core.fava_options import InsertEntryOption
from fava.core.file import get_entry_slice
from fava.core.file import insert_entry
from fava.core.misc import align
from fava.json_api import validate_func_arguments
from fava.json_api import ValidationError
if TYPE_CHECKING: # pragma: no cover
from flask import Flask
from flask.testing import FlaskClient
from werkzeug.test import TestResponse
from fava.core import FavaLedger
from .conftest import SnapshotFunc
def test_validate_get_args() -> None:
    """validate_func_arguments builds a validator from a function signature."""

    def noparams() -> None:
        pass

    # Functions without parameters need no validation at all.
    assert validate_func_arguments(noparams) is None

    def func(test: str) -> None:
        assert test
        assert isinstance(test, str)

    validator = validate_func_arguments(func)
    assert validator
    # A missing parameter is rejected ...
    with pytest.raises(ValidationError):
        validator({"notest": "value"})
    # ... and a present one is extracted as the argument list.
    assert validator({"test": "value"}) == ["value"]
def assert_api_error(response: TestResponse, msg: str | None = None) -> str:
    """Assert that the JSON API request failed and return the error message.

    If ``msg`` is given, it must equal the error message exactly.
    """
    assert response.status_code == 200
    payload = response.json
    assert payload
    assert not payload["success"], payload
    error = payload["error"]
    assert isinstance(error, str)
    if msg:
        assert msg == error
    return error
def assert_api_success(response: TestResponse, data: Any | None = None) -> Any:
    """Assert that the JSON API request succeeded and return its data.

    If ``data`` is given, it must equal the response data exactly.
    """
    assert response.status_code == 200
    payload = response.json
    assert payload
    assert payload["success"], payload
    if data is not None:
        assert data == payload["data"]
    return payload["data"]
def test_api_changed(test_client: FlaskClient) -> None:
    """The changed endpoint reports False for an untouched ledger."""
    resp = test_client.get("/long-example/api/changed")
    assert_api_success(resp, False)
def test_api_add_document(
    app: Flask,
    test_client: FlaskClient,
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Documents can be uploaded and land under the account folder tree."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        monkeypatch.setitem(g.ledger.options, "documents", [str(tmp_path)])
        request_data = {
            "folder": str(tmp_path),
            "account": "Expenses:Food:Restaurant",
            "file": (BytesIO(b"asdfasdf"), "2015-12-12 test"),
        }
        url = "/long-example/api/add_document"

        # A request without a file part must be rejected.
        response = test_client.put(url)
        assert_api_error(response, "No file uploaded.")

        filename = (
            tmp_path / "Expenses" / "Food" / "Restaurant" / "2015-12-12 test"
        )

        response = test_client.put(url, data=request_data)
        # BUG FIX: the expected messages must interpolate the target path;
        # the previous f-strings had no placeholder ("(unknown)") and could
        # never match the API's actual response.
        assert_api_success(response, f"Uploaded to {filename}")
        assert Path(filename).is_file()

        # Re-upload the same file: the existing document must not be
        # overwritten.
        request_data["file"] = (BytesIO(b"asdfasdf"), "2015-12-12 test")
        response = test_client.put(url, data=request_data)
        assert_api_error(response, f"{filename} already exists.")
def test_api_upload_import_file(
    app: Flask,
    test_client: FlaskClient,
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Files can be uploaded into the configured import directory."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        monkeypatch.setattr(
            g.ledger.fava_options,
            "import_dirs",
            [str(tmp_path)],
        )
        request_data = {
            "file": (BytesIO(b"asdfasdf"), "recipt.pdf"),
        }
        url = "/long-example/api/upload_import_file"

        # A request without a file part must be rejected.
        response = test_client.put(url)
        assert_api_error(response, "No file uploaded.")

        filename = tmp_path / "recipt.pdf"

        response = test_client.put(url, data=request_data)
        # BUG FIX: the expected messages must interpolate the target path;
        # the previous f-strings had no placeholder ("(unknown)") and could
        # never match the API's actual response.
        assert_api_success(response, f"Uploaded to {filename}")
        assert Path(filename).is_file()

        # Uploading the exact same file should fail due to path conflict
        request_data["file"] = (BytesIO(b"asdfasdf"), "recipt.pdf")
        response = test_client.put(url, data=request_data)
        assert_api_error(response, f"{filename} already exists.")
def test_api_errors(test_client: FlaskClient, snapshot: SnapshotFunc) -> None:
    """A clean ledger has no errors; an erroring one matches the snapshot."""
    assert_api_success(test_client.get("/long-example/api/errors"), [])

    data = assert_api_success(test_client.get("/errors/api/errors"))
    # Sort by message so the snapshot is deterministic.
    snapshot(sorted(data, key=lambda err: str(err["message"])))
def test_api_context(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
    example_ledger: FavaLedger,
) -> None:
    """The context endpoint returns entry context for a given entry hash."""
    response = test_client.get("/long-example/api/context")
    # The entry hash is a required parameter.
    assert_api_error(
        response,
        "Invalid API request: Parameter `entry_hash` is missing.",
    )

    entry_hash = hash_entry(
        next(
            entry
            for entry in example_ledger.all_entries_by_type.Transaction
            if entry.narration == r"Investing 40% of cash in VBMPX"
            and entry.date == datetime.date(2016, 5, 9)
        ),
    )
    response = test_client.get(
        "/long-example/api/context",
        query_string={"entry_hash": entry_hash},
    )
    data = assert_api_success(response)
    snapshot(data)

    entry_hash = hash_entry(example_ledger.all_entries[10])
    response = test_client.get(
        "/long-example/api/context",
        query_string={"entry_hash": entry_hash},
    )
    data = assert_api_success(response)
    snapshot(data)
    # This early entry has no balances before it.
    assert not data.get("balances_before")
def test_api_payee_accounts(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
) -> None:
    """Accounts associated with a payee are returned."""
    # The payee parameter is required.
    assert_api_error(test_client.get("/long-example/api/payee_accounts"))

    response = test_client.get(
        "/long-example/api/payee_accounts",
        query_string={"payee": "EDISON POWER"},
    )
    data = assert_api_success(response)
    assert data[0] == "Assets:US:BofA:Checking"
    assert data[1] == "Expenses:Home:Electricity"
    snapshot(data)
def test_api_payee_transaction(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
) -> None:
    """A transaction for the given payee is returned and snapshotted."""
    resp = test_client.get(
        "/long-example/api/payee_transaction",
        query_string={"payee": "EDISON POWER"},
    )
    snapshot(assert_api_success(resp))
def test_api_imports(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
) -> None:
    """Importable files are listed and entries can be extracted from them."""
    response = test_client.get("/import/api/imports")
    data = assert_api_success(response)
    assert data
    snapshot(data)

    # Pick a file that at least one importer claims.
    importable = next(f for f in data if f["importers"])
    assert importable

    response = test_client.get(
        "/import/api/extract",
        query_string={
            "filename": importable["name"],
            "importer": importable["importers"][0]["importer_name"],
        },
    )
    data = assert_api_success(response)
    snapshot(data)
def test_api_move(test_client: FlaskClient) -> None:
    """The move endpoint validates its parameters before moving anything."""
    response = test_client.get("/long-example/api/move")
    assert_api_error(
        response,
        "Invalid API request: Parameter `account` is missing.",
    )

    invalid = {"account": "Assets", "new_name": "new", "filename": "old"}
    # The long-example ledger has no documents folder configured.
    response = test_client.get("/long-example/api/move", query_string=invalid)
    assert_api_error(response, "You need to set a documents folder.")

    response = test_client.get("/import/api/move", query_string=invalid)
    assert_api_error(response, "Not a valid account: 'Assets'")

    response = test_client.get(
        "/import/api/move",
        query_string={
            **invalid,
            "account": "Assets:Checking",
        },
    )
    assert_api_error(response, "Not a file: 'old'")
def test_api_get_source_invalid_unicode(test_client: FlaskClient) -> None:
    """Reading a source file with invalid unicode yields a clear error."""
    resp = test_client.get(
        "/invalid-unicode/api/source",
        query_string={"filename": ""},
    )
    message = assert_api_error(resp)
    assert "The source file contains invalid unicode" in message
def test_api_get_source_unknown_file(test_client: FlaskClient) -> None:
    """Files outside the ledger's includes cannot be read."""
    resp = test_client.get(
        "/example/api/source",
        query_string={"filename": "/home/not-one-of-the-includes"},
    )
    message = assert_api_error(resp)
    assert "Trying to read a non-source file" in message
def test_api_source_put(
    test_client: FlaskClient,
    example_ledger: FavaLedger,
) -> None:
    """The source endpoint writes the file when the checksum matches."""
    path = Path(example_ledger.beancount_file_path)
    url = "/long-example/api/source"
    # test bad request
    response = test_client.put(url)
    assert_api_error(response, "Invalid JSON request.")

    payload = path.read_text("utf-8")
    sha256sum = hashlib.sha256(path.read_bytes()).hexdigest()

    # change source
    response = test_client.put(
        url,
        json={
            "source": "asdf" + payload,
            "sha256sum": sha256sum,
            "file_path": str(path),
        },
    )
    # The endpoint responds with the checksum of the new file contents.
    sha256sum = hashlib.sha256(path.read_bytes()).hexdigest()
    assert_api_success(response, sha256sum)

    # check if the file has been written
    assert path.read_text("utf-8") == "asdf" + payload

    # write original source file
    result = test_client.put(
        url,
        json={
            "source": payload,
            "sha256sum": sha256sum,
            "file_path": str(path),
        },
    )
    assert result.status_code == 200
    assert path.read_text("utf-8") == payload
def test_api_format_source(
    test_client: FlaskClient,
    example_ledger: FavaLedger,
) -> None:
    """format_source returns the aligned source (currency column 61 here)."""
    source = Path(example_ledger.beancount_file_path).read_text("utf-8")
    response = test_client.put(
        "/long-example/api/format_source",
        json={"source": source},
    )
    assert_api_success(response, align(source, 61))
def test_api_format_source_options(
    app: Flask,
    test_client: FlaskClient,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Formatting respects the `currency_column` Fava option."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        path = Path(g.ledger.beancount_file_path)
        payload = path.read_text("utf-8")

        monkeypatch.setattr(g.ledger.fava_options, "currency_column", 90)

        response = test_client.put(
            "/long-example/api/format_source",
            json={"source": payload},
        )
        assert_api_success(response, align(payload, 90))
def test_api_source_slice_delete(
    test_client: FlaskClient,
    example_ledger: FavaLedger,
) -> None:
    """An entry can be deleted via source_slice (and re-inserted after)."""
    path = Path(example_ledger.beancount_file_path)
    contents = path.read_text("utf-8")
    url = "/long-example/api/source_slice"
    # test bad request
    response = test_client.delete(url)
    assert_api_error(
        response,
        "Invalid API request: Parameter `entry_hash` is missing.",
    )

    entry = next(
        entry
        for entry in example_ledger.all_entries_by_type.Transaction
        if entry.payee == "Chichipotle"
        and entry.date == datetime.date(2016, 5, 3)
    )
    entry_hash = hash_entry(entry)
    entry_source, sha256sum = get_entry_slice(entry)
    # delete entry
    response = test_client.delete(
        url,
        query_string={
            "entry_hash": entry_hash,
            "sha256sum": sha256sum,
        },
    )
    assert_api_success(response, f"Deleted entry {entry_hash}.")
    assert path.read_text("utf-8") != contents

    # Re-insert the deleted entry to restore the original file contents.
    insert_option = InsertEntryOption(
        datetime.date(1, 1, 1),
        re.compile(".*"),
        entry.meta["filename"],
        entry.meta["lineno"],
    )
    insert_entry(entry, entry.meta["filename"], [insert_option], 59, 2)
    assert path.read_text("utf-8") == contents
def test_api_add_entries(
    app: Flask,
    test_client: FlaskClient,
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Entries added via the API are stored sorted by date."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        test_file = tmp_path / "test_file"
        test_file.open("a")
        # Redirect writes to a fresh temporary file.
        monkeypatch.setattr(g.ledger, "beancount_file_path", str(test_file))

        entries = [
            {
                "t": "Transaction",
                "date": "2017-12-12",
                "flag": "*",
                "payee": "Test3",
                "tags": [],
                "links": [],
                "narration": "",
                "meta": {},
                "postings": [
                    {"account": "Assets:US:ETrade:Cash", "amount": "100 USD"},
                    {"account": "Assets:US:ETrade:GLD"},
                ],
            },
            {
                "t": "Transaction",
                "date": "2017-01-12",
                "flag": "*",
                "payee": "Test1",
                "tags": [],
                "links": [],
                "narration": "",
                "meta": {},
                "postings": [
                    {"account": "Assets:US:ETrade:Cash", "amount": "100 USD"},
                    {"account": "Assets:US:ETrade:GLD"},
                ],
            },
            {
                "t": "Transaction",
                "date": "2017-02-12",
                "flag": "*",
                "payee": "Test",
                "tags": [],
                "links": [],
                "narration": "Test",
                "meta": {},
                "postings": [
                    {"account": "Assets:US:ETrade:Cash", "amount": "100 USD"},
                    {"account": "Assets:US:ETrade:GLD"},
                ],
            },
        ]
        url = "/long-example/api/add_entries"

        response = test_client.put(url, json={"entries": entries})
        assert_api_success(response, "Stored 3 entries.")

        # The entries are written in date order, not input order.
        assert test_file.read_text("utf-8") == """
2017-01-12 * "Test1" ""
Assets:US:ETrade:Cash 100 USD
Assets:US:ETrade:GLD
2017-02-12 * "Test" "Test"
Assets:US:ETrade:Cash 100 USD
Assets:US:ETrade:GLD
2017-12-12 * "Test3" ""
Assets:US:ETrade:Cash 100 USD
Assets:US:ETrade:GLD
"""
@pytest.mark.parametrize(
    ("query_string", "result_str"),
    [
        ("balances from year = 2014", "5086.65 USD"),
        ("select sum(day)", "43558"),
    ],
)
def test_api_query_result(
    query_string: str,
    result_str: str,
    test_client: FlaskClient,
) -> None:
    """Query results contain the expected values in the rendered table."""
    response = test_client.get(
        "/long-example/api/query_result",
        query_string={"query_string": query_string},
    )
    data = assert_api_success(response)
    assert result_str in data["table"]
def test_api_query_result_error(test_client: FlaskClient) -> None:
    """An invalid query reports a syntax error in the response body."""
    resp = test_client.get(
        "/long-example/api/query_result",
        query_string={"query_string": "nononono"},
    )
    assert resp.status_code == 200
    assert "ERROR: Syntax error near" in resp.get_data(True)
def test_api_query_result_filters(test_client: FlaskClient) -> None:
    """Query results respect the global time filter."""
    resp = test_client.get(
        "/long-example/api/query_result",
        query_string={"query_string": "select sum(day)", "time": "2021"},
    )
    result = assert_api_success(resp)
    assert result["chart"] is None
    assert "6882" in result["table"]
def test_api_query_result_charts(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
) -> None:
    """Suitable queries also produce chart data."""
    query_string = (
        "SELECT payee, SUM(COST(position)) AS balance "
        "WHERE account ~ 'Assets' GROUP BY payee, account"
    )
    response = test_client.get(
        "/long-example/api/query_result",
        query_string={"query_string": query_string},
    )
    data = assert_api_success(response)
    assert data["chart"]
    snapshot(data["chart"])
def test_api_commodities_empty(
    test_client: FlaskClient,
) -> None:
    """An out-of-range time filter yields empty commodities data."""
    resp = test_client.get(
        "/long-example/api/commodities?time=3000",
    )
    assert not assert_api_success(resp)
@pytest.mark.parametrize(
    ("name", "url"),
    [
        ("commodities", "/long-example/api/commodities"),
        ("documents", "/example/api/documents"),
        ("events", "/long-example/api/events"),
        ("income_statement", "/long-example/api/income_statement?time=2014"),
        ("trial_balance", "/long-example/api/trial_balance?time=2014"),
        ("balance_sheet", "/long-example/api/balance_sheet"),
        (
            "balance_sheet_with_cost",
            "/long-example/api/balance_sheet?conversion=at_value",
        ),
        (
            "account_report_off_by_one_journal",
            (
                "/off-by-one/api/account_report"
                "?interval=day&conversion=at_value&a=Assets"
            ),
        ),
        (
            "account_report_off_by_one",
            (
                "/off-by-one/api/account_report"
                "?interval=day&conversion=at_value&a=Assets&r=balances"
            ),
        ),
    ],
)
def test_api(
    test_client: FlaskClient,
    snapshot: SnapshotFunc,
    name: str,
    url: str,
) -> None:
    """Generic snapshot tests for the report API endpoints."""
    response = test_client.get(url)
    data = assert_api_success(response)
    assert data
    # Serialise with the charts encoder and snapshot per test name.
    snapshot(dumps(data), name)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,395
|
beancount/fava
|
refs/heads/main
|
/contrib/pythonanywhere/favadev/favadev_pythonanywhere_com_wsgi.py
|
"""fava wsgi application"""
from __future__ import annotations
from fava.application import create_app
application = create_app(
[
"/home/favadev/example.beancount",
"/home/favadev/budgets-example.beancount",
"/home/favadev/huge-example.beancount",
],
)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,396
|
beancount/fava
|
refs/heads/main
|
/src/fava/plugins/link_documents.py
|
"""Beancount plugin to link entries to documents.
It goes through all entries with a `document` metadata-key, and tries to
associate them to Document entries. For transactions, it then also adds a link
from the transaction to documents, as well as the "#linked" tag.
"""
from __future__ import annotations
from collections import defaultdict
from os.path import normpath
from pathlib import Path
from typing import Any
from typing import TYPE_CHECKING
from fava.beans.abc import Document
from fava.beans.abc import Transaction
from fava.beans.account import get_entry_accounts
from fava.beans.funcs import hash_entry
from fava.beans.helpers import replace
from fava.helpers import BeancountError
from fava.util.sets import add_to_set
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
class DocumentError(BeancountError):
    """Document-linking related error, reported like a Beancount error."""
__plugins__ = ["link_documents"]
def link_documents(
    entries: list[Directive],
    _: Any,
) -> tuple[list[Directive], list[DocumentError]]:
    """Link entries to documents.

    Args:
        entries: The list of all entries; modified in place.
        _: The options map (unused).

    Returns:
        The (modified) entries and a list of errors for `document` metadata
        values that could not be resolved to a Document entry.
    """
    errors = []
    # All document indices by their full file path.
    by_fullname = {}
    # All document indices by their file basename.
    by_basename = defaultdict(list)
    for index, entry in enumerate(entries):
        if isinstance(entry, Document):
            by_fullname[entry.filename] = index
            by_basename[Path(entry.filename).name].append((index, entry))
    for index, entry in enumerate(entries):
        # Metadata keys like "document", "document-2", ... name files on disk.
        disk_docs = [
            value
            for key, value in entry.meta.items()
            if key.startswith("document")
        ]
        if not disk_docs:
            continue
        hash_ = hash_entry(entry)[:8]
        entry_accounts = get_entry_accounts(entry)
        for disk_doc in disk_docs:
            # Match by basename, restricted to the entry's own accounts.
            documents = [
                j
                for j, document in by_basename[disk_doc]
                if document.account in entry_accounts
            ]
            # Also match by path relative to the entry's source file.
            disk_doc_path = normpath(
                Path(entry.meta["filename"]).parent / disk_doc,
            )
            if disk_doc_path in by_fullname:
                documents.append(by_fullname[disk_doc_path])
            if not documents:
                errors.append(
                    DocumentError(
                        entry.meta,
                        f"Document not found: '{disk_doc}'",
                        entry,
                    ),
                )
                continue
            for j in documents:
                # Since we might link a document multiple times, we have to use
                # the index for the replacement here.
                doc: Document = entries[j]  # type: ignore[assignment]
                entries[j] = replace(
                    doc,
                    links=add_to_set(doc.links, hash_),
                    tags=add_to_set(doc.tags, "linked"),
                )
            # The other entry types do not support links, so only add links for
            # txns.
            if isinstance(entry, Transaction):
                entries[index] = replace(
                    entry,
                    links=add_to_set(entry.links, hash_),
                )
    return entries, errors
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,397
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/helpers.py
|
"""Helpers for Beancount entries."""
from __future__ import annotations
from typing import Any
from typing import TypeVar
from fava.beans.abc import Directive
from fava.beans.abc import Posting
T = TypeVar("T", Directive, Posting)
def replace(entry: T, **kwargs: Any) -> T:
    """Return a copy of the given directive with some fields replaced.

    Relies on the Beancount directives and postings being namedtuples.
    """
    if not isinstance(entry, tuple):
        raise TypeError(f"Could not replace attribute in type {type(entry)}")
    return entry._replace(**kwargs)  # type: ignore[attr-defined,no-any-return]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,398
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/abc.py
|
"""Abstract base classes for Beancount types."""
from __future__ import annotations
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import TYPE_CHECKING
from beancount.core import amount
from beancount.core import data
from beancount.core import position
if TYPE_CHECKING: # pragma: no cover
import datetime
from decimal import Decimal
from typing import TypeAlias
MetaValue: TypeAlias = Any
Meta: TypeAlias = dict[str, MetaValue]
TagsOrLinks: TypeAlias = set[str] | frozenset[str]
class Amount(ABC):
"""An amount in some currency."""
@property
@abstractmethod
def number(self) -> Decimal:
"""Number of units in the amount."""
@property
@abstractmethod
def currency(self) -> str:
"""Currency of the amount."""
Amount.register(amount.Amount)
class Cost(ABC):
"""A cost (basically an amount with date and label)."""
@property
@abstractmethod
def number(self) -> Decimal:
"""Number of units in the cost."""
@property
@abstractmethod
def currency(self) -> str:
"""Currency of the cost."""
@property
@abstractmethod
def date(self) -> datetime.date:
"""Date of the cost."""
@property
@abstractmethod
def label(self) -> str | None:
"""Label of the cost."""
Cost.register(position.Cost)
class Position(ABC):
    """A Beancount position - just cost and units."""
    @property
    @abstractmethod
    def units(self) -> Amount:
        """Units of the position."""
    @property
    @abstractmethod
    def cost(self) -> Cost | None:
        """Cost of the position (if held at cost)."""
Position.register(position.Position)
class Posting(Position):
    """A Beancount posting."""
    @property
    @abstractmethod
    def account(self) -> str:
        """Account of the posting."""
    @property
    @abstractmethod
    def units(self) -> Amount:
        """Units of the posting."""
    @property
    @abstractmethod
    def cost(self) -> Cost | None:
        """Cost of the posting (if held at cost)."""
    @property
    @abstractmethod
    def price(self) -> Amount | None:
        """Price of the posting."""
    @property
    @abstractmethod
    def meta(self) -> Meta | None:
        """Metadata of the posting."""
    @property
    @abstractmethod
    def flag(self) -> str | None:
        """Flag of the posting."""
Posting.register(data.Posting)
class Directive(ABC):
    """A Beancount directive."""
    @property
    @abstractmethod
    def date(self) -> datetime.date:
        """Date of the directive."""
    @property
    @abstractmethod
    def meta(self) -> Meta:
        """Metadata of the directive."""
class Transaction(Directive):
    """A Beancount Transaction directive."""
    @property
    @abstractmethod
    def flag(self) -> str:
        """Flag of the transaction."""
    @property
    @abstractmethod
    def payee(self) -> str:
        """Payee of the transaction."""
    @property
    @abstractmethod
    def narration(self) -> str:
        """Narration of the transaction."""
    @property
    @abstractmethod
    def postings(self) -> list[Posting]:
        """Postings of the transaction."""
    @property
    @abstractmethod
    def tags(self) -> TagsOrLinks:
        """Entry tags."""
    @property
    @abstractmethod
    def links(self) -> TagsOrLinks:
        """Entry links."""
class Balance(Directive):
    """A Beancount Balance directive."""
    @property
    @abstractmethod
    def account(self) -> str:
        """Account of the directive."""
    @property
    @abstractmethod
    def diff_amount(self) -> Amount | None:
        """Difference to the expected amount (if the assertion failed)."""
class Commodity(Directive):
"""A Beancount Commodity directive."""
@property
@abstractmethod
def currency(self) -> str:
"""Currency."""
class Close(Directive):
"""A Beancount Close directive."""
@property
@abstractmethod
def account(self) -> str:
"""Account of the directive."""
class Custom(Directive):
"""A Beancount Custom directive."""
@property
@abstractmethod
def type(self) -> str:
"""Directive type."""
@property
@abstractmethod
def values(self) -> list[Any]:
"""Custom values."""
class Document(Directive):
"""A Beancount Document directive."""
@property
@abstractmethod
def filename(self) -> str:
"""Filename of the document."""
@property
@abstractmethod
def account(self) -> str:
"""Account of the directive."""
@property
@abstractmethod
def tags(self) -> TagsOrLinks:
"""Entry tags."""
@property
@abstractmethod
def links(self) -> TagsOrLinks:
"""Entry links."""
class Event(Directive):
"""A Beancount Event directive."""
@property
@abstractmethod
def account(self) -> str:
"""Account of the directive."""
class Note(Directive):
"""A Beancount Note directive."""
@property
@abstractmethod
def account(self) -> str:
"""Account of the directive."""
@property
@abstractmethod
def comment(self) -> str:
"""Note comment."""
class Open(Directive):
"""A Beancount Open directive."""
@property
@abstractmethod
def account(self) -> str:
"""Account of the directive."""
class Pad(Directive):
"""A Beancount Pad directive."""
@property
@abstractmethod
def account(self) -> str:
"""Account of the directive."""
@property
@abstractmethod
def source_account(self) -> str:
"""Source account of the pad."""
class Price(Directive):
"""A Beancount Price directive."""
@property
@abstractmethod
def currency(self) -> str:
"""Currency for which this is a price."""
@property
@abstractmethod
def amount(self) -> Amount:
"""Price amount."""
class Query(Directive):
"""A Beancount Query directive."""
@property
@abstractmethod
def name(self) -> str:
"""Name of this query."""
@property
@abstractmethod
def query_string(self) -> str:
"""BQL query."""
class TxnPosting(ABC):
"""A transaction and a posting."""
@property
@abstractmethod
def txn(self) -> Transaction:
"""Transaction."""
@property
@abstractmethod
def posting(self) -> Posting:
"""Posting."""
Balance.register(data.Balance)
Commodity.register(data.Commodity)
Close.register(data.Close)
Custom.register(data.Custom)
Document.register(data.Document)
Event.register(data.Event)
Note.register(data.Note)
Open.register(data.Open)
Pad.register(data.Pad)
Price.register(data.Price)
Transaction.register(data.Transaction)
Query.register(data.Query)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,399
|
beancount/fava
|
refs/heads/main
|
/tests/__init__.py
|
"""Fava tests."""
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,400
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/account.py
|
"""Account name helpers."""
from __future__ import annotations
from typing import TYPE_CHECKING
from beancount.core.account import TYPE as ACCOUNT_TYPE
from fava.beans.abc import Custom
from fava.beans.abc import Pad
from fava.beans.abc import Transaction
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
def parent(acc: str) -> str | None:
"""Get the name of the parent of the given account."""
parts = acc.rsplit(":", maxsplit=1)
return parts[0] if len(parts) == 2 else None
def root(acc: str) -> str:
    """Get root account of the given account."""
    return acc.partition(":")[0]
def get_entry_accounts(entry: Directive) -> list[str]:
    """Accounts for an entry.

    Args:
        entry: An entry.

    Returns:
        A list with the entry's accounts ordered by priority: For
        transactions the posting accounts are listed in reverse order.
    """
    if isinstance(entry, Transaction):
        return [posting.account for posting in reversed(entry.postings)]
    if isinstance(entry, Custom):
        return [v.value for v in entry.values if v.dtype == ACCOUNT_TYPE]
    if isinstance(entry, Pad):
        return [entry.account, entry.source_account]
    # Most other directives carry a single `account` attribute (or none).
    single = getattr(entry, "account", None)
    return [single] if single is not None else []
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,401
|
beancount/fava
|
refs/heads/main
|
/src/fava/helpers.py
|
"""Exceptions and module base class."""
from __future__ import annotations
from typing import NamedTuple
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.beans.abc import Meta
class BeancountError(NamedTuple):
    """NamedTuple base for a Beancount-style error."""
    # Metadata of the source location of the error, if known.
    source: Meta | None
    # Human-readable error message.
    message: str
    # The entry that the error relates to, if any.
    entry: Directive | None
class FavaAPIError(Exception):
    """Fava's base exception class.

    Carries a plain message that is also used as the string representation.
    """

    def __init__(self, message: str) -> None:
        super().__init__()
        # Stored separately from Exception.args; __str__ reads it back.
        self.message = message

    def __str__(self) -> str:
        return self.message
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,402
|
beancount/fava
|
refs/heads/main
|
/src/fava/serialisation.py
|
"""(De)serialisation of entries.
When adding entries, these are saved via the JSON API - using the functionality
of this module to obtain the appropriate data structures from
`beancount.core.data`. Similarly, for the full entry completion, a JSON
representation of the entry is provided.
This is not intended to work well enough for full roundtrips yet.
"""
from __future__ import annotations
import datetime
from copy import copy
from functools import singledispatch
from typing import Any
from beancount.parser.parser import parse_string
from fava.beans import create
from fava.beans.abc import Amount
from fava.beans.abc import Balance
from fava.beans.abc import Directive
from fava.beans.abc import Posting
from fava.beans.abc import Transaction
from fava.beans.helpers import replace
from fava.beans.str import to_string
from fava.helpers import FavaAPIError
from fava.util.date import parse_date
@singledispatch
def serialise(entry: Directive | Posting) -> Any:
    """Serialise an entry or posting to a JSON-compatible dict.

    The fallback for directive types without a dedicated serialiser.
    """
    if not isinstance(entry, Directive):
        raise TypeError(f"Unsupported object {entry}")
    # Directives are namedtuples - start from their field mapping.
    ret = entry._asdict()  # type: ignore[attr-defined]
    # "t" carries the entry type name for the frontend.
    ret["t"] = entry.__class__.__name__
    return ret
@serialise.register(Transaction)
def _(entry: Transaction) -> Any:
    """Serialise a transaction entry."""
    ret = entry._asdict()  # type: ignore[attr-defined]
    # Copy the metadata so the entry itself is not mutated by the pop below.
    ret["meta"] = copy(entry.meta)
    ret["meta"].pop("__tolerances__", None)
    ret["t"] = "Transaction"
    ret["payee"] = entry.payee or ""
    ret["postings"] = list(map(serialise, entry.postings))
    return ret
@serialise.register(Balance)
def _(entry: Balance) -> Any:
    """Serialise a balance entry."""
    ret = entry._asdict()  # type: ignore[attr-defined]
    ret["t"] = "Balance"
    amt = ret["amount"]
    # Represent the number as a string to keep the decimal value exact.
    ret["amount"] = {"number": str(amt.number), "currency": amt.currency}
    return ret
@serialise.register(Posting)
def _(posting: Posting) -> Any:
    """Serialise a posting to its account and position string."""
    position_str = (
        to_string(posting) if isinstance(posting.units, Amount) else ""
    )
    if posting.price is not None:
        position_str += f" @ {to_string(posting.price)}"
    return {"account": posting.account, "amount": position_str}
def deserialise_posting(posting: Any) -> Posting:
    """Parse JSON to a Beancount Posting.

    Raises:
        FavaAPIError: If the amount string cannot be parsed.
    """
    amount = posting.get("amount", "")
    # Round-trip through the Beancount parser via a dummy transaction to get
    # a fully parsed posting (units, cost, price).
    entries, errors, _ = parse_string(
        f'2000-01-01 * "" ""\n Assets:Account {amount}',
    )
    if errors:
        raise FavaAPIError(f"Invalid amount: {amount}")
    txn = entries[0]
    if not isinstance(txn, Transaction):
        raise TypeError("Expected transaction")
    pos = txn.postings[0]
    return replace(pos, account=posting["account"], meta=None)
def deserialise(json_entry: Any) -> Directive:
    """Parse JSON to a Beancount entry.

    Args:
        json_entry: The entry.

    Raises:
        KeyError: if one of the required entry fields is missing.
        FavaAPIError: if the type of the given entry is not supported.
    """
    date = parse_date(json_entry.get("date", ""))[0]
    if not isinstance(date, datetime.date):
        raise FavaAPIError("Invalid entry date.")
    if json_entry["t"] == "Transaction":
        postings = [deserialise_posting(pos) for pos in json_entry["postings"]]
        return create.transaction(
            json_entry["meta"],
            date,
            json_entry.get("flag", ""),
            json_entry.get("payee", ""),
            json_entry["narration"] or "",
            frozenset(json_entry["tags"]),
            frozenset(json_entry["links"]),
            postings,
        )
    if json_entry["t"] == "Balance":
        raw_amount = json_entry["amount"]
        amount = create.amount(
            f"{raw_amount['number']} {raw_amount['currency']}",
        )
        return create.balance(
            json_entry["meta"],
            date,
            json_entry["account"],
            amount,
        )
    if json_entry["t"] == "Note":
        # Strip double quotes - they would terminate the comment string in
        # the generated Beancount source.
        comment = json_entry["comment"].replace('"', "")
        return create.note(
            json_entry["meta"],
            date,
            json_entry["account"],
            comment,
        )
    raise FavaAPIError("Unsupported entry type.")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,403
|
beancount/fava
|
refs/heads/main
|
/docs/generate.py
|
"""Generate the reST-files for the API documentation.
sphinx-apidoc is not customizeable enough to do this.
"""
from __future__ import annotations
import pkgutil
from pathlib import Path
import fava
MODULES = list(pkgutil.walk_packages(fava.__path__, fava.__name__ + "."))
RST_PATH = Path(__file__).parent / "api"
if not RST_PATH.is_dir():
RST_PATH.mkdir()
def heading(name: str, level: str = "-") -> str:
    """Return the rst-heading for the given heading."""
    underline = level * len(name)
    return f"{name}\n{underline}\n\n"
for package in ["fava"] + [mod.name for mod in MODULES if mod.ispkg]:
submodules = [
mod.name
for mod in MODULES
if mod.name.startswith(package)
and not mod.ispkg
and "_test" not in mod.name
and mod.name.count(".") == package.count(".") + 1
]
with (RST_PATH / f"{package}.rst").open("w") as rst:
rst.write(heading(package, "="))
rst.write(f".. automodule:: {package}\n\n")
for submod in submodules:
rst.write(heading(submod, "-"))
rst.write(f".. automodule:: {submod}\n\n")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,404
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/commodities.py
|
"""Attributes for auto-completion."""
from __future__ import annotations
from contextlib import suppress
from typing import TYPE_CHECKING
from fava.core.module_base import FavaModule
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
class CommoditiesModule(FavaModule):
    """Details about the currencies and commodities."""
    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # Commodity name by currency, from the "name" metadata.
        self.names: dict[str, str] = {}
        # Display precision by currency, from the "precision" metadata.
        self.precisions: dict[str, int] = {}
    def load_file(self) -> None:
        """Rebuild the name and precision maps from the Commodity entries."""
        self.names = {}
        self.precisions = {}
        for commodity in self.ledger.all_entries_by_type.Commodity:
            name = commodity.meta.get("name")
            if name:
                self.names[commodity.currency] = name
            precision = commodity.meta.get("precision")
            if precision is not None:
                # Silently ignore precision values that are not integers.
                with suppress(ValueError):
                    self.precisions[commodity.currency] = int(precision)
    def name(self, commodity: str) -> str:
        """Get the name of a commodity (or the commodity itself if not set)."""
        return self.names.get(commodity, commodity)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,405
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/file.py
|
"""Reading/writing Beancount files."""
from __future__ import annotations
import re
import threading
from codecs import decode
from codecs import encode
from dataclasses import replace
from hashlib import sha256
from operator import attrgetter
from pathlib import Path
from typing import Iterable
from typing import TYPE_CHECKING
from markupsafe import Markup
from fava.beans.abc import Balance
from fava.beans.abc import Close
from fava.beans.abc import Document
from fava.beans.abc import Open
from fava.beans.abc import Transaction
from fava.beans.account import get_entry_accounts
from fava.beans.flags import FLAG_CONVERSIONS
from fava.beans.flags import FLAG_MERGING
from fava.beans.flags import FLAG_PADDING
from fava.beans.flags import FLAG_RETURNS
from fava.beans.flags import FLAG_SUMMARIZE
from fava.beans.flags import FLAG_TRANSFER
from fava.beans.flags import FLAG_UNREALIZED
from fava.beans.str import to_string
from fava.core.module_base import FavaModule
from fava.helpers import FavaAPIError
from fava.util import next_key
if TYPE_CHECKING: # pragma: no cover
import datetime
from fava.beans.abc import Directive
from fava.core import FavaLedger
from fava.core.fava_options import InsertEntryOption
#: The flags to exclude when rendering entries.
EXCL_FLAGS = {
FLAG_PADDING, # P
FLAG_SUMMARIZE, # S
FLAG_TRANSFER, # T
FLAG_CONVERSIONS, # C
FLAG_UNREALIZED, # U
FLAG_RETURNS, # R
FLAG_MERGING, # M
}
def sha256_str(val: str) -> str:
    """Return the SHA-256 hex digest of the UTF-8 encoding of a string."""
    return sha256(val.encode("utf-8")).hexdigest()
class NonSourceFileError(FavaAPIError):
    """Trying to read a non-source file."""
    # Raised by FileModule.get_source for paths outside options["include"].
    def __init__(self, path: Path) -> None:
        super().__init__(f"Trying to read a non-source file at '{path}'")
class ExternallyChangedError(FavaAPIError):
    """The file changed externally."""
    # Raised when a sha256sum check fails before writing.
    def __init__(self, path: Path) -> None:
        super().__init__(f"The file at '{path}' changed externally.")
class InvalidUnicodeError(FavaAPIError):
    """The source file contains invalid unicode."""
    def __init__(self, reason: str) -> None:
        super().__init__(
            f"The source file contains invalid unicode: {reason}.",
        )
class FileModule(FavaModule):
    """Functions related to reading/writing to Beancount files."""
    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # Serialises all source-file modifications.
        self.lock = threading.Lock()
    def get_source(self, path: Path) -> tuple[str, str]:
        """Get source files.

        Args:
            path: The path of the file.

        Returns:
            A string with the file contents and the `sha256sum` of the file.

        Raises:
            FavaAPIError: If the file at `path` is not one of the
                source files or it contains invalid unicode.
        """
        if str(path) not in self.ledger.options["include"]:
            raise NonSourceFileError(path)
        with path.open(mode="rb") as file:
            contents = file.read()
        # Hash the raw bytes before attempting to decode them.
        sha256sum = sha256(contents).hexdigest()
        try:
            source = decode(contents)
        except UnicodeDecodeError as exc:
            raise InvalidUnicodeError(str(exc)) from exc
        return source, sha256sum
    def set_source(self, path: Path, source: str, sha256sum: str) -> str:
        """Write to source file.

        Args:
            path: The path of the file.
            source: A string with the file contents.
            sha256sum: Hash of the file.

        Returns:
            The `sha256sum` of the updated file.

        Raises:
            FavaAPIError: If the file at `path` is not one of the
                source files or if the file was changed externally.
        """
        with self.lock:
            _, original_sha256sum = self.get_source(path)
            # Optimistic concurrency check against the hash the caller read.
            if original_sha256sum != sha256sum:
                raise ExternallyChangedError(path)
            contents = encode(source, encoding="utf-8")
            with path.open("w+b") as file:
                file.write(contents)
            self.ledger.extensions.after_write_source(str(path), source)
            # Reload so the in-memory ledger reflects the new contents.
            self.ledger.load_file()
            return sha256(contents).hexdigest()
    def insert_metadata(
        self,
        entry_hash: str,
        basekey: str,
        value: str,
    ) -> None:
        """Insert metadata into a file at lineno.

        Also, prevent duplicate keys.
        """
        with self.lock:
            self.ledger.changed()
            entry: Directive = self.ledger.get_entry(entry_hash)
            # next_key avoids clashes with the entry's existing metadata keys.
            key = next_key(basekey, entry.meta)
            indent = self.ledger.fava_options.indent
            insert_metadata_in_file(
                Path(entry.meta["filename"]),
                entry.meta["lineno"],
                indent,
                key,
                value,
            )
            self.ledger.extensions.after_insert_metadata(entry, key, value)
    def save_entry_slice(
        self,
        entry_hash: str,
        source_slice: str,
        sha256sum: str,
    ) -> str:
        """Save slice of the source file for an entry.

        Args:
            entry_hash: An entry.
            source_slice: The lines that the entry should be replaced with.
            sha256sum: The sha256sum of the current lines of the entry.

        Returns:
            The `sha256sum` of the new lines of the entry.

        Raises:
            FavaAPIError: If the entry is not found or the file changed.
        """
        with self.lock:
            entry = self.ledger.get_entry(entry_hash)
            ret = save_entry_slice(entry, source_slice, sha256sum)
            self.ledger.extensions.after_entry_modified(entry, source_slice)
            return ret
    def delete_entry_slice(self, entry_hash: str, sha256sum: str) -> None:
        """Delete slice of the source file for an entry.

        Args:
            entry_hash: An entry.
            sha256sum: The sha256sum of the current lines of the entry.

        Raises:
            FavaAPIError: If the entry is not found or the file changed.
        """
        with self.lock:
            entry = self.ledger.get_entry(entry_hash)
            delete_entry_slice(entry, sha256sum)
            self.ledger.extensions.after_delete_entry(entry)
    def insert_entries(self, entries: list[Directive]) -> None:
        """Insert entries.

        Args:
            entries: A list of entries.
        """
        with self.lock:
            self.ledger.changed()
            fava_options = self.ledger.fava_options
            for entry in sorted(entries, key=incomplete_sortkey):
                insert_options = fava_options.insert_entry
                currency_column = fava_options.currency_column
                indent = fava_options.indent
                # insert_entry returns updated insert options - their line
                # numbers may shift with each inserted entry.
                fava_options.insert_entry = insert_entry(
                    entry,
                    self.ledger.beancount_file_path,
                    insert_options,
                    currency_column,
                    indent,
                )
                self.ledger.extensions.after_insert_entry(entry)
    def render_entries(self, entries: list[Directive]) -> Iterable[Markup]:
        """Return entries in Beancount format.

        Only renders :class:`.Balance` and :class:`.Transaction`.

        Args:
            entries: A list of entries.

        Yields:
            The entries rendered in Beancount format.
        """
        indent = self.ledger.fava_options.indent
        for entry in entries:
            if isinstance(entry, (Balance, Transaction)):
                if isinstance(entry, Transaction) and entry.flag in EXCL_FLAGS:
                    continue
                try:
                    # Prefer the original source slice when it can be read.
                    yield Markup(get_entry_slice(entry)[0] + "\n")
                except (KeyError, FileNotFoundError):
                    # Fall back to rendering the entry from its data.
                    yield Markup(
                        to_string(
                            entry,
                            self.ledger.fava_options.currency_column,
                            indent,
                        ),
                    )
def incomplete_sortkey(entry: Directive) -> tuple[datetime.date, int]:
    """Sortkey for entries that might have incomplete metadata."""
    # Within a single date: Open first, then Balance, then everything else,
    # then Document, then Close.
    for directive_type, priority in (
        (Open, -2),
        (Balance, -1),
        (Document, 1),
        (Close, 2),
    ):
        if isinstance(entry, directive_type):
            return (entry.date, priority)
    return (entry.date, 0)
def insert_metadata_in_file(
    path: Path,
    lineno: int,
    indent: int,
    key: str,
    value: str,
) -> None:
    """Insert the given metadata key/value in the file below lineno.

    The new line is indented by `indent` spaces.
    """
    lines = path.read_text(encoding="utf-8").splitlines(keepends=True)
    lines.insert(lineno, f'{" " * indent}{key}: "{value}"\n')
    path.write_text("".join(lines), encoding="utf-8")
def find_entry_lines(lines: list[str], lineno: int) -> list[str]:
    """Lines of entry starting at lineno.

    Args:
        lines: A list of lines.
        lineno: The 0-based line-index to start at.
    """
    entry_lines = [lines[lineno]]
    for line in lines[lineno + 1 :]:
        # The entry ends at the first blank or non-indented line.
        if not line.strip() or re.match(r"\S", line[0]):
            break
        entry_lines.append(line)
    return entry_lines
def get_entry_slice(entry: Directive) -> tuple[str, str]:
    """Get slice of the source file for an entry.

    Args:
        entry: An entry.

    Returns:
        A string containing the lines of the entry and the `sha256sum` of
        these lines.
    """
    source_path = Path(entry.meta["filename"])
    lines = source_path.read_text(encoding="utf-8").splitlines(keepends=True)
    slice_lines = find_entry_lines(lines, entry.meta["lineno"] - 1)
    entry_source = "".join(slice_lines).rstrip("\n")
    return entry_source, sha256_str(entry_source)
def save_entry_slice(
    entry: Directive,
    source_slice: str,
    sha256sum: str,
) -> str:
    """Save slice of the source file for an entry.

    Args:
        entry: An entry.
        source_slice: The lines that the entry should be replaced with.
        sha256sum: The sha256sum of the current lines of the entry.

    Returns:
        The `sha256sum` of the new lines of the entry.

    Raises:
        FavaAPIError: If the file at `path` is not one of the
            source files.
    """
    path = Path(entry.meta["filename"])
    with path.open(encoding="utf-8") as file:
        lines = file.readlines()
    first_entry_line = entry.meta["lineno"] - 1
    entry_lines = find_entry_lines(lines, first_entry_line)
    entry_source = "".join(entry_lines).rstrip("\n")
    # Guard against edits that happened since the slice was read.
    if sha256_str(entry_source) != sha256sum:
        raise ExternallyChangedError(path)
    # Splice the replacement lines over the old entry lines.
    lines = (
        lines[:first_entry_line]
        + [source_slice + "\n"]
        + lines[first_entry_line + len(entry_lines) :]
    )
    path = Path(entry.meta["filename"])
    with path.open("w", encoding="utf-8") as file:
        file.writelines(lines)
    return sha256_str(source_slice)
def delete_entry_slice(entry: Directive, sha256sum: str) -> None:
    """Delete slice of the source file for an entry.

    Args:
        entry: An entry.
        sha256sum: The sha256sum of the current lines of the entry.

    Raises:
        ExternallyChangedError: If the entry's lines on disk no longer
            hash to `sha256sum`, i.e. the file was changed externally.
    """
    path = Path(entry.meta["filename"])
    with path.open(encoding="utf-8") as file:
        lines = file.readlines()

    first_entry_line = entry.meta["lineno"] - 1
    entry_lines = find_entry_lines(lines, first_entry_line)
    entry_source = "".join(entry_lines).rstrip("\n")
    # Guard against deleting an entry that was changed outside of Fava.
    if sha256_str(entry_source) != sha256sum:
        raise ExternallyChangedError(path)

    # Also delete the whitespace following this entry (bounds-checked loop
    # instead of the previous try/except-IndexError-in-an-infinite-loop).
    last_entry_line = first_entry_line + len(entry_lines)
    while last_entry_line < len(lines) and not lines[last_entry_line].strip():
        last_entry_line += 1

    lines = lines[:first_entry_line] + lines[last_entry_line:]
    with path.open("w", encoding="utf-8") as file:
        file.writelines(lines)
def insert_entry(
    entry: Directive,
    default_filename: str,
    insert_options: list[InsertEntryOption],
    currency_column: int,
    indent: int,
) -> list[InsertEntryOption]:
    """Insert an entry.

    Args:
        entry: An entry.
        default_filename: The default file to insert into if no option matches.
        insert_options: Insert options.
        currency_column: The column to align currencies at.
        indent: Number of indent spaces.

    Returns:
        A list of updated insert options.
    """
    filename, lineno = find_insert_position(
        entry,
        insert_options,
        default_filename,
    )
    content = to_string(entry, currency_column, indent)

    path = Path(filename)
    with path.open(encoding="utf-8") as file:
        contents = file.readlines()

    if lineno is None:
        # Appending at the end of the file, separated by a blank line.
        # FIX: the original did `contents += "\n" + content`, which extends
        # a *list* with the string's individual characters; the written file
        # was only correct by accident of `writelines` concatenating them.
        contents.append("\n" + content)
    else:
        contents.insert(lineno, content + "\n")

    with path.open("w", encoding="utf-8") as file:
        file.writelines(contents)

    if lineno is None:
        # Appended at the end - no insert option positions shift.
        return insert_options

    # Shift down options pointing below the insert position in this file.
    added_lines = content.count("\n") + 1
    return [
        (
            replace(option, lineno=option.lineno + added_lines)
            if option.filename == filename and option.lineno > lineno
            else option
        )
        for option in insert_options
    ]
def find_insert_position(
    entry: Directive,
    insert_options: list[InsertEntryOption],
    default_filename: str,
) -> tuple[str, int | None]:
    """Find insert position for an entry.

    Args:
        entry: An entry.
        insert_options: A list of InsertOption.
        default_filename: The default file to insert into if no option matches.

    Returns:
        A tuple of the filename and the line number.
    """
    # Make no assumptions about the order of the given options - sort them
    # by descending date ourselves so the most recent matching option wins.
    options_newest_first = sorted(
        insert_options,
        key=attrgetter("date"),
        reverse=True,
    )
    # For transactions this is the reversed list of posting accounts.
    for account in get_entry_accounts(entry):
        for option in options_newest_first:
            # Only consider options dated strictly before the entry.
            if option.date >= entry.date:
                continue
            if option.re.match(account):
                # Convert the 1-based option line to a 0-based insert index.
                return (option.filename, option.lineno - 1)
    return (default_filename, None)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,406
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_file.py
|
from __future__ import annotations
import re
from datetime import date
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
import pytest
from fava.beans import create
from fava.beans.helpers import replace
from fava.core.fava_options import InsertEntryOption
from fava.core.file import delete_entry_slice
from fava.core.file import ExternallyChangedError
from fava.core.file import find_entry_lines
from fava.core.file import get_entry_slice
from fava.core.file import insert_entry
from fava.core.file import insert_metadata_in_file
from fava.core.file import save_entry_slice
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Transaction
from fava.core import FavaLedger
from .conftest import SnapshotFunc
def _get_entry(ledger: FavaLedger, payee: str, date_: str) -> Transaction:
    """Fetch a transaction with the given payee and date."""

    def _matches(txn: Transaction) -> bool:
        return txn.payee == payee and str(txn.date) == date_

    # next() raises StopIteration if no transaction matches - just like
    # the generator-expression version this replaces.
    return next(filter(_matches, ledger.all_entries_by_type.Transaction))
def test_get_entry_slice(example_ledger: FavaLedger) -> None:
    """The entry's source lines and their sha256 hash can be read.

    NOTE(review): whitespace inside the expected literal appears collapsed
    in this copy of the file - verify against the original source.
    """
    entry = _get_entry(example_ledger, "Chichipotle", "2016-05-03")
    assert get_entry_slice(entry) == (
        """2016-05-03 * "Chichipotle" "Eating out with Joe"
Liabilities:US:Chase:Slate -21.70 USD
Expenses:Food:Restaurant 21.70 USD""",
        "d60da810c0c7b8a57ae16be409c5e17a640a837c1ac29719ebe9f43930463477",
    )
def test_save_entry_slice(example_ledger: FavaLedger) -> None:
    """Saving an entry slice rewrites it on disk, guarded by its hash."""
    entry = _get_entry(example_ledger, "Chichipotle", "2016-05-03")
    entry_source, sha256sum = get_entry_slice(entry)
    new_source = """2016-05-03 * "Chichipotle" "Eating out with Joe"
Expenses:Food:Restaurant 21.70 USD"""
    filename = Path(entry.meta["filename"])
    contents = filename.read_text("utf-8")

    # A wrong hash must leave the file untouched.
    with pytest.raises(ExternallyChangedError):
        save_entry_slice(entry, new_source, "wrong hash")
    assert filename.read_text("utf-8") == contents

    # Save with the correct hash, then restore the original slice.
    new_sha256sum = save_entry_slice(entry, new_source, sha256sum)
    assert filename.read_text("utf-8") != contents
    sha256sum = save_entry_slice(entry, entry_source, new_sha256sum)
    assert filename.read_text("utf-8") == contents
def test_delete_entry_slice(example_ledger: FavaLedger) -> None:
    """Deleting a slice removes the entry; re-inserting restores the file."""
    entry = _get_entry(example_ledger, "Chichipotle", "2016-05-03")
    entry_source, sha256sum = get_entry_slice(entry)
    filename = Path(entry.meta["filename"])
    contents = filename.read_text("utf-8")

    # A wrong hash must leave the file untouched.
    with pytest.raises(ExternallyChangedError):
        delete_entry_slice(entry, "wrong hash")
    assert filename.read_text("utf-8") == contents

    delete_entry_slice(entry, sha256sum)
    assert filename.read_text("utf-8") != contents

    # Re-insert the entry at its original position - this should restore
    # the file contents byte-for-byte.
    insert_option = InsertEntryOption(
        date(1, 1, 1),
        re.compile(".*"),
        entry.meta["filename"],
        entry.meta["lineno"],
    )
    insert_entry(entry, entry.meta["filename"], [insert_option], 59, 2)
    assert filename.read_text("utf-8") == contents
def test_insert_metadata_in_file(tmp_path: Path) -> None:
    """Metadata lines are inserted after the given line with given indent.

    NOTE(review): indentation inside the beancount literals appears
    collapsed in this copy of the file - verify against upstream.
    """
    file_content = dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        """)
    samplefile = tmp_path / "example.beancount"
    samplefile.write_text(file_content)

    # Insert some metadata lines.
    insert_metadata_in_file(samplefile, 1, 4, "metadata", "test1")
    insert_metadata_in_file(samplefile, 1, 4, "metadata", "test2")
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        metadata: "test2"
        metadata: "test1"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        """)

    # Check that inserting also works if the next line is empty.
    insert_metadata_in_file(samplefile, 5, 4, "metadata", "test1")
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        metadata: "test2"
        metadata: "test1"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        metadata: "test1"
        """)
def test_find_entry_lines() -> None:
    """find_entry_lines returns exactly the lines of one entry.

    NOTE(review): whitespace and blank separator lines in this copy of the
    file appear mangled; blank lines were reinstated here so that the line
    indices used in the assertions (0, 4, 5, 7) line up - verify against
    the original source.
    """
    file_content = dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
         Liabilities:US:Chase:Slate -24.84 USD
         Expenses:Food:Restaurant 24.84 USD

        2016-02-26 note Accounts:Text "Uncle Boons"
        2016-02-26 note Accounts:Text "Uncle Boons"
        ; test
        2016-02-26 * "Uncle Boons" "Eating out alone"
         Liabilities:US:Chase:Slate -24.84 USD
         Expenses:Food:Restaurant 24.84 USD
        """)
    lines = file_content.split("\n")
    entry_lines = [
        '2016-02-26 * "Uncle Boons" "Eating out alone"',
        " Liabilities:US:Chase:Slate -24.84 USD",
        " Expenses:Food:Restaurant 24.84 USD",
    ]
    note_line = ['2016-02-26 note Accounts:Text "Uncle Boons"']
    # Transactions include their postings; notes are single-line entries.
    assert find_entry_lines(lines, 0) == entry_lines
    assert find_entry_lines(lines, 7) == entry_lines
    assert find_entry_lines(lines, 4) == note_line
    assert find_entry_lines(lines, 5) == note_line
def test_insert_entry_transaction(tmp_path: Path) -> None:
    """insert_entry places transactions and updates the insert options.

    NOTE(review): indentation inside the beancount literals appears
    collapsed in this copy of the file - verify against upstream.
    """
    file_content = dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        """)
    samplefile = tmp_path / "example.beancount"
    samplefile.write_text(file_content)

    postings = [
        create.posting(
            "Liabilities:US:Chase:Slate",
            "-10.00 USD",
        ),
        create.posting("Expenses:Food", "10.00 USD"),
    ]
    transaction = create.transaction(
        {},
        date(2016, 1, 1),
        "*",
        "new payee",
        "narr",
        frozenset(),
        frozenset(),
        postings,
    )

    # Test insertion without "insert-entry" options.
    insert_entry(transaction, str(samplefile), [], 61, 4)
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        2016-01-01 * "new payee" "narr"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        """)

    # Verify that InsertEntryOptions with dates greater or equal than the
    # transaction dates are ignored.
    options = [
        InsertEntryOption(
            date(2015, 1, 1),
            re.compile(".*:Food"),
            str(samplefile),
            1,
        ),
        InsertEntryOption(
            date(2015, 1, 2),
            re.compile(".*:FOOO"),
            str(samplefile),
            1,
        ),
        InsertEntryOption(
            date(2017, 1, 1),
            re.compile(".*:Food"),
            str(samplefile),
            6,
        ),
    ]
    new_options = insert_entry(
        replace(transaction, narration="narr1"),
        str(samplefile),
        options,
        61,
        4,
    )
    # Options pointing below the inserted entry are shifted down.
    assert new_options[0].lineno == 5
    assert new_options[1].lineno == 5
    assert new_options[2].lineno == 10
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-01-01 * "new payee" "narr1"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        2016-01-01 * "new payee" "narr"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        """)

    # Verify that previous postings are matched against InsertEntryOptions when
    # the last posting doesn't match.
    options = [
        InsertEntryOption(
            date(2015, 1, 1),
            re.compile(".*:Slate"),
            str(samplefile),
            5,
        ),
        InsertEntryOption(
            date(2015, 1, 2),
            re.compile(".*:FOOO"),
            str(samplefile),
            1,
        ),
    ]
    new_transaction = replace(transaction, narration="narr2")
    new_options = insert_entry(
        new_transaction,
        str(samplefile),
        options,
        61,
        4,
    )
    assert new_options[0].lineno == 9
    assert new_options[1].lineno == 1
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-01-01 * "new payee" "narr1"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        2016-01-01 * "new payee" "narr2"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        2016-01-01 * "new payee" "narr"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        """)

    # Verify that preference is given to InsertEntryOptions with later dates in
    # case several of them match a posting.
    options = [
        InsertEntryOption(
            date(2015, 1, 1),
            re.compile(".*:Food"),
            str(samplefile),
            5,
        ),
        InsertEntryOption(
            date(2015, 1, 2),
            re.compile(".*:Food"),
            str(samplefile),
            1,
        ),
    ]
    new_transaction = replace(transaction, narration="narr3")
    insert_entry(new_transaction, str(samplefile), options, 61, 4)
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-01-01 * "new payee" "narr3"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        2016-01-01 * "new payee" "narr1"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        2016-01-01 * "new payee" "narr2"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        2016-01-01 * "new payee" "narr"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        """)
def test_insert_entry_align(tmp_path: Path) -> None:
    """insert_entry aligns amounts at the given currency column.

    NOTE(review): indentation inside the beancount literals appears
    collapsed in this copy of the file - verify against upstream.
    """
    file_content = dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        """)
    samplefile = tmp_path / "example.beancount"
    samplefile.write_text(file_content)

    postings = [
        create.posting(
            "Liabilities:US:Chase:Slate",
            "-10.00 USD",
        ),
        create.posting("Expenses:Food", "10.00 USD"),
    ]
    transaction = create.transaction(
        {},
        date(2016, 1, 1),
        "*",
        "new payee",
        "narr",
        frozenset(),
        frozenset(),
        postings,
    )

    # Insert with currency column 50 (instead of the default 61).
    insert_entry(transaction, str(samplefile), [], 50, 4)
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        2016-01-01 * "new payee" "narr"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        """)
def test_insert_entry_indent(tmp_path: Path) -> None:
    """insert_entry indents postings by the given number of spaces.

    NOTE(review): indentation inside the beancount literals appears
    collapsed in this copy of the file - verify against upstream.
    """
    file_content = dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        """)
    samplefile = tmp_path / "example.beancount"
    samplefile.write_text(file_content)

    postings = [
        create.posting(
            "Liabilities:US:Chase:Slate",
            "-10.00 USD",
        ),
        create.posting("Expenses:Food", "10.00 USD"),
    ]
    transaction = create.transaction(
        {},
        date(2016, 1, 1),
        "*",
        "new payee",
        "narr",
        frozenset(),
        frozenset(),
        postings,
    )

    # Test insertion with 2-space indent.
    insert_entry(transaction, str(samplefile), [], 61, 2)
    assert samplefile.read_text("utf-8") == dedent("""\
        2016-02-26 * "Uncle Boons" "Eating out alone"
        Liabilities:US:Chase:Slate -24.84 USD
        Expenses:Food:Restaurant 24.84 USD
        2016-01-01 * "new payee" "narr"
        Liabilities:US:Chase:Slate -10.00 USD
        Expenses:Food 10.00 USD
        """)
def test_render_entries(
    example_ledger: FavaLedger,
    snapshot: SnapshotFunc,
) -> None:
    """render_entries produces beancount source text for entries.

    NOTE(review): whitespace inside the expected literal appears collapsed
    in this copy of the file - verify against upstream.
    """
    entry1 = _get_entry(example_ledger, "Uncle Boons", "2016-04-09")
    entry2 = _get_entry(example_ledger, "BANK FEES", "2016-05-04")
    postings = [
        create.posting("Expenses:Food", "10.00 USD"),
    ]
    transaction = create.transaction(
        {},
        date(2016, 1, 1),
        "*",
        "new payee",
        "narr",
        frozenset(),
        frozenset(),
        postings,
    )
    # A synthetic transaction (not present in the source files) can be
    # rendered alongside real entries.
    entries = example_ledger.file.render_entries([entry1, entry2, transaction])
    snapshot("\n".join(entries))

    file_content = dedent("""\
        2016-04-09 * "Uncle Boons" "" #trip-new-york-2016
        Liabilities:US:Chase:Slate -52.22 USD
        Expenses:Food:Restaurant 52.22 USD
        2016-05-04 * "BANK FEES" "Monthly bank fee"
        Assets:US:BofA:Checking -4.00 USD
        Expenses:Financial:Fees 4.00 USD
        """)
    assert file_content == "\n".join(
        example_ledger.file.render_entries([entry1, entry2]),
    )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,407
|
beancount/fava
|
refs/heads/main
|
/src/fava/util/__init__.py
|
"""Some small utility functions."""
from __future__ import annotations
import logging
import re
import time
from functools import wraps
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Iterable
from typing import TYPE_CHECKING
from typing import TypeVar
from unicodedata import normalize
from urllib.parse import quote
from flask import abort
from flask import send_file
if TYPE_CHECKING: # pragma: no cover
from typing import ParamSpec
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIEnvironment
from flask.wrappers import Response
BASEPATH = Path(__file__).parent.parent
def filter_api_changed(record: logging.LogRecord) -> bool:
    """Filter out LogRecords for requests that poll for changes."""
    message = record.getMessage()
    return "/api/changed HTTP" not in message
def setup_logging() -> None:
    """Set up logging for Fava."""
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    # Silence the log lines for the polling /api/changed requests.
    werkzeug_logger = logging.getLogger("werkzeug")
    werkzeug_logger.addFilter(filter_api_changed)
if TYPE_CHECKING:
    # Type variables used by the decorators below; they are only needed for
    # static type checking, hence defined under TYPE_CHECKING.
    Item = TypeVar("Item")
    P = ParamSpec("P")
    T = TypeVar("T")
def listify(func: Callable[P, Iterable[Item]]) -> Callable[P, list[Item]]:
    """Make generator function return a list (decorator)."""

    @wraps(func)
    def _as_list(*args: P.args, **kwargs: P.kwargs) -> list[Item]:
        # Exhaust the iterable eagerly into a list.
        return list(func(*args, **kwargs))

    return _as_list
def timefunc(
    func: Callable[P, T],
) -> Callable[P, T]:  # pragma: no cover - only used for debugging so far
    """Time function for debugging (decorator)."""

    @wraps(func)
    def _timed(*args: P.args, **kwargs: P.kwargs) -> T:
        started = time.time()
        result = func(*args, **kwargs)
        finished = time.time()
        print(f"Ran {func.__name__} in {finished - started}")  # noqa: T201
        return result

    return _timed
def next_key(basekey: str, keys: dict[str, Any]) -> str:
    """Return the next unused key for basekey in the supplied dictionary.

    The first try is `basekey`, followed by `basekey-2`, `basekey-3`, etc
    until a free one is found.
    """
    if basekey not in keys:
        return basekey
    suffix = 2
    while f"{basekey}-{suffix}" in keys:
        suffix += 1
    return f"{basekey}-{suffix}"
def slugify(string: str) -> str:
    """Slugify a string.

    Args:
        string: A string.

    Returns:
        A 'slug' of the string suitable for URLs. Retains non-ascii
        characters.
    """
    normalised = normalize("NFKC", string)
    # Drop all non-word characters (keeping '-' and whitespace), then trim
    # and lowercase.
    cleaned = re.sub(r"[^\s\w-]", "", normalised).strip().lower()
    # Collapse runs of whitespace and dashes into a single dash.
    return re.sub(r"[-\s]+", "-", cleaned)
def simple_wsgi(
    _: WSGIEnvironment,
    start_response: StartResponse,
) -> list[bytes]:
    """Return an empty response (a simple WSGI app)."""
    headers = [("Content-Type", "text/html")]
    start_response("200 OK", headers)
    return [b""]
def send_file_inline(filename: str) -> Response:
    """Send a file inline, including the original filename.

    Ref: http://test.greenbytes.de/tech/tc2231/.
    """
    try:
        response: Response = send_file(filename)
    except FileNotFoundError:
        return abort(404)
    basename = Path(filename).name
    # RFC 6266/5987 extended parameter so non-ASCII filenames survive.
    disposition = f"inline; filename*=UTF-8''{quote(basename)}"
    response.headers["Content-Disposition"] = disposition
    return response
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,408
|
beancount/fava
|
refs/heads/main
|
/tests/test_extensions.py
|
"""Tests for the extension system."""
from __future__ import annotations
from typing import TYPE_CHECKING
from fava.core.extensions import ExtensionDetails
if TYPE_CHECKING: # pragma: no cover
from .conftest import GetFavaLedger
def test_report_page_globals(get_ledger: GetFavaLedger) -> None:
    """Extensions can register reports and have JS modules."""
    extension_report_ledger = get_ledger("extension-report")
    result = extension_report_ledger.extensions.extension_details
    assert result == [
        ExtensionDetails("PortfolioList", "Portfolio List", True),
    ]
    # Calling the after_write_source hook must not raise.
    extension_report_ledger.extensions.after_write_source("test", "test")
    ext = extension_report_ledger.extensions.get_extension("PortfolioList")
    assert ext
    assert ext.name == "PortfolioList"
    # The extension ships a frontend module next to its Python source.
    assert ext.extension_dir.exists()
    assert (ext.extension_dir / "PortfolioList.js").exists()
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,409
|
beancount/fava
|
refs/heads/main
|
/tests/test_util_excel.py
|
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from fava.beans.funcs import run_query
from fava.util import excel
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
def test_to_csv(example_ledger: FavaLedger) -> None:
    """Query results can be serialised to CSV."""
    types, rows = run_query(
        example_ledger.all_entries,
        example_ledger.options,
        "balances",
        numberify=True,
    )
    assert excel.to_csv(types, rows)
    # A second query with different column types.
    types, rows = run_query(
        example_ledger.all_entries,
        example_ledger.options,
        "select account, tags, date, day",
        numberify=True,
    )
    assert excel.to_csv(types, rows)
@pytest.mark.skipif(not excel.HAVE_EXCEL, reason="pyexcel not installed")
def test_to_excel(example_ledger: FavaLedger) -> None:
    """Query results can be serialised to an ODS document."""
    types, rows = run_query(
        example_ledger.all_entries,
        example_ledger.options,
        "balances",
        numberify=True,
    )
    assert excel.to_excel(types, rows, "ods", "balances")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,410
|
beancount/fava
|
refs/heads/main
|
/src/fava/plugins/__init__.py
|
"""Some Beancount plugins."""
from __future__ import annotations
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,411
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/prices.py
|
"""Price helpers."""
from __future__ import annotations
import datetime
from bisect import bisect
from collections import Counter
from collections import defaultdict
from decimal import Decimal
from typing import Iterable
from typing import Sequence
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from typing import TypeAlias
from fava.beans.abc import Price
BaseQuote: TypeAlias = tuple[str, str]
PricePoint: TypeAlias = tuple[datetime.date, Decimal]
ZERO = Decimal()
ONE = Decimal("1")
class DateKeyWrapper(Sequence[datetime.date]):
    """Present a list of price points as a sequence of their dates.

    This lets ``bisect`` search a price list by date; it is only needed
    before Python 3.10, which adds the key argument.
    """

    __slots__ = ("inner",)

    def __init__(self, inner: list[PricePoint]) -> None:
        self.inner = inner

    def __getitem__(self, k: int) -> datetime.date:  # type: ignore[override]
        date, _price = self.inner[k]
        return date

    def __len__(self) -> int:
        return len(self.inner)
def _keep_last_per_day(
prices: list[PricePoint],
) -> Iterable[PricePoint]:
"""In a sorted non-empty list of prices, keep the last one for each day."""
last: PricePoint | None = None
for price in prices:
if last is not None and price[0] > last[0]:
yield last
last = price
if last is not None:
yield last
class FavaPriceMap:
    """A Fava alternative to Beancount's PriceMap.

    By having some more methods on this class, fewer helper functions need to
    be imported. Also, this is fully typed and allows to more easily reproduce
    issues with the whole price logic.

    This behaves slightly differently than Beancount. Beancount creates a list
    for each currency pair and then merges the inverse rates. We just create
    both the lists in tandem and count the directions that prices occur in.

    Args:
        price_entries: A sorted list of price entries.
    """

    def __init__(self, price_entries: list[Price]) -> None:
        # Raw price points per (base, quote) pair; may contain several
        # points per day, which is reduced to one per day below.
        raw_map: dict[BaseQuote, list[PricePoint]] = defaultdict(list)
        # Count of price directives seen per direction.
        counts: Counter[BaseQuote] = Counter()
        for price in price_entries:
            rate = price.amount.number
            base_quote = (price.currency, price.amount.currency)
            raw_map[base_quote].append((price.date, rate))
            counts[base_quote] += 1
            # Record the inverse rate too (unless it would divide by zero).
            if rate != ZERO:
                raw_map[(price.amount.currency, price.currency)].append(
                    (price.date, ONE / rate),
                )
        # A pair is "forward" if it occurs strictly more often than its
        # inverse direction.
        self._forward_pairs = [
            (base, quote)
            for (base, quote), count in counts.items()
            if counts.get((quote, base), 0) < count
        ]
        self._map = {
            k: list(_keep_last_per_day(rates)) for k, rates in raw_map.items()
        }

    def commodity_pairs(
        self,
        operating_currencies: list[str],
    ) -> list[BaseQuote]:
        """List pairs of commodities.

        Args:
            operating_currencies: A list of operating currencies.

        Returns:
            A list of pairs of commodities. Pairs of operating currencies will
            be given in both directions not just in the one most commonly found
            in the file.
        """
        forward_pairs = self._forward_pairs
        extra_operating_pairs = []
        for base, quote in forward_pairs:
            if base in operating_currencies and quote in operating_currencies:
                extra_operating_pairs.append((quote, base))
        return sorted(forward_pairs + extra_operating_pairs)

    def get_all_prices(self, base_quote: BaseQuote) -> list[PricePoint] | None:
        """Get all prices for the given currency pair."""
        return self._map.get(base_quote)

    def get_price(
        self,
        base_quote: BaseQuote,
        date: datetime.date | None = None,
    ) -> Decimal | None:
        """Get the price for the given currency pair."""
        return self.get_price_point(base_quote, date)[1]

    def get_price_point(
        self,
        base_quote: BaseQuote,
        date: datetime.date | None = None,
    ) -> PricePoint | tuple[None, Decimal] | tuple[None, None]:
        """Get the price point for the given currency pair."""
        base, quote = base_quote
        # A currency always trades 1:1 against itself.
        if base == quote:
            return (None, ONE)
        price_list = self._map.get(base_quote)
        if price_list is None:
            return (None, None)
        if date is None:
            # No date given - return the latest known price point.
            return price_list[-1]
        # Binary search for the last price point on or before `date`.
        index = bisect(DateKeyWrapper(price_list), date)
        if index == 0:
            # All known prices are dated after the requested date.
            return (None, None)
        return price_list[index - 1]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,412
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/str.py
|
"""Convert Beancount types to string."""
from __future__ import annotations
from decimal import Decimal
from functools import singledispatch
from beancount.core.position import CostSpec
from beancount.parser.printer import format_entry # type: ignore[import]
from fava.beans.abc import Amount
from fava.beans.abc import Cost
from fava.beans.abc import Directive
from fava.beans.abc import Position
from fava.beans.helpers import replace
from fava.core.misc import align
@singledispatch
def to_string(
    obj: Amount | Cost | CostSpec | Directive | Position,
    _currency_column: int | None = None,
    _indent: int | None = None,
) -> str:
    """Convert to a string."""
    # Fallback for types without a registered implementation.
    msg = f"Unsupported object of type {type(obj)}"
    raise TypeError(msg)
@to_string.register(Amount)
def _(obj: Amount) -> str:
    # An amount is rendered as "<number> <currency>".
    return f"{obj.number} {obj.currency}"
@to_string.register(Cost)
def _(cost: Cost) -> str:
    # A cost is a comma-separated list of amount, optional date and
    # optional quoted label.
    parts = [f"{cost.number} {cost.currency}"]
    if cost.date:
        parts.append(cost.date.isoformat())
    if cost.label:
        parts.append(f'"{cost.label}"')
    return ", ".join(parts)
@to_string.register(CostSpec)
def _(cost: CostSpec) -> str:
    # Assemble the comma-separated components of a cost spec:
    # amount part, optional date, optional label, optional merge marker.
    strs = []
    if isinstance(cost.number_per, Decimal) or isinstance(
        cost.number_total,
        Decimal,
    ):
        amountlist = []
        if isinstance(cost.number_per, Decimal):
            amountlist.append(f"{cost.number_per}")
        if isinstance(cost.number_total, Decimal):
            # A total cost is preceded by the "#" separator.
            amountlist.append("#")
            amountlist.append(f"{cost.number_total}")
        if cost.currency:
            amountlist.append(cost.currency)
        strs.append(" ".join(amountlist))
    if cost.date:
        strs.append(cost.date.isoformat())
    if cost.label:
        strs.append(f'"{cost.label}"')
    if cost.merge:
        strs.append("*")
    return ", ".join(strs)
@to_string.register(Position)
def _(obj: Position) -> str:
    # A position renders as its units, optionally followed by the cost
    # in curly braces.
    if obj.cost is None:
        return to_string(obj.units)
    return f"{to_string(obj.units)} {{{to_string(obj.cost)}}}"
@to_string.register(Directive)
def _format_entry(
    entry: Directive,
    currency_column: int = 61,
    indent: int = 2,
) -> str:
    """Render a directive with Beancount's printer, aligned at currency_column."""
    # Strip hidden metadata (keys starting with "_") before printing.
    meta = {
        key: entry.meta[key] for key in entry.meta if not key.startswith("_")
    }
    entry = replace(entry, meta=meta)
    string = align(format_entry(entry, prefix=" " * indent), currency_column)
    # The Beancount printer renders MISSING values as their repr - drop them.
    string = string.replace("<class 'beancount.core.number.MISSING'>", "")
    # Strip trailing whitespace from every line.
    return "\n".join(line.rstrip() for line in string.split("\n"))
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,413
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_query_shell.py
|
from __future__ import annotations
from typing import Any
from typing import TYPE_CHECKING
import pytest
from fava.beans.funcs import run_query
from fava.helpers import FavaAPIError
if TYPE_CHECKING: # pragma: no cover
from .conftest import GetFavaLedger
from .conftest import SnapshotFunc
def test_query(snapshot: SnapshotFunc, get_ledger: GetFavaLedger) -> None:
    """The query shell executes BQL, shell commands and stored queries."""
    query_ledger = get_ledger("query-example")

    def run(query_string: str) -> Any:
        """Run a query string against the test ledger."""
        return query_ledger.query_shell.execute_query(
            query_ledger.all_entries,
            query_string,
        )

    def run_text(query_string: str) -> str:
        """Run a query that should only return string contents."""
        contents, types, result = run(query_string)
        assert types is None
        assert result is None
        assert isinstance(contents, str)
        return contents

    assert run_text("help")
    assert (
        run_text("help exit") == "Doesn't do anything in Fava's query shell."
    )
    snapshot(run_text("explain select date, balance"))
    assert (
        run("lex select date, balance")[0]
        == "LexToken(SELECT,'SELECT',1,0)\nLexToken(ID,'date',1,7)\nL"
        "exToken(COMMA,',',1,11)\nLexToken(ID,'balance',1,13)"
    )
    assert run_text("run") == "custom_query\ncustom query with space"
    bal = run("balances")
    snapshot(bal)
    # Stored queries (by name, optionally quoted) give the same result as
    # running the query string directly.
    assert run("run custom_query") == bal
    assert run("run 'custom query with space'") == bal
    assert run("balances")[1:] == run_query(
        query_ledger.all_entries,
        query_ledger.options,
        "balances",
    )
    # Invalid input is reported as an error string.
    assert (
        run_text("asdf")
        == "ERROR: Syntax error near 'asdf' (at 0)\n asdf\n ^"
    )
def test_query_to_file(
    snapshot: SnapshotFunc,
    get_ledger: GetFavaLedger,
) -> None:
    """Query results export to files; invalid queries raise FavaAPIError."""
    query_ledger = get_ledger("query-example")
    entries = query_ledger.all_entries
    query_shell = query_ledger.query_shell
    # A stored query is named after itself; ad-hoc queries get a default name.
    name, data = query_shell.query_to_file(entries, "run custom_query", "csv")
    assert name == "custom_query"
    name, data = query_shell.query_to_file(entries, "balances", "csv")
    assert name == "query_result"
    snapshot(data.getvalue())
    with pytest.raises(FavaAPIError):
        query_shell.query_to_file(entries, "select sdf", "csv")
    with pytest.raises(FavaAPIError):
        query_shell.query_to_file(entries, "run testsetest", "csv")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,414
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/budgets.py
|
"""Parsing and computing budgets."""
from __future__ import annotations
from collections import Counter
from collections import defaultdict
from decimal import Decimal
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import TYPE_CHECKING
from fava.core.module_base import FavaModule
from fava.helpers import BeancountError
from fava.util.date import days_in_daterange
from fava.util.date import Interval
from fava.util.date import number_of_days_in_period
if TYPE_CHECKING: # pragma: no cover
import datetime
from fava.beans.abc import Custom
from fava.core import FavaLedger
class Budget(NamedTuple):
    """A budget entry.

    One parsed ``custom "budget"`` directive: a fixed amount of a single
    currency budgeted for an account per interval, active from its start
    date (see :func:`parse_budgets`).
    """

    # Account the budget applies to.
    account: str
    # First date on which this budget entry is active.
    date_start: datetime.date
    # Interval the budgeted number refers to (daily, weekly, ...).
    period: Interval
    # Budgeted number per period.
    number: Decimal
    # Currency of the budgeted number.
    currency: str


BudgetDict = Dict[str, List[Budget]]
"""A map of account names to lists of budget entries."""
class BudgetError(BeancountError):
    """Error with a budget.

    Collected (not raised) by :func:`parse_budgets` for ``custom "budget"``
    directives that cannot be parsed.
    """
class BudgetModule(FavaModule):
    """Parses budget entries.

    Holds the budgets parsed from the ledger's Custom entries and exposes
    helpers to evaluate them over date ranges.
    """

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # Account name -> list of Budget entries for that account.
        self.budget_entries: BudgetDict = {}

    def load_file(self) -> None:
        """(Re)parse budgets from the ledger's Custom entries."""
        self.budget_entries, errors = parse_budgets(
            self.ledger.all_entries_by_type.Custom,
        )
        # Parse failures are surfaced as ledger errors rather than raised.
        self.ledger.errors.extend(errors)

    def calculate(
        self,
        account: str,
        begin_date: datetime.date,
        end_date: datetime.date,
    ) -> dict[str, Decimal]:
        """Calculate the budget for an account in an interval."""
        return calculate_budget(
            self.budget_entries,
            account,
            begin_date,
            end_date,
        )

    def calculate_children(
        self,
        account: str,
        begin_date: datetime.date,
        end_date: datetime.date,
    ) -> dict[str, Decimal]:
        """Calculate the budget for an account including its children."""
        return calculate_budget_children(
            self.budget_entries,
            account,
            begin_date,
            end_date,
        )

    def __bool__(self) -> bool:
        # Truthy iff any budget entries were parsed.
        return bool(self.budget_entries)
def parse_budgets(
    custom_entries: list[Custom],
) -> tuple[BudgetDict, list[BudgetError]]:
    """Parse budget directives from custom entries.

    Args:
        custom_entries: the Custom entries to parse budgets from.

    Returns:
        A tuple of a dict mapping account names to lists of budgets, and a
        list of errors for the directives that could not be parsed.

    Example:
        2015-04-09 custom "budget" Expenses:Books "monthly" 20.00 EUR
    """
    intervals = {
        "daily": Interval.DAY,
        "weekly": Interval.WEEK,
        "monthly": Interval.MONTH,
        "quarterly": Interval.QUARTER,
        "yearly": Interval.YEAR,
    }
    budgets: BudgetDict = defaultdict(list)
    errors: list[BudgetError] = []

    for entry in custom_entries:
        if entry.type != "budget":
            continue
        try:
            period = intervals.get(str(entry.values[1].value))
            if not period:
                errors.append(
                    BudgetError(
                        entry.meta,
                        "Invalid interval for budget entry",
                        entry,
                    ),
                )
                continue
            budget_amount = entry.values[2].value
            budget = Budget(
                entry.values[0].value,
                entry.date,
                period,
                budget_amount.number,
                budget_amount.currency,
            )
            budgets[budget.account].append(budget)
        except (IndexError, TypeError):
            # Malformed directive (too few values / wrong value types).
            errors.append(
                BudgetError(entry.meta, "Failed to parse budget entry", entry),
            )
    return budgets, errors
def _matching_budgets(
budgets: list[Budget],
date_active: datetime.date,
) -> dict[str, Budget]:
"""Find matching budgets.
Returns:
The budget that is active on the specified date for the
specified account.
"""
last_seen_budgets = {}
for budget in budgets:
if budget.date_start <= date_active:
last_seen_budgets[budget.currency] = budget
else:
break
return last_seen_budgets
def calculate_budget(
    budgets: BudgetDict,
    account: str,
    date_from: datetime.date,
    date_to: datetime.date,
) -> dict[str, Decimal]:
    """Calculate the budget for an account over a date range.

    Args:
        budgets: A map of account names to lists of :class:`Budget` entries.
        account: An account name.
        date_from: Starting date.
        date_to: End date (exclusive).

    Returns:
        A dictionary of currency to Decimal with the budget for the
        specified account and period.
    """
    account_budgets = budgets.get(account)
    if account_budgets is None:
        return {}

    totals: dict[str, Decimal] = defaultdict(Decimal)
    for day in days_in_daterange(date_from, date_to):
        # Each day contributes the active budget's per-day fraction.
        for active in _matching_budgets(account_budgets, day).values():
            day_count = number_of_days_in_period(active.period, day)
            totals[active.currency] += active.number / day_count
    return dict(totals)
def calculate_budget_children(
    budgets: BudgetDict,
    account: str,
    date_from: datetime.date,
    date_to: datetime.date,
) -> dict[str, Decimal]:
    """Calculate budget for an account including budgets of its children.

    Args:
        budgets: A map of account names to lists of :class:`Budget` entries.
        account: An account name.
        date_from: Starting date.
        date_to: End date (exclusive).

    Returns:
        A dictionary of currency to Decimal with the summed budgets of the
        account and all of its descendants for the specified period.
    """
    currency_dict: dict[str, Decimal] = Counter()  # type: ignore[assignment]
    # Match the account itself and true descendants only.  A plain
    # child.startswith(account) would also match sibling accounts that
    # merely share a name prefix (e.g. "Expenses:Bookshelf" would be
    # counted under "Expenses:Books").
    descendant_prefix = account + ":"
    for child in budgets:
        if child == account or child.startswith(descendant_prefix):
            currency_dict.update(
                calculate_budget(budgets, child, date_from, date_to),
            )
    return dict(currency_dict)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,415
|
beancount/fava
|
refs/heads/main
|
/src/fava/ext/__init__.py
|
"""Fava's extension system."""
from __future__ import annotations
import ast
import importlib
import inspect
import sys
from functools import cached_property
from pathlib import Path
from typing import Any
from typing import TYPE_CHECKING
import jinja2
from flask import current_app
from fava.helpers import BeancountError
if TYPE_CHECKING: # pragma: no cover
from typing import Callable
from flask.wrappers import Response
from fava.beans.abc import Directive
from fava.core import FavaLedger
class FavaExtensionError(BeancountError):
    """Error in one of Fava's extensions.

    Returned (not raised) by :func:`find_extensions` when a module cannot
    be imported or contains no extension classes.
    """
class FavaExtensionBase:
    """Base class for extensions for Fava.

    Any extension should inherit from this class. :func:`find_extension` will
    discover all subclasses of this class in the specified modules.
    """

    #: Name for a HTML report for this extension.
    report_title: str | None = None

    #: Whether this extension includes a Javascript module.
    has_js_module: bool = False

    #: Parsed configuration: the literal-eval'd config string, or None if
    #: no config was given or it could not be evaluated.
    config: Any

    #: Map of (endpoint name, HTTP method) to the handler function.
    endpoints: dict[tuple[str, str], Callable[[FavaExtensionBase], Any]]

    def __init__(self, ledger: FavaLedger, config: str | None = None) -> None:
        """Initialise extension.

        Args:
            ledger: Input ledger file.
            config: Configuration options string passed from the
                beancount file's 'fava-extension' line.
        """
        self.endpoints = {}
        # Go through each of the subclass's functions to find the ones
        # marked as endpoints by @extension_endpoint
        for _, func in inspect.getmembers(self.__class__, inspect.isfunction):
            if hasattr(func, "endpoint_key"):
                name, methods = func.endpoint_key
                for method in methods:
                    self.endpoints[(name, method)] = func
        self.ledger = ledger
        try:
            # The config string is evaluated as a Python literal.
            self.config = ast.literal_eval(config) if config else None
        except ValueError:
            # An unparseable config silently degrades to no config.
            self.config = None

    @property
    def name(self) -> str:
        """Unique name of this extension."""
        return self.__class__.__qualname__

    @property
    def extension_dir(self) -> Path:
        """Directory to look for templates directory and Javascript code."""
        return Path(inspect.getfile(self.__class__)).parent

    @cached_property
    def jinja_env(self) -> jinja2.Environment:
        """Jinja env for this extension.

        The extension's own templates take precedence over the app's.
        """
        if not current_app.jinja_loader:
            raise ValueError("Expected Flask app to have jinja_loader.")
        ext_loader = jinja2.FileSystemLoader(self.extension_dir / "templates")
        loader = jinja2.ChoiceLoader([ext_loader, current_app.jinja_loader])
        return current_app.jinja_env.overlay(loader=loader)

    def after_entry_modified(self, entry: Directive, new_lines: str) -> None:
        """Run after an `entry` has been modified."""

    def after_insert_entry(self, entry: Directive) -> None:
        """Run after an `entry` has been inserted."""

    def after_delete_entry(self, entry: Directive) -> None:
        """Run after an `entry` has been deleted."""

    def after_insert_metadata(
        self,
        entry: Directive,
        key: str,
        value: str,
    ) -> None:
        """Run after metadata (key: value) was added to an entry."""

    def after_write_source(self, path: str, source: str) -> None:
        """Run after `source` has been written to path."""
def find_extensions(
    base_path: Path,
    name: str,
) -> tuple[list[type[FavaExtensionBase]], list[FavaExtensionError]]:
    """Find extensions in a module.

    Args:
        base_path: The module can be relative to this path.
        name: The name of the module containing the extensions.

    Returns:
        A tuple (classes, errors) where classes is a list of subclasses of
        :class:`FavaExtensionBase` found in ``name``.
    """
    classes = []

    # Temporarily allow imports relative to base_path.  The try/finally
    # guarantees that sys.path is restored on every exit path - previously
    # the import-error return leaked the inserted path entry.
    sys.path.insert(0, str(base_path))
    try:
        module = importlib.import_module(name)
        for _, obj in inspect.getmembers(module, inspect.isclass):
            if issubclass(obj, FavaExtensionBase) and obj != FavaExtensionBase:
                classes.append(obj)
    except ImportError as err:
        error = FavaExtensionError(
            None,
            f'Importing module "{name}" failed.\nError: "{err.msg}"',
            None,
        )
        return (
            [],
            [error],
        )
    finally:
        # Remove by value rather than popping index 0, in case the import
        # itself modified sys.path.
        sys.path.remove(str(base_path))

    if not classes:
        error = FavaExtensionError(
            None,
            f'Module "{name}" contains no extensions.',
            None,
        )
        return (
            [],
            [error],
        )
    return classes, []
def extension_endpoint(
    func_or_endpoint_name: Callable[[FavaExtensionBase], Any]
    | str
    | None = None,
    methods: list[str] | None = None,
) -> (
    Callable[[FavaExtensionBase], Response]
    | Callable[
        [Callable[[FavaExtensionBase], Response]],
        Callable[[FavaExtensionBase], Response],
    ]
):
    """Decorator to mark a function as an extension endpoint.

    Can be used bare (``@extension_endpoint``), in which case the endpoint
    name is the function's name and the methods default to ``["GET"]``, or
    with arguments (``@extension_endpoint(endpoint_name, methods)``), where
    either argument may be omitted to get the same defaults.
    """
    explicit_name = (
        func_or_endpoint_name
        if isinstance(func_or_endpoint_name, str)
        else None
    )

    def mark(
        func: Callable[[FavaExtensionBase], Response],
    ) -> Callable[[FavaExtensionBase], Response]:
        # Stash (name, methods) on the function itself; the extension base
        # class collects functions carrying this attribute as endpoints.
        target: Any = func
        target.endpoint_key = (
            explicit_name if explicit_name else func.__name__,
            methods or ["GET"],
        )
        return func

    if callable(func_or_endpoint_name):
        # Used bare: decorate immediately.
        return mark(func_or_endpoint_name)
    return mark
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,416
|
beancount/fava
|
refs/heads/main
|
/src/fava/ext/auto_commit.py
|
"""Auto-commit hook for Fava.
This mainly serves as an example how Fava's extension systems, which only
really does hooks at the moment, works.
"""
from __future__ import annotations
from pathlib import Path
from subprocess import call
from subprocess import DEVNULL
from typing import Any
from typing import TYPE_CHECKING
from fava.ext import FavaExtensionBase
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
class AutoCommit(FavaExtensionBase):
    """Auto-commit hook for Fava.

    Runs ``git`` after each hook so that changes made through Fava end up
    committed to the repository containing the Beancount file.
    """

    def _run(self, command: list[str]) -> None:
        # Run in the directory of the main Beancount file, discarding output.
        workdir = Path(self.ledger.beancount_file_path).parent
        call(command, cwd=workdir, stdout=DEVNULL)  # noqa: S603

    def after_write_source(self, path: str, _: Any) -> None:
        self._run(["git", "add", path])
        self._run(["git", "commit", "-m", "autocommit: file saved"])

    def after_insert_metadata(self, *_: Any) -> None:
        self._run(["git", "commit", "-am", "autocommit: metadata added"])

    def after_insert_entry(self, entry: Directive) -> None:
        self._run(
            ["git", "commit", "-am", f"autocommit: entry on {entry.date}"],
        )

    def after_delete_entry(self, entry: Directive) -> None:
        self._run(
            [
                "git",
                "commit",
                "-am",
                f"autocommit: deleted entry on {entry.date}",
            ],
        )

    def after_entry_modified(self, entry: Directive, _: Any) -> None:
        self._run(
            [
                "git",
                "commit",
                "-am",
                f"autocommit: modified entry on {entry.date}",
            ],
        )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,417
|
beancount/fava
|
refs/heads/main
|
/tests/data/import_config.py
|
"""Test importer for Fava."""
from __future__ import annotations
import csv
import datetime
from decimal import Decimal
from pathlib import Path
from beancount.core import amount
from beancount.core import data
from beancount.ingest import importer
from dateutil.parser import parse
from fava.beans import create
# mypy: ignore-errors
class TestImporter(importer.ImporterProtocol):
    """Test importer for Fava.

    Parses the ``import.csv`` fixture (semicolon-separated, German bank
    style columns) into Beancount entries.
    """

    # All extracted entries are booked against this account.
    account = "Assets:Checking"
    currency = "EUR"

    def identify(self, file):
        # Only claim the specific test fixture file.
        return Path(file.name).name == "import.csv"

    def file_name(self, file):
        return f"examplebank.{Path(file.name).name}"

    def file_account(self, _):
        return self.account

    def file_date(self, _file):
        return datetime.date.today()

    def extract(self, file):
        """Parse the CSV rows into Note/Transaction entries.

        Rows without an IBAN become Note entries; all others become
        two-posting transactions.  If any row was read, a Balance entry
        for today is appended as well.
        """
        entries = []
        index = 0
        with Path(file.name).open(encoding="utf-8") as file_:
            csv_reader = csv.DictReader(file_, delimiter=";")
            for index, row in enumerate(csv_reader):
                meta = data.new_metadata(file.name, index)
                # Keep the raw CSV line for display in Fava's import UI.
                meta["__source__"] = ";".join(list(row.values()))
                date = parse(row["Buchungsdatum"]).date()
                desc = f"{row['Umsatztext']}"
                if not row["IBAN"]:
                    entries.append(data.Note(meta, date, self.account, desc))
                else:
                    # Amounts use a decimal comma; normalize and round.
                    units_d = round(
                        Decimal(row["Betrag"].replace(",", ".")),
                        2,
                    )
                    units = amount.Amount(units_d, self.currency)
                    posting1 = data.Posting("", -units, None, None, None, None)
                    posting2 = data.Posting(
                        self.account,
                        units,
                        None,
                        None,
                        None,
                        None,
                    )
                    txn = data.Transaction(
                        meta,
                        date,
                        self.FLAG,
                        "",
                        desc,
                        set(),
                        set(),
                        [posting1, posting2],
                    )
                    entries.append(txn)
        if index:
            meta = data.new_metadata(file.name, 0)
            meta["__source__"] = "Balance"
            entries.append(
                create.balance(
                    meta,
                    datetime.date.today(),
                    self.account,
                    create.amount("10 USD"),
                ),
            )
        return entries
# Importer configuration list picked up by the ingest machinery.
CONFIG = [
    TestImporter(),
]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,418
|
beancount/fava
|
refs/heads/main
|
/src/fava/util/excel.py
|
"""Writing query results to CSV and spreadsheet documents."""
from __future__ import annotations
import csv
import datetime
import io
from decimal import Decimal
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from fava.beans.funcs import ResultRow
from fava.beans.funcs import ResultType
try:
import pyexcel # type: ignore[import]
HAVE_EXCEL = True
except ImportError: # pragma: no cover
HAVE_EXCEL = False
def to_excel(
    types: list[ResultType],
    rows: list[ResultRow],
    result_format: str,
    query_string: str,
) -> io.BytesIO:
    """Save result to spreadsheet document.

    Args:
        types: query result_types.
        rows: query result_rows.
        result_format: 'xlsx' or 'ods'.
        query_string: The query string (is written to the document).

    Returns:
        The (binary) file contents.

    Raises:
        ValueError: If ``result_format`` is not a supported format.
    """
    if result_format not in ("xlsx", "ods"):
        raise ValueError(f"Invalid result format: {result_format}")
    # Two sheets: the query results, and the query string itself.
    sheets = {
        "Results": _result_array(types, rows),
        "Query": [["Query"], [query_string]],
    }
    output = io.BytesIO()
    pyexcel.Book(sheets).save_to_memory(result_format, output)
    output.seek(0)
    return output
def to_csv(types: list[ResultType], rows: list[ResultRow]) -> io.BytesIO:
    """Save result to CSV.

    Args:
        types: query result_types.
        rows: query result_rows.

    Returns:
        The (binary) file contents, UTF-8 encoded.
    """
    text_buffer = io.StringIO()
    writer = csv.writer(text_buffer)
    for record in _result_array(types, rows):
        writer.writerow(record)
    return io.BytesIO(text_buffer.getvalue().encode("utf-8"))
def _result_array(
    types: list[ResultType],
    rows: list[ResultRow],
) -> list[list[str]]:
    """Build a 2D array: a header row of column names, then the data rows."""
    header = [name for name, _type in types]
    return [header, *(_row_to_pyexcel(row, types) for row in rows)]
def _row_to_pyexcel(row: ResultRow, header: list[ResultType]) -> list[str]:
result = []
for idx, column in enumerate(header):
value = row[idx]
if not value:
result.append(value)
continue
type_ = column[1]
if type_ == Decimal:
result.append(float(value))
elif type_ == int:
result.append(value)
elif type_ == set:
result.append(" ".join(value))
elif type_ == datetime.date:
result.append(str(value))
else:
if not isinstance(value, str):
raise TypeError(f"unexpected type {type(value)}")
result.append(value)
return result
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,419
|
beancount/fava
|
refs/heads/main
|
/tests/test_ext.py
|
from __future__ import annotations
from pathlib import Path
from fava.ext import find_extensions
def test_find_extensions() -> None:
    """Extensions are discovered (and failures reported) by find_extensions."""
    # A nonexistent module yields a single import error.
    classes, errors = find_extensions(Path(), "NOMODULENAME")
    assert not classes
    assert len(errors) == 1
    assert (
        errors[0].message
        == 'Importing module "NOMODULENAME" failed.'
        "\nError: \"No module named 'NOMODULENAME'\""
    )

    # An importable module without extensions yields a single error.
    classes, errors = find_extensions(Path(), "fava")
    assert not classes
    assert len(errors) == 1
    assert errors[0].message == 'Module "fava" contains no extensions.'

    # The bundled extension modules each ship exactly one extension class.
    ext_dir = Path(__file__).parent.parent / "src" / "fava" / "ext"
    for module_name, class_name in [
        ("auto_commit", "AutoCommit"),
        ("portfolio_list", "PortfolioList"),
    ]:
        classes, errors = find_extensions(ext_dir, module_name)
        assert not errors
        assert len(classes) == 1
        assert classes[0].__name__ == class_name
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,420
|
beancount/fava
|
refs/heads/main
|
/src/fava/util/sets.py
|
"""Utils for Python sets."""
from __future__ import annotations
from typing import AbstractSet
def add_to_set(set_: AbstractSet[str] | None, new: str) -> set[str]:
    """Add an entry to a set (or create it if doesn't exist).

    Args:
        set_: The (optional) set to add an element to.
        new: The string to add to the set.

    Returns:
        A new set containing the elements of ``set_`` plus ``new``; the
        input set is never mutated.
    """
    if set_ is None:
        return {new}
    return {*set_, new}
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,421
|
beancount/fava
|
refs/heads/main
|
/src/fava/template_filters.py
|
"""Template filters for Fava.
All functions in this module will be automatically added as template filters.
"""
from __future__ import annotations
from decimal import Decimal
from pathlib import Path
from typing import Callable
from typing import TYPE_CHECKING
from unicodedata import normalize
from fava.beans import funcs
from fava.context import g
from fava.core.conversion import cost_or_value as cost_or_value_without_context
from fava.core.conversion import units
if TYPE_CHECKING: # pragma: no cover
import datetime
from fava.beans.abc import Meta
from fava.beans.abc import MetaValue
from fava.core.inventory import CounterInventory
from fava.core.inventory import SimpleCounterInventory
ZERO = Decimal()
def meta_items(meta: Meta | None) -> list[tuple[str, MetaValue]]:
    """List metadata items, hiding filename, lineno and dunder keys."""
    if not meta:
        return []
    hidden = ("filename", "lineno")
    return [
        (key, value)
        for key, value in meta.items()
        if key not in hidden and not key.startswith("__")
    ]
def cost_or_value(
    inventory: CounterInventory,
    date: datetime.date | None = None,
) -> SimpleCounterInventory:
    """Get the cost or value of an inventory.

    Applies the conversion that is active in the current request context,
    using the active ledger's price database.
    """
    conversion = g.conversion
    prices = g.ledger.prices
    return cost_or_value_without_context(inventory, conversion, prices, date)
def format_currency(
    value: Decimal,
    currency: str | None = None,
    show_if_zero: bool = False,
    invert: bool = False,
) -> str:
    """Format a value using the derived precision for a specified currency."""
    # Zero (and other falsy) values render as an empty string unless
    # explicitly requested.
    if not value and not show_if_zero:
        return ""
    fmt = g.ledger.format_decimal
    if value == ZERO:
        return fmt(ZERO, currency)
    return fmt(-value if invert else value, currency)
# Mapping of entry flag characters to their type names.
FLAGS_TO_TYPES = {"*": "cleared", "!": "pending"}


def flag_to_type(flag: str) -> str:
    """Names for entry flags (``"other"`` for unknown flags)."""
    try:
        return FLAGS_TO_TYPES[flag]
    except KeyError:
        return "other"
def basename(file_path: str) -> str:
    """Return the final path component, NFC-normalized."""
    final_component = Path(file_path).name
    return normalize("NFC", final_component)
#: The filters defined in this module that are added as template filters
#: (see the module docstring).
FILTERS: list[
    Callable[
        ...,
        (str | bool | SimpleCounterInventory | list[tuple[str, MetaValue]]),
    ]
] = [
    basename,
    cost_or_value,
    flag_to_type,
    format_currency,
    funcs.hash_entry,
    meta_items,
    units,
]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,422
|
beancount/fava
|
refs/heads/main
|
/src/fava/context.py
|
"""Specify types for the flask application context."""
from __future__ import annotations
from typing import TYPE_CHECKING
from flask import g as flask_g
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
from fava.core import FilteredLedger
from fava.ext import FavaExtensionBase
from fava.util.date import Interval
class Context:
    """The allowed context values.

    A typed view of the attributes Fava stores on the Flask application
    context (``flask.g``), so attribute access is type-checked.
    """

    #: Slug for the active Beancount file.
    beancount_file_slug: str | None

    #: Conversion to apply
    conversion: str

    #: Interval to group by
    interval: Interval

    #: The ledger
    ledger: FavaLedger

    #: The filtered ledger
    filtered: FilteredLedger

    #: The current extension, if this is an extension endpoint
    extension: FavaExtensionBase | None


# Re-typed alias of flask.g; the ignore silences the deliberate mismatch.
g: Context = flask_g  # type: ignore[assignment]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,423
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_budgets.py
|
"""Fava's budget syntax."""
from __future__ import annotations
from datetime import date
from decimal import Decimal
from typing import TYPE_CHECKING
from fava.core.budgets import calculate_budget
from fava.core.budgets import calculate_budget_children
from fava.core.budgets import parse_budgets
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Custom
from fava.core.budgets import BudgetDict
def test_budgets(load_doc_custom_entries: list[Custom]) -> None:
    """
    2016-01-01 custom "budget" Expenses:Groceries "weekly" 100.00 CNY
    2016-06-01 custom "budget" Expenses:Groceries "weekly" 10.00 EUR
    2016-06-01 custom "budget" Expenses:Groceries "asdfasdf" 10.00 EUR
    2016-06-01 custom "budget" Expenses:Groceries 10.00 EUR
    """
    # NOTE: the docstring above is Beancount input consumed by the
    # load_doc_custom_entries fixture, not documentation.
    budgets, errors = parse_budgets(load_doc_custom_entries)
    # The last two directives are invalid (bad / missing interval).
    assert len(errors) == 2
    # An account without budgets of its own yields an empty result.
    empty = calculate_budget(
        budgets,
        "Expenses",
        date(2016, 6, 1),
        date(2016, 6, 8),
    )
    assert empty == {}
    # Over one full week the weekly amounts are returned per currency.
    budgets_ = calculate_budget(
        budgets,
        "Expenses:Groceries",
        date(2016, 6, 1),
        date(2016, 6, 8),
    )
    assert budgets_["CNY"] == Decimal("100")
    assert budgets_["EUR"] == Decimal("10")
def test_budgets_daily(budgets_doc: BudgetDict) -> None:
    """
    2016-05-01 custom "budget" Expenses:Books "daily" 2.5 EUR"""
    # The docstring above is fixture input.  Before the budget's start
    # date nothing is budgeted.
    assert "EUR" not in calculate_budget(
        budgets_doc,
        "Expenses:Books",
        date(2010, 2, 1),
        date(2010, 2, 2),
    )
    # From the start date on, every day contributes the daily amount.
    for start, end, num in [
        (date(2016, 5, 1), date(2016, 5, 2), Decimal("2.5")),
        (date(2016, 5, 1), date(2016, 5, 3), Decimal("5.0")),
        (date(2016, 9, 2), date(2016, 9, 3), Decimal("2.5")),
        (date(2018, 12, 31), date(2019, 1, 1), Decimal("2.5")),
    ]:
        budget = calculate_budget(budgets_doc, "Expenses:Books", start, end)
        assert budget["EUR"] == num
def test_budgets_weekly(budgets_doc: BudgetDict) -> None:
    """
    2016-05-01 custom "budget" Expenses:Books "weekly" 21 EUR"""
    # The docstring above is fixture input.  A single day carries one
    # seventh of the weekly amount.
    for start, end, num in [
        (date(2016, 5, 1), date(2016, 5, 2), Decimal("21") / 7),
        (date(2016, 9, 1), date(2016, 9, 2), Decimal("21") / 7),
    ]:
        budget = calculate_budget(budgets_doc, "Expenses:Books", start, end)
        assert budget["EUR"] == num
def test_budgets_monthly(budgets_doc: BudgetDict) -> None:
    """
    2014-05-01 custom "budget" Expenses:Books "monthly" 100 EUR"""
    # The docstring above is fixture input.  A day's share depends on the
    # length of its month (31 days vs 29 in leap-year February).
    for start, end, num in [
        (date(2016, 5, 1), date(2016, 5, 2), Decimal("100") / 31),
        (date(2016, 2, 1), date(2016, 2, 2), Decimal("100") / 29),
        (date(2018, 3, 31), date(2018, 4, 1), Decimal("100") / 31),
    ]:
        budget = calculate_budget(budgets_doc, "Expenses:Books", start, end)
        assert budget["EUR"] == num
def test_budgets_doc_quarterly(budgets_doc: BudgetDict) -> None:
    """
    2014-05-01 custom "budget" Expenses:Books "quarterly" 123456.7 EUR"""
    # The docstring above is fixture input.  A day's share depends on the
    # length of its quarter (91 vs 92 days).
    for start, end, num in [
        (date(2016, 5, 1), date(2016, 5, 2), Decimal("123456.7") / 91),
        (date(2016, 8, 15), date(2016, 8, 16), Decimal("123456.7") / 92),
    ]:
        budget = calculate_budget(budgets_doc, "Expenses:Books", start, end)
        assert budget["EUR"] == num
def test_budgets_doc_yearly(budgets_doc: BudgetDict) -> None:
    """
    2010-01-01 custom "budget" Expenses:Books "yearly" 99999.87 EUR"""
    # The docstring above is fixture input.  2011 is not a leap year, so
    # one day is 1/365 of the yearly amount.
    budget = calculate_budget(
        budgets_doc,
        "Expenses:Books",
        date(2011, 2, 1),
        date(2011, 2, 2),
    )
    assert budget["EUR"] == Decimal("99999.87") / 365
def test_budgets_children(budgets_doc: BudgetDict) -> None:
    """
    2017-01-01 custom "budget" Expenses:Books "daily" 10.00 USD
    2017-01-01 custom "budget" Expenses:Books:Notebooks "daily" 2.00 USD"""
    # The docstring above is fixture input.  Parent accounts include the
    # budgets of all their descendants.
    budget = calculate_budget_children(
        budgets_doc,
        "Expenses",
        date(2017, 1, 1),
        date(2017, 1, 2),
    )
    assert budget["USD"] == Decimal("12.00")
    budget = calculate_budget_children(
        budgets_doc,
        "Expenses:Books",
        date(2017, 1, 1),
        date(2017, 1, 2),
    )
    assert budget["USD"] == Decimal("12.00")
    # A leaf account only carries its own budget.
    budget = calculate_budget_children(
        budgets_doc,
        "Expenses:Books:Notebooks",
        date(2017, 1, 1),
        date(2017, 1, 2),
    )
    assert budget["USD"] == Decimal("2.00")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,424
|
beancount/fava
|
refs/heads/main
|
/src/fava/help/__init__.py
|
"""List of all available help pages."""
from __future__ import annotations
# Mapping of help-page slug to its human-readable title.
HELP_PAGES = {
    "_index": "Index",
    "budgets": "Budgets",
    "import": "Import",
    "options": "Options",
    "beancount_syntax": "Beancount Syntax",
    "features": "Fava's features",
    "filters": "Filtering entries",
    "extensions": "Extensions",
}
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,425
|
beancount/fava
|
refs/heads/main
|
/src/fava/util/ranking.py
|
"""Ranking utilities."""
from __future__ import annotations
import math
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
import datetime
ZERO = 0.0
DEFAULT_RATE = math.log(2) * 1 / 365


class ExponentialDecayRanker:
    """Rank a list by exponential decay.

    Keeps a score per item, conceptually the sum of exponentially decaying
    'likes': a like at time ``l`` is worth ``exp(-rate * (t - l))`` at the
    current time ``t``.  Since only the relative order matters, all scores
    can be multiplied by ``exp(rate * t)``, leaving ``s = Σ exp(rate * l)``.
    To avoid huge numbers, the logarithm of that sum is what gets stored.

    Args:
        list_: If given, this list is ranked by ``.sort()``, otherwise all
            items with at least one 'like' will be ranked.
        rate: Decay rate; ``1/rate`` is the time in days for the value of a
            'like' to drop by ``1/e``.  The default of ``log(2)/365`` makes
            a like from a year ago count half as much as one from today.
    """

    __slots__ = ("list", "rate", "scores")

    def __init__(
        self,
        list_: list[str] | None = None,
        rate: float = DEFAULT_RATE,
    ) -> None:
        self.list = list_
        self.rate = rate
        # Only relative scores matter, so unseen items can default to
        # zero instead of float('-inf').
        self.scores: dict[str, float] = {}

    def update(self, item: str, date: datetime.date) -> None:
        """Add a 'like' for *item*.

        Args:
            item: An item in the list that is being ranked.
            date: The date on which the item has been liked.
        """
        previous = self.get(item)
        weighted_day = date.toordinal() * self.rate
        # Numerically stable log(exp(a) + exp(b)).
        hi = max(previous, weighted_day)
        lo = min(previous, weighted_day)
        self.scores[item] = hi + math.log1p(math.exp(lo - hi))

    def get(self, item: str) -> float:
        """Get the current score for an item, or zero."""
        return self.scores.get(item, ZERO)

    def sort(self) -> list[str]:
        """Return items sorted by rank, highest score first."""
        ranked = self.scores.keys() if self.list is None else self.list
        return sorted(ranked, key=self.get, reverse=True)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,426
|
beancount/fava
|
refs/heads/main
|
/tests/test_beans.py
|
from __future__ import annotations
import datetime
from decimal import Decimal
from typing import TYPE_CHECKING
from fava.beans.abc import Price
from fava.beans.account import parent
from fava.beans.prices import FavaPriceMap
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
def test_account_parent() -> None:
    """The parent of an account is the name with the last part dropped."""
    cases = [
        ("Assets", None),
        ("Assets:Cash", "Assets"),
        ("Assets:Cash:AA", "Assets:Cash"),
        ("Assets:asdfasdf", "Assets"),
    ]
    for account, expected in cases:
        assert parent(account) == expected
def test_fava_price_map(load_doc_entries: list[Directive]) -> None:
    # NOTE: the docstring below is not documentation - the
    # `load_doc_entries` fixture parses it as Beancount source.
    """
    option "operating_currency" "CHF"
    option "operating_currency" "USD"
    1850-07-01 commodity CHF
    1792-04-02 commodity USD
    2020-12-18 price USD 0 ZEROUSD
    2020-12-18 price USD 0.88 CHF
    2022-12-19 price USD 0.9287 CHF
    2022-12-19 price USD 0.9288 CHF
    2021-11-12 open Assets:A CHF
    2019-05-01 open Assets:B CHF
    2022-12-19 *
        Assets:A            1 CHF
        Assets:B
    2022-12-27 *
        Assets:A            1 CHF
        Assets:B
    """
    # Only the four `price` directives should feed the price map.
    price_entries = [e for e in load_doc_entries if isinstance(e, Price)]
    assert len(price_entries) == 4
    prices = FavaPriceMap(price_entries)

    # Pairs for operating currencies are reported in both directions.
    assert prices.commodity_pairs([]) == [("USD", "CHF"), ("USD", "ZEROUSD")]
    assert prices.commodity_pairs(["USD", "CHF"]) == [
        ("CHF", "USD"),
        ("USD", "CHF"),
        ("USD", "ZEROUSD"),
    ]

    # Unknown pairs yield None; identical commodities have price 1.
    assert prices.get_all_prices(("NO", "PRICES")) is None
    assert prices.get_all_prices(("USD", "PRICES")) is None
    assert prices.get_price(("SAME", "SAME")) == Decimal("1")

    # Of the two same-day 2022-12-19 prices, the later (0.9288) wins.
    usd_chf = ("USD", "CHF")
    assert prices.get_all_prices(usd_chf) == [
        (datetime.date(2020, 12, 18), Decimal("0.88")),
        (datetime.date(2022, 12, 19), Decimal("0.9288")),
    ]
    # The inverse pair is derived by reciprocal rates.
    assert prices.get_all_prices(("CHF", "USD")) == [
        (datetime.date(2020, 12, 18), Decimal("1") / Decimal("0.88")),
        (datetime.date(2022, 12, 19), Decimal("1") / Decimal("0.9288")),
    ]
    assert prices.get_price_point(usd_chf) == (
        datetime.date(2022, 12, 19),
        Decimal("0.9288"),
    )
    # Without a date, the latest price applies; with a date, the most
    # recent price on or before that date applies.
    assert prices.get_price(usd_chf) == Decimal("0.9288")
    assert prices.get_price(usd_chf, datetime.date(2022, 12, 18)) == Decimal(
        "0.88",
    )
    assert prices.get_price(usd_chf, datetime.date(2022, 12, 19)) == Decimal(
        "0.9288",
    )
    assert prices.get_price(usd_chf, datetime.date(2022, 12, 20)) == Decimal(
        "0.9288",
    )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,427
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_misc.py
|
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING
from fava.beans.load import load_string
from fava.core.misc import sidebar_links
from fava.core.misc import upcoming_events
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Custom
def test_sidebar_links(load_doc_custom_entries: list[Custom]) -> None:
    """
    2016-01-01 custom "fava-sidebar-link" "title" "link"
    2016-01-02 custom "fava-sidebar-link" "titl1" "lin1"
    """
    # The fixture parses the docstring above into custom entries; the
    # sidebar links should come back as (title, link) pairs in order.
    assert sidebar_links(load_doc_custom_entries) == [
        ("title", "link"),
        ("titl1", "lin1"),
    ]
def test_upcoming_events() -> None:
    """Only events within the look-ahead window are returned."""
    today = datetime.date.today()
    source = (
        f'{today} event "some_event" "test"\n'
        '2012-12-12 event "test" "test"'
    )
    entries, _, _ = load_string(source)
    # With a one-day window, only today's event qualifies.
    events = upcoming_events(entries, 1)  # type: ignore[arg-type]
    assert len(events) == 1
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,428
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_inventory.py
|
from __future__ import annotations
from decimal import Decimal
from fava.beans import create
from fava.core.inventory import CounterInventory
def test_add() -> None:
    """Adding equal and opposite amounts leaves an empty inventory."""
    inventory = CounterInventory()
    key = ("KEY", None)
    ten = Decimal("10")
    inventory.add(key, ten)
    assert len(inventory) == 1
    inventory.add(key, -ten)
    assert inventory.is_empty()
def test_add_amount() -> None:
    """Amounts are aggregated per currency; zeroed currencies vanish."""
    inv = CounterInventory()
    for raw in ("10 USD", "30 USD"):
        inv.add_amount(create.amount(raw))
    assert len(inv) == 1
    inv.add_amount(create.amount("-40 USD"))
    assert inv.is_empty()

    for raw in ("10 USD", "20 CAD", "10 USD"):
        inv.add_amount(create.amount(raw))
    assert len(inv) == 2
    inv.add_amount(create.amount("-20 CAD"))
    assert len(inv) == 1
def test_add_inventory() -> None:
    """Whole inventories can be merged into one another."""
    first = CounterInventory()
    second = CounterInventory()
    third = CounterInventory()
    first.add_amount(create.amount("10 USD"))
    second.add_amount(create.amount("30 USD"))
    third.add_amount(create.amount("-40 USD"))

    first.add_inventory(second)
    assert len(first) == 1
    # 10 + 30 - 40 cancels out completely.
    first.add_inventory(third)
    assert first.is_empty()

    # Merging into a fresh inventory copies the positions over.
    first = CounterInventory()
    first.add_inventory(second)
    assert len(first) == 1
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,429
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/ingest.py
|
"""Ingest helper functions."""
from __future__ import annotations
import datetime
import sys
import traceback
from dataclasses import dataclass
from os import altsep
from os import sep
from pathlib import Path
from runpy import run_path
from typing import Any
from typing import TYPE_CHECKING
from beancount.ingest import cache # type: ignore[import]
from beancount.ingest import extract
from beancount.ingest import identify
from fava.core.module_base import FavaModule
from fava.helpers import BeancountError
from fava.helpers import FavaAPIError
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.core import FavaLedger
class IngestError(BeancountError):
    """An error with one of the importers."""
@dataclass(frozen=True)
class FileImportInfo:
    """Info about one file/importer combination."""

    # Importer name, as returned by the importer's ``name()`` method.
    importer_name: str
    # Account the importer assigns to the file (empty string if none).
    account: str
    # Date for the file - the importer's ``file_date()`` or today.
    date: datetime.date
    # Display name - the importer's ``file_name()`` or the file basename.
    name: str
@dataclass(frozen=True)
class FileImporters:
    """Importers for a file."""

    # Full path of the file that can be imported.
    name: str
    # Basename of that file.
    basename: str
    # Import info for each importer that matched the file.
    importers: list[FileImportInfo]
def file_import_info(filename: str, importer: Any) -> FileImportInfo:
    """Generate info about a file with an importer.

    Args:
        filename: Path of the file to inspect.
        importer: A beancount importer instance.

    Returns:
        A :class:`FileImportInfo` with sensible fallbacks for any
        importer method that returned nothing.

    Raises:
        FavaAPIError: If one of the importer methods raises.
    """
    file = cache.get_file(filename)
    try:
        account = importer.file_account(file)
        date = importer.file_date(file)
        name = importer.file_name(file)
    except Exception as err:  # noqa: BLE001
        raise FavaAPIError(f"Error calling importer method: {err}") from err

    # Fall back to defaults where the importer provided nothing.
    if not account:
        account = ""
    if not date:
        date = datetime.date.today()
    if not name:
        name = Path(filename).name
    return FileImportInfo(importer.name(), account, date, name)
class IngestModule(FavaModule):
    """Exposes ingest functionality."""

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # The list of importers from the user's config module (CONFIG).
        self.config: list[Any] = []
        # The importers, keyed by their ``name()``.
        self.importers: dict[str, Any] = {}
        # Hook functions run over newly extracted entries.
        self.hooks: list[Any] = []
        # st_mtime_ns of the config module at the time it was loaded.
        self.mtime: int | None = None

    @property
    def module_path(self) -> Path | None:
        """The path to the importer configuration."""
        config_path = self.ledger.fava_options.import_config
        if not config_path:
            return None
        return self.ledger.join_path(config_path)

    def _error(self, msg: str) -> None:
        # Report an ingest problem in the ledger's error list.
        self.ledger.errors.append(IngestError(None, msg, None))

    def load_file(self) -> None:
        """Load the importer config module, reporting any errors."""
        if self.module_path is None:
            return
        module_path = self.module_path
        if not module_path.exists() or module_path.is_dir():
            self._error(f"File does not exist: '{module_path}'")
            return
        # Skip reloading if the file is unchanged since the last load.
        if module_path.stat().st_mtime_ns == self.mtime:
            return
        try:
            mod = run_path(str(self.module_path))
        except Exception:  # noqa: BLE001
            message = "".join(traceback.format_exception(*sys.exc_info()))
            self._error(f"Error in importer '{module_path}': {message}")
            return
        self.mtime = module_path.stat().st_mtime_ns
        self.config = mod["CONFIG"]
        # Duplicate detection is the default hook; a valid HOOKS list in
        # the config module replaces it entirely.
        self.hooks = [extract.find_duplicate_entries]
        if "HOOKS" in mod:
            hooks = mod["HOOKS"]
            if not isinstance(hooks, list) or not all(
                callable(fn) for fn in hooks
            ):
                message = "HOOKS is not a list of callables"
                self._error(f"Error in importer '{module_path}': {message}")
            else:
                self.hooks = hooks
        self.importers = {
            importer.name(): importer for importer in self.config
        }

    def import_data(self) -> list[FileImporters]:
        """Identify files and importers that can be imported.

        Returns:
            A list of :class:`.FileImportInfo`.
        """
        if not self.config:
            return []
        ret: list[FileImporters] = []
        # Scan every configured import directory for matching files.
        for directory in self.ledger.fava_options.import_dirs:
            full_path = self.ledger.join_path(directory)
            files = list(identify.find_imports(self.config, str(full_path)))
            for filename, importers in files:
                base = Path(filename).name
                infos = [
                    file_import_info(filename, importer)
                    for importer in importers
                ]
                ret.append(FileImporters(filename, base, infos))
        return ret

    def extract(self, filename: str, importer_name: str) -> list[Directive]:
        """Extract entries from filename with the specified importer.

        Args:
            filename: The full path to a file.
            importer_name: The name of an importer that matched the file.

        Returns:
            A list of new imported entries.

        Raises:
            FavaAPIError: If no import config has been set.
        """
        if not self.module_path:
            raise FavaAPIError("Missing import-config option")
        # Reload the config module if it changed on disk since the last
        # load (or was never loaded).
        if (
            self.mtime is None
            or self.module_path.stat().st_mtime_ns > self.mtime
        ):
            self.load_file()
        new_entries = extract.extract_from_file(
            filename,
            self.importers.get(importer_name),
            existing_entries=self.ledger.all_entries,
        )
        new_entries_list: list[tuple[str, list[Directive]]] = [
            (filename, new_entries),
        ]
        # Run all hooks (duplicate detection by default) over the result.
        for hook_fn in self.hooks:
            new_entries_list = hook_fn(
                new_entries_list,
                self.ledger.all_entries,
            )
        return new_entries_list[0][1]
def filepath_in_primary_imports_folder(
    filename: str,
    ledger: FavaLedger,
) -> Path:
    """File path for a document to upload to the primary import folder.

    Args:
        filename: The filename of the document.
        ledger: The FavaLedger.

    Returns:
        The path that the document should be saved at.

    Raises:
        FavaAPIError: If no import directory is configured.
    """
    import_dirs = ledger.fava_options.import_dirs
    if not import_dirs:
        raise FavaAPIError("You need to set at least one imports-dir.")

    # Strip path separators from the name so it cannot escape the folder.
    for separator in (sep, altsep):
        if separator:
            filename = filename.replace(separator, " ")

    return ledger.join_path(import_dirs[0], filename)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,430
|
beancount/fava
|
refs/heads/main
|
/tests/test_util_sets.py
|
from __future__ import annotations
from fava.util.sets import add_to_set
def test_add_to_set_basic() -> None:
assert add_to_set(None, "test") == {"test"}
assert add_to_set(set(), "test") == {"test"}
assert add_to_set({"test"}, "test") == {"test"}
def test_add_to_set_no_mutation() -> None:
test_set = {"test"}
assert add_to_set(test_set, "test2") == {"test", "test2"}
assert test_set == {"test"}
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,431
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/__init__.py
|
"""This module provides the data required by Fava's reports."""
from __future__ import annotations
from datetime import date
from datetime import timedelta
from functools import cached_property
from functools import lru_cache
from pathlib import Path
from typing import Iterable
from typing import TYPE_CHECKING
from typing import TypeVar
from beancount.core import realization
from beancount.core.data import iter_entry_dates
from beancount.core.inventory import Inventory
from beancount.loader import _load # type: ignore[attr-defined]
from beancount.loader import load_file
from beancount.utils.encryption import is_encrypted_file
from fava.beans.abc import Balance
from fava.beans.abc import Price
from fava.beans.abc import Transaction
from fava.beans.account import get_entry_accounts
from fava.beans.funcs import hash_entry
from fava.beans.prices import FavaPriceMap
from fava.beans.str import to_string
from fava.core.accounts import AccountDict
from fava.core.attributes import AttributesModule
from fava.core.budgets import BudgetModule
from fava.core.charts import ChartModule
from fava.core.commodities import CommoditiesModule
from fava.core.extensions import ExtensionModule
from fava.core.fava_options import parse_options
from fava.core.file import FileModule
from fava.core.file import get_entry_slice
from fava.core.filters import AccountFilter
from fava.core.filters import AdvancedFilter
from fava.core.filters import TimeFilter
from fava.core.group_entries import group_entries_by_type
from fava.core.ingest import IngestModule
from fava.core.misc import FavaMisc
from fava.core.number import DecimalFormatModule
from fava.core.query_shell import QueryShell
from fava.core.tree import Tree
from fava.core.watcher import Watcher
from fava.helpers import FavaAPIError
from fava.util import listify
from fava.util.date import dateranges
if TYPE_CHECKING: # pragma: no cover
from decimal import Decimal
from beancount.core.realization import RealAccount
from fava.beans.abc import Directive
from fava.beans.types import BeancountOptions
from fava.core.fava_options import FavaOptions
from fava.core.group_entries import EntriesByType
from fava.helpers import BeancountError
from fava.util.date import DateRange
from fava.util.date import Interval
#: Attribute names of the FavaLedger modules; each is (re)loaded via its
#: own ``load_file`` whenever ``FavaLedger.load_file`` runs.
MODULES = [
    "accounts",
    "attributes",
    "budgets",
    "charts",
    "commodities",
    "extensions",
    "file",
    "format_decimal",
    "misc",
    "query_shell",
    "ingest",
]

#: Generic type variable.
T = TypeVar("T")
class FilteredLedger:
    """Filtered Beancount ledger."""

    __slots__ = (
        "__dict__",  # for the cached_property decorator
        "ledger",
        "entries",
        "date_range",
        "_date_first",
        "_date_last",
    )

    # Date bounds of the filtered entries (None if none can be derived).
    _date_first: date | None
    _date_last: date | None

    def __init__(
        self,
        ledger: FavaLedger,
        account: str | None = None,
        filter: str | None = None,  # noqa: A002
        time: str | None = None,
    ) -> None:
        """Apply the account, advanced and time filters to the ledger.

        Args:
            ledger: The unfiltered ledger.
            account: An optional account filter string.
            filter: An optional advanced filter string.
            time: An optional time filter string.
        """
        self.ledger = ledger
        self.date_range: DateRange | None = None
        entries = ledger.all_entries
        if account:
            entries = AccountFilter(account).apply(entries)
        if filter and filter.strip():
            entries = AdvancedFilter(filter.strip()).apply(entries)
        if time:
            time_filter = TimeFilter(ledger.options, ledger.fava_options, time)
            entries = time_filter.apply(entries)
            self.date_range = time_filter.date_range
        self.entries = entries

        # With a time filter, its range gives the date bounds; otherwise
        # derive them from the first/last Transaction or Price entry.
        if self.date_range:
            self._date_first = self.date_range.begin
            self._date_last = self.date_range.end
            return
        self._date_first = None
        self._date_last = None
        for entry in self.entries:
            if isinstance(entry, (Transaction, Price)):
                self._date_first = entry.date
                break
        for entry in reversed(self.entries):
            if isinstance(entry, (Transaction, Price)):
                # The end date is exclusive, hence the one-day offset.
                self._date_last = entry.date + timedelta(1)
                break

    @cached_property
    def root_account(self) -> RealAccount:
        """A realized account for the filtered entries."""
        return realization.realize(
            self.entries,  # type: ignore[arg-type]
            self.ledger.root_accounts,
        )

    @property
    def end_date(self) -> date | None:
        """The date to use for prices."""
        date_range = self.date_range
        if date_range:
            return date_range.end_inclusive
        return None

    @cached_property
    def root_tree(self) -> Tree:
        """A root tree."""
        return Tree(self.entries)

    @cached_property
    def root_tree_closed(self) -> Tree:
        """A root tree for the balance sheet."""
        tree = Tree(self.entries)
        tree.cap(self.ledger.options, self.ledger.fava_options.unrealized)
        return tree

    @listify
    def interval_ranges(self, interval: Interval) -> Iterable[DateRange]:
        """Yield date ranges corresponding to interval boundaries."""
        if not self._date_first or not self._date_last:
            return []
        return dateranges(self._date_first, self._date_last, interval)

    def prices(self, base: str, quote: str) -> list[tuple[date, Decimal]]:
        """List all prices (limited to the filtered date range, if any)."""
        all_prices = self.ledger.prices.get_all_prices((base, quote))
        if all_prices is None:
            return []
        date_range = self.date_range
        if date_range:
            return [
                price_point
                for price_point in all_prices
                if date_range.begin <= price_point[0] < date_range.end
            ]
        return all_prices

    def account_is_closed(self, account_name: str) -> bool:
        """Check if the account is closed.

        Args:
            account_name: An account name.

        Returns:
            True if the account is closed before the end date of the current
            time filter.
        """
        date_range = self.date_range
        close_date = self.ledger.accounts[account_name].close_date
        if close_date is None:
            return False
        # Without a time filter, any closed account counts as closed.
        return close_date < date_range.end if date_range else True
class FavaLedger:
    """Create an interface for a Beancount ledger.

    Arguments:
        path: Path to the main Beancount file.
    """

    __slots__ = (
        "accounts",
        "all_entries",
        "all_entries_by_type",
        "beancount_file_path",
        "errors",
        "fava_options",
        "_is_encrypted",
        "options",
        "prices",
        "_watcher",
        *MODULES,
    )

    #: List of all (unfiltered) entries.
    all_entries: list[Directive]

    #: A list of all errors reported by Beancount.
    errors: list[BeancountError]

    #: The Beancount options map.
    options: BeancountOptions

    #: A dict with all of Fava's option values.
    fava_options: FavaOptions

    #: The price map.
    prices: FavaPriceMap

    #: Dict of list of all (unfiltered) entries by type.
    all_entries_by_type: EntriesByType

    def __init__(self, path: str) -> None:
        #: The path to the main Beancount file.
        self.beancount_file_path = path
        self._is_encrypted = is_encrypted_file(path)

        #: An :class:`AttributesModule` instance.
        self.attributes = AttributesModule(self)

        #: A :class:`.BudgetModule` instance.
        self.budgets = BudgetModule(self)

        #: A :class:`.ChartModule` instance.
        self.charts = ChartModule(self)

        #: A :class:`.CommoditiesModule` instance.
        self.commodities = CommoditiesModule(self)

        #: A :class:`.ExtensionModule` instance.
        self.extensions = ExtensionModule(self)

        #: A :class:`.FileModule` instance.
        self.file = FileModule(self)

        #: A :class:`.IngestModule` instance.
        self.ingest = IngestModule(self)

        #: A :class:`.FavaMisc` instance.
        self.misc = FavaMisc(self)

        #: A :class:`.DecimalFormatModule` instance.
        self.format_decimal = DecimalFormatModule(self)

        #: A :class:`.QueryShell` instance.
        self.query_shell = QueryShell(self)

        #: A :class:`.AccountDict` module - a dict with information about the accounts.
        self.accounts = AccountDict(self)

        # Watches included files and document folders for changes.
        self._watcher = Watcher()

        self.load_file()

    def load_file(self) -> None:
        """Load the main file and all included files and set attributes."""
        # use the internal function to disable cache
        if not self._is_encrypted:
            # pylint: disable=protected-access
            self.all_entries, self.errors, self.options = _load(
                [(self.beancount_file_path, True)],
                None,
                None,
                None,
            )
        else:
            self.all_entries, self.errors, self.options = load_file(
                self.beancount_file_path,
            )
        # Previously computed filtered ledgers are stale after a reload.
        self.get_filtered.cache_clear()
        self.all_entries_by_type = group_entries_by_type(self.all_entries)
        self.prices = FavaPriceMap(self.all_entries_by_type.Price)
        self.fava_options, errors = parse_options(
            self.all_entries_by_type.Custom,
        )
        self.errors.extend(errors)
        if not self._is_encrypted:
            self._watcher.update(*self.paths_to_watch())
        # Let each Fava module rebuild its state from the new entries.
        for mod in MODULES:
            getattr(self, mod).load_file()

    @lru_cache(maxsize=16)  # noqa: B019
    def get_filtered(
        self,
        account: str | None = None,
        filter: str | None = None,  # noqa: A002
        time: str | None = None,
    ) -> FilteredLedger:
        """Filter the ledger."""
        return FilteredLedger(
            ledger=self,
            account=account,
            filter=filter,
            time=time,
        )

    @property
    def mtime(self) -> int:
        """The timestamp to the latest change of the underlying files."""
        return self._watcher.last_checked

    @property
    def root_accounts(self) -> tuple[str, str, str, str, str]:
        """The five root accounts."""
        options = self.options
        return (
            options["name_assets"],
            options["name_liabilities"],
            options["name_equity"],
            options["name_income"],
            options["name_expenses"],
        )

    def join_path(self, *args: str) -> Path:
        """Path relative to the directory of the ledger."""
        return Path(self.beancount_file_path).parent.joinpath(*args).resolve()

    def paths_to_watch(self) -> tuple[list[Path], list[Path]]:
        """Get paths to included files and document directories.

        Returns:
            A tuple (files, directories).
        """
        files = [Path(i) for i in self.options["include"]]
        # The importer config file should also trigger reloads.
        if self.ingest.module_path:
            files.append(self.ingest.module_path)
        return (
            files,
            [
                self.join_path(path, account)
                for account in self.root_accounts
                for path in self.options["documents"]
            ],
        )

    def changed(self) -> bool:
        """Check if the file needs to be reloaded.

        Returns:
            True if a change in one of the included files or a change in a
            document folder was detected and the file has been reloaded.
        """
        # We can't reload an encrypted file, so act like it never changes.
        if self._is_encrypted:
            return False
        changed = self._watcher.check()
        if changed:
            self.load_file()
        return changed

    def interval_balances(
        self,
        filtered: FilteredLedger,
        interval: Interval,
        account_name: str,
        accumulate: bool = False,
    ) -> tuple[list[Tree], list[DateRange]]:
        """Balances by interval.

        Arguments:
            filtered: The currently filtered ledger.
            interval: An interval.
            account_name: An account name.
            accumulate: A boolean, ``True`` if the balances for an interval
                should include all entries up to the end of the interval.

        Returns:
            A pair of a list of Tree instances and the intervals.
        """
        min_accounts = [
            account
            for account in self.accounts
            if account.startswith(account_name)
        ]
        # Newest interval first.
        interval_ranges = list(reversed(filtered.interval_ranges(interval)))
        interval_balances = [
            Tree(
                iter_entry_dates(
                    filtered.entries,
                    # When accumulating, include everything from the start.
                    date.min if accumulate else date_range.begin,
                    date_range.end,
                ),
                min_accounts,
            )
            for date_range in interval_ranges
        ]
        return interval_balances, interval_ranges

    def account_journal(
        self,
        filtered: FilteredLedger,
        account_name: str,
        with_journal_children: bool = False,
    ) -> Iterable[tuple[Directive, Inventory, Inventory]]:
        """Journal for an account.

        Args:
            filtered: The currently filtered ledger.
            account_name: An account name.
            with_journal_children: Whether to include postings of subaccounts
                of the given account.

        Returns:
            A generator of ``(entry, change, balance)`` tuples.
            change and balance have already been reduced to units.
        """
        real_account = realization.get_or_create(
            filtered.root_account,
            account_name,
        )
        txn_postings = (
            realization.get_postings(real_account)
            if with_journal_children
            else real_account.txn_postings
        )
        return (
            (entry, change, balance)
            for (
                entry,
                _postings,
                change,
                balance,
            ) in realization.iterate_with_balance(
                txn_postings,  # type: ignore[arg-type]
            )
        )

    def get_entry(self, entry_hash: str) -> Directive:
        """Find an entry.

        Arguments:
            entry_hash: Hash of the entry.

        Returns:
            The entry with the given hash.

        Raises:
            FavaAPIError: If there is no entry for the given hash.
        """
        try:
            return next(
                entry
                for entry in self.all_entries
                if entry_hash == hash_entry(entry)
            )
        except StopIteration as exc:
            raise FavaAPIError(
                f'No entry found for hash "{entry_hash}"',
            ) from exc

    def context(
        self,
        entry_hash: str,
    ) -> tuple[
        Directive,
        dict[str, list[str]] | None,
        dict[str, list[str]] | None,
        str,
        str,
    ]:
        """Context for an entry.

        Arguments:
            entry_hash: Hash of entry.

        Returns:
            A tuple ``(entry, before, after, source_slice, sha256sum)`` of the
            (unique) entry with the given ``entry_hash``. If the entry is a
            Balance or Transaction then ``before`` and ``after`` contain
            the balances before and after the entry of the affected accounts.
        """
        entry = self.get_entry(entry_hash)
        source_slice, sha256sum = get_entry_slice(entry)
        if not isinstance(entry, (Balance, Transaction)):
            return entry, None, None, source_slice, sha256sum

        # Accumulate postings on the affected accounts for all entries
        # that come before this one.
        entry_accounts = get_entry_accounts(entry)
        balances = {account: Inventory() for account in entry_accounts}
        for entry_ in self.all_entries:
            if entry_ is entry:
                break
            if isinstance(entry_, Transaction):
                for posting in entry_.postings:
                    balance = balances.get(posting.account, None)
                    if balance is not None:
                        balance.add_position(posting)

        def visualise(inv: Inventory) -> list[str]:
            # Render an inventory as a sorted list of position strings.
            return [to_string(pos) for pos in sorted(inv)]

        before = {acc: visualise(inv) for acc, inv in balances.items()}
        if isinstance(entry, Balance):
            return entry, before, None, source_slice, sha256sum

        # Apply the entry's own postings to get the after-balances.
        for posting in entry.postings:
            balances[posting.account].add_position(posting)

        after = {acc: visualise(inv) for acc, inv in balances.items()}
        return entry, before, after, source_slice, sha256sum

    def commodity_pairs(self) -> list[tuple[str, str]]:
        """List pairs of commodities.

        Returns:
            A list of pairs of commodities. Pairs of operating currencies will
            be given in both directions not just in the one found in file.
        """
        return self.prices.commodity_pairs(self.options["operating_currency"])

    def statement_path(self, entry_hash: str, metadata_key: str) -> str:
        """Get the path for a statement found in the specified entry.

        Raises:
            FavaAPIError: If no matching document is found.
        """
        entry = self.get_entry(entry_hash)
        value = entry.meta[metadata_key]
        accounts = set(get_entry_accounts(entry))
        full_path = Path(entry.meta["filename"]).parent / value
        for document in self.all_entries_by_type.Document:
            # Match either by full path or by basename on a related account.
            if document.filename == str(full_path):
                return document.filename
            if (
                document.account in accounts
                and Path(document.filename).name == value
            ):
                return document.filename

        raise FavaAPIError("Statement not found.")

    group_entries_by_type = staticmethod(group_entries_by_type)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,432
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/fava_options.py
|
"""Fava's options.
Options for Fava can be specified through Custom entries in the Beancount file.
This module contains a list of possible options, the defaults and the code for
parsing the options.
"""
from __future__ import annotations
import re
from dataclasses import dataclass
from dataclasses import field
from dataclasses import fields
from typing import Pattern
from typing import TYPE_CHECKING
from babel.core import Locale
from babel.core import UnknownLocaleError
from fava.helpers import BeancountError
from fava.util.date import END_OF_YEAR
from fava.util.date import parse_fye_string
if TYPE_CHECKING: # pragma: no cover
import datetime
from fava.beans.abc import Custom
from fava.util.date import FiscalYearEnd
class OptionError(BeancountError):
    """An error for one the Fava options."""
@dataclass(frozen=True)
class InsertEntryOption:
    """Insert option.

    An option that determines where entries for matching accounts should be
    inserted.
    """

    # Date of the custom entry that declared this option.
    date: datetime.date
    # Compiled pattern that matching accounts are tested against.
    re: Pattern[str]
    # Source file and line of the declaring entry.
    filename: str
    lineno: int
@dataclass
class FavaOptions:
    """Options for Fava that can be set in the Beancount file."""

    account_journal_include_children: bool = True
    auto_reload: bool = False
    # Patterns (from `collapse-pattern` options) for accounts to collapse.
    collapse_pattern: list[Pattern[str]] = field(default_factory=list)
    currency_column: int = 61
    conversion_currencies: tuple[str, ...] = ()
    # Set to the filename of the entry that declared `default-file`.
    default_file: str | None = None
    default_page: str = "income_statement/"
    fiscal_year_end: FiscalYearEnd = END_OF_YEAR
    import_config: str | None = None
    import_dirs: tuple[str, ...] = ()
    indent: int = 2
    # Placement rules for new entries (one per `insert-entry` option).
    insert_entry: list[InsertEntryOption] = field(default_factory=list)
    invert_income_liabilities_equity: bool = False
    language: str | None = None
    locale: str | None = None
    show_accounts_with_zero_balance: bool = True
    show_accounts_with_zero_transactions: bool = True
    show_closed_accounts: bool = False
    sidebar_show_queries: int = 5
    unrealized: str = "Unrealized"
    upcoming_events: int = 7
    uptodate_indicator_grey_lookback_days: int = 60
    use_external_editor: bool = False
# Option names grouped by their value type. Because of
# `from __future__ import annotations`, `f.type` is the annotation
# *string*, so options are classified by string matching.
_fields = fields(FavaOptions)
All_OPTS = {f.name for f in _fields}
BOOL_OPTS = {f.name for f in _fields if str(f.type) == "bool"}
INT_OPTS = {f.name for f in _fields if str(f.type) == "int"}
TUPLE_OPTS = {f.name for f in _fields if f.type.startswith("tuple[str,")}
STR_OPTS = {f.name for f in _fields if f.type.startswith("str")}
def parse_option_custom_entry(  # noqa: PLR0912
    entry: Custom,
    options: FavaOptions,
) -> None:
    """Parse a single custom fava-option entry and set option accordingly.

    Args:
        entry: The custom entry (``custom "fava-option" "name" "value"``).
        options: The options instance to mutate.

    Raises:
        ValueError: For an unknown option name, locale or fiscal year end.
        TypeError: For a non-string value or an invalid regular expression.
    """
    # Option names use dashes in the file but underscores on FavaOptions.
    key = entry.values[0].value.replace("-", "_")
    if key not in All_OPTS:
        raise ValueError(f"unknown option `{key}`")

    # `default_file` takes no value - it refers to the declaring file.
    if key == "default_file":
        options.default_file = entry.meta["filename"]
        return

    value = entry.values[1].value
    if not isinstance(value, str):
        raise TypeError(f"expected string value for option `{key}`")

    if key == "insert_entry":
        try:
            pattern = re.compile(value)
        except re.error as err:
            raise TypeError(
                f"Should be a regular expression: '{value}'.",
            ) from err
        opt = InsertEntryOption(
            entry.date,
            pattern,
            entry.meta["filename"],
            entry.meta["lineno"],
        )
        options.insert_entry.append(opt)
    elif key == "collapse_pattern":
        try:
            pattern = re.compile(value)
        except re.error as err:
            raise TypeError(
                f"Should be a regular expression: '{value}'.",
            ) from err
        options.collapse_pattern.append(pattern)
    elif key == "locale":
        # Validate the locale but store the raw string.
        try:
            Locale.parse(value)
            options.locale = value
        except UnknownLocaleError as err:
            raise ValueError(f"Unknown locale: '{value}'.") from err
    elif key == "fiscal_year_end":
        fye = parse_fye_string(value)
        if fye is None:
            raise ValueError("Invalid 'fiscal_year_end' option.")
        options.fiscal_year_end = fye
    elif key in STR_OPTS:
        setattr(options, key, value)
    elif key in BOOL_OPTS:
        setattr(options, key, value.lower() == "true")
    elif key in INT_OPTS:
        setattr(options, key, int(value))
    else:  # key in TUPLE_OPTS
        # Tuple options are given as space-separated lists.
        setattr(options, key, tuple(value.strip().split(" ")))
def parse_options(
    custom_entries: list[Custom],
) -> tuple[FavaOptions, list[OptionError]]:
    """Parse custom entries for Fava options.

    The format for option entries is the following::

        2016-04-01 custom "fava-option" "[name]" "[value]"

    Args:
        custom_entries: A list of Custom entries.

    Returns:
        A tuple (options, errors) where options is a dictionary of all options
        to values, and errors contains possible parsing errors.
    """
    options = FavaOptions()
    errors = []

    relevant_entries = (
        entry for entry in custom_entries if entry.type == "fava-option"
    )
    for entry in relevant_entries:
        try:
            parse_option_custom_entry(entry, options)
        except (IndexError, TypeError, ValueError) as err:
            errors.append(
                OptionError(
                    entry.meta,
                    f"Failed to parse fava-option entry: {err!s}",
                    entry,
                ),
            )

    return options, errors
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,433
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/inventory.py
|
"""Alternative implementation of Beancount's Inventory."""
from __future__ import annotations
from decimal import Decimal
from typing import Callable
from typing import Dict
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
from fava.beans import create
from fava.beans.abc import Cost
if TYPE_CHECKING: # pragma: no cover
from typing import Concatenate
from typing import Iterable
from typing import Iterator
from typing import ParamSpec
from fava.beans.abc import Amount
from fava.beans.abc import Position
P = ParamSpec("P")
ZERO = Decimal()
InventoryKey = Tuple[str, Optional[Cost]]
class _Amount(NamedTuple):
    """A bare (currency, number) pair, as yielded by ``amounts()`` below."""

    currency: str
    number: Decimal
class SimpleCounterInventory(Dict[str, Decimal]):
    """A simple inventory mapping just strings to numbers."""

    def is_empty(self) -> bool:
        """Check if the inventory is empty."""
        return len(self) == 0

    def add(self, key: str, number: Decimal) -> None:
        """Add a number to key."""
        total = number + self.get(key, Decimal())
        if total == Decimal():
            # Positions that net out to zero are dropped entirely.
            self.pop(key, None)
        else:
            self[key] = total

    def amounts(self) -> Iterable[_Amount]:
        """Get the amounts in this inventory."""
        return (_Amount(currency, number) for currency, number in self.items())

    def __iter__(self) -> Iterator[str]:
        # Plain iteration is ambiguous for this type; use items()/amounts().
        raise NotImplementedError

    def __neg__(self) -> SimpleCounterInventory:
        return SimpleCounterInventory(
            (currency, -number) for currency, number in self.items()
        )
class CounterInventory(Dict[InventoryKey, Decimal]):
    """A lightweight inventory.

    This is intended as a faster alternative to Beancount's Inventory class.
    Due to not using a list, for inventories with a lot of different positions,
    inserting is much faster.

    The keys should be tuples ``(currency, cost)``.
    """

    def is_empty(self) -> bool:
        """Check if the inventory is empty."""
        return len(self) == 0

    def add(self, key: InventoryKey, number: Decimal) -> None:
        """Add a number to key."""
        total = number + self.get(key, Decimal())
        if total == Decimal():
            # Positions that net out to zero are dropped entirely.
            self.pop(key, None)
        else:
            self[key] = total

    def reduce(
        self,
        reducer: Callable[Concatenate[Position, P], Amount],
        *args: P.args,
        **_kwargs: P.kwargs,
    ) -> SimpleCounterInventory:
        """Reduce inventory.

        Note that this returns a simple :class:`CounterInventory` with just
        currencies as keys.
        """
        # NOTE(review): keyword arguments are accepted but not forwarded to
        # the reducer - confirm this is intentional.
        result = SimpleCounterInventory()
        for (currency, cost), number in self.items():
            pos = create.position(create.amount((number, currency)), cost)
            reduced = reducer(pos, *args)
            result.add(reduced.currency, reduced.number)
        return result

    def add_amount(self, amount: Amount, cost: Cost | None = None) -> None:
        """Add an Amount to the inventory."""
        self.add((amount.currency, cost), amount.number)

    def add_position(self, pos: Position) -> None:
        """Add a Position or Posting to the inventory."""
        self.add_amount(pos.units, pos.cost)

    def __neg__(self) -> CounterInventory:
        return CounterInventory({key: -num for key, num in self.items()})

    def __add__(self, other: CounterInventory) -> CounterInventory:
        total = CounterInventory(self)
        total.add_inventory(other)
        return total

    def add_inventory(self, counter: CounterInventory) -> None:
        """Add another :class:`CounterInventory`."""
        if not self:
            self.update(counter)
            return
        for key, number in counter.items():
            total = number + self.get(key, Decimal())
            if total == Decimal():
                self.pop(key, None)
            else:
                self[key] = total
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,434
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/extensions.py
|
"""Fava extensions."""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING
from fava.core.module_base import FavaModule
from fava.ext import find_extensions
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Custom
from fava.beans.abc import Directive
from fava.core import FavaLedger
from fava.ext import FavaExtensionBase
@dataclass
class ExtensionDetails:
    """The information about an extension that is needed for the frontend."""

    #: The extension's name (``FavaExtensionBase.name``).
    name: str
    #: Title of the extension's report page, if any.
    report_title: str | None
    #: Whether the extension provides a frontend JS module.
    has_js_module: bool
class ExtensionModule(FavaModule):
    """Fava extensions."""

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # Loaded extension instances, keyed by extension name.
        self._instances: dict[str, FavaExtensionBase] = {}
        # Extension classes that have already been instantiated.
        self._loaded_extensions: set[type[FavaExtensionBase]] = set()

    def load_file(self) -> None:
        custom_entries = self.ledger.all_entries_by_type.Custom
        configs = extension_entries(custom_entries)
        base_dir = Path(self.ledger.beancount_file_path).parent

        discovered: list[type[FavaExtensionBase]] = []
        for extension_name in configs:
            found, errors = find_extensions(base_dir, extension_name)
            discovered.extend(found)
            self.ledger.errors.extend(errors)

        for cls in discovered:
            if cls in self._loaded_extensions:
                continue
            self._loaded_extensions.add(cls)
            # The config string is looked up by the module the class lives in.
            instance = cls(self.ledger, configs.get(cls.__module__))
            self._instances[instance.name] = instance

    @property
    def _exts(self) -> list[FavaExtensionBase]:
        return list(self._instances.values())

    @property
    def extension_details(self) -> list[ExtensionDetails]:
        """Extension information to provide to the Frontend."""
        return [
            ExtensionDetails(ext.name, ext.report_title, ext.has_js_module)
            for ext in self._exts
        ]

    def get_extension(self, name: str) -> FavaExtensionBase | None:
        """Get the extension with the given name."""
        return self._instances.get(name)

    def after_entry_modified(self, entry: Directive, new_lines: str) -> None:
        for extension in self._exts:
            extension.after_entry_modified(entry, new_lines)

    def after_insert_entry(self, entry: Directive) -> None:
        for extension in self._exts:
            extension.after_insert_entry(entry)

    def after_delete_entry(self, entry: Directive) -> None:
        for extension in self._exts:
            extension.after_delete_entry(entry)

    def after_insert_metadata(
        self,
        entry: Directive,
        key: str,
        value: str,
    ) -> None:
        for extension in self._exts:
            extension.after_insert_metadata(entry, key, value)

    def after_write_source(self, path: str, source: str) -> None:
        for extension in self._exts:
            extension.after_write_source(path, source)
def extension_entries(
    custom_entries: list[Custom],
) -> dict[str, str | None]:
    """Parse custom entries for extensions.

    They have the following format::

        2016-04-01 custom "fava-extension" "my_extension" "{'my_option': {}}"
    """
    result: dict[str, str | None] = {}
    for entry in custom_entries:
        if entry.type != "fava-extension":
            continue
        # The optional second value is the extension's config string.
        config = entry.values[1].value if len(entry.values) == 2 else None
        result[entry.values[0].value] = config
    return result
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,435
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/funcs.py
|
"""Various functions to deal with Beancount data."""
from __future__ import annotations
from typing import Any
from typing import TYPE_CHECKING
from beancount.core import compare # type: ignore[attr-defined]
from beancount.query import query # type: ignore[attr-defined]
from beancount.query import query_execute # type: ignore[attr-defined]
if TYPE_CHECKING: # pragma: no cover
from typing import TypeAlias
from fava.beans.abc import Directive
from fava.beans.types import BeancountOptions
ResultType: TypeAlias = tuple[str, type[Any]]
ResultRow: TypeAlias = tuple[Any, ...]
QueryResult: TypeAlias = tuple[list[ResultType], list[ResultRow]]
def hash_entry(entry: Directive) -> str:
    """Hash an entry.

    Delegates to ``beancount.core.compare.hash_entry``.
    """
    return compare.hash_entry(entry)  # type: ignore[no-any-return]
def execute_query(
    query_: str,
    entries: list[Directive],
    options_map: BeancountOptions,
) -> QueryResult:
    """Execute a query.

    Args:
        query_: A BQL query string.
        entries: The entries to run the query over.
        options_map: The Beancount options.

    Returns:
        A ``(types, rows)`` tuple, as returned by
        ``beancount.query.query_execute.execute_query``.
    """
    return query_execute.execute_query(  # type: ignore[no-any-return]
        query_,
        entries,
        options_map,
    )
def run_query(
    entries: list[Directive],
    options_map: BeancountOptions,
    _query: str,
    numberify: bool = False,
) -> QueryResult:
    """Run a query.

    Args:
        entries: The entries to run the query over.
        options_map: The Beancount options.
        _query: A BQL query string.
        numberify: Passed through to ``beancount.query.query.run_query``.

    Returns:
        A ``(types, rows)`` tuple.
    """
    return query.run_query(  # type: ignore[no-any-return]
        entries,
        options_map,
        _query,
        numberify=numberify,
    )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,436
|
beancount/fava
|
refs/heads/main
|
/src/fava/beans/create.py
|
"""Helpers to create entries."""
from __future__ import annotations

from decimal import Decimal
from typing import TYPE_CHECKING

from beancount.core import data
from beancount.core.amount import A as BEANCOUNT_A  # type: ignore[attr-defined]
from beancount.core.amount import Amount as BeancountAmount
from beancount.core.position import Position as BeancountPosition

from fava.beans.abc import Amount

if TYPE_CHECKING:  # pragma: no cover
    import datetime

    from fava.beans.abc import Balance
    from fava.beans.abc import Cost
    from fava.beans.abc import Meta
    from fava.beans.abc import Note
    from fava.beans.abc import Position
    from fava.beans.abc import Posting
    from fava.beans.abc import TagsOrLinks
    from fava.beans.abc import Transaction
    from fava.beans.flags import Flag
def decimal(num: Decimal | str) -> Decimal:
    """Decimal from a string (an existing Decimal is returned unchanged)."""
    return Decimal(num) if isinstance(num, str) else num
def amount(amt: Amount | tuple[Decimal, str] | str) -> Amount:
    """Amount from a string.

    Accepts an existing :class:`Amount` (returned unchanged), a string such
    as ``"10 EUR"`` (parsed by Beancount's ``A``), or a
    ``(number, currency)`` tuple.
    """
    if isinstance(amt, Amount):
        return amt
    if isinstance(amt, str):
        return BEANCOUNT_A(amt)  # type: ignore[no-any-return]
    return BeancountAmount(*amt)  # type: ignore[return-value]
def position(units: Amount, cost: Cost | None) -> Position:
    """Create a position.

    Thin wrapper around ``beancount.core.position.Position``.
    """
    return BeancountPosition(units, cost)  # type: ignore[arg-type,return-value]
def posting(
    account: str,
    units: Amount | str,
    cost: Cost | None = None,
    price: Amount | str | None = None,
    flag: str | None = None,
    meta: Meta | None = None,
) -> Posting:
    """Create a Beancount Posting.

    Both ``units`` and ``price`` may be given as strings; they are
    normalised via :func:`amount`.
    """
    normalised_price = amount(price) if price is not None else None
    return data.Posting(  # type: ignore[return-value]
        account,
        amount(units),  # type: ignore[arg-type]
        cost,  # type: ignore[arg-type]
        normalised_price,  # type: ignore[arg-type]
        flag,
        meta,
    )
def transaction(
    meta: Meta,
    date: datetime.date,
    flag: Flag,
    payee: str | None,
    narration: str,
    tags: TagsOrLinks,
    links: TagsOrLinks,
    postings: list[Posting],
) -> Transaction:
    """Create a Beancount Transaction.

    Thin typed wrapper around ``beancount.core.data.Transaction``; all
    arguments are passed through unchanged.
    """
    return data.Transaction(  # type: ignore[return-value]
        meta,
        date,
        flag,
        payee,
        narration,
        tags,
        links,
        postings,  # type: ignore[arg-type]
    )
def balance(
    meta: Meta,
    date: datetime.date,
    account: str,
    _amount: Amount | str,
    tolerance: Decimal | None = None,
    diff_amount: Amount | None = None,
) -> Balance:
    """Create a Beancount Balance.

    The asserted amount may be given as a string (e.g. ``"10 EUR"``); it is
    normalised via :func:`amount`.
    """
    return data.Balance(  # type: ignore[return-value]
        meta,
        date,
        account,
        amount(_amount),  # type: ignore[arg-type]
        tolerance,
        diff_amount,  # type: ignore[arg-type]
    )
def note(
    meta: Meta,
    date: datetime.date,
    account: str,
    comment: str,
) -> Note:
    """Create a Beancount Note.

    Args:
        meta: Entry metadata.
        date: Date of the note.
        account: The account the note is attached to.
        comment: The note's text.

    Returns:
        A Beancount Note directive.
    """
    # Fixed return annotation: this builds a Note, not a Balance as the
    # previous annotation claimed.
    return data.Note(  # type: ignore[return-value]
        meta,
        date,
        account,
        comment,
    )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,437
|
beancount/fava
|
refs/heads/main
|
/docs/conf.py
|
from __future__ import annotations
# Sphinx extensions used to build the documentation.
extensions = [
    "sphinx.ext.extlinks",
    "sphinx.ext.napoleon",
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
]
# Resolve cross-references into the Python standard library docs.
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
# General information about the project.
project = "Fava"
copyright = "2016, Dominik Aumayr"  # noqa: A001
author = "Dominik Aumayr"
# Shorthand roles, e.g. :bug:`123` and :user:`name`.
extlinks = {
    "bug": ("https://github.com/beancount/fava/issues/%s", "#%s"),
    "user": ("https://github.com/%s", "@%s"),
}
# Document all members, including undocumented ones, by default.
autodoc_default_options = {
    "members": True,
    "undoc-members": True,
}
def skip_namedtuples(_app, _what, _name, obj, _options, _lines):
    """Skip namedtuple field aliases ("Alias for field number N") in autodoc."""
    doc = obj.__doc__
    is_field_alias = isinstance(doc, str) and doc.startswith(
        "Alias for field number",
    )
    # Returning None lets other handlers / the default behaviour decide.
    return True if is_field_alias else None
def setup(app):
    """Sphinx setup hook: register the autodoc member-skip filter above."""
    app.connect("autodoc-skip-member", skip_namedtuples)
# Project description (raw HTML), used as the theme's "description" below.
desc = 'Web interface for <a href="http://furius.ca/beancount/">Beancount</a>'
# Options for HTML output
html_theme = "alabaster"
html_static_path = ["static"]
html_theme_options = {
    "logo": "logo.png",
    "logo_name": True,
    "logo_text_align": "center",
    "description": desc,
    "github_user": "beancount",
    "github_repo": "fava",
    "github_button": "false",
    "show_powered_by": "false",
    "extra_nav_links": {
        "fava @ GitHub": "https://github.com/beancount/fava",
        "Chat": "https://gitter.im/beancount/fava",
        "Issue Tracker": "https://github.com/beancount/fava/issues",
    },
    "link": "#3572b0",
    "link_hover": "#1A2F59",
}
html_sidebars = {"**": ["about.html", "navigation.html"]}
htmlhelp_basename = "favadoc"
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,438
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/watcher.py
|
"""A simple file and folder watcher."""
from __future__ import annotations
from os import walk
from pathlib import Path
from typing import Iterable
class Watcher:
    """A simple file and folder watcher.

    For folders, only checks mtime of the folder and all subdirectories.
    So a file change won't be noticed, but only new/deleted files.
    """

    __slots__ = ("_files", "_folders", "last_checked")

    def __init__(self) -> None:
        self._files: list[Path] = []
        self._folders: list[Path] = []
        #: The newest mtime (in nanoseconds) seen by the previous check.
        self.last_checked = 0

    def update(self, files: Iterable[Path], folders: Iterable[Path]) -> None:
        """Update the folders/files to watch.

        Args:
            files: A list of file paths.
            folders: A list of paths to folders.
        """
        self._files = [*files]
        self._folders = [*folders]
        # Record the current state so subsequent checks only report
        # modifications made after this update.
        self.check()

    def check(self) -> bool:
        """Check for changes.

        Returns:
            `True` if there was a file change in one of the files or folders,
            `False` otherwise.
        """
        newest = 0
        try:
            for file in self._files:
                newest = max(newest, file.stat().st_mtime_ns)
        except FileNotFoundError:
            # A watched file disappeared - that is a change by definition.
            return True
        for folder in self._folders:
            for directory, _subdirs, _filenames in walk(folder):
                newest = max(newest, Path(directory).stat().st_mtime_ns)
        has_changed = newest != self.last_checked
        self.last_checked = newest
        return has_changed
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,439
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/tree.py
|
"""Account balance trees."""
from __future__ import annotations
import collections
from dataclasses import dataclass
from operator import attrgetter
from typing import Dict
from typing import Iterable
from typing import TYPE_CHECKING
from fava.beans.abc import Open
from fava.beans.account import parent as account_parent
from fava.context import g
from fava.core.conversion import cost_or_value
from fava.core.conversion import get_cost
from fava.core.inventory import CounterInventory
if TYPE_CHECKING: # pragma: no cover
import datetime
from fava.beans.abc import Directive
from fava.beans.prices import FavaPriceMap
from fava.beans.types import BeancountOptions
from fava.core.inventory import SimpleCounterInventory
@dataclass(frozen=True)
class SerialisedTreeNode:
    """A serialised TreeNode."""

    #: The account name.
    account: str
    #: The account's own balance (converted via cost_or_value).
    balance: SimpleCounterInventory
    #: The cumulative balance, including all child accounts.
    balance_children: SimpleCounterInventory
    #: The serialised child nodes.
    children: list[SerialisedTreeNode]
    #: Whether the account has any transactions.
    has_txns: bool
@dataclass(frozen=True)
class SerialisedTreeNodeWithCost(SerialisedTreeNode):
    """A serialised TreeNode with cost."""

    #: The account's own balance, reduced to cost.
    cost: SimpleCounterInventory
    #: The cumulative balance (including children), reduced to cost.
    cost_children: SimpleCounterInventory
class TreeNode:
    """A node in the account tree."""

    __slots__ = ("name", "children", "balance", "balance_children", "has_txns")

    def __init__(self, name: str) -> None:
        #: Account name.
        self.name: str = name
        #: A list of :class:`.TreeNode`, its children.
        self.children: list[TreeNode] = []
        #: The account balance.
        self.balance = CounterInventory()
        #: The cumulative account balance.
        self.balance_children = CounterInventory()
        #: Whether the account has any transactions.
        self.has_txns = False

    def serialise(
        self,
        conversion: str,
        prices: FavaPriceMap,
        end: datetime.date | None,
        with_cost: bool = False,
    ) -> SerialisedTreeNode | SerialisedTreeNodeWithCost:
        """Serialise the account.

        Args:
            conversion: The conversion to use.
            prices: The price map to use.
            end: A date to use for cost conversions.
            with_cost: Additionally convert to cost.
        """
        serialised_children = [
            node.serialise(conversion, prices, end, with_cost=with_cost)
            for node in sorted(self.children, key=attrgetter("name"))
        ]
        balance = cost_or_value(self.balance, conversion, prices, end)
        balance_children = cost_or_value(
            self.balance_children,
            conversion,
            prices,
            end,
        )
        if with_cost:
            return SerialisedTreeNodeWithCost(
                self.name,
                balance,
                balance_children,
                serialised_children,
                self.has_txns,
                self.balance.reduce(get_cost),
                self.balance_children.reduce(get_cost),
            )
        return SerialisedTreeNode(
            self.name,
            balance,
            balance_children,
            serialised_children,
            self.has_txns,
        )

    def serialise_with_context(
        self,
    ) -> SerialisedTreeNode | SerialisedTreeNodeWithCost:
        """Serialise, taking conversion/prices/end date from the app context."""
        return self.serialise(
            g.conversion,
            g.ledger.prices,
            g.filtered.end_date,
            with_cost=g.conversion == "at_value",
        )
class Tree(Dict[str, TreeNode]):
    """Account tree.

    Args:
        entries: A list of entries to compute balances from.
        create_accounts: A list of accounts that the tree should contain.
    """

    def __init__(
        self,
        entries: Iterable[Directive] | None = None,
        create_accounts: list[str] | None = None,
    ) -> None:
        # Start from an empty dict. (Previously `super().__init__(self)`
        # passed the still-empty dict to itself - a confusing no-op.)
        super().__init__()
        # The tree always contains a root node with the empty name.
        self.get("", insert=True)
        if create_accounts:
            for account in create_accounts:
                self.get(account, insert=True)
        if entries:
            account_balances: dict[str, CounterInventory] = (
                collections.defaultdict(CounterInventory)
            )
            for entry in entries:
                if isinstance(entry, Open):
                    self.get(entry.account, insert=True)
                # Entries without postings contribute no balance.
                for posting in getattr(entry, "postings", []):
                    account_balances[posting.account].add_position(posting)
            # Insert in sorted order for determinism.
            for name, balance in sorted(account_balances.items()):
                self.insert(name, balance)

    @property
    def accounts(self) -> list[str]:
        """The accounts in this tree."""
        return sorted(self.keys())

    def ancestors(self, name: str) -> Iterable[TreeNode]:
        """Ancestors of an account.

        Args:
            name: An account name.

        Yields:
            The ancestors of the given account from the bottom up.
        """
        while name:
            name = account_parent(name) or ""
            yield self.get(name)

    def insert(self, name: str, balance: CounterInventory) -> None:
        """Insert account with a balance.

        Insert account and update its balance and the balances of its
        ancestors.

        Args:
            name: An account name.
            balance: The balance of the account.
        """
        node = self.get(name, insert=True)
        node.balance.add_inventory(balance)
        node.balance_children.add_inventory(balance)
        node.has_txns = True
        # Propagate the balance up to every ancestor's cumulative balance.
        for parent_node in self.ancestors(name):
            parent_node.balance_children.add_inventory(balance)

    def get(  # type: ignore[override]
        self,
        name: str,
        insert: bool = False,
    ) -> TreeNode:
        """Get an account.

        Args:
            name: An account name.
            insert: If True, insert the name into the tree if it does not
                exist.

        Returns:
            TreeNode: The account of that name or an empty account if the
            account is not in the tree.
        """
        try:
            return self[name]
        except KeyError:
            node = TreeNode(name)
            if insert:
                if name:
                    # Recursively ensure all ancestors exist and register
                    # this node as a child of its parent.
                    parent = self.get(account_parent(name) or "", insert=True)
                    parent.children.append(node)
                self[name] = node
            return node

    def net_profit(
        self,
        options: BeancountOptions,
        account_name: str,
    ) -> TreeNode:
        """Calculate the net profit.

        Args:
            options: The Beancount options.
            account_name: The name to use for the account containing the net
                profit.
        """
        income = self.get(options["name_income"])
        expenses = self.get(options["name_expenses"])
        net_profit = Tree()
        net_profit.insert(
            account_name,
            income.balance_children + expenses.balance_children,
        )
        return net_profit.get(account_name)

    def cap(self, options: BeancountOptions, unrealized_account: str) -> None:
        """Transfer Income and Expenses, add conversions and unrealized gains.

        Args:
            options: The Beancount options.
            unrealized_account: The name of the account to post unrealized
                gains to (as a subaccount of Equity).
        """
        equity = options["name_equity"]
        # The root's cumulative balance at cost, negated - this is what is
        # needed to make the whole tree sum to zero.
        conversions = CounterInventory(
            {
                (currency, None): -number
                for currency, number in self.get("")
                .balance_children.reduce(get_cost)
                .items()
            },
        )
        # Add conversions
        self.insert(
            equity + ":" + options["account_current_conversions"],
            conversions,
        )
        # Insert unrealized gains.
        self.insert(
            equity + ":" + unrealized_account,
            -self.get("").balance_children,
        )
        # Transfer Income and Expenses
        self.insert(
            equity + ":" + options["account_current_earnings"],
            self.get(options["name_income"]).balance_children,
        )
        self.insert(
            equity + ":" + options["account_current_earnings"],
            self.get(options["name_expenses"]).balance_children,
        )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,440
|
beancount/fava
|
refs/heads/main
|
/tests/test_plugins_link_documents.py
|
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
from beancount.loader import load_file
from fava.beans.abc import Document
from fava.beans.abc import Transaction
from fava.beans.load import load_string
from fava.plugins.link_documents import DocumentError
if TYPE_CHECKING: # pragma: no cover
from fava.beans.types import LoaderResult
def test_plugins(tmp_path: Path) -> None:
    """End-to-end check of the link_documents plugin against real files."""
    # Create sample files
    expenses_foo = tmp_path / "documents" / "Expenses" / "Foo"
    expenses_foo.mkdir(parents=True)
    (expenses_foo / "2016-11-02 Test 1.pdf").touch()
    (expenses_foo / "2016-11-03 Test 2.pdf").touch()
    (expenses_foo / "2016-11-04 Test 3 discovered.pdf").touch()
    assets_cash = tmp_path / "documents" / "Assets" / "Cash"
    assets_cash.mkdir(parents=True)
    (assets_cash / "2016-11-05 Test 4.pdf").touch()
    (assets_cash / "Test 5.pdf").touch()
    expenses_foo_rel = Path("documents") / "Expenses" / "Foo"
    assets_cash_rel = Path("documents") / "Assets" / "Cash"
    beancount_file = tmp_path / "example.beancount"
    # NOTE(review): whitespace inside this ledger template is significant to
    # beancount (metadata and postings must be indented); the backslash
    # replacement escapes Windows path separators for the parser.
    beancount_file.write_text(dedent(f"""
        option "title" "Test"
        option "operating_currency" "EUR"
        option "documents" "{tmp_path / "documents"}"
        plugin "fava.plugins.link_documents"

        2016-10-30 open Expenses:Foo
        2016-10-31 open Assets:Cash

        2016-11-01 * "Foo" "Bar"
            document: "{expenses_foo / "2016-11-03 Test 2.pdf"}"
            document-2: "{assets_cash_rel / "2016-11-05 Test 4.pdf"}"
            Expenses:Foo 100 EUR
            Assets:Cash

        2016-11-07 * "Foo" "Bar"
            document: "{expenses_foo_rel / "2016-11-02 Test 1.pdf"}"
            document-2: "{assets_cash_rel / "2016-11-05 Test 4.pdf"}"
            Expenses:Foo 100 EUR
            Assets:Cash

        2016-11-06 document Assets:Cash "{assets_cash_rel / "Test 5.pdf"}"

        2017-11-06 balance Assets:Cash -200 EUR
            document: "{assets_cash_rel / "Test 5.pdf"}"
    """.replace("\\", "\\\\")))
    entries, errors, _ = load_file(str(beancount_file))

    assert not errors
    assert len(entries) == 10

    # The document entries for the explicitly linked files carry the
    # "linked" tag.
    assert isinstance(entries[3], Document)
    assert entries[3].tags
    assert "linked" in entries[3].tags
    assert isinstance(entries[4], Document)
    assert entries[4].tags
    assert "linked" in entries[4].tags

    # Document can be linked twice
    assert isinstance(entries[6], Document)
    assert entries[6].links
    assert len(entries[6].links) == 2

    # Transactions and the documents they reference share the same links.
    assert isinstance(entries[2], Transaction)
    assert isinstance(entries[8], Transaction)
    assert entries[2].links == entries[4].links
    assert entries[8].links == entries[3].links
def test_link_documents_error(load_doc: LoaderResult) -> None:
    # NOTE(review): the docstring below is not documentation - the `load_doc`
    # fixture appears to use it as the beancount file under test, so its
    # content must not be edited.
    """
    plugin "fava.plugins.link_documents"

    2016-10-31 open Expenses:Foo
    2016-10-31 open Assets:Cash

    2016-11-01 * "Foo" "Bar"
        document: "asdf"
        Expenses:Foo 100 EUR
        Assets:Cash
    """
    entries, errors, _ = load_doc

    # The unresolvable "asdf" document link yields exactly one error while
    # the two opens and the transaction still load.
    assert len(errors) == 1
    assert len(entries) == 3
def test_link_documents_missing(tmp_path: Path) -> None:
    """A `document:` link pointing at a non-existent file yields an error."""
    # NOTE(review): whitespace inside this template is significant to
    # beancount; the backslash replacement escapes Windows path separators.
    bfile = dedent(f"""
        option "documents" "{tmp_path}"
        plugin "fava.plugins.link_documents"

        2016-10-31 open Expenses:Foo
        2016-10-31 open Assets:Cash

        2016-11-01 * "Foo" "Bar"
            document: "{Path("test") / "Foobar.pdf"}"
            Expenses:Foo 100 EUR
            Assets:Cash
    """.replace("\\", "\\\\"))
    entries, errors, _ = load_string(bfile)

    assert len(errors) == 1
    assert isinstance(errors[0], DocumentError)
    assert len(entries) == 3
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,441
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/misc.py
|
"""Some miscellaneous reports."""
from __future__ import annotations
import datetime
import io
import re
from typing import TYPE_CHECKING
from beancount.core.amount import CURRENCY_RE
from fava.core.module_base import FavaModule
from fava.helpers import BeancountError
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Custom
from fava.beans.abc import Event
from fava.core import FavaLedger
SidebarLinks = list[tuple[str, str]]
class FavaError(BeancountError):
    """Generic Fava-specific error.

    Used e.g. for the missing-operating-currency warning below.
    """
class FavaMisc(FavaModule):
    """Provides access to some miscellaneous reports."""

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        #: User-chosen links to show in the sidebar.
        self.sidebar_links: SidebarLinks = []
        #: Upcoming events in the next few days.
        self.upcoming_events: list[Event] = []

    def load_file(self) -> None:
        entries_by_type = self.ledger.all_entries_by_type
        self.sidebar_links = sidebar_links(entries_by_type.Custom)
        self.upcoming_events = upcoming_events(
            entries_by_type.Event,
            self.ledger.fava_options.upcoming_events,
        )
        if not self.ledger.options["operating_currency"]:
            message = (
                "No operating currency specified. "
                "Please add one to your beancount file."
            )
            self.ledger.errors.append(FavaError(None, message, None))
def sidebar_links(custom_entries: list[Custom]) -> list[tuple[str, str]]:
    """Parse custom entries for links.

    They have the following format:

    2016-04-01 custom "fava-sidebar-link" "2014" "/income_statement/?time=2014"
    """
    return [
        (entry.values[0].value, entry.values[1].value)
        for entry in custom_entries
        if entry.type == "fava-sidebar-link"
    ]
def upcoming_events(events: list[Event], max_delta: int) -> list[Event]:
    """Parse entries for upcoming events.

    Args:
        events: A list of events.
        max_delta: Number of days that should be considered.

    Returns:
        A list of the Events in entries that are less than `max_delta` days
        away.
    """
    today = datetime.date.today()
    return [
        event for event in events if 0 <= (event.date - today).days < max_delta
    ]
# Splits a line into (description, signed number, currency-and-rest) groups
# so that `align` below can re-pad the whitespace in front of the number.
# Relies on beancount's CURRENCY_RE for the currency token.
ALIGN_RE = re.compile(
    rf'([^";]*?)\s+([-+]?\s*[\d,]+(?:\.\d*)?)\s+({CURRENCY_RE}\b.*)',
)
def align(string: str, currency_column: int) -> str:
    """Align currencies in one column."""
    aligned = io.StringIO()
    for line in string.splitlines():
        match = ALIGN_RE.match(line)
        if match is None:
            aligned.write(line)
        else:
            prefix, number, rest = match.groups()
            # Pad so that the number ends just before the currency column.
            padding = " " * (currency_column - len(prefix) - len(number) - 4)
            aligned.write(f"{prefix}{padding} {number} {rest}")
        aligned.write("\n")
    return aligned.getvalue()
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,442
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_ingest.py
|
from __future__ import annotations
import datetime
from pathlib import Path
from typing import Any
from typing import TYPE_CHECKING
import pytest
from beancount.ingest.importer import ImporterProtocol # type: ignore[import]
from fava.beans.abc import Amount
from fava.beans.abc import Note
from fava.beans.abc import Transaction
from fava.core.ingest import file_import_info
from fava.core.ingest import FileImporters
from fava.core.ingest import FileImportInfo
from fava.core.ingest import filepath_in_primary_imports_folder
from fava.helpers import FavaAPIError
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
from .conftest import GetFavaLedger
def test_ingest_file_import_info(
    test_data_dir: Path,
    get_ledger: GetFavaLedger,
) -> None:
    """file_import_info reads name, account, date and basename from importers."""
    class Imp(ImporterProtocol): # type: ignore[misc]
        # Minimal importer: identify() matches when `acc` is in the file name.
        def __init__(self, acc: str) -> None:
            self.acc = acc
        def name(self) -> str:
            return self.acc
        def identify(self, file: Any) -> bool:
            return self.acc in file.name
    class Invalid(ImporterProtocol): # type: ignore[misc]
        # Importer whose file_account hook raises, to exercise error wrapping.
        def __init__(self, acc: str) -> None:
            self.acc = acc
        def name(self) -> str:
            return self.acc
        def identify(self, file: Any) -> bool:
            return self.acc in file.name
        def file_account(self, _file: Any) -> bool:
            raise ValueError("Some error reason...")
    ingest_ledger = get_ledger("import")
    importer = next(iter(ingest_ledger.ingest.importers.values()))
    assert importer
    # The importer configured in the "import" ledger handles import.csv.
    info = file_import_info(str(test_data_dir / "import.csv"), importer)
    assert info.account == "Assets:Checking"
    # Without a file_account hook the account defaults to the empty string
    # and the date to today.
    info2 = file_import_info("/asdf/basename", Imp("rawfile"))
    assert isinstance(info2.account, str)
    assert info2 == FileImportInfo(
        "rawfile",
        "",
        datetime.date.today(),
        "basename",
    )
    # Exceptions raised by importer hooks surface as FavaAPIError.
    with pytest.raises(FavaAPIError) as err:
        file_import_info("/asdf/basename", Invalid("rawfile"))
    assert "Some error reason..." in err.value.message
def test_ingest_no_config(small_example_ledger: FavaLedger) -> None:
    """With no importers configured, nothing imports and extract errors."""
    imported = small_example_ledger.ingest.import_data()
    assert not imported
    with pytest.raises(FavaAPIError):
        small_example_ledger.ingest.extract("import.csv", "import_name")
def test_ingest_examplefile(
    test_data_dir: Path,
    get_ledger: GetFavaLedger,
) -> None:
    """Importing and extracting the example CSV yields the expected entries."""
    ingest_ledger = get_ledger("import")
    files = ingest_ledger.ingest.import_data()
    files_with_importers = [f for f in files if f.importers]
    assert len(files) > 10 # all files in the test datafolder
    # Only import.csv has a matching importer configured.
    assert files_with_importers == [
        FileImporters(
            name=str(test_data_dir / "import.csv"),
            basename="import.csv",
            importers=[
                FileImportInfo(
                    "<run_path>.TestImporter",
                    "Assets:Checking",
                    datetime.date.today(),
                    "examplebank.import.csv",
                ),
            ],
        ),
    ]
    entries = ingest_ledger.ingest.extract(
        str(test_data_dir / "import.csv"),
        "<run_path>.TestImporter",
    )
    assert len(entries) == 4
    # First extracted entry is a Note.
    assert entries[0].date == datetime.date(2017, 2, 12)
    assert isinstance(entries[0], Note)
    assert entries[0].comment == "Hinweis: Zinssatz auf 0,15% geändert"
    # Second entry is a two-posting transaction; the first posting has an
    # empty account (not yet assigned by the importer).
    assert isinstance(entries[1], Transaction)
    assert entries[1].date == datetime.date(2017, 2, 13)
    assert (
        entries[1].narration
        == "Payment to Company XYZ REF: 31000161205-6944556-0000463"
    )
    assert not entries[1].postings[0].account
    assert isinstance(entries[1].postings[0].units, Amount)
    assert entries[1].postings[0].units.number == 50.00
    assert entries[1].postings[0].units.currency == "EUR"
    assert entries[1].postings[1].account == "Assets:Checking"
    assert isinstance(entries[1].postings[1].units, Amount)
    assert entries[1].postings[1].units.number == -50.00
    assert entries[1].postings[1].units.currency == "EUR"
    # Duplicate detection marks the third entry but not the second.
    assert "__duplicate__" not in entries[1].meta
    assert "__duplicate__" in entries[2].meta
def test_filepath_in_primary_imports_folder(
    example_ledger: FavaLedger,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Filenames are sanitised and placed in the first import directory."""
    monkeypatch.setattr(example_ledger.fava_options, "import_dirs", ["/test"])

    def _expected(name: str) -> Path:
        return Path("/test").joinpath(name).resolve()

    # Path separators (and leading "..") are replaced by spaces.
    for raw_name, sanitised in [
        ("filename", "filename"),
        ("file/name", "file name"),
        ("/../file/name", " .. file name"),
    ]:
        assert filepath_in_primary_imports_folder(
            raw_name,
            example_ledger,
        ) == _expected(sanitised)

    # Without any configured import directory, an error is raised.
    monkeypatch.setattr(example_ledger.fava_options, "import_dirs", [])
    with pytest.raises(FavaAPIError):
        filepath_in_primary_imports_folder("filename", example_ledger)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,443
|
beancount/fava
|
refs/heads/main
|
/tests/test_cli.py
|
from __future__ import annotations
import sys
from socket import socket
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from time import sleep
from time import time
from typing import TYPE_CHECKING
import pytest
if TYPE_CHECKING: # pragma: no cover
from pathlib import Path
def get_port() -> int:
    """Return a currently free TCP port on localhost."""
    # Binding to port 0 makes the OS pick an available ephemeral port;
    # the socket is closed again by the context manager.
    with socket() as sock:
        sock.bind(("127.0.0.1", 0))
        port = sock.getsockname()[1]
    assert isinstance(port, int)
    return port
def output_contains(process: Popen[str], output: str, timeout: int) -> bool:
    """Poll the process' stdout for *output*.

    Reads lines until *output* is found (True) or *timeout* seconds have
    passed or stdout is missing (False).
    """
    deadline = time() + timeout
    while time() <= deadline and process.stdout:
        if output in process.stdout.readline():
            return True
        sleep(0.1)
    return False
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_cli(monkeypatch: pytest.MonkeyPatch, test_data_dir: Path) -> None:
    """Starting fava twice on the same port: the second invocation fails."""
    port = str(get_port())
    # Make sure the CLI uses the file given on the command line only.
    monkeypatch.delenv("BEANCOUNT_FILE", raising=False)
    args = ("fava", str(test_data_dir / "example.beancount"), "-p", port)
    with Popen(
        args, # noqa: S603
        stdout=PIPE,
        stderr=STDOUT,
        universal_newlines=True,
    ) as process:
        # Wait (up to 20s) for the first server to report that it started.
        assert output_contains(process, "Starting Fava on", 20)
        with Popen(
            args, # noqa: S603
            stdout=PIPE,
            stderr=STDOUT,
            universal_newlines=True,
        ) as process2:
            # The second instance cannot bind the busy port and exits.
            process2.wait()
            process.terminate()
            assert process2.stdout
            assert "in use" in "".join(process2.stdout.readlines())
            assert process2.returncode > 0
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,444
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/conversion.py
|
"""Commodity conversion helpers for Fava.
All functions in this module will be automatically added as template filters.
"""
from __future__ import annotations
from typing import overload
from typing import TYPE_CHECKING
from fava.beans import create
if TYPE_CHECKING: # pragma: no cover
import datetime
from beancount.core.inventory import Inventory
from fava.beans.abc import Amount
from fava.beans.abc import Position
from fava.beans.prices import FavaPriceMap
from fava.core.inventory import CounterInventory
from fava.core.inventory import SimpleCounterInventory
def get_units(pos: Position) -> Amount:
    """Return the raw units of a Position, without any conversion."""
    units_ = pos.units
    return units_
def get_cost(pos: Position) -> Amount:
    """Return the total cost of a Position.

    Falls back to the units when the position has no cost or the cost has
    no number.
    """
    cost_ = pos.cost
    if cost_ is None or cost_.number is None:
        return pos.units
    return create.amount((cost_.number * pos.units.number, cost_.currency))
def get_market_value(
    pos: Position,
    prices: FavaPriceMap,
    date: datetime.date | None = None,
) -> Amount:
    """Get the market value of a Position.

    This differs from the convert.get_value function in Beancount by returning
    the cost value if no price can be found.

    Args:
        pos: A Position.
        prices: A FavaPriceMap
        date: A datetime.date instance to evaluate the value at, or None.

    Returns:
        An Amount in the cost currency - valued at the price if one is known,
        otherwise at cost. Positions without a cost are returned as units.
    """
    units_ = pos.units
    cost_ = pos.cost
    if not cost_:
        return units_
    value_currency = cost_.currency
    price = prices.get_price((units_.currency, value_currency), date)
    number = (
        units_.number * price
        if price is not None
        else units_.number * cost_.number
    )
    return create.amount((number, value_currency))
def convert_position(
    pos: Position,
    target_currency: str,
    prices: FavaPriceMap,
    date: datetime.date | None = None,
) -> Amount:
    """Get the value of a Position in a particular currency.

    Args:
        pos: A Position.
        target_currency: The target currency to convert to.
        prices: A FavaPriceMap
        date: A datetime.date instance to evaluate the value at, or None.

    Returns:
        An Amount, with value converted - either directly or in two hops via
        the cost currency. If no conversion path is found, just the units
        are returned.
    """
    units_ = pos.units
    # try the direct conversion
    base_quote = (units_.currency, target_currency)
    price_number = prices.get_price(base_quote, date)
    if price_number is not None:
        return create.amount((units_.number * price_number, target_currency))
    cost_ = pos.cost
    if cost_:
        cost_currency = cost_.currency
        if cost_currency != target_currency:
            # Fall back to a two-hop conversion via the cost currency:
            # units -> cost currency -> target currency.
            base_quote1 = (units_.currency, cost_currency)
            rate1 = prices.get_price(base_quote1, date)
            if rate1 is not None:
                base_quote2 = (cost_currency, target_currency)
                rate2 = prices.get_price(base_quote2, date)
                if rate2 is not None:
                    return create.amount(
                        (units_.number * rate1 * rate2, target_currency),
                    )
    # No conversion path found - return the unconverted units.
    return units_
@overload
def units(inventory: Inventory) -> Inventory: # pragma: no cover
    ...
@overload
def units(
    inventory: CounterInventory,
) -> SimpleCounterInventory: # pragma: no cover
    ...
def units(
    inventory: Inventory | CounterInventory,
) -> Inventory | SimpleCounterInventory:
    """Get the units of an inventory."""
    # Reduce each position to its raw units.
    return inventory.reduce(get_units)
@overload
def cost(inventory: Inventory) -> Inventory: # pragma: no cover
    ...
@overload
def cost(
    inventory: CounterInventory,
) -> SimpleCounterInventory: # pragma: no cover
    ...
def cost(
    inventory: Inventory | CounterInventory,
) -> Inventory | SimpleCounterInventory:
    """Get the cost of an inventory."""
    # Reduce each position to its total cost (or units if cost-less).
    return inventory.reduce(get_cost)
@overload
def cost_or_value(
    inventory: Inventory,
    conversion: str,
    prices: FavaPriceMap,
    date: datetime.date | None,
) -> Inventory: # pragma: no cover
    ...
@overload
def cost_or_value(
    inventory: CounterInventory,
    conversion: str,
    prices: FavaPriceMap,
    date: datetime.date | None,
) -> SimpleCounterInventory: # pragma: no cover
    ...
def cost_or_value(
    inventory: Inventory | CounterInventory,
    conversion: str,
    prices: FavaPriceMap,
    date: datetime.date | None = None,
) -> Inventory | SimpleCounterInventory:
    """Get the cost or value of an inventory.

    Args:
        inventory: The inventory to reduce.
        conversion: "at_cost" (or empty) for cost, "at_value" for market
            value, "units" for raw units - any other string is treated as a
            currency to convert to.
        prices: A FavaPriceMap.
        date: The date to evaluate prices at, or None.
    """
    if not conversion or conversion == "at_cost":
        return inventory.reduce(get_cost)
    if conversion == "at_value":
        return inventory.reduce(get_market_value, prices, date)
    if conversion == "units":
        return inventory.reduce(get_units)
    return inventory.reduce(convert_position, conversion, prices, date)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,445
|
beancount/fava
|
refs/heads/main
|
/tests/conftest.py
|
"""Test fixtures."""
# pylint: disable=redefined-outer-name
from __future__ import annotations
import datetime
import os
import re
from pathlib import Path
from pprint import pformat
from textwrap import dedent
from typing import Any
from typing import Callable
from typing import Counter
from typing import TYPE_CHECKING
import pytest
from fava.application import create_app
from fava.beans.abc import Custom
from fava.beans.load import load_string
from fava.core import FavaLedger
from fava.core.budgets import parse_budgets
if TYPE_CHECKING: # pragma: no cover
from typing import Literal
from typing import Protocol
from typing import TypeAlias
from typing import TypeGuard
from flask.app import Flask
from flask.testing import FlaskClient
from fava.beans.abc import Directive
from fava.beans.types import LoaderResult
from fava.core.budgets import BudgetDict
class SnapshotFunc(Protocol):
    """Callable protocol for the snapshot function."""
    def __call__(self, data: Any, name: str = ..., /) -> None:
        """Check ``data`` against the stored snapshot (``name`` is optional)."""
        ...
@pytest.fixture(scope="session")
def test_data_dir() -> Path:
    """Directory that contains the Beancount test data files."""
    data_dir = Path(__file__).parent / "data"
    return data_dir
@pytest.fixture(scope="module")
def module_path(request: pytest.FixtureRequest) -> Path:
    """Path to the tested module."""
    # request.path is only available on newer pytest versions; fall back to
    # the older .fspath attribute if it is missing.
    fspath = getattr(request, "fspath") # noqa: B009
    return Path(getattr(request, "path", fspath))
@pytest.fixture(scope="module")
def snap_count() -> Counter[str]:
    """Counter for the number of snapshots per function in a module."""
    # Module-scoped so repeated snapshots within one test get numbered names.
    return Counter()
@pytest.fixture(scope="module")
def snap_dir(module_path: Path) -> Path:
    """Return (creating it if necessary) the module's snapshot directory."""
    directory = module_path.parent / "__snapshots__"
    if not directory.exists():
        directory.mkdir()
    return directory
@pytest.fixture()
def snapshot(
    request: pytest.FixtureRequest,
    test_data_dir: Path,
    module_path: Path,
    snap_dir: Path,
    snap_count: Counter[str],
) -> SnapshotFunc:
    """Create a snaphot for some given data.

    The returned function normalises the data (dates, hashes, mtimes, paths)
    and compares it against a snapshot file named after the test module and
    function.  Run the tests with ``SNAPSHOT_UPDATE=1`` to (re)write the
    snapshot files.
    """
    fn_name = request.function.__name__
    module_name = module_path.name
    def snapshot_data(data: Any, name: str | None = None) -> None:
        if os.environ.get("SNAPSHOT_IGNORE"):
            # For the tests with old dependencies, we avoid comparing the snapshots,
            # as they might change in subtle ways between dependency versions.
            return
        snap_count[fn_name] += 1
        filename = f"{module_name}-{fn_name}"
        # BUG FIX: the filename suffixes must extend the module-function
        # prefix, not replace it with a literal placeholder - otherwise all
        # named/repeated snapshots across modules collide in the same files.
        if name:
            filename = f"{filename}-{name}"
        elif snap_count[fn_name] > 1:
            filename = f"{filename}-{snap_count[fn_name]}"
        snap_file = snap_dir / filename
        # print strings directly, otherwise try pretty-printing
        out = data if isinstance(data, str) else pformat(data)
        # replace today
        out = out.replace(str(datetime.date.today()), "TODAY")
        # replace relative dates
        out = re.sub(r"\d+ days ago", "X days ago", out)
        # replace entry hashes
        out = re.sub(r'"[0-9a-f]{32}', '"ENTRY_HASH', out)
        out = re.sub(r"context-[0-9a-f]{32}", "context-ENTRY_HASH", out)
        # replace mtimes
        out = re.sub(r"mtime=\d+", "mtime=MTIME", out)
        out = re.sub(r'id="ledger-mtime">\d+', 'id="ledger-mtime">MTIME', out)
        # replace env-dependant info
        out = out.replace('have_excel": false', 'have_excel": true')
        for dir_path, replacement in [
            (str(test_data_dir), "TEST_DATA_DIR"),
        ]:
            if os.name == "nt":
                # On Windows, paths appear with escaped backslashes in reprs.
                search = dir_path.replace("\\", "\\\\") + "\\\\"
                out = out.replace(search, replacement + "/")
            else:
                out = out.replace(dir_path, replacement)
        if os.environ.get("SNAPSHOT_UPDATE"):
            snap_file.write_text(out, "utf-8")
        else:
            contents = (
                snap_file.read_text("utf-8") if snap_file.exists() else ""
            )
            assert out == contents, (
                "Snapshot test failed. Snapshots can be updated with "
                "`SNAPSHOT_UPDATE=1 pytest`"
            )
    return snapshot_data
@pytest.fixture(scope="session")
def app(test_data_dir: Path) -> Flask:
    """Get the Fava Flask app."""
    # Session-scoped: the ledgers are loaded once (load=True) and shared
    # across all tests that use this fixture.
    fava_app = create_app(
        [
            test_data_dir / filename
            for filename in [
                "long-example.beancount",
                "example.beancount",
                "extension-report-example.beancount",
                "import.beancount",
                "query-example.beancount",
                "errors.beancount",
                "off-by-one.beancount",
                "invalid-unicode.beancount",
            ]
        ],
        load=True,
    )
    # Enable Flask's testing mode (exceptions propagate to the test client).
    fava_app.testing = True
    return fava_app
@pytest.fixture()
def test_client(app: Flask) -> FlaskClient:
    """Get a test client for the Fava Flask app."""
    client = app.test_client()
    return client
@pytest.fixture()
def load_doc(request: pytest.FixtureRequest) -> LoaderResult:
    """Load the requesting test's docstring as a Beancount file."""
    return load_string(dedent(request.function.__doc__))
@pytest.fixture()
def load_doc_entries(load_doc: LoaderResult) -> list[Directive]:
    """Return just the entries of the docstring-loaded ledger."""
    entries, _, _ = load_doc
    return entries
def _is_custom_entries_list(
    entries: list[Directive],
) -> TypeGuard[list[Custom]]:
    """Check whether the list contains only Custom entries."""
    for entry in entries:
        if not isinstance(entry, Custom):
            return False
    return True
@pytest.fixture()
def load_doc_custom_entries(load_doc_entries: list[Directive]) -> list[Custom]:
    """Load the docstring as Beancount custom entries."""
    # Narrowing assert - the docstring must contain only custom entries.
    assert _is_custom_entries_list(load_doc_entries)
    return load_doc_entries
@pytest.fixture()
def budgets_doc(load_doc_custom_entries: list[Custom]) -> BudgetDict:
    """Parse the budgets from the custom entries in the docstring."""
    parsed_budgets, _errors = parse_budgets(load_doc_custom_entries)
    return parsed_budgets
if TYPE_CHECKING:
    #: Slugs of the ledgers that are loaded for the test cases.
    LedgerSlug: TypeAlias = Literal[
        "example",
        "query-example",
        "long-example",
        "extension-report",
        "import",
        "off-by-one",
        "invalid-unicode",
    ]
    #: Getter returning the loaded FavaLedger for a given slug.
    GetFavaLedger: TypeAlias = Callable[[LedgerSlug], FavaLedger]
@pytest.fixture(scope="session")
def get_ledger(app: Flask) -> GetFavaLedger:
    """Getter for one of the ledgers loaded into the app."""
    def _get_ledger(name: LedgerSlug) -> FavaLedger:
        ledgers = app.config["LEDGERS"]
        assert name in ledgers, ledgers.keys()
        ledger = ledgers[name]
        assert isinstance(ledger, FavaLedger)
        return ledger
    return _get_ledger
@pytest.fixture()
def small_example_ledger(get_ledger: GetFavaLedger) -> FavaLedger:
    """Get the small example ledger (slug ``example``)."""
    return get_ledger("example")
@pytest.fixture()
def example_ledger(get_ledger: GetFavaLedger) -> FavaLedger:
    """Get the long example ledger (slug ``long-example``)."""
    return get_ledger("long-example")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.