# Snapshot metadata (from hosting page): commit c3c0d39, 6,299 bytes, 182 lines.
from .database import Database
from .query import TargetProfile, WeightedLeximaxOptimizer, WeightedSumOptimizer
from .utils import str_to_date
from .emotions import EmotionWheel
import argparse
import logging
import os
import sys
class CliOptions:
    """
    Command-line interface for the art-pieces database.

    Instantiating this class parses ``sys.argv`` with :mod:`argparse` and
    immediately dispatches to the handler registered for the chosen
    sub-command (``check-csv``, ``query`` or ``list-emotions``).
    """

    def __init__(self):
        """
        Loads the options using argparse and checks them, exiting the process
        (exit code 3, see :meth:`exit_on_param_error`) in case the arguments
        do not fit the requirements.
        """
        parser = argparse.ArgumentParser(
            prog="art_pieces_db", description="Manage the database of art pieces"
        )
        subparsers = parser.add_subparsers(required=True)

        parser_check = subparsers.add_parser(
            "check-csv", help="checks the CSV input data and exit"
        )
        parser_check.add_argument("input_csv")
        parser_check.set_defaults(func=self.execute_check_csv)

        parser_query = subparsers.add_parser(
            "query",
            help="query the CSV input data to find the pieces that are the closest to a target profile",
        )
        parser_query.add_argument("input_csv")
        parser_query.add_argument("--name")
        parser_query.add_argument("--date")
        parser_query.add_argument("--emotion")
        parser_query.add_argument("--place")
        parser_query.add_argument(
            "--aggregator", choices=["sum", "leximax"], default="leximax"
        )
        parser_query.add_argument("--limit", type=int, default=10)
        # One similarity weight per target-profile dimension; all default to 1.0.
        for dimension in ("name", "date", "emotion", "place"):
            parser_query.add_argument(
                f"--weight-{dimension}",
                type=float,
                default=1.0,
                help=f"Weight for {dimension} similarity (default: 1.0)",
            )
        parser_query.set_defaults(func=self.execute_query)

        parser_emotions = subparsers.add_parser(
            "list-emotions", help="list all valid emotions from Plutchik's wheel"
        )
        parser_emotions.set_defaults(func=self.execute_list_emotions)

        args = parser.parse_args()
        args.func(args)

    def execute_check_csv(self, args):
        """
        Checks an input CSV file by loading it; exits on unreadable input.
        """
        CliOptions.load_database(args)

    def execute_query(self, args):
        """
        Query the CSV input data to find the pieces that are the closest to a
        target profile, writing the best ``args.limit`` matches to stdout as CSV.
        """
        database = CliOptions.load_database(args)
        optimizer = CliOptions.create_optimizer(args)
        df = optimizer.optimize_max(database).head(args.limit)
        df.index.name = "result_index"
        df.to_csv(
            sys.stdout,
            columns=[
                "database_id",
                "related_names",
                "related_dates",
                "related_places",
                "related_emotions",
                "score",
            ],
        )

    def execute_list_emotions(self, args):
        """
        List all valid emotions from Plutchik's wheel on stdout.
        """
        wheel = EmotionWheel()
        print("\nPlutchik's Wheel of Emotions")
        print("=" * 50)
        print("\nPrimary Emotions with Intensity Levels:")
        print("-" * 50)
        for primary, emotion in wheel.emotions.items():
            print(f"\n{primary.value.upper()}:")
            print(f"  Mild: {emotion.mild}")
            print(f"  Basic: {emotion.basic}")
            print(f"  Intense: {emotion.intense}")
        print("\n\nEmotion Opposites:")
        print("-" * 50)
        # Each opposite relation appears twice in wheel.opposites (a->b and
        # b->a); the sorted pair set prints each relation only once.
        shown = set()
        for e1, e2 in wheel.opposites.items():
            pair = tuple(sorted([e1.value, e2.value]))
            if pair not in shown:
                print(f"  {e1.value} <-> {e2.value}")
                shown.add(pair)
        print("\n\nEmotion Combinations (Dyads):")
        print("-" * 50)
        for (e1, e2), result in sorted(wheel.dyads.items()):
            print(f"  {e1} + {e2} = {result}")

    @staticmethod
    def load_database(args):
        """
        Load the art-pieces database from ``args.input_csv``.

        Exits the process (code 3) when the file is not readable.

        Returns:
            Database: the database read from the CSV file.
        """
        csv_file = os.path.abspath(args.input_csv)
        if not os.access(csv_file, os.R_OK):
            logging.fatal(f"cannot read input file {csv_file}")
            CliOptions.exit_on_param_error()
        logging.info(f"reading CSV file {csv_file}")
        database = Database(csv_file)
        logging.info(f"read a database with {database.n_pieces()} art pieces")
        return database

    @staticmethod
    def create_optimizer(args):
        """
        Build the optimizer for the target profile and weights given in *args*.

        Exits the process (code 3) when ``args.date`` cannot be parsed.

        Returns:
            WeightedSumOptimizer or WeightedLeximaxOptimizer, depending on
            ``args.aggregator``.
        """
        profile = TargetProfile()
        if args.name is not None:
            profile.set_target_name(args.name)
        if args.date is not None:
            try:
                profile.set_target_date(str_to_date(args.date))
            except ValueError:
                logging.fatal(
                    f'cannot translate argument "{args.date}" into a date (type e.g. 25/12/2025)'
                )
                CliOptions.exit_on_param_error()
        if args.emotion is not None:
            profile.set_target_emotion(args.emotion.lower())
        if args.place is not None:
            profile.set_target_place(args.place)
        logging.info(f"target profile is {profile}")
        weights = {
            "related_names": args.weight_name,
            "related_dates": args.weight_date,
            "related_emotions": args.weight_emotion,
            "related_places": args.weight_place,
        }
        if args.aggregator == "sum":
            logging.info("aggregator is Sum")
            return WeightedSumOptimizer(profile, weights)
        elif args.aggregator == "leximax":
            logging.info("aggregator is Leximax")
            return WeightedLeximaxOptimizer(profile, weights)
        else:
            # Defensive: unreachable in practice because argparse restricts
            # --aggregator to the declared choices.
            logging.fatal(f'unknown aggregator "{args.aggregator}"')
            CliOptions.exit_on_param_error()

    @staticmethod
    def exit_on_param_error():
        """
        Terminate the process with exit code 3 after a parameter error.
        """
        # sys.exit raises SystemExit so logging handlers and other cleanup can
        # run; the previous os._exit(3) skipped all interpreter shutdown.
        sys.exit(3)